sched_ext: Plumb enq_flags through the consume path

Add an enq_flags parameter to consume_dispatch_q() and consume_remote_task(),
and pass it through to move_{local,remote}_task_to_local_dsq(). All callers
currently pass 0.

No functional change. This prepares for SCX_ENQ_IMMED support on the consume
path.
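
As an illustration (not part of this change), once SCX_ENQ_IMMED is wired up,
a consume-path caller could request it simply by passing the flag instead of
0, e.g.:

	consume_dispatch_q(sch, rq, dsq, SCX_ENQ_IMMED);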

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Andrea Righi <arighi@nvidia.com>
commit da32a2986e
parent 98d709cba3
Author: Tejun Heo
Date:   2026-03-13 09:43:23 -10:00

@@ -2355,13 +2355,14 @@ static bool unlink_dsq_and_lock_src_rq(struct task_struct *p,
 	       !WARN_ON_ONCE(src_rq != task_rq(p));
 }
 
-static bool consume_remote_task(struct rq *this_rq, struct task_struct *p,
+static bool consume_remote_task(struct rq *this_rq,
+				struct task_struct *p, u64 enq_flags,
 				struct scx_dispatch_q *dsq, struct rq *src_rq)
 {
 	raw_spin_rq_unlock(this_rq);
 
 	if (unlink_dsq_and_lock_src_rq(p, dsq, src_rq)) {
-		move_remote_task_to_local_dsq(p, 0, src_rq, this_rq);
+		move_remote_task_to_local_dsq(p, enq_flags, src_rq, this_rq);
 		return true;
 	} else {
 		raw_spin_rq_unlock(src_rq);
@@ -2441,7 +2442,7 @@ static struct rq *move_task_between_dsqs(struct scx_sched *sch,
 }
 
 static bool consume_dispatch_q(struct scx_sched *sch, struct rq *rq,
-			       struct scx_dispatch_q *dsq)
+			       struct scx_dispatch_q *dsq, u64 enq_flags)
 {
 	struct task_struct *p;
 retry:
@@ -2471,13 +2472,13 @@ static bool consume_dispatch_q(struct scx_sched *sch, struct rq *rq,
 
 		if (rq == task_rq) {
 			task_unlink_from_dsq(p, dsq);
-			move_local_task_to_local_dsq(p, 0, dsq, rq);
+			move_local_task_to_local_dsq(p, enq_flags, dsq, rq);
 			raw_spin_unlock(&dsq->lock);
 			return true;
 		}
 
 		if (task_can_run_on_remote_rq(sch, p, rq, false)) {
-			if (likely(consume_remote_task(rq, p, dsq, task_rq)))
+			if (likely(consume_remote_task(rq, p, enq_flags, dsq, task_rq)))
 				return true;
 			goto retry;
 		}
@@ -2491,7 +2492,7 @@ static bool consume_global_dsq(struct scx_sched *sch, struct rq *rq)
 {
 	int node = cpu_to_node(cpu_of(rq));
 
-	return consume_dispatch_q(sch, rq, &sch->pnode[node]->global_dsq);
+	return consume_dispatch_q(sch, rq, &sch->pnode[node]->global_dsq, 0);
 }
 
 /**
@@ -2727,7 +2728,7 @@ scx_dispatch_sched(struct scx_sched *sch, struct rq *rq,
 	if (bypass_dsp_enabled(sch)) {
 		/* if @sch is bypassing, only the bypass DSQs are active */
 		if (scx_bypassing(sch, cpu))
-			return consume_dispatch_q(sch, rq, bypass_dsq(sch, cpu));
+			return consume_dispatch_q(sch, rq, bypass_dsq(sch, cpu), 0);
 
 #ifdef CONFIG_EXT_SUB_SCHED
 		/*
@@ -2745,7 +2746,7 @@ scx_dispatch_sched(struct scx_sched *sch, struct rq *rq,
 		struct scx_sched_pcpu *pcpu = per_cpu_ptr(sch->pcpu, cpu);
 
 		if (!(pcpu->bypass_host_seq++ % SCX_BYPASS_HOST_NTH) &&
-		    consume_dispatch_q(sch, rq, bypass_dsq(sch, cpu))) {
+		    consume_dispatch_q(sch, rq, bypass_dsq(sch, cpu), 0)) {
 			__scx_add_event(sch, SCX_EV_SUB_BYPASS_DISPATCH, 1);
 			return true;
 		}
@@ -2817,7 +2818,7 @@ scx_dispatch_sched(struct scx_sched *sch, struct rq *rq,
 	 * scheduler's ops.dispatch() doesn't yield any tasks.
 	 */
 	if (bypass_dsp_enabled(sch))
-		return consume_dispatch_q(sch, rq, bypass_dsq(sch, cpu));
+		return consume_dispatch_q(sch, rq, bypass_dsq(sch, cpu), 0);
 
 	return false;
 }
@@ -8195,7 +8196,7 @@ __bpf_kfunc bool scx_bpf_dsq_move_to_local(u64 dsq_id, const struct bpf_prog_aux
 		return false;
 	}
 
-	if (consume_dispatch_q(sch, dspc->rq, dsq)) {
+	if (consume_dispatch_q(sch, dspc->rq, dsq, 0)) {
 		/*
 		 * A successfully consumed task can be dequeued before it starts
 		 * running while the CPU is trying to migrate other dispatched
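
For reference, scx_bpf_dsq_move_to_local() is the kfunc a BPF scheduler calls
from its ops.dispatch() callback to pull a task from a DSQ onto the CPU's
local DSQ; with this commit it reaches consume_dispatch_q() with enq_flags of
0. A minimal sketch of such a callback (illustrative only; SHARED_DSQ is an
assumed DSQ id, created beforehand with scx_bpf_create_dsq()):

	#define SHARED_DSQ	0	/* hypothetical DSQ id created in ops.init() */

	void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
	{
		/* Pull one task from the shared DSQ onto this CPU's local
		 * DSQ; kernel-side this lands in consume_dispatch_q(). */
		scx_bpf_dsq_move_to_local(SHARED_DSQ);
	}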