sched_ext: Skip past-sched_ext_dead() tasks in scx_task_iter_next_locked()

scx_task_iter's cgroup-scoped mode can return tasks whose
sched_ext_dead() has already completed: cgroup_task_dead() removes the
task from cset->tasks only after sched_ext_dead() has run in
finish_task_switch(), and on PREEMPT_RT the removal is further deferred
to irq work. The global mode is fine - sched_ext_dead() removes the
task from scx_tasks via list_del_init() first, so a global walk can
never see a task past sched_ext_dead().
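
As an illustration of the window, here is a minimal userspace model of
that teardown ordering. It is a sketch, not kernel code: the model_*
functions and the off_tasks / on_cset_list fields are invented
stand-ins for %SCX_TASK_OFF_TASKS and cset->tasks membership.

  #include <pthread.h>
  #include <stdbool.h>

  /* Stand-in for a task; rq_lock models the per-task rq lock. */
  struct task {
  	pthread_mutex_t rq_lock;
  	bool off_tasks;		/* models %SCX_TASK_OFF_TASKS */
  	bool on_cset_list;	/* models membership on cset->tasks */
  };

  /* finish_task_switch() order: the SCX teardown runs first... */
  static void model_sched_ext_dead(struct task *p)
  {
  	pthread_mutex_lock(&p->rq_lock);
  	p->off_tasks = true;	/* dead from the SCX POV */
  	pthread_mutex_unlock(&p->rq_lock);
  }

  /* ...while removal from the cset list happens later, possibly from
   * deferred irq work on PREEMPT_RT. Until then, a cgroup-scoped walk
   * can still encounter @p. */
  static void model_cgroup_task_dead(struct task *p)
  {
  	p->on_cset_list = false;
  }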

Callers (sub-sched enable prep/abort/apply, scx_sub_disable(),
scx_fail_parent()) assume that returned tasks are still on @sch and
otherwise trip WARN_ON_ONCE() or operate on torn-down state.

Fix it by setting %SCX_TASK_OFF_TASKS in sched_ext_dead() under @p's rq
lock and making scx_task_iter_next_locked() skip flagged tasks under
the same lock. As the setter and the reader serialize on the per-task
rq lock, there is no race window.
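
Continuing the sketch above, the fix pattern is to test the flag under
the same per-task lock that the setter holds. A hypothetical iterator
over the model (again illustrative, not the kernel function):

  /* Return the first live task with its rq_lock held, mirroring what
   * scx_task_iter_next_locked() does. The on_cset_list check can race
   * with model_cgroup_task_dead(), but the off_tasks test under
   * rq_lock catches any task whose SCX teardown already finished. */
  static struct task *model_iter_next_locked(struct task **tasks, int n)
  {
  	for (int i = 0; i < n; i++) {
  		struct task *p = tasks[i];

  		if (!p->on_cset_list)
  			continue;
  		pthread_mutex_lock(&p->rq_lock);
  		if (p->off_tasks) {
  			/* already past model_sched_ext_dead(), skip */
  			pthread_mutex_unlock(&p->rq_lock);
  			continue;
  		}
  		return p;
  	}
  	return NULL;
  }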

Signed-off-by: Tejun Heo <tj@kernel.org>
Tejun Heo 2026-04-27 14:16:35 -10:00
parent 60f21a2649
commit ff9eda4ea9
2 changed files with 27 additions and 9 deletions


@@ -101,6 +101,7 @@ enum scx_ent_flags {
 	SCX_TASK_DEQD_FOR_SLEEP	= 1 << 3,	/* last dequeue was for SLEEP */
 	SCX_TASK_SUB_INIT	= 1 << 4,	/* task being initialized for a sub sched */
 	SCX_TASK_IMMED		= 1 << 5,	/* task is on local DSQ with %SCX_ENQ_IMMED */
+	SCX_TASK_OFF_TASKS	= 1 << 6,	/* removed from scx_tasks by sched_ext_dead() */
 
 	/*
 	 * Bits 8 and 9 are used to carry task state:


@@ -928,16 +928,27 @@ static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter)
 		 *
 		 * Test for idle_sched_class as only init_tasks are on it.
 		 */
-		if (p->sched_class != &idle_sched_class)
-			break;
+		if (p->sched_class == &idle_sched_class)
+			continue;
+
+		iter->rq = task_rq_lock(p, &iter->rf);
+		iter->locked_task = p;
+		/*
+		 * cgroup_task_dead() removes the dead tasks from cset->tasks
+		 * after sched_ext_dead() and cgroup iteration may see tasks
+		 * which already finished sched_ext_dead(). %SCX_TASK_OFF_TASKS
+		 * is set by sched_ext_dead() under @p's rq lock. Test it to
+		 * avoid visiting tasks which are already dead from SCX POV.
+		 */
+		if (p->scx.flags & SCX_TASK_OFF_TASKS) {
+			__scx_task_iter_rq_unlock(iter);
+			continue;
+		}
+
+		return p;
 	}
-
-	if (!p)
-		return NULL;
-
-	iter->rq = task_rq_lock(p, &iter->rf);
-	iter->locked_task = p;
-	return p;
+
+	return NULL;
 }
 
 /**
@@ -3850,6 +3861,11 @@ void sched_ext_dead(struct task_struct *p)
 	/*
 	 * @p is off scx_tasks and wholly ours. scx_root_enable()'s READY ->
 	 * ENABLED transitions can't race us. Disable ops for @p.
+	 *
+	 * %SCX_TASK_OFF_TASKS synchronizes against cgroup task iteration - see
+	 * scx_task_iter_next_locked(). NONE tasks need no marking: cgroup
+	 * iteration is only used from sub-sched paths, which require root
+	 * enabled. Root enable transitions every live task to at least READY.
 	 */
 	if (scx_get_task_state(p) != SCX_TASK_NONE) {
 		struct rq_flags rf;
@@ -3857,6 +3873,7 @@ void sched_ext_dead(struct task_struct *p)
 
 		rq = task_rq_lock(p, &rf);
 		scx_disable_and_exit_task(scx_task_sched(p), p);
+		p->scx.flags |= SCX_TASK_OFF_TASKS;
 		task_rq_unlock(rq, p, &rf);
 	}
 }
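
To tie the sketch from the commit message together, a small driver that
mimics the exact window this patch closes - the task has finished
sched_ext_dead() but the deferred cgroup_task_dead() has not yet
removed it from the list - and checks that the model iterator skips it
(purely illustrative, built on the model_* functions above):

  #include <assert.h>

  int main(void)
  {
  	static struct task t = {
  		.rq_lock = PTHREAD_MUTEX_INITIALIZER,
  		.on_cset_list = true,
  	};
  	struct task *tasks[] = { &t };

  	model_sched_ext_dead(&t);	/* SCX teardown done... */
  	/* ...but @t is still on the list: must not be returned */
  	assert(!model_iter_next_locked(tasks, 1));

  	model_cgroup_task_dead(&t);	/* deferred removal finally runs */
  	assert(!model_iter_next_locked(tasks, 1));
  	return 0;
  }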