sched_ext: Reorganize enable/disable path for multi-scheduler support
In preparation for multi-scheduler support, reorganize the enable and
disable paths so that the scheduler instance being operated on is
explicit:

- Extract scx_root_disable() from scx_disable_workfn().
- Rename scx_enable_workfn() to scx_root_enable_workfn().
- Change scx_disable() to take an @sch parameter and, for consistency,
  queue disable_work only when scx_claim_exit() succeeds.
- Move exit_kind validation into scx_claim_exit().
- Make the sysrq handler print a message when no scheduler is loaded.

These changes don't materially affect user-visible behavior.
v2: Keep scx_enable() name as-is and only rename the workfn to
scx_root_enable_workfn(). Change scx_enable() return type to s32.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Andrea Righi <arighi@nvidia.com>
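
For orientation, the exit_kind lifecycle the reorganized path settles on
is SCX_EXIT_NONE -> <claimed kind> -> SCX_EXIT_DONE: scx_claim_exit()
performs the NONE -> kind transition exactly once, and
scx_disable_workfn() later acks it with kind -> DONE. Below is a minimal
user-space model of that protocol using C11 atomics in place of the
kernel's atomic_t; the enum values and helper names are illustrative,
not the kernel's:

#include <stdatomic.h>
#include <stdio.h>

/* illustrative stand-ins for the SCX_EXIT_* kinds */
enum exit_kind { EXIT_NONE, EXIT_DONE, EXIT_UNREG, EXIT_SYSRQ };

static _Atomic int exit_kind = EXIT_NONE;

/* models scx_claim_exit(): NONE -> @kind, only the first caller wins */
static int claim_exit(int kind)
{
	int none = EXIT_NONE;

	return atomic_compare_exchange_strong(&exit_kind, &none, kind);
}

/* models the workfn's ack: @kind -> DONE, returns the claimed kind */
static int ack_exit(void)
{
	int kind = atomic_load(&exit_kind);

	while (kind != EXIT_DONE &&
	       !atomic_compare_exchange_weak(&exit_kind, &kind, EXIT_DONE))
		;	/* cmpxchg failure reloads @kind; retry */
	return kind;
}

int main(void)
{
	printf("first claim: %d\n", claim_exit(EXIT_UNREG));	/* 1: wins */
	printf("second claim: %d\n", claim_exit(EXIT_SYSRQ));	/* 0: loses */
	printf("acked kind: %d\n", ack_exit());			/* EXIT_UNREG */
	return 0;
}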
parent 0454a604b9
commit dbd542a8fa
@@ -3267,8 +3267,8 @@ void sched_ext_dead(struct task_struct *p)
 	raw_spin_unlock_irqrestore(&scx_tasks_lock, flags);
 
 	/*
-	 * @p is off scx_tasks and wholly ours. scx_enable()'s READY -> ENABLED
-	 * transitions can't race us. Disable ops for @p.
+	 * @p is off scx_tasks and wholly ours. scx_root_enable()'s READY ->
+	 * ENABLED transitions can't race us. Disable ops for @p.
 	 */
 	if (scx_get_task_state(p) != SCX_TASK_NONE) {
 		struct rq_flags rf;
@@ -4430,24 +4430,12 @@ static void free_kick_syncs(void)
 	}
 }
 
-static void scx_disable_workfn(struct kthread_work *work)
+static void scx_root_disable(struct scx_sched *sch)
 {
-	struct scx_sched *sch = container_of(work, struct scx_sched, disable_work);
 	struct scx_exit_info *ei = sch->exit_info;
 	struct scx_task_iter sti;
 	struct task_struct *p;
-	int kind, cpu;
-
-	kind = atomic_read(&sch->exit_kind);
-	while (true) {
-		if (kind == SCX_EXIT_DONE)	/* already disabled? */
-			return;
-		WARN_ON_ONCE(kind == SCX_EXIT_NONE);
-		if (atomic_try_cmpxchg(&sch->exit_kind, &kind, SCX_EXIT_DONE))
-			break;
-	}
-	ei->kind = kind;
-	ei->reason = scx_exit_reason(ei->kind);
+	int cpu;
 
 	/* guarantee forward progress by bypassing scx_ops */
 	scx_bypass(true);
@@ -4591,6 +4579,9 @@ static bool scx_claim_exit(struct scx_sched *sch, enum scx_exit_kind kind)
 
 	lockdep_assert_preemption_disabled();
 
+	if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE))
+		kind = SCX_EXIT_ERROR;
+
 	if (!atomic_try_cmpxchg(&sch->exit_kind, &none, kind))
 		return false;
 
@@ -4603,21 +4594,31 @@ static bool scx_claim_exit(struct scx_sched *sch, enum scx_exit_kind kind)
 	return true;
 }
 
-static void scx_disable(enum scx_exit_kind kind)
+static void scx_disable_workfn(struct kthread_work *work)
 {
-	struct scx_sched *sch;
+	struct scx_sched *sch = container_of(work, struct scx_sched, disable_work);
+	struct scx_exit_info *ei = sch->exit_info;
+	int kind;
 
-	if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE))
-		kind = SCX_EXIT_ERROR;
-
-	rcu_read_lock();
-	sch = rcu_dereference(scx_root);
-	if (sch) {
-		guard(preempt)();
-		scx_claim_exit(sch, kind);
-		kthread_queue_work(sch->helper, &sch->disable_work);
+	kind = atomic_read(&sch->exit_kind);
+	while (true) {
+		if (kind == SCX_EXIT_DONE)	/* already disabled? */
+			return;
+		WARN_ON_ONCE(kind == SCX_EXIT_NONE);
+		if (atomic_try_cmpxchg(&sch->exit_kind, &kind, SCX_EXIT_DONE))
+			break;
 	}
-	rcu_read_unlock();
+	ei->kind = kind;
+	ei->reason = scx_exit_reason(ei->kind);
+
+	scx_root_disable(sch);
+}
+
+static void scx_disable(struct scx_sched *sch, enum scx_exit_kind kind)
+{
+	guard(preempt)();
+	if (scx_claim_exit(sch, kind))
+		kthread_queue_work(sch->helper, &sch->disable_work);
 }
 
 static void dump_newline(struct seq_buf *s)
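
The point of the split above is that scx_disable() queues disable_work
only when its scx_claim_exit() succeeds, so concurrent disable requests
cannot queue the work more than once. A hedged user-space sketch of that
property, with an atomic counter standing in for kthread_queue_work()
(pthreads; names illustrative, not the kernel's):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static _Atomic int exit_kind;	/* 0 models SCX_EXIT_NONE */
static _Atomic int queued;	/* stands in for kthread_queue_work() */

/* models scx_disable(): claim first, queue only on success */
static void *disable_path(void *arg)
{
	int none = 0, kind = (int)(long)arg;

	if (atomic_compare_exchange_strong(&exit_kind, &none, kind))
		atomic_fetch_add(&queued, 1);
	return NULL;
}

int main(void)
{
	pthread_t t[8];

	for (long i = 0; i < 8; i++)
		pthread_create(&t[i], NULL, disable_path, (void *)(i + 2));
	for (int i = 0; i < 8; i++)
		pthread_join(t[i], NULL);

	/* exactly one claim wins regardless of interleaving */
	printf("queued %d time(s), kind %d\n",
	       atomic_load(&queued), atomic_load(&exit_kind));
	return 0;
}

Built with cc -pthread, this always reports exactly one queued work item
no matter how the eight claimers interleave.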
@@ -5135,10 +5136,9 @@ struct scx_enable_cmd {
 	int ret;
 };
 
-static void scx_enable_workfn(struct kthread_work *work)
+static void scx_root_enable_workfn(struct kthread_work *work)
 {
-	struct scx_enable_cmd *cmd =
-		container_of(work, struct scx_enable_cmd, work);
+	struct scx_enable_cmd *cmd = container_of(work, struct scx_enable_cmd, work);
 	struct sched_ext_ops *ops = cmd->ops;
 	struct scx_sched *sch;
 	struct scx_task_iter sti;
@@ -5387,12 +5387,12 @@ static void scx_enable_workfn(struct kthread_work *work)
 	 * Flush scx_disable_work to ensure that error is reported before init
 	 * completion. sch's base reference will be put by bpf_scx_unreg().
 	 */
-	scx_error(sch, "scx_enable() failed (%d)", ret);
+	scx_error(sch, "scx_root_enable() failed (%d)", ret);
 	kthread_flush_work(&sch->disable_work);
 	cmd->ret = 0;
 }
 
-static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
+static s32 scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 {
 	static struct kthread_worker *helper;
 	static DEFINE_MUTEX(helper_mutex);
@@ -5418,7 +5418,7 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 		mutex_unlock(&helper_mutex);
 	}
 
-	kthread_init_work(&cmd.work, scx_enable_workfn);
+	kthread_init_work(&cmd.work, scx_root_enable_workfn);
 	cmd.ops = ops;
 
 	kthread_queue_work(READ_ONCE(helper), &cmd.work);
@@ -5561,7 +5561,7 @@ static void bpf_scx_unreg(void *kdata, struct bpf_link *link)
 	struct sched_ext_ops *ops = kdata;
 	struct scx_sched *sch = ops->priv;
 
-	scx_disable(SCX_EXIT_UNREG);
+	scx_disable(sch, SCX_EXIT_UNREG);
 	kthread_flush_work(&sch->disable_work);
 	kobject_put(&sch->kobj);
 }
@@ -5689,7 +5689,15 @@ static struct bpf_struct_ops bpf_sched_ext_ops = {
 
 static void sysrq_handle_sched_ext_reset(u8 key)
 {
-	scx_disable(SCX_EXIT_SYSRQ);
+	struct scx_sched *sch;
+
+	rcu_read_lock();
+	sch = rcu_dereference(scx_root);
+	if (likely(sch))
+		scx_disable(sch, SCX_EXIT_SYSRQ);
+	else
+		pr_info("sched_ext: BPF schedulers not loaded\n");
+	rcu_read_unlock();
 }
 
 static const struct sysrq_key_op sysrq_sched_ext_reset_op = {
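
The reworked sysrq handler resolves the root scheduler itself and
degrades gracefully when nothing is loaded. A simplified user-space
model of that lookup follows, with an atomic pointer standing in for the
RCU-protected scx_root; RCU read-side protection and grace periods are
deliberately elided, so this only illustrates the NULL-check behavior:

#include <stdatomic.h>
#include <stdio.h>

struct sched { int unused; };

static struct sched *_Atomic root;	/* stands in for scx_root */

static void handle_reset(void)
{
	struct sched *sch = atomic_load(&root);

	if (sch)
		printf("disabling loaded scheduler\n");	/* scx_disable(sch, ...) */
	else
		printf("sched_ext: BPF schedulers not loaded\n");
}

int main(void)
{
	static struct sched s;

	handle_reset();			/* nothing loaded yet */
	atomic_store(&root, &s);
	handle_reset();			/* scheduler present */
	return 0;
}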