sched_ext: Add per-CPU data to DSQs
Add a per-CPU data structure to dispatch queues. Each DSQ now has a percpu
scx_dsq_pcpu which contains a back-pointer to the DSQ. Future changes will use
this to implement per-CPU reenqueue tracking for user DSQs.
init_dsq() now allocates the percpu data and can fail, so it returns an
error code. All callers are updated to handle failures. exit_dsq() is added
to free the percpu data and is called from all DSQ cleanup paths.
In scx_bpf_create_dsq(), init_dsq() is called before rcu_read_lock() since
alloc_percpu() requires GFP_KERNEL context, and dsq->sched is set
afterwards.
v2: Make the err_free_pcpu path call exit_dsq() only on bypass DSQs that were
actually initialized (Andrea Righi).
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Andrea Righi <arighi@nvidia.com>
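As background for the diff below: the patch uses the kernel's percpu allocator to attach one small struct to every possible CPU and point each slot back at its owner. A minimal sketch of that pattern follows; struct foo, foo_init() and foo_exit() are illustrative stand-ins for scx_dispatch_q, init_dsq() and exit_dsq(), while alloc_percpu(), per_cpu_ptr(), for_each_possible_cpu() and free_percpu() are the real APIs the patch uses.

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

struct foo;

struct foo_pcpu {
	struct foo *owner;		/* back-pointer to the owning object */
};

struct foo {
	struct foo_pcpu __percpu *pcpu;
};

/* Same shape as init_dsq(): a GFP_KERNEL allocation, so it can fail. */
static int foo_init(struct foo *foo)
{
	int cpu;

	foo->pcpu = alloc_percpu(struct foo_pcpu);
	if (!foo->pcpu)
		return -ENOMEM;

	/* Point every CPU's slot back at the owner. */
	for_each_possible_cpu(cpu)
		per_cpu_ptr(foo->pcpu, cpu)->owner = foo;

	return 0;
}

/* Same shape as exit_dsq(). */
static void foo_exit(struct foo *foo)
{
	free_percpu(foo->pcpu);
}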
commit 30b0515342 (parent ffa7ae0724)
@@ -62,6 +62,10 @@ enum scx_dsq_id_flags {
 	SCX_DSQ_LOCAL_CPU_MASK	= 0xffffffffLLU,
 };
 
+struct scx_dsq_pcpu {
+	struct scx_dispatch_q	*dsq;
+};
+
 /*
  * A dispatch queue (DSQ) can be either a FIFO or p->scx.dsq_vtime ordered
  * queue. A built-in DSQ is always a FIFO. The built-in local DSQs are used to
@@ -79,6 +83,7 @@ struct scx_dispatch_q {
 	struct rhash_head	hash_node;
 	struct llist_node	free_node;
 	struct scx_sched	*sched;
+	struct scx_dsq_pcpu __percpu *pcpu;
 	struct rcu_head		rcu;
 };
 
@@ -4020,15 +4020,42 @@ DEFINE_SCHED_CLASS(ext) = {
 #endif
 };
 
-static void init_dsq(struct scx_dispatch_q *dsq, u64 dsq_id,
-		     struct scx_sched *sch)
+static s32 init_dsq(struct scx_dispatch_q *dsq, u64 dsq_id,
+		    struct scx_sched *sch)
 {
+	s32 cpu;
+
 	memset(dsq, 0, sizeof(*dsq));
 
 	raw_spin_lock_init(&dsq->lock);
 	INIT_LIST_HEAD(&dsq->list);
 	dsq->id = dsq_id;
 	dsq->sched = sch;
+
+	dsq->pcpu = alloc_percpu(struct scx_dsq_pcpu);
+	if (!dsq->pcpu)
+		return -ENOMEM;
+
+	for_each_possible_cpu(cpu) {
+		struct scx_dsq_pcpu *pcpu = per_cpu_ptr(dsq->pcpu, cpu);
+
+		pcpu->dsq = dsq;
+	}
+
+	return 0;
 }
 
+static void exit_dsq(struct scx_dispatch_q *dsq)
+{
+	free_percpu(dsq->pcpu);
+}
+
+static void free_dsq_rcufn(struct rcu_head *rcu)
+{
+	struct scx_dispatch_q *dsq = container_of(rcu, struct scx_dispatch_q, rcu);
+
+	exit_dsq(dsq);
+	kfree(dsq);
+}
+
 static void free_dsq_irq_workfn(struct irq_work *irq_work)
@@ -4037,7 +4064,7 @@ static void free_dsq_irq_workfn(struct irq_work *irq_work)
 	struct scx_dispatch_q *dsq, *tmp_dsq;
 
 	llist_for_each_entry_safe(dsq, tmp_dsq, to_free, free_node)
-		kfree_rcu(dsq, rcu);
+		call_rcu(&dsq->rcu, free_dsq_rcufn);
 }
 
 static DEFINE_IRQ_WORK(free_dsq_irq_work, free_dsq_irq_workfn);
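The kfree_rcu() -> call_rcu() switch above follows directly from the new percpu member: kfree_rcu() can only kfree() the object once the grace period elapses, while call_rcu() invokes an arbitrary callback, which lets free_dsq_rcufn() release the percpu area via exit_dsq() before freeing the DSQ itself. Schematically, given the embedded struct rcu_head rcu:

	/* can only free the containing object after the grace period */
	kfree_rcu(dsq, rcu);

	/* runs a callback after the grace period; extra teardown allowed */
	call_rcu(&dsq->rcu, free_dsq_rcufn);	/* exit_dsq() + kfree() */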
@@ -4234,15 +4261,17 @@ static void scx_sched_free_rcu_work(struct work_struct *work)
 	cgroup_put(sch_cgroup(sch));
 #endif	/* CONFIG_EXT_SUB_SCHED */
 
-	/*
-	 * $sch would have entered bypass mode before the RCU grace period. As
-	 * that blocks new deferrals, all deferred_reenq_local_node's must be
-	 * off-list by now.
-	 */
 	for_each_possible_cpu(cpu) {
 		struct scx_sched_pcpu *pcpu = per_cpu_ptr(sch->pcpu, cpu);
 
+		/*
+		 * $sch would have entered bypass mode before the RCU grace
+		 * period. As that blocks new deferrals, all
+		 * deferred_reenq_local_node's must be off-list by now.
+		 */
 		WARN_ON_ONCE(!list_empty(&pcpu->deferred_reenq_local.node));
+
+		exit_dsq(bypass_dsq(sch, cpu));
 	}
 
 	free_percpu(sch->pcpu);
@@ -5787,6 +5816,9 @@ static int alloc_kick_syncs(void)
 
 static void free_pnode(struct scx_sched_pnode *pnode)
 {
+	if (!pnode)
+		return;
+	exit_dsq(&pnode->global_dsq);
 	kfree(pnode);
 }
 
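A side note on the new NULL check in free_pnode(): kfree() silently accepts NULL, so the old one-line body tolerated callers passing a not-yet-allocated pnode, but exit_dsq(&pnode->global_dsq) dereferences pnode and would oops without the explicit guard.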
@@ -5798,7 +5830,10 @@ static struct scx_sched_pnode *alloc_pnode(struct scx_sched *sch, int node)
 	if (!pnode)
 		return NULL;
 
-	init_dsq(&pnode->global_dsq, SCX_DSQ_GLOBAL, sch);
+	if (init_dsq(&pnode->global_dsq, SCX_DSQ_GLOBAL, sch)) {
+		kfree(pnode);
+		return NULL;
+	}
 
 	return pnode;
 }
@@ -5809,7 +5844,7 @@ static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops,
 {
 	struct scx_sched *sch;
 	s32 level = parent ? parent->level + 1 : 0;
-	s32 node, cpu, ret;
+	s32 node, cpu, ret, bypass_fail_cpu = nr_cpu_ids;
 
 	sch = kzalloc_flex(*sch, ancestors, level);
 	if (!sch)
@@ -5848,8 +5883,13 @@
 		goto err_free_pnode;
 	}
 
-	for_each_possible_cpu(cpu)
-		init_dsq(bypass_dsq(sch, cpu), SCX_DSQ_BYPASS, sch);
+	for_each_possible_cpu(cpu) {
+		ret = init_dsq(bypass_dsq(sch, cpu), SCX_DSQ_BYPASS, sch);
+		if (ret) {
+			bypass_fail_cpu = cpu;
+			goto err_free_pcpu;
+		}
+	}
 
 	for_each_possible_cpu(cpu) {
 		struct scx_sched_pcpu *pcpu = per_cpu_ptr(sch->pcpu, cpu);
@@ -5931,6 +5971,11 @@
 err_stop_helper:
 	kthread_destroy_worker(sch->helper);
+err_free_pcpu:
+	for_each_possible_cpu(cpu) {
+		if (cpu == bypass_fail_cpu)
+			break;
+		exit_dsq(bypass_dsq(sch, cpu));
+	}
 	free_percpu(sch->pcpu);
 err_free_pnode:
 	for_each_node_state(node, N_POSSIBLE)
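The bypass_fail_cpu bookkeeping above is the standard partial-initialization unwind idiom: remember where the init loop failed, then tear down only the entries that were actually set up. A generic sketch, with hypothetical item_init()/item_exit() helpers standing in for init_dsq()/exit_dsq():

static int item_init(int cpu);		/* hypothetical per-CPU setup */
static void item_exit(int cpu);		/* hypothetical per-CPU teardown */

static int init_all(void)
{
	int cpu, ret, fail_cpu = nr_cpu_ids;	/* sentinel: nothing failed */

	for_each_possible_cpu(cpu) {
		ret = item_init(cpu);
		if (ret) {
			fail_cpu = cpu;		/* first uninitialized entry */
			goto err_unwind;
		}
	}
	return 0;

err_unwind:
	for_each_possible_cpu(cpu) {
		if (cpu == fail_cpu)
			break;			/* stop before the failed entry */
		item_exit(cpu);
	}
	return ret;
}

Initializing the sentinel to nr_cpu_ids (one past every valid CPU id) makes the same unwind loop correct when err_free_pcpu is reached from later failure points: no possible CPU matches the sentinel, so all bypass DSQs get torn down.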
@@ -7173,7 +7218,7 @@ void __init init_sched_ext_class(void)
 		int n = cpu_to_node(cpu);
 
 		/* local_dsq's sch will be set during scx_root_enable() */
-		init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL, NULL);
+		BUG_ON(init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL, NULL));
 
 		INIT_LIST_HEAD(&rq->scx.runnable_list);
 		INIT_LIST_HEAD(&rq->scx.ddsp_deferred_locals);
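init_sched_ext_class() handles the new failure mode with BUG_ON() rather than an error return: it runs once during __init, and if a percpu allocation fails that early in boot there is no meaningful way to recover, so failing loudly is the pragmatic choice.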
@@ -7872,11 +7917,21 @@ __bpf_kfunc s32 scx_bpf_create_dsq(u64 dsq_id, s32 node, const struct bpf_prog_a
 	if (!dsq)
 		return -ENOMEM;
 
+	/*
+	 * init_dsq() must be called in GFP_KERNEL context. Init it with NULL
+	 * @sch and update afterwards.
+	 */
+	ret = init_dsq(dsq, dsq_id, NULL);
+	if (ret) {
+		kfree(dsq);
+		return ret;
+	}
+
 	rcu_read_lock();
 
 	sch = scx_prog_sched(aux);
 	if (sch) {
-		init_dsq(dsq, dsq_id, sch);
+		dsq->sched = sch;
 		ret = rhashtable_lookup_insert_fast(&sch->dsq_hash, &dsq->hash_node,
 						    dsq_hash_params);
 	} else {
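The reordering in scx_bpf_create_dsq() resolves a classic context conflict: alloc_percpu() allocates with GFP_KERNEL and may sleep, but sleeping is not allowed inside an RCU read-side critical section. Hence init_dsq() now runs before rcu_read_lock() with a NULL @sch, and only the dsq->sched pointer is patched up under RCU. A hedged sketch of the rule, reusing the hypothetical struct foo_pcpu from the earlier example:

	struct foo_pcpu __percpu *p;

	/*
	 * WRONG: a GFP_KERNEL allocation may sleep to reclaim memory,
	 * which is forbidden between rcu_read_lock()/rcu_read_unlock().
	 *
	 *	rcu_read_lock();
	 *	p = alloc_percpu(struct foo_pcpu);
	 *	rcu_read_unlock();
	 */

	/* RIGHT: do the sleeping allocation in process context first ... */
	p = alloc_percpu(struct foo_pcpu);
	if (!p)
		return -ENOMEM;

	/* ... then take the RCU read lock for lookups and pointer fixups. */
	rcu_read_lock();
	/* resolve the scheduler, set late-bound fields, insert into hash */
	rcu_read_unlock();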
@@ -7884,8 +7939,10 @@
 	}
 
 	rcu_read_unlock();
-	if (ret)
+	if (ret) {
+		exit_dsq(dsq);
 		kfree(dsq);
+	}
 	return ret;
 }
 