mirror of
https://github.com/torvalds/linux.git
synced 2026-05-12 16:18:45 +02:00
sched_ext: Update demo schedulers and selftests to use scx_bpf_task_set_dsq_vtime()
Direct writes to p->scx.dsq_vtime are deprecated in favor of scx_bpf_task_set_dsq_vtime(). Update scx_simple, scx_flatcg, and the select_cpu_vtime selftest to use the new kfunc together with scale_by_task_weight_inverse().

Signed-off-by: Cheng-Yang Chou <yphbchou0911@gmail.com>
Reviewed-by: Andrea Righi <arighi@nvidia.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
This commit is contained in:
parent
c959218c65
commit
6712c4fefc
|
|
@ -551,9 +551,11 @@ void BPF_STRUCT_OPS(fcg_stopping, struct task_struct *p, bool runnable)
|
|||
* too much, determine the execution time by taking explicit timestamps
|
||||
* instead of depending on @p->scx.slice.
|
||||
*/
|
||||
if (!fifo_sched)
|
||||
p->scx.dsq_vtime +=
|
||||
(SCX_SLICE_DFL - p->scx.slice) * 100 / p->scx.weight;
|
||||
if (!fifo_sched) {
|
||||
u64 delta = scale_by_task_weight_inverse(p, SCX_SLICE_DFL - p->scx.slice);
|
||||
|
||||
scx_bpf_task_set_dsq_vtime(p, p->scx.dsq_vtime + delta);
|
||||
}
|
||||
|
||||
taskc = bpf_task_storage_get(&task_ctx, p, 0, 0);
|
||||
if (!taskc) {
|
||||
|
|
@ -822,7 +824,7 @@ s32 BPF_STRUCT_OPS(fcg_init_task, struct task_struct *p,
|
|||
if (!(cgc = find_cgrp_ctx(args->cgroup)))
|
||||
return -ENOENT;
|
||||
|
||||
p->scx.dsq_vtime = cgc->tvtime_now;
|
||||
scx_bpf_task_set_dsq_vtime(p, cgc->tvtime_now);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -924,7 +926,7 @@ void BPF_STRUCT_OPS(fcg_cgroup_move, struct task_struct *p,
|
|||
return;
|
||||
|
||||
delta = time_delta(p->scx.dsq_vtime, from_cgc->tvtime_now);
|
||||
p->scx.dsq_vtime = to_cgc->tvtime_now + delta;
|
||||
scx_bpf_task_set_dsq_vtime(p, to_cgc->tvtime_now + delta);
|
||||
}
|
||||
|
||||
s32 BPF_STRUCT_OPS_SLEEPABLE(fcg_init)
|
||||
|
|
|
|||
|
|
@ -121,12 +121,14 @@ void BPF_STRUCT_OPS(simple_stopping, struct task_struct *p, bool runnable)
|
|||
* too much, determine the execution time by taking explicit timestamps
|
||||
* instead of depending on @p->scx.slice.
|
||||
*/
|
||||
p->scx.dsq_vtime += (SCX_SLICE_DFL - p->scx.slice) * 100 / p->scx.weight;
|
||||
u64 delta = scale_by_task_weight_inverse(p, SCX_SLICE_DFL - p->scx.slice);
|
||||
|
||||
scx_bpf_task_set_dsq_vtime(p, p->scx.dsq_vtime + delta);
|
||||
}
|
||||
|
||||
void BPF_STRUCT_OPS(simple_enable, struct task_struct *p)
|
||||
{
|
||||
p->scx.dsq_vtime = vtime_now;
|
||||
scx_bpf_task_set_dsq_vtime(p, vtime_now);
|
||||
}
|
||||
|
||||
s32 BPF_STRUCT_OPS_SLEEPABLE(simple_init)
|
||||
|
|
|
|||
|
|
@ -66,12 +66,14 @@ void BPF_STRUCT_OPS(select_cpu_vtime_running, struct task_struct *p)
|
|||
void BPF_STRUCT_OPS(select_cpu_vtime_stopping, struct task_struct *p,
|
||||
bool runnable)
|
||||
{
|
||||
p->scx.dsq_vtime += (SCX_SLICE_DFL - p->scx.slice) * 100 / p->scx.weight;
|
||||
u64 delta = scale_by_task_weight_inverse(p, SCX_SLICE_DFL - p->scx.slice);
|
||||
|
||||
scx_bpf_task_set_dsq_vtime(p, p->scx.dsq_vtime + delta);
|
||||
}
|
||||
|
||||
void BPF_STRUCT_OPS(select_cpu_vtime_enable, struct task_struct *p)
|
||||
{
|
||||
p->scx.dsq_vtime = vtime_now;
|
||||
scx_bpf_task_set_dsq_vtime(p, vtime_now);
|
||||
}
|
||||
|
||||
s32 BPF_STRUCT_OPS_SLEEPABLE(select_cpu_vtime_init)
|
||||
|
|
|
|||
Loading…
Reference in New Issue
Block a user