diff options
| author | Cheng-Yang Chou <yphbchou0911@gmail.com> | 2026-03-15 11:24:40 +0300 |
|---|---|---|
| committer | Tejun Heo <tj@kernel.org> | 2026-03-15 11:53:59 +0300 |
| commit | 6712c4fefca0422851b71d1a58a32ea03f69310f (patch) | |
| tree | ed98886d0d2f2c09b1fa322ad693ef9c5fdb0bc1 | |
| parent | c959218c6533cf7e373cb5ccddb93f582ee5d47b (diff) | |
| download | linux-6712c4fefca0422851b71d1a58a32ea03f69310f.tar.xz | |
sched_ext: Update demo schedulers and selftests to use scx_bpf_task_set_dsq_vtime()
Direct writes to p->scx.dsq_vtime are deprecated in favor of
scx_bpf_task_set_dsq_vtime(). Update scx_simple, scx_flatcg, and
select_cpu_vtime selftest to use the new kfunc with
scale_by_task_weight_inverse().
Signed-off-by: Cheng-Yang Chou <yphbchou0911@gmail.com>
Reviewed-by: Andrea Righi <arighi@nvidia.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
| -rw-r--r-- | tools/sched_ext/scx_flatcg.bpf.c | 12 | ||||
| -rw-r--r-- | tools/sched_ext/scx_simple.bpf.c | 6 | ||||
| -rw-r--r-- | tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c | 6 |
3 files changed, 15 insertions(+), 9 deletions(-)
diff --git a/tools/sched_ext/scx_flatcg.bpf.c b/tools/sched_ext/scx_flatcg.bpf.c
index 1351377f64d5..6d3a028d7b59 100644
--- a/tools/sched_ext/scx_flatcg.bpf.c
+++ b/tools/sched_ext/scx_flatcg.bpf.c
@@ -551,9 +551,11 @@ void BPF_STRUCT_OPS(fcg_stopping, struct task_struct *p, bool runnable)
	 * too much, determine the execution time by taking explicit timestamps
	 * instead of depending on @p->scx.slice.
	 */
-	if (!fifo_sched)
-		p->scx.dsq_vtime +=
-			(SCX_SLICE_DFL - p->scx.slice) * 100 / p->scx.weight;
+	if (!fifo_sched) {
+		u64 delta = scale_by_task_weight_inverse(p, SCX_SLICE_DFL - p->scx.slice);
+
+		scx_bpf_task_set_dsq_vtime(p, p->scx.dsq_vtime + delta);
+	}

	taskc = bpf_task_storage_get(&task_ctx, p, 0, 0);
	if (!taskc) {
@@ -822,7 +824,7 @@ s32 BPF_STRUCT_OPS(fcg_init_task, struct task_struct *p,
	if (!(cgc = find_cgrp_ctx(args->cgroup)))
		return -ENOENT;

-	p->scx.dsq_vtime = cgc->tvtime_now;
+	scx_bpf_task_set_dsq_vtime(p, cgc->tvtime_now);

	return 0;
}
@@ -924,7 +926,7 @@ void BPF_STRUCT_OPS(fcg_cgroup_move, struct task_struct *p,
		return;

	delta = time_delta(p->scx.dsq_vtime, from_cgc->tvtime_now);
-	p->scx.dsq_vtime = to_cgc->tvtime_now + delta;
+	scx_bpf_task_set_dsq_vtime(p, to_cgc->tvtime_now + delta);
}

s32 BPF_STRUCT_OPS_SLEEPABLE(fcg_init)
diff --git a/tools/sched_ext/scx_simple.bpf.c b/tools/sched_ext/scx_simple.bpf.c
index 9ad6f0949987..cc40552b2b5f 100644
--- a/tools/sched_ext/scx_simple.bpf.c
+++ b/tools/sched_ext/scx_simple.bpf.c
@@ -121,12 +121,14 @@ void BPF_STRUCT_OPS(simple_stopping, struct task_struct *p, bool runnable)
	 * too much, determine the execution time by taking explicit timestamps
	 * instead of depending on @p->scx.slice.
	 */
-	p->scx.dsq_vtime += (SCX_SLICE_DFL - p->scx.slice) * 100 / p->scx.weight;
+	u64 delta = scale_by_task_weight_inverse(p, SCX_SLICE_DFL - p->scx.slice);
+
+	scx_bpf_task_set_dsq_vtime(p, p->scx.dsq_vtime + delta);
}

void BPF_STRUCT_OPS(simple_enable, struct task_struct *p)
{
-	p->scx.dsq_vtime = vtime_now;
+	scx_bpf_task_set_dsq_vtime(p, vtime_now);
}

s32 BPF_STRUCT_OPS_SLEEPABLE(simple_init)
diff --git a/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
index 7aa5dc6bfb93..eec70d388cbf 100644
--- a/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
@@ -66,12 +66,14 @@ void BPF_STRUCT_OPS(select_cpu_vtime_running, struct task_struct *p)
void BPF_STRUCT_OPS(select_cpu_vtime_stopping, struct task_struct *p,
		    bool runnable)
{
-	p->scx.dsq_vtime += (SCX_SLICE_DFL - p->scx.slice) * 100 / p->scx.weight;
+	u64 delta = scale_by_task_weight_inverse(p, SCX_SLICE_DFL - p->scx.slice);
+
+	scx_bpf_task_set_dsq_vtime(p, p->scx.dsq_vtime + delta);
}

void BPF_STRUCT_OPS(select_cpu_vtime_enable, struct task_struct *p)
{
-	p->scx.dsq_vtime = vtime_now;
+	scx_bpf_task_set_dsq_vtime(p, vtime_now);
}

s32 BPF_STRUCT_OPS_SLEEPABLE(select_cpu_vtime_init)
