Diffstat (limited to 'tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c')
-rw-r--r--	tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
index e6c67bcf5e6e..bfcb96cd4954 100644
--- a/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
@@ -2,8 +2,8 @@
 /*
  * A scheduler that validates that enqueue flags are properly stored and
  * applied at dispatch time when a task is directly dispatched from
- * ops.select_cpu(). We validate this by using scx_bpf_dispatch_vtime(), and
- * making the test a very basic vtime scheduler.
+ * ops.select_cpu(). We validate this by using scx_bpf_dsq_insert_vtime(),
+ * and making the test a very basic vtime scheduler.
  *
  * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
  * Copyright (c) 2024 David Vernet <dvernet@meta.com>
@@ -47,13 +47,13 @@ s32 BPF_STRUCT_OPS(select_cpu_vtime_select_cpu, struct task_struct *p,
 	cpu = prev_cpu;
 	scx_bpf_test_and_clear_cpu_idle(cpu);
 ddsp:
-	scx_bpf_dispatch_vtime(p, VTIME_DSQ, SCX_SLICE_DFL, task_vtime(p), 0);
+	scx_bpf_dsq_insert_vtime(p, VTIME_DSQ, SCX_SLICE_DFL, task_vtime(p), 0);
 	return cpu;
 }
 
 void BPF_STRUCT_OPS(select_cpu_vtime_dispatch, s32 cpu, struct task_struct *p)
 {
-	if (scx_bpf_consume(VTIME_DSQ))
+	if (scx_bpf_dsq_move_to_local(VTIME_DSQ))
 		consumed = true;
 }
 
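
For context, the two renamed kfuncs are the insert and consume halves of working with a user-created DSQ: scx_bpf_dsq_insert_vtime() queues a task on a DSQ in vtime order (replacing scx_bpf_dispatch_vtime()), and scx_bpf_dsq_move_to_local() pulls the first task from that DSQ onto the dispatching CPU's local DSQ (replacing scx_bpf_consume()). The sketch below is a minimal illustration of that pairing, not the selftest source: the DSQ id value, the simplified task_vtime() helper, the example_* names, and the ops struct layout are assumptions made for the example; only the kfunc names and argument lists visible in the diff above are taken as given.

/*
 * Minimal sketch (not the selftest source): a vtime DSQ that is fed from
 * ops.select_cpu() via direct dispatch and drained from ops.dispatch().
 * DSQ id, helper, and ops layout are illustrative assumptions.
 */
#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";

#define EXAMPLE_VTIME_DSQ	0	/* assumed user DSQ id for this sketch */

/*
 * Simplified stand-in for the test's task_vtime(): report the task's
 * current dsq_vtime without any anti-starvation clamping.
 */
static u64 task_vtime(const struct task_struct *p)
{
	return p->scx.dsq_vtime;
}

s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
		   s32 prev_cpu, u64 wake_flags)
{
	/*
	 * Direct dispatch from ops.select_cpu(): insert the task into the
	 * vtime-ordered user DSQ. The enqueue flags passed here are what
	 * the selftest verifies are stored and applied at dispatch time.
	 */
	scx_bpf_dsq_insert_vtime(p, EXAMPLE_VTIME_DSQ, SCX_SLICE_DFL,
				 task_vtime(p), 0);
	return prev_cpu;
}

void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *p)
{
	/*
	 * Consume side: move the first queued task onto this CPU's local
	 * DSQ. This is the call that used to be scx_bpf_consume().
	 */
	scx_bpf_dsq_move_to_local(EXAMPLE_VTIME_DSQ);
}

s32 BPF_STRUCT_OPS_SLEEPABLE(example_init)
{
	/* The user DSQ must be created before tasks can be inserted into it. */
	return scx_bpf_create_dsq(EXAMPLE_VTIME_DSQ, -1);
}

SEC(".struct_ops.link")
struct sched_ext_ops example_vtime_ops = {
	.select_cpu	= (void *)example_select_cpu,
	.dispatch	= (void *)example_dispatch,
	.init		= (void *)example_init,
	.name		= "example_vtime",
};

The real selftest additionally sets the consumed flag (visible in the second hunk) so userspace can check that the DSQ was actually drained; that logic is unchanged by this rename and is omitted from the sketch.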