Diffstat (limited to 'tools/testing/selftests/sched_ext')
7 files changed, 392 insertions, 105 deletions
diff --git a/tools/testing/selftests/sched_ext/Makefile b/tools/testing/selftests/sched_ext/Makefile
index f4531327b8e7..9d9d6b4c38b0 100644
--- a/tools/testing/selftests/sched_ext/Makefile
+++ b/tools/testing/selftests/sched_ext/Makefile
@@ -162,10 +162,10 @@ all_test_bpfprogs := $(foreach prog,$(wildcard *.bpf.c),$(INCLUDE_DIR)/$(patsubs
 auto-test-targets := \
 	create_dsq \
 	enq_last_no_enq_fails \
-	enq_select_cpu_fails \
 	ddsp_bogus_dsq_fail \
 	ddsp_vtimelocal_fail \
 	dsp_local_on \
+	enq_select_cpu \
 	exit \
 	hotplug \
 	init_enable_count \
@@ -173,6 +173,7 @@ auto-test-targets := \
 	maybe_null \
 	minimal \
 	numa \
+	allowed_cpus \
 	prog_run \
 	reload_loop \
 	select_cpu_dfl \
diff --git a/tools/testing/selftests/sched_ext/allowed_cpus.bpf.c b/tools/testing/selftests/sched_ext/allowed_cpus.bpf.c
new file mode 100644
index 000000000000..35923e74a2ec
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/allowed_cpus.bpf.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * A scheduler that validates the behavior of scx_bpf_select_cpu_and() by
+ * selecting idle CPUs strictly within a subset of allowed CPUs.
+ *
+ * Copyright (c) 2025 Andrea Righi <arighi@nvidia.com>
+ */
+
+#include <scx/common.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+UEI_DEFINE(uei);
+
+private(PREF_CPUS) struct bpf_cpumask __kptr * allowed_cpumask;
+
+static void
+validate_idle_cpu(const struct task_struct *p, const struct cpumask *allowed, s32 cpu)
+{
+	if (scx_bpf_test_and_clear_cpu_idle(cpu))
+		scx_bpf_error("CPU %d should be marked as busy", cpu);
+
+	if (bpf_cpumask_subset(allowed, p->cpus_ptr) &&
+	    !bpf_cpumask_test_cpu(cpu, allowed))
+		scx_bpf_error("CPU %d not in the allowed domain for %d (%s)",
+			      cpu, p->pid, p->comm);
+}
+
+s32 BPF_STRUCT_OPS(allowed_cpus_select_cpu,
+		   struct task_struct *p, s32 prev_cpu, u64 wake_flags)
+{
+	const struct cpumask *allowed;
+	s32 cpu;
+
+	allowed = cast_mask(allowed_cpumask);
+	if (!allowed) {
+		scx_bpf_error("allowed domain not initialized");
+		return -EINVAL;
+	}
+
+	/*
+	 * Select an idle CPU strictly within the allowed domain.
+	 */
+	cpu = scx_bpf_select_cpu_and(p, prev_cpu, wake_flags, allowed, 0);
+	if (cpu >= 0) {
+		validate_idle_cpu(p, allowed, cpu);
+		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
+
+		return cpu;
+	}
+
+	return prev_cpu;
+}
+
+void BPF_STRUCT_OPS(allowed_cpus_enqueue, struct task_struct *p, u64 enq_flags)
+{
+	const struct cpumask *allowed;
+	s32 prev_cpu = scx_bpf_task_cpu(p), cpu;
+
+	scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
+
+	allowed = cast_mask(allowed_cpumask);
+	if (!allowed) {
+		scx_bpf_error("allowed domain not initialized");
+		return;
+	}
+
+	/*
+	 * Use scx_bpf_select_cpu_and() to proactively kick an idle CPU
+	 * within @allowed_cpumask, usable by @p.
+	 */
+	cpu = scx_bpf_select_cpu_and(p, prev_cpu, 0, allowed, 0);
+	if (cpu >= 0) {
+		validate_idle_cpu(p, allowed, cpu);
+		scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
+	}
+}
+
+s32 BPF_STRUCT_OPS_SLEEPABLE(allowed_cpus_init)
+{
+	struct bpf_cpumask *mask;
+
+	mask = bpf_cpumask_create();
+	if (!mask)
+		return -ENOMEM;
+
+	mask = bpf_kptr_xchg(&allowed_cpumask, mask);
+	if (mask)
+		bpf_cpumask_release(mask);
+
+	bpf_rcu_read_lock();
+
+	/*
+	 * Assign the first online CPU to the allowed domain.
+	 */
+	mask = allowed_cpumask;
+	if (mask) {
+		const struct cpumask *online = scx_bpf_get_online_cpumask();
+
+		bpf_cpumask_set_cpu(bpf_cpumask_first(online), mask);
+		scx_bpf_put_cpumask(online);
+	}
+
+	bpf_rcu_read_unlock();
+
+	return 0;
+}
+
+void BPF_STRUCT_OPS(allowed_cpus_exit, struct scx_exit_info *ei)
+{
+	UEI_RECORD(uei, ei);
+}
+
+struct task_cpu_arg {
+	pid_t pid;
+};
+
+SEC("syscall")
+int select_cpu_from_user(struct task_cpu_arg *input)
+{
+	struct task_struct *p;
+	int cpu;
+
+	p = bpf_task_from_pid(input->pid);
+	if (!p)
+		return -EINVAL;
+
+	bpf_rcu_read_lock();
+	cpu = scx_bpf_select_cpu_and(p, bpf_get_smp_processor_id(), 0, p->cpus_ptr, 0);
+	bpf_rcu_read_unlock();
+
+	bpf_task_release(p);
+
+	return cpu;
+}
+
+SEC(".struct_ops.link")
+struct sched_ext_ops allowed_cpus_ops = {
+	.select_cpu = (void *)allowed_cpus_select_cpu,
+	.enqueue = (void *)allowed_cpus_enqueue,
+	.init = (void *)allowed_cpus_init,
+	.exit = (void *)allowed_cpus_exit,
+	.name = "allowed_cpus",
+};
diff --git a/tools/testing/selftests/sched_ext/allowed_cpus.c b/tools/testing/selftests/sched_ext/allowed_cpus.c
new file mode 100644
index 000000000000..093f285ab4ba
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/allowed_cpus.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2025 Andrea Righi <arighi@nvidia.com>
+ */
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include "allowed_cpus.bpf.skel.h"
+#include "scx_test.h"
+
+static enum scx_test_status setup(void **ctx)
+{
+	struct allowed_cpus *skel;
+
+	skel = allowed_cpus__open();
+	SCX_FAIL_IF(!skel, "Failed to open");
+	SCX_ENUM_INIT(skel);
+	SCX_FAIL_IF(allowed_cpus__load(skel), "Failed to load skel");
+
+	*ctx = skel;
+
+	return SCX_TEST_PASS;
+}
+
+static int test_select_cpu_from_user(const struct allowed_cpus *skel)
+{
+	int fd, ret;
+	__u64 args[1];
+
+	LIBBPF_OPTS(bpf_test_run_opts, attr,
+		.ctx_in = args,
+		.ctx_size_in = sizeof(args),
+	);
+
+	args[0] = getpid();
+	fd = bpf_program__fd(skel->progs.select_cpu_from_user);
+	if (fd < 0)
+		return fd;
+
+	ret = bpf_prog_test_run_opts(fd, &attr);
+	if (ret < 0)
+		return ret;
+
+	fprintf(stderr, "%s: CPU %d\n", __func__, attr.retval);
+
+	return 0;
+}
+
+static enum scx_test_status run(void *ctx)
+{
+	struct allowed_cpus *skel = ctx;
+	struct bpf_link *link;
+
+	link = bpf_map__attach_struct_ops(skel->maps.allowed_cpus_ops);
+	SCX_FAIL_IF(!link, "Failed to attach scheduler");
+
+	/* Pick an idle CPU from user-space */
+	SCX_FAIL_IF(test_select_cpu_from_user(skel), "Failed to pick idle CPU");
+
+	/* Just sleeping is fine, plenty of scheduling events happening */
+	sleep(1);
+
+	SCX_EQ(skel->data->uei.kind, EXIT_KIND(SCX_EXIT_NONE));
+	bpf_link__destroy(link);
+
+	return SCX_TEST_PASS;
+}
+
+static void cleanup(void *ctx)
+{
+	struct allowed_cpus *skel = ctx;
+
+	allowed_cpus__destroy(skel);
+}
+
+struct scx_test allowed_cpus = {
+	.name = "allowed_cpus",
+	.description = "Verify scx_bpf_select_cpu_and()",
+	.setup = setup,
+	.run = run,
+	.cleanup = cleanup,
+};
+REGISTER_SCX_TEST(&allowed_cpus)
diff --git a/tools/testing/selftests/sched_ext/enq_select_cpu.bpf.c b/tools/testing/selftests/sched_ext/enq_select_cpu.bpf.c
new file mode 100644
index 000000000000..ee2c9b89716e
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/enq_select_cpu.bpf.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2023 David Vernet <dvernet@meta.com>
+ * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
+ */
+
+#include <scx/common.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+UEI_DEFINE(uei);
+
+s32 BPF_STRUCT_OPS(enq_select_cpu_select_cpu, struct task_struct *p,
+		   s32 prev_cpu, u64 wake_flags)
+{
+	/* Bounce all tasks to ops.enqueue() */
+	return prev_cpu;
+}
+
+void BPF_STRUCT_OPS(enq_select_cpu_enqueue, struct task_struct *p,
+		    u64 enq_flags)
+{
+	s32 cpu, prev_cpu = scx_bpf_task_cpu(p);
+	bool found = false;
+
+	cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, 0, &found);
+	if (found) {
+		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | cpu, SCX_SLICE_DFL, enq_flags);
+		return;
+	}
+
+	scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+}
+
+void BPF_STRUCT_OPS(enq_select_cpu_exit, struct scx_exit_info *ei)
+{
+	UEI_RECORD(uei, ei);
+}
+
+struct task_cpu_arg {
+	pid_t pid;
+};
+
+SEC("syscall")
+int select_cpu_from_user(struct task_cpu_arg *input)
+{
+	struct task_struct *p;
+	bool found = false;
+	s32 cpu;
+
+	p = bpf_task_from_pid(input->pid);
+	if (!p)
+		return -EINVAL;
+
+	bpf_rcu_read_lock();
+	cpu = scx_bpf_select_cpu_dfl(p, bpf_get_smp_processor_id(), 0, &found);
+	if (!found)
+		cpu = -EBUSY;
+	bpf_rcu_read_unlock();
+
+	bpf_task_release(p);
+
+	return cpu;
+}
+
+SEC(".struct_ops.link")
+struct sched_ext_ops enq_select_cpu_ops = {
+	.select_cpu = (void *)enq_select_cpu_select_cpu,
+	.enqueue = (void *)enq_select_cpu_enqueue,
+	.exit = (void *)enq_select_cpu_exit,
+	.name = "enq_select_cpu",
+	.timeout_ms = 1000U,
+};
diff --git a/tools/testing/selftests/sched_ext/enq_select_cpu.c b/tools/testing/selftests/sched_ext/enq_select_cpu.c
new file mode 100644
index 000000000000..340c6f8b86da
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/enq_select_cpu.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2023 David Vernet <dvernet@meta.com>
+ * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
+ */
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include "enq_select_cpu.bpf.skel.h"
+#include "scx_test.h"
+
+static enum scx_test_status setup(void **ctx)
+{
+	struct enq_select_cpu *skel;
+
+	skel = enq_select_cpu__open();
+	SCX_FAIL_IF(!skel, "Failed to open");
+	SCX_ENUM_INIT(skel);
+	SCX_FAIL_IF(enq_select_cpu__load(skel), "Failed to load skel");
+
+	*ctx = skel;
+
+	return SCX_TEST_PASS;
+}
+
+static int test_select_cpu_from_user(const struct enq_select_cpu *skel)
+{
+	int fd, ret;
+	__u64 args[1];
+
+	LIBBPF_OPTS(bpf_test_run_opts, attr,
+		.ctx_in = args,
+		.ctx_size_in = sizeof(args),
+	);
+
+	args[0] = getpid();
+	fd = bpf_program__fd(skel->progs.select_cpu_from_user);
+	if (fd < 0)
+		return fd;
+
+	ret = bpf_prog_test_run_opts(fd, &attr);
+	if (ret < 0)
+		return ret;
+
+	fprintf(stderr, "%s: CPU %d\n", __func__, attr.retval);
+
+	return 0;
+}
+
+static enum scx_test_status run(void *ctx)
+{
+	struct enq_select_cpu *skel = ctx;
+	struct bpf_link *link;
+
+	link = bpf_map__attach_struct_ops(skel->maps.enq_select_cpu_ops);
+	if (!link) {
+		SCX_ERR("Failed to attach scheduler");
+		return SCX_TEST_FAIL;
+	}
+
+	/* Pick an idle CPU from user-space */
+	SCX_FAIL_IF(test_select_cpu_from_user(skel), "Failed to pick idle CPU");
+
+	sleep(1);
+
+	SCX_EQ(skel->data->uei.kind, EXIT_KIND(SCX_EXIT_NONE));
+	bpf_link__destroy(link);
+
+	return SCX_TEST_PASS;
+}
+
+static void cleanup(void *ctx)
+{
+	struct enq_select_cpu *skel = ctx;
+
+	enq_select_cpu__destroy(skel);
+}
+
+struct scx_test enq_select_cpu = {
+	.name = "enq_select_cpu",
+	.description = "Verify scx_bpf_select_cpu_dfl() from multiple contexts",
+	.setup = setup,
+	.run = run,
+	.cleanup = cleanup,
+};
+REGISTER_SCX_TEST(&enq_select_cpu)
diff --git a/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c b/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c
deleted file mode 100644
index a7cf868d5e31..000000000000
--- a/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
- * Copyright (c) 2023 David Vernet <dvernet@meta.com>
- * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
- */
-
-#include <scx/common.bpf.h>
-
-char _license[] SEC("license") = "GPL";
-
-/* Manually specify the signature until the kfunc is added to the scx repo. */
-s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
-			   bool *found) __ksym;
-
-s32 BPF_STRUCT_OPS(enq_select_cpu_fails_select_cpu, struct task_struct *p,
-		   s32 prev_cpu, u64 wake_flags)
-{
-	return prev_cpu;
-}
-
-void BPF_STRUCT_OPS(enq_select_cpu_fails_enqueue, struct task_struct *p,
-		    u64 enq_flags)
-{
-	/*
-	 * Need to initialize the variable or the verifier will fail to load.
-	 * Improving these semantics is actively being worked on.
-	 */
-	bool found = false;
-
-	/* Can only call from ops.select_cpu() */
-	scx_bpf_select_cpu_dfl(p, 0, 0, &found);
-
-	scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
-}
-
-SEC(".struct_ops.link")
-struct sched_ext_ops enq_select_cpu_fails_ops = {
-	.select_cpu = (void *) enq_select_cpu_fails_select_cpu,
-	.enqueue = (void *) enq_select_cpu_fails_enqueue,
-	.name = "enq_select_cpu_fails",
-	.timeout_ms = 1000U,
-};
diff --git a/tools/testing/selftests/sched_ext/enq_select_cpu_fails.c b/tools/testing/selftests/sched_ext/enq_select_cpu_fails.c
deleted file mode 100644
index a80e3a3b3698..000000000000
--- a/tools/testing/selftests/sched_ext/enq_select_cpu_fails.c
+++ /dev/null
@@ -1,61 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
- * Copyright (c) 2023 David Vernet <dvernet@meta.com>
- * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
- */
-#include <bpf/bpf.h>
-#include <scx/common.h>
-#include <sys/wait.h>
-#include <unistd.h>
-#include "enq_select_cpu_fails.bpf.skel.h"
-#include "scx_test.h"
-
-static enum scx_test_status setup(void **ctx)
-{
-	struct enq_select_cpu_fails *skel;
-
-	skel = enq_select_cpu_fails__open();
-	SCX_FAIL_IF(!skel, "Failed to open");
-	SCX_ENUM_INIT(skel);
-	SCX_FAIL_IF(enq_select_cpu_fails__load(skel), "Failed to load skel");
-
-	*ctx = skel;
-
-	return SCX_TEST_PASS;
-}
-
-static enum scx_test_status run(void *ctx)
-{
-	struct enq_select_cpu_fails *skel = ctx;
-	struct bpf_link *link;
-
-	link = bpf_map__attach_struct_ops(skel->maps.enq_select_cpu_fails_ops);
-	if (!link) {
-		SCX_ERR("Failed to attach scheduler");
-		return SCX_TEST_FAIL;
-	}
-
-	sleep(1);
-
-	bpf_link__destroy(link);
-
-	return SCX_TEST_PASS;
-}
-
-static void cleanup(void *ctx)
-{
-	struct enq_select_cpu_fails *skel = ctx;
-
-	enq_select_cpu_fails__destroy(skel);
-}
-
-struct scx_test enq_select_cpu_fails = {
-	.name = "enq_select_cpu_fails",
-	.description = "Verify we fail to call scx_bpf_select_cpu_dfl() "
-		       "from ops.enqueue()",
-	.setup = setup,
-	.run = run,
-	.cleanup = cleanup,
-};
-REGISTER_SCX_TEST(&enq_select_cpu_fails)
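
For reference, one plausible way to build and exercise the two tests touched by this diff, assuming the standard sched_ext selftest harness (the runner binary and its -t flag are taken from the existing runner.c in this directory, not from this diff, and the toolchain requirements are the usual ones for BPF selftests):

	# Build the sched_ext selftests (assumes clang and the in-tree libbpf)
	make -C tools/testing/selftests/sched_ext

	# Run only the added/converted tests via the harness runner
	./tools/testing/selftests/sched_ext/runner -t allowed_cpus
	./tools/testing/selftests/sched_ext/runner -t enq_select_cpu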