From c465f5591aa84a6f85d66d152e28b92844a45d4f Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Tue, 17 Mar 2026 16:59:45 +0100 Subject: selftests/mount_setattr: increase tmpfs size for idmapped mount tests The mount_setattr_idmapped fixture mounts a 2 MB tmpfs at /mnt and then creates a 2 GB sparse ext4 image at /mnt/C/ext4.img. While ftruncate() succeeds (sparse file), mkfs.ext4 needs to write actual metadata blocks (inode tables, journal, bitmaps) which easily exceeds the 2 MB tmpfs limit, causing ENOSPC and failing the fixture setup for all mount_setattr_idmapped tests. This was introduced by commit d37d4720c3e7 ("selftests/mount_settattr: ensure that ext4 filesystem can be created") which increased the image size from 2 MB to 2 GB but didn't adjust the tmpfs size. Bump the tmpfs size to 256 MB which is sufficient for the ext4 metadata. Fixes: d37d4720c3e7 ("selftests/mount_settattr: ensure that ext4 filesystem can be created") Signed-off-by: Christian Brauner --- tools/testing/selftests/mount_setattr/mount_setattr_test.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/testing/selftests/mount_setattr/mount_setattr_test.c b/tools/testing/selftests/mount_setattr/mount_setattr_test.c index 7aec3ae82a44..c6dafb3cc116 100644 --- a/tools/testing/selftests/mount_setattr/mount_setattr_test.c +++ b/tools/testing/selftests/mount_setattr/mount_setattr_test.c @@ -1020,7 +1020,7 @@ FIXTURE_SETUP(mount_setattr_idmapped) "size=100000,mode=700"), 0); ASSERT_EQ(mount("testing", "/mnt", "tmpfs", MS_NOATIME | MS_NODEV, - "size=2m,mode=700"), 0); + "size=256m,mode=700"), 0); ASSERT_EQ(mkdir("/mnt/A", 0777), 0); -- cgit v1.2.3 From a54142d9ff49dadb8bd063b8d016546e5706184c Mon Sep 17 00:00:00 2001 From: Mickaël Salaün Date: Tue, 10 Mar 2026 20:04:15 +0100 Subject: selftests/landlock: Test tsync interruption and cancellation paths MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add tsync_interrupt test to exercise the signal interruption path in landlock_restrict_sibling_threads(). When a signal interrupts wait_for_completion_interruptible() while the calling thread waits for sibling threads to finish credential preparation, the kernel: 1. Sets ERESTARTNOINTR to request a transparent syscall restart. 2. Calls cancel_tsync_works() to opportunistically dequeue task works that have not started running yet. 3. Breaks out of the preparation loop, then unblocks remaining task works via complete_all() and waits for them to finish. 4. Returns the error, causing abort_creds() in the syscall handler. Specifically, cancel_tsync_works() in its entirety, the ERESTARTNOINTR error branch in landlock_restrict_sibling_threads(), and the abort_creds() error branch in the landlock_restrict_self() syscall handler are timing-dependent and not exercised by the existing tsync tests, making code coverage measurements non-deterministic. The test spawns a signaler thread that rapidly sends SIGUSR1 to the calling thread while it performs landlock_restrict_self() with LANDLOCK_RESTRICT_SELF_TSYNC. Since ERESTARTNOINTR causes a transparent restart, userspace always sees the syscall succeed. This is a best-effort coverage test: the interruption path is exercised when the signal lands during the preparation wait, which depends on thread scheduling. The test creates enough idle sibling threads (200) to ensure multiple serialized waves of credential preparation even on machines with many cores (e.g., 64), widening the window for the signaler. 
Deterministic coverage would require wrapping the wait call with ALLOW_ERROR_INJECTION() and using CONFIG_FAIL_FUNCTION. Test coverage for security/landlock was 90.2% of 2105 lines according to LLVM 21, and it is now 91.1% of 2105 lines with this new test. Cc: Günther Noack Cc: Justin Suess Cc: Tingmao Wang Cc: Yihan Ding Link: https://lore.kernel.org/r/20260310190416.1913908-1-mic@digikod.net Signed-off-by: Mickaël Salaün --- tools/testing/selftests/landlock/tsync_test.c | 91 ++++++++++++++++++++++++++- 1 file changed, 90 insertions(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/testing/selftests/landlock/tsync_test.c b/tools/testing/selftests/landlock/tsync_test.c index 37ef0d2270db..2b9ad4f154f4 100644 --- a/tools/testing/selftests/landlock/tsync_test.c +++ b/tools/testing/selftests/landlock/tsync_test.c @@ -6,9 +6,10 @@ */ #define _GNU_SOURCE +#include #include +#include #include -#include #include "common.h" @@ -158,4 +159,92 @@ TEST(competing_enablement) EXPECT_EQ(0, close(ruleset_fd)); } +static void signal_nop_handler(int sig) +{ +} + +struct signaler_data { + pthread_t target; + volatile bool stop; +}; + +static void *signaler_thread(void *data) +{ + struct signaler_data *sd = data; + + while (!sd->stop) + pthread_kill(sd->target, SIGUSR1); + + return NULL; +} + +/* + * Number of idle sibling threads. This must be large enough that even on + * machines with many cores, the sibling threads cannot all complete their + * credential preparation in a single parallel wave, otherwise the signaler + * thread has no window to interrupt wait_for_completion_interruptible(). + * 200 threads on a 64-core machine yields ~3 serialized waves, giving the + * tight signal loop enough time to land an interruption. + */ +#define NUM_IDLE_THREADS 200 + +/* + * Exercises the tsync interruption and cancellation paths in tsync.c. + * + * When a signal interrupts the calling thread while it waits for sibling + * threads to finish their credential preparation + * (wait_for_completion_interruptible in landlock_restrict_sibling_threads), + * the kernel sets ERESTARTNOINTR, cancels queued task works that have not + * started yet (cancel_tsync_works), then waits for the remaining works to + * finish. On the error return, syscalls.c aborts the prepared credentials. + * The kernel automatically restarts the syscall, so userspace sees success. + */ +TEST(tsync_interrupt) +{ + size_t i; + pthread_t threads[NUM_IDLE_THREADS]; + pthread_t signaler; + struct signaler_data sd; + struct sigaction sa = {}; + const int ruleset_fd = create_ruleset(_metadata); + + disable_caps(_metadata); + + /* Install a no-op SIGUSR1 handler so the signal does not kill us. */ + sa.sa_handler = signal_nop_handler; + sigemptyset(&sa.sa_mask); + ASSERT_EQ(0, sigaction(SIGUSR1, &sa, NULL)); + + ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)); + + for (i = 0; i < NUM_IDLE_THREADS; i++) + ASSERT_EQ(0, pthread_create(&threads[i], NULL, idle, NULL)); + + /* + * Start a signaler thread that continuously sends SIGUSR1 to the + * calling thread. This maximizes the chance of interrupting + * wait_for_completion_interruptible() in the kernel's tsync path. + */ + sd.target = pthread_self(); + sd.stop = false; + ASSERT_EQ(0, pthread_create(&signaler, NULL, signaler_thread, &sd)); + + /* + * The syscall may be interrupted and transparently restarted by the + * kernel (ERESTARTNOINTR). From userspace, it should always succeed. 
+ */ + EXPECT_EQ(0, landlock_restrict_self(ruleset_fd, + LANDLOCK_RESTRICT_SELF_TSYNC)); + + sd.stop = true; + ASSERT_EQ(0, pthread_join(signaler, NULL)); + + for (i = 0; i < NUM_IDLE_THREADS; i++) { + ASSERT_EQ(0, pthread_cancel(threads[i])); + ASSERT_EQ(0, pthread_join(threads[i], NULL)); + } + + EXPECT_EQ(0, close(ruleset_fd)); +} + TEST_HARNESS_MAIN -- cgit v1.2.3 From 6680c162b4850976ee52b57372eddc4450c1d074 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 24 Mar 2026 10:21:47 -1000 Subject: selftests/cgroup: Don't require synchronous populated update on task exit test_cgcore_populated (test_core) and test_cgkill_{simple,tree,forkbomb} (test_kill) check cgroup.events "populated 0" immediately after reaping child tasks with waitpid(). This used to work because cgroup_task_exit() in do_exit() unlinked tasks from css_sets before exit_notify() woke up waitpid(). d245698d727a ("cgroup: Defer task cgroup unlink until after the task is done switching out") moved the unlink to cgroup_task_dead() in finish_task_switch(), which runs after exit_notify(). The populated counter is now decremented after the parent's waitpid() can return, so there is no longer a synchronous ordering guarantee. On PREEMPT_RT, where cgroup_task_dead() is further deferred through lazy irq_work, the race window is even larger. The synchronous populated transition was never part of the cgroup interface contract - it was an implementation artifact. Use cg_read_strcmp_wait() which retries for up to 1 second, matching what these tests actually need to verify: that the cgroup eventually becomes unpopulated after all tasks exit. Fixes: d245698d727a ("cgroup: Defer task cgroup unlink until after the task is done switching out") Reported-by: Sebastian Andrzej Siewior Signed-off-by: Tejun Heo Tested-by: Sebastian Andrzej Siewior Cc: Christian Brauner Cc: cgroups@vger.kernel.org --- tools/testing/selftests/cgroup/lib/cgroup_util.c | 15 +++++++++++++++ tools/testing/selftests/cgroup/lib/include/cgroup_util.h | 2 ++ tools/testing/selftests/cgroup/test_core.c | 3 ++- tools/testing/selftests/cgroup/test_kill.c | 7 ++++--- 4 files changed, 23 insertions(+), 4 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/cgroup/lib/cgroup_util.c b/tools/testing/selftests/cgroup/lib/cgroup_util.c index ce6c2642fd9b..6a7295347e90 100644 --- a/tools/testing/selftests/cgroup/lib/cgroup_util.c +++ b/tools/testing/selftests/cgroup/lib/cgroup_util.c @@ -123,6 +123,21 @@ int cg_read_strcmp(const char *cgroup, const char *control, return ret; } +int cg_read_strcmp_wait(const char *cgroup, const char *control, + const char *expected) +{ + int i, ret; + + for (i = 0; i < 100; i++) { + ret = cg_read_strcmp(cgroup, control, expected); + if (!ret) + return ret; + usleep(10000); + } + + return ret; +} + int cg_read_strstr(const char *cgroup, const char *control, const char *needle) { char buf[PAGE_SIZE]; diff --git a/tools/testing/selftests/cgroup/lib/include/cgroup_util.h b/tools/testing/selftests/cgroup/lib/include/cgroup_util.h index 77f386dab5e8..567b1082974c 100644 --- a/tools/testing/selftests/cgroup/lib/include/cgroup_util.h +++ b/tools/testing/selftests/cgroup/lib/include/cgroup_util.h @@ -61,6 +61,8 @@ extern int cg_read(const char *cgroup, const char *control, char *buf, size_t len); extern int cg_read_strcmp(const char *cgroup, const char *control, const char *expected); +extern int cg_read_strcmp_wait(const char *cgroup, const char *control, + const char *expected); extern int cg_read_strstr(const char *cgroup, const char 
*control, const char *needle); extern long cg_read_long(const char *cgroup, const char *control); diff --git a/tools/testing/selftests/cgroup/test_core.c b/tools/testing/selftests/cgroup/test_core.c index 102262555a59..7b83c7e7c9d4 100644 --- a/tools/testing/selftests/cgroup/test_core.c +++ b/tools/testing/selftests/cgroup/test_core.c @@ -233,7 +233,8 @@ static int test_cgcore_populated(const char *root) if (err) goto cleanup; - if (cg_read_strcmp(cg_test_d, "cgroup.events", "populated 0\n")) + if (cg_read_strcmp_wait(cg_test_d, "cgroup.events", + "populated 0\n")) goto cleanup; /* Remove cgroup. */ diff --git a/tools/testing/selftests/cgroup/test_kill.c b/tools/testing/selftests/cgroup/test_kill.c index c8c9d306925b..f6cd23a8ecc7 100644 --- a/tools/testing/selftests/cgroup/test_kill.c +++ b/tools/testing/selftests/cgroup/test_kill.c @@ -86,7 +86,7 @@ cleanup: wait_for_pid(pids[i]); if (ret == KSFT_PASS && - cg_read_strcmp(cgroup, "cgroup.events", "populated 0\n")) + cg_read_strcmp_wait(cgroup, "cgroup.events", "populated 0\n")) ret = KSFT_FAIL; if (cgroup) @@ -190,7 +190,8 @@ cleanup: wait_for_pid(pids[i]); if (ret == KSFT_PASS && - cg_read_strcmp(cgroup[0], "cgroup.events", "populated 0\n")) + cg_read_strcmp_wait(cgroup[0], "cgroup.events", + "populated 0\n")) ret = KSFT_FAIL; for (i = 9; i >= 0 && cgroup[i]; i--) { @@ -251,7 +252,7 @@ cleanup: wait_for_pid(pid); if (ret == KSFT_PASS && - cg_read_strcmp(cgroup, "cgroup.events", "populated 0\n")) + cg_read_strcmp_wait(cgroup, "cgroup.events", "populated 0\n")) ret = KSFT_FAIL; if (cgroup) -- cgit v1.2.3 From 5d17af9eb2dd3de6846ed344c883de7812e6cc09 Mon Sep 17 00:00:00 2001 From: Xiang Mei Date: Thu, 26 Mar 2026 13:43:10 -0700 Subject: selftests/tc-testing: add test for HFSC divide-by-zero in rtsc_min() Add a regression test for the divide-by-zero in rtsc_min() triggered when m2sm() converts a large m1 value (e.g. 32gbit) to a u64 scaled slope reaching 2^32. rtsc_min() stores the difference of two such u64 values (sm1 - sm2) in a u32 variable `dsm`, truncating 2^32 to zero and causing a divide-by-zero oops in the concave-curve intersection path. The test configures an HFSC class with m1=32gbit d=1ms m2=0bit, sends a packet to activate the class, waits for it to drain and go idle, then sends another packet to trigger reactivation through rtsc_min(). 
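To make the truncation concrete, here is a minimal userspace sketch (not part of the patch, and simplified from the kernel's HFSC code): it assumes, as described above, that the u64 scaled slope derived from m1=32gbit reaches exactly 2^32 while the slope from m2=0bit is 0, so their difference no longer fits in the u32 `dsm`:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t sm1 = 1ULL << 32;	/* u64 scaled slope from a large m1 (e.g. 32gbit) */
	uint64_t sm2 = 0;		/* u64 scaled slope from m2=0bit */
	uint32_t dsm = sm1 - sm2;	/* u32 assignment truncates 2^32 to 0 */

	printf("sm1 - sm2 = %llu, dsm = %u\n",
	       (unsigned long long)(sm1 - sm2), dsm);
	/* any later division by dsm is the divide-by-zero hit in rtsc_min() */
	return 0;
}
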
Signed-off-by: Xiang Mei Acked-by: Jamal Hadi Salim Reviewed-by: Victor Nogueira Link: https://patch.msgid.link/20260326204310.1549327-2-xmei5@asu.edu Signed-off-by: Jakub Kicinski --- .../tc-testing/tc-tests/infra/qdiscs.json | 25 ++++++++++++++++++++++ 1 file changed, 25 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json b/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json index 6a39640aa2a8..1e5efb2a31eb 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json +++ b/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json @@ -1111,5 +1111,30 @@ "teardown": [ "$TC qdisc del dev $DUMMY root handle 1:" ] + }, + { + "id": "a3d7", + "name": "HFSC with large m1 - no divide-by-zero on class reactivation", + "category": [ + "qdisc", + "hfsc" + ], + "plugins": { + "requires": "nsPlugin" + }, + "setup": [ + "$TC qdisc replace dev $DUMMY root handle 1: hfsc default 1", + "$TC class replace dev $DUMMY parent 1: classid 1:1 hfsc rt m1 32gbit d 1ms m2 0bit ls m1 32gbit d 1ms m2 0bit", + "ping -I$DUMMY -f -c1 -s64 -W1 10.10.10.1 || true", + "sleep 1" + ], + "cmdUnderTest": "ping -I$DUMMY -f -c1 -s64 -W1 10.10.10.1 || true", + "expExitCode": "0", + "verifyCmd": "$TC qdisc show dev $DUMMY", + "matchPattern": "qdisc hfsc 1: root", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DUMMY handle 1: root" + ] } ] -- cgit v1.2.3 From 2e8b1a1d12ae3338efeb1c3de3eb4e9324b87a28 Mon Sep 17 00:00:00 2001 From: Tomas Glozar Date: Mon, 30 Mar 2026 11:12:07 +0200 Subject: rtla: Fix build without libbpf header rtla supports building without libbpf. However, BPF actions patchset [1] adds an include of bpf/libbpf.h into timerlat_bpf.h, which breaks build on systems that don't have libbpf headers installed. This is a leftover from a draft version of the patchset where timerlat_bpf_set_action() (which takes a struct bpf_program * argument) was defined in the header. timerlat_bpf.c already includes bpf/libbpf.h via timerlat.skel.h when libbpf is present. Remove the redundant include to fix build on systems without libbpf headers. [1] https://lore.kernel.org/linux-trace-kernel/20251126144205.331954-1-tglozar@redhat.com/T/ Cc: John Kacur Cc: Luis Goncalves Cc: Crystal Wood Cc: Costa Shulyupin Link: https://patch.msgid.link/20260330091207.16184-1-tglozar@redhat.com Reported-by: Steven Rostedt (Google) Closes: https://lore.kernel.org/linux-trace-kernel/20260329122202.65a8b575@robin/ Fixes: 8cd0f08ac72e ("rtla/timerlat: Support tail call from BPF program") Signed-off-by: Tomas Glozar Reviewed-by: Wander Lairson Costa Signed-off-by: Steven Rostedt (Google) --- tools/tracing/rtla/src/timerlat_bpf.h | 1 - 1 file changed, 1 deletion(-) (limited to 'tools') diff --git a/tools/tracing/rtla/src/timerlat_bpf.h b/tools/tracing/rtla/src/timerlat_bpf.h index 169abeaf4363..f7c5675737fe 100644 --- a/tools/tracing/rtla/src/timerlat_bpf.h +++ b/tools/tracing/rtla/src/timerlat_bpf.h @@ -12,7 +12,6 @@ enum summary_field { }; #ifndef __bpf__ -#include #ifdef HAVE_BPF_SKEL int timerlat_bpf_init(struct timerlat_params *params); int timerlat_bpf_attach(void); -- cgit v1.2.3 From 090d34f0f0285124452373225bcc520a31e305e4 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sat, 28 Mar 2026 14:18:56 -1000 Subject: selftests/sched_ext: Add cyclic SCX_KICK_WAIT stress test Add a test that creates a 3-CPU kick_wait cycle (A->B->C->A). 
A BPF scheduler kicks the next CPU in the ring with SCX_KICK_WAIT on every enqueue while userspace workers generate continuous scheduling churn via sched_yield(). Without the preceding fix, this hangs the machine within seconds. Signed-off-by: Tejun Heo Reviewed-by: Christian Loehle Tested-by: Christian Loehle --- tools/testing/selftests/sched_ext/Makefile | 1 + .../selftests/sched_ext/cyclic_kick_wait.bpf.c | 68 ++++++++ .../testing/selftests/sched_ext/cyclic_kick_wait.c | 194 +++++++++++++++++++++ 3 files changed, 263 insertions(+) create mode 100644 tools/testing/selftests/sched_ext/cyclic_kick_wait.bpf.c create mode 100644 tools/testing/selftests/sched_ext/cyclic_kick_wait.c (limited to 'tools') diff --git a/tools/testing/selftests/sched_ext/Makefile b/tools/testing/selftests/sched_ext/Makefile index 006300ac6dff..1c9ca328cca1 100644 --- a/tools/testing/selftests/sched_ext/Makefile +++ b/tools/testing/selftests/sched_ext/Makefile @@ -188,6 +188,7 @@ auto-test-targets := \ rt_stall \ test_example \ total_bw \ + cyclic_kick_wait \ testcase-targets := $(addsuffix .o,$(addprefix $(SCXOBJ_DIR)/,$(auto-test-targets))) diff --git a/tools/testing/selftests/sched_ext/cyclic_kick_wait.bpf.c b/tools/testing/selftests/sched_ext/cyclic_kick_wait.bpf.c new file mode 100644 index 000000000000..cb34d3335917 --- /dev/null +++ b/tools/testing/selftests/sched_ext/cyclic_kick_wait.bpf.c @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Stress concurrent SCX_KICK_WAIT calls to reproduce wait-cycle deadlock. + * + * Three CPUs are designated from userspace. Every enqueue from one of the + * three CPUs kicks the next CPU in the ring with SCX_KICK_WAIT, creating a + * persistent A -> B -> C -> A wait cycle pressure. + */ +#include + +char _license[] SEC("license") = "GPL"; + +const volatile s32 test_cpu_a; +const volatile s32 test_cpu_b; +const volatile s32 test_cpu_c; + +u64 nr_enqueues; +u64 nr_wait_kicks; + +UEI_DEFINE(uei); + +static s32 target_cpu(s32 cpu) +{ + if (cpu == test_cpu_a) + return test_cpu_b; + if (cpu == test_cpu_b) + return test_cpu_c; + if (cpu == test_cpu_c) + return test_cpu_a; + return -1; +} + +void BPF_STRUCT_OPS(cyclic_kick_wait_enqueue, struct task_struct *p, + u64 enq_flags) +{ + s32 this_cpu = bpf_get_smp_processor_id(); + s32 tgt; + + __sync_fetch_and_add(&nr_enqueues, 1); + + if (p->flags & PF_KTHREAD) { + scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_INF, + enq_flags | SCX_ENQ_PREEMPT); + return; + } + + scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags); + + tgt = target_cpu(this_cpu); + if (tgt < 0 || tgt == this_cpu) + return; + + __sync_fetch_and_add(&nr_wait_kicks, 1); + scx_bpf_kick_cpu(tgt, SCX_KICK_WAIT); +} + +void BPF_STRUCT_OPS(cyclic_kick_wait_exit, struct scx_exit_info *ei) +{ + UEI_RECORD(uei, ei); +} + +SEC(".struct_ops.link") +struct sched_ext_ops cyclic_kick_wait_ops = { + .enqueue = cyclic_kick_wait_enqueue, + .exit = cyclic_kick_wait_exit, + .name = "cyclic_kick_wait", + .timeout_ms = 1000U, +}; diff --git a/tools/testing/selftests/sched_ext/cyclic_kick_wait.c b/tools/testing/selftests/sched_ext/cyclic_kick_wait.c new file mode 100644 index 000000000000..c2e5aa9de715 --- /dev/null +++ b/tools/testing/selftests/sched_ext/cyclic_kick_wait.c @@ -0,0 +1,194 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Test SCX_KICK_WAIT forward progress under cyclic wait pressure. + * + * SCX_KICK_WAIT busy-waits until the target CPU enters the scheduling path. 
+ * If multiple CPUs form a wait cycle (A waits for B, B waits for C, C waits + * for A), all CPUs deadlock unless the implementation breaks the cycle. + * + * This test creates that scenario: three CPUs are arranged in a ring. The BPF + * scheduler's ops.enqueue() kicks the next CPU in the ring with SCX_KICK_WAIT + * on every enqueue. Userspace pins 4 worker threads per CPU that loop calling + * sched_yield(), generating a steady stream of enqueues and thus sustained + * A->B->C->A kick_wait cycle pressure. The test passes if the system remains + * responsive for 5 seconds without the scheduler being killed by the watchdog. + */ +#define _GNU_SOURCE + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "scx_test.h" +#include "cyclic_kick_wait.bpf.skel.h" + +#define WORKERS_PER_CPU 4 +#define NR_TEST_CPUS 3 +#define NR_WORKERS (NR_TEST_CPUS * WORKERS_PER_CPU) + +struct worker_ctx { + pthread_t tid; + int cpu; + volatile bool stop; + volatile __u64 iters; + bool started; +}; + +static void *worker_fn(void *arg) +{ + struct worker_ctx *worker = arg; + cpu_set_t mask; + + CPU_ZERO(&mask); + CPU_SET(worker->cpu, &mask); + + if (sched_setaffinity(0, sizeof(mask), &mask)) + return (void *)(uintptr_t)errno; + + while (!worker->stop) { + sched_yield(); + worker->iters++; + } + + return NULL; +} + +static int join_worker(struct worker_ctx *worker) +{ + void *ret; + struct timespec ts; + int err; + + if (!worker->started) + return 0; + + if (clock_gettime(CLOCK_REALTIME, &ts)) + return -errno; + + ts.tv_sec += 2; + err = pthread_timedjoin_np(worker->tid, &ret, &ts); + if (err == ETIMEDOUT) + pthread_detach(worker->tid); + if (err) + return -err; + + if ((uintptr_t)ret) + return -(int)(uintptr_t)ret; + + return 0; +} + +static enum scx_test_status setup(void **ctx) +{ + struct cyclic_kick_wait *skel; + + skel = cyclic_kick_wait__open(); + SCX_FAIL_IF(!skel, "Failed to open skel"); + SCX_ENUM_INIT(skel); + + *ctx = skel; + return SCX_TEST_PASS; +} + +static enum scx_test_status run(void *ctx) +{ + struct cyclic_kick_wait *skel = ctx; + struct worker_ctx workers[NR_WORKERS] = {}; + struct bpf_link *link = NULL; + enum scx_test_status status = SCX_TEST_PASS; + int test_cpus[NR_TEST_CPUS]; + int nr_cpus = 0; + cpu_set_t mask; + int ret, i; + + if (sched_getaffinity(0, sizeof(mask), &mask)) { + SCX_ERR("Failed to get affinity (%d)", errno); + return SCX_TEST_FAIL; + } + + for (i = 0; i < CPU_SETSIZE; i++) { + if (CPU_ISSET(i, &mask)) + test_cpus[nr_cpus++] = i; + if (nr_cpus == NR_TEST_CPUS) + break; + } + + if (nr_cpus < NR_TEST_CPUS) + return SCX_TEST_SKIP; + + skel->rodata->test_cpu_a = test_cpus[0]; + skel->rodata->test_cpu_b = test_cpus[1]; + skel->rodata->test_cpu_c = test_cpus[2]; + + if (cyclic_kick_wait__load(skel)) { + SCX_ERR("Failed to load skel"); + return SCX_TEST_FAIL; + } + + link = bpf_map__attach_struct_ops(skel->maps.cyclic_kick_wait_ops); + if (!link) { + SCX_ERR("Failed to attach scheduler"); + return SCX_TEST_FAIL; + } + + for (i = 0; i < NR_WORKERS; i++) + workers[i].cpu = test_cpus[i / WORKERS_PER_CPU]; + + for (i = 0; i < NR_WORKERS; i++) { + ret = pthread_create(&workers[i].tid, NULL, worker_fn, &workers[i]); + if (ret) { + SCX_ERR("Failed to create worker thread %d (%d)", i, ret); + status = SCX_TEST_FAIL; + goto out; + } + workers[i].started = true; + } + + sleep(5); + + if (skel->data->uei.kind != EXIT_KIND(SCX_EXIT_NONE)) { + SCX_ERR("Scheduler exited unexpectedly (kind=%llu code=%lld)", + (unsigned long 
long)skel->data->uei.kind, + (long long)skel->data->uei.exit_code); + status = SCX_TEST_FAIL; + } + +out: + for (i = 0; i < NR_WORKERS; i++) + workers[i].stop = true; + + for (i = 0; i < NR_WORKERS; i++) { + ret = join_worker(&workers[i]); + if (ret && status == SCX_TEST_PASS) { + SCX_ERR("Failed to join worker thread %d (%d)", i, ret); + status = SCX_TEST_FAIL; + } + } + + if (link) + bpf_link__destroy(link); + + return status; +} + +static void cleanup(void *ctx) +{ + struct cyclic_kick_wait *skel = ctx; + + cyclic_kick_wait__destroy(skel); +} + +struct scx_test cyclic_kick_wait = { + .name = "cyclic_kick_wait", + .description = "Verify SCX_KICK_WAIT forward progress under a 3-CPU wait cycle", + .setup = setup, + .run = run, + .cleanup = cleanup, +}; +REGISTER_SCX_TEST(&cyclic_kick_wait) -- cgit v1.2.3 From 70f73562d278d9f88e7095e327f2a50082a82c65 Mon Sep 17 00:00:00 2001 From: Xiang Mei Date: Mon, 30 Mar 2026 22:02:17 -0700 Subject: selftests/tc-testing: add tests for cls_fw and cls_flow on shared blocks Regression tests for the shared-block NULL derefs fixed in the previous two patches: - fw: attempt to attach an empty fw filter to a shared block and verify the configuration is rejected with EINVAL. - flow: create a flow filter on a shared block without a baseclass and verify the configuration is rejected with EINVAL. Signed-off-by: Xiang Mei Acked-by: Jamal Hadi Salim Reviewed-by: Victor Nogueira Link: https://patch.msgid.link/20260331050217.504278-3-xmei5@asu.edu Signed-off-by: Paolo Abeni --- .../tc-testing/tc-tests/infra/filter.json | 44 ++++++++++++++++++++++ 1 file changed, 44 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/tc-testing/tc-tests/infra/filter.json b/tools/testing/selftests/tc-testing/tc-tests/infra/filter.json index 8d10042b489b..dbce6436ed26 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/infra/filter.json +++ b/tools/testing/selftests/tc-testing/tc-tests/infra/filter.json @@ -22,5 +22,49 @@ "teardown": [ "$TC qdisc del dev $DUMMY root handle 1: htb default 1" ] + }, + { + "id": "b7e3", + "name": "Empty fw filter on shared block - rejected at config time", + "category": [ + "filter", + "fw" + ], + "plugins": { + "requires": "nsPlugin" + }, + "setup": [ + "$TC qdisc add dev $DEV1 egress_block 1 clsact" + ], + "cmdUnderTest": "$TC filter add block 1 protocol ip prio 1 fw", + "expExitCode": "2", + "verifyCmd": "$TC filter show block 1", + "matchPattern": "fw", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 clsact" + ] + }, + { + "id": "c8f4", + "name": "Flow filter on shared block without baseclass - rejected at config time", + "category": [ + "filter", + "flow" + ], + "plugins": { + "requires": "nsPlugin" + }, + "setup": [ + "$TC qdisc add dev $DEV1 ingress_block 1 clsact" + ], + "cmdUnderTest": "$TC filter add block 1 protocol ip prio 1 handle 1 flow map key dst", + "expExitCode": "2", + "verifyCmd": "$TC filter show block 1", + "matchPattern": "flow", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 clsact" + ] } ] -- cgit v1.2.3
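
As a usage note, the tdc cases added in this series can also be run individually by test ID; a sketch assuming the usual tdc.py invocation from tools/testing/selftests/tc-testing (run as root; exact options may differ between trees):

cd tools/testing/selftests/tc-testing
./tdc.py -e a3d7   # HFSC with large m1: no divide-by-zero on class reactivation
./tdc.py -e b7e3   # empty fw filter on a shared block is rejected at config time
./tdc.py -e c8f4   # flow filter without baseclass on a shared block is rejected at config time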