summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSun Jian <sun.jian.kdev@gmail.com>2026-02-28 10:45:55 +0300
committerAndrii Nakryiko <andrii@kernel.org>2026-03-06 02:07:40 +0300
commit7f20d371fd879f5bb9be739c86c9bac13ea920c8 (patch)
tree3c3e12a0901a7d339cb62e9b530b85566923f63f
parent74d3305e620b3c71b414198f9cf9b1609148d277 (diff)
downloadlinux-7f20d371fd879f5bb9be739c86c9bac13ea920c8.tar.xz
selftests/bpf: bpf_cookie: Make perf_event subtest trigger reliably
The perf_event subtest relies on SW_CPU_CLOCK sampling to trigger the BPF program, but the current CPU burn loop can be too short on slower systems and may fail to generate any overflow sample. This leaves pe_res unchanged and makes the test flaky. Make burn_cpu() take a loop count and use a longer burn only for the perf_event subtest. Also scope perf_event_open() to the current task to avoid wasting samples on unrelated activity. Signed-off-by: Sun Jian <sun.jian.kdev@gmail.com> Signed-off-by: Andrii Nakryiko <andrii@kernel.org> Acked-by: Jiri Olsa <jolsa@kernel.org> Link: https://lore.kernel.org/bpf/20260228074555.122950-3-sun.jian.kdev@gmail.com
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_cookie.c19
1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
index b7643a5bf7ad..35adc3f6d443 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
@@ -6,6 +6,7 @@
#include <sys/syscall.h>
#include <sys/mman.h>
#include <unistd.h>
+#include <linux/compiler.h>
#include <test_progs.h>
#include <network_helpers.h>
#include <bpf/btf.h>
@@ -431,11 +432,12 @@ cleanup:
bpf_link__destroy(link3);
}
-static void burn_cpu(void)
+static void burn_cpu(long loops)
{
- volatile int j = 0;
+ long j = 0;
cpu_set_t cpu_set;
- int i, err;
+ long i;
+ int err;
/* generate some branches on cpu 0 */
CPU_ZERO(&cpu_set);
@@ -443,9 +445,10 @@ static void burn_cpu(void)
err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
ASSERT_OK(err, "set_thread_affinity");
- /* spin the loop for a while (random high number) */
- for (i = 0; i < 1000000; ++i)
+ for (i = 0; i < loops; ++i) {
++j;
+ barrier();
+ }
}
static void pe_subtest(struct test_bpf_cookie *skel)
@@ -461,7 +464,7 @@ static void pe_subtest(struct test_bpf_cookie *skel)
attr.type = PERF_TYPE_SOFTWARE;
attr.config = PERF_COUNT_SW_CPU_CLOCK;
attr.sample_period = 100000;
- pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
+ pfd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC);
if (!ASSERT_GE(pfd, 0, "perf_fd"))
goto cleanup;
@@ -470,7 +473,7 @@ static void pe_subtest(struct test_bpf_cookie *skel)
if (!ASSERT_OK_PTR(link, "link1"))
goto cleanup;
- burn_cpu(); /* trigger BPF prog */
+ burn_cpu(100000000L); /* trigger BPF prog */
ASSERT_EQ(skel->bss->pe_res, 0x100000, "pe_res1");
@@ -489,7 +492,7 @@ static void pe_subtest(struct test_bpf_cookie *skel)
if (!ASSERT_OK_PTR(link, "link2"))
goto cleanup;
- burn_cpu(); /* trigger BPF prog */
+ burn_cpu(100000000L); /* trigger BPF prog */
ASSERT_EQ(skel->bss->pe_res, 0x200000, "pe_res2");