diff options
| author | Andrii Nakryiko <andrii@kernel.org> | 2026-04-02 23:23:19 +0300 |
|---|---|---|
| committer | Andrii Nakryiko <andrii@kernel.org> | 2026-04-02 23:23:19 +0300 |
| commit | e8aec1058ca598fe2013ef3489ae729a8070801b (patch) | |
| tree | db8aa70bb6ed814b79c44634c9caf018e8669a41 /tools/testing | |
| parent | 1cc96e0e20489159398009d2f453e59c10e413c9 (diff) | |
| parent | 9d77cefe8fcd4bd1c0bcfd4073fe6cd4325c8d9e (diff) | |
| download | linux-e8aec1058ca598fe2013ef3489ae729a8070801b.tar.xz | |
Merge branch 'libbpf-clarify-raw-address-single-kprobe-attach-behavior'
Hoyeon Lee says:
====================
libbpf: clarify raw-address single kprobe attach behavior
Today libbpf documents single-kprobe attach through func_name, with an
optional offset. For the PMU-based path, func_name = NULL with an
absolute address in offset already works as well, but that is not
described in the API.
This patchset clarifies this behavior. The first commit fixes kprobe
and uprobe attach error handling to use direct error codes. The second commit
adds kprobe API comments documenting the raw-address form and rejects that form
explicitly for legacy tracefs/debugfs kprobes. The last commit adds PERF and
LINK selftests for the raw-address form, and checks that LEGACY rejects it.
---
Changes in v7:
- Change selftest line wrapping and assertions
Changes in v6:
- Split the kprobe/uprobe direct error-code fix into a separate patch
Changes in v5:
- Add kprobe API docs, use -EOPNOTSUPP, and switch selftests to LIBBPF_OPTS
Changes in v4:
- Inline raw-address error formatting and remove the probe_target buffer
Changes in v3:
- Drop bpf_kprobe_opts.addr and reuse offset when func_name is NULL
- Make legacy tracefs/debugfs kprobes reject the raw-address form
- Update selftests to cover PERF/LINK raw-address attach and LEGACY reject
Changes in v2:
- Fix line wrapping and indentation
====================
Link: https://patch.msgid.link/20260401143116.185049-1-hoyeon.lee@suse.com
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Diffstat (limited to 'tools/testing')
| -rw-r--r-- | tools/testing/selftests/bpf/prog_tests/attach_probe.c | 80 |
1 file changed, 80 insertions, 0 deletions
diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c index 38852df70c0d..12a841afda68 100644 --- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c +++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c @@ -123,6 +123,80 @@ cleanup: test_attach_probe_manual__destroy(skel); } +/* manual attach address-based kprobe/kretprobe testings */ +static void test_attach_kprobe_by_addr(enum probe_attach_mode attach_mode) +{ + LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts); + struct test_attach_probe_manual *skel; + unsigned long func_addr; + + if (!ASSERT_OK(load_kallsyms(), "load_kallsyms")) + return; + + func_addr = ksym_get_addr(SYS_NANOSLEEP_KPROBE_NAME); + if (!ASSERT_NEQ(func_addr, 0UL, "func_addr")) + return; + + skel = test_attach_probe_manual__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load")) + return; + + kprobe_opts.attach_mode = attach_mode; + kprobe_opts.retprobe = false; + kprobe_opts.offset = func_addr; + skel->links.handle_kprobe = + bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe, + NULL, &kprobe_opts); + if (!ASSERT_OK_PTR(skel->links.handle_kprobe, "attach_kprobe_by_addr")) + goto cleanup; + + kprobe_opts.retprobe = true; + skel->links.handle_kretprobe = + bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe, + NULL, &kprobe_opts); + if (!ASSERT_OK_PTR(skel->links.handle_kretprobe, "attach_kretprobe_by_addr")) + goto cleanup; + + /* trigger & validate kprobe && kretprobe */ + usleep(1); + + ASSERT_EQ(skel->bss->kprobe_res, 1, "check_kprobe_res"); + ASSERT_EQ(skel->bss->kretprobe_res, 2, "check_kretprobe_res"); + +cleanup: + test_attach_probe_manual__destroy(skel); +} + +/* reject legacy address-based kprobe attach */ +static void test_attach_kprobe_legacy_by_addr_reject(void) +{ + LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts); + struct test_attach_probe_manual *skel; + unsigned long func_addr; + + if (!ASSERT_OK(load_kallsyms(), "load_kallsyms")) + return; + + func_addr = ksym_get_addr(SYS_NANOSLEEP_KPROBE_NAME); + if (!ASSERT_NEQ(func_addr, 0UL, "func_addr")) + return; + + skel = test_attach_probe_manual__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load")) + return; + + kprobe_opts.attach_mode = PROBE_ATTACH_MODE_LEGACY; + kprobe_opts.offset = func_addr; + skel->links.handle_kprobe = + bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe, + NULL, &kprobe_opts); + ASSERT_ERR_PTR(skel->links.handle_kprobe, "attach_kprobe_legacy_by_addr"); + ASSERT_EQ(libbpf_get_error(skel->links.handle_kprobe), + -EOPNOTSUPP, "attach_kprobe_legacy_by_addr_err"); + + test_attach_probe_manual__destroy(skel); +} + /* attach uprobe/uretprobe long event name testings */ static void test_attach_uprobe_long_event_name(void) { @@ -478,6 +552,12 @@ void test_attach_probe(void) test_attach_probe_manual(PROBE_ATTACH_MODE_PERF); if (test__start_subtest("manual-link")) test_attach_probe_manual(PROBE_ATTACH_MODE_LINK); + if (test__start_subtest("kprobe-perf-by-addr")) + test_attach_kprobe_by_addr(PROBE_ATTACH_MODE_PERF); + if (test__start_subtest("kprobe-link-by-addr")) + test_attach_kprobe_by_addr(PROBE_ATTACH_MODE_LINK); + if (test__start_subtest("kprobe-legacy-by-addr-reject")) + test_attach_kprobe_legacy_by_addr_reject(); if (test__start_subtest("auto")) test_attach_probe_auto(skel); |
