summaryrefslogtreecommitdiff
path: root/tools
diff options
context:
space:
mode:
authorAndrii Nakryiko <andrii@kernel.org>2026-03-06 02:14:25 +0300
committerAndrii Nakryiko <andrii@kernel.org>2026-03-06 02:16:20 +0300
commit6dd780f973816133f189efec04118c1e6b1b443d (patch)
treea3b8d83ae866ec5c264a31ed4821fc747de2831d /tools
parent748f9c6811b973a518436136fb86e6284a8854c1 (diff)
parenta28441dd29617b330d7284dc00b610be196b783f (diff)
downloadlinux-6dd780f973816133f189efec04118c1e6b1b443d.tar.xz
Merge branch 'optimize-kprobe-session-attachment-for-exact-function-names'
Andrey Grodzovsky says: ==================== Optimize kprobe.session attachment for exact function names When libbpf attaches kprobe.session programs with exact function names (the common case: SEC("kprobe.session/vfs_read")), the current code path has two independent performance bottlenecks: 1. Userspace (libbpf): attach_kprobe_session() always parses /proc/kallsyms to resolve function names, even when the name is exact (no wildcards). This takes ~150ms per function. 2. Kernel (ftrace): ftrace_lookup_symbols() does a full O(N) linear scan over ~200K kernel symbols via kallsyms_on_each_symbol(), decompressing every symbol name, even when resolving a single symbol (cnt == 1). This series optimizes the libbpf side: libbpf detects exact function names (no wildcards) in bpf_program__attach_kprobe_multi_opts() and bypasses kallsyms parsing, passing the symbol directly to the kernel via the syms[] array. ESRCH is normalized to ENOENT for API consistency. Selftests validate exact-name attachment via kprobe_multi_session.c and error consistency between wildcard and exact paths in test_attach_api_fails. 
Changes since v3 [3]: - Skip fast path when unique_match is set (Jiri Olsa, CI bot) Changes since v2 [2]: - Use if/else-if instead of goto (Jiri Olsa) - Use syms = &pattern directly (Jiri Olsa) - Drop unneeded pattern = NULL (Jiri Olsa) - Revert cosmetic rename in attach_kprobe_session (Jiri Olsa) - Remove "module symbols" from ftrace comment (CI bot) Changes since v1 [1]: - Move optimization into attach_kprobe_multi_opts (Jiri Olsa) - Use ftrace_location as boolean check only (Jiri Olsa) - Remove verbose perf rationale from comment (Steven Rostedt) - Consolidate tests into existing subtests (Jiri Olsa) - Delete standalone _syms.c and _errors.c files [1] https://lore.kernel.org/bpf/20260223215113.924599-1-andrey.grodzovsky@crowdstrike.com/ [2] https://lore.kernel.org/bpf/20260226173342.3565919-1-andrey.grodzovsky@crowdstrike.com/ [3] https://lore.kernel.org/bpf/20260227204052.725813-1-andrey.grodzovsky@crowdstrike.com/ ==================== Link: https://patch.msgid.link/20260302200837.317907-1-andrey.grodzovsky@crowdstrike.com Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Diffstat (limited to 'tools')
-rw-r--r--tools/lib/bpf/libbpf.c19
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c33
-rw-r--r--tools/testing/selftests/bpf/progs/kprobe_multi_session.c10
3 files changed, 59 insertions, 3 deletions
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 0be7017800fe..0662d72bad20 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -12041,7 +12041,16 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
if (addrs && syms)
return libbpf_err_ptr(-EINVAL);
- if (pattern) {
+ /*
+ * Exact function name (no wildcards) without unique_match:
+ * bypass kallsyms parsing and pass the symbol directly to the
+ * kernel via syms[] array. When unique_match is set, fall
+ * through to the slow path which detects duplicate symbols.
+ */
+ if (pattern && !strpbrk(pattern, "*?") && !unique_match) {
+ syms = &pattern;
+ cnt = 1;
+ } else if (pattern) {
if (has_available_filter_functions_addrs())
err = libbpf_available_kprobes_parse(&res);
else
@@ -12084,6 +12093,14 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
link_fd = bpf_link_create(prog_fd, 0, attach_type, &lopts);
if (link_fd < 0) {
err = -errno;
+ /*
+ * Normalize error code: when exact name bypasses kallsyms
+ * parsing, kernel returns ESRCH from ftrace_lookup_symbols().
+ * Convert to ENOENT for API consistency with the pattern
+ * matching path which returns ENOENT from userspace.
+ */
+ if (err == -ESRCH)
+ err = -ENOENT;
pr_warn("prog '%s': failed to attach: %s\n",
prog->name, errstr(err));
goto error;
diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
index f81dcd609ee9..78c974d4ea33 100644
--- a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
@@ -327,6 +327,30 @@ static void test_attach_api_fails(void)
if (!ASSERT_EQ(saved_error, -E2BIG, "fail_6_error"))
goto cleanup;
+ /* fail_7 - non-existent wildcard pattern (slow path) */
+ LIBBPF_OPTS_RESET(opts);
+
+ link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
+ "__nonexistent_func_xyz_*",
+ &opts);
+ saved_error = -errno;
+ if (!ASSERT_ERR_PTR(link, "fail_7"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(saved_error, -ENOENT, "fail_7_error"))
+ goto cleanup;
+
+ /* fail_8 - non-existent exact name (fast path), same error as wildcard */
+ link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
+ "__nonexistent_func_xyz_123",
+ &opts);
+ saved_error = -errno;
+ if (!ASSERT_ERR_PTR(link, "fail_8"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(saved_error, -ENOENT, "fail_8_error"))
+ goto cleanup;
+
cleanup:
bpf_link__destroy(link);
kprobe_multi__destroy(skel);
@@ -355,8 +379,13 @@ static void test_session_skel_api(void)
ASSERT_OK(err, "test_run");
ASSERT_EQ(topts.retval, 0, "test_run");
- /* bpf_fentry_test1-4 trigger return probe, result is 2 */
- for (i = 0; i < 4; i++)
+ /*
+ * bpf_fentry_test1 is hit by both the wildcard probe and the exact
+ * name probe (test_kprobe_syms), so entry + return fires twice: 4.
+ * bpf_fentry_test2-4 are hit only by the wildcard probe: 2.
+ */
+ ASSERT_EQ(skel->bss->kprobe_session_result[0], 4, "kprobe_session_result");
+ for (i = 1; i < 4; i++)
ASSERT_EQ(skel->bss->kprobe_session_result[i], 2, "kprobe_session_result");
/* bpf_fentry_test5-8 trigger only entry probe, result is 1 */
diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi_session.c b/tools/testing/selftests/bpf/progs/kprobe_multi_session.c
index bd8b7fb7061e..d52a65b40bbf 100644
--- a/tools/testing/selftests/bpf/progs/kprobe_multi_session.c
+++ b/tools/testing/selftests/bpf/progs/kprobe_multi_session.c
@@ -76,3 +76,13 @@ int test_kprobe(struct pt_regs *ctx)
{
return session_check(ctx);
}
+
+/*
+ * Exact function name (no wildcards) - exercises the fast syms[] path
+ * in bpf_program__attach_kprobe_multi_opts() which bypasses kallsyms parsing.
+ */
+SEC("kprobe.session/bpf_fentry_test1")
+int test_kprobe_syms(struct pt_regs *ctx)
+{
+ return session_check(ctx);
+}