author		Andrey Grodzovsky <andrey.grodzovsky@crowdstrike.com>	2026-03-02 23:08:35 +0300
committer	Andrii Nakryiko <andrii@kernel.org>	2026-03-06 02:14:24 +0300
commit		6afc431db1b4c21fec96cf6bea29489f4dce17c5 (patch)
tree		c45ff52c3f18e0dc15c26555dd39c26726816b49 /tools/lib
parent		748f9c6811b973a518436136fb86e6284a8854c1 (diff)
download	linux-6afc431db1b4c21fec96cf6bea29489f4dce17c5.tar.xz
libbpf: Optimize kprobe.session attachment for exact function names
Detect exact function names (no wildcards) in bpf_program__attach_kprobe_multi_opts()
and bypass kallsyms parsing, passing the symbol directly to the kernel via the
syms[] array. This benefits all callers, not just kprobe.session.

When the pattern contains no '*' or '?' characters, set syms to point directly
at the pattern string and cnt to 1, skipping the expensive /proc/kallsyms or
available_filter_functions parsing (~150ms per function).

Error code normalization: the fast path returns ESRCH from the kernel's
ftrace_lookup_symbols(), while the slow path returns ENOENT from userspace
kallsyms parsing. Convert ESRCH to ENOENT in the bpf_link_create() error path
to maintain API consistency - both paths now return identical error codes for
"symbol not found".

Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@crowdstrike.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20260302200837.317907-2-andrey.grodzovsky@crowdstrike.com
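For context, a minimal caller-side sketch (not part of this patch) of the case
that now takes the fast path: attaching a program to a single exact function
name through bpf_program__attach_kprobe_multi_opts(). The program handle,
symbol name, and error handling below are illustrative assumptions, not code
from this commit.

	/*
	 * Caller-side sketch (illustrative only): the program is assumed to be
	 * declared with a SEC("kprobe.session/...") or SEC("kprobe.multi/...")
	 * section; the symbol name is an example.
	 */
	#include <errno.h>
	#include <stdio.h>
	#include <bpf/libbpf.h>

	static int attach_exact_kprobe(struct bpf_program *prog)
	{
		LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
		struct bpf_link *link;

		/* "do_sys_openat2" contains no '*' or '?', so libbpf now passes it
		 * straight to the kernel via syms[] instead of scanning kallsyms. */
		link = bpf_program__attach_kprobe_multi_opts(prog, "do_sys_openat2", &opts);
		if (!link) {
			/* With this patch, a missing symbol is reported as -ENOENT on
			 * both the fast and the slow path. */
			fprintf(stderr, "attach failed: %d\n", -errno);
			return -errno;
		}

		bpf_link__destroy(link);
		return 0;
	}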
Diffstat (limited to 'tools/lib')
-rw-r--r--	tools/lib/bpf/libbpf.c	19
1 file changed, 18 insertions(+), 1 deletion(-)
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 0be7017800fe..0662d72bad20 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -12041,7 +12041,16 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
if (addrs && syms)
return libbpf_err_ptr(-EINVAL);
- if (pattern) {
+ /*
+ * Exact function name (no wildcards) without unique_match:
+ * bypass kallsyms parsing and pass the symbol directly to the
+ * kernel via syms[] array. When unique_match is set, fall
+ * through to the slow path which detects duplicate symbols.
+ */
+ if (pattern && !strpbrk(pattern, "*?") && !unique_match) {
+ syms = &pattern;
+ cnt = 1;
+ } else if (pattern) {
if (has_available_filter_functions_addrs())
err = libbpf_available_kprobes_parse(&res);
else
@@ -12084,6 +12093,14 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
link_fd = bpf_link_create(prog_fd, 0, attach_type, &lopts);
if (link_fd < 0) {
err = -errno;
+ /*
+ * Normalize error code: when exact name bypasses kallsyms
+ * parsing, kernel returns ESRCH from ftrace_lookup_symbols().
+ * Convert to ENOENT for API consistency with the pattern
+ * matching path which returns ENOENT from userspace.
+ */
+ if (err == -ESRCH)
+ err = -ENOENT;
pr_warn("prog '%s': failed to attach: %s\n",
prog->name, errstr(err));
goto error;