commit 1cc96e0e20489159398009d2f453e59c10e413c9 (patch)

author:    Mykyta Yatsenko <yatsenko@meta.com>   2026-04-01 18:16:40 +0300
committer: Andrii Nakryiko <andrii@kernel.org>   2026-04-02 23:02:46 +0300
tree:      6e083e459dcba0e3c7ec95ce58b6a6b6fbc81f6a /tools
parent:    e25cfbec08558e15fdf0f31f229b9f2a491e8288 (diff)
download:  linux-1cc96e0e20489159398009d2f453e59c10e413c9.tar.xz
libbpf: Fix BTF handling in bpf_program__clone()
Align bpf_program__clone() with bpf_object_load_prog() by gating
BTF func/line info on FEAT_BTF_FUNC kernel support, and resolve
caller-provided prog_btf_fd before checking obj->btf so that callers
with their own BTF can use clone() even when the object has no BTF
loaded.
While at it, treat func_info and line_info fields as atomic groups
to prevent mismatches between pointer and count from different sources.
Move bpf_program__clone() to libbpf 1.8.
Fixes: 970bd2dced35 ("libbpf: Introduce bpf_program__clone()")
Signed-off-by: Mykyta Yatsenko <yatsenko@meta.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20260401151640.356419-1-mykyta.yatsenko5@gmail.com
Diffstat (limited to 'tools'):

  tools/lib/bpf/libbpf.c   | 59
  tools/lib/bpf/libbpf.map |  2

2 files changed, 44 insertions, 17 deletions
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 9ea41f40dc82..589085466903 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -9852,7 +9852,9 @@ int bpf_program__clone(struct bpf_program *prog, const struct bpf_prog_load_opts
 {
 	LIBBPF_OPTS(bpf_prog_load_opts, attr);
 	struct bpf_object *obj;
-	int err, fd;
+	const void *info;
+	__u32 info_cnt, info_rec_size;
+	int err, fd, prog_btf_fd;
 
 	if (!prog)
 		return libbpf_err(-EINVAL);
@@ -9878,19 +9880,41 @@ int bpf_program__clone(struct bpf_program *prog, const struct bpf_prog_load_opts
 	if (attr.token_fd)
 		attr.prog_flags |= BPF_F_TOKEN_FD;
 
-	/* BTF func/line info */
-	if (obj->btf && btf__fd(obj->btf) >= 0) {
-		attr.prog_btf_fd = OPTS_GET(opts, prog_btf_fd, 0) ?: btf__fd(obj->btf);
-		attr.func_info = OPTS_GET(opts, func_info, NULL) ?: prog->func_info;
-		attr.func_info_cnt = OPTS_GET(opts, func_info_cnt, 0) ?: prog->func_info_cnt;
-		attr.func_info_rec_size =
-			OPTS_GET(opts, func_info_rec_size, 0) ?: prog->func_info_rec_size;
-		attr.line_info = OPTS_GET(opts, line_info, NULL) ?: prog->line_info;
-		attr.line_info_cnt = OPTS_GET(opts, line_info_cnt, 0) ?: prog->line_info_cnt;
-		attr.line_info_rec_size =
-			OPTS_GET(opts, line_info_rec_size, 0) ?: prog->line_info_rec_size;
+	prog_btf_fd = OPTS_GET(opts, prog_btf_fd, 0);
+	if (!prog_btf_fd && obj->btf)
+		prog_btf_fd = btf__fd(obj->btf);
+
+	/* BTF func/line info: only pass if kernel supports it */
+	if (kernel_supports(obj, FEAT_BTF_FUNC) && prog_btf_fd > 0) {
+		attr.prog_btf_fd = prog_btf_fd;
+
+		/* func_info/line_info triples: all-or-nothing from caller */
+		info = OPTS_GET(opts, func_info, NULL);
+		info_cnt = OPTS_GET(opts, func_info_cnt, 0);
+		info_rec_size = OPTS_GET(opts, func_info_rec_size, 0);
+		if (!!info != !!info_cnt || !!info != !!info_rec_size) {
+			pr_warn("prog '%s': func_info, func_info_cnt, and func_info_rec_size must all be specified or all omitted\n",
+				prog->name);
+			return libbpf_err(-EINVAL);
+		}
+		attr.func_info = info ?: prog->func_info;
+		attr.func_info_cnt = info ? info_cnt : prog->func_info_cnt;
+		attr.func_info_rec_size = info ? info_rec_size : prog->func_info_rec_size;
+
+		info = OPTS_GET(opts, line_info, NULL);
+		info_cnt = OPTS_GET(opts, line_info_cnt, 0);
+		info_rec_size = OPTS_GET(opts, line_info_rec_size, 0);
+		if (!!info != !!info_cnt || !!info != !!info_rec_size) {
+			pr_warn("prog '%s': line_info, line_info_cnt, and line_info_rec_size must all be specified or all omitted\n",
+				prog->name);
+			return libbpf_err(-EINVAL);
+		}
+		attr.line_info = info ?: prog->line_info;
+		attr.line_info_cnt = info ? info_cnt : prog->line_info_cnt;
+		attr.line_info_rec_size = info ? info_rec_size : prog->line_info_rec_size;
 	}
+	/* Logging is caller-controlled; no fallback to prog/obj log settings */
 	attr.log_buf = OPTS_GET(opts, log_buf, NULL);
 	attr.log_size = OPTS_GET(opts, log_size, 0);
 	attr.log_level = OPTS_GET(opts, log_level, 0);
@@ -9912,14 +9936,17 @@ int bpf_program__clone(struct bpf_program *prog, const struct bpf_prog_load_opts
 
 	/* Re-apply caller overrides for output fields */
 	if (OPTS_GET(opts, expected_attach_type, 0))
-		attr.expected_attach_type =
-			OPTS_GET(opts, expected_attach_type, 0);
+		attr.expected_attach_type = OPTS_GET(opts, expected_attach_type, 0);
 	if (OPTS_GET(opts, attach_btf_id, 0))
 		attr.attach_btf_id = OPTS_GET(opts, attach_btf_id, 0);
 	if (OPTS_GET(opts, attach_btf_obj_fd, 0))
-		attr.attach_btf_obj_fd =
-			OPTS_GET(opts, attach_btf_obj_fd, 0);
+		attr.attach_btf_obj_fd = OPTS_GET(opts, attach_btf_obj_fd, 0);
+	/*
+	 * Unlike bpf_object_load_prog(), we intentionally do not call bpf_prog_bind_map()
+	 * for RODATA maps here to avoid mutating the object's state. Callers can bind the
+	 * required maps themselves using bpf_prog_bind_map().
+	 */
 	fd = bpf_prog_load(prog->type, prog->name, obj->license,
			   prog->insns, prog->insns_cnt, &attr);
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 346fd346666b..dfed8d60af05 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -452,12 +452,12 @@ LIBBPF_1.7.0 {
 		bpf_map__set_exclusive_program;
 		bpf_map__exclusive_program;
 		bpf_prog_assoc_struct_ops;
-		bpf_program__clone;
 		bpf_program__assoc_struct_ops;
 		btf__permute;
 } LIBBPF_1.6.0;
 
 LIBBPF_1.8.0 {
 	global:
+		bpf_program__clone;
 		btf__new_empty_opts;
 } LIBBPF_1.7.0;
