| author | Alexei Starovoitov <ast@kernel.org> | 2020-12-04 04:38:22 +0300 |
|---|---|---|
| committer | Alexei Starovoitov <ast@kernel.org> | 2020-12-04 04:38:42 +0300 |
| commit | 8158c5fd619d42e94a006e9fb8005fb8a4e6f4d4 (patch) | |
| tree | f4852f307e973fa926c8f6093af3e3b9dc4c8e47 /tools/lib | |
| parent | cadd64807cd83e2213dcb70f93d12d978c02b5fa (diff) | |
| parent | 1e38abefcfd65f3ef7b12895dfd48db80aca28da (diff) | |
| download | linux-8158c5fd619d42e94a006e9fb8005fb8a4e6f4d4.tar.xz | |
Merge branch 'Support BTF-powered BPF tracing programs for kernel modules'
Andrii Nakryiko says:
====================
This patch set extends the kernel and libbpf with support for attaching
BTF-powered raw tracepoint (tp_btf) and tracing (fentry/fexit/fmod_ret/lsm)
BPF programs to BPF hooks defined in kernel modules. As part of that, libbpf
now supports performing CO-RE relocations against types in kernel module BTFs,
in addition to the existing vmlinux BTF support.
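As a rough illustration of what this enables (not part of the patch itself), a tracing program can now name a BTF-described module function just as it would a vmlinux function. In the hedged sketch below, "my_module_func" is a made-up name standing in for any ftrace-able, BTF-described function defined in a kernel module, and vmlinux.h is the usual bpftool-generated type dump:

```c
/* Hedged sketch only: assumes a kernel and libbpf with this series applied and
 * a module that defines a BTF-described function int my_module_func(int arg). */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

/* fentry attachment by function name; libbpf resolves my_module_func against
 * vmlinux BTF first and then falls back to kernel module BTFs. */
SEC("fentry/my_module_func")
int BPF_PROG(handle_entry, int arg)
{
	bpf_printk("my_module_func(%d) called", arg);
	return 0;
}
```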
The kernel UAPI for BPF_PROG_LOAD now allows specifying a kernel module (or
vmlinux) BTF object FD in the attach_btf_obj_fd field, which aliases
attach_prog_fd. It identifies the BTF object in which the BTF type given by
attach_btf_id should be looked up.
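A minimal sketch of how the new field might be used directly (error handling omitted; this assumes UAPI headers that already carry attach_btf_obj_fd, with btf_fd and func_btf_id obtained elsewhere, e.g. via bpf_btf_get_fd_by_id() and a BTF name lookup):

```c
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Load a BPF_PROG_TYPE_TRACING program whose attach target lives in the BTF
 * object referred to by btf_fd (vmlinux or a kernel module). */
static int prog_load_with_btf_obj(const struct bpf_insn *insns, __u32 insn_cnt,
				  int btf_fd, __u32 func_btf_id)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_TRACING;
	attr.expected_attach_type = BPF_TRACE_FENTRY;
	attr.insns = (__u64)(unsigned long)insns;
	attr.insn_cnt = insn_cnt;
	attr.license = (__u64)(unsigned long)"GPL";
	/* attach_btf_obj_fd shares storage with attach_prog_fd and selects the
	 * BTF object in which attach_btf_id is resolved. */
	attr.attach_btf_obj_fd = btf_fd;
	attr.attach_btf_id = func_btf_id;

	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}
```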
This patch set also sets up a convenient, fully-controlled custom kernel
module (called "bpf_testmod") that serves as a predictable playground for all
the BPF selftests that rely on module BTFs. Currently pahole doesn't generate
BTF_KIND_FUNC info for ftrace-able static functions in kernel modules, so the
traced function in bpf_sidecar.ko is exposed as a non-static one. Once pahole
is enhanced, we can go back to a static function.
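The shape of such a test module (a hedged sketch, not the actual selftest sources; the function name is illustrative) is roughly a non-static, noinline function that exists only to be traced:

```c
#include <linux/init.h>
#include <linux/module.h>

/* Non-static and noinline so that pahole emits BTF_KIND_FUNC for it and
 * ftrace can patch its entry; the body is irrelevant. */
noinline int bpf_testmod_test_read(int len)
{
	return len;
}

static int __init bpf_testmod_init(void)
{
	/* reference it once so it cannot be optimized away */
	return bpf_testmod_test_read(0);
}

static void __exit bpf_testmod_exit(void)
{
}

module_init(bpf_testmod_init);
module_exit(bpf_testmod_exit);
MODULE_LICENSE("Dual BSD/GPL");
```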
From the end user's perspective, no extra actions are needed. If the desired
attach BTF type is not found in vmlinux, libbpf continues searching across all
kernel module BTFs. That way it doesn't matter whether the BPF hook a user is
trying to attach to is built into the vmlinux image or loaded in a kernel
module.
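In other words, the usual libbpf skeleton flow is unchanged. A hedged user-space sketch, assuming a hypothetical skeleton trace_mod.skel.h generated by bpftool from a program like the one above:

```c
#include <stdio.h>
#include "trace_mod.skel.h" /* hypothetical bpftool-generated skeleton */

int main(void)
{
	struct trace_mod *skel;
	int err;

	skel = trace_mod__open_and_load();
	if (!skel) {
		fprintf(stderr, "failed to open and load skeleton\n");
		return 1;
	}

	/* libbpf resolves the attach target in vmlinux or module BTF here */
	err = trace_mod__attach(skel);
	if (err) {
		fprintf(stderr, "failed to attach: %d\n", err);
		trace_mod__destroy(skel);
		return 1;
	}

	/* ... trigger the traced function and inspect results ... */

	trace_mod__destroy(skel);
	return 0;
}
```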
v5->v6:
- move btf_put() back to syscall.c (kernel test robot);
- added close(fd) in patch #5 (John);
v4->v5:
- use FD to specify BTF object (Alexei);
- move prog->aux->attach_btf putting into bpf_prog_free() for consistency
with putting prog->aux->dst_prog;
- fix BTF FD leak(s) in libbpf;
v3->v4:
- merge together patch sets [0] and [1];
- avoid increasing bpf_reg_state by reordering fields (Alexei);
- preserve btf_data_size in struct module;
v2->v3:
- fix subtle uninitialized variable use in BTF ID iteration code;
v1->v2:
- module_put() inside preempt_disable() region (Alexei);
- bpf_sidecar -> bpf_testmod rename (Alexei);
- test_progs more relaxed handling of bpf_testmod;
- test_progs marks skipped sub-tests properly as SKIP now.
[0] https://patchwork.kernel.org/project/netdevbpf/list/?series=393677&state=*
[1] https://patchwork.kernel.org/project/netdevbpf/list/?series=393679&state=*
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'tools/lib')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | tools/lib/bpf/bpf.c | 101 |
| -rw-r--r-- | tools/lib/bpf/btf.c | 61 |
| -rw-r--r-- | tools/lib/bpf/libbpf.c | 500 |
| -rw-r--r-- | tools/lib/bpf/libbpf_internal.h | 31 |

4 files changed, 520 insertions(+), 173 deletions(-)
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 4025266d0fb0..bba48ff4c5c0 100644
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 1935e83d309c..3c3f2bc6c652 100644
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index b2e16efabde0..912e01b946fe 100644
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index d99bc847bf84..969d0ac592ba 100644
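For reference, the new internal program-load interface that the bpf.c and libbpf.c changes converge on, as added to tools/lib/bpf/libbpf_internal.h in this series (types come from <linux/bpf.h>; struct btf is libbpf's opaque BTF handle):

```c
struct bpf_prog_load_params {
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	const char *name;
	const struct bpf_insn *insns;
	size_t insn_cnt;
	const char *license;
	__u32 kern_version;
	__u32 attach_prog_fd;
	__u32 attach_btf_obj_fd;
	__u32 attach_btf_id;
	__u32 prog_ifindex;
	__u32 prog_btf_fd;
	__u32 prog_flags;

	__u32 func_info_rec_size;
	const void *func_info;
	__u32 func_info_cnt;

	__u32 line_info_rec_size;
	const void *line_info;
	__u32 line_info_cnt;

	__u32 log_level;
	char *log_buf;
	size_t log_buf_sz;
};

int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr);

/* load BTF from an FD, optionally on top of a base (vmlinux) BTF */
struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf);
```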