Diffstat (limited to 'tools/lib/bpf/btf.c')
-rw-r--r-- | tools/lib/bpf/btf.c | 1625
1 file changed, 1471 insertions(+), 154 deletions(-)
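A minimal usage sketch of the in-memory BTF construction API introduced by the patch below (btf__new_empty() plus the btf__add_*() appenders). This is an editor's illustration, not code from the patch; it assumes libbpf's installed headers (<bpf/btf.h>, <bpf/libbpf.h>), the pre-existing libbpf_get_error() helper, and the BTF_INT_SIGNED encoding flag from the UAPI linux/btf.h:

#include <stdio.h>
#include <bpf/btf.h>	/* btf__new_empty(), btf__add_*(), btf__free() */
#include <bpf/libbpf.h>	/* libbpf_get_error() */

int main(void)
{
	struct btf *btf;
	int int_id, pair_id, err;

	btf = btf__new_empty();
	if (libbpf_get_error(btf))
		return 1;

	/* [1] int 'int', 4 bytes, signed encoding */
	int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);

	/* [2] struct pair { int x; int y; }, 8 bytes total */
	pair_id = btf__add_struct(btf, "pair", 8);
	err = btf__add_field(btf, "x", int_id, 0, 0);		/* bit offset 0 */
	err = err ?: btf__add_field(btf, "y", int_id, 32, 0);	/* bit offset 32 */

	if (int_id < 0 || pair_id < 0 || err < 0) {
		btf__free(btf);
		return 1;
	}

	printf("constructed %u types\n", btf__get_nr_types(btf));
	btf__free(btf);
	return 0;
}

Note that, as the implementation in the diff shows, every btf__add_*() call goes through btf_ensure_modifiable(), so the first append on a parsed BTF splits it into independently growable header/types/strings regions and invalidates any cached contiguous raw data.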
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 7dfca7016aaa..231b07203e3d 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* Copyright (c) 2018 Facebook */ +#include <byteswap.h> #include <endian.h> #include <stdio.h> #include <stdlib.h> @@ -21,26 +22,78 @@ #include "libbpf_internal.h" #include "hashmap.h" -/* make sure libbpf doesn't use kernel-only integer typedefs */ -#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64 - #define BTF_MAX_NR_TYPES 0x7fffffffU #define BTF_MAX_STR_OFFSET 0x7fffffffU static struct btf_type btf_void; struct btf { - union { - struct btf_header *hdr; - void *data; - }; - struct btf_type **types; - const char *strings; - void *nohdr_data; + /* raw BTF data in native endianness */ + void *raw_data; + /* raw BTF data in non-native endianness */ + void *raw_data_swapped; + __u32 raw_size; + /* whether target endianness differs from the native one */ + bool swapped_endian; + + /* + * When BTF is loaded from an ELF or raw memory it is stored + * in a contiguous memory block. The hdr, type_data, and, strs_data + * point inside that memory region to their respective parts of BTF + * representation: + * + * +--------------------------------+ + * | Header | Types | Strings | + * +--------------------------------+ + * ^ ^ ^ + * | | | + * hdr | | + * types_data-+ | + * strs_data------------+ + * + * If BTF data is later modified, e.g., due to types added or + * removed, BTF deduplication performed, etc, this contiguous + * representation is broken up into three independently allocated + * memory regions to be able to modify them independently. + * raw_data is nulled out at that point, but can be later allocated + * and cached again if user calls btf__get_raw_data(), at which point + * raw_data will contain a contiguous copy of header, types, and + * strings: + * + * +----------+ +---------+ +-----------+ + * | Header | | Types | | Strings | + * +----------+ +---------+ +-----------+ + * ^ ^ ^ + * | | | + * hdr | | + * types_data----+ | + * strs_data------------------+ + * + * +----------+---------+-----------+ + * | Header | Types | Strings | + * raw_data----->+----------+---------+-----------+ + */ + struct btf_header *hdr; + + void *types_data; + size_t types_data_cap; /* used size stored in hdr->type_len */ + + /* type ID to `struct btf_type *` lookup index */ + __u32 *type_offs; + size_t type_offs_cap; __u32 nr_types; - __u32 types_size; - __u32 data_size; + + void *strs_data; + size_t strs_data_cap; /* used size stored in hdr->str_len */ + + /* lookup index for each unique string in strings section */ + struct hashmap *strs_hash; + /* whether strings are already deduplicated */ + bool strs_deduped; + /* BTF object FD, if loaded into kernel */ int fd; + + /* Pointer size (in bytes) for a target architecture of this BTF */ int ptr_sz; }; @@ -49,60 +102,114 @@ static inline __u64 ptr_to_u64(const void *ptr) return (__u64) (unsigned long) ptr; } -static int btf_add_type(struct btf *btf, struct btf_type *t) +/* Ensure given dynamically allocated memory region pointed to by *data* with + * capacity of *cap_cnt* elements each taking *elem_sz* bytes has enough + * memory to accomodate *add_cnt* new elements, assuming *cur_cnt* elements + * are already used. At most *max_cnt* elements can be ever allocated. 
+ * If necessary, memory is reallocated and all existing data is copied over, + * new pointer to the memory region is stored at *data, new memory region + * capacity (in number of elements) is stored in *cap. + * On success, memory pointer to the beginning of unused memory is returned. + * On error, NULL is returned. + */ +void *btf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz, + size_t cur_cnt, size_t max_cnt, size_t add_cnt) { - if (btf->types_size - btf->nr_types < 2) { - struct btf_type **new_types; - __u32 expand_by, new_size; + size_t new_cnt; + void *new_data; - if (btf->types_size == BTF_MAX_NR_TYPES) - return -E2BIG; + if (cur_cnt + add_cnt <= *cap_cnt) + return *data + cur_cnt * elem_sz; - expand_by = max(btf->types_size >> 2, 16U); - new_size = min(BTF_MAX_NR_TYPES, btf->types_size + expand_by); + /* requested more than the set limit */ + if (cur_cnt + add_cnt > max_cnt) + return NULL; - new_types = realloc(btf->types, sizeof(*new_types) * new_size); - if (!new_types) - return -ENOMEM; + new_cnt = *cap_cnt; + new_cnt += new_cnt / 4; /* expand by 25% */ + if (new_cnt < 16) /* but at least 16 elements */ + new_cnt = 16; + if (new_cnt > max_cnt) /* but not exceeding a set limit */ + new_cnt = max_cnt; + if (new_cnt < cur_cnt + add_cnt) /* also ensure we have enough memory */ + new_cnt = cur_cnt + add_cnt; + + new_data = libbpf_reallocarray(*data, new_cnt, elem_sz); + if (!new_data) + return NULL; - if (btf->nr_types == 0) - new_types[0] = &btf_void; + /* zero out newly allocated portion of memory */ + memset(new_data + (*cap_cnt) * elem_sz, 0, (new_cnt - *cap_cnt) * elem_sz); - btf->types = new_types; - btf->types_size = new_size; - } + *data = new_data; + *cap_cnt = new_cnt; + return new_data + cur_cnt * elem_sz; +} - btf->types[++(btf->nr_types)] = t; +/* Ensure given dynamically allocated memory region has enough allocated space + * to accommodate *need_cnt* elements of size *elem_sz* bytes each + */ +int btf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt) +{ + void *p; + + if (need_cnt <= *cap_cnt) + return 0; + p = btf_add_mem(data, cap_cnt, elem_sz, *cap_cnt, SIZE_MAX, need_cnt - *cap_cnt); + if (!p) + return -ENOMEM; + + return 0; +} + +static int btf_add_type_idx_entry(struct btf *btf, __u32 type_off) +{ + __u32 *p; + + p = btf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32), + btf->nr_types + 1, BTF_MAX_NR_TYPES, 1); + if (!p) + return -ENOMEM; + + *p = type_off; return 0; } +static void btf_bswap_hdr(struct btf_header *h) +{ + h->magic = bswap_16(h->magic); + h->hdr_len = bswap_32(h->hdr_len); + h->type_off = bswap_32(h->type_off); + h->type_len = bswap_32(h->type_len); + h->str_off = bswap_32(h->str_off); + h->str_len = bswap_32(h->str_len); +} + static int btf_parse_hdr(struct btf *btf) { - const struct btf_header *hdr = btf->hdr; + struct btf_header *hdr = btf->hdr; __u32 meta_left; - if (btf->data_size < sizeof(struct btf_header)) { + if (btf->raw_size < sizeof(struct btf_header)) { pr_debug("BTF header not found\n"); return -EINVAL; } - if (hdr->magic != BTF_MAGIC) { + if (hdr->magic == bswap_16(BTF_MAGIC)) { + btf->swapped_endian = true; + if (bswap_32(hdr->hdr_len) != sizeof(struct btf_header)) { + pr_warn("Can't load BTF with non-native endianness due to unsupported header length %u\n", + bswap_32(hdr->hdr_len)); + return -ENOTSUP; + } + btf_bswap_hdr(hdr); + } else if (hdr->magic != BTF_MAGIC) { pr_debug("Invalid BTF magic:%x\n", hdr->magic); return -EINVAL; } - if (hdr->version != BTF_VERSION) { - 
pr_debug("Unsupported BTF version:%u\n", hdr->version); - return -ENOTSUP; - } - - if (hdr->flags) { - pr_debug("Unsupported BTF flags:%x\n", hdr->flags); - return -ENOTSUP; - } - - meta_left = btf->data_size - sizeof(*hdr); + meta_left = btf->raw_size - sizeof(*hdr); if (!meta_left) { pr_debug("BTF has no data\n"); return -EINVAL; @@ -128,15 +235,13 @@ static int btf_parse_hdr(struct btf *btf) return -EINVAL; } - btf->nohdr_data = btf->hdr + 1; - return 0; } static int btf_parse_str_sec(struct btf *btf) { const struct btf_header *hdr = btf->hdr; - const char *start = btf->nohdr_data + hdr->str_off; + const char *start = btf->strs_data; const char *end = start + btf->hdr->str_len; if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET || @@ -145,14 +250,12 @@ static int btf_parse_str_sec(struct btf *btf) return -EINVAL; } - btf->strings = start; - return 0; } -static int btf_type_size(struct btf_type *t) +static int btf_type_size(const struct btf_type *t) { - int base_size = sizeof(struct btf_type); + const int base_size = sizeof(struct btf_type); __u16 vlen = btf_vlen(t); switch (btf_kind(t)) { @@ -185,25 +288,120 @@ static int btf_type_size(struct btf_type *t) } } +static void btf_bswap_type_base(struct btf_type *t) +{ + t->name_off = bswap_32(t->name_off); + t->info = bswap_32(t->info); + t->type = bswap_32(t->type); +} + +static int btf_bswap_type_rest(struct btf_type *t) +{ + struct btf_var_secinfo *v; + struct btf_member *m; + struct btf_array *a; + struct btf_param *p; + struct btf_enum *e; + __u16 vlen = btf_vlen(t); + int i; + + switch (btf_kind(t)) { + case BTF_KIND_FWD: + case BTF_KIND_CONST: + case BTF_KIND_VOLATILE: + case BTF_KIND_RESTRICT: + case BTF_KIND_PTR: + case BTF_KIND_TYPEDEF: + case BTF_KIND_FUNC: + return 0; + case BTF_KIND_INT: + *(__u32 *)(t + 1) = bswap_32(*(__u32 *)(t + 1)); + return 0; + case BTF_KIND_ENUM: + for (i = 0, e = btf_enum(t); i < vlen; i++, e++) { + e->name_off = bswap_32(e->name_off); + e->val = bswap_32(e->val); + } + return 0; + case BTF_KIND_ARRAY: + a = btf_array(t); + a->type = bswap_32(a->type); + a->index_type = bswap_32(a->index_type); + a->nelems = bswap_32(a->nelems); + return 0; + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + for (i = 0, m = btf_members(t); i < vlen; i++, m++) { + m->name_off = bswap_32(m->name_off); + m->type = bswap_32(m->type); + m->offset = bswap_32(m->offset); + } + return 0; + case BTF_KIND_FUNC_PROTO: + for (i = 0, p = btf_params(t); i < vlen; i++, p++) { + p->name_off = bswap_32(p->name_off); + p->type = bswap_32(p->type); + } + return 0; + case BTF_KIND_VAR: + btf_var(t)->linkage = bswap_32(btf_var(t)->linkage); + return 0; + case BTF_KIND_DATASEC: + for (i = 0, v = btf_var_secinfos(t); i < vlen; i++, v++) { + v->type = bswap_32(v->type); + v->offset = bswap_32(v->offset); + v->size = bswap_32(v->size); + } + return 0; + default: + pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t)); + return -EINVAL; + } +} + static int btf_parse_type_sec(struct btf *btf) { struct btf_header *hdr = btf->hdr; - void *nohdr_data = btf->nohdr_data; - void *next_type = nohdr_data + hdr->type_off; - void *end_type = nohdr_data + hdr->str_off; + void *next_type = btf->types_data; + void *end_type = next_type + hdr->type_len; + int err, i = 0, type_size; - while (next_type < end_type) { - struct btf_type *t = next_type; - int type_size; - int err; + /* VOID (type_id == 0) is specially handled by btf__get_type_by_id(), + * so ensure we can never properly use its offset from index by + * setting it to a large value + */ + err = 
btf_add_type_idx_entry(btf, UINT_MAX); + if (err) + return err; + + while (next_type + sizeof(struct btf_type) <= end_type) { + i++; + + if (btf->swapped_endian) + btf_bswap_type_base(next_type); - type_size = btf_type_size(t); + type_size = btf_type_size(next_type); if (type_size < 0) return type_size; - next_type += type_size; - err = btf_add_type(btf, t); + if (next_type + type_size > end_type) { + pr_warn("BTF type [%d] is malformed\n", i); + return -EINVAL; + } + + if (btf->swapped_endian && btf_bswap_type_rest(next_type)) + return -EINVAL; + + err = btf_add_type_idx_entry(btf, next_type - btf->types_data); if (err) return err; + + next_type += type_size; + btf->nr_types++; + } + + if (next_type != end_type) { + pr_warn("BTF types data is malformed\n"); + return -EINVAL; } return 0; @@ -214,12 +412,20 @@ __u32 btf__get_nr_types(const struct btf *btf) return btf->nr_types; } +/* internal helper returning non-const pointer to a type */ +static struct btf_type *btf_type_by_id(struct btf *btf, __u32 type_id) +{ + if (type_id == 0) + return &btf_void; + + return btf->types_data + btf->type_offs[type_id]; +} + const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id) { if (type_id > btf->nr_types) return NULL; - - return btf->types[type_id]; + return btf_type_by_id((struct btf *)btf, type_id); } static int determine_ptr_size(const struct btf *btf) @@ -286,6 +492,38 @@ int btf__set_pointer_size(struct btf *btf, size_t ptr_sz) return 0; } +static bool is_host_big_endian(void) +{ +#if __BYTE_ORDER == __LITTLE_ENDIAN + return false; +#elif __BYTE_ORDER == __BIG_ENDIAN + return true; +#else +# error "Unrecognized __BYTE_ORDER__" +#endif +} + +enum btf_endianness btf__endianness(const struct btf *btf) +{ + if (is_host_big_endian()) + return btf->swapped_endian ? BTF_LITTLE_ENDIAN : BTF_BIG_ENDIAN; + else + return btf->swapped_endian ? 
BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN; +} + +int btf__set_endianness(struct btf *btf, enum btf_endianness endian) +{ + if (endian != BTF_LITTLE_ENDIAN && endian != BTF_BIG_ENDIAN) + return -EINVAL; + + btf->swapped_endian = is_host_big_endian() != (endian == BTF_BIG_ENDIAN); + if (!btf->swapped_endian) { + free(btf->raw_data_swapped); + btf->raw_data_swapped = NULL; + } + return 0; +} + static bool btf_type_is_void(const struct btf_type *t) { return t == &btf_void || btf_is_fwd(t); @@ -417,7 +655,7 @@ __s32 btf__find_by_name(const struct btf *btf, const char *type_name) return 0; for (i = 1; i <= btf->nr_types; i++) { - const struct btf_type *t = btf->types[i]; + const struct btf_type *t = btf__type_by_id(btf, i); const char *name = btf__name_by_offset(btf, t->name_off); if (name && !strcmp(type_name, name)) @@ -436,7 +674,7 @@ __s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name, return 0; for (i = 1; i <= btf->nr_types; i++) { - const struct btf_type *t = btf->types[i]; + const struct btf_type *t = btf__type_by_id(btf, i); const char *name; if (btf_kind(t) != kind) @@ -449,6 +687,11 @@ __s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name, return -ENOENT; } +static bool btf_is_modifiable(const struct btf *btf) +{ + return (void *)btf->hdr != btf->raw_data; +} + void btf__free(struct btf *btf) { if (IS_ERR_OR_NULL(btf)) @@ -457,11 +700,55 @@ void btf__free(struct btf *btf) if (btf->fd >= 0) close(btf->fd); - free(btf->data); - free(btf->types); + if (btf_is_modifiable(btf)) { + /* if BTF was modified after loading, it will have a split + * in-memory representation for header, types, and strings + * sections, so we need to free all of them individually. It + * might still have a cached contiguous raw data present, + * which will be unconditionally freed below. 
+ */ + free(btf->hdr); + free(btf->types_data); + free(btf->strs_data); + } + free(btf->raw_data); + free(btf->raw_data_swapped); + free(btf->type_offs); free(btf); } +struct btf *btf__new_empty(void) +{ + struct btf *btf; + + btf = calloc(1, sizeof(*btf)); + if (!btf) + return ERR_PTR(-ENOMEM); + + btf->fd = -1; + btf->ptr_sz = sizeof(void *); + btf->swapped_endian = false; + + /* +1 for empty string at offset 0 */ + btf->raw_size = sizeof(struct btf_header) + 1; + btf->raw_data = calloc(1, btf->raw_size); + if (!btf->raw_data) { + free(btf); + return ERR_PTR(-ENOMEM); + } + + btf->hdr = btf->raw_data; + btf->hdr->hdr_len = sizeof(struct btf_header); + btf->hdr->magic = BTF_MAGIC; + btf->hdr->version = BTF_VERSION; + + btf->types_data = btf->raw_data + btf->hdr->hdr_len; + btf->strs_data = btf->raw_data + btf->hdr->hdr_len; + btf->hdr->str_len = 1; /* empty string at offset 0 */ + + return btf; +} + struct btf *btf__new(const void *data, __u32 size) { struct btf *btf; @@ -471,26 +758,28 @@ struct btf *btf__new(const void *data, __u32 size) if (!btf) return ERR_PTR(-ENOMEM); - btf->fd = -1; - - btf->data = malloc(size); - if (!btf->data) { + btf->raw_data = malloc(size); + if (!btf->raw_data) { err = -ENOMEM; goto done; } + memcpy(btf->raw_data, data, size); + btf->raw_size = size; - memcpy(btf->data, data, size); - btf->data_size = size; - + btf->hdr = btf->raw_data; err = btf_parse_hdr(btf); if (err) goto done; + btf->strs_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->str_off; + btf->types_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->type_off; + err = btf_parse_str_sec(btf); + err = err ?: btf_parse_type_sec(btf); if (err) goto done; - err = btf_parse_type_sec(btf); + btf->fd = -1; done: if (err) { @@ -501,17 +790,6 @@ done: return btf; } -static bool btf_check_endianness(const GElf_Ehdr *ehdr) -{ -#if __BYTE_ORDER == __LITTLE_ENDIAN - return ehdr->e_ident[EI_DATA] == ELFDATA2LSB; -#elif __BYTE_ORDER == __BIG_ENDIAN - return ehdr->e_ident[EI_DATA] == ELFDATA2MSB; -#else -# error "Unrecognized __BYTE_ORDER__" -#endif -} - struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext) { Elf_Data *btf_data = NULL, *btf_ext_data = NULL; @@ -544,10 +822,6 @@ struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext) pr_warn("failed to get EHDR from %s\n", path); goto done; } - if (!btf_check_endianness(&ehdr)) { - pr_warn("non-native ELF endianness is not supported\n"); - goto done; - } if (!elf_rawdata(elf_getscn(elf, ehdr.e_shstrndx), NULL)) { pr_warn("failed to get e_shstrndx from %s\n", path); goto done; @@ -659,7 +933,7 @@ struct btf *btf__parse_raw(const char *path) err = -EIO; goto err_out; } - if (magic != BTF_MAGIC) { + if (magic != BTF_MAGIC && magic != bswap_16(BTF_MAGIC)) { /* definitely not a raw BTF */ err = -EPROTO; goto err_out; @@ -792,7 +1066,7 @@ int btf__finalize_data(struct bpf_object *obj, struct btf *btf) __u32 i; for (i = 1; i <= btf->nr_types; i++) { - struct btf_type *t = btf->types[i]; + struct btf_type *t = btf_type_by_id(btf, i); /* Loader needs to fix up some of the things compiler * couldn't get its hands on while emitting BTF. 
This @@ -809,10 +1083,13 @@ int btf__finalize_data(struct bpf_object *obj, struct btf *btf) return err; } +static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian); + int btf__load(struct btf *btf) { - __u32 log_buf_size = 0; + __u32 log_buf_size = 0, raw_size; char *log_buf = NULL; + void *raw_data; int err = 0; if (btf->fd >= 0) @@ -827,8 +1104,16 @@ retry_load: *log_buf = 0; } - btf->fd = bpf_load_btf(btf->data, btf->data_size, - log_buf, log_buf_size, false); + raw_data = btf_get_raw_data(btf, &raw_size, false); + if (!raw_data) { + err = -ENOMEM; + goto done; + } + /* cache native raw data representation */ + btf->raw_size = raw_size; + btf->raw_data = raw_data; + + btf->fd = bpf_load_btf(raw_data, raw_size, log_buf, log_buf_size, false); if (btf->fd < 0) { if (!log_buf || errno == ENOSPC) { log_buf_size = max((__u32)BPF_LOG_BUF_SIZE, @@ -859,20 +1144,88 @@ void btf__set_fd(struct btf *btf, int fd) btf->fd = fd; } -const void *btf__get_raw_data(const struct btf *btf, __u32 *size) +static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian) +{ + struct btf_header *hdr = btf->hdr; + struct btf_type *t; + void *data, *p; + __u32 data_sz; + int i; + + data = swap_endian ? btf->raw_data_swapped : btf->raw_data; + if (data) { + *size = btf->raw_size; + return data; + } + + data_sz = hdr->hdr_len + hdr->type_len + hdr->str_len; + data = calloc(1, data_sz); + if (!data) + return NULL; + p = data; + + memcpy(p, hdr, hdr->hdr_len); + if (swap_endian) + btf_bswap_hdr(p); + p += hdr->hdr_len; + + memcpy(p, btf->types_data, hdr->type_len); + if (swap_endian) { + for (i = 1; i <= btf->nr_types; i++) { + t = p + btf->type_offs[i]; + /* btf_bswap_type_rest() relies on native t->info, so + * we swap base type info after we swapped all the + * additional information + */ + if (btf_bswap_type_rest(t)) + goto err_out; + btf_bswap_type_base(t); + } + } + p += hdr->type_len; + + memcpy(p, btf->strs_data, hdr->str_len); + p += hdr->str_len; + + *size = data_sz; + return data; +err_out: + free(data); + return NULL; +} + +const void *btf__get_raw_data(const struct btf *btf_ro, __u32 *size) { - *size = btf->data_size; - return btf->data; + struct btf *btf = (struct btf *)btf_ro; + __u32 data_sz; + void *data; + + data = btf_get_raw_data(btf, &data_sz, btf->swapped_endian); + if (!data) + return NULL; + + btf->raw_size = data_sz; + if (btf->swapped_endian) + btf->raw_data_swapped = data; + else + btf->raw_data = data; + *size = data_sz; + return data; } -const char *btf__name_by_offset(const struct btf *btf, __u32 offset) +const char *btf__str_by_offset(const struct btf *btf, __u32 offset) { if (offset < btf->hdr->str_len) - return &btf->strings[offset]; + return btf->strs_data + offset; else return NULL; } +const char *btf__name_by_offset(const struct btf *btf, __u32 offset) +{ + return btf__str_by_offset(btf, offset); +} + int btf__get_from_id(__u32 id, struct btf **btf) { struct bpf_btf_info btf_info = { 0 }; @@ -1008,6 +1361,970 @@ int btf__get_map_kv_tids(const struct btf *btf, const char *map_name, return 0; } +static size_t strs_hash_fn(const void *key, void *ctx) +{ + struct btf *btf = ctx; + const char *str = btf->strs_data + (long)key; + + return str_hash(str); +} + +static bool strs_hash_equal_fn(const void *key1, const void *key2, void *ctx) +{ + struct btf *btf = ctx; + const char *str1 = btf->strs_data + (long)key1; + const char *str2 = btf->strs_data + (long)key2; + + return strcmp(str1, str2) == 0; +} + +static void 
btf_invalidate_raw_data(struct btf *btf) +{ + if (btf->raw_data) { + free(btf->raw_data); + btf->raw_data = NULL; + } + if (btf->raw_data_swapped) { + free(btf->raw_data_swapped); + btf->raw_data_swapped = NULL; + } +} + +/* Ensure BTF is ready to be modified (by splitting into a three memory + * regions for header, types, and strings). Also invalidate cached + * raw_data, if any. + */ +static int btf_ensure_modifiable(struct btf *btf) +{ + void *hdr, *types, *strs, *strs_end, *s; + struct hashmap *hash = NULL; + long off; + int err; + + if (btf_is_modifiable(btf)) { + /* any BTF modification invalidates raw_data */ + btf_invalidate_raw_data(btf); + return 0; + } + + /* split raw data into three memory regions */ + hdr = malloc(btf->hdr->hdr_len); + types = malloc(btf->hdr->type_len); + strs = malloc(btf->hdr->str_len); + if (!hdr || !types || !strs) + goto err_out; + + memcpy(hdr, btf->hdr, btf->hdr->hdr_len); + memcpy(types, btf->types_data, btf->hdr->type_len); + memcpy(strs, btf->strs_data, btf->hdr->str_len); + + /* build lookup index for all strings */ + hash = hashmap__new(strs_hash_fn, strs_hash_equal_fn, btf); + if (IS_ERR(hash)) { + err = PTR_ERR(hash); + hash = NULL; + goto err_out; + } + + strs_end = strs + btf->hdr->str_len; + for (off = 0, s = strs; s < strs_end; off += strlen(s) + 1, s = strs + off) { + /* hashmap__add() returns EEXIST if string with the same + * content already is in the hash map + */ + err = hashmap__add(hash, (void *)off, (void *)off); + if (err == -EEXIST) + continue; /* duplicate */ + if (err) + goto err_out; + } + + /* only when everything was successful, update internal state */ + btf->hdr = hdr; + btf->types_data = types; + btf->types_data_cap = btf->hdr->type_len; + btf->strs_data = strs; + btf->strs_data_cap = btf->hdr->str_len; + btf->strs_hash = hash; + /* if BTF was created from scratch, all strings are guaranteed to be + * unique and deduplicated + */ + btf->strs_deduped = btf->hdr->str_len <= 1; + + /* invalidate raw_data representation */ + btf_invalidate_raw_data(btf); + + return 0; + +err_out: + hashmap__free(hash); + free(hdr); + free(types); + free(strs); + return -ENOMEM; +} + +static void *btf_add_str_mem(struct btf *btf, size_t add_sz) +{ + return btf_add_mem(&btf->strs_data, &btf->strs_data_cap, 1, + btf->hdr->str_len, BTF_MAX_STR_OFFSET, add_sz); +} + +/* Find an offset in BTF string section that corresponds to a given string *s*. + * Returns: + * - >0 offset into string section, if string is found; + * - -ENOENT, if string is not in the string section; + * - <0, on any other error. + */ +int btf__find_str(struct btf *btf, const char *s) +{ + long old_off, new_off, len; + void *p; + + /* BTF needs to be in a modifiable state to build string lookup index */ + if (btf_ensure_modifiable(btf)) + return -ENOMEM; + + /* see btf__add_str() for why we do this */ + len = strlen(s) + 1; + p = btf_add_str_mem(btf, len); + if (!p) + return -ENOMEM; + + new_off = btf->hdr->str_len; + memcpy(p, s, len); + + if (hashmap__find(btf->strs_hash, (void *)new_off, (void **)&old_off)) + return old_off; + + return -ENOENT; +} + +/* Add a string s to the BTF string section. + * Returns: + * - > 0 offset into string section, on success; + * - < 0, on error. 
+ */ +int btf__add_str(struct btf *btf, const char *s) +{ + long old_off, new_off, len; + void *p; + int err; + + if (btf_ensure_modifiable(btf)) + return -ENOMEM; + + /* Hashmap keys are always offsets within btf->strs_data, so to even + * look up some string from the "outside", we need to first append it + * at the end, so that it can be addressed with an offset. Luckily, + * until btf->hdr->str_len is incremented, that string is just a piece + * of garbage for the rest of BTF code, so no harm, no foul. On the + * other hand, if the string is unique, it's already appended and + * ready to be used, only a simple btf->hdr->str_len increment away. + */ + len = strlen(s) + 1; + p = btf_add_str_mem(btf, len); + if (!p) + return -ENOMEM; + + new_off = btf->hdr->str_len; + memcpy(p, s, len); + + /* Now attempt to add the string, but only if the string with the same + * contents doesn't exist already (HASHMAP_ADD strategy). If such + * string exists, we'll get its offset in old_off (that's old_key). + */ + err = hashmap__insert(btf->strs_hash, (void *)new_off, (void *)new_off, + HASHMAP_ADD, (const void **)&old_off, NULL); + if (err == -EEXIST) + return old_off; /* duplicated string, return existing offset */ + if (err) + return err; + + btf->hdr->str_len += len; /* new unique string, adjust data length */ + return new_off; +} + +static void *btf_add_type_mem(struct btf *btf, size_t add_sz) +{ + return btf_add_mem(&btf->types_data, &btf->types_data_cap, 1, + btf->hdr->type_len, UINT_MAX, add_sz); +} + +static __u32 btf_type_info(int kind, int vlen, int kflag) +{ + return (kflag << 31) | (kind << 24) | vlen; +} + +static void btf_type_inc_vlen(struct btf_type *t) +{ + t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, btf_kflag(t)); +} + +/* + * Append new BTF_KIND_INT type with: + * - *name* - non-empty, non-NULL type name; + * - *sz* - power-of-2 (1, 2, 4, ..) size of the type, in bytes; + * - encoding is a combination of BTF_INT_SIGNED, BTF_INT_CHAR, BTF_INT_BOOL. + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. 
+ */ +int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding) +{ + struct btf_type *t; + int sz, err, name_off; + + /* non-empty name */ + if (!name || !name[0]) + return -EINVAL; + /* byte_sz must be power of 2 */ + if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 16) + return -EINVAL; + if (encoding & ~(BTF_INT_SIGNED | BTF_INT_CHAR | BTF_INT_BOOL)) + return -EINVAL; + + /* deconstruct BTF, if necessary, and invalidate raw_data */ + if (btf_ensure_modifiable(btf)) + return -ENOMEM; + + sz = sizeof(struct btf_type) + sizeof(int); + t = btf_add_type_mem(btf, sz); + if (!t) + return -ENOMEM; + + /* if something goes wrong later, we might end up with an extra string, + * but that shouldn't be a problem, because BTF can't be constructed + * completely anyway and will most probably be just discarded + */ + name_off = btf__add_str(btf, name); + if (name_off < 0) + return name_off; + + t->name_off = name_off; + t->info = btf_type_info(BTF_KIND_INT, 0, 0); + t->size = byte_sz; + /* set INT info, we don't allow setting legacy bit offset/size */ + *(__u32 *)(t + 1) = (encoding << 24) | (byte_sz * 8); + + err = btf_add_type_idx_entry(btf, btf->hdr->type_len); + if (err) + return err; + + btf->hdr->type_len += sz; + btf->hdr->str_off += sz; + btf->nr_types++; + return btf->nr_types; +} + +/* it's completely legal to append BTF types with type IDs pointing forward to + * types that haven't been appended yet, so we only make sure that id looks + * sane, we can't guarantee that ID will always be valid + */ +static int validate_type_id(int id) +{ + if (id < 0 || id > BTF_MAX_NR_TYPES) + return -EINVAL; + return 0; +} + +/* generic append function for PTR, TYPEDEF, CONST/VOLATILE/RESTRICT */ +static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref_type_id) +{ + struct btf_type *t; + int sz, name_off = 0, err; + + if (validate_type_id(ref_type_id)) + return -EINVAL; + + if (btf_ensure_modifiable(btf)) + return -ENOMEM; + + sz = sizeof(struct btf_type); + t = btf_add_type_mem(btf, sz); + if (!t) + return -ENOMEM; + + if (name && name[0]) { + name_off = btf__add_str(btf, name); + if (name_off < 0) + return name_off; + } + + t->name_off = name_off; + t->info = btf_type_info(kind, 0, 0); + t->type = ref_type_id; + + err = btf_add_type_idx_entry(btf, btf->hdr->type_len); + if (err) + return err; + + btf->hdr->type_len += sz; + btf->hdr->str_off += sz; + btf->nr_types++; + return btf->nr_types; +} + +/* + * Append new BTF_KIND_PTR type with: + * - *ref_type_id* - referenced type ID, it might not exist yet; + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. + */ +int btf__add_ptr(struct btf *btf, int ref_type_id) +{ + return btf_add_ref_kind(btf, BTF_KIND_PTR, NULL, ref_type_id); +} + +/* + * Append new BTF_KIND_ARRAY type with: + * - *index_type_id* - type ID of the type describing array index; + * - *elem_type_id* - type ID of the type describing array element; + * - *nr_elems* - the size of the array; + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. 
+ */ +int btf__add_array(struct btf *btf, int index_type_id, int elem_type_id, __u32 nr_elems) +{ + struct btf_type *t; + struct btf_array *a; + int sz, err; + + if (validate_type_id(index_type_id) || validate_type_id(elem_type_id)) + return -EINVAL; + + if (btf_ensure_modifiable(btf)) + return -ENOMEM; + + sz = sizeof(struct btf_type) + sizeof(struct btf_array); + t = btf_add_type_mem(btf, sz); + if (!t) + return -ENOMEM; + + t->name_off = 0; + t->info = btf_type_info(BTF_KIND_ARRAY, 0, 0); + t->size = 0; + + a = btf_array(t); + a->type = elem_type_id; + a->index_type = index_type_id; + a->nelems = nr_elems; + + err = btf_add_type_idx_entry(btf, btf->hdr->type_len); + if (err) + return err; + + btf->hdr->type_len += sz; + btf->hdr->str_off += sz; + btf->nr_types++; + return btf->nr_types; +} + +/* generic STRUCT/UNION append function */ +static int btf_add_composite(struct btf *btf, int kind, const char *name, __u32 bytes_sz) +{ + struct btf_type *t; + int sz, err, name_off = 0; + + if (btf_ensure_modifiable(btf)) + return -ENOMEM; + + sz = sizeof(struct btf_type); + t = btf_add_type_mem(btf, sz); + if (!t) + return -ENOMEM; + + if (name && name[0]) { + name_off = btf__add_str(btf, name); + if (name_off < 0) + return name_off; + } + + /* start out with vlen=0 and no kflag; this will be adjusted when + * adding each member + */ + t->name_off = name_off; + t->info = btf_type_info(kind, 0, 0); + t->size = bytes_sz; + + err = btf_add_type_idx_entry(btf, btf->hdr->type_len); + if (err) + return err; + + btf->hdr->type_len += sz; + btf->hdr->str_off += sz; + btf->nr_types++; + return btf->nr_types; +} + +/* + * Append new BTF_KIND_STRUCT type with: + * - *name* - name of the struct, can be NULL or empty for anonymous structs; + * - *byte_sz* - size of the struct, in bytes; + * + * Struct initially has no fields in it. Fields can be added by + * btf__add_field() right after btf__add_struct() succeeds. + * + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. + */ +int btf__add_struct(struct btf *btf, const char *name, __u32 byte_sz) +{ + return btf_add_composite(btf, BTF_KIND_STRUCT, name, byte_sz); +} + +/* + * Append new BTF_KIND_UNION type with: + * - *name* - name of the union, can be NULL or empty for anonymous union; + * - *byte_sz* - size of the union, in bytes; + * + * Union initially has no fields in it. Fields can be added by + * btf__add_field() right after btf__add_union() succeeds. All fields + * should have *bit_offset* of 0. + * + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. + */ +int btf__add_union(struct btf *btf, const char *name, __u32 byte_sz) +{ + return btf_add_composite(btf, BTF_KIND_UNION, name, byte_sz); +} + +/* + * Append new field for the current STRUCT/UNION type with: + * - *name* - name of the field, can be NULL or empty for anonymous field; + * - *type_id* - type ID for the type describing field type; + * - *bit_offset* - bit offset of the start of the field within struct/union; + * - *bit_size* - bit size of a bitfield, 0 for non-bitfield fields; + * Returns: + * - 0, on success; + * - <0, on error. 
+ */ +int btf__add_field(struct btf *btf, const char *name, int type_id, + __u32 bit_offset, __u32 bit_size) +{ + struct btf_type *t; + struct btf_member *m; + bool is_bitfield; + int sz, name_off = 0; + + /* last type should be union/struct */ + if (btf->nr_types == 0) + return -EINVAL; + t = btf_type_by_id(btf, btf->nr_types); + if (!btf_is_composite(t)) + return -EINVAL; + + if (validate_type_id(type_id)) + return -EINVAL; + /* best-effort bit field offset/size enforcement */ + is_bitfield = bit_size || (bit_offset % 8 != 0); + if (is_bitfield && (bit_size == 0 || bit_size > 255 || bit_offset > 0xffffff)) + return -EINVAL; + + /* only offset 0 is allowed for unions */ + if (btf_is_union(t) && bit_offset) + return -EINVAL; + + /* decompose and invalidate raw data */ + if (btf_ensure_modifiable(btf)) + return -ENOMEM; + + sz = sizeof(struct btf_member); + m = btf_add_type_mem(btf, sz); + if (!m) + return -ENOMEM; + + if (name && name[0]) { + name_off = btf__add_str(btf, name); + if (name_off < 0) + return name_off; + } + + m->name_off = name_off; + m->type = type_id; + m->offset = bit_offset | (bit_size << 24); + + /* btf_add_type_mem can invalidate t pointer */ + t = btf_type_by_id(btf, btf->nr_types); + /* update parent type's vlen and kflag */ + t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, is_bitfield || btf_kflag(t)); + + btf->hdr->type_len += sz; + btf->hdr->str_off += sz; + return 0; +} + +/* + * Append new BTF_KIND_ENUM type with: + * - *name* - name of the enum, can be NULL or empty for anonymous enums; + * - *byte_sz* - size of the enum, in bytes. + * + * Enum initially has no enum values in it (and corresponds to enum forward + * declaration). Enumerator values can be added by btf__add_enum_value() + * immediately after btf__add_enum() succeeds. + * + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. + */ +int btf__add_enum(struct btf *btf, const char *name, __u32 byte_sz) +{ + struct btf_type *t; + int sz, err, name_off = 0; + + /* byte_sz must be power of 2 */ + if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 8) + return -EINVAL; + + if (btf_ensure_modifiable(btf)) + return -ENOMEM; + + sz = sizeof(struct btf_type); + t = btf_add_type_mem(btf, sz); + if (!t) + return -ENOMEM; + + if (name && name[0]) { + name_off = btf__add_str(btf, name); + if (name_off < 0) + return name_off; + } + + /* start out with vlen=0; it will be adjusted when adding enum values */ + t->name_off = name_off; + t->info = btf_type_info(BTF_KIND_ENUM, 0, 0); + t->size = byte_sz; + + err = btf_add_type_idx_entry(btf, btf->hdr->type_len); + if (err) + return err; + + btf->hdr->type_len += sz; + btf->hdr->str_off += sz; + btf->nr_types++; + return btf->nr_types; +} + +/* + * Append new enum value for the current ENUM type with: + * - *name* - name of the enumerator value, can't be NULL or empty; + * - *value* - integer value corresponding to enum value *name*; + * Returns: + * - 0, on success; + * - <0, on error. 
+ */ +int btf__add_enum_value(struct btf *btf, const char *name, __s64 value) +{ + struct btf_type *t; + struct btf_enum *v; + int sz, name_off; + + /* last type should be BTF_KIND_ENUM */ + if (btf->nr_types == 0) + return -EINVAL; + t = btf_type_by_id(btf, btf->nr_types); + if (!btf_is_enum(t)) + return -EINVAL; + + /* non-empty name */ + if (!name || !name[0]) + return -EINVAL; + if (value < INT_MIN || value > UINT_MAX) + return -E2BIG; + + /* decompose and invalidate raw data */ + if (btf_ensure_modifiable(btf)) + return -ENOMEM; + + sz = sizeof(struct btf_enum); + v = btf_add_type_mem(btf, sz); + if (!v) + return -ENOMEM; + + name_off = btf__add_str(btf, name); + if (name_off < 0) + return name_off; + + v->name_off = name_off; + v->val = value; + + /* update parent type's vlen */ + t = btf_type_by_id(btf, btf->nr_types); + btf_type_inc_vlen(t); + + btf->hdr->type_len += sz; + btf->hdr->str_off += sz; + return 0; +} + +/* + * Append new BTF_KIND_FWD type with: + * - *name*, non-empty/non-NULL name; + * - *fwd_kind*, kind of forward declaration, one of BTF_FWD_STRUCT, + * BTF_FWD_UNION, or BTF_FWD_ENUM; + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. + */ +int btf__add_fwd(struct btf *btf, const char *name, enum btf_fwd_kind fwd_kind) +{ + if (!name || !name[0]) + return -EINVAL; + + switch (fwd_kind) { + case BTF_FWD_STRUCT: + case BTF_FWD_UNION: { + struct btf_type *t; + int id; + + id = btf_add_ref_kind(btf, BTF_KIND_FWD, name, 0); + if (id <= 0) + return id; + t = btf_type_by_id(btf, id); + t->info = btf_type_info(BTF_KIND_FWD, 0, fwd_kind == BTF_FWD_UNION); + return id; + } + case BTF_FWD_ENUM: + /* enum forward in BTF currently is just an enum with no enum + * values; we also assume a standard 4-byte size for it + */ + return btf__add_enum(btf, name, sizeof(int)); + default: + return -EINVAL; + } +} + +/* + * Append new BTF_KING_TYPEDEF type with: + * - *name*, non-empty/non-NULL name; + * - *ref_type_id* - referenced type ID, it might not exist yet; + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. + */ +int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id) +{ + if (!name || !name[0]) + return -EINVAL; + + return btf_add_ref_kind(btf, BTF_KIND_TYPEDEF, name, ref_type_id); +} + +/* + * Append new BTF_KIND_VOLATILE type with: + * - *ref_type_id* - referenced type ID, it might not exist yet; + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. + */ +int btf__add_volatile(struct btf *btf, int ref_type_id) +{ + return btf_add_ref_kind(btf, BTF_KIND_VOLATILE, NULL, ref_type_id); +} + +/* + * Append new BTF_KIND_CONST type with: + * - *ref_type_id* - referenced type ID, it might not exist yet; + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. + */ +int btf__add_const(struct btf *btf, int ref_type_id) +{ + return btf_add_ref_kind(btf, BTF_KIND_CONST, NULL, ref_type_id); +} + +/* + * Append new BTF_KIND_RESTRICT type with: + * - *ref_type_id* - referenced type ID, it might not exist yet; + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. + */ +int btf__add_restrict(struct btf *btf, int ref_type_id) +{ + return btf_add_ref_kind(btf, BTF_KIND_RESTRICT, NULL, ref_type_id); +} + +/* + * Append new BTF_KIND_FUNC type with: + * - *name*, non-empty/non-NULL name; + * - *proto_type_id* - FUNC_PROTO's type ID, it might not exist yet; + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. 
+ */ +int btf__add_func(struct btf *btf, const char *name, + enum btf_func_linkage linkage, int proto_type_id) +{ + int id; + + if (!name || !name[0]) + return -EINVAL; + if (linkage != BTF_FUNC_STATIC && linkage != BTF_FUNC_GLOBAL && + linkage != BTF_FUNC_EXTERN) + return -EINVAL; + + id = btf_add_ref_kind(btf, BTF_KIND_FUNC, name, proto_type_id); + if (id > 0) { + struct btf_type *t = btf_type_by_id(btf, id); + + t->info = btf_type_info(BTF_KIND_FUNC, linkage, 0); + } + return id; +} + +/* + * Append new BTF_KIND_FUNC_PROTO with: + * - *ret_type_id* - type ID for return result of a function. + * + * Function prototype initially has no arguments, but they can be added by + * btf__add_func_param() one by one, immediately after + * btf__add_func_proto() succeeded. + * + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. + */ +int btf__add_func_proto(struct btf *btf, int ret_type_id) +{ + struct btf_type *t; + int sz, err; + + if (validate_type_id(ret_type_id)) + return -EINVAL; + + if (btf_ensure_modifiable(btf)) + return -ENOMEM; + + sz = sizeof(struct btf_type); + t = btf_add_type_mem(btf, sz); + if (!t) + return -ENOMEM; + + /* start out with vlen=0; this will be adjusted when adding enum + * values, if necessary + */ + t->name_off = 0; + t->info = btf_type_info(BTF_KIND_FUNC_PROTO, 0, 0); + t->type = ret_type_id; + + err = btf_add_type_idx_entry(btf, btf->hdr->type_len); + if (err) + return err; + + btf->hdr->type_len += sz; + btf->hdr->str_off += sz; + btf->nr_types++; + return btf->nr_types; +} + +/* + * Append new function parameter for current FUNC_PROTO type with: + * - *name* - parameter name, can be NULL or empty; + * - *type_id* - type ID describing the type of the parameter. + * Returns: + * - 0, on success; + * - <0, on error. + */ +int btf__add_func_param(struct btf *btf, const char *name, int type_id) +{ + struct btf_type *t; + struct btf_param *p; + int sz, name_off = 0; + + if (validate_type_id(type_id)) + return -EINVAL; + + /* last type should be BTF_KIND_FUNC_PROTO */ + if (btf->nr_types == 0) + return -EINVAL; + t = btf_type_by_id(btf, btf->nr_types); + if (!btf_is_func_proto(t)) + return -EINVAL; + + /* decompose and invalidate raw data */ + if (btf_ensure_modifiable(btf)) + return -ENOMEM; + + sz = sizeof(struct btf_param); + p = btf_add_type_mem(btf, sz); + if (!p) + return -ENOMEM; + + if (name && name[0]) { + name_off = btf__add_str(btf, name); + if (name_off < 0) + return name_off; + } + + p->name_off = name_off; + p->type = type_id; + + /* update parent type's vlen */ + t = btf_type_by_id(btf, btf->nr_types); + btf_type_inc_vlen(t); + + btf->hdr->type_len += sz; + btf->hdr->str_off += sz; + return 0; +} + +/* + * Append new BTF_KIND_VAR type with: + * - *name* - non-empty/non-NULL name; + * - *linkage* - variable linkage, one of BTF_VAR_STATIC, + * BTF_VAR_GLOBAL_ALLOCATED, or BTF_VAR_GLOBAL_EXTERN; + * - *type_id* - type ID of the type describing the type of the variable. + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. 
+ */ +int btf__add_var(struct btf *btf, const char *name, int linkage, int type_id) +{ + struct btf_type *t; + struct btf_var *v; + int sz, err, name_off; + + /* non-empty name */ + if (!name || !name[0]) + return -EINVAL; + if (linkage != BTF_VAR_STATIC && linkage != BTF_VAR_GLOBAL_ALLOCATED && + linkage != BTF_VAR_GLOBAL_EXTERN) + return -EINVAL; + if (validate_type_id(type_id)) + return -EINVAL; + + /* deconstruct BTF, if necessary, and invalidate raw_data */ + if (btf_ensure_modifiable(btf)) + return -ENOMEM; + + sz = sizeof(struct btf_type) + sizeof(struct btf_var); + t = btf_add_type_mem(btf, sz); + if (!t) + return -ENOMEM; + + name_off = btf__add_str(btf, name); + if (name_off < 0) + return name_off; + + t->name_off = name_off; + t->info = btf_type_info(BTF_KIND_VAR, 0, 0); + t->type = type_id; + + v = btf_var(t); + v->linkage = linkage; + + err = btf_add_type_idx_entry(btf, btf->hdr->type_len); + if (err) + return err; + + btf->hdr->type_len += sz; + btf->hdr->str_off += sz; + btf->nr_types++; + return btf->nr_types; +} + +/* + * Append new BTF_KIND_DATASEC type with: + * - *name* - non-empty/non-NULL name; + * - *byte_sz* - data section size, in bytes. + * + * Data section is initially empty. Variables info can be added with + * btf__add_datasec_var_info() calls, after btf__add_datasec() succeeds. + * + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. + */ +int btf__add_datasec(struct btf *btf, const char *name, __u32 byte_sz) +{ + struct btf_type *t; + int sz, err, name_off; + + /* non-empty name */ + if (!name || !name[0]) + return -EINVAL; + + if (btf_ensure_modifiable(btf)) + return -ENOMEM; + + sz = sizeof(struct btf_type); + t = btf_add_type_mem(btf, sz); + if (!t) + return -ENOMEM; + + name_off = btf__add_str(btf, name); + if (name_off < 0) + return name_off; + + /* start with vlen=0, which will be update as var_secinfos are added */ + t->name_off = name_off; + t->info = btf_type_info(BTF_KIND_DATASEC, 0, 0); + t->size = byte_sz; + + err = btf_add_type_idx_entry(btf, btf->hdr->type_len); + if (err) + return err; + + btf->hdr->type_len += sz; + btf->hdr->str_off += sz; + btf->nr_types++; + return btf->nr_types; +} + +/* + * Append new data section variable information entry for current DATASEC type: + * - *var_type_id* - type ID, describing type of the variable; + * - *offset* - variable offset within data section, in bytes; + * - *byte_sz* - variable size, in bytes. + * + * Returns: + * - 0, on success; + * - <0, on error. 
+ */ +int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __u32 byte_sz) +{ + struct btf_type *t; + struct btf_var_secinfo *v; + int sz; + + /* last type should be BTF_KIND_DATASEC */ + if (btf->nr_types == 0) + return -EINVAL; + t = btf_type_by_id(btf, btf->nr_types); + if (!btf_is_datasec(t)) + return -EINVAL; + + if (validate_type_id(var_type_id)) + return -EINVAL; + + /* decompose and invalidate raw data */ + if (btf_ensure_modifiable(btf)) + return -ENOMEM; + + sz = sizeof(struct btf_var_secinfo); + v = btf_add_type_mem(btf, sz); + if (!v) + return -ENOMEM; + + v->type = var_type_id; + v->offset = offset; + v->size = byte_sz; + + /* update parent type's vlen */ + t = btf_type_by_id(btf, btf->nr_types); + btf_type_inc_vlen(t); + + btf->hdr->type_len += sz; + btf->hdr->str_off += sz; + return 0; +} + struct btf_ext_sec_setup_param { __u32 off; __u32 len; @@ -1131,14 +2448,14 @@ static int btf_ext_setup_line_info(struct btf_ext *btf_ext) return btf_ext_setup_info(btf_ext, ¶m); } -static int btf_ext_setup_field_reloc(struct btf_ext *btf_ext) +static int btf_ext_setup_core_relos(struct btf_ext *btf_ext) { struct btf_ext_sec_setup_param param = { - .off = btf_ext->hdr->field_reloc_off, - .len = btf_ext->hdr->field_reloc_len, - .min_rec_size = sizeof(struct bpf_field_reloc), - .ext_info = &btf_ext->field_reloc_info, - .desc = "field_reloc", + .off = btf_ext->hdr->core_relo_off, + .len = btf_ext->hdr->core_relo_len, + .min_rec_size = sizeof(struct bpf_core_relo), + .ext_info = &btf_ext->core_relo_info, + .desc = "core_relo", }; return btf_ext_setup_info(btf_ext, ¶m); @@ -1154,7 +2471,10 @@ static int btf_ext_parse_hdr(__u8 *data, __u32 data_size) return -EINVAL; } - if (hdr->magic != BTF_MAGIC) { + if (hdr->magic == bswap_16(BTF_MAGIC)) { + pr_warn("BTF.ext in non-native endianness is not supported\n"); + return -ENOTSUP; + } else if (hdr->magic != BTF_MAGIC) { pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic); return -EINVAL; } @@ -1217,10 +2537,9 @@ struct btf_ext *btf_ext__new(__u8 *data, __u32 size) if (err) goto done; - if (btf_ext->hdr->hdr_len < - offsetofend(struct btf_ext_header, field_reloc_len)) + if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len)) goto done; - err = btf_ext_setup_field_reloc(btf_ext); + err = btf_ext_setup_core_relos(btf_ext); if (err) goto done; @@ -1475,6 +2794,9 @@ int btf__dedup(struct btf *btf, struct btf_ext *btf_ext, return -EINVAL; } + if (btf_ensure_modifiable(btf)) + return -ENOMEM; + err = btf_dedup_strings(d); if (err < 0) { pr_debug("btf_dedup_strings failed:%d\n", err); @@ -1575,7 +2897,7 @@ static int btf_dedup_hypot_map_add(struct btf_dedup *d, __u32 *new_list; d->hypot_cap += max((size_t)16, d->hypot_cap / 2); - new_list = realloc(d->hypot_list, sizeof(__u32) * d->hypot_cap); + new_list = libbpf_reallocarray(d->hypot_list, d->hypot_cap, sizeof(__u32)); if (!new_list) return -ENOMEM; d->hypot_list = new_list; @@ -1659,7 +2981,7 @@ static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext, /* special BTF "void" type is made canonical immediately */ d->map[0] = 0; for (i = 1; i <= btf->nr_types; i++) { - struct btf_type *t = d->btf->types[i]; + struct btf_type *t = btf_type_by_id(d->btf, i); /* VAR and DATASEC are never deduped and are self-canonical */ if (btf_is_var(t) || btf_is_datasec(t)) @@ -1698,7 +3020,7 @@ static int btf_for_each_str_off(struct btf_dedup *d, str_off_fn_t fn, void *ctx) struct btf_type *t; for (i = 1; i <= d->btf->nr_types; i++) { - t = 
d->btf->types[i]; + t = btf_type_by_id(d->btf, i); r = fn(&t->name_off, ctx); if (r) return r; @@ -1852,8 +3174,7 @@ static int btf_str_remap_offset(__u32 *str_off_ptr, void *ctx) */ static int btf_dedup_strings(struct btf_dedup *d) { - const struct btf_header *hdr = d->btf->hdr; - char *start = (char *)d->btf->nohdr_data + hdr->str_off; + char *start = d->btf->strs_data; char *end = start + d->btf->hdr->str_len; char *p = start, *tmp_strs = NULL; struct btf_str_ptrs strs = { @@ -1865,14 +3186,16 @@ static int btf_dedup_strings(struct btf_dedup *d) int i, j, err = 0, grp_idx; bool grp_used; + if (d->btf->strs_deduped) + return 0; + /* build index of all strings */ while (p < end) { if (strs.cnt + 1 > strs.cap) { struct btf_str_ptr *new_ptrs; strs.cap += max(strs.cnt / 2, 16U); - new_ptrs = realloc(strs.ptrs, - sizeof(strs.ptrs[0]) * strs.cap); + new_ptrs = libbpf_reallocarray(strs.ptrs, strs.cap, sizeof(strs.ptrs[0])); if (!new_ptrs) { err = -ENOMEM; goto done; @@ -1958,6 +3281,7 @@ static int btf_dedup_strings(struct btf_dedup *d) goto done; d->btf->hdr->str_len = end - start; + d->btf->strs_deduped = true; done: free(tmp_strs); @@ -2234,7 +3558,7 @@ static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2) */ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id) { - struct btf_type *t = d->btf->types[type_id]; + struct btf_type *t = btf_type_by_id(d->btf, type_id); struct hashmap_entry *hash_entry; struct btf_type *cand; /* if we don't find equivalent type, then we are canonical */ @@ -2261,7 +3585,7 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id) h = btf_hash_int(t); for_each_dedup_cand(d, hash_entry, h) { cand_id = (__u32)(long)hash_entry->value; - cand = d->btf->types[cand_id]; + cand = btf_type_by_id(d->btf, cand_id); if (btf_equal_int(t, cand)) { new_id = cand_id; break; @@ -2273,7 +3597,7 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id) h = btf_hash_enum(t); for_each_dedup_cand(d, hash_entry, h) { cand_id = (__u32)(long)hash_entry->value; - cand = d->btf->types[cand_id]; + cand = btf_type_by_id(d->btf, cand_id); if (btf_equal_enum(t, cand)) { new_id = cand_id; break; @@ -2296,7 +3620,7 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id) h = btf_hash_common(t); for_each_dedup_cand(d, hash_entry, h) { cand_id = (__u32)(long)hash_entry->value; - cand = d->btf->types[cand_id]; + cand = btf_type_by_id(d->btf, cand_id); if (btf_equal_common(t, cand)) { new_id = cand_id; break; @@ -2355,13 +3679,13 @@ static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id) { __u32 orig_type_id = type_id; - if (!btf_is_fwd(d->btf->types[type_id])) + if (!btf_is_fwd(btf__type_by_id(d->btf, type_id))) return type_id; while (is_type_mapped(d, type_id) && d->map[type_id] != type_id) type_id = d->map[type_id]; - if (!btf_is_fwd(d->btf->types[type_id])) + if (!btf_is_fwd(btf__type_by_id(d->btf, type_id))) return type_id; return orig_type_id; @@ -2489,8 +3813,8 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id, if (btf_dedup_hypot_map_add(d, canon_id, cand_id)) return -ENOMEM; - cand_type = d->btf->types[cand_id]; - canon_type = d->btf->types[canon_id]; + cand_type = btf_type_by_id(d->btf, cand_id); + canon_type = btf_type_by_id(d->btf, canon_id); cand_kind = btf_kind(cand_type); canon_kind = btf_kind(canon_type); @@ -2641,8 +3965,8 @@ static void btf_dedup_merge_hypot_map(struct btf_dedup *d) targ_type_id = d->hypot_map[cand_type_id]; t_id = resolve_type_id(d, targ_type_id); c_id = 
resolve_type_id(d, cand_type_id); - t_kind = btf_kind(d->btf->types[t_id]); - c_kind = btf_kind(d->btf->types[c_id]); + t_kind = btf_kind(btf__type_by_id(d->btf, t_id)); + c_kind = btf_kind(btf__type_by_id(d->btf, c_id)); /* * Resolve FWD into STRUCT/UNION. * It's ok to resolve FWD into STRUCT/UNION that's not yet @@ -2710,7 +4034,7 @@ static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id) if (d->map[type_id] <= BTF_MAX_NR_TYPES) return 0; - t = d->btf->types[type_id]; + t = btf_type_by_id(d->btf, type_id); kind = btf_kind(t); if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION) @@ -2731,7 +4055,7 @@ static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id) * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because * FWD and compatible STRUCT/UNION are considered equivalent. */ - cand_type = d->btf->types[cand_id]; + cand_type = btf_type_by_id(d->btf, cand_id); if (!btf_shallow_equal_struct(t, cand_type)) continue; @@ -2803,7 +4127,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id) if (d->map[type_id] <= BTF_MAX_NR_TYPES) return resolve_type_id(d, type_id); - t = d->btf->types[type_id]; + t = btf_type_by_id(d->btf, type_id); d->map[type_id] = BTF_IN_PROGRESS_ID; switch (btf_kind(t)) { @@ -2821,7 +4145,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id) h = btf_hash_common(t); for_each_dedup_cand(d, hash_entry, h) { cand_id = (__u32)(long)hash_entry->value; - cand = d->btf->types[cand_id]; + cand = btf_type_by_id(d->btf, cand_id); if (btf_equal_common(t, cand)) { new_id = cand_id; break; @@ -2845,7 +4169,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id) h = btf_hash_array(t); for_each_dedup_cand(d, hash_entry, h) { cand_id = (__u32)(long)hash_entry->value; - cand = d->btf->types[cand_id]; + cand = btf_type_by_id(d->btf, cand_id); if (btf_equal_array(t, cand)) { new_id = cand_id; break; @@ -2877,7 +4201,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id) h = btf_hash_fnproto(t); for_each_dedup_cand(d, hash_entry, h) { cand_id = (__u32)(long)hash_entry->value; - cand = d->btf->types[cand_id]; + cand = btf_type_by_id(d->btf, cand_id); if (btf_equal_fnproto(t, cand)) { new_id = cand_id; break; @@ -2925,9 +4249,9 @@ static int btf_dedup_ref_types(struct btf_dedup *d) */ static int btf_dedup_compact_types(struct btf_dedup *d) { - struct btf_type **new_types; + __u32 *new_offs; __u32 next_type_id = 1; - char *types_start, *p; + void *p; int i, len; /* we are going to reuse hypot_map to store compaction remapping */ @@ -2935,41 +4259,34 @@ static int btf_dedup_compact_types(struct btf_dedup *d) for (i = 1; i <= d->btf->nr_types; i++) d->hypot_map[i] = BTF_UNPROCESSED_ID; - types_start = d->btf->nohdr_data + d->btf->hdr->type_off; - p = types_start; + p = d->btf->types_data; for (i = 1; i <= d->btf->nr_types; i++) { if (d->map[i] != i) continue; - len = btf_type_size(d->btf->types[i]); + len = btf_type_size(btf__type_by_id(d->btf, i)); if (len < 0) return len; - memmove(p, d->btf->types[i], len); + memmove(p, btf__type_by_id(d->btf, i), len); d->hypot_map[i] = next_type_id; - d->btf->types[next_type_id] = (struct btf_type *)p; + d->btf->type_offs[next_type_id] = p - d->btf->types_data; p += len; next_type_id++; } /* shrink struct btf's internal types index and update btf_header */ d->btf->nr_types = next_type_id - 1; - d->btf->types_size = d->btf->nr_types; - d->btf->hdr->type_len = p - types_start; - new_types = realloc(d->btf->types, - (1 + d->btf->nr_types) * sizeof(struct btf_type *)); - 
if (!new_types) + d->btf->type_offs_cap = d->btf->nr_types + 1; + d->btf->hdr->type_len = p - d->btf->types_data; + new_offs = libbpf_reallocarray(d->btf->type_offs, d->btf->type_offs_cap, + sizeof(*new_offs)); + if (!new_offs) return -ENOMEM; - d->btf->types = new_types; - - /* make sure string section follows type information without gaps */ - d->btf->hdr->str_off = p - (char *)d->btf->nohdr_data; - memmove(p, d->btf->strings, d->btf->hdr->str_len); - d->btf->strings = p; - p += d->btf->hdr->str_len; - - d->btf->data_size = p - (char *)d->btf->data; + d->btf->type_offs = new_offs; + d->btf->hdr->str_off = d->btf->hdr->type_len; + d->btf->raw_size = d->btf->hdr->hdr_len + d->btf->hdr->type_len + d->btf->hdr->str_len; return 0; } @@ -3002,7 +4319,7 @@ static int btf_dedup_remap_type_id(struct btf_dedup *d, __u32 type_id) */ static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id) { - struct btf_type *t = d->btf->types[type_id]; + struct btf_type *t = btf_type_by_id(d->btf, type_id); int i, r; switch (btf_kind(t)) { |
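Appendix (editor's sketch, not part of the patch): the new btf__set_endianness() and btf__get_raw_data() pair can be used to emit BTF for a non-native target, since raw data is generated and cached in the requested byte order by btf_get_raw_data() above. The helper name write_big_endian_btf() and the use of stdio here are illustrative assumptions.

#include <stdio.h>
#include <bpf/btf.h>

static int write_big_endian_btf(struct btf *btf, FILE *f)
{
	const void *raw;
	__u32 size;

	/* request big-endian output regardless of host byte order */
	if (btf__set_endianness(btf, BTF_BIG_ENDIAN))
		return -1;

	/* raw data is produced (and cached) in the requested endianness */
	raw = btf__get_raw_data(btf, &size);
	if (!raw)
		return -1;

	return fwrite(raw, 1, size, f) == size ? 0 : -1;
}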