Diffstat (limited to 'tools/lib')
42 files changed, 1494 insertions, 538 deletions
diff --git a/tools/lib/api/Makefile b/tools/lib/api/Makefile
index 7f6396087b46..8665c799e0fa 100644
--- a/tools/lib/api/Makefile
+++ b/tools/lib/api/Makefile
@@ -95,7 +95,7 @@ install_lib: $(LIBFILE)
 		$(call do_install_mkdir,$(libdir_SQ)); \
 		cp -fpR $(LIBFILE) $(DESTDIR)$(libdir_SQ)
 
-HDRS := cpu.h debug.h io.h
+HDRS := cpu.h debug.h io.h io_dir.h
 FD_HDRS := fd/array.h
 FS_HDRS := fs/fs.h fs/tracing_path.h
 INSTALL_HDRS_PFX := $(DESTDIR)$(prefix)/include/api
diff --git a/tools/lib/api/fs/fs.c b/tools/lib/api/fs/fs.c
index 337fde770e45..edec23406dbc 100644
--- a/tools/lib/api/fs/fs.c
+++ b/tools/lib/api/fs/fs.c
@@ -296,7 +296,7 @@ int filename__read_int(const char *filename, int *value)
 	int fd = open(filename, O_RDONLY), err = -1;
 
 	if (fd < 0)
-		return -1;
+		return -errno;
 
 	if (read(fd, line, sizeof(line)) > 0) {
 		*value = atoi(line);
@@ -314,7 +314,7 @@ static int filename__read_ull_base(const char *filename,
 	int fd = open(filename, O_RDONLY), err = -1;
 
 	if (fd < 0)
-		return -1;
+		return -errno;
 
 	if (read(fd, line, sizeof(line)) > 0) {
 		*value = strtoull(line, NULL, base);
@@ -372,7 +372,7 @@ int filename__write_int(const char *filename, int value)
 	char buf[64];
 
 	if (fd < 0)
-		return err;
+		return -errno;
 
 	sprintf(buf, "%d", value);
 	if (write(fd, buf, sizeof(buf)) == sizeof(buf))
diff --git a/tools/lib/api/io_dir.h b/tools/lib/api/io_dir.h
new file mode 100644
index 000000000000..ef83e967e48c
--- /dev/null
+++ b/tools/lib/api/io_dir.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+/*
+ * Lightweight directory reading library.
+ */
+#ifndef __API_IO_DIR__
+#define __API_IO_DIR__
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <linux/limits.h>
+
+#if !defined(SYS_getdents64)
+#if defined(__x86_64__) || defined(__arm__)
+ #define SYS_getdents64 217
+#elif defined(__i386__) || defined(__s390x__) || defined(__sh__)
+ #define SYS_getdents64 220
+#elif defined(__alpha__)
+ #define SYS_getdents64 377
+#elif defined(__mips__)
+ #define SYS_getdents64 308
+#elif defined(__powerpc64__) || defined(__powerpc__)
+ #define SYS_getdents64 202
+#elif defined(__sparc64__) || defined(__sparc__)
+ #define SYS_getdents64 154
+#elif defined(__xtensa__)
+ #define SYS_getdents64 60
+#else
+ #define SYS_getdents64 61
+#endif
+#endif /* !defined(SYS_getdents64) */
+
+static inline ssize_t perf_getdents64(int fd, void *dirp, size_t count)
+{
+#ifdef MEMORY_SANITIZER
+	memset(dirp, 0, count);
+#endif
+	return syscall(SYS_getdents64, fd, dirp, count);
+}
+
+struct io_dirent64 {
+	ino64_t        d_ino;    /* 64-bit inode number */
+	off64_t        d_off;    /* 64-bit offset to next structure */
+	unsigned short d_reclen; /* Size of this dirent */
+	unsigned char  d_type;   /* File type */
+	char           d_name[NAME_MAX + 1]; /* Filename (null-terminated) */
+};
+
+struct io_dir {
+	int dirfd;
+	ssize_t available_bytes;
+	struct io_dirent64 *next;
+	struct io_dirent64 buff[4];
+};
+
+static inline void io_dir__init(struct io_dir *iod, int dirfd)
+{
+	iod->dirfd = dirfd;
+	iod->available_bytes = 0;
+}
+
+static inline void io_dir__rewinddir(struct io_dir *iod)
+{
+	lseek(iod->dirfd, 0, SEEK_SET);
+	iod->available_bytes = 0;
+}
+
+static inline struct io_dirent64 *io_dir__readdir(struct io_dir *iod)
+{
+	struct io_dirent64 *entry;
+
+	if (iod->available_bytes <= 0) {
+		ssize_t rc = perf_getdents64(iod->dirfd, iod->buff, sizeof(iod->buff));
+
+		if (rc <= 0)
+			return NULL;
+		iod->available_bytes = rc;
+		iod->next = iod->buff;
+	}
+	entry = iod->next;
+	iod->next = (struct io_dirent64 *)((char *)entry + entry->d_reclen);
+	iod->available_bytes -= entry->d_reclen;
+	return entry;
+}
+
+static inline bool io_dir__is_dir(const struct io_dir *iod, struct io_dirent64 *dent)
+{
+	if (dent->d_type == DT_UNKNOWN) {
+		struct stat st;
+
+		if (fstatat(iod->dirfd, dent->d_name, &st, /*flags=*/0))
+			return false;
+
+		if (S_ISDIR(st.st_mode)) {
+			dent->d_type = DT_DIR;
+			return true;
+		}
+	}
+	return dent->d_type == DT_DIR;
+}
+
+#endif /* __API_IO_DIR__ */
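[Editor's note: for context, a minimal sketch of how a caller might iterate a directory with the new io_dir API; the include path, open() flags and error handling are illustrative, not part of the patch.]

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <api/io_dir.h>

/* Print the name of every subdirectory of the given directory. */
static int list_subdirs(const char *path)
{
	struct io_dirent64 *dent;
	struct io_dir iod;
	int dirfd = open(path, O_RDONLY | O_DIRECTORY);

	if (dirfd < 0)
		return -1;

	/* entries are read in sizeof(iod.buff) batches via getdents64 */
	io_dir__init(&iod, dirfd);
	while ((dent = io_dir__readdir(&iod)) != NULL) {
		/* io_dir__is_dir() falls back to fstatat() for DT_UNKNOWN */
		if (io_dir__is_dir(&iod, dent))
			printf("%s\n", dent->d_name);
	}
	close(dirfd);
	return 0;
}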
diff --git a/tools/lib/bitmap.c b/tools/lib/bitmap.c
index 2178862bb114..51255c69754d 100644
--- a/tools/lib/bitmap.c
+++ b/tools/lib/bitmap.c
@@ -101,6 +101,26 @@ bool __bitmap_intersects(const unsigned long *bitmap1,
 	return false;
 }
 
+void __bitmap_set(unsigned long *map, unsigned int start, int len)
+{
+	unsigned long *p = map + BIT_WORD(start);
+	const unsigned int size = start + len;
+	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
+	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
+
+	while (len - bits_to_set >= 0) {
+		*p |= mask_to_set;
+		len -= bits_to_set;
+		bits_to_set = BITS_PER_LONG;
+		mask_to_set = ~0UL;
+		p++;
+	}
+	if (len) {
+		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
+		*p |= mask_to_set;
+	}
+}
+
 void __bitmap_clear(unsigned long *map, unsigned int start, int len)
 {
 	unsigned long *p = map + BIT_WORD(start);
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index 857a5f7b413d..168140f8e646 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -53,13 +53,6 @@ include $(srctree)/tools/scripts/Makefile.include
 
 # copy a bit from Linux kbuild
 
-ifeq ("$(origin V)", "command line")
-  VERBOSE = $(V)
-endif
-ifndef VERBOSE
-  VERBOSE = 0
-endif
-
 INCLUDES = -I$(or $(OUTPUT),.)					\
 	   -I$(srctree)/tools/include -I$(srctree)/tools/include/uapi	\
 	   -I$(srctree)/tools/arch/$(SRCARCH)/include
@@ -96,12 +89,6 @@ override CFLAGS += $(CLANG_CROSS_FLAGS)
 # flags specific for shared library
 SHLIB_FLAGS := -DSHARED -fPIC
 
-ifeq ($(VERBOSE),1)
-  Q =
-else
-  Q = @
-endif
-
 # Disable command line variables (CFLAGS) override from top
 # level Makefile (perf), otherwise build Makefile will get
 # the same command line setup.
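[Editor's note: a quick illustration of the __bitmap_set() semantics added in the bitmap.c hunk above, assuming 64-bit longs; values are illustrative.]

#include <linux/bitmap.h>

static void mark_region(void)
{
	unsigned long map[2] = { 0, 0 };

	/* sets bits 60..63 of map[0] via BITMAP_FIRST_WORD_MASK(), then
	 * bits 0..3 of map[1] via the BITMAP_LAST_WORD_MASK() tail */
	__bitmap_set(map, 60, 8);
}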
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index becdfa701c75..ab40dbf9f020 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -238,7 +238,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
 		  const struct bpf_insn *insns, size_t insn_cnt,
 		  struct bpf_prog_load_opts *opts)
 {
-	const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);
+	const size_t attr_sz = offsetofend(union bpf_attr, fd_array_cnt);
 	void *finfo = NULL, *linfo = NULL;
 	const char *func_info, *line_info;
 	__u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd;
@@ -311,6 +311,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
 	attr.line_info_cnt = OPTS_GET(opts, line_info_cnt, 0);
 
 	attr.fd_array = ptr_to_u64(OPTS_GET(opts, fd_array, NULL));
+	attr.fd_array_cnt = OPTS_GET(opts, fd_array_cnt, 0);
 
 	if (log_level) {
 		attr.log_buf = ptr_to_u64(log_buf);
@@ -836,6 +837,50 @@ int bpf_link_create(int prog_fd, int target_fd,
 		if (!OPTS_ZEROED(opts, netkit))
 			return libbpf_err(-EINVAL);
 		break;
+	case BPF_CGROUP_INET_INGRESS:
+	case BPF_CGROUP_INET_EGRESS:
+	case BPF_CGROUP_INET_SOCK_CREATE:
+	case BPF_CGROUP_INET_SOCK_RELEASE:
+	case BPF_CGROUP_INET4_BIND:
+	case BPF_CGROUP_INET6_BIND:
+	case BPF_CGROUP_INET4_POST_BIND:
+	case BPF_CGROUP_INET6_POST_BIND:
+	case BPF_CGROUP_INET4_CONNECT:
+	case BPF_CGROUP_INET6_CONNECT:
+	case BPF_CGROUP_UNIX_CONNECT:
+	case BPF_CGROUP_INET4_GETPEERNAME:
+	case BPF_CGROUP_INET6_GETPEERNAME:
+	case BPF_CGROUP_UNIX_GETPEERNAME:
+	case BPF_CGROUP_INET4_GETSOCKNAME:
+	case BPF_CGROUP_INET6_GETSOCKNAME:
+	case BPF_CGROUP_UNIX_GETSOCKNAME:
+	case BPF_CGROUP_UDP4_SENDMSG:
+	case BPF_CGROUP_UDP6_SENDMSG:
+	case BPF_CGROUP_UNIX_SENDMSG:
+	case BPF_CGROUP_UDP4_RECVMSG:
+	case BPF_CGROUP_UDP6_RECVMSG:
+	case BPF_CGROUP_UNIX_RECVMSG:
+	case BPF_CGROUP_SOCK_OPS:
+	case BPF_CGROUP_DEVICE:
+	case BPF_CGROUP_SYSCTL:
+	case BPF_CGROUP_GETSOCKOPT:
+	case BPF_CGROUP_SETSOCKOPT:
+	case BPF_LSM_CGROUP:
+		relative_fd = OPTS_GET(opts, cgroup.relative_fd, 0);
+		relative_id = OPTS_GET(opts, cgroup.relative_id, 0);
+		if (relative_fd && relative_id)
+			return libbpf_err(-EINVAL);
+		if (relative_id) {
+			attr.link_create.cgroup.relative_id = relative_id;
+			attr.link_create.flags |= BPF_F_ID;
+		} else {
+			attr.link_create.cgroup.relative_fd = relative_fd;
+		}
+		attr.link_create.cgroup.expected_revision =
+			OPTS_GET(opts, cgroup.expected_revision, 0);
+		if (!OPTS_ZEROED(opts, cgroup))
+			return libbpf_err(-EINVAL);
+		break;
 	default:
 		if (!OPTS_ZEROED(opts, flags))
 			return libbpf_err(-EINVAL);
@@ -1096,7 +1141,7 @@ int bpf_map_get_fd_by_id(__u32 id)
 int bpf_btf_get_fd_by_id_opts(__u32 id,
 			      const struct bpf_get_fd_by_id_opts *opts)
 {
-	const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
+	const size_t attr_sz = offsetofend(union bpf_attr, fd_by_id_token_fd);
 	union bpf_attr attr;
 	int fd;
@@ -1106,6 +1151,7 @@ int bpf_btf_get_fd_by_id_opts(__u32 id,
 	memset(&attr, 0, attr_sz);
 	attr.btf_id = id;
 	attr.open_flags = OPTS_GET(opts, open_flags, 0);
+	attr.fd_by_id_token_fd = OPTS_GET(opts, token_fd, 0);
 
 	fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, attr_sz);
 	return libbpf_err_errno(fd);
@@ -1329,3 +1375,23 @@ int bpf_token_create(int bpffs_fd, struct bpf_token_create_opts *opts)
 	fd = sys_bpf_fd(BPF_TOKEN_CREATE, &attr, attr_sz);
 	return libbpf_err_errno(fd);
 }
+
+int bpf_prog_stream_read(int prog_fd, __u32 stream_id, void *buf, __u32 buf_len,
+			 struct bpf_prog_stream_read_opts *opts)
+{
+	const size_t attr_sz = offsetofend(union bpf_attr, prog_stream_read);
+	union bpf_attr attr;
+	int err;
+
+	if (!OPTS_VALID(opts, bpf_prog_stream_read_opts))
+		return libbpf_err(-EINVAL);
+
+	memset(&attr, 0, attr_sz);
+	attr.prog_stream_read.stream_buf = ptr_to_u64(buf);
+	attr.prog_stream_read.stream_buf_len = buf_len;
+	attr.prog_stream_read.stream_id = stream_id;
+	attr.prog_stream_read.prog_fd = prog_fd;
+
+	err = sys_bpf(BPF_PROG_STREAM_READ_BY_FD, &attr, attr_sz);
+	return libbpf_err_errno(err);
+}
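[Editor's note: a hedged sketch of the new mprog-style cgroup attach ordering enabled above; prog_fd, cgroup_fd and anchor_prog_fd are placeholders.]

LIBBPF_OPTS(bpf_link_create_opts, opts);
int link_fd;

opts.flags = BPF_F_BEFORE;                /* insert before the anchor program */
opts.cgroup.relative_fd = anchor_prog_fd; /* or set .cgroup.relative_id; libbpf
					   * then adds BPF_F_ID itself */
opts.cgroup.expected_revision = 0;        /* 0 = don't enforce a revision check */

link_fd = bpf_link_create(prog_fd, cgroup_fd, BPF_CGROUP_INET_INGRESS, &opts);
if (link_fd < 0)
	/* -EINVAL if both relative_fd and relative_id were set, per the code above */
	return link_fd;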
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index a4a7b1ad1b63..7252150e7ad3 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -107,9 +107,12 @@ struct bpf_prog_load_opts {
 	 */
 	__u32 log_true_size;
 	__u32 token_fd;
+
+	/* if set, provides the length of fd_array */
+	__u32 fd_array_cnt;
 	size_t :0;
 };
-#define bpf_prog_load_opts__last_field token_fd
+#define bpf_prog_load_opts__last_field fd_array_cnt
 
 LIBBPF_API int bpf_prog_load(enum bpf_prog_type prog_type,
 			     const char *prog_name, const char *license,
@@ -435,6 +438,11 @@ struct bpf_link_create_opts {
 			__u32 relative_id;
 			__u64 expected_revision;
 		} netkit;
+		struct {
+			__u32 relative_fd;
+			__u32 relative_id;
+			__u64 expected_revision;
+		} cgroup;
 	};
 	size_t :0;
 };
@@ -484,9 +492,10 @@ LIBBPF_API int bpf_link_get_next_id(__u32 start_id, __u32 *next_id);
 struct bpf_get_fd_by_id_opts {
 	size_t sz; /* size of this struct for forward/backward compatibility */
 	__u32 open_flags; /* permissions requested for the operation on fd */
+	__u32 token_fd;
 	size_t :0;
 };
-#define bpf_get_fd_by_id_opts__last_field open_flags
+#define bpf_get_fd_by_id_opts__last_field token_fd
 
 LIBBPF_API int bpf_prog_get_fd_by_id(__u32 id);
 LIBBPF_API int bpf_prog_get_fd_by_id_opts(__u32 id,
@@ -700,6 +709,27 @@ struct bpf_token_create_opts {
 LIBBPF_API int bpf_token_create(int bpffs_fd,
 				struct bpf_token_create_opts *opts);
 
+struct bpf_prog_stream_read_opts {
+	size_t sz;
+	size_t :0;
+};
+#define bpf_prog_stream_read_opts__last_field sz
+/**
+ * @brief **bpf_prog_stream_read** reads data from the BPF stream of a given BPF
+ * program.
+ *
+ * @param prog_fd FD for the BPF program whose BPF stream is to be read.
+ * @param stream_id ID of the BPF stream to be read.
+ * @param buf Buffer to read data into from the BPF stream.
+ * @param buf_len Maximum number of bytes to read from the BPF stream.
+ * @param opts optional options, can be NULL
+ *
+ * @return The number of bytes read, on success; negative error code, otherwise
+ * (errno is also set to the error code)
+ */
+LIBBPF_API int bpf_prog_stream_read(int prog_fd, __u32 stream_id, void *buf, __u32 buf_len,
+				    struct bpf_prog_stream_read_opts *opts);
+
 #ifdef __cplusplus
 } /* extern "C" */
 #endif
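[Editor's note: a hedged usage sketch for bpf_prog_stream_read(); the stream ID value is an assumption here — the real constants come from the kernel's BPF stream enum in the UAPI bpf.h.]

char buf[4096];
int n;

/* drain the program's stdout-like stream into buf */
n = bpf_prog_stream_read(prog_fd, 1 /* stream_id, per UAPI enum */,
			 buf, sizeof(buf), NULL);
if (n > 0)
	fwrite(buf, 1, n, stdout);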
diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h
index c0e13cdf9660..b997c68bd945 100644
--- a/tools/lib/bpf/bpf_core_read.h
+++ b/tools/lib/bpf/bpf_core_read.h
@@ -388,7 +388,13 @@ extern void *bpf_rdonly_cast(const void *obj, __u32 btf_id) __ksym __weak;
 #define ___arrow10(a, b, c, d, e, f, g, h, i, j) a->b->c->d->e->f->g->h->i->j
 #define ___arrow(...) ___apply(___arrow, ___narg(__VA_ARGS__))(__VA_ARGS__)
 
+#if defined(__clang__) && (__clang_major__ >= 19)
+#define ___type(...) __typeof_unqual__(___arrow(__VA_ARGS__))
+#elif defined(__GNUC__) && (__GNUC__ >= 14)
+#define ___type(...) __typeof_unqual__(___arrow(__VA_ARGS__))
+#else
 #define ___type(...) typeof(___arrow(__VA_ARGS__))
+#endif
 
 #define ___read(read_fn, dst, src_type, src, accessor)			    \
 	read_fn((void *)(dst), sizeof(*(dst)), &((src_type)(src))->accessor)
diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
index 686824b8b413..80c028540656 100644
--- a/tools/lib/bpf/bpf_helpers.h
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -15,6 +15,14 @@
 #define __array(name, val) typeof(val) *name[]
 #define __ulong(name, val) enum { ___bpf_concat(__unique_value, __COUNTER__) = val } name
 
+#ifndef likely
+#define likely(x) (__builtin_expect(!!(x), 1))
+#endif
+
+#ifndef unlikely
+#define unlikely(x) (__builtin_expect(!!(x), 0))
+#endif
+
 /*
  * Helper macro to place programs, maps, license in
  * different sections in elf_bpf file. Section names
@@ -207,6 +215,7 @@ enum libbpf_tristate {
 #define __arg_nonnull __attribute((btf_decl_tag("arg:nonnull")))
 #define __arg_nullable __attribute((btf_decl_tag("arg:nullable")))
 #define __arg_trusted __attribute((btf_decl_tag("arg:trusted")))
+#define __arg_untrusted __attribute((btf_decl_tag("arg:untrusted")))
 #define __arg_arena __attribute((btf_decl_tag("arg:arena")))
 
 #ifndef ___bpf_concat
@@ -306,6 +315,22 @@ enum libbpf_tristate {
 			       ___param, sizeof(___param));		\
 })
 
+extern int bpf_stream_vprintk(int stream_id, const char *fmt__str, const void *args,
+			      __u32 len__sz, void *aux__prog) __weak __ksym;
+
+#define bpf_stream_printk(stream_id, fmt, args...)			\
+({									\
+	static const char ___fmt[] = fmt;				\
+	unsigned long long ___param[___bpf_narg(args)];			\
+									\
+	_Pragma("GCC diagnostic push")					\
+	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")		\
+	___bpf_fill(___param, args);					\
+	_Pragma("GCC diagnostic pop")					\
+									\
+	bpf_stream_vprintk(stream_id, ___fmt, ___param, sizeof(___param), NULL);\
+})
+
 /* Use __bpf_printk when bpf_printk call has 3 or fewer fmt args
  * Otherwise use __bpf_vprintk
  */
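[Editor's note: on the BPF program side, a minimal sketch of the new bpf_stream_printk() macro; the attach point and the numeric stream ID are assumptions, the real ID constants live in the kernel UAPI.]

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

SEC("fentry/do_unlinkat")
int trace_unlink(void *ctx)
{
	/* output is queued on the program's BPF stream and read from user
	 * space with bpf_prog_stream_read(), not via trace_pipe */
	bpf_stream_printk(1 /* stream_id, see UAPI enum */, "unlink by pid %d\n",
			  (int)(bpf_get_current_pid_tgid() >> 32));
	return 0;
}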
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 12468ae0d573..37682908cb0f 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -12,6 +12,7 @@
 #include <sys/utsname.h>
 #include <sys/param.h>
 #include <sys/stat.h>
+#include <sys/mman.h>
 #include <linux/kernel.h>
 #include <linux/err.h>
 #include <linux/btf.h>
@@ -120,6 +121,9 @@ struct btf {
 	/* whether base_btf should be freed in btf_free for this instance */
 	bool owns_base;
 
+	/* whether raw_data is a (read-only) mmap */
+	bool raw_data_is_mmap;
+
 	/* BTF object FD, if loaded into kernel */
 	int fd;
 
@@ -283,7 +287,7 @@ static int btf_parse_str_sec(struct btf *btf)
 		return -EINVAL;
 	}
 	if (!btf->base_btf && start[0]) {
-		pr_debug("Invalid BTF string section\n");
+		pr_debug("Malformed BTF string section, did you forget to provide base BTF?\n");
 		return -EINVAL;
 	}
 	return 0;
@@ -951,6 +955,17 @@ static bool btf_is_modifiable(const struct btf *btf)
 	return (void *)btf->hdr != btf->raw_data;
 }
 
+static void btf_free_raw_data(struct btf *btf)
+{
+	if (btf->raw_data_is_mmap) {
+		munmap(btf->raw_data, btf->raw_size);
+		btf->raw_data_is_mmap = false;
+	} else {
+		free(btf->raw_data);
+	}
+	btf->raw_data = NULL;
+}
+
 void btf__free(struct btf *btf)
 {
 	if (IS_ERR_OR_NULL(btf))
@@ -970,7 +985,7 @@ void btf__free(struct btf *btf)
 		free(btf->types_data);
 		strset__free(btf->strs_set);
 	}
-	free(btf->raw_data);
+	btf_free_raw_data(btf);
 	free(btf->raw_data_swapped);
 	free(btf->type_offs);
 	if (btf->owns_base)
@@ -996,7 +1011,7 @@ static struct btf *btf_new_empty(struct btf *base_btf)
 	if (base_btf) {
 		btf->base_btf = base_btf;
 		btf->start_id = btf__type_cnt(base_btf);
-		btf->start_str_off = base_btf->hdr->str_len;
+		btf->start_str_off = base_btf->hdr->str_len + base_btf->start_str_off;
 		btf->swapped_endian = base_btf->swapped_endian;
 	}
 
@@ -1030,7 +1045,7 @@ struct btf *btf__new_empty_split(struct btf *base_btf)
 	return libbpf_ptr(btf_new_empty(base_btf));
 }
 
-static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)
+static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf, bool is_mmap)
 {
 	struct btf *btf;
 	int err;
@@ -1050,12 +1065,18 @@ static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)
 		btf->start_str_off = base_btf->hdr->str_len;
 	}
 
-	btf->raw_data = malloc(size);
-	if (!btf->raw_data) {
-		err = -ENOMEM;
-		goto done;
+	if (is_mmap) {
+		btf->raw_data = (void *)data;
+		btf->raw_data_is_mmap = true;
+	} else {
+		btf->raw_data = malloc(size);
+		if (!btf->raw_data) {
+			err = -ENOMEM;
+			goto done;
+		}
+		memcpy(btf->raw_data, data, size);
 	}
-	memcpy(btf->raw_data, data, size);
+
 	btf->raw_size = size;
 
 	btf->hdr = btf->raw_data;
@@ -1083,12 +1104,12 @@ done:
 
 struct btf *btf__new(const void *data, __u32 size)
 {
-	return libbpf_ptr(btf_new(data, size, NULL));
+	return libbpf_ptr(btf_new(data, size, NULL, false));
 }
 
 struct btf *btf__new_split(const void *data, __u32 size, struct btf *base_btf)
 {
-	return libbpf_ptr(btf_new(data, size, base_btf));
+	return libbpf_ptr(btf_new(data, size, base_btf, false));
 }
 
 struct btf_elf_secs {
@@ -1148,6 +1169,12 @@ static int btf_find_elf_sections(Elf *elf, const char *path, struct btf_elf_secs
 		else
 			continue;
 
+		if (sh.sh_type != SHT_PROGBITS) {
+			pr_warn("unexpected section type (%d) of section(%d, %s) from %s\n",
+				sh.sh_type, idx, name, path);
+			goto err;
+		}
+
 		data = elf_getdata(scn, 0);
 		if (!data) {
 			pr_warn("failed to get section(%d, %s) data from %s\n",
@@ -1186,6 +1213,7 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
 
 	elf = elf_begin(fd, ELF_C_READ, NULL);
 	if (!elf) {
+		err = -LIBBPF_ERRNO__FORMAT;
 		pr_warn("failed to open %s as ELF file\n", path);
 		goto done;
 	}
@@ -1202,7 +1230,7 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
 	if (secs.btf_base_data) {
 		dist_base_btf = btf_new(secs.btf_base_data->d_buf, secs.btf_base_data->d_size,
-					NULL);
+					NULL, false);
 		if (IS_ERR(dist_base_btf)) {
 			err = PTR_ERR(dist_base_btf);
 			dist_base_btf = NULL;
@@ -1211,7 +1239,7 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
 	}
 
 	btf = btf_new(secs.btf_data->d_buf, secs.btf_data->d_size,
-		      dist_base_btf ?: base_btf);
+		      dist_base_btf ?: base_btf, false);
 	if (IS_ERR(btf)) {
 		err = PTR_ERR(btf);
 		goto done;
@@ -1328,7 +1356,7 @@ static struct btf *btf_parse_raw(const char *path, struct btf *base_btf)
 	}
 
 	/* finally parse BTF data */
-	btf = btf_new(data, sz, base_btf);
+	btf = btf_new(data, sz, base_btf, false);
 
 err_out:
 	free(data);
@@ -1347,6 +1375,37 @@ struct btf *btf__parse_raw_split(const char *path, struct btf *base_btf)
 	return libbpf_ptr(btf_parse_raw(path, base_btf));
 }
 
+static struct btf *btf_parse_raw_mmap(const char *path, struct btf *base_btf)
+{
+	struct stat st;
+	void *data;
+	struct btf *btf;
+	int fd, err;
+
+	fd = open(path, O_RDONLY);
+	if (fd < 0)
+		return ERR_PTR(-errno);
+
+	if (fstat(fd, &st) < 0) {
+		err = -errno;
+		close(fd);
+		return ERR_PTR(err);
+	}
+
+	data = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
+	err = -errno;
+	close(fd);
+
+	if (data == MAP_FAILED)
+		return ERR_PTR(err);
+
+	btf = btf_new(data, st.st_size, base_btf, true);
+	if (IS_ERR(btf))
+		munmap(data, st.st_size);
+
+	return btf;
+}
+
 static struct btf *btf_parse(const char *path, struct btf *base_btf, struct btf_ext **btf_ext)
 {
 	struct btf *btf;
@@ -1611,19 +1670,25 @@ struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf)
 		goto exit_free;
 	}
 
-	btf = btf_new(ptr, btf_info.btf_size, base_btf);
+	btf = btf_new(ptr, btf_info.btf_size, base_btf, false);
 
 exit_free:
 	free(ptr);
 	return btf;
 }
 
-struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf)
+struct btf *btf_load_from_kernel(__u32 id, struct btf *base_btf, int token_fd)
 {
 	struct btf *btf;
 	int btf_fd;
+	LIBBPF_OPTS(bpf_get_fd_by_id_opts, opts);
+
+	if (token_fd) {
+		opts.open_flags |= BPF_F_TOKEN_FD;
+		opts.token_fd = token_fd;
+	}
 
-	btf_fd = bpf_btf_get_fd_by_id(id);
+	btf_fd = bpf_btf_get_fd_by_id_opts(id, &opts);
 	if (btf_fd < 0)
 		return libbpf_err_ptr(-errno);
 
@@ -1633,6 +1698,11 @@ struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf)
 	return libbpf_ptr(btf);
 }
 
+struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf)
+{
+	return btf_load_from_kernel(id, base_btf, 0);
+}
+
 struct btf *btf__load_from_kernel_by_id(__u32 id)
 {
 	return btf__load_from_kernel_by_id_split(id, NULL);
@@ -1640,10 +1710,8 @@ struct btf *btf__load_from_kernel_by_id(__u32 id)
 
 static void btf_invalidate_raw_data(struct btf *btf)
 {
-	if (btf->raw_data) {
-		free(btf->raw_data);
-		btf->raw_data = NULL;
-	}
+	if (btf->raw_data)
+		btf_free_raw_data(btf);
 	if (btf->raw_data_swapped) {
 		free(btf->raw_data_swapped);
 		btf->raw_data_swapped = NULL;
 	}
@@ -2089,7 +2157,7 @@ static int validate_type_id(int id)
 }
 
 /* generic append function for PTR, TYPEDEF, CONST/VOLATILE/RESTRICT */
-static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref_type_id)
+static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref_type_id, int kflag)
 {
 	struct btf_type *t;
 	int sz, name_off = 0;
@@ -2112,7 +2180,7 @@ static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref
 	}
 
 	t->name_off = name_off;
-	t->info = btf_type_info(kind, 0, 0);
+	t->info = btf_type_info(kind, 0, kflag);
 	t->type = ref_type_id;
 
 	return btf_commit_type(btf, sz);
@@ -2127,7 +2195,7 @@ static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref
  */
int btf__add_ptr(struct btf *btf, int ref_type_id)
 {
-	return btf_add_ref_kind(btf, BTF_KIND_PTR, NULL, ref_type_id);
+	return btf_add_ref_kind(btf, BTF_KIND_PTR, NULL, ref_type_id, 0);
 }
 
 /*
@@ -2505,7 +2573,7 @@ int btf__add_fwd(struct btf *btf, const char *name, enum btf_fwd_kind fwd_kind)
 	struct btf_type *t;
 	int id;
 
-	id = btf_add_ref_kind(btf, BTF_KIND_FWD, name, 0);
+	id = btf_add_ref_kind(btf, BTF_KIND_FWD, name, 0, 0);
 	if (id <= 0)
 		return id;
 	t = btf_type_by_id(btf, id);
@@ -2535,7 +2603,7 @@ int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id)
 	if (!name || !name[0])
 		return libbpf_err(-EINVAL);
 
-	return btf_add_ref_kind(btf, BTF_KIND_TYPEDEF, name, ref_type_id);
+	return btf_add_ref_kind(btf, BTF_KIND_TYPEDEF, name, ref_type_id, 0);
 }
 
 /*
@@ -2547,7 +2615,7 @@ int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id)
  */
 int btf__add_volatile(struct btf *btf, int ref_type_id)
 {
-	return btf_add_ref_kind(btf, BTF_KIND_VOLATILE, NULL, ref_type_id);
+	return btf_add_ref_kind(btf, BTF_KIND_VOLATILE, NULL, ref_type_id, 0);
 }
 
 /*
@@ -2559,7 +2627,7 @@ int btf__add_volatile(struct btf *btf, int ref_type_id)
  */
 int btf__add_const(struct btf *btf, int ref_type_id)
 {
-	return btf_add_ref_kind(btf, BTF_KIND_CONST, NULL, ref_type_id);
+	return btf_add_ref_kind(btf, BTF_KIND_CONST, NULL, ref_type_id, 0);
 }
 
 /*
@@ -2571,7 +2639,7 @@ int btf__add_const(struct btf *btf, int ref_type_id)
  */
 int btf__add_restrict(struct btf *btf, int ref_type_id)
 {
-	return btf_add_ref_kind(btf, BTF_KIND_RESTRICT, NULL, ref_type_id);
+	return btf_add_ref_kind(btf, BTF_KIND_RESTRICT, NULL, ref_type_id, 0);
 }
 
 /*
@@ -2587,7 +2655,24 @@ int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id)
 	if (!value || !value[0])
 		return libbpf_err(-EINVAL);
 
-	return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id);
+	return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id, 0);
+}
+
+/*
+ * Append new BTF_KIND_TYPE_TAG type with:
+ *   - *value*, non-empty/non-NULL tag value;
+ *   - *ref_type_id* - referenced type ID, it might not exist yet;
+ * Set info->kflag to 1, indicating this tag is an __attribute__
+ * Returns:
+ *   - >0, type ID of newly added BTF type;
+ *   - <0, on error.
+ */
+int btf__add_type_attr(struct btf *btf, const char *value, int ref_type_id)
+{
+	if (!value || !value[0])
+		return libbpf_err(-EINVAL);
+
+	return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id, 1);
 }
 
 /*
@@ -2609,7 +2694,7 @@ int btf__add_func(struct btf *btf, const char *name,
 	    linkage != BTF_FUNC_EXTERN)
 		return libbpf_err(-EINVAL);
 
-	id = btf_add_ref_kind(btf, BTF_KIND_FUNC, name, proto_type_id);
+	id = btf_add_ref_kind(btf, BTF_KIND_FUNC, name, proto_type_id, 0);
 	if (id > 0) {
 		struct btf_type *t = btf_type_by_id(btf, id);
 
@@ -2844,18 +2929,8 @@ int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __
 	return 0;
 }
 
-/*
- * Append new BTF_KIND_DECL_TAG type with:
- *   - *value* - non-empty/non-NULL string;
- *   - *ref_type_id* - referenced type ID, it might not exist yet;
- *   - *component_idx* - -1 for tagging reference type, otherwise struct/union
- *     member or function argument index;
- * Returns:
- *   - >0, type ID of newly added BTF type;
- *   - <0, on error.
- */
-int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
-		      int component_idx)
+static int btf_add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
+			    int component_idx, int kflag)
 {
 	struct btf_type *t;
 	int sz, value_off;
@@ -2879,13 +2954,46 @@ int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
 		return value_off;
 
 	t->name_off = value_off;
-	t->info = btf_type_info(BTF_KIND_DECL_TAG, 0, false);
+	t->info = btf_type_info(BTF_KIND_DECL_TAG, 0, kflag);
 	t->type = ref_type_id;
 	btf_decl_tag(t)->component_idx = component_idx;
 
 	return btf_commit_type(btf, sz);
 }
 
+/*
+ * Append new BTF_KIND_DECL_TAG type with:
+ *   - *value* - non-empty/non-NULL string;
+ *   - *ref_type_id* - referenced type ID, it might not exist yet;
+ *   - *component_idx* - -1 for tagging reference type, otherwise struct/union
+ *     member or function argument index;
+ * Returns:
+ *   - >0, type ID of newly added BTF type;
+ *   - <0, on error.
+ */
+int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
+		      int component_idx)
+{
+	return btf_add_decl_tag(btf, value, ref_type_id, component_idx, 0);
+}
+
+/*
+ * Append new BTF_KIND_DECL_TAG type with:
+ *   - *value* - non-empty/non-NULL string;
+ *   - *ref_type_id* - referenced type ID, it might not exist yet;
+ *   - *component_idx* - -1 for tagging reference type, otherwise struct/union
+ *     member or function argument index;
+ * Set info->kflag to 1, indicating this tag is an __attribute__
+ * Returns:
+ *   - >0, type ID of newly added BTF type;
+ *   - <0, on error.
+ */
+int btf__add_decl_attr(struct btf *btf, const char *value, int ref_type_id,
+		       int component_idx)
+{
+	return btf_add_decl_tag(btf, value, ref_type_id, component_idx, 1);
+}
+
 struct btf_ext_sec_info_param {
 	__u32 off;
 	__u32 len;
@@ -3014,8 +3122,6 @@ static int btf_ext_parse_info(struct btf_ext *btf_ext, bool is_native)
 		.desc = "line_info",
 	};
 	struct btf_ext_sec_info_param core_relo = {
-		.off = btf_ext->hdr->core_relo_off,
-		.len = btf_ext->hdr->core_relo_len,
 		.min_rec_size = sizeof(struct bpf_core_relo),
 		.ext_info = &btf_ext->core_relo_info,
 		.desc = "core_relo",
@@ -3033,6 +3139,8 @@ static int btf_ext_parse_info(struct btf_ext *btf_ext, bool is_native)
 	if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
 		return 0; /* skip core relos parsing */
 
+	core_relo.off = btf_ext->hdr->core_relo_off;
+	core_relo.len = btf_ext->hdr->core_relo_len;
 	err = btf_ext_parse_sec_info(btf_ext, &core_relo, is_native);
 	if (err)
 		return err;
@@ -4298,46 +4406,109 @@ static inline __u16 btf_fwd_kind(struct btf_type *t)
 	return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
 }
 
-/* Check if given two types are identical ARRAY definitions */
-static bool btf_dedup_identical_arrays(struct btf_dedup *d, __u32 id1, __u32 id2)
+static bool btf_dedup_identical_types(struct btf_dedup *d, __u32 id1, __u32 id2, int depth)
 {
 	struct btf_type *t1, *t2;
+	int k1, k2;
 
+recur:
+	if (depth <= 0)
+		return false;
 	t1 = btf_type_by_id(d->btf, id1);
 	t2 = btf_type_by_id(d->btf, id2);
-	if (!btf_is_array(t1) || !btf_is_array(t2))
+
+	k1 = btf_kind(t1);
+	k2 = btf_kind(t2);
+	if (k1 != k2)
 		return false;
 
-	return btf_equal_array(t1, t2);
-}
+	switch (k1) {
+	case BTF_KIND_UNKN: /* VOID */
+		return true;
+	case BTF_KIND_INT:
+		return btf_equal_int_tag(t1, t2);
+	case BTF_KIND_ENUM:
+	case BTF_KIND_ENUM64:
+		return btf_compat_enum(t1, t2);
+	case BTF_KIND_FWD:
+	case BTF_KIND_FLOAT:
+		return btf_equal_common(t1, t2);
+	case BTF_KIND_CONST:
+	case BTF_KIND_VOLATILE:
+	case BTF_KIND_RESTRICT:
+	case BTF_KIND_PTR:
+	case BTF_KIND_TYPEDEF:
+	case BTF_KIND_FUNC:
+	case BTF_KIND_TYPE_TAG:
+		if (t1->info != t2->info || t1->name_off != t2->name_off)
+			return false;
+		id1 = t1->type;
+		id2 = t2->type;
+		goto recur;
+	case BTF_KIND_ARRAY: {
+		struct btf_array *a1, *a2;
 
-/* Check if given two types are identical STRUCT/UNION definitions */
-static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id2)
-{
-	const struct btf_member *m1, *m2;
-	struct btf_type *t1, *t2;
-	int n, i;
+		if (!btf_compat_array(t1, t2))
+			return false;
 
-	t1 = btf_type_by_id(d->btf, id1);
-	t2 = btf_type_by_id(d->btf, id2);
+		a1 = btf_array(t1);
+		a2 = btf_array(t1);
 
-	if (!btf_is_composite(t1) || btf_kind(t1) != btf_kind(t2))
-		return false;
+		if (a1->index_type != a2->index_type &&
+		    !btf_dedup_identical_types(d, a1->index_type, a2->index_type, depth - 1))
+			return false;
 
-	if (!btf_shallow_equal_struct(t1, t2))
-		return false;
+		if (a1->type != a2->type &&
+		    !btf_dedup_identical_types(d, a1->type, a2->type, depth - 1))
+			return false;
 
-	m1 = btf_members(t1);
-	m2 = btf_members(t2);
-	for (i = 0, n = btf_vlen(t1); i < n; i++, m1++, m2++) {
-		if (m1->type != m2->type &&
-		    !btf_dedup_identical_arrays(d, m1->type, m2->type) &&
-		    !btf_dedup_identical_structs(d, m1->type, m2->type))
+		return true;
+	}
+	case BTF_KIND_STRUCT:
+	case BTF_KIND_UNION: {
+		const struct btf_member *m1, *m2;
+		int i, n;
+
+		if (!btf_shallow_equal_struct(t1, t2))
 			return false;
+
+		m1 = btf_members(t1);
+		m2 = btf_members(t2);
+		for (i = 0, n = btf_vlen(t1); i < n; i++, m1++, m2++) {
+			if (m1->type == m2->type)
+				continue;
+			if (!btf_dedup_identical_types(d, m1->type, m2->type, depth - 1))
+				return false;
+		}
+		return true;
+	}
+	case BTF_KIND_FUNC_PROTO: {
+		const struct btf_param *p1, *p2;
+		int i, n;
+
+		if (!btf_compat_fnproto(t1, t2))
+			return false;
+
+		if (t1->type != t2->type &&
+		    !btf_dedup_identical_types(d, t1->type, t2->type, depth - 1))
+			return false;
+
+		p1 = btf_params(t1);
+		p2 = btf_params(t2);
+		for (i = 0, n = btf_vlen(t1); i < n; i++, p1++, p2++) {
+			if (p1->type == p2->type)
+				continue;
+			if (!btf_dedup_identical_types(d, p1->type, p2->type, depth - 1))
+				return false;
+		}
+		return true;
+	}
+	default:
+		return false;
 	}
-	return true;
 }
 
+
 /*
  * Check equivalence of BTF type graph formed by candidate struct/union (we'll
  * call it "candidate graph" in this description for brevity) to a type graph
@@ -4456,19 +4627,13 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
 		 * different fields within the *same* struct. This breaks type
 		 * equivalence check, which makes an assumption that candidate
 		 * types sub-graph has a consistent and deduped-by-compiler
-		 * types within a single CU. So work around that by explicitly
-		 * allowing identical array types here.
-		 */
-		if (btf_dedup_identical_arrays(d, hypot_type_id, cand_id))
-			return 1;
-		/* It turns out that similar situation can happen with
-		 * struct/union sometimes, sigh... Handle the case where
-		 * structs/unions are exactly the same, down to the referenced
-		 * type IDs. Anything more complicated (e.g., if referenced
-		 * types are different, but equivalent) is *way more*
-		 * complicated and requires a many-to-many equivalence mapping.
+		 * types within a single CU. And similar situation can happen
+		 * with struct/union sometimes, and event with pointers.
+		 * So accommodate cases like this doing a structural
+		 * comparison recursively, but avoiding being stuck in endless
+		 * loops by limiting the depth up to which we check.
 		 */
-		if (btf_dedup_identical_structs(d, hypot_type_id, cand_id))
+		if (btf_dedup_identical_types(d, hypot_type_id, cand_id, 16))
 			return 1;
 		return 0;
 	}
@@ -5216,7 +5381,10 @@ struct btf *btf__load_vmlinux_btf(void)
 		pr_warn("kernel BTF is missing at '%s', was CONFIG_DEBUG_INFO_BTF enabled?\n",
 			sysfs_btf_path);
 	} else {
-		btf = btf__parse(sysfs_btf_path, NULL);
+		btf = btf_parse_raw_mmap(sysfs_btf_path, NULL);
+		if (IS_ERR(btf))
+			btf = btf__parse(sysfs_btf_path, NULL);
+
 		if (!btf) {
 			err = -errno;
 			pr_warn("failed to read kernel BTF from '%s': %s\n",
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index 47ee8f6ac489..ccfd905f03df 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -227,6 +227,7 @@ LIBBPF_API int btf__add_volatile(struct btf *btf, int ref_type_id);
 LIBBPF_API int btf__add_const(struct btf *btf, int ref_type_id);
 LIBBPF_API int btf__add_restrict(struct btf *btf, int ref_type_id);
 LIBBPF_API int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id);
+LIBBPF_API int btf__add_type_attr(struct btf *btf, const char *value, int ref_type_id);
 
 /* func and func_proto construction APIs */
 LIBBPF_API int btf__add_func(struct btf *btf, const char *name,
@@ -243,6 +244,8 @@ LIBBPF_API int btf__add_datasec_var_info(struct btf *btf, int var_type_id,
 /* tag construction API */
 LIBBPF_API int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
 			    int component_idx);
+LIBBPF_API int btf__add_decl_attr(struct btf *btf, const char *value, int ref_type_id,
+				  int component_idx);
 
 struct btf_dedup_opts {
 	size_t sz;
@@ -323,9 +326,10 @@ struct btf_dump_type_data_opts {
 	bool compact;		/* no newlines/indentation */
 	bool skip_names;	/* skip member/type names */
 	bool emit_zeroes;	/* show 0-valued fields */
+	bool emit_strings;	/* print char arrays as strings */
 	size_t :0;
 };
-#define btf_dump_type_data_opts__last_field emit_zeroes
+#define btf_dump_type_data_opts__last_field emit_strings
 
 LIBBPF_API int
 btf_dump__dump_type_data(struct btf_dump *d, __u32 id,
diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
index a3fc6908f6c9..f09f25eccf3c 100644
--- a/tools/lib/bpf/btf_dump.c
+++ b/tools/lib/bpf/btf_dump.c
@@ -68,6 +68,7 @@ struct btf_dump_data {
 	bool compact;
 	bool skip_names;
 	bool emit_zeroes;
+	bool emit_strings;
 	__u8 indent_lvl;	/* base indent level */
 	char indent_str[BTF_DATA_INDENT_STR_LEN];
 	/* below are used during iteration */
@@ -226,6 +227,9 @@ static void btf_dump_free_names(struct hashmap *map)
 	size_t bkt;
 	struct hashmap_entry *cur;
 
+	if (!map)
+		return;
+
 	hashmap__for_each_entry(map, cur, bkt)
 		free((void *)cur->pkey);
 
@@ -1494,7 +1498,10 @@ static void btf_dump_emit_type_chain(struct btf_dump *d,
 		case BTF_KIND_TYPE_TAG:
 			btf_dump_emit_mods(d, decls);
 			name = btf_name_of(d, t->name_off);
-			btf_dump_printf(d, " __attribute__((btf_type_tag(\"%s\")))", name);
+			if (btf_kflag(t))
+				btf_dump_printf(d, " __attribute__((%s))", name);
+			else
+				btf_dump_printf(d, " __attribute__((btf_type_tag(\"%s\")))", name);
 			break;
 		case BTF_KIND_ARRAY: {
 			const struct btf_array *a = btf_array(t);
@@ -2025,6 +2032,52 @@ static int btf_dump_var_data(struct btf_dump *d,
 	return btf_dump_dump_type_data(d, NULL, t, type_id, data, 0, 0);
 }
 
+static int btf_dump_string_data(struct btf_dump *d,
+				const struct btf_type *t,
+				__u32 id,
+				const void *data)
+{
+	const struct btf_array *array = btf_array(t);
+	const char *chars = data;
+	__u32 i;
+
+	/* Make sure it is a NUL-terminated string. */
+	for (i = 0; i < array->nelems; i++) {
+		if ((void *)(chars + i) >= d->typed_dump->data_end)
+			return -E2BIG;
+		if (chars[i] == '\0')
+			break;
+	}
+	if (i == array->nelems) {
+		/* The caller will print this as a regular array. */
+		return -EINVAL;
+	}
+
+	btf_dump_data_pfx(d);
+	btf_dump_printf(d, "\"");
+
+	for (i = 0; i < array->nelems; i++) {
+		char c = chars[i];
+
+		if (c == '\0') {
+			/*
+			 * When printing character arrays as strings, NUL bytes
+			 * are always treated as string terminators; they are
+			 * never printed.
+			 */
+			break;
+		}
+		if (isprint(c))
+			btf_dump_printf(d, "%c", c);
+		else
+			btf_dump_printf(d, "\\x%02x", (__u8)c);
+	}
+
+	btf_dump_printf(d, "\"");
+
+	return 0;
+}
+
 static int btf_dump_array_data(struct btf_dump *d,
 			       const struct btf_type *t,
 			       __u32 id,
@@ -2052,8 +2105,13 @@ static int btf_dump_array_data(struct btf_dump *d,
 		 * char arrays, so if size is 1 and element is
 		 * printable as a char, we'll do that.
 		 */
-		if (elem_size == 1)
+		if (elem_size == 1) {
+			if (d->typed_dump->emit_strings &&
+			    btf_dump_string_data(d, t, id, data) == 0) {
+				return 0;
+			}
 			d->typed_dump->is_array_char = true;
+		}
 	}
 
 	/* note that we increment depth before calling btf_dump_print() below;
@@ -2541,6 +2599,7 @@ int btf_dump__dump_type_data(struct btf_dump *d, __u32 id,
 	d->typed_dump->compact = OPTS_GET(opts, compact, false);
 	d->typed_dump->skip_names = OPTS_GET(opts, skip_names, false);
 	d->typed_dump->emit_zeroes = OPTS_GET(opts, emit_zeroes, false);
+	d->typed_dump->emit_strings = OPTS_GET(opts, emit_strings, false);
 
 	ret = btf_dump_dump_type_data(d, NULL, t, id, data, 0, 0);
diff --git a/tools/lib/bpf/btf_relocate.c b/tools/lib/bpf/btf_relocate.c
index b72f83e15156..53d1f3541bce 100644
--- a/tools/lib/bpf/btf_relocate.c
+++ b/tools/lib/bpf/btf_relocate.c
@@ -212,7 +212,7 @@ static int btf_relocate_map_distilled_base(struct btf_relocate *r)
 	 * need to match both name and size, otherwise embedding the base
	 * struct/union in the split type is invalid.
	 */
-	for (id = r->nr_dist_base_types; id < r->nr_split_types; id++) {
+	for (id = r->nr_dist_base_types; id < r->nr_dist_base_types + r->nr_split_types; id++) {
 		err = btf_mark_embedded_composite_type_ids(r, id);
 		if (err)
 			goto done;
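[Editor's note: a hedged sketch of dumping typed data with the new emit_strings option; d, type_id, buf and buf_sz are placeholders set up via the usual btf_dump__new() flow.]

LIBBPF_OPTS(btf_dump_type_data_opts, opts, .emit_strings = true);
int err;

/* with emit_strings set, char arrays print as "text" (non-printable
 * bytes escaped as \x..) instead of element-by-element char output */
err = btf_dump__dump_type_data(d, type_id, buf, buf_sz, &opts);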
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 66173ddb5a2d..8f5a81b672e1 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -60,6 +60,8 @@
 #define BPF_FS_MAGIC		0xcafe4a11
 #endif
 
+#define MAX_EVENT_NAME_LEN	64
+
 #define BPF_FS_DEFAULT_PATH "/sys/fs/bpf"
 
 #define BPF_INSN_SZ (sizeof(struct bpf_insn))
@@ -284,7 +286,7 @@ void libbpf_print(enum libbpf_print_level level, const char *format, ...)
 	old_errno = errno;
 
 	va_start(args, format);
-	__libbpf_pr(level, format, args);
+	print_fn(level, format, args);
 	va_end(args);
 
 	errno = old_errno;
@@ -595,7 +597,7 @@ struct extern_desc {
 	int sym_idx;
 	int btf_id;
 	int sec_btf_id;
-	const char *name;
+	char *name;
 	char *essent_name;
 	bool is_set;
 	bool is_weak;
@@ -670,11 +672,18 @@ struct elf_state {
 
 struct usdt_manager;
 
+enum bpf_object_state {
+	OBJ_OPEN,
+	OBJ_PREPARED,
+	OBJ_LOADED,
+};
+
 struct bpf_object {
 	char name[BPF_OBJ_NAME_LEN];
 	char license[64];
 	__u32 kern_version;
 
+	enum bpf_object_state state;
 	struct bpf_program *programs;
 	size_t nr_programs;
 	struct bpf_map *maps;
@@ -686,7 +695,6 @@ struct bpf_object {
 	int nr_extern;
 	int kconfig_map_idx;
 
-	bool loaded;
 	bool has_subcalls;
 	bool has_rodata;
 
@@ -727,7 +735,7 @@ struct bpf_object {
 
 	struct usdt_manager *usdt_man;
 
-	struct bpf_map *arena_map;
+	int arena_map_idx;
 	void *arena_data;
 	size_t arena_data_sz;
 
@@ -890,7 +898,7 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
 			return -LIBBPF_ERRNO__FORMAT;
 		}
 
-		if (sec_off + prog_sz > sec_sz) {
+		if (sec_off + prog_sz > sec_sz || sec_off + prog_sz < sec_off) {
 			pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
 				sec_name, sec_off);
 			return -LIBBPF_ERRNO__FORMAT;
@@ -1509,9 +1517,10 @@ static struct bpf_object *bpf_object__new(const char *path,
 	obj->efile.obj_buf_sz = obj_buf_sz;
 	obj->efile.btf_maps_shndx = -1;
 	obj->kconfig_map_idx = -1;
+	obj->arena_map_idx = -1;
 
 	obj->kern_version = get_kernel_version();
-	obj->loaded = false;
+	obj->state = OBJ_OPEN;
 
 	return obj;
 }
@@ -1719,24 +1728,27 @@ static Elf64_Sym *find_elf_var_sym(const struct bpf_object *obj, const char *nam
 	return ERR_PTR(-ENOENT);
 }
 
-/* Some versions of Android don't provide memfd_create() in their libc
- * implementation, so avoid complications and just go straight to Linux
- * syscall.
- */
-static int sys_memfd_create(const char *name, unsigned flags)
-{
-	return syscall(__NR_memfd_create, name, flags);
-}
-
 #ifndef MFD_CLOEXEC
 #define MFD_CLOEXEC 0x0001U
 #endif
+#ifndef MFD_NOEXEC_SEAL
+#define MFD_NOEXEC_SEAL 0x0008U
+#endif
 
 static int create_placeholder_fd(void)
 {
+	unsigned int flags = MFD_CLOEXEC | MFD_NOEXEC_SEAL;
+	const char *name = "libbpf-placeholder-fd";
 	int fd;
 
-	fd = ensure_good_fd(sys_memfd_create("libbpf-placeholder-fd", MFD_CLOEXEC));
+	fd = ensure_good_fd(sys_memfd_create(name, flags));
+	if (fd >= 0)
+		return fd;
+	else if (errno != EINVAL)
+		return -errno;
+
+	/* Possibly running on kernel without MFD_NOEXEC_SEAL */
+	fd = ensure_good_fd(sys_memfd_create(name, flags & ~MFD_NOEXEC_SEAL));
 	if (fd < 0)
 		return -errno;
 	return fd;
@@ -2094,7 +2106,7 @@ static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val,
 	}
 
 	len = strlen(value);
-	if (value[len - 1] != '"') {
+	if (len < 2 || value[len - 1] != '"') {
 		pr_warn("extern (kcfg) '%s': invalid string config '%s'\n",
 			ext->name, value);
 		return -EINVAL;
@@ -2953,7 +2965,7 @@ static int init_arena_map_data(struct bpf_object *obj, struct bpf_map *map,
 	const long page_sz = sysconf(_SC_PAGE_SIZE);
 	size_t mmap_sz;
 
-	mmap_sz = bpf_map_mmap_sz(obj->arena_map);
+	mmap_sz = bpf_map_mmap_sz(map);
 	if (roundup(data_sz, page_sz) > mmap_sz) {
 		pr_warn("elf: sec '%s': declared ARENA map size (%zu) is too small to hold global __arena variables of size %zu\n",
 			sec_name, mmap_sz, data_sz);
@@ -3027,12 +3039,12 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
 		if (map->def.type != BPF_MAP_TYPE_ARENA)
 			continue;
 
-		if (obj->arena_map) {
+		if (obj->arena_map_idx >= 0) {
 			pr_warn("map '%s': only single ARENA map is supported (map '%s' is also ARENA)\n",
-				map->name, obj->arena_map->name);
+				map->name, obj->maps[obj->arena_map_idx].name);
 			return -EINVAL;
 		}
-		obj->arena_map = map;
+		obj->arena_map_idx = i;
 
 		if (obj->efile.arena_data) {
 			err = init_arena_map_data(obj, map, ARENA_SEC, obj->efile.arena_data_shndx,
@@ -3042,7 +3054,7 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
 				return err;
 		}
 	}
-	if (obj->efile.arena_data && !obj->arena_map) {
+	if (obj->efile.arena_data && obj->arena_map_idx < 0) {
 		pr_warn("elf: sec '%s': to use global __arena variables the ARENA map should be explicitly declared in SEC(\".maps\")\n",
 			ARENA_SEC);
 		return -ENOENT;
@@ -4248,7 +4260,9 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
 			return ext->btf_id;
 		}
 		t = btf__type_by_id(obj->btf, ext->btf_id);
-		ext->name = btf__name_by_offset(obj->btf, t->name_off);
+		ext->name = strdup(btf__name_by_offset(obj->btf, t->name_off));
+		if (!ext->name)
+			return -ENOMEM;
 		ext->sym_idx = i;
 		ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK;
 
@@ -4568,10 +4582,20 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
 
 	/* arena data relocation */
 	if (shdr_idx == obj->efile.arena_data_shndx) {
+		if (obj->arena_map_idx < 0) {
+			pr_warn("prog '%s': bad arena data relocation at insn %u, no arena maps defined\n",
+				prog->name, insn_idx);
+			return -LIBBPF_ERRNO__RELOC;
+		}
 		reloc_desc->type = RELO_DATA;
 		reloc_desc->insn_idx = insn_idx;
-		reloc_desc->map_idx = obj->arena_map - obj->maps;
+		reloc_desc->map_idx = obj->arena_map_idx;
 		reloc_desc->sym_off = sym->st_value;
+
+		map = &obj->maps[obj->arena_map_idx];
+		pr_debug("prog '%s': found arena map %d (%s, sec %d, off %zu) for insn %u\n",
+			 prog->name, obj->arena_map_idx, map->name, map->sec_idx,
+			 map->sec_offset, insn_idx);
 		return 0;
 	}
@@ -4833,6 +4857,11 @@ static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info)
 	return 0;
 }
 
+static bool map_is_created(const struct bpf_map *map)
+{
+	return map->obj->state >= OBJ_PREPARED || map->reused;
+}
+
 bool bpf_map__autocreate(const struct bpf_map *map)
 {
 	return map->autocreate;
@@ -4840,7 +4869,7 @@ bool bpf_map__autocreate(const struct bpf_map *map)
 
 int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)
 {
-	if (map->obj->loaded)
+	if (map_is_created(map))
 		return libbpf_err(-EBUSY);
 
 	map->autocreate = autocreate;
@@ -4934,7 +4963,7 @@ struct bpf_map *bpf_map__inner_map(struct bpf_map *map)
 
 int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
 {
-	if (map->obj->loaded)
+	if (map_is_created(map))
 		return libbpf_err(-EBUSY);
 
 	map->def.max_entries = max_entries;
@@ -5179,11 +5208,6 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
 
 static void bpf_map__destroy(struct bpf_map *map);
 
-static bool map_is_created(const struct bpf_map *map)
-{
-	return map->obj->loaded || map->reused;
-}
-
 static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
 {
 	LIBBPF_OPTS(bpf_map_create_opts, create_attr);
@@ -7885,13 +7909,6 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
 
 	for (i = 0; i < obj->nr_programs; i++) {
 		prog = &obj->programs[i];
-		err = bpf_object__sanitize_prog(obj, prog);
-		if (err)
-			return err;
-	}
-
-	for (i = 0; i < obj->nr_programs; i++) {
-		prog = &obj->programs[i];
 		if (prog_is_subprog(obj, prog))
 			continue;
 		if (!prog->autoload) {
@@ -7915,6 +7932,21 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
 	return 0;
 }
 
+static int bpf_object_prepare_progs(struct bpf_object *obj)
+{
+	struct bpf_program *prog;
+	size_t i;
+	int err;
+
+	for (i = 0; i < obj->nr_programs; i++) {
+		prog = &obj->programs[i];
+		err = bpf_object__sanitize_prog(obj, prog);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
 static const struct bpf_sec_def *find_sec_def(const char *sec_name);
 
 static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts)
@@ -8531,14 +8563,77 @@ static int bpf_object_prepare_struct_ops(struct bpf_object *obj)
 	return 0;
 }
 
+static void bpf_object_unpin(struct bpf_object *obj)
+{
+	int i;
+
+	/* unpin any maps that were auto-pinned during load */
+	for (i = 0; i < obj->nr_maps; i++)
+		if (obj->maps[i].pinned && !obj->maps[i].reused)
+			bpf_map__unpin(&obj->maps[i], NULL);
+}
+
+static void bpf_object_post_load_cleanup(struct bpf_object *obj)
+{
+	int i;
+
+	/* clean up fd_array */
+	zfree(&obj->fd_array);
+
+	/* clean up module BTFs */
+	for (i = 0; i < obj->btf_module_cnt; i++) {
+		close(obj->btf_modules[i].fd);
+		btf__free(obj->btf_modules[i].btf);
+		free(obj->btf_modules[i].name);
+	}
+	obj->btf_module_cnt = 0;
+	zfree(&obj->btf_modules);
+
+	/* clean up vmlinux BTF */
+	btf__free(obj->btf_vmlinux);
+	obj->btf_vmlinux = NULL;
+}
+
+static int bpf_object_prepare(struct bpf_object *obj, const char *target_btf_path)
+{
+	int err;
+
+	if (obj->state >= OBJ_PREPARED) {
+		pr_warn("object '%s': prepare loading can't be attempted twice\n", obj->name);
+		return -EINVAL;
+	}
+
+	err = bpf_object_prepare_token(obj);
+	err = err ? : bpf_object__probe_loading(obj);
+	err = err ? : bpf_object__load_vmlinux_btf(obj, false);
+	err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
+	err = err ? : bpf_object__sanitize_maps(obj);
+	err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
+	err = err ? : bpf_object_adjust_struct_ops_autoload(obj);
+	err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
+	err = err ? : bpf_object__sanitize_and_load_btf(obj);
+	err = err ? : bpf_object__create_maps(obj);
+	err = err ? : bpf_object_prepare_progs(obj);
+
+	if (err) {
+		bpf_object_unpin(obj);
+		bpf_object_unload(obj);
+		obj->state = OBJ_LOADED;
+		return err;
+	}
+
+	obj->state = OBJ_PREPARED;
+	return 0;
+}
+
 static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path)
 {
-	int err, i;
+	int err;
 
 	if (!obj)
 		return libbpf_err(-EINVAL);
 
-	if (obj->loaded) {
+	if (obj->state >= OBJ_LOADED) {
 		pr_warn("object '%s': load can't be attempted twice\n", obj->name);
 		return libbpf_err(-EINVAL);
 	}
@@ -8553,17 +8648,12 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
 		return libbpf_err(-LIBBPF_ERRNO__ENDIAN);
 	}
 
-	err = bpf_object_prepare_token(obj);
-	err = err ? : bpf_object__probe_loading(obj);
-	err = err ? : bpf_object__load_vmlinux_btf(obj, false);
-	err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
-	err = err ? : bpf_object__sanitize_maps(obj);
-	err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
-	err = err ? : bpf_object_adjust_struct_ops_autoload(obj);
-	err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
-	err = err ? : bpf_object__sanitize_and_load_btf(obj);
-	err = err ? : bpf_object__create_maps(obj);
-	err = err ? : bpf_object__load_progs(obj, extra_log_level);
+	if (obj->state < OBJ_PREPARED) {
+		err = bpf_object_prepare(obj, target_btf_path);
+		if (err)
+			return libbpf_err(err);
+	}
+	err = bpf_object__load_progs(obj, extra_log_level);
 	err = err ? : bpf_object_init_prog_arrays(obj);
 	err = err ? : bpf_object_prepare_struct_ops(obj);
 
@@ -8575,36 +8665,22 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
 		err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps);
 	}
 
-	/* clean up fd_array */
-	zfree(&obj->fd_array);
+	bpf_object_post_load_cleanup(obj);
+	obj->state = OBJ_LOADED; /* doesn't matter if successfully or not */
 
-	/* clean up module BTFs */
-	for (i = 0; i < obj->btf_module_cnt; i++) {
-		close(obj->btf_modules[i].fd);
-		btf__free(obj->btf_modules[i].btf);
-		free(obj->btf_modules[i].name);
+	if (err) {
+		bpf_object_unpin(obj);
+		bpf_object_unload(obj);
+		pr_warn("failed to load object '%s'\n", obj->path);
+		return libbpf_err(err);
	}
-	free(obj->btf_modules);
-
-	/* clean up vmlinux BTF */
-	btf__free(obj->btf_vmlinux);
-	obj->btf_vmlinux = NULL;
-
-	obj->loaded = true; /* doesn't matter if successfully or not */
-
-	if (err)
-		goto out;
 
 	return 0;
-out:
-	/* unpin any maps that were auto-pinned during load */
-	for (i = 0; i < obj->nr_maps; i++)
-		if (obj->maps[i].pinned && !obj->maps[i].reused)
-			bpf_map__unpin(&obj->maps[i], NULL);
+}
 
-	bpf_object_unload(obj);
-	pr_warn("failed to load object '%s'\n", obj->path);
-	return libbpf_err(err);
+int bpf_object__prepare(struct bpf_object *obj)
+{
+	return libbpf_err(bpf_object_prepare(obj, NULL));
 }
 
 int bpf_object__load(struct bpf_object *obj)
@@ -8854,7 +8930,7 @@ int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
 	if (!obj)
 		return libbpf_err(-ENOENT);
 
-	if (!obj->loaded) {
+	if (obj->state < OBJ_PREPARED) {
 		pr_warn("object not yet loaded; load it first\n");
 		return libbpf_err(-ENOENT);
 	}
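[Editor's note: a hedged sketch of the two-phase loading flow introduced by bpf_object__prepare() above; the object path is illustrative.]

struct bpf_object *obj;
int err;

obj = bpf_object__open("prog.bpf.o");
if (!obj)
	return -errno;

/* phase 1: probe kernel, create maps, relocate and sanitize programs */
err = bpf_object__prepare(obj);
/* between prepare and load, map FDs already exist and per-program
 * tweaks (e.g. bpf_program__set_log_buf()) are still allowed */
err = err ?: bpf_object__load(obj);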
@@ -8933,7 +9009,7 @@ int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
 	if (!obj)
 		return libbpf_err(-ENOENT);
 
-	if (!obj->loaded) {
+	if (obj->state < OBJ_LOADED) {
 		pr_warn("object not yet loaded; load it first\n");
 		return libbpf_err(-ENOENT);
 	}
@@ -9052,6 +9128,13 @@ void bpf_object__close(struct bpf_object *obj)
 	if (IS_ERR_OR_NULL(obj))
 		return;
 
+	/*
+	 * if user called bpf_object__prepare() without ever getting to
+	 * bpf_object__load(), we need to clean up stuff that is normally
+	 * cleaned up at the end of loading step
+	 */
+	bpf_object_post_load_cleanup(obj);
+
 	usdt_manager_free(obj->usdt_man);
 	obj->usdt_man = NULL;
 
@@ -9068,8 +9151,10 @@ void bpf_object__close(struct bpf_object *obj)
 	zfree(&obj->btf_custom_path);
 	zfree(&obj->kconfig);
 
-	for (i = 0; i < obj->nr_extern; i++)
+	for (i = 0; i < obj->nr_extern; i++) {
+		zfree(&obj->externs[i].name);
 		zfree(&obj->externs[i].essent_name);
+	}
 
 	zfree(&obj->externs);
 	obj->nr_extern = 0;
@@ -9120,7 +9205,7 @@ int bpf_object__btf_fd(const struct bpf_object *obj)
 
 int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version)
 {
-	if (obj->loaded)
+	if (obj->state >= OBJ_LOADED)
 		return libbpf_err(-EINVAL);
 
 	obj->kern_version = kern_version;
@@ -9133,12 +9218,12 @@ int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts)
 	struct bpf_gen *gen;
 
 	if (!opts)
-		return -EFAULT;
+		return libbpf_err(-EFAULT);
 	if (!OPTS_VALID(opts, gen_loader_opts))
-		return -EINVAL;
-	gen = calloc(sizeof(*gen), 1);
+		return libbpf_err(-EINVAL);
+	gen = calloc(1, sizeof(*gen));
 	if (!gen)
-		return -ENOMEM;
+		return libbpf_err(-ENOMEM);
 	gen->opts = opts;
 	gen->swapped_endian = !is_native_endianness(obj);
 	obj->gen_loader = gen;
@@ -9217,7 +9302,7 @@ bool bpf_program__autoload(const struct bpf_program *prog)
 
 int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
 {
-	if (prog->obj->loaded)
+	if (prog->obj->state >= OBJ_LOADED)
 		return libbpf_err(-EINVAL);
 
 	prog->autoload = autoload;
@@ -9249,14 +9334,14 @@ int bpf_program__set_insns(struct bpf_program *prog,
 {
 	struct bpf_insn *insns;
 
-	if (prog->obj->loaded)
-		return -EBUSY;
+	if (prog->obj->state >= OBJ_LOADED)
+		return libbpf_err(-EBUSY);
 
 	insns = libbpf_reallocarray(prog->insns, new_insn_cnt, sizeof(*insns));
 	/* NULL is a valid return from reallocarray if the new count is zero */
 	if (!insns && new_insn_cnt) {
 		pr_warn("prog '%s': failed to realloc prog code\n", prog->name);
-		return -ENOMEM;
+		return libbpf_err(-ENOMEM);
 	}
 	memcpy(insns, new_insns, new_insn_cnt * sizeof(*insns));
 
@@ -9292,7 +9377,7 @@ static int last_custom_sec_def_handler_id;
 
 int bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
 {
-	if (prog->obj->loaded)
+	if (prog->obj->state >= OBJ_LOADED)
 		return libbpf_err(-EBUSY);
 
 	/* if type is not changed, do nothing */
@@ -9323,7 +9408,7 @@ enum bpf_attach_type bpf_program__expected_attach_type(const struct bpf_program
 int bpf_program__set_expected_attach_type(struct bpf_program *prog,
 					  enum bpf_attach_type type)
 {
-	if (prog->obj->loaded)
+	if (prog->obj->state >= OBJ_LOADED)
 		return libbpf_err(-EBUSY);
 
 	prog->expected_attach_type = type;
@@ -9337,7 +9422,7 @@ __u32 bpf_program__flags(const struct bpf_program *prog)
 
 int bpf_program__set_flags(struct bpf_program *prog, __u32 flags)
 {
-	if (prog->obj->loaded)
+	if (prog->obj->state >= OBJ_LOADED)
 		return libbpf_err(-EBUSY);
 
 	prog->prog_flags = flags;
@@ -9351,7 +9436,7 @@ __u32 bpf_program__log_level(const struct bpf_program *prog)
 
 int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level)
 {
-	if (prog->obj->loaded)
+	if (prog->obj->state >= OBJ_LOADED)
 		return libbpf_err(-EBUSY);
 
 	prog->log_level = log_level;
@@ -9367,17 +9452,41 @@ const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_siz
 
 int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size)
 {
 	if (log_size && !log_buf)
-		return -EINVAL;
+		return libbpf_err(-EINVAL);
 	if (prog->log_size > UINT_MAX)
-		return -EINVAL;
-	if (prog->obj->loaded)
-		return -EBUSY;
+		return libbpf_err(-EINVAL);
+	if (prog->obj->state >= OBJ_LOADED)
+		return libbpf_err(-EBUSY);
 
 	prog->log_buf = log_buf;
 	prog->log_size = log_size;
 	return 0;
 }
 
+struct bpf_func_info *bpf_program__func_info(const struct bpf_program *prog)
+{
+	if (prog->func_info_rec_size != sizeof(struct bpf_func_info))
+		return libbpf_err_ptr(-EOPNOTSUPP);
+	return prog->func_info;
+}
+
+__u32 bpf_program__func_info_cnt(const struct bpf_program *prog)
+{
+	return prog->func_info_cnt;
+}
+
+struct bpf_line_info *bpf_program__line_info(const struct bpf_program *prog)
+{
+	if (prog->line_info_rec_size != sizeof(struct bpf_line_info))
+		return libbpf_err_ptr(-EOPNOTSUPP);
+	return prog->line_info;
+}
+
+__u32 bpf_program__line_info_cnt(const struct bpf_program *prog)
+{
+	return prog->line_info_cnt;
+}
+
 #define SEC_DEF(sec_pfx, ptype, atype, flags, ...) {			    \
 	.sec = (char *)sec_pfx,						    \
 	.prog_type = BPF_PROG_TYPE_##ptype,				    \
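[Editor's note: a hedged sketch using the func_info accessors added above; prog is a placeholder bpf_program handle.]

const struct bpf_func_info *fi = bpf_program__func_info(prog);
__u32 i, n = bpf_program__func_info_cnt(prog);

/* returns NULL (errno set to EOPNOTSUPP) if the record size is unexpected */
if (fi) {
	for (i = 0; i < n; i++)
		printf("insn_off=%u type_id=%u\n", fi[i].insn_off, fi[i].type_id);
}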
} err = map_btf_datasec_resize(map, size); if (err && err != -ENOENT) { @@ -10333,7 +10442,7 @@ int bpf_map__set_initial_value(struct bpf_map *map, { size_t actual_sz; - if (map->obj->loaded || map->reused) + if (map_is_created(map)) return libbpf_err(-EBUSY); if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG) @@ -10856,11 +10965,14 @@ struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *p } link->link.fd = pfd; } - if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) { - err = -errno; - pr_warn("prog '%s': failed to enable perf_event FD %d: %s\n", - prog->name, pfd, errstr(err)); - goto err_out; + + if (!OPTS_GET(opts, dont_enable, false)) { + if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) { + err = -errno; + pr_warn("prog '%s': failed to enable perf_event FD %d: %s\n", + prog->name, pfd, errstr(err)); + goto err_out; + } } return &link->link; @@ -11044,16 +11156,16 @@ static const char *tracefs_available_filter_functions_addrs(void) : TRACEFS"/available_filter_functions_addrs"; } -static void gen_kprobe_legacy_event_name(char *buf, size_t buf_sz, - const char *kfunc_name, size_t offset) +static void gen_probe_legacy_event_name(char *buf, size_t buf_sz, + const char *name, size_t offset) { static int index = 0; int i; - snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx_%d", getpid(), kfunc_name, offset, - __sync_fetch_and_add(&index, 1)); + snprintf(buf, buf_sz, "libbpf_%u_%d_%s_0x%zx", getpid(), + __sync_fetch_and_add(&index, 1), name, offset); - /* sanitize binary_path in the probe name */ + /* sanitize name in the probe name */ for (i = 0; buf[i]; i++) { if (!isalnum(buf[i])) buf[i] = '_'; @@ -11178,9 +11290,9 @@ int probe_kern_syscall_wrapper(int token_fd) return pfd >= 0 ? 1 : 0; } else { /* legacy mode */ - char probe_name[128]; + char probe_name[MAX_EVENT_NAME_LEN]; - gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name), syscall_name, 0); + gen_probe_legacy_event_name(probe_name, sizeof(probe_name), syscall_name, 0); if (add_kprobe_event_legacy(probe_name, false, syscall_name, 0) < 0) return 0; @@ -11236,10 +11348,10 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog, func_name, offset, -1 /* pid */, 0 /* ref_ctr_off */); } else { - char probe_name[256]; + char probe_name[MAX_EVENT_NAME_LEN]; - gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name), - func_name, offset); + gen_probe_legacy_event_name(probe_name, sizeof(probe_name), + func_name, offset); legacy_probe = strdup(probe_name); if (!legacy_probe) @@ -11375,9 +11487,33 @@ static int avail_kallsyms_cb(unsigned long long sym_addr, char sym_type, struct kprobe_multi_resolve *res = data->res; int err; - if (!bsearch(&sym_name, data->syms, data->cnt, sizeof(*data->syms), avail_func_cmp)) + if (!glob_match(sym_name, res->pattern)) return 0; + if (!bsearch(&sym_name, data->syms, data->cnt, sizeof(*data->syms), avail_func_cmp)) { + /* Some versions of kernel strip out .llvm.<hash> suffix from + * function names reported in available_filter_functions, but + * don't do so for kallsyms. While this is clearly a kernel + * bug (fixed by [0]) we try to accommodate that in libbpf to + * make multi-kprobe usability a bit better: if no match is + * found, we will strip .llvm. suffix and try one more time. 
+ * + * [0] fb6a421fb615 ("kallsyms: Match symbols exactly with CONFIG_LTO_CLANG") + */ + char sym_trim[256], *psym_trim = sym_trim, *sym_sfx; + + if (!(sym_sfx = strstr(sym_name, ".llvm."))) + return 0; + + /* psym_trim vs sym_trim dance is done to avoid pointer vs array + * coercion differences and get proper `const char **` pointer + * which avail_func_cmp() expects + */ + snprintf(sym_trim, sizeof(sym_trim), "%.*s", (int)(sym_sfx - sym_name), sym_name); + if (!bsearch(&psym_trim, data->syms, data->cnt, sizeof(*data->syms), avail_func_cmp)) + return 0; + } + err = libbpf_ensure_mem((void **)&res->addrs, &res->cap, sizeof(*res->addrs), res->cnt + 1); if (err) return err; @@ -11522,7 +11658,7 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog, struct bpf_link *link = NULL; const unsigned long *addrs; int err, link_fd, prog_fd; - bool retprobe, session; + bool retprobe, session, unique_match; const __u64 *cookies; const char **syms; size_t cnt; @@ -11541,6 +11677,7 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog, addrs = OPTS_GET(opts, addrs, false); cnt = OPTS_GET(opts, cnt, false); cookies = OPTS_GET(opts, cookies, false); + unique_match = OPTS_GET(opts, unique_match, false); if (!pattern && !addrs && !syms) return libbpf_err_ptr(-EINVAL); @@ -11548,6 +11685,8 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog, return libbpf_err_ptr(-EINVAL); if (!pattern && !cnt) return libbpf_err_ptr(-EINVAL); + if (!pattern && unique_match) + return libbpf_err_ptr(-EINVAL); if (addrs && syms) return libbpf_err_ptr(-EINVAL); @@ -11558,6 +11697,14 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog, err = libbpf_available_kallsyms_parse(&res); if (err) goto error; + + if (unique_match && res.cnt != 1) { + pr_warn("prog '%s': failed to find a unique match for '%s' (%zu matches)\n", + prog->name, pattern, res.cnt); + err = -EINVAL; + goto error; + } + addrs = res.addrs; cnt = res.cnt; } @@ -11748,20 +11895,6 @@ static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, stru return ret; } -static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz, - const char *binary_path, uint64_t offset) -{ - int i; - - snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx", getpid(), binary_path, (size_t)offset); - - /* sanitize binary_path in the probe name */ - for (i = 0; buf[i]; i++) { - if (!isalnum(buf[i])) - buf[i] = '_'; - } -} - static inline int add_uprobe_event_legacy(const char *probe_name, bool retprobe, const char *binary_path, size_t offset) { @@ -12185,13 +12318,14 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid, pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path, func_offset, pid, ref_ctr_off); } else { - char probe_name[PATH_MAX + 64]; + char probe_name[MAX_EVENT_NAME_LEN]; if (ref_ctr_off) return libbpf_err_ptr(-EINVAL); - gen_uprobe_legacy_event_name(probe_name, sizeof(probe_name), - binary_path, func_offset); + gen_probe_legacy_event_name(probe_name, sizeof(probe_name), + strrchr(binary_path, '/') ? 
: binary_path, + func_offset); legacy_probe = strdup(probe_name); if (!legacy_probe) @@ -12722,6 +12856,34 @@ struct bpf_link *bpf_program__attach_xdp(const struct bpf_program *prog, int ifi } struct bpf_link * +bpf_program__attach_cgroup_opts(const struct bpf_program *prog, int cgroup_fd, + const struct bpf_cgroup_opts *opts) +{ + LIBBPF_OPTS(bpf_link_create_opts, link_create_opts); + __u32 relative_id; + int relative_fd; + + if (!OPTS_VALID(opts, bpf_cgroup_opts)) + return libbpf_err_ptr(-EINVAL); + + relative_id = OPTS_GET(opts, relative_id, 0); + relative_fd = OPTS_GET(opts, relative_fd, 0); + + if (relative_fd && relative_id) { + pr_warn("prog '%s': relative_fd and relative_id cannot be set at the same time\n", + prog->name); + return libbpf_err_ptr(-EINVAL); + } + + link_create_opts.cgroup.expected_revision = OPTS_GET(opts, expected_revision, 0); + link_create_opts.cgroup.relative_fd = relative_fd; + link_create_opts.cgroup.relative_id = relative_id; + link_create_opts.flags = OPTS_GET(opts, flags, 0); + + return bpf_program_attach_fd(prog, cgroup_fd, "cgroup", &link_create_opts); +} + +struct bpf_link * bpf_program__attach_tcx(const struct bpf_program *prog, int ifindex, const struct bpf_tcx_opts *opts) { @@ -12811,7 +12973,7 @@ struct bpf_link *bpf_program__attach_freplace(const struct bpf_program *prog, if (target_fd) { LIBBPF_OPTS(bpf_link_create_opts, target_opts); - btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd); + btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd, prog->obj->token_fd); if (btf_id < 0) return libbpf_err_ptr(btf_id); @@ -13023,17 +13185,17 @@ int bpf_link__update_map(struct bpf_link *link, const struct bpf_map *map) int err; if (!bpf_map__is_struct_ops(map)) - return -EINVAL; + return libbpf_err(-EINVAL); if (map->fd < 0) { pr_warn("map '%s': can't use BPF map without FD (was it created?)\n", map->name); - return -EINVAL; + return libbpf_err(-EINVAL); } st_ops_link = container_of(link, struct bpf_link_struct_ops, link); /* Ensure the type of a link is correct */ if (st_ops_link->map_fd < 0) - return -EINVAL; + return libbpf_err(-EINVAL); err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0); /* It can be EBUSY if the map has been used to create or @@ -13259,7 +13421,6 @@ struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt, attr.config = PERF_COUNT_SW_BPF_OUTPUT; attr.type = PERF_TYPE_SOFTWARE; attr.sample_type = PERF_SAMPLE_RAW; - attr.sample_period = sample_period; attr.wakeup_events = sample_period; p.attr = &attr; @@ -13619,7 +13780,7 @@ int bpf_program__set_attach_target(struct bpf_program *prog, if (!prog || attach_prog_fd < 0) return libbpf_err(-EINVAL); - if (prog->obj->loaded) + if (prog->obj->state >= OBJ_LOADED) return libbpf_err(-EINVAL); if (attach_prog_fd && !attach_func_name) { @@ -13632,7 +13793,7 @@ int bpf_program__set_attach_target(struct bpf_program *prog, if (attach_prog_fd) { btf_id = libbpf_find_prog_btf_id(attach_func_name, - attach_prog_fd); + attach_prog_fd, prog->obj->token_fd); if (btf_id < 0) return libbpf_err(btf_id); } else { @@ -13987,6 +14148,12 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s) } link = map_skel->link; + if (!link) { + pr_warn("map '%s': BPF map skeleton link is uninitialized\n", + bpf_map__name(map)); + continue; + } + if (*link) continue; diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index b2ce3a72b11d..455a957cb702 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -242,6 +242,19 @@ bpf_object__open_mem(const 
void *obj_buf, size_t obj_buf_sz, const struct bpf_object_open_opts *opts); /** + * @brief **bpf_object__prepare()** prepares BPF object for loading: + * performs ELF processing, relocations, prepares final state of BPF program + * instructions (accessible with bpf_program__insns()), creates and + * (potentially) pins maps. Leaves BPF object in the state ready for program + * loading. + * @param obj Pointer to a valid BPF object instance returned by + * **bpf_object__open*()** API + * @return 0, on success; negative error code, otherwise, error code is + * stored in errno + */ +int bpf_object__prepare(struct bpf_object *obj); + +/** * @brief **bpf_object__load()** loads BPF object into kernel. * @param obj Pointer to a valid BPF object instance returned by * **bpf_object__open*()** APIs @@ -486,9 +499,11 @@ struct bpf_perf_event_opts { __u64 bpf_cookie; /* don't use BPF link when attach BPF program */ bool force_ioctl_attach; + /* don't automatically enable the event */ + bool dont_enable; size_t :0; }; -#define bpf_perf_event_opts__last_field force_ioctl_attach +#define bpf_perf_event_opts__last_field dont_enable LIBBPF_API struct bpf_link * bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd); @@ -552,10 +567,12 @@ struct bpf_kprobe_multi_opts { bool retprobe; /* create session kprobes */ bool session; + /* enforce unique match */ + bool unique_match; size_t :0; }; -#define bpf_kprobe_multi_opts__last_field session +#define bpf_kprobe_multi_opts__last_field unique_match LIBBPF_API struct bpf_link * bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog, @@ -862,6 +879,21 @@ LIBBPF_API struct bpf_link * bpf_program__attach_netkit(const struct bpf_program *prog, int ifindex, const struct bpf_netkit_opts *opts); +struct bpf_cgroup_opts { + /* size of this struct, for forward/backward compatibility */ + size_t sz; + __u32 flags; + __u32 relative_fd; + __u32 relative_id; + __u64 expected_revision; + size_t :0; +}; +#define bpf_cgroup_opts__last_field expected_revision + +LIBBPF_API struct bpf_link * +bpf_program__attach_cgroup_opts(const struct bpf_program *prog, int cgroup_fd, + const struct bpf_cgroup_opts *opts); + struct bpf_map; LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map); @@ -925,6 +957,12 @@ LIBBPF_API int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_le LIBBPF_API const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size); LIBBPF_API int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size); +LIBBPF_API struct bpf_func_info *bpf_program__func_info(const struct bpf_program *prog); +LIBBPF_API __u32 bpf_program__func_info_cnt(const struct bpf_program *prog); + +LIBBPF_API struct bpf_line_info *bpf_program__line_info(const struct bpf_program *prog); +LIBBPF_API __u32 bpf_program__line_info_cnt(const struct bpf_program *prog); + /** * @brief **bpf_program__set_attach_target()** sets BTF-based attach target * for supported BPF program types: @@ -1268,6 +1306,7 @@ enum bpf_tc_attach_point { BPF_TC_INGRESS = 1 << 0, BPF_TC_EGRESS = 1 << 1, BPF_TC_CUSTOM = 1 << 2, + BPF_TC_QDISC = 1 << 3, }; #define BPF_TC_PARENT(a, b) \ @@ -1282,9 +1321,11 @@ struct bpf_tc_hook { int ifindex; enum bpf_tc_attach_point attach_point; __u32 parent; + __u32 handle; + const char *qdisc; size_t :0; }; -#define bpf_tc_hook__last_field parent +#define bpf_tc_hook__last_field qdisc struct bpf_tc_opts { size_t sz; @@ -1796,9 +1837,14 @@ struct bpf_linker_file_opts { struct 
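A minimal sketch of the open/prepare/load split that bpf_object__prepare() enables, e.g. to inspect final, post-relocation instructions before loading ("prog.bpf.o" and "handler" are placeholders, not part of this diff):

#include <errno.h>
#include <bpf/libbpf.h>

static int load_with_prepare(void)
{
	struct bpf_object *obj = bpf_object__open("prog.bpf.o"); /* placeholder path */
	struct bpf_program *prog;
	int err;

	if (!obj)
		return -errno;
	err = bpf_object__prepare(obj); /* ELF processing, relocations, map creation */
	if (err)
		goto out;
	prog = bpf_object__find_program_by_name(obj, "handler"); /* placeholder name */
	if (prog) /* instructions are final here, before verification */
		(void)bpf_program__insn_cnt(prog);
	err = bpf_object__load(obj);
out:
	bpf_object__close(obj);
	return err;
}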
bpf_linker; LIBBPF_API struct bpf_linker *bpf_linker__new(const char *filename, struct bpf_linker_opts *opts); +LIBBPF_API struct bpf_linker *bpf_linker__new_fd(int fd, struct bpf_linker_opts *opts); LIBBPF_API int bpf_linker__add_file(struct bpf_linker *linker, const char *filename, const struct bpf_linker_file_opts *opts); +LIBBPF_API int bpf_linker__add_fd(struct bpf_linker *linker, int fd, + const struct bpf_linker_file_opts *opts); +LIBBPF_API int bpf_linker__add_buf(struct bpf_linker *linker, void *buf, size_t buf_sz, + const struct bpf_linker_file_opts *opts); LIBBPF_API int bpf_linker__finalize(struct bpf_linker *linker); LIBBPF_API void bpf_linker__free(struct bpf_linker *linker); diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 54b6f312cfa8..d7bd463e7017 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -432,4 +432,20 @@ LIBBPF_1.5.0 { } LIBBPF_1.4.0; LIBBPF_1.6.0 { + global: + bpf_linker__add_buf; + bpf_linker__add_fd; + bpf_linker__new_fd; + bpf_object__prepare; + bpf_prog_stream_read; + bpf_program__attach_cgroup_opts; + bpf_program__func_info; + bpf_program__func_info_cnt; + bpf_program__line_info; + bpf_program__line_info_cnt; + btf__add_decl_attr; + btf__add_type_attr; } LIBBPF_1.5.0; + +LIBBPF_1.7.0 { +} LIBBPF_1.6.0; diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index de498e2dd6b0..477a3b3389a0 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -409,6 +409,7 @@ int libbpf__load_raw_btf(const char *raw_types, size_t types_len, int btf_load_into_kernel(struct btf *btf, char *log_buf, size_t log_sz, __u32 log_level, int token_fd); +struct btf *btf_load_from_kernel(__u32 id, struct btf *base_btf, int token_fd); struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf); void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type, @@ -666,6 +667,15 @@ static inline int sys_dup3(int oldfd, int newfd, int flags) return syscall(__NR_dup3, oldfd, newfd, flags); } +/* Some versions of Android don't provide memfd_create() in their libc + * implementation, so avoid complications and just go straight to Linux + * syscall. + */ +static inline int sys_memfd_create(const char *name, unsigned flags) +{ + return syscall(__NR_memfd_create, name, flags); +} + /* Point *fixed_fd* to the same file that *tmp_fd* points to. * Regardless of success, *tmp_fd* is closed. * Whatever *fixed_fd* pointed to is closed silently. diff --git a/tools/lib/bpf/libbpf_version.h b/tools/lib/bpf/libbpf_version.h index 28c58fb17250..99331e317dee 100644 --- a/tools/lib/bpf/libbpf_version.h +++ b/tools/lib/bpf/libbpf_version.h @@ -4,6 +4,6 @@ #define __LIBBPF_VERSION_H #define LIBBPF_MAJOR_VERSION 1 -#define LIBBPF_MINOR_VERSION 6 +#define LIBBPF_MINOR_VERSION 7 #endif /* __LIBBPF_VERSION_H */ diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c index cf71d149fe26..a469e5d4fee7 100644 --- a/tools/lib/bpf/linker.c +++ b/tools/lib/bpf/linker.c @@ -4,6 +4,10 @@ * * Copyright (c) 2021 Facebook */ +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + #include <stdbool.h> #include <stddef.h> #include <stdio.h> @@ -16,6 +20,7 @@ #include <elf.h> #include <libelf.h> #include <fcntl.h> +#include <sys/mman.h> #include "libbpf.h" #include "btf.h" #include "libbpf_internal.h" @@ -152,15 +157,19 @@ struct bpf_linker { /* global (including extern) ELF symbols */ int glob_sym_cnt; struct glob_sym *glob_syms; + + bool fd_is_owned; }; #define pr_warn_elf(fmt, ...) 
\ libbpf_print(LIBBPF_WARN, "libbpf: " fmt ": %s\n", ##__VA_ARGS__, elf_errmsg(-1)) -static int init_output_elf(struct bpf_linker *linker, const char *file); +static int init_output_elf(struct bpf_linker *linker); -static int linker_load_obj_file(struct bpf_linker *linker, const char *filename, - const struct bpf_linker_file_opts *opts, +static int bpf_linker_add_file(struct bpf_linker *linker, int fd, + const char *filename); + +static int linker_load_obj_file(struct bpf_linker *linker, struct src_obj *obj); static int linker_sanity_check_elf(struct src_obj *obj); static int linker_sanity_check_elf_symtab(struct src_obj *obj, struct src_sec *sec); @@ -191,7 +200,7 @@ void bpf_linker__free(struct bpf_linker *linker) if (linker->elf) elf_end(linker->elf); - if (linker->fd >= 0) + if (linker->fd >= 0 && linker->fd_is_owned) close(linker->fd); strset__free(linker->strtab_strs); @@ -233,9 +242,63 @@ struct bpf_linker *bpf_linker__new(const char *filename, struct bpf_linker_opts if (!linker) return errno = ENOMEM, NULL; - linker->fd = -1; + linker->filename = strdup(filename); + if (!linker->filename) { + err = -ENOMEM; + goto err_out; + } - err = init_output_elf(linker, filename); + linker->fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC, 0644); + if (linker->fd < 0) { + err = -errno; + pr_warn("failed to create '%s': %d\n", filename, err); + goto err_out; + } + linker->fd_is_owned = true; + + err = init_output_elf(linker); + if (err) + goto err_out; + + return linker; + +err_out: + bpf_linker__free(linker); + return errno = -err, NULL; +} + +struct bpf_linker *bpf_linker__new_fd(int fd, struct bpf_linker_opts *opts) +{ + struct bpf_linker *linker; + char filename[32]; + int err; + + if (fd < 0) + return errno = EINVAL, NULL; + + if (!OPTS_VALID(opts, bpf_linker_opts)) + return errno = EINVAL, NULL; + + if (elf_version(EV_CURRENT) == EV_NONE) { + pr_warn_elf("libelf initialization failed"); + return errno = EINVAL, NULL; + } + + linker = calloc(1, sizeof(*linker)); + if (!linker) + return errno = ENOMEM, NULL; + + snprintf(filename, sizeof(filename), "fd:%d", fd); + linker->filename = strdup(filename); + if (!linker->filename) { + err = -ENOMEM; + goto err_out; + } + + linker->fd = fd; + linker->fd_is_owned = false; + + err = init_output_elf(linker); if (err) goto err_out; @@ -294,23 +357,12 @@ static Elf64_Sym *add_new_sym(struct bpf_linker *linker, size_t *sym_idx) return sym; } -static int init_output_elf(struct bpf_linker *linker, const char *file) +static int init_output_elf(struct bpf_linker *linker) { int err, str_off; Elf64_Sym *init_sym; struct dst_sec *sec; - linker->filename = strdup(file); - if (!linker->filename) - return -ENOMEM; - - linker->fd = open(file, O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC, 0644); - if (linker->fd < 0) { - err = -errno; - pr_warn("failed to create '%s': %s\n", file, errstr(err)); - return err; - } - linker->elf = elf_begin(linker->fd, ELF_C_WRITE, NULL); if (!linker->elf) { pr_warn_elf("failed to create ELF object"); @@ -436,19 +488,16 @@ static int init_output_elf(struct bpf_linker *linker, const char *file) return 0; } -int bpf_linker__add_file(struct bpf_linker *linker, const char *filename, - const struct bpf_linker_file_opts *opts) +static int bpf_linker_add_file(struct bpf_linker *linker, int fd, + const char *filename) { struct src_obj obj = {}; int err = 0; - if (!OPTS_VALID(opts, bpf_linker_file_opts)) - return libbpf_err(-EINVAL); - - if (!linker->elf) - return libbpf_err(-EINVAL); + obj.filename = filename; + obj.fd = fd; - err = 
err ?: linker_load_obj_file(linker, filename, opts, &obj); + err = err ?: linker_load_obj_file(linker, &obj); err = err ?: linker_append_sec_data(linker, &obj); err = err ?: linker_append_elf_syms(linker, &obj); err = err ?: linker_append_elf_relos(linker, &obj); @@ -463,12 +512,91 @@ int bpf_linker__add_file(struct bpf_linker *linker, const char *filename, free(obj.sym_map); if (obj.elf) elf_end(obj.elf); - if (obj.fd >= 0) - close(obj.fd); + return err; +} + +int bpf_linker__add_file(struct bpf_linker *linker, const char *filename, + const struct bpf_linker_file_opts *opts) +{ + int fd, err; + + if (!OPTS_VALID(opts, bpf_linker_file_opts)) + return libbpf_err(-EINVAL); + + if (!linker->elf) + return libbpf_err(-EINVAL); + + fd = open(filename, O_RDONLY | O_CLOEXEC); + if (fd < 0) { + err = -errno; + pr_warn("failed to open file '%s': %s\n", filename, errstr(err)); + return libbpf_err(err); + } + + err = bpf_linker_add_file(linker, fd, filename); + close(fd); + return libbpf_err(err); +} + +int bpf_linker__add_fd(struct bpf_linker *linker, int fd, + const struct bpf_linker_file_opts *opts) +{ + char filename[32]; + int err; + + if (!OPTS_VALID(opts, bpf_linker_file_opts)) + return libbpf_err(-EINVAL); + + if (!linker->elf) + return libbpf_err(-EINVAL); + + if (fd < 0) + return libbpf_err(-EINVAL); + + snprintf(filename, sizeof(filename), "fd:%d", fd); + err = bpf_linker_add_file(linker, fd, filename); return libbpf_err(err); } +int bpf_linker__add_buf(struct bpf_linker *linker, void *buf, size_t buf_sz, + const struct bpf_linker_file_opts *opts) +{ + char filename[32]; + int fd, written, ret; + + if (!OPTS_VALID(opts, bpf_linker_file_opts)) + return libbpf_err(-EINVAL); + + if (!linker->elf) + return libbpf_err(-EINVAL); + + snprintf(filename, sizeof(filename), "mem:%p+%zu", buf, buf_sz); + + fd = sys_memfd_create(filename, 0); + if (fd < 0) { + ret = -errno; + pr_warn("failed to create memfd '%s': %s\n", filename, errstr(ret)); + return libbpf_err(ret); + } + + written = 0; + while (written < buf_sz) { + ret = write(fd, buf, buf_sz); + if (ret < 0) { + ret = -errno; + pr_warn("failed to write '%s': %s\n", filename, errstr(ret)); + goto err_out; + } + written += ret; + } + + ret = bpf_linker_add_file(linker, fd, filename); +err_out: + close(fd); + return libbpf_err(ret); +} + static bool is_dwarf_sec_name(const char *name) { /* approximation, but the actual list is too long */ @@ -534,8 +662,7 @@ static struct src_sec *add_src_sec(struct src_obj *obj, const char *sec_name) return sec; } -static int linker_load_obj_file(struct bpf_linker *linker, const char *filename, - const struct bpf_linker_file_opts *opts, +static int linker_load_obj_file(struct bpf_linker *linker, struct src_obj *obj) { int err = 0; @@ -554,36 +681,26 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename, #error "Unknown __BYTE_ORDER__" #endif - pr_debug("linker: adding object file '%s'...\n", filename); - - obj->filename = filename; + pr_debug("linker: adding object file '%s'...\n", obj->filename); - obj->fd = open(filename, O_RDONLY | O_CLOEXEC); - if (obj->fd < 0) { - err = -errno; - pr_warn("failed to open file '%s': %s\n", filename, errstr(err)); - return err; - } obj->elf = elf_begin(obj->fd, ELF_C_READ_MMAP, NULL); if (!obj->elf) { - err = -errno; - pr_warn_elf("failed to parse ELF file '%s'", filename); - return err; + pr_warn_elf("failed to parse ELF file '%s'", obj->filename); + return -EINVAL; } /* Sanity check ELF file high-level properties */ ehdr = elf64_getehdr(obj->elf); if 
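Usage sketch for the new linker entry points (file names and the input buffer are hypothetical); note that bpf_linker__add_buf() stages the buffer through a memfd, so the caller's memory is not retained after the call:

#include <errno.h>
#include <bpf/libbpf.h>

static int link_two_objects(void *obj_buf, size_t obj_buf_sz) /* hypothetical inputs */
{
	struct bpf_linker *linker = bpf_linker__new("/tmp/linked.bpf.o", NULL);
	int err;

	if (!linker)
		return -errno;
	err = bpf_linker__add_file(linker, "a.bpf.o", NULL); /* placeholder object */
	err = err ?: bpf_linker__add_buf(linker, obj_buf, obj_buf_sz, NULL);
	err = err ?: bpf_linker__finalize(linker);
	bpf_linker__free(linker);
	return err;
}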
(!ehdr) { - err = -errno; - pr_warn_elf("failed to get ELF header for %s", filename); - return err; + pr_warn_elf("failed to get ELF header for %s", obj->filename); + return -EINVAL; } /* Linker output endianness set by first input object */ obj_byteorder = ehdr->e_ident[EI_DATA]; if (obj_byteorder != ELFDATA2LSB && obj_byteorder != ELFDATA2MSB) { err = -EOPNOTSUPP; - pr_warn("unknown byte order of ELF file %s\n", filename); + pr_warn("unknown byte order of ELF file %s\n", obj->filename); return err; } if (link_byteorder == ELFDATANONE) { @@ -593,7 +710,7 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename, obj_byteorder == ELFDATA2MSB ? "big" : "little"); } else if (link_byteorder != obj_byteorder) { err = -EOPNOTSUPP; - pr_warn("byte order mismatch with ELF file %s\n", filename); + pr_warn("byte order mismatch with ELF file %s\n", obj->filename); return err; } @@ -601,14 +718,13 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename, || ehdr->e_machine != EM_BPF || ehdr->e_ident[EI_CLASS] != ELFCLASS64) { err = -EOPNOTSUPP; - pr_warn_elf("unsupported kind of ELF file %s", filename); + pr_warn_elf("unsupported kind of ELF file %s", obj->filename); return err; } if (elf_getshdrstrndx(obj->elf, &obj->shstrs_sec_idx)) { - err = -errno; - pr_warn_elf("failed to get SHSTRTAB section index for %s", filename); - return err; + pr_warn_elf("failed to get SHSTRTAB section index for %s", obj->filename); + return -EINVAL; } scn = NULL; @@ -618,26 +734,23 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename, shdr = elf64_getshdr(scn); if (!shdr) { - err = -errno; pr_warn_elf("failed to get section #%zu header for %s", - sec_idx, filename); - return err; + sec_idx, obj->filename); + return -EINVAL; } sec_name = elf_strptr(obj->elf, obj->shstrs_sec_idx, shdr->sh_name); if (!sec_name) { - err = -errno; pr_warn_elf("failed to get section #%zu name for %s", - sec_idx, filename); - return err; + sec_idx, obj->filename); + return -EINVAL; } data = elf_getdata(scn, 0); if (!data) { - err = -errno; pr_warn_elf("failed to get section #%zu (%s) data from %s", - sec_idx, sec_name, filename); - return err; + sec_idx, sec_name, obj->filename); + return -EINVAL; } sec = add_src_sec(obj, sec_name); @@ -672,7 +785,7 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename, err = libbpf_get_error(obj->btf); if (err) { pr_warn("failed to parse .BTF from %s: %s\n", - filename, errstr(err)); + obj->filename, errstr(err)); return err; } sec->skipped = true; @@ -683,7 +796,7 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename, err = libbpf_get_error(obj->btf_ext); if (err) { pr_warn("failed to parse .BTF.ext from '%s': %s\n", - filename, errstr(err)); + obj->filename, errstr(err)); return err; } sec->skipped = true; @@ -700,7 +813,7 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename, break; default: pr_warn("unrecognized section #%zu (%s) in %s\n", - sec_idx, sec_name, filename); + sec_idx, sec_name, obj->filename); err = -EINVAL; return err; } @@ -1263,7 +1376,7 @@ static int linker_append_sec_data(struct bpf_linker *linker, struct src_obj *obj } else { if (!secs_match(dst_sec, src_sec)) { pr_warn("ELF sections %s are incompatible\n", src_sec->sec_name); - return -1; + return -EINVAL; } /* "license" and "version" sections are deduped */ @@ -2050,7 +2163,7 @@ add_sym: obj->sym_map[src_sym_idx] = dst_sym_idx; - if (sym_type == STT_SECTION && dst_sym) 
{ + if (sym_type == STT_SECTION && dst_sec) { dst_sec->sec_sym_idx = dst_sym_idx; dst_sym->st_value = 0; } @@ -2110,7 +2223,7 @@ static int linker_append_elf_relos(struct bpf_linker *linker, struct src_obj *ob } } else if (!secs_match(dst_sec, src_sec)) { pr_warn("sections %s are not compatible\n", src_sec->sec_name); - return -1; + return -EINVAL; } /* shdr->sh_link points to SYMTAB */ @@ -2680,22 +2793,23 @@ int bpf_linker__finalize(struct bpf_linker *linker) /* Finalize ELF layout */ if (elf_update(linker->elf, ELF_C_NULL) < 0) { - err = -errno; + err = -EINVAL; pr_warn_elf("failed to finalize ELF layout"); return libbpf_err(err); } /* Write out final ELF contents */ if (elf_update(linker->elf, ELF_C_WRITE) < 0) { - err = -errno; + err = -EINVAL; pr_warn_elf("failed to write ELF contents"); return libbpf_err(err); } elf_end(linker->elf); - close(linker->fd); - linker->elf = NULL; + + if (linker->fd_is_owned) + close(linker->fd); linker->fd = -1; return 0; diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c index 68a2def17175..c997e69d507f 100644 --- a/tools/lib/bpf/netlink.c +++ b/tools/lib/bpf/netlink.c @@ -529,9 +529,9 @@ int bpf_xdp_query_id(int ifindex, int flags, __u32 *prog_id) } -typedef int (*qdisc_config_t)(struct libbpf_nla_req *req); +typedef int (*qdisc_config_t)(struct libbpf_nla_req *req, const struct bpf_tc_hook *hook); -static int clsact_config(struct libbpf_nla_req *req) +static int clsact_config(struct libbpf_nla_req *req, const struct bpf_tc_hook *hook) { req->tc.tcm_parent = TC_H_CLSACT; req->tc.tcm_handle = TC_H_MAKE(TC_H_CLSACT, 0); @@ -539,6 +539,16 @@ static int clsact_config(struct libbpf_nla_req *req) return nlattr_add(req, TCA_KIND, "clsact", sizeof("clsact")); } +static int qdisc_config(struct libbpf_nla_req *req, const struct bpf_tc_hook *hook) +{ + const char *qdisc = OPTS_GET(hook, qdisc, NULL); + + req->tc.tcm_parent = OPTS_GET(hook, parent, TC_H_ROOT); + req->tc.tcm_handle = OPTS_GET(hook, handle, 0); + + return nlattr_add(req, TCA_KIND, qdisc, strlen(qdisc) + 1); +} + static int attach_point_to_config(struct bpf_tc_hook *hook, qdisc_config_t *config) { @@ -552,6 +562,9 @@ static int attach_point_to_config(struct bpf_tc_hook *hook, return 0; case BPF_TC_CUSTOM: return -EOPNOTSUPP; + case BPF_TC_QDISC: + *config = &qdisc_config; + return 0; default: return -EINVAL; } @@ -596,7 +609,7 @@ static int tc_qdisc_modify(struct bpf_tc_hook *hook, int cmd, int flags) req.tc.tcm_family = AF_UNSPEC; req.tc.tcm_ifindex = OPTS_GET(hook, ifindex, 0); - ret = config(&req); + ret = config(&req, hook); if (ret < 0) return ret; @@ -639,6 +652,7 @@ int bpf_tc_hook_destroy(struct bpf_tc_hook *hook) case BPF_TC_INGRESS: case BPF_TC_EGRESS: return libbpf_err(__bpf_tc_detach(hook, NULL, true)); + case BPF_TC_QDISC: case BPF_TC_INGRESS | BPF_TC_EGRESS: return libbpf_err(tc_qdisc_delete(hook)); case BPF_TC_CUSTOM: diff --git a/tools/lib/bpf/nlattr.c b/tools/lib/bpf/nlattr.c index 975e265eab3b..06663f9ea581 100644 --- a/tools/lib/bpf/nlattr.c +++ b/tools/lib/bpf/nlattr.c @@ -63,16 +63,16 @@ static int validate_nla(struct nlattr *nla, int maxtype, minlen = nla_attr_minlen[pt->type]; if (libbpf_nla_len(nla) < minlen) - return -1; + return -EINVAL; if (pt->maxlen && libbpf_nla_len(nla) > pt->maxlen) - return -1; + return -EINVAL; if (pt->type == LIBBPF_NLA_STRING) { char *data = libbpf_nla_data(nla); if (data[libbpf_nla_len(nla) - 1] != '\0') - return -1; + return -EINVAL; } return 0; @@ -118,19 +118,18 @@ int libbpf_nla_parse(struct nlattr *tb[], int maxtype, struct 
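For the new BPF_TC_QDISC attach point, creating a qdisc might look like the following sketch (the ifindex, handle value and "fq" qdisc kind are illustrative only, not taken from this diff):

#include <linux/pkt_sched.h> /* TC_H_ROOT */
#include <bpf/libbpf.h>

static int create_fq_qdisc(int ifindex)
{
	LIBBPF_OPTS(bpf_tc_hook, hook,
		    .ifindex = ifindex,
		    .attach_point = BPF_TC_QDISC,
		    .parent = TC_H_ROOT,
		    .handle = 0x10000, /* illustrative handle */
		    .qdisc = "fq");    /* illustrative qdisc kind */

	return bpf_tc_hook_create(&hook); /* bpf_tc_hook_destroy() tears it down */
}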
nlattr *head, if (policy) { err = validate_nla(nla, maxtype, policy); if (err < 0) - goto errout; + return err; } - if (tb[type]) + if (tb[type]) { pr_warn("Attribute of type %#x found multiple times in message, " "previous attribute is being ignored.\n", type); + } tb[type] = nla; } - err = 0; -errout: - return err; + return 0; } /** diff --git a/tools/lib/bpf/relo_core.c b/tools/lib/bpf/relo_core.c index 7632e9d41827..2b83c98a1137 100644 --- a/tools/lib/bpf/relo_core.c +++ b/tools/lib/bpf/relo_core.c @@ -683,7 +683,7 @@ static int bpf_core_calc_field_relo(const char *prog_name, { const struct bpf_core_accessor *acc; const struct btf_type *t; - __u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id; + __u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id, elem_id; const struct btf_member *m; const struct btf_type *mt; bool bitfield; @@ -706,8 +706,14 @@ static int bpf_core_calc_field_relo(const char *prog_name, if (!acc->name) { if (relo->kind == BPF_CORE_FIELD_BYTE_OFFSET) { *val = spec->bit_offset / 8; - /* remember field size for load/store mem size */ - sz = btf__resolve_size(spec->btf, acc->type_id); + /* remember field size for load/store mem size; + * note, for arrays we care about individual element + * sizes, not the overall array size + */ + t = skip_mods_and_typedefs(spec->btf, acc->type_id, &elem_id); + while (btf_is_array(t)) + t = skip_mods_and_typedefs(spec->btf, btf_array(t)->type, &elem_id); + sz = btf__resolve_size(spec->btf, elem_id); if (sz < 0) return -EINVAL; *field_sz = sz; @@ -767,7 +773,17 @@ static int bpf_core_calc_field_relo(const char *prog_name, case BPF_CORE_FIELD_BYTE_OFFSET: *val = byte_off; if (!bitfield) { - *field_sz = byte_sz; + /* remember field size for load/store mem size; + * note, for arrays we care about individual element + * sizes, not the overall array size + */ + t = skip_mods_and_typedefs(spec->btf, field_type_id, &elem_id); + while (btf_is_array(t)) + t = skip_mods_and_typedefs(spec->btf, btf_array(t)->type, &elem_id); + sz = btf__resolve_size(spec->btf, elem_id); + if (sz < 0) + return -EINVAL; + *field_sz = sz; *type_id = field_type_id; } break; diff --git a/tools/lib/bpf/str_error.c b/tools/lib/bpf/str_error.c index 8743049e32b7..9a541762f54c 100644 --- a/tools/lib/bpf/str_error.c +++ b/tools/lib/bpf/str_error.c @@ -36,7 +36,7 @@ char *libbpf_strerror_r(int err, char *dst, int len) return dst; } -const char *errstr(int err) +const char *libbpf_errstr(int err) { static __thread char buf[12]; diff --git a/tools/lib/bpf/str_error.h b/tools/lib/bpf/str_error.h index 66ffebde0684..53e7fbffc13e 100644 --- a/tools/lib/bpf/str_error.h +++ b/tools/lib/bpf/str_error.h @@ -7,10 +7,13 @@ char *libbpf_strerror_r(int err, char *dst, int len); /** - * @brief **errstr()** returns string corresponding to numeric errno + * @brief **libbpf_errstr()** returns string corresponding to numeric errno * @param err negative numeric errno * @return pointer to string representation of the errno, that is invalidated * upon the next call. */ -const char *errstr(int err); +const char *libbpf_errstr(int err); + +#define errstr(err) libbpf_errstr(err) + #endif /* __LIBBPF_STR_ERROR_H */ diff --git a/tools/lib/bpf/usdt.bpf.h b/tools/lib/bpf/usdt.bpf.h index b811f754939f..2a7865c8e3fe 100644 --- a/tools/lib/bpf/usdt.bpf.h +++ b/tools/lib/bpf/usdt.bpf.h @@ -108,6 +108,38 @@ int bpf_usdt_arg_cnt(struct pt_regs *ctx) return spec->arg_cnt; } +/* Returns the size in bytes of the #*arg_num* (zero-indexed) USDT argument. 
+ * Returns negative error if argument is not found or arg_num is invalid. + */ +static __always_inline +int bpf_usdt_arg_size(struct pt_regs *ctx, __u64 arg_num) +{ + struct __bpf_usdt_arg_spec *arg_spec; + struct __bpf_usdt_spec *spec; + int spec_id; + + spec_id = __bpf_usdt_spec_id(ctx); + if (spec_id < 0) + return -ESRCH; + + spec = bpf_map_lookup_elem(&__bpf_usdt_specs, &spec_id); + if (!spec) + return -ESRCH; + + if (arg_num >= BPF_USDT_MAX_ARG_CNT) + return -ENOENT; + barrier_var(arg_num); + if (arg_num >= spec->arg_cnt) + return -ENOENT; + + arg_spec = &spec->args[arg_num]; + + /* arg_spec->arg_bitshift = 64 - arg_sz * 8 + * so: arg_sz = (64 - arg_spec->arg_bitshift) / 8 + */ + return (unsigned int)(64 - arg_spec->arg_bitshift) / 8; +} + /* Fetch USDT argument #*arg_num* (zero-indexed) and put its value into *res. * Returns 0 on success; negative error, otherwise. * On error *res is guaranteed to be set to zero. diff --git a/tools/lib/bpf/usdt.c b/tools/lib/bpf/usdt.c index 5f085736c6c4..3373b9d45ac4 100644 --- a/tools/lib/bpf/usdt.c +++ b/tools/lib/bpf/usdt.c @@ -59,7 +59,7 @@ * * STAP_PROBE3(my_usdt_provider, my_usdt_probe_name, 123, x, &y); * - * USDT is identified by it's <provider-name>:<probe-name> pair of names. Each + * USDT is identified by its <provider-name>:<probe-name> pair of names. Each * individual USDT has a fixed number of arguments (3 in the above example) * and specifies values of each argument as if it was a function call. * @@ -81,7 +81,7 @@ * NOP instruction that kernel can replace with an interrupt instruction to * trigger instrumentation code (BPF program for all that we care about). * - * Semaphore above is and optional feature. It records an address of a 2-byte + * Semaphore above is an optional feature. It records an address of a 2-byte * refcount variable (normally in '.probes' ELF section) used for signaling if * there is anything that is attached to USDT. This is useful for user * applications if, for example, they need to prepare some arguments that are @@ -121,7 +121,7 @@ * a uprobe BPF program (which for kernel, at least currently, is just a kprobe * program, so BPF_PROG_TYPE_KPROBE program type). With the only difference * that uprobe is usually attached at the function entry, while USDT will - * normally will be somewhere inside the function. But it should always be + * normally be somewhere inside the function. But it should always be * pointing to NOP instruction, which makes such uprobes the fastest uprobe * kind. * @@ -151,7 +151,7 @@ * libbpf sets to spec ID during attach time, or, if kernel is too old to * support BPF cookie, through IP-to-spec-ID map that libbpf maintains in such * case. The latter means that some modes of operation can't be supported - * without BPF cookie. Such mode is attaching to shared library "generically", + * without BPF cookie. Such a mode is attaching to shared library "generically", * without specifying target process. In such case, it's impossible to * calculate absolute IP addresses for IP-to-spec-ID map, and thus such mode * is not supported without BPF cookie support. @@ -185,7 +185,7 @@ * as even if USDT spec string is the same, USDT cookie value can be * different. It was deemed excessive to try to deduplicate across independent * USDT attachments by taking into account USDT spec string *and* USDT cookie - * value, which would complicated spec ID accounting significantly for little + * value, which would complicate spec ID accounting significantly for little * gain. 
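A BPF-side sketch of the new bpf_usdt_arg_size() helper (the handler name is a placeholder and error handling is abbreviated):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/usdt.bpf.h>

SEC("usdt")
int on_usdt_probe(struct pt_regs *ctx) /* placeholder handler */
{
	long val = 0;
	int sz = bpf_usdt_arg_size(ctx, 0);

	if (sz < 0 || bpf_usdt_arg(ctx, 0, &val))
		return 0;
	/* sz says how many low-order bytes of val carry the argument */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";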
*/ @@ -661,7 +661,7 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char * * [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation */ usdt_abs_ip = note.loc_addr; - if (base_addr) + if (base_addr && note.base_addr) usdt_abs_ip += base_addr - note.base_addr; /* When attaching uprobes (which is what USDTs basically are) diff --git a/tools/lib/perf/Documentation/libperf.txt b/tools/lib/perf/Documentation/libperf.txt index fcfb9499ef9c..4072bc9b7670 100644 --- a/tools/lib/perf/Documentation/libperf.txt +++ b/tools/lib/perf/Documentation/libperf.txt @@ -39,7 +39,6 @@ SYNOPSIS struct perf_cpu_map *perf_cpu_map__new_any_cpu(void); struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list); - struct perf_cpu_map *perf_cpu_map__read(FILE *file); struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map); struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig, struct perf_cpu_map *other); @@ -211,6 +210,7 @@ SYNOPSIS struct perf_record_time_conv; struct perf_record_header_feature; struct perf_record_compressed; + struct perf_record_compressed2; -- DESCRIPTION diff --git a/tools/lib/perf/Makefile b/tools/lib/perf/Makefile index 3a9b2140aa04..7fbb50b74c00 100644 --- a/tools/lib/perf/Makefile +++ b/tools/lib/perf/Makefile @@ -39,29 +39,10 @@ libdir = $(prefix)/$(libdir_relative) libdir_SQ = $(subst ','\'',$(libdir)) libdir_relative_SQ = $(subst ','\'',$(libdir_relative)) -ifeq ("$(origin V)", "command line") - VERBOSE = $(V) -endif -ifndef VERBOSE - VERBOSE = 0 -endif - -ifeq ($(VERBOSE),1) - Q = -else - Q = @ -endif - TEST_ARGS := $(if $(V),-v) -# Set compile option CFLAGS -ifdef EXTRA_CFLAGS - CFLAGS := $(EXTRA_CFLAGS) -else - CFLAGS := -g -Wall -endif - INCLUDES = \ +-I$(OUTPUT)arch/$(SRCARCH)/include/generated/uapi \ -I$(srctree)/tools/lib/perf/include \ -I$(srctree)/tools/lib/ \ -I$(srctree)/tools/include \ @@ -70,11 +51,12 @@ INCLUDES = \ -I$(srctree)/tools/include/uapi # Append required CFLAGS -override CFLAGS += $(EXTRA_WARNINGS) -override CFLAGS += -Werror -Wall +override CFLAGS += -g -Werror -Wall override CFLAGS += -fPIC override CFLAGS += $(INCLUDES) override CFLAGS += -fvisibility=hidden +override CFLAGS += $(EXTRA_WARNINGS) +override CFLAGS += $(EXTRA_CFLAGS) all: @@ -118,7 +100,16 @@ $(LIBAPI)-clean: $(call QUIET_CLEAN, libapi) $(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) clean >/dev/null -$(LIBPERF_IN): FORCE +uapi-asm := $(OUTPUT)arch/$(SRCARCH)/include/generated/uapi/asm +ifeq ($(SRCARCH),arm64) + syscall-y := $(uapi-asm)/unistd_64.h +endif +uapi-asm-generic: + $(if $(syscall-y),\ + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-headers obj=$(uapi-asm) \ + generic=include/uapi/asm-generic $(syscall-y),) + +$(LIBPERF_IN): uapi-asm-generic FORCE $(Q)$(MAKE) $(build)=libperf $(LIBPERF_A): $(LIBPERF_IN) @@ -139,7 +130,7 @@ all: fixdep clean: $(LIBAPI)-clean $(call QUIET_CLEAN, libperf) $(RM) $(LIBPERF_A) \ *.o *~ *.a *.so *.so.$(VERSION) *.so.$(LIBPERF_VERSION) .*.d .*.cmd tests/*.o LIBPERF-CFLAGS $(LIBPERF_PC) \ - $(TESTS_STATIC) $(TESTS_SHARED) + $(TESTS_STATIC) $(TESTS_SHARED) $(syscall-y) TESTS_IN = tests-in.o diff --git a/tools/lib/perf/cpumap.c b/tools/lib/perf/cpumap.c index cae799ad44e1..b20a5280f2b3 100644 --- a/tools/lib/perf/cpumap.c +++ b/tools/lib/perf/cpumap.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only +#include <errno.h> #include <perf/cpumap.h> #include <stdlib.h> #include <linux/refcount.h> @@ -10,6 +11,9 @@ #include <ctype.h> #include <limits.h> #include "internal.h" +#include <api/fs/fs.h> + 
+#define MAX_NR_CPUS 4096 void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus) { @@ -100,12 +104,12 @@ static struct perf_cpu_map *cpu_map__new_sysconf(void) static struct perf_cpu_map *cpu_map__new_sysfs_online(void) { struct perf_cpu_map *cpus = NULL; - FILE *onlnf; + char *buf = NULL; + size_t buf_len; - onlnf = fopen("/sys/devices/system/cpu/online", "r"); - if (onlnf) { - cpus = perf_cpu_map__read(onlnf); - fclose(onlnf); + if (sysfs__read_str("devices/system/cpu/online", &buf, &buf_len) >= 0) { + cpus = perf_cpu_map__new(buf); + free(buf); } return cpus; } @@ -158,62 +162,6 @@ static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, const struct perf_cpu return cpus; } -struct perf_cpu_map *perf_cpu_map__read(FILE *file) -{ - struct perf_cpu_map *cpus = NULL; - int nr_cpus = 0; - struct perf_cpu *tmp_cpus = NULL, *tmp; - int max_entries = 0; - int n, cpu, prev; - char sep; - - sep = 0; - prev = -1; - for (;;) { - n = fscanf(file, "%u%c", &cpu, &sep); - if (n <= 0) - break; - if (prev >= 0) { - int new_max = nr_cpus + cpu - prev - 1; - - WARN_ONCE(new_max >= MAX_NR_CPUS, "Perf can support %d CPUs. " - "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS); - - if (new_max >= max_entries) { - max_entries = new_max + MAX_NR_CPUS / 2; - tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu)); - if (tmp == NULL) - goto out_free_tmp; - tmp_cpus = tmp; - } - - while (++prev < cpu) - tmp_cpus[nr_cpus++].cpu = prev; - } - if (nr_cpus == max_entries) { - max_entries += MAX_NR_CPUS; - tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu)); - if (tmp == NULL) - goto out_free_tmp; - tmp_cpus = tmp; - } - - tmp_cpus[nr_cpus++].cpu = cpu; - if (n == 2 && sep == '-') - prev = cpu; - else - prev = -1; - if (n == 1 || sep == '\n') - break; - } - - if (nr_cpus > 0) - cpus = cpu_map__trim_new(nr_cpus, tmp_cpus); -out_free_tmp: - free(tmp_cpus); - return cpus; -} - struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list) { struct perf_cpu_map *cpus = NULL; @@ -237,8 +185,8 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list) while (isdigit(*cpu_list)) { p = NULL; start_cpu = strtoul(cpu_list, &p, 0); - if (start_cpu >= INT_MAX - || (*p != '\0' && *p != ',' && *p != '-')) + if (start_cpu >= INT16_MAX + || (*p != '\0' && *p != ',' && *p != '-' && *p != '\n')) goto invalid; if (*p == '-') { @@ -246,7 +194,7 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list) p = NULL; end_cpu = strtoul(cpu_list, &p, 0); - if (end_cpu >= INT_MAX || (*p != '\0' && *p != ',')) + if (end_cpu >= INT16_MAX || (*p != '\0' && *p != ',' && *p != '\n')) goto invalid; if (end_cpu < start_cpu) @@ -261,17 +209,17 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list) for (; start_cpu <= end_cpu; start_cpu++) { /* check for duplicates */ for (i = 0; i < nr_cpus; i++) - if (tmp_cpus[i].cpu == (int)start_cpu) + if (tmp_cpus[i].cpu == (int16_t)start_cpu) goto invalid; if (nr_cpus == max_entries) { - max_entries += MAX_NR_CPUS; + max_entries += max(end_cpu - start_cpu + 1, 16UL); tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu)); if (tmp == NULL) goto invalid; tmp_cpus = tmp; } - tmp_cpus[nr_cpus++].cpu = (int)start_cpu; + tmp_cpus[nr_cpus++].cpu = (int16_t)start_cpu; } if (*p) ++p; @@ -279,20 +227,31 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list) cpu_list = p; } - if (nr_cpus > 0) + if (nr_cpus > 0) { cpus = cpu_map__trim_new(nr_cpus, tmp_cpus); - else if (*cpu_list != '\0') { + } else if (*cpu_list != '\0') { pr_warning("Unexpected characters at end of 
cpu list ('%s'), using online CPUs.", cpu_list); cpus = perf_cpu_map__new_online_cpus(); - } else + } else { cpus = perf_cpu_map__new_any_cpu(); + } invalid: free(tmp_cpus); out: return cpus; } +struct perf_cpu_map *perf_cpu_map__new_int(int cpu) +{ + struct perf_cpu_map *cpus = perf_cpu_map__alloc(1); + + if (cpus) + RC_CHK_ACCESS(cpus)->map[0].cpu = cpu; + + return cpus; +} + static int __perf_cpu_map__nr(const struct perf_cpu_map *cpus) { return RC_CHK_ACCESS(cpus)->nr; @@ -436,46 +395,49 @@ bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu } /* - * Merge two cpumaps + * Merge two cpumaps. * - * orig either gets freed and replaced with a new map, or reused - * with no reference count change (similar to "realloc") - * other has its reference count increased. + * If 'other' is subset of '*orig', '*orig' keeps itself with no reference count + * change (similar to "realloc"). + * + * If '*orig' is subset of 'other', '*orig' reuses 'other' with its reference + * count increased. + * + * Otherwise, '*orig' gets freed and replaced with a new map. */ - -struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig, - struct perf_cpu_map *other) +int perf_cpu_map__merge(struct perf_cpu_map **orig, struct perf_cpu_map *other) { struct perf_cpu *tmp_cpus; int tmp_len; int i, j, k; struct perf_cpu_map *merged; - if (perf_cpu_map__is_subset(orig, other)) - return orig; - if (perf_cpu_map__is_subset(other, orig)) { - perf_cpu_map__put(orig); - return perf_cpu_map__get(other); + if (perf_cpu_map__is_subset(*orig, other)) + return 0; + if (perf_cpu_map__is_subset(other, *orig)) { + perf_cpu_map__put(*orig); + *orig = perf_cpu_map__get(other); + return 0; } - tmp_len = __perf_cpu_map__nr(orig) + __perf_cpu_map__nr(other); + tmp_len = __perf_cpu_map__nr(*orig) + __perf_cpu_map__nr(other); tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu)); if (!tmp_cpus) - return NULL; + return -ENOMEM; /* Standard merge algorithm from wikipedia */ i = j = k = 0; - while (i < __perf_cpu_map__nr(orig) && j < __perf_cpu_map__nr(other)) { - if (__perf_cpu_map__cpu(orig, i).cpu <= __perf_cpu_map__cpu(other, j).cpu) { - if (__perf_cpu_map__cpu(orig, i).cpu == __perf_cpu_map__cpu(other, j).cpu) + while (i < __perf_cpu_map__nr(*orig) && j < __perf_cpu_map__nr(other)) { + if (__perf_cpu_map__cpu(*orig, i).cpu <= __perf_cpu_map__cpu(other, j).cpu) { + if (__perf_cpu_map__cpu(*orig, i).cpu == __perf_cpu_map__cpu(other, j).cpu) j++; - tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++); + tmp_cpus[k++] = __perf_cpu_map__cpu(*orig, i++); } else tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++); } - while (i < __perf_cpu_map__nr(orig)) - tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++); + while (i < __perf_cpu_map__nr(*orig)) + tmp_cpus[k++] = __perf_cpu_map__cpu(*orig, i++); while (j < __perf_cpu_map__nr(other)) tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++); @@ -483,8 +445,9 @@ struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig, merged = cpu_map__trim_new(k, tmp_cpus); free(tmp_cpus); - perf_cpu_map__put(orig); - return merged; + perf_cpu_map__put(*orig); + *orig = merged; + return 0; } struct perf_cpu_map *perf_cpu_map__intersect(struct perf_cpu_map *orig, diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c index 83c43dc13313..3ed023f4b190 100644 --- a/tools/lib/perf/evlist.c +++ b/tools/lib/perf/evlist.c @@ -36,49 +36,88 @@ void perf_evlist__init(struct perf_evlist *evlist) static void __perf_evlist__propagate_maps(struct perf_evlist *evlist, struct perf_evsel *evsel) { - 
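Sketch of the reworked merge API above: the merged map is now returned through *orig and the return value carries the error code (the CPU lists are illustrative):

#include <perf/cpumap.h>

static void merge_example(void)
{
	struct perf_cpu_map *a = perf_cpu_map__new("0-3");
	struct perf_cpu_map *b = perf_cpu_map__new("2-5");

	if (!perf_cpu_map__merge(&a, b)) {
		/* 'a' now references a map covering CPUs 0-5 */
	}
	perf_cpu_map__put(a);
	perf_cpu_map__put(b);
}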
if (evsel->system_wide) { - /* System wide: set the cpu map of the evsel to all online CPUs. */ - perf_cpu_map__put(evsel->cpus); - evsel->cpus = perf_cpu_map__new_online_cpus(); - } else if (evlist->has_user_cpus && evsel->is_pmu_core) { - /* - * User requested CPUs on a core PMU, ensure the requested CPUs - * are valid by intersecting with those of the PMU. - */ + if (perf_cpu_map__is_empty(evsel->cpus)) { + if (perf_cpu_map__is_empty(evsel->pmu_cpus)) { + /* + * Assume the unset PMU cpus were for a system-wide + * event, like a software or tracepoint. + */ + evsel->pmu_cpus = perf_cpu_map__new_online_cpus(); + } + if (evlist->has_user_cpus && !evsel->system_wide) { + /* + * Use the user CPUs unless the evsel is set to be + * system wide, such as the dummy event. + */ + evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus); + } else { + /* + * System wide and other modes, assume the cpu map + * should be set to all PMU CPUs. + */ + evsel->cpus = perf_cpu_map__get(evsel->pmu_cpus); + } + } + /* + * Avoid "any CPU"(-1) for uncore and PMUs that require a CPU, even if + * requested. + */ + if (evsel->requires_cpu && perf_cpu_map__has_any_cpu(evsel->cpus)) { perf_cpu_map__put(evsel->cpus); - evsel->cpus = perf_cpu_map__intersect(evlist->user_requested_cpus, evsel->own_cpus); + evsel->cpus = perf_cpu_map__get(evsel->pmu_cpus); + } - /* - * Empty cpu lists would eventually get opened as "any" so remove - * genuinely empty ones before they're opened in the wrong place. - */ - if (perf_cpu_map__is_empty(evsel->cpus)) { - struct perf_evsel *next = perf_evlist__next(evlist, evsel); - - perf_evlist__remove(evlist, evsel); - /* Keep idx contiguous */ - if (next) - list_for_each_entry_from(next, &evlist->entries, node) - next->idx--; + /* + * Globally requested CPUs replace user requested unless the evsel is + * set to be system wide. + */ + if (evlist->has_user_cpus && !evsel->system_wide) { + assert(!perf_cpu_map__has_any_cpu(evlist->user_requested_cpus)); + if (!perf_cpu_map__equal(evsel->cpus, evlist->user_requested_cpus)) { + perf_cpu_map__put(evsel->cpus); + evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus); } - } else if (!evsel->own_cpus || evlist->has_user_cpus || - (!evsel->requires_cpu && perf_cpu_map__has_any_cpu(evlist->user_requested_cpus))) { - /* - * The PMU didn't specify a default cpu map, this isn't a core - * event and the user requested CPUs or the evlist user - * requested CPUs have the "any CPU" (aka dummy) CPU value. In - * which case use the user requested CPUs rather than the PMU - * ones. - */ + } + + /* Ensure cpus only references valid PMU CPUs. */ + if (!perf_cpu_map__has_any_cpu(evsel->cpus) && + !perf_cpu_map__is_subset(evsel->pmu_cpus, evsel->cpus)) { + struct perf_cpu_map *tmp = perf_cpu_map__intersect(evsel->pmu_cpus, evsel->cpus); + perf_cpu_map__put(evsel->cpus); - evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus); - } else if (evsel->cpus != evsel->own_cpus) { - /* - * No user requested cpu map but the PMU cpu map doesn't match - * the evsel's. Reset it back to the PMU cpu map. - */ + evsel->cpus = tmp; + } + + /* + * Was event requested on all the PMU's CPUs but the user requested is + * any CPU (-1)? If so switch to using any CPU (-1) to reduce the number + * of events. 
+ */ + if (!evsel->system_wide && + !evsel->requires_cpu && + perf_cpu_map__equal(evsel->cpus, evsel->pmu_cpus) && + perf_cpu_map__has_any_cpu(evlist->user_requested_cpus)) { perf_cpu_map__put(evsel->cpus); - evsel->cpus = perf_cpu_map__get(evsel->own_cpus); + evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus); + } + + /* Sanity check assert before the evsel is potentially removed. */ + assert(!evsel->requires_cpu || !perf_cpu_map__has_any_cpu(evsel->cpus)); + + /* + * Empty cpu lists would eventually get opened as "any" so remove + * genuinely empty ones before they're opened in the wrong place. + */ + if (perf_cpu_map__is_empty(evsel->cpus)) { + struct perf_evsel *next = perf_evlist__next(evlist, evsel); + + perf_evlist__remove(evlist, evsel); + /* Keep idx contiguous */ + if (next) + list_for_each_entry_from(next, &evlist->entries, node) + next->idx--; + + return; } if (evsel->system_wide) { @@ -89,7 +128,7 @@ static void __perf_evlist__propagate_maps(struct perf_evlist *evlist, evsel->threads = perf_thread_map__get(evlist->threads); } - evlist->all_cpus = perf_cpu_map__merge(evlist->all_cpus, evsel->cpus); + perf_cpu_map__merge(&evlist->all_cpus, evsel->cpus); } static void perf_evlist__propagate_maps(struct perf_evlist *evlist) @@ -98,6 +137,10 @@ static void perf_evlist__propagate_maps(struct perf_evlist *evlist) evlist->needs_map_propagation = true; + /* Clear the all_cpus set which will be merged into during propagation. */ + perf_cpu_map__put(evlist->all_cpus); + evlist->all_cpus = NULL; + list_for_each_entry_safe(evsel, n, &evlist->entries, node) __perf_evlist__propagate_maps(evlist, evsel); } diff --git a/tools/lib/perf/evsel.c b/tools/lib/perf/evsel.c index c475319e2e41..13a307fc75ae 100644 --- a/tools/lib/perf/evsel.c +++ b/tools/lib/perf/evsel.c @@ -40,8 +40,19 @@ struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr) return evsel; } +void perf_evsel__exit(struct perf_evsel *evsel) +{ + assert(evsel->fd == NULL); /* If not fds were not closed. */ + assert(evsel->mmap == NULL); /* If not munmap wasn't called. */ + assert(evsel->sample_id == NULL); /* If not free_id wasn't called. */ + perf_cpu_map__put(evsel->cpus); + perf_cpu_map__put(evsel->pmu_cpus); + perf_thread_map__put(evsel->threads); +} + void perf_evsel__delete(struct perf_evsel *evsel) { + perf_evsel__exit(evsel); free(evsel); } diff --git a/tools/lib/perf/include/internal/cpumap.h b/tools/lib/perf/include/internal/cpumap.h index 49649eb51ce4..e2be2d17c32b 100644 --- a/tools/lib/perf/include/internal/cpumap.h +++ b/tools/lib/perf/include/internal/cpumap.h @@ -21,10 +21,6 @@ DECLARE_RC_STRUCT(perf_cpu_map) { struct perf_cpu map[]; }; -#ifndef MAX_NR_CPUS -#define MAX_NR_CPUS 2048 -#endif - struct perf_cpu_map *perf_cpu_map__alloc(int nr_cpus); int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu); bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu_map *b); diff --git a/tools/lib/perf/include/internal/evsel.h b/tools/lib/perf/include/internal/evsel.h index ea78defa77d0..fefe64ba5e26 100644 --- a/tools/lib/perf/include/internal/evsel.h +++ b/tools/lib/perf/include/internal/evsel.h @@ -99,7 +99,7 @@ struct perf_evsel { * cpu map for opening the event on, for example, the first CPU on a * socket for an uncore event. 
*/ - struct perf_cpu_map *own_cpus; + struct perf_cpu_map *pmu_cpus; struct perf_thread_map *threads; struct xyarray *fd; struct xyarray *mmap; @@ -133,6 +133,7 @@ struct perf_evsel { void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr, int idx); +void perf_evsel__exit(struct perf_evsel *evsel); int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads); void perf_evsel__close_fd(struct perf_evsel *evsel); void perf_evsel__free_fd(struct perf_evsel *evsel); diff --git a/tools/lib/perf/include/perf/cpumap.h b/tools/lib/perf/include/perf/cpumap.h index 90457d17fb2f..58cc5c5fa47c 100644 --- a/tools/lib/perf/include/perf/cpumap.h +++ b/tools/lib/perf/include/perf/cpumap.h @@ -3,12 +3,12 @@ #define __LIBPERF_CPUMAP_H #include <perf/core.h> -#include <stdio.h> #include <stdbool.h> +#include <stdint.h> /** A wrapper around a CPU to avoid confusion with the perf_cpu_map's map's indices. */ struct perf_cpu { - int cpu; + int16_t cpu; }; struct perf_cache { @@ -37,10 +37,11 @@ LIBPERF_API struct perf_cpu_map *perf_cpu_map__new_online_cpus(void); * perf_cpu_map__new_online_cpus is returned. */ LIBPERF_API struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list); -LIBPERF_API struct perf_cpu_map *perf_cpu_map__read(FILE *file); +/** perf_cpu_map__new_int - create a map with the one given cpu. */ +LIBPERF_API struct perf_cpu_map *perf_cpu_map__new_int(int cpu); LIBPERF_API struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map); -LIBPERF_API struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig, - struct perf_cpu_map *other); +LIBPERF_API int perf_cpu_map__merge(struct perf_cpu_map **orig, + struct perf_cpu_map *other); LIBPERF_API struct perf_cpu_map *perf_cpu_map__intersect(struct perf_cpu_map *orig, struct perf_cpu_map *other); LIBPERF_API void perf_cpu_map__put(struct perf_cpu_map *map); diff --git a/tools/lib/perf/include/perf/event.h b/tools/lib/perf/include/perf/event.h index 37bb7771d914..6608f1e3701b 100644 --- a/tools/lib/perf/include/perf/event.h +++ b/tools/lib/perf/include/perf/event.h @@ -457,6 +457,32 @@ struct perf_record_compressed { char data[]; }; +/* + * `header.size` includes the padding we are going to add while writing the record. + * `data_size` only includes the size of `data[]` itself. 
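Example of the new single-CPU constructor; note that with struct perf_cpu now holding an int16_t, the cpu_list parser above rejects values at or above INT16_MAX:

#include <perf/cpumap.h>

static struct perf_cpu_map *map_for_cpu3(void)
{
	/* one-entry map containing CPU 3; release with perf_cpu_map__put() */
	return perf_cpu_map__new_int(3);
}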
+ */ +struct perf_record_compressed2 { + struct perf_event_header header; + __u64 data_size; + char data[]; +}; + +#define BPF_METADATA_KEY_LEN 64 +#define BPF_METADATA_VALUE_LEN 256 +#define BPF_PROG_NAME_LEN KSYM_NAME_LEN + +struct perf_record_bpf_metadata_entry { + char key[BPF_METADATA_KEY_LEN]; + char value[BPF_METADATA_VALUE_LEN]; +}; + +struct perf_record_bpf_metadata { + struct perf_event_header header; + char prog_name[BPF_PROG_NAME_LEN]; + __u64 nr_entries; + struct perf_record_bpf_metadata_entry entries[]; +}; + enum perf_user_event_type { /* above any possible kernel type */ PERF_RECORD_USER_TYPE_START = 64, PERF_RECORD_HEADER_ATTR = 64, @@ -478,6 +504,8 @@ enum perf_user_event_type { /* above any possible kernel type */ PERF_RECORD_HEADER_FEATURE = 80, PERF_RECORD_COMPRESSED = 81, PERF_RECORD_FINISHED_INIT = 82, + PERF_RECORD_COMPRESSED2 = 83, + PERF_RECORD_BPF_METADATA = 84, PERF_RECORD_HEADER_MAX }; @@ -518,6 +546,8 @@ union perf_event { struct perf_record_time_conv time_conv; struct perf_record_header_feature feat; struct perf_record_compressed pack; + struct perf_record_compressed2 pack2; + struct perf_record_bpf_metadata bpf_metadata; }; #endif /* __LIBPERF_EVENT_H */ diff --git a/tools/lib/perf/include/perf/threadmap.h b/tools/lib/perf/include/perf/threadmap.h index 8b40e7777cea..44deb815b817 100644 --- a/tools/lib/perf/include/perf/threadmap.h +++ b/tools/lib/perf/include/perf/threadmap.h @@ -14,6 +14,7 @@ LIBPERF_API void perf_thread_map__set_pid(struct perf_thread_map *map, int idx, LIBPERF_API char *perf_thread_map__comm(struct perf_thread_map *map, int idx); LIBPERF_API int perf_thread_map__nr(struct perf_thread_map *threads); LIBPERF_API pid_t perf_thread_map__pid(struct perf_thread_map *map, int idx); +LIBPERF_API int perf_thread_map__idx(struct perf_thread_map *map, pid_t pid); LIBPERF_API struct perf_thread_map *perf_thread_map__get(struct perf_thread_map *map); LIBPERF_API void perf_thread_map__put(struct perf_thread_map *map); diff --git a/tools/lib/perf/libperf.map b/tools/lib/perf/libperf.map index 2aa79b696032..fdd8304fe9d0 100644 --- a/tools/lib/perf/libperf.map +++ b/tools/lib/perf/libperf.map @@ -6,7 +6,6 @@ LIBPERF_0.0.1 { perf_cpu_map__get; perf_cpu_map__put; perf_cpu_map__new; - perf_cpu_map__read; perf_cpu_map__nr; perf_cpu_map__cpu; perf_cpu_map__has_any_cpu_or_is_empty; diff --git a/tools/lib/perf/threadmap.c b/tools/lib/perf/threadmap.c index 07968f3ea093..db431b036f57 100644 --- a/tools/lib/perf/threadmap.c +++ b/tools/lib/perf/threadmap.c @@ -97,5 +97,22 @@ int perf_thread_map__nr(struct perf_thread_map *threads) pid_t perf_thread_map__pid(struct perf_thread_map *map, int idx) { + if (!map) { + assert(idx == 0); + return -1; + } + return map->map[idx].pid; } + +int perf_thread_map__idx(struct perf_thread_map *threads, pid_t pid) +{ + if (!threads) + return pid == -1 ? 
diff --git a/tools/lib/perf/include/perf/threadmap.h b/tools/lib/perf/include/perf/threadmap.h
index 8b40e7777cea..44deb815b817 100644
--- a/tools/lib/perf/include/perf/threadmap.h
+++ b/tools/lib/perf/include/perf/threadmap.h
@@ -14,6 +14,7 @@ LIBPERF_API void perf_thread_map__set_pid(struct perf_thread_map *map, int idx,
 LIBPERF_API char *perf_thread_map__comm(struct perf_thread_map *map, int idx);
 LIBPERF_API int perf_thread_map__nr(struct perf_thread_map *threads);
 LIBPERF_API pid_t perf_thread_map__pid(struct perf_thread_map *map, int idx);
+LIBPERF_API int perf_thread_map__idx(struct perf_thread_map *map, pid_t pid);
 LIBPERF_API struct perf_thread_map *perf_thread_map__get(struct perf_thread_map *map);
 LIBPERF_API void perf_thread_map__put(struct perf_thread_map *map);
diff --git a/tools/lib/perf/libperf.map b/tools/lib/perf/libperf.map
index 2aa79b696032..fdd8304fe9d0 100644
--- a/tools/lib/perf/libperf.map
+++ b/tools/lib/perf/libperf.map
@@ -6,7 +6,6 @@ LIBPERF_0.0.1 {
 		perf_cpu_map__get;
 		perf_cpu_map__put;
 		perf_cpu_map__new;
-		perf_cpu_map__read;
 		perf_cpu_map__nr;
 		perf_cpu_map__cpu;
 		perf_cpu_map__has_any_cpu_or_is_empty;
diff --git a/tools/lib/perf/threadmap.c b/tools/lib/perf/threadmap.c
index 07968f3ea093..db431b036f57 100644
--- a/tools/lib/perf/threadmap.c
+++ b/tools/lib/perf/threadmap.c
@@ -97,5 +97,22 @@ int perf_thread_map__nr(struct perf_thread_map *threads)
 
 pid_t perf_thread_map__pid(struct perf_thread_map *map, int idx)
 {
+	if (!map) {
+		assert(idx == 0);
+		return -1;
+	}
+
 	return map->map[idx].pid;
 }
+
+int perf_thread_map__idx(struct perf_thread_map *threads, pid_t pid)
+{
+	if (!threads)
+		return pid == -1 ? 0 : -1;
+
+	for (int i = 0; i < threads->nr; ++i) {
+		if (threads->map[i].pid == pid)
+			return i;
+	}
+	return -1;
+}
diff --git a/tools/lib/slab.c b/tools/lib/slab.c
index 959997fb0652..981a21404f32 100644
--- a/tools/lib/slab.c
+++ b/tools/lib/slab.c
@@ -36,3 +36,19 @@ void kfree(void *p)
 		printf("Freeing %p to malloc\n", p);
 	free(p);
 }
+
+void *kmalloc_array(size_t n, size_t size, gfp_t gfp)
+{
+	void *ret;
+
+	if (!(gfp & __GFP_DIRECT_RECLAIM))
+		return NULL;
+
+	ret = calloc(n, size);
+	uatomic_inc(&kmalloc_nr_allocated);
+	if (kmalloc_verbose)
+		printf("Allocating %p from calloc\n", ret);
+	if (gfp & __GFP_ZERO)
+		memset(ret, 0, n * size);
+	return ret;
+}
diff --git a/tools/lib/subcmd/help.c b/tools/lib/subcmd/help.c
index 8561b0f01a24..9ef569492560 100644
--- a/tools/lib/subcmd/help.c
+++ b/tools/lib/subcmd/help.c
@@ -9,6 +9,7 @@
 #include <sys/stat.h>
 #include <unistd.h>
 #include <dirent.h>
+#include <assert.h>
 #include "subcmd-util.h"
 #include "help.h"
 #include "exec-cmd.h"
@@ -82,10 +83,11 @@ void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes)
 				ci++;
 				cj++;
 			} else {
-				zfree(&cmds->names[cj]);
-				cmds->names[cj++] = cmds->names[ci++];
+				cmds->names[cj++] = cmds->names[ci];
+				cmds->names[ci++] = NULL;
 			}
 		} else if (cmp == 0) {
+			zfree(&cmds->names[ci]);
 			ci++;
 			ei++;
 		} else if (cmp > 0) {
@@ -94,12 +96,12 @@ void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes)
 	}
 	if (ci != cj) {
 		while (ci < cmds->cnt) {
-			zfree(&cmds->names[cj]);
-			cmds->names[cj++] = cmds->names[ci++];
+			cmds->names[cj++] = cmds->names[ci];
+			cmds->names[ci++] = NULL;
 		}
 	}
 	for (ci = cj; ci < cmds->cnt; ci++)
-		zfree(&cmds->names[ci]);
+		assert(cmds->names[ci] == NULL);
 	cmds->cnt = cj;
 }
diff --git a/tools/lib/subcmd/run-command.c b/tools/lib/subcmd/run-command.c
index 0a764c25c384..b7510f83209a 100644
--- a/tools/lib/subcmd/run-command.c
+++ b/tools/lib/subcmd/run-command.c
@@ -5,6 +5,7 @@
 #include <ctype.h>
 #include <fcntl.h>
 #include <string.h>
+#include <linux/compiler.h>
 #include <linux/string.h>
 #include <errno.h>
 #include <sys/wait.h>
@@ -216,10 +217,20 @@ static int wait_or_whine(struct child_process *cmd, bool block)
 	return result;
 }
 
+/*
+ * Conservative estimate of the number of characters needed to hold a decoded
+ * integer: assume each 3 bits needs a character byte, plus a possible sign
+ * character.
+ */
+#ifndef is_signed_type
+#define is_signed_type(type) (((type)(-1)) < (type)1)
+#endif
+#define MAX_STRLEN_TYPE(type) (sizeof(type) * 8 / 3 + (is_signed_type(type) ? 1 : 0))
+
 int check_if_command_finished(struct child_process *cmd)
 {
 #ifdef __linux__
-	char filename[FILENAME_MAX + 12];
+	char filename[6 + MAX_STRLEN_TYPE(typeof(cmd->pid)) + 7 + 1];
 	char status_line[256];
 	FILE *status_file;
 
 	/*
 	 * Check by reading /proc/<pid>/status as calling waitpid causes
 	 * stdout/stderr to be closed and data lost.
 	 */
-	sprintf(filename, "/proc/%d/status", cmd->pid);
+	sprintf(filename, "/proc/%u/status", cmd->pid);
 	status_file = fopen(filename, "r");
 	if (status_file == NULL) {
 		/* Open failed, assume finish_command was called. */
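The MAX_STRLEN_TYPE bound above is easy to check by hand: a 32-bit signed pid_t gives 32 / 3 + 1 = 11 characters, and the longest rendering, -2147483648, needs exactly 11, so the buffer is 6 + 11 + 7 + 1 = 25 bytes for "/proc/", the pid, "/status" and the NUL. A standalone restatement of that arithmetic, assuming pid_t is a signed 32-bit type:

#include <stdio.h>
#include <sys/types.h>

#define is_signed_type(type) (((type)(-1)) < (type)1)
#define MAX_STRLEN_TYPE(type) (sizeof(type) * 8 / 3 + (is_signed_type(type) ? 1 : 0))

int main(void)
{
	/* "/proc/" (6) + worst-case pid (11) + "/status" (7) + NUL (1). */
	char filename[6 + MAX_STRLEN_TYPE(pid_t) + 7 + 1];
	int need = snprintf(filename, sizeof(filename),
			    "/proc/%d/status", -2147483647 - 1);

	printf("need %d, have %zu\n", need, sizeof(filename));	/* need 24, have 25 */
	return 0;
}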
diff --git a/tools/lib/thermal/Makefile b/tools/lib/thermal/Makefile
index 8890fd57b110..a1f5e388644d 100644
--- a/tools/lib/thermal/Makefile
+++ b/tools/lib/thermal/Makefile
@@ -39,19 +39,6 @@ libdir = $(prefix)/$(libdir_relative)
 libdir_SQ = $(subst ','\'',$(libdir))
 libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
 
-ifeq ("$(origin V)", "command line")
-  VERBOSE = $(V)
-endif
-ifndef VERBOSE
-  VERBOSE = 0
-endif
-
-ifeq ($(VERBOSE),1)
-  Q =
-else
-  Q = @
-endif
-
 # Set compile option CFLAGS
 ifdef EXTRA_CFLAGS
   CFLAGS := $(EXTRA_CFLAGS)