Diffstat (limited to 'tools/lib/bpf')
-rw-r--r-- | tools/lib/bpf/bpf_core_read.h   |   6
-rw-r--r-- | tools/lib/bpf/bpf_helpers.h     |   8
-rw-r--r-- | tools/lib/bpf/btf.c             | 226
-rw-r--r-- | tools/lib/bpf/libbpf.c          |  87
-rw-r--r-- | tools/lib/bpf/libbpf.h          |  11
-rw-r--r-- | tools/lib/bpf/libbpf.map        |   4
-rw-r--r-- | tools/lib/bpf/libbpf_internal.h |   9
-rw-r--r-- | tools/lib/bpf/linker.c          |   6
-rw-r--r-- | tools/lib/bpf/netlink.c         |  20
-rw-r--r-- | tools/lib/bpf/nlattr.c          |  15
10 files changed, 283 insertions, 109 deletions
diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h
index c0e13cdf9660..b997c68bd945 100644
--- a/tools/lib/bpf/bpf_core_read.h
+++ b/tools/lib/bpf/bpf_core_read.h
@@ -388,7 +388,13 @@ extern void *bpf_rdonly_cast(const void *obj, __u32 btf_id) __ksym __weak;
 #define ___arrow10(a, b, c, d, e, f, g, h, i, j) a->b->c->d->e->f->g->h->i->j
 #define ___arrow(...) ___apply(___arrow, ___narg(__VA_ARGS__))(__VA_ARGS__)
 
+#if defined(__clang__) && (__clang_major__ >= 19)
+#define ___type(...) __typeof_unqual__(___arrow(__VA_ARGS__))
+#elif defined(__GNUC__) && (__GNUC__ >= 14)
+#define ___type(...) __typeof_unqual__(___arrow(__VA_ARGS__))
+#else
 #define ___type(...) typeof(___arrow(__VA_ARGS__))
+#endif
 
 #define ___read(read_fn, dst, src_type, src, accessor)			    \
 	read_fn((void *)(dst), sizeof(*(dst)), &((src_type)(src))->accessor)
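The guards above switch ___type() to __typeof_unqual__() on clang >= 19 and GCC >= 14, so that qualifiers on the traversed field are not carried over into the temporaries declared by the CO-RE reading macros. A minimal illustration of the difference (the struct and field names are made up for this sketch, not taken from the patch):

/* Illustrative sketch, not part of the patch: ___type() is what the
 * BPF_CORE_READ() family uses to declare intermediate variables.  With a
 * const/volatile member, plain typeof() copies the qualifiers onto the
 * temporary; __typeof_unqual__() (clang >= 19, GCC >= 14) drops them.
 */
struct pkt___example {			/* hypothetical struct, for illustration */
	const volatile int len;
};

static int qual_demo(struct pkt___example *p)
{
	typeof(p->len) a = 0;			/* type is "const volatile int" */
	__typeof_unqual__(p->len) b = 0;	/* plain "int", freely writable */

	b = p->len;
	(void)a;
	return b;
}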
diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
index 686824b8b413..a50773d4616e 100644
--- a/tools/lib/bpf/bpf_helpers.h
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -15,6 +15,14 @@
 #define __array(name, val) typeof(val) *name[]
 #define __ulong(name, val) enum { ___bpf_concat(__unique_value, __COUNTER__) = val } name
 
+#ifndef likely
+#define likely(x)      (__builtin_expect(!!(x), 1))
+#endif
+
+#ifndef unlikely
+#define unlikely(x)    (__builtin_expect(!!(x), 0))
+#endif
+
 /*
  * Helper macro to place programs, maps, license in
  * different sections in elf_bpf file. Section names
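The new likely()/unlikely() wrappers are the usual __builtin_expect() branch-prediction hints, guarded by #ifndef so programs that already define their own keep them. A small BPF program sketch using them (the tracepoint and the conditions are illustrative only):

/* Usage sketch only; assumes a recent bpf_helpers.h with likely()/unlikely(). */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tracepoint/syscalls/sys_enter_nanosleep")
int handle_enter(void *ctx)
{
	__u32 pid = bpf_get_current_pid_tgid() >> 32;

	if (unlikely(pid == 0))		/* hint: almost never the idle task */
		return 0;

	if (likely(pid > 1))		/* hint: the common path */
		bpf_printk("nanosleep from pid %u", pid);

	return 0;
}

char LICENSE[] SEC("license") = "GPL";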
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 38bc6b14b066..f1d495dc66bb 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -12,6 +12,7 @@
 #include <sys/utsname.h>
 #include <sys/param.h>
 #include <sys/stat.h>
+#include <sys/mman.h>
 #include <linux/kernel.h>
 #include <linux/err.h>
 #include <linux/btf.h>
@@ -120,6 +121,9 @@ struct btf {
 	/* whether base_btf should be freed in btf_free for this instance */
 	bool owns_base;
 
+	/* whether raw_data is a (read-only) mmap */
+	bool raw_data_is_mmap;
+
 	/* BTF object FD, if loaded into kernel */
 	int fd;
 
@@ -951,6 +955,17 @@ static bool btf_is_modifiable(const struct btf *btf)
 	return (void *)btf->hdr != btf->raw_data;
 }
 
+static void btf_free_raw_data(struct btf *btf)
+{
+	if (btf->raw_data_is_mmap) {
+		munmap(btf->raw_data, btf->raw_size);
+		btf->raw_data_is_mmap = false;
+	} else {
+		free(btf->raw_data);
+	}
+	btf->raw_data = NULL;
+}
+
 void btf__free(struct btf *btf)
 {
 	if (IS_ERR_OR_NULL(btf))
@@ -970,7 +985,7 @@ void btf__free(struct btf *btf)
 		free(btf->types_data);
 		strset__free(btf->strs_set);
 	}
-	free(btf->raw_data);
+	btf_free_raw_data(btf);
 	free(btf->raw_data_swapped);
 	free(btf->type_offs);
 	if (btf->owns_base)
@@ -996,7 +1011,7 @@ static struct btf *btf_new_empty(struct btf *base_btf)
 	if (base_btf) {
 		btf->base_btf = base_btf;
 		btf->start_id = btf__type_cnt(base_btf);
-		btf->start_str_off = base_btf->hdr->str_len;
+		btf->start_str_off = base_btf->hdr->str_len + base_btf->start_str_off;
 		btf->swapped_endian = base_btf->swapped_endian;
 	}
 
@@ -1030,7 +1045,7 @@ struct btf *btf__new_empty_split(struct btf *base_btf)
 	return libbpf_ptr(btf_new_empty(base_btf));
 }
 
-static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)
+static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf, bool is_mmap)
 {
 	struct btf *btf;
 	int err;
@@ -1050,12 +1065,18 @@ static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)
 		btf->start_str_off = base_btf->hdr->str_len;
 	}
 
-	btf->raw_data = malloc(size);
-	if (!btf->raw_data) {
-		err = -ENOMEM;
-		goto done;
+	if (is_mmap) {
+		btf->raw_data = (void *)data;
+		btf->raw_data_is_mmap = true;
+	} else {
+		btf->raw_data = malloc(size);
+		if (!btf->raw_data) {
+			err = -ENOMEM;
+			goto done;
+		}
+		memcpy(btf->raw_data, data, size);
 	}
-	memcpy(btf->raw_data, data, size);
+
 	btf->raw_size = size;
 
 	btf->hdr = btf->raw_data;
@@ -1083,12 +1104,12 @@ done:
 
 struct btf *btf__new(const void *data, __u32 size)
 {
-	return libbpf_ptr(btf_new(data, size, NULL));
+	return libbpf_ptr(btf_new(data, size, NULL, false));
 }
 
 struct btf *btf__new_split(const void *data, __u32 size, struct btf *base_btf)
 {
-	return libbpf_ptr(btf_new(data, size, base_btf));
+	return libbpf_ptr(btf_new(data, size, base_btf, false));
 }
 
 struct btf_elf_secs {
@@ -1148,6 +1169,12 @@ static int btf_find_elf_sections(Elf *elf, const char *path, struct btf_elf_secs
 		else
 			continue;
 
+		if (sh.sh_type != SHT_PROGBITS) {
+			pr_warn("unexpected section type (%d) of section(%d, %s) from %s\n",
+				sh.sh_type, idx, name, path);
+			goto err;
+		}
+
 		data = elf_getdata(scn, 0);
 		if (!data) {
 			pr_warn("failed to get section(%d, %s) data from %s\n",
@@ -1203,7 +1230,7 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
 
 	if (secs.btf_base_data) {
 		dist_base_btf = btf_new(secs.btf_base_data->d_buf, secs.btf_base_data->d_size,
-					NULL);
+					NULL, false);
 		if (IS_ERR(dist_base_btf)) {
 			err = PTR_ERR(dist_base_btf);
 			dist_base_btf = NULL;
@@ -1212,7 +1239,7 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
 	}
 
 	btf = btf_new(secs.btf_data->d_buf, secs.btf_data->d_size,
-		      dist_base_btf ?: base_btf);
+		      dist_base_btf ?: base_btf, false);
 	if (IS_ERR(btf)) {
 		err = PTR_ERR(btf);
 		goto done;
@@ -1329,7 +1356,7 @@ static struct btf *btf_parse_raw(const char *path, struct btf *base_btf)
 	}
 
 	/* finally parse BTF data */
-	btf = btf_new(data, sz, base_btf);
+	btf = btf_new(data, sz, base_btf, false);
 
 err_out:
 	free(data);
@@ -1348,6 +1375,37 @@ struct btf *btf__parse_raw_split(const char *path, struct btf *base_btf)
 	return libbpf_ptr(btf_parse_raw(path, base_btf));
 }
 
+static struct btf *btf_parse_raw_mmap(const char *path, struct btf *base_btf)
+{
+	struct stat st;
+	void *data;
+	struct btf *btf;
+	int fd, err;
+
+	fd = open(path, O_RDONLY);
+	if (fd < 0)
+		return libbpf_err_ptr(-errno);
+
+	if (fstat(fd, &st) < 0) {
+		err = -errno;
+		close(fd);
+		return libbpf_err_ptr(err);
+	}
+
+	data = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
+	err = -errno;
+	close(fd);
+
+	if (data == MAP_FAILED)
+		return libbpf_err_ptr(err);
+
+	btf = btf_new(data, st.st_size, base_btf, true);
+	if (IS_ERR(btf))
+		munmap(data, st.st_size);
+
+	return btf;
+}
+
 static struct btf *btf_parse(const char *path, struct btf *base_btf, struct btf_ext **btf_ext)
 {
 	struct btf *btf;
@@ -1612,7 +1670,7 @@ struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf)
 		goto exit_free;
 	}
 
-	btf = btf_new(ptr, btf_info.btf_size, base_btf);
+	btf = btf_new(ptr, btf_info.btf_size, base_btf, false);
 
 exit_free:
 	free(ptr);
@@ -1652,10 +1710,8 @@ struct btf *btf__load_from_kernel_by_id(__u32 id)
 
 static void btf_invalidate_raw_data(struct btf *btf)
 {
-	if (btf->raw_data) {
-		free(btf->raw_data);
-		btf->raw_data = NULL;
-	}
+	if (btf->raw_data)
+		btf_free_raw_data(btf);
 	if (btf->raw_data_swapped) {
 		free(btf->raw_data_swapped);
 		btf->raw_data_swapped = NULL;
@@ -4350,46 +4406,109 @@ static inline __u16 btf_fwd_kind(struct btf_type *t)
 	return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
 }
 
-/* Check if given two types are identical ARRAY definitions */
-static bool btf_dedup_identical_arrays(struct btf_dedup *d, __u32 id1, __u32 id2)
+static bool btf_dedup_identical_types(struct btf_dedup *d, __u32 id1, __u32 id2, int depth)
 {
 	struct btf_type *t1, *t2;
+	int k1, k2;
+recur:
+	if (depth <= 0)
+		return false;
 
 	t1 = btf_type_by_id(d->btf, id1);
 	t2 = btf_type_by_id(d->btf, id2);
-	if (!btf_is_array(t1) || !btf_is_array(t2))
+
+	k1 = btf_kind(t1);
+	k2 = btf_kind(t2);
+	if (k1 != k2)
 		return false;
 
-	return btf_equal_array(t1, t2);
-}
+	switch (k1) {
+	case BTF_KIND_UNKN: /* VOID */
+		return true;
+	case BTF_KIND_INT:
+		return btf_equal_int_tag(t1, t2);
+	case BTF_KIND_ENUM:
+	case BTF_KIND_ENUM64:
+		return btf_compat_enum(t1, t2);
+	case BTF_KIND_FWD:
+	case BTF_KIND_FLOAT:
+		return btf_equal_common(t1, t2);
+	case BTF_KIND_CONST:
+	case BTF_KIND_VOLATILE:
+	case BTF_KIND_RESTRICT:
+	case BTF_KIND_PTR:
+	case BTF_KIND_TYPEDEF:
+	case BTF_KIND_FUNC:
+	case BTF_KIND_TYPE_TAG:
+		if (t1->info != t2->info || t1->name_off != t2->name_off)
+			return false;
+		id1 = t1->type;
+		id2 = t2->type;
+		goto recur;
+	case BTF_KIND_ARRAY: {
+		struct btf_array *a1, *a2;
 
-/* Check if given two types are identical STRUCT/UNION definitions */
-static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id2)
-{
-	const struct btf_member *m1, *m2;
-	struct btf_type *t1, *t2;
-	int n, i;
+		if (!btf_compat_array(t1, t2))
+			return false;
 
-	t1 = btf_type_by_id(d->btf, id1);
-	t2 = btf_type_by_id(d->btf, id2);
+		a1 = btf_array(t1);
+		a2 = btf_array(t1);
 
-	if (!btf_is_composite(t1) || btf_kind(t1) != btf_kind(t2))
-		return false;
+		if (a1->index_type != a2->index_type &&
+		    !btf_dedup_identical_types(d, a1->index_type, a2->index_type, depth - 1))
+			return false;
 
-	if (!btf_shallow_equal_struct(t1, t2))
-		return false;
+		if (a1->type != a2->type &&
+		    !btf_dedup_identical_types(d, a1->type, a2->type, depth - 1))
+			return false;
 
-	m1 = btf_members(t1);
-	m2 = btf_members(t2);
-	for (i = 0, n = btf_vlen(t1); i < n; i++, m1++, m2++) {
-		if (m1->type != m2->type &&
-		    !btf_dedup_identical_arrays(d, m1->type, m2->type) &&
-		    !btf_dedup_identical_structs(d, m1->type, m2->type))
+		return true;
+	}
+	case BTF_KIND_STRUCT:
+	case BTF_KIND_UNION: {
+		const struct btf_member *m1, *m2;
+		int i, n;
+
+		if (!btf_shallow_equal_struct(t1, t2))
 			return false;
+
+		m1 = btf_members(t1);
+		m2 = btf_members(t2);
+		for (i = 0, n = btf_vlen(t1); i < n; i++, m1++, m2++) {
+			if (m1->type == m2->type)
+				continue;
+			if (!btf_dedup_identical_types(d, m1->type, m2->type, depth - 1))
+				return false;
+		}
+		return true;
+	}
+	case BTF_KIND_FUNC_PROTO: {
+		const struct btf_param *p1, *p2;
+		int i, n;
+
+		if (!btf_compat_fnproto(t1, t2))
+			return false;
+
+		if (t1->type != t2->type &&
+		    !btf_dedup_identical_types(d, t1->type, t2->type, depth - 1))
+			return false;
+
+		p1 = btf_params(t1);
+		p2 = btf_params(t2);
+		for (i = 0, n = btf_vlen(t1); i < n; i++, p1++, p2++) {
+			if (p1->type == p2->type)
+				continue;
+			if (!btf_dedup_identical_types(d, p1->type, p2->type, depth - 1))
+				return false;
		}
+		return true;
+	}
+	default:
+		return false;
 	}
-	return true;
 }
+
 
 /*
  * Check equivalence of BTF type graph formed by candidate struct/union (we'll
  * call it "candidate graph" in this description for brevity) to a type graph
@@ -4508,19 +4627,13 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
 		 * different fields within the *same* struct. This breaks type
 		 * equivalence check, which makes an assumption that candidate
 		 * types sub-graph has a consistent and deduped-by-compiler
-		 * types within a single CU. So work around that by explicitly
-		 * allowing identical array types here.
+		 * types within a single CU. And similar situation can happen
+		 * with struct/union sometimes, and event with pointers.
+		 * So accommodate cases like this doing a structural
+		 * comparison recursively, but avoiding being stuck in endless
+		 * loops by limiting the depth up to which we check.
 		 */
-		if (btf_dedup_identical_arrays(d, hypot_type_id, cand_id))
-			return 1;
-		/* It turns out that similar situation can happen with
-		 * struct/union sometimes, sigh... Handle the case where
-		 * structs/unions are exactly the same, down to the referenced
-		 * type IDs. Anything more complicated (e.g., if referenced
-		 * types are different, but equivalent) is *way more*
-		 * complicated and requires a many-to-many equivalence mapping.
-		 */
-		if (btf_dedup_identical_structs(d, hypot_type_id, cand_id))
+		if (btf_dedup_identical_types(d, hypot_type_id, cand_id, 16))
 			return 1;
 		return 0;
 	}
@@ -5268,7 +5381,10 @@ struct btf *btf__load_vmlinux_btf(void)
 		pr_warn("kernel BTF is missing at '%s', was CONFIG_DEBUG_INFO_BTF enabled?\n",
 			sysfs_btf_path);
 	} else {
-		btf = btf__parse(sysfs_btf_path, NULL);
+		btf = btf_parse_raw_mmap(sysfs_btf_path, NULL);
+		if (IS_ERR(btf))
+			btf = btf__parse(sysfs_btf_path, NULL);
+
 		if (!btf) {
			err = -errno;
 			pr_warn("failed to read kernel BTF from '%s': %s\n",
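With the mmap path above, btf__load_vmlinux_btf() now first tries a read-only mmap of /sys/kernel/btf/vmlinux (btf_parse_raw_mmap()) and only falls back to the heap-copying btf__parse() if that fails; btf__free() unmaps or frees as appropriate. Nothing changes for callers, as in this usage sketch:

#include <errno.h>
#include <stdio.h>
#include <bpf/btf.h>

int main(void)
{
	struct btf *vmlinux_btf;

	/* Now typically backed by a read-only mmap of /sys/kernel/btf/vmlinux;
	 * the API and the cleanup path are the same as before. */
	vmlinux_btf = btf__load_vmlinux_btf();
	if (!vmlinux_btf) {
		fprintf(stderr, "failed to load vmlinux BTF: %d\n", -errno);
		return 1;
	}

	printf("vmlinux BTF has %u types\n", btf__type_cnt(vmlinux_btf));

	btf__free(vmlinux_btf);		/* munmap()s or free()s as appropriate */
	return 0;
}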
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 6b85060f07b3..e9c641a2fb20 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -60,6 +60,8 @@
 #define BPF_FS_MAGIC		0xcafe4a11
 #endif
 
+#define MAX_EVENT_NAME_LEN	64
+
 #define BPF_FS_DEFAULT_PATH "/sys/fs/bpf"
 
 #define BPF_INSN_SZ (sizeof(struct bpf_insn))
@@ -284,7 +286,7 @@ void libbpf_print(enum libbpf_print_level level, const char *format, ...)
 	old_errno = errno;
 
 	va_start(args, format);
-	__libbpf_pr(level, format, args);
+	print_fn(level, format, args);
 	va_end(args);
 
 	errno = old_errno;
@@ -896,7 +898,7 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
 			return -LIBBPF_ERRNO__FORMAT;
 		}
 
-		if (sec_off + prog_sz > sec_sz) {
+		if (sec_off + prog_sz > sec_sz || sec_off + prog_sz < sec_off) {
 			pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
 				sec_name, sec_off);
 			return -LIBBPF_ERRNO__FORMAT;
@@ -1725,15 +1727,6 @@ static Elf64_Sym *find_elf_var_sym(const struct bpf_object *obj, const char *nam
 	return ERR_PTR(-ENOENT);
 }
 
-/* Some versions of Android don't provide memfd_create() in their libc
- * implementation, so avoid complications and just go straight to Linux
- * syscall.
- */
-static int sys_memfd_create(const char *name, unsigned flags)
-{
-	return syscall(__NR_memfd_create, name, flags);
-}
-
 #ifndef MFD_CLOEXEC
 #define MFD_CLOEXEC 0x0001U
 #endif
@@ -9455,6 +9448,30 @@ int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log
 	return 0;
 }
 
+struct bpf_func_info *bpf_program__func_info(const struct bpf_program *prog)
+{
+	if (prog->func_info_rec_size != sizeof(struct bpf_func_info))
+		return libbpf_err_ptr(-EOPNOTSUPP);
+	return prog->func_info;
+}
+
+__u32 bpf_program__func_info_cnt(const struct bpf_program *prog)
+{
+	return prog->func_info_cnt;
+}
+
+struct bpf_line_info *bpf_program__line_info(const struct bpf_program *prog)
+{
+	if (prog->line_info_rec_size != sizeof(struct bpf_line_info))
+		return libbpf_err_ptr(-EOPNOTSUPP);
+	return prog->line_info;
+}
+
+__u32 bpf_program__line_info_cnt(const struct bpf_program *prog)
+{
+	return prog->line_info_cnt;
+}
+
 #define SEC_DEF(sec_pfx, ptype, atype, flags, ...) {			    \
 	.sec = (char *)sec_pfx,						    \
 	.prog_type = BPF_PROG_TYPE_##ptype,				    \
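The new getters expose a program's BTF func_info/line_info records; they fail (an -EOPNOTSUPP error pointer) when the record size does not match the struct layout libbpf was built against. A usage sketch that prints the BTF function behind each func_info record of an already-opened object (the helper name and the assumption that the object carries BTF are mine, not the patch's):

#include <stdio.h>
#include <linux/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>

/* Sketch: dump func_info records for every program in an opened object. */
static void dump_func_infos(struct bpf_object *obj)
{
	struct btf *btf = bpf_object__btf(obj);
	struct bpf_program *prog;

	if (!btf)
		return;

	bpf_object__for_each_program(prog, obj) {
		const struct bpf_func_info *fi = bpf_program__func_info(prog);
		__u32 i, cnt = bpf_program__func_info_cnt(prog);

		if (!fi)
			continue;	/* no func_info, or record size mismatch */

		for (i = 0; i < cnt; i++) {
			const struct btf_type *t = btf__type_by_id(btf, fi[i].type_id);

			if (!t)
				continue;
			printf("%s: insn #%u -> %s\n", bpf_program__name(prog),
			       fi[i].insn_off, btf__name_by_offset(btf, t->name_off));
		}
	}
}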
@@ -11121,16 +11138,16 @@ static const char *tracefs_available_filter_functions_addrs(void)
 			     : TRACEFS"/available_filter_functions_addrs";
 }
 
-static void gen_kprobe_legacy_event_name(char *buf, size_t buf_sz,
-					 const char *kfunc_name, size_t offset)
+static void gen_probe_legacy_event_name(char *buf, size_t buf_sz,
+					const char *name, size_t offset)
 {
 	static int index = 0;
 	int i;
 
-	snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx_%d", getpid(), kfunc_name, offset,
-		 __sync_fetch_and_add(&index, 1));
+	snprintf(buf, buf_sz, "libbpf_%u_%d_%s_0x%zx", getpid(),
+		 __sync_fetch_and_add(&index, 1), name, offset);
 
-	/* sanitize binary_path in the probe name */
+	/* sanitize name in the probe name */
 	for (i = 0; buf[i]; i++) {
 		if (!isalnum(buf[i]))
 			buf[i] = '_';
@@ -11255,9 +11272,9 @@ int probe_kern_syscall_wrapper(int token_fd)
 
 		return pfd >= 0 ? 1 : 0;
 	} else { /* legacy mode */
-		char probe_name[128];
+		char probe_name[MAX_EVENT_NAME_LEN];
 
-		gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name), syscall_name, 0);
+		gen_probe_legacy_event_name(probe_name, sizeof(probe_name), syscall_name, 0);
 		if (add_kprobe_event_legacy(probe_name, false, syscall_name, 0) < 0)
 			return 0;
@@ -11313,10 +11330,10 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
 					    func_name, offset,
 					    -1 /* pid */, 0 /* ref_ctr_off */);
 	} else {
-		char probe_name[256];
+		char probe_name[MAX_EVENT_NAME_LEN];
 
-		gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name),
-					     func_name, offset);
+		gen_probe_legacy_event_name(probe_name, sizeof(probe_name),
+					    func_name, offset);
 
 		legacy_probe = strdup(probe_name);
 		if (!legacy_probe)
@@ -11860,20 +11877,6 @@ static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, stru
 	return ret;
 }
 
-static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz,
-					 const char *binary_path, uint64_t offset)
-{
-	int i;
-
-	snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx", getpid(), binary_path, (size_t)offset);
-
-	/* sanitize binary_path in the probe name */
-	for (i = 0; buf[i]; i++) {
-		if (!isalnum(buf[i]))
-			buf[i] = '_';
-	}
-}
-
 static inline int add_uprobe_event_legacy(const char *probe_name, bool retprobe,
 					  const char *binary_path, size_t offset)
 {
@@ -12297,13 +12300,14 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
 		pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path,
 					    func_offset, pid, ref_ctr_off);
 	} else {
-		char probe_name[PATH_MAX + 64];
+		char probe_name[MAX_EVENT_NAME_LEN];
 
 		if (ref_ctr_off)
 			return libbpf_err_ptr(-EINVAL);
 
-		gen_uprobe_legacy_event_name(probe_name, sizeof(probe_name),
-					     binary_path, func_offset);
+		gen_probe_legacy_event_name(probe_name, sizeof(probe_name),
+					    strrchr(binary_path, '/') ? : binary_path,
+					    func_offset);
 
 		legacy_probe = strdup(probe_name);
 		if (!legacy_probe)
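The unified gen_probe_legacy_event_name() now puts the counter before the name, uses only the final path component for uprobes, and together with MAX_EVENT_NAME_LEN (64) keeps legacy tracefs event names within the kernel's limit, so attaching by a long binary path no longer fails in legacy mode. Nothing changes on the caller's side; a typical attach sketch (library path and symbol are placeholders):

#include <bpf/libbpf.h>

/* Usage sketch: attach an already-loaded program as a uprobe. */
static struct bpf_link *attach_malloc_uprobe(struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_uprobe_opts, opts,
		.func_name = "malloc",		/* libbpf resolves this to an offset */
		.retprobe = false,
	);

	/* On kernels without perf_event_open() uprobe support, libbpf falls back
	 * to tracefs and now generates event names of the form
	 * libbpf_<pid>_<idx>_<last path component>_0x<offset>, sanitized and
	 * capped at MAX_EVENT_NAME_LEN (64) bytes.
	 */
	return bpf_program__attach_uprobe_opts(prog, -1 /* any pid */,
					       "/usr/lib/x86_64-linux-gnu/libc.so.6",
					       0 /* offset, unused with func_name */,
					       &opts);
}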
@@ -13371,7 +13375,6 @@ struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
 	attr.config = PERF_COUNT_SW_BPF_OUTPUT;
 	attr.type = PERF_TYPE_SOFTWARE;
 	attr.sample_type = PERF_SAMPLE_RAW;
-	attr.sample_period = sample_period;
 	attr.wakeup_events = sample_period;
 
 	p.attr = &attr;
@@ -14099,6 +14102,12 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
 		}
 
 		link = map_skel->link;
+		if (!link) {
+			pr_warn("map '%s': BPF map skeleton link is uninitialized\n",
+				bpf_map__name(map));
+			continue;
+		}
+
 		if (*link)
 			continue;
 
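perf_buffer__new() now leaves attr.sample_period unset and relies on wakeup_events alone, and bpf_object__attach_skeleton() skips maps whose skeleton link pointer was never initialized instead of dereferencing it; both are internal fixes with unchanged calling conventions. For reference, a minimal perf buffer consumer sketch (the map fd and callback are assumed to come from an already-loaded object):

#include <errno.h>
#include <stdio.h>
#include <bpf/libbpf.h>

/* Sketch: consume samples from a BPF_MAP_TYPE_PERF_EVENT_ARRAY map. */
static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	printf("cpu %d: got %u bytes\n", cpu, size);
}

static int poll_events(int events_map_fd)
{
	struct perf_buffer *pb;
	int err;

	pb = perf_buffer__new(events_map_fd, 8 /* pages per CPU */,
			      on_sample, NULL /* lost_cb */, NULL /* ctx */, NULL);
	if (!pb)
		return -errno;

	while ((err = perf_buffer__poll(pb, 100 /* ms */)) >= 0)
		; /* samples are delivered to on_sample() */

	perf_buffer__free(pb);
	return err;
}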
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index e0605403f977..1137e7d2e1b5 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -940,6 +940,12 @@ LIBBPF_API int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_le
 LIBBPF_API const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size);
 LIBBPF_API int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size);
 
+LIBBPF_API struct bpf_func_info *bpf_program__func_info(const struct bpf_program *prog);
+LIBBPF_API __u32 bpf_program__func_info_cnt(const struct bpf_program *prog);
+
+LIBBPF_API struct bpf_line_info *bpf_program__line_info(const struct bpf_program *prog);
+LIBBPF_API __u32 bpf_program__line_info_cnt(const struct bpf_program *prog);
+
 /**
  * @brief **bpf_program__set_attach_target()** sets BTF-based attach target
  * for supported BPF program types:
@@ -1283,6 +1289,7 @@ enum bpf_tc_attach_point {
 	BPF_TC_INGRESS = 1 << 0,
 	BPF_TC_EGRESS  = 1 << 1,
 	BPF_TC_CUSTOM  = 1 << 2,
+	BPF_TC_QDISC   = 1 << 3,
 };
 
 #define BPF_TC_PARENT(a, b) 	\
@@ -1297,9 +1304,11 @@ struct bpf_tc_hook {
 	int ifindex;
 	enum bpf_tc_attach_point attach_point;
 	__u32 parent;
+	__u32 handle;
+	const char *qdisc;
 	size_t :0;
 };
-#define bpf_tc_hook__last_field parent
+#define bpf_tc_hook__last_field qdisc
 
 struct bpf_tc_opts {
 	size_t sz;
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index d8b71f22f197..1205f9a4fe04 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -437,6 +437,10 @@ LIBBPF_1.6.0 {
 		bpf_linker__add_fd;
 		bpf_linker__new_fd;
 		bpf_object__prepare;
+		bpf_program__func_info;
+		bpf_program__func_info_cnt;
+		bpf_program__line_info;
+		bpf_program__line_info_cnt;
 		btf__add_decl_attr;
 		btf__add_type_attr;
 } LIBBPF_1.5.0;
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index 76669c73dcd1..477a3b3389a0 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -667,6 +667,15 @@ static inline int sys_dup3(int oldfd, int newfd, int flags)
 	return syscall(__NR_dup3, oldfd, newfd, flags);
 }
 
+/* Some versions of Android don't provide memfd_create() in their libc
+ * implementation, so avoid complications and just go straight to Linux
+ * syscall.
+ */
+static inline int sys_memfd_create(const char *name, unsigned flags)
+{
+	return syscall(__NR_memfd_create, name, flags);
+}
+
 /* Point *fixed_fd* to the same file that *tmp_fd* points to.
  * Regardless of success, *tmp_fd* is closed.
  * Whatever *fixed_fd* pointed to is closed silently.
diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c
index 800e0ef09c37..a469e5d4fee7 100644
--- a/tools/lib/bpf/linker.c
+++ b/tools/lib/bpf/linker.c
@@ -573,7 +573,7 @@ int bpf_linker__add_buf(struct bpf_linker *linker, void *buf, size_t buf_sz,
 
 	snprintf(filename, sizeof(filename), "mem:%p+%zu", buf, buf_sz);
 
-	fd = memfd_create(filename, 0);
+	fd = sys_memfd_create(filename, 0);
 	if (fd < 0) {
 		ret = -errno;
 		pr_warn("failed to create memfd '%s': %s\n", filename, errstr(ret));
@@ -1376,7 +1376,7 @@ static int linker_append_sec_data(struct bpf_linker *linker, struct src_obj *obj
 		} else {
 			if (!secs_match(dst_sec, src_sec)) {
 				pr_warn("ELF sections %s are incompatible\n", src_sec->sec_name);
-				return -1;
+				return -EINVAL;
 			}
 
 			/* "license" and "version" sections are deduped */
@@ -2223,7 +2223,7 @@ static int linker_append_elf_relos(struct bpf_linker *linker, struct src_obj *ob
 			}
 		} else if (!secs_match(dst_sec, src_sec)) {
 			pr_warn("sections %s are not compatible\n", src_sec->sec_name);
-			return -1;
+			return -EINVAL;
 		}
 
 		/* shdr->sh_link points to SYMTAB */
diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c
index 68a2def17175..c997e69d507f 100644
--- a/tools/lib/bpf/netlink.c
+++ b/tools/lib/bpf/netlink.c
@@ -529,9 +529,9 @@ int bpf_xdp_query_id(int ifindex, int flags, __u32 *prog_id)
 }
 
 
-typedef int (*qdisc_config_t)(struct libbpf_nla_req *req);
+typedef int (*qdisc_config_t)(struct libbpf_nla_req *req, const struct bpf_tc_hook *hook);
 
-static int clsact_config(struct libbpf_nla_req *req)
+static int clsact_config(struct libbpf_nla_req *req, const struct bpf_tc_hook *hook)
 {
 	req->tc.tcm_parent = TC_H_CLSACT;
 	req->tc.tcm_handle = TC_H_MAKE(TC_H_CLSACT, 0);
@@ -539,6 +539,16 @@ static int clsact_config(struct libbpf_nla_req *req)
 	return nlattr_add(req, TCA_KIND, "clsact", sizeof("clsact"));
 }
 
+static int qdisc_config(struct libbpf_nla_req *req, const struct bpf_tc_hook *hook)
+{
+	const char *qdisc = OPTS_GET(hook, qdisc, NULL);
+
+	req->tc.tcm_parent = OPTS_GET(hook, parent, TC_H_ROOT);
+	req->tc.tcm_handle = OPTS_GET(hook, handle, 0);
+
+	return nlattr_add(req, TCA_KIND, qdisc, strlen(qdisc) + 1);
+}
+
 static int attach_point_to_config(struct bpf_tc_hook *hook,
 				  qdisc_config_t *config)
 {
@@ -552,6 +562,9 @@ static int attach_point_to_config(struct bpf_tc_hook *hook,
 		return 0;
 	case BPF_TC_CUSTOM:
 		return -EOPNOTSUPP;
+	case BPF_TC_QDISC:
+		*config = &qdisc_config;
+		return 0;
 	default:
 		return -EINVAL;
 	}
@@ -596,7 +609,7 @@ static int tc_qdisc_modify(struct bpf_tc_hook *hook, int cmd, int flags)
 	req.tc.tcm_family  = AF_UNSPEC;
 	req.tc.tcm_ifindex = OPTS_GET(hook, ifindex, 0);
 
-	ret = config(&req);
+	ret = config(&req, hook);
 	if (ret < 0)
 		return ret;
 
@@ -639,6 +652,7 @@ int bpf_tc_hook_destroy(struct bpf_tc_hook *hook)
 	case BPF_TC_INGRESS:
 	case BPF_TC_EGRESS:
 		return libbpf_err(__bpf_tc_detach(hook, NULL, true));
+	case BPF_TC_QDISC:
 	case BPF_TC_INGRESS | BPF_TC_EGRESS:
 		return libbpf_err(tc_qdisc_delete(hook));
 	case BPF_TC_CUSTOM:
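BPF_TC_QDISC together with the new handle/qdisc fields of struct bpf_tc_hook lets bpf_tc_hook_create()/bpf_tc_hook_destroy() add and delete a qdisc by kind name rather than only clsact. A sketch of how this might be used (interface name, handle value and qdisc kind are placeholders):

#include <net/if.h>
#include <linux/pkt_sched.h>
#include <bpf/libbpf.h>

/* Sketch: create (and later tear down) a root qdisc by name through the
 * extended bpf_tc_hook.
 */
static int create_root_fq_qdisc(const char *ifname)
{
	LIBBPF_OPTS(bpf_tc_hook, hook,
		.ifindex = if_nametoindex(ifname),
		.attach_point = BPF_TC_QDISC,
		.parent = TC_H_ROOT,
		.handle = 0x8000000,		/* qdisc handle "800:" */
		.qdisc = "fq",
	);
	int err;

	err = bpf_tc_hook_create(&hook);
	if (err)
		return err;

	/* ... attach/run BPF programs, pass traffic, etc. ... */

	return bpf_tc_hook_destroy(&hook);	/* BPF_TC_QDISC deletes the qdisc */
}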
diff --git a/tools/lib/bpf/nlattr.c b/tools/lib/bpf/nlattr.c
index 975e265eab3b..06663f9ea581 100644
--- a/tools/lib/bpf/nlattr.c
+++ b/tools/lib/bpf/nlattr.c
@@ -63,16 +63,16 @@ static int validate_nla(struct nlattr *nla, int maxtype,
 		minlen = nla_attr_minlen[pt->type];
 
 	if (libbpf_nla_len(nla) < minlen)
-		return -1;
+		return -EINVAL;
 
 	if (pt->maxlen && libbpf_nla_len(nla) > pt->maxlen)
-		return -1;
+		return -EINVAL;
 
 	if (pt->type == LIBBPF_NLA_STRING) {
 		char *data = libbpf_nla_data(nla);
 
 		if (data[libbpf_nla_len(nla) - 1] != '\0')
-			return -1;
+			return -EINVAL;
 	}
 
 	return 0;
@@ -118,19 +118,18 @@ int libbpf_nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head,
 		if (policy) {
 			err = validate_nla(nla, maxtype, policy);
 			if (err < 0)
-				goto errout;
+				return err;
 		}
 
-		if (tb[type])
+		if (tb[type]) {
 			pr_warn("Attribute of type %#x found multiple times in message, "
 				"previous attribute is being ignored.\n", type);
+		}
 
 		tb[type] = nla;
 	}
 
-	err = 0;
-errout:
-	return err;
+	return 0;
 }
 
 /**
