author     David S. Miller <davem@davemloft.net>    2019-03-04 21:14:31 +0300
committer  David S. Miller <davem@davemloft.net>    2019-03-04 21:14:31 +0300
commit     f7fb7c1a1c8f86005d34f28278524213c521f761
tree       05a3b21c5e0b1667b106153fc0f0eb88cd980ab2 /tools
parent     8c4238df4d0cc3420c5ee14b54d200d74267cfe5
parent     87dab7c3d54ce0f1ff6b54840bf7279d0944bc6a
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:
====================
pull-request: bpf-next 2019-03-04
The following pull-request contains BPF updates for your *net-next* tree.
The main changes are:
1) Add AF_XDP support to libbpf. The rationale is to facilitate
   writing AF_XDP applications by offering higher-level APIs that
   hide many of the details of the AF_XDP uapi (first sketch below).
   Sample programs are converted over to this new interface as well,
   from Magnus.
2) Introduce a new cant_sleep() macro for annotating functions that
   cannot sleep, and use it in BPF_PROG_RUN() to assert that BPF
   programs are run with preemption disabled, from Peter.
3) Introduce per-program BPF stats in order to monitor BPF usage;
   collection is controlled by the kernel.bpf_stats_enabled sysctl
   knob, and monitoring tools can use the counters to efficiently
   determine the average cost of programs (second sketch below),
   from Alexei.
4) Split up the BPF selftest test_progs in the same way we already
   did with test_verifier. This allows us to further reduce merge
   conflicts in the future and brings more structure into our
   quickly growing BPF selftest suite, from Stanislav.
5) Fix a bug in BTF's dedup algorithm which can cause an infinite
   loop in some circumstances; also various BPF doc fixes and
   improvements (see the third sketch below for the new dedup table
   size option), from Andrii.
6) Various BPF sample cleanups and migration to libbpf in order
to further isolate the old sample loader code (so we can get
rid of it at some point), from Jakub.
7) Add a new BPF helper for BPF cgroup skb progs that allows setting
   the ECN CE code point, and a Host Bandwidth Manager (HBM) sample
   program for limiting the bandwidth used by v2 cgroups (fourth
   sketch below), from Lawrence.
8) Enable write access to skb->queue_mapping from tc BPF egress
   programs in order to let BPF pick the TX queue (fifth sketch
   below), from Jesper.
9) Fix a bug in BPF spinlock handling for map-in-map which did
not propagate spin_lock_off to the meta map, from Yonghong.
10) Fix a bug in the new per-CPU BPF prog counters to properly
initialize stats for each CPU, from Eric.
11) Add various BPF helper prototypes to selftest's bpf_helpers.h,
from Willem.
12) Fix various BPF samples bugs in XDP and tracing progs,
from Toke, Daniel and Yonghong.
13) Silence a preemption splat in test_bpf now that BPF_PROG_RUN()
    enforces preemption-disabled context everywhere, from Anders.
14) Fix a signedness bug in libbpf's btf_dedup_ref_type() to
get error handling working, from Dan.
15) Fix bpftool documentation and auto-completion with regard to
    stream_{verdict,parser} attach types, from Alban.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
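First sketch: a minimal AF_XDP RX loop against the new xsk API from
tools/lib/bpf/xsk.h in the diff below. This is a sketch only: error
handling is elided, the include path assumes the in-tree header, and
rx_loop() and NUM_FRAMES are illustrative names. Note that this
version of xsk_socket__create() requires both an RX and a TX ring,
and that NULL configs select the defaults.

/* Minimal AF_XDP RX sketch; error handling elided. */
#include <stdlib.h>
#include <unistd.h>
#include "xsk.h"        /* tools/lib/bpf/xsk.h */

#define NUM_FRAMES XSK_RING_PROD__DEFAULT_NUM_DESCS    /* 2048 */

static void rx_loop(const char *ifname)
{
        struct xsk_ring_prod fill, tx;
        struct xsk_ring_cons comp, rx;
        struct xsk_umem *umem;
        struct xsk_socket *xsk;
        void *bufs;
        size_t rcvd;
        __u32 idx;
        int i;

        /* UMEM area must be page aligned; default frame size is 2048 */
        posix_memalign(&bufs, getpagesize(),
                       NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE);
        xsk_umem__create(&umem, bufs,
                         NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE,
                         &fill, &comp, NULL);

        /* Hand every frame to the kernel through the fill ring */
        xsk_ring_prod__reserve(&fill, NUM_FRAMES, &idx);
        for (i = 0; i < NUM_FRAMES; i++)
                *xsk_ring_prod__fill_addr(&fill, idx + i) =
                        (__u64)i * XSK_UMEM__DEFAULT_FRAME_SIZE;
        xsk_ring_prod__submit(&fill, NUM_FRAMES);

        /* Bind to queue 0; this also loads the built-in XDP program */
        xsk_socket__create(&xsk, ifname, 0, umem, &rx, &tx, NULL);

        for (;;) {
                rcvd = xsk_ring_cons__peek(&rx, 64, &idx);
                for (i = 0; i < (int)rcvd; i++) {
                        const struct xdp_desc *desc =
                                xsk_ring_cons__rx_desc(&rx, idx + i);
                        void *pkt = xsk_umem__get_data(bufs, desc->addr);

                        (void)pkt;      /* ... process desc->len bytes ... */
                }
                if (rcvd)
                        xsk_ring_cons__release(&rx, rcvd);
        }
}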
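Second sketch: deriving the average cost of a loaded program from the
new run_time_ns/run_cnt fields added to struct bpf_prog_info below.
Assumes kernel.bpf_stats_enabled is set to 1 and that prog_fd refers
to an already-loaded program; print_avg_cost() is an illustrative
name.

/* Average-cost readout from the new per-prog counters; sketch only. */
#include <stdio.h>
#include <linux/bpf.h>
#include "bpf.h"        /* tools/lib/bpf/bpf.h */

static void print_avg_cost(int prog_fd)
{
        struct bpf_prog_info info = {};
        __u32 len = sizeof(info);

        if (bpf_obj_get_info_by_fd(prog_fd, &info, &len))
                return;

        /* run_cnt stays 0 unless kernel.bpf_stats_enabled=1 */
        if (info.run_cnt)
                printf("%s: %llu runs, avg %llu ns/run\n", info.name,
                       (unsigned long long)info.run_cnt,
                       (unsigned long long)info.run_time_ns / info.run_cnt);
}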
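Third sketch: passing the new dedup_table_size option to BTF dedup.
btf__dedup() itself is not visible in this tools excerpt, so its use
here is an assumption; the requested size is rounded up to a power of
two and capped internally (see roundup_pow2_max() in the btf.c hunk
below).

/* Dedup with an enlarged hash table; sketch under the above assumptions. */
#include "btf.h"        /* tools/lib/bpf/btf.h */

static int dedup_with_big_table(struct btf *btf, struct btf_ext *btf_ext)
{
        struct btf_dedup_opts opts = {
                .dedup_table_size = 1 << 16,    /* rounded/capped internally */
                .dont_resolve_fwds = false,
        };

        return btf__dedup(btf, btf_ext, &opts);
}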
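Fourth sketch: a cgroup skb egress program exercising the new
bpf_skb_ecn_set_ce() helper, using the prototype added to the
selftests' bpf_helpers.h below. The real HBM sample gates the marking
on a token-bucket over-limit check, which is omitted here.

/* Unconditionally try to mark ECT packets CE on cgroup egress. */
#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("cgroup_skb/egress")
int set_ce_on_egress(struct __sk_buff *skb)
{
        /* Returns 1 if the packet was ECT and CE got set, 0 otherwise */
        bpf_skb_ecn_set_ce(skb);
        return 1;       /* 1 == allow the packet */
}

char _license[] SEC("license") = "GPL";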
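Fifth sketch: a tc egress (clsact) program picking a TX queue through
the now-writable skb->queue_mapping. The kernel-side change is outside
this tools-only diff, so the exact steering semantics are an
assumption; queue index 3 is arbitrary.

/* Steer all matched egress traffic to one TX queue; sketch only. */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include "bpf_helpers.h"

SEC("classifier")
int pick_tx_queue(struct __sk_buff *skb)
{
        skb->queue_mapping = 3;         /* arbitrary queue index */
        return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";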
Diffstat (limited to 'tools')
52 files changed, 3411 insertions, 2031 deletions
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst index 7e59495cb028..9386bd6e0396 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst @@ -42,7 +42,7 @@ PROG COMMANDS | **cgroup/connect4** | **cgroup/connect6** | **cgroup/sendmsg4** | **cgroup/sendmsg6** | } | *ATTACH_TYPE* := { -| **msg_verdict** | **skb_verdict** | **skb_parse** | **flow_dissector** +| **msg_verdict** | **stream_verdict** | **stream_parser** | **flow_dissector** | } @@ -171,7 +171,7 @@ EXAMPLES :: - 10: xdp name some_prog tag 005a3d2123620c8b gpl + 10: xdp name some_prog tag 005a3d2123620c8b gpl run_time_ns 81632 run_cnt 10 loaded_at 2017-09-29T20:11:00+0000 uid 0 xlated 528B jited 370B memlock 4096B map_ids 10 @@ -184,6 +184,8 @@ EXAMPLES "type": "xdp", "tag": "005a3d2123620c8b", "gpl_compatible": true, + "run_time_ns": 81632, + "run_cnt": 10, "loaded_at": 1506715860, "uid": 0, "bytes_xlated": 528, diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool index 763dd12482aa..b803827d01e8 100644 --- a/tools/bpf/bpftool/bash-completion/bpftool +++ b/tools/bpf/bpftool/bash-completion/bpftool @@ -311,8 +311,8 @@ _bpftool() return 0 ;; 5) - COMPREPLY=( $( compgen -W 'msg_verdict skb_verdict \ - skb_parse flow_dissector' -- "$cur" ) ) + COMPREPLY=( $( compgen -W 'msg_verdict stream_verdict \ + stream_parser flow_dissector' -- "$cur" ) ) return 0 ;; 6) diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 33ed0806ccc0..8ef80d65a474 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -214,6 +214,10 @@ static void print_prog_json(struct bpf_prog_info *info, int fd) info->tag[4], info->tag[5], info->tag[6], info->tag[7]); jsonw_bool_field(json_wtr, "gpl_compatible", info->gpl_compatible); + if (info->run_time_ns) { + jsonw_uint_field(json_wtr, "run_time_ns", info->run_time_ns); + jsonw_uint_field(json_wtr, "run_cnt", info->run_cnt); + } print_dev_json(info->ifindex, info->netns_dev, info->netns_ino); @@ -277,6 +281,9 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd) fprint_hex(stdout, info->tag, BPF_TAG_SIZE, ""); print_dev_plain(info->ifindex, info->netns_dev, info->netns_ino); printf("%s", info->gpl_compatible ? 
" gpl" : ""); + if (info->run_time_ns) + printf(" run_time_ns %lld run_cnt %lld", + info->run_time_ns, info->run_cnt); printf("\n"); if (info->load_time) { @@ -1046,7 +1053,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only) j = 0; while (j < old_map_fds && map_replace[j].name) { i = 0; - bpf_map__for_each(map, obj) { + bpf_object__for_each_map(map, obj) { if (!strcmp(bpf_map__name(map), map_replace[j].name)) { map_replace[j].idx = i; break; @@ -1067,7 +1074,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only) /* Set ifindex and name reuse */ j = 0; idx = 0; - bpf_map__for_each(map, obj) { + bpf_object__for_each_map(map, obj) { if (!bpf_map__is_offload_neutral(map)) bpf_map__set_ifindex(map, ifindex); @@ -1199,7 +1206,7 @@ static int do_help(int argc, char **argv) " cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n" " cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n" " cgroup/sendmsg4 | cgroup/sendmsg6 }\n" - " ATTACH_TYPE := { msg_verdict | skb_verdict | skb_parse |\n" + " ATTACH_TYPE := { msg_verdict | stream_verdict | stream_parser |\n" " flow_dissector }\n" " " HELP_SPEC_OPTIONS "\n" "", diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index bcdd2474eee7..3c38ac9a92a7 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -2359,6 +2359,13 @@ union bpf_attr { * Return * A **struct bpf_tcp_sock** pointer on success, or NULL in * case of failure. + * + * int bpf_skb_ecn_set_ce(struct sk_buf *skb) + * Description + * Sets ECN of IP header to ce (congestion encountered) if + * current value is ect (ECN capable). Works with IPv6 and IPv4. + * Return + * 1 if set, 0 if not set. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -2457,7 +2464,8 @@ union bpf_attr { FN(spin_lock), \ FN(spin_unlock), \ FN(sk_fullsock), \ - FN(tcp_sock), + FN(tcp_sock), \ + FN(skb_ecn_set_ce), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call @@ -2813,6 +2821,8 @@ struct bpf_prog_info { __u32 jited_line_info_rec_size; __u32 nr_prog_tags; __aligned_u64 prog_tags; + __u64 run_time_ns; + __u64 run_cnt; } __attribute__((aligned(8))); struct bpf_map_info { diff --git a/tools/include/uapi/linux/ethtool.h b/tools/include/uapi/linux/ethtool.h new file mode 100644 index 000000000000..c86c3e942df9 --- /dev/null +++ b/tools/include/uapi/linux/ethtool.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * ethtool.h: Defines for Linux ethtool. + * + * Copyright (C) 1998 David S. Miller (davem@redhat.com) + * Copyright 2001 Jeff Garzik <jgarzik@pobox.com> + * Portions Copyright 2001 Sun Microsystems (thockin@sun.com) + * Portions Copyright 2002 Intel (eli.kupermann@intel.com, + * christopher.leech@intel.com, + * scott.feldman@intel.com) + * Portions Copyright (C) Sun Microsystems 2008 + */ + +#ifndef _UAPI_LINUX_ETHTOOL_H +#define _UAPI_LINUX_ETHTOOL_H + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/if_ether.h> + +#define ETHTOOL_GCHANNELS 0x0000003c /* Get no of channels */ + +/** + * struct ethtool_channels - configuring number of network channel + * @cmd: ETHTOOL_{G,S}CHANNELS + * @max_rx: Read only. Maximum number of receive channel the driver support. + * @max_tx: Read only. Maximum number of transmit channel the driver support. + * @max_other: Read only. Maximum number of other channel the driver support. + * @max_combined: Read only. 
Maximum number of combined channel the driver + * support. Set of queues RX, TX or other. + * @rx_count: Valid values are in the range 1 to the max_rx. + * @tx_count: Valid values are in the range 1 to the max_tx. + * @other_count: Valid values are in the range 1 to the max_other. + * @combined_count: Valid values are in the range 1 to the max_combined. + * + * This can be used to configure RX, TX and other channels. + */ + +struct ethtool_channels { + __u32 cmd; + __u32 max_rx; + __u32 max_tx; + __u32 max_other; + __u32 max_combined; + __u32 rx_count; + __u32 tx_count; + __u32 other_count; + __u32 combined_count; +}; + +#endif /* _UAPI_LINUX_ETHTOOL_H */ diff --git a/tools/include/uapi/linux/if_xdp.h b/tools/include/uapi/linux/if_xdp.h new file mode 100644 index 000000000000..caed8b1614ff --- /dev/null +++ b/tools/include/uapi/linux/if_xdp.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * if_xdp: XDP socket user-space interface + * Copyright(c) 2018 Intel Corporation. + * + * Author(s): Björn Töpel <bjorn.topel@intel.com> + * Magnus Karlsson <magnus.karlsson@intel.com> + */ + +#ifndef _LINUX_IF_XDP_H +#define _LINUX_IF_XDP_H + +#include <linux/types.h> + +/* Options for the sxdp_flags field */ +#define XDP_SHARED_UMEM (1 << 0) +#define XDP_COPY (1 << 1) /* Force copy-mode */ +#define XDP_ZEROCOPY (1 << 2) /* Force zero-copy mode */ + +struct sockaddr_xdp { + __u16 sxdp_family; + __u16 sxdp_flags; + __u32 sxdp_ifindex; + __u32 sxdp_queue_id; + __u32 sxdp_shared_umem_fd; +}; + +struct xdp_ring_offset { + __u64 producer; + __u64 consumer; + __u64 desc; +}; + +struct xdp_mmap_offsets { + struct xdp_ring_offset rx; + struct xdp_ring_offset tx; + struct xdp_ring_offset fr; /* Fill */ + struct xdp_ring_offset cr; /* Completion */ +}; + +/* XDP socket options */ +#define XDP_MMAP_OFFSETS 1 +#define XDP_RX_RING 2 +#define XDP_TX_RING 3 +#define XDP_UMEM_REG 4 +#define XDP_UMEM_FILL_RING 5 +#define XDP_UMEM_COMPLETION_RING 6 +#define XDP_STATISTICS 7 + +struct xdp_umem_reg { + __u64 addr; /* Start of packet data area */ + __u64 len; /* Length of packet data area */ + __u32 chunk_size; + __u32 headroom; +}; + +struct xdp_statistics { + __u64 rx_dropped; /* Dropped for reasons other than invalid desc */ + __u64 rx_invalid_descs; /* Dropped due to invalid descriptor */ + __u64 tx_invalid_descs; /* Dropped due to invalid descriptor */ +}; + +/* Pgoff for mmaping the rings */ +#define XDP_PGOFF_RX_RING 0 +#define XDP_PGOFF_TX_RING 0x80000000 +#define XDP_UMEM_PGOFF_FILL_RING 0x100000000ULL +#define XDP_UMEM_PGOFF_COMPLETION_RING 0x180000000ULL + +/* Rx/Tx descriptor */ +struct xdp_desc { + __u64 addr; + __u32 len; + __u32 options; +}; + +/* UMEM descriptor is __u64 */ + +#endif /* _LINUX_IF_XDP_H */ diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build index bfd9bfc82c3b..ee9d5362f35b 100644 --- a/tools/lib/bpf/Build +++ b/tools/lib/bpf/Build @@ -1 +1 @@ -libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o netlink.o bpf_prog_linfo.o libbpf_probes.o +libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o netlink.o bpf_prog_linfo.o libbpf_probes.o xsk.o diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile index 847916273696..a05c43468bd0 100644 --- a/tools/lib/bpf/Makefile +++ b/tools/lib/bpf/Makefile @@ -132,9 +132,9 @@ BPF_IN := $(OUTPUT)libbpf-in.o LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE)) VERSION_SCRIPT := libbpf.map -GLOBAL_SYM_COUNT = $(shell readelf -s $(BPF_IN) | \ +GLOBAL_SYM_COUNT = $(shell readelf -s --wide 
$(BPF_IN) | \ awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {s++} END{print s}') -VERSIONED_SYM_COUNT = $(shell readelf -s $(OUTPUT)libbpf.so | \ +VERSIONED_SYM_COUNT = $(shell readelf -s --wide $(OUTPUT)libbpf.so | \ grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l) CMD_TARGETS = $(LIB_FILE) @@ -164,6 +164,9 @@ $(BPF_IN): force elfdep bpfdep @(test -f ../../include/uapi/linux/if_link.h -a -f ../../../include/uapi/linux/if_link.h && ( \ (diff -B ../../include/uapi/linux/if_link.h ../../../include/uapi/linux/if_link.h >/dev/null) || \ echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_link.h' differs from latest version at 'include/uapi/linux/if_link.h'" >&2 )) || true + @(test -f ../../include/uapi/linux/if_xdp.h -a -f ../../../include/uapi/linux/if_xdp.h && ( \ + (diff -B ../../include/uapi/linux/if_xdp.h ../../../include/uapi/linux/if_xdp.h >/dev/null) || \ + echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true $(Q)$(MAKE) $(build)=libbpf $(OUTPUT)libbpf.so: $(BPF_IN) @@ -174,7 +177,7 @@ $(OUTPUT)libbpf.a: $(BPF_IN) $(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^ $(OUTPUT)test_libbpf: test_libbpf.cpp $(OUTPUT)libbpf.a - $(QUIET_LINK)$(CXX) $^ -lelf -o $@ + $(QUIET_LINK)$(CXX) $(INCLUDES) $^ -lelf -o $@ check: check_abi diff --git a/tools/lib/bpf/README.rst b/tools/lib/bpf/README.rst index 607aae40f4ed..5788479384ca 100644 --- a/tools/lib/bpf/README.rst +++ b/tools/lib/bpf/README.rst @@ -9,7 +9,7 @@ described here. It's recommended to follow these conventions whenever a new function or type is added to keep libbpf API clean and consistent. All types and functions provided by libbpf API should have one of the -following prefixes: ``bpf_``, ``btf_``, ``libbpf_``. +following prefixes: ``bpf_``, ``btf_``, ``libbpf_``, ``xsk_``. System call wrappers -------------------- @@ -62,6 +62,19 @@ Auxiliary functions and types that don't fit well in any of categories described above should have ``libbpf_`` prefix, e.g. ``libbpf_get_error`` or ``libbpf_prog_type_by_name``. +AF_XDP functions +------------------- + +AF_XDP functions should have an ``xsk_`` prefix, e.g. +``xsk_umem__get_data`` or ``xsk_umem__create``. The interface consists +of both low-level ring access functions and high-level configuration +functions. These can be mixed and matched. Note that these functions +are not reentrant for performance reasons. + +Please take a look at Documentation/networking/af_xdp.rst in the Linux +kernel source tree on how to use XDP sockets and for some common +mistakes in case you do not get any traffic up to user space. 
+ libbpf ABI ========== diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 68b50e9bbde1..1b8d8cdd3575 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -1070,8 +1070,8 @@ done: return err; } -#define BTF_DEDUP_TABLE_SIZE_LOG 14 -#define BTF_DEDUP_TABLE_MOD ((1 << BTF_DEDUP_TABLE_SIZE_LOG) - 1) +#define BTF_DEDUP_TABLE_DEFAULT_SIZE (1 << 14) +#define BTF_DEDUP_TABLE_MAX_SIZE_LOG 31 #define BTF_UNPROCESSED_ID ((__u32)-1) #define BTF_IN_PROGRESS_ID ((__u32)-2) @@ -1128,18 +1128,21 @@ static inline __u32 hash_combine(__u32 h, __u32 value) #undef GOLDEN_RATIO_PRIME } -#define for_each_hash_node(table, hash, node) \ - for (node = table[hash & BTF_DEDUP_TABLE_MOD]; node; node = node->next) +#define for_each_dedup_cand(d, hash, node) \ + for (node = d->dedup_table[hash & (d->opts.dedup_table_size - 1)]; \ + node; \ + node = node->next) static int btf_dedup_table_add(struct btf_dedup *d, __u32 hash, __u32 type_id) { struct btf_dedup_node *node = malloc(sizeof(struct btf_dedup_node)); + int bucket = hash & (d->opts.dedup_table_size - 1); if (!node) return -ENOMEM; node->type_id = type_id; - node->next = d->dedup_table[hash & BTF_DEDUP_TABLE_MOD]; - d->dedup_table[hash & BTF_DEDUP_TABLE_MOD] = node; + node->next = d->dedup_table[bucket]; + d->dedup_table[bucket] = node; return 0; } @@ -1177,7 +1180,7 @@ static void btf_dedup_table_free(struct btf_dedup *d) if (!d->dedup_table) return; - for (i = 0; i < (1 << BTF_DEDUP_TABLE_SIZE_LOG); i++) { + for (i = 0; i < d->opts.dedup_table_size; i++) { while (d->dedup_table[i]) { tmp = d->dedup_table[i]; d->dedup_table[i] = tmp->next; @@ -1212,19 +1215,37 @@ static void btf_dedup_free(struct btf_dedup *d) free(d); } +/* Find closest power of two >= to size, capped at 2^max_size_log */ +static __u32 roundup_pow2_max(__u32 size, int max_size_log) +{ + int i; + + for (i = 0; i < max_size_log && (1U << i) < size; i++) + ; + return 1U << i; +} + + static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext, const struct btf_dedup_opts *opts) { struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup)); int i, err = 0; + __u32 sz; if (!d) return ERR_PTR(-ENOMEM); + d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds; + sz = opts && opts->dedup_table_size ? opts->dedup_table_size + : BTF_DEDUP_TABLE_DEFAULT_SIZE; + sz = roundup_pow2_max(sz, BTF_DEDUP_TABLE_MAX_SIZE_LOG); + d->opts.dedup_table_size = sz; + d->btf = btf; d->btf_ext = btf_ext; - d->dedup_table = calloc(1 << BTF_DEDUP_TABLE_SIZE_LOG, + d->dedup_table = calloc(d->opts.dedup_table_size, sizeof(struct btf_dedup_node *)); if (!d->dedup_table) { err = -ENOMEM; @@ -1249,8 +1270,6 @@ static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext, for (i = 0; i <= btf->nr_types; i++) d->hypot_map[i] = BTF_UNPROCESSED_ID; - d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds; - done: if (err) { btf_dedup_free(d); @@ -1644,7 +1663,7 @@ static __u32 btf_hash_struct(struct btf_type *t) * IDs. This check is performed during type graph equivalence check and * referenced types equivalence is checked separately. 
*/ -static bool btf_equal_struct(struct btf_type *t1, struct btf_type *t2) +static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2) { struct btf_member *m1, *m2; __u16 vlen; @@ -1824,7 +1843,7 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id) case BTF_KIND_INT: h = btf_hash_int(t); - for_each_hash_node(d->dedup_table, h, cand_node) { + for_each_dedup_cand(d, h, cand_node) { cand = d->btf->types[cand_node->type_id]; if (btf_equal_int(t, cand)) { new_id = cand_node->type_id; @@ -1835,7 +1854,7 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id) case BTF_KIND_ENUM: h = btf_hash_enum(t); - for_each_hash_node(d->dedup_table, h, cand_node) { + for_each_dedup_cand(d, h, cand_node) { cand = d->btf->types[cand_node->type_id]; if (btf_equal_enum(t, cand)) { new_id = cand_node->type_id; @@ -1846,7 +1865,7 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id) case BTF_KIND_FWD: h = btf_hash_common(t); - for_each_hash_node(d->dedup_table, h, cand_node) { + for_each_dedup_cand(d, h, cand_node) { cand = d->btf->types[cand_node->type_id]; if (btf_equal_common(t, cand)) { new_id = cand_node->type_id; @@ -2105,7 +2124,7 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id, struct btf_member *cand_m, *canon_m; __u16 vlen; - if (!btf_equal_struct(cand_type, canon_type)) + if (!btf_shallow_equal_struct(cand_type, canon_type)) return 0; vlen = BTF_INFO_VLEN(cand_type->info); cand_m = (struct btf_member *)(cand_type + 1); @@ -2246,7 +2265,7 @@ static void btf_dedup_merge_hypot_map(struct btf_dedup *d) static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id) { struct btf_dedup_node *cand_node; - struct btf_type *t; + struct btf_type *cand_type, *t; /* if we don't find equivalent type, then we are canonical */ __u32 new_id = type_id; __u16 kind; @@ -2263,9 +2282,23 @@ static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id) return 0; h = btf_hash_struct(t); - for_each_hash_node(d->dedup_table, h, cand_node) { + for_each_dedup_cand(d, h, cand_node) { int eq; + /* + * Even though btf_dedup_is_equiv() checks for + * btf_shallow_equal_struct() internally when checking two + * structs (unions) for equivalence, we need to guard here + * from picking matching FWD type as a dedup candidate. + * This can happen due to hash collision. In such case just + * relying on btf_dedup_is_equiv() would lead to potentially + * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because + * FWD and compatible STRUCT/UNION are considered equivalent. 
+ */ + cand_type = d->btf->types[cand_node->type_id]; + if (!btf_shallow_equal_struct(t, cand_type)) + continue; + btf_dedup_clear_hypot_map(d); eq = btf_dedup_is_equiv(d, type_id, cand_node->type_id); if (eq < 0) @@ -2326,7 +2359,8 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id) struct btf_type *t, *cand; /* if we don't find equivalent type, then we are representative type */ __u32 new_id = type_id; - __u32 h, ref_type_id; + int ref_type_id; + __u32 h; if (d->map[type_id] == BTF_IN_PROGRESS_ID) return -ELOOP; @@ -2349,7 +2383,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id) t->type = ref_type_id; h = btf_hash_common(t); - for_each_hash_node(d->dedup_table, h, cand_node) { + for_each_dedup_cand(d, h, cand_node) { cand = d->btf->types[cand_node->type_id]; if (btf_equal_common(t, cand)) { new_id = cand_node->type_id; @@ -2372,7 +2406,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id) info->index_type = ref_type_id; h = btf_hash_array(t); - for_each_hash_node(d->dedup_table, h, cand_node) { + for_each_dedup_cand(d, h, cand_node) { cand = d->btf->types[cand_node->type_id]; if (btf_equal_array(t, cand)) { new_id = cand_node->type_id; @@ -2403,7 +2437,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id) } h = btf_hash_fnproto(t); - for_each_hash_node(d->dedup_table, h, cand_node) { + for_each_dedup_cand(d, h, cand_node) { cand = d->btf->types[cand_node->type_id]; if (btf_equal_fnproto(t, cand)) { new_id = cand_node->type_id; diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index 94bbc249b0f1..28a1e1e59861 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -76,7 +76,7 @@ LIBBPF_API int btf__get_map_kv_tids(const struct btf *btf, const char *map_name, LIBBPF_API struct btf_ext *btf_ext__new(__u8 *data, __u32 size); LIBBPF_API void btf_ext__free(struct btf_ext *btf_ext); -LIBBPF_API const void *btf_ext__get_raw_data(const struct btf_ext* btf_ext, +LIBBPF_API const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size); LIBBPF_API int btf_ext__reloc_func_info(const struct btf *btf, const struct btf_ext *btf_ext, @@ -90,6 +90,7 @@ LIBBPF_API __u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext); LIBBPF_API __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext); struct btf_dedup_opts { + unsigned int dedup_table_size; bool dont_resolve_fwds; }; diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index b38dcbe7460a..f5eb60379c8d 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -2100,7 +2100,7 @@ int bpf_object__pin_maps(struct bpf_object *obj, const char *path) if (err) return err; - bpf_map__for_each(map, obj) { + bpf_object__for_each_map(map, obj) { char buf[PATH_MAX]; int len; @@ -2147,7 +2147,7 @@ int bpf_object__unpin_maps(struct bpf_object *obj, const char *path) if (!obj) return -ENOENT; - bpf_map__for_each(map, obj) { + bpf_object__for_each_map(map, obj) { char buf[PATH_MAX]; int len; @@ -2835,7 +2835,7 @@ bpf_object__find_map_by_name(struct bpf_object *obj, const char *name) { struct bpf_map *pos; - bpf_map__for_each(pos, obj) { + bpf_object__for_each_map(pos, obj) { if (pos->name && !strcmp(pos->name, name)) return pos; } @@ -2928,7 +2928,7 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, first_prog = prog; } - bpf_map__for_each(map, obj) { + bpf_object__for_each_map(map, obj) { if (!bpf_map__is_offload_neutral(map)) map->map_ifindex = attr->ifindex; } diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h 
index 6c0168f8bba5..b4652aa1a58a 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -278,10 +278,11 @@ bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset); LIBBPF_API struct bpf_map * bpf_map__next(struct bpf_map *map, struct bpf_object *obj); -#define bpf_map__for_each(pos, obj) \ +#define bpf_object__for_each_map(pos, obj) \ for ((pos) = bpf_map__next(NULL, (obj)); \ (pos) != NULL; \ (pos) = bpf_map__next((pos), (obj))) +#define bpf_map__for_each bpf_object__for_each_map LIBBPF_API struct bpf_map * bpf_map__prev(struct bpf_map *map, struct bpf_object *obj); diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 99dfa710c818..778a26702a70 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -147,4 +147,10 @@ LIBBPF_0.0.2 { btf_ext__new; btf_ext__reloc_func_info; btf_ext__reloc_line_info; + xsk_umem__create; + xsk_socket__create; + xsk_umem__delete; + xsk_socket__delete; + xsk_umem__fd; + xsk_socket__fd; } LIBBPF_0.0.1; diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c new file mode 100644 index 000000000000..f98ac82c9aea --- /dev/null +++ b/tools/lib/bpf/xsk.c @@ -0,0 +1,723 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +/* + * AF_XDP user-space access library. + * + * Copyright(c) 2018 - 2019 Intel Corporation. + * + * Author(s): Magnus Karlsson <magnus.karlsson@intel.com> + */ + +#include <errno.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#include <arpa/inet.h> +#include <asm/barrier.h> +#include <linux/compiler.h> +#include <linux/ethtool.h> +#include <linux/filter.h> +#include <linux/if_ether.h> +#include <linux/if_packet.h> +#include <linux/if_xdp.h> +#include <linux/sockios.h> +#include <net/if.h> +#include <sys/ioctl.h> +#include <sys/mman.h> +#include <sys/socket.h> +#include <sys/types.h> + +#include "bpf.h" +#include "libbpf.h" +#include "libbpf_util.h" +#include "xsk.h" + +#ifndef SOL_XDP + #define SOL_XDP 283 +#endif + +#ifndef AF_XDP + #define AF_XDP 44 +#endif + +#ifndef PF_XDP + #define PF_XDP AF_XDP +#endif + +struct xsk_umem { + struct xsk_ring_prod *fill; + struct xsk_ring_cons *comp; + char *umem_area; + struct xsk_umem_config config; + int fd; + int refcount; +}; + +struct xsk_socket { + struct xsk_ring_cons *rx; + struct xsk_ring_prod *tx; + __u64 outstanding_tx; + struct xsk_umem *umem; + struct xsk_socket_config config; + int fd; + int xsks_map; + int ifindex; + int prog_fd; + int qidconf_map_fd; + int xsks_map_fd; + __u32 queue_id; + char ifname[IFNAMSIZ]; +}; + +struct xsk_nl_info { + bool xdp_prog_attached; + int ifindex; + int fd; +}; + +/* For 32-bit systems, we need to use mmap2 as the offsets are 64-bit. + * Unfortunately, it is not part of glibc. + */ +static inline void *xsk_mmap(void *addr, size_t length, int prot, int flags, + int fd, __u64 offset) +{ +#ifdef __NR_mmap2 + unsigned int page_shift = __builtin_ffs(getpagesize()) - 1; + long ret = syscall(__NR_mmap2, addr, length, prot, flags, fd, + (off_t)(offset >> page_shift)); + + return (void *)ret; +#else + return mmap(addr, length, prot, flags, fd, offset); +#endif +} + +int xsk_umem__fd(const struct xsk_umem *umem) +{ + return umem ? umem->fd : -EINVAL; +} + +int xsk_socket__fd(const struct xsk_socket *xsk) +{ + return xsk ? 
xsk->fd : -EINVAL; +} + +static bool xsk_page_aligned(void *buffer) +{ + unsigned long addr = (unsigned long)buffer; + + return !(addr & (getpagesize() - 1)); +} + +static void xsk_set_umem_config(struct xsk_umem_config *cfg, + const struct xsk_umem_config *usr_cfg) +{ + if (!usr_cfg) { + cfg->fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS; + cfg->comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS; + cfg->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; + cfg->frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM; + return; + } + + cfg->fill_size = usr_cfg->fill_size; + cfg->comp_size = usr_cfg->comp_size; + cfg->frame_size = usr_cfg->frame_size; + cfg->frame_headroom = usr_cfg->frame_headroom; +} + +static void xsk_set_xdp_socket_config(struct xsk_socket_config *cfg, + const struct xsk_socket_config *usr_cfg) +{ + if (!usr_cfg) { + cfg->rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS; + cfg->tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS; + cfg->libbpf_flags = 0; + cfg->xdp_flags = 0; + cfg->bind_flags = 0; + return; + } + + cfg->rx_size = usr_cfg->rx_size; + cfg->tx_size = usr_cfg->tx_size; + cfg->libbpf_flags = usr_cfg->libbpf_flags; + cfg->xdp_flags = usr_cfg->xdp_flags; + cfg->bind_flags = usr_cfg->bind_flags; +} + +int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area, __u64 size, + struct xsk_ring_prod *fill, struct xsk_ring_cons *comp, + const struct xsk_umem_config *usr_config) +{ + struct xdp_mmap_offsets off; + struct xdp_umem_reg mr; + struct xsk_umem *umem; + socklen_t optlen; + void *map; + int err; + + if (!umem_area || !umem_ptr || !fill || !comp) + return -EFAULT; + if (!size && !xsk_page_aligned(umem_area)) + return -EINVAL; + + umem = calloc(1, sizeof(*umem)); + if (!umem) + return -ENOMEM; + + umem->fd = socket(AF_XDP, SOCK_RAW, 0); + if (umem->fd < 0) { + err = -errno; + goto out_umem_alloc; + } + + umem->umem_area = umem_area; + xsk_set_umem_config(&umem->config, usr_config); + + mr.addr = (uintptr_t)umem_area; + mr.len = size; + mr.chunk_size = umem->config.frame_size; + mr.headroom = umem->config.frame_headroom; + + err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr)); + if (err) { + err = -errno; + goto out_socket; + } + err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_FILL_RING, + &umem->config.fill_size, + sizeof(umem->config.fill_size)); + if (err) { + err = -errno; + goto out_socket; + } + err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, + &umem->config.comp_size, + sizeof(umem->config.comp_size)); + if (err) { + err = -errno; + goto out_socket; + } + + optlen = sizeof(off); + err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen); + if (err) { + err = -errno; + goto out_socket; + } + + map = xsk_mmap(NULL, off.fr.desc + + umem->config.fill_size * sizeof(__u64), + PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, + umem->fd, XDP_UMEM_PGOFF_FILL_RING); + if (map == MAP_FAILED) { + err = -errno; + goto out_socket; + } + + umem->fill = fill; + fill->mask = umem->config.fill_size - 1; + fill->size = umem->config.fill_size; + fill->producer = map + off.fr.producer; + fill->consumer = map + off.fr.consumer; + fill->ring = map + off.fr.desc; + fill->cached_cons = umem->config.fill_size; + + map = xsk_mmap(NULL, + off.cr.desc + umem->config.comp_size * sizeof(__u64), + PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, + umem->fd, XDP_UMEM_PGOFF_COMPLETION_RING); + if (map == MAP_FAILED) { + err = -errno; + goto out_mmap; + } + + umem->comp = comp; + comp->mask = umem->config.comp_size - 1; + comp->size = umem->config.comp_size; + comp->producer = 
map + off.cr.producer; + comp->consumer = map + off.cr.consumer; + comp->ring = map + off.cr.desc; + + *umem_ptr = umem; + return 0; + +out_mmap: + munmap(umem->fill, + off.fr.desc + umem->config.fill_size * sizeof(__u64)); +out_socket: + close(umem->fd); +out_umem_alloc: + free(umem); + return err; +} + +static int xsk_load_xdp_prog(struct xsk_socket *xsk) +{ + char bpf_log_buf[BPF_LOG_BUF_SIZE]; + int err, prog_fd; + + /* This is the C-program: + * SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx) + * { + * int *qidconf, index = ctx->rx_queue_index; + * + * // A set entry here means that the correspnding queue_id + * // has an active AF_XDP socket bound to it. + * qidconf = bpf_map_lookup_elem(&qidconf_map, &index); + * if (!qidconf) + * return XDP_ABORTED; + * + * if (*qidconf) + * return bpf_redirect_map(&xsks_map, index, 0); + * + * return XDP_PASS; + * } + */ + struct bpf_insn prog[] = { + /* r1 = *(u32 *)(r1 + 16) */ + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 16), + /* *(u32 *)(r10 - 4) = r1 */ + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_1, -4), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), + BPF_LD_MAP_FD(BPF_REG_1, xsk->qidconf_map_fd), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV32_IMM(BPF_REG_0, 0), + /* if r1 == 0 goto +8 */ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8), + BPF_MOV32_IMM(BPF_REG_0, 2), + /* r1 = *(u32 *)(r1 + 0) */ + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 0), + /* if r1 == 0 goto +5 */ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 5), + /* r2 = *(u32 *)(r10 - 4) */ + BPF_LD_MAP_FD(BPF_REG_1, xsk->xsks_map_fd), + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, -4), + BPF_MOV32_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_redirect_map), + /* The jumps are to this instruction */ + BPF_EXIT_INSN(), + }; + size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn); + + prog_fd = bpf_load_program(BPF_PROG_TYPE_XDP, prog, insns_cnt, + "LGPL-2.1 or BSD-2-Clause", 0, bpf_log_buf, + BPF_LOG_BUF_SIZE); + if (prog_fd < 0) { + pr_warning("BPF log buffer:\n%s", bpf_log_buf); + return prog_fd; + } + + err = bpf_set_link_xdp_fd(xsk->ifindex, prog_fd, xsk->config.xdp_flags); + if (err) { + close(prog_fd); + return err; + } + + xsk->prog_fd = prog_fd; + return 0; +} + +static int xsk_get_max_queues(struct xsk_socket *xsk) +{ + struct ethtool_channels channels; + struct ifreq ifr; + int fd, err, ret; + + fd = socket(AF_INET, SOCK_DGRAM, 0); + if (fd < 0) + return -errno; + + channels.cmd = ETHTOOL_GCHANNELS; + ifr.ifr_data = (void *)&channels; + strncpy(ifr.ifr_name, xsk->ifname, IFNAMSIZ); + err = ioctl(fd, SIOCETHTOOL, &ifr); + if (err && errno != EOPNOTSUPP) { + ret = -errno; + goto out; + } + + if (channels.max_combined == 0 || errno == EOPNOTSUPP) + /* If the device says it has no channels, then all traffic + * is sent to a single stream, so max queues = 1. 
+ */ + ret = 1; + else + ret = channels.max_combined; + +out: + close(fd); + return ret; +} + +static int xsk_create_bpf_maps(struct xsk_socket *xsk) +{ + int max_queues; + int fd; + + max_queues = xsk_get_max_queues(xsk); + if (max_queues < 0) + return max_queues; + + fd = bpf_create_map_name(BPF_MAP_TYPE_ARRAY, "qidconf_map", + sizeof(int), sizeof(int), max_queues, 0); + if (fd < 0) + return fd; + xsk->qidconf_map_fd = fd; + + fd = bpf_create_map_name(BPF_MAP_TYPE_XSKMAP, "xsks_map", + sizeof(int), sizeof(int), max_queues, 0); + if (fd < 0) { + close(xsk->qidconf_map_fd); + return fd; + } + xsk->xsks_map_fd = fd; + + return 0; +} + +static void xsk_delete_bpf_maps(struct xsk_socket *xsk) +{ + close(xsk->qidconf_map_fd); + close(xsk->xsks_map_fd); +} + +static int xsk_update_bpf_maps(struct xsk_socket *xsk, int qidconf_value, + int xsks_value) +{ + bool qidconf_map_updated = false, xsks_map_updated = false; + struct bpf_prog_info prog_info = {}; + __u32 prog_len = sizeof(prog_info); + struct bpf_map_info map_info; + __u32 map_len = sizeof(map_info); + __u32 *map_ids; + int reset_value = 0; + __u32 num_maps; + unsigned int i; + int err; + + err = bpf_obj_get_info_by_fd(xsk->prog_fd, &prog_info, &prog_len); + if (err) + return err; + + num_maps = prog_info.nr_map_ids; + + map_ids = calloc(prog_info.nr_map_ids, sizeof(*map_ids)); + if (!map_ids) + return -ENOMEM; + + memset(&prog_info, 0, prog_len); + prog_info.nr_map_ids = num_maps; + prog_info.map_ids = (__u64)(unsigned long)map_ids; + + err = bpf_obj_get_info_by_fd(xsk->prog_fd, &prog_info, &prog_len); + if (err) + goto out_map_ids; + + for (i = 0; i < prog_info.nr_map_ids; i++) { + int fd; + + fd = bpf_map_get_fd_by_id(map_ids[i]); + if (fd < 0) { + err = -errno; + goto out_maps; + } + + err = bpf_obj_get_info_by_fd(fd, &map_info, &map_len); + if (err) + goto out_maps; + + if (!strcmp(map_info.name, "qidconf_map")) { + err = bpf_map_update_elem(fd, &xsk->queue_id, + &qidconf_value, 0); + if (err) + goto out_maps; + qidconf_map_updated = true; + xsk->qidconf_map_fd = fd; + } else if (!strcmp(map_info.name, "xsks_map")) { + err = bpf_map_update_elem(fd, &xsk->queue_id, + &xsks_value, 0); + if (err) + goto out_maps; + xsks_map_updated = true; + xsk->xsks_map_fd = fd; + } + + if (qidconf_map_updated && xsks_map_updated) + break; + } + + if (!(qidconf_map_updated && xsks_map_updated)) { + err = -ENOENT; + goto out_maps; + } + + err = 0; + goto out_success; + +out_maps: + if (qidconf_map_updated) + (void)bpf_map_update_elem(xsk->qidconf_map_fd, &xsk->queue_id, + &reset_value, 0); + if (xsks_map_updated) + (void)bpf_map_update_elem(xsk->xsks_map_fd, &xsk->queue_id, + &reset_value, 0); +out_success: + if (qidconf_map_updated) + close(xsk->qidconf_map_fd); + if (xsks_map_updated) + close(xsk->xsks_map_fd); +out_map_ids: + free(map_ids); + return err; +} + +static int xsk_setup_xdp_prog(struct xsk_socket *xsk) +{ + bool prog_attached = false; + __u32 prog_id = 0; + int err; + + err = bpf_get_link_xdp_id(xsk->ifindex, &prog_id, + xsk->config.xdp_flags); + if (err) + return err; + + if (!prog_id) { + prog_attached = true; + err = xsk_create_bpf_maps(xsk); + if (err) + return err; + + err = xsk_load_xdp_prog(xsk); + if (err) + goto out_maps; + } else { + xsk->prog_fd = bpf_prog_get_fd_by_id(prog_id); + } + + err = xsk_update_bpf_maps(xsk, true, xsk->fd); + if (err) + goto out_load; + + return 0; + +out_load: + if (prog_attached) + close(xsk->prog_fd); +out_maps: + if (prog_attached) + xsk_delete_bpf_maps(xsk); + return err; +} + +int 
xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname, + __u32 queue_id, struct xsk_umem *umem, + struct xsk_ring_cons *rx, struct xsk_ring_prod *tx, + const struct xsk_socket_config *usr_config) +{ + struct sockaddr_xdp sxdp = {}; + struct xdp_mmap_offsets off; + struct xsk_socket *xsk; + socklen_t optlen; + void *map; + int err; + + if (!umem || !xsk_ptr || !rx || !tx) + return -EFAULT; + + if (umem->refcount) { + pr_warning("Error: shared umems not supported by libbpf.\n"); + return -EBUSY; + } + + xsk = calloc(1, sizeof(*xsk)); + if (!xsk) + return -ENOMEM; + + if (umem->refcount++ > 0) { + xsk->fd = socket(AF_XDP, SOCK_RAW, 0); + if (xsk->fd < 0) { + err = -errno; + goto out_xsk_alloc; + } + } else { + xsk->fd = umem->fd; + } + + xsk->outstanding_tx = 0; + xsk->queue_id = queue_id; + xsk->umem = umem; + xsk->ifindex = if_nametoindex(ifname); + if (!xsk->ifindex) { + err = -errno; + goto out_socket; + } + strncpy(xsk->ifname, ifname, IFNAMSIZ); + + xsk_set_xdp_socket_config(&xsk->config, usr_config); + + if (rx) { + err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING, + &xsk->config.rx_size, + sizeof(xsk->config.rx_size)); + if (err) { + err = -errno; + goto out_socket; + } + } + if (tx) { + err = setsockopt(xsk->fd, SOL_XDP, XDP_TX_RING, + &xsk->config.tx_size, + sizeof(xsk->config.tx_size)); + if (err) { + err = -errno; + goto out_socket; + } + } + + optlen = sizeof(off); + err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen); + if (err) { + err = -errno; + goto out_socket; + } + + if (rx) { + map = xsk_mmap(NULL, off.rx.desc + + xsk->config.rx_size * sizeof(struct xdp_desc), + PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_POPULATE, + xsk->fd, XDP_PGOFF_RX_RING); + if (map == MAP_FAILED) { + err = -errno; + goto out_socket; + } + + rx->mask = xsk->config.rx_size - 1; + rx->size = xsk->config.rx_size; + rx->producer = map + off.rx.producer; + rx->consumer = map + off.rx.consumer; + rx->ring = map + off.rx.desc; + } + xsk->rx = rx; + + if (tx) { + map = xsk_mmap(NULL, off.tx.desc + + xsk->config.tx_size * sizeof(struct xdp_desc), + PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_POPULATE, + xsk->fd, XDP_PGOFF_TX_RING); + if (map == MAP_FAILED) { + err = -errno; + goto out_mmap_rx; + } + + tx->mask = xsk->config.tx_size - 1; + tx->size = xsk->config.tx_size; + tx->producer = map + off.tx.producer; + tx->consumer = map + off.tx.consumer; + tx->ring = map + off.tx.desc; + tx->cached_cons = xsk->config.tx_size; + } + xsk->tx = tx; + + sxdp.sxdp_family = PF_XDP; + sxdp.sxdp_ifindex = xsk->ifindex; + sxdp.sxdp_queue_id = xsk->queue_id; + sxdp.sxdp_flags = xsk->config.bind_flags; + + err = bind(xsk->fd, (struct sockaddr *)&sxdp, sizeof(sxdp)); + if (err) { + err = -errno; + goto out_mmap_tx; + } + + if (!(xsk->config.libbpf_flags & XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)) { + err = xsk_setup_xdp_prog(xsk); + if (err) + goto out_mmap_tx; + } + + *xsk_ptr = xsk; + return 0; + +out_mmap_tx: + if (tx) + munmap(xsk->tx, + off.tx.desc + + xsk->config.tx_size * sizeof(struct xdp_desc)); +out_mmap_rx: + if (rx) + munmap(xsk->rx, + off.rx.desc + + xsk->config.rx_size * sizeof(struct xdp_desc)); +out_socket: + if (--umem->refcount) + close(xsk->fd); +out_xsk_alloc: + free(xsk); + return err; +} + +int xsk_umem__delete(struct xsk_umem *umem) +{ + struct xdp_mmap_offsets off; + socklen_t optlen; + int err; + + if (!umem) + return 0; + + if (umem->refcount) + return -EBUSY; + + optlen = sizeof(off); + err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen); + if (!err) { + 
munmap(umem->fill->ring, + off.fr.desc + umem->config.fill_size * sizeof(__u64)); + munmap(umem->comp->ring, + off.cr.desc + umem->config.comp_size * sizeof(__u64)); + } + + close(umem->fd); + free(umem); + + return 0; +} + +void xsk_socket__delete(struct xsk_socket *xsk) +{ + struct xdp_mmap_offsets off; + socklen_t optlen; + int err; + + if (!xsk) + return; + + (void)xsk_update_bpf_maps(xsk, 0, 0); + + optlen = sizeof(off); + err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen); + if (!err) { + if (xsk->rx) + munmap(xsk->rx->ring, + off.rx.desc + + xsk->config.rx_size * sizeof(struct xdp_desc)); + if (xsk->tx) + munmap(xsk->tx->ring, + off.tx.desc + + xsk->config.tx_size * sizeof(struct xdp_desc)); + } + + xsk->umem->refcount--; + /* Do not close an fd that also has an associated umem connected + * to it. + */ + if (xsk->fd != xsk->umem->fd) + close(xsk->fd); + free(xsk); +} diff --git a/tools/lib/bpf/xsk.h b/tools/lib/bpf/xsk.h new file mode 100644 index 000000000000..a497f00e2962 --- /dev/null +++ b/tools/lib/bpf/xsk.h @@ -0,0 +1,203 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ + +/* + * AF_XDP user-space access library. + * + * Copyright(c) 2018 - 2019 Intel Corporation. + * + * Author(s): Magnus Karlsson <magnus.karlsson@intel.com> + */ + +#ifndef __LIBBPF_XSK_H +#define __LIBBPF_XSK_H + +#include <stdio.h> +#include <stdint.h> +#include <linux/if_xdp.h> + +#include "libbpf.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Do not access these members directly. Use the functions below. */ +#define DEFINE_XSK_RING(name) \ +struct name { \ + __u32 cached_prod; \ + __u32 cached_cons; \ + __u32 mask; \ + __u32 size; \ + __u32 *producer; \ + __u32 *consumer; \ + void *ring; \ +} + +DEFINE_XSK_RING(xsk_ring_prod); +DEFINE_XSK_RING(xsk_ring_cons); + +struct xsk_umem; +struct xsk_socket; + +static inline __u64 *xsk_ring_prod__fill_addr(struct xsk_ring_prod *fill, + __u32 idx) +{ + __u64 *addrs = (__u64 *)fill->ring; + + return &addrs[idx & fill->mask]; +} + +static inline const __u64 * +xsk_ring_cons__comp_addr(const struct xsk_ring_cons *comp, __u32 idx) +{ + const __u64 *addrs = (const __u64 *)comp->ring; + + return &addrs[idx & comp->mask]; +} + +static inline struct xdp_desc *xsk_ring_prod__tx_desc(struct xsk_ring_prod *tx, + __u32 idx) +{ + struct xdp_desc *descs = (struct xdp_desc *)tx->ring; + + return &descs[idx & tx->mask]; +} + +static inline const struct xdp_desc * +xsk_ring_cons__rx_desc(const struct xsk_ring_cons *rx, __u32 idx) +{ + const struct xdp_desc *descs = (const struct xdp_desc *)rx->ring; + + return &descs[idx & rx->mask]; +} + +static inline __u32 xsk_prod_nb_free(struct xsk_ring_prod *r, __u32 nb) +{ + __u32 free_entries = r->cached_cons - r->cached_prod; + + if (free_entries >= nb) + return free_entries; + + /* Refresh the local tail pointer. + * cached_cons is r->size bigger than the real consumer pointer so + * that this addition can be avoided in the more frequently + * executed code that computs free_entries in the beginning of + * this function. Without this optimization it whould have been + * free_entries = r->cached_prod - r->cached_cons + r->size. + */ + r->cached_cons = *r->consumer + r->size; + + return r->cached_cons - r->cached_prod; +} + +static inline __u32 xsk_cons_nb_avail(struct xsk_ring_cons *r, __u32 nb) +{ + __u32 entries = r->cached_prod - r->cached_cons; + + if (entries == 0) { + r->cached_prod = *r->producer; + entries = r->cached_prod - r->cached_cons; + } + + return (entries > nb) ? 
nb : entries; +} + +static inline size_t xsk_ring_prod__reserve(struct xsk_ring_prod *prod, + size_t nb, __u32 *idx) +{ + if (unlikely(xsk_prod_nb_free(prod, nb) < nb)) + return 0; + + *idx = prod->cached_prod; + prod->cached_prod += nb; + + return nb; +} + +static inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, size_t nb) +{ + /* Make sure everything has been written to the ring before signalling + * this to the kernel. + */ + smp_wmb(); + + *prod->producer += nb; +} + +static inline size_t xsk_ring_cons__peek(struct xsk_ring_cons *cons, + size_t nb, __u32 *idx) +{ + size_t entries = xsk_cons_nb_avail(cons, nb); + + if (likely(entries > 0)) { + /* Make sure we do not speculatively read the data before + * we have received the packet buffers from the ring. + */ + smp_rmb(); + + *idx = cons->cached_cons; + cons->cached_cons += entries; + } + + return entries; +} + +static inline void xsk_ring_cons__release(struct xsk_ring_cons *cons, size_t nb) +{ + *cons->consumer += nb; +} + +static inline void *xsk_umem__get_data(void *umem_area, __u64 addr) +{ + return &((char *)umem_area)[addr]; +} + +LIBBPF_API int xsk_umem__fd(const struct xsk_umem *umem); +LIBBPF_API int xsk_socket__fd(const struct xsk_socket *xsk); + +#define XSK_RING_CONS__DEFAULT_NUM_DESCS 2048 +#define XSK_RING_PROD__DEFAULT_NUM_DESCS 2048 +#define XSK_UMEM__DEFAULT_FRAME_SHIFT 11 /* 2048 bytes */ +#define XSK_UMEM__DEFAULT_FRAME_SIZE (1 << XSK_UMEM__DEFAULT_FRAME_SHIFT) +#define XSK_UMEM__DEFAULT_FRAME_HEADROOM 0 + +struct xsk_umem_config { + __u32 fill_size; + __u32 comp_size; + __u32 frame_size; + __u32 frame_headroom; +}; + +/* Flags for the libbpf_flags field. */ +#define XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD (1 << 0) + +struct xsk_socket_config { + __u32 rx_size; + __u32 tx_size; + __u32 libbpf_flags; + __u32 xdp_flags; + __u16 bind_flags; +}; + +/* Set config to NULL to get the default configuration. */ +LIBBPF_API int xsk_umem__create(struct xsk_umem **umem, + void *umem_area, __u64 size, + struct xsk_ring_prod *fill, + struct xsk_ring_cons *comp, + const struct xsk_umem_config *config); +LIBBPF_API int xsk_socket__create(struct xsk_socket **xsk, + const char *ifname, __u32 queue_id, + struct xsk_umem *umem, + struct xsk_ring_cons *rx, + struct xsk_ring_prod *tx, + const struct xsk_socket_config *config); + +/* Returns 0 for success and -EBUSY if the umem is still in use. 
*/ +LIBBPF_API int xsk_umem__delete(struct xsk_umem *umem); +LIBBPF_API void xsk_socket__delete(struct xsk_socket *xsk); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* __LIBBPF_XSK_H */ diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c index 037d8ff6a634..31b7e5a1453b 100644 --- a/tools/perf/util/bpf-loader.c +++ b/tools/perf/util/bpf-loader.c @@ -1489,7 +1489,7 @@ apply_obj_config_object(struct bpf_object *obj) struct bpf_map *map; int err; - bpf_map__for_each(map, obj) { + bpf_object__for_each_map(map, obj) { err = apply_obj_config_map(map); if (err) return err; @@ -1513,7 +1513,7 @@ int bpf__apply_obj_config(void) #define bpf__for_each_map(pos, obj, objtmp) \ bpf_object__for_each_safe(obj, objtmp) \ - bpf_map__for_each(pos, obj) + bpf_object__for_each_map(pos, obj) #define bpf__for_each_map_named(pos, obj, objtmp, name) \ bpf__for_each_map(pos, obj, objtmp) \ diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore index e47168d1257d..3b74d23fffab 100644 --- a/tools/testing/selftests/bpf/.gitignore +++ b/tools/testing/selftests/bpf/.gitignore @@ -14,6 +14,7 @@ feature test_libbpf_open test_sock test_sock_addr +test_sock_fields urandom_read test_btf test_sockmap diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index ccffaa0a0787..518cd587cd63 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -165,7 +165,11 @@ $(ALU32_BUILD_DIR)/urandom_read: $(OUTPUT)/urandom_read $(ALU32_BUILD_DIR)/test_progs_32: test_progs.c $(ALU32_BUILD_DIR) \ $(ALU32_BUILD_DIR)/urandom_read $(CC) $(CFLAGS) -o $(ALU32_BUILD_DIR)/test_progs_32 $< \ - trace_helpers.c $(OUTPUT)/libbpf.a $(LDLIBS) + trace_helpers.c prog_tests/*.c $(OUTPUT)/libbpf.a $(LDLIBS) + +$(ALU32_BUILD_DIR)/test_progs_32: $(PROG_TESTS_H) +$(ALU32_BUILD_DIR)/test_progs_32: CFLAGS += -I$(OUTPUT) +$(ALU32_BUILD_DIR)/test_progs_32: prog_tests/*.c $(ALU32_BUILD_DIR)/%.o: progs/%.c $(ALU32_BUILD_DIR) \ $(ALU32_BUILD_DIR)/test_progs_32 @@ -196,6 +200,25 @@ ifeq ($(DWARF2BTF),y) $(BTF_PAHOLE) -J $@ endif +PROG_TESTS_H := $(OUTPUT)/prog_tests/tests.h +$(OUTPUT)/test_progs: $(PROG_TESTS_H) +$(OUTPUT)/test_progs: CFLAGS += -I$(OUTPUT) +$(OUTPUT)/test_progs: prog_tests/*.c + +PROG_TESTS_FILES := $(wildcard prog_tests/*.c) +$(PROG_TESTS_H): $(PROG_TESTS_FILES) + $(shell ( cd prog_tests/ + echo '/* Generated header, do not edit */'; \ + echo '#ifdef DECLARE'; \ + ls *.c 2> /dev/null | \ + sed -e 's@\([^\.]*\)\.c@extern void test_\1(void);@'; \ + echo '#endif'; \ + echo '#ifdef CALL'; \ + ls *.c 2> /dev/null | \ + sed -e 's@\([^\.]*\)\.c@test_\1();@'; \ + echo '#endif' \ + ) > $(PROG_TESTS_H)) + VERIFIER_TESTS_H := $(OUTPUT)/verifier/tests.h $(OUTPUT)/test_verifier: $(VERIFIER_TESTS_H) $(OUTPUT)/test_verifier: CFLAGS += -I$(OUTPUT) @@ -211,4 +234,4 @@ $(OUTPUT)/verifier/tests.h: $(VERIFIER_TEST_FILES) ) > $(VERIFIER_TESTS_H)) EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(ALU32_BUILD_DIR) \ - $(VERIFIER_TESTS_H) + $(VERIFIER_TESTS_H) $(PROG_TESTS_H) diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h index d9999f1ed1d2..c9433a496d54 100644 --- a/tools/testing/selftests/bpf/bpf_helpers.h +++ b/tools/testing/selftests/bpf/bpf_helpers.h @@ -180,6 +180,8 @@ static struct bpf_sock *(*bpf_sk_fullsock)(struct bpf_sock *sk) = (void *) BPF_FUNC_sk_fullsock; static struct bpf_tcp_sock *(*bpf_tcp_sock)(struct bpf_sock *sk) = (void *) BPF_FUNC_tcp_sock; +static int 
(*bpf_skb_ecn_set_ce)(void *ctx) = + (void *) BPF_FUNC_skb_ecn_set_ce; /* llvm builtin functions that eBPF C program may use to * emit BPF_LD_ABS and BPF_LD_IND instructions @@ -232,6 +234,36 @@ static int (*bpf_skb_change_head)(void *, int len, int flags) = (void *) BPF_FUNC_skb_change_head; static int (*bpf_skb_pull_data)(void *, int len) = (void *) BPF_FUNC_skb_pull_data; +static unsigned int (*bpf_get_cgroup_classid)(void *ctx) = + (void *) BPF_FUNC_get_cgroup_classid; +static unsigned int (*bpf_get_route_realm)(void *ctx) = + (void *) BPF_FUNC_get_route_realm; +static int (*bpf_skb_change_proto)(void *ctx, __be16 proto, __u64 flags) = + (void *) BPF_FUNC_skb_change_proto; +static int (*bpf_skb_change_type)(void *ctx, __u32 type) = + (void *) BPF_FUNC_skb_change_type; +static unsigned int (*bpf_get_hash_recalc)(void *ctx) = + (void *) BPF_FUNC_get_hash_recalc; +static unsigned long long (*bpf_get_current_task)(void *ctx) = + (void *) BPF_FUNC_get_current_task; +static int (*bpf_skb_change_tail)(void *ctx, __u32 len, __u64 flags) = + (void *) BPF_FUNC_skb_change_tail; +static long long (*bpf_csum_update)(void *ctx, __u32 csum) = + (void *) BPF_FUNC_csum_update; +static void (*bpf_set_hash_invalid)(void *ctx) = + (void *) BPF_FUNC_set_hash_invalid; +static int (*bpf_get_numa_node_id)(void) = + (void *) BPF_FUNC_get_numa_node_id; +static int (*bpf_probe_read_str)(void *ctx, __u32 size, + const void *unsafe_ptr) = + (void *) BPF_FUNC_probe_read_str; +static unsigned int (*bpf_get_socket_uid)(void *ctx) = + (void *) BPF_FUNC_get_socket_uid; +static unsigned int (*bpf_set_hash)(void *ctx, __u32 hash) = + (void *) BPF_FUNC_set_hash; +static int (*bpf_skb_adjust_room)(void *ctx, __s32 len_diff, __u32 mode, + unsigned long long flags) = + (void *) BPF_FUNC_skb_adjust_room; /* Scan the ARCH passed in from ARCH env variable (see Makefile) */ #if defined(__TARGET_ARCH_x86) diff --git a/tools/testing/selftests/bpf/prog_tests/.gitignore b/tools/testing/selftests/bpf/prog_tests/.gitignore new file mode 100644 index 000000000000..45984a364647 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/.gitignore @@ -0,0 +1 @@ +tests.h diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c new file mode 100644 index 000000000000..a64f7a02139c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c @@ -0,0 +1,249 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +void test_bpf_obj_id(void) +{ + const __u64 array_magic_value = 0xfaceb00c; + const __u32 array_key = 0; + const int nr_iters = 2; + const char *file = "./test_obj_id.o"; + const char *expected_prog_name = "test_obj_id"; + const char *expected_map_name = "test_map_id"; + const __u64 nsec_per_sec = 1000000000; + + struct bpf_object *objs[nr_iters]; + int prog_fds[nr_iters], map_fds[nr_iters]; + /* +1 to test for the info_len returned by kernel */ + struct bpf_prog_info prog_infos[nr_iters + 1]; + struct bpf_map_info map_infos[nr_iters + 1]; + /* Each prog only uses one map. +1 to test nr_map_ids + * returned by kernel. 
+ */ + __u32 map_ids[nr_iters + 1]; + char jited_insns[128], xlated_insns[128], zeros[128]; + __u32 i, next_id, info_len, nr_id_found, duration = 0; + struct timespec real_time_ts, boot_time_ts; + int err = 0; + __u64 array_value; + uid_t my_uid = getuid(); + time_t now, load_time; + + err = bpf_prog_get_fd_by_id(0); + CHECK(err >= 0 || errno != ENOENT, + "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno); + + err = bpf_map_get_fd_by_id(0); + CHECK(err >= 0 || errno != ENOENT, + "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno); + + for (i = 0; i < nr_iters; i++) + objs[i] = NULL; + + /* Check bpf_obj_get_info_by_fd() */ + bzero(zeros, sizeof(zeros)); + for (i = 0; i < nr_iters; i++) { + now = time(NULL); + err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER, + &objs[i], &prog_fds[i]); + /* test_obj_id.o is a dumb prog. It should never fail + * to load. + */ + if (err) + error_cnt++; + assert(!err); + + /* Insert a magic value to the map */ + map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id"); + assert(map_fds[i] >= 0); + err = bpf_map_update_elem(map_fds[i], &array_key, + &array_magic_value, 0); + assert(!err); + + /* Check getting map info */ + info_len = sizeof(struct bpf_map_info) * 2; + bzero(&map_infos[i], info_len); + err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i], + &info_len); + if (CHECK(err || + map_infos[i].type != BPF_MAP_TYPE_ARRAY || + map_infos[i].key_size != sizeof(__u32) || + map_infos[i].value_size != sizeof(__u64) || + map_infos[i].max_entries != 1 || + map_infos[i].map_flags != 0 || + info_len != sizeof(struct bpf_map_info) || + strcmp((char *)map_infos[i].name, expected_map_name), + "get-map-info(fd)", + "err %d errno %d type %d(%d) info_len %u(%Zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n", + err, errno, + map_infos[i].type, BPF_MAP_TYPE_ARRAY, + info_len, sizeof(struct bpf_map_info), + map_infos[i].key_size, + map_infos[i].value_size, + map_infos[i].max_entries, + map_infos[i].map_flags, + map_infos[i].name, expected_map_name)) + goto done; + + /* Check getting prog info */ + info_len = sizeof(struct bpf_prog_info) * 2; + bzero(&prog_infos[i], info_len); + bzero(jited_insns, sizeof(jited_insns)); + bzero(xlated_insns, sizeof(xlated_insns)); + prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns); + prog_infos[i].jited_prog_len = sizeof(jited_insns); + prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns); + prog_infos[i].xlated_prog_len = sizeof(xlated_insns); + prog_infos[i].map_ids = ptr_to_u64(map_ids + i); + prog_infos[i].nr_map_ids = 2; + err = clock_gettime(CLOCK_REALTIME, &real_time_ts); + assert(!err); + err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts); + assert(!err); + err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i], + &info_len); + load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec) + + (prog_infos[i].load_time / nsec_per_sec); + if (CHECK(err || + prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER || + info_len != sizeof(struct bpf_prog_info) || + (jit_enabled && !prog_infos[i].jited_prog_len) || + (jit_enabled && + !memcmp(jited_insns, zeros, sizeof(zeros))) || + !prog_infos[i].xlated_prog_len || + !memcmp(xlated_insns, zeros, sizeof(zeros)) || + load_time < now - 60 || load_time > now + 60 || + prog_infos[i].created_by_uid != my_uid || + prog_infos[i].nr_map_ids != 1 || + *(int *)(long)prog_infos[i].map_ids != map_infos[i].id || + strcmp((char *)prog_infos[i].name, expected_prog_name), + "get-prog-info(fd)", + "err %d errno %d i %d type %d(%d) info_len %u(%Zu) 
jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n", + err, errno, i, + prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER, + info_len, sizeof(struct bpf_prog_info), + jit_enabled, + prog_infos[i].jited_prog_len, + prog_infos[i].xlated_prog_len, + !!memcmp(jited_insns, zeros, sizeof(zeros)), + !!memcmp(xlated_insns, zeros, sizeof(zeros)), + load_time, now, + prog_infos[i].created_by_uid, my_uid, + prog_infos[i].nr_map_ids, 1, + *(int *)(long)prog_infos[i].map_ids, map_infos[i].id, + prog_infos[i].name, expected_prog_name)) + goto done; + } + + /* Check bpf_prog_get_next_id() */ + nr_id_found = 0; + next_id = 0; + while (!bpf_prog_get_next_id(next_id, &next_id)) { + struct bpf_prog_info prog_info = {}; + __u32 saved_map_id; + int prog_fd; + + info_len = sizeof(prog_info); + + prog_fd = bpf_prog_get_fd_by_id(next_id); + if (prog_fd < 0 && errno == ENOENT) + /* The bpf_prog is in the dead row */ + continue; + if (CHECK(prog_fd < 0, "get-prog-fd(next_id)", + "prog_fd %d next_id %d errno %d\n", + prog_fd, next_id, errno)) + break; + + for (i = 0; i < nr_iters; i++) + if (prog_infos[i].id == next_id) + break; + + if (i == nr_iters) + continue; + + nr_id_found++; + + /* Negative test: + * prog_info.nr_map_ids = 1 + * prog_info.map_ids = NULL + */ + prog_info.nr_map_ids = 1; + err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len); + if (CHECK(!err || errno != EFAULT, + "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)", + err, errno, EFAULT)) + break; + bzero(&prog_info, sizeof(prog_info)); + info_len = sizeof(prog_info); + + saved_map_id = *(int *)((long)prog_infos[i].map_ids); + prog_info.map_ids = prog_infos[i].map_ids; + prog_info.nr_map_ids = 2; + err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len); + prog_infos[i].jited_prog_insns = 0; + prog_infos[i].xlated_prog_insns = 0; + CHECK(err || info_len != sizeof(struct bpf_prog_info) || + memcmp(&prog_info, &prog_infos[i], info_len) || + *(int *)(long)prog_info.map_ids != saved_map_id, + "get-prog-info(next_id->fd)", + "err %d errno %d info_len %u(%Zu) memcmp %d map_id %u(%u)\n", + err, errno, info_len, sizeof(struct bpf_prog_info), + memcmp(&prog_info, &prog_infos[i], info_len), + *(int *)(long)prog_info.map_ids, saved_map_id); + close(prog_fd); + } + CHECK(nr_id_found != nr_iters, + "check total prog id found by get_next_id", + "nr_id_found %u(%u)\n", + nr_id_found, nr_iters); + + /* Check bpf_map_get_next_id() */ + nr_id_found = 0; + next_id = 0; + while (!bpf_map_get_next_id(next_id, &next_id)) { + struct bpf_map_info map_info = {}; + int map_fd; + + info_len = sizeof(map_info); + + map_fd = bpf_map_get_fd_by_id(next_id); + if (map_fd < 0 && errno == ENOENT) + /* The bpf_map is in the dead row */ + continue; + if (CHECK(map_fd < 0, "get-map-fd(next_id)", + "map_fd %d next_id %u errno %d\n", + map_fd, next_id, errno)) + break; + + for (i = 0; i < nr_iters; i++) + if (map_infos[i].id == next_id) + break; + + if (i == nr_iters) + continue; + + nr_id_found++; + + err = bpf_map_lookup_elem(map_fd, &array_key, &array_value); + assert(!err); + + err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len); + CHECK(err || info_len != sizeof(struct bpf_map_info) || + memcmp(&map_info, &map_infos[i], info_len) || + array_value != array_magic_value, + "check get-map-info(next_id->fd)", + "err %d errno %d info_len %u(%Zu) memcmp %d array_value %llu(%llu)\n", + err, errno, info_len, sizeof(struct bpf_map_info), + memcmp(&map_info, 
&map_infos[i], info_len), + array_value, array_magic_value); + + close(map_fd); + } + CHECK(nr_id_found != nr_iters, + "check total map id found by get_next_id", + "nr_id_found %u(%u)\n", + nr_id_found, nr_iters); + +done: + for (i = 0; i < nr_iters; i++) + bpf_object__close(objs[i]); +} diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c new file mode 100644 index 000000000000..bcbd928c96ab --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +#define CHECK_FLOW_KEYS(desc, got, expected) \ + CHECK(memcmp(&got, &expected, sizeof(got)) != 0, \ + desc, \ + "nhoff=%u/%u " \ + "thoff=%u/%u " \ + "addr_proto=0x%x/0x%x " \ + "is_frag=%u/%u " \ + "is_first_frag=%u/%u " \ + "is_encap=%u/%u " \ + "n_proto=0x%x/0x%x " \ + "sport=%u/%u " \ + "dport=%u/%u\n", \ + got.nhoff, expected.nhoff, \ + got.thoff, expected.thoff, \ + got.addr_proto, expected.addr_proto, \ + got.is_frag, expected.is_frag, \ + got.is_first_frag, expected.is_first_frag, \ + got.is_encap, expected.is_encap, \ + got.n_proto, expected.n_proto, \ + got.sport, expected.sport, \ + got.dport, expected.dport) + +static struct bpf_flow_keys pkt_v4_flow_keys = { + .nhoff = 0, + .thoff = sizeof(struct iphdr), + .addr_proto = ETH_P_IP, + .ip_proto = IPPROTO_TCP, + .n_proto = __bpf_constant_htons(ETH_P_IP), +}; + +static struct bpf_flow_keys pkt_v6_flow_keys = { + .nhoff = 0, + .thoff = sizeof(struct ipv6hdr), + .addr_proto = ETH_P_IPV6, + .ip_proto = IPPROTO_TCP, + .n_proto = __bpf_constant_htons(ETH_P_IPV6), +}; + +void test_flow_dissector(void) +{ + struct bpf_flow_keys flow_keys; + struct bpf_object *obj; + __u32 duration, retval; + int err, prog_fd; + __u32 size; + + err = bpf_flow_load(&obj, "./bpf_flow.o", "flow_dissector", + "jmp_table", &prog_fd); + if (err) { + error_cnt++; + return; + } + + err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4), + &flow_keys, &size, &retval, &duration); + CHECK(size != sizeof(flow_keys) || err || retval != 1, "ipv4", + "err %d errno %d retval %d duration %d size %u/%lu\n", + err, errno, retval, duration, size, sizeof(flow_keys)); + CHECK_FLOW_KEYS("ipv4_flow_keys", flow_keys, pkt_v4_flow_keys); + + err = bpf_prog_test_run(prog_fd, 10, &pkt_v6, sizeof(pkt_v6), + &flow_keys, &size, &retval, &duration); + CHECK(size != sizeof(flow_keys) || err || retval != 1, "ipv6", + "err %d errno %d retval %d duration %d size %u/%lu\n", + err, errno, retval, duration, size, sizeof(flow_keys)); + CHECK_FLOW_KEYS("ipv6_flow_keys", flow_keys, pkt_v6_flow_keys); + + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c new file mode 100644 index 000000000000..d7bb5beb1c57 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c @@ -0,0 +1,139 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +#define MAX_CNT_RAWTP 10ull +#define MAX_STACK_RAWTP 100 +struct get_stack_trace_t { + int pid; + int kern_stack_size; + int user_stack_size; + int user_stack_buildid_size; + __u64 kern_stack[MAX_STACK_RAWTP]; + __u64 user_stack[MAX_STACK_RAWTP]; + struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP]; +}; + +static int get_stack_print_output(void *data, int size) +{ + bool good_kern_stack = false, good_user_stack = false; + const char *nonjit_func = "___bpf_prog_run"; + struct get_stack_trace_t *e = 
data; + int i, num_stack; + static __u64 cnt; + struct ksym *ks; + + cnt++; + + if (size < sizeof(struct get_stack_trace_t)) { + __u64 *raw_data = data; + bool found = false; + + num_stack = size / sizeof(__u64); + /* If jit is enabled, we do not have a good way to + * verify the sanity of the kernel stack. So we + * just assume it is good if the stack is not empty. + * This could be improved in the future. + */ + if (jit_enabled) { + found = num_stack > 0; + } else { + for (i = 0; i < num_stack; i++) { + ks = ksym_search(raw_data[i]); + if (strcmp(ks->name, nonjit_func) == 0) { + found = true; + break; + } + } + } + if (found) { + good_kern_stack = true; + good_user_stack = true; + } + } else { + num_stack = e->kern_stack_size / sizeof(__u64); + if (jit_enabled) { + good_kern_stack = num_stack > 0; + } else { + for (i = 0; i < num_stack; i++) { + ks = ksym_search(e->kern_stack[i]); + if (strcmp(ks->name, nonjit_func) == 0) { + good_kern_stack = true; + break; + } + } + } + if (e->user_stack_size > 0 && e->user_stack_buildid_size > 0) + good_user_stack = true; + } + if (!good_kern_stack || !good_user_stack) + return LIBBPF_PERF_EVENT_ERROR; + + if (cnt == MAX_CNT_RAWTP) + return LIBBPF_PERF_EVENT_DONE; + + return LIBBPF_PERF_EVENT_CONT; +} + +void test_get_stack_raw_tp(void) +{ + const char *file = "./test_get_stack_rawtp.o"; + int i, efd, err, prog_fd, pmu_fd, perfmap_fd; + struct perf_event_attr attr = {}; + struct timespec tv = {0, 10}; + __u32 key = 0, duration = 0; + struct bpf_object *obj; + + err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd); + if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno)) + return; + + efd = bpf_raw_tracepoint_open("sys_enter", prog_fd); + if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno)) + goto close_prog; + + perfmap_fd = bpf_find_map(__func__, obj, "perfmap"); + if (CHECK(perfmap_fd < 0, "bpf_find_map", "err %d errno %d\n", + perfmap_fd, errno)) + goto close_prog; + + err = load_kallsyms(); + if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno)) + goto close_prog; + + attr.sample_type = PERF_SAMPLE_RAW; + attr.type = PERF_TYPE_SOFTWARE; + attr.config = PERF_COUNT_SW_BPF_OUTPUT; + pmu_fd = syscall(__NR_perf_event_open, &attr, getpid()/*pid*/, -1/*cpu*/, + -1/*group_fd*/, 0); + if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd, + errno)) + goto close_prog; + + err = bpf_map_update_elem(perfmap_fd, &key, &pmu_fd, BPF_ANY); + if (CHECK(err < 0, "bpf_map_update_elem", "err %d errno %d\n", err, + errno)) + goto close_prog; + + err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0); + if (CHECK(err < 0, "ioctl PERF_EVENT_IOC_ENABLE", "err %d errno %d\n", + err, errno)) + goto close_prog; + + err = perf_event_mmap(pmu_fd); + if (CHECK(err < 0, "perf_event_mmap", "err %d errno %d\n", err, errno)) + goto close_prog; + + /* trigger some syscall action */ + for (i = 0; i < MAX_CNT_RAWTP; i++) + nanosleep(&tv, NULL); + + err = perf_event_poller(pmu_fd, get_stack_print_output); + if (CHECK(err < 0, "perf_event_poller", "err %d errno %d\n", err, errno)) + goto close_prog; + + goto close_prog_noerr; +close_prog: + error_cnt++; +close_prog_noerr: + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/l4lb_all.c b/tools/testing/selftests/bpf/prog_tests/l4lb_all.c new file mode 100644 index 000000000000..20ddca830e68 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/l4lb_all.c @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + 
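+/* Editor's note: VIP_NUM, MAGIC_VAL, MAGIC_BYTES, NUM_ITER and the
+ * pkt_v4/pkt_v6 test packets are assumed to come from test_progs.h,
+ * which is shared across these split-out prog_tests files. */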
+static void test_l4lb(const char *file) +{ + unsigned int nr_cpus = bpf_num_possible_cpus(); + struct vip key = {.protocol = 6}; + struct vip_meta { + __u32 flags; + __u32 vip_num; + } value = {.vip_num = VIP_NUM}; + __u32 stats_key = VIP_NUM; + struct vip_stats { + __u64 bytes; + __u64 pkts; + } stats[nr_cpus]; + struct real_definition { + union { + __be32 dst; + __be32 dstv6[4]; + }; + __u8 flags; + } real_def = {.dst = MAGIC_VAL}; + __u32 ch_key = 11, real_num = 3; + __u32 duration, retval, size; + int err, i, prog_fd, map_fd; + __u64 bytes = 0, pkts = 0; + struct bpf_object *obj; + char buf[128]; + u32 *magic = (u32 *)buf; + + err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); + if (err) { + error_cnt++; + return; + } + + map_fd = bpf_find_map(__func__, obj, "vip_map"); + if (map_fd < 0) + goto out; + bpf_map_update_elem(map_fd, &key, &value, 0); + + map_fd = bpf_find_map(__func__, obj, "ch_rings"); + if (map_fd < 0) + goto out; + bpf_map_update_elem(map_fd, &ch_key, &real_num, 0); + + map_fd = bpf_find_map(__func__, obj, "reals"); + if (map_fd < 0) + goto out; + bpf_map_update_elem(map_fd, &real_num, &real_def, 0); + + err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4), + buf, &size, &retval, &duration); + CHECK(err || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 || + *magic != MAGIC_VAL, "ipv4", + "err %d errno %d retval %d size %d magic %x\n", + err, errno, retval, size, *magic); + + err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6), + buf, &size, &retval, &duration); + CHECK(err || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 || + *magic != MAGIC_VAL, "ipv6", + "err %d errno %d retval %d size %d magic %x\n", + err, errno, retval, size, *magic); + + map_fd = bpf_find_map(__func__, obj, "stats"); + if (map_fd < 0) + goto out; + bpf_map_lookup_elem(map_fd, &stats_key, stats); + for (i = 0; i < nr_cpus; i++) { + bytes += stats[i].bytes; + pkts += stats[i].pkts; + } + if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) { + error_cnt++; + printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts); + } +out: + bpf_object__close(obj); +} + +void test_l4lb_all(void) +{ + const char *file1 = "./test_l4lb.o"; + const char *file2 = "./test_l4lb_noinline.o"; + + test_l4lb(file1); + test_l4lb(file2); +} diff --git a/tools/testing/selftests/bpf/prog_tests/map_lock.c b/tools/testing/selftests/bpf/prog_tests/map_lock.c new file mode 100644 index 000000000000..90f8a206340a --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/map_lock.c @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +static void *parallel_map_access(void *arg) +{ + int err, map_fd = *(u32 *) arg; + int vars[17], i, j, rnd, key = 0; + + for (i = 0; i < 10000; i++) { + err = bpf_map_lookup_elem_flags(map_fd, &key, vars, BPF_F_LOCK); + if (err) { + printf("lookup failed\n"); + error_cnt++; + goto out; + } + if (vars[0] != 0) { + printf("lookup #%d var[0]=%d\n", i, vars[0]); + error_cnt++; + goto out; + } + rnd = vars[1]; + for (j = 2; j < 17; j++) { + if (vars[j] == rnd) + continue; + printf("lookup #%d var[1]=%d var[%d]=%d\n", + i, rnd, j, vars[j]); + error_cnt++; + goto out; + } + } +out: + pthread_exit(arg); +} + +void test_map_lock(void) +{ + const char *file = "./test_map_lock.o"; + int prog_fd, map_fd[2], vars[17] = {}; + pthread_t thread_id[6]; + struct bpf_object *obj; + int err = 0, key = 0, i; + void *ret; + + err = bpf_prog_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd); + if (err) { + printf("test_map_lock:bpf_prog_load 
errno %d\n", errno); + goto close_prog; + } + map_fd[0] = bpf_find_map(__func__, obj, "hash_map"); + if (map_fd[0] < 0) + goto close_prog; + map_fd[1] = bpf_find_map(__func__, obj, "array_map"); + if (map_fd[1] < 0) + goto close_prog; + + bpf_map_update_elem(map_fd[0], &key, vars, BPF_F_LOCK); + + for (i = 0; i < 4; i++) + assert(pthread_create(&thread_id[i], NULL, + &spin_lock_thread, &prog_fd) == 0); + for (i = 4; i < 6; i++) + assert(pthread_create(&thread_id[i], NULL, + ¶llel_map_access, &map_fd[i - 4]) == 0); + for (i = 0; i < 4; i++) + assert(pthread_join(thread_id[i], &ret) == 0 && + ret == (void *)&prog_fd); + for (i = 4; i < 6; i++) + assert(pthread_join(thread_id[i], &ret) == 0 && + ret == (void *)&map_fd[i - 4]); + goto close_prog_noerr; +close_prog: + error_cnt++; +close_prog_noerr: + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/obj_name.c b/tools/testing/selftests/bpf/prog_tests/obj_name.c new file mode 100644 index 000000000000..e178416bddad --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/obj_name.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +void test_obj_name(void) +{ + struct { + const char *name; + int success; + int expected_errno; + } tests[] = { + { "", 1, 0 }, + { "_123456789ABCDE", 1, 0 }, + { "_123456789ABCDEF", 0, EINVAL }, + { "_123456789ABCD\n", 0, EINVAL }, + }; + struct bpf_insn prog[] = { + BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + __u32 duration = 0; + int i; + + for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { + size_t name_len = strlen(tests[i].name) + 1; + union bpf_attr attr; + size_t ncopy; + int fd; + + /* test different attr.prog_name during BPF_PROG_LOAD */ + ncopy = name_len < sizeof(attr.prog_name) ? + name_len : sizeof(attr.prog_name); + bzero(&attr, sizeof(attr)); + attr.prog_type = BPF_PROG_TYPE_SCHED_CLS; + attr.insn_cnt = 2; + attr.insns = ptr_to_u64(prog); + attr.license = ptr_to_u64(""); + memcpy(attr.prog_name, tests[i].name, ncopy); + + fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr)); + CHECK((tests[i].success && fd < 0) || + (!tests[i].success && fd != -1) || + (!tests[i].success && errno != tests[i].expected_errno), + "check-bpf-prog-name", + "fd %d(%d) errno %d(%d)\n", + fd, tests[i].success, errno, tests[i].expected_errno); + + if (fd != -1) + close(fd); + + /* test different attr.map_name during BPF_MAP_CREATE */ + ncopy = name_len < sizeof(attr.map_name) ? 
+ name_len : sizeof(attr.map_name); + bzero(&attr, sizeof(attr)); + attr.map_type = BPF_MAP_TYPE_ARRAY; + attr.key_size = 4; + attr.value_size = 4; + attr.max_entries = 1; + attr.map_flags = 0; + memcpy(attr.map_name, tests[i].name, ncopy); + fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr)); + CHECK((tests[i].success && fd < 0) || + (!tests[i].success && fd != -1) || + (!tests[i].success && errno != tests[i].expected_errno), + "check-bpf-map-name", + "fd %d(%d) errno %d(%d)\n", + fd, tests[i].success, errno, tests[i].expected_errno); + + if (fd != -1) + close(fd); + } +} diff --git a/tools/testing/selftests/bpf/prog_tests/pkt_access.c b/tools/testing/selftests/bpf/prog_tests/pkt_access.c new file mode 100644 index 000000000000..4ecfd721a044 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/pkt_access.c @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +void test_pkt_access(void) +{ + const char *file = "./test_pkt_access.o"; + struct bpf_object *obj; + __u32 duration, retval; + int err, prog_fd; + + err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); + if (err) { + error_cnt++; + return; + } + + err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4), + NULL, NULL, &retval, &duration); + CHECK(err || retval, "ipv4", + "err %d errno %d retval %d duration %d\n", + err, errno, retval, duration); + + err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6), + NULL, NULL, &retval, &duration); + CHECK(err || retval, "ipv6", + "err %d errno %d retval %d duration %d\n", + err, errno, retval, duration); + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c b/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c new file mode 100644 index 000000000000..ac0d43435806 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +void test_pkt_md_access(void) +{ + const char *file = "./test_pkt_md_access.o"; + struct bpf_object *obj; + __u32 duration, retval; + int err, prog_fd; + + err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); + if (err) { + error_cnt++; + return; + } + + err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4), + NULL, NULL, &retval, &duration); + CHECK(err || retval, "", + "err %d errno %d retval %d duration %d\n", + err, errno, retval, duration); + + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c b/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c new file mode 100644 index 000000000000..5dd89b941f53 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +void test_prog_run_xattr(void) +{ + const char *file = "./test_pkt_access.o"; + struct bpf_object *obj; + char buf[10]; + int err; + struct bpf_prog_test_run_attr tattr = { + .repeat = 1, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .data_out = buf, + .data_size_out = 5, + }; + + err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, + &tattr.prog_fd); + if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno)) + return; + + memset(buf, 0, sizeof(buf)); + + err = bpf_prog_test_run_xattr(&tattr); + CHECK_ATTR(err != -1 || errno != ENOSPC || tattr.retval, "run", + "err %d errno %d retval %d\n", err, errno, tattr.retval); + + CHECK_ATTR(tattr.data_size_out != sizeof(pkt_v4), "data_size_out", + "incorrect output size, 
want %lu have %u\n", + sizeof(pkt_v4), tattr.data_size_out); + + CHECK_ATTR(buf[5] != 0, "overflow", + "BPF_PROG_TEST_RUN ignored size hint\n"); + + tattr.data_out = NULL; + tattr.data_size_out = 0; + errno = 0; + + err = bpf_prog_test_run_xattr(&tattr); + CHECK_ATTR(err || errno || tattr.retval, "run_no_output", + "err %d errno %d retval %d\n", err, errno, tattr.retval); + + tattr.data_size_out = 1; + err = bpf_prog_test_run_xattr(&tattr); + CHECK_ATTR(err != -EINVAL, "run_wrong_size_out", "err %d\n", err); + + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c b/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c new file mode 100644 index 000000000000..e60cd5ff1f55 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +enum { + QUEUE, + STACK, +}; + +static void test_queue_stack_map_by_type(int type) +{ + const int MAP_SIZE = 32; + __u32 vals[MAP_SIZE], duration, retval, size, val; + int i, err, prog_fd, map_in_fd, map_out_fd; + char file[32], buf[128]; + struct bpf_object *obj; + struct iphdr *iph = (void *)buf + sizeof(struct ethhdr); + + /* Fill test values to be used */ + for (i = 0; i < MAP_SIZE; i++) + vals[i] = rand(); + + if (type == QUEUE) + strncpy(file, "./test_queue_map.o", sizeof(file)); + else if (type == STACK) + strncpy(file, "./test_stack_map.o", sizeof(file)); + else + return; + + err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); + if (err) { + error_cnt++; + return; + } + + map_in_fd = bpf_find_map(__func__, obj, "map_in"); + if (map_in_fd < 0) + goto out; + + map_out_fd = bpf_find_map(__func__, obj, "map_out"); + if (map_out_fd < 0) + goto out; + + /* Push 32 elements to the input map */ + for (i = 0; i < MAP_SIZE; i++) { + err = bpf_map_update_elem(map_in_fd, NULL, &vals[i], 0); + if (err) { + error_cnt++; + goto out; + } + } + + /* The eBPF program pushes iph.saddr into the output map, + * pops the input map, and saves the popped value in iph.daddr + */ + for (i = 0; i < MAP_SIZE; i++) { + if (type == QUEUE) { + val = vals[i]; + pkt_v4.iph.saddr = vals[i] * 5; + } else if (type == STACK) { + val = vals[MAP_SIZE - 1 - i]; + pkt_v4.iph.saddr = vals[MAP_SIZE - 1 - i] * 5; + } + + err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), + buf, &size, &retval, &duration); + if (err || retval || size != sizeof(pkt_v4) || + iph->daddr != val) + break; + } + + CHECK(err || retval || size != sizeof(pkt_v4) || iph->daddr != val, + "bpf_map_pop_elem", + "err %d errno %d retval %d size %d iph->daddr %u\n", + err, errno, retval, size, iph->daddr); + + /* The map is now empty; the program should return TC_ACT_SHOT */ + err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), + buf, &size, &retval, &duration); + CHECK(err || retval != 2 /* TC_ACT_SHOT */ || size != sizeof(pkt_v4), + "check-queue-stack-map-empty", + "err %d errno %d retval %d size %d\n", + err, errno, retval, size); + + /* Check that the program pushed elements correctly */ + for (i = 0; i < MAP_SIZE; i++) { + err = bpf_map_lookup_and_delete_elem(map_out_fd, NULL, &val); + if (err || val != vals[i] * 5) + break; + } + + CHECK(i != MAP_SIZE && (err || val != vals[i] * 5), + "bpf_map_push_elem", "err %d value %u\n", err, val); + +out: + pkt_v4.iph.saddr = 0; + bpf_object__close(obj); +} + +void test_queue_stack_map(void) +{ + test_queue_stack_map_by_type(QUEUE); + test_queue_stack_map_by_type(STACK); +} diff --git 
a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c new file mode 100644 index 000000000000..5633be43828f --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +static int libbpf_debug_print(enum libbpf_print_level level, + const char *format, va_list args) +{ + if (level == LIBBPF_DEBUG) + return 0; + + return vfprintf(stderr, format, args); +} + +void test_reference_tracking(void) +{ + const char *file = "./test_sk_lookup_kern.o"; + struct bpf_object *obj; + struct bpf_program *prog; + __u32 duration = 0; + int err = 0; + + obj = bpf_object__open(file); + if (IS_ERR(obj)) { + error_cnt++; + return; + } + + bpf_object__for_each_program(prog, obj) { + const char *title; + + /* Ignore .text sections */ + title = bpf_program__title(prog, false); + if (strstr(title, ".text") != NULL) + continue; + + bpf_program__set_type(prog, BPF_PROG_TYPE_SCHED_CLS); + + /* Expect verifier failure if test name has 'fail' */ + if (strstr(title, "fail") != NULL) { + libbpf_set_print(NULL); + err = !bpf_program__load(prog, "GPL", 0); + libbpf_set_print(libbpf_debug_print); + } else { + err = bpf_program__load(prog, "GPL", 0); + } + CHECK(err, title, "\n"); + } + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/signal_pending.c b/tools/testing/selftests/bpf/prog_tests/signal_pending.c new file mode 100644 index 000000000000..f2a37bbf91ab --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/signal_pending.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +static void sigalrm_handler(int s) {} +static struct sigaction sigalrm_action = { + .sa_handler = sigalrm_handler, +}; + +static void test_signal_pending_by_type(enum bpf_prog_type prog_type) +{ + struct bpf_insn prog[4096]; + struct itimerval timeo = { + .it_value.tv_usec = 100000, /* 100ms */ + }; + __u32 duration, retval; + int prog_fd; + int err; + int i; + + for (i = 0; i < ARRAY_SIZE(prog); i++) + prog[i] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0); + prog[ARRAY_SIZE(prog) - 1] = BPF_EXIT_INSN(); + + prog_fd = bpf_load_program(prog_type, prog, ARRAY_SIZE(prog), + "GPL", 0, NULL, 0); + CHECK(prog_fd < 0, "test-run", "errno %d\n", errno); + + err = sigaction(SIGALRM, &sigalrm_action, NULL); + CHECK(err, "test-run-signal-sigaction", "errno %d\n", errno); + + err = setitimer(ITIMER_REAL, &timeo, NULL); + CHECK(err, "test-run-signal-timer", "errno %d\n", errno); + + err = bpf_prog_test_run(prog_fd, 0xffffffff, &pkt_v4, sizeof(pkt_v4), + NULL, NULL, &retval, &duration); + CHECK(duration > 500000000, /* 500ms */ + "test-run-signal-duration", + "duration %dns > 500ms\n", + duration); + + signal(SIGALRM, SIG_DFL); +} + +void test_signal_pending(enum bpf_prog_type prog_type) +{ + test_signal_pending_by_type(BPF_PROG_TYPE_SOCKET_FILTER); + test_signal_pending_by_type(BPF_PROG_TYPE_FLOW_DISSECTOR); +} diff --git a/tools/testing/selftests/bpf/prog_tests/spinlock.c b/tools/testing/selftests/bpf/prog_tests/spinlock.c new file mode 100644 index 000000000000..9a573a9675d7 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/spinlock.c @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +void test_spinlock(void) +{ + const char *file = "./test_spin_lock.o"; + pthread_t thread_id[4]; + struct bpf_object *obj; + int prog_fd; + int err = 0, i; + void *ret; + + err = bpf_prog_load(file, 
BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd); + if (err) { + printf("test_spin_lock:bpf_prog_load errno %d\n", errno); + goto close_prog; + } + for (i = 0; i < 4; i++) + assert(pthread_create(&thread_id[i], NULL, + &spin_lock_thread, &prog_fd) == 0); + for (i = 0; i < 4; i++) + assert(pthread_join(thread_id[i], &ret) == 0 && + ret == (void *)&prog_fd); + goto close_prog_noerr; +close_prog: + error_cnt++; +close_prog_noerr: + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c new file mode 100644 index 000000000000..3aab2b083c71 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +void test_stacktrace_build_id(void) +{ + int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd; + const char *file = "./test_stacktrace_build_id.o"; + int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len; + struct perf_event_attr attr = {}; + __u32 key, previous_key, val, duration = 0; + struct bpf_object *obj; + char buf[256]; + int i, j; + struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; + int build_id_matches = 0; + int retry = 1; + +retry: + err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); + if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) + goto out; + + /* Get the ID for the random/urandom_read tracepoint */ + snprintf(buf, sizeof(buf), + "/sys/kernel/debug/tracing/events/random/urandom_read/id"); + efd = open(buf, O_RDONLY, 0); + if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno)) + goto close_prog; + + bytes = read(efd, buf, sizeof(buf)); + close(efd); + if (CHECK(bytes <= 0 || bytes >= sizeof(buf), + "read", "bytes %d errno %d\n", bytes, errno)) + goto close_prog; + + /* Open the perf event and attach bpf program */ + attr.config = strtol(buf, NULL, 0); + attr.type = PERF_TYPE_TRACEPOINT; + attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN; + attr.sample_period = 1; + attr.wakeup_events = 1; + pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, + 0 /* cpu 0 */, -1 /* group id */, + 0 /* flags */); + if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", + pmu_fd, errno)) + goto close_prog; + + err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0); + if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", + err, errno)) + goto close_pmu; + + err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd); + if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", + err, errno)) + goto disable_pmu; + + /* find map fds */ + control_map_fd = bpf_find_map(__func__, obj, "control_map"); + if (CHECK(control_map_fd < 0, "bpf_find_map control_map", + "err %d errno %d\n", err, errno)) + goto disable_pmu; + + stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap"); + if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap", + "err %d errno %d\n", err, errno)) + goto disable_pmu; + + stackmap_fd = bpf_find_map(__func__, obj, "stackmap"); + if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n", + err, errno)) + goto disable_pmu; + + stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap"); + if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap", + "err %d errno %d\n", err, errno)) + goto disable_pmu; + + assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null") + == 0); + assert(system("./urandom_read") == 0); + /* disable stack trace collection */ + key = 0; + val = 1;
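+ /* editor's note: writing 1 at key 0 in control_map signals the BPF
+ * program to stop recording stacks, so the maps stay stable below */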
+ bpf_map_update_elem(control_map_fd, &key, &val, 0); + + /* for every element in stackid_hmap, we can find a corresponding one + * in stackmap, and vice versa. + */ + err = compare_map_keys(stackid_hmap_fd, stackmap_fd); + if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap", + "err %d errno %d\n", err, errno)) + goto disable_pmu; + + err = compare_map_keys(stackmap_fd, stackid_hmap_fd); + if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap", + "err %d errno %d\n", err, errno)) + goto disable_pmu; + + err = extract_build_id(buf, 256); + + if (CHECK(err, "get build_id with readelf", + "err %d errno %d\n", err, errno)) + goto disable_pmu; + + err = bpf_map_get_next_key(stackmap_fd, NULL, &key); + if (CHECK(err, "get_next_key from stackmap", + "err %d, errno %d\n", err, errno)) + goto disable_pmu; + + do { + char build_id[64]; + + err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs); + if (CHECK(err, "lookup_elem from stackmap", + "err %d, errno %d\n", err, errno)) + goto disable_pmu; + for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i) + if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID && + id_offs[i].offset != 0) { + for (j = 0; j < 20; ++j) + sprintf(build_id + 2 * j, "%02x", + id_offs[i].build_id[j] & 0xff); + if (strstr(buf, build_id) != NULL) + build_id_matches = 1; + } + previous_key = key; + } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); + + /* stack_map_get_build_id_offset() is racy and sometimes can return + * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID; + * try it one more time. + */ + if (build_id_matches < 1 && retry--) { + ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); + close(pmu_fd); + bpf_object__close(obj); + printf("%s:WARN:Didn't find expected build ID from the map, retrying\n", + __func__); + goto retry; + } + + if (CHECK(build_id_matches < 1, "build id match", + "Didn't find expected build ID from the map\n")) + goto disable_pmu; + 
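+ /* editor's note: each stackmap value holds PERF_MAX_STACK_DEPTH
+ * bpf_stack_build_id records, hence the length computed here */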
stack_amap", + "err %d errno %d\n", err, errno); + +disable_pmu: + ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); + +close_pmu: + close(pmu_fd); + +close_prog: + bpf_object__close(obj); + +out: + return; +} diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c new file mode 100644 index 000000000000..8a114bb1c379 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +void test_stacktrace_build_id_nmi(void) +{ + int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd; + const char *file = "./test_stacktrace_build_id.o"; + int err, pmu_fd, prog_fd; + struct perf_event_attr attr = { + .sample_freq = 5000, + .freq = 1, + .type = PERF_TYPE_HARDWARE, + .config = PERF_COUNT_HW_CPU_CYCLES, + }; + __u32 key, previous_key, val, duration = 0; + struct bpf_object *obj; + char buf[256]; + int i, j; + struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; + int build_id_matches = 0; + int retry = 1; + +retry: + err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd); + if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) + return; + + pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, + 0 /* cpu 0 */, -1 /* group id */, + 0 /* flags */); + if (CHECK(pmu_fd < 0, "perf_event_open", + "err %d errno %d. Does the test host support PERF_COUNT_HW_CPU_CYCLES?\n", + pmu_fd, errno)) + goto close_prog; + + err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0); + if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", + err, errno)) + goto close_pmu; + + err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd); + if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", + err, errno)) + goto disable_pmu; + + /* find map fds */ + control_map_fd = bpf_find_map(__func__, obj, "control_map"); + if (CHECK(control_map_fd < 0, "bpf_find_map control_map", + "err %d errno %d\n", err, errno)) + goto disable_pmu; + + stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap"); + if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap", + "err %d errno %d\n", err, errno)) + goto disable_pmu; + + stackmap_fd = bpf_find_map(__func__, obj, "stackmap"); + if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n", + err, errno)) + goto disable_pmu; + + stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap"); + if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap", + "err %d errno %d\n", err, errno)) + goto disable_pmu; + + assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null") + == 0); + assert(system("taskset 0x1 ./urandom_read 100000") == 0); + /* disable stack trace collection */ + key = 0; + val = 1; + bpf_map_update_elem(control_map_fd, &key, &val, 0); + + /* for every element in stackid_hmap, we can find a corresponding one + * in stackmap, and vise versa. + */ + err = compare_map_keys(stackid_hmap_fd, stackmap_fd); + if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap", + "err %d errno %d\n", err, errno)) + goto disable_pmu; + + err = compare_map_keys(stackmap_fd, stackid_hmap_fd); + if (CHECK(err, "compare_map_keys stackmap vs. 
stackid_hmap", + "err %d errno %d\n", err, errno)) + goto disable_pmu; + + err = extract_build_id(buf, 256); + + if (CHECK(err, "get build_id with readelf", + "err %d errno %d\n", err, errno)) + goto disable_pmu; + + err = bpf_map_get_next_key(stackmap_fd, NULL, &key); + if (CHECK(err, "get_next_key from stackmap", + "err %d, errno %d\n", err, errno)) + goto disable_pmu; + + do { + char build_id[64]; + + err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs); + if (CHECK(err, "lookup_elem from stackmap", + "err %d, errno %d\n", err, errno)) + goto disable_pmu; + for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i) + if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID && + id_offs[i].offset != 0) { + for (j = 0; j < 20; ++j) + sprintf(build_id + 2 * j, "%02x", + id_offs[i].build_id[j] & 0xff); + if (strstr(buf, build_id) != NULL) + build_id_matches = 1; + } + previous_key = key; + } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); + + /* stack_map_get_build_id_offset() is racy and sometimes can return + * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID; + * try it one more time. + */ + if (build_id_matches < 1 && retry--) { + ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); + close(pmu_fd); + bpf_object__close(obj); + printf("%s:WARN:Didn't find expected build ID from the map, retrying\n", + __func__); + goto retry; + } + + if (CHECK(build_id_matches < 1, "build id match", + "Didn't find expected build ID from the map\n")) + goto disable_pmu; + + /* + * We intentionally skip compare_stack_ips(). This is because we + * only support one in_nmi() ips-to-build_id translation per cpu + * at any time, thus stack_amap here will always fallback to + * BPF_STACK_BUILD_ID_IP; + */ + +disable_pmu: + ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); + +close_pmu: + close(pmu_fd); + +close_prog: + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c new file mode 100644 index 000000000000..2bfd50a0d6d1 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +void test_stacktrace_map(void) +{ + int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd; + const char *file = "./test_stacktrace_map.o"; + int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len; + struct perf_event_attr attr = {}; + __u32 key, val, duration = 0; + struct bpf_object *obj; + char buf[256]; + + err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); + if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) + return; + + /* Get the ID for the sched/sched_switch tracepoint */ + snprintf(buf, sizeof(buf), + "/sys/kernel/debug/tracing/events/sched/sched_switch/id"); + efd = open(buf, O_RDONLY, 0); + if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno)) + goto close_prog; + + bytes = read(efd, buf, sizeof(buf)); + close(efd); + if (bytes <= 0 || bytes >= sizeof(buf)) + goto close_prog; + + /* Open the perf event and attach bpf progrram */ + attr.config = strtol(buf, NULL, 0); + attr.type = PERF_TYPE_TRACEPOINT; + attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN; + attr.sample_period = 1; + attr.wakeup_events = 1; + pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, + 0 /* cpu 0 */, -1 /* group id */, + 0 /* flags */); + if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", + pmu_fd, errno)) + goto close_prog; + + err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0); + if (err) + goto 
+ err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd); + if (err) + goto disable_pmu; + + /* find map fds */ + control_map_fd = bpf_find_map(__func__, obj, "control_map"); + if (control_map_fd < 0) + goto disable_pmu; + + stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap"); + if (stackid_hmap_fd < 0) + goto disable_pmu; + + stackmap_fd = bpf_find_map(__func__, obj, "stackmap"); + if (stackmap_fd < 0) + goto disable_pmu; + + stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap"); + if (stack_amap_fd < 0) + goto disable_pmu; + + /* give the bpf program some time to run */ + sleep(1); + + /* disable stack trace collection */ + key = 0; + val = 1; + bpf_map_update_elem(control_map_fd, &key, &val, 0); + + /* for every element in stackid_hmap, we can find a corresponding one + * in stackmap, and vice versa. + */ + err = compare_map_keys(stackid_hmap_fd, stackmap_fd); + if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap", + "err %d errno %d\n", err, errno)) + goto disable_pmu_noerr; + + err = compare_map_keys(stackmap_fd, stackid_hmap_fd); + if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap", + "err %d errno %d\n", err, errno)) + goto disable_pmu_noerr; + + stack_trace_len = PERF_MAX_STACK_DEPTH * sizeof(__u64); + err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len); + if (CHECK(err, "compare_stack_ips stackmap vs. stack_amap", + "err %d errno %d\n", err, errno)) + goto disable_pmu_noerr; + + goto disable_pmu_noerr; +disable_pmu: + error_cnt++; +disable_pmu_noerr: + ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); + close(pmu_fd); +close_prog: + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c new file mode 100644 index 000000000000..1f8387d80fd7 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +void test_stacktrace_map_raw_tp(void) +{ + int control_map_fd, stackid_hmap_fd, stackmap_fd; + const char *file = "./test_stacktrace_map.o"; + int efd, err, prog_fd; + __u32 key, val, duration = 0; + struct bpf_object *obj; + + err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd); + if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno)) + return; + + efd = bpf_raw_tracepoint_open("sched_switch", prog_fd); + if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno)) + goto close_prog; + + /* find map fds */ + control_map_fd = bpf_find_map(__func__, obj, "control_map"); + if (control_map_fd < 0) + goto close_prog; + + stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap"); + if (stackid_hmap_fd < 0) + goto close_prog; + + stackmap_fd = bpf_find_map(__func__, obj, "stackmap"); + if (stackmap_fd < 0) + goto close_prog; + + /* give the bpf program some time to run */ + sleep(1); + + /* disable stack trace collection */ + key = 0; + val = 1; + bpf_map_update_elem(control_map_fd, &key, &val, 0); + + /* for every element in stackid_hmap, we can find a corresponding one + * in stackmap, and vice versa. + */ + err = compare_map_keys(stackid_hmap_fd, stackmap_fd); + if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap", + "err %d errno %d\n", err, errno)) + goto close_prog; + + err = compare_map_keys(stackmap_fd, stackid_hmap_fd); + if (CHECK(err, "compare_map_keys stackmap vs. 
stackid_hmap", + "err %d errno %d\n", err, errno)) + goto close_prog; + + goto close_prog_noerr; +close_prog: + error_cnt++; +close_prog_noerr: + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c b/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c new file mode 100644 index 000000000000..958a3d88de99 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +void test_task_fd_query_rawtp(void) +{ + const char *file = "./test_get_stack_rawtp.o"; + __u64 probe_offset, probe_addr; + __u32 len, prog_id, fd_type; + struct bpf_object *obj; + int efd, err, prog_fd; + __u32 duration = 0; + char buf[256]; + + err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd); + if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno)) + return; + + efd = bpf_raw_tracepoint_open("sys_enter", prog_fd); + if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno)) + goto close_prog; + + /* query (getpid(), efd) */ + len = sizeof(buf); + err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id, + &fd_type, &probe_offset, &probe_addr); + if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err, + errno)) + goto close_prog; + + err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT && + strcmp(buf, "sys_enter") == 0; + if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n", + fd_type, buf)) + goto close_prog; + + /* test zero len */ + len = 0; + err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id, + &fd_type, &probe_offset, &probe_addr); + if (CHECK(err < 0, "bpf_task_fd_query (len = 0)", "err %d errno %d\n", + err, errno)) + goto close_prog; + err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT && + len == strlen("sys_enter"); + if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len)) + goto close_prog; + + /* test empty buffer */ + len = sizeof(buf); + err = bpf_task_fd_query(getpid(), efd, 0, 0, &len, &prog_id, + &fd_type, &probe_offset, &probe_addr); + if (CHECK(err < 0, "bpf_task_fd_query (buf = 0)", "err %d errno %d\n", + err, errno)) + goto close_prog; + err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT && + len == strlen("sys_enter"); + if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len)) + goto close_prog; + + /* test smaller buffer */ + len = 3; + err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id, + &fd_type, &probe_offset, &probe_addr); + if (CHECK(err >= 0 || errno != ENOSPC, "bpf_task_fd_query (len = 3)", + "err %d errno %d\n", err, errno)) + goto close_prog; + err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT && + len == strlen("sys_enter") && + strcmp(buf, "sy") == 0; + if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len)) + goto close_prog; + + goto close_prog_noerr; +close_prog: + error_cnt++; +close_prog_noerr: + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c b/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c new file mode 100644 index 000000000000..d636a4f39476 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +static void test_task_fd_query_tp_core(const char *probe_name, + const char *tp_name) +{ + const char *file = "./test_tracepoint.o"; + int err, bytes, efd, prog_fd, pmu_fd; + struct perf_event_attr attr = {}; + __u64 probe_offset, probe_addr; + __u32 len, prog_id, 
fd_type; + struct bpf_object *obj; + __u32 duration = 0; + char buf[256]; + + err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); + if (CHECK(err, "bpf_prog_load", "err %d errno %d\n", err, errno)) + goto close_prog; + + snprintf(buf, sizeof(buf), + "/sys/kernel/debug/tracing/events/%s/id", probe_name); + efd = open(buf, O_RDONLY, 0); + if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno)) + goto close_prog; + bytes = read(efd, buf, sizeof(buf)); + close(efd); + if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read", + "bytes %d errno %d\n", bytes, errno)) + goto close_prog; + + attr.config = strtol(buf, NULL, 0); + attr.type = PERF_TYPE_TRACEPOINT; + attr.sample_type = PERF_SAMPLE_RAW; + attr.sample_period = 1; + attr.wakeup_events = 1; + pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, + 0 /* cpu 0 */, -1 /* group id */, + 0 /* flags */); + if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", + pmu_fd, errno)) + goto close_pmu; + + err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0); + if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err, + errno)) + goto close_pmu; + + err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd); + if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err, + errno)) + goto close_pmu; + + /* query (getpid(), pmu_fd) */ + len = sizeof(buf); + err = bpf_task_fd_query(getpid(), pmu_fd, 0, buf, &len, &prog_id, + &fd_type, &probe_offset, &probe_addr); + if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err, + errno)) + goto close_pmu; + + err = (fd_type == BPF_FD_TYPE_TRACEPOINT) && !strcmp(buf, tp_name); + if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n", + fd_type, buf)) + goto close_pmu; + + close(pmu_fd); + goto close_prog_noerr; + +close_pmu: + close(pmu_fd); +close_prog: + error_cnt++; +close_prog_noerr: + bpf_object__close(obj); +} + +void test_task_fd_query_tp(void) +{ + test_task_fd_query_tp_core("sched/sched_switch", + "sched_switch"); + test_task_fd_query_tp_core("syscalls/sys_enter_read", + "sys_enter_read"); +} diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_estats.c b/tools/testing/selftests/bpf/prog_tests/tcp_estats.c new file mode 100644 index 000000000000..bb8759d69099 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/tcp_estats.c @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +void test_tcp_estats(void) +{ + const char *file = "./test_tcp_estats.o"; + int err, prog_fd; + struct bpf_object *obj; + __u32 duration = 0; + + err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); + CHECK(err, "", "err %d errno %d\n", err, errno); + if (err) { + error_cnt++; + return; + } + + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c b/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c new file mode 100644 index 000000000000..a2f476f91637 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c @@ -0,0 +1,132 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +void test_tp_attach_query(void) +{ + const int num_progs = 3; + int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs]; + __u32 duration = 0, info_len, saved_prog_ids[num_progs]; + const char *file = "./test_tracepoint.o"; + struct perf_event_query_bpf *query; + struct perf_event_attr attr = {}; + struct bpf_object *obj[num_progs]; + struct bpf_prog_info prog_info; + char buf[256]; + + snprintf(buf, sizeof(buf), + "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
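+ /* editor's note: this tracefs file yields the tracepoint's numeric id,
+ * which becomes attr.config for perf_event_open below */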
+ efd = open(buf, O_RDONLY, 0); + if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno)) + return; + bytes = read(efd, buf, sizeof(buf)); + close(efd); + if (CHECK(bytes <= 0 || bytes >= sizeof(buf), + "read", "bytes %d errno %d\n", bytes, errno)) + return; + + attr.config = strtol(buf, NULL, 0); + attr.type = PERF_TYPE_TRACEPOINT; + attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN; + attr.sample_period = 1; + attr.wakeup_events = 1; + + query = malloc(sizeof(*query) + sizeof(__u32) * num_progs); + for (i = 0; i < num_progs; i++) { + err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i], + &prog_fd[i]); + if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) + goto cleanup1; + + bzero(&prog_info, sizeof(prog_info)); + prog_info.jited_prog_len = 0; + prog_info.xlated_prog_len = 0; + prog_info.nr_map_ids = 0; + info_len = sizeof(prog_info); + err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len); + if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n", + err, errno)) + goto cleanup1; + saved_prog_ids[i] = prog_info.id; + + pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */, + 0 /* cpu 0 */, -1 /* group id */, + 0 /* flags */); + if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n", + pmu_fd[i], errno)) + goto cleanup2; + err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0); + if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", + err, errno)) + goto cleanup3; + + if (i == 0) { + /* check NULL prog array query */ + query->ids_len = num_progs; + err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query); + if (CHECK(err || query->prog_cnt != 0, + "perf_event_ioc_query_bpf", + "err %d errno %d query->prog_cnt %u\n", + err, errno, query->prog_cnt)) + goto cleanup3; + } + + err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]); + if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", + err, errno)) + goto cleanup3; + + if (i == 1) { + /* try to get # of programs only */ + query->ids_len = 0; + err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query); + if (CHECK(err || query->prog_cnt != 2, + "perf_event_ioc_query_bpf", + "err %d errno %d query->prog_cnt %u\n", + err, errno, query->prog_cnt)) + goto cleanup3; + + /* try a few negative tests */ + /* invalid query pointer */ + err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, + (struct perf_event_query_bpf *)0x1); + if (CHECK(!err || errno != EFAULT, + "perf_event_ioc_query_bpf", + "err %d errno %d\n", err, errno)) + goto cleanup3; + + /* not enough space */ + query->ids_len = 1; + err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query); + if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2, + "perf_event_ioc_query_bpf", + "err %d errno %d query->prog_cnt %u\n", + err, errno, query->prog_cnt)) + goto cleanup3; + } + + query->ids_len = num_progs; + err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query); + if (CHECK(err || query->prog_cnt != (i + 1), + "perf_event_ioc_query_bpf", + "err %d errno %d query->prog_cnt %u\n", + err, errno, query->prog_cnt)) + goto cleanup3; + for (j = 0; j < i + 1; j++) + if (CHECK(saved_prog_ids[j] != query->ids[j], + "perf_event_ioc_query_bpf", + "#%d saved_prog_id %x query prog_id %x\n", + j, saved_prog_ids[j], query->ids[j])) + goto cleanup3; + } + + i = num_progs - 1; + for (; i >= 0; i--) { + cleanup3: + ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE); + cleanup2: + close(pmu_fd[i]); + cleanup1: + bpf_object__close(obj[i]); + } + free(query); +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp.c 
b/tools/testing/selftests/bpf/prog_tests/xdp.c new file mode 100644 index 000000000000..a74167289545 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp.c @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +void test_xdp(void) +{ + struct vip key4 = {.protocol = 6, .family = AF_INET}; + struct vip key6 = {.protocol = 6, .family = AF_INET6}; + struct iptnl_info value4 = {.family = AF_INET}; + struct iptnl_info value6 = {.family = AF_INET6}; + const char *file = "./test_xdp.o"; + struct bpf_object *obj; + char buf[128]; + struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr); + struct iphdr *iph = (void *)buf + sizeof(struct ethhdr); + __u32 duration, retval, size; + int err, prog_fd, map_fd; + + err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); + if (err) { + error_cnt++; + return; + } + + map_fd = bpf_find_map(__func__, obj, "vip2tnl"); + if (map_fd < 0) + goto out; + bpf_map_update_elem(map_fd, &key4, &value4, 0); + bpf_map_update_elem(map_fd, &key6, &value6, 0); + + err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), + buf, &size, &retval, &duration); + + CHECK(err || retval != XDP_TX || size != 74 || + iph->protocol != IPPROTO_IPIP, "ipv4", + "err %d errno %d retval %d size %d\n", + err, errno, retval, size); + + err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6), + buf, &size, &retval, &duration); + CHECK(err || retval != XDP_TX || size != 114 || + iph6->nexthdr != IPPROTO_IPV6, "ipv6", + "err %d errno %d retval %d size %d\n", + err, errno, retval, size); +out: + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c new file mode 100644 index 000000000000..922aa0a19764 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +void test_xdp_adjust_tail(void) +{ + const char *file = "./test_adjust_tail.o"; + struct bpf_object *obj; + char buf[128]; + __u32 duration, retval, size; + int err, prog_fd; + + err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); + if (err) { + error_cnt++; + return; + } + + err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), + buf, &size, &retval, &duration); + + CHECK(err || retval != XDP_DROP, + "ipv4", "err %d errno %d retval %d size %d\n", + err, errno, retval, size); + + err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6), + buf, &size, &retval, &duration); + CHECK(err || retval != XDP_TX || size != 54, + "ipv6", "err %d errno %d retval %d size %d\n", + err, errno, retval, size); + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c b/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c new file mode 100644 index 000000000000..09e6b46f5515 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +void test_xdp_noinline(void) +{ + const char *file = "./test_xdp_noinline.o"; + unsigned int nr_cpus = bpf_num_possible_cpus(); + struct vip key = {.protocol = 6}; + struct vip_meta { + __u32 flags; + __u32 vip_num; + } value = {.vip_num = VIP_NUM}; + __u32 stats_key = VIP_NUM; + struct vip_stats { + __u64 bytes; + __u64 pkts; + } stats[nr_cpus]; + struct real_definition { + union { + __be32 dst; + __be32 dstv6[4]; + }; + __u8 flags; + } real_def = {.dst = MAGIC_VAL}; + __u32 ch_key = 11, real_num = 3; + __u32 duration, 
retval, size; + int err, i, prog_fd, map_fd; + __u64 bytes = 0, pkts = 0; + struct bpf_object *obj; + char buf[128]; + u32 *magic = (u32 *)buf; + + err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); + if (err) { + error_cnt++; + return; + } + + map_fd = bpf_find_map(__func__, obj, "vip_map"); + if (map_fd < 0) + goto out; + bpf_map_update_elem(map_fd, &key, &value, 0); + + map_fd = bpf_find_map(__func__, obj, "ch_rings"); + if (map_fd < 0) + goto out; + bpf_map_update_elem(map_fd, &ch_key, &real_num, 0); + + map_fd = bpf_find_map(__func__, obj, "reals"); + if (map_fd < 0) + goto out; + bpf_map_update_elem(map_fd, &real_num, &real_def, 0); + + err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4), + buf, &size, &retval, &duration); + CHECK(err || retval != 1 || size != 54 || + *magic != MAGIC_VAL, "ipv4", + "err %d errno %d retval %d size %d magic %x\n", + err, errno, retval, size, *magic); + + err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6), + buf, &size, &retval, &duration); + CHECK(err || retval != 1 || size != 74 || + *magic != MAGIC_VAL, "ipv6", + "err %d errno %d retval %d size %d magic %x\n", + err, errno, retval, size, *magic); + + map_fd = bpf_find_map(__func__, obj, "stats"); + if (map_fd < 0) + goto out; + bpf_map_lookup_elem(map_fd, &stats_key, stats); + for (i = 0; i < nr_cpus; i++) { + bytes += stats[i].bytes; + pkts += stats[i].pkts; + } + if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) { + error_cnt++; + printf("test_xdp_noinline:FAIL:stats %lld %lld\n", bytes, pkts); + } +out: + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/progs/test_map_in_map.c b/tools/testing/selftests/bpf/progs/test_map_in_map.c index ce923e67e08e..2985f262846e 100644 --- a/tools/testing/selftests/bpf/progs/test_map_in_map.c +++ b/tools/testing/selftests/bpf/progs/test_map_in_map.c @@ -27,6 +27,7 @@ SEC("xdp_mimtest") int xdp_mimtest0(struct xdp_md *ctx) { int value = 123; + int *value_p; int key = 0; void *map; @@ -35,6 +36,9 @@ int xdp_mimtest0(struct xdp_md *ctx) return XDP_DROP; bpf_map_update_elem(map, &key, &value, 0); + value_p = bpf_map_lookup_elem(map, &key); + if (!value_p || *value_p != 123) + return XDP_DROP; map = bpf_map_lookup_elem(&mim_hash, &key); if (!map) diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c index 02d314383a9c..38797aa627a7 100644 --- a/tools/testing/selftests/bpf/test_btf.c +++ b/tools/testing/selftests/bpf/test_btf.c @@ -5732,6 +5732,51 @@ const struct btf_dedup_test dedup_tests[] = { }, }, { + .descr = "dedup: struct <-> fwd resolution w/ hash collision", + /* + * // CU 1: + * struct x; + * struct s { + * struct x *x; + * }; + * // CU 2: + * struct x {}; + * struct s { + * struct x *x; + * }; + */ + .input = { + .raw_types = { + /* CU 1 */ + BTF_FWD_ENC(NAME_TBD, 0 /* struct fwd */), /* [1] fwd x */ + BTF_PTR_ENC(1), /* [2] ptr -> [1] */ + BTF_STRUCT_ENC(NAME_TBD, 1, 8), /* [3] struct s */ + BTF_MEMBER_ENC(NAME_TBD, 2, 0), + /* CU 2 */ + BTF_STRUCT_ENC(NAME_TBD, 0, 0), /* [4] struct x */ + BTF_PTR_ENC(4), /* [5] ptr -> [4] */ + BTF_STRUCT_ENC(NAME_TBD, 1, 8), /* [6] struct s */ + BTF_MEMBER_ENC(NAME_TBD, 5, 0), + BTF_END_RAW, + }, + BTF_STR_SEC("\0x\0s\0x\0x\0s\0x\0"), + }, + .expect = { + .raw_types = { + BTF_PTR_ENC(3), /* [1] ptr -> [3] */ + BTF_STRUCT_ENC(NAME_TBD, 1, 8), /* [2] struct s */ + BTF_MEMBER_ENC(NAME_TBD, 1, 0), + BTF_STRUCT_ENC(NAME_NTH(2), 0, 0), /* [3] struct x */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0s\0x"), + }, + .opts = { + 
.dont_resolve_fwds = false, + .dedup_table_size = 1, /* force hash collisions */ + }, +}, +{ .descr = "dedup: all possible kinds (no duplicates)", .input = { .raw_types = { @@ -5936,9 +5981,9 @@ static int do_test_dedup(unsigned int test_num) } test_hdr = test_btf_data; - test_strs = test_btf_data + test_hdr->str_off; + test_strs = test_btf_data + sizeof(*test_hdr) + test_hdr->str_off; expect_hdr = expect_btf_data; - expect_strs = expect_btf_data + expect_hdr->str_off; + expect_strs = expect_btf_data + sizeof(*test_hdr) + expect_hdr->str_off; if (CHECK(test_hdr->str_len != expect_hdr->str_len, "test_hdr->str_len:%u != expect_hdr->str_len:%u", test_hdr->str_len, expect_hdr->str_len)) { diff --git a/tools/testing/selftests/bpf/test_libbpf_open.c b/tools/testing/selftests/bpf/test_libbpf_open.c index 1909ecf4d999..65cbd30704b5 100644 --- a/tools/testing/selftests/bpf/test_libbpf_open.c +++ b/tools/testing/selftests/bpf/test_libbpf_open.c @@ -67,7 +67,7 @@ int test_walk_maps(struct bpf_object *obj, bool verbose) struct bpf_map *map; int cnt = 0; - bpf_map__for_each(map, obj) { + bpf_object__for_each_map(map, obj) { cnt++; if (verbose) printf("Map (count:%d) name: %s\n", cnt, diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index c52bd90fbb34..5d10aee9e277 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -4,55 +4,13 @@ * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. */ -#include <stdio.h> -#include <unistd.h> -#include <errno.h> -#include <string.h> -#include <assert.h> -#include <stdlib.h> -#include <stdarg.h> -#include <time.h> - -#include <linux/types.h> -typedef __u16 __sum16; -#include <arpa/inet.h> -#include <linux/if_ether.h> -#include <linux/if_packet.h> -#include <linux/ip.h> -#include <linux/ipv6.h> -#include <linux/tcp.h> -#include <linux/filter.h> -#include <linux/perf_event.h> -#include <linux/unistd.h> - -#include <sys/ioctl.h> -#include <sys/wait.h> -#include <sys/types.h> -#include <fcntl.h> -#include <pthread.h> -#include <linux/bpf.h> -#include <linux/err.h> -#include <bpf/bpf.h> -#include <bpf/libbpf.h> - -#include "test_iptunnel_common.h" -#include "bpf_util.h" -#include "bpf_endian.h" +#include "test_progs.h" #include "bpf_rlimit.h" -#include "trace_helpers.h" -#include "flow_dissector_load.h" - -static int error_cnt, pass_cnt; -static bool jit_enabled; -#define MAGIC_BYTES 123 +int error_cnt, pass_cnt; +bool jit_enabled; -/* ipv4 test vector */ -static struct { - struct ethhdr eth; - struct iphdr iph; - struct tcphdr tcp; -} __packed pkt_v4 = { +struct ipv4_packet pkt_v4 = { .eth.h_proto = __bpf_constant_htons(ETH_P_IP), .iph.ihl = 5, .iph.protocol = IPPROTO_TCP, @@ -61,12 +19,7 @@ static struct { .tcp.doff = 5, }; -/* ipv6 test vector */ -static struct { - struct ethhdr eth; - struct ipv6hdr iph; - struct tcphdr tcp; -} __packed pkt_v6 = { +struct ipv6_packet pkt_v6 = { .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6), .iph.nexthdr = IPPROTO_TCP, .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES), @@ -74,26 +27,7 @@ static struct { .tcp.doff = 5, }; -#define _CHECK(condition, tag, duration, format...) ({ \ - int __ret = !!(condition); \ - if (__ret) { \ - error_cnt++; \ - printf("%s:FAIL:%s ", __func__, tag); \ - printf(format); \ - } else { \ - pass_cnt++; \ - printf("%s:PASS:%s %d nsec\n", __func__, tag, duration);\ - } \ - __ret; \ -}) - -#define CHECK(condition, tag, format...) 
\ - _CHECK(condition, tag, duration, format) -#define CHECK_ATTR(condition, tag, format...) \ - _CHECK(condition, tag, tattr.duration, format) - -static int bpf_find_map(const char *test, struct bpf_object *obj, - const char *name) +int bpf_find_map(const char *test, struct bpf_object *obj, const char *name) { struct bpf_map *map; @@ -106,349 +40,6 @@ static int bpf_find_map(const char *test, struct bpf_object *obj, return bpf_map__fd(map); } -static void test_pkt_access(void) -{ - const char *file = "./test_pkt_access.o"; - struct bpf_object *obj; - __u32 duration, retval; - int err, prog_fd; - - err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); - if (err) { - error_cnt++; - return; - } - - err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4), - NULL, NULL, &retval, &duration); - CHECK(err || retval, "ipv4", - "err %d errno %d retval %d duration %d\n", - err, errno, retval, duration); - - err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6), - NULL, NULL, &retval, &duration); - CHECK(err || retval, "ipv6", - "err %d errno %d retval %d duration %d\n", - err, errno, retval, duration); - bpf_object__close(obj); -} - -static void test_prog_run_xattr(void) -{ - const char *file = "./test_pkt_access.o"; - struct bpf_object *obj; - char buf[10]; - int err; - struct bpf_prog_test_run_attr tattr = { - .repeat = 1, - .data_in = &pkt_v4, - .data_size_in = sizeof(pkt_v4), - .data_out = buf, - .data_size_out = 5, - }; - - err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, - &tattr.prog_fd); - if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno)) - return; - - memset(buf, 0, sizeof(buf)); - - err = bpf_prog_test_run_xattr(&tattr); - CHECK_ATTR(err != -1 || errno != ENOSPC || tattr.retval, "run", - "err %d errno %d retval %d\n", err, errno, tattr.retval); - - CHECK_ATTR(tattr.data_size_out != sizeof(pkt_v4), "data_size_out", - "incorrect output size, want %lu have %u\n", - sizeof(pkt_v4), tattr.data_size_out); - - CHECK_ATTR(buf[5] != 0, "overflow", - "BPF_PROG_TEST_RUN ignored size hint\n"); - - tattr.data_out = NULL; - tattr.data_size_out = 0; - errno = 0; - - err = bpf_prog_test_run_xattr(&tattr); - CHECK_ATTR(err || errno || tattr.retval, "run_no_output", - "err %d errno %d retval %d\n", err, errno, tattr.retval); - - tattr.data_size_out = 1; - err = bpf_prog_test_run_xattr(&tattr); - CHECK_ATTR(err != -EINVAL, "run_wrong_size_out", "err %d\n", err); - - bpf_object__close(obj); -} - -static void test_xdp(void) -{ - struct vip key4 = {.protocol = 6, .family = AF_INET}; - struct vip key6 = {.protocol = 6, .family = AF_INET6}; - struct iptnl_info value4 = {.family = AF_INET}; - struct iptnl_info value6 = {.family = AF_INET6}; - const char *file = "./test_xdp.o"; - struct bpf_object *obj; - char buf[128]; - struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr); - struct iphdr *iph = (void *)buf + sizeof(struct ethhdr); - __u32 duration, retval, size; - int err, prog_fd, map_fd; - - err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); - if (err) { - error_cnt++; - return; - } - - map_fd = bpf_find_map(__func__, obj, "vip2tnl"); - if (map_fd < 0) - goto out; - bpf_map_update_elem(map_fd, &key4, &value4, 0); - bpf_map_update_elem(map_fd, &key6, &value6, 0); - - err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), - buf, &size, &retval, &duration); - - CHECK(err || retval != XDP_TX || size != 74 || - iph->protocol != IPPROTO_IPIP, "ipv4", - "err %d errno %d retval %d size %d\n", - err, errno, retval, size); - - err = 
bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6), - buf, &size, &retval, &duration); - CHECK(err || retval != XDP_TX || size != 114 || - iph6->nexthdr != IPPROTO_IPV6, "ipv6", - "err %d errno %d retval %d size %d\n", - err, errno, retval, size); -out: - bpf_object__close(obj); -} - -static void test_xdp_adjust_tail(void) -{ - const char *file = "./test_adjust_tail.o"; - struct bpf_object *obj; - char buf[128]; - __u32 duration, retval, size; - int err, prog_fd; - - err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); - if (err) { - error_cnt++; - return; - } - - err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), - buf, &size, &retval, &duration); - - CHECK(err || retval != XDP_DROP, - "ipv4", "err %d errno %d retval %d size %d\n", - err, errno, retval, size); - - err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6), - buf, &size, &retval, &duration); - CHECK(err || retval != XDP_TX || size != 54, - "ipv6", "err %d errno %d retval %d size %d\n", - err, errno, retval, size); - bpf_object__close(obj); -} - - - -#define MAGIC_VAL 0x1234 -#define NUM_ITER 100000 -#define VIP_NUM 5 - -static void test_l4lb(const char *file) -{ - unsigned int nr_cpus = bpf_num_possible_cpus(); - struct vip key = {.protocol = 6}; - struct vip_meta { - __u32 flags; - __u32 vip_num; - } value = {.vip_num = VIP_NUM}; - __u32 stats_key = VIP_NUM; - struct vip_stats { - __u64 bytes; - __u64 pkts; - } stats[nr_cpus]; - struct real_definition { - union { - __be32 dst; - __be32 dstv6[4]; - }; - __u8 flags; - } real_def = {.dst = MAGIC_VAL}; - __u32 ch_key = 11, real_num = 3; - __u32 duration, retval, size; - int err, i, prog_fd, map_fd; - __u64 bytes = 0, pkts = 0; - struct bpf_object *obj; - char buf[128]; - u32 *magic = (u32 *)buf; - - err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); - if (err) { - error_cnt++; - return; - } - - map_fd = bpf_find_map(__func__, obj, "vip_map"); - if (map_fd < 0) - goto out; - bpf_map_update_elem(map_fd, &key, &value, 0); - - map_fd = bpf_find_map(__func__, obj, "ch_rings"); - if (map_fd < 0) - goto out; - bpf_map_update_elem(map_fd, &ch_key, &real_num, 0); - - map_fd = bpf_find_map(__func__, obj, "reals"); - if (map_fd < 0) - goto out; - bpf_map_update_elem(map_fd, &real_num, &real_def, 0); - - err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4), - buf, &size, &retval, &duration); - CHECK(err || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 || - *magic != MAGIC_VAL, "ipv4", - "err %d errno %d retval %d size %d magic %x\n", - err, errno, retval, size, *magic); - - err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6), - buf, &size, &retval, &duration); - CHECK(err || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 || - *magic != MAGIC_VAL, "ipv6", - "err %d errno %d retval %d size %d magic %x\n", - err, errno, retval, size, *magic); - - map_fd = bpf_find_map(__func__, obj, "stats"); - if (map_fd < 0) - goto out; - bpf_map_lookup_elem(map_fd, &stats_key, stats); - for (i = 0; i < nr_cpus; i++) { - bytes += stats[i].bytes; - pkts += stats[i].pkts; - } - if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) { - error_cnt++; - printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts); - } -out: - bpf_object__close(obj); -} - -static void test_l4lb_all(void) -{ - const char *file1 = "./test_l4lb.o"; - const char *file2 = "./test_l4lb_noinline.o"; - - test_l4lb(file1); - test_l4lb(file2); -} - -static void test_xdp_noinline(void) -{ - const char *file = "./test_xdp_noinline.o"; - unsigned int nr_cpus = 
bpf_num_possible_cpus(); - struct vip key = {.protocol = 6}; - struct vip_meta { - __u32 flags; - __u32 vip_num; - } value = {.vip_num = VIP_NUM}; - __u32 stats_key = VIP_NUM; - struct vip_stats { - __u64 bytes; - __u64 pkts; - } stats[nr_cpus]; - struct real_definition { - union { - __be32 dst; - __be32 dstv6[4]; - }; - __u8 flags; - } real_def = {.dst = MAGIC_VAL}; - __u32 ch_key = 11, real_num = 3; - __u32 duration, retval, size; - int err, i, prog_fd, map_fd; - __u64 bytes = 0, pkts = 0; - struct bpf_object *obj; - char buf[128]; - u32 *magic = (u32 *)buf; - - err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); - if (err) { - error_cnt++; - return; - } - - map_fd = bpf_find_map(__func__, obj, "vip_map"); - if (map_fd < 0) - goto out; - bpf_map_update_elem(map_fd, &key, &value, 0); - - map_fd = bpf_find_map(__func__, obj, "ch_rings"); - if (map_fd < 0) - goto out; - bpf_map_update_elem(map_fd, &ch_key, &real_num, 0); - - map_fd = bpf_find_map(__func__, obj, "reals"); - if (map_fd < 0) - goto out; - bpf_map_update_elem(map_fd, &real_num, &real_def, 0); - - err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4), - buf, &size, &retval, &duration); - CHECK(err || retval != 1 || size != 54 || - *magic != MAGIC_VAL, "ipv4", - "err %d errno %d retval %d size %d magic %x\n", - err, errno, retval, size, *magic); - - err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6), - buf, &size, &retval, &duration); - CHECK(err || retval != 1 || size != 74 || - *magic != MAGIC_VAL, "ipv6", - "err %d errno %d retval %d size %d magic %x\n", - err, errno, retval, size, *magic); - - map_fd = bpf_find_map(__func__, obj, "stats"); - if (map_fd < 0) - goto out; - bpf_map_lookup_elem(map_fd, &stats_key, stats); - for (i = 0; i < nr_cpus; i++) { - bytes += stats[i].bytes; - pkts += stats[i].pkts; - } - if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) { - error_cnt++; - printf("test_xdp_noinline:FAIL:stats %lld %lld\n", bytes, pkts); - } -out: - bpf_object__close(obj); -} - -static void test_tcp_estats(void) -{ - const char *file = "./test_tcp_estats.o"; - int err, prog_fd; - struct bpf_object *obj; - __u32 duration = 0; - - err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); - CHECK(err, "", "err %d errno %d\n", err, errno); - if (err) { - error_cnt++; - return; - } - - bpf_object__close(obj); -} - -static inline __u64 ptr_to_u64(const void *ptr) -{ - return (__u64) (unsigned long) ptr; -} - static bool is_jit_enabled(void) { const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable"; @@ -467,475 +58,7 @@ static bool is_jit_enabled(void) return enabled; } -static void test_bpf_obj_id(void) -{ - const __u64 array_magic_value = 0xfaceb00c; - const __u32 array_key = 0; - const int nr_iters = 2; - const char *file = "./test_obj_id.o"; - const char *expected_prog_name = "test_obj_id"; - const char *expected_map_name = "test_map_id"; - const __u64 nsec_per_sec = 1000000000; - - struct bpf_object *objs[nr_iters]; - int prog_fds[nr_iters], map_fds[nr_iters]; - /* +1 to test for the info_len returned by kernel */ - struct bpf_prog_info prog_infos[nr_iters + 1]; - struct bpf_map_info map_infos[nr_iters + 1]; - /* Each prog only uses one map. +1 to test nr_map_ids - * returned by kernel. 
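 * (An aside on this oversizing convention, used for both map_infos[]
 *  and prog_infos[] below: bpf_obj_get_info_by_fd() takes info_len by
 *  reference and the kernel writes back how many bytes it actually
 *  filled, so deliberately passing a larger buffer lets the test
 *  verify that the kernel truncates to the real struct size. A
 *  minimal sketch, reusing this file's names and assuming a loaded
 *  prog_fd:
 *
 *	struct bpf_prog_info infos[2] = {};
 *	__u32 info_len = sizeof(infos);
 *
 *	if (!bpf_obj_get_info_by_fd(prog_fd, infos, &info_len))
 *		assert(info_len == sizeof(struct bpf_prog_info));
 *
 *  The snippet is illustrative only and is not part of the patch.)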
- */ - __u32 map_ids[nr_iters + 1]; - char jited_insns[128], xlated_insns[128], zeros[128]; - __u32 i, next_id, info_len, nr_id_found, duration = 0; - struct timespec real_time_ts, boot_time_ts; - int err = 0; - __u64 array_value; - uid_t my_uid = getuid(); - time_t now, load_time; - - err = bpf_prog_get_fd_by_id(0); - CHECK(err >= 0 || errno != ENOENT, - "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno); - - err = bpf_map_get_fd_by_id(0); - CHECK(err >= 0 || errno != ENOENT, - "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno); - - for (i = 0; i < nr_iters; i++) - objs[i] = NULL; - - /* Check bpf_obj_get_info_by_fd() */ - bzero(zeros, sizeof(zeros)); - for (i = 0; i < nr_iters; i++) { - now = time(NULL); - err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER, - &objs[i], &prog_fds[i]); - /* test_obj_id.o is a dumb prog. It should never fail - * to load. - */ - if (err) - error_cnt++; - assert(!err); - - /* Insert a magic value to the map */ - map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id"); - assert(map_fds[i] >= 0); - err = bpf_map_update_elem(map_fds[i], &array_key, - &array_magic_value, 0); - assert(!err); - - /* Check getting map info */ - info_len = sizeof(struct bpf_map_info) * 2; - bzero(&map_infos[i], info_len); - err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i], - &info_len); - if (CHECK(err || - map_infos[i].type != BPF_MAP_TYPE_ARRAY || - map_infos[i].key_size != sizeof(__u32) || - map_infos[i].value_size != sizeof(__u64) || - map_infos[i].max_entries != 1 || - map_infos[i].map_flags != 0 || - info_len != sizeof(struct bpf_map_info) || - strcmp((char *)map_infos[i].name, expected_map_name), - "get-map-info(fd)", - "err %d errno %d type %d(%d) info_len %u(%Zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n", - err, errno, - map_infos[i].type, BPF_MAP_TYPE_ARRAY, - info_len, sizeof(struct bpf_map_info), - map_infos[i].key_size, - map_infos[i].value_size, - map_infos[i].max_entries, - map_infos[i].map_flags, - map_infos[i].name, expected_map_name)) - goto done; - - /* Check getting prog info */ - info_len = sizeof(struct bpf_prog_info) * 2; - bzero(&prog_infos[i], info_len); - bzero(jited_insns, sizeof(jited_insns)); - bzero(xlated_insns, sizeof(xlated_insns)); - prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns); - prog_infos[i].jited_prog_len = sizeof(jited_insns); - prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns); - prog_infos[i].xlated_prog_len = sizeof(xlated_insns); - prog_infos[i].map_ids = ptr_to_u64(map_ids + i); - prog_infos[i].nr_map_ids = 2; - err = clock_gettime(CLOCK_REALTIME, &real_time_ts); - assert(!err); - err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts); - assert(!err); - err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i], - &info_len); - load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec) - + (prog_infos[i].load_time / nsec_per_sec); - if (CHECK(err || - prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER || - info_len != sizeof(struct bpf_prog_info) || - (jit_enabled && !prog_infos[i].jited_prog_len) || - (jit_enabled && - !memcmp(jited_insns, zeros, sizeof(zeros))) || - !prog_infos[i].xlated_prog_len || - !memcmp(xlated_insns, zeros, sizeof(zeros)) || - load_time < now - 60 || load_time > now + 60 || - prog_infos[i].created_by_uid != my_uid || - prog_infos[i].nr_map_ids != 1 || - *(int *)(long)prog_infos[i].map_ids != map_infos[i].id || - strcmp((char *)prog_infos[i].name, expected_prog_name), - "get-prog-info(fd)", - "err %d errno %d i %d type %d(%d) info_len %u(%Zu) 
jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n", - err, errno, i, - prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER, - info_len, sizeof(struct bpf_prog_info), - jit_enabled, - prog_infos[i].jited_prog_len, - prog_infos[i].xlated_prog_len, - !!memcmp(jited_insns, zeros, sizeof(zeros)), - !!memcmp(xlated_insns, zeros, sizeof(zeros)), - load_time, now, - prog_infos[i].created_by_uid, my_uid, - prog_infos[i].nr_map_ids, 1, - *(int *)(long)prog_infos[i].map_ids, map_infos[i].id, - prog_infos[i].name, expected_prog_name)) - goto done; - } - - /* Check bpf_prog_get_next_id() */ - nr_id_found = 0; - next_id = 0; - while (!bpf_prog_get_next_id(next_id, &next_id)) { - struct bpf_prog_info prog_info = {}; - __u32 saved_map_id; - int prog_fd; - - info_len = sizeof(prog_info); - - prog_fd = bpf_prog_get_fd_by_id(next_id); - if (prog_fd < 0 && errno == ENOENT) - /* The bpf_prog is in the dead row */ - continue; - if (CHECK(prog_fd < 0, "get-prog-fd(next_id)", - "prog_fd %d next_id %d errno %d\n", - prog_fd, next_id, errno)) - break; - - for (i = 0; i < nr_iters; i++) - if (prog_infos[i].id == next_id) - break; - - if (i == nr_iters) - continue; - - nr_id_found++; - - /* Negative test: - * prog_info.nr_map_ids = 1 - * prog_info.map_ids = NULL - */ - prog_info.nr_map_ids = 1; - err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len); - if (CHECK(!err || errno != EFAULT, - "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)", - err, errno, EFAULT)) - break; - bzero(&prog_info, sizeof(prog_info)); - info_len = sizeof(prog_info); - - saved_map_id = *(int *)((long)prog_infos[i].map_ids); - prog_info.map_ids = prog_infos[i].map_ids; - prog_info.nr_map_ids = 2; - err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len); - prog_infos[i].jited_prog_insns = 0; - prog_infos[i].xlated_prog_insns = 0; - CHECK(err || info_len != sizeof(struct bpf_prog_info) || - memcmp(&prog_info, &prog_infos[i], info_len) || - *(int *)(long)prog_info.map_ids != saved_map_id, - "get-prog-info(next_id->fd)", - "err %d errno %d info_len %u(%Zu) memcmp %d map_id %u(%u)\n", - err, errno, info_len, sizeof(struct bpf_prog_info), - memcmp(&prog_info, &prog_infos[i], info_len), - *(int *)(long)prog_info.map_ids, saved_map_id); - close(prog_fd); - } - CHECK(nr_id_found != nr_iters, - "check total prog id found by get_next_id", - "nr_id_found %u(%u)\n", - nr_id_found, nr_iters); - - /* Check bpf_map_get_next_id() */ - nr_id_found = 0; - next_id = 0; - while (!bpf_map_get_next_id(next_id, &next_id)) { - struct bpf_map_info map_info = {}; - int map_fd; - - info_len = sizeof(map_info); - - map_fd = bpf_map_get_fd_by_id(next_id); - if (map_fd < 0 && errno == ENOENT) - /* The bpf_map is in the dead row */ - continue; - if (CHECK(map_fd < 0, "get-map-fd(next_id)", - "map_fd %d next_id %u errno %d\n", - map_fd, next_id, errno)) - break; - - for (i = 0; i < nr_iters; i++) - if (map_infos[i].id == next_id) - break; - - if (i == nr_iters) - continue; - - nr_id_found++; - - err = bpf_map_lookup_elem(map_fd, &array_key, &array_value); - assert(!err); - - err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len); - CHECK(err || info_len != sizeof(struct bpf_map_info) || - memcmp(&map_info, &map_infos[i], info_len) || - array_value != array_magic_value, - "check get-map-info(next_id->fd)", - "err %d errno %d info_len %u(%Zu) memcmp %d array_value %llu(%llu)\n", - err, errno, info_len, sizeof(struct bpf_map_info), - memcmp(&map_info, 
&map_infos[i], info_len), - array_value, array_magic_value); - - close(map_fd); - } - CHECK(nr_id_found != nr_iters, - "check total map id found by get_next_id", - "nr_id_found %u(%u)\n", - nr_id_found, nr_iters); - -done: - for (i = 0; i < nr_iters; i++) - bpf_object__close(objs[i]); -} - -static void test_pkt_md_access(void) -{ - const char *file = "./test_pkt_md_access.o"; - struct bpf_object *obj; - __u32 duration, retval; - int err, prog_fd; - - err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); - if (err) { - error_cnt++; - return; - } - - err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4), - NULL, NULL, &retval, &duration); - CHECK(err || retval, "", - "err %d errno %d retval %d duration %d\n", - err, errno, retval, duration); - - bpf_object__close(obj); -} - -static void test_obj_name(void) -{ - struct { - const char *name; - int success; - int expected_errno; - } tests[] = { - { "", 1, 0 }, - { "_123456789ABCDE", 1, 0 }, - { "_123456789ABCDEF", 0, EINVAL }, - { "_123456789ABCD\n", 0, EINVAL }, - }; - struct bpf_insn prog[] = { - BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0), - BPF_EXIT_INSN(), - }; - __u32 duration = 0; - int i; - - for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { - size_t name_len = strlen(tests[i].name) + 1; - union bpf_attr attr; - size_t ncopy; - int fd; - - /* test different attr.prog_name during BPF_PROG_LOAD */ - ncopy = name_len < sizeof(attr.prog_name) ? - name_len : sizeof(attr.prog_name); - bzero(&attr, sizeof(attr)); - attr.prog_type = BPF_PROG_TYPE_SCHED_CLS; - attr.insn_cnt = 2; - attr.insns = ptr_to_u64(prog); - attr.license = ptr_to_u64(""); - memcpy(attr.prog_name, tests[i].name, ncopy); - - fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr)); - CHECK((tests[i].success && fd < 0) || - (!tests[i].success && fd != -1) || - (!tests[i].success && errno != tests[i].expected_errno), - "check-bpf-prog-name", - "fd %d(%d) errno %d(%d)\n", - fd, tests[i].success, errno, tests[i].expected_errno); - - if (fd != -1) - close(fd); - - /* test different attr.map_name during BPF_MAP_CREATE */ - ncopy = name_len < sizeof(attr.map_name) ? 
- name_len : sizeof(attr.map_name); - bzero(&attr, sizeof(attr)); - attr.map_type = BPF_MAP_TYPE_ARRAY; - attr.key_size = 4; - attr.value_size = 4; - attr.max_entries = 1; - attr.map_flags = 0; - memcpy(attr.map_name, tests[i].name, ncopy); - fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr)); - CHECK((tests[i].success && fd < 0) || - (!tests[i].success && fd != -1) || - (!tests[i].success && errno != tests[i].expected_errno), - "check-bpf-map-name", - "fd %d(%d) errno %d(%d)\n", - fd, tests[i].success, errno, tests[i].expected_errno); - - if (fd != -1) - close(fd); - } -} - -static void test_tp_attach_query(void) -{ - const int num_progs = 3; - int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs]; - __u32 duration = 0, info_len, saved_prog_ids[num_progs]; - const char *file = "./test_tracepoint.o"; - struct perf_event_query_bpf *query; - struct perf_event_attr attr = {}; - struct bpf_object *obj[num_progs]; - struct bpf_prog_info prog_info; - char buf[256]; - - snprintf(buf, sizeof(buf), - "/sys/kernel/debug/tracing/events/sched/sched_switch/id"); - efd = open(buf, O_RDONLY, 0); - if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno)) - return; - bytes = read(efd, buf, sizeof(buf)); - close(efd); - if (CHECK(bytes <= 0 || bytes >= sizeof(buf), - "read", "bytes %d errno %d\n", bytes, errno)) - return; - - attr.config = strtol(buf, NULL, 0); - attr.type = PERF_TYPE_TRACEPOINT; - attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN; - attr.sample_period = 1; - attr.wakeup_events = 1; - - query = malloc(sizeof(*query) + sizeof(__u32) * num_progs); - for (i = 0; i < num_progs; i++) { - err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i], - &prog_fd[i]); - if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) - goto cleanup1; - - bzero(&prog_info, sizeof(prog_info)); - prog_info.jited_prog_len = 0; - prog_info.xlated_prog_len = 0; - prog_info.nr_map_ids = 0; - info_len = sizeof(prog_info); - err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len); - if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n", - err, errno)) - goto cleanup1; - saved_prog_ids[i] = prog_info.id; - - pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */, - 0 /* cpu 0 */, -1 /* group id */, - 0 /* flags */); - if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n", - pmu_fd[i], errno)) - goto cleanup2; - err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0); - if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", - err, errno)) - goto cleanup3; - - if (i == 0) { - /* check NULL prog array query */ - query->ids_len = num_progs; - err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query); - if (CHECK(err || query->prog_cnt != 0, - "perf_event_ioc_query_bpf", - "err %d errno %d query->prog_cnt %u\n", - err, errno, query->prog_cnt)) - goto cleanup3; - } - - err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]); - if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", - err, errno)) - goto cleanup3; - - if (i == 1) { - /* try to get # of programs only */ - query->ids_len = 0; - err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query); - if (CHECK(err || query->prog_cnt != 2, - "perf_event_ioc_query_bpf", - "err %d errno %d query->prog_cnt %u\n", - err, errno, query->prog_cnt)) - goto cleanup3; - - /* try a few negative tests */ - /* invalid query pointer */ - err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, - (struct perf_event_query_bpf *)0x1); - if (CHECK(!err || errno != EFAULT, - "perf_event_ioc_query_bpf", - "err %d errno %d\n", 
err, errno)) - goto cleanup3; - - /* no enough space */ - query->ids_len = 1; - err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query); - if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2, - "perf_event_ioc_query_bpf", - "err %d errno %d query->prog_cnt %u\n", - err, errno, query->prog_cnt)) - goto cleanup3; - } - - query->ids_len = num_progs; - err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query); - if (CHECK(err || query->prog_cnt != (i + 1), - "perf_event_ioc_query_bpf", - "err %d errno %d query->prog_cnt %u\n", - err, errno, query->prog_cnt)) - goto cleanup3; - for (j = 0; j < i + 1; j++) - if (CHECK(saved_prog_ids[j] != query->ids[j], - "perf_event_ioc_query_bpf", - "#%d saved_prog_id %x query prog_id %x\n", - j, saved_prog_ids[j], query->ids[j])) - goto cleanup3; - } - - i = num_progs - 1; - for (; i >= 0; i--) { - cleanup3: - ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE); - cleanup2: - close(pmu_fd[i]); - cleanup1: - bpf_object__close(obj[i]); - } - free(query); -} - -static int compare_map_keys(int map1_fd, int map2_fd) +int compare_map_keys(int map1_fd, int map2_fd) { __u32 key, next_key; char val_buf[PERF_MAX_STACK_DEPTH * @@ -962,7 +85,7 @@ static int compare_map_keys(int map1_fd, int map2_fd) return 0; } -static int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len) +int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len) { __u32 key, next_key, *cur_key_p, *next_key_p; char *val_buf1, *val_buf2; @@ -998,165 +121,7 @@ out: return err; } -static void test_stacktrace_map() -{ - int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd; - const char *file = "./test_stacktrace_map.o"; - int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len; - struct perf_event_attr attr = {}; - __u32 key, val, duration = 0; - struct bpf_object *obj; - char buf[256]; - - err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); - if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) - return; - - /* Get the ID for the sched/sched_switch tracepoint */ - snprintf(buf, sizeof(buf), - "/sys/kernel/debug/tracing/events/sched/sched_switch/id"); - efd = open(buf, O_RDONLY, 0); - if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno)) - goto close_prog; - - bytes = read(efd, buf, sizeof(buf)); - close(efd); - if (bytes <= 0 || bytes >= sizeof(buf)) - goto close_prog; - - /* Open the perf event and attach bpf progrram */ - attr.config = strtol(buf, NULL, 0); - attr.type = PERF_TYPE_TRACEPOINT; - attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN; - attr.sample_period = 1; - attr.wakeup_events = 1; - pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, - 0 /* cpu 0 */, -1 /* group id */, - 0 /* flags */); - if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", - pmu_fd, errno)) - goto close_prog; - - err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0); - if (err) - goto disable_pmu; - - err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd); - if (err) - goto disable_pmu; - - /* find map fds */ - control_map_fd = bpf_find_map(__func__, obj, "control_map"); - if (control_map_fd < 0) - goto disable_pmu; - - stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap"); - if (stackid_hmap_fd < 0) - goto disable_pmu; - - stackmap_fd = bpf_find_map(__func__, obj, "stackmap"); - if (stackmap_fd < 0) - goto disable_pmu; - - stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap"); - if (stack_amap_fd < 0) - goto disable_pmu; - - /* give some time for bpf program run */ - sleep(1); - - /* disable stack trace collection */ - key = 0; - val 
= 1; - bpf_map_update_elem(control_map_fd, &key, &val, 0); - - /* for every element in stackid_hmap, we can find a corresponding one - * in stackmap, and vise versa. - */ - err = compare_map_keys(stackid_hmap_fd, stackmap_fd); - if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap", - "err %d errno %d\n", err, errno)) - goto disable_pmu_noerr; - - err = compare_map_keys(stackmap_fd, stackid_hmap_fd); - if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap", - "err %d errno %d\n", err, errno)) - goto disable_pmu_noerr; - - stack_trace_len = PERF_MAX_STACK_DEPTH * sizeof(__u64); - err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len); - if (CHECK(err, "compare_stack_ips stackmap vs. stack_amap", - "err %d errno %d\n", err, errno)) - goto disable_pmu_noerr; - - goto disable_pmu_noerr; -disable_pmu: - error_cnt++; -disable_pmu_noerr: - ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); - close(pmu_fd); -close_prog: - bpf_object__close(obj); -} - -static void test_stacktrace_map_raw_tp() -{ - int control_map_fd, stackid_hmap_fd, stackmap_fd; - const char *file = "./test_stacktrace_map.o"; - int efd, err, prog_fd; - __u32 key, val, duration = 0; - struct bpf_object *obj; - - err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd); - if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno)) - return; - - efd = bpf_raw_tracepoint_open("sched_switch", prog_fd); - if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno)) - goto close_prog; - - /* find map fds */ - control_map_fd = bpf_find_map(__func__, obj, "control_map"); - if (control_map_fd < 0) - goto close_prog; - - stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap"); - if (stackid_hmap_fd < 0) - goto close_prog; - - stackmap_fd = bpf_find_map(__func__, obj, "stackmap"); - if (stackmap_fd < 0) - goto close_prog; - - /* give some time for bpf program run */ - sleep(1); - - /* disable stack trace collection */ - key = 0; - val = 1; - bpf_map_update_elem(control_map_fd, &key, &val, 0); - - /* for every element in stackid_hmap, we can find a corresponding one - * in stackmap, and vise versa. - */ - err = compare_map_keys(stackid_hmap_fd, stackmap_fd); - if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap", - "err %d errno %d\n", err, errno)) - goto close_prog; - - err = compare_map_keys(stackmap_fd, stackid_hmap_fd); - if (CHECK(err, "compare_map_keys stackmap vs. 
stackid_hmap", - "err %d errno %d\n", err, errno)) - goto close_prog; - - goto close_prog_noerr; -close_prog: - error_cnt++; -close_prog_noerr: - bpf_object__close(obj); -} - -static int extract_build_id(char *build_id, size_t size) +int extract_build_id(char *build_id, size_t size) { FILE *fp; char *line = NULL; @@ -1180,822 +145,7 @@ err: return -1; } -static void test_stacktrace_build_id(void) -{ - int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd; - const char *file = "./test_stacktrace_build_id.o"; - int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len; - struct perf_event_attr attr = {}; - __u32 key, previous_key, val, duration = 0; - struct bpf_object *obj; - char buf[256]; - int i, j; - struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; - int build_id_matches = 0; - int retry = 1; - -retry: - err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); - if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) - goto out; - - /* Get the ID for the sched/sched_switch tracepoint */ - snprintf(buf, sizeof(buf), - "/sys/kernel/debug/tracing/events/random/urandom_read/id"); - efd = open(buf, O_RDONLY, 0); - if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno)) - goto close_prog; - - bytes = read(efd, buf, sizeof(buf)); - close(efd); - if (CHECK(bytes <= 0 || bytes >= sizeof(buf), - "read", "bytes %d errno %d\n", bytes, errno)) - goto close_prog; - - /* Open the perf event and attach bpf progrram */ - attr.config = strtol(buf, NULL, 0); - attr.type = PERF_TYPE_TRACEPOINT; - attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN; - attr.sample_period = 1; - attr.wakeup_events = 1; - pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, - 0 /* cpu 0 */, -1 /* group id */, - 0 /* flags */); - if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", - pmu_fd, errno)) - goto close_prog; - - err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0); - if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", - err, errno)) - goto close_pmu; - - err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd); - if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", - err, errno)) - goto disable_pmu; - - /* find map fds */ - control_map_fd = bpf_find_map(__func__, obj, "control_map"); - if (CHECK(control_map_fd < 0, "bpf_find_map control_map", - "err %d errno %d\n", err, errno)) - goto disable_pmu; - - stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap"); - if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap", - "err %d errno %d\n", err, errno)) - goto disable_pmu; - - stackmap_fd = bpf_find_map(__func__, obj, "stackmap"); - if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n", - err, errno)) - goto disable_pmu; - - stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap"); - if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap", - "err %d errno %d\n", err, errno)) - goto disable_pmu; - - assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null") - == 0); - assert(system("./urandom_read") == 0); - /* disable stack trace collection */ - key = 0; - val = 1; - bpf_map_update_elem(control_map_fd, &key, &val, 0); - - /* for every element in stackid_hmap, we can find a corresponding one - * in stackmap, and vise versa. - */ - err = compare_map_keys(stackid_hmap_fd, stackmap_fd); - if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap", - "err %d errno %d\n", err, errno)) - goto disable_pmu; - - err = compare_map_keys(stackmap_fd, stackid_hmap_fd); - if (CHECK(err, "compare_map_keys stackmap vs. 
stackid_hmap", - "err %d errno %d\n", err, errno)) - goto disable_pmu; - - err = extract_build_id(buf, 256); - - if (CHECK(err, "get build_id with readelf", - "err %d errno %d\n", err, errno)) - goto disable_pmu; - - err = bpf_map_get_next_key(stackmap_fd, NULL, &key); - if (CHECK(err, "get_next_key from stackmap", - "err %d, errno %d\n", err, errno)) - goto disable_pmu; - - do { - char build_id[64]; - - err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs); - if (CHECK(err, "lookup_elem from stackmap", - "err %d, errno %d\n", err, errno)) - goto disable_pmu; - for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i) - if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID && - id_offs[i].offset != 0) { - for (j = 0; j < 20; ++j) - sprintf(build_id + 2 * j, "%02x", - id_offs[i].build_id[j] & 0xff); - if (strstr(buf, build_id) != NULL) - build_id_matches = 1; - } - previous_key = key; - } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); - - /* stack_map_get_build_id_offset() is racy and sometimes can return - * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID; - * try it one more time. - */ - if (build_id_matches < 1 && retry--) { - ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); - close(pmu_fd); - bpf_object__close(obj); - printf("%s:WARN:Didn't find expected build ID from the map, retrying\n", - __func__); - goto retry; - } - - if (CHECK(build_id_matches < 1, "build id match", - "Didn't find expected build ID from the map\n")) - goto disable_pmu; - - stack_trace_len = PERF_MAX_STACK_DEPTH - * sizeof(struct bpf_stack_build_id); - err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len); - CHECK(err, "compare_stack_ips stackmap vs. stack_amap", - "err %d errno %d\n", err, errno); - -disable_pmu: - ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); - -close_pmu: - close(pmu_fd); - -close_prog: - bpf_object__close(obj); - -out: - return; -} - -static void test_stacktrace_build_id_nmi(void) -{ - int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd; - const char *file = "./test_stacktrace_build_id.o"; - int err, pmu_fd, prog_fd; - struct perf_event_attr attr = { - .sample_freq = 5000, - .freq = 1, - .type = PERF_TYPE_HARDWARE, - .config = PERF_COUNT_HW_CPU_CYCLES, - }; - __u32 key, previous_key, val, duration = 0; - struct bpf_object *obj; - char buf[256]; - int i, j; - struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; - int build_id_matches = 0; - int retry = 1; - -retry: - err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd); - if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) - return; - - pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, - 0 /* cpu 0 */, -1 /* group id */, - 0 /* flags */); - if (CHECK(pmu_fd < 0, "perf_event_open", - "err %d errno %d. 
Does the test host support PERF_COUNT_HW_CPU_CYCLES?\n", - pmu_fd, errno)) - goto close_prog; - - err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0); - if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", - err, errno)) - goto close_pmu; - - err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd); - if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", - err, errno)) - goto disable_pmu; - - /* find map fds */ - control_map_fd = bpf_find_map(__func__, obj, "control_map"); - if (CHECK(control_map_fd < 0, "bpf_find_map control_map", - "err %d errno %d\n", err, errno)) - goto disable_pmu; - - stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap"); - if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap", - "err %d errno %d\n", err, errno)) - goto disable_pmu; - - stackmap_fd = bpf_find_map(__func__, obj, "stackmap"); - if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n", - err, errno)) - goto disable_pmu; - - stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap"); - if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap", - "err %d errno %d\n", err, errno)) - goto disable_pmu; - - assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null") - == 0); - assert(system("taskset 0x1 ./urandom_read 100000") == 0); - /* disable stack trace collection */ - key = 0; - val = 1; - bpf_map_update_elem(control_map_fd, &key, &val, 0); - - /* for every element in stackid_hmap, we can find a corresponding one - * in stackmap, and vise versa. - */ - err = compare_map_keys(stackid_hmap_fd, stackmap_fd); - if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap", - "err %d errno %d\n", err, errno)) - goto disable_pmu; - - err = compare_map_keys(stackmap_fd, stackid_hmap_fd); - if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap", - "err %d errno %d\n", err, errno)) - goto disable_pmu; - - err = extract_build_id(buf, 256); - - if (CHECK(err, "get build_id with readelf", - "err %d errno %d\n", err, errno)) - goto disable_pmu; - - err = bpf_map_get_next_key(stackmap_fd, NULL, &key); - if (CHECK(err, "get_next_key from stackmap", - "err %d, errno %d\n", err, errno)) - goto disable_pmu; - - do { - char build_id[64]; - - err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs); - if (CHECK(err, "lookup_elem from stackmap", - "err %d, errno %d\n", err, errno)) - goto disable_pmu; - for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i) - if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID && - id_offs[i].offset != 0) { - for (j = 0; j < 20; ++j) - sprintf(build_id + 2 * j, "%02x", - id_offs[i].build_id[j] & 0xff); - if (strstr(buf, build_id) != NULL) - build_id_matches = 1; - } - previous_key = key; - } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); - - /* stack_map_get_build_id_offset() is racy and sometimes can return - * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID; - * try it one more time. - */ - if (build_id_matches < 1 && retry--) { - ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); - close(pmu_fd); - bpf_object__close(obj); - printf("%s:WARN:Didn't find expected build ID from the map, retrying\n", - __func__); - goto retry; - } - - if (CHECK(build_id_matches < 1, "build id match", - "Didn't find expected build ID from the map\n")) - goto disable_pmu; - - /* - * We intentionally skip compare_stack_ips(). 
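 * (compare_stack_ips(), kept above as a shared helper, would have
 *  walked both maps and required their values to match byte for
 *  byte; roughly, per key:
 *
 *	bpf_map_lookup_elem(smap_fd, &key, val_buf1);
 *	bpf_map_lookup_elem(amap_fd, &key, val_buf2);
 *	if (memcmp(val_buf1, val_buf2, stack_trace_len))
 *		return -1;
 *
 *  which cannot hold here, as the next sentence explains.)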
This is because we - * only support one in_nmi() ips-to-build_id translation per cpu - * at any time, thus stack_amap here will always fallback to - * BPF_STACK_BUILD_ID_IP; - */ - -disable_pmu: - ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); - -close_pmu: - close(pmu_fd); - -close_prog: - bpf_object__close(obj); -} - -#define MAX_CNT_RAWTP 10ull -#define MAX_STACK_RAWTP 100 -struct get_stack_trace_t { - int pid; - int kern_stack_size; - int user_stack_size; - int user_stack_buildid_size; - __u64 kern_stack[MAX_STACK_RAWTP]; - __u64 user_stack[MAX_STACK_RAWTP]; - struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP]; -}; - -static int get_stack_print_output(void *data, int size) -{ - bool good_kern_stack = false, good_user_stack = false; - const char *nonjit_func = "___bpf_prog_run"; - struct get_stack_trace_t *e = data; - int i, num_stack; - static __u64 cnt; - struct ksym *ks; - - cnt++; - - if (size < sizeof(struct get_stack_trace_t)) { - __u64 *raw_data = data; - bool found = false; - - num_stack = size / sizeof(__u64); - /* If jit is enabled, we do not have a good way to - * verify the sanity of the kernel stack. So we - * just assume it is good if the stack is not empty. - * This could be improved in the future. - */ - if (jit_enabled) { - found = num_stack > 0; - } else { - for (i = 0; i < num_stack; i++) { - ks = ksym_search(raw_data[i]); - if (strcmp(ks->name, nonjit_func) == 0) { - found = true; - break; - } - } - } - if (found) { - good_kern_stack = true; - good_user_stack = true; - } - } else { - num_stack = e->kern_stack_size / sizeof(__u64); - if (jit_enabled) { - good_kern_stack = num_stack > 0; - } else { - for (i = 0; i < num_stack; i++) { - ks = ksym_search(e->kern_stack[i]); - if (strcmp(ks->name, nonjit_func) == 0) { - good_kern_stack = true; - break; - } - } - } - if (e->user_stack_size > 0 && e->user_stack_buildid_size > 0) - good_user_stack = true; - } - if (!good_kern_stack || !good_user_stack) - return LIBBPF_PERF_EVENT_ERROR; - - if (cnt == MAX_CNT_RAWTP) - return LIBBPF_PERF_EVENT_DONE; - - return LIBBPF_PERF_EVENT_CONT; -} - -static void test_get_stack_raw_tp(void) -{ - const char *file = "./test_get_stack_rawtp.o"; - int i, efd, err, prog_fd, pmu_fd, perfmap_fd; - struct perf_event_attr attr = {}; - struct timespec tv = {0, 10}; - __u32 key = 0, duration = 0; - struct bpf_object *obj; - - err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd); - if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno)) - return; - - efd = bpf_raw_tracepoint_open("sys_enter", prog_fd); - if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno)) - goto close_prog; - - perfmap_fd = bpf_find_map(__func__, obj, "perfmap"); - if (CHECK(perfmap_fd < 0, "bpf_find_map", "err %d errno %d\n", - perfmap_fd, errno)) - goto close_prog; - - err = load_kallsyms(); - if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno)) - goto close_prog; - - attr.sample_type = PERF_SAMPLE_RAW; - attr.type = PERF_TYPE_SOFTWARE; - attr.config = PERF_COUNT_SW_BPF_OUTPUT; - pmu_fd = syscall(__NR_perf_event_open, &attr, getpid()/*pid*/, -1/*cpu*/, - -1/*group_fd*/, 0); - if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd, - errno)) - goto close_prog; - - err = bpf_map_update_elem(perfmap_fd, &key, &pmu_fd, BPF_ANY); - if (CHECK(err < 0, "bpf_map_update_elem", "err %d errno %d\n", err, - errno)) - goto close_prog; - - err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0); - if (CHECK(err < 0, "ioctl PERF_EVENT_IOC_ENABLE", "err %d errno %d\n", - err, 
errno)) - goto close_prog; - - err = perf_event_mmap(pmu_fd); - if (CHECK(err < 0, "perf_event_mmap", "err %d errno %d\n", err, errno)) - goto close_prog; - - /* trigger some syscall action */ - for (i = 0; i < MAX_CNT_RAWTP; i++) - nanosleep(&tv, NULL); - - err = perf_event_poller(pmu_fd, get_stack_print_output); - if (CHECK(err < 0, "perf_event_poller", "err %d errno %d\n", err, errno)) - goto close_prog; - - goto close_prog_noerr; -close_prog: - error_cnt++; -close_prog_noerr: - bpf_object__close(obj); -} - -static void test_task_fd_query_rawtp(void) -{ - const char *file = "./test_get_stack_rawtp.o"; - __u64 probe_offset, probe_addr; - __u32 len, prog_id, fd_type; - struct bpf_object *obj; - int efd, err, prog_fd; - __u32 duration = 0; - char buf[256]; - - err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd); - if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno)) - return; - - efd = bpf_raw_tracepoint_open("sys_enter", prog_fd); - if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno)) - goto close_prog; - - /* query (getpid(), efd) */ - len = sizeof(buf); - err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id, - &fd_type, &probe_offset, &probe_addr); - if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err, - errno)) - goto close_prog; - - err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT && - strcmp(buf, "sys_enter") == 0; - if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n", - fd_type, buf)) - goto close_prog; - - /* test zero len */ - len = 0; - err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id, - &fd_type, &probe_offset, &probe_addr); - if (CHECK(err < 0, "bpf_task_fd_query (len = 0)", "err %d errno %d\n", - err, errno)) - goto close_prog; - err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT && - len == strlen("sys_enter"); - if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len)) - goto close_prog; - - /* test empty buffer */ - len = sizeof(buf); - err = bpf_task_fd_query(getpid(), efd, 0, 0, &len, &prog_id, - &fd_type, &probe_offset, &probe_addr); - if (CHECK(err < 0, "bpf_task_fd_query (buf = 0)", "err %d errno %d\n", - err, errno)) - goto close_prog; - err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT && - len == strlen("sys_enter"); - if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len)) - goto close_prog; - - /* test smaller buffer */ - len = 3; - err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id, - &fd_type, &probe_offset, &probe_addr); - if (CHECK(err >= 0 || errno != ENOSPC, "bpf_task_fd_query (len = 3)", - "err %d errno %d\n", err, errno)) - goto close_prog; - err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT && - len == strlen("sys_enter") && - strcmp(buf, "sy") == 0; - if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len)) - goto close_prog; - - goto close_prog_noerr; -close_prog: - error_cnt++; -close_prog_noerr: - bpf_object__close(obj); -} - -static void test_task_fd_query_tp_core(const char *probe_name, - const char *tp_name) -{ - const char *file = "./test_tracepoint.o"; - int err, bytes, efd, prog_fd, pmu_fd; - struct perf_event_attr attr = {}; - __u64 probe_offset, probe_addr; - __u32 len, prog_id, fd_type; - struct bpf_object *obj; - __u32 duration = 0; - char buf[256]; - - err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); - if (CHECK(err, "bpf_prog_load", "err %d errno %d\n", err, errno)) - goto close_prog; - - snprintf(buf, sizeof(buf), - "/sys/kernel/debug/tracing/events/%s/id", probe_name); - efd = open(buf, O_RDONLY, 
0);
-	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
-		goto close_prog;
-	bytes = read(efd, buf, sizeof(buf));
-	close(efd);
-	if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read",
-		  "bytes %d errno %d\n", bytes, errno))
-		goto close_prog;
-
-	attr.config = strtol(buf, NULL, 0);
-	attr.type = PERF_TYPE_TRACEPOINT;
-	attr.sample_type = PERF_SAMPLE_RAW;
-	attr.sample_period = 1;
-	attr.wakeup_events = 1;
-	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
-			 0 /* cpu 0 */, -1 /* group id */,
-			 0 /* flags */);
-	if (CHECK(err, "perf_event_open", "err %d errno %d\n", err, errno))
-		goto close_pmu;
-
-	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
-	if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err,
-		  errno))
-		goto close_pmu;
-
-	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
-	if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err,
-		  errno))
-		goto close_pmu;
-
-	/* query (getpid(), pmu_fd) */
-	len = sizeof(buf);
-	err = bpf_task_fd_query(getpid(), pmu_fd, 0, buf, &len, &prog_id,
-				&fd_type, &probe_offset, &probe_addr);
-	if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
-		  errno))
-		goto close_pmu;
-
-	err = (fd_type == BPF_FD_TYPE_TRACEPOINT) && !strcmp(buf, tp_name);
-	if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
-		  fd_type, buf))
-		goto close_pmu;
-
-	close(pmu_fd);
-	goto close_prog_noerr;
-
-close_pmu:
-	close(pmu_fd);
-close_prog:
-	error_cnt++;
-close_prog_noerr:
-	bpf_object__close(obj);
-}
-
-static void test_task_fd_query_tp(void)
-{
-	test_task_fd_query_tp_core("sched/sched_switch",
-				   "sched_switch");
-	test_task_fd_query_tp_core("syscalls/sys_enter_read",
-				   "sys_enter_read");
-}
-
-static int libbpf_debug_print(enum libbpf_print_level level,
-			      const char *format, va_list args)
-{
-	if (level == LIBBPF_DEBUG)
-		return 0;
-
-	return vfprintf(stderr, format, args);
-}
-
-static void test_reference_tracking()
-{
-	const char *file = "./test_sk_lookup_kern.o";
-	struct bpf_object *obj;
-	struct bpf_program *prog;
-	__u32 duration = 0;
-	int err = 0;
-
-	obj = bpf_object__open(file);
-	if (IS_ERR(obj)) {
-		error_cnt++;
-		return;
-	}
-
-	bpf_object__for_each_program(prog, obj) {
-		const char *title;
-
-		/* Ignore .text sections */
-		title = bpf_program__title(prog, false);
-		if (strstr(title, ".text") != NULL)
-			continue;
-
-		bpf_program__set_type(prog, BPF_PROG_TYPE_SCHED_CLS);
-
-		/* Expect verifier failure if test name has 'fail' */
-		if (strstr(title, "fail") != NULL) {
-			libbpf_set_print(NULL);
-			err = !bpf_program__load(prog, "GPL", 0);
-			libbpf_set_print(libbpf_debug_print);
-		} else {
-			err = bpf_program__load(prog, "GPL", 0);
-		}
-		CHECK(err, title, "\n");
-	}
-	bpf_object__close(obj);
-}
-
-enum {
-	QUEUE,
-	STACK,
-};
-
-static void test_queue_stack_map(int type)
-{
-	const int MAP_SIZE = 32;
-	__u32 vals[MAP_SIZE], duration, retval, size, val;
-	int i, err, prog_fd, map_in_fd, map_out_fd;
-	char file[32], buf[128];
-	struct bpf_object *obj;
-	struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
-
-	/* Fill test values to be used */
-	for (i = 0; i < MAP_SIZE; i++)
-		vals[i] = rand();
-
-	if (type == QUEUE)
-		strncpy(file, "./test_queue_map.o", sizeof(file));
-	else if (type == STACK)
-		strncpy(file, "./test_stack_map.o", sizeof(file));
-	else
-		return;
-
-	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
-	if (err) {
-		error_cnt++;
-		return;
-	}
-
-	map_in_fd = bpf_find_map(__func__, obj, "map_in");
-	if (map_in_fd < 0)
-		goto out;
-
-	map_out_fd = bpf_find_map(__func__, obj, "map_out");
-	if (map_out_fd < 0)
-		goto out;
-
-	/* Push 32 elements to the input map */
-	for (i = 0; i < MAP_SIZE; i++) {
-		err = bpf_map_update_elem(map_in_fd, NULL, &vals[i], 0);
-		if (err) {
-			error_cnt++;
-			goto out;
-		}
-	}
-
-	/* The eBPF program pushes iph.saddr in the output map,
-	 * pops the input map and saves this value in iph.daddr
-	 */
-	for (i = 0; i < MAP_SIZE; i++) {
-		if (type == QUEUE) {
-			val = vals[i];
-			pkt_v4.iph.saddr = vals[i] * 5;
-		} else if (type == STACK) {
-			val = vals[MAP_SIZE - 1 - i];
-			pkt_v4.iph.saddr = vals[MAP_SIZE - 1 - i] * 5;
-		}
-
-		err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
-					buf, &size, &retval, &duration);
-		if (err || retval || size != sizeof(pkt_v4) ||
-		    iph->daddr != val)
-			break;
-	}
-
-	CHECK(err || retval || size != sizeof(pkt_v4) || iph->daddr != val,
-	      "bpf_map_pop_elem",
-	      "err %d errno %d retval %d size %d iph->daddr %u\n",
-	      err, errno, retval, size, iph->daddr);
-
-	/* Queue is empty, program should return TC_ACT_SHOT */
-	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
-				buf, &size, &retval, &duration);
-	CHECK(err || retval != 2 /* TC_ACT_SHOT */|| size != sizeof(pkt_v4),
-	      "check-queue-stack-map-empty",
-	      "err %d errno %d retval %d size %d\n",
-	      err, errno, retval, size);
-
-	/* Check that the program pushed elements correctly */
-	for (i = 0; i < MAP_SIZE; i++) {
-		err = bpf_map_lookup_and_delete_elem(map_out_fd, NULL, &val);
-		if (err || val != vals[i] * 5)
-			break;
-	}
-
-	CHECK(i != MAP_SIZE && (err || val != vals[i] * 5),
-	      "bpf_map_push_elem", "err %d value %u\n", err, val);
-
-out:
-	pkt_v4.iph.saddr = 0;
-	bpf_object__close(obj);
-}
-
-#define CHECK_FLOW_KEYS(desc, got, expected)			\
-	CHECK(memcmp(&got, &expected, sizeof(got)) != 0,	\
-	      desc,						\
-	      "nhoff=%u/%u "					\
-	      "thoff=%u/%u "					\
-	      "addr_proto=0x%x/0x%x "				\
-	      "is_frag=%u/%u "					\
-	      "is_first_frag=%u/%u "				\
-	      "is_encap=%u/%u "					\
-	      "n_proto=0x%x/0x%x "				\
-	      "sport=%u/%u "					\
-	      "dport=%u/%u\n",					\
-	      got.nhoff, expected.nhoff,			\
-	      got.thoff, expected.thoff,			\
-	      got.addr_proto, expected.addr_proto,		\
-	      got.is_frag, expected.is_frag,			\
-	      got.is_first_frag, expected.is_first_frag,	\
-	      got.is_encap, expected.is_encap,			\
-	      got.n_proto, expected.n_proto,			\
-	      got.sport, expected.sport,			\
-	      got.dport, expected.dport)
-
-static struct bpf_flow_keys pkt_v4_flow_keys = {
-	.nhoff = 0,
-	.thoff = sizeof(struct iphdr),
-	.addr_proto = ETH_P_IP,
-	.ip_proto = IPPROTO_TCP,
-	.n_proto = bpf_htons(ETH_P_IP),
-};
-
-static struct bpf_flow_keys pkt_v6_flow_keys = {
-	.nhoff = 0,
-	.thoff = sizeof(struct ipv6hdr),
-	.addr_proto = ETH_P_IPV6,
-	.ip_proto = IPPROTO_TCP,
-	.n_proto = bpf_htons(ETH_P_IPV6),
-};
-
-static void test_flow_dissector(void)
-{
-	struct bpf_flow_keys flow_keys;
-	struct bpf_object *obj;
-	__u32 duration, retval;
-	int err, prog_fd;
-	__u32 size;
-
-	err = bpf_flow_load(&obj, "./bpf_flow.o", "flow_dissector",
-			    "jmp_table", &prog_fd);
-	if (err) {
-		error_cnt++;
-		return;
-	}
-
-	err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
-				&flow_keys, &size, &retval, &duration);
-	CHECK(size != sizeof(flow_keys) || err || retval != 1, "ipv4",
-	      "err %d errno %d retval %d duration %d size %u/%lu\n",
-	      err, errno, retval, duration, size, sizeof(flow_keys));
-	CHECK_FLOW_KEYS("ipv4_flow_keys", flow_keys, pkt_v4_flow_keys);
-
-	err = bpf_prog_test_run(prog_fd, 10, &pkt_v6, sizeof(pkt_v6),
-				&flow_keys, &size, &retval, &duration);
-	CHECK(size != sizeof(flow_keys) || err || retval != 1, "ipv6",
-	      "err %d errno %d retval %d duration %d size %u/%lu\n",
-	      err, errno, retval, duration, size, sizeof(flow_keys));
-	CHECK_FLOW_KEYS("ipv6_flow_keys", flow_keys, pkt_v6_flow_keys);
-
-	bpf_object__close(obj);
-}
-
-static void *test_spin_lock(void *arg)
+void *spin_lock_thread(void *arg)
 {
 	__u32 duration, retval;
 	int err, prog_fd = *(u32 *) arg;
@@ -2008,105 +158,9 @@ static void *test_spin_lock(void *arg)
 	pthread_exit(arg);
 }
 
-static void test_spinlock(void)
-{
-	const char *file = "./test_spin_lock.o";
-	pthread_t thread_id[4];
-	struct bpf_object *obj;
-	int prog_fd;
-	int err = 0, i;
-	void *ret;
-
-	err = bpf_prog_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd);
-	if (err) {
-		printf("test_spin_lock:bpf_prog_load errno %d\n", errno);
-		goto close_prog;
-	}
-	for (i = 0; i < 4; i++)
-		assert(pthread_create(&thread_id[i], NULL,
-				      &test_spin_lock, &prog_fd) == 0);
-	for (i = 0; i < 4; i++)
-		assert(pthread_join(thread_id[i], &ret) == 0 &&
-		       ret == (void *)&prog_fd);
-	goto close_prog_noerr;
-close_prog:
-	error_cnt++;
-close_prog_noerr:
-	bpf_object__close(obj);
-}
-
-static void *parallel_map_access(void *arg)
-{
-	int err, map_fd = *(u32 *) arg;
-	int vars[17], i, j, rnd, key = 0;
-
-	for (i = 0; i < 10000; i++) {
-		err = bpf_map_lookup_elem_flags(map_fd, &key, vars, BPF_F_LOCK);
-		if (err) {
-			printf("lookup failed\n");
-			error_cnt++;
-			goto out;
-		}
-		if (vars[0] != 0) {
-			printf("lookup #%d var[0]=%d\n", i, vars[0]);
-			error_cnt++;
-			goto out;
-		}
-		rnd = vars[1];
-		for (j = 2; j < 17; j++) {
-			if (vars[j] == rnd)
-				continue;
-			printf("lookup #%d var[1]=%d var[%d]=%d\n",
-			       i, rnd, j, vars[j]);
-			error_cnt++;
-			goto out;
-		}
-	}
-out:
-	pthread_exit(arg);
-}
-
-static void test_map_lock(void)
-{
-	const char *file = "./test_map_lock.o";
-	int prog_fd, map_fd[2], vars[17] = {};
-	pthread_t thread_id[6];
-	struct bpf_object *obj;
-	int err = 0, key = 0, i;
-	void *ret;
-
-	err = bpf_prog_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd);
-	if (err) {
-		printf("test_map_lock:bpf_prog_load errno %d\n", errno);
-		goto close_prog;
-	}
-	map_fd[0] = bpf_find_map(__func__, obj, "hash_map");
-	if (map_fd[0] < 0)
-		goto close_prog;
-	map_fd[1] = bpf_find_map(__func__, obj, "array_map");
-	if (map_fd[1] < 0)
-		goto close_prog;
-
-	bpf_map_update_elem(map_fd[0], &key, vars, BPF_F_LOCK);
-
-	for (i = 0; i < 4; i++)
-		assert(pthread_create(&thread_id[i], NULL,
-				      &test_spin_lock, &prog_fd) == 0);
-	for (i = 4; i < 6; i++)
-		assert(pthread_create(&thread_id[i], NULL,
-				      &parallel_map_access, &map_fd[i - 4]) == 0);
-	for (i = 0; i < 4; i++)
-		assert(pthread_join(thread_id[i], &ret) == 0 &&
-		       ret == (void *)&prog_fd);
-	for (i = 4; i < 6; i++)
-		assert(pthread_join(thread_id[i], &ret) == 0 &&
-		       ret == (void *)&map_fd[i - 4]);
-	goto close_prog_noerr;
-close_prog:
-	error_cnt++;
-close_prog_noerr:
-	bpf_object__close(obj);
-}
+#define DECLARE
+#include <prog_tests/tests.h>
+#undef DECLARE
 
 int main(void)
 {
@@ -2114,30 +168,9 @@ int main(void)
 
 	jit_enabled = is_jit_enabled();
 
-	test_pkt_access();
-	test_prog_run_xattr();
-	test_xdp();
-	test_xdp_adjust_tail();
-	test_l4lb_all();
-	test_xdp_noinline();
-	test_tcp_estats();
-	test_bpf_obj_id();
-	test_pkt_md_access();
-	test_obj_name();
-	test_tp_attach_query();
-	test_stacktrace_map();
-	test_stacktrace_build_id();
-	test_stacktrace_build_id_nmi();
-	test_stacktrace_map_raw_tp();
-	test_get_stack_raw_tp();
-	test_task_fd_query_rawtp();
-	test_task_fd_query_tp();
-	test_reference_tracking();
-	test_queue_stack_map(QUEUE);
-	test_queue_stack_map(STACK);
-	test_flow_dissector();
-	test_spinlock();
-	test_map_lock();
+#define CALL
+#include <prog_tests/tests.h>
+#undef CALL
 
 	printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
 	return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
new file mode 100644
index 000000000000..51a07367cd43
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_progs.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <stdio.h>
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <time.h>
+#include <signal.h>
+
+#include <linux/types.h>
+typedef __u16 __sum16;
+#include <arpa/inet.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/filter.h>
+#include <linux/perf_event.h>
+#include <linux/unistd.h>
+
+#include <sys/ioctl.h>
+#include <sys/wait.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <linux/bpf.h>
+#include <linux/err.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#include "test_iptunnel_common.h"
+#include "bpf_util.h"
+#include "bpf_endian.h"
+#include "trace_helpers.h"
+#include "flow_dissector_load.h"
+
+extern int error_cnt, pass_cnt;
+extern bool jit_enabled;
+
+#define MAGIC_BYTES 123
+
+/* ipv4 test vector */
+struct ipv4_packet {
+	struct ethhdr eth;
+	struct iphdr iph;
+	struct tcphdr tcp;
+} __packed;
+extern struct ipv4_packet pkt_v4;
+
+/* ipv6 test vector */
+struct ipv6_packet {
+	struct ethhdr eth;
+	struct ipv6hdr iph;
+	struct tcphdr tcp;
+} __packed;
+extern struct ipv6_packet pkt_v6;
+
+#define _CHECK(condition, tag, duration, format...) ({			\
+	int __ret = !!(condition);					\
+	if (__ret) {							\
+		error_cnt++;						\
+		printf("%s:FAIL:%s ", __func__, tag);			\
+		printf(format);						\
+	} else {							\
+		pass_cnt++;						\
+		printf("%s:PASS:%s %d nsec\n", __func__, tag, duration);\
+	}								\
+	__ret;								\
+})
+
+#define CHECK(condition, tag, format...) \
+	_CHECK(condition, tag, duration, format)
+#define CHECK_ATTR(condition, tag, format...) \
+	_CHECK(condition, tag, tattr.duration, format)
+
+#define MAGIC_VAL 0x1234
+#define NUM_ITER 100000
+#define VIP_NUM 5
+
+static inline __u64 ptr_to_u64(const void *ptr)
+{
+	return (__u64) (unsigned long) ptr;
+}
+
+int bpf_find_map(const char *test, struct bpf_object *obj, const char *name);
+int compare_map_keys(int map1_fd, int map2_fd);
+int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len);
+int extract_build_id(char *build_id, size_t size);
+void *spin_lock_thread(void *arg);
diff --git a/tools/testing/selftests/bpf/test_sock_fields.c b/tools/testing/selftests/bpf/test_sock_fields.c
index 9bb58369b481..bc8943938bf5 100644
--- a/tools/testing/selftests/bpf/test_sock_fields.c
+++ b/tools/testing/selftests/bpf/test_sock_fields.c
@@ -14,6 +14,7 @@
 #include <bpf/libbpf.h>
 
 #include "cgroup_helpers.h"
+#include "bpf_rlimit.h"
 
 enum bpf_array_idx {
 	SRV_IDX,
diff --git a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
index 4b721a77bebb..c3de1a2c9dc5 100644
--- a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
+++ b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
@@ -173,7 +173,7 @@
 	.retval = 0,
 },
 {
-	"sanitation: alu with different scalars",
+	"sanitation: alu with different scalars 1",
 	.insns = {
 	BPF_MOV64_IMM(BPF_REG_0, 1),
 	BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
@@ -199,6 +199,48 @@
 	.retval = 0x100000,
 },
 {
+	"sanitation: alu with different scalars 2",
+	.insns = {
+	BPF_MOV64_IMM(BPF_REG_0, 1),
+	BPF_LD_MAP_FD(BPF_REG_1, 0),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+	BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
+	BPF_EMIT_CALL(BPF_FUNC_map_delete_elem),
+	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+	BPF_EMIT_CALL(BPF_FUNC_map_delete_elem),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
+	BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_7),
+	BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
+	BPF_EXIT_INSN(),
+	},
+	.fixup_map_array_48b = { 1 },
+	.result = ACCEPT,
+	.retval = -EINVAL * 2,
+},
+{
+	"sanitation: alu with different scalars 3",
+	.insns = {
+	BPF_MOV64_IMM(BPF_REG_0, EINVAL),
+	BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, -1),
+	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+	BPF_MOV64_IMM(BPF_REG_0, EINVAL),
+	BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, -1),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
+	BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_7),
+	BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = -EINVAL * 2,
+},
+{
 	"map access: value_ptr += known scalar, upper oob arith, test 1",
 	.insns = {
 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
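
The DECLARE/CALL hunks in test_progs.c above implement a classic X-macro include: the same generated header is pulled in twice, once expanding to prototypes and once to call sites, so adding a test under prog_tests/ requires no edit to test_progs.c itself. The generated prog_tests/tests.h is not shown in this diff, so the following is only a minimal sketch of the pattern under that assumption; the file and test names (tests.h, test_foo, test_bar) are placeholders, and in the real tree the header is emitted by the selftests Makefile from the sources in prog_tests/.

/* tests.h -- stand-in for the generated prog_tests/tests.h (hypothetical).
 * Each entry becomes a prototype under DECLARE and a call under CALL. */
#ifdef DECLARE
void test_foo(void);
void test_bar(void);
#endif
#ifdef CALL
test_foo();
test_bar();
#endif

/* main.c -- includes the same header twice, as test_progs.c now does */
#include <stdio.h>

#define DECLARE
#include "tests.h"
#undef DECLARE

/* in the real tree each test lives in its own prog_tests/*.c file */
void test_foo(void) { printf("foo\n"); }
void test_bar(void) { printf("bar\n"); }

int main(void)
{
#define CALL
#include "tests.h"	/* expands to test_foo(); test_bar(); */
#undef CALL
	return 0;
}

Registering a new test then only means dropping a file into prog_tests/ and rebuilding: the Makefile regenerates the header, and the declaration and call lists stay in sync by construction.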
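One non-obvious dependency in the new test_progs.h: CHECK() expands to a reference to a local variable named duration (and CHECK_ATTR() to tattr.duration), so every caller must have such a name in scope. A hypothetical test file under prog_tests/ would therefore look roughly like this; the test body and the bpf_create_map() usage are illustrative only, not code from this commit.

/* prog_tests/example.c -- hypothetical, for illustration only */
#include <test_progs.h>

void test_example(void)
{
	__u32 duration = 0;	/* CHECK() expands to a use of this name */
	int map_fd;

	map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(__u32),
				sizeof(__u32), 1, 0);
	if (CHECK(map_fd < 0, "map_create", "err %d errno %d\n",
		  map_fd, errno))
		return;		/* _CHECK already bumped error_cnt */

	close(map_fd);		/* pass_cnt was bumped by the CHECK above */
}

The matching entry the Makefile would emit into the generated tests.h is just the pair void test_example(void); under DECLARE and test_example(); under CALL.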