author		David S. Miller <davem@davemloft.net>	2019-11-21 05:11:23 +0300
committer	David S. Miller <davem@davemloft.net>	2019-11-21 05:11:23 +0300
commit		ee5a489fd9645104925e5cdf8f8e455d833730b9 (patch)
tree		1e46a8c460e1d51d465fe472e42cf1c16f92f9c7 /tools/testing
parent		e2193c9334291ecdc437cdbd9fe9ac35c14fffa8 (diff)
parent		196e8ca74886c433dcfc64a809707074b936aaf5 (diff)
download	linux-ee5a489fd9645104925e5cdf8f8e455d833730b9.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:

====================
pull-request: bpf-next 2019-11-20

The following pull-request contains BPF updates for your *net-next* tree.

We've added 81 non-merge commits during the last 17 day(s) which contain
a total of 120 files changed, 4958 insertions(+), 1081 deletions(-).

There are 3 trivial conflicts; resolve them by always taking the chunk from
196e8ca74886c433:

<<<<<<< HEAD
=======
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
>>>>>>> 196e8ca74886c433dcfc64a809707074b936aaf5

<<<<<<< HEAD
void *bpf_map_area_alloc(u64 size, int numa_node)
=======
static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
>>>>>>> 196e8ca74886c433dcfc64a809707074b936aaf5

<<<<<<< HEAD
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
=======
	/* kmalloc()'ed memory can't be mmap()'ed */
	if (!mmapable && size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
>>>>>>> 196e8ca74886c433dcfc64a809707074b936aaf5

The main changes are:

1) Addition of BPF trampoline which works as a bridge between kernel
   functions, BPF programs and other BPF programs along with two new use
   cases: i) fentry/fexit BPF programs for tracing with practically zero
   overhead to call into BPF (as opposed to k[ret]probes) and ii) attachment
   of the former to networking related programs to see input/output of
   networking programs (covering xdpdump use case), from Alexei Starovoitov.

2) BPF array map mmap support and use in libbpf for global data maps; also
   a big batch of libbpf improvements, among others, support for reading
   bitfields in a relocatable manner (via libbpf's CO-RE helper API), from
   Andrii Nakryiko.

3) Extend s390x JIT with usage of relative long jumps and loads in order
   to lift the current 64/512k size limits on JITed BPF programs there,
   from Ilya Leoshkevich.

4) Add BPF audit support and emit messages upon successful prog load and
   unload in order to have a timeline of events, from Daniel Borkmann and
   Jiri Olsa.

5) Extension to libbpf and xdpsock sample programs to demo the shared umem
   mode (XDP_SHARED_UMEM) as well as RX-only and TX-only sockets, from
   Magnus Karlsson.

6) Several follow-up bug fixes for libbpf's auto-pinning code and a new API
   call named bpf_get_link_xdp_info() for retrieving the full set of prog
   IDs attached to XDP, from Toke Høiland-Jørgensen.

7) Add BTF support for array of int, array of struct and multidimensional
   arrays and enable it for skb->cb[] access in kfree_skb test, from
   Martin KaFai Lau.

8) Fix AF_XDP by using the correct number of channels from ethtool, from
   Luigi Rizzo.

9) Two fixes for BPF selftest to get rid of a hang in test_tc_tunnel and
   to avoid xdping to be run as standalone, from Jiri Benc.

10) Various BPF selftest fixes when run with latest LLVM trunk, from
    Yonghong Song.

11) Fix a memory leak in BPF fentry test run data, from Colin Ian King.

12) Various smaller misc cleanups and improvements mostly all over BPF
    selftests and samples, from Daniel T. Lee, Andre Guedes, Anders Roxell,
    Mao Wenan, Yue Haibing.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
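As context for the new fentry/fexit selftests in this diff: they all follow the same libbpf attach flow (open the object, load it, find the program by its SEC() title, then attach through the new BPF trampoline). A hedged, standalone sketch of that flow is below; the object file and section name are illustrative, copied from the new tests rather than prescribed by the API:

/*
 * Minimal sketch of the fentry attach flow used by the selftests below
 * (prog_tests/fentry_test.c and friends). Names are illustrative.
 */
#include <errno.h>
#include <bpf/libbpf.h>

static int attach_fentry_sketch(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	struct bpf_link *link;
	int err;

	obj = bpf_object__open_file("./fentry_test.o", NULL);
	if (libbpf_get_error(obj))
		return -EINVAL;

	err = bpf_object__load(obj);
	if (err)
		goto out;

	/* BPF program annotated with SEC("fentry/bpf_fentry_test1") */
	prog = bpf_object__find_program_by_title(obj, "fentry/bpf_fentry_test1");
	if (!prog) {
		err = -ENOENT;
		goto out;
	}

	/* attaches via the BPF trampoline; no kprobe is involved */
	link = bpf_program__attach_trace(prog);
	err = libbpf_get_error(link);
	if (!err)
		bpf_link__destroy(link);
out:
	bpf_object__close(obj);
	return err;
}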
Diffstat (limited to 'tools/testing')
-rw-r--r--  tools/testing/selftests/bpf/Makefile  14
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/core_reloc.c  174
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/fentry_fexit.c  90
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/fentry_test.c  64
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c  76
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/fexit_stress.c  76
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/fexit_test.c  64
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/kfree_skb.c  93
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/mmap.c  220
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/pinning.c  20
-rw-r--r--  tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type.c  3
-rw-r--r--  tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type1.c  3
-rw-r--r--  tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type2.c  3
-rw-r--r--  tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields.c  3
-rw-r--r--  tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bit_sz_change.c  3
-rw-r--r--  tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bitfield_vs_int.c  3
-rw-r--r--  tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___err_too_big_bitfield.c  3
-rw-r--r--  tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___just_big_enough.c  3
-rw-r--r--  tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_bitfield.c  3
-rw-r--r--  tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_16.c  3
-rw-r--r--  tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_32.c  3
-rw-r--r--  tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_64.c  3
-rw-r--r--  tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_8.c  3
-rw-r--r--  tools/testing/selftests/bpf/progs/btf__core_reloc_size.c  3
-rw-r--r--  tools/testing/selftests/bpf/progs/btf__core_reloc_size___diff_sz.c  3
-rw-r--r--  tools/testing/selftests/bpf/progs/core_reloc_types.h  173
-rw-r--r--  tools/testing/selftests/bpf/progs/fentry_test.c  90
-rw-r--r--  tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c  91
-rw-r--r--  tools/testing/selftests/bpf/progs/fexit_test.c  98
-rw-r--r--  tools/testing/selftests/bpf/progs/kfree_skb.c  77
-rw-r--r--  tools/testing/selftests/bpf/progs/test_btf_haskv.c  4
-rw-r--r--  tools/testing/selftests/bpf/progs/test_btf_newkv.c  4
-rw-r--r--  tools/testing/selftests/bpf/progs/test_btf_nokv.c  4
-rw-r--r--  tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_direct.c  63
-rw-r--r--  tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c  57
-rw-r--r--  tools/testing/selftests/bpf/progs/test_core_reloc_size.c  51
-rw-r--r--  tools/testing/selftests/bpf/progs/test_mmap.c  45
-rw-r--r--  tools/testing/selftests/bpf/progs/test_pinning.c  2
-rw-r--r--  tools/testing/selftests/bpf/progs/test_pkt_access.c  38
-rw-r--r--  tools/testing/selftests/bpf/progs/test_seg6_loop.c  4
-rw-r--r--  tools/testing/selftests/bpf/progs/test_sysctl_loop1.c  5
-rwxr-xr-x  tools/testing/selftests/bpf/test_tc_tunnel.sh  5
42 files changed, 1590 insertions(+), 157 deletions(-)
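Several of the changed selftests (prog_tests/core_reloc.c and the new prog_tests/mmap.c) switch from bpf_map_update_elem()/bpf_map_lookup_elem() round-trips to directly mmap()ing the global-data array maps. A minimal hedged sketch of that pattern follows, assuming the underlying map was created as mmap-able (BPF_F_MMAPABLE, which libbpf sets for .data/.bss maps in this series); the helper name here is hypothetical:

#include <sys/mman.h>
#include <unistd.h>
#include <bpf/libbpf.h>

/* round a size up to a whole number of pages, as the selftests below do */
static size_t roundup_page(size_t sz)
{
	long page_size = sysconf(_SC_PAGE_SIZE);

	return (sz + page_size - 1) / page_size * page_size;
}

/* hypothetical helper: mmap a global-data map (e.g. "test_mma.bss") R/W */
static void *mmap_global_data(struct bpf_object *obj, const char *map_name,
			      size_t data_sz)
{
	struct bpf_map *map = bpf_object__find_map_by_name(obj, map_name);
	void *mem;

	if (!map)
		return NULL;

	mem = mmap(NULL, roundup_page(data_sz), PROT_READ | PROT_WRITE,
		   MAP_SHARED, bpf_map__fd(map), 0);
	return mem == MAP_FAILED ? NULL : mem;
}

Writes through the returned pointer are immediately visible to the BPF program, which is why the updated core_reloc.c below can drop its map update/lookup calls entirely.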
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index b334a6db15c1..4fe4aec0367c 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -30,7 +30,7 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test
test_sock test_btf test_sockmap get_cgroup_id_user test_socket_cookie \
test_cgroup_storage test_select_reuseport \
test_netcnt test_tcpnotify_user test_sock_fields test_sysctl test_hashmap \
- test_cgroup_attach xdping test_progs-no_alu32
+ test_cgroup_attach test_progs-no_alu32
# Also test bpf-gcc, if present
ifneq ($(BPF_GCC),)
@@ -38,7 +38,8 @@ TEST_GEN_PROGS += test_progs-bpf_gcc
endif
TEST_GEN_FILES =
-TEST_FILES =
+TEST_FILES = test_lwt_ip_encap.o \
+ test_tc_edt.o
# Order correspond to 'make run_tests' order
TEST_PROGS := test_kmod.sh \
@@ -70,7 +71,7 @@ TEST_PROGS_EXTENDED := with_addr.sh \
# Compile but not part of 'make run_tests'
TEST_GEN_PROGS_EXTENDED = test_sock_addr test_skb_cgroup_id_user \
flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user \
- test_lirc_mode2_user
+ test_lirc_mode2_user xdping
TEST_CUSTOM_PROGS = urandom_read
@@ -162,6 +163,12 @@ define CLANG_BPF_BUILD_RULE
-c $1 -o - || echo "BPF obj compilation failed") | \
$(LLC) -march=bpf -mcpu=probe $4 -filetype=obj -o $2
endef
+# Similar to CLANG_BPF_BUILD_RULE, but with disabled alu32
+define CLANG_NOALU32_BPF_BUILD_RULE
+ ($(CLANG) $3 -O2 -target bpf -emit-llvm \
+ -c $1 -o - || echo "BPF obj compilation failed") | \
+ $(LLC) -march=bpf -mcpu=v2 $4 -filetype=obj -o $2
+endef
# Similar to CLANG_BPF_BUILD_RULE, but using native Clang and bpf LLC
define CLANG_NATIVE_BPF_BUILD_RULE
($(CLANG) $3 -O2 -emit-llvm \
@@ -274,6 +281,7 @@ TRUNNER_BPF_LDFLAGS := -mattr=+alu32
$(eval $(call DEFINE_TEST_RUNNER,test_progs))
# Define test_progs-no_alu32 test runner.
+TRUNNER_BPF_BUILD_RULE := CLANG_NOALU32_BPF_BUILD_RULE
TRUNNER_BPF_LDFLAGS :=
$(eval $(call DEFINE_TEST_RUNNER,test_progs,no_alu32))
diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
index 09dfa75fe948..ec9e2fdd6b89 100644
--- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "progs/core_reloc_types.h"
+#include <sys/mman.h>
#define STRUCT_TO_CHAR_PTR(struct_name) (const char *)&(struct struct_name)
@@ -174,21 +175,82 @@
.fails = true, \
}
-#define EXISTENCE_DATA(struct_name) STRUCT_TO_CHAR_PTR(struct_name) { \
- .a = 42, \
-}
-
#define EXISTENCE_CASE_COMMON(name) \
.case_name = #name, \
.bpf_obj_file = "test_core_reloc_existence.o", \
.btf_src_file = "btf__core_reloc_" #name ".o", \
- .relaxed_core_relocs = true \
+ .relaxed_core_relocs = true
#define EXISTENCE_ERR_CASE(name) { \
EXISTENCE_CASE_COMMON(name), \
.fails = true, \
}
+#define BITFIELDS_CASE_COMMON(objfile, test_name_prefix, name) \
+ .case_name = test_name_prefix#name, \
+ .bpf_obj_file = objfile, \
+ .btf_src_file = "btf__core_reloc_" #name ".o"
+
+#define BITFIELDS_CASE(name, ...) { \
+ BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.o", \
+ "direct:", name), \
+ .input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__, \
+ .input_len = sizeof(struct core_reloc_##name), \
+ .output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output) \
+ __VA_ARGS__, \
+ .output_len = sizeof(struct core_reloc_bitfields_output), \
+}, { \
+ BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.o", \
+ "probed:", name), \
+ .input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__, \
+ .input_len = sizeof(struct core_reloc_##name), \
+ .output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output) \
+ __VA_ARGS__, \
+ .output_len = sizeof(struct core_reloc_bitfields_output), \
+ .direct_raw_tp = true, \
+}
+
+
+#define BITFIELDS_ERR_CASE(name) { \
+ BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.o", \
+ "probed:", name), \
+ .fails = true, \
+}, { \
+ BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.o", \
+ "direct:", name), \
+ .direct_raw_tp = true, \
+ .fails = true, \
+}
+
+#define SIZE_CASE_COMMON(name) \
+ .case_name = #name, \
+ .bpf_obj_file = "test_core_reloc_size.o", \
+ .btf_src_file = "btf__core_reloc_" #name ".o", \
+ .relaxed_core_relocs = true
+
+#define SIZE_OUTPUT_DATA(type) \
+ STRUCT_TO_CHAR_PTR(core_reloc_size_output) { \
+ .int_sz = sizeof(((type *)0)->int_field), \
+ .struct_sz = sizeof(((type *)0)->struct_field), \
+ .union_sz = sizeof(((type *)0)->union_field), \
+ .arr_sz = sizeof(((type *)0)->arr_field), \
+ .arr_elem_sz = sizeof(((type *)0)->arr_field[0]), \
+ .ptr_sz = sizeof(((type *)0)->ptr_field), \
+ .enum_sz = sizeof(((type *)0)->enum_field), \
+ }
+
+#define SIZE_CASE(name) { \
+ SIZE_CASE_COMMON(name), \
+ .input_len = 0, \
+ .output = SIZE_OUTPUT_DATA(struct core_reloc_##name), \
+ .output_len = sizeof(struct core_reloc_size_output), \
+}
+
+#define SIZE_ERR_CASE(name) { \
+ SIZE_CASE_COMMON(name), \
+ .fails = true, \
+}
+
struct core_reloc_test_case {
const char *case_name;
const char *bpf_obj_file;
@@ -199,6 +261,7 @@ struct core_reloc_test_case {
int output_len;
bool fails;
bool relaxed_core_relocs;
+ bool direct_raw_tp;
};
static struct core_reloc_test_case test_cases[] = {
@@ -275,12 +338,6 @@ static struct core_reloc_test_case test_cases[] = {
INTS_CASE(ints___bool),
INTS_CASE(ints___reverse_sign),
- INTS_ERR_CASE(ints___err_bitfield),
- INTS_ERR_CASE(ints___err_wrong_sz_8),
- INTS_ERR_CASE(ints___err_wrong_sz_16),
- INTS_ERR_CASE(ints___err_wrong_sz_32),
- INTS_ERR_CASE(ints___err_wrong_sz_64),
-
/* validate edge cases of capturing relocations */
{
.case_name = "misc",
@@ -352,6 +409,44 @@ static struct core_reloc_test_case test_cases[] = {
EXISTENCE_ERR_CASE(existence__err_arr_kind),
EXISTENCE_ERR_CASE(existence__err_arr_value_type),
EXISTENCE_ERR_CASE(existence__err_struct_type),
+
+ /* bitfield relocation checks */
+ BITFIELDS_CASE(bitfields, {
+ .ub1 = 1,
+ .ub2 = 2,
+ .ub7 = 96,
+ .sb4 = -7,
+ .sb20 = -0x76543,
+ .u32 = 0x80000000,
+ .s32 = -0x76543210,
+ }),
+ BITFIELDS_CASE(bitfields___bit_sz_change, {
+ .ub1 = 6,
+ .ub2 = 0xABCDE,
+ .ub7 = 1,
+ .sb4 = -1,
+ .sb20 = -0x17654321,
+ .u32 = 0xBEEF,
+ .s32 = -0x3FEDCBA987654321,
+ }),
+ BITFIELDS_CASE(bitfields___bitfield_vs_int, {
+ .ub1 = 0xFEDCBA9876543210,
+ .ub2 = 0xA6,
+ .ub7 = -0x7EDCBA987654321,
+ .sb4 = -0x6123456789ABCDE,
+ .sb20 = 0xD00D,
+ .u32 = -0x76543,
+ .s32 = 0x0ADEADBEEFBADB0B,
+ }),
+ BITFIELDS_CASE(bitfields___just_big_enough, {
+ .ub1 = 0xF,
+ .ub2 = 0x0812345678FEDCBA,
+ }),
+ BITFIELDS_ERR_CASE(bitfields___err_too_big_bitfield),
+
+ /* size relocation checks */
+ SIZE_CASE(size),
+ SIZE_CASE(size___diff_sz),
};
struct data {
@@ -359,18 +454,25 @@ struct data {
char out[256];
};
+static size_t roundup_page(size_t sz)
+{
+ long page_size = sysconf(_SC_PAGE_SIZE);
+ return (sz + page_size - 1) / page_size * page_size;
+}
+
void test_core_reloc(void)
{
- const char *probe_name = "raw_tracepoint/sys_enter";
+ const size_t mmap_sz = roundup_page(sizeof(struct data));
struct bpf_object_load_attr load_attr = {};
struct core_reloc_test_case *test_case;
+ const char *tp_name, *probe_name;
int err, duration = 0, i, equal;
struct bpf_link *link = NULL;
struct bpf_map *data_map;
struct bpf_program *prog;
struct bpf_object *obj;
- const int zero = 0;
- struct data data;
+ struct data *data;
+ void *mmap_data = NULL;
for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
test_case = &test_cases[i];
@@ -382,11 +484,19 @@ void test_core_reloc(void)
);
obj = bpf_object__open_file(test_case->bpf_obj_file, &opts);
- if (CHECK(IS_ERR_OR_NULL(obj), "obj_open",
- "failed to open '%s': %ld\n",
+ if (CHECK(IS_ERR(obj), "obj_open", "failed to open '%s': %ld\n",
test_case->bpf_obj_file, PTR_ERR(obj)))
continue;
+ /* for typed raw tracepoints, NULL should be specified */
+ if (test_case->direct_raw_tp) {
+ probe_name = "tp_btf/sys_enter";
+ tp_name = NULL;
+ } else {
+ probe_name = "raw_tracepoint/sys_enter";
+ tp_name = "sys_enter";
+ }
+
prog = bpf_object__find_program_by_title(obj, probe_name);
if (CHECK(!prog, "find_probe",
"prog '%s' not found\n", probe_name))
@@ -407,7 +517,7 @@ void test_core_reloc(void)
goto cleanup;
}
- link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
+ link = bpf_program__attach_raw_tracepoint(prog, tp_name);
if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n",
PTR_ERR(link)))
goto cleanup;
@@ -416,24 +526,22 @@ void test_core_reloc(void)
if (CHECK(!data_map, "find_data_map", "data map not found\n"))
goto cleanup;
- memset(&data, 0, sizeof(data));
- memcpy(data.in, test_case->input, test_case->input_len);
-
- err = bpf_map_update_elem(bpf_map__fd(data_map),
- &zero, &data, 0);
- if (CHECK(err, "update_data_map",
- "failed to update .data map: %d\n", err))
+ mmap_data = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
+ MAP_SHARED, bpf_map__fd(data_map), 0);
+ if (CHECK(mmap_data == MAP_FAILED, "mmap",
+ ".bss mmap failed: %d", errno)) {
+ mmap_data = NULL;
goto cleanup;
+ }
+ data = mmap_data;
+
+ memset(mmap_data, 0, sizeof(*data));
+ memcpy(data->in, test_case->input, test_case->input_len);
/* trigger test run */
usleep(1);
- err = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, &data);
- if (CHECK(err, "get_result",
- "failed to get output data: %d\n", err))
- goto cleanup;
-
- equal = memcmp(data.out, test_case->output,
+ equal = memcmp(data->out, test_case->output,
test_case->output_len) == 0;
if (CHECK(!equal, "check_result",
"input/output data don't match\n")) {
@@ -445,12 +553,16 @@ void test_core_reloc(void)
}
for (j = 0; j < test_case->output_len; j++) {
printf("output byte #%d: EXP 0x%02hhx GOT 0x%02hhx\n",
- j, test_case->output[j], data.out[j]);
+ j, test_case->output[j], data->out[j]);
}
goto cleanup;
}
cleanup:
+ if (mmap_data) {
+ CHECK_FAIL(munmap(mmap_data, mmap_sz));
+ mmap_data = NULL;
+ }
if (!IS_ERR_OR_NULL(link)) {
bpf_link__destroy(link);
link = NULL;
diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
new file mode 100644
index 000000000000..40bcff2cc274
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+#include <test_progs.h>
+
+void test_fentry_fexit(void)
+{
+ struct bpf_prog_load_attr attr_fentry = {
+ .file = "./fentry_test.o",
+ };
+ struct bpf_prog_load_attr attr_fexit = {
+ .file = "./fexit_test.o",
+ };
+
+ struct bpf_object *obj_fentry = NULL, *obj_fexit = NULL, *pkt_obj;
+ struct bpf_map *data_map_fentry, *data_map_fexit;
+ char fentry_name[] = "fentry/bpf_fentry_testX";
+ char fexit_name[] = "fexit/bpf_fentry_testX";
+ int err, pkt_fd, kfree_skb_fd, i;
+ struct bpf_link *link[12] = {};
+ struct bpf_program *prog[12];
+ __u32 duration, retval;
+ const int zero = 0;
+ u64 result[12];
+
+ err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS,
+ &pkt_obj, &pkt_fd);
+ if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno))
+ return;
+ err = bpf_prog_load_xattr(&attr_fentry, &obj_fentry, &kfree_skb_fd);
+ if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno))
+ goto close_prog;
+ err = bpf_prog_load_xattr(&attr_fexit, &obj_fexit, &kfree_skb_fd);
+ if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno))
+ goto close_prog;
+
+ for (i = 0; i < 6; i++) {
+ fentry_name[sizeof(fentry_name) - 2] = '1' + i;
+ prog[i] = bpf_object__find_program_by_title(obj_fentry, fentry_name);
+ if (CHECK(!prog[i], "find_prog", "prog %s not found\n", fentry_name))
+ goto close_prog;
+ link[i] = bpf_program__attach_trace(prog[i]);
+ if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n"))
+ goto close_prog;
+ }
+ data_map_fentry = bpf_object__find_map_by_name(obj_fentry, "fentry_t.bss");
+ if (CHECK(!data_map_fentry, "find_data_map", "data map not found\n"))
+ goto close_prog;
+
+ for (i = 6; i < 12; i++) {
+ fexit_name[sizeof(fexit_name) - 2] = '1' + i - 6;
+ prog[i] = bpf_object__find_program_by_title(obj_fexit, fexit_name);
+ if (CHECK(!prog[i], "find_prog", "prog %s not found\n", fexit_name))
+ goto close_prog;
+ link[i] = bpf_program__attach_trace(prog[i]);
+ if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n"))
+ goto close_prog;
+ }
+ data_map_fexit = bpf_object__find_map_by_name(obj_fexit, "fexit_te.bss");
+ if (CHECK(!data_map_fexit, "find_data_map", "data map not found\n"))
+ goto close_prog;
+
+ err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6),
+ NULL, NULL, &retval, &duration);
+ CHECK(err || retval, "ipv6",
+ "err %d errno %d retval %d duration %d\n",
+ err, errno, retval, duration);
+
+ err = bpf_map_lookup_elem(bpf_map__fd(data_map_fentry), &zero, &result);
+ if (CHECK(err, "get_result",
+ "failed to get output data: %d\n", err))
+ goto close_prog;
+
+ err = bpf_map_lookup_elem(bpf_map__fd(data_map_fexit), &zero, result + 6);
+ if (CHECK(err, "get_result",
+ "failed to get output data: %d\n", err))
+ goto close_prog;
+
+ for (i = 0; i < 12; i++)
+ if (CHECK(result[i] != 1, "result", "bpf_fentry_test%d failed err %ld\n",
+ i % 6 + 1, result[i]))
+ goto close_prog;
+
+close_prog:
+ for (i = 0; i < 12; i++)
+ if (!IS_ERR_OR_NULL(link[i]))
+ bpf_link__destroy(link[i]);
+ bpf_object__close(obj_fentry);
+ bpf_object__close(obj_fexit);
+ bpf_object__close(pkt_obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_test.c b/tools/testing/selftests/bpf/prog_tests/fentry_test.c
new file mode 100644
index 000000000000..9fb103193878
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/fentry_test.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+#include <test_progs.h>
+
+void test_fentry_test(void)
+{
+ struct bpf_prog_load_attr attr = {
+ .file = "./fentry_test.o",
+ };
+
+ char prog_name[] = "fentry/bpf_fentry_testX";
+ struct bpf_object *obj = NULL, *pkt_obj;
+ int err, pkt_fd, kfree_skb_fd, i;
+ struct bpf_link *link[6] = {};
+ struct bpf_program *prog[6];
+ __u32 duration, retval;
+ struct bpf_map *data_map;
+ const int zero = 0;
+ u64 result[6];
+
+ err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS,
+ &pkt_obj, &pkt_fd);
+ if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno))
+ return;
+ err = bpf_prog_load_xattr(&attr, &obj, &kfree_skb_fd);
+ if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno))
+ goto close_prog;
+
+ for (i = 0; i < 6; i++) {
+ prog_name[sizeof(prog_name) - 2] = '1' + i;
+ prog[i] = bpf_object__find_program_by_title(obj, prog_name);
+ if (CHECK(!prog[i], "find_prog", "prog %s not found\n", prog_name))
+ goto close_prog;
+ link[i] = bpf_program__attach_trace(prog[i]);
+ if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n"))
+ goto close_prog;
+ }
+ data_map = bpf_object__find_map_by_name(obj, "fentry_t.bss");
+ if (CHECK(!data_map, "find_data_map", "data map not found\n"))
+ goto close_prog;
+
+ err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6),
+ NULL, NULL, &retval, &duration);
+ CHECK(err || retval, "ipv6",
+ "err %d errno %d retval %d duration %d\n",
+ err, errno, retval, duration);
+
+ err = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, &result);
+ if (CHECK(err, "get_result",
+ "failed to get output data: %d\n", err))
+ goto close_prog;
+
+ for (i = 0; i < 6; i++)
+ if (CHECK(result[i] != 1, "result", "bpf_fentry_test%d failed err %ld\n",
+ i + 1, result[i]))
+ goto close_prog;
+
+close_prog:
+ for (i = 0; i < 6; i++)
+ if (!IS_ERR_OR_NULL(link[i]))
+ bpf_link__destroy(link[i]);
+ bpf_object__close(obj);
+ bpf_object__close(pkt_obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
new file mode 100644
index 000000000000..15c7378362dd
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+#include <test_progs.h>
+
+#define PROG_CNT 3
+
+void test_fexit_bpf2bpf(void)
+{
+ const char *prog_name[PROG_CNT] = {
+ "fexit/test_pkt_access",
+ "fexit/test_pkt_access_subprog1",
+ "fexit/test_pkt_access_subprog2",
+ };
+ struct bpf_object *obj = NULL, *pkt_obj;
+ int err, pkt_fd, i;
+ struct bpf_link *link[PROG_CNT] = {};
+ struct bpf_program *prog[PROG_CNT];
+ __u32 duration, retval;
+ struct bpf_map *data_map;
+ const int zero = 0;
+ u64 result[PROG_CNT];
+
+ err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_UNSPEC,
+ &pkt_obj, &pkt_fd);
+ if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno))
+ return;
+ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
+ .attach_prog_fd = pkt_fd,
+ );
+
+ obj = bpf_object__open_file("./fexit_bpf2bpf.o", &opts);
+ if (CHECK(IS_ERR_OR_NULL(obj), "obj_open",
+ "failed to open fexit_bpf2bpf: %ld\n",
+ PTR_ERR(obj)))
+ goto close_prog;
+
+ err = bpf_object__load(obj);
+ if (CHECK(err, "obj_load", "err %d\n", err))
+ goto close_prog;
+
+ for (i = 0; i < PROG_CNT; i++) {
+ prog[i] = bpf_object__find_program_by_title(obj, prog_name[i]);
+ if (CHECK(!prog[i], "find_prog", "prog %s not found\n", prog_name[i]))
+ goto close_prog;
+ link[i] = bpf_program__attach_trace(prog[i]);
+ if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n"))
+ goto close_prog;
+ }
+ data_map = bpf_object__find_map_by_name(obj, "fexit_bp.bss");
+ if (CHECK(!data_map, "find_data_map", "data map not found\n"))
+ goto close_prog;
+
+ err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6),
+ NULL, NULL, &retval, &duration);
+ CHECK(err || retval, "ipv6",
+ "err %d errno %d retval %d duration %d\n",
+ err, errno, retval, duration);
+
+ err = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, &result);
+ if (CHECK(err, "get_result",
+ "failed to get output data: %d\n", err))
+ goto close_prog;
+
+ for (i = 0; i < PROG_CNT; i++)
+ if (CHECK(result[i] != 1, "result", "fexit_bpf2bpf failed err %ld\n",
+ result[i]))
+ goto close_prog;
+
+close_prog:
+ for (i = 0; i < PROG_CNT; i++)
+ if (!IS_ERR_OR_NULL(link[i]))
+ bpf_link__destroy(link[i]);
+ if (!IS_ERR_OR_NULL(obj))
+ bpf_object__close(obj);
+ bpf_object__close(pkt_obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
new file mode 100644
index 000000000000..3b9dbf7433f0
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+#include <test_progs.h>
+
+/* x86-64 fits 55 JITed and 43 interpreted progs into half page */
+#define CNT 40
+
+void test_fexit_stress(void)
+{
+ char test_skb[128] = {};
+ int fexit_fd[CNT] = {};
+ int link_fd[CNT] = {};
+ __u32 duration = 0;
+ char error[4096];
+ __u32 prog_ret;
+ int err, i, filter_fd;
+
+ const struct bpf_insn trace_program[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+
+ struct bpf_load_program_attr load_attr = {
+ .prog_type = BPF_PROG_TYPE_TRACING,
+ .license = "GPL",
+ .insns = trace_program,
+ .insns_cnt = sizeof(trace_program) / sizeof(struct bpf_insn),
+ .expected_attach_type = BPF_TRACE_FEXIT,
+ };
+
+ const struct bpf_insn skb_program[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+
+ struct bpf_load_program_attr skb_load_attr = {
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .license = "GPL",
+ .insns = skb_program,
+ .insns_cnt = sizeof(skb_program) / sizeof(struct bpf_insn),
+ };
+
+ err = libbpf_find_vmlinux_btf_id("bpf_fentry_test1",
+ load_attr.expected_attach_type);
+ if (CHECK(err <= 0, "find_vmlinux_btf_id", "failed: %d\n", err))
+ goto out;
+ load_attr.attach_btf_id = err;
+
+ for (i = 0; i < CNT; i++) {
+ fexit_fd[i] = bpf_load_program_xattr(&load_attr, error, sizeof(error));
+ if (CHECK(fexit_fd[i] < 0, "fexit loaded",
+ "failed: %d errno %d\n", fexit_fd[i], errno))
+ goto out;
+ link_fd[i] = bpf_raw_tracepoint_open(NULL, fexit_fd[i]);
+ if (CHECK(link_fd[i] < 0, "fexit attach failed",
+ "prog %d failed: %d err %d\n", i, link_fd[i], errno))
+ goto out;
+ }
+
+ filter_fd = bpf_load_program_xattr(&skb_load_attr, error, sizeof(error));
+ if (CHECK(filter_fd < 0, "test_program_loaded", "failed: %d errno %d\n",
+ filter_fd, errno))
+ goto out;
+
+ err = bpf_prog_test_run(filter_fd, 1, test_skb, sizeof(test_skb), 0,
+ 0, &prog_ret, 0);
+ close(filter_fd);
+ CHECK_FAIL(err);
+out:
+ for (i = 0; i < CNT; i++) {
+ if (link_fd[i])
+ close(link_fd[i]);
+ if (fexit_fd[i])
+ close(fexit_fd[i]);
+ }
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_test.c b/tools/testing/selftests/bpf/prog_tests/fexit_test.c
new file mode 100644
index 000000000000..f99013222c74
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_test.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+#include <test_progs.h>
+
+void test_fexit_test(void)
+{
+ struct bpf_prog_load_attr attr = {
+ .file = "./fexit_test.o",
+ };
+
+ char prog_name[] = "fexit/bpf_fentry_testX";
+ struct bpf_object *obj = NULL, *pkt_obj;
+ int err, pkt_fd, kfree_skb_fd, i;
+ struct bpf_link *link[6] = {};
+ struct bpf_program *prog[6];
+ __u32 duration, retval;
+ struct bpf_map *data_map;
+ const int zero = 0;
+ u64 result[6];
+
+ err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS,
+ &pkt_obj, &pkt_fd);
+ if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno))
+ return;
+ err = bpf_prog_load_xattr(&attr, &obj, &kfree_skb_fd);
+ if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno))
+ goto close_prog;
+
+ for (i = 0; i < 6; i++) {
+ prog_name[sizeof(prog_name) - 2] = '1' + i;
+ prog[i] = bpf_object__find_program_by_title(obj, prog_name);
+ if (CHECK(!prog[i], "find_prog", "prog %s not found\n", prog_name))
+ goto close_prog;
+ link[i] = bpf_program__attach_trace(prog[i]);
+ if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n"))
+ goto close_prog;
+ }
+ data_map = bpf_object__find_map_by_name(obj, "fexit_te.bss");
+ if (CHECK(!data_map, "find_data_map", "data map not found\n"))
+ goto close_prog;
+
+ err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6),
+ NULL, NULL, &retval, &duration);
+ CHECK(err || retval, "ipv6",
+ "err %d errno %d retval %d duration %d\n",
+ err, errno, retval, duration);
+
+ err = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, &result);
+ if (CHECK(err, "get_result",
+ "failed to get output data: %d\n", err))
+ goto close_prog;
+
+ for (i = 0; i < 6; i++)
+ if (CHECK(result[i] != 1, "result", "bpf_fentry_test%d failed err %ld\n",
+ i + 1, result[i]))
+ goto close_prog;
+
+close_prog:
+ for (i = 0; i < 6; i++)
+ if (!IS_ERR_OR_NULL(link[i]))
+ bpf_link__destroy(link[i]);
+ bpf_object__close(obj);
+ bpf_object__close(pkt_obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
index 430b50de1583..7507c8f689bc 100644
--- a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
+++ b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
@@ -1,15 +1,38 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
+struct meta {
+ int ifindex;
+ __u32 cb32_0;
+ __u8 cb8_0;
+};
+
+static union {
+ __u32 cb32[5];
+ __u8 cb8[20];
+} cb = {
+ .cb32[0] = 0x81828384,
+};
+
static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
- int ifindex = *(int *)data, duration = 0;
- struct ipv6_packet *pkt_v6 = data + 4;
+ struct meta *meta = (struct meta *)data;
+ struct ipv6_packet *pkt_v6 = data + sizeof(*meta);
+ int duration = 0;
- if (ifindex != 1)
+ if (CHECK(size != 72 + sizeof(*meta), "check_size", "size %u != %zu\n",
+ size, 72 + sizeof(*meta)))
+ return;
+ if (CHECK(meta->ifindex != 1, "check_meta_ifindex",
+ "meta->ifindex = %d\n", meta->ifindex))
/* spurious kfree_skb not on loopback device */
return;
- if (CHECK(size != 76, "check_size", "size %u != 76\n", size))
+ if (CHECK(meta->cb8_0 != cb.cb8[0], "check_cb8_0", "cb8_0 %x != %x\n",
+ meta->cb8_0, cb.cb8[0]))
+ return;
+ if (CHECK(meta->cb32_0 != cb.cb32[0], "check_cb32_0",
+ "cb32_0 %x != %x\n",
+ meta->cb32_0, cb.cb32[0]))
return;
if (CHECK(pkt_v6->eth.h_proto != 0xdd86, "check_eth",
"h_proto %x\n", pkt_v6->eth.h_proto))
@@ -26,21 +49,31 @@ static void on_sample(void *ctx, int cpu, void *data, __u32 size)
void test_kfree_skb(void)
{
+ struct __sk_buff skb = {};
+ struct bpf_prog_test_run_attr tattr = {
+ .data_in = &pkt_v6,
+ .data_size_in = sizeof(pkt_v6),
+ .ctx_in = &skb,
+ .ctx_size_in = sizeof(skb),
+ };
struct bpf_prog_load_attr attr = {
.file = "./kfree_skb.o",
};
+ struct bpf_link *link = NULL, *link_fentry = NULL, *link_fexit = NULL;
+ struct bpf_map *perf_buf_map, *global_data;
+ struct bpf_program *prog, *fentry, *fexit;
struct bpf_object *obj, *obj2 = NULL;
struct perf_buffer_opts pb_opts = {};
struct perf_buffer *pb = NULL;
- struct bpf_link *link = NULL;
- struct bpf_map *perf_buf_map;
- struct bpf_program *prog;
- __u32 duration, retval;
- int err, pkt_fd, kfree_skb_fd;
+ int err, kfree_skb_fd;
bool passed = false;
+ __u32 duration = 0;
+ const int zero = 0;
+ bool test_ok[2];
- err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS, &obj, &pkt_fd);
+ err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS,
+ &obj, &tattr.prog_fd);
if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno))
return;
@@ -51,9 +84,28 @@ void test_kfree_skb(void)
prog = bpf_object__find_program_by_title(obj2, "tp_btf/kfree_skb");
if (CHECK(!prog, "find_prog", "prog kfree_skb not found\n"))
goto close_prog;
+ fentry = bpf_object__find_program_by_title(obj2, "fentry/eth_type_trans");
+ if (CHECK(!fentry, "find_prog", "prog eth_type_trans not found\n"))
+ goto close_prog;
+ fexit = bpf_object__find_program_by_title(obj2, "fexit/eth_type_trans");
+ if (CHECK(!fexit, "find_prog", "prog eth_type_trans not found\n"))
+ goto close_prog;
+
+ global_data = bpf_object__find_map_by_name(obj2, "kfree_sk.bss");
+ if (CHECK(!global_data, "find global data", "not found\n"))
+ goto close_prog;
+
link = bpf_program__attach_raw_tracepoint(prog, NULL);
if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n", PTR_ERR(link)))
goto close_prog;
+ link_fentry = bpf_program__attach_trace(fentry);
+ if (CHECK(IS_ERR(link_fentry), "attach fentry", "err %ld\n",
+ PTR_ERR(link_fentry)))
+ goto close_prog;
+ link_fexit = bpf_program__attach_trace(fexit);
+ if (CHECK(IS_ERR(link_fexit), "attach fexit", "err %ld\n",
+ PTR_ERR(link_fexit)))
+ goto close_prog;
perf_buf_map = bpf_object__find_map_by_name(obj2, "perf_buf_map");
if (CHECK(!perf_buf_map, "find_perf_buf_map", "not found\n"))
@@ -66,24 +118,37 @@ void test_kfree_skb(void)
if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb)))
goto close_prog;
- err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6),
- NULL, NULL, &retval, &duration);
- CHECK(err || retval, "ipv6",
+ memcpy(skb.cb, &cb, sizeof(cb));
+ err = bpf_prog_test_run_xattr(&tattr);
+ duration = tattr.duration;
+ CHECK(err || tattr.retval, "ipv6",
"err %d errno %d retval %d duration %d\n",
- err, errno, retval, duration);
+ err, errno, tattr.retval, duration);
/* read perf buffer */
err = perf_buffer__poll(pb, 100);
if (CHECK(err < 0, "perf_buffer__poll", "err %d\n", err))
goto close_prog;
+
/* make sure kfree_skb program was triggered
* and it sent expected skb into ring buffer
*/
CHECK_FAIL(!passed);
+
+ err = bpf_map_lookup_elem(bpf_map__fd(global_data), &zero, test_ok);
+ if (CHECK(err, "get_result",
+ "failed to get output data: %d\n", err))
+ goto close_prog;
+
+ CHECK_FAIL(!test_ok[0] || !test_ok[1]);
close_prog:
perf_buffer__free(pb);
if (!IS_ERR_OR_NULL(link))
bpf_link__destroy(link);
+ if (!IS_ERR_OR_NULL(link_fentry))
+ bpf_link__destroy(link_fentry);
+ if (!IS_ERR_OR_NULL(link_fexit))
+ bpf_link__destroy(link_fexit);
bpf_object__close(obj);
bpf_object__close(obj2);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/mmap.c b/tools/testing/selftests/bpf/prog_tests/mmap.c
new file mode 100644
index 000000000000..051a6d48762c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/mmap.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <sys/mman.h>
+
+struct map_data {
+ __u64 val[512 * 4];
+};
+
+struct bss_data {
+ __u64 in_val;
+ __u64 out_val;
+};
+
+static size_t roundup_page(size_t sz)
+{
+ long page_size = sysconf(_SC_PAGE_SIZE);
+ return (sz + page_size - 1) / page_size * page_size;
+}
+
+void test_mmap(void)
+{
+ const char *file = "test_mmap.o";
+ const char *probe_name = "raw_tracepoint/sys_enter";
+ const char *tp_name = "sys_enter";
+ const size_t bss_sz = roundup_page(sizeof(struct bss_data));
+ const size_t map_sz = roundup_page(sizeof(struct map_data));
+ const int zero = 0, one = 1, two = 2, far = 1500;
+ const long page_size = sysconf(_SC_PAGE_SIZE);
+ int err, duration = 0, i, data_map_fd;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ struct bpf_link *link = NULL;
+ struct bpf_map *data_map, *bss_map;
+ void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp1, *tmp2;
+ volatile struct bss_data *bss_data;
+ volatile struct map_data *map_data;
+ __u64 val = 0;
+
+ obj = bpf_object__open_file("test_mmap.o", NULL);
+ if (CHECK(IS_ERR(obj), "obj_open", "failed to open '%s': %ld\n",
+ file, PTR_ERR(obj)))
+ return;
+ prog = bpf_object__find_program_by_title(obj, probe_name);
+ if (CHECK(!prog, "find_probe", "prog '%s' not found\n", probe_name))
+ goto cleanup;
+ err = bpf_object__load(obj);
+ if (CHECK(err, "obj_load", "failed to load prog '%s': %d\n",
+ probe_name, err))
+ goto cleanup;
+
+ bss_map = bpf_object__find_map_by_name(obj, "test_mma.bss");
+ if (CHECK(!bss_map, "find_bss_map", ".bss map not found\n"))
+ goto cleanup;
+ data_map = bpf_object__find_map_by_name(obj, "data_map");
+ if (CHECK(!data_map, "find_data_map", "data_map map not found\n"))
+ goto cleanup;
+ data_map_fd = bpf_map__fd(data_map);
+
+ bss_mmaped = mmap(NULL, bss_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
+ bpf_map__fd(bss_map), 0);
+ if (CHECK(bss_mmaped == MAP_FAILED, "bss_mmap",
+ ".bss mmap failed: %d\n", errno)) {
+ bss_mmaped = NULL;
+ goto cleanup;
+ }
+ /* map as R/W first */
+ map_mmaped = mmap(NULL, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
+ data_map_fd, 0);
+ if (CHECK(map_mmaped == MAP_FAILED, "data_mmap",
+ "data_map mmap failed: %d\n", errno)) {
+ map_mmaped = NULL;
+ goto cleanup;
+ }
+
+ bss_data = bss_mmaped;
+ map_data = map_mmaped;
+
+ CHECK_FAIL(bss_data->in_val);
+ CHECK_FAIL(bss_data->out_val);
+ CHECK_FAIL(map_data->val[0]);
+ CHECK_FAIL(map_data->val[1]);
+ CHECK_FAIL(map_data->val[2]);
+ CHECK_FAIL(map_data->val[far]);
+
+ link = bpf_program__attach_raw_tracepoint(prog, tp_name);
+ if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n", PTR_ERR(link)))
+ goto cleanup;
+
+ bss_data->in_val = 123;
+ val = 111;
+ CHECK_FAIL(bpf_map_update_elem(data_map_fd, &zero, &val, 0));
+
+ usleep(1);
+
+ CHECK_FAIL(bss_data->in_val != 123);
+ CHECK_FAIL(bss_data->out_val != 123);
+ CHECK_FAIL(map_data->val[0] != 111);
+ CHECK_FAIL(map_data->val[1] != 222);
+ CHECK_FAIL(map_data->val[2] != 123);
+ CHECK_FAIL(map_data->val[far] != 3 * 123);
+
+ CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &zero, &val));
+ CHECK_FAIL(val != 111);
+ CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &one, &val));
+ CHECK_FAIL(val != 222);
+ CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &two, &val));
+ CHECK_FAIL(val != 123);
+ CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &far, &val));
+ CHECK_FAIL(val != 3 * 123);
+
+ /* data_map freeze should fail due to R/W mmap() */
+ err = bpf_map_freeze(data_map_fd);
+ if (CHECK(!err || errno != EBUSY, "no_freeze",
+ "data_map freeze succeeded: err=%d, errno=%d\n", err, errno))
+ goto cleanup;
+
+ /* unmap R/W mapping */
+ err = munmap(map_mmaped, map_sz);
+ map_mmaped = NULL;
+ if (CHECK(err, "data_map_munmap", "data_map munmap failed: %d\n", errno))
+ goto cleanup;
+
+ /* re-map as R/O now */
+ map_mmaped = mmap(NULL, map_sz, PROT_READ, MAP_SHARED, data_map_fd, 0);
+ if (CHECK(map_mmaped == MAP_FAILED, "data_mmap",
+ "data_map R/O mmap failed: %d\n", errno)) {
+ map_mmaped = NULL;
+ goto cleanup;
+ }
+ map_data = map_mmaped;
+
+ /* map/unmap in a loop to test ref counting */
+ for (i = 0; i < 10; i++) {
+ int flags = i % 2 ? PROT_READ : PROT_WRITE;
+ void *p;
+
+ p = mmap(NULL, map_sz, flags, MAP_SHARED, data_map_fd, 0);
+ if (CHECK_FAIL(p == MAP_FAILED))
+ goto cleanup;
+ err = munmap(p, map_sz);
+ if (CHECK_FAIL(err))
+ goto cleanup;
+ }
+
+ /* data_map freeze should now succeed due to no R/W mapping */
+ err = bpf_map_freeze(data_map_fd);
+ if (CHECK(err, "freeze", "data_map freeze failed: err=%d, errno=%d\n",
+ err, errno))
+ goto cleanup;
+
+ /* mapping as R/W now should fail */
+ tmp1 = mmap(NULL, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
+ data_map_fd, 0);
+ if (CHECK(tmp1 != MAP_FAILED, "data_mmap", "mmap succeeded\n")) {
+ munmap(tmp1, map_sz);
+ goto cleanup;
+ }
+
+ bss_data->in_val = 321;
+ usleep(1);
+ CHECK_FAIL(bss_data->in_val != 321);
+ CHECK_FAIL(bss_data->out_val != 321);
+ CHECK_FAIL(map_data->val[0] != 111);
+ CHECK_FAIL(map_data->val[1] != 222);
+ CHECK_FAIL(map_data->val[2] != 321);
+ CHECK_FAIL(map_data->val[far] != 3 * 321);
+
+ /* check some more advanced mmap() manipulations */
+
+ /* map all but last page: pages 1-3 mapped */
+ tmp1 = mmap(NULL, 3 * page_size, PROT_READ, MAP_SHARED,
+ data_map_fd, 0);
+ if (CHECK(tmp1 == MAP_FAILED, "adv_mmap1", "errno %d\n", errno))
+ goto cleanup;
+
+ /* unmap second page: pages 1, 3 mapped */
+ err = munmap(tmp1 + page_size, page_size);
+ if (CHECK(err, "adv_mmap2", "errno %d\n", errno)) {
+ munmap(tmp1, map_sz);
+ goto cleanup;
+ }
+
+ /* map page 2 back */
+ tmp2 = mmap(tmp1 + page_size, page_size, PROT_READ,
+ MAP_SHARED | MAP_FIXED, data_map_fd, 0);
+ if (CHECK(tmp2 == MAP_FAILED, "adv_mmap3", "errno %d\n", errno)) {
+ munmap(tmp1, page_size);
+ munmap(tmp1 + 2*page_size, page_size);
+ goto cleanup;
+ }
+ CHECK(tmp1 + page_size != tmp2, "adv_mmap4",
+ "tmp1: %p, tmp2: %p\n", tmp1, tmp2);
+
+ /* re-map all 4 pages */
+ tmp2 = mmap(tmp1, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
+ data_map_fd, 0);
+ if (CHECK(tmp2 == MAP_FAILED, "adv_mmap5", "errno %d\n", errno)) {
+ munmap(tmp1, 3 * page_size); /* unmap page 1 */
+ goto cleanup;
+ }
+ CHECK(tmp1 != tmp2, "adv_mmap6", "tmp1: %p, tmp2: %p\n", tmp1, tmp2);
+
+ map_data = tmp2;
+ CHECK_FAIL(bss_data->in_val != 321);
+ CHECK_FAIL(bss_data->out_val != 321);
+ CHECK_FAIL(map_data->val[0] != 111);
+ CHECK_FAIL(map_data->val[1] != 222);
+ CHECK_FAIL(map_data->val[2] != 321);
+ CHECK_FAIL(map_data->val[far] != 3 * 321);
+
+ munmap(tmp2, 4 * page_size);
+cleanup:
+ if (bss_mmaped)
+ CHECK_FAIL(munmap(bss_mmaped, bss_sz));
+ if (map_mmaped)
+ CHECK_FAIL(munmap(map_mmaped, map_sz));
+ if (!IS_ERR_OR_NULL(link))
+ bpf_link__destroy(link);
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/pinning.c b/tools/testing/selftests/bpf/prog_tests/pinning.c
index 525388971e08..041952524c55 100644
--- a/tools/testing/selftests/bpf/prog_tests/pinning.c
+++ b/tools/testing/selftests/bpf/prog_tests/pinning.c
@@ -163,12 +163,15 @@ void test_pinning(void)
goto out;
}
- /* swap pin paths of the two maps */
+ /* set pin paths so that nopinmap2 will attempt to reuse the map at
+ * pinpath (which will fail), but not before pinmap has already been
+ * reused
+ */
bpf_object__for_each_map(map, obj) {
if (!strcmp(bpf_map__name(map), "nopinmap"))
+ err = bpf_map__set_pin_path(map, nopinpath2);
+ else if (!strcmp(bpf_map__name(map), "nopinmap2"))
err = bpf_map__set_pin_path(map, pinpath);
- else if (!strcmp(bpf_map__name(map), "pinmap"))
- err = bpf_map__set_pin_path(map, NULL);
else
continue;
@@ -181,6 +184,17 @@ void test_pinning(void)
if (CHECK(err != -EINVAL, "param mismatch load", "err %d errno %d\n", err, errno))
goto out;
+ /* nopinmap2 should have been pinned and cleaned up again */
+ err = stat(nopinpath2, &statbuf);
+ if (CHECK(!err || errno != ENOENT, "stat nopinpath2",
+ "err %d errno %d\n", err, errno))
+ goto out;
+
+ /* pinmap should still be there */
+ err = stat(pinpath, &statbuf);
+ if (CHECK(err, "stat pinpath", "err %d errno %d\n", err, errno))
+ goto out;
+
bpf_object__close(obj);
/* test auto-pinning at custom path with open opt */
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type.c
new file mode 100644
index 000000000000..f5a7c832d0f2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_arrays___err_wrong_val_type x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type1.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type1.c
deleted file mode 100644
index 795a5b729176..000000000000
--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type1.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "core_reloc_types.h"
-
-void f(struct core_reloc_arrays___err_wrong_val_type1 x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type2.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type2.c
deleted file mode 100644
index 3af74b837c4d..000000000000
--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type2.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "core_reloc_types.h"
-
-void f(struct core_reloc_arrays___err_wrong_val_type2 x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields.c
new file mode 100644
index 000000000000..cff6f1836cc5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_bitfields x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bit_sz_change.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bit_sz_change.c
new file mode 100644
index 000000000000..a1cd157d5451
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bit_sz_change.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_bitfields___bit_sz_change x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bitfield_vs_int.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bitfield_vs_int.c
new file mode 100644
index 000000000000..3f2c7b07c456
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bitfield_vs_int.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_bitfields___bitfield_vs_int x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___err_too_big_bitfield.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___err_too_big_bitfield.c
new file mode 100644
index 000000000000..f9746d6be399
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___err_too_big_bitfield.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_bitfields___err_too_big_bitfield x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___just_big_enough.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___just_big_enough.c
new file mode 100644
index 000000000000..e7c75a6953dd
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___just_big_enough.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_bitfields___just_big_enough x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_bitfield.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_bitfield.c
deleted file mode 100644
index 50369e8320a0..000000000000
--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_bitfield.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "core_reloc_types.h"
-
-void f(struct core_reloc_ints___err_bitfield x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_16.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_16.c
deleted file mode 100644
index 823bac13d641..000000000000
--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_16.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "core_reloc_types.h"
-
-void f(struct core_reloc_ints___err_wrong_sz_16 x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_32.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_32.c
deleted file mode 100644
index b44f3be18535..000000000000
--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_32.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "core_reloc_types.h"
-
-void f(struct core_reloc_ints___err_wrong_sz_32 x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_64.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_64.c
deleted file mode 100644
index 9a3dd2099c0f..000000000000
--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_64.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "core_reloc_types.h"
-
-void f(struct core_reloc_ints___err_wrong_sz_64 x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_8.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_8.c
deleted file mode 100644
index 9f11ef5f6e88..000000000000
--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_8.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "core_reloc_types.h"
-
-void f(struct core_reloc_ints___err_wrong_sz_8 x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_size.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_size.c
new file mode 100644
index 000000000000..3c80903da5a4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_size.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_size x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_size___diff_sz.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_size___diff_sz.c
new file mode 100644
index 000000000000..6dbd14436b52
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_size___diff_sz.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_size___diff_sz x) {}
diff --git a/tools/testing/selftests/bpf/progs/core_reloc_types.h b/tools/testing/selftests/bpf/progs/core_reloc_types.h
index f5939d9d5c61..9311489e14b2 100644
--- a/tools/testing/selftests/bpf/progs/core_reloc_types.h
+++ b/tools/testing/selftests/bpf/progs/core_reloc_types.h
@@ -386,14 +386,7 @@ struct core_reloc_arrays___err_non_array {
struct core_reloc_arrays_substruct d[1][2];
};
-struct core_reloc_arrays___err_wrong_val_type1 {
- char a[5]; /* char instead of int */
- char b[2][3][4];
- struct core_reloc_arrays_substruct c[3];
- struct core_reloc_arrays_substruct d[1][2];
-};
-
-struct core_reloc_arrays___err_wrong_val_type2 {
+struct core_reloc_arrays___err_wrong_val_type {
int a[5];
char b[2][3][4];
int c[3]; /* value is not a struct */
@@ -589,67 +582,6 @@ struct core_reloc_ints___bool {
int64_t s64_field;
};
-struct core_reloc_ints___err_bitfield {
- uint8_t u8_field;
- int8_t s8_field;
- uint16_t u16_field;
- int16_t s16_field;
- uint32_t u32_field: 32; /* bitfields are not supported */
- int32_t s32_field;
- uint64_t u64_field;
- int64_t s64_field;
-};
-
-struct core_reloc_ints___err_wrong_sz_8 {
- uint16_t u8_field; /* not 8-bit anymore */
- int16_t s8_field; /* not 8-bit anymore */
-
- uint16_t u16_field;
- int16_t s16_field;
- uint32_t u32_field;
- int32_t s32_field;
- uint64_t u64_field;
- int64_t s64_field;
-};
-
-struct core_reloc_ints___err_wrong_sz_16 {
- uint8_t u8_field;
- int8_t s8_field;
-
- uint32_t u16_field; /* not 16-bit anymore */
- int32_t s16_field; /* not 16-bit anymore */
-
- uint32_t u32_field;
- int32_t s32_field;
- uint64_t u64_field;
- int64_t s64_field;
-};
-
-struct core_reloc_ints___err_wrong_sz_32 {
- uint8_t u8_field;
- int8_t s8_field;
- uint16_t u16_field;
- int16_t s16_field;
-
- uint64_t u32_field; /* not 32-bit anymore */
- int64_t s32_field; /* not 32-bit anymore */
-
- uint64_t u64_field;
- int64_t s64_field;
-};
-
-struct core_reloc_ints___err_wrong_sz_64 {
- uint8_t u8_field;
- int8_t s8_field;
- uint16_t u16_field;
- int16_t s16_field;
- uint32_t u32_field;
- int32_t s32_field;
-
- uint32_t u64_field; /* not 64-bit anymore */
- int32_t s64_field; /* not 64-bit anymore */
-};
-
/*
* MISC
*/
@@ -730,3 +662,106 @@ struct core_reloc_existence___err_wrong_arr_value_type {
struct core_reloc_existence___err_wrong_struct_type {
int s;
};
+
+/*
+ * BITFIELDS
+ */
+/* bitfield read results, all as plain integers */
+struct core_reloc_bitfields_output {
+ int64_t ub1;
+ int64_t ub2;
+ int64_t ub7;
+ int64_t sb4;
+ int64_t sb20;
+ int64_t u32;
+ int64_t s32;
+};
+
+struct core_reloc_bitfields {
+ /* unsigned bitfields */
+ uint8_t ub1: 1;
+ uint8_t ub2: 2;
+ uint32_t ub7: 7;
+ /* signed bitfields */
+ int8_t sb4: 4;
+ int32_t sb20: 20;
+ /* non-bitfields */
+ uint32_t u32;
+ int32_t s32;
+};
+
+/* different bit sizes (both up and down) */
+struct core_reloc_bitfields___bit_sz_change {
+ /* unsigned bitfields */
+ uint16_t ub1: 3; /* 1 -> 3 */
+ uint32_t ub2: 20; /* 2 -> 20 */
+ uint8_t ub7: 1; /* 7 -> 1 */
+ /* signed bitfields */
+ int8_t sb4: 1; /* 4 -> 1 */
+ int32_t sb20: 30; /* 20 -> 30 */
+ /* non-bitfields */
+ uint16_t u32; /* 32 -> 16 */
+ int64_t s32; /* 32 -> 64 */
+};
+
+/* turn bitfield into non-bitfield and vice versa */
+struct core_reloc_bitfields___bitfield_vs_int {
+ uint64_t ub1; /* 3 -> 64 non-bitfield */
+ uint8_t ub2; /* 20 -> 8 non-bitfield */
+ int64_t ub7; /* 7 -> 64 non-bitfield signed */
+ int64_t sb4; /* 4 -> 64 non-bitfield signed */
+ uint64_t sb20; /* 20 -> 16 non-bitfield unsigned */
+ int32_t u32: 20; /* 32 non-bitfield -> 20 bitfield */
+ uint64_t s32: 60; /* 32 non-bitfield -> 60 bitfield */
+};
+
+struct core_reloc_bitfields___just_big_enough {
+ uint64_t ub1: 4;
+ uint64_t ub2: 60; /* packed tightly */
+ uint32_t ub7;
+ uint32_t sb4;
+ uint32_t sb20;
+ uint32_t u32;
+ uint32_t s32;
+} __attribute__((packed)) ;
+
+struct core_reloc_bitfields___err_too_big_bitfield {
+ uint64_t ub1: 4;
+ uint64_t ub2: 61; /* packed tightly */
+ uint32_t ub7;
+ uint32_t sb4;
+ uint32_t sb20;
+ uint32_t u32;
+ uint32_t s32;
+} __attribute__((packed)) ;
+
+/*
+ * SIZE
+ */
+struct core_reloc_size_output {
+ int int_sz;
+ int struct_sz;
+ int union_sz;
+ int arr_sz;
+ int arr_elem_sz;
+ int ptr_sz;
+ int enum_sz;
+};
+
+struct core_reloc_size {
+ int int_field;
+ struct { int x; } struct_field;
+ union { int x; } union_field;
+ int arr_field[4];
+ void *ptr_field;
+ enum { VALUE = 123 } enum_field;
+};
+
+struct core_reloc_size___diff_sz {
+ uint64_t int_field;
+ struct { int x; int y; int z; } struct_field;
+ union { int x; char bla[123]; } union_field;
+ char arr_field[10];
+ void *ptr_field;
+ enum { OTHER_VALUE = 0xFFFFFFFFFFFFFFFF } enum_field;
+};
diff --git a/tools/testing/selftests/bpf/progs/fentry_test.c b/tools/testing/selftests/bpf/progs/fentry_test.c
new file mode 100644
index 000000000000..545788bf8d50
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/fentry_test.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct test1 {
+ ks32 a;
+};
+static volatile __u64 test1_result;
+SEC("fentry/bpf_fentry_test1")
+int test1(struct test1 *ctx)
+{
+ test1_result = ctx->a == 1;
+ return 0;
+}
+
+struct test2 {
+ ks32 a;
+ ku64 b;
+};
+static volatile __u64 test2_result;
+SEC("fentry/bpf_fentry_test2")
+int test2(struct test2 *ctx)
+{
+ test2_result = ctx->a == 2 && ctx->b == 3;
+ return 0;
+}
+
+struct test3 {
+ ks8 a;
+ ks32 b;
+ ku64 c;
+};
+static volatile __u64 test3_result;
+SEC("fentry/bpf_fentry_test3")
+int test3(struct test3 *ctx)
+{
+ test3_result = ctx->a == 4 && ctx->b == 5 && ctx->c == 6;
+ return 0;
+}
+
+struct test4 {
+ void *a;
+ ks8 b;
+ ks32 c;
+ ku64 d;
+};
+static volatile __u64 test4_result;
+SEC("fentry/bpf_fentry_test4")
+int test4(struct test4 *ctx)
+{
+ test4_result = ctx->a == (void *)7 && ctx->b == 8 && ctx->c == 9 &&
+ ctx->d == 10;
+ return 0;
+}
+
+struct test5 {
+ ku64 a;
+ void *b;
+ ks16 c;
+ ks32 d;
+ ku64 e;
+};
+static volatile __u64 test5_result;
+SEC("fentry/bpf_fentry_test5")
+int test5(struct test5 *ctx)
+{
+ test5_result = ctx->a == 11 && ctx->b == (void *)12 && ctx->c == 13 &&
+ ctx->d == 14 && ctx->e == 15;
+ return 0;
+}
+
+struct test6 {
+ ku64 a;
+ void *b;
+ ks16 c;
+ ks32 d;
+ void *e;
+ ks64 f;
+};
+static volatile __u64 test6_result;
+SEC("fentry/bpf_fentry_test6")
+int test6(struct test6 *ctx)
+{
+ test6_result = ctx->a == 16 && ctx->b == (void *)17 && ctx->c == 18 &&
+ ctx->d == 19 && ctx->e == (void *)20 && ctx->f == 21;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c
new file mode 100644
index 000000000000..981f0474da5a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+struct sk_buff {
+ unsigned int len;
+};
+
+struct args {
+ struct sk_buff *skb;
+ ks32 ret;
+};
+static volatile __u64 test_result;
+SEC("fexit/test_pkt_access")
+int test_main(struct args *ctx)
+{
+ struct sk_buff *skb = ctx->skb;
+ int len;
+
+ __builtin_preserve_access_index(({
+ len = skb->len;
+ }));
+ if (len != 74 || ctx->ret != 0)
+ return 0;
+ test_result = 1;
+ return 0;
+}
+
+struct args_subprog1 {
+ struct sk_buff *skb;
+ ks32 ret;
+};
+static volatile __u64 test_result_subprog1;
+SEC("fexit/test_pkt_access_subprog1")
+int test_subprog1(struct args_subprog1 *ctx)
+{
+ struct sk_buff *skb = ctx->skb;
+ int len;
+
+ __builtin_preserve_access_index(({
+ len = skb->len;
+ }));
+ if (len != 74 || ctx->ret != 148)
+ return 0;
+ test_result_subprog1 = 1;
+ return 0;
+}
+
+/* Though test_pkt_access_subprog2() is defined in C as:
+ * static __attribute__ ((noinline))
+ * int test_pkt_access_subprog2(int val, volatile struct __sk_buff *skb)
+ * {
+ * return skb->len * val;
+ * }
+ * llvm optimizations remove 'int val' argument and generate BPF assembly:
+ * r0 = *(u32 *)(r1 + 0)
+ * w0 <<= 1
+ * exit
+ * In such a case the verifier falls back to a conservative mode and the
+ * tracing program can access the arguments and return value as u64
+ * instead of their accurate types.
+ */
+struct args_subprog2 {
+ ku64 args[5];
+ ku64 ret;
+};
+static volatile __u64 test_result_subprog2;
+SEC("fexit/test_pkt_access_subprog2")
+int test_subprog2(struct args_subprog2 *ctx)
+{
+ struct sk_buff *skb = (void *)ctx->args[0];
+ __u64 ret;
+ int len;
+
+ bpf_probe_read_kernel(&len, sizeof(len),
+ __builtin_preserve_access_index(&skb->len));
+
+ ret = ctx->ret;
+ /* bpf_prog_load() loads "test_pkt_access.o" with BPF_F_TEST_RND_HI32
+ * which randomizes the upper 32 bits after BPF_ALU32 insns.
+ * Hence after 'w0 <<= 1' the upper bits of $rax are random.
+ * That is expected and correct; trim them.
+ */
+ ret = (__u32) ret;
+ if (len != 74 || ret != 148)
+ return 0;
+ test_result_subprog2 = 1;
+ return 0;
+}
+char _license[] SEC("license") = "GPL";
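
Attaching the fexit programs above differs from the kernel-function case: the target is another BPF program (test_pkt_access), so its prog fd has to be supplied when the object is opened so that section names such as "fexit/test_pkt_access_subprog2" resolve against the target program's BTF rather than vmlinux BTF. A hedged sketch follows; the path, helper name, and exact option usage are assumptions, not quoted from this patch.

/* Hedged sketch: attach fexit_bpf2bpf.o to an already-loaded test_pkt_access prog. */
#include <bpf/libbpf.h>

int attach_to_prog(int pkt_access_prog_fd)
{
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
			    .attach_prog_fd = pkt_access_prog_fd);
	struct bpf_object *obj;
	struct bpf_program *prog;

	obj = bpf_object__open_file("fexit_bpf2bpf.o", &opts);
	if (libbpf_get_error(obj) || bpf_object__load(obj))
		return -1;

	bpf_object__for_each_program(prog, obj) {
		/* resolved against the target program's BTF */
		if (libbpf_get_error(bpf_program__attach_trace(prog)))
			return -1;
	}
	return 0;
}
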
diff --git a/tools/testing/selftests/bpf/progs/fexit_test.c b/tools/testing/selftests/bpf/progs/fexit_test.c
new file mode 100644
index 000000000000..8b98b1a51784
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/fexit_test.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct test1 {
+ ks32 a;
+ ks32 ret;
+};
+static volatile __u64 test1_result;
+SEC("fexit/bpf_fentry_test1")
+int test1(struct test1 *ctx)
+{
+ test1_result = ctx->a == 1 && ctx->ret == 2;
+ return 0;
+}
+
+struct test2 {
+ ks32 a;
+ ku64 b;
+ ks32 ret;
+};
+static volatile __u64 test2_result;
+SEC("fexit/bpf_fentry_test2")
+int test2(struct test2 *ctx)
+{
+ test2_result = ctx->a == 2 && ctx->b == 3 && ctx->ret == 5;
+ return 0;
+}
+
+struct test3 {
+ ks8 a;
+ ks32 b;
+ ku64 c;
+ ks32 ret;
+};
+static volatile __u64 test3_result;
+SEC("fexit/bpf_fentry_test3")
+int test3(struct test3 *ctx)
+{
+ test3_result = ctx->a == 4 && ctx->b == 5 && ctx->c == 6 &&
+ ctx->ret == 15;
+ return 0;
+}
+
+struct test4 {
+ void *a;
+ ks8 b;
+ ks32 c;
+ ku64 d;
+ ks32 ret;
+};
+static volatile __u64 test4_result;
+SEC("fexit/bpf_fentry_test4")
+int test4(struct test4 *ctx)
+{
+ test4_result = ctx->a == (void *)7 && ctx->b == 8 && ctx->c == 9 &&
+ ctx->d == 10 && ctx->ret == 34;
+ return 0;
+}
+
+struct test5 {
+ ku64 a;
+ void *b;
+ ks16 c;
+ ks32 d;
+ ku64 e;
+ ks32 ret;
+};
+static volatile __u64 test5_result;
+SEC("fexit/bpf_fentry_test5")
+int test5(struct test5 *ctx)
+{
+ test5_result = ctx->a == 11 && ctx->b == (void *)12 && ctx->c == 13 &&
+ ctx->d == 14 && ctx->e == 15 && ctx->ret == 65;
+ return 0;
+}
+
+struct test6 {
+ ku64 a;
+ void *b;
+ ks16 c;
+ ks32 d;
+ void *e;
+ ks64 f;
+ ks32 ret;
+};
+static volatile __u64 test6_result;
+SEC("fexit/bpf_fentry_test6")
+int test6(struct test6 *ctx)
+{
+ test6_result = ctx->a == 16 && ctx->b == (void *)17 && ctx->c == 18 &&
+ ctx->d == 19 && ctx->e == (void *)20 && ctx->f == 21 &&
+ ctx->ret == 111;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/kfree_skb.c b/tools/testing/selftests/bpf/progs/kfree_skb.c
index 489319ea1d6a..dcc9feac8338 100644
--- a/tools/testing/selftests/bpf/progs/kfree_skb.c
+++ b/tools/testing/selftests/bpf/progs/kfree_skb.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/bpf.h>
+#include <stdbool.h>
#include "bpf_helpers.h"
#include "bpf_endian.h"
@@ -43,6 +44,7 @@ struct sk_buff {
refcount_t users;
unsigned char *data;
char __pkt_type_offset[0];
+ char cb[48];
};
/* copy arguments from
@@ -57,28 +59,41 @@ struct trace_kfree_skb {
void *location;
};
+struct meta {
+ int ifindex;
+ __u32 cb32_0;
+ __u8 cb8_0;
+};
+
SEC("tp_btf/kfree_skb")
int trace_kfree_skb(struct trace_kfree_skb *ctx)
{
struct sk_buff *skb = ctx->skb;
struct net_device *dev;
- int ifindex;
struct callback_head *ptr;
void *func;
int users;
unsigned char *data;
unsigned short pkt_data;
+ struct meta meta = {};
char pkt_type;
+ __u32 *cb32;
+ __u8 *cb8;
__builtin_preserve_access_index(({
users = skb->users.refs.counter;
data = skb->data;
dev = skb->dev;
- ifindex = dev->ifindex;
ptr = dev->ifalias->rcuhead.next;
func = ptr->func;
+ cb8 = (__u8 *)&skb->cb;
+ cb32 = (__u32 *)&skb->cb;
}));
+ meta.ifindex = _(dev->ifindex);
+ meta.cb8_0 = cb8[8];
+ meta.cb32_0 = cb32[2];
+
bpf_probe_read_kernel(&pkt_type, sizeof(pkt_type), _(&skb->__pkt_type_offset));
pkt_type &= 7;
@@ -90,14 +105,66 @@ int trace_kfree_skb(struct trace_kfree_skb *ctx)
_(skb->len), users, pkt_type);
bpf_printk("skb->queue_mapping %d\n", _(skb->queue_mapping));
bpf_printk("dev->ifindex %d data %llx pkt_data %x\n",
- ifindex, data, pkt_data);
+ meta.ifindex, data, pkt_data);
+ bpf_printk("cb8_0:%x cb32_0:%x\n", meta.cb8_0, meta.cb32_0);
- if (users != 1 || pkt_data != bpf_htons(0x86dd) || ifindex != 1)
+ if (users != 1 || pkt_data != bpf_htons(0x86dd) || meta.ifindex != 1)
/* raw tp ignores return value */
return 0;
/* send first 72 byte of the packet to user space */
bpf_skb_output(skb, &perf_buf_map, (72ull << 32) | BPF_F_CURRENT_CPU,
- &ifindex, sizeof(ifindex));
+ &meta, sizeof(meta));
+ return 0;
+}
+
+static volatile struct {
+ bool fentry_test_ok;
+ bool fexit_test_ok;
+} result;
+
+struct eth_type_trans_args {
+ struct sk_buff *skb;
+ struct net_device *dev;
+ unsigned short protocol; /* return value available to fexit progs */
+};
+
+SEC("fentry/eth_type_trans")
+int fentry_eth_type_trans(struct eth_type_trans_args *ctx)
+{
+ struct sk_buff *skb = ctx->skb;
+ struct net_device *dev = ctx->dev;
+ int len, ifindex;
+
+ __builtin_preserve_access_index(({
+ len = skb->len;
+ ifindex = dev->ifindex;
+ }));
+
+ /* fentry sees full packet including L2 header */
+ if (len != 74 || ifindex != 1)
+ return 0;
+ result.fentry_test_ok = true;
+ return 0;
+}
+
+SEC("fexit/eth_type_trans")
+int fexit_eth_type_trans(struct eth_type_trans_args *ctx)
+{
+ struct sk_buff *skb = ctx->skb;
+ struct net_device *dev = ctx->dev;
+ int len, ifindex;
+
+ __builtin_preserve_access_index(({
+ len = skb->len;
+ ifindex = dev->ifindex;
+ }));
+
+ /* fexit sees the packet without the L2 header, which eth_type_trans
+ * should have consumed.
+ */
+ if (len != 60 || ctx->protocol != bpf_htons(0x86dd) || ifindex != 1)
+ return 0;
+ result.fexit_test_ok = true;
return 0;
}
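
On the user-space side, the struct meta filled in above arrives through the perf event array together with the 72 packet bytes requested from bpf_skb_output(). The consumer sketch below assumes libbpf's perf_buffer API and that each sample carries struct meta followed by the packet bytes; apart from the map and struct names taken from the program above, everything here is illustrative rather than part of this patch.

/* Hedged sketch of consuming the bpf_skb_output() samples emitted above. */
#include <bpf/libbpf.h>
#include <linux/types.h>
#include <stdio.h>
#include <string.h>

struct meta {
	int ifindex;
	__u32 cb32_0;
	__u8 cb8_0;
};

static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	struct meta m;

	if (size < sizeof(m))
		return;
	memcpy(&m, data, sizeof(m));
	/* packet bytes follow the meta data in the sample */
	printf("ifindex=%d cb32_0=%x cb8_0=%x pkt_len=%u\n",
	       m.ifindex, m.cb32_0, m.cb8_0, size - (__u32)sizeof(m));
}

int consume(int perf_buf_map_fd)
{
	struct perf_buffer_opts pb_opts = { .sample_cb = on_sample };
	struct perf_buffer *pb;

	pb = perf_buffer__new(perf_buf_map_fd, 8 /* pages per CPU */, &pb_opts);
	if (libbpf_get_error(pb))
		return -1;
	while (perf_buffer__poll(pb, 100 /* ms */) >= 0)
		;
	perf_buffer__free(pb);
	return 0;
}
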
diff --git a/tools/testing/selftests/bpf/progs/test_btf_haskv.c b/tools/testing/selftests/bpf/progs/test_btf_haskv.c
index 763c51447c19..62ad7e22105e 100644
--- a/tools/testing/selftests/bpf/progs/test_btf_haskv.c
+++ b/tools/testing/selftests/bpf/progs/test_btf_haskv.c
@@ -26,7 +26,7 @@ struct dummy_tracepoint_args {
};
__attribute__((noinline))
-static int test_long_fname_2(struct dummy_tracepoint_args *arg)
+int test_long_fname_2(struct dummy_tracepoint_args *arg)
{
struct ipv_counts *counts;
int key = 0;
@@ -44,7 +44,7 @@ static int test_long_fname_2(struct dummy_tracepoint_args *arg)
}
__attribute__((noinline))
-static int test_long_fname_1(struct dummy_tracepoint_args *arg)
+int test_long_fname_1(struct dummy_tracepoint_args *arg)
{
return test_long_fname_2(arg);
}
diff --git a/tools/testing/selftests/bpf/progs/test_btf_newkv.c b/tools/testing/selftests/bpf/progs/test_btf_newkv.c
index 96f9e8451029..fb8d91a1dbe0 100644
--- a/tools/testing/selftests/bpf/progs/test_btf_newkv.c
+++ b/tools/testing/selftests/bpf/progs/test_btf_newkv.c
@@ -34,7 +34,7 @@ struct dummy_tracepoint_args {
};
__attribute__((noinline))
-static int test_long_fname_2(struct dummy_tracepoint_args *arg)
+int test_long_fname_2(struct dummy_tracepoint_args *arg)
{
struct ipv_counts *counts;
int key = 0;
@@ -57,7 +57,7 @@ static int test_long_fname_2(struct dummy_tracepoint_args *arg)
}
__attribute__((noinline))
-static int test_long_fname_1(struct dummy_tracepoint_args *arg)
+int test_long_fname_1(struct dummy_tracepoint_args *arg)
{
return test_long_fname_2(arg);
}
diff --git a/tools/testing/selftests/bpf/progs/test_btf_nokv.c b/tools/testing/selftests/bpf/progs/test_btf_nokv.c
index 434188c37774..3f4422044759 100644
--- a/tools/testing/selftests/bpf/progs/test_btf_nokv.c
+++ b/tools/testing/selftests/bpf/progs/test_btf_nokv.c
@@ -23,7 +23,7 @@ struct dummy_tracepoint_args {
};
__attribute__((noinline))
-static int test_long_fname_2(struct dummy_tracepoint_args *arg)
+int test_long_fname_2(struct dummy_tracepoint_args *arg)
{
struct ipv_counts *counts;
int key = 0;
@@ -41,7 +41,7 @@ static int test_long_fname_2(struct dummy_tracepoint_args *arg)
}
__attribute__((noinline))
-static int test_long_fname_1(struct dummy_tracepoint_args *arg)
+int test_long_fname_1(struct dummy_tracepoint_args *arg)
{
return test_long_fname_2(arg);
}
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_direct.c b/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_direct.c
new file mode 100644
index 000000000000..738b34b72655
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_direct.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+
+#include <linux/bpf.h>
+#include <stdint.h>
+#include "bpf_helpers.h"
+#include "bpf_core_read.h"
+
+char _license[] SEC("license") = "GPL";
+
+static volatile struct data {
+ char in[256];
+ char out[256];
+} data;
+
+struct core_reloc_bitfields {
+ /* unsigned bitfields */
+ uint8_t ub1: 1;
+ uint8_t ub2: 2;
+ uint32_t ub7: 7;
+ /* signed bitfields */
+ int8_t sb4: 4;
+ int32_t sb20: 20;
+ /* non-bitfields */
+ uint32_t u32;
+ int32_t s32;
+};
+
+/* bitfield read results, all as plain integers */
+struct core_reloc_bitfields_output {
+ int64_t ub1;
+ int64_t ub2;
+ int64_t ub7;
+ int64_t sb4;
+ int64_t sb20;
+ int64_t u32;
+ int64_t s32;
+};
+
+struct pt_regs;
+
+struct trace_sys_enter {
+ struct pt_regs *regs;
+ long id;
+};
+
+SEC("tp_btf/sys_enter")
+int test_core_bitfields_direct(void *ctx)
+{
+ struct core_reloc_bitfields *in = (void *)&data.in;
+ struct core_reloc_bitfields_output *out = (void *)&data.out;
+
+ out->ub1 = BPF_CORE_READ_BITFIELD(in, ub1);
+ out->ub2 = BPF_CORE_READ_BITFIELD(in, ub2);
+ out->ub7 = BPF_CORE_READ_BITFIELD(in, ub7);
+ out->sb4 = BPF_CORE_READ_BITFIELD(in, sb4);
+ out->sb20 = BPF_CORE_READ_BITFIELD(in, sb20);
+ out->u32 = BPF_CORE_READ_BITFIELD(in, u32);
+ out->s32 = BPF_CORE_READ_BITFIELD(in, s32);
+
+ return 0;
+}
+
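BPF_CORE_READ_BITFIELD() above expands to plain load/shift/shift arithmetic whose parameters (byte offset and size, left shift, right shift, signedness) are emitted as CO-RE relocations and patched by libbpf to match the running kernel's layout. The sketch below shows the equivalent arithmetic with those parameters as ordinary function arguments standing in for the relocated values; it is an illustration, not the macro itself.

/* Hedged sketch: what a relocated bitfield read boils down to. byte_off,
 * byte_sz, lshift, rshift and is_signed stand in for values libbpf patches
 * in from CO-RE relocations; they are not fixed constants.
 */
#include <stdint.h>

static int64_t read_bitfield(const void *base, unsigned int byte_off,
			     unsigned int byte_sz, unsigned int lshift,
			     unsigned int rshift, int is_signed)
{
	const char *p = (const char *)base + byte_off;
	uint64_t val = 0;

	switch (byte_sz) {
	case 1: val = *(const uint8_t *)p; break;
	case 2: val = *(const uint16_t *)p; break;
	case 4: val = *(const uint32_t *)p; break;
	case 8: val = *(const uint64_t *)p; break;
	}
	val <<= lshift;				/* drop bits above the field */
	if (is_signed)
		return (int64_t)val >> rshift;	/* arithmetic shift sign-extends */
	return (int64_t)(val >> rshift);	/* logical shift zero-extends */
}
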
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c b/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c
new file mode 100644
index 000000000000..e466e3ab7de4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+
+#include <linux/bpf.h>
+#include <stdint.h>
+#include "bpf_helpers.h"
+#include "bpf_core_read.h"
+
+char _license[] SEC("license") = "GPL";
+
+static volatile struct data {
+ char in[256];
+ char out[256];
+} data;
+
+struct core_reloc_bitfields {
+ /* unsigned bitfields */
+ uint8_t ub1: 1;
+ uint8_t ub2: 2;
+ uint32_t ub7: 7;
+ /* signed bitfields */
+ int8_t sb4: 4;
+ int32_t sb20: 20;
+ /* non-bitfields */
+ uint32_t u32;
+ int32_t s32;
+};
+
+/* bitfield read results, all as plain integers */
+struct core_reloc_bitfields_output {
+ int64_t ub1;
+ int64_t ub2;
+ int64_t ub7;
+ int64_t sb4;
+ int64_t sb20;
+ int64_t u32;
+ int64_t s32;
+};
+
+SEC("raw_tracepoint/sys_enter")
+int test_core_bitfields(void *ctx)
+{
+ struct core_reloc_bitfields *in = (void *)&data.in;
+ struct core_reloc_bitfields_output *out = (void *)&data.out;
+ uint64_t res;
+
+ out->ub1 = BPF_CORE_READ_BITFIELD_PROBED(in, ub1);
+ out->ub2 = BPF_CORE_READ_BITFIELD_PROBED(in, ub2);
+ out->ub7 = BPF_CORE_READ_BITFIELD_PROBED(in, ub7);
+ out->sb4 = BPF_CORE_READ_BITFIELD_PROBED(in, sb4);
+ out->sb20 = BPF_CORE_READ_BITFIELD_PROBED(in, sb20);
+ out->u32 = BPF_CORE_READ_BITFIELD_PROBED(in, u32);
+ out->s32 = BPF_CORE_READ_BITFIELD_PROBED(in, s32);
+
+ return 0;
+}
+
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_size.c b/tools/testing/selftests/bpf/progs/test_core_reloc_size.c
new file mode 100644
index 000000000000..9a92998d9107
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_size.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+
+#include <linux/bpf.h>
+#include <stdint.h>
+#include "bpf_helpers.h"
+#include "bpf_core_read.h"
+
+char _license[] SEC("license") = "GPL";
+
+static volatile struct data {
+ char in[256];
+ char out[256];
+} data;
+
+struct core_reloc_size_output {
+ int int_sz;
+ int struct_sz;
+ int union_sz;
+ int arr_sz;
+ int arr_elem_sz;
+ int ptr_sz;
+ int enum_sz;
+};
+
+struct core_reloc_size {
+ int int_field;
+ struct { int x; } struct_field;
+ union { int x; } union_field;
+ int arr_field[4];
+ void *ptr_field;
+ enum { VALUE = 123 } enum_field;
+};
+
+SEC("raw_tracepoint/sys_enter")
+int test_core_size(void *ctx)
+{
+ struct core_reloc_size *in = (void *)&data.in;
+ struct core_reloc_size_output *out = (void *)&data.out;
+
+ out->int_sz = bpf_core_field_size(in->int_field);
+ out->struct_sz = bpf_core_field_size(in->struct_field);
+ out->union_sz = bpf_core_field_size(in->union_field);
+ out->arr_sz = bpf_core_field_size(in->arr_field);
+ out->arr_elem_sz = bpf_core_field_size(in->arr_field[0]);
+ out->ptr_sz = bpf_core_field_size(in->ptr_field);
+ out->enum_sz = bpf_core_field_size(in->enum_field);
+
+ return 0;
+}
+
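As a concrete illustration of what the size relocations resolve to: if the program above were matched against the core_reloc_size___diff_sz layout earlier in this patch, the per-field sizes would come out as in the compile-time checks below. This is a hedged sketch assuming a typical LP64 target; the selftest's actual expected values live in the user-space test, which is not shown here, and the enum field is omitted because its underlying type depends on compiler extensions.

/* Hedged illustration only: field sizes of core_reloc_size___diff_sz on a
 * typical LP64 target; not quoted from the selftest's expectations.
 */
#include <stdint.h>

struct diff_sz {			/* mirrors core_reloc_size___diff_sz */
	uint64_t int_field;
	struct { int x; int y; int z; } struct_field;
	union { int x; char bla[123]; } union_field;
	char arr_field[10];
	void *ptr_field;
};

#define FSZ(f) sizeof(((struct diff_sz *)0)->f)

_Static_assert(FSZ(int_field) == 8, "uint64_t");
_Static_assert(FSZ(struct_field) == 12, "three ints");
_Static_assert(FSZ(union_field) == 124, "123 bytes padded to int alignment");
_Static_assert(FSZ(arr_field) == 10, "char[10]");
_Static_assert(FSZ(arr_field[0]) == 1, "array element");
_Static_assert(FSZ(ptr_field) == 8, "64-bit pointer");
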
diff --git a/tools/testing/selftests/bpf/progs/test_mmap.c b/tools/testing/selftests/bpf/progs/test_mmap.c
new file mode 100644
index 000000000000..0d2ec9fbcf61
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_mmap.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+
+#include <linux/bpf.h>
+#include <stdint.h>
+#include "bpf_helpers.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 512 * 4); /* at least 4 pages of data */
+ __uint(map_flags, BPF_F_MMAPABLE);
+ __type(key, __u32);
+ __type(value, __u64);
+} data_map SEC(".maps");
+
+static volatile __u64 in_val;
+static volatile __u64 out_val;
+
+SEC("raw_tracepoint/sys_enter")
+int test_mmap(void *ctx)
+{
+ int zero = 0, one = 1, two = 2, far = 1500;
+ __u64 val, *p;
+
+ out_val = in_val;
+
+ /* data_map[2] = in_val; */
+ bpf_map_update_elem(&data_map, &two, (const void *)&in_val, 0);
+
+ /* data_map[1] = data_map[0] * 2; */
+ p = bpf_map_lookup_elem(&data_map, &zero);
+ if (p) {
+ val = (*p) * 2;
+ bpf_map_update_elem(&data_map, &one, &val, 0);
+ }
+
+ /* data_map[far] = in_val * 3; */
+ val = in_val * 3;
+ bpf_map_update_elem(&data_map, &far, &val, 0);
+
+ return 0;
+}
+
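The point of BPF_F_MMAPABLE above is that user space can map the array's value area directly and read or write entries without bpf_map_lookup_elem() syscalls. A hedged user-space sketch follows; the map name comes from the program above, while the sizing and surrounding setup are assumptions.

/* Hedged sketch: mmap() the BPF_F_MMAPABLE array defined above. */
#include <bpf/libbpf.h>
#include <linux/types.h>
#include <sys/mman.h>
#include <stdio.h>

int dump_vals(struct bpf_object *obj)
{
	const size_t map_sz = 512 * 4 * sizeof(__u64); /* max_entries * value size */
	struct bpf_map *map;
	__u64 *vals;

	map = bpf_object__find_map_by_name(obj, "data_map");
	if (!map)
		return -1;

	vals = mmap(NULL, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
		    bpf_map__fd(map), 0);
	if (vals == MAP_FAILED)
		return -1;

	/* indices written by the BPF program above */
	printf("data_map[0]=%llu [1]=%llu [2]=%llu [1500]=%llu\n",
	       (unsigned long long)vals[0], (unsigned long long)vals[1],
	       (unsigned long long)vals[2], (unsigned long long)vals[1500]);
	munmap(vals, map_sz);
	return 0;
}
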
diff --git a/tools/testing/selftests/bpf/progs/test_pinning.c b/tools/testing/selftests/bpf/progs/test_pinning.c
index f69a4a50d056..f20e7e00373f 100644
--- a/tools/testing/selftests/bpf/progs/test_pinning.c
+++ b/tools/testing/selftests/bpf/progs/test_pinning.c
@@ -21,7 +21,7 @@ struct {
} nopinmap SEC(".maps");
struct {
- __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
diff --git a/tools/testing/selftests/bpf/progs/test_pkt_access.c b/tools/testing/selftests/bpf/progs/test_pkt_access.c
index 7cf42d14103f..3a7b4b607ed3 100644
--- a/tools/testing/selftests/bpf/progs/test_pkt_access.c
+++ b/tools/testing/selftests/bpf/progs/test_pkt_access.c
@@ -17,8 +17,38 @@
#define barrier() __asm__ __volatile__("": : :"memory")
int _version SEC("version") = 1;
-SEC("test1")
-int process(struct __sk_buff *skb)
+/* llvm will optimize both subprograms into exactly the same BPF assembly
+ *
+ * Disassembly of section .text:
+ *
+ * 0000000000000000 test_pkt_access_subprog1:
+ * ; return skb->len * 2;
+ * 0: 61 10 00 00 00 00 00 00 r0 = *(u32 *)(r1 + 0)
+ * 1: 64 00 00 00 01 00 00 00 w0 <<= 1
+ * 2: 95 00 00 00 00 00 00 00 exit
+ *
+ * 0000000000000018 test_pkt_access_subprog2:
+ * ; return skb->len * val;
+ * 3: 61 10 00 00 00 00 00 00 r0 = *(u32 *)(r1 + 0)
+ * 4: 64 00 00 00 01 00 00 00 w0 <<= 1
+ * 5: 95 00 00 00 00 00 00 00 exit
+ *
+ * Which makes it an interesting test for the BTF-enabled verifier.
+ */
+static __attribute__ ((noinline))
+int test_pkt_access_subprog1(volatile struct __sk_buff *skb)
+{
+ return skb->len * 2;
+}
+
+static __attribute__ ((noinline))
+int test_pkt_access_subprog2(int val, volatile struct __sk_buff *skb)
+{
+ return skb->len * val;
+}
+
+SEC("classifier/test_pkt_access")
+int test_pkt_access(struct __sk_buff *skb)
{
void *data_end = (void *)(long)skb->data_end;
void *data = (void *)(long)skb->data;
@@ -48,6 +78,10 @@ int process(struct __sk_buff *skb)
tcp = (struct tcphdr *)((void *)(ip6h) + ihl_len);
}
+ if (test_pkt_access_subprog1(skb) != skb->len * 2)
+ return TC_ACT_SHOT;
+ if (test_pkt_access_subprog2(2, skb) != skb->len * 2)
+ return TC_ACT_SHOT;
if (tcp) {
if (((void *)(tcp) + 20) > data_end || proto != 6)
return TC_ACT_SHOT;
diff --git a/tools/testing/selftests/bpf/progs/test_seg6_loop.c b/tools/testing/selftests/bpf/progs/test_seg6_loop.c
index c4d104428643..69880c1e7700 100644
--- a/tools/testing/selftests/bpf/progs/test_seg6_loop.c
+++ b/tools/testing/selftests/bpf/progs/test_seg6_loop.c
@@ -132,8 +132,10 @@ static __always_inline int is_valid_tlv_boundary(struct __sk_buff *skb,
*pad_off = 0;
// we can only go as far as ~10 TLVs due to the BPF max stack size
+ // workaround: define induction variable "i" as "long" instead
+ // of "int" to prevent alu32 sub-register spilling.
#pragma clang loop unroll(disable)
- for (int i = 0; i < 100; i++) {
+ for (long i = 0; i < 100; i++) {
struct sr6_tlv_t tlv;
if (cur_off == *tlv_off)
diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
index 608a06871572..d22e438198cf 100644
--- a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
+++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
@@ -44,7 +44,10 @@ int sysctl_tcp_mem(struct bpf_sysctl *ctx)
unsigned long tcp_mem[TCP_MEM_LOOPS] = {};
char value[MAX_VALUE_STR_LEN];
unsigned char i, off = 0;
- int ret;
+ /* a workaround to prevent the compiler from generating
+ * code the verifier cannot handle yet.
+ */
+ volatile int ret;
if (ctx->write)
return 0;
diff --git a/tools/testing/selftests/bpf/test_tc_tunnel.sh b/tools/testing/selftests/bpf/test_tc_tunnel.sh
index ff0d31d38061..7c76b841b17b 100755
--- a/tools/testing/selftests/bpf/test_tc_tunnel.sh
+++ b/tools/testing/selftests/bpf/test_tc_tunnel.sh
@@ -62,6 +62,10 @@ cleanup() {
if [[ -f "${infile}" ]]; then
rm "${infile}"
fi
+
+ if [[ -n $server_pid ]]; then
+ kill $server_pid 2> /dev/null
+ fi
}
server_listen() {
@@ -77,6 +81,7 @@ client_connect() {
verify_data() {
wait "${server_pid}"
+ server_pid=
# sha1sum returns two fields [sha1] [filepath]
# convert to bash array and access first elem
insum=($(sha1sum ${infile}))