Diffstat (limited to 'tools')
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool-map.rst | 2
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool-prog.rst | 15
-rw-r--r--  tools/bpf/bpftool/Documentation/common_options.rst | 17
-rw-r--r--  tools/bpf/bpftool/Documentation/substitutions.rst | 2
-rw-r--r--  tools/bpf/bpftool/Makefile | 74
-rw-r--r--  tools/bpf/bpftool/bash-completion/bpftool | 3
-rw-r--r--  tools/bpf/bpftool/btf.c | 44
-rw-r--r--  tools/bpf/bpftool/btf_dumper.c | 2
-rw-r--r--  tools/bpf/bpftool/common.c | 22
-rw-r--r--  tools/bpf/bpftool/gen.c | 29
-rw-r--r--  tools/bpf/bpftool/iter.c | 12
-rw-r--r--  tools/bpf/bpftool/jit_disasm.c | 261
-rw-r--r--  tools/bpf/bpftool/link.c | 10
-rw-r--r--  tools/bpf/bpftool/main.c | 116
-rw-r--r--  tools/bpf/bpftool/main.h | 49
-rw-r--r--  tools/bpf/bpftool/map.c | 33
-rw-r--r--  tools/bpf/bpftool/net.c | 2
-rw-r--r--  tools/bpf/bpftool/perf.c | 2
-rw-r--r--  tools/bpf/bpftool/pids.c | 16
-rw-r--r--  tools/bpf/bpftool/prog.c | 124
-rw-r--r--  tools/bpf/bpftool/struct_ops.c | 22
-rw-r--r--  tools/bpf/bpftool/xlated_dumper.c | 2
-rw-r--r--  tools/include/uapi/linux/bpf.h | 516
-rw-r--r--  tools/lib/bpf/bpf.c | 48
-rw-r--r--  tools/lib/bpf/bpf.h | 16
-rw-r--r--  tools/lib/bpf/btf.c | 272
-rw-r--r--  tools/lib/bpf/btf_dump.c | 46
-rw-r--r--  tools/lib/bpf/hashmap.c | 18
-rw-r--r--  tools/lib/bpf/hashmap.h | 91
-rw-r--r--  tools/lib/bpf/libbpf.c | 266
-rw-r--r--  tools/lib/bpf/libbpf.map | 6
-rw-r--r--  tools/lib/bpf/libbpf_probes.c | 1
-rw-r--r--  tools/lib/bpf/ringbuf.c | 4
-rw-r--r--  tools/lib/bpf/strset.c | 18
-rw-r--r--  tools/lib/bpf/usdt.c | 55
-rw-r--r--  tools/perf/tests/expr.c | 28
-rw-r--r--  tools/perf/tests/pmu-events.c | 6
-rw-r--r--  tools/perf/util/bpf-loader.c | 11
-rw-r--r--  tools/perf/util/evsel.c | 2
-rw-r--r--  tools/perf/util/expr.c | 36
-rw-r--r--  tools/perf/util/hashmap.c | 18
-rw-r--r--  tools/perf/util/hashmap.h | 91
-rw-r--r--  tools/perf/util/metricgroup.c | 10
-rw-r--r--  tools/perf/util/stat-shadow.c | 2
-rw-r--r--  tools/perf/util/stat.c | 9
-rw-r--r--  tools/testing/selftests/Makefile | 1
-rw-r--r--  tools/testing/selftests/bpf/DENYLIST | 3
-rw-r--r--  tools/testing/selftests/bpf/DENYLIST.aarch64 | 83
-rw-r--r--  tools/testing/selftests/bpf/DENYLIST.s390x | 45
-rw-r--r--  tools/testing/selftests/bpf/Makefile | 29
-rw-r--r--  tools/testing/selftests/bpf/README.rst | 53
-rw-r--r--  tools/testing/selftests/bpf/bpf_experimental.h | 68
-rw-r--r--  tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c | 24
-rw-r--r--  tools/testing/selftests/bpf/bpf_util.h | 19
-rw-r--r--  tools/testing/selftests/bpf/cgroup_helpers.c | 22
-rw-r--r--  tools/testing/selftests/bpf/cgroup_helpers.h | 1
-rw-r--r--  tools/testing/selftests/bpf/config | 3
-rw-r--r--  tools/testing/selftests/bpf/config.aarch64 | 181
-rw-r--r--  tools/testing/selftests/bpf/config.s390x | 3
-rw-r--r--  tools/testing/selftests/bpf/config.x86_64 | 1
-rw-r--r--  tools/testing/selftests/bpf/network_helpers.c | 4
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/align.c | 38
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_iter.c | 41
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_nf.c | 6
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/btf.c | 278
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c | 45
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/btf_dump.c | 4
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/cgroup_iter.c | 76
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c | 175
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c | 171
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/dynptr.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/empty_skb.c | 146
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/hashmap.c | 190
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c | 32
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/kprobe_multi_testmod_test.c | 89
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/libbpf_get_fd_by_id_opts.c | 87
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/libbpf_str.c | 8
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/linked_list.c | 740
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c | 17
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/map_kptr.c | 3
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/module_attach.c | 7
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c | 158
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/ringbuf.c | 66
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/skeleton.c | 11
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/spin_lock.c | 142
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/spinlock.c | 45
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/task_kfunc.c | 163
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/task_local_storage.c | 164
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c | 6
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/tracing_struct.c | 3
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/type_cast.c | 114
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c | 7
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c | 8
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c | 21
-rw-r--r--  tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c | 9
-rw-r--r--  tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h | 72
-rw-r--r--  tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c | 260
-rw-r--r--  tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c | 170
-rw-r--r--  tools/testing/selftests/bpf/progs/cgrp_ls_attach_cgroup.c | 101
-rw-r--r--  tools/testing/selftests/bpf/progs/cgrp_ls_negative.c | 26
-rw-r--r--  tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c | 70
-rw-r--r--  tools/testing/selftests/bpf/progs/cgrp_ls_tp_btf.c | 88
-rw-r--r--  tools/testing/selftests/bpf/progs/empty_skb.c | 37
-rw-r--r--  tools/testing/selftests/bpf/progs/kprobe_multi.c | 50
-rw-r--r--  tools/testing/selftests/bpf/progs/linked_list.c | 370
-rw-r--r--  tools/testing/selftests/bpf/progs/linked_list.h | 56
-rw-r--r--  tools/testing/selftests/bpf/progs/linked_list_fail.c | 581
-rw-r--r--  tools/testing/selftests/bpf/progs/lsm_cgroup.c | 8
-rw-r--r--  tools/testing/selftests/bpf/progs/rcu_read_lock.c | 290
-rw-r--r--  tools/testing/selftests/bpf/progs/task_kfunc_common.h | 72
-rw-r--r--  tools/testing/selftests/bpf/progs/task_kfunc_failure.c | 273
-rw-r--r--  tools/testing/selftests/bpf/progs/task_kfunc_success.c | 222
-rw-r--r--  tools/testing/selftests/bpf/progs/task_local_storage_exit_creds.c | 3
-rw-r--r--  tools/testing/selftests/bpf/progs/task_ls_recursion.c | 43
-rw-r--r--  tools/testing/selftests/bpf/progs/task_storage_nodeadlock.c | 47
-rw-r--r--  tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c | 36
-rw-r--r--  tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c | 4
-rw-r--r--  tools/testing/selftests/bpf/progs/test_module_attach.c | 6
-rw-r--r--  tools/testing/selftests/bpf/progs/test_ringbuf_map_key.c | 70
-rw-r--r--  tools/testing/selftests/bpf/progs/test_skeleton.c | 17
-rw-r--r--  tools/testing/selftests/bpf/progs/test_spin_lock.c | 4
-rw-r--r--  tools/testing/selftests/bpf/progs/test_spin_lock_fail.c | 204
-rw-r--r--  tools/testing/selftests/bpf/progs/type_cast.c | 83
-rw-r--r--  tools/testing/selftests/bpf/task_local_storage_helpers.h | 4
-rwxr-xr-x  tools/testing/selftests/bpf/test_bpftool_metadata.sh | 7
-rwxr-xr-x  tools/testing/selftests/bpf/test_bpftool_synctypes.py | 14
-rwxr-xr-x  tools/testing/selftests/bpf/test_flow_dissector.sh | 6
-rwxr-xr-x  tools/testing/selftests/bpf/test_lwt_ip_encap.sh | 17
-rwxr-xr-x  tools/testing/selftests/bpf/test_lwt_seg6local.sh | 9
-rw-r--r--  tools/testing/selftests/bpf/test_progs.c | 38
-rwxr-xr-x  tools/testing/selftests/bpf/test_tc_edt.sh | 3
-rwxr-xr-x  tools/testing/selftests/bpf/test_tc_tunnel.sh | 5
-rwxr-xr-x  tools/testing/selftests/bpf/test_tunnel.sh | 5
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c | 13
-rwxr-xr-x  tools/testing/selftests/bpf/test_xdp_meta.sh | 9
-rwxr-xr-x  tools/testing/selftests/bpf/test_xdp_vlan.sh | 8
-rw-r--r--  tools/testing/selftests/bpf/trace_helpers.c | 20
-rw-r--r--  tools/testing/selftests/bpf/trace_helpers.h | 2
-rw-r--r--  tools/testing/selftests/bpf/verifier/calls.c | 2
-rw-r--r--  tools/testing/selftests/bpf/verifier/jeq_infer_not_null.c | 174
-rw-r--r--  tools/testing/selftests/bpf/verifier/jit.c | 24
-rw-r--r--  tools/testing/selftests/bpf/verifier/ref_tracking.c | 4
-rw-r--r--  tools/testing/selftests/bpf/verifier/ringbuf.c | 2
-rw-r--r--  tools/testing/selftests/bpf/verifier/spill_fill.c | 2
-rw-r--r--  tools/testing/selftests/bpf/veristat.c | 918
-rwxr-xr-x  tools/testing/selftests/bpf/vmtest.sh | 6
-rw-r--r--  tools/testing/selftests/bpf/xdp_synproxy.c | 5
-rw-r--r--  tools/testing/selftests/bpf/xsk.c | 26
-rw-r--r--  tools/testing/selftests/bpf/xskxceiver.c | 3
-rw-r--r--  tools/testing/selftests/drivers/net/bonding/Makefile | 4
-rw-r--r--  tools/testing/selftests/drivers/net/bonding/lag_lib.sh | 106
-rwxr-xr-x  tools/testing/selftests/drivers/net/bonding/mode-1-recovery-updelay.sh | 45
-rwxr-xr-x  tools/testing/selftests/drivers/net/bonding/mode-2-recovery-updelay.sh | 45
-rw-r--r--  tools/testing/selftests/drivers/net/bonding/settings | 2
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/devlink_trap_control.sh | 22
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l2_drops.sh | 105
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh | 31
-rw-r--r--  tools/testing/selftests/nci/nci_dev.c | 11
-rw-r--r--  tools/testing/selftests/net/.gitignore | 2
-rw-r--r--  tools/testing/selftests/net/Makefile | 4
-rw-r--r--  tools/testing/selftests/net/bpf/Makefile | 45
-rw-r--r--  tools/testing/selftests/net/csum.c | 986
-rwxr-xr-x  tools/testing/selftests/net/forwarding/bridge_igmp.sh | 3
-rwxr-xr-x  tools/testing/selftests/net/forwarding/bridge_locked_port.sh | 155
-rwxr-xr-x  tools/testing/selftests/net/forwarding/bridge_vlan_mcast.sh | 3
-rw-r--r--  tools/testing/selftests/net/forwarding/devlink_lib.sh | 19
-rwxr-xr-x  tools/testing/selftests/net/forwarding/lib.sh | 8
-rw-r--r--  tools/testing/selftests/net/hsr/Makefile | 7
-rw-r--r--  tools/testing/selftests/net/hsr/config | 4
-rwxr-xr-x  tools/testing/selftests/net/hsr/hsr_ping.sh | 256
-rwxr-xr-x  tools/testing/selftests/net/mptcp/diag.sh | 1
-rw-r--r--  tools/testing/selftests/net/mptcp/mptcp_connect.c | 171
-rwxr-xr-x  tools/testing/selftests/net/mptcp/mptcp_connect.sh | 27
-rwxr-xr-x  tools/testing/selftests/net/mptcp/mptcp_join.sh | 118
-rwxr-xr-x  tools/testing/selftests/net/mptcp/mptcp_sockopt.sh | 69
-rwxr-xr-x  tools/testing/selftests/net/mptcp/simult_flows.sh | 8
-rwxr-xr-x  tools/testing/selftests/net/mptcp/userspace_pm.sh | 298
-rw-r--r--  tools/testing/selftests/net/sctp_hello.c | 137
-rwxr-xr-x  tools/testing/selftests/net/sctp_vrf.sh | 178
-rw-r--r--  tools/testing/selftests/net/so_incoming_cpu.c | 242
-rwxr-xr-x  tools/testing/selftests/tc-testing/tdc.py | 125
183 files changed, 12204 insertions, 1667 deletions
diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst
index 7f3b67a8b48f..11250c4734fe 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-map.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst
@@ -55,7 +55,7 @@ MAP COMMANDS
| | **devmap** | **devmap_hash** | **sockmap** | **cpumap** | **xskmap** | **sockhash**
| | **cgroup_storage** | **reuseport_sockarray** | **percpu_cgroup_storage**
| | **queue** | **stack** | **sk_storage** | **struct_ops** | **ringbuf** | **inode_storage**
-| | **task_storage** | **bloom_filter** | **user_ringbuf** }
+| | **task_storage** | **bloom_filter** | **user_ringbuf** | **cgrp_storage** }
DESCRIPTION
===========
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
index eb1b2a254eb1..14de72544995 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -31,7 +31,7 @@ PROG COMMANDS
| **bpftool** **prog dump xlated** *PROG* [{**file** *FILE* | **opcodes** | **visual** | **linum**}]
| **bpftool** **prog dump jited** *PROG* [{**file** *FILE* | **opcodes** | **linum**}]
| **bpftool** **prog pin** *PROG* *FILE*
-| **bpftool** **prog** { **load** | **loadall** } *OBJ* *PATH* [**type** *TYPE*] [**map** {**idx** *IDX* | **name** *NAME*} *MAP*] [**dev** *NAME*] [**pinmaps** *MAP_DIR*]
+| **bpftool** **prog** { **load** | **loadall** } *OBJ* *PATH* [**type** *TYPE*] [**map** {**idx** *IDX* | **name** *NAME*} *MAP*] [**dev** *NAME*] [**pinmaps** *MAP_DIR*] [**autoattach**]
| **bpftool** **prog attach** *PROG* *ATTACH_TYPE* [*MAP*]
| **bpftool** **prog detach** *PROG* *ATTACH_TYPE* [*MAP*]
| **bpftool** **prog tracelog**
@@ -131,7 +131,7 @@ DESCRIPTION
contain a dot character ('.'), which is reserved for future
extensions of *bpffs*.
- **bpftool prog { load | loadall }** *OBJ* *PATH* [**type** *TYPE*] [**map** {**idx** *IDX* | **name** *NAME*} *MAP*] [**dev** *NAME*] [**pinmaps** *MAP_DIR*]
+ **bpftool prog { load | loadall }** *OBJ* *PATH* [**type** *TYPE*] [**map** {**idx** *IDX* | **name** *NAME*} *MAP*] [**dev** *NAME*] [**pinmaps** *MAP_DIR*] [**autoattach**]
Load bpf program(s) from binary *OBJ* and pin as *PATH*.
**bpftool prog load** pins only the first program from the
*OBJ* as *PATH*. **bpftool prog loadall** pins all programs
@@ -150,6 +150,17 @@ DESCRIPTION
Optional **pinmaps** argument can be provided to pin all
maps under *MAP_DIR* directory.
+ If **autoattach** is specified, the program will be attached
+ before pinning. In that case, only the link (representing the
+ program attached to its hook) is pinned, not the program
+ itself, so the path will not show up in **bpftool prog show -f**,
+ only in **bpftool link show -f**. Also, this only works when
+ bpftool (libbpf) is able to infer all the necessary information
+ from the object file; in particular, it is not supported for
+ all program types. If a program does not support autoattach,
+ bpftool falls back to regular pinning for that program instead.
+
Note: *PATH* must be located in *bpffs* mount. It must not
contain a dot character ('.'), which is reserved for future
extensions of *bpffs*.
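
[Editor's note: the fallback behaviour documented above is implemented by auto_attach_program() later in this patch (tools/bpf/bpftool/prog.c). A minimal libbpf sketch of that core logic, with a hypothetical pin path and no bpftool logging:]

	#include <bpf/bpf.h>
	#include <bpf/libbpf.h>

	/* Try to attach "prog" and pin the resulting link; if libbpf cannot
	 * infer the attach point for this program type, fall back to
	 * pinning the program itself.
	 */
	static int pin_with_autoattach(struct bpf_program *prog, const char *path)
	{
		struct bpf_link *link;
		int err;

		link = bpf_program__attach(prog);
		if (!link) /* autoattach not supported for this type */
			return bpf_obj_pin(bpf_program__fd(prog), path);

		err = bpf_link__pin(link, path);
		bpf_link__destroy(link); /* the pinned link stays alive in bpffs */
		return err;
	}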
diff --git a/tools/bpf/bpftool/Documentation/common_options.rst b/tools/bpf/bpftool/Documentation/common_options.rst
index 4107a586b68b..30df7a707f02 100644
--- a/tools/bpf/bpftool/Documentation/common_options.rst
+++ b/tools/bpf/bpftool/Documentation/common_options.rst
@@ -7,10 +7,10 @@
Print bpftool's version number (similar to **bpftool version**), the
number of the libbpf version in use, and optional features that were
included when bpftool was compiled. Optional features include linking
- against libbfd to provide the disassembler for JIT-ted programs
- (**bpftool prog dump jited**) and usage of BPF skeletons (some
- features like **bpftool prog profile** or showing pids associated to
- BPF objects may rely on it).
+ against LLVM or libbfd to provide the disassembler for JIT-ted
+ programs (**bpftool prog dump jited**) and usage of BPF skeletons
+ (some features like **bpftool prog profile** or showing pids
+ associated with BPF objects may rely on it).
-j, --json
Generate JSON output. For commands that cannot produce JSON, this
@@ -23,12 +23,3 @@
Print all logs available, even debug-level information. This includes
logs from libbpf as well as from the verifier, when attempting to
load programs.
-
--l, --legacy
- Use legacy libbpf mode which has more relaxed BPF program
- requirements. By default, bpftool has more strict requirements
- about section names, changes pinning logic and doesn't support
- some of the older non-BTF map declarations.
-
- See https://github.com/libbpf/libbpf/wiki/Libbpf:-the-road-to-v1.0
- for details.
diff --git a/tools/bpf/bpftool/Documentation/substitutions.rst b/tools/bpf/bpftool/Documentation/substitutions.rst
index ccf1ffa0686c..827e3ffb1766 100644
--- a/tools/bpf/bpftool/Documentation/substitutions.rst
+++ b/tools/bpf/bpftool/Documentation/substitutions.rst
@@ -1,3 +1,3 @@
.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-.. |COMMON_OPTIONS| replace:: { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } | { **-l** | **--legacy** }
+.. |COMMON_OPTIONS| replace:: { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** }
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index 4a95c017ad4c..787b857d3fb5 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -93,11 +93,22 @@ INSTALL ?= install
RM ?= rm -f
FEATURE_USER = .bpftool
-FEATURE_TESTS = libbfd libbfd-liberty libbfd-liberty-z \
- disassembler-four-args disassembler-init-styled libcap \
- clang-bpf-co-re
-FEATURE_DISPLAY = libbfd libbfd-liberty libbfd-liberty-z \
- libcap clang-bpf-co-re
+
+FEATURE_TESTS := clang-bpf-co-re
+FEATURE_TESTS += llvm
+FEATURE_TESTS += libcap
+FEATURE_TESTS += libbfd
+FEATURE_TESTS += libbfd-liberty
+FEATURE_TESTS += libbfd-liberty-z
+FEATURE_TESTS += disassembler-four-args
+FEATURE_TESTS += disassembler-init-styled
+
+FEATURE_DISPLAY := clang-bpf-co-re
+FEATURE_DISPLAY += llvm
+FEATURE_DISPLAY += libcap
+FEATURE_DISPLAY += libbfd
+FEATURE_DISPLAY += libbfd-liberty
+FEATURE_DISPLAY += libbfd-liberty-z
check_feat := 1
NON_CHECK_FEAT_TARGETS := clean uninstall doc doc-clean doc-install doc-uninstall
@@ -115,13 +126,6 @@ include $(FEATURES_DUMP)
endif
endif
-ifeq ($(feature-disassembler-four-args), 1)
-CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
-endif
-ifeq ($(feature-disassembler-init-styled), 1)
- CFLAGS += -DDISASM_INIT_STYLED
-endif
-
LIBS = $(LIBBPF) -lelf -lz
LIBS_BOOTSTRAP = $(LIBBPF_BOOTSTRAP) -lelf -lz
ifeq ($(feature-libcap), 1)
@@ -133,21 +137,41 @@ include $(wildcard $(OUTPUT)*.d)
all: $(OUTPUT)bpftool
-BFD_SRCS = jit_disasm.c
+SRCS := $(wildcard *.c)
-SRCS = $(filter-out $(BFD_SRCS),$(wildcard *.c))
-
-ifeq ($(feature-libbfd),1)
- LIBS += -lbfd -ldl -lopcodes
-else ifeq ($(feature-libbfd-liberty),1)
- LIBS += -lbfd -ldl -lopcodes -liberty
-else ifeq ($(feature-libbfd-liberty-z),1)
- LIBS += -lbfd -ldl -lopcodes -liberty -lz
+ifeq ($(feature-llvm),1)
+ # If LLVM is available, use it for JIT disassembly
+ CFLAGS += -DHAVE_LLVM_SUPPORT
+ LLVM_CONFIG_LIB_COMPONENTS := mcdisassembler all-targets
+ CFLAGS += $(shell $(LLVM_CONFIG) --cflags --libs $(LLVM_CONFIG_LIB_COMPONENTS))
+ LIBS += $(shell $(LLVM_CONFIG) --libs $(LLVM_CONFIG_LIB_COMPONENTS))
+ LDFLAGS += $(shell $(LLVM_CONFIG) --ldflags)
+else
+ # Fall back on libbfd
+ ifeq ($(feature-libbfd),1)
+ LIBS += -lbfd -ldl -lopcodes
+ else ifeq ($(feature-libbfd-liberty),1)
+ LIBS += -lbfd -ldl -lopcodes -liberty
+ else ifeq ($(feature-libbfd-liberty-z),1)
+ LIBS += -lbfd -ldl -lopcodes -liberty -lz
+ endif
+
+ # If one of the above feature combinations is set, we support libbfd
+ ifneq ($(filter -lbfd,$(LIBS)),)
+ CFLAGS += -DHAVE_LIBBFD_SUPPORT
+
+ # Libbfd interface changed over time, figure out what we need
+ ifeq ($(feature-disassembler-four-args), 1)
+ CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
+ endif
+ ifeq ($(feature-disassembler-init-styled), 1)
+ CFLAGS += -DDISASM_INIT_STYLED
+ endif
+ endif
endif
-
-ifneq ($(filter -lbfd,$(LIBS)),)
-CFLAGS += -DHAVE_LIBBFD_SUPPORT
-SRCS += $(BFD_SRCS)
+ifeq ($(filter -DHAVE_LLVM_SUPPORT -DHAVE_LIBBFD_SUPPORT,$(CFLAGS)),)
+ # No support for JIT disassembly
+ SRCS := $(filter-out jit_disasm.c,$(SRCS))
endif
HOST_CFLAGS = $(subst -I$(LIBBPF_INCLUDE),-I$(LIBBPF_BOOTSTRAP_INCLUDE),\
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index dc1641e3670e..35f26f7c1124 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -261,7 +261,7 @@ _bpftool()
# Deal with options
if [[ ${words[cword]} == -* ]]; then
local c='--version --json --pretty --bpffs --mapcompat --debug \
- --use-loader --base-btf --legacy'
+ --use-loader --base-btf'
COMPREPLY=( $( compgen -W "$c" -- "$cur" ) )
return 0
fi
@@ -505,6 +505,7 @@ _bpftool()
_bpftool_once_attr 'type'
_bpftool_once_attr 'dev'
_bpftool_once_attr 'pinmaps'
+ _bpftool_once_attr 'autoattach'
return 0
;;
esac
diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c
index 68a70ac03c80..352290ba7b29 100644
--- a/tools/bpf/bpftool/btf.c
+++ b/tools/bpf/bpftool/btf.c
@@ -467,9 +467,8 @@ static int dump_btf_c(const struct btf *btf,
int err = 0, i;
d = btf_dump__new(btf, btf_dump_printf, NULL, NULL);
- err = libbpf_get_error(d);
- if (err)
- return err;
+ if (!d)
+ return -errno;
printf("#ifndef __VMLINUX_H__\n");
printf("#define __VMLINUX_H__\n");
@@ -512,11 +511,9 @@ static struct btf *get_vmlinux_btf_from_sysfs(void)
struct btf *base;
base = btf__parse(sysfs_vmlinux, NULL);
- if (libbpf_get_error(base)) {
- p_err("failed to parse vmlinux BTF at '%s': %ld\n",
- sysfs_vmlinux, libbpf_get_error(base));
- base = NULL;
- }
+ if (!base)
+ p_err("failed to parse vmlinux BTF at '%s': %d\n",
+ sysfs_vmlinux, -errno);
return base;
}
@@ -559,7 +556,7 @@ static int do_dump(int argc, char **argv)
__u32 btf_id = -1;
const char *src;
int fd = -1;
- int err;
+ int err = 0;
if (!REQ_ARGS(2)) {
usage();
@@ -634,8 +631,8 @@ static int do_dump(int argc, char **argv)
base = get_vmlinux_btf_from_sysfs();
btf = btf__parse_split(*argv, base ?: base_btf);
- err = libbpf_get_error(btf);
if (!btf) {
+ err = -errno;
p_err("failed to load BTF from %s: %s",
*argv, strerror(errno));
goto done;
@@ -681,8 +678,8 @@ static int do_dump(int argc, char **argv)
}
btf = btf__load_from_kernel_by_id_split(btf_id, base_btf);
- err = libbpf_get_error(btf);
if (!btf) {
+ err = -errno;
p_err("get btf by id (%u): %s", btf_id, strerror(errno));
goto done;
}
@@ -815,8 +812,7 @@ build_btf_type_table(struct hashmap *tab, enum bpf_obj_type type,
if (!btf_id)
continue;
- err = hashmap__append(tab, u32_as_hash_field(btf_id),
- u32_as_hash_field(id));
+ err = hashmap__append(tab, btf_id, id);
if (err) {
p_err("failed to append entry to hashmap for BTF ID %u, object ID %u: %s",
btf_id, id, strerror(-err));
@@ -875,17 +871,13 @@ show_btf_plain(struct bpf_btf_info *info, int fd,
printf("size %uB", info->btf_size);
n = 0;
- hashmap__for_each_key_entry(btf_prog_table, entry,
- u32_as_hash_field(info->id)) {
- printf("%s%u", n++ == 0 ? " prog_ids " : ",",
- hash_field_as_u32(entry->value));
+ hashmap__for_each_key_entry(btf_prog_table, entry, info->id) {
+ printf("%s%lu", n++ == 0 ? " prog_ids " : ",", entry->value);
}
n = 0;
- hashmap__for_each_key_entry(btf_map_table, entry,
- u32_as_hash_field(info->id)) {
- printf("%s%u", n++ == 0 ? " map_ids " : ",",
- hash_field_as_u32(entry->value));
+ hashmap__for_each_key_entry(btf_map_table, entry, info->id) {
+ printf("%s%lu", n++ == 0 ? " map_ids " : ",", entry->value);
}
emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");
@@ -907,17 +899,15 @@ show_btf_json(struct bpf_btf_info *info, int fd,
jsonw_name(json_wtr, "prog_ids");
jsonw_start_array(json_wtr); /* prog_ids */
- hashmap__for_each_key_entry(btf_prog_table, entry,
- u32_as_hash_field(info->id)) {
- jsonw_uint(json_wtr, hash_field_as_u32(entry->value));
+ hashmap__for_each_key_entry(btf_prog_table, entry, info->id) {
+ jsonw_uint(json_wtr, entry->value);
}
jsonw_end_array(json_wtr); /* prog_ids */
jsonw_name(json_wtr, "map_ids");
jsonw_start_array(json_wtr); /* map_ids */
- hashmap__for_each_key_entry(btf_map_table, entry,
- u32_as_hash_field(info->id)) {
- jsonw_uint(json_wtr, hash_field_as_u32(entry->value));
+ hashmap__for_each_key_entry(btf_map_table, entry, info->id) {
+ jsonw_uint(json_wtr, entry->value);
}
jsonw_end_array(json_wtr); /* map_ids */
diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c
index 19924b6ce796..eda71fdfe95a 100644
--- a/tools/bpf/bpftool/btf_dumper.c
+++ b/tools/bpf/bpftool/btf_dumper.c
@@ -75,7 +75,7 @@ static int dump_prog_id_as_func_ptr(const struct btf_dumper *d,
goto print;
prog_btf = btf__load_from_kernel_by_id(info.btf_id);
- if (libbpf_get_error(prog_btf))
+ if (!prog_btf)
goto print;
func_type = btf__type_by_id(prog_btf, finfo.type_id);
if (!func_type || !btf_is_func(func_type))
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 0cdb4f711510..c90b756945e3 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
+#ifndef _GNU_SOURCE
#define _GNU_SOURCE
+#endif
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
@@ -495,7 +497,7 @@ static int do_build_table_cb(const char *fpath, const struct stat *sb,
goto out_close;
}
- err = hashmap__append(build_fn_table, u32_as_hash_field(pinned_info.id), path);
+ err = hashmap__append(build_fn_table, pinned_info.id, path);
if (err) {
p_err("failed to append entry to hashmap for ID %u, path '%s': %s",
pinned_info.id, path, strerror(errno));
@@ -546,7 +548,7 @@ void delete_pinned_obj_table(struct hashmap *map)
return;
hashmap__for_each_entry(map, entry, bkt)
- free(entry->value);
+ free(entry->pvalue);
hashmap__free(map);
}
@@ -628,12 +630,11 @@ static int read_sysfs_netdev_hex_int(char *devname, const char *entry_name)
}
const char *
-ifindex_to_bfd_params(__u32 ifindex, __u64 ns_dev, __u64 ns_ino,
- const char **opt)
+ifindex_to_arch(__u32 ifindex, __u64 ns_dev, __u64 ns_ino, const char **opt)
{
+ __maybe_unused int device_id;
char devname[IF_NAMESIZE];
int vendor_id;
- int device_id;
if (!ifindex_to_name_ns(ifindex, ns_dev, ns_ino, devname)) {
p_err("Can't get net device name for ifindex %d: %s", ifindex,
@@ -648,6 +649,7 @@ ifindex_to_bfd_params(__u32 ifindex, __u64 ns_dev, __u64 ns_ino,
}
switch (vendor_id) {
+#ifdef HAVE_LIBBFD_SUPPORT
case 0x19ee:
device_id = read_sysfs_netdev_hex_int(devname, "device");
if (device_id != 0x4000 &&
@@ -656,8 +658,10 @@ ifindex_to_bfd_params(__u32 ifindex, __u64 ns_dev, __u64 ns_ino,
p_info("Unknown NFP device ID, assuming it is NFP-6xxx arch");
*opt = "ctx4";
return "NFP-6xxx";
+#endif /* HAVE_LIBBFD_SUPPORT */
+ /* No NFP support in LLVM; we have no valid triple to return. */
default:
- p_err("Can't get bfd arch name for device vendor id 0x%04x",
+ p_err("Can't get arch name for device vendor id 0x%04x",
vendor_id);
return NULL;
}
@@ -1040,12 +1044,12 @@ int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len)
return fd;
}
-size_t hash_fn_for_key_as_id(const void *key, void *ctx)
+size_t hash_fn_for_key_as_id(long key, void *ctx)
{
- return (size_t)key;
+ return key;
}
-bool equal_fn_for_key_as_id(const void *k1, const void *k2, void *ctx)
+bool equal_fn_for_key_as_id(long k1, long k2, void *ctx)
{
return k1 == k2;
}
diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
index cf8b4e525c88..2883660d6b67 100644
--- a/tools/bpf/bpftool/gen.c
+++ b/tools/bpf/bpftool/gen.c
@@ -252,9 +252,8 @@ static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
int err = 0;
d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
- err = libbpf_get_error(d);
- if (err)
- return err;
+ if (!d)
+ return -errno;
bpf_object__for_each_map(map, obj) {
/* only generate definitions for memory-mapped internal maps */
@@ -976,13 +975,12 @@ static int do_skeleton(int argc, char **argv)
/* log_level1 + log_level2 + stats, but not stable UAPI */
opts.kernel_log_level = 1 + 2 + 4;
obj = bpf_object__open_mem(obj_data, file_sz, &opts);
- err = libbpf_get_error(obj);
- if (err) {
+ if (!obj) {
char err_buf[256];
+ err = -errno;
libbpf_strerror(err, err_buf, sizeof(err_buf));
p_err("failed to open BPF object file: %s", err_buf);
- obj = NULL;
goto out;
}
@@ -1660,21 +1658,16 @@ struct btfgen_info {
struct btf *marked_btf; /* btf structure used to mark used types */
};
-static size_t btfgen_hash_fn(const void *key, void *ctx)
+static size_t btfgen_hash_fn(long key, void *ctx)
{
- return (size_t)key;
+ return key;
}
-static bool btfgen_equal_fn(const void *k1, const void *k2, void *ctx)
+static bool btfgen_equal_fn(long k1, long k2, void *ctx)
{
return k1 == k2;
}
-static void *u32_as_hash_key(__u32 x)
-{
- return (void *)(uintptr_t)x;
-}
-
static void btfgen_free_info(struct btfgen_info *info)
{
if (!info)
@@ -2086,18 +2079,18 @@ static int btfgen_record_obj(struct btfgen_info *info, const char *obj_path)
struct bpf_core_spec specs_scratch[3] = {};
struct bpf_core_relo_res targ_res = {};
struct bpf_core_cand_list *cands = NULL;
- const void *type_key = u32_as_hash_key(relo->type_id);
const char *sec_name = btf__name_by_offset(btf, sec->sec_name_off);
if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
- !hashmap__find(cand_cache, type_key, (void **)&cands)) {
+ !hashmap__find(cand_cache, relo->type_id, &cands)) {
cands = btfgen_find_cands(btf, info->src_btf, relo->type_id);
if (!cands) {
err = -errno;
goto out;
}
- err = hashmap__set(cand_cache, type_key, cands, NULL, NULL);
+ err = hashmap__set(cand_cache, relo->type_id, cands,
+ NULL, NULL);
if (err)
goto out;
}
@@ -2120,7 +2113,7 @@ out:
if (!IS_ERR_OR_NULL(cand_cache)) {
hashmap__for_each_entry(cand_cache, entry, i) {
- bpf_core_free_cands(entry->value);
+ bpf_core_free_cands(entry->pvalue);
}
hashmap__free(cand_cache);
}
diff --git a/tools/bpf/bpftool/iter.c b/tools/bpf/bpftool/iter.c
index f88fdc820d23..9a1d2365a297 100644
--- a/tools/bpf/bpftool/iter.c
+++ b/tools/bpf/bpftool/iter.c
@@ -1,7 +1,10 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (C) 2020 Facebook
+#ifndef _GNU_SOURCE
#define _GNU_SOURCE
+#endif
+#include <errno.h>
#include <unistd.h>
#include <linux/err.h>
#include <bpf/libbpf.h>
@@ -46,8 +49,8 @@ static int do_pin(int argc, char **argv)
}
obj = bpf_object__open(objfile);
- err = libbpf_get_error(obj);
- if (err) {
+ if (!obj) {
+ err = -errno;
p_err("can't open objfile %s", objfile);
goto close_map_fd;
}
@@ -60,13 +63,14 @@ static int do_pin(int argc, char **argv)
prog = bpf_object__next_program(obj, NULL);
if (!prog) {
+ err = -errno;
p_err("can't find bpf program in objfile %s", objfile);
goto close_obj;
}
link = bpf_program__attach_iter(prog, &iter_opts);
- err = libbpf_get_error(link);
- if (err) {
+ if (!link) {
+ err = -errno;
p_err("attach_iter failed for program %s",
bpf_program__name(prog));
goto close_obj;
diff --git a/tools/bpf/bpftool/jit_disasm.c b/tools/bpf/bpftool/jit_disasm.c
index aaf99a0168c9..7b8d9ec89ebd 100644
--- a/tools/bpf/bpftool/jit_disasm.c
+++ b/tools/bpf/bpftool/jit_disasm.c
@@ -11,35 +11,151 @@
* Licensed under the GNU General Public License, version 2.0 (GPLv2)
*/
+#ifndef _GNU_SOURCE
#define _GNU_SOURCE
+#endif
#include <stdio.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdlib.h>
-#include <assert.h>
#include <unistd.h>
#include <string.h>
-#include <bfd.h>
-#include <dis-asm.h>
#include <sys/stat.h>
#include <limits.h>
#include <bpf/libbpf.h>
+
+#ifdef HAVE_LLVM_SUPPORT
+#include <llvm-c/Core.h>
+#include <llvm-c/Disassembler.h>
+#include <llvm-c/Target.h>
+#include <llvm-c/TargetMachine.h>
+#endif
+
+#ifdef HAVE_LIBBFD_SUPPORT
+#include <bfd.h>
+#include <dis-asm.h>
#include <tools/dis-asm-compat.h>
+#endif
#include "json_writer.h"
#include "main.h"
-static void get_exec_path(char *tpath, size_t size)
+static int oper_count;
+
+#ifdef HAVE_LLVM_SUPPORT
+#define DISASM_SPACER
+
+typedef LLVMDisasmContextRef disasm_ctx_t;
+
+static int printf_json(char *s)
+{
+ s = strtok(s, " \t");
+ jsonw_string_field(json_wtr, "operation", s);
+
+ jsonw_name(json_wtr, "operands");
+ jsonw_start_array(json_wtr);
+ oper_count = 1;
+
+ while ((s = strtok(NULL, " \t,()")) != 0) {
+ jsonw_string(json_wtr, s);
+ oper_count++;
+ }
+ return 0;
+}
+
+/* This callback to set the ref_type is necessary to have the LLVM disassembler
+ * print PC-relative addresses instead of byte offsets for branch instruction
+ * targets.
+ */
+static const char *
+symbol_lookup_callback(__maybe_unused void *disasm_info,
+ __maybe_unused uint64_t ref_value,
+ uint64_t *ref_type, __maybe_unused uint64_t ref_PC,
+ __maybe_unused const char **ref_name)
+{
+ *ref_type = LLVMDisassembler_ReferenceType_InOut_None;
+ return NULL;
+}
+
+static int
+init_context(disasm_ctx_t *ctx, const char *arch,
+ __maybe_unused const char *disassembler_options,
+ __maybe_unused unsigned char *image, __maybe_unused ssize_t len)
+{
+ char *triple;
+
+ if (arch)
+ triple = LLVMNormalizeTargetTriple(arch);
+ else
+ triple = LLVMGetDefaultTargetTriple();
+ if (!triple) {
+ p_err("Failed to retrieve triple");
+ return -1;
+ }
+ *ctx = LLVMCreateDisasm(triple, NULL, 0, NULL, symbol_lookup_callback);
+ LLVMDisposeMessage(triple);
+
+ if (!*ctx) {
+ p_err("Failed to create disassembler");
+ return -1;
+ }
+
+ return 0;
+}
+
+static void destroy_context(disasm_ctx_t *ctx)
+{
+ LLVMDisposeMessage(*ctx);
+}
+
+static int
+disassemble_insn(disasm_ctx_t *ctx, unsigned char *image, ssize_t len, int pc)
+{
+ char buf[256];
+ int count;
+
+ count = LLVMDisasmInstruction(*ctx, image + pc, len - pc, pc,
+ buf, sizeof(buf));
+ if (json_output)
+ printf_json(buf);
+ else
+ printf("%s", buf);
+
+ return count;
+}
+
+int disasm_init(void)
+{
+ LLVMInitializeAllTargetInfos();
+ LLVMInitializeAllTargetMCs();
+ LLVMInitializeAllDisassemblers();
+ return 0;
+}
+#endif /* HAVE_LLVM_SUPPORT */
+
+#ifdef HAVE_LIBBFD_SUPPORT
+#define DISASM_SPACER "\t"
+
+typedef struct {
+ struct disassemble_info *info;
+ disassembler_ftype disassemble;
+ bfd *bfdf;
+} disasm_ctx_t;
+
+static int get_exec_path(char *tpath, size_t size)
{
const char *path = "/proc/self/exe";
ssize_t len;
len = readlink(path, tpath, size - 1);
- assert(len > 0);
+ if (len <= 0)
+ return -1;
+
tpath[len] = 0;
+
+ return 0;
}
-static int oper_count;
static int printf_json(void *out, const char *fmt, va_list ap)
{
char *s;
@@ -97,37 +213,44 @@ static int fprintf_json_styled(void *out,
return r;
}
-void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
- const char *arch, const char *disassembler_options,
- const struct btf *btf,
- const struct bpf_prog_linfo *prog_linfo,
- __u64 func_ksym, unsigned int func_idx,
- bool linum)
+static int init_context(disasm_ctx_t *ctx, const char *arch,
+ const char *disassembler_options,
+ unsigned char *image, ssize_t len)
{
- const struct bpf_line_info *linfo = NULL;
- disassembler_ftype disassemble;
- struct disassemble_info info;
- unsigned int nr_skip = 0;
- int count, i, pc = 0;
+ struct disassemble_info *info;
char tpath[PATH_MAX];
bfd *bfdf;
- if (!len)
- return;
-
memset(tpath, 0, sizeof(tpath));
- get_exec_path(tpath, sizeof(tpath));
+ if (get_exec_path(tpath, sizeof(tpath))) {
+ p_err("failed to create disassembler (get_exec_path)");
+ return -1;
+ }
- bfdf = bfd_openr(tpath, NULL);
- assert(bfdf);
- assert(bfd_check_format(bfdf, bfd_object));
+ ctx->bfdf = bfd_openr(tpath, NULL);
+ if (!ctx->bfdf) {
+ p_err("failed to create disassembler (bfd_openr)");
+ return -1;
+ }
+ if (!bfd_check_format(ctx->bfdf, bfd_object)) {
+ p_err("failed to create disassembler (bfd_check_format)");
+ goto err_close;
+ }
+ bfdf = ctx->bfdf;
+
+ ctx->info = malloc(sizeof(struct disassemble_info));
+ if (!ctx->info) {
+ p_err("mem alloc failed");
+ goto err_close;
+ }
+ info = ctx->info;
if (json_output)
- init_disassemble_info_compat(&info, stdout,
+ init_disassemble_info_compat(info, stdout,
(fprintf_ftype) fprintf_json,
fprintf_json_styled);
else
- init_disassemble_info_compat(&info, stdout,
+ init_disassemble_info_compat(info, stdout,
(fprintf_ftype) fprintf,
fprintf_styled);
@@ -139,28 +262,77 @@ void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
bfdf->arch_info = inf;
} else {
p_err("No libbfd support for %s", arch);
- return;
+ goto err_free;
}
}
- info.arch = bfd_get_arch(bfdf);
- info.mach = bfd_get_mach(bfdf);
+ info->arch = bfd_get_arch(bfdf);
+ info->mach = bfd_get_mach(bfdf);
if (disassembler_options)
- info.disassembler_options = disassembler_options;
- info.buffer = image;
- info.buffer_length = len;
+ info->disassembler_options = disassembler_options;
+ info->buffer = image;
+ info->buffer_length = len;
- disassemble_init_for_target(&info);
+ disassemble_init_for_target(info);
#ifdef DISASM_FOUR_ARGS_SIGNATURE
- disassemble = disassembler(info.arch,
- bfd_big_endian(bfdf),
- info.mach,
- bfdf);
+ ctx->disassemble = disassembler(info->arch,
+ bfd_big_endian(bfdf),
+ info->mach,
+ bfdf);
#else
- disassemble = disassembler(bfdf);
+ ctx->disassemble = disassembler(bfdf);
#endif
- assert(disassemble);
+ if (!ctx->disassemble) {
+ p_err("failed to create disassembler");
+ goto err_free;
+ }
+ return 0;
+
+err_free:
+ free(info);
+err_close:
+ bfd_close(ctx->bfdf);
+ return -1;
+}
+
+static void destroy_context(disasm_ctx_t *ctx)
+{
+ free(ctx->info);
+ bfd_close(ctx->bfdf);
+}
+
+static int
+disassemble_insn(disasm_ctx_t *ctx, __maybe_unused unsigned char *image,
+ __maybe_unused ssize_t len, int pc)
+{
+ return ctx->disassemble(pc, ctx->info);
+}
+
+int disasm_init(void)
+{
+ bfd_init();
+ return 0;
+}
+#endif /* HAVE_LIBBFD_SUPPORT */
+
+int disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
+ const char *arch, const char *disassembler_options,
+ const struct btf *btf,
+ const struct bpf_prog_linfo *prog_linfo,
+ __u64 func_ksym, unsigned int func_idx,
+ bool linum)
+{
+ const struct bpf_line_info *linfo = NULL;
+ unsigned int nr_skip = 0;
+ int count, i, pc = 0;
+ disasm_ctx_t ctx;
+
+ if (!len)
+ return -1;
+
+ if (init_context(&ctx, arch, disassembler_options, image, len))
+ return -1;
if (json_output)
jsonw_start_array(json_wtr);
@@ -185,10 +357,11 @@ void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
if (linfo)
btf_dump_linfo_plain(btf, linfo, "; ",
linum);
- printf("%4x:\t", pc);
+ printf("%4x:" DISASM_SPACER, pc);
}
- count = disassemble(pc, &info);
+ count = disassemble_insn(&ctx, image, len, pc);
+
if (json_output) {
/* Operand array, was started in fprintf_json. Before
* that, make sure we have a _null_ value if no operand
@@ -224,11 +397,7 @@ void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
if (json_output)
jsonw_end_array(json_wtr);
- bfd_close(bfdf);
-}
+ destroy_context(&ctx);
-int disasm_init(void)
-{
- bfd_init();
return 0;
}
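
[Editor's note: for orientation, a standalone sketch of the LLVM-C disassembly loop that the new jit_disasm.c code wraps. The byte buffer is a hypothetical x86-64 prologue and only decodes on a host whose default triple is x86-64; build roughly as the Makefile does, e.g. cc demo.c $(llvm-config --cflags --libs mcdisassembler all-targets) $(llvm-config --ldflags).]

	#include <stdio.h>
	#include <llvm-c/Core.h>
	#include <llvm-c/Disassembler.h>
	#include <llvm-c/Target.h>
	#include <llvm-c/TargetMachine.h>

	int main(void)
	{
		/* Hypothetical bytes: push %rbp; mov %rsp,%rbp; pop %rbp; ret */
		unsigned char image[] = { 0x55, 0x48, 0x89, 0xe5, 0x5d, 0xc3 };
		char buf[256];
		size_t pc = 0;

		LLVMInitializeAllTargetInfos();
		LLVMInitializeAllTargetMCs();
		LLVMInitializeAllDisassemblers();

		char *triple = LLVMGetDefaultTargetTriple();
		LLVMDisasmContextRef ctx =
			LLVMCreateDisasm(triple, NULL, 0, NULL, NULL);
		LLVMDisposeMessage(triple);
		if (!ctx)
			return 1;

		while (pc < sizeof(image)) {
			/* Returns the decoded instruction's byte length, 0 on failure */
			size_t count = LLVMDisasmInstruction(ctx, image + pc,
							     sizeof(image) - pc,
							     pc, buf, sizeof(buf));
			if (!count)
				break;
			printf("%4zx:%s\n", pc, buf);
			pc += count;
		}

		LLVMDisasmDispose(ctx);
		return 0;
	}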
diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c
index 2863639706dd..6f4cfe01cad4 100644
--- a/tools/bpf/bpftool/link.c
+++ b/tools/bpf/bpftool/link.c
@@ -204,9 +204,8 @@ static int show_link_close_json(int fd, struct bpf_link_info *info)
jsonw_name(json_wtr, "pinned");
jsonw_start_array(json_wtr);
- hashmap__for_each_key_entry(link_table, entry,
- u32_as_hash_field(info->id))
- jsonw_string(json_wtr, entry->value);
+ hashmap__for_each_key_entry(link_table, entry, info->id)
+ jsonw_string(json_wtr, entry->pvalue);
jsonw_end_array(json_wtr);
}
@@ -309,9 +308,8 @@ static int show_link_close_plain(int fd, struct bpf_link_info *info)
if (!hashmap__empty(link_table)) {
struct hashmap_entry *entry;
- hashmap__for_each_key_entry(link_table, entry,
- u32_as_hash_field(info->id))
- printf("\n\tpinned %s", (char *)entry->value);
+ hashmap__for_each_key_entry(link_table, entry, info->id)
+ printf("\n\tpinned %s", (char *)entry->pvalue);
}
emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index ccd7457f92bf..08d0ac543c67 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -31,7 +31,6 @@ bool block_mount;
bool verifier_logs;
bool relaxed_maps;
bool use_loader;
-bool legacy_libbpf;
struct btf *base_btf;
struct hashmap *refs_table;
@@ -71,6 +70,27 @@ static int do_help(int argc, char **argv)
return 0;
}
+static int do_batch(int argc, char **argv);
+static int do_version(int argc, char **argv);
+
+static const struct cmd commands[] = {
+ { "help", do_help },
+ { "batch", do_batch },
+ { "prog", do_prog },
+ { "map", do_map },
+ { "link", do_link },
+ { "cgroup", do_cgroup },
+ { "perf", do_perf },
+ { "net", do_net },
+ { "feature", do_feature },
+ { "btf", do_btf },
+ { "gen", do_gen },
+ { "struct_ops", do_struct_ops },
+ { "iter", do_iter },
+ { "version", do_version },
+ { 0 }
+};
+
#ifndef BPFTOOL_VERSION
/* bpftool's major and minor version numbers are aligned on libbpf's. There is
* an offset of 6 for the version number, because bpftool's version was higher
@@ -82,6 +102,15 @@ static int do_help(int argc, char **argv)
#define BPFTOOL_PATCH_VERSION 0
#endif
+static void
+print_feature(const char *feature, bool state, unsigned int *nb_features)
+{
+ if (state) {
+ printf("%s %s", *nb_features ? "," : "", feature);
+ *nb_features = *nb_features + 1;
+ }
+}
+
static int do_version(int argc, char **argv)
{
#ifdef HAVE_LIBBFD_SUPPORT
@@ -89,11 +118,28 @@ static int do_version(int argc, char **argv)
#else
const bool has_libbfd = false;
#endif
+#ifdef HAVE_LLVM_SUPPORT
+ const bool has_llvm = true;
+#else
+ const bool has_llvm = false;
+#endif
#ifdef BPFTOOL_WITHOUT_SKELETONS
const bool has_skeletons = false;
#else
const bool has_skeletons = true;
#endif
+ bool bootstrap = false;
+ int i;
+
+ for (i = 0; commands[i].cmd; i++) {
+ if (!strcmp(commands[i].cmd, "prog")) {
+ /* Assume we run a bootstrap version if "bpftool prog"
+ * is not available.
+ */
+ bootstrap = !commands[i].func;
+ break;
+ }
+ }
if (json_output) {
jsonw_start_object(json_wtr); /* root object */
@@ -112,8 +158,9 @@ static int do_version(int argc, char **argv)
jsonw_name(json_wtr, "features");
jsonw_start_object(json_wtr); /* features */
jsonw_bool_field(json_wtr, "libbfd", has_libbfd);
- jsonw_bool_field(json_wtr, "libbpf_strict", !legacy_libbpf);
+ jsonw_bool_field(json_wtr, "llvm", has_llvm);
jsonw_bool_field(json_wtr, "skeletons", has_skeletons);
+ jsonw_bool_field(json_wtr, "bootstrap", bootstrap);
jsonw_end_object(json_wtr); /* features */
jsonw_end_object(json_wtr); /* root object */
@@ -128,16 +175,10 @@ static int do_version(int argc, char **argv)
#endif
printf("using libbpf %s\n", libbpf_version_string());
printf("features:");
- if (has_libbfd) {
- printf(" libbfd");
- nb_features++;
- }
- if (!legacy_libbpf) {
- printf("%s libbpf_strict", nb_features++ ? "," : "");
- nb_features++;
- }
- if (has_skeletons)
- printf("%s skeletons", nb_features++ ? "," : "");
+ print_feature("libbfd", has_libbfd, &nb_features);
+ print_feature("llvm", has_llvm, &nb_features);
+ print_feature("skeletons", has_skeletons, &nb_features);
+ print_feature("bootstrap", bootstrap, &nb_features);
printf("\n");
}
return 0;
@@ -279,26 +320,6 @@ static int make_args(char *line, char *n_argv[], int maxargs, int cmd_nb)
return n_argc;
}
-static int do_batch(int argc, char **argv);
-
-static const struct cmd cmds[] = {
- { "help", do_help },
- { "batch", do_batch },
- { "prog", do_prog },
- { "map", do_map },
- { "link", do_link },
- { "cgroup", do_cgroup },
- { "perf", do_perf },
- { "net", do_net },
- { "feature", do_feature },
- { "btf", do_btf },
- { "gen", do_gen },
- { "struct_ops", do_struct_ops },
- { "iter", do_iter },
- { "version", do_version },
- { 0 }
-};
-
static int do_batch(int argc, char **argv)
{
char buf[BATCH_LINE_LEN_MAX], contline[BATCH_LINE_LEN_MAX];
@@ -313,12 +334,12 @@ static int do_batch(int argc, char **argv)
if (argc < 2) {
p_err("too few parameters for batch");
return -1;
- } else if (!is_prefix(*argv, "file")) {
- p_err("expected 'file', got: %s", *argv);
- return -1;
} else if (argc > 2) {
p_err("too many parameters for batch");
return -1;
+ } else if (!is_prefix(*argv, "file")) {
+ p_err("expected 'file', got: %s", *argv);
+ return -1;
}
NEXT_ARG();
@@ -386,7 +407,7 @@ static int do_batch(int argc, char **argv)
jsonw_name(json_wtr, "output");
}
- err = cmd_select(cmds, n_argc, n_argv, do_help);
+ err = cmd_select(commands, n_argc, n_argv, do_help);
if (json_output)
jsonw_end_object(json_wtr);
@@ -427,7 +448,6 @@ int main(int argc, char **argv)
{ "debug", no_argument, NULL, 'd' },
{ "use-loader", no_argument, NULL, 'L' },
{ "base-btf", required_argument, NULL, 'B' },
- { "legacy", no_argument, NULL, 'l' },
{ 0 }
};
bool version_requested = false;
@@ -450,7 +470,7 @@ int main(int argc, char **argv)
json_output = false;
show_pinned = false;
block_mount = false;
- bin_name = argv[0];
+ bin_name = "bpftool";
opterr = 0;
while ((opt = getopt_long(argc, argv, "VhpjfLmndB:l",
@@ -490,19 +510,15 @@ int main(int argc, char **argv)
break;
case 'B':
base_btf = btf__parse(optarg, NULL);
- if (libbpf_get_error(base_btf)) {
- p_err("failed to parse base BTF at '%s': %ld\n",
- optarg, libbpf_get_error(base_btf));
- base_btf = NULL;
+ if (!base_btf) {
+ p_err("failed to parse base BTF at '%s': %d\n",
+ optarg, -errno);
return -1;
}
break;
case 'L':
use_loader = true;
break;
- case 'l':
- legacy_libbpf = true;
- break;
default:
p_err("unrecognized option '%s'", argv[optind - 1]);
if (json_output)
@@ -512,14 +528,6 @@ int main(int argc, char **argv)
}
}
- if (!legacy_libbpf) {
- /* Allow legacy map definitions for skeleton generation.
- * It will still be rejected if users use LIBBPF_STRICT_ALL
- * mode for loading generated skeleton.
- */
- libbpf_set_strict_mode(LIBBPF_STRICT_ALL & ~LIBBPF_STRICT_MAP_DEFINITIONS);
- }
-
argc -= optind;
argv += optind;
if (argc < 0)
@@ -528,7 +536,7 @@ int main(int argc, char **argv)
if (version_requested)
return do_version(argc, argv);
- ret = cmd_select(cmds, argc, argv, do_help);
+ ret = cmd_select(commands, argc, argv, do_help);
if (json_output)
jsonw_destroy(&json_wtr);
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index 5e5060c2ac04..a84224b6a604 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -57,7 +57,7 @@ static inline void *u64_to_ptr(__u64 ptr)
#define HELP_SPEC_PROGRAM \
"PROG := { id PROG_ID | pinned FILE | tag PROG_TAG | name PROG_NAME }"
#define HELP_SPEC_OPTIONS \
- "OPTIONS := { {-j|--json} [{-p|--pretty}] | {-d|--debug} | {-l|--legacy}"
+ "OPTIONS := { {-j|--json} [{-p|--pretty}] | {-d|--debug}"
#define HELP_SPEC_MAP \
"MAP := { id MAP_ID | pinned FILE | name MAP_NAME }"
#define HELP_SPEC_LINK \
@@ -82,7 +82,6 @@ extern bool block_mount;
extern bool verifier_logs;
extern bool relaxed_maps;
extern bool use_loader;
-extern bool legacy_libbpf;
extern struct btf *base_btf;
extern struct hashmap *refs_table;
@@ -172,27 +171,28 @@ int map_parse_fds(int *argc, char ***argv, int **fds);
int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len);
struct bpf_prog_linfo;
-#ifdef HAVE_LIBBFD_SUPPORT
-void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
- const char *arch, const char *disassembler_options,
- const struct btf *btf,
- const struct bpf_prog_linfo *prog_linfo,
- __u64 func_ksym, unsigned int func_idx,
- bool linum);
+#if defined(HAVE_LLVM_SUPPORT) || defined(HAVE_LIBBFD_SUPPORT)
+int disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
+ const char *arch, const char *disassembler_options,
+ const struct btf *btf,
+ const struct bpf_prog_linfo *prog_linfo,
+ __u64 func_ksym, unsigned int func_idx,
+ bool linum);
int disasm_init(void);
#else
static inline
-void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
- const char *arch, const char *disassembler_options,
- const struct btf *btf,
- const struct bpf_prog_linfo *prog_linfo,
- __u64 func_ksym, unsigned int func_idx,
- bool linum)
+int disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
+ const char *arch, const char *disassembler_options,
+ const struct btf *btf,
+ const struct bpf_prog_linfo *prog_linfo,
+ __u64 func_ksym, unsigned int func_idx,
+ bool linum)
{
+ return 0;
}
static inline int disasm_init(void)
{
- p_err("No libbfd support");
+ p_err("No JIT disassembly support");
return -1;
}
#endif
@@ -202,8 +202,7 @@ void print_hex_data_json(uint8_t *data, size_t len);
unsigned int get_page_size(void);
unsigned int get_possible_cpus(void);
const char *
-ifindex_to_bfd_params(__u32 ifindex, __u64 ns_dev, __u64 ns_ino,
- const char **opt);
+ifindex_to_arch(__u32 ifindex, __u64 ns_dev, __u64 ns_ino, const char **opt);
struct btf_dumper {
const struct btf *btf;
@@ -240,8 +239,8 @@ int do_filter_dump(struct tcmsg *ifinfo, struct nlattr **tb, const char *kind,
int print_all_levels(__maybe_unused enum libbpf_print_level level,
const char *format, va_list args);
-size_t hash_fn_for_key_as_id(const void *key, void *ctx);
-bool equal_fn_for_key_as_id(const void *k1, const void *k2, void *ctx);
+size_t hash_fn_for_key_as_id(long key, void *ctx);
+bool equal_fn_for_key_as_id(long k1, long k2, void *ctx);
/* bpf_attach_type_input_str - convert the provided attach type value into a
* textual representation that we accept for input purposes.
@@ -257,16 +256,6 @@ bool equal_fn_for_key_as_id(const void *k1, const void *k2, void *ctx);
*/
const char *bpf_attach_type_input_str(enum bpf_attach_type t);
-static inline void *u32_as_hash_field(__u32 x)
-{
- return (void *)(uintptr_t)x;
-}
-
-static inline __u32 hash_field_as_u32(const void *x)
-{
- return (__u32)(uintptr_t)x;
-}
-
static inline bool hashmap__empty(struct hashmap *map)
{
return map ? hashmap__size(map) == 0 : true;
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index 9a6ca9f31133..88911d3aa2d9 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
-#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/err.h>
@@ -519,9 +518,8 @@ static int show_map_close_json(int fd, struct bpf_map_info *info)
jsonw_name(json_wtr, "pinned");
jsonw_start_array(json_wtr);
- hashmap__for_each_key_entry(map_table, entry,
- u32_as_hash_field(info->id))
- jsonw_string(json_wtr, entry->value);
+ hashmap__for_each_key_entry(map_table, entry, info->id)
+ jsonw_string(json_wtr, entry->pvalue);
jsonw_end_array(json_wtr);
}
@@ -596,9 +594,8 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info)
if (!hashmap__empty(map_table)) {
struct hashmap_entry *entry;
- hashmap__for_each_key_entry(map_table, entry,
- u32_as_hash_field(info->id))
- printf("\n\tpinned %s", (char *)entry->value);
+ hashmap__for_each_key_entry(map_table, entry, info->id)
+ printf("\n\tpinned %s", (char *)entry->pvalue);
}
if (frozen_str) {
@@ -789,18 +786,18 @@ static int get_map_kv_btf(const struct bpf_map_info *info, struct btf **btf)
if (info->btf_vmlinux_value_type_id) {
if (!btf_vmlinux) {
btf_vmlinux = libbpf_find_kernel_btf();
- err = libbpf_get_error(btf_vmlinux);
- if (err) {
+ if (!btf_vmlinux) {
p_err("failed to get kernel btf");
- return err;
+ return -errno;
}
}
*btf = btf_vmlinux;
} else if (info->btf_value_type_id) {
*btf = btf__load_from_kernel_by_id(info->btf_id);
- err = libbpf_get_error(*btf);
- if (err)
+ if (!*btf) {
+ err = -errno;
p_err("failed to get btf");
+ }
} else {
*btf = NULL;
}
@@ -810,16 +807,10 @@ static int get_map_kv_btf(const struct bpf_map_info *info, struct btf **btf)
static void free_map_kv_btf(struct btf *btf)
{
- if (!libbpf_get_error(btf) && btf != btf_vmlinux)
+ if (btf != btf_vmlinux)
btf__free(btf);
}
-static void free_btf_vmlinux(void)
-{
- if (!libbpf_get_error(btf_vmlinux))
- btf__free(btf_vmlinux);
-}
-
static int
map_dump(int fd, struct bpf_map_info *info, json_writer_t *wtr,
bool show_header)
@@ -956,7 +947,7 @@ exit_close:
close(fds[i]);
exit_free:
free(fds);
- free_btf_vmlinux();
+ btf__free(btf_vmlinux);
return err;
}
@@ -1459,7 +1450,7 @@ static int do_help(int argc, char **argv)
" devmap | devmap_hash | sockmap | cpumap | xskmap | sockhash |\n"
" cgroup_storage | reuseport_sockarray | percpu_cgroup_storage |\n"
" queue | stack | sk_storage | struct_ops | ringbuf | inode_storage |\n"
- " task_storage | bloom_filter | user_ringbuf }\n"
+ " task_storage | bloom_filter | user_ringbuf | cgrp_storage }\n"
" " HELP_SPEC_OPTIONS " |\n"
" {-f|--bpffs} | {-n|--nomount} }\n"
"",
diff --git a/tools/bpf/bpftool/net.c b/tools/bpf/bpftool/net.c
index 526a332c48e6..c40e44c938ae 100644
--- a/tools/bpf/bpftool/net.c
+++ b/tools/bpf/bpftool/net.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (C) 2018 Facebook
+#ifndef _GNU_SOURCE
#define _GNU_SOURCE
+#endif
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
diff --git a/tools/bpf/bpftool/perf.c b/tools/bpf/bpftool/perf.c
index 226ec2c39052..91743445e4c7 100644
--- a/tools/bpf/bpftool/perf.c
+++ b/tools/bpf/bpftool/perf.c
@@ -2,7 +2,9 @@
// Copyright (C) 2018 Facebook
// Author: Yonghong Song <yhs@fb.com>
+#ifndef _GNU_SOURCE
#define _GNU_SOURCE
+#endif
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
diff --git a/tools/bpf/bpftool/pids.c b/tools/bpf/bpftool/pids.c
index bb6c969a114a..00c77edb6331 100644
--- a/tools/bpf/bpftool/pids.c
+++ b/tools/bpf/bpftool/pids.c
@@ -36,8 +36,8 @@ static void add_ref(struct hashmap *map, struct pid_iter_entry *e)
int err, i;
void *tmp;
- hashmap__for_each_key_entry(map, entry, u32_as_hash_field(e->id)) {
- refs = entry->value;
+ hashmap__for_each_key_entry(map, entry, e->id) {
+ refs = entry->pvalue;
for (i = 0; i < refs->ref_cnt; i++) {
if (refs->refs[i].pid == e->pid)
@@ -81,7 +81,7 @@ static void add_ref(struct hashmap *map, struct pid_iter_entry *e)
refs->has_bpf_cookie = e->has_bpf_cookie;
refs->bpf_cookie = e->bpf_cookie;
- err = hashmap__append(map, u32_as_hash_field(e->id), refs);
+ err = hashmap__append(map, e->id, refs);
if (err)
p_err("failed to append entry to hashmap for ID %u: %s",
e->id, strerror(errno));
@@ -183,7 +183,7 @@ void delete_obj_refs_table(struct hashmap *map)
return;
hashmap__for_each_entry(map, entry, bkt) {
- struct obj_refs *refs = entry->value;
+ struct obj_refs *refs = entry->pvalue;
free(refs->refs);
free(refs);
@@ -200,8 +200,8 @@ void emit_obj_refs_json(struct hashmap *map, __u32 id,
if (hashmap__empty(map))
return;
- hashmap__for_each_key_entry(map, entry, u32_as_hash_field(id)) {
- struct obj_refs *refs = entry->value;
+ hashmap__for_each_key_entry(map, entry, id) {
+ struct obj_refs *refs = entry->pvalue;
int i;
if (refs->ref_cnt == 0)
@@ -232,8 +232,8 @@ void emit_obj_refs_plain(struct hashmap *map, __u32 id, const char *prefix)
if (hashmap__empty(map))
return;
- hashmap__for_each_key_entry(map, entry, u32_as_hash_field(id)) {
- struct obj_refs *refs = entry->value;
+ hashmap__for_each_key_entry(map, entry, id) {
+ struct obj_refs *refs = entry->pvalue;
int i;
if (refs->ref_cnt == 0)
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index c81362a001ba..cfc9fdc1e863 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
+#ifndef _GNU_SOURCE
#define _GNU_SOURCE
+#endif
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
@@ -320,7 +322,7 @@ static void show_prog_metadata(int fd, __u32 num_maps)
return;
btf = btf__load_from_kernel_by_id(map_info.btf_id);
- if (libbpf_get_error(btf))
+ if (!btf)
goto out_free;
t_datasec = btf__type_by_id(btf, map_info.btf_value_type_id);
@@ -484,9 +486,8 @@ static void print_prog_json(struct bpf_prog_info *info, int fd)
jsonw_name(json_wtr, "pinned");
jsonw_start_array(json_wtr);
- hashmap__for_each_key_entry(prog_table, entry,
- u32_as_hash_field(info->id))
- jsonw_string(json_wtr, entry->value);
+ hashmap__for_each_key_entry(prog_table, entry, info->id)
+ jsonw_string(json_wtr, entry->pvalue);
jsonw_end_array(json_wtr);
}
@@ -559,9 +560,8 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd)
if (!hashmap__empty(prog_table)) {
struct hashmap_entry *entry;
- hashmap__for_each_key_entry(prog_table, entry,
- u32_as_hash_field(info->id))
- printf("\n\tpinned %s", (char *)entry->value);
+ hashmap__for_each_key_entry(prog_table, entry, info->id)
+ printf("\n\tpinned %s", (char *)entry->pvalue);
}
if (info->btf_id)
@@ -726,7 +726,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
if (info->btf_id) {
btf = btf__load_from_kernel_by_id(info->btf_id);
- if (libbpf_get_error(btf)) {
+ if (!btf) {
p_err("failed to get btf");
return -1;
}
@@ -762,10 +762,8 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
const char *name = NULL;
if (info->ifindex) {
- name = ifindex_to_bfd_params(info->ifindex,
- info->netns_dev,
- info->netns_ino,
- &disasm_opt);
+ name = ifindex_to_arch(info->ifindex, info->netns_dev,
+ info->netns_ino, &disasm_opt);
if (!name)
goto exit_free;
}
@@ -820,10 +818,11 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
printf("%s:\n", sym_name);
}
- disasm_print_insn(img, lens[i], opcodes,
- name, disasm_opt, btf,
- prog_linfo, ksyms[i], i,
- linum);
+ if (disasm_print_insn(img, lens[i], opcodes,
+ name, disasm_opt, btf,
+ prog_linfo, ksyms[i], i,
+ linum))
+ goto exit_free;
img += lens[i];
@@ -836,8 +835,10 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
if (json_output)
jsonw_end_array(json_wtr);
} else {
- disasm_print_insn(buf, member_len, opcodes, name,
- disasm_opt, btf, NULL, 0, 0, false);
+ if (disasm_print_insn(buf, member_len, opcodes, name,
+ disasm_opt, btf, NULL, 0, 0,
+ false))
+ goto exit_free;
}
} else if (visual) {
if (json_output)
@@ -1453,6 +1454,67 @@ get_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
return ret;
}
+static int
+auto_attach_program(struct bpf_program *prog, const char *path)
+{
+ struct bpf_link *link;
+ int err;
+
+ link = bpf_program__attach(prog);
+ if (!link) {
+ p_info("Program %s does not support autoattach, falling back to pinning",
+ bpf_program__name(prog));
+ return bpf_obj_pin(bpf_program__fd(prog), path);
+ }
+
+ err = bpf_link__pin(link, path);
+ bpf_link__destroy(link);
+ return err;
+}
+
+static int pathname_concat(char *buf, size_t buf_sz, const char *path, const char *name)
+{
+ int len;
+
+ len = snprintf(buf, buf_sz, "%s/%s", path, name);
+ if (len < 0)
+ return -EINVAL;
+ if ((size_t)len >= buf_sz)
+ return -ENAMETOOLONG;
+
+ return 0;
+}
+
+static int
+auto_attach_programs(struct bpf_object *obj, const char *path)
+{
+ struct bpf_program *prog;
+ char buf[PATH_MAX];
+ int err;
+
+ bpf_object__for_each_program(prog, obj) {
+ err = pathname_concat(buf, sizeof(buf), path, bpf_program__name(prog));
+ if (err)
+ goto err_unpin_programs;
+
+ err = auto_attach_program(prog, buf);
+ if (err)
+ goto err_unpin_programs;
+ }
+
+ return 0;
+
+err_unpin_programs:
+ while ((prog = bpf_object__prev_program(obj, prog))) {
+ if (pathname_concat(buf, sizeof(buf), path, bpf_program__name(prog)))
+ continue;
+
+ bpf_program__unpin(prog, buf);
+ }
+
+ return err;
+}
+
static int load_with_options(int argc, char **argv, bool first_prog_only)
{
enum bpf_prog_type common_prog_type = BPF_PROG_TYPE_UNSPEC;
@@ -1464,6 +1526,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
struct bpf_program *prog = NULL, *pos;
unsigned int old_map_fds = 0;
const char *pinmaps = NULL;
+ bool auto_attach = false;
struct bpf_object *obj;
struct bpf_map *map;
const char *pinfile;
@@ -1583,6 +1646,9 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
goto err_free_reuse_maps;
pinmaps = GET_ARG();
+ } else if (is_prefix(*argv, "autoattach")) {
+ auto_attach = true;
+ NEXT_ARG();
} else {
p_err("expected no more arguments, 'type', 'map' or 'dev', got: '%s'?",
*argv);
@@ -1597,7 +1663,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
open_opts.kernel_log_level = 1 + 2 + 4;
obj = bpf_object__open_file(file, &open_opts);
- if (libbpf_get_error(obj)) {
+ if (!obj) {
p_err("failed to open object file");
goto err_free_reuse_maps;
}
@@ -1692,14 +1758,20 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
goto err_close_obj;
}
- err = bpf_obj_pin(bpf_program__fd(prog), pinfile);
+ if (auto_attach)
+ err = auto_attach_program(prog, pinfile);
+ else
+ err = bpf_obj_pin(bpf_program__fd(prog), pinfile);
if (err) {
p_err("failed to pin program %s",
bpf_program__section_name(prog));
goto err_close_obj;
}
} else {
- err = bpf_object__pin_programs(obj, pinfile);
+ if (auto_attach)
+ err = auto_attach_programs(obj, pinfile);
+ else
+ err = bpf_object__pin_programs(obj, pinfile);
if (err) {
p_err("failed to pin all programs");
goto err_close_obj;
@@ -1730,11 +1802,6 @@ err_unpin:
else
bpf_object__unpin_programs(obj, pinfile);
err_close_obj:
- if (!legacy_libbpf) {
- p_info("Warning: bpftool is now running in libbpf strict mode and has more stringent requirements about BPF programs.\n"
- "If it used to work for this object file but now doesn't, see --legacy option for more details.\n");
- }
-
bpf_object__close(obj);
err_free_reuse_maps:
for (i = 0; i < old_map_fds; i++)
@@ -1815,7 +1882,7 @@ static int do_loader(int argc, char **argv)
open_opts.kernel_log_level = 1 + 2 + 4;
obj = bpf_object__open_file(file, &open_opts);
- if (libbpf_get_error(obj)) {
+ if (!obj) {
p_err("failed to open object file");
goto err_close_obj;
}
@@ -2132,7 +2199,7 @@ static char *profile_target_name(int tgt_fd)
}
btf = btf__load_from_kernel_by_id(info.btf_id);
- if (libbpf_get_error(btf)) {
+ if (!btf) {
p_err("failed to load btf for prog FD %d", tgt_fd);
goto out;
}
@@ -2338,6 +2405,7 @@ static int do_help(int argc, char **argv)
" [type TYPE] [dev NAME] \\\n"
" [map { idx IDX | name NAME } MAP]\\\n"
" [pinmaps MAP_DIR]\n"
+ " [autoattach]\n"
" %1$s %2$s attach PROG ATTACH_TYPE [MAP]\n"
" %1$s %2$s detach PROG ATTACH_TYPE [MAP]\n"
" %1$s %2$s run PROG \\\n"
diff --git a/tools/bpf/bpftool/struct_ops.c b/tools/bpf/bpftool/struct_ops.c
index e08a6ff2866c..903b80ff4e9a 100644
--- a/tools/bpf/bpftool/struct_ops.c
+++ b/tools/bpf/bpftool/struct_ops.c
@@ -32,7 +32,7 @@ static const struct btf *get_btf_vmlinux(void)
return btf_vmlinux;
btf_vmlinux = libbpf_find_kernel_btf();
- if (libbpf_get_error(btf_vmlinux))
+ if (!btf_vmlinux)
p_err("struct_ops requires kernel CONFIG_DEBUG_INFO_BTF=y");
return btf_vmlinux;
@@ -45,7 +45,7 @@ static const char *get_kern_struct_ops_name(const struct bpf_map_info *info)
const char *st_ops_name;
kern_btf = get_btf_vmlinux();
- if (libbpf_get_error(kern_btf))
+ if (!kern_btf)
return "<btf_vmlinux_not_found>";
t = btf__type_by_id(kern_btf, info->btf_vmlinux_value_type_id);
@@ -63,10 +63,8 @@ static __s32 get_map_info_type_id(void)
return map_info_type_id;
kern_btf = get_btf_vmlinux();
- if (libbpf_get_error(kern_btf)) {
- map_info_type_id = PTR_ERR(kern_btf);
- return map_info_type_id;
- }
+ if (!kern_btf)
+ return 0;
map_info_type_id = btf__find_by_name_kind(kern_btf, "bpf_map_info",
BTF_KIND_STRUCT);
@@ -415,7 +413,7 @@ static int do_dump(int argc, char **argv)
}
kern_btf = get_btf_vmlinux();
- if (libbpf_get_error(kern_btf))
+ if (!kern_btf)
return -1;
if (!json_output) {
@@ -498,7 +496,7 @@ static int do_register(int argc, char **argv)
open_opts.kernel_log_level = 1 + 2 + 4;
obj = bpf_object__open_file(file, &open_opts);
- if (libbpf_get_error(obj))
+ if (!obj)
return -1;
set_max_rlimit();
@@ -513,10 +511,9 @@ static int do_register(int argc, char **argv)
continue;
link = bpf_map__attach_struct_ops(map);
- if (libbpf_get_error(link)) {
+ if (!link) {
p_err("can't register struct_ops %s: %s",
- bpf_map__name(map),
- strerror(-PTR_ERR(link)));
+ bpf_map__name(map), strerror(errno));
nr_errs++;
continue;
}
@@ -593,8 +590,7 @@ int do_struct_ops(int argc, char **argv)
err = cmd_select(cmds, argc, argv, do_help);
- if (!libbpf_get_error(btf_vmlinux))
- btf__free(btf_vmlinux);
+ btf__free(btf_vmlinux);
return err;
}
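
The libbpf_get_error() to NULL-check conversions throughout this series follow libbpf 1.0 error conventions: pointer-returning APIs now return NULL on failure and report the error through errno instead of encoding it in the returned pointer. A minimal sketch of the calling pattern (path is illustrative):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <bpf/libbpf.h>

static struct bpf_object *open_obj(const char *path)
{
	struct bpf_object *obj;

	obj = bpf_object__open_file(path, NULL);
	if (!obj) {
		/* the error code is in errno, not in the returned pointer */
		fprintf(stderr, "failed to open %s: %s\n", path, strerror(errno));
		return NULL;
	}
	return obj;
}
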
diff --git a/tools/bpf/bpftool/xlated_dumper.c b/tools/bpf/bpftool/xlated_dumper.c
index 2d9cd6a7b3c8..6fe3134ae45d 100644
--- a/tools/bpf/bpftool/xlated_dumper.c
+++ b/tools/bpf/bpftool/xlated_dumper.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */
+#ifndef _GNU_SOURCE
#define _GNU_SOURCE
+#endif
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 51b9aa640ad2..f89de51a45db 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -922,7 +922,14 @@ enum bpf_map_type {
BPF_MAP_TYPE_CPUMAP,
BPF_MAP_TYPE_XSKMAP,
BPF_MAP_TYPE_SOCKHASH,
- BPF_MAP_TYPE_CGROUP_STORAGE,
+ BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED,
+ /* BPF_MAP_TYPE_CGROUP_STORAGE is available to bpf programs attaching
+ * to a cgroup. The newer BPF_MAP_TYPE_CGRP_STORAGE is available to
+ * both cgroup-attached and other progs and supports all functionality
+ * provided by BPF_MAP_TYPE_CGROUP_STORAGE. So mark
+ * BPF_MAP_TYPE_CGROUP_STORAGE deprecated.
+ */
+ BPF_MAP_TYPE_CGROUP_STORAGE = BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED,
BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
BPF_MAP_TYPE_QUEUE,
@@ -935,6 +942,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_TASK_STORAGE,
BPF_MAP_TYPE_BLOOM_FILTER,
BPF_MAP_TYPE_USER_RINGBUF,
+ BPF_MAP_TYPE_CGRP_STORAGE,
};
/* Note that tracing related programs such as
@@ -2576,14 +2584,19 @@ union bpf_attr {
* * **SOL_SOCKET**, which supports the following *optname*\ s:
* **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**,
* **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**,
- * **SO_BINDTODEVICE**, **SO_KEEPALIVE**.
+ * **SO_BINDTODEVICE**, **SO_KEEPALIVE**, **SO_REUSEADDR**,
+ * **SO_REUSEPORT**, **SO_BINDTOIFINDEX**, **SO_TXREHASH**.
* * **IPPROTO_TCP**, which supports the following *optname*\ s:
* **TCP_CONGESTION**, **TCP_BPF_IW**,
* **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**,
* **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**,
- * **TCP_SYNCNT**, **TCP_USER_TIMEOUT**, **TCP_NOTSENT_LOWAT**.
+ * **TCP_SYNCNT**, **TCP_USER_TIMEOUT**, **TCP_NOTSENT_LOWAT**,
+ * **TCP_NODELAY**, **TCP_MAXSEG**, **TCP_WINDOW_CLAMP**,
+ * **TCP_THIN_LINEAR_TIMEOUTS**, **TCP_BPF_DELACK_MAX**,
+ * **TCP_BPF_RTO_MIN**.
* * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
- * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
+ * * **IPPROTO_IPV6**, which supports the following *optname*\ s:
+ * **IPV6_TCLASS**, **IPV6_AUTOFLOWLABEL**.
* Return
* 0 on success, or a negative error in case of failure.
*
@@ -2639,7 +2652,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * long bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags)
+ * long bpf_redirect_map(struct bpf_map *map, u64 key, u64 flags)
* Description
* Redirect the packet to the endpoint referenced by *map* at
* index *key*. Depending on its type, this *map* can contain
@@ -2800,12 +2813,10 @@ union bpf_attr {
* and **BPF_CGROUP_INET6_CONNECT**.
*
* This helper actually implements a subset of **getsockopt()**.
- * It supports the following *level*\ s:
- *
- * * **IPPROTO_TCP**, which supports *optname*
- * **TCP_CONGESTION**.
- * * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
- * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
+ * It supports the same set of *optname*\ s that is supported by
+ * the **bpf_setsockopt**\ () helper, with two exceptions:
+ * the **TCP_BPF_*** options are **bpf_setsockopt**\ () only, and
+ * **TCP_SAVED_SYN** is **bpf_getsockopt**\ () only.
* Return
* 0 on success, or a negative error in case of failure.
*
@@ -5435,226 +5446,272 @@ union bpf_attr {
* **-E2BIG** if user-space has tried to publish a sample which is
* larger than the size of the ring buffer, or which cannot fit
* within a struct bpf_dynptr.
+ *
+ * void *bpf_cgrp_storage_get(struct bpf_map *map, struct cgroup *cgroup, void *value, u64 flags)
+ * Description
+ * Get a bpf_local_storage from the *cgroup*.
+ *
+ * Logically, it could be thought of as getting the value from
+ * a *map* with *cgroup* as the **key**. From this
+ * perspective, the usage is not much different from
+ * **bpf_map_lookup_elem**\ (*map*, **&**\ *cgroup*) except that this
+ * helper enforces that the key must be a cgroup struct and the map
+ * must be a **BPF_MAP_TYPE_CGRP_STORAGE**.
+ *
+ * In reality, the local-storage value is embedded directly inside of the
+ * *cgroup* object itself, rather than being located in the
+ * **BPF_MAP_TYPE_CGRP_STORAGE** map. When the local-storage value is
+ * queried for some *map* on a *cgroup* object, the kernel will perform an
+ * O(n) iteration over all of the live local-storage values for that
+ * *cgroup* object until the local-storage value for the *map* is found.
+ *
+ * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
+ * used such that a new bpf_local_storage will be
+ * created if one does not exist. *value* can be used
+ * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
+ * the initial value of a bpf_local_storage. If *value* is
+ * **NULL**, the new bpf_local_storage will be zero initialized.
+ * Return
+ * A bpf_local_storage pointer is returned on success.
+ *
+ * **NULL** if not found or there was an error in adding
+ * a new bpf_local_storage.
+ *
+ * long bpf_cgrp_storage_delete(struct bpf_map *map, struct cgroup *cgroup)
+ * Description
+ * Delete a bpf_local_storage from a *cgroup*.
+ * Return
+ * 0 on success.
+ *
+ * **-ENOENT** if the bpf_local_storage cannot be found.
*/
-#define __BPF_FUNC_MAPPER(FN) \
- FN(unspec), \
- FN(map_lookup_elem), \
- FN(map_update_elem), \
- FN(map_delete_elem), \
- FN(probe_read), \
- FN(ktime_get_ns), \
- FN(trace_printk), \
- FN(get_prandom_u32), \
- FN(get_smp_processor_id), \
- FN(skb_store_bytes), \
- FN(l3_csum_replace), \
- FN(l4_csum_replace), \
- FN(tail_call), \
- FN(clone_redirect), \
- FN(get_current_pid_tgid), \
- FN(get_current_uid_gid), \
- FN(get_current_comm), \
- FN(get_cgroup_classid), \
- FN(skb_vlan_push), \
- FN(skb_vlan_pop), \
- FN(skb_get_tunnel_key), \
- FN(skb_set_tunnel_key), \
- FN(perf_event_read), \
- FN(redirect), \
- FN(get_route_realm), \
- FN(perf_event_output), \
- FN(skb_load_bytes), \
- FN(get_stackid), \
- FN(csum_diff), \
- FN(skb_get_tunnel_opt), \
- FN(skb_set_tunnel_opt), \
- FN(skb_change_proto), \
- FN(skb_change_type), \
- FN(skb_under_cgroup), \
- FN(get_hash_recalc), \
- FN(get_current_task), \
- FN(probe_write_user), \
- FN(current_task_under_cgroup), \
- FN(skb_change_tail), \
- FN(skb_pull_data), \
- FN(csum_update), \
- FN(set_hash_invalid), \
- FN(get_numa_node_id), \
- FN(skb_change_head), \
- FN(xdp_adjust_head), \
- FN(probe_read_str), \
- FN(get_socket_cookie), \
- FN(get_socket_uid), \
- FN(set_hash), \
- FN(setsockopt), \
- FN(skb_adjust_room), \
- FN(redirect_map), \
- FN(sk_redirect_map), \
- FN(sock_map_update), \
- FN(xdp_adjust_meta), \
- FN(perf_event_read_value), \
- FN(perf_prog_read_value), \
- FN(getsockopt), \
- FN(override_return), \
- FN(sock_ops_cb_flags_set), \
- FN(msg_redirect_map), \
- FN(msg_apply_bytes), \
- FN(msg_cork_bytes), \
- FN(msg_pull_data), \
- FN(bind), \
- FN(xdp_adjust_tail), \
- FN(skb_get_xfrm_state), \
- FN(get_stack), \
- FN(skb_load_bytes_relative), \
- FN(fib_lookup), \
- FN(sock_hash_update), \
- FN(msg_redirect_hash), \
- FN(sk_redirect_hash), \
- FN(lwt_push_encap), \
- FN(lwt_seg6_store_bytes), \
- FN(lwt_seg6_adjust_srh), \
- FN(lwt_seg6_action), \
- FN(rc_repeat), \
- FN(rc_keydown), \
- FN(skb_cgroup_id), \
- FN(get_current_cgroup_id), \
- FN(get_local_storage), \
- FN(sk_select_reuseport), \
- FN(skb_ancestor_cgroup_id), \
- FN(sk_lookup_tcp), \
- FN(sk_lookup_udp), \
- FN(sk_release), \
- FN(map_push_elem), \
- FN(map_pop_elem), \
- FN(map_peek_elem), \
- FN(msg_push_data), \
- FN(msg_pop_data), \
- FN(rc_pointer_rel), \
- FN(spin_lock), \
- FN(spin_unlock), \
- FN(sk_fullsock), \
- FN(tcp_sock), \
- FN(skb_ecn_set_ce), \
- FN(get_listener_sock), \
- FN(skc_lookup_tcp), \
- FN(tcp_check_syncookie), \
- FN(sysctl_get_name), \
- FN(sysctl_get_current_value), \
- FN(sysctl_get_new_value), \
- FN(sysctl_set_new_value), \
- FN(strtol), \
- FN(strtoul), \
- FN(sk_storage_get), \
- FN(sk_storage_delete), \
- FN(send_signal), \
- FN(tcp_gen_syncookie), \
- FN(skb_output), \
- FN(probe_read_user), \
- FN(probe_read_kernel), \
- FN(probe_read_user_str), \
- FN(probe_read_kernel_str), \
- FN(tcp_send_ack), \
- FN(send_signal_thread), \
- FN(jiffies64), \
- FN(read_branch_records), \
- FN(get_ns_current_pid_tgid), \
- FN(xdp_output), \
- FN(get_netns_cookie), \
- FN(get_current_ancestor_cgroup_id), \
- FN(sk_assign), \
- FN(ktime_get_boot_ns), \
- FN(seq_printf), \
- FN(seq_write), \
- FN(sk_cgroup_id), \
- FN(sk_ancestor_cgroup_id), \
- FN(ringbuf_output), \
- FN(ringbuf_reserve), \
- FN(ringbuf_submit), \
- FN(ringbuf_discard), \
- FN(ringbuf_query), \
- FN(csum_level), \
- FN(skc_to_tcp6_sock), \
- FN(skc_to_tcp_sock), \
- FN(skc_to_tcp_timewait_sock), \
- FN(skc_to_tcp_request_sock), \
- FN(skc_to_udp6_sock), \
- FN(get_task_stack), \
- FN(load_hdr_opt), \
- FN(store_hdr_opt), \
- FN(reserve_hdr_opt), \
- FN(inode_storage_get), \
- FN(inode_storage_delete), \
- FN(d_path), \
- FN(copy_from_user), \
- FN(snprintf_btf), \
- FN(seq_printf_btf), \
- FN(skb_cgroup_classid), \
- FN(redirect_neigh), \
- FN(per_cpu_ptr), \
- FN(this_cpu_ptr), \
- FN(redirect_peer), \
- FN(task_storage_get), \
- FN(task_storage_delete), \
- FN(get_current_task_btf), \
- FN(bprm_opts_set), \
- FN(ktime_get_coarse_ns), \
- FN(ima_inode_hash), \
- FN(sock_from_file), \
- FN(check_mtu), \
- FN(for_each_map_elem), \
- FN(snprintf), \
- FN(sys_bpf), \
- FN(btf_find_by_name_kind), \
- FN(sys_close), \
- FN(timer_init), \
- FN(timer_set_callback), \
- FN(timer_start), \
- FN(timer_cancel), \
- FN(get_func_ip), \
- FN(get_attach_cookie), \
- FN(task_pt_regs), \
- FN(get_branch_snapshot), \
- FN(trace_vprintk), \
- FN(skc_to_unix_sock), \
- FN(kallsyms_lookup_name), \
- FN(find_vma), \
- FN(loop), \
- FN(strncmp), \
- FN(get_func_arg), \
- FN(get_func_ret), \
- FN(get_func_arg_cnt), \
- FN(get_retval), \
- FN(set_retval), \
- FN(xdp_get_buff_len), \
- FN(xdp_load_bytes), \
- FN(xdp_store_bytes), \
- FN(copy_from_user_task), \
- FN(skb_set_tstamp), \
- FN(ima_file_hash), \
- FN(kptr_xchg), \
- FN(map_lookup_percpu_elem), \
- FN(skc_to_mptcp_sock), \
- FN(dynptr_from_mem), \
- FN(ringbuf_reserve_dynptr), \
- FN(ringbuf_submit_dynptr), \
- FN(ringbuf_discard_dynptr), \
- FN(dynptr_read), \
- FN(dynptr_write), \
- FN(dynptr_data), \
- FN(tcp_raw_gen_syncookie_ipv4), \
- FN(tcp_raw_gen_syncookie_ipv6), \
- FN(tcp_raw_check_syncookie_ipv4), \
- FN(tcp_raw_check_syncookie_ipv6), \
- FN(ktime_get_tai_ns), \
- FN(user_ringbuf_drain), \
+#define ___BPF_FUNC_MAPPER(FN, ctx...) \
+ FN(unspec, 0, ##ctx) \
+ FN(map_lookup_elem, 1, ##ctx) \
+ FN(map_update_elem, 2, ##ctx) \
+ FN(map_delete_elem, 3, ##ctx) \
+ FN(probe_read, 4, ##ctx) \
+ FN(ktime_get_ns, 5, ##ctx) \
+ FN(trace_printk, 6, ##ctx) \
+ FN(get_prandom_u32, 7, ##ctx) \
+ FN(get_smp_processor_id, 8, ##ctx) \
+ FN(skb_store_bytes, 9, ##ctx) \
+ FN(l3_csum_replace, 10, ##ctx) \
+ FN(l4_csum_replace, 11, ##ctx) \
+ FN(tail_call, 12, ##ctx) \
+ FN(clone_redirect, 13, ##ctx) \
+ FN(get_current_pid_tgid, 14, ##ctx) \
+ FN(get_current_uid_gid, 15, ##ctx) \
+ FN(get_current_comm, 16, ##ctx) \
+ FN(get_cgroup_classid, 17, ##ctx) \
+ FN(skb_vlan_push, 18, ##ctx) \
+ FN(skb_vlan_pop, 19, ##ctx) \
+ FN(skb_get_tunnel_key, 20, ##ctx) \
+ FN(skb_set_tunnel_key, 21, ##ctx) \
+ FN(perf_event_read, 22, ##ctx) \
+ FN(redirect, 23, ##ctx) \
+ FN(get_route_realm, 24, ##ctx) \
+ FN(perf_event_output, 25, ##ctx) \
+ FN(skb_load_bytes, 26, ##ctx) \
+ FN(get_stackid, 27, ##ctx) \
+ FN(csum_diff, 28, ##ctx) \
+ FN(skb_get_tunnel_opt, 29, ##ctx) \
+ FN(skb_set_tunnel_opt, 30, ##ctx) \
+ FN(skb_change_proto, 31, ##ctx) \
+ FN(skb_change_type, 32, ##ctx) \
+ FN(skb_under_cgroup, 33, ##ctx) \
+ FN(get_hash_recalc, 34, ##ctx) \
+ FN(get_current_task, 35, ##ctx) \
+ FN(probe_write_user, 36, ##ctx) \
+ FN(current_task_under_cgroup, 37, ##ctx) \
+ FN(skb_change_tail, 38, ##ctx) \
+ FN(skb_pull_data, 39, ##ctx) \
+ FN(csum_update, 40, ##ctx) \
+ FN(set_hash_invalid, 41, ##ctx) \
+ FN(get_numa_node_id, 42, ##ctx) \
+ FN(skb_change_head, 43, ##ctx) \
+ FN(xdp_adjust_head, 44, ##ctx) \
+ FN(probe_read_str, 45, ##ctx) \
+ FN(get_socket_cookie, 46, ##ctx) \
+ FN(get_socket_uid, 47, ##ctx) \
+ FN(set_hash, 48, ##ctx) \
+ FN(setsockopt, 49, ##ctx) \
+ FN(skb_adjust_room, 50, ##ctx) \
+ FN(redirect_map, 51, ##ctx) \
+ FN(sk_redirect_map, 52, ##ctx) \
+ FN(sock_map_update, 53, ##ctx) \
+ FN(xdp_adjust_meta, 54, ##ctx) \
+ FN(perf_event_read_value, 55, ##ctx) \
+ FN(perf_prog_read_value, 56, ##ctx) \
+ FN(getsockopt, 57, ##ctx) \
+ FN(override_return, 58, ##ctx) \
+ FN(sock_ops_cb_flags_set, 59, ##ctx) \
+ FN(msg_redirect_map, 60, ##ctx) \
+ FN(msg_apply_bytes, 61, ##ctx) \
+ FN(msg_cork_bytes, 62, ##ctx) \
+ FN(msg_pull_data, 63, ##ctx) \
+ FN(bind, 64, ##ctx) \
+ FN(xdp_adjust_tail, 65, ##ctx) \
+ FN(skb_get_xfrm_state, 66, ##ctx) \
+ FN(get_stack, 67, ##ctx) \
+ FN(skb_load_bytes_relative, 68, ##ctx) \
+ FN(fib_lookup, 69, ##ctx) \
+ FN(sock_hash_update, 70, ##ctx) \
+ FN(msg_redirect_hash, 71, ##ctx) \
+ FN(sk_redirect_hash, 72, ##ctx) \
+ FN(lwt_push_encap, 73, ##ctx) \
+ FN(lwt_seg6_store_bytes, 74, ##ctx) \
+ FN(lwt_seg6_adjust_srh, 75, ##ctx) \
+ FN(lwt_seg6_action, 76, ##ctx) \
+ FN(rc_repeat, 77, ##ctx) \
+ FN(rc_keydown, 78, ##ctx) \
+ FN(skb_cgroup_id, 79, ##ctx) \
+ FN(get_current_cgroup_id, 80, ##ctx) \
+ FN(get_local_storage, 81, ##ctx) \
+ FN(sk_select_reuseport, 82, ##ctx) \
+ FN(skb_ancestor_cgroup_id, 83, ##ctx) \
+ FN(sk_lookup_tcp, 84, ##ctx) \
+ FN(sk_lookup_udp, 85, ##ctx) \
+ FN(sk_release, 86, ##ctx) \
+ FN(map_push_elem, 87, ##ctx) \
+ FN(map_pop_elem, 88, ##ctx) \
+ FN(map_peek_elem, 89, ##ctx) \
+ FN(msg_push_data, 90, ##ctx) \
+ FN(msg_pop_data, 91, ##ctx) \
+ FN(rc_pointer_rel, 92, ##ctx) \
+ FN(spin_lock, 93, ##ctx) \
+ FN(spin_unlock, 94, ##ctx) \
+ FN(sk_fullsock, 95, ##ctx) \
+ FN(tcp_sock, 96, ##ctx) \
+ FN(skb_ecn_set_ce, 97, ##ctx) \
+ FN(get_listener_sock, 98, ##ctx) \
+ FN(skc_lookup_tcp, 99, ##ctx) \
+ FN(tcp_check_syncookie, 100, ##ctx) \
+ FN(sysctl_get_name, 101, ##ctx) \
+ FN(sysctl_get_current_value, 102, ##ctx) \
+ FN(sysctl_get_new_value, 103, ##ctx) \
+ FN(sysctl_set_new_value, 104, ##ctx) \
+ FN(strtol, 105, ##ctx) \
+ FN(strtoul, 106, ##ctx) \
+ FN(sk_storage_get, 107, ##ctx) \
+ FN(sk_storage_delete, 108, ##ctx) \
+ FN(send_signal, 109, ##ctx) \
+ FN(tcp_gen_syncookie, 110, ##ctx) \
+ FN(skb_output, 111, ##ctx) \
+ FN(probe_read_user, 112, ##ctx) \
+ FN(probe_read_kernel, 113, ##ctx) \
+ FN(probe_read_user_str, 114, ##ctx) \
+ FN(probe_read_kernel_str, 115, ##ctx) \
+ FN(tcp_send_ack, 116, ##ctx) \
+ FN(send_signal_thread, 117, ##ctx) \
+ FN(jiffies64, 118, ##ctx) \
+ FN(read_branch_records, 119, ##ctx) \
+ FN(get_ns_current_pid_tgid, 120, ##ctx) \
+ FN(xdp_output, 121, ##ctx) \
+ FN(get_netns_cookie, 122, ##ctx) \
+ FN(get_current_ancestor_cgroup_id, 123, ##ctx) \
+ FN(sk_assign, 124, ##ctx) \
+ FN(ktime_get_boot_ns, 125, ##ctx) \
+ FN(seq_printf, 126, ##ctx) \
+ FN(seq_write, 127, ##ctx) \
+ FN(sk_cgroup_id, 128, ##ctx) \
+ FN(sk_ancestor_cgroup_id, 129, ##ctx) \
+ FN(ringbuf_output, 130, ##ctx) \
+ FN(ringbuf_reserve, 131, ##ctx) \
+ FN(ringbuf_submit, 132, ##ctx) \
+ FN(ringbuf_discard, 133, ##ctx) \
+ FN(ringbuf_query, 134, ##ctx) \
+ FN(csum_level, 135, ##ctx) \
+ FN(skc_to_tcp6_sock, 136, ##ctx) \
+ FN(skc_to_tcp_sock, 137, ##ctx) \
+ FN(skc_to_tcp_timewait_sock, 138, ##ctx) \
+ FN(skc_to_tcp_request_sock, 139, ##ctx) \
+ FN(skc_to_udp6_sock, 140, ##ctx) \
+ FN(get_task_stack, 141, ##ctx) \
+ FN(load_hdr_opt, 142, ##ctx) \
+ FN(store_hdr_opt, 143, ##ctx) \
+ FN(reserve_hdr_opt, 144, ##ctx) \
+ FN(inode_storage_get, 145, ##ctx) \
+ FN(inode_storage_delete, 146, ##ctx) \
+ FN(d_path, 147, ##ctx) \
+ FN(copy_from_user, 148, ##ctx) \
+ FN(snprintf_btf, 149, ##ctx) \
+ FN(seq_printf_btf, 150, ##ctx) \
+ FN(skb_cgroup_classid, 151, ##ctx) \
+ FN(redirect_neigh, 152, ##ctx) \
+ FN(per_cpu_ptr, 153, ##ctx) \
+ FN(this_cpu_ptr, 154, ##ctx) \
+ FN(redirect_peer, 155, ##ctx) \
+ FN(task_storage_get, 156, ##ctx) \
+ FN(task_storage_delete, 157, ##ctx) \
+ FN(get_current_task_btf, 158, ##ctx) \
+ FN(bprm_opts_set, 159, ##ctx) \
+ FN(ktime_get_coarse_ns, 160, ##ctx) \
+ FN(ima_inode_hash, 161, ##ctx) \
+ FN(sock_from_file, 162, ##ctx) \
+ FN(check_mtu, 163, ##ctx) \
+ FN(for_each_map_elem, 164, ##ctx) \
+ FN(snprintf, 165, ##ctx) \
+ FN(sys_bpf, 166, ##ctx) \
+ FN(btf_find_by_name_kind, 167, ##ctx) \
+ FN(sys_close, 168, ##ctx) \
+ FN(timer_init, 169, ##ctx) \
+ FN(timer_set_callback, 170, ##ctx) \
+ FN(timer_start, 171, ##ctx) \
+ FN(timer_cancel, 172, ##ctx) \
+ FN(get_func_ip, 173, ##ctx) \
+ FN(get_attach_cookie, 174, ##ctx) \
+ FN(task_pt_regs, 175, ##ctx) \
+ FN(get_branch_snapshot, 176, ##ctx) \
+ FN(trace_vprintk, 177, ##ctx) \
+ FN(skc_to_unix_sock, 178, ##ctx) \
+ FN(kallsyms_lookup_name, 179, ##ctx) \
+ FN(find_vma, 180, ##ctx) \
+ FN(loop, 181, ##ctx) \
+ FN(strncmp, 182, ##ctx) \
+ FN(get_func_arg, 183, ##ctx) \
+ FN(get_func_ret, 184, ##ctx) \
+ FN(get_func_arg_cnt, 185, ##ctx) \
+ FN(get_retval, 186, ##ctx) \
+ FN(set_retval, 187, ##ctx) \
+ FN(xdp_get_buff_len, 188, ##ctx) \
+ FN(xdp_load_bytes, 189, ##ctx) \
+ FN(xdp_store_bytes, 190, ##ctx) \
+ FN(copy_from_user_task, 191, ##ctx) \
+ FN(skb_set_tstamp, 192, ##ctx) \
+ FN(ima_file_hash, 193, ##ctx) \
+ FN(kptr_xchg, 194, ##ctx) \
+ FN(map_lookup_percpu_elem, 195, ##ctx) \
+ FN(skc_to_mptcp_sock, 196, ##ctx) \
+ FN(dynptr_from_mem, 197, ##ctx) \
+ FN(ringbuf_reserve_dynptr, 198, ##ctx) \
+ FN(ringbuf_submit_dynptr, 199, ##ctx) \
+ FN(ringbuf_discard_dynptr, 200, ##ctx) \
+ FN(dynptr_read, 201, ##ctx) \
+ FN(dynptr_write, 202, ##ctx) \
+ FN(dynptr_data, 203, ##ctx) \
+ FN(tcp_raw_gen_syncookie_ipv4, 204, ##ctx) \
+ FN(tcp_raw_gen_syncookie_ipv6, 205, ##ctx) \
+ FN(tcp_raw_check_syncookie_ipv4, 206, ##ctx) \
+ FN(tcp_raw_check_syncookie_ipv6, 207, ##ctx) \
+ FN(ktime_get_tai_ns, 208, ##ctx) \
+ FN(user_ringbuf_drain, 209, ##ctx) \
+ FN(cgrp_storage_get, 210, ##ctx) \
+ FN(cgrp_storage_delete, 211, ##ctx) \
/* */
+/* backwards-compatibility macros for users of __BPF_FUNC_MAPPER that don't
+ * know or care about the integer value that is now passed as the second
+ * argument
+ */
+#define __BPF_FUNC_MAPPER_APPLY(name, value, FN) FN(name),
+#define __BPF_FUNC_MAPPER(FN) ___BPF_FUNC_MAPPER(__BPF_FUNC_MAPPER_APPLY, FN)
+
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
*/
-#define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
+#define __BPF_ENUM_FN(x, y) BPF_FUNC_ ## x = y,
enum bpf_func_id {
- __BPF_FUNC_MAPPER(__BPF_ENUM_FN)
+ ___BPF_FUNC_MAPPER(__BPF_ENUM_FN)
__BPF_FUNC_MAX_ID,
};
#undef __BPF_ENUM_FN
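
With the two-argument mapper, each helper enumerator now carries its value explicitly, so the generated enum is identical to the previous implicitly numbered one; abbreviated expansion for reference:

enum bpf_func_id {
	BPF_FUNC_unspec = 0,
	BPF_FUNC_map_lookup_elem = 1,
	BPF_FUNC_map_update_elem = 2,
	/* ... */
	BPF_FUNC_user_ringbuf_drain = 209,
	BPF_FUNC_cgrp_storage_get = 210,
	BPF_FUNC_cgrp_storage_delete = 211,
	__BPF_FUNC_MAX_ID,
};
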
@@ -6391,6 +6448,7 @@ struct bpf_sock_ops {
* the outgoing header has not
* been written yet.
*/
+ __u64 skb_hwtstamp;
};
/* Definitions for bpf_sock_ops_cb_flags */
@@ -6833,6 +6891,16 @@ struct bpf_dynptr {
__u64 :64;
} __attribute__((aligned(8)));
+struct bpf_list_head {
+ __u64 :64;
+ __u64 :64;
+} __attribute__((aligned(8)));
+
+struct bpf_list_node {
+ __u64 :64;
+ __u64 :64;
+} __attribute__((aligned(8)));
+
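
struct bpf_list_head and struct bpf_list_node are opaque placeholders for the kernel-side linked-list types; BPF programs tie a head to its element type through a BTF decl tag. A sketch in the style of the selftests' bpf_experimental.h (the __contains macro shown here is an assumption modeled on that header):

/* assumed to match selftests' bpf_experimental.h */
#define __contains(name, node) \
	__attribute__((btf_decl_tag("contains:" #name ":" #node)))

struct elem {
	long data;
	struct bpf_list_node node;	/* links an elem into the list */
};

struct {
	struct bpf_spin_lock lock;	/* list mutations require the lock */
	struct bpf_list_head head __contains(elem, node);
} holder;
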
struct bpf_sysctl {
__u32 write; /* Sysctl is being read (= 0) or written (= 1).
* Allows 1,2,4-byte read, but no write.
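
Putting the new map type and helpers together, a minimal sketch of a per-cgroup counter program, modeled on the cgrp_local_storage selftests (section and attach point are illustrative; assumes a libbpf with the new helper definitions):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct {
	__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, long);
} cgrp_counter SEC(".maps");

SEC("tp_btf/sys_enter")
int BPF_PROG(count_per_cgroup, struct pt_regs *regs, long id)
{
	struct task_struct *task = bpf_get_current_task_btf();
	long *cnt;

	/* one counter per cgroup, allocated on first use */
	cnt = bpf_cgrp_storage_get(&cgrp_counter, task->cgroups->dfl_cgrp,
				   0, BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (cnt)
		__sync_fetch_and_add(cnt, 1);
	return 0;
}

char _license[] SEC("license") = "GPL";
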
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 1d49a0352836..9aff98f42a3d 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -935,58 +935,98 @@ int bpf_link_get_next_id(__u32 start_id, __u32 *next_id)
return bpf_obj_get_next_id(start_id, next_id, BPF_LINK_GET_NEXT_ID);
}
-int bpf_prog_get_fd_by_id(__u32 id)
+int bpf_prog_get_fd_by_id_opts(__u32 id,
+ const struct bpf_get_fd_by_id_opts *opts)
{
const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
union bpf_attr attr;
int fd;
+ if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts))
+ return libbpf_err(-EINVAL);
+
memset(&attr, 0, attr_sz);
attr.prog_id = id;
+ attr.open_flags = OPTS_GET(opts, open_flags, 0);
fd = sys_bpf_fd(BPF_PROG_GET_FD_BY_ID, &attr, attr_sz);
return libbpf_err_errno(fd);
}
-int bpf_map_get_fd_by_id(__u32 id)
+int bpf_prog_get_fd_by_id(__u32 id)
+{
+ return bpf_prog_get_fd_by_id_opts(id, NULL);
+}
+
+int bpf_map_get_fd_by_id_opts(__u32 id,
+ const struct bpf_get_fd_by_id_opts *opts)
{
const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
union bpf_attr attr;
int fd;
+ if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts))
+ return libbpf_err(-EINVAL);
+
memset(&attr, 0, attr_sz);
attr.map_id = id;
+ attr.open_flags = OPTS_GET(opts, open_flags, 0);
fd = sys_bpf_fd(BPF_MAP_GET_FD_BY_ID, &attr, attr_sz);
return libbpf_err_errno(fd);
}
-int bpf_btf_get_fd_by_id(__u32 id)
+int bpf_map_get_fd_by_id(__u32 id)
+{
+ return bpf_map_get_fd_by_id_opts(id, NULL);
+}
+
+int bpf_btf_get_fd_by_id_opts(__u32 id,
+ const struct bpf_get_fd_by_id_opts *opts)
{
const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
union bpf_attr attr;
int fd;
+ if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts))
+ return libbpf_err(-EINVAL);
+
memset(&attr, 0, attr_sz);
attr.btf_id = id;
+ attr.open_flags = OPTS_GET(opts, open_flags, 0);
fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, attr_sz);
return libbpf_err_errno(fd);
}
-int bpf_link_get_fd_by_id(__u32 id)
+int bpf_btf_get_fd_by_id(__u32 id)
+{
+ return bpf_btf_get_fd_by_id_opts(id, NULL);
+}
+
+int bpf_link_get_fd_by_id_opts(__u32 id,
+ const struct bpf_get_fd_by_id_opts *opts)
{
const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
union bpf_attr attr;
int fd;
+ if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts))
+ return libbpf_err(-EINVAL);
+
memset(&attr, 0, attr_sz);
attr.link_id = id;
+ attr.open_flags = OPTS_GET(opts, open_flags, 0);
fd = sys_bpf_fd(BPF_LINK_GET_FD_BY_ID, &attr, attr_sz);
return libbpf_err_errno(fd);
}
+int bpf_link_get_fd_by_id(__u32 id)
+{
+ return bpf_link_get_fd_by_id_opts(id, NULL);
+}
+
int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len)
{
const size_t attr_sz = offsetofend(union bpf_attr, info);
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 9c50beabdd14..a112e0ed1b19 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -365,10 +365,26 @@ LIBBPF_API int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id);
LIBBPF_API int bpf_map_get_next_id(__u32 start_id, __u32 *next_id);
LIBBPF_API int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id);
LIBBPF_API int bpf_link_get_next_id(__u32 start_id, __u32 *next_id);
+
+struct bpf_get_fd_by_id_opts {
+ size_t sz; /* size of this struct for forward/backward compatibility */
+ __u32 open_flags; /* permissions requested for the operation on fd */
+ size_t :0;
+};
+#define bpf_get_fd_by_id_opts__last_field open_flags
+
LIBBPF_API int bpf_prog_get_fd_by_id(__u32 id);
+LIBBPF_API int bpf_prog_get_fd_by_id_opts(__u32 id,
+ const struct bpf_get_fd_by_id_opts *opts);
LIBBPF_API int bpf_map_get_fd_by_id(__u32 id);
+LIBBPF_API int bpf_map_get_fd_by_id_opts(__u32 id,
+ const struct bpf_get_fd_by_id_opts *opts);
LIBBPF_API int bpf_btf_get_fd_by_id(__u32 id);
+LIBBPF_API int bpf_btf_get_fd_by_id_opts(__u32 id,
+ const struct bpf_get_fd_by_id_opts *opts);
LIBBPF_API int bpf_link_get_fd_by_id(__u32 id);
+LIBBPF_API int bpf_link_get_fd_by_id_opts(__u32 id,
+ const struct bpf_get_fd_by_id_opts *opts);
LIBBPF_API int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len);
struct bpf_prog_query_opts {
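
The *_opts variants exist to pass open_flags through to the kernel, e.g. to request a read-only fd; a minimal sketch (the map id is a placeholder):

#include <bpf/bpf.h>

static int get_ro_map_fd(__u32 map_id)
{
	LIBBPF_OPTS(bpf_get_fd_by_id_opts, opts,
		.open_flags = BPF_F_RDONLY,	/* fd allows lookup but not update */
	);

	return bpf_map_get_fd_by_id_opts(map_id, &opts);
}
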
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index d88647da2c7f..71e165b09ed5 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -1559,15 +1559,15 @@ struct btf_pipe {
static int btf_rewrite_str(__u32 *str_off, void *ctx)
{
struct btf_pipe *p = ctx;
- void *mapped_off;
+ long mapped_off;
int off, err;
if (!*str_off) /* nothing to do for empty strings */
return 0;
if (p->str_off_map &&
- hashmap__find(p->str_off_map, (void *)(long)*str_off, &mapped_off)) {
- *str_off = (__u32)(long)mapped_off;
+ hashmap__find(p->str_off_map, *str_off, &mapped_off)) {
+ *str_off = mapped_off;
return 0;
}
@@ -1579,7 +1579,7 @@ static int btf_rewrite_str(__u32 *str_off, void *ctx)
* performing expensive string comparisons.
*/
if (p->str_off_map) {
- err = hashmap__append(p->str_off_map, (void *)(long)*str_off, (void *)(long)off);
+ err = hashmap__append(p->str_off_map, *str_off, off);
if (err)
return err;
}
@@ -1630,8 +1630,8 @@ static int btf_rewrite_type_ids(__u32 *type_id, void *ctx)
return 0;
}
-static size_t btf_dedup_identity_hash_fn(const void *key, void *ctx);
-static bool btf_dedup_equal_fn(const void *k1, const void *k2, void *ctx);
+static size_t btf_dedup_identity_hash_fn(long key, void *ctx);
+static bool btf_dedup_equal_fn(long k1, long k2, void *ctx);
int btf__add_btf(struct btf *btf, const struct btf *src_btf)
{
@@ -1724,7 +1724,8 @@ err_out:
memset(btf->strs_data + old_strs_len, 0, btf->hdr->str_len - old_strs_len);
/* and now restore original strings section size; types data size
- * wasn't modified, so doesn't need restoring, see big comment above */
+ * wasn't modified, so doesn't need restoring, see big comment above
+ */
btf->hdr->str_len = old_strs_len;
hashmap__free(p.str_off_map);
@@ -2329,7 +2330,7 @@ int btf__add_restrict(struct btf *btf, int ref_type_id)
*/
int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id)
{
- if (!value|| !value[0])
+ if (!value || !value[0])
return libbpf_err(-EINVAL);
return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id);
@@ -2881,6 +2882,7 @@ static int btf_dedup_strings(struct btf_dedup *d);
static int btf_dedup_prim_types(struct btf_dedup *d);
static int btf_dedup_struct_types(struct btf_dedup *d);
static int btf_dedup_ref_types(struct btf_dedup *d);
+static int btf_dedup_resolve_fwds(struct btf_dedup *d);
static int btf_dedup_compact_types(struct btf_dedup *d);
static int btf_dedup_remap_types(struct btf_dedup *d);
@@ -2988,15 +2990,16 @@ static int btf_dedup_remap_types(struct btf_dedup *d);
* Algorithm summary
* =================
*
- * Algorithm completes its work in 6 separate passes:
+ * Algorithm completes its work in 7 separate passes:
*
* 1. Strings deduplication.
* 2. Primitive types deduplication (int, enum, fwd).
* 3. Struct/union types deduplication.
- * 4. Reference types deduplication (pointers, typedefs, arrays, funcs, func
+ * 4. Resolve unambiguous forward declarations.
+ * 5. Reference types deduplication (pointers, typedefs, arrays, funcs, func
* protos, and const/volatile/restrict modifiers).
- * 5. Types compaction.
- * 6. Types remapping.
+ * 6. Types compaction.
+ * 7. Types remapping.
*
* Algorithm determines canonical type descriptor, which is a single
* representative type for each truly unique type. This canonical type is the
@@ -3060,6 +3063,11 @@ int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts)
pr_debug("btf_dedup_struct_types failed:%d\n", err);
goto done;
}
+ err = btf_dedup_resolve_fwds(d);
+ if (err < 0) {
+ pr_debug("btf_dedup_resolve_fwds failed:%d\n", err);
+ goto done;
+ }
err = btf_dedup_ref_types(d);
if (err < 0) {
pr_debug("btf_dedup_ref_types failed:%d\n", err);
@@ -3126,12 +3134,11 @@ static long hash_combine(long h, long value)
}
#define for_each_dedup_cand(d, node, hash) \
- hashmap__for_each_key_entry(d->dedup_table, node, (void *)hash)
+ hashmap__for_each_key_entry(d->dedup_table, node, hash)
static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id)
{
- return hashmap__append(d->dedup_table,
- (void *)hash, (void *)(long)type_id);
+ return hashmap__append(d->dedup_table, hash, type_id);
}
static int btf_dedup_hypot_map_add(struct btf_dedup *d,
@@ -3178,17 +3185,17 @@ static void btf_dedup_free(struct btf_dedup *d)
free(d);
}
-static size_t btf_dedup_identity_hash_fn(const void *key, void *ctx)
+static size_t btf_dedup_identity_hash_fn(long key, void *ctx)
{
- return (size_t)key;
+ return key;
}
-static size_t btf_dedup_collision_hash_fn(const void *key, void *ctx)
+static size_t btf_dedup_collision_hash_fn(long key, void *ctx)
{
return 0;
}
-static bool btf_dedup_equal_fn(const void *k1, const void *k2, void *ctx)
+static bool btf_dedup_equal_fn(long k1, long k2, void *ctx)
{
return k1 == k2;
}
@@ -3404,23 +3411,17 @@ static long btf_hash_enum(struct btf_type *t)
{
long h;
- /* don't hash vlen and enum members to support enum fwd resolving */
+ /* don't hash vlen, enum members and size to support enum fwd resolving */
h = hash_combine(0, t->name_off);
- h = hash_combine(h, t->info & ~0xffff);
- h = hash_combine(h, t->size);
return h;
}
-/* Check structural equality of two ENUMs. */
-static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
+static bool btf_equal_enum_members(struct btf_type *t1, struct btf_type *t2)
{
const struct btf_enum *m1, *m2;
__u16 vlen;
int i;
- if (!btf_equal_common(t1, t2))
- return false;
-
vlen = btf_vlen(t1);
m1 = btf_enum(t1);
m2 = btf_enum(t2);
@@ -3433,15 +3434,12 @@ static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
return true;
}
-static bool btf_equal_enum64(struct btf_type *t1, struct btf_type *t2)
+static bool btf_equal_enum64_members(struct btf_type *t1, struct btf_type *t2)
{
const struct btf_enum64 *m1, *m2;
__u16 vlen;
int i;
- if (!btf_equal_common(t1, t2))
- return false;
-
vlen = btf_vlen(t1);
m1 = btf_enum64(t1);
m2 = btf_enum64(t2);
@@ -3455,6 +3453,19 @@ static bool btf_equal_enum64(struct btf_type *t1, struct btf_type *t2)
return true;
}
+/* Check structural equality of two ENUMs or ENUM64s. */
+static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
+{
+ if (!btf_equal_common(t1, t2))
+ return false;
+
+ /* t1 & t2 kinds are identical because of btf_equal_common */
+ if (btf_kind(t1) == BTF_KIND_ENUM)
+ return btf_equal_enum_members(t1, t2);
+ else
+ return btf_equal_enum64_members(t1, t2);
+}
+
static inline bool btf_is_enum_fwd(struct btf_type *t)
{
return btf_is_any_enum(t) && btf_vlen(t) == 0;
@@ -3464,21 +3475,14 @@ static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
{
if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2))
return btf_equal_enum(t1, t2);
- /* ignore vlen when comparing */
- return t1->name_off == t2->name_off &&
- (t1->info & ~0xffff) == (t2->info & ~0xffff) &&
- t1->size == t2->size;
-}
-
-static bool btf_compat_enum64(struct btf_type *t1, struct btf_type *t2)
-{
- if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2))
- return btf_equal_enum64(t1, t2);
-
- /* ignore vlen when comparing */
+ /* At this point either t1 or t2 or both are forward declarations, thus:
+ * - skip comparing vlen because it is zero for forward declarations;
+ * - skip comparing size to allow enum forward declarations
+ * to be compatible with enum64 full declarations;
+ * - skip comparing kind for the same reason.
+ */
return t1->name_off == t2->name_off &&
- (t1->info & ~0xffff) == (t2->info & ~0xffff) &&
- t1->size == t2->size;
+ btf_is_any_enum(t1) && btf_is_any_enum(t2);
}
/*
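
The relaxed comparison matters when a forward declaration in one compilation unit must match an enum64 definition in another; an illustrative sketch (forward-declared enums are a GNU C extension):

/* CU #1: incomplete type, emitted as a zero-vlen enum FWD in BTF */
enum chan;

/* CU #2: the 64-bit enumerator makes this BTF_KIND_ENUM64 */
enum chan {
	CHAN_LOW  = 1,
	CHAN_HIGH = 0x100000000ULL,
};
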
@@ -3753,7 +3757,7 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
case BTF_KIND_INT:
h = btf_hash_int_decl_tag(t);
for_each_dedup_cand(d, hash_entry, h) {
- cand_id = (__u32)(long)hash_entry->value;
+ cand_id = hash_entry->value;
cand = btf_type_by_id(d->btf, cand_id);
if (btf_equal_int_tag(t, cand)) {
new_id = cand_id;
@@ -3763,9 +3767,10 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
break;
case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
h = btf_hash_enum(t);
for_each_dedup_cand(d, hash_entry, h) {
- cand_id = (__u32)(long)hash_entry->value;
+ cand_id = hash_entry->value;
cand = btf_type_by_id(d->btf, cand_id);
if (btf_equal_enum(t, cand)) {
new_id = cand_id;
@@ -3783,32 +3788,11 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
}
break;
- case BTF_KIND_ENUM64:
- h = btf_hash_enum(t);
- for_each_dedup_cand(d, hash_entry, h) {
- cand_id = (__u32)(long)hash_entry->value;
- cand = btf_type_by_id(d->btf, cand_id);
- if (btf_equal_enum64(t, cand)) {
- new_id = cand_id;
- break;
- }
- if (btf_compat_enum64(t, cand)) {
- if (btf_is_enum_fwd(t)) {
- /* resolve fwd to full enum */
- new_id = cand_id;
- break;
- }
- /* resolve canonical enum fwd to full enum */
- d->map[cand_id] = type_id;
- }
- }
- break;
-
case BTF_KIND_FWD:
case BTF_KIND_FLOAT:
h = btf_hash_common(t);
for_each_dedup_cand(d, hash_entry, h) {
- cand_id = (__u32)(long)hash_entry->value;
+ cand_id = hash_entry->value;
cand = btf_type_by_id(d->btf, cand_id);
if (btf_equal_common(t, cand)) {
new_id = cand_id;
@@ -3887,14 +3871,14 @@ static inline __u16 btf_fwd_kind(struct btf_type *t)
}
/* Check if given two types are identical ARRAY definitions */
-static int btf_dedup_identical_arrays(struct btf_dedup *d, __u32 id1, __u32 id2)
+static bool btf_dedup_identical_arrays(struct btf_dedup *d, __u32 id1, __u32 id2)
{
struct btf_type *t1, *t2;
t1 = btf_type_by_id(d->btf, id1);
t2 = btf_type_by_id(d->btf, id2);
if (!btf_is_array(t1) || !btf_is_array(t2))
- return 0;
+ return false;
return btf_equal_array(t1, t2);
}
@@ -3918,7 +3902,9 @@ static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id
m1 = btf_members(t1);
m2 = btf_members(t2);
for (i = 0, n = btf_vlen(t1); i < n; i++, m1++, m2++) {
- if (m1->type != m2->type)
+ if (m1->type != m2->type &&
+ !btf_dedup_identical_arrays(d, m1->type, m2->type) &&
+ !btf_dedup_identical_structs(d, m1->type, m2->type))
return false;
}
return true;
@@ -4097,10 +4083,8 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
return btf_equal_int_tag(cand_type, canon_type);
case BTF_KIND_ENUM:
- return btf_compat_enum(cand_type, canon_type);
-
case BTF_KIND_ENUM64:
- return btf_compat_enum64(cand_type, canon_type);
+ return btf_compat_enum(cand_type, canon_type);
case BTF_KIND_FWD:
case BTF_KIND_FLOAT:
@@ -4311,7 +4295,7 @@ static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
h = btf_hash_struct(t);
for_each_dedup_cand(d, hash_entry, h) {
- __u32 cand_id = (__u32)(long)hash_entry->value;
+ __u32 cand_id = hash_entry->value;
int eq;
/*
@@ -4416,7 +4400,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
h = btf_hash_common(t);
for_each_dedup_cand(d, hash_entry, h) {
- cand_id = (__u32)(long)hash_entry->value;
+ cand_id = hash_entry->value;
cand = btf_type_by_id(d->btf, cand_id);
if (btf_equal_common(t, cand)) {
new_id = cand_id;
@@ -4433,7 +4417,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
h = btf_hash_int_decl_tag(t);
for_each_dedup_cand(d, hash_entry, h) {
- cand_id = (__u32)(long)hash_entry->value;
+ cand_id = hash_entry->value;
cand = btf_type_by_id(d->btf, cand_id);
if (btf_equal_int_tag(t, cand)) {
new_id = cand_id;
@@ -4457,7 +4441,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
h = btf_hash_array(t);
for_each_dedup_cand(d, hash_entry, h) {
- cand_id = (__u32)(long)hash_entry->value;
+ cand_id = hash_entry->value;
cand = btf_type_by_id(d->btf, cand_id);
if (btf_equal_array(t, cand)) {
new_id = cand_id;
@@ -4489,7 +4473,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
h = btf_hash_fnproto(t);
for_each_dedup_cand(d, hash_entry, h) {
- cand_id = (__u32)(long)hash_entry->value;
+ cand_id = hash_entry->value;
cand = btf_type_by_id(d->btf, cand_id);
if (btf_equal_fnproto(t, cand)) {
new_id = cand_id;
@@ -4526,6 +4510,134 @@ static int btf_dedup_ref_types(struct btf_dedup *d)
}
/*
+ * Collect a map from type names to type ids for all canonical structs
+ * and unions. If the same name is shared by several canonical types,
+ * use the special value 0 to indicate this fact.
+ */
+static int btf_dedup_fill_unique_names_map(struct btf_dedup *d, struct hashmap *names_map)
+{
+ __u32 nr_types = btf__type_cnt(d->btf);
+ struct btf_type *t;
+ __u32 type_id;
+ __u16 kind;
+ int err;
+
+ /*
+ * Iterate over base and split module ids in order to get all
+ * available structs in the map.
+ */
+ for (type_id = 1; type_id < nr_types; ++type_id) {
+ t = btf_type_by_id(d->btf, type_id);
+ kind = btf_kind(t);
+
+ if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
+ continue;
+
+ /* Skip non-canonical types */
+ if (type_id != d->map[type_id])
+ continue;
+
+ err = hashmap__add(names_map, t->name_off, type_id);
+ if (err == -EEXIST)
+ err = hashmap__set(names_map, t->name_off, 0, NULL, NULL);
+
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int btf_dedup_resolve_fwd(struct btf_dedup *d, struct hashmap *names_map, __u32 type_id)
+{
+ struct btf_type *t = btf_type_by_id(d->btf, type_id);
+ enum btf_fwd_kind fwd_kind = btf_kflag(t);
+ __u16 cand_kind, kind = btf_kind(t);
+ struct btf_type *cand_t;
+ uintptr_t cand_id;
+
+ if (kind != BTF_KIND_FWD)
+ return 0;
+
+ /* Skip if this FWD already has a mapping */
+ if (type_id != d->map[type_id])
+ return 0;
+
+ if (!hashmap__find(names_map, t->name_off, &cand_id))
+ return 0;
+
+ /* Zero is a special value indicating that name is not unique */
+ if (!cand_id)
+ return 0;
+
+ cand_t = btf_type_by_id(d->btf, cand_id);
+ cand_kind = btf_kind(cand_t);
+ if ((cand_kind == BTF_KIND_STRUCT && fwd_kind != BTF_FWD_STRUCT) ||
+ (cand_kind == BTF_KIND_UNION && fwd_kind != BTF_FWD_UNION))
+ return 0;
+
+ d->map[type_id] = cand_id;
+
+ return 0;
+}
+
+/*
+ * Resolve unambiguous forward declarations.
+ *
+ * The lion's share of all FWD declarations is resolved during the
+ * `btf_dedup_struct_types` phase, when different type graphs are
+ * compared against each other. However, if in some compilation unit a
+ * FWD declaration is not part of a type graph compared against
+ * another type graph, that declaration's canonical type would not be
+ * changed. Example:
+ *
+ * CU #1:
+ *
+ * struct foo;
+ * struct foo *some_global;
+ *
+ * CU #2:
+ *
+ * struct foo { int u; };
+ * struct foo *another_global;
+ *
+ * After `btf_dedup_struct_types` the BTF looks as follows:
+ *
+ * [1] STRUCT 'foo' size=4 vlen=1 ...
+ * [2] INT 'int' size=4 ...
+ * [3] PTR '(anon)' type_id=1
+ * [4] FWD 'foo' fwd_kind=struct
+ * [5] PTR '(anon)' type_id=4
+ *
+ * This pass assumes that such FWD declarations should be mapped to
+ * structs or unions with an identical name when that name is not
+ * ambiguous.
+ */
+static int btf_dedup_resolve_fwds(struct btf_dedup *d)
+{
+ int i, err;
+ struct hashmap *names_map;
+
+ names_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
+ if (IS_ERR(names_map))
+ return PTR_ERR(names_map);
+
+ err = btf_dedup_fill_unique_names_map(d, names_map);
+ if (err < 0)
+ goto exit;
+
+ for (i = 0; i < d->btf->nr_types; i++) {
+ err = btf_dedup_resolve_fwd(d, names_map, d->btf->start_id + i);
+ if (err < 0)
+ break;
+ }
+
+exit:
+ hashmap__free(names_map);
+ return err;
+}
+
+/*
* Compact types.
*
* After we established for each type its corresponding canonical representative
diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
index 4221f73a74d0..deb2bc9a0a7b 100644
--- a/tools/lib/bpf/btf_dump.c
+++ b/tools/lib/bpf/btf_dump.c
@@ -117,14 +117,14 @@ struct btf_dump {
struct btf_dump_data *typed_dump;
};
-static size_t str_hash_fn(const void *key, void *ctx)
+static size_t str_hash_fn(long key, void *ctx)
{
- return str_hash(key);
+ return str_hash((void *)key);
}
-static bool str_equal_fn(const void *a, const void *b, void *ctx)
+static bool str_equal_fn(long a, long b, void *ctx)
{
- return strcmp(a, b) == 0;
+ return strcmp((void *)a, (void *)b) == 0;
}
static const char *btf_name_of(const struct btf_dump *d, __u32 name_off)
@@ -219,6 +219,17 @@ static int btf_dump_resize(struct btf_dump *d)
return 0;
}
+static void btf_dump_free_names(struct hashmap *map)
+{
+ size_t bkt;
+ struct hashmap_entry *cur;
+
+ hashmap__for_each_entry(map, cur, bkt)
+ free((void *)cur->pkey);
+
+ hashmap__free(map);
+}
+
void btf_dump__free(struct btf_dump *d)
{
int i;
@@ -237,8 +248,8 @@ void btf_dump__free(struct btf_dump *d)
free(d->cached_names);
free(d->emit_queue);
free(d->decl_stack);
- hashmap__free(d->type_names);
- hashmap__free(d->ident_names);
+ btf_dump_free_names(d->type_names);
+ btf_dump_free_names(d->ident_names);
free(d);
}
@@ -944,7 +955,11 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
lvl + 1);
}
- if (vlen)
+ /*
+ * Keep `struct empty {}` on a single line,
+ * only print newline when there are regular or padding fields.
+ */
+ if (vlen || t->size)
btf_dump_printf(d, "\n");
btf_dump_printf(d, "%s}", pfx(lvl));
if (packed)
@@ -1520,11 +1535,22 @@ static void btf_dump_emit_type_cast(struct btf_dump *d, __u32 id,
static size_t btf_dump_name_dups(struct btf_dump *d, struct hashmap *name_map,
const char *orig_name)
{
+ char *old_name, *new_name;
size_t dup_cnt = 0;
+ int err;
+
+ new_name = strdup(orig_name);
+ if (!new_name)
+ return 1;
- hashmap__find(name_map, orig_name, (void **)&dup_cnt);
+ (void)hashmap__find(name_map, orig_name, &dup_cnt);
dup_cnt++;
- hashmap__set(name_map, orig_name, (void *)dup_cnt, NULL, NULL);
+
+ err = hashmap__set(name_map, new_name, dup_cnt, &old_name, NULL);
+ if (err)
+ free(new_name);
+
+ free(old_name);
return dup_cnt;
}
@@ -1963,7 +1989,7 @@ static int btf_dump_struct_data(struct btf_dump *d,
{
const struct btf_member *m = btf_members(t);
__u16 n = btf_vlen(t);
- int i, err;
+ int i, err = 0;
/* note that we increment depth before calling btf_dump_print() below;
* this is intentional. btf_dump_data_newline() will not print a
diff --git a/tools/lib/bpf/hashmap.c b/tools/lib/bpf/hashmap.c
index aeb09c288716..140ee4055676 100644
--- a/tools/lib/bpf/hashmap.c
+++ b/tools/lib/bpf/hashmap.c
@@ -128,7 +128,7 @@ static int hashmap_grow(struct hashmap *map)
}
static bool hashmap_find_entry(const struct hashmap *map,
- const void *key, size_t hash,
+ const long key, size_t hash,
struct hashmap_entry ***pprev,
struct hashmap_entry **entry)
{
@@ -151,18 +151,18 @@ static bool hashmap_find_entry(const struct hashmap *map,
return false;
}
-int hashmap__insert(struct hashmap *map, const void *key, void *value,
- enum hashmap_insert_strategy strategy,
- const void **old_key, void **old_value)
+int hashmap_insert(struct hashmap *map, long key, long value,
+ enum hashmap_insert_strategy strategy,
+ long *old_key, long *old_value)
{
struct hashmap_entry *entry;
size_t h;
int err;
if (old_key)
- *old_key = NULL;
+ *old_key = 0;
if (old_value)
- *old_value = NULL;
+ *old_value = 0;
h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
if (strategy != HASHMAP_APPEND &&
@@ -203,7 +203,7 @@ int hashmap__insert(struct hashmap *map, const void *key, void *value,
return 0;
}
-bool hashmap__find(const struct hashmap *map, const void *key, void **value)
+bool hashmap_find(const struct hashmap *map, long key, long *value)
{
struct hashmap_entry *entry;
size_t h;
@@ -217,8 +217,8 @@ bool hashmap__find(const struct hashmap *map, const void *key, void **value)
return true;
}
-bool hashmap__delete(struct hashmap *map, const void *key,
- const void **old_key, void **old_value)
+bool hashmap_delete(struct hashmap *map, long key,
+ long *old_key, long *old_value)
{
struct hashmap_entry **pprev, *entry;
size_t h;
diff --git a/tools/lib/bpf/hashmap.h b/tools/lib/bpf/hashmap.h
index 10a4c4cd13cf..0a5bf1937a7c 100644
--- a/tools/lib/bpf/hashmap.h
+++ b/tools/lib/bpf/hashmap.h
@@ -40,12 +40,32 @@ static inline size_t str_hash(const char *s)
return h;
}
-typedef size_t (*hashmap_hash_fn)(const void *key, void *ctx);
-typedef bool (*hashmap_equal_fn)(const void *key1, const void *key2, void *ctx);
+typedef size_t (*hashmap_hash_fn)(long key, void *ctx);
+typedef bool (*hashmap_equal_fn)(long key1, long key2, void *ctx);
+/*
+ * The hashmap interface is polymorphic: keys and values can be either
+ * long-sized integers or pointers. This is achieved as follows:
+ * - interface functions that operate on keys and values are hidden
+ * behind auxiliary macros, e.g. hashmap_insert <-> hashmap__insert;
+ * - these auxiliary macros cast the key and value parameters as
+ * long or long *, so the user does not have to specify the casts explicitly;
+ * - for pointer parameters (e.g. old_key) the size of the pointed-to
+ * type is verified by hashmap_cast_ptr using _Static_assert;
+ * - when iterating using the hashmap__for_each_* forms,
+ * hashmap_entry->key should be used for integer keys and
+ * hashmap_entry->pkey should be used for pointer keys;
+ * the same goes for values.
+ */
struct hashmap_entry {
- const void *key;
- void *value;
+ union {
+ long key;
+ const void *pkey;
+ };
+ union {
+ long value;
+ void *pvalue;
+ };
struct hashmap_entry *next;
};
@@ -102,6 +122,13 @@ enum hashmap_insert_strategy {
HASHMAP_APPEND,
};
+#define hashmap_cast_ptr(p) ({ \
+ _Static_assert((__builtin_constant_p((p)) ? (p) == NULL : 0) || \
+ sizeof(*(p)) == sizeof(long), \
+ #p " pointee should be a long-sized integer or a pointer"); \
+ (long *)(p); \
+})
+
/*
* hashmap__insert() adds key/value entry w/ various semantics, depending on
* provided strategy value. If a given key/value pair replaced already
@@ -109,42 +136,38 @@ enum hashmap_insert_strategy {
* through old_key and old_value to allow calling code do proper memory
* management.
*/
-int hashmap__insert(struct hashmap *map, const void *key, void *value,
- enum hashmap_insert_strategy strategy,
- const void **old_key, void **old_value);
+int hashmap_insert(struct hashmap *map, long key, long value,
+ enum hashmap_insert_strategy strategy,
+ long *old_key, long *old_value);
-static inline int hashmap__add(struct hashmap *map,
- const void *key, void *value)
-{
- return hashmap__insert(map, key, value, HASHMAP_ADD, NULL, NULL);
-}
+#define hashmap__insert(map, key, value, strategy, old_key, old_value) \
+ hashmap_insert((map), (long)(key), (long)(value), (strategy), \
+ hashmap_cast_ptr(old_key), \
+ hashmap_cast_ptr(old_value))
-static inline int hashmap__set(struct hashmap *map,
- const void *key, void *value,
- const void **old_key, void **old_value)
-{
- return hashmap__insert(map, key, value, HASHMAP_SET,
- old_key, old_value);
-}
+#define hashmap__add(map, key, value) \
+ hashmap__insert((map), (key), (value), HASHMAP_ADD, NULL, NULL)
-static inline int hashmap__update(struct hashmap *map,
- const void *key, void *value,
- const void **old_key, void **old_value)
-{
- return hashmap__insert(map, key, value, HASHMAP_UPDATE,
- old_key, old_value);
-}
+#define hashmap__set(map, key, value, old_key, old_value) \
+ hashmap__insert((map), (key), (value), HASHMAP_SET, (old_key), (old_value))
-static inline int hashmap__append(struct hashmap *map,
- const void *key, void *value)
-{
- return hashmap__insert(map, key, value, HASHMAP_APPEND, NULL, NULL);
-}
+#define hashmap__update(map, key, value, old_key, old_value) \
+ hashmap__insert((map), (key), (value), HASHMAP_UPDATE, (old_key), (old_value))
+
+#define hashmap__append(map, key, value) \
+ hashmap__insert((map), (key), (value), HASHMAP_APPEND, NULL, NULL)
+
+bool hashmap_delete(struct hashmap *map, long key, long *old_key, long *old_value);
+
+#define hashmap__delete(map, key, old_key, old_value) \
+ hashmap_delete((map), (long)(key), \
+ hashmap_cast_ptr(old_key), \
+ hashmap_cast_ptr(old_value))
-bool hashmap__delete(struct hashmap *map, const void *key,
- const void **old_key, void **old_value);
+bool hashmap_find(const struct hashmap *map, long key, long *value);
-bool hashmap__find(const struct hashmap *map, const void *key, void **value);
+#define hashmap__find(map, key, value) \
+ hashmap_find((map), (long)(key), hashmap_cast_ptr(value))
/*
* hashmap__for_each_entry - iterate over all entries in hashmap
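
Under the updated interface, integer keys and values are passed directly while pointer keys keep working through the same macros; a minimal sketch against this internal header, using an identity hash as in the BTF dedup code above (IS_ERR()/PTR_ERR() come from tools' linux/err.h, as in btf.c):

#include "hashmap.h"

static size_t id_hash(long key, void *ctx) { return key; }
static bool id_equal(long k1, long k2, void *ctx) { return k1 == k2; }

static int demo(void)
{
	struct hashmap *m = hashmap__new(id_hash, id_equal, NULL);
	long old = 0, val = 0;

	if (IS_ERR(m))
		return PTR_ERR(m);

	hashmap__add(m, 42, 100);		/* no (void *) casts needed */
	hashmap__set(m, 42, 200, NULL, &old);	/* old receives 100 */
	if (hashmap__find(m, 42, &val))		/* val receives 200 */
		hashmap__delete(m, 42, NULL, NULL);

	hashmap__free(m);
	return 0;
}
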
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 91b7106a4a73..2a82f49ce16f 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -164,6 +164,7 @@ static const char * const map_type_name[] = {
[BPF_MAP_TYPE_TASK_STORAGE] = "task_storage",
[BPF_MAP_TYPE_BLOOM_FILTER] = "bloom_filter",
[BPF_MAP_TYPE_USER_RINGBUF] = "user_ringbuf",
+ [BPF_MAP_TYPE_CGRP_STORAGE] = "cgrp_storage",
};
static const char * const prog_type_name[] = {
@@ -346,7 +347,8 @@ enum sec_def_flags {
SEC_ATTACHABLE = 2,
SEC_ATTACHABLE_OPT = SEC_ATTACHABLE | SEC_EXP_ATTACH_OPT,
/* attachment target is specified through BTF ID in either kernel or
- * other BPF program's BTF object */
+ * other BPF program's BTF object
+ */
SEC_ATTACH_BTF = 4,
/* BPF program type allows sleeping/blocking in kernel */
SEC_SLEEPABLE = 8,
@@ -487,7 +489,7 @@ struct bpf_map {
char *name;
/* real_name is defined for special internal maps (.rodata*,
* .data*, .bss, .kconfig) and preserves their original ELF section
- * name. This is important to be be able to find corresponding BTF
+ * name. This is important to be able to find corresponding BTF
* DATASEC information.
*/
char *real_name;
@@ -597,7 +599,7 @@ struct elf_state {
size_t shstrndx; /* section index for section name strings */
size_t strtabidx;
struct elf_sec_desc *secs;
- int sec_cnt;
+ size_t sec_cnt;
int btf_maps_shndx;
__u32 btf_maps_sec_btf_id;
int text_shndx;
@@ -1408,6 +1410,10 @@ static int bpf_object__check_endianness(struct bpf_object *obj)
static int
bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
{
+ if (!data) {
+ pr_warn("invalid license section in %s\n", obj->path);
+ return -LIBBPF_ERRNO__FORMAT;
+ }
/* libbpf_strlcpy() only copies first N - 1 bytes, so size + 1 won't
* go over allowed ELF data section buffer
*/
@@ -1421,7 +1427,7 @@ bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
{
__u32 kver;
- if (size != sizeof(kver)) {
+ if (!data || size != sizeof(kver)) {
pr_warn("invalid kver section in %s\n", obj->path);
return -LIBBPF_ERRNO__FORMAT;
}
@@ -1457,15 +1463,12 @@ static int find_elf_sec_sz(const struct bpf_object *obj, const char *name, __u32
return -ENOENT;
}
-static int find_elf_var_offset(const struct bpf_object *obj, const char *name, __u32 *off)
+static Elf64_Sym *find_elf_var_sym(const struct bpf_object *obj, const char *name)
{
Elf_Data *symbols = obj->efile.symbols;
const char *sname;
size_t si;
- if (!name || !off)
- return -EINVAL;
-
for (si = 0; si < symbols->d_size / sizeof(Elf64_Sym); si++) {
Elf64_Sym *sym = elf_sym_by_idx(obj, si);
@@ -1479,15 +1482,13 @@ static int find_elf_var_offset(const struct bpf_object *obj, const char *name, _
sname = elf_sym_str(obj, sym->st_name);
if (!sname) {
pr_warn("failed to get sym name string for var %s\n", name);
- return -EIO;
- }
- if (strcmp(name, sname) == 0) {
- *off = sym->st_value;
- return 0;
+ return ERR_PTR(-EIO);
}
+ if (strcmp(name, sname) == 0)
+ return sym;
}
- return -ENOENT;
+ return ERR_PTR(-ENOENT);
}
static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
@@ -1578,7 +1579,38 @@ static char *internal_map_name(struct bpf_object *obj, const char *real_name)
}
static int
-bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map);
+map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map);
+
+/* An internal BPF map is mmap()'able only if at least one of the
+ * corresponding DATASEC's VARs is to be exposed through the BPF skeleton,
+ * i.e., it is a GLOBAL variable that is not marked __hidden (which
+ * effectively turns it into a STATIC variable).
+ */
+static bool map_is_mmapable(struct bpf_object *obj, struct bpf_map *map)
+{
+ const struct btf_type *t, *vt;
+ struct btf_var_secinfo *vsi;
+ int i, n;
+
+ if (!map->btf_value_type_id)
+ return false;
+
+ t = btf__type_by_id(obj->btf, map->btf_value_type_id);
+ if (!btf_is_datasec(t))
+ return false;
+
+ vsi = btf_var_secinfos(t);
+ for (i = 0, n = btf_vlen(t); i < n; i++, vsi++) {
+ vt = btf__type_by_id(obj->btf, vsi->type);
+ if (!btf_is_var(vt))
+ continue;
+
+ if (btf_var(vt)->linkage != BTF_VAR_STATIC)
+ return true;
+ }
+
+ return false;
+}
static int
bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
@@ -1610,7 +1642,12 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
def->max_entries = 1;
def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
? BPF_F_RDONLY_PROG : 0;
- def->map_flags |= BPF_F_MMAPABLE;
+
+ /* failures are fine because of maps like .rodata.str1.1 */
+ (void) map_fill_btf_type_info(obj, map);
+
+ if (map_is_mmapable(obj, map))
+ def->map_flags |= BPF_F_MMAPABLE;
pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
map->name, map->sec_idx, map->sec_offset, def->map_flags);
@@ -1627,9 +1664,6 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
return err;
}
- /* failures are fine because of maps like .rodata.str1.1 */
- (void) bpf_map_find_btf_info(obj, map);
-
if (data)
memcpy(map->mmaped, data, data_sz);
@@ -1830,12 +1864,20 @@ static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val,
return -ERANGE;
}
switch (ext->kcfg.sz) {
- case 1: *(__u8 *)ext_val = value; break;
- case 2: *(__u16 *)ext_val = value; break;
- case 4: *(__u32 *)ext_val = value; break;
- case 8: *(__u64 *)ext_val = value; break;
- default:
- return -EINVAL;
+ case 1:
+ *(__u8 *)ext_val = value;
+ break;
+ case 2:
+ *(__u16 *)ext_val = value;
+ break;
+ case 4:
+ *(__u32 *)ext_val = value;
+ break;
+ case 8:
+ *(__u64 *)ext_val = value;
+ break;
+ default:
+ return -EINVAL;
}
ext->is_set = true;
return 0;
@@ -2541,7 +2583,7 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
fill_map_from_def(map->inner_map, &inner_def);
}
- err = bpf_map_find_btf_info(obj, map);
+ err = map_fill_btf_type_info(obj, map);
if (err)
return err;
@@ -2737,7 +2779,7 @@ static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
m->type = enum64_placeholder_id;
m->offset = 0;
}
- }
+ }
}
return 0;
@@ -2846,57 +2888,89 @@ static int compare_vsi_off(const void *_a, const void *_b)
static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
struct btf_type *t)
{
- __u32 size = 0, off = 0, i, vars = btf_vlen(t);
- const char *name = btf__name_by_offset(btf, t->name_off);
- const struct btf_type *t_var;
+ __u32 size = 0, i, vars = btf_vlen(t);
+ const char *sec_name = btf__name_by_offset(btf, t->name_off);
struct btf_var_secinfo *vsi;
- const struct btf_var *var;
- int ret;
+ bool fixup_offsets = false;
+ int err;
- if (!name) {
+ if (!sec_name) {
pr_debug("No name found in string section for DATASEC kind.\n");
return -ENOENT;
}
- /* .extern datasec size and var offsets were set correctly during
- * extern collection step, so just skip straight to sorting variables
+ /* Extern-backing datasecs (.ksyms, .kconfig) have their size and
+ * variable offsets set at the previous step. Further, not every
+ * extern BTF VAR has a corresponding ELF symbol preserved, so we skip
+ * all fixups altogether for such sections and go straight to sorting
+ * VARs within their DATASEC.
*/
- if (t->size)
+ if (strcmp(sec_name, KCONFIG_SEC) == 0 || strcmp(sec_name, KSYMS_SEC) == 0)
goto sort_vars;
- ret = find_elf_sec_sz(obj, name, &size);
- if (ret || !size) {
- pr_debug("Invalid size for section %s: %u bytes\n", name, size);
- return -ENOENT;
- }
+ /* Clang leaves DATASEC size and VAR offsets as zeroes, so we need to
+ * fix them up. The BPF static linker, however, already fills in all
+ * the sizes and offsets during static linking, so this step has to be
+ * optional. But the STV_HIDDEN handling is non-optional for any
+ * non-extern DATASEC, so the variable fixup loop below handles both
+ * tasks at the same time, paying the cost of BTF VAR <-> ELF symbol
+ * matching just once.
+ */
+ if (t->size == 0) {
+ err = find_elf_sec_sz(obj, sec_name, &size);
+ if (err || !size) {
+ pr_debug("sec '%s': failed to determine size from ELF: size %u, err %d\n",
+ sec_name, size, err);
+ return -ENOENT;
+ }
- t->size = size;
+ t->size = size;
+ fixup_offsets = true;
+ }
for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
+ const struct btf_type *t_var;
+ struct btf_var *var;
+ const char *var_name;
+ Elf64_Sym *sym;
+
t_var = btf__type_by_id(btf, vsi->type);
if (!t_var || !btf_is_var(t_var)) {
- pr_debug("Non-VAR type seen in section %s\n", name);
+ pr_debug("sec '%s': unexpected non-VAR type found\n", sec_name);
return -EINVAL;
}
var = btf_var(t_var);
- if (var->linkage == BTF_VAR_STATIC)
+ if (var->linkage == BTF_VAR_STATIC || var->linkage == BTF_VAR_GLOBAL_EXTERN)
continue;
- name = btf__name_by_offset(btf, t_var->name_off);
- if (!name) {
- pr_debug("No name found in string section for VAR kind\n");
+ var_name = btf__name_by_offset(btf, t_var->name_off);
+ if (!var_name) {
+ pr_debug("sec '%s': failed to find name of DATASEC's member #%d\n",
+ sec_name, i);
return -ENOENT;
}
- ret = find_elf_var_offset(obj, name, &off);
- if (ret) {
- pr_debug("No offset found in symbol table for VAR %s\n",
- name);
+ sym = find_elf_var_sym(obj, var_name);
+ if (IS_ERR(sym)) {
+ pr_debug("sec '%s': failed to find ELF symbol for VAR '%s'\n",
+ sec_name, var_name);
return -ENOENT;
}
- vsi->offset = off;
+ if (fixup_offsets)
+ vsi->offset = sym->st_value;
+
+ /* if a variable is a global/weak symbol but has restricted
+ * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF VAR
+ * as static. This follows the similar logic used for functions
+ * (BPF subprogs) and influences libbpf's later decision about
+ * whether to mark global data BPF array maps as
+ * BPF_F_MMAPABLE.
+ */
+ if (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
+ || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL)
+ var->linkage = BTF_VAR_STATIC;
}
sort_vars:
@@ -2904,13 +2978,16 @@ sort_vars:
return 0;
}
-static int btf_finalize_data(struct bpf_object *obj, struct btf *btf)
+static int bpf_object_fixup_btf(struct bpf_object *obj)
{
- int err = 0;
- __u32 i, n = btf__type_cnt(btf);
+ int i, n, err = 0;
+
+ if (!obj->btf)
+ return 0;
+ n = btf__type_cnt(obj->btf);
for (i = 1; i < n; i++) {
- struct btf_type *t = btf_type_by_id(btf, i);
+ struct btf_type *t = btf_type_by_id(obj->btf, i);
/* Loader needs to fix up some of the things compiler
* couldn't get its hands on while emitting BTF. This
@@ -2918,28 +2995,12 @@ static int btf_finalize_data(struct bpf_object *obj, struct btf *btf)
* the info from the ELF itself for this purpose.
*/
if (btf_is_datasec(t)) {
- err = btf_fixup_datasec(obj, btf, t);
+ err = btf_fixup_datasec(obj, obj->btf, t);
if (err)
- break;
+ return err;
}
}
- return libbpf_err(err);
-}
-
-static int bpf_object__finalize_btf(struct bpf_object *obj)
-{
- int err;
-
- if (!obj->btf)
- return 0;
-
- err = btf_finalize_data(obj, obj->btf);
- if (err) {
- pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
- return err;
- }
-
return 0;
}
@@ -3312,10 +3373,15 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
Elf64_Shdr *sh;
/* ELF section indices are 0-based, but sec #0 is special "invalid"
- * section. e_shnum does include sec #0, so e_shnum is the necessary
- * size of an array to keep all the sections.
+ * section. Since the section count retrieved by elf_getshdrnum()
+ * includes sec #0, it is already the necessary size of an array to
+ * keep all the sections.
*/
- obj->efile.sec_cnt = obj->efile.ehdr->e_shnum;
+ if (elf_getshdrnum(obj->efile.elf, &obj->efile.sec_cnt)) {
+ pr_warn("elf: failed to get the number of sections for %s: %s\n",
+ obj->path, elf_errmsg(-1));
+ return -LIBBPF_ERRNO__FORMAT;
+ }
obj->efile.secs = calloc(obj->efile.sec_cnt, sizeof(*obj->efile.secs));
if (!obj->efile.secs)
return -ENOMEM;
@@ -3445,7 +3511,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
sec_desc->sec_type = SEC_RELO;
sec_desc->shdr = sh;
sec_desc->data = data;
- } else if (sh->sh_type == SHT_NOBITS && strcmp(name, BSS_SEC) == 0) {
+ } else if (sh->sh_type == SHT_NOBITS && (strcmp(name, BSS_SEC) == 0 ||
+ str_has_pfx(name, BSS_SEC "."))) {
sec_desc->sec_type = SEC_BSS;
sec_desc->shdr = sh;
sec_desc->data = data;
@@ -3461,7 +3528,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
}
/* sort BPF programs by section name and in-section instruction offset
- * for faster search */
+ * for faster search
+ */
if (obj->nr_programs)
qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);
@@ -3760,7 +3828,7 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
return -EINVAL;
}
ext->kcfg.type = find_kcfg_type(obj->btf, t->type,
- &ext->kcfg.is_signed);
+ &ext->kcfg.is_signed);
if (ext->kcfg.type == KCFG_UNKNOWN) {
pr_warn("extern (kcfg) '%s': type is unsupported\n", ext_name);
return -ENOTSUP;
@@ -4106,6 +4174,9 @@ static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
int l = 0, r = obj->nr_programs - 1, m;
struct bpf_program *prog;
+ if (!obj->nr_programs)
+ return NULL;
+
while (l < r) {
m = l + (r - l + 1) / 2;
prog = &obj->programs[m];
@@ -4223,7 +4294,7 @@ bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Dat
return 0;
}
-static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
+static int map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map)
{
int id;
@@ -4905,9 +4976,9 @@ bpf_object__reuse_map(struct bpf_map *map)
err = bpf_map__reuse_fd(map, pin_fd);
close(pin_fd);
- if (err) {
+ if (err)
return err;
- }
+
map->pinned = true;
pr_debug("reused pinned map at '%s'\n", map->pin_path);
@@ -5425,7 +5496,7 @@ static int load_module_btfs(struct bpf_object *obj)
}
err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
- sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
+ sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
if (err)
goto err_out;
@@ -5541,21 +5612,16 @@ int bpf_core_types_match(const struct btf *local_btf, __u32 local_id,
return __bpf_core_types_match(local_btf, local_id, targ_btf, targ_id, false, 32);
}
-static size_t bpf_core_hash_fn(const void *key, void *ctx)
+static size_t bpf_core_hash_fn(const long key, void *ctx)
{
- return (size_t)key;
+ return key;
}
-static bool bpf_core_equal_fn(const void *k1, const void *k2, void *ctx)
+static bool bpf_core_equal_fn(const long k1, const long k2, void *ctx)
{
return k1 == k2;
}
-static void *u32_as_hash_key(__u32 x)
-{
- return (void *)(uintptr_t)x;
-}
-
static int record_relo_core(struct bpf_program *prog,
const struct bpf_core_relo *core_relo, int insn_idx)
{
@@ -5598,7 +5664,6 @@ static int bpf_core_resolve_relo(struct bpf_program *prog,
struct bpf_core_relo_res *targ_res)
{
struct bpf_core_spec specs_scratch[3] = {};
- const void *type_key = u32_as_hash_key(relo->type_id);
struct bpf_core_cand_list *cands = NULL;
const char *prog_name = prog->name;
const struct btf_type *local_type;
@@ -5615,7 +5680,7 @@ static int bpf_core_resolve_relo(struct bpf_program *prog,
return -EINVAL;
if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
- !hashmap__find(cand_cache, type_key, (void **)&cands)) {
+ !hashmap__find(cand_cache, local_id, &cands)) {
cands = bpf_core_find_cands(prog->obj, local_btf, local_id);
if (IS_ERR(cands)) {
pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n",
@@ -5623,7 +5688,7 @@ static int bpf_core_resolve_relo(struct bpf_program *prog,
local_name, PTR_ERR(cands));
return PTR_ERR(cands);
}
- err = hashmap__set(cand_cache, type_key, cands, NULL, NULL);
+ err = hashmap__set(cand_cache, local_id, cands, NULL, NULL);
if (err) {
bpf_core_free_cands(cands);
return err;
@@ -5746,7 +5811,7 @@ out:
if (!IS_ERR_OR_NULL(cand_cache)) {
hashmap__for_each_entry(cand_cache, entry, i) {
- bpf_core_free_cands(entry->value);
+ bpf_core_free_cands(entry->pvalue);
}
hashmap__free(cand_cache);
}
@@ -6183,7 +6248,8 @@ bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
* prog; each main prog can have a different set of
* subprograms appended (potentially in different order as
* well), so position of any subprog can be different for
- * different main programs */
+ * different main programs
+ */
insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1;
pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n",
@@ -7221,7 +7287,7 @@ static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf,
err = err ? : bpf_object__check_endianness(obj);
err = err ? : bpf_object__elf_collect(obj);
err = err ? : bpf_object__collect_externs(obj);
- err = err ? : bpf_object__finalize_btf(obj);
+ err = err ? : bpf_object_fixup_btf(obj);
err = err ? : bpf_object__init_maps(obj, opts);
err = err ? : bpf_object_init_progs(obj, opts);
err = err ? : bpf_object__collect_relos(obj);
@@ -10941,7 +11007,7 @@ struct bpf_link *bpf_program__attach_usdt(const struct bpf_program *prog,
usdt_cookie = OPTS_GET(opts, usdt_cookie, 0);
link = usdt_manager_attach_usdt(obj->usdt_man, prog, pid, binary_path,
- usdt_provider, usdt_name, usdt_cookie);
+ usdt_provider, usdt_name, usdt_cookie);
err = libbpf_get_error(link);
if (err)
return libbpf_err_ptr(err);
@@ -12250,7 +12316,7 @@ int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s)
btf = bpf_object__btf(s->obj);
if (!btf) {
pr_warn("subskeletons require BTF at runtime (object %s)\n",
- bpf_object__name(s->obj));
+ bpf_object__name(s->obj));
return libbpf_err(-errno);
}
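The map_is_mmapable() check added above keys off BTF VAR linkage, which btf_fixup_datasec() now downgrades to static for symbols with hidden visibility. A hedged BPF-side illustration (variable names are made up; __hidden is the visibility("hidden") attribute from bpf_helpers.h):

	#include <bpf/bpf_helpers.h>

	/* global and visible: VAR keeps non-static linkage, so the .bss/.data
	 * map stays BPF_F_MMAPABLE and the skeleton exposes the variable */
	int exposed_counter;

	/* STV_HIDDEN: btf_fixup_datasec() rewrites this VAR to BTF_VAR_STATIC;
	 * if no visible global VAR remains in the DATASEC, its map is no
	 * longer flagged BPF_F_MMAPABLE */
	__hidden int internal_counter;

	char LICENSE[] SEC("license") = "GPL";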
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index c1d6aa7c82b6..71bf5691a689 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -367,10 +367,14 @@ LIBBPF_1.0.0 {
libbpf_bpf_map_type_str;
libbpf_bpf_prog_type_str;
perf_buffer__buffer;
-};
+} LIBBPF_0.8.0;
LIBBPF_1.1.0 {
global:
+ bpf_btf_get_fd_by_id_opts;
+ bpf_link_get_fd_by_id_opts;
+ bpf_map_get_fd_by_id_opts;
+ bpf_prog_get_fd_by_id_opts;
user_ring_buffer__discard;
user_ring_buffer__free;
user_ring_buffer__new;
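The `} LIBBPF_0.8.0;` change above restores the inheritance chain between version nodes: previously LIBBPF_1.0.0 closed with a bare `};`, detaching it from the older nodes. In ld version-script syntax, the token after the closing brace names the parent node, e.g. (symbol name illustrative):

	LIBBPF_1.1.0 {
		global:
			new_symbol;
	} LIBBPF_1.0.0;		/* token after '}' chains this node to its predecessor */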
diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
index d504d96adc83..b44fcbb4b42e 100644
--- a/tools/lib/bpf/libbpf_probes.c
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -221,6 +221,7 @@ static int probe_map_create(enum bpf_map_type map_type)
case BPF_MAP_TYPE_SK_STORAGE:
case BPF_MAP_TYPE_INODE_STORAGE:
case BPF_MAP_TYPE_TASK_STORAGE:
+ case BPF_MAP_TYPE_CGRP_STORAGE:
btf_key_type_id = 1;
btf_value_type_id = 3;
value_size = 8;
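With BPF_MAP_TYPE_CGRP_STORAGE wired into probe_map_create() alongside the other local-storage types, feature probing works the usual way. A short sketch using the existing probe API (error handling abbreviated):

	#include <bpf/libbpf.h>

	/* returns 1 if the running kernel can create the map type,
	 * 0 if it cannot, or a negative errno if probing itself failed */
	int ret = libbpf_probe_bpf_map_type(BPF_MAP_TYPE_CGRP_STORAGE, NULL);
	if (ret == 1)
		/* cgroup local storage maps are available */;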
diff --git a/tools/lib/bpf/ringbuf.c b/tools/lib/bpf/ringbuf.c
index 6af142953a94..47855af25f3b 100644
--- a/tools/lib/bpf/ringbuf.c
+++ b/tools/lib/bpf/ringbuf.c
@@ -128,7 +128,7 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
/* Map read-only producer page and data pages. We map twice as big
* data size to allow simple reading of samples that wrap around the
* end of a ring buffer. See kernel implementation for details.
- * */
+ */
mmap_sz = rb->page_size + 2 * (__u64)info.max_entries;
if (mmap_sz != (__u64)(size_t)mmap_sz) {
pr_warn("ringbuf: ring buffer size (%u) is too big\n", info.max_entries);
@@ -224,7 +224,7 @@ static inline int roundup_len(__u32 len)
return (len + 7) / 8 * 8;
}
-static int64_t ringbuf_process_ring(struct ring* r)
+static int64_t ringbuf_process_ring(struct ring *r)
{
int *len_ptr, len, err;
/* 64-bit to avoid overflow in case of extreme application behavior */
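The comment fixed above refers to the data pages being mapped twice, back to back. A hedged sketch of why that simplifies consumption (field names follow the struct ring layout in this file, abbreviated):

	/* a sample whose payload crosses the end of the ring can be read
	 * with one linear access instead of two split copies:
	 *
	 *	len_ptr = r->data + (cons_pos & r->mask);
	 *	sample  = (void *)len_ptr + BPF_RINGBUF_HDR_SZ;
	 *
	 * 'sample + len' may run past r->mask straight into the second
	 * mapping of the same pages, so no wraparound handling is needed
	 */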
diff --git a/tools/lib/bpf/strset.c b/tools/lib/bpf/strset.c
index ea655318153f..2464bcbd04e0 100644
--- a/tools/lib/bpf/strset.c
+++ b/tools/lib/bpf/strset.c
@@ -19,19 +19,19 @@ struct strset {
struct hashmap *strs_hash;
};
-static size_t strset_hash_fn(const void *key, void *ctx)
+static size_t strset_hash_fn(long key, void *ctx)
{
const struct strset *s = ctx;
- const char *str = s->strs_data + (long)key;
+ const char *str = s->strs_data + key;
return str_hash(str);
}
-static bool strset_equal_fn(const void *key1, const void *key2, void *ctx)
+static bool strset_equal_fn(long key1, long key2, void *ctx)
{
const struct strset *s = ctx;
- const char *str1 = s->strs_data + (long)key1;
- const char *str2 = s->strs_data + (long)key2;
+ const char *str1 = s->strs_data + key1;
+ const char *str2 = s->strs_data + key2;
return strcmp(str1, str2) == 0;
}
@@ -67,7 +67,7 @@ struct strset *strset__new(size_t max_data_sz, const char *init_data, size_t ini
/* hashmap__add() returns EEXIST if string with the same
* content already is in the hash map
*/
- err = hashmap__add(hash, (void *)off, (void *)off);
+ err = hashmap__add(hash, off, off);
if (err == -EEXIST)
continue; /* duplicate */
if (err)
@@ -127,7 +127,7 @@ int strset__find_str(struct strset *set, const char *s)
new_off = set->strs_data_len;
memcpy(p, s, len);
- if (hashmap__find(set->strs_hash, (void *)new_off, (void **)&old_off))
+ if (hashmap__find(set->strs_hash, new_off, &old_off))
return old_off;
return -ENOENT;
@@ -165,8 +165,8 @@ int strset__add_str(struct strset *set, const char *s)
* contents doesn't exist already (HASHMAP_ADD strategy). If such
* string exists, we'll get its offset in old_off (that's old_key).
*/
- err = hashmap__insert(set->strs_hash, (void *)new_off, (void *)new_off,
- HASHMAP_ADD, (const void **)&old_off, NULL);
+ err = hashmap__insert(set->strs_hash, new_off, new_off,
+ HASHMAP_ADD, &old_off, NULL);
if (err == -EEXIST)
return old_off; /* duplicated string, return existing offset */
if (err)
diff --git a/tools/lib/bpf/usdt.c b/tools/lib/bpf/usdt.c
index e83b497c2245..b8daae265f99 100644
--- a/tools/lib/bpf/usdt.c
+++ b/tools/lib/bpf/usdt.c
@@ -873,31 +873,27 @@ static void bpf_link_usdt_dealloc(struct bpf_link *link)
free(usdt_link);
}
-static size_t specs_hash_fn(const void *key, void *ctx)
+static size_t specs_hash_fn(long key, void *ctx)
{
- const char *s = key;
-
- return str_hash(s);
+ return str_hash((char *)key);
}
-static bool specs_equal_fn(const void *key1, const void *key2, void *ctx)
+static bool specs_equal_fn(long key1, long key2, void *ctx)
{
- const char *s1 = key1;
- const char *s2 = key2;
-
- return strcmp(s1, s2) == 0;
+ return strcmp((char *)key1, (char *)key2) == 0;
}
static int allocate_spec_id(struct usdt_manager *man, struct hashmap *specs_hash,
struct bpf_link_usdt *link, struct usdt_target *target,
int *spec_id, bool *is_new)
{
- void *tmp;
+ long tmp;
+ void *new_ids;
int err;
/* check if we already allocated spec ID for this spec string */
if (hashmap__find(specs_hash, target->spec_str, &tmp)) {
- *spec_id = (long)tmp;
+ *spec_id = tmp;
*is_new = false;
return 0;
}
@@ -905,17 +901,17 @@ static int allocate_spec_id(struct usdt_manager *man, struct hashmap *specs_hash
/* otherwise it's a new ID that needs to be set up in specs map and
* returned back to usdt_manager when USDT link is detached
*/
- tmp = libbpf_reallocarray(link->spec_ids, link->spec_cnt + 1, sizeof(*link->spec_ids));
- if (!tmp)
+ new_ids = libbpf_reallocarray(link->spec_ids, link->spec_cnt + 1, sizeof(*link->spec_ids));
+ if (!new_ids)
return -ENOMEM;
- link->spec_ids = tmp;
+ link->spec_ids = new_ids;
/* get next free spec ID, giving preference to free list, if not empty */
if (man->free_spec_cnt) {
*spec_id = man->free_spec_ids[man->free_spec_cnt - 1];
/* cache spec ID for current spec string for future lookups */
- err = hashmap__add(specs_hash, target->spec_str, (void *)(long)*spec_id);
+ err = hashmap__add(specs_hash, target->spec_str, *spec_id);
if (err)
return err;
@@ -928,7 +924,7 @@ static int allocate_spec_id(struct usdt_manager *man, struct hashmap *specs_hash
*spec_id = man->next_free_spec_id;
/* cache spec ID for current spec string for future lookups */
- err = hashmap__add(specs_hash, target->spec_str, (void *)(long)*spec_id);
+ err = hashmap__add(specs_hash, target->spec_str, *spec_id);
if (err)
return err;
@@ -1225,26 +1221,24 @@ static int calc_pt_regs_off(const char *reg_name)
static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
{
- char *reg_name = NULL;
+ char reg_name[16];
int arg_sz, len, reg_off;
long off;
- if (sscanf(arg_str, " %d @ %ld ( %%%m[^)] ) %n", &arg_sz, &off, &reg_name, &len) == 3) {
+ if (sscanf(arg_str, " %d @ %ld ( %%%15[^)] ) %n", &arg_sz, &off, reg_name, &len) == 3) {
/* Memory dereference case, e.g., -4@-20(%rbp) */
arg->arg_type = USDT_ARG_REG_DEREF;
arg->val_off = off;
reg_off = calc_pt_regs_off(reg_name);
- free(reg_name);
if (reg_off < 0)
return reg_off;
arg->reg_off = reg_off;
- } else if (sscanf(arg_str, " %d @ %%%ms %n", &arg_sz, &reg_name, &len) == 2) {
+ } else if (sscanf(arg_str, " %d @ %%%15s %n", &arg_sz, reg_name, &len) == 2) {
/* Register read case, e.g., -4@%eax */
arg->arg_type = USDT_ARG_REG;
arg->val_off = 0;
reg_off = calc_pt_regs_off(reg_name);
- free(reg_name);
if (reg_off < 0)
return reg_off;
arg->reg_off = reg_off;
@@ -1348,25 +1342,23 @@ static int calc_pt_regs_off(const char *reg_name)
static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
{
- char *reg_name = NULL;
+ char reg_name[16];
int arg_sz, len, reg_off;
long off;
- if (sscanf(arg_str, " %d @ \[ %m[a-z0-9], %ld ] %n", &arg_sz, &reg_name, &off, &len) == 3) {
+ if (sscanf(arg_str, " %d @ \[ %15[a-z0-9], %ld ] %n", &arg_sz, reg_name, &off, &len) == 3) {
/* Memory dereference case, e.g., -4@[sp, 96] */
arg->arg_type = USDT_ARG_REG_DEREF;
arg->val_off = off;
reg_off = calc_pt_regs_off(reg_name);
- free(reg_name);
if (reg_off < 0)
return reg_off;
arg->reg_off = reg_off;
- } else if (sscanf(arg_str, " %d @ \[ %m[a-z0-9] ] %n", &arg_sz, &reg_name, &len) == 2) {
+ } else if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] ] %n", &arg_sz, reg_name, &len) == 2) {
/* Memory dereference case, e.g., -4@[sp] */
arg->arg_type = USDT_ARG_REG_DEREF;
arg->val_off = 0;
reg_off = calc_pt_regs_off(reg_name);
- free(reg_name);
if (reg_off < 0)
return reg_off;
arg->reg_off = reg_off;
@@ -1375,12 +1367,11 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
arg->arg_type = USDT_ARG_CONST;
arg->val_off = off;
arg->reg_off = 0;
- } else if (sscanf(arg_str, " %d @ %m[a-z0-9] %n", &arg_sz, &reg_name, &len) == 2) {
+ } else if (sscanf(arg_str, " %d @ %15[a-z0-9] %n", &arg_sz, reg_name, &len) == 2) {
/* Register read case, e.g., -8@x4 */
arg->arg_type = USDT_ARG_REG;
arg->val_off = 0;
reg_off = calc_pt_regs_off(reg_name);
- free(reg_name);
if (reg_off < 0)
return reg_off;
arg->reg_off = reg_off;
@@ -1459,16 +1450,15 @@ static int calc_pt_regs_off(const char *reg_name)
static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
{
- char *reg_name = NULL;
+ char reg_name[16];
int arg_sz, len, reg_off;
long off;
- if (sscanf(arg_str, " %d @ %ld ( %m[a-z0-9] ) %n", &arg_sz, &off, &reg_name, &len) == 3) {
+ if (sscanf(arg_str, " %d @ %ld ( %15[a-z0-9] ) %n", &arg_sz, &off, reg_name, &len) == 3) {
/* Memory dereference case, e.g., -8@-88(s0) */
arg->arg_type = USDT_ARG_REG_DEREF;
arg->val_off = off;
reg_off = calc_pt_regs_off(reg_name);
- free(reg_name);
if (reg_off < 0)
return reg_off;
arg->reg_off = reg_off;
@@ -1477,12 +1467,11 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
arg->arg_type = USDT_ARG_CONST;
arg->val_off = off;
arg->reg_off = 0;
- } else if (sscanf(arg_str, " %d @ %m[a-z0-9] %n", &arg_sz, &reg_name, &len) == 2) {
+ } else if (sscanf(arg_str, " %d @ %15[a-z0-9] %n", &arg_sz, reg_name, &len) == 2) {
/* Register read case, e.g., -8@a1 */
arg->arg_type = USDT_ARG_REG;
arg->val_off = 0;
reg_off = calc_pt_regs_off(reg_name);
- free(reg_name);
if (reg_off < 0)
return reg_off;
arg->reg_off = reg_off;
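The parse_usdt_arg() rewrites above drop the GNU %m modifier, which mallocs a buffer that must be freed on every path, in favor of a fixed-size buffer with an explicit field width. A standalone sketch of the pattern (the sample input is made up):

	#include <stdio.h>

	int main(void)
	{
		char reg_name[16];	/* fixed buffer replaces %m allocation */
		int arg_sz, len;
		long off;

		/* "%15[^)]" reads at most 15 chars plus the NUL; nothing to free */
		if (sscanf("-4@-20(%rbp)", " %d @ %ld ( %%%15[^)] ) %n",
			   &arg_sz, &off, reg_name, &len) == 3)
			printf("sz=%d off=%ld reg=%s\n", arg_sz, off, reg_name);
		return 0;
	}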
diff --git a/tools/perf/tests/expr.c b/tools/perf/tests/expr.c
index 6512f5e22045..c598f95aebf3 100644
--- a/tools/perf/tests/expr.c
+++ b/tools/perf/tests/expr.c
@@ -130,12 +130,9 @@ static int test__expr(struct test_suite *t __maybe_unused, int subtest __maybe_u
expr__find_ids("FOO + BAR + BAZ + BOZO", "FOO",
ctx) == 0);
TEST_ASSERT_VAL("find ids", hashmap__size(ctx->ids) == 3);
- TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "BAR",
- (void **)&val_ptr));
- TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "BAZ",
- (void **)&val_ptr));
- TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "BOZO",
- (void **)&val_ptr));
+ TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "BAR", &val_ptr));
+ TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "BAZ", &val_ptr));
+ TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "BOZO", &val_ptr));
expr__ctx_clear(ctx);
ctx->sctx.runtime = 3;
@@ -143,20 +140,16 @@ static int test__expr(struct test_suite *t __maybe_unused, int subtest __maybe_u
expr__find_ids("EVENT1\\,param\\=?@ + EVENT2\\,param\\=?@",
NULL, ctx) == 0);
TEST_ASSERT_VAL("find ids", hashmap__size(ctx->ids) == 2);
- TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "EVENT1,param=3@",
- (void **)&val_ptr));
- TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "EVENT2,param=3@",
- (void **)&val_ptr));
+ TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "EVENT1,param=3@", &val_ptr));
+ TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "EVENT2,param=3@", &val_ptr));
expr__ctx_clear(ctx);
TEST_ASSERT_VAL("find ids",
expr__find_ids("dash\\-event1 - dash\\-event2",
NULL, ctx) == 0);
TEST_ASSERT_VAL("find ids", hashmap__size(ctx->ids) == 2);
- TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "dash-event1",
- (void **)&val_ptr));
- TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "dash-event2",
- (void **)&val_ptr));
+ TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "dash-event1", &val_ptr));
+ TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "dash-event2", &val_ptr));
/* Only EVENT1 or EVENT2 need be measured depending on the value of smt_on. */
{
@@ -174,7 +167,7 @@ static int test__expr(struct test_suite *t __maybe_unused, int subtest __maybe_u
TEST_ASSERT_VAL("find ids", hashmap__size(ctx->ids) == 1);
TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids,
smton ? "EVENT1" : "EVENT2",
- (void **)&val_ptr));
+ &val_ptr));
expr__ctx_clear(ctx);
TEST_ASSERT_VAL("find ids",
@@ -183,7 +176,7 @@ static int test__expr(struct test_suite *t __maybe_unused, int subtest __maybe_u
TEST_ASSERT_VAL("find ids", hashmap__size(ctx->ids) == 1);
TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids,
corewide ? "EVENT1" : "EVENT2",
- (void **)&val_ptr));
+ &val_ptr));
}
/* The expression is a constant 1.0 without needing to evaluate EVENT1. */
@@ -220,8 +213,7 @@ static int test__expr(struct test_suite *t __maybe_unused, int subtest __maybe_u
expr__find_ids("source_count(EVENT1)",
NULL, ctx) == 0);
TEST_ASSERT_VAL("source count", hashmap__size(ctx->ids) == 1);
- TEST_ASSERT_VAL("source count", hashmap__find(ctx->ids, "EVENT1",
- (void **)&val_ptr));
+ TEST_ASSERT_VAL("source count", hashmap__find(ctx->ids, "EVENT1", &val_ptr));
expr__ctx_free(ctx);
diff --git a/tools/perf/tests/pmu-events.c b/tools/perf/tests/pmu-events.c
index 097e05c796ab..3c2ee55e75c7 100644
--- a/tools/perf/tests/pmu-events.c
+++ b/tools/perf/tests/pmu-events.c
@@ -986,10 +986,10 @@ static int metric_parse_fake(const char *str)
*/
i = 1;
hashmap__for_each_entry(ctx->ids, cur, bkt)
- expr__add_id_val(ctx, strdup(cur->key), i++);
+ expr__add_id_val(ctx, strdup(cur->pkey), i++);
hashmap__for_each_entry(ctx->ids, cur, bkt) {
- if (check_parse_fake(cur->key)) {
+ if (check_parse_fake(cur->pkey)) {
pr_err("check_parse_fake failed\n");
goto out;
}
@@ -1003,7 +1003,7 @@ static int metric_parse_fake(const char *str)
*/
i = 1024;
hashmap__for_each_entry(ctx->ids, cur, bkt)
- expr__add_id_val(ctx, strdup(cur->key), i--);
+ expr__add_id_val(ctx, strdup(cur->pkey), i--);
if (expr__parse(&result, ctx, str)) {
pr_err("expr__parse failed\n");
ret = -1;
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index f4adeccdbbcb..a5dbd71cb9ab 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -318,7 +318,7 @@ static void bpf_program_hash_free(void)
return;
hashmap__for_each_entry(bpf_program_hash, cur, bkt)
- clear_prog_priv(cur->key, cur->value);
+ clear_prog_priv(cur->pkey, cur->pvalue);
hashmap__free(bpf_program_hash);
bpf_program_hash = NULL;
@@ -339,13 +339,12 @@ void bpf__clear(void)
bpf_map_hash_free();
}
-static size_t ptr_hash(const void *__key, void *ctx __maybe_unused)
+static size_t ptr_hash(const long __key, void *ctx __maybe_unused)
{
- return (size_t) __key;
+ return __key;
}
-static bool ptr_equal(const void *key1, const void *key2,
- void *ctx __maybe_unused)
+static bool ptr_equal(long key1, long key2, void *ctx __maybe_unused)
{
return key1 == key2;
}
@@ -1185,7 +1184,7 @@ static void bpf_map_hash_free(void)
return;
hashmap__for_each_entry(bpf_map_hash, cur, bkt)
- bpf_map_priv__clear(cur->key, cur->value);
+ bpf_map_priv__clear(cur->pkey, cur->pvalue);
hashmap__free(bpf_map_hash);
bpf_map_hash = NULL;
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 76605fde3507..9e8a1294c981 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -3123,7 +3123,7 @@ void evsel__zero_per_pkg(struct evsel *evsel)
if (evsel->per_pkg_mask) {
hashmap__for_each_entry(evsel->per_pkg_mask, cur, bkt)
- free((char *)cur->key);
+ free((void *)cur->pkey);
hashmap__clear(evsel->per_pkg_mask);
}
diff --git a/tools/perf/util/expr.c b/tools/perf/util/expr.c
index aaacf514dc09..2f05ecdcfe9a 100644
--- a/tools/perf/util/expr.c
+++ b/tools/perf/util/expr.c
@@ -46,7 +46,7 @@ struct expr_id_data {
} kind;
};
-static size_t key_hash(const void *key, void *ctx __maybe_unused)
+static size_t key_hash(long key, void *ctx __maybe_unused)
{
const char *str = (const char *)key;
size_t hash = 0;
@@ -59,8 +59,7 @@ static size_t key_hash(const void *key, void *ctx __maybe_unused)
return hash;
}
-static bool key_equal(const void *key1, const void *key2,
- void *ctx __maybe_unused)
+static bool key_equal(long key1, long key2, void *ctx __maybe_unused)
{
return !strcmp((const char *)key1, (const char *)key2);
}
@@ -84,8 +83,8 @@ void ids__free(struct hashmap *ids)
return;
hashmap__for_each_entry(ids, cur, bkt) {
- free((char *)cur->key);
- free(cur->value);
+ free((void *)cur->pkey);
+ free((void *)cur->pvalue);
}
hashmap__free(ids);
@@ -97,8 +96,7 @@ int ids__insert(struct hashmap *ids, const char *id)
char *old_key = NULL;
int ret;
- ret = hashmap__set(ids, id, data_ptr,
- (const void **)&old_key, (void **)&old_data);
+ ret = hashmap__set(ids, id, data_ptr, &old_key, &old_data);
if (ret)
free(data_ptr);
free(old_key);
@@ -127,8 +125,7 @@ struct hashmap *ids__union(struct hashmap *ids1, struct hashmap *ids2)
ids2 = tmp;
}
hashmap__for_each_entry(ids2, cur, bkt) {
- ret = hashmap__set(ids1, cur->key, cur->value,
- (const void **)&old_key, (void **)&old_data);
+ ret = hashmap__set(ids1, cur->key, cur->value, &old_key, &old_data);
free(old_key);
free(old_data);
@@ -169,8 +166,7 @@ int expr__add_id_val_source_count(struct expr_parse_ctx *ctx, const char *id,
data_ptr->val.source_count = source_count;
data_ptr->kind = EXPR_ID_DATA__VALUE;
- ret = hashmap__set(ctx->ids, id, data_ptr,
- (const void **)&old_key, (void **)&old_data);
+ ret = hashmap__set(ctx->ids, id, data_ptr, &old_key, &old_data);
if (ret)
free(data_ptr);
free(old_key);
@@ -205,8 +201,7 @@ int expr__add_ref(struct expr_parse_ctx *ctx, struct metric_ref *ref)
data_ptr->ref.metric_expr = ref->metric_expr;
data_ptr->kind = EXPR_ID_DATA__REF;
- ret = hashmap__set(ctx->ids, name, data_ptr,
- (const void **)&old_key, (void **)&old_data);
+ ret = hashmap__set(ctx->ids, name, data_ptr, &old_key, &old_data);
if (ret)
free(data_ptr);
@@ -221,7 +216,7 @@ int expr__add_ref(struct expr_parse_ctx *ctx, struct metric_ref *ref)
int expr__get_id(struct expr_parse_ctx *ctx, const char *id,
struct expr_id_data **data)
{
- return hashmap__find(ctx->ids, id, (void **)data) ? 0 : -1;
+ return hashmap__find(ctx->ids, id, data) ? 0 : -1;
}
bool expr__subset_of_ids(struct expr_parse_ctx *haystack,
@@ -232,7 +227,7 @@ bool expr__subset_of_ids(struct expr_parse_ctx *haystack,
struct expr_id_data *data;
hashmap__for_each_entry(needles->ids, cur, bkt) {
- if (expr__get_id(haystack, cur->key, &data))
+ if (expr__get_id(haystack, cur->pkey, &data))
return false;
}
return true;
@@ -282,8 +277,7 @@ void expr__del_id(struct expr_parse_ctx *ctx, const char *id)
struct expr_id_data *old_val = NULL;
char *old_key = NULL;
- hashmap__delete(ctx->ids, id,
- (const void **)&old_key, (void **)&old_val);
+ hashmap__delete(ctx->ids, id, &old_key, &old_val);
free(old_key);
free(old_val);
}
@@ -314,8 +308,8 @@ void expr__ctx_clear(struct expr_parse_ctx *ctx)
size_t bkt;
hashmap__for_each_entry(ctx->ids, cur, bkt) {
- free((char *)cur->key);
- free(cur->value);
+ free((void *)cur->pkey);
+ free(cur->pvalue);
}
hashmap__clear(ctx->ids);
}
@@ -330,8 +324,8 @@ void expr__ctx_free(struct expr_parse_ctx *ctx)
free(ctx->sctx.user_requested_cpu_list);
hashmap__for_each_entry(ctx->ids, cur, bkt) {
- free((char *)cur->key);
- free(cur->value);
+ free((void *)cur->pkey);
+ free(cur->pvalue);
}
hashmap__free(ctx->ids);
free(ctx);
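The expr.c conversions above rely on hashmap__set() handing the displaced entry back through its out-parameters; the new macros remove the casts but leave the ownership contract intact. A hedged fragment (assumes LP64; ctx, id, and data_ptr are as used in expr.c):

	char *old_key = NULL;
	struct expr_id_data *old_data = NULL;
	int ret;

	/* HASHMAP_SET replaces any existing entry; the previous key and
	 * value come back via the out-params so the caller can free them */
	ret = hashmap__set(ctx->ids, id, data_ptr, &old_key, &old_data);
	free(old_key);
	free(old_data);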
diff --git a/tools/perf/util/hashmap.c b/tools/perf/util/hashmap.c
index aeb09c288716..140ee4055676 100644
--- a/tools/perf/util/hashmap.c
+++ b/tools/perf/util/hashmap.c
@@ -128,7 +128,7 @@ static int hashmap_grow(struct hashmap *map)
}
static bool hashmap_find_entry(const struct hashmap *map,
- const void *key, size_t hash,
+ const long key, size_t hash,
struct hashmap_entry ***pprev,
struct hashmap_entry **entry)
{
@@ -151,18 +151,18 @@ static bool hashmap_find_entry(const struct hashmap *map,
return false;
}
-int hashmap__insert(struct hashmap *map, const void *key, void *value,
- enum hashmap_insert_strategy strategy,
- const void **old_key, void **old_value)
+int hashmap_insert(struct hashmap *map, long key, long value,
+ enum hashmap_insert_strategy strategy,
+ long *old_key, long *old_value)
{
struct hashmap_entry *entry;
size_t h;
int err;
if (old_key)
- *old_key = NULL;
+ *old_key = 0;
if (old_value)
- *old_value = NULL;
+ *old_value = 0;
h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
if (strategy != HASHMAP_APPEND &&
@@ -203,7 +203,7 @@ int hashmap__insert(struct hashmap *map, const void *key, void *value,
return 0;
}
-bool hashmap__find(const struct hashmap *map, const void *key, void **value)
+bool hashmap_find(const struct hashmap *map, long key, long *value)
{
struct hashmap_entry *entry;
size_t h;
@@ -217,8 +217,8 @@ bool hashmap__find(const struct hashmap *map, const void *key, void **value)
return true;
}
-bool hashmap__delete(struct hashmap *map, const void *key,
- const void **old_key, void **old_value)
+bool hashmap_delete(struct hashmap *map, long key,
+ long *old_key, long *old_value)
{
struct hashmap_entry **pprev, *entry;
size_t h;
diff --git a/tools/perf/util/hashmap.h b/tools/perf/util/hashmap.h
index 10a4c4cd13cf..0a5bf1937a7c 100644
--- a/tools/perf/util/hashmap.h
+++ b/tools/perf/util/hashmap.h
@@ -40,12 +40,32 @@ static inline size_t str_hash(const char *s)
return h;
}
-typedef size_t (*hashmap_hash_fn)(const void *key, void *ctx);
-typedef bool (*hashmap_equal_fn)(const void *key1, const void *key2, void *ctx);
+typedef size_t (*hashmap_hash_fn)(long key, void *ctx);
+typedef bool (*hashmap_equal_fn)(long key1, long key2, void *ctx);
+/*
+ * The hashmap interface is polymorphic: keys and values can be either
+ * long-sized integers or pointers. This is achieved as follows:
+ * - interface functions that operate on keys and values are hidden
+ * behind auxiliary macros, e.g. hashmap_insert <-> hashmap__insert;
+ * - these auxiliary macros cast the key and value parameters as
+ * long or long *, so the user does not have to specify the casts explicitly;
+ * - for pointer parameters (e.g. old_key) the size of the pointed-to
+ * type is verified by hashmap_cast_ptr using _Static_assert;
+ * - when iterating using the hashmap__for_each_* forms,
+ * hashmap_entry->key should be used for integer keys and
+ * hashmap_entry->pkey should be used for pointer keys,
+ * and the same goes for values.
+ */
struct hashmap_entry {
- const void *key;
- void *value;
+ union {
+ long key;
+ const void *pkey;
+ };
+ union {
+ long value;
+ void *pvalue;
+ };
struct hashmap_entry *next;
};
@@ -102,6 +122,13 @@ enum hashmap_insert_strategy {
HASHMAP_APPEND,
};
+#define hashmap_cast_ptr(p) ({ \
+ _Static_assert((__builtin_constant_p((p)) ? (p) == NULL : 0) || \
+ sizeof(*(p)) == sizeof(long), \
+ #p " pointee should be a long-sized integer or a pointer"); \
+ (long *)(p); \
+})
+
/*
* hashmap__insert() adds key/value entry w/ various semantics, depending on
* provided strategy value. If a given key/value pair replaced already
@@ -109,42 +136,38 @@ enum hashmap_insert_strategy {
* through old_key and old_value to allow calling code do proper memory
* management.
*/
-int hashmap__insert(struct hashmap *map, const void *key, void *value,
- enum hashmap_insert_strategy strategy,
- const void **old_key, void **old_value);
+int hashmap_insert(struct hashmap *map, long key, long value,
+ enum hashmap_insert_strategy strategy,
+ long *old_key, long *old_value);
-static inline int hashmap__add(struct hashmap *map,
- const void *key, void *value)
-{
- return hashmap__insert(map, key, value, HASHMAP_ADD, NULL, NULL);
-}
+#define hashmap__insert(map, key, value, strategy, old_key, old_value) \
+ hashmap_insert((map), (long)(key), (long)(value), (strategy), \
+ hashmap_cast_ptr(old_key), \
+ hashmap_cast_ptr(old_value))
-static inline int hashmap__set(struct hashmap *map,
- const void *key, void *value,
- const void **old_key, void **old_value)
-{
- return hashmap__insert(map, key, value, HASHMAP_SET,
- old_key, old_value);
-}
+#define hashmap__add(map, key, value) \
+ hashmap__insert((map), (key), (value), HASHMAP_ADD, NULL, NULL)
-static inline int hashmap__update(struct hashmap *map,
- const void *key, void *value,
- const void **old_key, void **old_value)
-{
- return hashmap__insert(map, key, value, HASHMAP_UPDATE,
- old_key, old_value);
-}
+#define hashmap__set(map, key, value, old_key, old_value) \
+ hashmap__insert((map), (key), (value), HASHMAP_SET, (old_key), (old_value))
-static inline int hashmap__append(struct hashmap *map,
- const void *key, void *value)
-{
- return hashmap__insert(map, key, value, HASHMAP_APPEND, NULL, NULL);
-}
+#define hashmap__update(map, key, value, old_key, old_value) \
+ hashmap__insert((map), (key), (value), HASHMAP_UPDATE, (old_key), (old_value))
+
+#define hashmap__append(map, key, value) \
+ hashmap__insert((map), (key), (value), HASHMAP_APPEND, NULL, NULL)
+
+bool hashmap_delete(struct hashmap *map, long key, long *old_key, long *old_value);
+
+#define hashmap__delete(map, key, old_key, old_value) \
+ hashmap_delete((map), (long)(key), \
+ hashmap_cast_ptr(old_key), \
+ hashmap_cast_ptr(old_value))
-bool hashmap__delete(struct hashmap *map, const void *key,
- const void **old_key, void **old_value);
+bool hashmap_find(const struct hashmap *map, long key, long *value);
-bool hashmap__find(const struct hashmap *map, const void *key, void **value);
+#define hashmap__find(map, key, value) \
+ hashmap_find((map), (long)(key), hashmap_cast_ptr(value))
/*
* hashmap__for_each_entry - iterate over all entries in hashmap
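Per the polymorphic-interface comment above, iteration picks the union member matching the stored kind. A short sketch for a map holding string keys and integer values (illustrative):

	struct hashmap_entry *cur;
	size_t bkt;

	hashmap__for_each_entry(map, cur, bkt) {
		/* pointer keys read via ->pkey, integer values via ->value */
		printf("%s -> %ld\n", (const char *)cur->pkey, cur->value);
	}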
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index 4c98ac29ee13..6b3505b1b6ac 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -288,7 +288,7 @@ static int setup_metric_events(struct hashmap *ids,
* combined or shared groups, this metric may not care
* about this event.
*/
- if (hashmap__find(ids, metric_id, (void **)&val_ptr)) {
+ if (hashmap__find(ids, metric_id, &val_ptr)) {
metric_events[matched_events++] = ev;
if (matched_events >= ids_size)
@@ -764,7 +764,7 @@ static int metricgroup__build_event_string(struct strbuf *events,
#define RETURN_IF_NON_ZERO(x) do { if (x) return x; } while (0)
hashmap__for_each_entry(ctx->ids, cur, bkt) {
- const char *sep, *rsep, *id = cur->key;
+ const char *sep, *rsep, *id = cur->pkey;
enum perf_tool_event ev;
pr_debug("found event %s\n", id);
@@ -945,14 +945,14 @@ static int resolve_metric(struct list_head *metric_list,
hashmap__for_each_entry(root_metric->pctx->ids, cur, bkt) {
struct pmu_event pe;
- if (metricgroup__find_metric(cur->key, table, &pe)) {
+ if (metricgroup__find_metric(cur->pkey, table, &pe)) {
pending = realloc(pending,
(pending_cnt + 1) * sizeof(struct to_resolve));
if (!pending)
return -ENOMEM;
memcpy(&pending[pending_cnt].pe, &pe, sizeof(pe));
- pending[pending_cnt].key = cur->key;
+ pending[pending_cnt].key = cur->pkey;
pending_cnt++;
}
}
@@ -1433,7 +1433,7 @@ static int build_combined_expr_ctx(const struct list_head *metric_list,
list_for_each_entry(m, metric_list, nd) {
if (m->has_constraint && !m->modifier) {
hashmap__for_each_entry(m->pctx->ids, cur, bkt) {
- dup = strdup(cur->key);
+ dup = strdup(cur->pkey);
if (!dup) {
ret = -ENOMEM;
goto err_out;
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 07b29fe272c7..0bf71b02aa06 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -398,7 +398,7 @@ void perf_stat__collect_metric_expr(struct evlist *evsel_list)
i = 0;
hashmap__for_each_entry(ctx->ids, cur, bkt) {
- const char *metric_name = (const char *)cur->key;
+ const char *metric_name = cur->pkey;
found = false;
if (leader) {
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 8ec8bb4a9912..c0656f85bfa5 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -278,15 +278,14 @@ void evlist__save_aggr_prev_raw_counts(struct evlist *evlist)
}
}
-static size_t pkg_id_hash(const void *__key, void *ctx __maybe_unused)
+static size_t pkg_id_hash(long __key, void *ctx __maybe_unused)
{
uint64_t *key = (uint64_t *) __key;
return *key & 0xffffffff;
}
-static bool pkg_id_equal(const void *__key1, const void *__key2,
- void *ctx __maybe_unused)
+static bool pkg_id_equal(long __key1, long __key2, void *ctx __maybe_unused)
{
uint64_t *key1 = (uint64_t *) __key1;
uint64_t *key2 = (uint64_t *) __key2;
@@ -347,11 +346,11 @@ static int check_per_pkg(struct evsel *counter, struct perf_counts_values *vals,
return -ENOMEM;
*key = (uint64_t)d << 32 | s;
- if (hashmap__find(mask, (void *)key, NULL)) {
+ if (hashmap__find(mask, key, NULL)) {
*skip = true;
free(key);
} else
- ret = hashmap__add(mask, (void *)key, (void *)1);
+ ret = hashmap__add(mask, key, 1);
return ret;
}
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index f07aef7c592c..b57b091d8026 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -48,6 +48,7 @@ TARGETS += nci
TARGETS += net
TARGETS += net/af_unix
TARGETS += net/forwarding
+TARGETS += net/hsr
TARGETS += net/mptcp
TARGETS += net/openvswitch
TARGETS += netfilter
diff --git a/tools/testing/selftests/bpf/DENYLIST b/tools/testing/selftests/bpf/DENYLIST
index 939de574fc7f..f748f2c33b22 100644
--- a/tools/testing/selftests/bpf/DENYLIST
+++ b/tools/testing/selftests/bpf/DENYLIST
@@ -1,6 +1,7 @@
# TEMPORARY
+# Alphabetical order
get_stack_raw_tp # spams with kernel warnings until next bpf -> bpf-next merge
-stacktrace_build_id_nmi
stacktrace_build_id
+stacktrace_build_id_nmi
task_fd_query_rawtp
varlen
diff --git a/tools/testing/selftests/bpf/DENYLIST.aarch64 b/tools/testing/selftests/bpf/DENYLIST.aarch64
new file mode 100644
index 000000000000..8e77515d56f6
--- /dev/null
+++ b/tools/testing/selftests/bpf/DENYLIST.aarch64
@@ -0,0 +1,83 @@
+bloom_filter_map # libbpf: prog 'check_bloom': failed to attach: ERROR: strerror_r(-524)=22
+bpf_cookie/lsm
+bpf_cookie/multi_kprobe_attach_api
+bpf_cookie/multi_kprobe_link_api
+bpf_cookie/trampoline
+bpf_loop/check_callback_fn_stop # link unexpected error: -524
+bpf_loop/check_invalid_flags
+bpf_loop/check_nested_calls
+bpf_loop/check_non_constant_callback
+bpf_loop/check_nr_loops
+bpf_loop/check_null_callback_ctx
+bpf_loop/check_stack
+bpf_mod_race # bpf_mod_kfunc_race__attach unexpected error: -524 (errno 524)
+bpf_tcp_ca/dctcp_fallback
+btf_dump/btf_dump: var_data # find type id unexpected find type id: actual -2 < expected 0
+cgroup_hierarchical_stats # attach unexpected error: -524 (errno 524)
+d_path/basic # setup attach failed: -524
+deny_namespace # attach unexpected error: -524 (errno 524)
+fentry_fexit # fentry_attach unexpected error: -1 (errno 524)
+fentry_test # fentry_attach unexpected error: -1 (errno 524)
+fexit_sleep # fexit_attach fexit attach failed: -1
+fexit_stress # fexit attach unexpected fexit attach: actual -524 < expected 0
+fexit_test # fexit_attach unexpected error: -1 (errno 524)
+get_func_args_test # get_func_args_test__attach unexpected error: -524 (errno 524) (trampoline)
+get_func_ip_test # get_func_ip_test__attach unexpected error: -524 (errno 524) (trampoline)
+htab_update/reenter_update
+kfree_skb # attach fentry unexpected error: -524 (trampoline)
+kfunc_call/subprog # extern (var ksym) 'bpf_prog_active': not found in kernel BTF
+kfunc_call/subprog_lskel # skel unexpected error: -2
+kfunc_dynptr_param/dynptr_data_null # libbpf: prog 'dynptr_data_null': failed to attach: ERROR: strerror_r(-524)=22
+kprobe_multi_test/attach_api_addrs # bpf_program__attach_kprobe_multi_opts unexpected error: -95
+kprobe_multi_test/attach_api_pattern # bpf_program__attach_kprobe_multi_opts unexpected error: -95
+kprobe_multi_test/attach_api_syms # bpf_program__attach_kprobe_multi_opts unexpected error: -95
+kprobe_multi_test/bench_attach # bpf_program__attach_kprobe_multi_opts unexpected error: -95
+kprobe_multi_test/link_api_addrs # link_fd unexpected link_fd: actual -95 < expected 0
+kprobe_multi_test/link_api_syms # link_fd unexpected link_fd: actual -95 < expected 0
+kprobe_multi_test/skel_api # kprobe_multi__attach unexpected error: -524 (errno 524)
+ksyms_module/libbpf # 'bpf_testmod_ksym_percpu': not found in kernel BTF
+ksyms_module/lskel # test_ksyms_module_lskel__open_and_load unexpected error: -2
+libbpf_get_fd_by_id_opts # test_libbpf_get_fd_by_id_opts__attach unexpected error: -524 (errno 524)
+linked_list
+lookup_key # test_lookup_key__attach unexpected error: -524 (errno 524)
+lru_bug # lru_bug__attach unexpected error: -524 (errno 524)
+modify_return # modify_return__attach failed unexpected error: -524 (errno 524)
+module_attach # skel_attach skeleton attach failed: -524
+mptcp/base # run_test mptcp unexpected error: -524 (errno 524)
+netcnt # packets unexpected packets: actual 10001 != expected 10000
+rcu_read_lock # failed to attach: ERROR: strerror_r(-524)=22
+recursion # skel_attach unexpected error: -524 (errno 524)
+ringbuf # skel_attach skeleton attachment failed: -1
+setget_sockopt # attach_cgroup unexpected error: -524
+sk_storage_tracing # test_sk_storage_tracing__attach unexpected error: -524 (errno 524)
+skc_to_unix_sock # could not attach BPF object unexpected error: -524 (errno 524)
+socket_cookie # prog_attach unexpected error: -524
+stacktrace_build_id # compare_stack_ips stackmap vs. stack_amap err -1 errno 2
+task_local_storage/exit_creds # skel_attach unexpected error: -524 (errno 524)
+task_local_storage/recursion # skel_attach unexpected error: -524 (errno 524)
+test_bprm_opts # attach attach failed: -524
+test_ima # attach attach failed: -524
+test_local_storage # attach lsm attach failed: -524
+test_lsm # test_lsm_first_attach unexpected error: -524 (errno 524)
+test_overhead # attach_fentry unexpected error: -524
+timer # timer unexpected error: -524 (errno 524)
+timer_crash # timer_crash__attach unexpected error: -524 (errno 524)
+timer_mim # timer_mim unexpected error: -524 (errno 524)
+trace_printk # trace_printk__attach unexpected error: -1 (errno 524)
+trace_vprintk # trace_vprintk__attach unexpected error: -1 (errno 524)
+tracing_struct # tracing_struct__attach unexpected error: -524 (errno 524)
+trampoline_count # attach_prog unexpected error: -524
+unpriv_bpf_disabled # skel_attach unexpected error: -524 (errno 524)
+user_ringbuf/test_user_ringbuf_post_misaligned # misaligned_skel unexpected error: -524 (errno 524)
+user_ringbuf/test_user_ringbuf_post_producer_wrong_offset
+user_ringbuf/test_user_ringbuf_post_larger_than_ringbuf_sz
+user_ringbuf/test_user_ringbuf_basic # ringbuf_basic_skel unexpected error: -524 (errno 524)
+user_ringbuf/test_user_ringbuf_sample_full_ring_buffer
+user_ringbuf/test_user_ringbuf_post_alignment_autoadjust
+user_ringbuf/test_user_ringbuf_overfill
+user_ringbuf/test_user_ringbuf_discards_properly_ignored
+user_ringbuf/test_user_ringbuf_loop
+user_ringbuf/test_user_ringbuf_msg_protocol
+user_ringbuf/test_user_ringbuf_blocking_reserve
+verify_pkcs7_sig # test_verify_pkcs7_sig__attach unexpected error: -524 (errno 524)
+vmlinux # skel_attach skeleton attach failed: -524
diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x
index 17e074eb42b8..648a8a1b6b78 100644
--- a/tools/testing/selftests/bpf/DENYLIST.s390x
+++ b/tools/testing/selftests/bpf/DENYLIST.s390x
@@ -1,13 +1,20 @@
# TEMPORARY
+# Alphabetical order
atomics # attach(add): actual -524 <= expected 0 (trampoline)
-bpf_iter_setsockopt # JIT does not support calling kernel function (kfunc)
bloom_filter_map # failed to find kernel BTF type ID of '__x64_sys_getpgid': -3 (?)
-bpf_tcp_ca # JIT does not support calling kernel function (kfunc)
+bpf_cookie # failed to open_and_load program: -524 (trampoline)
+bpf_iter_setsockopt # JIT does not support calling kernel function (kfunc)
bpf_loop # attaches to __x64_sys_nanosleep
bpf_mod_race # BPF trampoline
bpf_nf # JIT does not support calling kernel function
+bpf_tcp_ca # JIT does not support calling kernel function (kfunc)
+cb_refs # expected error message unexpected error: -524 (trampoline)
+cgroup_hierarchical_stats # JIT does not support calling kernel function (kfunc)
+cgrp_kfunc # JIT does not support calling kernel function
+cgrp_local_storage # prog_attach unexpected error: -524 (trampoline)
core_read_macros # unknown func bpf_probe_read#4 (overlapping)
d_path # failed to auto-attach program 'prog_stat': -524 (trampoline)
+deny_namespace # failed to attach: ERROR: strerror_r(-524)=22 (trampoline)
dummy_st_ops # test_run unexpected error: -524 (errno 524) (trampoline)
fentry_fexit # fentry attach failed: -524 (trampoline)
fentry_test # fentry_first_attach unexpected error: -524 (trampoline)
@@ -18,19 +25,30 @@ fexit_test # fexit_first_attach unexpected error:
get_func_args_test # trampoline
get_func_ip_test # get_func_ip_test__attach unexpected error: -524 (trampoline)
get_stack_raw_tp # user_stack corrupted user stack (no backchain userspace)
+htab_update # failed to attach: ERROR: strerror_r(-524)=22 (trampoline)
kfree_skb # attach fentry unexpected error: -524 (trampoline)
kfunc_call # 'bpf_prog_active': not found in kernel BTF (?)
+kfunc_dynptr_param # JIT does not support calling kernel function (kfunc)
+kprobe_multi_test # relies on fentry
ksyms_module # test_ksyms_module__open_and_load unexpected error: -9 (?)
ksyms_module_libbpf # JIT does not support calling kernel function (kfunc)
ksyms_module_lskel # test_ksyms_module_lskel__open_and_load unexpected error: -9 (?)
+libbpf_get_fd_by_id_opts # failed to attach: ERROR: strerror_r(-524)=22 (trampoline)
+linked_list # JIT does not support calling kernel function (kfunc)
+lookup_key # JIT does not support calling kernel function (kfunc)
+lru_bug # prog 'printk': failed to auto-attach: -524
+map_kptr # failed to open_and_load program: -524 (trampoline)
modify_return # modify_return attach failed: -524 (trampoline)
module_attach # skel_attach skeleton attach failed: -524 (trampoline)
mptcp
-kprobe_multi_test # relies on fentry
netcnt # failed to load BPF skeleton 'netcnt_prog': -7 (?)
probe_user # check_kprobe_res wrong kprobe res from probe read (?)
+rcu_read_lock # failed to find kernel BTF type ID of '__x64_sys_getpgid': -3 (?)
recursion # skel_attach unexpected error: -524 (trampoline)
ringbuf # skel_load skeleton load failed (?)
+select_reuseport # intermittently fails on new s390x setup
+send_signal # intermittently fails to receive signal
+setget_sockopt # attach unexpected error: -524 (trampoline)
sk_assign # Can't read on server: Invalid argument (?)
sk_lookup # endianness problem
sk_storage_tracing # test_sk_storage_tracing__attach unexpected error: -524 (trampoline)
@@ -38,6 +56,7 @@ skc_to_unix_sock # could not attach BPF object unexpecte
socket_cookie # prog_attach unexpected error: -524 (trampoline)
stacktrace_build_id # compare_map_keys stackid_hmap vs. stackmap err -2 errno 2 (?)
tailcalls # tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls (?)
+task_kfunc # JIT does not support calling kernel function
task_local_storage # failed to auto-attach program 'trace_exit_creds': -524 (trampoline)
test_bpffs # bpffs test failed 255 (iterator)
test_bprm_opts # failed to auto-attach program 'secure_exec': -524 (trampoline)
@@ -52,26 +71,16 @@ timer_mim # failed to auto-attach program 'test1'
trace_ext # failed to auto-attach program 'test_pkt_md_access_new': -524 (trampoline)
trace_printk # trace_printk__load unexpected error: -2 (errno 2) (?)
trace_vprintk # trace_vprintk__open_and_load unexpected error: -9 (?)
+tracing_struct # failed to auto-attach: -524 (trampoline)
trampoline_count # prog 'prog1': failed to attach: ERROR: strerror_r(-524)=22 (trampoline)
+type_cast # JIT does not support calling kernel function
+unpriv_bpf_disabled # fentry
+user_ringbuf # failed to find kernel BTF type ID of '__s390x_sys_prctl': -3 (?)
verif_stats # trace_vprintk__open_and_load unexpected error: -9 (?)
+verify_pkcs7_sig # JIT does not support calling kernel function (kfunc)
vmlinux # failed to auto-attach program 'handle__fentry': -524 (trampoline)
xdp_adjust_tail # case-128 err 0 errno 28 retval 1 size 128 expect-size 3520 (?)
xdp_bonding # failed to auto-attach program 'trace_on_entry': -524 (trampoline)
xdp_bpf2bpf # failed to auto-attach program 'trace_on_entry': -524 (trampoline)
-map_kptr # failed to open_and_load program: -524 (trampoline)
-bpf_cookie # failed to open_and_load program: -524 (trampoline)
xdp_do_redirect # prog_run_max_size unexpected error: -22 (errno 22)
-send_signal # intermittently fails to receive signal
-select_reuseport # intermittently fails on new s390x setup
xdp_synproxy # JIT does not support calling kernel function (kfunc)
-unpriv_bpf_disabled # fentry
-lru_bug # prog 'printk': failed to auto-attach: -524
-setget_sockopt # attach unexpected error: -524 (trampoline)
-cb_refs # expected error message unexpected error: -524 (trampoline)
-cgroup_hierarchical_stats # JIT does not support calling kernel function (kfunc)
-htab_update # failed to attach: ERROR: strerror_r(-524)=22 (trampoline)
-tracing_struct # failed to auto-attach: -524 (trampoline)
-user_ringbuf # failed to find kernel BTF type ID of '__s390x_sys_prctl': -3 (?)
-lookup_key # JIT does not support calling kernel function (kfunc)
-verify_pkcs7_sig # JIT does not support calling kernel function (kfunc)
-kfunc_dynptr_param # JIT does not support calling kernel function (kfunc)
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index e6cf21fad69f..6a0f043dc410 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -182,14 +182,15 @@ endif
$(OUTPUT)/liburandom_read.so: urandom_read_lib1.c urandom_read_lib2.c
$(call msg,LIB,,$@)
$(Q)$(CLANG) $(filter-out -static,$(CFLAGS) $(LDFLAGS)) $^ $(LDLIBS) \
- -fuse-ld=$(LLD) -Wl,-znoseparate-code -fPIC -shared -o $@
+ -fuse-ld=$(LLD) -Wl,-znoseparate-code -Wl,--build-id=sha1 \
+ -fPIC -shared -o $@
$(OUTPUT)/urandom_read: urandom_read.c urandom_read_aux.c $(OUTPUT)/liburandom_read.so
$(call msg,BINARY,,$@)
$(Q)$(CLANG) $(filter-out -static,$(CFLAGS) $(LDFLAGS)) $(filter %.c,$^) \
liburandom_read.so $(LDLIBS) \
- -fuse-ld=$(LLD) -Wl,-znoseparate-code \
- -Wl,-rpath=. -Wl,--build-id=sha1 -o $@
+ -fuse-ld=$(LLD) -Wl,-znoseparate-code -Wl,--build-id=sha1 \
+ -Wl,-rpath=. -o $@
$(OUTPUT)/sign-file: ../../../../scripts/sign-file.c
$(call msg,SIGN-FILE,,$@)
@@ -200,7 +201,7 @@ $(OUTPUT)/sign-file: ../../../../scripts/sign-file.c
$(OUTPUT)/bpf_testmod.ko: $(VMLINUX_BTF) $(wildcard bpf_testmod/Makefile bpf_testmod/*.[ch])
$(call msg,MOD,,$@)
$(Q)$(RM) bpf_testmod/bpf_testmod.ko # force re-compilation
- $(Q)$(MAKE) $(submake_extras) -C bpf_testmod
+ $(Q)$(MAKE) $(submake_extras) RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) -C bpf_testmod
$(Q)cp bpf_testmod/bpf_testmod.ko $@
DEFAULT_BPFTOOL := $(HOST_SCRATCH_DIR)/sbin/bpftool
@@ -309,9 +310,9 @@ $(RESOLVE_BTFIDS): $(HOST_BPFOBJ) | $(HOST_BUILD_DIR)/resolve_btfids \
# Use '-idirafter': Don't interfere with include mechanics except where the
# build would have failed anyways.
define get_sys_includes
-$(shell $(1) -v -E - </dev/null 2>&1 \
+$(shell $(1) $(2) -v -E - </dev/null 2>&1 \
| sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') \
-$(shell $(1) -dM -E - </dev/null | grep '__riscv_xlen ' | awk '{printf("-D__riscv_xlen=%d -D__BITS_PER_LONG=%d", $$3, $$3)}')
+$(shell $(1) $(2) -dM -E - </dev/null | grep '__riscv_xlen ' | awk '{printf("-D__riscv_xlen=%d -D__BITS_PER_LONG=%d", $$3, $$3)}')
endef
# Determine target endianness.
@@ -319,7 +320,11 @@ IS_LITTLE_ENDIAN = $(shell $(CC) -dM -E - </dev/null | \
grep 'define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__')
MENDIAN=$(if $(IS_LITTLE_ENDIAN),-mlittle-endian,-mbig-endian)
-CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG))
+ifneq ($(CROSS_COMPILE),)
+CLANG_TARGET_ARCH = --target=$(notdir $(CROSS_COMPILE:%-=%))
+endif
+
+CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG),$(CLANG_TARGET_ARCH))
BPF_CFLAGS = -g -Werror -D__TARGET_ARCH_$(SRCARCH) $(MENDIAN) \
-I$(INCLUDE_DIR) -I$(CURDIR) -I$(APIDIR) \
-I$(abspath $(OUTPUT)/../usr/include)
@@ -359,9 +364,11 @@ LINKED_SKELS := test_static_linked.skel.h linked_funcs.skel.h \
test_subskeleton.skel.h test_subskeleton_lib.skel.h \
test_usdt.skel.h
-LSKELS := fentry_test.c fexit_test.c fexit_sleep.c \
- test_ringbuf.c atomics.c trace_printk.c trace_vprintk.c \
- map_ptr_kern.c core_kern.c core_kern_overflow.c
+LSKELS := fentry_test.c fexit_test.c fexit_sleep.c atomics.c \
+ trace_printk.c trace_vprintk.c map_ptr_kern.c \
+ core_kern.c core_kern_overflow.c test_ringbuf.c \
+ test_ringbuf_map_key.c
+
# Generate both light skeleton and libbpf skeleton for these
LSKELS_EXTRA := test_ksyms_module.c test_ksyms_weak.c kfunc_call_test.c \
kfunc_call_test_subprog.c
@@ -539,7 +546,7 @@ $(eval $(call DEFINE_TEST_RUNNER,test_progs,no_alu32))
# Define test_progs BPF-GCC-flavored test runner.
ifneq ($(BPF_GCC),)
TRUNNER_BPF_BUILD_RULE := GCC_BPF_BUILD_RULE
-TRUNNER_BPF_CFLAGS := $(BPF_CFLAGS) $(call get_sys_includes,gcc)
+TRUNNER_BPF_CFLAGS := $(BPF_CFLAGS) $(call get_sys_includes,gcc,)
$(eval $(call DEFINE_TEST_RUNNER,test_progs,bpf_gcc))
endif
diff --git a/tools/testing/selftests/bpf/README.rst b/tools/testing/selftests/bpf/README.rst
index d3c6b3da0bb1..cb9b95702ac6 100644
--- a/tools/testing/selftests/bpf/README.rst
+++ b/tools/testing/selftests/bpf/README.rst
@@ -6,18 +6,59 @@ General instructions on running selftests can be found in
__ /Documentation/bpf/bpf_devel_QA.rst#q-how-to-run-bpf-selftests
+=============
+BPF CI System
+=============
+
+BPF employs a continuous integration (CI) system to check patch submissions in
+an automated fashion. The system runs selftests for each patch in a series.
+Results are propagated to patchwork, where failures are highlighted similarly
+to violations of other checks (such as additional warnings being emitted or a
+deficiency reported by ``scripts/checkpatch.pl``):
+
+ https://patchwork.kernel.org/project/netdevbpf/list/?delegate=121173
+
+The CI system executes tests on multiple architectures. It uses a kernel
+configuration derived from both the generic and architecture-specific config
+file fragments below ``tools/testing/selftests/bpf/`` (e.g., ``config`` and
+``config.x86_64``).
+
+Denylisting Tests
+=================
+
+Some architectures may not support all BPF features, in which case tests in CI
+can fail. An example of such a shortcoming is missing BPF trampoline support on
+IBM's s390x architecture. For cases like this, an in-tree deny list file,
+located at ``tools/testing/selftests/bpf/DENYLIST.<arch>``, can be used to
+prevent the test from running on such an architecture.
+
+In addition, the generic ``tools/testing/selftests/bpf/DENYLIST`` is honored
+on every architecture running tests.
+
+These files are organized in three columns. The first column names the test in
+question, which can be a test suite or an individual test. The remaining two
+columns provide additional metadata that helps identify and classify the
+entry: column two is a copy of the error reported when running the test in the
+setting in question. The third column, if available, summarizes the underlying
+problem. A value of ``trampoline``, for example, indicates that lack of
+trampoline support is causing the test to fail. This last entry helps identify
+tests that can be re-enabled once such support is added.
+
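
For illustration, two entries of the described three-column form, as they
appear in the ``DENYLIST.s390x`` hunk earlier in this patch:

  tracing_struct    # failed to auto-attach: -524                        (trampoline)
  verify_pkcs7_sig  # JIT does not support calling kernel function       (kfunc)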
=========================
Running Selftests in a VM
=========================
It's now possible to run the selftests using ``tools/testing/selftests/bpf/vmtest.sh``.
The script tries to ensure that the tests are run with the same environment as they
-would be run post-submit in the CI used by the Maintainers.
+would be run post-submit in the CI used by the Maintainers, with the exception
+that deny lists are not automatically honored.
-This script downloads a suitable Kconfig and VM userspace image from the system used by
-the CI. It builds the kernel (without overwriting your existing Kconfig), recompiles the
-bpf selftests, runs them (by default ``tools/testing/selftests/bpf/test_progs``) and
-saves the resulting output (by default in ``~/.bpf_selftests``).
+This script uses the in-tree kernel configuration and downloads a VM userspace
+image from the system used by the CI. It builds the kernel (without overwriting
+your existing Kconfig), recompiles the bpf selftests, runs them (by default
+``tools/testing/selftests/bpf/test_progs``) and saves the resulting output (by
+default in ``~/.bpf_selftests``).
Script dependencies:
- clang (preferably built from sources, https://github.com/llvm/llvm-project);
@@ -26,7 +67,7 @@ Script dependencies:
- docutils (for ``rst2man``);
- libcap-devel.
-For more information on about using the script, run:
+For more information about using the script, run:
.. code-block:: console
diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
new file mode 100644
index 000000000000..424f7bbbfe9b
--- /dev/null
+++ b/tools/testing/selftests/bpf/bpf_experimental.h
@@ -0,0 +1,68 @@
+#ifndef __BPF_EXPERIMENTAL__
+#define __BPF_EXPERIMENTAL__
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+#define __contains(name, node) __attribute__((btf_decl_tag("contains:" #name ":" #node)))
+
+/* Description
+ * Allocates an object of the type represented by 'local_type_id' in
+ * program BTF. User may use the bpf_core_type_id_local macro to pass the
+ * type ID of a struct in program BTF.
+ *
+ * The 'local_type_id' parameter must be a known constant.
+ * The 'meta' parameter is a hidden argument that is ignored.
+ * Returns
+ * A pointer to an object of the type corresponding to the passed in
+ * 'local_type_id', or NULL on failure.
+ */
+extern void *bpf_obj_new_impl(__u64 local_type_id, void *meta) __ksym;
+
+/* Convenience macro to wrap over bpf_obj_new_impl */
+#define bpf_obj_new(type) ((type *)bpf_obj_new_impl(bpf_core_type_id_local(type), NULL))
+
+/* Description
+ * Free an allocated object. All fields of the object that require
+ * destruction will be destructed before the storage is freed.
+ *
+ * The 'meta' parameter is a hidden argument that is ignored.
+ * Returns
+ * Void.
+ */
+extern void bpf_obj_drop_impl(void *kptr, void *meta) __ksym;
+
+/* Convenience macro to wrap over bpf_obj_drop_impl */
+#define bpf_obj_drop(kptr) bpf_obj_drop_impl(kptr, NULL)
+
+/* Description
+ * Add a new entry to the beginning of the BPF linked list.
+ * Returns
+ * Void.
+ */
+extern void bpf_list_push_front(struct bpf_list_head *head, struct bpf_list_node *node) __ksym;
+
+/* Description
+ * Add a new entry to the end of the BPF linked list.
+ * Returns
+ * Void.
+ */
+extern void bpf_list_push_back(struct bpf_list_head *head, struct bpf_list_node *node) __ksym;
+
+/* Description
+ * Remove the entry at the beginning of the BPF linked list.
+ * Returns
+ * Pointer to bpf_list_node of deleted entry, or NULL if list is empty.
+ */
+extern struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) __ksym;
+
+/* Description
+ * Remove the entry at the end of the BPF linked list.
+ * Returns
+ * Pointer to bpf_list_node of deleted entry, or NULL if list is empty.
+ */
+extern struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) __ksym;
+
+#endif
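
Below is a minimal, hedged sketch (not part of this patch) of how a BPF program
might use these wrappers. The private() section macro and the spin lock
guarding the list operations follow the selftests' linked-list conventions, and
all program, type, and variable names here are illustrative assumptions:

/* Hedged sketch of using the wrappers above; names are illustrative. */
#include "bpf_experimental.h"

#ifndef container_of
#define container_of(ptr, type, member) \
	((type *)((void *)(ptr) - offsetof(type, member)))
#endif

/* place lock and list head in the same map section, as the selftests do */
#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))

struct node_data {
	long data;
	struct bpf_list_node node;
};

private(A) struct bpf_spin_lock glock;
private(A) struct bpf_list_head ghead __contains(node_data, node);

SEC("tc")
int list_push_pop_example(void *ctx)
{
	struct bpf_list_node *n;
	struct node_data *d;

	d = bpf_obj_new(typeof(*d));	/* allocate from program BTF */
	if (!d)
		return 0;

	bpf_spin_lock(&glock);
	bpf_list_push_front(&ghead, &d->node);	/* list now owns d */
	n = bpf_list_pop_front(&ghead);		/* take ownership back */
	bpf_spin_unlock(&glock);

	if (n)	/* popped nodes must be released */
		bpf_obj_drop(container_of(n, struct node_data, node));
	return 0;
}

char LICENSE[] SEC("license") = "GPL";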
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index a6021d6117b5..5085fea3cac5 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -128,6 +128,23 @@ __weak noinline struct file *bpf_testmod_return_ptr(int arg)
}
}
+noinline int bpf_testmod_fentry_test1(int a)
+{
+ return a + 1;
+}
+
+noinline int bpf_testmod_fentry_test2(int a, u64 b)
+{
+ return a + b;
+}
+
+noinline int bpf_testmod_fentry_test3(char a, int b, u64 c)
+{
+ return a + b + c;
+}
+
+int bpf_testmod_fentry_ok;
+
noinline ssize_t
bpf_testmod_test_read(struct file *file, struct kobject *kobj,
struct bin_attribute *bin_attr,
@@ -167,6 +184,13 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj,
return snprintf(buf, len, "%d\n", writable.val);
}
+ if (bpf_testmod_fentry_test1(1) != 2 ||
+ bpf_testmod_fentry_test2(2, 3) != 5 ||
+ bpf_testmod_fentry_test3(4, 5, 6) != 15)
+ goto out;
+
+ bpf_testmod_fentry_ok = 1;
+out:
return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_read);
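
The functions above give fentry/fexit programs a stable module symbol to
target; a minimal sketch of such a program (illustrative names, not the actual
selftest) might look like:

// SPDX-License-Identifier: GPL-2.0
/* Hedged sketch: counts calls to the module function added above.
 * Program and variable names are illustrative.
 */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

__u64 test1_calls = 0;

SEC("fentry/bpf_testmod_fentry_test1")
int BPF_PROG(on_fentry_test1, int a)
{
	/* fires each time bpf_testmod_test_read() invokes the function */
	test1_calls++;
	return 0;
}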
diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h
index a3352a64c067..10587a29b967 100644
--- a/tools/testing/selftests/bpf/bpf_util.h
+++ b/tools/testing/selftests/bpf/bpf_util.h
@@ -20,6 +20,25 @@ static inline unsigned int bpf_num_possible_cpus(void)
return possible_cpus;
}
+/* Copy up to sz - 1 bytes from the zero-terminated src string and ensure that
+ * dst is a zero-terminated string no matter what (unless sz == 0, in which
+ * case it's a no-op). It's conceptually close to FreeBSD's strlcpy(), but
+ * differs in what is returned. Given this is an internal helper, it's trivial
+ * to extend when necessary. Use this instead of strncpy().
+ */
+static inline void bpf_strlcpy(char *dst, const char *src, size_t sz)
+{
+ size_t i;
+
+ if (sz == 0)
+ return;
+
+ sz--;
+ for (i = 0; i < sz && src[i]; i++)
+ dst[i] = src[i];
+ dst[i] = '\0';
+}
+
#define __bpf_percpu_val_align __attribute__((__aligned__(8)))
#define BPF_DECLARE_PERCPU(type, name) \
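
As a quick illustration of the truncation semantics documented above, here is a
small standalone userspace sketch (the helper body is repeated so the snippet
compiles on its own; buffer contents are illustrative):

#include <stdio.h>
#include <stddef.h>

/* copy of the helper above, so this sketch is self-contained */
static inline void bpf_strlcpy(char *dst, const char *src, size_t sz)
{
	size_t i;

	if (sz == 0)
		return;

	sz--;
	for (i = 0; i < sz && src[i]; i++)
		dst[i] = src[i];
	dst[i] = '\0';
}

int main(void)
{
	char buf[4];

	bpf_strlcpy(buf, "memory", sizeof(buf));
	/* buf is "mem": at most sz - 1 bytes copied, always NUL-terminated.
	 * strncpy(buf, "memory", sizeof(buf)) would fill all 4 bytes with no
	 * terminating NUL, which is what the cgroup_helpers.c change below
	 * avoids.
	 */
	printf("%s\n", buf);
	return 0;
}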
diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c
index e914cc45b766..9e95b37a7dff 100644
--- a/tools/testing/selftests/bpf/cgroup_helpers.c
+++ b/tools/testing/selftests/bpf/cgroup_helpers.c
@@ -13,6 +13,7 @@
#include <ftw.h>
#include "cgroup_helpers.h"
+#include "bpf_util.h"
/*
* To avoid relying on the system setup, when setup_cgroup_env is called
@@ -77,7 +78,7 @@ static int __enable_controllers(const char *cgroup_path, const char *controllers
enable[len] = 0;
close(fd);
} else {
- strncpy(enable, controllers, sizeof(enable));
+ bpf_strlcpy(enable, controllers, sizeof(enable));
}
snprintf(path, sizeof(path), "%s/cgroup.subtree_control", cgroup_path);
@@ -332,6 +333,25 @@ int get_root_cgroup(void)
return fd;
}
+/**
+ * remove_cgroup() - Remove a cgroup
+ * @relative_path: The cgroup path, relative to the workdir, to remove
+ *
+ * This function expects the cgroup to already exist, relative to the cgroup
+ * work dir, and to have no children or live processes; it removes the cgroup.
+ *
+ * On failure, it will print an error to stderr.
+ */
+void remove_cgroup(const char *relative_path)
+{
+ char cgroup_path[PATH_MAX + 1];
+
+ format_cgroup_path(cgroup_path, relative_path);
+ if (rmdir(cgroup_path))
+ log_err("rmdiring cgroup %s .. %s", relative_path, cgroup_path);
+}
+
/**
* create_and_get_cgroup() - Create a cgroup, relative to workdir, and get the FD
* @relative_path: The cgroup path, relative to the workdir, to join
diff --git a/tools/testing/selftests/bpf/cgroup_helpers.h b/tools/testing/selftests/bpf/cgroup_helpers.h
index 3358734356ab..f099a166c94d 100644
--- a/tools/testing/selftests/bpf/cgroup_helpers.h
+++ b/tools/testing/selftests/bpf/cgroup_helpers.h
@@ -18,6 +18,7 @@ int write_cgroup_file_parent(const char *relative_path, const char *file,
int cgroup_setup_and_join(const char *relative_path);
int get_root_cgroup(void);
int create_and_get_cgroup(const char *relative_path);
+void remove_cgroup(const char *relative_path);
unsigned long long get_cgroup_id(const char *relative_path);
int join_cgroup(const char *relative_path);
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index 9213565c0311..f9034ea00bc9 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -1,4 +1,6 @@
CONFIG_BLK_DEV_LOOP=y
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
CONFIG_BPF=y
CONFIG_BPF_EVENTS=y
CONFIG_BPF_JIT=y
@@ -6,6 +8,7 @@ CONFIG_BPF_LIRC_MODE2=y
CONFIG_BPF_LSM=y
CONFIG_BPF_STREAM_PARSER=y
CONFIG_BPF_SYSCALL=y
+CONFIG_BPF_UNPRIV_DEFAULT_OFF=n
CONFIG_CGROUP_BPF=y
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_SHA256=y
diff --git a/tools/testing/selftests/bpf/config.aarch64 b/tools/testing/selftests/bpf/config.aarch64
new file mode 100644
index 000000000000..1f0437644186
--- /dev/null
+++ b/tools/testing/selftests/bpf/config.aarch64
@@ -0,0 +1,181 @@
+CONFIG_9P_FS=y
+CONFIG_ARCH_VEXPRESS=y
+CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y
+CONFIG_ARM_SMMU_V3=y
+CONFIG_ATA=y
+CONFIG_AUDIT=y
+CONFIG_BINFMT_MISC=y
+CONFIG_BLK_CGROUP=y
+CONFIG_BLK_DEV_BSGLIB=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BONDING=y
+CONFIG_BPFILTER=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
+CONFIG_BPF_JIT_DEFAULT_ON=y
+CONFIG_BPF_PRELOAD_UMD=y
+CONFIG_BPF_PRELOAD=y
+CONFIG_BRIDGE=m
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CGROUP_NET_CLASSID=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUPS=y
+CONFIG_CHECKPOINT_RESTORE=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_COMPAT=y
+CONFIG_CPUSETS=y
+CONFIG_CRASH_DUMP=y
+CONFIG_CRYPTO_USER_API_RNG=y
+CONFIG_CRYPTO_USER_API_SKCIPHER=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_INFO_BTF=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_DEBUG_LIST=y
+CONFIG_DEBUG_LOCKDEP=y
+CONFIG_DEBUG_NOTIFIERS=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_DEBUG_SG=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DEVTMPFS=y
+CONFIG_DRM_VIRTIO_GPU=y
+CONFIG_DRM=y
+CONFIG_DUMMY=y
+CONFIG_EXPERT=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_FANOTIFY=y
+CONFIG_FB=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_FUSE_FS=y
+CONFIG_FW_CFG_SYSFS_CMDLINE=y
+CONFIG_FW_CFG_SYSFS=y
+CONFIG_GDB_SCRIPTS=y
+CONFIG_HAVE_EBPF_JIT=y
+CONFIG_HAVE_KPROBES_ON_FTRACE=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HEADERS_INSTALL=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_HUGETLBFS=y
+CONFIG_HW_RANDOM_VIRTIO=y
+CONFIG_HW_RANDOM=y
+CONFIG_HZ_100=y
+CONFIG_IDLE_PAGE_TRACKING=y
+CONFIG_IKHEADERS=y
+CONFIG_INET6_ESP=y
+CONFIG_INET_ESP=y
+CONFIG_INET=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IPV6_SEG6_LWTUNNEL=y
+CONFIG_IPVLAN=y
+CONFIG_JUMP_LABEL=y
+CONFIG_KERNEL_UNCOMPRESSED=y
+CONFIG_KPROBES_ON_FTRACE=y
+CONFIG_KPROBES=y
+CONFIG_KRETPROBES=y
+CONFIG_KSM=y
+CONFIG_LATENCYTOP=y
+CONFIG_LIVEPATCH=y
+CONFIG_LOCK_STAT=y
+CONFIG_MACVLAN=y
+CONFIG_MACVTAP=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_MAILBOX=y
+CONFIG_MEMCG=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_NAMESPACES=y
+CONFIG_NET_9P_VIRTIO=y
+CONFIG_NET_9P=y
+CONFIG_NET_ACT_BPF=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NETDEVICES=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NET_KEY=y
+CONFIG_NET_SCH_FQ=y
+CONFIG_NET_VRF=y
+CONFIG_NET=y
+CONFIG_NF_TABLES=y
+CONFIG_NLMON=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NR_CPUS=256
+CONFIG_NUMA=y
+CONFIG_OVERLAY_FS=y
+CONFIG_PACKET_DIAG=y
+CONFIG_PACKET=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCI=y
+CONFIG_PL320_MBOX=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROFILING=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_PTDUMP_DEBUGFS=y
+CONFIG_RC_DEVICES=y
+CONFIG_RC_LOOPBACK=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_PL031=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_SAMPLE_SECCOMP=y
+CONFIG_SAMPLES=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TRACER=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_VIRTIO=y
+CONFIG_SCSI=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_STACK_TRACER=y
+CONFIG_STATIC_KEYS_SELFTEST=y
+CONFIG_SYSVIPC=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TCG_TIS=y
+CONFIG_TCG_TPM=y
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_DCTCP=y
+CONFIG_TLS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS=y
+CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_TUN=y
+CONFIG_UNIX=y
+CONFIG_UPROBES=y
+CONFIG_USELIB=y
+CONFIG_USER_NS=y
+CONFIG_VETH=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_VIRTIO_FS=y
+CONFIG_VIRTIO_INPUT=y
+CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_VIRTIO_NET=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VLAN_8021Q=y
+CONFIG_VSOCKETS=y
+CONFIG_XFRM_USER=y
diff --git a/tools/testing/selftests/bpf/config.s390x b/tools/testing/selftests/bpf/config.s390x
index f8a7a258a718..d49f6170e7bd 100644
--- a/tools/testing/selftests/bpf/config.s390x
+++ b/tools/testing/selftests/bpf/config.s390x
@@ -82,9 +82,6 @@ CONFIG_MARCH_Z196_TUNE=y
CONFIG_MEMCG=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
-CONFIG_MODULE_SIG=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULES=y
CONFIG_NAMESPACES=y
CONFIG_NET=y
CONFIG_NET_9P=y
diff --git a/tools/testing/selftests/bpf/config.x86_64 b/tools/testing/selftests/bpf/config.x86_64
index 21ce5ea4304e..dd97d61d325c 100644
--- a/tools/testing/selftests/bpf/config.x86_64
+++ b/tools/testing/selftests/bpf/config.x86_64
@@ -18,7 +18,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=16384
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BONDING=y
-CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y
CONFIG_BOOTTIME_TRACING=y
CONFIG_BPF_JIT_ALWAYS_ON=y
CONFIG_BPF_KPROBE_OVERRIDE=y
diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c
index bec15558fd93..1f37adff7632 100644
--- a/tools/testing/selftests/bpf/network_helpers.c
+++ b/tools/testing/selftests/bpf/network_helpers.c
@@ -426,6 +426,10 @@ static int setns_by_fd(int nsfd)
if (!ASSERT_OK(err, "mount /sys/fs/bpf"))
return err;
+ err = mount("debugfs", "/sys/kernel/debug", "debugfs", 0, NULL);
+ if (!ASSERT_OK(err, "mount /sys/kernel/debug"))
+ return err;
+
return 0;
}
diff --git a/tools/testing/selftests/bpf/prog_tests/align.c b/tools/testing/selftests/bpf/prog_tests/align.c
index 970f09156eb4..4666f88f2bb4 100644
--- a/tools/testing/selftests/bpf/prog_tests/align.c
+++ b/tools/testing/selftests/bpf/prog_tests/align.c
@@ -2,7 +2,7 @@
#include <test_progs.h>
#define MAX_INSNS 512
-#define MAX_MATCHES 16
+#define MAX_MATCHES 24
struct bpf_reg_match {
unsigned int line;
@@ -267,6 +267,7 @@ static struct bpf_align_test tests[] = {
*/
BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
@@ -280,6 +281,7 @@ static struct bpf_align_test tests[] = {
BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 4),
BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
@@ -311,44 +313,52 @@ static struct bpf_align_test tests[] = {
{15, "R4=pkt(id=1,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
{15, "R5=pkt(id=1,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
/* Variable offset is added to R5 packet pointer,
- * resulting in auxiliary alignment of 4.
+		 * resulting in auxiliary alignment of 4. To keep the
+		 * BPF verifier's precision backtracking logging from
+		 * interfering, we also have a no-op R4 = R5
+		 * instruction to validate R5 state. We also check
+		 * that R4 is what it should be in that case.
*/
- {17, "R5_w=pkt(id=2,off=0,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
+ {18, "R4_w=pkt(id=2,off=0,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
+ {18, "R5_w=pkt(id=2,off=0,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
/* Constant offset is added to R5, resulting in
* reg->off of 14.
*/
- {18, "R5_w=pkt(id=2,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
+ {19, "R5_w=pkt(id=2,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off
* (14) which is 16. Then the variable offset is 4-byte
* aligned, so the total offset is 4-byte aligned and
* meets the load's requirements.
*/
- {23, "R4=pkt(id=2,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
- {23, "R5=pkt(id=2,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
+ {24, "R4=pkt(id=2,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
+ {24, "R5=pkt(id=2,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
/* Constant offset is added to R5 packet pointer,
* resulting in reg->off value of 14.
*/
- {25, "R5_w=pkt(off=14,r=8"},
+ {26, "R5_w=pkt(off=14,r=8"},
/* Variable offset is added to R5, resulting in a
- * variable offset of (4n).
+ * variable offset of (4n). See comment for insn #18
+		 * for the R4 = R5 trick.
*/
- {26, "R5_w=pkt(id=3,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
+ {28, "R4_w=pkt(id=3,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
+ {28, "R5_w=pkt(id=3,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
/* Constant is added to R5 again, setting reg->off to 18. */
- {27, "R5_w=pkt(id=3,off=18,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
+ {29, "R5_w=pkt(id=3,off=18,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
/* And once more we add a variable; resulting var_off
* is still (4n), fixed offset is not changed.
* Also, we create a new reg->id.
*/
- {28, "R5_w=pkt(id=4,off=18,r=0,umax=2040,var_off=(0x0; 0x7fc)"},
+ {31, "R4_w=pkt(id=4,off=18,r=0,umax=2040,var_off=(0x0; 0x7fc)"},
+ {31, "R5_w=pkt(id=4,off=18,r=0,umax=2040,var_off=(0x0; 0x7fc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (18)
* which is 20. Then the variable offset is (4n), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
- {33, "R4=pkt(id=4,off=22,r=22,umax=2040,var_off=(0x0; 0x7fc)"},
- {33, "R5=pkt(id=4,off=18,r=22,umax=2040,var_off=(0x0; 0x7fc)"},
+ {35, "R4=pkt(id=4,off=22,r=22,umax=2040,var_off=(0x0; 0x7fc)"},
+ {35, "R5=pkt(id=4,off=18,r=22,umax=2040,var_off=(0x0; 0x7fc)"},
},
},
{
@@ -681,6 +691,6 @@ void test_align(void)
if (!test__start_subtest(test->descr))
continue;
- CHECK_FAIL(do_test_single(test));
+ ASSERT_OK(do_test_single(test), test->descr);
}
}
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
index 3369c5ec3a17..6f8ed61fc4b4 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
@@ -3,6 +3,7 @@
#include <test_progs.h>
#include <unistd.h>
#include <sys/syscall.h>
+#include <task_local_storage_helpers.h>
#include "bpf_iter_ipv6_route.skel.h"
#include "bpf_iter_netlink.skel.h"
#include "bpf_iter_bpf_map.skel.h"
@@ -175,11 +176,6 @@ static void test_bpf_map(void)
bpf_iter_bpf_map__destroy(skel);
}
-static int pidfd_open(pid_t pid, unsigned int flags)
-{
- return syscall(SYS_pidfd_open, pid, flags);
-}
-
static void check_bpf_link_info(const struct bpf_program *prog)
{
LIBBPF_OPTS(bpf_iter_attach_opts, opts);
@@ -295,8 +291,8 @@ static void test_task_pidfd(void)
union bpf_iter_link_info linfo;
int pidfd;
- pidfd = pidfd_open(getpid(), 0);
- if (!ASSERT_GT(pidfd, 0, "pidfd_open"))
+ pidfd = sys_pidfd_open(getpid(), 0);
+ if (!ASSERT_GT(pidfd, 0, "sys_pidfd_open"))
return;
memset(&linfo, 0, sizeof(linfo));
@@ -945,10 +941,10 @@ static void test_bpf_array_map(void)
{
__u64 val, expected_val = 0, res_first_val, first_val = 0;
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
- __u32 expected_key = 0, res_first_key;
+ __u32 key, expected_key = 0, res_first_key;
+ int err, i, map_fd, hash_fd, iter_fd;
struct bpf_iter_bpf_array_map *skel;
union bpf_iter_link_info linfo;
- int err, i, map_fd, iter_fd;
struct bpf_link *link;
char buf[64] = {};
int len, start;
@@ -1005,12 +1001,20 @@ static void test_bpf_array_map(void)
if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
goto close_iter;
+ hash_fd = bpf_map__fd(skel->maps.hashmap1);
for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
err = bpf_map_lookup_elem(map_fd, &i, &val);
- if (!ASSERT_OK(err, "map_lookup"))
- goto out;
- if (!ASSERT_EQ(i, val, "invalid_val"))
- goto out;
+ if (!ASSERT_OK(err, "map_lookup arraymap1"))
+ goto close_iter;
+ if (!ASSERT_EQ(i, val, "invalid_val arraymap1"))
+ goto close_iter;
+
+ val = i + 4;
+ err = bpf_map_lookup_elem(hash_fd, &val, &key);
+ if (!ASSERT_OK(err, "map_lookup hashmap1"))
+ goto close_iter;
+ if (!ASSERT_EQ(key, val - 4, "invalid_val hashmap1"))
+ goto close_iter;
}
close_iter:
@@ -1498,7 +1502,6 @@ static noinline int trigger_func(int arg)
static void test_task_vma_offset_common(struct bpf_iter_attach_opts *opts, bool one_proc)
{
struct bpf_iter_vma_offset *skel;
- struct bpf_link *link;
char buf[16] = {};
int iter_fd, len;
int pgsz, shift;
@@ -1513,11 +1516,11 @@ static void test_task_vma_offset_common(struct bpf_iter_attach_opts *opts, bool
;
skel->bss->page_shift = shift;
- link = bpf_program__attach_iter(skel->progs.get_vma_offset, opts);
- if (!ASSERT_OK_PTR(link, "attach_iter"))
- return;
+ skel->links.get_vma_offset = bpf_program__attach_iter(skel->progs.get_vma_offset, opts);
+ if (!ASSERT_OK_PTR(skel->links.get_vma_offset, "attach_iter"))
+ goto exit;
- iter_fd = bpf_iter_create(bpf_link__fd(link));
+ iter_fd = bpf_iter_create(bpf_link__fd(skel->links.get_vma_offset));
if (!ASSERT_GT(iter_fd, 0, "create_iter"))
goto exit;
@@ -1535,7 +1538,7 @@ static void test_task_vma_offset_common(struct bpf_iter_attach_opts *opts, bool
close(iter_fd);
exit:
- bpf_link__destroy(link);
+ bpf_iter_vma_offset__destroy(skel);
}
static void test_task_vma_offset(void)
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
index 8a838ea8bdf3..c8ba4009e4ab 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
@@ -49,14 +49,14 @@ out:
static void test_bpf_nf_ct(int mode)
{
- const char *iptables = "iptables -t raw %s PREROUTING -j CONNMARK --set-mark 42/0";
+ const char *iptables = "iptables-legacy -t raw %s PREROUTING -j CONNMARK --set-mark 42/0";
int srv_fd = -1, client_fd = -1, srv_client_fd = -1;
struct sockaddr_in peer_addr = {};
struct test_bpf_nf *skel;
int prog_fd, err;
socklen_t len;
u16 srv_port;
- char cmd[64];
+ char cmd[128];
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
@@ -69,7 +69,7 @@ static void test_bpf_nf_ct(int mode)
/* Enable connection tracking */
snprintf(cmd, sizeof(cmd), iptables, "-A");
- if (!ASSERT_OK(system(cmd), "iptables"))
+ if (!ASSERT_OK(system(cmd), cmd))
goto end;
srv_port = (mode == TEST_XDP) ? 5005 : 5006;
diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c
index 24dd6214394e..de1b5b9eb93a 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf.c
@@ -3949,6 +3949,20 @@ static struct btf_raw_test raw_tests[] = {
.err_str = "Invalid return type",
},
{
+ .descr = "decl_tag test #17, func proto, argument",
+ .raw_types = {
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 0, 0), 4), (-1), /* [1] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 0), /* [2] */
+ BTF_FUNC_PROTO_ENC(0, 1), /* [3] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_VAR_ENC(NAME_TBD, 2, 0), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0local\0tag1\0var"),
+ .btf_load_err = true,
+ .err_str = "Invalid arg#1",
+},
+{
.descr = "type_tag test #1",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
@@ -7133,7 +7147,7 @@ static struct btf_dedup_test dedup_tests[] = {
BTF_ENUM_ENC(NAME_NTH(4), 456),
/* [4] fwd enum 'e2' after full enum */
BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 4),
- /* [5] incompatible fwd enum with different size */
+ /* [5] fwd enum with different size, size does not matter for fwd */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 1),
/* [6] incompatible full enum with different value */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
@@ -7150,9 +7164,7 @@ static struct btf_dedup_test dedup_tests[] = {
/* [2] full enum 'e2' */
BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
BTF_ENUM_ENC(NAME_NTH(4), 456),
- /* [3] incompatible fwd enum with different size */
- BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 1),
- /* [4] incompatible full enum with different value */
+ /* [3] incompatible full enum with different value */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
BTF_ENUM_ENC(NAME_NTH(2), 321),
BTF_END_RAW,
@@ -7611,7 +7623,263 @@ static struct btf_dedup_test dedup_tests[] = {
BTF_STR_SEC("\0e1\0e1_val"),
},
},
-
+{
+ .descr = "dedup: enum of different size: no dedup",
+ .input = {
+ .raw_types = {
+ /* [1] enum 'e1' */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+ BTF_ENUM_ENC(NAME_NTH(2), 1),
+ /* [2] enum 'e1' */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 2),
+ BTF_ENUM_ENC(NAME_NTH(2), 1),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0e1\0e1_val"),
+ },
+ .expect = {
+ .raw_types = {
+ /* [1] enum 'e1' */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+ BTF_ENUM_ENC(NAME_NTH(2), 1),
+ /* [2] enum 'e1' */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 2),
+ BTF_ENUM_ENC(NAME_NTH(2), 1),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0e1\0e1_val"),
+ },
+},
+{
+ .descr = "dedup: enum fwd to enum64",
+ .input = {
+ .raw_types = {
+ /* [1] enum64 'e1' */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
+ BTF_ENUM64_ENC(NAME_NTH(2), 1, 0),
+ /* [2] enum 'e1' fwd */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 4),
+ /* [3] typedef enum 'e1' td */
+ BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0), 2),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0e1\0e1_val\0td"),
+ },
+ .expect = {
+ .raw_types = {
+ /* [1] enum64 'e1' */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
+ BTF_ENUM64_ENC(NAME_NTH(2), 1, 0),
+ /* [2] typedef enum 'e1' td */
+ BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0), 1),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0e1\0e1_val\0td"),
+ },
+},
+{
+ .descr = "dedup: enum64 fwd to enum",
+ .input = {
+ .raw_types = {
+ /* [1] enum 'e1' */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+ BTF_ENUM_ENC(NAME_NTH(2), 1),
+ /* [2] enum64 'e1' fwd */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8),
+ /* [3] typedef enum 'e1' td */
+ BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0), 2),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0e1\0e1_val\0td"),
+ },
+ .expect = {
+ .raw_types = {
+ /* [1] enum 'e1' */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+ BTF_ENUM_ENC(NAME_NTH(2), 1),
+ /* [2] typedef enum 'e1' td */
+ BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0), 1),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0e1\0e1_val\0td"),
+ },
+},
+{
+ .descr = "dedup: standalone fwd declaration struct",
+ /*
+ * Verify that CU1:foo and CU2:foo would be unified and that
+ * typedef/ptr would be updated to point to CU1:foo.
+ *
+ * // CU 1:
+ * struct foo { int x; };
+ *
+ * // CU 2:
+ * struct foo;
+ * typedef struct foo *foo_ptr;
+ */
+ .input = {
+ .raw_types = {
+ /* CU 1 */
+ BTF_STRUCT_ENC(NAME_NTH(1), 1, 4), /* [1] */
+ BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */
+ /* CU 2 */
+ BTF_FWD_ENC(NAME_NTH(1), 0), /* [3] */
+ BTF_PTR_ENC(3), /* [4] */
+ BTF_TYPEDEF_ENC(NAME_NTH(3), 4), /* [5] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0foo\0x\0foo_ptr"),
+ },
+ .expect = {
+ .raw_types = {
+ BTF_STRUCT_ENC(NAME_NTH(1), 1, 4), /* [1] */
+ BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */
+ BTF_PTR_ENC(1), /* [3] */
+ BTF_TYPEDEF_ENC(NAME_NTH(3), 3), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0foo\0x\0foo_ptr"),
+ },
+},
+{
+ .descr = "dedup: standalone fwd declaration union",
+ /*
+ * Verify that CU1:foo and CU2:foo would be unified and that
+ * typedef/ptr would be updated to point to CU1:foo.
+ * Same as "dedup: standalone fwd declaration struct" but for unions.
+ *
+ * // CU 1:
+ * union foo { int x; };
+ *
+ * // CU 2:
+ * union foo;
+ * typedef union foo *foo_ptr;
+ */
+ .input = {
+ .raw_types = {
+ /* CU 1 */
+ BTF_UNION_ENC(NAME_NTH(1), 1, 4), /* [1] */
+ BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */
+ /* CU 2 */
+ BTF_FWD_ENC(NAME_TBD, 1), /* [3] */
+ BTF_PTR_ENC(3), /* [4] */
+ BTF_TYPEDEF_ENC(NAME_NTH(3), 4), /* [5] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0foo\0x\0foo_ptr"),
+ },
+ .expect = {
+ .raw_types = {
+ BTF_UNION_ENC(NAME_NTH(1), 1, 4), /* [1] */
+ BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */
+ BTF_PTR_ENC(1), /* [3] */
+ BTF_TYPEDEF_ENC(NAME_NTH(3), 3), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0foo\0x\0foo_ptr"),
+ },
+},
+{
+ .descr = "dedup: standalone fwd declaration wrong kind",
+ /*
+ * Negative test for btf_dedup_resolve_fwds:
+	 * - CU1:foo is a struct, CU2:foo is a union, thus CU2:foo is not deduped;
+ * - typedef/ptr should remain unchanged as well.
+ *
+ * // CU 1:
+ * struct foo { int x; };
+ *
+ * // CU 2:
+ * union foo;
+ * typedef union foo *foo_ptr;
+ */
+ .input = {
+ .raw_types = {
+ /* CU 1 */
+ BTF_STRUCT_ENC(NAME_NTH(1), 1, 4), /* [1] */
+ BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */
+ /* CU 2 */
+ BTF_FWD_ENC(NAME_NTH(3), 1), /* [3] */
+ BTF_PTR_ENC(3), /* [4] */
+ BTF_TYPEDEF_ENC(NAME_NTH(3), 4), /* [5] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0foo\0x\0foo_ptr"),
+ },
+ .expect = {
+ .raw_types = {
+ /* CU 1 */
+ BTF_STRUCT_ENC(NAME_NTH(1), 1, 4), /* [1] */
+ BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */
+ /* CU 2 */
+ BTF_FWD_ENC(NAME_NTH(3), 1), /* [3] */
+ BTF_PTR_ENC(3), /* [4] */
+ BTF_TYPEDEF_ENC(NAME_NTH(3), 4), /* [5] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0foo\0x\0foo_ptr"),
+ },
+},
+{
+ .descr = "dedup: standalone fwd declaration name conflict",
+ /*
+ * Negative test for btf_dedup_resolve_fwds:
+ * - two candidates for CU2:foo dedup, thus it is unchanged;
+ * - typedef/ptr should remain unchanged as well.
+ *
+ * // CU 1:
+ * struct foo { int x; };
+ *
+ * // CU 2:
+ * struct foo;
+ * typedef struct foo *foo_ptr;
+ *
+ * // CU 3:
+ * struct foo { int x; int y; };
+ */
+ .input = {
+ .raw_types = {
+ /* CU 1 */
+ BTF_STRUCT_ENC(NAME_NTH(1), 1, 4), /* [1] */
+ BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */
+ /* CU 2 */
+ BTF_FWD_ENC(NAME_NTH(1), 0), /* [3] */
+ BTF_PTR_ENC(3), /* [4] */
+ BTF_TYPEDEF_ENC(NAME_NTH(4), 4), /* [5] */
+ /* CU 3 */
+ BTF_STRUCT_ENC(NAME_NTH(1), 2, 8), /* [6] */
+ BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
+ BTF_MEMBER_ENC(NAME_NTH(3), 2, 0),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0foo\0x\0y\0foo_ptr"),
+ },
+ .expect = {
+ .raw_types = {
+ /* CU 1 */
+ BTF_STRUCT_ENC(NAME_NTH(1), 1, 4), /* [1] */
+ BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */
+ /* CU 2 */
+ BTF_FWD_ENC(NAME_NTH(1), 0), /* [3] */
+ BTF_PTR_ENC(3), /* [4] */
+ BTF_TYPEDEF_ENC(NAME_NTH(4), 4), /* [5] */
+ /* CU 3 */
+ BTF_STRUCT_ENC(NAME_NTH(1), 2, 8), /* [6] */
+ BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
+ BTF_MEMBER_ENC(NAME_NTH(3), 2, 0),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0foo\0x\0y\0foo_ptr"),
+ },
+},
};
static int btf_type_size(const struct btf_type *t)
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c b/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c
index 90aac437576d..d9024c7a892a 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c
@@ -143,6 +143,10 @@ static void test_split_fwd_resolve() {
btf__add_struct(btf1, "s2", 4); /* [5] struct s2 { */
btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */
/* } */
+	/* keep this not a part of the type graph to test btf_dedup_resolve_fwds */
+ btf__add_struct(btf1, "s3", 4); /* [6] struct s3 { */
+ btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */
+ /* } */
VALIDATE_RAW_BTF(
btf1,
@@ -153,20 +157,24 @@ static void test_split_fwd_resolve() {
"\t'f1' type_id=2 bits_offset=0\n"
"\t'f2' type_id=3 bits_offset=64",
"[5] STRUCT 's2' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[6] STRUCT 's3' size=4 vlen=1\n"
"\t'f1' type_id=1 bits_offset=0");
btf2 = btf__new_empty_split(btf1);
if (!ASSERT_OK_PTR(btf2, "empty_split_btf"))
goto cleanup;
- btf__add_int(btf2, "int", 4, BTF_INT_SIGNED); /* [6] int */
- btf__add_ptr(btf2, 10); /* [7] ptr to struct s1 */
- btf__add_fwd(btf2, "s2", BTF_FWD_STRUCT); /* [8] fwd for struct s2 */
- btf__add_ptr(btf2, 8); /* [9] ptr to fwd struct s2 */
- btf__add_struct(btf2, "s1", 16); /* [10] struct s1 { */
- btf__add_field(btf2, "f1", 7, 0, 0); /* struct s1 *f1; */
- btf__add_field(btf2, "f2", 9, 64, 0); /* struct s2 *f2; */
+ btf__add_int(btf2, "int", 4, BTF_INT_SIGNED); /* [7] int */
+ btf__add_ptr(btf2, 11); /* [8] ptr to struct s1 */
+ btf__add_fwd(btf2, "s2", BTF_FWD_STRUCT); /* [9] fwd for struct s2 */
+ btf__add_ptr(btf2, 9); /* [10] ptr to fwd struct s2 */
+ btf__add_struct(btf2, "s1", 16); /* [11] struct s1 { */
+ btf__add_field(btf2, "f1", 8, 0, 0); /* struct s1 *f1; */
+ btf__add_field(btf2, "f2", 10, 64, 0); /* struct s2 *f2; */
/* } */
+ btf__add_fwd(btf2, "s3", BTF_FWD_STRUCT); /* [12] fwd for struct s3 */
+ btf__add_ptr(btf2, 12); /* [13] ptr to struct s1 */
VALIDATE_RAW_BTF(
btf2,
@@ -178,13 +186,17 @@ static void test_split_fwd_resolve() {
"\t'f2' type_id=3 bits_offset=64",
"[5] STRUCT 's2' size=4 vlen=1\n"
"\t'f1' type_id=1 bits_offset=0",
- "[6] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
- "[7] PTR '(anon)' type_id=10",
- "[8] FWD 's2' fwd_kind=struct",
- "[9] PTR '(anon)' type_id=8",
- "[10] STRUCT 's1' size=16 vlen=2\n"
- "\t'f1' type_id=7 bits_offset=0\n"
- "\t'f2' type_id=9 bits_offset=64");
+ "[6] STRUCT 's3' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[7] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[8] PTR '(anon)' type_id=11",
+ "[9] FWD 's2' fwd_kind=struct",
+ "[10] PTR '(anon)' type_id=9",
+ "[11] STRUCT 's1' size=16 vlen=2\n"
+ "\t'f1' type_id=8 bits_offset=0\n"
+ "\t'f2' type_id=10 bits_offset=64",
+ "[12] FWD 's3' fwd_kind=struct",
+ "[13] PTR '(anon)' type_id=12");
err = btf__dedup(btf2, NULL);
if (!ASSERT_OK(err, "btf_dedup"))
@@ -199,7 +211,10 @@ static void test_split_fwd_resolve() {
"\t'f1' type_id=2 bits_offset=0\n"
"\t'f2' type_id=3 bits_offset=64",
"[5] STRUCT 's2' size=4 vlen=1\n"
- "\t'f1' type_id=1 bits_offset=0");
+ "\t'f1' type_id=1 bits_offset=0",
+ "[6] STRUCT 's3' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[7] PTR '(anon)' type_id=6");
cleanup:
btf__free(btf2);
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
index 24da335482d4..0ba2e8b9c6ac 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
@@ -791,11 +791,11 @@ static void test_btf_dump_struct_data(struct btf *btf, struct btf_dump *d,
TEST_BTF_DUMP_DATA_OVER(btf, d, "struct", str, struct bpf_sock_ops,
sizeof(struct bpf_sock_ops) - 1,
"(struct bpf_sock_ops){\n\t.op = (__u32)1,\n",
- { .op = 1, .skb_tcp_flags = 2});
+ { .op = 1, .skb_hwtstamp = 2});
TEST_BTF_DUMP_DATA_OVER(btf, d, "struct", str, struct bpf_sock_ops,
sizeof(struct bpf_sock_ops) - 1,
"(struct bpf_sock_ops){\n\t.op = (__u32)1,\n",
- { .op = 1, .skb_tcp_flags = 0});
+ { .op = 1, .skb_hwtstamp = 0});
}
static void test_btf_dump_var_data(struct btf *btf, struct btf_dump *d,
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_iter.c b/tools/testing/selftests/bpf/prog_tests/cgroup_iter.c
index c4a2adb38da1..e02feb5fae97 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_iter.c
@@ -189,6 +189,80 @@ static void test_walk_self_only(struct cgroup_iter *skel)
BPF_CGROUP_ITER_SELF_ONLY, "self_only");
}
+static void test_walk_dead_self_only(struct cgroup_iter *skel)
+{
+ DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+ char expected_output[128], buf[128];
+ const char *cgrp_name = "/dead";
+ union bpf_iter_link_info linfo;
+ int len, cgrp_fd, iter_fd;
+ struct bpf_link *link;
+ size_t left;
+ char *p;
+
+ cgrp_fd = create_and_get_cgroup(cgrp_name);
+ if (!ASSERT_GE(cgrp_fd, 0, "create cgrp"))
+ return;
+
+	/* The cgroup will be dead during the read() iteration, so only the
+	 * epilogue appears in the output.
+	 */
+ snprintf(expected_output, sizeof(expected_output), EPILOGUE);
+
+ memset(&linfo, 0, sizeof(linfo));
+ linfo.cgroup.cgroup_fd = cgrp_fd;
+ linfo.cgroup.order = BPF_CGROUP_ITER_SELF_ONLY;
+ opts.link_info = &linfo;
+ opts.link_info_len = sizeof(linfo);
+
+ link = bpf_program__attach_iter(skel->progs.cgroup_id_printer, &opts);
+ if (!ASSERT_OK_PTR(link, "attach_iter"))
+ goto close_cgrp;
+
+ iter_fd = bpf_iter_create(bpf_link__fd(link));
+ if (!ASSERT_GE(iter_fd, 0, "iter_create"))
+ goto free_link;
+
+ /* Close link fd and cgroup fd */
+ bpf_link__destroy(link);
+ close(cgrp_fd);
+
+ /* Remove cgroup to mark it as dead */
+ remove_cgroup(cgrp_name);
+
+	/* The first two kern_sync_rcu() and usleep() pairs wait for the
+	 * release of the cgroup's css, and the last kern_sync_rcu() and
+	 * usleep() pair waits for the cgroup itself to be freed.
+	 */
+ kern_sync_rcu();
+ usleep(8000);
+ kern_sync_rcu();
+ usleep(8000);
+ kern_sync_rcu();
+ usleep(1000);
+
+ memset(buf, 0, sizeof(buf));
+ left = ARRAY_SIZE(buf);
+ p = buf;
+ while ((len = read(iter_fd, p, left)) > 0) {
+ p += len;
+ left -= len;
+ }
+
+ ASSERT_STREQ(buf, expected_output, "dead cgroup output");
+
+ /* read() after iter finishes should be ok. */
+ if (len == 0)
+ ASSERT_OK(read(iter_fd, buf, sizeof(buf)), "second_read");
+
+ close(iter_fd);
+ return;
+free_link:
+ bpf_link__destroy(link);
+close_cgrp:
+ close(cgrp_fd);
+}
+
void test_cgroup_iter(void)
{
struct cgroup_iter *skel = NULL;
@@ -217,6 +291,8 @@ void test_cgroup_iter(void)
test_early_termination(skel);
if (test__start_subtest("cgroup_iter__self_only"))
test_walk_self_only(skel);
+ if (test__start_subtest("cgroup_iter__dead_self_only"))
+ test_walk_dead_self_only(skel);
out:
cgroup_iter__destroy(skel);
cleanup_cgroups();
diff --git a/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c b/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c
new file mode 100644
index 000000000000..973f0c5af965
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#define _GNU_SOURCE
+#include <cgroup_helpers.h>
+#include <test_progs.h>
+
+#include "cgrp_kfunc_failure.skel.h"
+#include "cgrp_kfunc_success.skel.h"
+
+static size_t log_buf_sz = 1 << 20; /* 1 MB */
+static char obj_log_buf[1048576];
+
+static struct cgrp_kfunc_success *open_load_cgrp_kfunc_skel(void)
+{
+ struct cgrp_kfunc_success *skel;
+ int err;
+
+ skel = cgrp_kfunc_success__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return NULL;
+
+ skel->bss->pid = getpid();
+
+ err = cgrp_kfunc_success__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ return skel;
+
+cleanup:
+ cgrp_kfunc_success__destroy(skel);
+ return NULL;
+}
+
+static int mkdir_rm_test_dir(void)
+{
+ int fd;
+ const char *cgrp_path = "cgrp_kfunc";
+
+ fd = create_and_get_cgroup(cgrp_path);
+ if (!ASSERT_GT(fd, 0, "mkdir_cgrp_fd"))
+ return -1;
+
+ close(fd);
+ remove_cgroup(cgrp_path);
+
+ return 0;
+}
+
+static void run_success_test(const char *prog_name)
+{
+ struct cgrp_kfunc_success *skel;
+ struct bpf_program *prog;
+ struct bpf_link *link = NULL;
+
+ skel = open_load_cgrp_kfunc_skel();
+ if (!ASSERT_OK_PTR(skel, "open_load_skel"))
+ return;
+
+ if (!ASSERT_OK(skel->bss->err, "pre_mkdir_err"))
+ goto cleanup;
+
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+ goto cleanup;
+
+ link = bpf_program__attach(prog);
+ if (!ASSERT_OK_PTR(link, "attached_link"))
+ goto cleanup;
+
+ ASSERT_EQ(skel->bss->invocations, 0, "pre_rmdir_count");
+ if (!ASSERT_OK(mkdir_rm_test_dir(), "cgrp_mkdir"))
+ goto cleanup;
+
+ ASSERT_EQ(skel->bss->invocations, 1, "post_rmdir_count");
+ ASSERT_OK(skel->bss->err, "post_rmdir_err");
+
+cleanup:
+ bpf_link__destroy(link);
+ cgrp_kfunc_success__destroy(skel);
+}
+
+static const char * const success_tests[] = {
+ "test_cgrp_acquire_release_argument",
+ "test_cgrp_acquire_leave_in_map",
+ "test_cgrp_xchg_release",
+ "test_cgrp_get_release",
+ "test_cgrp_get_ancestors",
+};
+
+static struct {
+ const char *prog_name;
+ const char *expected_err_msg;
+} failure_tests[] = {
+ {"cgrp_kfunc_acquire_untrusted", "R1 must be referenced or trusted"},
+ {"cgrp_kfunc_acquire_fp", "arg#0 pointer type STRUCT cgroup must point"},
+ {"cgrp_kfunc_acquire_unsafe_kretprobe", "reg type unsupported for arg#0 function"},
+ {"cgrp_kfunc_acquire_trusted_walked", "R1 must be referenced or trusted"},
+ {"cgrp_kfunc_acquire_null", "arg#0 pointer type STRUCT cgroup must point"},
+ {"cgrp_kfunc_acquire_unreleased", "Unreleased reference"},
+ {"cgrp_kfunc_get_non_kptr_param", "arg#0 expected pointer to map value"},
+ {"cgrp_kfunc_get_non_kptr_acquired", "arg#0 expected pointer to map value"},
+ {"cgrp_kfunc_get_null", "arg#0 expected pointer to map value"},
+ {"cgrp_kfunc_xchg_unreleased", "Unreleased reference"},
+ {"cgrp_kfunc_get_unreleased", "Unreleased reference"},
+ {"cgrp_kfunc_release_untrusted", "arg#0 is untrusted_ptr_or_null_ expected ptr_ or socket"},
+ {"cgrp_kfunc_release_fp", "arg#0 pointer type STRUCT cgroup must point"},
+ {"cgrp_kfunc_release_null", "arg#0 is ptr_or_null_ expected ptr_ or socket"},
+ {"cgrp_kfunc_release_unacquired", "release kernel function bpf_cgroup_release expects"},
+};
+
+static void verify_fail(const char *prog_name, const char *expected_err_msg)
+{
+ LIBBPF_OPTS(bpf_object_open_opts, opts);
+ struct cgrp_kfunc_failure *skel;
+ int err, i;
+
+ opts.kernel_log_buf = obj_log_buf;
+ opts.kernel_log_size = log_buf_sz;
+ opts.kernel_log_level = 1;
+
+ skel = cgrp_kfunc_failure__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "cgrp_kfunc_failure__open_opts"))
+ goto cleanup;
+
+ for (i = 0; i < ARRAY_SIZE(failure_tests); i++) {
+ struct bpf_program *prog;
+ const char *curr_name = failure_tests[i].prog_name;
+
+ prog = bpf_object__find_program_by_name(skel->obj, curr_name);
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+ goto cleanup;
+
+ bpf_program__set_autoload(prog, !strcmp(curr_name, prog_name));
+ }
+
+ err = cgrp_kfunc_failure__load(skel);
+ if (!ASSERT_ERR(err, "unexpected load success"))
+ goto cleanup;
+
+ if (!ASSERT_OK_PTR(strstr(obj_log_buf, expected_err_msg), "expected_err_msg")) {
+ fprintf(stderr, "Expected err_msg: %s\n", expected_err_msg);
+ fprintf(stderr, "Verifier output: %s\n", obj_log_buf);
+ }
+
+cleanup:
+ cgrp_kfunc_failure__destroy(skel);
+}
+
+void test_cgrp_kfunc(void)
+{
+ int i, err;
+
+ err = setup_cgroup_environment();
+ if (!ASSERT_OK(err, "cgrp_env_setup"))
+ goto cleanup;
+
+ for (i = 0; i < ARRAY_SIZE(success_tests); i++) {
+ if (!test__start_subtest(success_tests[i]))
+ continue;
+
+ run_success_test(success_tests[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(failure_tests); i++) {
+ if (!test__start_subtest(failure_tests[i].prog_name))
+ continue;
+
+ verify_fail(failure_tests[i].prog_name, failure_tests[i].expected_err_msg);
+ }
+
+cleanup:
+ cleanup_cgroup_environment();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c b/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c
new file mode 100644
index 000000000000..1c30412ba132
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#define _GNU_SOURCE
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <test_progs.h>
+#include "cgrp_ls_tp_btf.skel.h"
+#include "cgrp_ls_recursion.skel.h"
+#include "cgrp_ls_attach_cgroup.skel.h"
+#include "cgrp_ls_negative.skel.h"
+#include "network_helpers.h"
+
+struct socket_cookie {
+ __u64 cookie_key;
+ __u32 cookie_value;
+};
+
+static void test_tp_btf(int cgroup_fd)
+{
+ struct cgrp_ls_tp_btf *skel;
+ long val1 = 1, val2 = 0;
+ int err;
+
+ skel = cgrp_ls_tp_btf__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ return;
+
+ /* populate a value in map_b */
+ err = bpf_map_update_elem(bpf_map__fd(skel->maps.map_b), &cgroup_fd, &val1, BPF_ANY);
+ if (!ASSERT_OK(err, "map_update_elem"))
+ goto out;
+
+ /* check value */
+ err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.map_b), &cgroup_fd, &val2);
+ if (!ASSERT_OK(err, "map_lookup_elem"))
+ goto out;
+ if (!ASSERT_EQ(val2, 1, "map_lookup_elem, invalid val"))
+ goto out;
+
+ /* delete value */
+ err = bpf_map_delete_elem(bpf_map__fd(skel->maps.map_b), &cgroup_fd);
+ if (!ASSERT_OK(err, "map_delete_elem"))
+ goto out;
+
+ skel->bss->target_pid = syscall(SYS_gettid);
+
+ err = cgrp_ls_tp_btf__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto out;
+
+ syscall(SYS_gettid);
+ syscall(SYS_gettid);
+
+ skel->bss->target_pid = 0;
+
+ /* 3x syscalls: 1x attach and 2x gettid */
+ ASSERT_EQ(skel->bss->enter_cnt, 3, "enter_cnt");
+ ASSERT_EQ(skel->bss->exit_cnt, 3, "exit_cnt");
+ ASSERT_EQ(skel->bss->mismatch_cnt, 0, "mismatch_cnt");
+out:
+ cgrp_ls_tp_btf__destroy(skel);
+}
+
+static void test_attach_cgroup(int cgroup_fd)
+{
+ int server_fd = 0, client_fd = 0, err = 0;
+ socklen_t addr_len = sizeof(struct sockaddr_in6);
+ struct cgrp_ls_attach_cgroup *skel;
+ __u32 cookie_expected_value;
+ struct sockaddr_in6 addr;
+ struct socket_cookie val;
+
+ skel = cgrp_ls_attach_cgroup__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ skel->links.set_cookie = bpf_program__attach_cgroup(
+ skel->progs.set_cookie, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.set_cookie, "prog_attach"))
+ goto out;
+
+ skel->links.update_cookie_sockops = bpf_program__attach_cgroup(
+ skel->progs.update_cookie_sockops, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.update_cookie_sockops, "prog_attach"))
+ goto out;
+
+ skel->links.update_cookie_tracing = bpf_program__attach(
+ skel->progs.update_cookie_tracing);
+ if (!ASSERT_OK_PTR(skel->links.update_cookie_tracing, "prog_attach"))
+ goto out;
+
+ server_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
+ if (!ASSERT_GE(server_fd, 0, "start_server"))
+ goto out;
+
+ client_fd = connect_to_fd(server_fd, 0);
+ if (!ASSERT_GE(client_fd, 0, "connect_to_fd"))
+ goto close_server_fd;
+
+ err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.socket_cookies),
+ &cgroup_fd, &val);
+ if (!ASSERT_OK(err, "map_lookup(socket_cookies)"))
+ goto close_client_fd;
+
+ err = getsockname(client_fd, (struct sockaddr *)&addr, &addr_len);
+ if (!ASSERT_OK(err, "getsockname"))
+ goto close_client_fd;
+
+ cookie_expected_value = (ntohs(addr.sin6_port) << 8) | 0xFF;
+ ASSERT_EQ(val.cookie_value, cookie_expected_value, "cookie_value");
+
+close_client_fd:
+ close(client_fd);
+close_server_fd:
+ close(server_fd);
+out:
+ cgrp_ls_attach_cgroup__destroy(skel);
+}
+
+static void test_recursion(int cgroup_fd)
+{
+ struct cgrp_ls_recursion *skel;
+ int err;
+
+ skel = cgrp_ls_recursion__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ return;
+
+ err = cgrp_ls_recursion__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto out;
+
+ /* trigger sys_enter, make sure it does not cause deadlock */
+ syscall(SYS_gettid);
+
+out:
+ cgrp_ls_recursion__destroy(skel);
+}
+
+static void test_negative(void)
+{
+ struct cgrp_ls_negative *skel;
+
+ skel = cgrp_ls_negative__open_and_load();
+ if (!ASSERT_ERR_PTR(skel, "skel_open_and_load")) {
+ cgrp_ls_negative__destroy(skel);
+ return;
+ }
+}
+
+void test_cgrp_local_storage(void)
+{
+ int cgroup_fd;
+
+ cgroup_fd = test__join_cgroup("/cgrp_local_storage");
+ if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /cgrp_local_storage"))
+ return;
+
+ if (test__start_subtest("tp_btf"))
+ test_tp_btf(cgroup_fd);
+ if (test__start_subtest("attach_cgroup"))
+ test_attach_cgroup(cgroup_fd);
+ if (test__start_subtest("recursion"))
+ test_recursion(cgroup_fd);
+ if (test__start_subtest("negative"))
+ test_negative();
+
+ close(cgroup_fd);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/dynptr.c b/tools/testing/selftests/bpf/prog_tests/dynptr.c
index 8fc4e6c02bfd..b0c06f821cb8 100644
--- a/tools/testing/selftests/bpf/prog_tests/dynptr.c
+++ b/tools/testing/selftests/bpf/prog_tests/dynptr.c
@@ -17,7 +17,7 @@ static struct {
{"ringbuf_missing_release2", "Unreleased reference id=2"},
{"ringbuf_missing_release_callback", "Unreleased reference id"},
{"use_after_invalid", "Expected an initialized dynptr as arg #3"},
- {"ringbuf_invalid_api", "type=mem expected=alloc_mem"},
+ {"ringbuf_invalid_api", "type=mem expected=ringbuf_mem"},
{"add_dynptr_to_map1", "invalid indirect read from stack"},
{"add_dynptr_to_map2", "invalid indirect read from stack"},
{"data_slice_out_of_bounds_ringbuf", "value is outside of the allowed memory range"},
diff --git a/tools/testing/selftests/bpf/prog_tests/empty_skb.c b/tools/testing/selftests/bpf/prog_tests/empty_skb.c
new file mode 100644
index 000000000000..0613f3bb8b5e
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/empty_skb.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <network_helpers.h>
+#include <net/if.h>
+#include "empty_skb.skel.h"
+
+#define SYS(cmd) ({ \
+ if (!ASSERT_OK(system(cmd), (cmd))) \
+ goto out; \
+})
+
+void serial_test_empty_skb(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, tattr);
+ struct empty_skb *bpf_obj = NULL;
+ struct nstoken *tok = NULL;
+ struct bpf_program *prog;
+ char eth_hlen_pp[15];
+ char eth_hlen[14];
+ int veth_ifindex;
+ int ipip_ifindex;
+ int err;
+ int i;
+
+ struct {
+ const char *msg;
+ const void *data_in;
+ __u32 data_size_in;
+ int *ifindex;
+ int err;
+ int ret;
+ bool success_on_tc;
+ } tests[] = {
+ /* Empty packets are always rejected. */
+
+ {
+ /* BPF_PROG_RUN ETH_HLEN size check */
+ .msg = "veth empty ingress packet",
+ .data_in = NULL,
+ .data_size_in = 0,
+ .ifindex = &veth_ifindex,
+ .err = -EINVAL,
+ },
+ {
+ /* BPF_PROG_RUN ETH_HLEN size check */
+ .msg = "ipip empty ingress packet",
+ .data_in = NULL,
+ .data_size_in = 0,
+ .ifindex = &ipip_ifindex,
+ .err = -EINVAL,
+ },
+
+ /* ETH_HLEN-sized packets:
+ * - can not be redirected at LWT_XMIT
+ * - can be redirected at TC to non-tunneling dest
+ */
+
+ {
+ /* __bpf_redirect_common */
+ .msg = "veth ETH_HLEN packet ingress",
+ .data_in = eth_hlen,
+ .data_size_in = sizeof(eth_hlen),
+ .ifindex = &veth_ifindex,
+ .ret = -ERANGE,
+ .success_on_tc = true,
+ },
+ {
+ /* __bpf_redirect_no_mac
+ *
+ * lwt: skb->len=0 <= skb_network_offset=0
+ * tc: skb->len=14 <= skb_network_offset=14
+ */
+ .msg = "ipip ETH_HLEN packet ingress",
+ .data_in = eth_hlen,
+ .data_size_in = sizeof(eth_hlen),
+ .ifindex = &ipip_ifindex,
+ .ret = -ERANGE,
+ },
+
+ /* ETH_HLEN+1-sized packet should be redirected. */
+
+ {
+ .msg = "veth ETH_HLEN+1 packet ingress",
+ .data_in = eth_hlen_pp,
+ .data_size_in = sizeof(eth_hlen_pp),
+ .ifindex = &veth_ifindex,
+ },
+ {
+ .msg = "ipip ETH_HLEN+1 packet ingress",
+ .data_in = eth_hlen_pp,
+ .data_size_in = sizeof(eth_hlen_pp),
+ .ifindex = &ipip_ifindex,
+ },
+ };
+
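+	/* Test topology, all inside a dedicated "empty_skb" netns: a
+	 * veth0/veth1 pair carries the L2 (mac header) case, and an ipip
+	 * tunnel on top of it carries the no-mac (L3) case.
+	 */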
+ SYS("ip netns add empty_skb");
+ tok = open_netns("empty_skb");
+ SYS("ip link add veth0 type veth peer veth1");
+ SYS("ip link set dev veth0 up");
+ SYS("ip link set dev veth1 up");
+ SYS("ip addr add 10.0.0.1/8 dev veth0");
+ SYS("ip addr add 10.0.0.2/8 dev veth1");
+ veth_ifindex = if_nametoindex("veth0");
+
+ SYS("ip link add ipip0 type ipip local 10.0.0.1 remote 10.0.0.2");
+ SYS("ip link set ipip0 up");
+ SYS("ip addr add 192.168.1.1/16 dev ipip0");
+ ipip_ifindex = if_nametoindex("ipip0");
+
+ bpf_obj = empty_skb__open_and_load();
+ if (!ASSERT_OK_PTR(bpf_obj, "open skeleton"))
+ goto out;
+
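+	/* Run every table entry against each program flavor in the
+	 * skeleton (the tc and LWT_XMIT attach points referenced in the
+	 * table comments); "success_on_tc" marks cases where only the tc
+	 * redirect path is expected to succeed.
+	 */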
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ bpf_object__for_each_program(prog, bpf_obj->obj) {
+ char buf[128];
+ bool at_tc = !strncmp(bpf_program__section_name(prog), "tc", 2);
+
+ tattr.data_in = tests[i].data_in;
+ tattr.data_size_in = tests[i].data_size_in;
+
+ tattr.data_size_out = 0;
+ bpf_obj->bss->ifindex = *tests[i].ifindex;
+ bpf_obj->bss->ret = 0;
+ err = bpf_prog_test_run_opts(bpf_program__fd(prog), &tattr);
+ sprintf(buf, "err: %s [%s]", tests[i].msg, bpf_program__name(prog));
+
+ if (at_tc && tests[i].success_on_tc)
+ ASSERT_GE(err, 0, buf);
+ else
+ ASSERT_EQ(err, tests[i].err, buf);
+ sprintf(buf, "ret: %s [%s]", tests[i].msg, bpf_program__name(prog));
+ if (at_tc && tests[i].success_on_tc)
+ ASSERT_GE(bpf_obj->bss->ret, 0, buf);
+ else
+ ASSERT_EQ(bpf_obj->bss->ret, tests[i].ret, buf);
+ }
+ }
+
+out:
+ if (bpf_obj)
+ empty_skb__destroy(bpf_obj);
+ if (tok)
+ close_netns(tok);
+ system("ip netns del empty_skb");
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/hashmap.c b/tools/testing/selftests/bpf/prog_tests/hashmap.c
index 4747ab18f97f..d358a223fd2d 100644
--- a/tools/testing/selftests/bpf/prog_tests/hashmap.c
+++ b/tools/testing/selftests/bpf/prog_tests/hashmap.c
@@ -7,17 +7,18 @@
*/
#include "test_progs.h"
#include "bpf/hashmap.h"
+#include <stddef.h>
static int duration = 0;
-static size_t hash_fn(const void *k, void *ctx)
+static size_t hash_fn(long k, void *ctx)
{
- return (long)k;
+ return k;
}
-static bool equal_fn(const void *a, const void *b, void *ctx)
+static bool equal_fn(long a, long b, void *ctx)
{
- return (long)a == (long)b;
+ return a == b;
}
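+/* Keys and values are plain longs under the updated hashmap API, so an
+ * identity hash and direct equality are all the generic tests need.
+ */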
static inline size_t next_pow_2(size_t n)
@@ -52,8 +53,8 @@ static void test_hashmap_generic(void)
return;
for (i = 0; i < ELEM_CNT; i++) {
- const void *oldk, *k = (const void *)(long)i;
- void *oldv, *v = (void *)(long)(1024 + i);
+ long oldk, k = i;
+ long oldv, v = 1024 + i;
err = hashmap__update(map, k, v, &oldk, &oldv);
if (CHECK(err != -ENOENT, "hashmap__update",
@@ -64,20 +65,18 @@ static void test_hashmap_generic(void)
err = hashmap__add(map, k, v);
} else {
err = hashmap__set(map, k, v, &oldk, &oldv);
- if (CHECK(oldk != NULL || oldv != NULL, "check_kv",
- "unexpected k/v: %p=%p\n", oldk, oldv))
+ if (CHECK(oldk != 0 || oldv != 0, "check_kv",
+ "unexpected k/v: %ld=%ld\n", oldk, oldv))
goto cleanup;
}
- if (CHECK(err, "elem_add", "failed to add k/v %ld = %ld: %d\n",
- (long)k, (long)v, err))
+ if (CHECK(err, "elem_add", "failed to add k/v %ld = %ld: %d\n", k, v, err))
goto cleanup;
if (CHECK(!hashmap__find(map, k, &oldv), "elem_find",
- "failed to find key %ld\n", (long)k))
+ "failed to find key %ld\n", k))
goto cleanup;
- if (CHECK(oldv != v, "elem_val",
- "found value is wrong: %ld\n", (long)oldv))
+ if (CHECK(oldv != v, "elem_val", "found value is wrong: %ld\n", oldv))
goto cleanup;
}
@@ -91,8 +90,8 @@ static void test_hashmap_generic(void)
found_msk = 0;
hashmap__for_each_entry(map, entry, bkt) {
- long k = (long)entry->key;
- long v = (long)entry->value;
+ long k = entry->key;
+ long v = entry->value;
found_msk |= 1ULL << k;
if (CHECK(v - k != 1024, "check_kv",
@@ -104,8 +103,8 @@ static void test_hashmap_generic(void)
goto cleanup;
for (i = 0; i < ELEM_CNT; i++) {
- const void *oldk, *k = (const void *)(long)i;
- void *oldv, *v = (void *)(long)(256 + i);
+ long oldk, k = i;
+ long oldv, v = 256 + i;
err = hashmap__add(map, k, v);
if (CHECK(err != -EEXIST, "hashmap__add",
@@ -119,13 +118,13 @@ static void test_hashmap_generic(void)
if (CHECK(err, "elem_upd",
"failed to update k/v %ld = %ld: %d\n",
- (long)k, (long)v, err))
+ k, v, err))
goto cleanup;
if (CHECK(!hashmap__find(map, k, &oldv), "elem_find",
- "failed to find key %ld\n", (long)k))
+ "failed to find key %ld\n", k))
goto cleanup;
if (CHECK(oldv != v, "elem_val",
- "found value is wrong: %ld\n", (long)oldv))
+ "found value is wrong: %ld\n", oldv))
goto cleanup;
}
@@ -139,8 +138,8 @@ static void test_hashmap_generic(void)
found_msk = 0;
hashmap__for_each_entry_safe(map, entry, tmp, bkt) {
- long k = (long)entry->key;
- long v = (long)entry->value;
+ long k = entry->key;
+ long v = entry->value;
found_msk |= 1ULL << k;
if (CHECK(v - k != 256, "elem_check",
@@ -152,7 +151,7 @@ static void test_hashmap_generic(void)
goto cleanup;
found_cnt = 0;
- hashmap__for_each_key_entry(map, entry, (void *)0) {
+ hashmap__for_each_key_entry(map, entry, 0) {
found_cnt++;
}
if (CHECK(!found_cnt, "found_cnt",
@@ -161,27 +160,25 @@ static void test_hashmap_generic(void)
found_msk = 0;
found_cnt = 0;
- hashmap__for_each_key_entry_safe(map, entry, tmp, (void *)0) {
- const void *oldk, *k;
- void *oldv, *v;
+ hashmap__for_each_key_entry_safe(map, entry, tmp, 0) {
+ long oldk, k;
+ long oldv, v;
k = entry->key;
v = entry->value;
found_cnt++;
- found_msk |= 1ULL << (long)k;
+ found_msk |= 1ULL << k;
if (CHECK(!hashmap__delete(map, k, &oldk, &oldv), "elem_del",
- "failed to delete k/v %ld = %ld\n",
- (long)k, (long)v))
+ "failed to delete k/v %ld = %ld\n", k, v))
goto cleanup;
if (CHECK(oldk != k || oldv != v, "check_old",
"invalid deleted k/v: expected %ld = %ld, got %ld = %ld\n",
- (long)k, (long)v, (long)oldk, (long)oldv))
+ k, v, oldk, oldv))
goto cleanup;
if (CHECK(hashmap__delete(map, k, &oldk, &oldv), "elem_del",
- "unexpectedly deleted k/v %ld = %ld\n",
- (long)oldk, (long)oldv))
+ "unexpectedly deleted k/v %ld = %ld\n", oldk, oldv))
goto cleanup;
}
@@ -198,26 +195,24 @@ static void test_hashmap_generic(void)
goto cleanup;
hashmap__for_each_entry_safe(map, entry, tmp, bkt) {
- const void *oldk, *k;
- void *oldv, *v;
+ long oldk, k;
+ long oldv, v;
k = entry->key;
v = entry->value;
found_cnt++;
- found_msk |= 1ULL << (long)k;
+ found_msk |= 1ULL << k;
if (CHECK(!hashmap__delete(map, k, &oldk, &oldv), "elem_del",
- "failed to delete k/v %ld = %ld\n",
- (long)k, (long)v))
+ "failed to delete k/v %ld = %ld\n", k, v))
goto cleanup;
if (CHECK(oldk != k || oldv != v, "elem_check",
"invalid old k/v: expect %ld = %ld, got %ld = %ld\n",
- (long)k, (long)v, (long)oldk, (long)oldv))
+ k, v, oldk, oldv))
goto cleanup;
if (CHECK(hashmap__delete(map, k, &oldk, &oldv), "elem_del",
- "unexpectedly deleted k/v %ld = %ld\n",
- (long)k, (long)v))
+ "unexpectedly deleted k/v %ld = %ld\n", k, v))
goto cleanup;
}
@@ -235,7 +230,7 @@ static void test_hashmap_generic(void)
hashmap__for_each_entry(map, entry, bkt) {
CHECK(false, "elem_exists",
"unexpected map entries left: %ld = %ld\n",
- (long)entry->key, (long)entry->value);
+ entry->key, entry->value);
goto cleanup;
}
@@ -243,22 +238,107 @@ static void test_hashmap_generic(void)
hashmap__for_each_entry(map, entry, bkt) {
CHECK(false, "elem_exists",
"unexpected map entries left: %ld = %ld\n",
- (long)entry->key, (long)entry->value);
+ entry->key, entry->value);
+ goto cleanup;
+ }
+
+cleanup:
+ hashmap__free(map);
+}
+
+static size_t str_hash_fn(long a, void *ctx)
+{
+ return str_hash((char *)a);
+}
+
+static bool str_equal_fn(long a, long b, void *ctx)
+{
+ return strcmp((char *)a, (char *)b) == 0;
+}
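+/* Pointer keys travel in the same long-sized slots; str_hash_fn and
+ * str_equal_fn above simply reinterpret those longs as C strings.
+ */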
+
+/* Verify that hashmap interface works with pointer keys and values */
+static void test_hashmap_ptr_iface(void)
+{
+ const char *key, *value, *old_key, *old_value;
+ struct hashmap_entry *cur;
+ struct hashmap *map;
+ int err, i, bkt;
+
+ map = hashmap__new(str_hash_fn, str_equal_fn, NULL);
+ if (CHECK(!map, "hashmap__new", "can't allocate hashmap\n"))
goto cleanup;
+
+#define CHECK_STR(fn, var, expected) \
+ CHECK(strcmp(var, (expected)), (fn), \
+ "wrong value of " #var ": '%s' instead of '%s'\n", var, (expected))
+
+ err = hashmap__insert(map, "a", "apricot", HASHMAP_ADD, NULL, NULL);
+ if (CHECK(err, "hashmap__insert", "unexpected error: %d\n", err))
+ goto cleanup;
+
+ err = hashmap__insert(map, "a", "apple", HASHMAP_SET, &old_key, &old_value);
+ if (CHECK(err, "hashmap__insert", "unexpected error: %d\n", err))
+ goto cleanup;
+ CHECK_STR("hashmap__update", old_key, "a");
+ CHECK_STR("hashmap__update", old_value, "apricot");
+
+ err = hashmap__add(map, "b", "banana");
+ if (CHECK(err, "hashmap__add", "unexpected error: %d\n", err))
+ goto cleanup;
+
+ err = hashmap__set(map, "b", "breadfruit", &old_key, &old_value);
+ if (CHECK(err, "hashmap__set", "unexpected error: %d\n", err))
+ goto cleanup;
+ CHECK_STR("hashmap__set", old_key, "b");
+ CHECK_STR("hashmap__set", old_value, "banana");
+
+ err = hashmap__update(map, "b", "blueberry", &old_key, &old_value);
+ if (CHECK(err, "hashmap__update", "unexpected error: %d\n", err))
+ goto cleanup;
+ CHECK_STR("hashmap__update", old_key, "b");
+ CHECK_STR("hashmap__update", old_value, "breadfruit");
+
+ err = hashmap__append(map, "c", "cherry");
+ if (CHECK(err, "hashmap__append", "unexpected error: %d\n", err))
+ goto cleanup;
+
+ if (CHECK(!hashmap__delete(map, "c", &old_key, &old_value),
+ "hashmap__delete", "expected to have entry for 'c'\n"))
+ goto cleanup;
+ CHECK_STR("hashmap__delete", old_key, "c");
+ CHECK_STR("hashmap__delete", old_value, "cherry");
+
+ CHECK(!hashmap__find(map, "b", &value), "hashmap__find", "can't find value for 'b'\n");
+ CHECK_STR("hashmap__find", value, "blueberry");
+
+ if (CHECK(!hashmap__delete(map, "b", NULL, NULL),
+ "hashmap__delete", "expected to have entry for 'b'\n"))
+ goto cleanup;
+
+ i = 0;
+ hashmap__for_each_entry(map, cur, bkt) {
+ if (CHECK(i != 0, "hashmap__for_each_entry", "too many entries"))
+ goto cleanup;
+ key = cur->pkey;
+ value = cur->pvalue;
+ CHECK_STR("entry", key, "a");
+ CHECK_STR("entry", value, "apple");
+ i++;
}
+#undef CHECK_STR
cleanup:
hashmap__free(map);
}
-static size_t collision_hash_fn(const void *k, void *ctx)
+static size_t collision_hash_fn(long k, void *ctx)
{
return 0;
}
static void test_hashmap_multimap(void)
{
- void *k1 = (void *)0, *k2 = (void *)1;
+ long k1 = 0, k2 = 1;
struct hashmap_entry *entry;
struct hashmap *map;
long found_msk;
@@ -273,23 +353,23 @@ static void test_hashmap_multimap(void)
* [0] -> 1, 2, 4;
* [1] -> 8, 16, 32;
*/
- err = hashmap__append(map, k1, (void *)1);
+ err = hashmap__append(map, k1, 1);
if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err))
goto cleanup;
- err = hashmap__append(map, k1, (void *)2);
+ err = hashmap__append(map, k1, 2);
if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err))
goto cleanup;
- err = hashmap__append(map, k1, (void *)4);
+ err = hashmap__append(map, k1, 4);
if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err))
goto cleanup;
- err = hashmap__append(map, k2, (void *)8);
+ err = hashmap__append(map, k2, 8);
if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err))
goto cleanup;
- err = hashmap__append(map, k2, (void *)16);
+ err = hashmap__append(map, k2, 16);
if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err))
goto cleanup;
- err = hashmap__append(map, k2, (void *)32);
+ err = hashmap__append(map, k2, 32);
if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err))
goto cleanup;
@@ -300,7 +380,7 @@ static void test_hashmap_multimap(void)
/* verify global iteration still works and sees all values */
found_msk = 0;
hashmap__for_each_entry(map, entry, bkt) {
- found_msk |= (long)entry->value;
+ found_msk |= entry->value;
}
if (CHECK(found_msk != (1 << 6) - 1, "found_msk",
"not all keys iterated: %lx\n", found_msk))
@@ -309,7 +389,7 @@ static void test_hashmap_multimap(void)
/* iterate values for key 1 */
found_msk = 0;
hashmap__for_each_key_entry(map, entry, k1) {
- found_msk |= (long)entry->value;
+ found_msk |= entry->value;
}
if (CHECK(found_msk != (1 | 2 | 4), "found_msk",
"invalid k1 values: %lx\n", found_msk))
@@ -318,7 +398,7 @@ static void test_hashmap_multimap(void)
/* iterate values for key 2 */
found_msk = 0;
hashmap__for_each_key_entry(map, entry, k2) {
- found_msk |= (long)entry->value;
+ found_msk |= entry->value;
}
if (CHECK(found_msk != (8 | 16 | 32), "found_msk",
"invalid k2 values: %lx\n", found_msk))
@@ -333,7 +413,7 @@ static void test_hashmap_empty()
struct hashmap_entry *entry;
int bkt;
struct hashmap *map;
- void *k = (void *)0;
+ long k = 0;
/* force collisions */
map = hashmap__new(hash_fn, equal_fn, NULL);
@@ -374,4 +454,6 @@ void test_hashmap()
test_hashmap_multimap();
if (test__start_subtest("empty"))
test_hashmap_empty();
+ if (test__start_subtest("ptr_iface"))
+ test_hashmap_ptr_iface();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c b/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c
index c210657d4d0a..55d641c1f126 100644
--- a/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c
+++ b/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c
@@ -22,7 +22,7 @@ static struct {
"arg#0 pointer type STRUCT bpf_dynptr_kern points to unsupported dynamic pointer type", 0},
{"not_valid_dynptr",
"arg#0 pointer type STRUCT bpf_dynptr_kern must be valid and initialized", 0},
- {"not_ptr_to_stack", "arg#0 pointer type STRUCT bpf_dynptr_kern not to stack", 0},
+ {"not_ptr_to_stack", "arg#0 expected pointer to stack", 0},
{"dynptr_data_null", NULL, -EBADMSG},
};
diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
index a4b4133d39e9..c6f37e825f11 100644
--- a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
@@ -312,12 +312,12 @@ static inline __u64 get_time_ns(void)
return (__u64) t.tv_sec * 1000000000 + t.tv_nsec;
}
-static size_t symbol_hash(const void *key, void *ctx __maybe_unused)
+static size_t symbol_hash(long key, void *ctx __maybe_unused)
{
return str_hash((const char *) key);
}
-static bool symbol_equal(const void *key1, const void *key2, void *ctx __maybe_unused)
+static bool symbol_equal(long key1, long key2, void *ctx __maybe_unused)
{
return strcmp((const char *) key1, (const char *) key2) == 0;
}
@@ -325,7 +325,7 @@ static bool symbol_equal(const void *key1, const void *key2, void *ctx __maybe_u
static int get_syms(char ***symsp, size_t *cntp)
{
size_t cap = 0, cnt = 0, i;
- char *name, **syms = NULL;
+ char *name = NULL, **syms = NULL;
struct hashmap *map;
char buf[256];
FILE *f;
@@ -352,6 +352,8 @@ static int get_syms(char ***symsp, size_t *cntp)
/* skip modules */
if (strchr(buf, '['))
continue;
+
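+		/* "%ms" allocates a new buffer on every successful scan, so
+		 * free any name carried over from a previously skipped line.
+		 */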
+ free(name);
if (sscanf(buf, "%ms$*[^\n]\n", &name) != 1)
continue;
/*
@@ -371,32 +373,32 @@ static int get_syms(char ***symsp, size_t *cntp)
if (!strncmp(name, "__ftrace_invalid_address__",
sizeof("__ftrace_invalid_address__") - 1))
continue;
- err = hashmap__add(map, name, NULL);
- if (err) {
- free(name);
- if (err == -EEXIST)
- continue;
+
+ err = hashmap__add(map, name, 0);
+ if (err == -EEXIST)
+ continue;
+ if (err)
goto error;
- }
+
err = libbpf_ensure_mem((void **) &syms, &cap,
sizeof(*syms), cnt + 1);
- if (err) {
- free(name);
+ if (err)
goto error;
- }
- syms[cnt] = name;
- cnt++;
+
+ syms[cnt++] = name;
+ name = NULL;
}
*symsp = syms;
*cntp = cnt;
error:
+ free(name);
fclose(f);
hashmap__free(map);
if (err) {
for (i = 0; i < cnt; i++)
- free(syms[cnt]);
+ free(syms[i]);
free(syms);
}
return err;
diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_testmod_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_testmod_test.c
new file mode 100644
index 000000000000..1fbe7e4ac00a
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_testmod_test.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include "kprobe_multi.skel.h"
+#include "trace_helpers.h"
+#include "bpf/libbpf_internal.h"
+
+static void kprobe_multi_testmod_check(struct kprobe_multi *skel)
+{
+ ASSERT_EQ(skel->bss->kprobe_testmod_test1_result, 1, "kprobe_test1_result");
+ ASSERT_EQ(skel->bss->kprobe_testmod_test2_result, 1, "kprobe_test2_result");
+ ASSERT_EQ(skel->bss->kprobe_testmod_test3_result, 1, "kprobe_test3_result");
+
+ ASSERT_EQ(skel->bss->kretprobe_testmod_test1_result, 1, "kretprobe_test1_result");
+ ASSERT_EQ(skel->bss->kretprobe_testmod_test2_result, 1, "kretprobe_test2_result");
+ ASSERT_EQ(skel->bss->kretprobe_testmod_test3_result, 1, "kretprobe_test3_result");
+}
+
+static void test_testmod_attach_api(struct bpf_kprobe_multi_opts *opts)
+{
+ struct kprobe_multi *skel = NULL;
+
+ skel = kprobe_multi__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
+ return;
+
+ skel->bss->pid = getpid();
+
+ skel->links.test_kprobe_testmod = bpf_program__attach_kprobe_multi_opts(
+ skel->progs.test_kprobe_testmod,
+ NULL, opts);
+ if (!skel->links.test_kprobe_testmod)
+ goto cleanup;
+
+ opts->retprobe = true;
+ skel->links.test_kretprobe_testmod = bpf_program__attach_kprobe_multi_opts(
+ skel->progs.test_kretprobe_testmod,
+ NULL, opts);
+ if (!skel->links.test_kretprobe_testmod)
+ goto cleanup;
+
+ ASSERT_OK(trigger_module_test_read(1), "trigger_read");
+ kprobe_multi_testmod_check(skel);
+
+cleanup:
+ kprobe_multi__destroy(skel);
+}
+
+static void test_testmod_attach_api_addrs(void)
+{
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+ unsigned long long addrs[3];
+
+ addrs[0] = ksym_get_addr("bpf_testmod_fentry_test1");
+ ASSERT_NEQ(addrs[0], 0, "ksym_get_addr");
+ addrs[1] = ksym_get_addr("bpf_testmod_fentry_test2");
+ ASSERT_NEQ(addrs[1], 0, "ksym_get_addr");
+ addrs[2] = ksym_get_addr("bpf_testmod_fentry_test3");
+ ASSERT_NEQ(addrs[2], 0, "ksym_get_addr");
+
+ opts.addrs = (const unsigned long *) addrs;
+ opts.cnt = ARRAY_SIZE(addrs);
+
+ test_testmod_attach_api(&opts);
+}
+
+static void test_testmod_attach_api_syms(void)
+{
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+ const char *syms[3] = {
+ "bpf_testmod_fentry_test1",
+ "bpf_testmod_fentry_test2",
+ "bpf_testmod_fentry_test3",
+ };
+
+ opts.syms = syms;
+ opts.cnt = ARRAY_SIZE(syms);
+ test_testmod_attach_api(&opts);
+}
+
+void serial_test_kprobe_multi_testmod_test(void)
+{
+ if (!ASSERT_OK(load_kallsyms_refresh(), "load_kallsyms_refresh"))
+ return;
+
+ if (test__start_subtest("testmod_attach_api_syms"))
+ test_testmod_attach_api_syms();
+ if (test__start_subtest("testmod_attach_api_addrs"))
+ test_testmod_attach_api_addrs();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/libbpf_get_fd_by_id_opts.c b/tools/testing/selftests/bpf/prog_tests/libbpf_get_fd_by_id_opts.c
new file mode 100644
index 000000000000..25e5dfa9c315
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/libbpf_get_fd_by_id_opts.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH
+ *
+ * Author: Roberto Sassu <roberto.sassu@huawei.com>
+ */
+
+#include <test_progs.h>
+
+#include "test_libbpf_get_fd_by_id_opts.skel.h"
+
+void test_libbpf_get_fd_by_id_opts(void)
+{
+ struct test_libbpf_get_fd_by_id_opts *skel;
+ struct bpf_map_info info_m = {};
+ __u32 len = sizeof(info_m), value;
+ int ret, zero = 0, fd = -1;
+ LIBBPF_OPTS(bpf_get_fd_by_id_opts, fd_opts_rdonly,
+ .open_flags = BPF_F_RDONLY,
+ );
+
+ skel = test_libbpf_get_fd_by_id_opts__open_and_load();
+ if (!ASSERT_OK_PTR(skel,
+ "test_libbpf_get_fd_by_id_opts__open_and_load"))
+ return;
+
+ ret = test_libbpf_get_fd_by_id_opts__attach(skel);
+ if (!ASSERT_OK(ret, "test_libbpf_get_fd_by_id_opts__attach"))
+ goto close_prog;
+
+ ret = bpf_obj_get_info_by_fd(bpf_map__fd(skel->maps.data_input),
+ &info_m, &len);
+ if (!ASSERT_OK(ret, "bpf_obj_get_info_by_fd"))
+ goto close_prog;
+
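+	/* The skeleton's attached program denies non-read-only access to
+	 * data_input, so fetching an fd without BPF_F_RDONLY must fail.
+	 */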
+ fd = bpf_map_get_fd_by_id(info_m.id);
+ if (!ASSERT_LT(fd, 0, "bpf_map_get_fd_by_id"))
+ goto close_prog;
+
+ fd = bpf_map_get_fd_by_id_opts(info_m.id, NULL);
+ if (!ASSERT_LT(fd, 0, "bpf_map_get_fd_by_id_opts"))
+ goto close_prog;
+
+ fd = bpf_map_get_fd_by_id_opts(info_m.id, &fd_opts_rdonly);
+ if (!ASSERT_GE(fd, 0, "bpf_map_get_fd_by_id_opts"))
+ goto close_prog;
+
+ /* Map lookup should work with read-only fd. */
+ ret = bpf_map_lookup_elem(fd, &zero, &value);
+ if (!ASSERT_OK(ret, "bpf_map_lookup_elem"))
+ goto close_prog;
+
+ if (!ASSERT_EQ(value, 0, "map value mismatch"))
+ goto close_prog;
+
+ /* Map update should not work with read-only fd. */
+ ret = bpf_map_update_elem(fd, &zero, &len, BPF_ANY);
+ if (!ASSERT_LT(ret, 0, "bpf_map_update_elem"))
+ goto close_prog;
+
+ /* Map update should work with read-write fd. */
+ ret = bpf_map_update_elem(bpf_map__fd(skel->maps.data_input), &zero,
+ &len, BPF_ANY);
+ if (!ASSERT_OK(ret, "bpf_map_update_elem"))
+ goto close_prog;
+
+ /* Prog get fd with opts set should not work (no kernel support). */
+ ret = bpf_prog_get_fd_by_id_opts(0, &fd_opts_rdonly);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_prog_get_fd_by_id_opts"))
+ goto close_prog;
+
+ /* Link get fd with opts set should not work (no kernel support). */
+ ret = bpf_link_get_fd_by_id_opts(0, &fd_opts_rdonly);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_link_get_fd_by_id_opts"))
+ goto close_prog;
+
+ /* BTF get fd with opts set should not work (no kernel support). */
+ ret = bpf_btf_get_fd_by_id_opts(0, &fd_opts_rdonly);
+ ASSERT_EQ(ret, -EINVAL, "bpf_btf_get_fd_by_id_opts");
+
+close_prog:
+ if (fd >= 0)
+ close(fd);
+
+ test_libbpf_get_fd_by_id_opts__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/libbpf_str.c b/tools/testing/selftests/bpf/prog_tests/libbpf_str.c
index 93e9cddaadcf..efb8bd43653c 100644
--- a/tools/testing/selftests/bpf/prog_tests/libbpf_str.c
+++ b/tools/testing/selftests/bpf/prog_tests/libbpf_str.c
@@ -139,6 +139,14 @@ static void test_libbpf_bpf_map_type_str(void)
snprintf(buf, sizeof(buf), "BPF_MAP_TYPE_%s", map_type_str);
uppercase(buf);
+	/* Special case: BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED shares its
+	 * enum value (map_type) with BPF_MAP_TYPE_CGROUP_STORAGE, and
+	 * libbpf_bpf_map_type_str() resolves that value to the latter name.
+	 */
+ if (strcmp(map_type_name, "BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED") == 0)
+ continue;
+
ASSERT_STREQ(buf, map_type_name, "exp_str_value");
}
diff --git a/tools/testing/selftests/bpf/prog_tests/linked_list.c b/tools/testing/selftests/bpf/prog_tests/linked_list.c
new file mode 100644
index 000000000000..9a7d4c47af63
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/linked_list.c
@@ -0,0 +1,740 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <bpf/btf.h>
+#include <test_btf.h>
+#include <linux/btf.h>
+#include <test_progs.h>
+#include <network_helpers.h>
+
+#include "linked_list.skel.h"
+#include "linked_list_fail.skel.h"
+
+static char log_buf[1024 * 1024];
+
+static struct {
+ const char *prog_name;
+ const char *err_msg;
+} linked_list_fail_tests[] = {
+#define TEST(test, off) \
+ { #test "_missing_lock_push_front", \
+ "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \
+ { #test "_missing_lock_push_back", \
+ "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \
+ { #test "_missing_lock_pop_front", \
+ "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \
+ { #test "_missing_lock_pop_back", \
+ "bpf_spin_lock at off=" #off " must be held for bpf_list_head" },
+ TEST(kptr, 32)
+ TEST(global, 16)
+ TEST(map, 0)
+ TEST(inner_map, 0)
+#undef TEST
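+	/* Each TEST(scope, off) above expanded to four table entries, one
+	 * per list operation, all expecting the missing-lock message.
+	 */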
+#define TEST(test, op) \
+ { #test "_kptr_incorrect_lock_" #op, \
+ "held lock and object are not in the same allocation\n" \
+ "bpf_spin_lock at off=32 must be held for bpf_list_head" }, \
+ { #test "_global_incorrect_lock_" #op, \
+ "held lock and object are not in the same allocation\n" \
+ "bpf_spin_lock at off=16 must be held for bpf_list_head" }, \
+ { #test "_map_incorrect_lock_" #op, \
+ "held lock and object are not in the same allocation\n" \
+ "bpf_spin_lock at off=0 must be held for bpf_list_head" }, \
+ { #test "_inner_map_incorrect_lock_" #op, \
+ "held lock and object are not in the same allocation\n" \
+ "bpf_spin_lock at off=0 must be held for bpf_list_head" },
+ TEST(kptr, push_front)
+ TEST(kptr, push_back)
+ TEST(kptr, pop_front)
+ TEST(kptr, pop_back)
+ TEST(global, push_front)
+ TEST(global, push_back)
+ TEST(global, pop_front)
+ TEST(global, pop_back)
+ TEST(map, push_front)
+ TEST(map, push_back)
+ TEST(map, pop_front)
+ TEST(map, pop_back)
+ TEST(inner_map, push_front)
+ TEST(inner_map, push_back)
+ TEST(inner_map, pop_front)
+ TEST(inner_map, pop_back)
+#undef TEST
+ { "map_compat_kprobe", "tracing progs cannot use bpf_list_head yet" },
+ { "map_compat_kretprobe", "tracing progs cannot use bpf_list_head yet" },
+ { "map_compat_tp", "tracing progs cannot use bpf_list_head yet" },
+ { "map_compat_perf", "tracing progs cannot use bpf_list_head yet" },
+ { "map_compat_raw_tp", "tracing progs cannot use bpf_list_head yet" },
+ { "map_compat_raw_tp_w", "tracing progs cannot use bpf_list_head yet" },
+ { "obj_type_id_oor", "local type ID argument must be in range [0, U32_MAX]" },
+ { "obj_new_no_composite", "bpf_obj_new type ID argument must be of a struct" },
+ { "obj_new_no_struct", "bpf_obj_new type ID argument must be of a struct" },
+ { "obj_drop_non_zero_off", "R1 must have zero offset when passed to release func" },
+ { "new_null_ret", "R0 invalid mem access 'ptr_or_null_'" },
+ { "obj_new_acq", "Unreleased reference id=" },
+ { "use_after_drop", "invalid mem access 'scalar'" },
+ { "ptr_walk_scalar", "type=scalar expected=percpu_ptr_" },
+ { "direct_read_lock", "direct access to bpf_spin_lock is disallowed" },
+ { "direct_write_lock", "direct access to bpf_spin_lock is disallowed" },
+ { "direct_read_head", "direct access to bpf_list_head is disallowed" },
+ { "direct_write_head", "direct access to bpf_list_head is disallowed" },
+ { "direct_read_node", "direct access to bpf_list_node is disallowed" },
+ { "direct_write_node", "direct access to bpf_list_node is disallowed" },
+ { "write_after_push_front", "only read is supported" },
+ { "write_after_push_back", "only read is supported" },
+ { "use_after_unlock_push_front", "invalid mem access 'scalar'" },
+ { "use_after_unlock_push_back", "invalid mem access 'scalar'" },
+ { "double_push_front", "arg#1 expected pointer to allocated object" },
+ { "double_push_back", "arg#1 expected pointer to allocated object" },
+ { "no_node_value_type", "bpf_list_node not found at offset=0" },
+ { "incorrect_value_type",
+ "operation on bpf_list_head expects arg#1 bpf_list_node at offset=0 in struct foo, "
+ "but arg is at offset=0 in struct bar" },
+ { "incorrect_node_var_off", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" },
+ { "incorrect_node_off1", "bpf_list_node not found at offset=1" },
+ { "incorrect_node_off2", "arg#1 offset=40, but expected bpf_list_node at offset=0 in struct foo" },
+ { "no_head_type", "bpf_list_head not found at offset=0" },
+ { "incorrect_head_var_off1", "R1 doesn't have constant offset" },
+ { "incorrect_head_var_off2", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" },
+ { "incorrect_head_off1", "bpf_list_head not found at offset=17" },
+ { "incorrect_head_off2", "bpf_list_head not found at offset=1" },
+ { "pop_front_off",
+ "15: (bf) r1 = r6 ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=40,imm=0) "
+ "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=40,imm=0) refs=2,4\n"
+ "16: (85) call bpf_this_cpu_ptr#154\nR1 type=ptr_or_null_ expected=percpu_ptr_" },
+ { "pop_back_off",
+ "15: (bf) r1 = r6 ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=40,imm=0) "
+ "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=40,imm=0) refs=2,4\n"
+ "16: (85) call bpf_this_cpu_ptr#154\nR1 type=ptr_or_null_ expected=percpu_ptr_" },
+};
+
+static void test_linked_list_fail_prog(const char *prog_name, const char *err_msg)
+{
+ LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf,
+ .kernel_log_size = sizeof(log_buf),
+ .kernel_log_level = 1);
+ struct linked_list_fail *skel;
+ struct bpf_program *prog;
+ int ret;
+
+ skel = linked_list_fail__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "linked_list_fail__open_opts"))
+ return;
+
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+ goto end;
+
+ bpf_program__set_autoload(prog, true);
+
+ ret = linked_list_fail__load(skel);
+ if (!ASSERT_ERR(ret, "linked_list_fail__load must fail"))
+ goto end;
+
+ if (!ASSERT_OK_PTR(strstr(log_buf, err_msg), "expected error message")) {
+ fprintf(stderr, "Expected: %s\n", err_msg);
+ fprintf(stderr, "Verifier: %s\n", log_buf);
+ }
+
+end:
+ linked_list_fail__destroy(skel);
+}
+
+static void clear_fields(struct bpf_map *map)
+{
+ char buf[24];
+ int key = 0;
+
+ memset(buf, 0xff, sizeof(buf));
+ ASSERT_OK(bpf_map__update_elem(map, &key, sizeof(key), buf, sizeof(buf), 0), "check_and_free_fields");
+}
+
+enum {
+ TEST_ALL,
+ PUSH_POP,
+ PUSH_POP_MULT,
+ LIST_IN_LIST,
+};
+
+static void test_linked_list_success(int mode, bool leave_in_map)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ struct linked_list *skel;
+ int ret;
+
+ skel = linked_list__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "linked_list__open_and_load"))
+ return;
+
+ if (mode == LIST_IN_LIST)
+ goto lil;
+ if (mode == PUSH_POP_MULT)
+ goto ppm;
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_push_pop), &opts);
+ ASSERT_OK(ret, "map_list_push_pop");
+ ASSERT_OK(opts.retval, "map_list_push_pop retval");
+ if (!leave_in_map)
+ clear_fields(skel->maps.array_map);
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_push_pop), &opts);
+ ASSERT_OK(ret, "inner_map_list_push_pop");
+ ASSERT_OK(opts.retval, "inner_map_list_push_pop retval");
+ if (!leave_in_map)
+ clear_fields(skel->maps.inner_map);
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_push_pop), &opts);
+ ASSERT_OK(ret, "global_list_push_pop");
+ ASSERT_OK(opts.retval, "global_list_push_pop retval");
+ if (!leave_in_map)
+ clear_fields(skel->maps.bss_A);
+
+ if (mode == PUSH_POP)
+ goto end;
+
+ppm:
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_push_pop_multiple), &opts);
+ ASSERT_OK(ret, "map_list_push_pop_multiple");
+ ASSERT_OK(opts.retval, "map_list_push_pop_multiple retval");
+ if (!leave_in_map)
+ clear_fields(skel->maps.array_map);
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_push_pop_multiple), &opts);
+ ASSERT_OK(ret, "inner_map_list_push_pop_multiple");
+ ASSERT_OK(opts.retval, "inner_map_list_push_pop_multiple retval");
+ if (!leave_in_map)
+ clear_fields(skel->maps.inner_map);
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_push_pop_multiple), &opts);
+ ASSERT_OK(ret, "global_list_push_pop_multiple");
+ ASSERT_OK(opts.retval, "global_list_push_pop_multiple retval");
+ if (!leave_in_map)
+ clear_fields(skel->maps.bss_A);
+
+ if (mode == PUSH_POP_MULT)
+ goto end;
+
+lil:
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_in_list), &opts);
+ ASSERT_OK(ret, "map_list_in_list");
+ ASSERT_OK(opts.retval, "map_list_in_list retval");
+ if (!leave_in_map)
+ clear_fields(skel->maps.array_map);
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_in_list), &opts);
+ ASSERT_OK(ret, "inner_map_list_in_list");
+ ASSERT_OK(opts.retval, "inner_map_list_in_list retval");
+ if (!leave_in_map)
+ clear_fields(skel->maps.inner_map);
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_in_list), &opts);
+ ASSERT_OK(ret, "global_list_in_list");
+ ASSERT_OK(opts.retval, "global_list_in_list retval");
+ if (!leave_in_map)
+ clear_fields(skel->maps.bss_A);
+end:
+ linked_list__destroy(skel);
+}
+
+#define SPIN_LOCK 2
+#define LIST_HEAD 3
+#define LIST_NODE 4
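+/* init_btf() below adds types in a fixed order, so the IDs above are
+ * stable: "int" takes ID 1 and the three structs take IDs 2-4.
+ */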
+
+static struct btf *init_btf(void)
+{
+ int id, lid, hid, nid;
+ struct btf *btf;
+
+ btf = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf, "btf__new_empty"))
+ return NULL;
+ id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
+ if (!ASSERT_EQ(id, 1, "btf__add_int"))
+ goto end;
+ lid = btf__add_struct(btf, "bpf_spin_lock", 4);
+ if (!ASSERT_EQ(lid, SPIN_LOCK, "btf__add_struct bpf_spin_lock"))
+ goto end;
+ hid = btf__add_struct(btf, "bpf_list_head", 16);
+ if (!ASSERT_EQ(hid, LIST_HEAD, "btf__add_struct bpf_list_head"))
+ goto end;
+ nid = btf__add_struct(btf, "bpf_list_node", 16);
+ if (!ASSERT_EQ(nid, LIST_NODE, "btf__add_struct bpf_list_node"))
+ goto end;
+ return btf;
+end:
+ btf__free(btf);
+ return NULL;
+}
+
+static void test_btf(void)
+{
+ struct btf *btf = NULL;
+ int id, err;
+
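+	/* Each subtest uses while (test__start_subtest(...)) { ... break; }
+	 * so that "break" doubles as an early exit when an assertion fails.
+	 */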
+ while (test__start_subtest("btf: too many locks")) {
+ btf = init_btf();
+ if (!ASSERT_OK_PTR(btf, "init_btf"))
+ break;
+ id = btf__add_struct(btf, "foo", 24);
+ if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
+ break;
+ err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_struct foo::a"))
+ break;
+ err = btf__add_field(btf, "b", SPIN_LOCK, 32, 0);
+ if (!ASSERT_OK(err, "btf__add_struct foo::a"))
+ break;
+ err = btf__add_field(btf, "c", LIST_HEAD, 64, 0);
+ if (!ASSERT_OK(err, "btf__add_struct foo::a"))
+ break;
+
+ err = btf__load_into_kernel(btf);
+ ASSERT_EQ(err, -E2BIG, "check btf");
+ btf__free(btf);
+ break;
+ }
+
+ while (test__start_subtest("btf: missing lock")) {
+ btf = init_btf();
+ if (!ASSERT_OK_PTR(btf, "init_btf"))
+ break;
+ id = btf__add_struct(btf, "foo", 16);
+ if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
+ break;
+ err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_struct foo::a"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:baz:a", 5, 0);
+ if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:baz:a"))
+ break;
+ id = btf__add_struct(btf, "baz", 16);
+ if (!ASSERT_EQ(id, 7, "btf__add_struct baz"))
+ break;
+ err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field baz::a"))
+ break;
+
+ err = btf__load_into_kernel(btf);
+ ASSERT_EQ(err, -EINVAL, "check btf");
+ btf__free(btf);
+ break;
+ }
+
+ while (test__start_subtest("btf: bad offset")) {
+ btf = init_btf();
+ if (!ASSERT_OK_PTR(btf, "init_btf"))
+ break;
+ id = btf__add_struct(btf, "foo", 36);
+ if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
+ break;
+ err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::a"))
+ break;
+ err = btf__add_field(btf, "b", LIST_NODE, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::b"))
+ break;
+ err = btf__add_field(btf, "c", SPIN_LOCK, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::c"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0);
+ if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b"))
+ break;
+
+ err = btf__load_into_kernel(btf);
+ ASSERT_EQ(err, -EEXIST, "check btf");
+ btf__free(btf);
+ break;
+ }
+
+ while (test__start_subtest("btf: missing contains:")) {
+ btf = init_btf();
+ if (!ASSERT_OK_PTR(btf, "init_btf"))
+ break;
+ id = btf__add_struct(btf, "foo", 24);
+ if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
+ break;
+ err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::a"))
+ break;
+ err = btf__add_field(btf, "b", LIST_HEAD, 64, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::b"))
+ break;
+
+ err = btf__load_into_kernel(btf);
+ ASSERT_EQ(err, -EINVAL, "check btf");
+ btf__free(btf);
+ break;
+ }
+
+ while (test__start_subtest("btf: missing struct")) {
+ btf = init_btf();
+ if (!ASSERT_OK_PTR(btf, "init_btf"))
+ break;
+ id = btf__add_struct(btf, "foo", 24);
+ if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
+ break;
+ err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::a"))
+ break;
+ err = btf__add_field(btf, "b", LIST_HEAD, 64, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::b"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:bar:bar", 5, 1);
+ if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:bar"))
+ break;
+
+ err = btf__load_into_kernel(btf);
+ ASSERT_EQ(err, -ENOENT, "check btf");
+ btf__free(btf);
+ break;
+ }
+
+ while (test__start_subtest("btf: missing node")) {
+ btf = init_btf();
+ if (!ASSERT_OK_PTR(btf, "init_btf"))
+ break;
+ id = btf__add_struct(btf, "foo", 24);
+ if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
+ break;
+ err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::a"))
+ break;
+ err = btf__add_field(btf, "b", LIST_HEAD, 64, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::b"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:foo:c", 5, 1);
+ if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:c"))
+ break;
+
+ err = btf__load_into_kernel(btf);
+ btf__free(btf);
+ ASSERT_EQ(err, -ENOENT, "check btf");
+ break;
+ }
+
+ while (test__start_subtest("btf: node incorrect type")) {
+ btf = init_btf();
+ if (!ASSERT_OK_PTR(btf, "init_btf"))
+ break;
+ id = btf__add_struct(btf, "foo", 20);
+ if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
+ break;
+ err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::a"))
+ break;
+ err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::b"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:bar:a", 5, 0);
+ if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:a"))
+ break;
+ id = btf__add_struct(btf, "bar", 4);
+ if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
+ break;
+ err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::a"))
+ break;
+
+ err = btf__load_into_kernel(btf);
+ ASSERT_EQ(err, -EINVAL, "check btf");
+ btf__free(btf);
+ break;
+ }
+
+ while (test__start_subtest("btf: multiple bpf_list_node with name b")) {
+ btf = init_btf();
+ if (!ASSERT_OK_PTR(btf, "init_btf"))
+ break;
+ id = btf__add_struct(btf, "foo", 52);
+ if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
+ break;
+ err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::a"))
+ break;
+ err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::b"))
+ break;
+ err = btf__add_field(btf, "b", LIST_NODE, 256, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::c"))
+ break;
+ err = btf__add_field(btf, "d", SPIN_LOCK, 384, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::d"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0);
+ if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b"))
+ break;
+
+ err = btf__load_into_kernel(btf);
+ ASSERT_EQ(err, -EINVAL, "check btf");
+ btf__free(btf);
+ break;
+ }
+
+ while (test__start_subtest("btf: owning | owned AA cycle")) {
+ btf = init_btf();
+ if (!ASSERT_OK_PTR(btf, "init_btf"))
+ break;
+ id = btf__add_struct(btf, "foo", 36);
+ if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
+ break;
+ err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::a"))
+ break;
+ err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::b"))
+ break;
+ err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::c"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0);
+ if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b"))
+ break;
+
+ err = btf__load_into_kernel(btf);
+ ASSERT_EQ(err, -ELOOP, "check btf");
+ btf__free(btf);
+ break;
+ }
+
+ while (test__start_subtest("btf: owning | owned ABA cycle")) {
+ btf = init_btf();
+ if (!ASSERT_OK_PTR(btf, "init_btf"))
+ break;
+ id = btf__add_struct(btf, "foo", 36);
+ if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
+ break;
+ err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::a"))
+ break;
+ err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::b"))
+ break;
+ err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::c"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
+ if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
+ break;
+ id = btf__add_struct(btf, "bar", 36);
+ if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
+ break;
+ err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::a"))
+ break;
+ err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::b"))
+ break;
+ err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::c"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:foo:b", 7, 0);
+ if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:foo:b"))
+ break;
+
+ err = btf__load_into_kernel(btf);
+ ASSERT_EQ(err, -ELOOP, "check btf");
+ btf__free(btf);
+ break;
+ }
+
+ while (test__start_subtest("btf: owning -> owned")) {
+ btf = init_btf();
+ if (!ASSERT_OK_PTR(btf, "init_btf"))
+ break;
+ id = btf__add_struct(btf, "foo", 20);
+ if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
+ break;
+ err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::a"))
+ break;
+ err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::b"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:bar:a", 5, 0);
+ if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:a"))
+ break;
+ id = btf__add_struct(btf, "bar", 16);
+ if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
+ break;
+ err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::a"))
+ break;
+
+ err = btf__load_into_kernel(btf);
+ ASSERT_EQ(err, 0, "check btf");
+ btf__free(btf);
+ break;
+ }
+
+ while (test__start_subtest("btf: owning -> owning | owned -> owned")) {
+ btf = init_btf();
+ if (!ASSERT_OK_PTR(btf, "init_btf"))
+ break;
+ id = btf__add_struct(btf, "foo", 20);
+ if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
+ break;
+ err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::a"))
+ break;
+ err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::b"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
+ if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
+ break;
+ id = btf__add_struct(btf, "bar", 36);
+ if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
+ break;
+ err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::a"))
+ break;
+ err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::b"))
+ break;
+ err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::c"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:baz:a", 7, 0);
+ if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:a"))
+ break;
+ id = btf__add_struct(btf, "baz", 16);
+ if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))
+ break;
+ err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field baz:a"))
+ break;
+
+ err = btf__load_into_kernel(btf);
+ ASSERT_EQ(err, 0, "check btf");
+ btf__free(btf);
+ break;
+ }
+
+ while (test__start_subtest("btf: owning | owned -> owning | owned -> owned")) {
+ btf = init_btf();
+ if (!ASSERT_OK_PTR(btf, "init_btf"))
+ break;
+ id = btf__add_struct(btf, "foo", 36);
+ if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
+ break;
+ err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::a"))
+ break;
+ err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::b"))
+ break;
+ err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::c"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
+ if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
+ break;
+ id = btf__add_struct(btf, "bar", 36);
+ if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
+ break;
+ err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar:a"))
+ break;
+ err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar:b"))
+ break;
+ err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar:c"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:baz:a", 7, 0);
+ if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:a"))
+ break;
+ id = btf__add_struct(btf, "baz", 16);
+ if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))
+ break;
+ err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field baz:a"))
+ break;
+
+ err = btf__load_into_kernel(btf);
+ ASSERT_EQ(err, -ELOOP, "check btf");
+ btf__free(btf);
+ break;
+ }
+
+ while (test__start_subtest("btf: owning -> owning | owned -> owning | owned -> owned")) {
+ btf = init_btf();
+ if (!ASSERT_OK_PTR(btf, "init_btf"))
+ break;
+ id = btf__add_struct(btf, "foo", 20);
+ if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
+ break;
+ err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::a"))
+ break;
+ err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::b"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
+ if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
+ break;
+ id = btf__add_struct(btf, "bar", 36);
+ if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
+ break;
+ err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::a"))
+ break;
+ err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::b"))
+ break;
+ err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::c"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:baz:b", 7, 0);
+ if (!ASSERT_EQ(id, 8, "btf__add_decl_tag"))
+ break;
+ id = btf__add_struct(btf, "baz", 36);
+ if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))
+ break;
+ err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::a"))
+ break;
+ err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::b"))
+ break;
+ err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::c"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:bam:a", 9, 0);
+ if (!ASSERT_EQ(id, 10, "btf__add_decl_tag contains:bam:a"))
+ break;
+ id = btf__add_struct(btf, "bam", 16);
+ if (!ASSERT_EQ(id, 11, "btf__add_struct bam"))
+ break;
+ err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field bam::a"))
+ break;
+
+ err = btf__load_into_kernel(btf);
+ ASSERT_EQ(err, -ELOOP, "check btf");
+ btf__free(btf);
+ break;
+ }
+}
+
+void test_linked_list(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(linked_list_fail_tests); i++) {
+ if (!test__start_subtest(linked_list_fail_tests[i].prog_name))
+ continue;
+ test_linked_list_fail_prog(linked_list_fail_tests[i].prog_name,
+ linked_list_fail_tests[i].err_msg);
+ }
+ test_btf();
+ test_linked_list_success(PUSH_POP, false);
+ test_linked_list_success(PUSH_POP, true);
+ test_linked_list_success(PUSH_POP_MULT, false);
+ test_linked_list_success(PUSH_POP_MULT, true);
+ test_linked_list_success(LIST_IN_LIST, false);
+ test_linked_list_success(LIST_IN_LIST, true);
+ test_linked_list_success(TEST_ALL, false);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c b/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c
index 1102e4f42d2d..f117bfef68a1 100644
--- a/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c
+++ b/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c
@@ -173,10 +173,12 @@ static void test_lsm_cgroup_functional(void)
ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 4, "total prog count");
ASSERT_EQ(query_prog_cnt(cgroup_fd2, NULL), 1, "total prog count");
- /* AF_UNIX is prohibited. */
-
fd = socket(AF_UNIX, SOCK_STREAM, 0);
- ASSERT_LT(fd, 0, "socket(AF_UNIX)");
+ if (!(skel->kconfig->CONFIG_SECURITY_APPARMOR
+ || skel->kconfig->CONFIG_SECURITY_SELINUX
+ || skel->kconfig->CONFIG_SECURITY_SMACK))
+ /* AF_UNIX is prohibited. */
+ ASSERT_LT(fd, 0, "socket(AF_UNIX)");
close(fd);
/* AF_INET6 gets default policy (sk_priority). */
@@ -233,11 +235,18 @@ static void test_lsm_cgroup_functional(void)
/* AF_INET6+SOCK_STREAM
* AF_PACKET+SOCK_RAW
+	 * AF_UNIX+SOCK_RAW if non-bpf LSMs are already installed
* listen_fd
* client_fd
* accepted_fd
*/
- ASSERT_EQ(skel->bss->called_socket_post_create2, 5, "called_create2");
+ if (skel->kconfig->CONFIG_SECURITY_APPARMOR
+ || skel->kconfig->CONFIG_SECURITY_SELINUX
+ || skel->kconfig->CONFIG_SECURITY_SMACK)
+		/* AF_UNIX+SOCK_RAW when non-bpf LSMs are already installed */
+ ASSERT_EQ(skel->bss->called_socket_post_create2, 6, "called_create2");
+ else
+ ASSERT_EQ(skel->bss->called_socket_post_create2, 5, "called_create2");
/* start_server
* bind(ETH_P_ALL)
diff --git a/tools/testing/selftests/bpf/prog_tests/map_kptr.c b/tools/testing/selftests/bpf/prog_tests/map_kptr.c
index fdcea7a61491..0d66b1524208 100644
--- a/tools/testing/selftests/bpf/prog_tests/map_kptr.c
+++ b/tools/testing/selftests/bpf/prog_tests/map_kptr.c
@@ -105,7 +105,7 @@ static void test_map_kptr_success(bool test_run)
ASSERT_OK(opts.retval, "test_map_kptr_ref2 retval");
if (test_run)
- return;
+ goto exit;
ret = bpf_map__update_elem(skel->maps.array_map,
&key, sizeof(key), buf, sizeof(buf), 0);
@@ -132,6 +132,7 @@ static void test_map_kptr_success(bool test_run)
ret = bpf_map__delete_elem(skel->maps.lru_hash_map, &key, sizeof(key), 0);
ASSERT_OK(ret, "lru_hash_map delete");
+exit:
map_kptr__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/module_attach.c b/tools/testing/selftests/bpf/prog_tests/module_attach.c
index 6d0e50dcf47c..7fc01ff490db 100644
--- a/tools/testing/selftests/bpf/prog_tests/module_attach.c
+++ b/tools/testing/selftests/bpf/prog_tests/module_attach.c
@@ -103,6 +103,13 @@ void test_module_attach(void)
ASSERT_ERR(delete_module("bpf_testmod", 0), "delete_module");
bpf_link__destroy(link);
+ link = bpf_program__attach(skel->progs.kprobe_multi);
+ if (!ASSERT_OK_PTR(link, "attach_kprobe_multi"))
+ goto cleanup;
+
+ ASSERT_ERR(delete_module("bpf_testmod", 0), "delete_module");
+ bpf_link__destroy(link);
+
cleanup:
test_module_attach__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c b/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c
new file mode 100644
index 000000000000..447d8560ecb6
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates.*/
+
+#define _GNU_SOURCE
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <test_progs.h>
+#include <bpf/btf.h>
+#include "rcu_read_lock.skel.h"
+#include "cgroup_helpers.h"
+
+static unsigned long long cgroup_id;
+
+static void test_success(void)
+{
+ struct rcu_read_lock *skel;
+ int err;
+
+ skel = rcu_read_lock__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ skel->bss->target_pid = syscall(SYS_gettid);
+
+ bpf_program__set_autoload(skel->progs.get_cgroup_id, true);
+ bpf_program__set_autoload(skel->progs.task_succ, true);
+ bpf_program__set_autoload(skel->progs.no_lock, true);
+ bpf_program__set_autoload(skel->progs.two_regions, true);
+ bpf_program__set_autoload(skel->progs.non_sleepable_1, true);
+ bpf_program__set_autoload(skel->progs.non_sleepable_2, true);
+ err = rcu_read_lock__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto out;
+
+ err = rcu_read_lock__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto out;
+
+ syscall(SYS_getpgid);
+
+ ASSERT_EQ(skel->bss->task_storage_val, 2, "task_storage_val");
+ ASSERT_EQ(skel->bss->cgroup_id, cgroup_id, "cgroup_id");
+out:
+ rcu_read_lock__destroy(skel);
+}
+
+static void test_rcuptr_acquire(void)
+{
+ struct rcu_read_lock *skel;
+ int err;
+
+ skel = rcu_read_lock__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ skel->bss->target_pid = syscall(SYS_gettid);
+
+ bpf_program__set_autoload(skel->progs.task_acquire, true);
+ err = rcu_read_lock__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto out;
+
+ err = rcu_read_lock__attach(skel);
+ ASSERT_OK(err, "skel_attach");
+out:
+ rcu_read_lock__destroy(skel);
+}
+
+static const char * const inproper_region_tests[] = {
+ "miss_lock",
+ "miss_unlock",
+ "non_sleepable_rcu_mismatch",
+ "inproper_sleepable_helper",
+ "inproper_sleepable_kfunc",
+ "nested_rcu_region",
+};
+
+static void test_inproper_region(void)
+{
+ struct rcu_read_lock *skel;
+ struct bpf_program *prog;
+ int i, err;
+
+ for (i = 0; i < ARRAY_SIZE(inproper_region_tests); i++) {
+ skel = rcu_read_lock__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ prog = bpf_object__find_program_by_name(skel->obj, inproper_region_tests[i]);
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+ goto out;
+ bpf_program__set_autoload(prog, true);
+ err = rcu_read_lock__load(skel);
+ ASSERT_ERR(err, "skel_load");
+out:
+ rcu_read_lock__destroy(skel);
+ }
+}
+
+static const char * const rcuptr_misuse_tests[] = {
+ "task_untrusted_non_rcuptr",
+ "task_untrusted_rcuptr",
+ "cross_rcu_region",
+};
+
+static void test_rcuptr_misuse(void)
+{
+ struct rcu_read_lock *skel;
+ struct bpf_program *prog;
+ int i, err;
+
+ for (i = 0; i < ARRAY_SIZE(rcuptr_misuse_tests); i++) {
+ skel = rcu_read_lock__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ prog = bpf_object__find_program_by_name(skel->obj, rcuptr_misuse_tests[i]);
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+ goto out;
+ bpf_program__set_autoload(prog, true);
+ err = rcu_read_lock__load(skel);
+ ASSERT_ERR(err, "skel_load");
+out:
+ rcu_read_lock__destroy(skel);
+ }
+}
+
+void test_rcu_read_lock(void)
+{
+ struct btf *vmlinux_btf;
+ int cgroup_fd;
+
+ vmlinux_btf = btf__load_vmlinux_btf();
+ if (!ASSERT_OK_PTR(vmlinux_btf, "could not load vmlinux BTF"))
+ return;
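+	/* RCU protection relies on the "rcu" BTF type tag; skip the whole
+	 * test when the running kernel's BTF was built without type tags.
+	 */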
+ if (btf__find_by_name_kind(vmlinux_btf, "rcu", BTF_KIND_TYPE_TAG) < 0) {
+ test__skip();
+ goto out;
+ }
+
+ cgroup_fd = test__join_cgroup("/rcu_read_lock");
+ if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /rcu_read_lock"))
+ goto out;
+
+ cgroup_id = get_cgroup_id("/rcu_read_lock");
+ if (test__start_subtest("success"))
+ test_success();
+ if (test__start_subtest("rcuptr_acquire"))
+ test_rcuptr_acquire();
+ if (test__start_subtest("negative_tests_inproper_region"))
+ test_inproper_region();
+ if (test__start_subtest("negative_tests_rcuptr_misuse"))
+ test_rcuptr_misuse();
+ close(cgroup_fd);
+out:
+ btf__free(vmlinux_btf);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf.c b/tools/testing/selftests/bpf/prog_tests/ringbuf.c
index 9a80fe8a6427..ac104dc652e3 100644
--- a/tools/testing/selftests/bpf/prog_tests/ringbuf.c
+++ b/tools/testing/selftests/bpf/prog_tests/ringbuf.c
@@ -13,6 +13,7 @@
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
#include "test_ringbuf.lskel.h"
+#include "test_ringbuf_map_key.lskel.h"
#define EDONE 7777
@@ -58,6 +59,7 @@ static int process_sample(void *ctx, void *data, size_t len)
}
}
+static struct test_ringbuf_map_key_lskel *skel_map_key;
static struct test_ringbuf_lskel *skel;
static struct ring_buffer *ringbuf;
@@ -81,7 +83,7 @@ static void *poll_thread(void *input)
return (void *)(long)ring_buffer__poll(ringbuf, timeout);
}
-void test_ringbuf(void)
+static void ringbuf_subtest(void)
{
const size_t rec_sz = BPF_RINGBUF_HDR_SZ + sizeof(struct sample);
pthread_t thread;
@@ -297,3 +299,65 @@ cleanup:
ring_buffer__free(ringbuf);
test_ringbuf_lskel__destroy(skel);
}
+
+static int process_map_key_sample(void *ctx, void *data, size_t len)
+{
+ struct sample *s;
+ int err, val;
+
+ s = data;
+ switch (s->seq) {
+ case 1:
+ ASSERT_EQ(s->value, 42, "sample_value");
+ err = bpf_map_lookup_elem(skel_map_key->maps.hash_map.map_fd,
+ s, &val);
+ ASSERT_OK(err, "hash_map bpf_map_lookup_elem");
+ ASSERT_EQ(val, 1, "hash_map val");
+ return -EDONE;
+ default:
+ return 0;
+ }
+}
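+/* Returning -EDONE from the sample callback propagates out of
+ * ring_buffer__poll(), which ringbuf_map_key_subtest() checks below.
+ */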
+
+static void ringbuf_map_key_subtest(void)
+{
+ int err;
+
+ skel_map_key = test_ringbuf_map_key_lskel__open();
+ if (!ASSERT_OK_PTR(skel_map_key, "test_ringbuf_map_key_lskel__open"))
+ return;
+
+ skel_map_key->maps.ringbuf.max_entries = getpagesize();
+ skel_map_key->bss->pid = getpid();
+
+ err = test_ringbuf_map_key_lskel__load(skel_map_key);
+ if (!ASSERT_OK(err, "test_ringbuf_map_key_lskel__load"))
+ goto cleanup;
+
+ ringbuf = ring_buffer__new(skel_map_key->maps.ringbuf.map_fd,
+ process_map_key_sample, NULL, NULL);
+ if (!ASSERT_OK_PTR(ringbuf, "ring_buffer__new"))
+ goto cleanup;
+
+ err = test_ringbuf_map_key_lskel__attach(skel_map_key);
+ if (!ASSERT_OK(err, "test_ringbuf_map_key_lskel__attach"))
+ goto cleanup_ringbuf;
+
+ syscall(__NR_getpgid);
+ ASSERT_EQ(skel_map_key->bss->seq, 1, "skel_map_key->bss->seq");
+ err = ring_buffer__poll(ringbuf, -1);
+ ASSERT_EQ(err, -EDONE, "ring_buffer__poll");
+
+cleanup_ringbuf:
+ ring_buffer__free(ringbuf);
+cleanup:
+ test_ringbuf_map_key_lskel__destroy(skel_map_key);
+}
+
+void test_ringbuf(void)
+{
+ if (test__start_subtest("ringbuf"))
+ ringbuf_subtest();
+ if (test__start_subtest("ringbuf_map_key"))
+ ringbuf_map_key_subtest();
+}
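
To make the user-space half above easier to follow: test_ringbuf_map_key.lskel.h
is generated from a BPF object added elsewhere in this series. A hedged sketch
of its plausible shape, inferred from the seq/value protocol that
process_map_key_sample() checks; the map layout and section name are
assumptions:

    #include <vmlinux.h>
    #include <bpf/bpf_helpers.h>

    struct sample {
        int pid;
        int seq;
        long value;
        char comm[16];
    };

    struct {
        __uint(type, BPF_MAP_TYPE_RINGBUF);
    } ringbuf SEC(".maps");

    struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(max_entries, 10);
        __type(key, struct sample);
        __type(value, int);
    } hash_map SEC(".maps");

    int pid;
    long seq;

    SEC("tp/syscalls/sys_enter_getpgid")
    int test_ringbuf_mem_map_key(void *ctx)
    {
        struct sample *s;
        int one = 1;

        if (bpf_get_current_pid_tgid() >> 32 != pid)
            return 0;

        s = bpf_ringbuf_reserve(&ringbuf, sizeof(*s), 0);
        if (!s)
            return 0;

        s->pid = pid;
        s->seq = ++seq;
        s->value = 42;
        /* ringbuf-reserved memory used directly as a map key, which is
         * what this subtest appears to cover
         */
        bpf_map_update_elem(&hash_map, s, &one, BPF_ANY);
        bpf_ringbuf_submit(s, 0);
        return 0;
    }

    char _license[] SEC("license") = "GPL";
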
diff --git a/tools/testing/selftests/bpf/prog_tests/skeleton.c b/tools/testing/selftests/bpf/prog_tests/skeleton.c
index 99dac5292b41..bc6817aee9aa 100644
--- a/tools/testing/selftests/bpf/prog_tests/skeleton.c
+++ b/tools/testing/selftests/bpf/prog_tests/skeleton.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
+#include <sys/mman.h>
struct s {
int a;
@@ -22,7 +23,8 @@ void test_skeleton(void)
struct test_skeleton__kconfig *kcfg;
const void *elf_bytes;
size_t elf_bytes_sz = 0;
- int i;
+ void *m;
+ int i, fd;
skel = test_skeleton__open();
if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
@@ -124,6 +126,13 @@ void test_skeleton(void)
ASSERT_EQ(bss->huge_arr[ARRAY_SIZE(bss->huge_arr) - 1], 123, "huge_arr");
+ fd = bpf_map__fd(skel->maps.data_non_mmapable);
+ m = mmap(NULL, getpagesize(), PROT_READ, MAP_SHARED, fd, 0);
+ if (!ASSERT_EQ(m, MAP_FAILED, "unexpected_mmap_success"))
+ munmap(m, getpagesize());
+
+ ASSERT_EQ(bpf_map__map_flags(skel->maps.data_non_mmapable), 0, "non_mmap_flags");
+
elf_bytes = test_skeleton__elf_bytes(&elf_bytes_sz);
ASSERT_OK_PTR(elf_bytes, "elf_bytes");
ASSERT_GE(elf_bytes_sz, 0, "elf_bytes_sz");
diff --git a/tools/testing/selftests/bpf/prog_tests/spin_lock.c b/tools/testing/selftests/bpf/prog_tests/spin_lock.c
new file mode 100644
index 000000000000..d9270bd3d920
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/spin_lock.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <network_helpers.h>
+
+#include "test_spin_lock.skel.h"
+#include "test_spin_lock_fail.skel.h"
+
+static char log_buf[1024 * 1024];
+
+static struct {
+ const char *prog_name;
+ const char *err_msg;
+} spin_lock_fail_tests[] = {
+ { "lock_id_kptr_preserve",
+ "5: (bf) r1 = r0 ; R0_w=ptr_foo(id=2,ref_obj_id=2,off=0,imm=0) "
+ "R1_w=ptr_foo(id=2,ref_obj_id=2,off=0,imm=0) refs=2\n6: (85) call bpf_this_cpu_ptr#154\n"
+ "R1 type=ptr_ expected=percpu_ptr_" },
+ { "lock_id_global_zero",
+ "; R1_w=map_value(off=0,ks=4,vs=4,imm=0)\n2: (85) call bpf_this_cpu_ptr#154\n"
+ "R1 type=map_value expected=percpu_ptr_" },
+ { "lock_id_mapval_preserve",
+ "8: (bf) r1 = r0 ; R0_w=map_value(id=1,off=0,ks=4,vs=8,imm=0) "
+ "R1_w=map_value(id=1,off=0,ks=4,vs=8,imm=0)\n9: (85) call bpf_this_cpu_ptr#154\n"
+ "R1 type=map_value expected=percpu_ptr_" },
+ { "lock_id_innermapval_preserve",
+ "13: (bf) r1 = r0 ; R0=map_value(id=2,off=0,ks=4,vs=8,imm=0) "
+ "R1_w=map_value(id=2,off=0,ks=4,vs=8,imm=0)\n14: (85) call bpf_this_cpu_ptr#154\n"
+ "R1 type=map_value expected=percpu_ptr_" },
+ { "lock_id_mismatch_kptr_kptr", "bpf_spin_unlock of different lock" },
+ { "lock_id_mismatch_kptr_global", "bpf_spin_unlock of different lock" },
+ { "lock_id_mismatch_kptr_mapval", "bpf_spin_unlock of different lock" },
+ { "lock_id_mismatch_kptr_innermapval", "bpf_spin_unlock of different lock" },
+ { "lock_id_mismatch_global_global", "bpf_spin_unlock of different lock" },
+ { "lock_id_mismatch_global_kptr", "bpf_spin_unlock of different lock" },
+ { "lock_id_mismatch_global_mapval", "bpf_spin_unlock of different lock" },
+ { "lock_id_mismatch_global_innermapval", "bpf_spin_unlock of different lock" },
+ { "lock_id_mismatch_mapval_mapval", "bpf_spin_unlock of different lock" },
+ { "lock_id_mismatch_mapval_kptr", "bpf_spin_unlock of different lock" },
+ { "lock_id_mismatch_mapval_global", "bpf_spin_unlock of different lock" },
+ { "lock_id_mismatch_mapval_innermapval", "bpf_spin_unlock of different lock" },
+ { "lock_id_mismatch_innermapval_innermapval1", "bpf_spin_unlock of different lock" },
+ { "lock_id_mismatch_innermapval_innermapval2", "bpf_spin_unlock of different lock" },
+ { "lock_id_mismatch_innermapval_kptr", "bpf_spin_unlock of different lock" },
+ { "lock_id_mismatch_innermapval_global", "bpf_spin_unlock of different lock" },
+ { "lock_id_mismatch_innermapval_mapval", "bpf_spin_unlock of different lock" },
+};
+
+static void test_spin_lock_fail_prog(const char *prog_name, const char *err_msg)
+{
+ LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf,
+ .kernel_log_size = sizeof(log_buf),
+ .kernel_log_level = 1);
+ struct test_spin_lock_fail *skel;
+ struct bpf_program *prog;
+ int ret;
+
+ skel = test_spin_lock_fail__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "test_spin_lock_fail__open_opts"))
+ return;
+
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+ goto end;
+
+ bpf_program__set_autoload(prog, true);
+
+ ret = test_spin_lock_fail__load(skel);
+ if (!ASSERT_ERR(ret, "test_spin_lock_fail__load must fail"))
+ goto end;
+
+ /* Skip check if JIT does not support kfuncs */
+ if (strstr(log_buf, "JIT does not support calling kernel function")) {
+ test__skip();
+ goto end;
+ }
+
+ if (!ASSERT_OK_PTR(strstr(log_buf, err_msg), "expected error message")) {
+ fprintf(stderr, "Expected: %s\n", err_msg);
+ fprintf(stderr, "Verifier: %s\n", log_buf);
+ }
+
+end:
+ test_spin_lock_fail__destroy(skel);
+}
+
+static void *spin_lock_thread(void *arg)
+{
+ int err, prog_fd = *(u32 *) arg;
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 10000,
+ );
+
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_OK(topts.retval, "test_run retval");
+ pthread_exit(arg);
+}
+
+void test_spin_lock_success(void)
+{
+ struct test_spin_lock *skel;
+ pthread_t thread_id[4];
+ int prog_fd, i;
+ void *ret;
+
+ skel = test_spin_lock__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_spin_lock__open_and_load"))
+ return;
+ prog_fd = bpf_program__fd(skel->progs.bpf_spin_lock_test);
+ for (i = 0; i < 4; i++) {
+ int err;
+
+ err = pthread_create(&thread_id[i], NULL, &spin_lock_thread, &prog_fd);
+ if (!ASSERT_OK(err, "pthread_create"))
+ goto end;
+ }
+
+ for (i = 0; i < 4; i++) {
+ if (!ASSERT_OK(pthread_join(thread_id[i], &ret), "pthread_join"))
+ goto end;
+ if (!ASSERT_EQ(ret, &prog_fd, "ret == prog_fd"))
+ goto end;
+ }
+end:
+ test_spin_lock__destroy(skel);
+}
+
+void test_spin_lock(void)
+{
+ int i;
+
+ test_spin_lock_success();
+
+ for (i = 0; i < ARRAY_SIZE(spin_lock_fail_tests); i++) {
+ if (!test__start_subtest(spin_lock_fail_tests[i].prog_name))
+ continue;
+ test_spin_lock_fail_prog(spin_lock_fail_tests[i].prog_name,
+ spin_lock_fail_tests[i].err_msg);
+ }
+}
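
The programs named in spin_lock_fail_tests[] live in
progs/test_spin_lock_fail.c, outside this hunk. As an illustration of the bug
class the "bpf_spin_unlock of different lock" entries cover, a hedged sketch
of one mismatch variant; the map and program names here are invented:

    #include <vmlinux.h>
    #include <bpf/bpf_helpers.h>

    struct val {
        struct bpf_spin_lock lock;
        int data;
    };

    struct lock_map {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 1);
        __type(key, int);
        __type(value, struct val);
    };

    struct lock_map mapA SEC(".maps");
    struct lock_map mapB SEC(".maps");

    SEC("?tc")
    int lock_id_mismatch_sketch(void *ctx)
    {
        struct val *a, *b;
        int zero = 0;

        a = bpf_map_lookup_elem(&mapA, &zero);
        b = bpf_map_lookup_elem(&mapB, &zero);
        if (!a || !b)
            return 0;

        bpf_spin_lock(&a->lock);
        /* unlocking a lock other than the one held: the verifier now
         * tracks lock identity and rejects this at load time
         */
        bpf_spin_unlock(&b->lock);
        return 0;
    }

    char _license[] SEC("license") = "GPL";

Each fail program is built with autoload disabled and enabled one at a time by
test_spin_lock_fail_prog(), so a single object can carry all the variants.
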
diff --git a/tools/testing/selftests/bpf/prog_tests/spinlock.c b/tools/testing/selftests/bpf/prog_tests/spinlock.c
deleted file mode 100644
index 15eb1372d771..000000000000
--- a/tools/testing/selftests/bpf/prog_tests/spinlock.c
+++ /dev/null
@@ -1,45 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <test_progs.h>
-#include <network_helpers.h>
-
-static void *spin_lock_thread(void *arg)
-{
- int err, prog_fd = *(u32 *) arg;
- LIBBPF_OPTS(bpf_test_run_opts, topts,
- .data_in = &pkt_v4,
- .data_size_in = sizeof(pkt_v4),
- .repeat = 10000,
- );
-
- err = bpf_prog_test_run_opts(prog_fd, &topts);
- ASSERT_OK(err, "test_run");
- ASSERT_OK(topts.retval, "test_run retval");
- pthread_exit(arg);
-}
-
-void test_spinlock(void)
-{
- const char *file = "./test_spin_lock.bpf.o";
- pthread_t thread_id[4];
- struct bpf_object *obj = NULL;
- int prog_fd;
- int err = 0, i;
- void *ret;
-
- err = bpf_prog_test_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd);
- if (CHECK_FAIL(err)) {
- printf("test_spin_lock:bpf_prog_test_load errno %d\n", errno);
- goto close_prog;
- }
- for (i = 0; i < 4; i++)
- if (CHECK_FAIL(pthread_create(&thread_id[i], NULL,
- &spin_lock_thread, &prog_fd)))
- goto close_prog;
-
- for (i = 0; i < 4; i++)
- if (CHECK_FAIL(pthread_join(thread_id[i], &ret) ||
- ret != (void *)&prog_fd))
- goto close_prog;
-close_prog:
- bpf_object__close(obj);
-}
diff --git a/tools/testing/selftests/bpf/prog_tests/task_kfunc.c b/tools/testing/selftests/bpf/prog_tests/task_kfunc.c
new file mode 100644
index 000000000000..ffd8ef4303c8
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/task_kfunc.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#define _GNU_SOURCE
+#include <sys/wait.h>
+#include <test_progs.h>
+#include <unistd.h>
+
+#include "task_kfunc_failure.skel.h"
+#include "task_kfunc_success.skel.h"
+
+static size_t log_buf_sz = 1 << 20; /* 1 MB */
+static char obj_log_buf[1048576];
+
+static struct task_kfunc_success *open_load_task_kfunc_skel(void)
+{
+ struct task_kfunc_success *skel;
+ int err;
+
+ skel = task_kfunc_success__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return NULL;
+
+ skel->bss->pid = getpid();
+
+ err = task_kfunc_success__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ return skel;
+
+cleanup:
+ task_kfunc_success__destroy(skel);
+ return NULL;
+}
+
+static void run_success_test(const char *prog_name)
+{
+ struct task_kfunc_success *skel;
+ int status;
+ pid_t child_pid;
+ struct bpf_program *prog;
+ struct bpf_link *link = NULL;
+
+ skel = open_load_task_kfunc_skel();
+ if (!ASSERT_OK_PTR(skel, "open_load_skel"))
+ return;
+
+ if (!ASSERT_OK(skel->bss->err, "pre_spawn_err"))
+ goto cleanup;
+
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+ goto cleanup;
+
+ link = bpf_program__attach(prog);
+ if (!ASSERT_OK_PTR(link, "attached_link"))
+ goto cleanup;
+
+ child_pid = fork();
+ if (!ASSERT_GT(child_pid, -1, "child_pid"))
+ goto cleanup;
+ if (child_pid == 0)
+ _exit(0);
+ waitpid(child_pid, &status, 0);
+
+ ASSERT_OK(skel->bss->err, "post_wait_err");
+
+cleanup:
+ bpf_link__destroy(link);
+ task_kfunc_success__destroy(skel);
+}
+
+static const char * const success_tests[] = {
+ "test_task_acquire_release_argument",
+ "test_task_acquire_release_current",
+ "test_task_acquire_leave_in_map",
+ "test_task_xchg_release",
+ "test_task_get_release",
+ "test_task_current_acquire_release",
+ "test_task_from_pid_arg",
+ "test_task_from_pid_current",
+ "test_task_from_pid_invalid",
+};
+
+static struct {
+ const char *prog_name;
+ const char *expected_err_msg;
+} failure_tests[] = {
+ {"task_kfunc_acquire_untrusted", "R1 must be referenced or trusted"},
+ {"task_kfunc_acquire_fp", "arg#0 pointer type STRUCT task_struct must point"},
+ {"task_kfunc_acquire_unsafe_kretprobe", "reg type unsupported for arg#0 function"},
+ {"task_kfunc_acquire_trusted_walked", "R1 must be referenced or trusted"},
+ {"task_kfunc_acquire_null", "arg#0 pointer type STRUCT task_struct must point"},
+ {"task_kfunc_acquire_unreleased", "Unreleased reference"},
+ {"task_kfunc_get_non_kptr_param", "arg#0 expected pointer to map value"},
+ {"task_kfunc_get_non_kptr_acquired", "arg#0 expected pointer to map value"},
+ {"task_kfunc_get_null", "arg#0 expected pointer to map value"},
+ {"task_kfunc_xchg_unreleased", "Unreleased reference"},
+ {"task_kfunc_get_unreleased", "Unreleased reference"},
+ {"task_kfunc_release_untrusted", "arg#0 is untrusted_ptr_or_null_ expected ptr_ or socket"},
+ {"task_kfunc_release_fp", "arg#0 pointer type STRUCT task_struct must point"},
+ {"task_kfunc_release_null", "arg#0 is ptr_or_null_ expected ptr_ or socket"},
+ {"task_kfunc_release_unacquired", "release kernel function bpf_task_release expects"},
+ {"task_kfunc_from_pid_no_null_check", "arg#0 is ptr_or_null_ expected ptr_ or socket"},
+};
+
+static void verify_fail(const char *prog_name, const char *expected_err_msg)
+{
+ LIBBPF_OPTS(bpf_object_open_opts, opts);
+ struct task_kfunc_failure *skel;
+ int err, i;
+
+ opts.kernel_log_buf = obj_log_buf;
+ opts.kernel_log_size = log_buf_sz;
+ opts.kernel_log_level = 1;
+
+ skel = task_kfunc_failure__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "task_kfunc_failure__open_opts"))
+ goto cleanup;
+
+ for (i = 0; i < ARRAY_SIZE(failure_tests); i++) {
+ struct bpf_program *prog;
+ const char *curr_name = failure_tests[i].prog_name;
+
+ prog = bpf_object__find_program_by_name(skel->obj, curr_name);
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+ goto cleanup;
+
+ bpf_program__set_autoload(prog, !strcmp(curr_name, prog_name));
+ }
+
+ err = task_kfunc_failure__load(skel);
+ if (!ASSERT_ERR(err, "unexpected load success"))
+ goto cleanup;
+
+ if (!ASSERT_OK_PTR(strstr(obj_log_buf, expected_err_msg), "expected_err_msg")) {
+ fprintf(stderr, "Expected err_msg: %s\n", expected_err_msg);
+ fprintf(stderr, "Verifier output: %s\n", obj_log_buf);
+ }
+
+cleanup:
+ task_kfunc_failure__destroy(skel);
+}
+
+void test_task_kfunc(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(success_tests); i++) {
+ if (!test__start_subtest(success_tests[i]))
+ continue;
+
+ run_success_test(success_tests[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(failure_tests); i++) {
+ if (!test__start_subtest(failure_tests[i].prog_name))
+ continue;
+
+ verify_fail(failure_tests[i].prog_name, failure_tests[i].expected_err_msg);
+ }
+}
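
For reference, the success-side programs are defined in
progs/task_kfunc_success.c, added elsewhere in this series. A hedged sketch of
the simplest shape, matching the fork() trigger in run_success_test(); the
tracepoint and kfunc declarations are standard, the body is assumed:

    #include <vmlinux.h>
    #include <bpf/bpf_tracing.h>
    #include <bpf/bpf_helpers.h>

    struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
    void bpf_task_release(struct task_struct *p) __ksym;

    int err, pid;

    SEC("tp_btf/task_newtask")
    int BPF_PROG(test_task_acquire_release_argument, struct task_struct *task,
                 u64 clone_flags)
    {
        struct task_struct *acquired;

        if (bpf_get_current_pid_tgid() >> 32 != pid)
            return 0;

        /* The tracepoint argument is trusted, so it can be acquired
         * and must then be released.
         */
        acquired = bpf_task_acquire(task);
        bpf_task_release(acquired);
        return 0;
    }

    char _license[] SEC("license") = "GPL";
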
diff --git a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
index 035c263aab1b..a176bd75a748 100644
--- a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
+++ b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
@@ -3,12 +3,16 @@
#define _GNU_SOURCE /* See feature_test_macros(7) */
#include <unistd.h>
+#include <sched.h>
+#include <pthread.h>
#include <sys/syscall.h> /* For SYS_xxx definitions */
#include <sys/types.h>
#include <test_progs.h>
+#include "task_local_storage_helpers.h"
#include "task_local_storage.skel.h"
#include "task_local_storage_exit_creds.skel.h"
#include "task_ls_recursion.skel.h"
+#include "task_storage_nodeadlock.skel.h"
static void test_sys_enter_exit(void)
{
@@ -39,7 +43,8 @@ out:
static void test_exit_creds(void)
{
struct task_local_storage_exit_creds *skel;
- int err;
+ int err, run_count, sync_rcu_calls = 0;
+ const int MAX_SYNC_RCU_CALLS = 1000;
skel = task_local_storage_exit_creds__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
@@ -53,8 +58,19 @@ static void test_exit_creds(void)
if (CHECK_FAIL(system("ls > /dev/null")))
goto out;
- /* sync rcu to make sure exit_creds() is called for "ls" */
- kern_sync_rcu();
+ /* kern_sync_rcu is not enough on its own as the read section we want
+ * to wait for may start after we enter synchronize_rcu, so our call
+ * won't wait for the section to finish. Loop on the run counter
+ * as well to ensure the program has run.
+ */
+ do {
+ kern_sync_rcu();
+ run_count = __atomic_load_n(&skel->bss->run_count, __ATOMIC_SEQ_CST);
+ } while (run_count == 0 && ++sync_rcu_calls < MAX_SYNC_RCU_CALLS);
+
+ ASSERT_NEQ(sync_rcu_calls, MAX_SYNC_RCU_CALLS,
+ "sync_rcu count too high");
+ ASSERT_NEQ(run_count, 0, "run_count");
ASSERT_EQ(skel->bss->valid_ptr_count, 0, "valid_ptr_count");
ASSERT_NEQ(skel->bss->null_ptr_count, 0, "null_ptr_count");
out:
@@ -63,24 +79,160 @@ out:
static void test_recursion(void)
{
+ int err, map_fd, prog_fd, task_fd;
struct task_ls_recursion *skel;
- int err;
+ struct bpf_prog_info info;
+ __u32 info_len = sizeof(info);
+ long value;
+
+ task_fd = sys_pidfd_open(getpid(), 0);
+ if (!ASSERT_NEQ(task_fd, -1, "sys_pidfd_open"))
+ return;
skel = task_ls_recursion__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
- return;
+ goto out;
err = task_ls_recursion__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto out;
/* trigger sys_enter, make sure it does not cause deadlock */
+ skel->bss->test_pid = getpid();
syscall(SYS_gettid);
+ skel->bss->test_pid = 0;
+ task_ls_recursion__detach(skel);
+
+ /* Refer to the comment in BPF_PROG(on_update) for
+ * an explanation of the values 201 and 100.
+ */
+ map_fd = bpf_map__fd(skel->maps.map_a);
+ err = bpf_map_lookup_elem(map_fd, &task_fd, &value);
+ ASSERT_OK(err, "lookup map_a");
+ ASSERT_EQ(value, 201, "map_a value");
+ ASSERT_EQ(skel->bss->nr_del_errs, 1, "bpf_task_storage_delete busy");
+
+ map_fd = bpf_map__fd(skel->maps.map_b);
+ err = bpf_map_lookup_elem(map_fd, &task_fd, &value);
+ ASSERT_OK(err, "lookup map_b");
+ ASSERT_EQ(value, 100, "map_b value");
+
+ prog_fd = bpf_program__fd(skel->progs.on_lookup);
+ memset(&info, 0, sizeof(info));
+ err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ ASSERT_OK(err, "get prog info");
+ ASSERT_GT(info.recursion_misses, 0, "on_lookup prog recursion");
+
+ prog_fd = bpf_program__fd(skel->progs.on_update);
+ memset(&info, 0, sizeof(info));
+ err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ ASSERT_OK(err, "get prog info");
+ ASSERT_EQ(info.recursion_misses, 0, "on_update prog recursion");
+
+ prog_fd = bpf_program__fd(skel->progs.on_enter);
+ memset(&info, 0, sizeof(info));
+ err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ ASSERT_OK(err, "get prog info");
+ ASSERT_EQ(info.recursion_misses, 0, "on_enter prog recursion");
out:
+ close(task_fd);
task_ls_recursion__destroy(skel);
}
+static bool stop;
+
+static void waitall(const pthread_t *tids, int nr)
+{
+ int i;
+
+ stop = true;
+ for (i = 0; i < nr; i++)
+ pthread_join(tids[i], NULL);
+}
+
+static void *sock_create_loop(void *arg)
+{
+ struct task_storage_nodeadlock *skel = arg;
+ int fd;
+
+ while (!stop) {
+ fd = socket(AF_INET, SOCK_STREAM, 0);
+ close(fd);
+ if (skel->bss->nr_get_errs || skel->bss->nr_del_errs)
+ stop = true;
+ }
+
+ return NULL;
+}
+
+static void test_nodeadlock(void)
+{
+ struct task_storage_nodeadlock *skel;
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ const int nr_threads = 32;
+ pthread_t tids[nr_threads];
+ int i, prog_fd, err;
+ cpu_set_t old, new;
+
+ /* Pin all threads to one cpu to increase the chance of preemption
+ * in a sleepable bpf prog.
+ */
+ CPU_ZERO(&new);
+ CPU_SET(0, &new);
+ err = sched_getaffinity(getpid(), sizeof(old), &old);
+ if (!ASSERT_OK(err, "getaffinity"))
+ return;
+ err = sched_setaffinity(getpid(), sizeof(new), &new);
+ if (!ASSERT_OK(err, "setaffinity"))
+ return;
+
+ skel = task_storage_nodeadlock__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ goto done;
+
+ /* Unnecessary recursion and deadlock detection are only
+ * reproducible on a preemptible kernel.
+ */
+ if (!skel->kconfig->CONFIG_PREEMPT) {
+ test__skip();
+ goto done;
+ }
+
+ err = task_storage_nodeadlock__attach(skel);
+ ASSERT_OK(err, "attach prog");
+
+ for (i = 0; i < nr_threads; i++) {
+ err = pthread_create(&tids[i], NULL, sock_create_loop, skel);
+ if (err) {
+ /* Only assert once here to avoid excessive
+ * PASS printing during test failure.
+ */
+ ASSERT_OK(err, "pthread_create");
+ waitall(tids, i);
+ goto done;
+ }
+ }
+
+ /* With 32 threads, 1s is enough to reproduce the issue */
+ sleep(1);
+ waitall(tids, nr_threads);
+
+ info_len = sizeof(info);
+ prog_fd = bpf_program__fd(skel->progs.socket_post_create);
+ err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ ASSERT_OK(err, "get prog info");
+ ASSERT_EQ(info.recursion_misses, 0, "prog recursion");
+
+ ASSERT_EQ(skel->bss->nr_get_errs, 0, "bpf_task_storage_get busy");
+ ASSERT_EQ(skel->bss->nr_del_errs, 0, "bpf_task_storage_delete busy");
+
+done:
+ task_storage_nodeadlock__destroy(skel);
+ sched_setaffinity(getpid(), sizeof(old), &old);
+}
+
void test_task_local_storage(void)
{
if (test__start_subtest("sys_enter_exit"))
@@ -89,4 +241,6 @@ void test_task_local_storage(void)
test_exit_creds();
if (test__start_subtest("recursion"))
test_recursion();
+ if (test__start_subtest("nodeadlock"))
+ test_nodeadlock();
}
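
A hedged sketch of progs/task_storage_nodeadlock.c (not in this hunk): a
sleepable LSM hook hammers task storage get/delete so that, on a preemptible
kernel, preempted and restarted calls would trip the old per-cpu busy
counting. The nr_get_errs/nr_del_errs/CONFIG_PREEMPT names match the skeleton
fields used above; the rest is assumed:

    #include <vmlinux.h>
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    extern bool CONFIG_PREEMPT __kconfig __weak;

    int nr_get_errs, nr_del_errs;

    struct {
        __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
        __uint(map_flags, BPF_F_NO_PREALLOC);
        __type(key, int);
        __type(value, long);
    } task_storage SEC(".maps");

    SEC("lsm.s/socket_post_create")
    int BPF_PROG(socket_post_create, struct socket *sock, int family,
                 int type, int protocol, int kern)
    {
        struct task_struct *task = bpf_get_current_task_btf();
        long *value;
        int err;

        value = bpf_task_storage_get(&task_storage, task, 0,
                                     BPF_LOCAL_STORAGE_GET_F_CREATE);
        if (!value)
            __sync_fetch_and_add(&nr_get_errs, 1);

        err = bpf_task_storage_delete(&task_storage, task);
        if (err == -EBUSY)
            __sync_fetch_and_add(&nr_del_errs, 1);

        return 0;
    }

    char _license[] SEC("license") = "GPL";

With the fix under test, both error counters are expected to stay at zero,
which is exactly what test_nodeadlock() asserts.
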
diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c b/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c
index 617bbce6ef8f..5cf85d0f9827 100644
--- a/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c
+++ b/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c
@@ -485,7 +485,7 @@ static void misc(void)
goto check_linum;
ret = read(sk_fds.passive_fd, recv_msg, sizeof(recv_msg));
- if (ASSERT_EQ(ret, sizeof(send_msg), "read(msg)"))
+ if (!ASSERT_EQ(ret, sizeof(send_msg), "read(msg)"))
goto check_linum;
}
@@ -505,6 +505,8 @@ static void misc(void)
ASSERT_EQ(misc_skel->bss->nr_fin, 1, "unexpected nr_fin");
+ ASSERT_EQ(misc_skel->bss->nr_hwtstamp, 0, "nr_hwtstamp");
+
check_linum:
ASSERT_FALSE(check_error_linum(&sk_fds), "check_error_linum");
sk_fds_close(&sk_fds);
@@ -539,7 +541,7 @@ void test_tcp_hdr_options(void)
goto skel_destroy;
cg_fd = test__join_cgroup(CG_NAME);
- if (ASSERT_GE(cg_fd, 0, "join_cgroup"))
+ if (!ASSERT_GE(cg_fd, 0, "join_cgroup"))
goto skel_destroy;
for (i = 0; i < ARRAY_SIZE(tests); i++) {
diff --git a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c
index d5022b91d1e4..48dc9472e160 100644
--- a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c
+++ b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c
@@ -15,7 +15,7 @@ static void test_fentry(void)
err = tracing_struct__attach(skel);
if (!ASSERT_OK(err, "tracing_struct__attach"))
- return;
+ goto destroy_skel;
ASSERT_OK(trigger_module_test_read(256), "trigger_read");
@@ -54,6 +54,7 @@ static void test_fentry(void)
ASSERT_EQ(skel->bss->t5_ret, 1, "t5 ret");
tracing_struct__detach(skel);
+destroy_skel:
tracing_struct__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/type_cast.c b/tools/testing/selftests/bpf/prog_tests/type_cast.c
new file mode 100644
index 000000000000..9317d5fa2635
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/type_cast.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <network_helpers.h>
+#include "type_cast.skel.h"
+
+static void test_xdp(void)
+{
+ struct type_cast *skel;
+ int err, prog_fd;
+ char buf[128];
+
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .data_out = buf,
+ .data_size_out = sizeof(buf),
+ .repeat = 1,
+ );
+
+ skel = type_cast__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ bpf_program__set_autoload(skel->progs.md_xdp, true);
+ err = type_cast__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto out;
+
+ prog_fd = bpf_program__fd(skel->progs.md_xdp);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, XDP_PASS, "xdp test_run retval");
+
+ ASSERT_EQ(skel->bss->ifindex, 1, "xdp_md ifindex");
+ ASSERT_EQ(skel->bss->ifindex, skel->bss->ingress_ifindex, "xdp_md ingress_ifindex");
+ ASSERT_STREQ(skel->bss->name, "lo", "xdp_md name");
+ ASSERT_NEQ(skel->bss->inum, 0, "xdp_md inum");
+
+out:
+ type_cast__destroy(skel);
+}
+
+static void test_tc(void)
+{
+ struct type_cast *skel;
+ int err, prog_fd;
+
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+
+ skel = type_cast__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ bpf_program__set_autoload(skel->progs.md_skb, true);
+ err = type_cast__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto out;
+
+ prog_fd = bpf_program__fd(skel->progs.md_skb);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, 0, "tc test_run retval");
+
+ ASSERT_EQ(skel->bss->meta_len, 0, "skb meta_len");
+ ASSERT_EQ(skel->bss->frag0_len, 0, "skb frag0_len");
+ ASSERT_NEQ(skel->bss->kskb_len, 0, "skb len");
+ ASSERT_NEQ(skel->bss->kskb2_len, 0, "skb2 len");
+ ASSERT_EQ(skel->bss->kskb_len, skel->bss->kskb2_len, "skb len compare");
+
+out:
+ type_cast__destroy(skel);
+}
+
+static const char * const negative_tests[] = {
+ "untrusted_ptr",
+ "kctx_u64",
+};
+
+static void test_negative(void)
+{
+ struct bpf_program *prog;
+ struct type_cast *skel;
+ int i, err;
+
+ for (i = 0; i < ARRAY_SIZE(negative_tests); i++) {
+ skel = type_cast__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ prog = bpf_object__find_program_by_name(skel->obj, negative_tests[i]);
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+ goto out;
+ bpf_program__set_autoload(prog, true);
+ err = type_cast__load(skel);
+ ASSERT_ERR(err, "skel_load");
+out:
+ type_cast__destroy(skel);
+ }
+}
+
+void test_type_cast(void)
+{
+ if (test__start_subtest("xdp"))
+ test_xdp();
+ if (test__start_subtest("tc"))
+ test_tc();
+ if (test__start_subtest("negative"))
+ test_negative();
+}
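
The runner above hard-codes prog md_skb and the bss fields kskb_len/kskb2_len,
so those names are real; the program body below is a hedged sketch of how the
two casting kfuncs are presumably used in progs/type_cast.c:

    #include <vmlinux.h>
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_core_read.h>

    void *bpf_cast_to_kern_ctx(void *obj) __ksym;
    void *bpf_rdonly_cast(void *obj, __u32 btf_id) __ksym;

    __u32 kskb_len, kskb2_len;

    SEC("?tc")
    int md_skb(struct __sk_buff *skb)
    {
        struct sk_buff *kskb, *kskb2;

        /* ctx pointer to its kernel counterpart */
        kskb = bpf_cast_to_kern_ctx(skb);
        kskb_len = kskb->len;

        /* arbitrary pointer to a read-only, BTF-typed one */
        kskb2 = bpf_rdonly_cast(kskb, bpf_core_type_id_kernel(struct sk_buff));
        kskb2_len = kskb2->len;
        return 0;
    }

    char _license[] SEC("license") = "GPL";
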
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
index 9b9cf8458adf..39973ea1ce43 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
@@ -18,7 +18,7 @@ static void test_xdp_adjust_tail_shrink(void)
);
err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
- if (ASSERT_OK(err, "test_xdp_adjust_tail_shrink"))
+ if (!ASSERT_OK(err, "test_xdp_adjust_tail_shrink"))
return;
err = bpf_prog_test_run_opts(prog_fd, &topts);
@@ -53,7 +53,7 @@ static void test_xdp_adjust_tail_grow(void)
);
err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
- if (ASSERT_OK(err, "test_xdp_adjust_tail_grow"))
+ if (!ASSERT_OK(err, "test_xdp_adjust_tail_grow"))
return;
err = bpf_prog_test_run_opts(prog_fd, &topts);
@@ -63,6 +63,7 @@ static void test_xdp_adjust_tail_grow(void)
expect_sz = sizeof(pkt_v6) + 40; /* Test grow with 40 bytes */
topts.data_in = &pkt_v6;
topts.data_size_in = sizeof(pkt_v6);
+ topts.data_size_out = sizeof(buf);
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "ipv6");
ASSERT_EQ(topts.retval, XDP_TX, "ipv6 retval");
@@ -89,7 +90,7 @@ static void test_xdp_adjust_tail_grow2(void)
);
err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
- if (ASSERT_OK(err, "test_xdp_adjust_tail_grow"))
+ if (!ASSERT_OK(err, "test_xdp_adjust_tail_grow"))
return;
/* Test case-64 */
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
index a50971c6cf4a..9ac6f6a268db 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
@@ -85,7 +85,7 @@ static void test_max_pkt_size(int fd)
}
#define NUM_PKTS 10000
-void test_xdp_do_redirect(void)
+void serial_test_xdp_do_redirect(void)
{
int err, xdp_prog_fd, tc_prog_fd, ifindex_src, ifindex_dst;
char data[sizeof(pkt_udp) + sizeof(__u32)];
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c b/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c
index 75550a40e029..13daa3746064 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c
@@ -94,12 +94,12 @@ static void test_synproxy(bool xdp)
SYS("sysctl -w net.ipv4.tcp_syncookies=2");
SYS("sysctl -w net.ipv4.tcp_timestamps=1");
SYS("sysctl -w net.netfilter.nf_conntrack_tcp_loose=0");
- SYS("iptables -t raw -I PREROUTING \
+ SYS("iptables-legacy -t raw -I PREROUTING \
-i tmp1 -p tcp -m tcp --syn --dport 8080 -j CT --notrack");
- SYS("iptables -t filter -A INPUT \
+ SYS("iptables-legacy -t filter -A INPUT \
-i tmp1 -p tcp -m tcp --dport 8080 -m state --state INVALID,UNTRACKED \
-j SYNPROXY --sack-perm --timestamp --wscale 7 --mss 1460");
- SYS("iptables -t filter -A INPUT \
+ SYS("iptables-legacy -t filter -A INPUT \
-i tmp1 -m state --state INVALID -j DROP");
ctrl_file = SYS_OUT("./xdp_synproxy --iface tmp1 --ports 8080 \
@@ -174,7 +174,7 @@ out:
system("ip netns del synproxy");
}
-void test_xdp_synproxy(void)
+void serial_test_xdp_synproxy(void)
{
if (test__start_subtest("xdp"))
test_synproxy(true);
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c
index 6286023fd62b..c5969ca6f26b 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c
@@ -19,13 +19,20 @@ struct {
__type(value, __u64);
} arraymap1 SEC(".maps");
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 10);
+ __type(key, __u64);
+ __type(value, __u32);
+} hashmap1 SEC(".maps");
+
__u32 key_sum = 0;
__u64 val_sum = 0;
SEC("iter/bpf_map_elem")
int dump_bpf_array_map(struct bpf_iter__bpf_map_elem *ctx)
{
- __u32 *key = ctx->key;
+ __u32 *hmap_val, *key = ctx->key;
__u64 *val = ctx->value;
if (key == (void *)0 || val == (void *)0)
@@ -35,6 +42,18 @@ int dump_bpf_array_map(struct bpf_iter__bpf_map_elem *ctx)
bpf_seq_write(ctx->meta->seq, val, sizeof(__u64));
key_sum += *key;
val_sum += *val;
+
+ /* Workaround - it's necessary to do this convoluted (val, key)
+ * write into hashmap1, instead of simply doing
+ * bpf_map_update_elem(&hashmap1, val, key, BPF_ANY);
+ * because key carries the MEM_RDONLY flag and bpf_map_update_elem()
+ * expects types without this flag.
+ */
+ bpf_map_update_elem(&hashmap1, val, val, BPF_ANY);
+ hmap_val = bpf_map_lookup_elem(&hashmap1, val);
+ if (hmap_val)
+ *hmap_val = *key;
+
*val = *key;
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c
index f2661c8d2d90..7cb522d22a66 100644
--- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c
+++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c
@@ -102,12 +102,21 @@ struct zone {
struct zone_padding __pad__;
};
+/* ----- START-EXPECTED-OUTPUT ----- */
+struct padding_wo_named_members {
+ long: 64;
+ long: 64;
+};
+
+/* ------ END-EXPECTED-OUTPUT ------ */
+
int f(struct {
struct padded_implicitly _1;
struct padded_explicitly _2;
struct padded_a_lot _3;
struct padded_cache_line _4;
struct zone _5;
+ struct padding_wo_named_members _6;
} *_)
{
return 0;
diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h b/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h
new file mode 100644
index 000000000000..7d30855bfe78
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#ifndef _CGRP_KFUNC_COMMON_H
+#define _CGRP_KFUNC_COMMON_H
+
+#include <errno.h>
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+struct __cgrps_kfunc_map_value {
+ struct cgroup __kptr_ref * cgrp;
+};
+
+struct hash_map {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, int);
+ __type(value, struct __cgrps_kfunc_map_value);
+ __uint(max_entries, 1);
+} __cgrps_kfunc_map SEC(".maps");
+
+struct cgroup *bpf_cgroup_acquire(struct cgroup *p) __ksym;
+struct cgroup *bpf_cgroup_kptr_get(struct cgroup **pp) __ksym;
+void bpf_cgroup_release(struct cgroup *p) __ksym;
+struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) __ksym;
+
+static inline struct __cgrps_kfunc_map_value *cgrps_kfunc_map_value_lookup(struct cgroup *cgrp)
+{
+ s32 id;
+ long status;
+
+ status = bpf_probe_read_kernel(&id, sizeof(id), &cgrp->self.id);
+ if (status)
+ return NULL;
+
+ return bpf_map_lookup_elem(&__cgrps_kfunc_map, &id);
+}
+
+static inline int cgrps_kfunc_map_insert(struct cgroup *cgrp)
+{
+ struct __cgrps_kfunc_map_value local, *v;
+ long status;
+ struct cgroup *acquired, *old;
+ s32 id;
+
+ status = bpf_probe_read_kernel(&id, sizeof(id), &cgrp->self.id);
+ if (status)
+ return status;
+
+ local.cgrp = NULL;
+ status = bpf_map_update_elem(&__cgrps_kfunc_map, &id, &local, BPF_NOEXIST);
+ if (status)
+ return status;
+
+ v = bpf_map_lookup_elem(&__cgrps_kfunc_map, &id);
+ if (!v) {
+ bpf_map_delete_elem(&__cgrps_kfunc_map, &id);
+ return -ENOENT;
+ }
+
+ acquired = bpf_cgroup_acquire(cgrp);
+ old = bpf_kptr_xchg(&v->cgrp, acquired);
+ if (old) {
+ bpf_cgroup_release(old);
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
+#endif /* _CGRP_KFUNC_COMMON_H */
diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c b/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c
new file mode 100644
index 000000000000..a1369b5ebcf8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "cgrp_kfunc_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+/* Prototype for all of the program trace events below:
+ *
+ * TRACE_EVENT(cgroup_mkdir,
+ * TP_PROTO(struct cgroup *cgrp, const char *path),
+ * TP_ARGS(cgrp, path))
+ */
+
+static struct __cgrps_kfunc_map_value *insert_lookup_cgrp(struct cgroup *cgrp)
+{
+ int status;
+
+ status = cgrps_kfunc_map_insert(cgrp);
+ if (status)
+ return NULL;
+
+ return cgrps_kfunc_map_value_lookup(cgrp);
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(cgrp_kfunc_acquire_untrusted, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *acquired;
+ struct __cgrps_kfunc_map_value *v;
+
+ v = insert_lookup_cgrp(cgrp);
+ if (!v)
+ return 0;
+
+ /* Can't invoke bpf_cgroup_acquire() on an untrusted pointer. */
+ acquired = bpf_cgroup_acquire(v->cgrp);
+ bpf_cgroup_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(cgrp_kfunc_acquire_fp, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *acquired, *stack_cgrp = (struct cgroup *)&path;
+
+ /* Can't invoke bpf_cgroup_acquire() on a random frame pointer. */
+ acquired = bpf_cgroup_acquire((struct cgroup *)&stack_cgrp);
+ bpf_cgroup_release(acquired);
+
+ return 0;
+}
+
+SEC("kretprobe/cgroup_destroy_locked")
+int BPF_PROG(cgrp_kfunc_acquire_unsafe_kretprobe, struct cgroup *cgrp)
+{
+ struct cgroup *acquired;
+
+ /* Can't acquire an untrusted struct cgroup * pointer. */
+ acquired = bpf_cgroup_acquire(cgrp);
+ bpf_cgroup_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(cgrp_kfunc_acquire_trusted_walked, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *acquired;
+
+ /* Can't invoke bpf_cgroup_acquire() on a pointer obtained from walking a trusted cgroup. */
+ acquired = bpf_cgroup_acquire(cgrp->old_dom_cgrp);
+ bpf_cgroup_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(cgrp_kfunc_acquire_null, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *acquired;
+
+ /* Can't invoke bpf_cgroup_acquire() on a NULL pointer. */
+ acquired = bpf_cgroup_acquire(NULL);
+ if (!acquired)
+ return 0;
+ bpf_cgroup_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(cgrp_kfunc_acquire_unreleased, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *acquired;
+
+ acquired = bpf_cgroup_acquire(cgrp);
+
+ /* Acquired cgroup is never released. */
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(cgrp_kfunc_get_non_kptr_param, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *kptr;
+
+ /* Cannot use bpf_cgroup_kptr_get() on a non-kptr, even on a valid cgroup. */
+ kptr = bpf_cgroup_kptr_get(&cgrp);
+ if (!kptr)
+ return 0;
+
+ bpf_cgroup_release(kptr);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(cgrp_kfunc_get_non_kptr_acquired, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *kptr, *acquired;
+
+ acquired = bpf_cgroup_acquire(cgrp);
+
+ /* Cannot use bpf_cgroup_kptr_get() on a non-map-value, even if the kptr was acquired. */
+ kptr = bpf_cgroup_kptr_get(&acquired);
+ bpf_cgroup_release(acquired);
+ if (!kptr)
+ return 0;
+
+ bpf_cgroup_release(kptr);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(cgrp_kfunc_get_null, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *kptr;
+
+ /* Cannot use bpf_cgroup_kptr_get() on a NULL pointer. */
+ kptr = bpf_cgroup_kptr_get(NULL);
+ if (!kptr)
+ return 0;
+
+ bpf_cgroup_release(kptr);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(cgrp_kfunc_xchg_unreleased, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *kptr;
+ struct __cgrps_kfunc_map_value *v;
+
+ v = insert_lookup_cgrp(cgrp);
+ if (!v)
+ return 0;
+
+ kptr = bpf_kptr_xchg(&v->cgrp, NULL);
+ if (!kptr)
+ return 0;
+
+ /* Kptr retrieved from map is never released. */
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(cgrp_kfunc_get_unreleased, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *kptr;
+ struct __cgrps_kfunc_map_value *v;
+
+ v = insert_lookup_cgrp(cgrp);
+ if (!v)
+ return 0;
+
+ kptr = bpf_cgroup_kptr_get(&v->cgrp);
+ if (!kptr)
+ return 0;
+
+ /* Kptr acquired above is never released. */
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(cgrp_kfunc_release_untrusted, struct cgroup *cgrp, const char *path)
+{
+ struct __cgrps_kfunc_map_value *v;
+
+ v = insert_lookup_cgrp(cgrp);
+ if (!v)
+ return 0;
+
+ /* Can't invoke bpf_cgroup_release() on an untrusted pointer. */
+ bpf_cgroup_release(v->cgrp);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(cgrp_kfunc_release_fp, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *acquired = (struct cgroup *)&path;
+
+ /* Cannot release random frame pointer. */
+ bpf_cgroup_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(cgrp_kfunc_release_null, struct cgroup *cgrp, const char *path)
+{
+ struct __cgrps_kfunc_map_value local, *v;
+ long status;
+ struct cgroup *acquired, *old;
+ s32 id;
+
+ status = bpf_probe_read_kernel(&id, sizeof(id), &cgrp->self.id);
+ if (status)
+ return 0;
+
+ local.cgrp = NULL;
+ status = bpf_map_update_elem(&__cgrps_kfunc_map, &id, &local, BPF_NOEXIST);
+ if (status)
+ return status;
+
+ v = bpf_map_lookup_elem(&__cgrps_kfunc_map, &id);
+ if (!v)
+ return -ENOENT;
+
+ acquired = bpf_cgroup_acquire(cgrp);
+
+ old = bpf_kptr_xchg(&v->cgrp, acquired);
+
+ /* old cannot be passed to bpf_cgroup_release() without a NULL check. */
+ bpf_cgroup_release(old);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(cgrp_kfunc_release_unacquired, struct cgroup *cgrp, const char *path)
+{
+ /* Cannot release trusted cgroup pointer which was not acquired. */
+ bpf_cgroup_release(cgrp);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c b/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c
new file mode 100644
index 000000000000..0c23ea32df9f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "cgrp_kfunc_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+int err, pid, invocations;
+
+/* Prototype for all of the program trace events below:
+ *
+ * TRACE_EVENT(cgroup_mkdir,
+ * TP_PROTO(struct cgroup *cgrp, const char *path),
+ * TP_ARGS(cgrp, path))
+ */
+
+static bool is_test_kfunc_task(void)
+{
+ int cur_pid = bpf_get_current_pid_tgid() >> 32;
+ bool same = pid == cur_pid;
+
+ if (same)
+ __sync_fetch_and_add(&invocations, 1);
+
+ return same;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(test_cgrp_acquire_release_argument, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *acquired;
+
+ if (!is_test_kfunc_task())
+ return 0;
+
+ acquired = bpf_cgroup_acquire(cgrp);
+ bpf_cgroup_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(test_cgrp_acquire_leave_in_map, struct cgroup *cgrp, const char *path)
+{
+ long status;
+
+ if (!is_test_kfunc_task())
+ return 0;
+
+ status = cgrps_kfunc_map_insert(cgrp);
+ if (status)
+ err = 1;
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(test_cgrp_xchg_release, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *kptr;
+ struct __cgrps_kfunc_map_value *v;
+ long status;
+
+ if (!is_test_kfunc_task())
+ return 0;
+
+ status = cgrps_kfunc_map_insert(cgrp);
+ if (status) {
+ err = 1;
+ return 0;
+ }
+
+ v = cgrps_kfunc_map_value_lookup(cgrp);
+ if (!v) {
+ err = 2;
+ return 0;
+ }
+
+ kptr = bpf_kptr_xchg(&v->cgrp, NULL);
+ if (!kptr) {
+ err = 3;
+ return 0;
+ }
+
+ bpf_cgroup_release(kptr);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(test_cgrp_get_release, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *kptr;
+ struct __cgrps_kfunc_map_value *v;
+ long status;
+
+ if (!is_test_kfunc_task())
+ return 0;
+
+ status = cgrps_kfunc_map_insert(cgrp);
+ if (status) {
+ err = 1;
+ return 0;
+ }
+
+ v = cgrps_kfunc_map_value_lookup(cgrp);
+ if (!v) {
+ err = 2;
+ return 0;
+ }
+
+ kptr = bpf_cgroup_kptr_get(&v->cgrp);
+ if (!kptr) {
+ err = 3;
+ return 0;
+ }
+
+ bpf_cgroup_release(kptr);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(test_cgrp_get_ancestors, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *self, *ancestor1, *invalid;
+
+ if (!is_test_kfunc_task())
+ return 0;
+
+ self = bpf_cgroup_ancestor(cgrp, cgrp->level);
+ if (!self) {
+ err = 1;
+ return 0;
+ }
+
+ if (self->self.id != cgrp->self.id) {
+ bpf_cgroup_release(self);
+ err = 2;
+ return 0;
+ }
+ bpf_cgroup_release(self);
+
+ ancestor1 = bpf_cgroup_ancestor(cgrp, cgrp->level - 1);
+ if (!ancestor1) {
+ err = 3;
+ return 0;
+ }
+ bpf_cgroup_release(ancestor1);
+
+ invalid = bpf_cgroup_ancestor(cgrp, 10000);
+ if (invalid) {
+ bpf_cgroup_release(invalid);
+ err = 4;
+ return 0;
+ }
+
+ invalid = bpf_cgroup_ancestor(cgrp, -1);
+ if (invalid) {
+ bpf_cgroup_release(invalid);
+ err = 5;
+ return 0;
+ }
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/cgrp_ls_attach_cgroup.c b/tools/testing/selftests/bpf/progs/cgrp_ls_attach_cgroup.c
new file mode 100644
index 000000000000..6652d18465b2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgrp_ls_attach_cgroup.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_tracing_net.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct socket_cookie {
+ __u64 cookie_key;
+ __u64 cookie_value;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct socket_cookie);
+} socket_cookies SEC(".maps");
+
+SEC("cgroup/connect6")
+int set_cookie(struct bpf_sock_addr *ctx)
+{
+ struct socket_cookie *p;
+ struct tcp_sock *tcp_sk;
+ struct bpf_sock *sk;
+
+ if (ctx->family != AF_INET6 || ctx->user_family != AF_INET6)
+ return 1;
+
+ sk = ctx->sk;
+ if (!sk)
+ return 1;
+
+ tcp_sk = bpf_skc_to_tcp_sock(sk);
+ if (!tcp_sk)
+ return 1;
+
+ p = bpf_cgrp_storage_get(&socket_cookies,
+ tcp_sk->inet_conn.icsk_inet.sk.sk_cgrp_data.cgroup, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!p)
+ return 1;
+
+ p->cookie_value = 0xF;
+ p->cookie_key = bpf_get_socket_cookie(ctx);
+ return 1;
+}
+
+SEC("sockops")
+int update_cookie_sockops(struct bpf_sock_ops *ctx)
+{
+ struct socket_cookie *p;
+ struct tcp_sock *tcp_sk;
+ struct bpf_sock *sk;
+
+ if (ctx->family != AF_INET6 || ctx->op != BPF_SOCK_OPS_TCP_CONNECT_CB)
+ return 1;
+
+ sk = ctx->sk;
+ if (!sk)
+ return 1;
+
+ tcp_sk = bpf_skc_to_tcp_sock(sk);
+ if (!tcp_sk)
+ return 1;
+
+ p = bpf_cgrp_storage_get(&socket_cookies,
+ tcp_sk->inet_conn.icsk_inet.sk.sk_cgrp_data.cgroup, 0, 0);
+ if (!p)
+ return 1;
+
+ if (p->cookie_key != bpf_get_socket_cookie(ctx))
+ return 1;
+
+ p->cookie_value |= (ctx->local_port << 8);
+ return 1;
+}
+
+SEC("fexit/inet_stream_connect")
+int BPF_PROG(update_cookie_tracing, struct socket *sock,
+ struct sockaddr *uaddr, int addr_len, int flags)
+{
+ struct socket_cookie *p;
+ struct tcp_sock *tcp_sk;
+
+ if (uaddr->sa_family != AF_INET6)
+ return 0;
+
+ p = bpf_cgrp_storage_get(&socket_cookies, sock->sk->sk_cgrp_data.cgroup, 0, 0);
+ if (!p)
+ return 0;
+
+ if (p->cookie_key != bpf_get_socket_cookie(sock->sk))
+ return 0;
+
+ p->cookie_value |= 0xF0;
+ return 0;
+}
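
Taken together, the three programs build cookie_value up in stages:
set_cookie() stores 0xF at connect time, update_cookie_sockops() ORs in
(local_port << 8) once the key matches, and update_cookie_tracing() ORs in
0xF0, so a fully-connected socket ends at (local_port << 8) | 0xFF. A hedged
sketch of how the runner (prog_tests/cgrp_local_storage.c, elsewhere in this
series) could verify this; cgroup_fd and local_port are assumed fixture
variables:

    struct socket_cookie val;
    int err;

    /* user space addresses a BPF_MAP_TYPE_CGRP_STORAGE entry by cgroup fd */
    err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.socket_cookies),
                              &cgroup_fd, &val);
    if (ASSERT_OK(err, "map_lookup(socket_cookies)"))
        ASSERT_EQ(val.cookie_value, (local_port << 8) | 0xFF, "cookie_value");
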
diff --git a/tools/testing/selftests/bpf/progs/cgrp_ls_negative.c b/tools/testing/selftests/bpf/progs/cgrp_ls_negative.c
new file mode 100644
index 000000000000..d41f90e2ab64
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgrp_ls_negative.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, long);
+} map_a SEC(".maps");
+
+SEC("tp_btf/sys_enter")
+int BPF_PROG(on_enter, struct pt_regs *regs, long id)
+{
+ struct task_struct *task;
+
+ task = bpf_get_current_task_btf();
+ (void)bpf_cgrp_storage_get(&map_a, (struct cgroup *)task, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c b/tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c
new file mode 100644
index 000000000000..a043d8fefdac
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, long);
+} map_a SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, long);
+} map_b SEC(".maps");
+
+SEC("fentry/bpf_local_storage_lookup")
+int BPF_PROG(on_lookup)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+
+ bpf_cgrp_storage_delete(&map_a, task->cgroups->dfl_cgrp);
+ bpf_cgrp_storage_delete(&map_b, task->cgroups->dfl_cgrp);
+ return 0;
+}
+
+SEC("fentry/bpf_local_storage_update")
+int BPF_PROG(on_update)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+ long *ptr;
+
+ ptr = bpf_cgrp_storage_get(&map_a, task->cgroups->dfl_cgrp, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (ptr)
+ *ptr += 1;
+
+ ptr = bpf_cgrp_storage_get(&map_b, task->cgroups->dfl_cgrp, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (ptr)
+ *ptr += 1;
+
+ return 0;
+}
+
+SEC("tp_btf/sys_enter")
+int BPF_PROG(on_enter, struct pt_regs *regs, long id)
+{
+ struct task_struct *task;
+ long *ptr;
+
+ task = bpf_get_current_task_btf();
+ ptr = bpf_cgrp_storage_get(&map_a, task->cgroups->dfl_cgrp, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (ptr)
+ *ptr = 200;
+
+ ptr = bpf_cgrp_storage_get(&map_b, task->cgroups->dfl_cgrp, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (ptr)
+ *ptr = 100;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/cgrp_ls_tp_btf.c b/tools/testing/selftests/bpf/progs/cgrp_ls_tp_btf.c
new file mode 100644
index 000000000000..9ebb8e2fe541
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgrp_ls_tp_btf.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, long);
+} map_a SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, long);
+} map_b SEC(".maps");
+
+#define MAGIC_VALUE 0xabcd1234
+
+pid_t target_pid = 0;
+int mismatch_cnt = 0;
+int enter_cnt = 0;
+int exit_cnt = 0;
+
+SEC("tp_btf/sys_enter")
+int BPF_PROG(on_enter, struct pt_regs *regs, long id)
+{
+ struct task_struct *task;
+ long *ptr;
+ int err;
+
+ task = bpf_get_current_task_btf();
+ if (task->pid != target_pid)
+ return 0;
+
+ /* populate value 0 */
+ ptr = bpf_cgrp_storage_get(&map_a, task->cgroups->dfl_cgrp, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!ptr)
+ return 0;
+
+ /* delete value 0 */
+ err = bpf_cgrp_storage_delete(&map_a, task->cgroups->dfl_cgrp);
+ if (err)
+ return 0;
+
+ /* value is not available */
+ ptr = bpf_cgrp_storage_get(&map_a, task->cgroups->dfl_cgrp, 0, 0);
+ if (ptr)
+ return 0;
+
+ /* re-populate the value */
+ ptr = bpf_cgrp_storage_get(&map_a, task->cgroups->dfl_cgrp, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!ptr)
+ return 0;
+ __sync_fetch_and_add(&enter_cnt, 1);
+ *ptr = MAGIC_VALUE + enter_cnt;
+
+ return 0;
+}
+
+SEC("tp_btf/sys_exit")
+int BPF_PROG(on_exit, struct pt_regs *regs, long id)
+{
+ struct task_struct *task;
+ long *ptr;
+
+ task = bpf_get_current_task_btf();
+ if (task->pid != target_pid)
+ return 0;
+
+ ptr = bpf_cgrp_storage_get(&map_a, task->cgroups->dfl_cgrp, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!ptr)
+ return 0;
+
+ __sync_fetch_and_add(&exit_cnt, 1);
+ if (*ptr != MAGIC_VALUE + exit_cnt)
+ __sync_fetch_and_add(&mismatch_cnt, 1);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/empty_skb.c b/tools/testing/selftests/bpf/progs/empty_skb.c
new file mode 100644
index 000000000000..4b0cd6753251
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/empty_skb.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+char _license[] SEC("license") = "GPL";
+
+int ifindex;
+int ret;
+
+SEC("lwt_xmit")
+int redirect_ingress(struct __sk_buff *skb)
+{
+ ret = bpf_clone_redirect(skb, ifindex, BPF_F_INGRESS);
+ return 0;
+}
+
+SEC("lwt_xmit")
+int redirect_egress(struct __sk_buff *skb)
+{
+ ret = bpf_clone_redirect(skb, ifindex, 0);
+ return 0;
+}
+
+SEC("tc")
+int tc_redirect_ingress(struct __sk_buff *skb)
+{
+ ret = bpf_clone_redirect(skb, ifindex, BPF_F_INGRESS);
+ return 0;
+}
+
+SEC("tc")
+int tc_redirect_egress(struct __sk_buff *skb)
+{
+ ret = bpf_clone_redirect(skb, ifindex, 0);
+ return 0;
+}
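
A hedged sketch of how the matching runner (prog_tests/empty_skb.c, added
elsewhere in this series) plausibly drives these programs: run a header-only
frame through one of them and check the bpf_clone_redirect() result captured
in ret; everything beyond the skeleton's own names is assumed:

    struct empty_skb *skel = empty_skb__open_and_load();
    char buf[sizeof(struct ethhdr)] = {}; /* header-only, zero payload */
    LIBBPF_OPTS(bpf_test_run_opts, topts,
        .data_in = buf,
        .data_size_in = sizeof(buf),
    );
    int err;

    if (!ASSERT_OK_PTR(skel, "open_and_load"))
        return;

    skel->bss->ifindex = 1; /* loopback here; the real test likely uses veth */
    err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.tc_redirect_ingress),
                                 &topts);
    ASSERT_OK(err, "test_run");
    ASSERT_OK(skel->bss->ret, "clone_redirect ret");
    empty_skb__destroy(skel);
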
diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi.c b/tools/testing/selftests/bpf/progs/kprobe_multi.c
index 98c3399e15c0..9e1ca8e34913 100644
--- a/tools/testing/selftests/bpf/progs/kprobe_multi.c
+++ b/tools/testing/selftests/bpf/progs/kprobe_multi.c
@@ -110,3 +110,53 @@ int test_kretprobe_manual(struct pt_regs *ctx)
kprobe_multi_check(ctx, true);
return 0;
}
+
+extern const void bpf_testmod_fentry_test1 __ksym;
+extern const void bpf_testmod_fentry_test2 __ksym;
+extern const void bpf_testmod_fentry_test3 __ksym;
+
+__u64 kprobe_testmod_test1_result = 0;
+__u64 kprobe_testmod_test2_result = 0;
+__u64 kprobe_testmod_test3_result = 0;
+
+__u64 kretprobe_testmod_test1_result = 0;
+__u64 kretprobe_testmod_test2_result = 0;
+__u64 kretprobe_testmod_test3_result = 0;
+
+static void kprobe_multi_testmod_check(void *ctx, bool is_return)
+{
+ __u64 addr;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return;
+
+ addr = bpf_get_func_ip(ctx);
+
+ if (is_return) {
+ if ((const void *) addr == &bpf_testmod_fentry_test1)
+ kretprobe_testmod_test1_result = 1;
+ if ((const void *) addr == &bpf_testmod_fentry_test2)
+ kretprobe_testmod_test2_result = 1;
+ if ((const void *) addr == &bpf_testmod_fentry_test3)
+ kretprobe_testmod_test3_result = 1;
+ } else {
+ if ((const void *) addr == &bpf_testmod_fentry_test1)
+ kprobe_testmod_test1_result = 1;
+ if ((const void *) addr == &bpf_testmod_fentry_test2)
+ kprobe_testmod_test2_result = 1;
+ if ((const void *) addr == &bpf_testmod_fentry_test3)
+ kprobe_testmod_test3_result = 1;
+ }
+}
+
+SEC("kprobe.multi")
+int test_kprobe_testmod(struct pt_regs *ctx)
+{
+ kprobe_multi_testmod_check(ctx, false);
+ return 0;
+}
+
+SEC("kretprobe.multi")
+int test_kretprobe_testmod(struct pt_regs *ctx)
+{
+ kprobe_multi_testmod_check(ctx, true);
+ return 0;
+}
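
These two programs use bare SEC("kprobe.multi")/SEC("kretprobe.multi") with no
attach pattern, so the runner must attach them explicitly. A hedged sketch of
the expected attach call (the prog_tests side is not in this hunk):

    LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
    const char *syms[] = {
        "bpf_testmod_fentry_test1",
        "bpf_testmod_fentry_test2",
        "bpf_testmod_fentry_test3",
    };
    struct bpf_link *link;

    opts.syms = syms;
    opts.cnt = ARRAY_SIZE(syms);
    link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_testmod,
                                                 NULL, &opts);
    if (!ASSERT_OK_PTR(link, "attach_kprobe_multi"))
        return;

The new coverage here is that the three symbols live in bpf_testmod, i.e. in a
module rather than in vmlinux.
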
diff --git a/tools/testing/selftests/bpf/progs/linked_list.c b/tools/testing/selftests/bpf/progs/linked_list.c
new file mode 100644
index 000000000000..2c7b615c6d41
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/linked_list.c
@@ -0,0 +1,370 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+#include "linked_list.h"
+
+static __always_inline
+int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool leave_in_map)
+{
+ struct bpf_list_node *n;
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 2;
+
+ bpf_spin_lock(lock);
+ n = bpf_list_pop_front(head);
+ bpf_spin_unlock(lock);
+ if (n) {
+ bpf_obj_drop(container_of(n, struct foo, node));
+ bpf_obj_drop(f);
+ return 3;
+ }
+
+ bpf_spin_lock(lock);
+ n = bpf_list_pop_back(head);
+ bpf_spin_unlock(lock);
+ if (n) {
+ bpf_obj_drop(container_of(n, struct foo, node));
+ bpf_obj_drop(f);
+ return 4;
+ }
+
+ bpf_spin_lock(lock);
+ f->data = 42;
+ bpf_list_push_front(head, &f->node);
+ bpf_spin_unlock(lock);
+ if (leave_in_map)
+ return 0;
+ bpf_spin_lock(lock);
+ n = bpf_list_pop_back(head);
+ bpf_spin_unlock(lock);
+ if (!n)
+ return 5;
+ f = container_of(n, struct foo, node);
+ if (f->data != 42) {
+ bpf_obj_drop(f);
+ return 6;
+ }
+
+ bpf_spin_lock(lock);
+ f->data = 13;
+ bpf_list_push_front(head, &f->node);
+ bpf_spin_unlock(lock);
+ bpf_spin_lock(lock);
+ n = bpf_list_pop_front(head);
+ bpf_spin_unlock(lock);
+ if (!n)
+ return 7;
+ f = container_of(n, struct foo, node);
+ if (f->data != 13) {
+ bpf_obj_drop(f);
+ return 8;
+ }
+ bpf_obj_drop(f);
+
+ bpf_spin_lock(lock);
+ n = bpf_list_pop_front(head);
+ bpf_spin_unlock(lock);
+ if (n) {
+ bpf_obj_drop(container_of(n, struct foo, node));
+ return 9;
+ }
+
+ bpf_spin_lock(lock);
+ n = bpf_list_pop_back(head);
+ bpf_spin_unlock(lock);
+ if (n) {
+ bpf_obj_drop(container_of(n, struct foo, node));
+ return 10;
+ }
+ return 0;
+}
+
+static __always_inline
+int list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool leave_in_map)
+{
+ struct bpf_list_node *n;
+ struct foo *f[8], *pf;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(f); i++) {
+ f[i] = bpf_obj_new(typeof(**f));
+ if (!f[i])
+ return 2;
+ f[i]->data = i;
+ bpf_spin_lock(lock);
+ bpf_list_push_front(head, &f[i]->node);
+ bpf_spin_unlock(lock);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(f); i++) {
+ bpf_spin_lock(lock);
+ n = bpf_list_pop_front(head);
+ bpf_spin_unlock(lock);
+ if (!n)
+ return 3;
+ pf = container_of(n, struct foo, node);
+ if (pf->data != (ARRAY_SIZE(f) - i - 1)) {
+ bpf_obj_drop(pf);
+ return 4;
+ }
+ bpf_spin_lock(lock);
+ bpf_list_push_back(head, &pf->node);
+ bpf_spin_unlock(lock);
+ }
+
+ if (leave_in_map)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(f); i++) {
+ bpf_spin_lock(lock);
+ n = bpf_list_pop_back(head);
+ bpf_spin_unlock(lock);
+ if (!n)
+ return 5;
+ pf = container_of(n, struct foo, node);
+ if (pf->data != i) {
+ bpf_obj_drop(pf);
+ return 6;
+ }
+ bpf_obj_drop(pf);
+ }
+ bpf_spin_lock(lock);
+ n = bpf_list_pop_back(head);
+ bpf_spin_unlock(lock);
+ if (n) {
+ bpf_obj_drop(container_of(n, struct foo, node));
+ return 7;
+ }
+
+ bpf_spin_lock(lock);
+ n = bpf_list_pop_front(head);
+ bpf_spin_unlock(lock);
+ if (n) {
+ bpf_obj_drop(container_of(n, struct foo, node));
+ return 8;
+ }
+ return 0;
+}
+
+static __always_inline
+int list_in_list(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool leave_in_map)
+{
+ struct bpf_list_node *n;
+ struct bar *ba[8], *b;
+ struct foo *f;
+ int i;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 2;
+ for (i = 0; i < ARRAY_SIZE(ba); i++) {
+ b = bpf_obj_new(typeof(*b));
+ if (!b) {
+ bpf_obj_drop(f);
+ return 3;
+ }
+ b->data = i;
+ bpf_spin_lock(&f->lock);
+ bpf_list_push_back(&f->head, &b->node);
+ bpf_spin_unlock(&f->lock);
+ }
+
+ bpf_spin_lock(lock);
+ f->data = 42;
+ bpf_list_push_front(head, &f->node);
+ bpf_spin_unlock(lock);
+
+ if (leave_in_map)
+ return 0;
+
+ bpf_spin_lock(lock);
+ n = bpf_list_pop_front(head);
+ bpf_spin_unlock(lock);
+ if (!n)
+ return 4;
+ f = container_of(n, struct foo, node);
+ if (f->data != 42) {
+ bpf_obj_drop(f);
+ return 5;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ba); i++) {
+ bpf_spin_lock(&f->lock);
+ n = bpf_list_pop_front(&f->head);
+ bpf_spin_unlock(&f->lock);
+ if (!n) {
+ bpf_obj_drop(f);
+ return 6;
+ }
+ b = container_of(n, struct bar, node);
+ if (b->data != i) {
+ bpf_obj_drop(f);
+ bpf_obj_drop(b);
+ return 7;
+ }
+ bpf_obj_drop(b);
+ }
+ bpf_spin_lock(&f->lock);
+ n = bpf_list_pop_front(&f->head);
+ bpf_spin_unlock(&f->lock);
+ if (n) {
+ bpf_obj_drop(f);
+ bpf_obj_drop(container_of(n, struct bar, node));
+ return 8;
+ }
+ bpf_obj_drop(f);
+ return 0;
+}
+
+static __always_inline
+int test_list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head)
+{
+ int ret;
+
+ ret = list_push_pop(lock, head, false);
+ if (ret)
+ return ret;
+ return list_push_pop(lock, head, true);
+}
+
+static __always_inline
+int test_list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *head)
+{
+ int ret;
+
+	ret = list_push_pop_multiple(lock, head, false);
+ if (ret)
+ return ret;
+ return list_push_pop_multiple(lock, head, true);
+}
+
+static __always_inline
+int test_list_in_list(struct bpf_spin_lock *lock, struct bpf_list_head *head)
+{
+ int ret;
+
+ ret = list_in_list(lock, head, false);
+ if (ret)
+ return ret;
+ return list_in_list(lock, head, true);
+}
+
+SEC("tc")
+int map_list_push_pop(void *ctx)
+{
+ struct map_value *v;
+
+ v = bpf_map_lookup_elem(&array_map, &(int){0});
+ if (!v)
+ return 1;
+ return test_list_push_pop(&v->lock, &v->head);
+}
+
+SEC("tc")
+int inner_map_list_push_pop(void *ctx)
+{
+ struct map_value *v;
+ void *map;
+
+ map = bpf_map_lookup_elem(&map_of_maps, &(int){0});
+ if (!map)
+ return 1;
+ v = bpf_map_lookup_elem(map, &(int){0});
+ if (!v)
+ return 1;
+ return test_list_push_pop(&v->lock, &v->head);
+}
+
+SEC("tc")
+int global_list_push_pop(void *ctx)
+{
+ return test_list_push_pop(&glock, &ghead);
+}
+
+SEC("tc")
+int map_list_push_pop_multiple(void *ctx)
+{
+ struct map_value *v;
+
+ v = bpf_map_lookup_elem(&array_map, &(int){0});
+ if (!v)
+ return 1;
+ return test_list_push_pop_multiple(&v->lock, &v->head);
+}
+
+SEC("tc")
+int inner_map_list_push_pop_multiple(void *ctx)
+{
+ struct map_value *v;
+ void *map;
+
+ map = bpf_map_lookup_elem(&map_of_maps, &(int){0});
+ if (!map)
+ return 1;
+ v = bpf_map_lookup_elem(map, &(int){0});
+ if (!v)
+ return 1;
+ return test_list_push_pop_multiple(&v->lock, &v->head);
+}
+
+SEC("tc")
+int global_list_push_pop_multiple(void *ctx)
+{
+ int ret;
+
+ ret = list_push_pop_multiple(&glock, &ghead, false);
+ if (ret)
+ return ret;
+ return list_push_pop_multiple(&glock, &ghead, true);
+}
+
+SEC("tc")
+int map_list_in_list(void *ctx)
+{
+ struct map_value *v;
+
+ v = bpf_map_lookup_elem(&array_map, &(int){0});
+ if (!v)
+ return 1;
+ return test_list_in_list(&v->lock, &v->head);
+}
+
+SEC("tc")
+int inner_map_list_in_list(void *ctx)
+{
+ struct map_value *v;
+ void *map;
+
+ map = bpf_map_lookup_elem(&map_of_maps, &(int){0});
+ if (!map)
+ return 1;
+ v = bpf_map_lookup_elem(map, &(int){0});
+ if (!v)
+ return 1;
+ return test_list_in_list(&v->lock, &v->head);
+}
+
+SEC("tc")
+int global_list_in_list(void *ctx)
+{
+ return test_list_in_list(&glock, &ghead);
+}
+
+char _license[] SEC("license") = "GPL";
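
For context, a minimal user-space sketch of driving one of the SEC("tc") programs above through BPF_PROG_TEST_RUN. The skeleton name linked_list.skel.h and the dummy packet are assumptions for illustration; the actual runner in prog_tests/ may differ:

	#include <stdio.h>
	#include <bpf/libbpf.h>
	#include "linked_list.skel.h"		/* assumed bpftool-generated skeleton */

	int main(void)
	{
		char pkt[64] = {};		/* tc test_run wants some packet data */
		LIBBPF_OPTS(bpf_test_run_opts, opts,
			.data_in = pkt,
			.data_size_in = sizeof(pkt),
		);
		struct linked_list *skel;
		int err;

		skel = linked_list__open_and_load();
		if (!skel)
			return 1;
		err = bpf_prog_test_run_opts(
			bpf_program__fd(skel->progs.global_list_push_pop), &opts);
		if (!err)
			printf("retval = %d\n", opts.retval);	/* 0: all checks passed */
		linked_list__destroy(skel);
		return err;
	}
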
diff --git a/tools/testing/selftests/bpf/progs/linked_list.h b/tools/testing/selftests/bpf/progs/linked_list.h
new file mode 100644
index 000000000000..3fb2412552fc
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/linked_list.h
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef LINKED_LIST_H
+#define LINKED_LIST_H
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_experimental.h"
+
+struct bar {
+ struct bpf_list_node node;
+ int data;
+};
+
+struct foo {
+ struct bpf_list_node node;
+ struct bpf_list_head head __contains(bar, node);
+ struct bpf_spin_lock lock;
+ int data;
+ struct bpf_list_node node2;
+};
+
+struct map_value {
+ struct bpf_spin_lock lock;
+ int data;
+ struct bpf_list_head head __contains(foo, node);
+};
+
+struct array_map {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+};
+
+struct array_map array_map SEC(".maps");
+struct array_map inner_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+ __array(values, struct array_map);
+} map_of_maps SEC(".maps") = {
+ .values = {
+ [0] = &inner_map,
+ },
+};
+
+#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))
+
+private(A) struct bpf_spin_lock glock;
+private(A) struct bpf_list_head ghead __contains(foo, node);
+private(B) struct bpf_spin_lock glock2;
+
+#endif
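
Note: the __contains() annotation used throughout this header comes from bpf_experimental.h; as a sketch of the assumed definition (not authoritative), it expands to a BTF declaration tag naming the contained type and its list node member:

	/* assumed bpf_experimental.h definition this header relies on */
	#define __contains(name, node) \
		__attribute__((btf_decl_tag("contains:" #name ":" #node)))
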
diff --git a/tools/testing/selftests/bpf/progs/linked_list_fail.c b/tools/testing/selftests/bpf/progs/linked_list_fail.c
new file mode 100644
index 000000000000..1d9017240e19
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/linked_list_fail.c
@@ -0,0 +1,581 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+
+#include "linked_list.h"
+
+#define INIT \
+ struct map_value *v, *v2, *iv, *iv2; \
+ struct foo *f, *f1, *f2; \
+ struct bar *b; \
+ void *map; \
+ \
+ map = bpf_map_lookup_elem(&map_of_maps, &(int){ 0 }); \
+ if (!map) \
+ return 0; \
+ v = bpf_map_lookup_elem(&array_map, &(int){ 0 }); \
+ if (!v) \
+ return 0; \
+ v2 = bpf_map_lookup_elem(&array_map, &(int){ 0 }); \
+ if (!v2) \
+ return 0; \
+ iv = bpf_map_lookup_elem(map, &(int){ 0 }); \
+ if (!iv) \
+ return 0; \
+ iv2 = bpf_map_lookup_elem(map, &(int){ 0 }); \
+ if (!iv2) \
+ return 0; \
+ f = bpf_obj_new(typeof(*f)); \
+ if (!f) \
+ return 0; \
+ f1 = f; \
+ f2 = bpf_obj_new(typeof(*f2)); \
+ if (!f2) { \
+ bpf_obj_drop(f1); \
+ return 0; \
+ } \
+ b = bpf_obj_new(typeof(*b)); \
+ if (!b) { \
+ bpf_obj_drop(f2); \
+ bpf_obj_drop(f1); \
+ return 0; \
+ }
+
+#define CHECK(test, op, hexpr) \
+ SEC("?tc") \
+ int test##_missing_lock_##op(void *ctx) \
+ { \
+ INIT; \
+ void (*p)(void *) = (void *)&bpf_list_##op; \
+ p(hexpr); \
+ return 0; \
+ }
+
+CHECK(kptr, push_front, &f->head);
+CHECK(kptr, push_back, &f->head);
+CHECK(kptr, pop_front, &f->head);
+CHECK(kptr, pop_back, &f->head);
+
+CHECK(global, push_front, &ghead);
+CHECK(global, push_back, &ghead);
+CHECK(global, pop_front, &ghead);
+CHECK(global, pop_back, &ghead);
+
+CHECK(map, push_front, &v->head);
+CHECK(map, push_back, &v->head);
+CHECK(map, pop_front, &v->head);
+CHECK(map, pop_back, &v->head);
+
+CHECK(inner_map, push_front, &iv->head);
+CHECK(inner_map, push_back, &iv->head);
+CHECK(inner_map, pop_front, &iv->head);
+CHECK(inner_map, pop_back, &iv->head);
+
+#undef CHECK
+
+#define CHECK(test, op, lexpr, hexpr) \
+ SEC("?tc") \
+ int test##_incorrect_lock_##op(void *ctx) \
+ { \
+ INIT; \
+ void (*p)(void *) = (void *)&bpf_list_##op; \
+ bpf_spin_lock(lexpr); \
+ p(hexpr); \
+ return 0; \
+ }
+
+#define CHECK_OP(op) \
+ CHECK(kptr_kptr, op, &f1->lock, &f2->head); \
+ CHECK(kptr_global, op, &f1->lock, &ghead); \
+ CHECK(kptr_map, op, &f1->lock, &v->head); \
+ CHECK(kptr_inner_map, op, &f1->lock, &iv->head); \
+ \
+ CHECK(global_global, op, &glock2, &ghead); \
+ CHECK(global_kptr, op, &glock, &f1->head); \
+ CHECK(global_map, op, &glock, &v->head); \
+ CHECK(global_inner_map, op, &glock, &iv->head); \
+ \
+ CHECK(map_map, op, &v->lock, &v2->head); \
+ CHECK(map_kptr, op, &v->lock, &f2->head); \
+ CHECK(map_global, op, &v->lock, &ghead); \
+ CHECK(map_inner_map, op, &v->lock, &iv->head); \
+ \
+ CHECK(inner_map_inner_map, op, &iv->lock, &iv2->head); \
+ CHECK(inner_map_kptr, op, &iv->lock, &f2->head); \
+ CHECK(inner_map_global, op, &iv->lock, &ghead); \
+ CHECK(inner_map_map, op, &iv->lock, &v->head);
+
+CHECK_OP(push_front);
+CHECK_OP(push_back);
+CHECK_OP(pop_front);
+CHECK_OP(pop_back);
+
+#undef CHECK
+#undef CHECK_OP
+#undef INIT
+
+SEC("?kprobe/xyz")
+int map_compat_kprobe(void *ctx)
+{
+ bpf_list_push_front(&ghead, NULL);
+ return 0;
+}
+
+SEC("?kretprobe/xyz")
+int map_compat_kretprobe(void *ctx)
+{
+ bpf_list_push_front(&ghead, NULL);
+ return 0;
+}
+
+SEC("?tracepoint/xyz")
+int map_compat_tp(void *ctx)
+{
+ bpf_list_push_front(&ghead, NULL);
+ return 0;
+}
+
+SEC("?perf_event")
+int map_compat_perf(void *ctx)
+{
+ bpf_list_push_front(&ghead, NULL);
+ return 0;
+}
+
+SEC("?raw_tp/xyz")
+int map_compat_raw_tp(void *ctx)
+{
+ bpf_list_push_front(&ghead, NULL);
+ return 0;
+}
+
+SEC("?raw_tp.w/xyz")
+int map_compat_raw_tp_w(void *ctx)
+{
+ bpf_list_push_front(&ghead, NULL);
+ return 0;
+}
+
+SEC("?tc")
+int obj_type_id_oor(void *ctx)
+{
+ bpf_obj_new_impl(~0UL, NULL);
+ return 0;
+}
+
+SEC("?tc")
+int obj_new_no_composite(void *ctx)
+{
+ bpf_obj_new_impl(bpf_core_type_id_local(int), (void *)42);
+ return 0;
+}
+
+SEC("?tc")
+int obj_new_no_struct(void *ctx)
+{
+ bpf_obj_new(union { int data; unsigned udata; });
+ return 0;
+}
+
+SEC("?tc")
+int obj_drop_non_zero_off(void *ctx)
+{
+ void *f;
+
+ f = bpf_obj_new(struct foo);
+ if (!f)
+ return 0;
+ bpf_obj_drop(f+1);
+ return 0;
+}
+
+SEC("?tc")
+int new_null_ret(void *ctx)
+{
+ return bpf_obj_new(struct foo)->data;
+}
+
+SEC("?tc")
+int obj_new_acq(void *ctx)
+{
+ bpf_obj_new(struct foo);
+ return 0;
+}
+
+SEC("?tc")
+int use_after_drop(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_obj_drop(f);
+ return f->data;
+}
+
+SEC("?tc")
+int ptr_walk_scalar(void *ctx)
+{
+ struct test1 {
+ struct test2 {
+ struct test2 *next;
+ } *ptr;
+ } *p;
+
+ p = bpf_obj_new(typeof(*p));
+ if (!p)
+ return 0;
+ bpf_this_cpu_ptr(p->ptr);
+ return 0;
+}
+
+SEC("?tc")
+int direct_read_lock(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ return *(int *)&f->lock;
+}
+
+SEC("?tc")
+int direct_write_lock(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ *(int *)&f->lock = 0;
+ return 0;
+}
+
+SEC("?tc")
+int direct_read_head(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ return *(int *)&f->head;
+}
+
+SEC("?tc")
+int direct_write_head(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ *(int *)&f->head = 0;
+ return 0;
+}
+
+SEC("?tc")
+int direct_read_node(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ return *(int *)&f->node;
+}
+
+SEC("?tc")
+int direct_write_node(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ *(int *)&f->node = 0;
+ return 0;
+}
+
+static __always_inline
+int write_after_op(void (*push_op)(void *head, void *node))
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_spin_lock(&glock);
+ push_op(&ghead, &f->node);
+ f->data = 42;
+ bpf_spin_unlock(&glock);
+
+ return 0;
+}
+
+SEC("?tc")
+int write_after_push_front(void *ctx)
+{
+ return write_after_op((void *)bpf_list_push_front);
+}
+
+SEC("?tc")
+int write_after_push_back(void *ctx)
+{
+ return write_after_op((void *)bpf_list_push_back);
+}
+
+static __always_inline
+int use_after_unlock(void (*op)(void *head, void *node))
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_spin_lock(&glock);
+ f->data = 42;
+ op(&ghead, &f->node);
+ bpf_spin_unlock(&glock);
+
+ return f->data;
+}
+
+SEC("?tc")
+int use_after_unlock_push_front(void *ctx)
+{
+ return use_after_unlock((void *)bpf_list_push_front);
+}
+
+SEC("?tc")
+int use_after_unlock_push_back(void *ctx)
+{
+ return use_after_unlock((void *)bpf_list_push_back);
+}
+
+static __always_inline
+int list_double_add(void (*op)(void *head, void *node))
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_spin_lock(&glock);
+ op(&ghead, &f->node);
+ op(&ghead, &f->node);
+ bpf_spin_unlock(&glock);
+
+ return 0;
+}
+
+SEC("?tc")
+int double_push_front(void *ctx)
+{
+ return list_double_add((void *)bpf_list_push_front);
+}
+
+SEC("?tc")
+int double_push_back(void *ctx)
+{
+ return list_double_add((void *)bpf_list_push_back);
+}
+
+SEC("?tc")
+int no_node_value_type(void *ctx)
+{
+ void *p;
+
+ p = bpf_obj_new(struct { int data; });
+ if (!p)
+ return 0;
+ bpf_spin_lock(&glock);
+ bpf_list_push_front(&ghead, p);
+ bpf_spin_unlock(&glock);
+
+ return 0;
+}
+
+SEC("?tc")
+int incorrect_value_type(void *ctx)
+{
+ struct bar *b;
+
+ b = bpf_obj_new(typeof(*b));
+ if (!b)
+ return 0;
+ bpf_spin_lock(&glock);
+ bpf_list_push_front(&ghead, &b->node);
+ bpf_spin_unlock(&glock);
+
+ return 0;
+}
+
+SEC("?tc")
+int incorrect_node_var_off(struct __sk_buff *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_spin_lock(&glock);
+ bpf_list_push_front(&ghead, (void *)&f->node + ctx->protocol);
+ bpf_spin_unlock(&glock);
+
+ return 0;
+}
+
+SEC("?tc")
+int incorrect_node_off1(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_spin_lock(&glock);
+ bpf_list_push_front(&ghead, (void *)&f->node + 1);
+ bpf_spin_unlock(&glock);
+
+ return 0;
+}
+
+SEC("?tc")
+int incorrect_node_off2(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_spin_lock(&glock);
+ bpf_list_push_front(&ghead, &f->node2);
+ bpf_spin_unlock(&glock);
+
+ return 0;
+}
+
+SEC("?tc")
+int no_head_type(void *ctx)
+{
+ void *p;
+
+ p = bpf_obj_new(typeof(struct { int data; }));
+ if (!p)
+ return 0;
+ bpf_spin_lock(&glock);
+ bpf_list_push_front(p, NULL);
+	bpf_spin_unlock(&glock);
+
+ return 0;
+}
+
+SEC("?tc")
+int incorrect_head_var_off1(struct __sk_buff *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_spin_lock(&glock);
+ bpf_list_push_front((void *)&ghead + ctx->protocol, &f->node);
+ bpf_spin_unlock(&glock);
+
+ return 0;
+}
+
+SEC("?tc")
+int incorrect_head_var_off2(struct __sk_buff *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_spin_lock(&glock);
+ bpf_list_push_front((void *)&f->head + ctx->protocol, &f->node);
+ bpf_spin_unlock(&glock);
+
+ return 0;
+}
+
+SEC("?tc")
+int incorrect_head_off1(void *ctx)
+{
+ struct foo *f;
+ struct bar *b;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ b = bpf_obj_new(typeof(*b));
+ if (!b) {
+ bpf_obj_drop(f);
+ return 0;
+ }
+
+ bpf_spin_lock(&f->lock);
+ bpf_list_push_front((void *)&f->head + 1, &b->node);
+ bpf_spin_unlock(&f->lock);
+
+ return 0;
+}
+
+SEC("?tc")
+int incorrect_head_off2(void *ctx)
+{
+ struct foo *f;
+ struct bar *b;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+
+ bpf_spin_lock(&glock);
+ bpf_list_push_front((void *)&ghead + 1, &f->node);
+ bpf_spin_unlock(&glock);
+
+ return 0;
+}
+
+static __always_inline
+int pop_ptr_off(void *(*op)(void *head))
+{
+ struct {
+ struct bpf_list_head head __contains(foo, node2);
+ struct bpf_spin_lock lock;
+ } *p;
+ struct bpf_list_node *n;
+
+ p = bpf_obj_new(typeof(*p));
+ if (!p)
+ return 0;
+ bpf_spin_lock(&p->lock);
+ n = op(&p->head);
+ bpf_spin_unlock(&p->lock);
+
+ bpf_this_cpu_ptr(n);
+ return 0;
+}
+
+SEC("?tc")
+int pop_front_off(void *ctx)
+{
+ return pop_ptr_off((void *)bpf_list_pop_front);
+}
+
+SEC("?tc")
+int pop_back_off(void *ctx)
+{
+ return pop_ptr_off((void *)bpf_list_pop_back);
+}
+
+char _license[] SEC("license") = "GPL";
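
Every program above sits in a "?..." section, so nothing autoloads by default. A hedged sketch of how one such program can be load-tested in isolation, with the verifier expected to reject it (the skeleton name and helper are assumptions; the real harness typically also matches on the verifier log):

	#include <bpf/libbpf.h>
	#include "linked_list_fail.skel.h"	/* assumed skeleton name */

	/* enable one prog by name and expect the verifier to reject it */
	static int expect_load_failure(const char *prog_name)
	{
		struct linked_list_fail *skel;
		struct bpf_program *prog;
		int err = 0;

		skel = linked_list_fail__open();
		if (!skel)
			return -1;
		prog = bpf_object__find_program_by_name(skel->obj, prog_name);
		if (prog) {
			bpf_program__set_autoload(prog, true);
			err = linked_list_fail__load(skel);	/* expected to fail */
		}
		linked_list_fail__destroy(skel);
		return err ? 0 : -1;	/* 0: rejected as expected */
	}
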
diff --git a/tools/testing/selftests/bpf/progs/lsm_cgroup.c b/tools/testing/selftests/bpf/progs/lsm_cgroup.c
index 4f2d60b87b75..02c11d16b692 100644
--- a/tools/testing/selftests/bpf/progs/lsm_cgroup.c
+++ b/tools/testing/selftests/bpf/progs/lsm_cgroup.c
@@ -7,6 +7,10 @@
char _license[] SEC("license") = "GPL";
+extern bool CONFIG_SECURITY_SELINUX __kconfig __weak;
+extern bool CONFIG_SECURITY_SMACK __kconfig __weak;
+extern bool CONFIG_SECURITY_APPARMOR __kconfig __weak;
+
#ifndef AF_PACKET
#define AF_PACKET 17
#endif
@@ -140,6 +144,10 @@ SEC("lsm_cgroup/sk_alloc_security")
int BPF_PROG(socket_alloc, struct sock *sk, int family, gfp_t priority)
{
called_socket_alloc++;
+	/* if non-bpf LSMs are already installed, returning EPERM would leak memory in those LSMs */
+ if (CONFIG_SECURITY_SELINUX || CONFIG_SECURITY_SMACK || CONFIG_SECURITY_APPARMOR)
+ return 1;
+
if (family == AF_UNIX)
return 0; /* EPERM */
diff --git a/tools/testing/selftests/bpf/progs/rcu_read_lock.c b/tools/testing/selftests/bpf/progs/rcu_read_lock.c
new file mode 100644
index 000000000000..94a970076b98
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/rcu_read_lock.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_tracing_net.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, long);
+} map_a SEC(".maps");
+
+__u32 user_data, key_serial, target_pid;
+__u64 flags, task_storage_val, cgroup_id;
+
+struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym;
+void bpf_key_put(struct bpf_key *key) __ksym;
+void bpf_rcu_read_lock(void) __ksym;
+void bpf_rcu_read_unlock(void) __ksym;
+struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
+void bpf_task_release(struct task_struct *p) __ksym;
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int get_cgroup_id(void *ctx)
+{
+ struct task_struct *task;
+
+ task = bpf_get_current_task_btf();
+ if (task->pid != target_pid)
+ return 0;
+
+ /* simulate bpf_get_current_cgroup_id() helper */
+ bpf_rcu_read_lock();
+ cgroup_id = task->cgroups->dfl_cgrp->kn->id;
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int task_succ(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+ long init_val = 2;
+ long *ptr;
+
+ task = bpf_get_current_task_btf();
+ if (task->pid != target_pid)
+ return 0;
+
+ bpf_rcu_read_lock();
+	/* region using the RCU-protected pointer real_parent, including helper calls */
+ real_parent = task->real_parent;
+ ptr = bpf_task_storage_get(&map_a, real_parent, &init_val,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!ptr)
+ goto out;
+ ptr = bpf_task_storage_get(&map_a, real_parent, 0, 0);
+ if (!ptr)
+ goto out;
+ task_storage_val = *ptr;
+out:
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_nanosleep")
+int no_lock(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+
+ /* no bpf_rcu_read_lock(), old code still works */
+ task = bpf_get_current_task_btf();
+ real_parent = task->real_parent;
+ (void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_nanosleep")
+int two_regions(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+
+ /* two regions */
+ task = bpf_get_current_task_btf();
+ bpf_rcu_read_lock();
+ bpf_rcu_read_unlock();
+ bpf_rcu_read_lock();
+ real_parent = task->real_parent;
+ (void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry/" SYS_PREFIX "sys_getpgid")
+int non_sleepable_1(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+
+ task = bpf_get_current_task_btf();
+ bpf_rcu_read_lock();
+ real_parent = task->real_parent;
+ (void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry/" SYS_PREFIX "sys_getpgid")
+int non_sleepable_2(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+
+ bpf_rcu_read_lock();
+ task = bpf_get_current_task_btf();
+ bpf_rcu_read_unlock();
+
+ bpf_rcu_read_lock();
+ real_parent = task->real_parent;
+ (void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_nanosleep")
+int task_acquire(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+
+ task = bpf_get_current_task_btf();
+ bpf_rcu_read_lock();
+ real_parent = task->real_parent;
+	/* acquire a reference which can be used outside the rcu read lock region */
+ real_parent = bpf_task_acquire(real_parent);
+ bpf_rcu_read_unlock();
+ (void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
+ bpf_task_release(real_parent);
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int miss_lock(void *ctx)
+{
+ struct task_struct *task;
+
+ /* missing bpf_rcu_read_lock() */
+ task = bpf_get_current_task_btf();
+ bpf_rcu_read_lock();
+ (void)bpf_task_storage_get(&map_a, task, 0, 0);
+ bpf_rcu_read_unlock();
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int miss_unlock(void *ctx)
+{
+ struct task_struct *task;
+
+ /* missing bpf_rcu_read_unlock() */
+ task = bpf_get_current_task_btf();
+ bpf_rcu_read_lock();
+ (void)bpf_task_storage_get(&map_a, task, 0, 0);
+ return 0;
+}
+
+SEC("?fentry/" SYS_PREFIX "sys_getpgid")
+int non_sleepable_rcu_mismatch(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+
+ task = bpf_get_current_task_btf();
+ /* non-sleepable: missing bpf_rcu_read_unlock() in one path */
+ bpf_rcu_read_lock();
+ real_parent = task->real_parent;
+ (void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
+ if (real_parent)
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int inproper_sleepable_helper(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+ struct pt_regs *regs;
+ __u32 value = 0;
+ void *ptr;
+
+ task = bpf_get_current_task_btf();
+ /* sleepable helper in rcu read lock region */
+ bpf_rcu_read_lock();
+ real_parent = task->real_parent;
+ regs = (struct pt_regs *)bpf_task_pt_regs(real_parent);
+ if (!regs) {
+ bpf_rcu_read_unlock();
+ return 0;
+ }
+
+ ptr = (void *)PT_REGS_IP(regs);
+ (void)bpf_copy_from_user_task(&value, sizeof(uint32_t), ptr, task, 0);
+ user_data = value;
+ (void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?lsm.s/bpf")
+int BPF_PROG(inproper_sleepable_kfunc, int cmd, union bpf_attr *attr, unsigned int size)
+{
+ struct bpf_key *bkey;
+
+ /* sleepable kfunc in rcu read lock region */
+ bpf_rcu_read_lock();
+ bkey = bpf_lookup_user_key(key_serial, flags);
+ bpf_rcu_read_unlock();
+ if (!bkey)
+ return -1;
+ bpf_key_put(bkey);
+
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_nanosleep")
+int nested_rcu_region(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+
+ /* nested rcu read lock regions */
+ task = bpf_get_current_task_btf();
+ bpf_rcu_read_lock();
+ bpf_rcu_read_lock();
+ real_parent = task->real_parent;
+ (void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
+ bpf_rcu_read_unlock();
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int task_untrusted_non_rcuptr(void *ctx)
+{
+ struct task_struct *task, *last_wakee;
+
+ task = bpf_get_current_task_btf();
+ bpf_rcu_read_lock();
+	/* the pointer last_wakee is marked as untrusted */
+ last_wakee = task->real_parent->last_wakee;
+ (void)bpf_task_storage_get(&map_a, last_wakee, 0, 0);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int task_untrusted_rcuptr(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+
+ task = bpf_get_current_task_btf();
+ bpf_rcu_read_lock();
+ real_parent = task->real_parent;
+ bpf_rcu_read_unlock();
+ /* helper use of rcu ptr outside the rcu read lock region */
+ (void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_nanosleep")
+int cross_rcu_region(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+
+	/* rcu ptr defined and used in different rcu read lock regions */
+ task = bpf_get_current_task_btf();
+ bpf_rcu_read_lock();
+ real_parent = task->real_parent;
+ bpf_rcu_read_unlock();
+ bpf_rcu_read_lock();
+ (void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
+ bpf_rcu_read_unlock();
+ return 0;
+}
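
A minimal user-space trigger sketch for one of the sleepable fentry programs above, assuming a skeleton named rcu_read_lock.skel.h (the "?" sections mean each program must be opted in before load):

	#include <unistd.h>
	#include <sys/syscall.h>
	#include <bpf/libbpf.h>
	#include "rcu_read_lock.skel.h"		/* assumed skeleton name */

	int main(void)
	{
		struct rcu_read_lock *skel;

		skel = rcu_read_lock__open();
		if (!skel)
			return 1;
		bpf_program__set_autoload(skel->progs.task_succ, true);
		if (rcu_read_lock__load(skel) || rcu_read_lock__attach(skel))
			goto out;
		skel->bss->target_pid = getpid();
		syscall(SYS_getpgid, 0);	/* fires the sys_getpgid fentry */
		/* skel->bss->task_storage_val now holds the parent's value */
	out:
		rcu_read_lock__destroy(skel);
		return 0;
	}
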
diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_common.h b/tools/testing/selftests/bpf/progs/task_kfunc_common.h
new file mode 100644
index 000000000000..c0ffd171743e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_kfunc_common.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#ifndef _TASK_KFUNC_COMMON_H
+#define _TASK_KFUNC_COMMON_H
+
+#include <errno.h>
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+struct __tasks_kfunc_map_value {
+	struct task_struct __kptr_ref *task;
+};
+
+struct hash_map {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, int);
+ __type(value, struct __tasks_kfunc_map_value);
+ __uint(max_entries, 1);
+} __tasks_kfunc_map SEC(".maps");
+
+struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
+struct task_struct *bpf_task_kptr_get(struct task_struct **pp) __ksym;
+void bpf_task_release(struct task_struct *p) __ksym;
+struct task_struct *bpf_task_from_pid(s32 pid) __ksym;
+
+static inline struct __tasks_kfunc_map_value *tasks_kfunc_map_value_lookup(struct task_struct *p)
+{
+ s32 pid;
+ long status;
+
+ status = bpf_probe_read_kernel(&pid, sizeof(pid), &p->pid);
+ if (status)
+ return NULL;
+
+ return bpf_map_lookup_elem(&__tasks_kfunc_map, &pid);
+}
+
+static inline int tasks_kfunc_map_insert(struct task_struct *p)
+{
+ struct __tasks_kfunc_map_value local, *v;
+ long status;
+ struct task_struct *acquired, *old;
+ s32 pid;
+
+ status = bpf_probe_read_kernel(&pid, sizeof(pid), &p->pid);
+ if (status)
+ return status;
+
+ local.task = NULL;
+ status = bpf_map_update_elem(&__tasks_kfunc_map, &pid, &local, BPF_NOEXIST);
+ if (status)
+ return status;
+
+ v = bpf_map_lookup_elem(&__tasks_kfunc_map, &pid);
+ if (!v) {
+ bpf_map_delete_elem(&__tasks_kfunc_map, &pid);
+ return -ENOENT;
+ }
+
+ acquired = bpf_task_acquire(p);
+ old = bpf_kptr_xchg(&v->task, acquired);
+ if (old) {
+ bpf_task_release(old);
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
+#endif /* _TASK_KFUNC_COMMON_H */
diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c
new file mode 100644
index 000000000000..e310473190d5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "task_kfunc_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+/* Prototype for all of the program trace events below:
+ *
+ * TRACE_EVENT(task_newtask,
+ * TP_PROTO(struct task_struct *p, u64 clone_flags)
+ */
+
+static struct __tasks_kfunc_map_value *insert_lookup_task(struct task_struct *task)
+{
+ int status;
+
+ status = tasks_kfunc_map_insert(task);
+ if (status)
+ return NULL;
+
+ return tasks_kfunc_map_value_lookup(task);
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(task_kfunc_acquire_untrusted, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *acquired;
+ struct __tasks_kfunc_map_value *v;
+
+ v = insert_lookup_task(task);
+ if (!v)
+ return 0;
+
+ /* Can't invoke bpf_task_acquire() on an untrusted pointer. */
+ acquired = bpf_task_acquire(v->task);
+ bpf_task_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(task_kfunc_acquire_fp, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *acquired, *stack_task = (struct task_struct *)&clone_flags;
+
+ /* Can't invoke bpf_task_acquire() on a random frame pointer. */
+ acquired = bpf_task_acquire((struct task_struct *)&stack_task);
+ bpf_task_release(acquired);
+
+ return 0;
+}
+
+SEC("kretprobe/free_task")
+int BPF_PROG(task_kfunc_acquire_unsafe_kretprobe, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *acquired;
+
+ acquired = bpf_task_acquire(task);
+ /* Can't release a bpf_task_acquire()'d task without a NULL check. */
+ bpf_task_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(task_kfunc_acquire_trusted_walked, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *acquired;
+
+ /* Can't invoke bpf_task_acquire() on a trusted pointer obtained from walking a struct. */
+ acquired = bpf_task_acquire(task->last_wakee);
+ bpf_task_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(task_kfunc_acquire_null, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *acquired;
+
+ /* Can't invoke bpf_task_acquire() on a NULL pointer. */
+ acquired = bpf_task_acquire(NULL);
+ if (!acquired)
+ return 0;
+ bpf_task_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(task_kfunc_acquire_unreleased, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *acquired;
+
+ acquired = bpf_task_acquire(task);
+
+ /* Acquired task is never released. */
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(task_kfunc_get_non_kptr_param, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *kptr;
+
+ /* Cannot use bpf_task_kptr_get() on a non-kptr, even on a valid task. */
+ kptr = bpf_task_kptr_get(&task);
+ if (!kptr)
+ return 0;
+
+ bpf_task_release(kptr);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(task_kfunc_get_non_kptr_acquired, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *kptr, *acquired;
+
+ acquired = bpf_task_acquire(task);
+
+ /* Cannot use bpf_task_kptr_get() on a non-kptr, even if it was acquired. */
+ kptr = bpf_task_kptr_get(&acquired);
+ bpf_task_release(acquired);
+ if (!kptr)
+ return 0;
+
+ bpf_task_release(kptr);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(task_kfunc_get_null, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *kptr;
+
+ /* Cannot use bpf_task_kptr_get() on a NULL pointer. */
+ kptr = bpf_task_kptr_get(NULL);
+ if (!kptr)
+ return 0;
+
+ bpf_task_release(kptr);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(task_kfunc_xchg_unreleased, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *kptr;
+ struct __tasks_kfunc_map_value *v;
+
+ v = insert_lookup_task(task);
+ if (!v)
+ return 0;
+
+ kptr = bpf_kptr_xchg(&v->task, NULL);
+ if (!kptr)
+ return 0;
+
+ /* Kptr retrieved from map is never released. */
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(task_kfunc_get_unreleased, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *kptr;
+ struct __tasks_kfunc_map_value *v;
+
+ v = insert_lookup_task(task);
+ if (!v)
+ return 0;
+
+ kptr = bpf_task_kptr_get(&v->task);
+ if (!kptr)
+ return 0;
+
+ /* Kptr acquired above is never released. */
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(task_kfunc_release_untrusted, struct task_struct *task, u64 clone_flags)
+{
+ struct __tasks_kfunc_map_value *v;
+
+ v = insert_lookup_task(task);
+ if (!v)
+ return 0;
+
+ /* Can't invoke bpf_task_release() on an untrusted pointer. */
+ bpf_task_release(v->task);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(task_kfunc_release_fp, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *acquired = (struct task_struct *)&clone_flags;
+
+ /* Cannot release random frame pointer. */
+ bpf_task_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(task_kfunc_release_null, struct task_struct *task, u64 clone_flags)
+{
+ struct __tasks_kfunc_map_value local, *v;
+ long status;
+ struct task_struct *acquired, *old;
+ s32 pid;
+
+ status = bpf_probe_read_kernel(&pid, sizeof(pid), &task->pid);
+ if (status)
+ return 0;
+
+ local.task = NULL;
+ status = bpf_map_update_elem(&__tasks_kfunc_map, &pid, &local, BPF_NOEXIST);
+ if (status)
+ return status;
+
+ v = bpf_map_lookup_elem(&__tasks_kfunc_map, &pid);
+ if (!v)
+ return -ENOENT;
+
+ acquired = bpf_task_acquire(task);
+
+ old = bpf_kptr_xchg(&v->task, acquired);
+
+ /* old cannot be passed to bpf_task_release() without a NULL check. */
+ bpf_task_release(old);
+ bpf_task_release(old);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(task_kfunc_release_unacquired, struct task_struct *task, u64 clone_flags)
+{
+ /* Cannot release trusted task pointer which was not acquired. */
+ bpf_task_release(task);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(task_kfunc_from_pid_no_null_check, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *acquired;
+
+ acquired = bpf_task_from_pid(task->pid);
+
+ /* Releasing bpf_task_from_pid() lookup without a NULL check. */
+ bpf_task_release(acquired);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_success.c b/tools/testing/selftests/bpf/progs/task_kfunc_success.c
new file mode 100644
index 000000000000..60c7ead41cfc
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_kfunc_success.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "task_kfunc_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+int err, pid;
+
+/* Prototype for all of the program trace events below:
+ *
+ * TRACE_EVENT(task_newtask,
+ * TP_PROTO(struct task_struct *p, u64 clone_flags)
+ */
+
+static bool is_test_kfunc_task(void)
+{
+ int cur_pid = bpf_get_current_pid_tgid() >> 32;
+
+ return pid == cur_pid;
+}
+
+static int test_acquire_release(struct task_struct *task)
+{
+ struct task_struct *acquired;
+
+ acquired = bpf_task_acquire(task);
+ bpf_task_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_task_acquire_release_argument, struct task_struct *task, u64 clone_flags)
+{
+ if (!is_test_kfunc_task())
+ return 0;
+
+ return test_acquire_release(task);
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_task_acquire_release_current, struct task_struct *task, u64 clone_flags)
+{
+ if (!is_test_kfunc_task())
+ return 0;
+
+ return test_acquire_release(bpf_get_current_task_btf());
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_task_acquire_leave_in_map, struct task_struct *task, u64 clone_flags)
+{
+ long status;
+
+ if (!is_test_kfunc_task())
+ return 0;
+
+ status = tasks_kfunc_map_insert(task);
+ if (status)
+ err = 1;
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_task_xchg_release, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *kptr;
+ struct __tasks_kfunc_map_value *v;
+ long status;
+
+ if (!is_test_kfunc_task())
+ return 0;
+
+ status = tasks_kfunc_map_insert(task);
+ if (status) {
+ err = 1;
+ return 0;
+ }
+
+ v = tasks_kfunc_map_value_lookup(task);
+ if (!v) {
+ err = 2;
+ return 0;
+ }
+
+ kptr = bpf_kptr_xchg(&v->task, NULL);
+ if (!kptr) {
+ err = 3;
+ return 0;
+ }
+
+ bpf_task_release(kptr);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_task_get_release, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *kptr;
+ struct __tasks_kfunc_map_value *v;
+ long status;
+
+ if (!is_test_kfunc_task())
+ return 0;
+
+ status = tasks_kfunc_map_insert(task);
+ if (status) {
+ err = 1;
+ return 0;
+ }
+
+ v = tasks_kfunc_map_value_lookup(task);
+ if (!v) {
+ err = 2;
+ return 0;
+ }
+
+ kptr = bpf_task_kptr_get(&v->task);
+ if (!kptr) {
+ err = 3;
+ return 0;
+ }
+
+ bpf_task_release(kptr);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_task_current_acquire_release, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *current, *acquired;
+
+ if (!is_test_kfunc_task())
+ return 0;
+
+ current = bpf_get_current_task_btf();
+ acquired = bpf_task_acquire(current);
+ bpf_task_release(acquired);
+
+ return 0;
+}
+
+static void lookup_compare_pid(const struct task_struct *p)
+{
+ struct task_struct *acquired;
+
+ acquired = bpf_task_from_pid(p->pid);
+ if (!acquired) {
+ err = 1;
+ return;
+ }
+
+ if (acquired->pid != p->pid)
+ err = 2;
+ bpf_task_release(acquired);
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_task_from_pid_arg, struct task_struct *task, u64 clone_flags)
+{
+ if (!is_test_kfunc_task())
+ return 0;
+
+ lookup_compare_pid(task);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_task_from_pid_current, struct task_struct *task, u64 clone_flags)
+{
+ if (!is_test_kfunc_task())
+ return 0;
+
+ lookup_compare_pid(bpf_get_current_task_btf());
+ return 0;
+}
+
+static int is_pid_lookup_valid(s32 pid)
+{
+ struct task_struct *acquired;
+
+ acquired = bpf_task_from_pid(pid);
+ if (acquired) {
+ bpf_task_release(acquired);
+ return 1;
+ }
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_task_from_pid_invalid, struct task_struct *task, u64 clone_flags)
+{
+ if (!is_test_kfunc_task())
+ return 0;
+
+ if (is_pid_lookup_valid(-1)) {
+ err = 1;
+ return 0;
+ }
+
+ if (is_pid_lookup_valid(0xcafef00d)) {
+ err = 2;
+ return 0;
+ }
+
+ return 0;
+}
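
A sketch of how user space would drive the success cases above, assuming a skeleton named task_kfunc_success.skel.h; fork() fires the task_newtask tracepoint from the test pid:

	#include <sys/wait.h>
	#include <unistd.h>
	#include <bpf/libbpf.h>
	#include "task_kfunc_success.skel.h"	/* assumed skeleton name */

	int main(void)
	{
		struct task_kfunc_success *skel;
		int status;

		skel = task_kfunc_success__open_and_load();
		if (!skel)
			return 1;
		skel->bss->pid = getpid();
		if (task_kfunc_success__attach(skel))
			goto out;
		if (fork() == 0)	/* task_newtask fires in the parent's context */
			_exit(0);
		wait(&status);
		/* skel->bss->err stays 0 when every kfunc check passed */
	out:
		task_kfunc_success__destroy(skel);
		return 0;
	}
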
diff --git a/tools/testing/selftests/bpf/progs/task_local_storage_exit_creds.c b/tools/testing/selftests/bpf/progs/task_local_storage_exit_creds.c
index 81758c0aef99..41d88ed222ff 100644
--- a/tools/testing/selftests/bpf/progs/task_local_storage_exit_creds.c
+++ b/tools/testing/selftests/bpf/progs/task_local_storage_exit_creds.c
@@ -14,6 +14,7 @@ struct {
__type(value, __u64);
} task_storage SEC(".maps");
+int run_count = 0;
int valid_ptr_count = 0;
int null_ptr_count = 0;
@@ -28,5 +29,7 @@ int BPF_PROG(trace_exit_creds, struct task_struct *task)
__sync_fetch_and_add(&valid_ptr_count, 1);
else
__sync_fetch_and_add(&null_ptr_count, 1);
+
+ __sync_fetch_and_add(&run_count, 1);
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/task_ls_recursion.c b/tools/testing/selftests/bpf/progs/task_ls_recursion.c
index 564583dca7c8..4542dc683b44 100644
--- a/tools/testing/selftests/bpf/progs/task_ls_recursion.c
+++ b/tools/testing/selftests/bpf/progs/task_ls_recursion.c
@@ -5,7 +5,13 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
+#ifndef EBUSY
+#define EBUSY 16
+#endif
+
char _license[] SEC("license") = "GPL";
+int nr_del_errs = 0;
+int test_pid = 0;
struct {
__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
@@ -26,6 +32,13 @@ int BPF_PROG(on_lookup)
{
struct task_struct *task = bpf_get_current_task_btf();
+ if (!test_pid || task->pid != test_pid)
+ return 0;
+
+	/* bpf_task_storage_delete() will call
+	 * bpf_local_storage_lookup(). The prog->active
+	 * counter stops the recursion.
+	 */
bpf_task_storage_delete(&map_a, task);
bpf_task_storage_delete(&map_b, task);
return 0;
@@ -37,11 +50,32 @@ int BPF_PROG(on_update)
struct task_struct *task = bpf_get_current_task_btf();
long *ptr;
+ if (!test_pid || task->pid != test_pid)
+ return 0;
+
ptr = bpf_task_storage_get(&map_a, task, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
- if (ptr)
+	/* ptr will not be NULL when this prog runs from the
+	 * bpf_task_storage_get(&map_b,...F_CREATE) in
+	 * BPF_PROG(on_enter) below, because the value can
+	 * already be found in map_a and the kernel does not
+	 * need to acquire any spin_lock to return it.
+	 */
+ if (ptr) {
+ int err;
+
*ptr += 1;
+ err = bpf_task_storage_delete(&map_a, task);
+ if (err == -EBUSY)
+ nr_del_errs++;
+ }
+	/* This will still fail because map_b is empty and this
+	 * BPF_PROG(on_update) has failed to acquire the percpu
+	 * busy lock: a potential deadlock is detected, so it
+	 * fails to create new storage.
+	 */
ptr = bpf_task_storage_get(&map_b, task, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (ptr)
@@ -57,14 +91,17 @@ int BPF_PROG(on_enter, struct pt_regs *regs, long id)
long *ptr;
task = bpf_get_current_task_btf();
+ if (!test_pid || task->pid != test_pid)
+ return 0;
+
ptr = bpf_task_storage_get(&map_a, task, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
- if (ptr)
+ if (ptr && !*ptr)
*ptr = 200;
ptr = bpf_task_storage_get(&map_b, task, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
- if (ptr)
+ if (ptr && !*ptr)
*ptr = 100;
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/task_storage_nodeadlock.c b/tools/testing/selftests/bpf/progs/task_storage_nodeadlock.c
new file mode 100644
index 000000000000..ea2dbb80f7b3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_storage_nodeadlock.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+#ifndef EBUSY
+#define EBUSY 16
+#endif
+
+extern bool CONFIG_PREEMPT __kconfig __weak;
+int nr_get_errs = 0;
+int nr_del_errs = 0;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, int);
+} task_storage SEC(".maps");
+
+SEC("lsm.s/socket_post_create")
+int BPF_PROG(socket_post_create, struct socket *sock, int family, int type,
+ int protocol, int kern)
+{
+ struct task_struct *task;
+ int ret, zero = 0;
+ int *value;
+
+ if (!CONFIG_PREEMPT)
+ return 0;
+
+ task = bpf_get_current_task_btf();
+ value = bpf_task_storage_get(&task_storage, task, &zero,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!value)
+ __sync_fetch_and_add(&nr_get_errs, 1);
+
+ ret = bpf_task_storage_delete(&task_storage,
+ bpf_get_current_task_btf());
+ if (ret == -EBUSY)
+ __sync_fetch_and_add(&nr_del_errs, 1);
+
+ return 0;
+}
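
Sketch of a user-space trigger, assuming a skeleton named task_storage_nodeadlock.skel.h and a kernel with CONFIG_BPF_LSM and "bpf" in the active LSM list; creating a socket drives socket_post_create:

	#include <sys/socket.h>
	#include <unistd.h>
	#include <bpf/libbpf.h>
	#include "task_storage_nodeadlock.skel.h"	/* assumed skeleton name */

	int main(void)
	{
		struct task_storage_nodeadlock *skel;
		int fd;

		skel = task_storage_nodeadlock__open_and_load();
		if (!skel)
			return 1;
		if (!task_storage_nodeadlock__attach(skel)) {
			fd = socket(AF_INET, SOCK_STREAM, 0);
			if (fd >= 0)
				close(fd);
			/* nr_get_errs/nr_del_errs stay 0 unless contention hit */
		}
		task_storage_nodeadlock__destroy(skel);
		return 0;
	}
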
diff --git a/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c b/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c
new file mode 100644
index 000000000000..f5ac5f3e8919
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH
+ *
+ * Author: Roberto Sassu <roberto.sassu@huawei.com>
+ */
+
+#include "vmlinux.h"
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+/* From include/linux/mm.h. */
+#define FMODE_WRITE 0x2
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u32);
+} data_input SEC(".maps");
+
+char _license[] SEC("license") = "GPL";
+
+SEC("lsm/bpf_map")
+int BPF_PROG(check_access, struct bpf_map *map, fmode_t fmode)
+{
+ if (map != (struct bpf_map *)&data_input)
+ return 0;
+
+ if (fmode & FMODE_WRITE)
+ return -EACCES;
+
+ return 0;
+}
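
The LSM hook above pairs with the libbpf *_get_fd_by_id_opts() variants; a hedged sketch of the user-space side, assuming the bpf_get_fd_by_id_opts API added alongside this test: a read-only fd on data_input succeeds, while a writable one is rejected with -EACCES.

	#include <bpf/bpf.h>

	/* request a read-only handle; with the LSM program loaded, only
	 * open_flags == BPF_F_RDONLY passes the FMODE_WRITE check above */
	static int get_map_rdonly_fd(__u32 map_id)
	{
		LIBBPF_OPTS(bpf_get_fd_by_id_opts, opts,
			.open_flags = BPF_F_RDONLY,
		);

		return bpf_map_get_fd_by_id_opts(map_id, &opts);
	}
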
diff --git a/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c b/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c
index 2c121c5d66a7..d487153a839d 100644
--- a/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c
+++ b/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c
@@ -27,6 +27,7 @@ unsigned int nr_pure_ack = 0;
unsigned int nr_data = 0;
unsigned int nr_syn = 0;
unsigned int nr_fin = 0;
+unsigned int nr_hwtstamp = 0;
/* Check the header received from the active side */
static int __check_active_hdr_in(struct bpf_sock_ops *skops, bool check_syn)
@@ -146,6 +147,9 @@ static int check_active_hdr_in(struct bpf_sock_ops *skops)
if (th->ack && !th->fin && tcp_hdrlen(th) == skops->skb_len)
nr_pure_ack++;
+ if (skops->skb_hwtstamp)
+ nr_hwtstamp++;
+
return CG_OK;
}
diff --git a/tools/testing/selftests/bpf/progs/test_module_attach.c b/tools/testing/selftests/bpf/progs/test_module_attach.c
index 08628afedb77..8a1b50f3a002 100644
--- a/tools/testing/selftests/bpf/progs/test_module_attach.c
+++ b/tools/testing/selftests/bpf/progs/test_module_attach.c
@@ -110,4 +110,10 @@ int BPF_PROG(handle_fmod_ret,
return 0; /* don't override the exit code */
}
+SEC("kprobe.multi/bpf_testmod_test_read")
+int BPF_PROG(kprobe_multi)
+{
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf_map_key.c b/tools/testing/selftests/bpf/progs/test_ringbuf_map_key.c
new file mode 100644
index 000000000000..2760bf60d05a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_ringbuf_map_key.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct sample {
+ int pid;
+ int seq;
+ long value;
+ char comm[16];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+} ringbuf SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1000);
+ __type(key, struct sample);
+ __type(value, int);
+} hash_map SEC(".maps");
+
+/* inputs */
+int pid = 0;
+
+/* inner state */
+long seq = 0;
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int test_ringbuf_mem_map_key(void *ctx)
+{
+ int cur_pid = bpf_get_current_pid_tgid() >> 32;
+ struct sample *sample, sample_copy;
+ int *lookup_val;
+
+ if (cur_pid != pid)
+ return 0;
+
+ sample = bpf_ringbuf_reserve(&ringbuf, sizeof(*sample), 0);
+ if (!sample)
+ return 0;
+
+ sample->pid = pid;
+ bpf_get_current_comm(sample->comm, sizeof(sample->comm));
+ sample->seq = ++seq;
+ sample->value = 42;
+
+	/* test using 'sample' (PTR_TO_MEM | MEM_ALLOC) as a map key arg */
+ lookup_val = (int *)bpf_map_lookup_elem(&hash_map, sample);
+
+ /* workaround - memcpy is necessary so that verifier doesn't
+ * complain with:
+ * verifier internal error: more than one arg with ref_obj_id R3
+ * when trying to do bpf_map_update_elem(&hash_map, sample, &sample->seq, BPF_ANY);
+ *
+ * Since bpf_map_lookup_elem above uses 'sample' as key, test using
+ * sample field as value below
+ */
+ __builtin_memcpy(&sample_copy, sample, sizeof(struct sample));
+ bpf_map_update_elem(&hash_map, &sample_copy, &sample->seq, BPF_ANY);
+
+ bpf_ringbuf_submit(sample, 0);
+ return 0;
+}
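
A user-space consumer sketch for the ring buffer above, assuming a skeleton named test_ringbuf_map_key.skel.h:

	#include <unistd.h>
	#include <sys/syscall.h>
	#include <bpf/libbpf.h>
	#include "test_ringbuf_map_key.skel.h"	/* assumed skeleton name */

	static int handle_event(void *ctx, void *data, size_t size)
	{
		/* data points at one struct sample submitted by the prog */
		return 0;
	}

	int main(void)
	{
		struct test_ringbuf_map_key *skel;
		struct ring_buffer *rb;

		skel = test_ringbuf_map_key__open_and_load();
		if (!skel)
			return 1;
		rb = ring_buffer__new(bpf_map__fd(skel->maps.ringbuf),
				      handle_event, NULL, NULL);
		if (rb && !test_ringbuf_map_key__attach(skel)) {
			skel->bss->pid = getpid();
			syscall(SYS_getpgid, 0);	/* trigger the fentry prog */
			ring_buffer__poll(rb, 100);	/* consume samples */
		}
		ring_buffer__free(rb);
		test_ringbuf_map_key__destroy(skel);
		return 0;
	}
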
diff --git a/tools/testing/selftests/bpf/progs/test_skeleton.c b/tools/testing/selftests/bpf/progs/test_skeleton.c
index 1a4e93f6d9df..adece9f91f58 100644
--- a/tools/testing/selftests/bpf/progs/test_skeleton.c
+++ b/tools/testing/selftests/bpf/progs/test_skeleton.c
@@ -53,6 +53,20 @@ int out_mostly_var;
char huge_arr[16 * 1024 * 1024];
+/* non-mmapable custom .data section */
+
+struct my_value { int x, y, z; };
+
+__hidden int zero_key SEC(".data.non_mmapable");
+static struct my_value zero_value SEC(".data.non_mmapable");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct my_value);
+ __uint(max_entries, 1);
+} my_map SEC(".maps");
+
SEC("raw_tp/sys_enter")
int handler(const void *ctx)
{
@@ -75,6 +89,9 @@ int handler(const void *ctx)
huge_arr[sizeof(huge_arr) - 1] = 123;
+ /* make sure zero_key and zero_value are not optimized out */
+ bpf_map_update_elem(&my_map, &zero_key, &zero_value, BPF_ANY);
+
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_spin_lock.c b/tools/testing/selftests/bpf/progs/test_spin_lock.c
index 7e88309d3229..5bd10409285b 100644
--- a/tools/testing/selftests/bpf/progs/test_spin_lock.c
+++ b/tools/testing/selftests/bpf/progs/test_spin_lock.c
@@ -45,8 +45,8 @@ struct {
#define CREDIT_PER_NS(delta, rate) (((delta) * rate) >> 20)
-SEC("tc")
-int bpf_sping_lock_test(struct __sk_buff *skb)
+SEC("cgroup_skb/ingress")
+int bpf_spin_lock_test(struct __sk_buff *skb)
{
volatile int credit = 0, max_credit = 100, pkt_len = 64;
struct hmap_elem zero = {}, *val;
diff --git a/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c b/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
new file mode 100644
index 000000000000..86cd183ef6dc
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_experimental.h"
+
+struct foo {
+ struct bpf_spin_lock lock;
+ int data;
+};
+
+struct array_map {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct foo);
+ __uint(max_entries, 1);
+} array_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+ __array(values, struct array_map);
+} map_of_maps SEC(".maps") = {
+ .values = {
+ [0] = &array_map,
+ },
+};
+
+SEC(".data.A") struct bpf_spin_lock lockA;
+SEC(".data.B") struct bpf_spin_lock lockB;
+
+SEC("?tc")
+int lock_id_kptr_preserve(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_this_cpu_ptr(f);
+ return 0;
+}
+
+SEC("?tc")
+int lock_id_global_zero(void *ctx)
+{
+ bpf_this_cpu_ptr(&lockA);
+ return 0;
+}
+
+SEC("?tc")
+int lock_id_mapval_preserve(void *ctx)
+{
+ struct foo *f;
+ int key = 0;
+
+ f = bpf_map_lookup_elem(&array_map, &key);
+ if (!f)
+ return 0;
+ bpf_this_cpu_ptr(f);
+ return 0;
+}
+
+SEC("?tc")
+int lock_id_innermapval_preserve(void *ctx)
+{
+ struct foo *f;
+ int key = 0;
+ void *map;
+
+ map = bpf_map_lookup_elem(&map_of_maps, &key);
+ if (!map)
+ return 0;
+ f = bpf_map_lookup_elem(map, &key);
+ if (!f)
+ return 0;
+ bpf_this_cpu_ptr(f);
+ return 0;
+}
+
+#define CHECK(test, A, B) \
+ SEC("?tc") \
+ int lock_id_mismatch_##test(void *ctx) \
+ { \
+ struct foo *f1, *f2, *v, *iv; \
+ int key = 0; \
+ void *map; \
+ \
+ map = bpf_map_lookup_elem(&map_of_maps, &key); \
+ if (!map) \
+ return 0; \
+ iv = bpf_map_lookup_elem(map, &key); \
+ if (!iv) \
+ return 0; \
+ v = bpf_map_lookup_elem(&array_map, &key); \
+ if (!v) \
+ return 0; \
+ f1 = bpf_obj_new(typeof(*f1)); \
+ if (!f1) \
+ return 0; \
+ f2 = bpf_obj_new(typeof(*f2)); \
+ if (!f2) { \
+ bpf_obj_drop(f1); \
+ return 0; \
+ } \
+ bpf_spin_lock(A); \
+ bpf_spin_unlock(B); \
+ return 0; \
+ }
+
+CHECK(kptr_kptr, &f1->lock, &f2->lock);
+CHECK(kptr_global, &f1->lock, &lockA);
+CHECK(kptr_mapval, &f1->lock, &v->lock);
+CHECK(kptr_innermapval, &f1->lock, &iv->lock);
+
+CHECK(global_global, &lockA, &lockB);
+CHECK(global_kptr, &lockA, &f1->lock);
+CHECK(global_mapval, &lockA, &v->lock);
+CHECK(global_innermapval, &lockA, &iv->lock);
+
+SEC("?tc")
+int lock_id_mismatch_mapval_mapval(void *ctx)
+{
+ struct foo *f1, *f2;
+ int key = 0;
+
+ f1 = bpf_map_lookup_elem(&array_map, &key);
+ if (!f1)
+ return 0;
+ f2 = bpf_map_lookup_elem(&array_map, &key);
+ if (!f2)
+ return 0;
+
+ bpf_spin_lock(&f1->lock);
+ f1->data = 42;
+ bpf_spin_unlock(&f2->lock);
+
+ return 0;
+}
+
+CHECK(mapval_kptr, &v->lock, &f1->lock);
+CHECK(mapval_global, &v->lock, &lockB);
+CHECK(mapval_innermapval, &v->lock, &iv->lock);
+
+SEC("?tc")
+int lock_id_mismatch_innermapval_innermapval1(void *ctx)
+{
+ struct foo *f1, *f2;
+ int key = 0;
+ void *map;
+
+ map = bpf_map_lookup_elem(&map_of_maps, &key);
+ if (!map)
+ return 0;
+ f1 = bpf_map_lookup_elem(map, &key);
+ if (!f1)
+ return 0;
+ f2 = bpf_map_lookup_elem(map, &key);
+ if (!f2)
+ return 0;
+
+ bpf_spin_lock(&f1->lock);
+ f1->data = 42;
+ bpf_spin_unlock(&f2->lock);
+
+ return 0;
+}
+
+SEC("?tc")
+int lock_id_mismatch_innermapval_innermapval2(void *ctx)
+{
+ struct foo *f1, *f2;
+ int key = 0;
+ void *map;
+
+ map = bpf_map_lookup_elem(&map_of_maps, &key);
+ if (!map)
+ return 0;
+ f1 = bpf_map_lookup_elem(map, &key);
+ if (!f1)
+ return 0;
+ map = bpf_map_lookup_elem(&map_of_maps, &key);
+ if (!map)
+ return 0;
+ f2 = bpf_map_lookup_elem(map, &key);
+ if (!f2)
+ return 0;
+
+ bpf_spin_lock(&f1->lock);
+ f1->data = 42;
+ bpf_spin_unlock(&f2->lock);
+
+ return 0;
+}
+
+CHECK(innermapval_kptr, &iv->lock, &f1->lock);
+CHECK(innermapval_global, &iv->lock, &lockA);
+CHECK(innermapval_mapval, &iv->lock, &v->lock);
+
+#undef CHECK
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/type_cast.c b/tools/testing/selftests/bpf/progs/type_cast.c
new file mode 100644
index 000000000000..eb78e6f03129
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/type_cast.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, long);
+} enter_id SEC(".maps");
+
+#define IFNAMSIZ 16
+
+int ifindex, ingress_ifindex;
+char name[IFNAMSIZ];
+unsigned int inum;
+unsigned int meta_len, frag0_len, kskb_len, kskb2_len;
+
+void *bpf_cast_to_kern_ctx(void *) __ksym;
+void *bpf_rdonly_cast(void *, __u32) __ksym;
+
+SEC("?xdp")
+int md_xdp(struct xdp_md *ctx)
+{
+ struct xdp_buff *kctx = bpf_cast_to_kern_ctx(ctx);
+ struct net_device *dev;
+
+ dev = kctx->rxq->dev;
+ ifindex = dev->ifindex;
+ inum = dev->nd_net.net->ns.inum;
+ __builtin_memcpy(name, dev->name, IFNAMSIZ);
+ ingress_ifindex = ctx->ingress_ifindex;
+ return XDP_PASS;
+}
+
+SEC("?tc")
+int md_skb(struct __sk_buff *skb)
+{
+ struct sk_buff *kskb = bpf_cast_to_kern_ctx(skb);
+ struct skb_shared_info *shared_info;
+ struct sk_buff *kskb2;
+
+ kskb_len = kskb->len;
+
+ /* Simulate the following kernel macro:
+ * #define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
+ */
+ shared_info = bpf_rdonly_cast(kskb->head + kskb->end,
+ bpf_core_type_id_kernel(struct skb_shared_info));
+ meta_len = shared_info->meta_len;
+ frag0_len = shared_info->frag_list->len;
+
+ /* kskb2 should be equal to kskb */
+ kskb2 = bpf_rdonly_cast(kskb, bpf_core_type_id_kernel(struct sk_buff));
+ kskb2_len = kskb2->len;
+ return 0;
+}
+
+SEC("?tp_btf/sys_enter")
+int BPF_PROG(untrusted_ptr, struct pt_regs *regs, long id)
+{
+ struct task_struct *task, *task_dup;
+ long *ptr;
+
+ task = bpf_get_current_task_btf();
+ task_dup = bpf_rdonly_cast(task, bpf_core_type_id_kernel(struct task_struct));
+ (void)bpf_task_storage_get(&enter_id, task_dup, 0, 0);
+ return 0;
+}
+
+SEC("?tracepoint/syscalls/sys_enter_nanosleep")
+int kctx_u64(void *ctx)
+{
+ u64 *kctx = bpf_rdonly_cast(ctx, bpf_core_type_id_kernel(u64));
+
+ (void)kctx;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
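
And a sketch of exercising md_xdp through BPF_PROG_TEST_RUN, assuming a skeleton named type_cast.skel.h; the "?xdp" section means the program must be opted in before load:

	#include <bpf/libbpf.h>
	#include "type_cast.skel.h"		/* assumed skeleton name */

	int main(void)
	{
		char pkt[64] = {};		/* zeroed dummy frame */
		LIBBPF_OPTS(bpf_test_run_opts, opts,
			.data_in = pkt,
			.data_size_in = sizeof(pkt),
		);
		struct type_cast *skel;

		skel = type_cast__open();
		if (!skel)
			return 1;
		bpf_program__set_autoload(skel->progs.md_xdp, true);
		if (!type_cast__load(skel))
			/* on return, ifindex/name/inum are filled from kctx */
			bpf_prog_test_run_opts(
				bpf_program__fd(skel->progs.md_xdp), &opts);
		type_cast__destroy(skel);
		return 0;
	}
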
diff --git a/tools/testing/selftests/bpf/task_local_storage_helpers.h b/tools/testing/selftests/bpf/task_local_storage_helpers.h
index 711d5abb7d51..281f86132766 100644
--- a/tools/testing/selftests/bpf/task_local_storage_helpers.h
+++ b/tools/testing/selftests/bpf/task_local_storage_helpers.h
@@ -7,8 +7,12 @@
#include <sys/types.h>
#ifndef __NR_pidfd_open
+#ifdef __alpha__
+#define __NR_pidfd_open 544
+#else
#define __NR_pidfd_open 434
#endif
+#endif
static inline int sys_pidfd_open(pid_t pid, unsigned int flags)
{
diff --git a/tools/testing/selftests/bpf/test_bpftool_metadata.sh b/tools/testing/selftests/bpf/test_bpftool_metadata.sh
index 1bf81b49457a..b5520692f41b 100755
--- a/tools/testing/selftests/bpf/test_bpftool_metadata.sh
+++ b/tools/testing/selftests/bpf/test_bpftool_metadata.sh
@@ -4,6 +4,9 @@
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
+BPF_FILE_USED="metadata_used.bpf.o"
+BPF_FILE_UNUSED="metadata_unused.bpf.o"
+
TESTNAME=bpftool_metadata
BPF_FS=$(awk '$3 == "bpf" {print $2; exit}' /proc/mounts)
BPF_DIR=$BPF_FS/test_$TESTNAME
@@ -55,7 +58,7 @@ mkdir $BPF_DIR
trap cleanup EXIT
-bpftool prog load metadata_unused.o $BPF_DIR/unused
+bpftool prog load $BPF_FILE_UNUSED $BPF_DIR/unused
METADATA_PLAIN="$(bpftool prog)"
echo "$METADATA_PLAIN" | grep 'a = "foo"' > /dev/null
@@ -67,7 +70,7 @@ bpftool map | grep 'metadata.rodata' > /dev/null
rm $BPF_DIR/unused
-bpftool prog load metadata_used.o $BPF_DIR/used
+bpftool prog load $BPF_FILE_USED $BPF_DIR/used
METADATA_PLAIN="$(bpftool prog)"
echo "$METADATA_PLAIN" | grep 'a = "bar"' > /dev/null
diff --git a/tools/testing/selftests/bpf/test_bpftool_synctypes.py b/tools/testing/selftests/bpf/test_bpftool_synctypes.py
index a6410bebe603..0cfece7ff4f8 100755
--- a/tools/testing/selftests/bpf/test_bpftool_synctypes.py
+++ b/tools/testing/selftests/bpf/test_bpftool_synctypes.py
@@ -309,11 +309,11 @@ class MainHeaderFileExtractor(SourceFileExtractor):
commands), which looks to the lists of options in other source files
but has different start and end markers:
- "OPTIONS := { {-j|--json} [{-p|--pretty}] | {-d|--debug} | {-l|--legacy}"
+ "OPTIONS := { {-j|--json} [{-p|--pretty}] | {-d|--debug}"
Return a set containing all options, such as:
- {'-p', '-d', '--legacy', '--pretty', '--debug', '--json', '-l', '-j'}
+ {'-p', '-d', '--pretty', '--debug', '--json', '-j'}
"""
start_marker = re.compile(f'"OPTIONS :=')
pattern = re.compile('([\w-]+) ?(?:\||}[ }\]"])')
@@ -336,7 +336,7 @@ class ManSubstitutionsExtractor(SourceFileExtractor):
Return a set containing all options, such as:
- {'-p', '-d', '--legacy', '--pretty', '--debug', '--json', '-l', '-j'}
+ {'-p', '-d', '--pretty', '--debug', '--json', '-j'}
"""
start_marker = re.compile('\|COMMON_OPTIONS\| replace:: {')
pattern = re.compile('\*\*([\w/-]+)\*\*')
@@ -501,6 +501,14 @@ def main():
source_map_types = set(bpf_info.get_map_type_map().values())
source_map_types.discard('unspec')
+ # BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED and BPF_MAP_TYPE_CGROUP_STORAGE
+ # share the same enum value and source_map_types picks
+ # BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED/cgroup_storage_deprecated.
+ # Replace 'cgroup_storage_deprecated' with 'cgroup_storage'
+ # so it aligns with what `bpftool map help` shows.
+ source_map_types.remove('cgroup_storage_deprecated')
+ source_map_types.add('cgroup_storage')
+
help_map_types = map_info.get_map_help()
help_map_options = map_info.get_options()
map_info.close()
diff --git a/tools/testing/selftests/bpf/test_flow_dissector.sh b/tools/testing/selftests/bpf/test_flow_dissector.sh
index 5303ce0c977b..4b298863797a 100755
--- a/tools/testing/selftests/bpf/test_flow_dissector.sh
+++ b/tools/testing/selftests/bpf/test_flow_dissector.sh
@@ -2,6 +2,8 @@
# SPDX-License-Identifier: GPL-2.0
#
# Load BPF flow dissector and verify it correctly dissects traffic
+
+BPF_FILE="bpf_flow.bpf.o"
export TESTNAME=test_flow_dissector
unmount=0
@@ -22,7 +24,7 @@ if [[ -z $(ip netns identify $$) ]]; then
if bpftool="$(which bpftool)"; then
echo "Testing global flow dissector..."
- $bpftool prog loadall ./bpf_flow.o /sys/fs/bpf/flow \
+ $bpftool prog loadall $BPF_FILE /sys/fs/bpf/flow \
type flow_dissector
if ! unshare --net $bpftool prog attach pinned \
@@ -95,7 +97,7 @@ else
fi
# Attach BPF program
-./flow_dissector_load -p bpf_flow.o -s _dissect
+./flow_dissector_load -p $BPF_FILE -s _dissect
# Setup
tc qdisc add dev lo ingress
diff --git a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
index 6c69c42b1d60..1e565f47aca9 100755
--- a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
+++ b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
@@ -38,6 +38,7 @@
# ping: SRC->[encap at veth2:ingress]->GRE:decap->DST
# ping replies go DST->SRC directly
+BPF_FILE="test_lwt_ip_encap.bpf.o"
if [[ $EUID -ne 0 ]]; then
echo "This script must be run as root"
echo "FAIL"
@@ -373,14 +374,14 @@ test_egress()
# install replacement routes (LWT/eBPF), pings succeed
if [ "${ENCAP}" == "IPv4" ] ; then
ip -netns ${NS1} route add ${IPv4_DST} encap bpf xmit obj \
- test_lwt_ip_encap.o sec encap_gre dev veth1 ${VRF}
+ ${BPF_FILE} sec encap_gre dev veth1 ${VRF}
ip -netns ${NS1} -6 route add ${IPv6_DST} encap bpf xmit obj \
- test_lwt_ip_encap.o sec encap_gre dev veth1 ${VRF}
+ ${BPF_FILE} sec encap_gre dev veth1 ${VRF}
elif [ "${ENCAP}" == "IPv6" ] ; then
ip -netns ${NS1} route add ${IPv4_DST} encap bpf xmit obj \
- test_lwt_ip_encap.o sec encap_gre6 dev veth1 ${VRF}
+ ${BPF_FILE} sec encap_gre6 dev veth1 ${VRF}
ip -netns ${NS1} -6 route add ${IPv6_DST} encap bpf xmit obj \
- test_lwt_ip_encap.o sec encap_gre6 dev veth1 ${VRF}
+ ${BPF_FILE} sec encap_gre6 dev veth1 ${VRF}
else
echo " unknown encap ${ENCAP}"
TEST_STATUS=1
@@ -431,14 +432,14 @@ test_ingress()
# install replacement routes (LWT/eBPF), pings succeed
if [ "${ENCAP}" == "IPv4" ] ; then
ip -netns ${NS2} route add ${IPv4_DST} encap bpf in obj \
- test_lwt_ip_encap.o sec encap_gre dev veth2 ${VRF}
+ ${BPF_FILE} sec encap_gre dev veth2 ${VRF}
ip -netns ${NS2} -6 route add ${IPv6_DST} encap bpf in obj \
- test_lwt_ip_encap.o sec encap_gre dev veth2 ${VRF}
+ ${BPF_FILE} sec encap_gre dev veth2 ${VRF}
elif [ "${ENCAP}" == "IPv6" ] ; then
ip -netns ${NS2} route add ${IPv4_DST} encap bpf in obj \
- test_lwt_ip_encap.o sec encap_gre6 dev veth2 ${VRF}
+ ${BPF_FILE} sec encap_gre6 dev veth2 ${VRF}
ip -netns ${NS2} -6 route add ${IPv6_DST} encap bpf in obj \
- test_lwt_ip_encap.o sec encap_gre6 dev veth2 ${VRF}
+ ${BPF_FILE} sec encap_gre6 dev veth2 ${VRF}
else
echo "FAIL: unknown encap ${ENCAP}"
TEST_STATUS=1
diff --git a/tools/testing/selftests/bpf/test_lwt_seg6local.sh b/tools/testing/selftests/bpf/test_lwt_seg6local.sh
index 826f4423ce02..0efea2292d6a 100755
--- a/tools/testing/selftests/bpf/test_lwt_seg6local.sh
+++ b/tools/testing/selftests/bpf/test_lwt_seg6local.sh
@@ -23,6 +23,7 @@
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
+BPF_FILE="test_lwt_seg6local.bpf.o"
readonly NS1="ns1-$(mktemp -u XXXXXX)"
readonly NS2="ns2-$(mktemp -u XXXXXX)"
readonly NS3="ns3-$(mktemp -u XXXXXX)"
@@ -117,18 +118,18 @@ ip netns exec ${NS6} ip -6 addr add fb00::109/16 dev veth10 scope link
ip netns exec ${NS1} ip -6 addr add fb00::1/16 dev lo
ip netns exec ${NS1} ip -6 route add fb00::6 dev veth1 via fb00::21
-ip netns exec ${NS2} ip -6 route add fb00::6 encap bpf in obj test_lwt_seg6local.o sec encap_srh dev veth2
+ip netns exec ${NS2} ip -6 route add fb00::6 encap bpf in obj ${BPF_FILE} sec encap_srh dev veth2
ip netns exec ${NS2} ip -6 route add fd00::1 dev veth3 via fb00::43 scope link
ip netns exec ${NS3} ip -6 route add fc42::1 dev veth5 via fb00::65
-ip netns exec ${NS3} ip -6 route add fd00::1 encap seg6local action End.BPF endpoint obj test_lwt_seg6local.o sec add_egr_x dev veth4
+ip netns exec ${NS3} ip -6 route add fd00::1 encap seg6local action End.BPF endpoint obj ${BPF_FILE} sec add_egr_x dev veth4
-ip netns exec ${NS4} ip -6 route add fd00::2 encap seg6local action End.BPF endpoint obj test_lwt_seg6local.o sec pop_egr dev veth6
+ip netns exec ${NS4} ip -6 route add fd00::2 encap seg6local action End.BPF endpoint obj ${BPF_FILE} sec pop_egr dev veth6
ip netns exec ${NS4} ip -6 addr add fc42::1 dev lo
ip netns exec ${NS4} ip -6 route add fd00::3 dev veth7 via fb00::87
ip netns exec ${NS5} ip -6 route add fd00::4 table 117 dev veth9 via fb00::109
-ip netns exec ${NS5} ip -6 route add fd00::3 encap seg6local action End.BPF endpoint obj test_lwt_seg6local.o sec inspect_t dev veth8
+ip netns exec ${NS5} ip -6 route add fd00::3 encap seg6local action End.BPF endpoint obj ${BPF_FILE} sec inspect_t dev veth8
ip netns exec ${NS6} ip -6 addr add fb00::6/16 dev lo
ip netns exec ${NS6} ip -6 addr add fd00::4/16 dev lo
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 3fef451d8831..4716e38e153a 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -222,6 +222,26 @@ static char *test_result(bool failed, bool skipped)
return failed ? "FAIL" : (skipped ? "SKIP" : "OK");
}
+#define TEST_NUM_WIDTH 7
+
+static void print_test_result(const struct prog_test_def *test, const struct test_state *test_state)
+{
+ int skipped_cnt = test_state->skip_cnt;
+ int subtests_cnt = test_state->subtest_num;
+
+ fprintf(env.stdout, "#%-*d %s:", TEST_NUM_WIDTH, test->test_num, test->test_name);
+ if (test_state->error_cnt)
+ fprintf(env.stdout, "FAIL");
+ else if (!skipped_cnt)
+ fprintf(env.stdout, "OK");
+ else if (skipped_cnt == subtests_cnt || !subtests_cnt)
+ fprintf(env.stdout, "SKIP");
+ else
+ fprintf(env.stdout, "OK (SKIP: %d/%d)", skipped_cnt, subtests_cnt);
+
+ fprintf(env.stdout, "\n");
+}
+
static void print_test_log(char *log_buf, size_t log_cnt)
{
log_buf[log_cnt] = '\0';
@@ -230,18 +250,6 @@ static void print_test_log(char *log_buf, size_t log_cnt)
fprintf(env.stdout, "\n");
}
-#define TEST_NUM_WIDTH 7
-
-static void print_test_name(int test_num, const char *test_name, char *result)
-{
- fprintf(env.stdout, "#%-*d %s", TEST_NUM_WIDTH, test_num, test_name);
-
- if (result)
- fprintf(env.stdout, ":%s", result);
-
- fprintf(env.stdout, "\n");
-}
-
static void print_subtest_name(int test_num, int subtest_num,
const char *test_name, char *subtest_name,
char *result)
@@ -307,8 +315,7 @@ static void dump_test_log(const struct prog_test_def *test,
subtest_state->skipped));
}
- print_test_name(test->test_num, test->test_name,
- test_result(test_failed, test_state->skip_cnt));
+ print_test_result(test, test_state);
}
static void stdio_restore(void);
@@ -1070,8 +1077,7 @@ static void run_one_test(int test_num)
state->tested = true;
if (verbose() && env.worker_id == -1)
- print_test_name(test_num + 1, test->test_name,
- test_result(state->error_cnt, state->skip_cnt));
+ print_test_result(test, state);
reset_affinity();
restore_netns();
diff --git a/tools/testing/selftests/bpf/test_tc_edt.sh b/tools/testing/selftests/bpf/test_tc_edt.sh
index daa7d1b8d309..76f0bd17061f 100755
--- a/tools/testing/selftests/bpf/test_tc_edt.sh
+++ b/tools/testing/selftests/bpf/test_tc_edt.sh
@@ -5,6 +5,7 @@
# with dst port = 9000 down to 5MBps. Then it measures actual
# throughput of the flow.
+BPF_FILE="test_tc_edt.bpf.o"
if [[ $EUID -ne 0 ]]; then
echo "This script must be run as root"
echo "FAIL"
@@ -54,7 +55,7 @@ ip -netns ${NS_DST} route add ${IP_SRC}/32 dev veth_dst
ip netns exec ${NS_SRC} tc qdisc add dev veth_src root fq
ip netns exec ${NS_SRC} tc qdisc add dev veth_src clsact
ip netns exec ${NS_SRC} tc filter add dev veth_src egress \
- bpf da obj test_tc_edt.o sec cls_test
+ bpf da obj ${BPF_FILE} sec cls_test
# start the listener
diff --git a/tools/testing/selftests/bpf/test_tc_tunnel.sh b/tools/testing/selftests/bpf/test_tc_tunnel.sh
index 088fcad138c9..334bdfeab940 100755
--- a/tools/testing/selftests/bpf/test_tc_tunnel.sh
+++ b/tools/testing/selftests/bpf/test_tc_tunnel.sh
@@ -3,6 +3,7 @@
#
# In-place tunneling
+BPF_FILE="test_tc_tunnel.bpf.o"
# must match the port that the bpf program filters on
readonly port=8000
@@ -196,7 +197,7 @@ verify_data
# client can no longer connect
ip netns exec "${ns1}" tc qdisc add dev veth1 clsact
ip netns exec "${ns1}" tc filter add dev veth1 egress \
- bpf direct-action object-file ./test_tc_tunnel.o \
+ bpf direct-action object-file ${BPF_FILE} \
section "encap_${tuntype}_${mac}"
echo "test bpf encap without decap (expect failure)"
server_listen
@@ -296,7 +297,7 @@ fi
ip netns exec "${ns2}" ip link del dev testtun0
ip netns exec "${ns2}" tc qdisc add dev veth2 clsact
ip netns exec "${ns2}" tc filter add dev veth2 ingress \
- bpf direct-action object-file ./test_tc_tunnel.o section decap
+ bpf direct-action object-file ${BPF_FILE} section decap
echo "test bpf encap with bpf decap"
client_connect
verify_data
diff --git a/tools/testing/selftests/bpf/test_tunnel.sh b/tools/testing/selftests/bpf/test_tunnel.sh
index e9ebc67d73f7..2eaedc1d9ed3 100755
--- a/tools/testing/selftests/bpf/test_tunnel.sh
+++ b/tools/testing/selftests/bpf/test_tunnel.sh
@@ -45,6 +45,7 @@
# 5) Tunnel protocol handler, ex: vxlan_rcv, decap the packet
# 6) Forward the packet to the overlay tnl dev
+BPF_FILE="test_tunnel_kern.bpf.o"
BPF_PIN_TUNNEL_DIR="/sys/fs/bpf/tc/tunnel"
PING_ARG="-c 3 -w 10 -q"
ret=0
@@ -545,7 +546,7 @@ test_xfrm_tunnel()
> /sys/kernel/debug/tracing/trace
setup_xfrm_tunnel
mkdir -p ${BPF_PIN_TUNNEL_DIR}
- bpftool prog loadall ./test_tunnel_kern.o ${BPF_PIN_TUNNEL_DIR}
+ bpftool prog loadall ${BPF_FILE} ${BPF_PIN_TUNNEL_DIR}
tc qdisc add dev veth1 clsact
tc filter add dev veth1 proto ip ingress bpf da object-pinned \
${BPF_PIN_TUNNEL_DIR}/xfrm_get_state
@@ -572,7 +573,7 @@ attach_bpf()
SET=$2
GET=$3
mkdir -p ${BPF_PIN_TUNNEL_DIR}
- bpftool prog loadall ./test_tunnel_kern.o ${BPF_PIN_TUNNEL_DIR}/
+ bpftool prog loadall ${BPF_FILE} ${BPF_PIN_TUNNEL_DIR}/
tc qdisc add dev $DEV clsact
tc filter add dev $DEV egress bpf da object-pinned ${BPF_PIN_TUNNEL_DIR}/$SET
tc filter add dev $DEV ingress bpf da object-pinned ${BPF_PIN_TUNNEL_DIR}/$GET
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index b605a70d4f6b..8c808551dfd7 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -68,7 +68,6 @@
#define SKIP_INSNS() BPF_RAW_INSN(0xde, 0xa, 0xd, 0xbeef, 0xdeadbeef)
#define DEFAULT_LIBBPF_LOG_LEVEL 4
-#define VERBOSE_LIBBPF_LOG_LEVEL 1
#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
@@ -81,6 +80,7 @@
static bool unpriv_disabled = false;
static int skips;
static bool verbose = false;
+static int verif_log_level = 0;
struct kfunc_btf_id_pair {
const char *kfunc;
@@ -759,7 +759,7 @@ static int load_btf_spec(__u32 *types, int types_len,
.log_buf = bpf_vlog,
.log_size = sizeof(bpf_vlog),
.log_level = (verbose
- ? VERBOSE_LIBBPF_LOG_LEVEL
+ ? verif_log_level
: DEFAULT_LIBBPF_LOG_LEVEL),
);
@@ -1491,7 +1491,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
opts.expected_attach_type = test->expected_attach_type;
if (verbose)
- opts.log_level = VERBOSE_LIBBPF_LOG_LEVEL;
+ opts.log_level = verif_log_level | 4; /* force stats */
else if (expected_ret == VERBOSE_ACCEPT)
opts.log_level = 2;
else
@@ -1746,6 +1746,13 @@ int main(int argc, char **argv)
if (argc > 1 && strcmp(argv[1], "-v") == 0) {
arg++;
verbose = true;
+ verif_log_level = 1;
+ argc--;
+ }
+ if (argc > 1 && strcmp(argv[1], "-vv") == 0) {
+ arg++;
+ verbose = true;
+ verif_log_level = 2;
argc--;
}
diff --git a/tools/testing/selftests/bpf/test_xdp_meta.sh b/tools/testing/selftests/bpf/test_xdp_meta.sh
index ea69370caae3..2740322c1878 100755
--- a/tools/testing/selftests/bpf/test_xdp_meta.sh
+++ b/tools/testing/selftests/bpf/test_xdp_meta.sh
@@ -1,5 +1,6 @@
#!/bin/sh
+BPF_FILE="test_xdp_meta.bpf.o"
# Kselftest framework requirement - SKIP code is 4.
readonly KSFT_SKIP=4
readonly NS1="ns1-$(mktemp -u XXXXXX)"
@@ -42,11 +43,11 @@ ip netns exec ${NS2} ip addr add 10.1.1.22/24 dev veth2
ip netns exec ${NS1} tc qdisc add dev veth1 clsact
ip netns exec ${NS2} tc qdisc add dev veth2 clsact
-ip netns exec ${NS1} tc filter add dev veth1 ingress bpf da obj test_xdp_meta.o sec t
-ip netns exec ${NS2} tc filter add dev veth2 ingress bpf da obj test_xdp_meta.o sec t
+ip netns exec ${NS1} tc filter add dev veth1 ingress bpf da obj ${BPF_FILE} sec t
+ip netns exec ${NS2} tc filter add dev veth2 ingress bpf da obj ${BPF_FILE} sec t
-ip netns exec ${NS1} ip link set dev veth1 xdp obj test_xdp_meta.o sec x
-ip netns exec ${NS2} ip link set dev veth2 xdp obj test_xdp_meta.o sec x
+ip netns exec ${NS1} ip link set dev veth1 xdp obj ${BPF_FILE} sec x
+ip netns exec ${NS2} ip link set dev veth2 xdp obj ${BPF_FILE} sec x
ip netns exec ${NS1} ip link set dev veth1 up
ip netns exec ${NS2} ip link set dev veth2 up
diff --git a/tools/testing/selftests/bpf/test_xdp_vlan.sh b/tools/testing/selftests/bpf/test_xdp_vlan.sh
index 810c407e0286..fbcaa9f0120b 100755
--- a/tools/testing/selftests/bpf/test_xdp_vlan.sh
+++ b/tools/testing/selftests/bpf/test_xdp_vlan.sh
@@ -200,11 +200,11 @@ ip netns exec ${NS2} sh -c 'ping -W 1 -c 1 100.64.41.1 || echo "Success: First p
# ----------------------------------------------------------------------
# In ns1: ingress use XDP to remove VLAN tags
export DEVNS1=veth1
-export FILE=test_xdp_vlan.o
+export BPF_FILE=test_xdp_vlan.bpf.o
# First test: Remove VLAN by setting VLAN ID 0, using "xdp_vlan_change"
export XDP_PROG=xdp_vlan_change
-ip netns exec ${NS1} ip link set $DEVNS1 $XDP_MODE object $FILE section $XDP_PROG
+ip netns exec ${NS1} ip link set $DEVNS1 $XDP_MODE object $BPF_FILE section $XDP_PROG
# In ns1: egress use TC to add back VLAN tag 4011
# (del cmd)
@@ -212,7 +212,7 @@ ip netns exec ${NS1} ip link set $DEVNS1 $XDP_MODE object $FILE section $XDP_PRO
#
ip netns exec ${NS1} tc qdisc add dev $DEVNS1 clsact
ip netns exec ${NS1} tc filter add dev $DEVNS1 egress \
- prio 1 handle 1 bpf da obj $FILE sec tc_vlan_push
+ prio 1 handle 1 bpf da obj $BPF_FILE sec tc_vlan_push
# Now the namespaces can reach each other; test with ping:
ip netns exec ${NS2} ping -i 0.2 -W 2 -c 2 $IPADDR1
@@ -226,7 +226,7 @@ ip netns exec ${NS1} ping -i 0.2 -W 2 -c 2 $IPADDR2
#
export XDP_PROG=xdp_vlan_remove_outer2
ip netns exec ${NS1} ip link set $DEVNS1 $XDP_MODE off
-ip netns exec ${NS1} ip link set $DEVNS1 $XDP_MODE object $FILE section $XDP_PROG
+ip netns exec ${NS1} ip link set $DEVNS1 $XDP_MODE object $BPF_FILE section $XDP_PROG
# Now the namespaces should still be able to reach each other; test with ping:
ip netns exec ${NS2} ping -i 0.2 -W 2 -c 2 $IPADDR1
diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
index 9c4be2cdb21a..09a16a77bae4 100644
--- a/tools/testing/selftests/bpf/trace_helpers.c
+++ b/tools/testing/selftests/bpf/trace_helpers.c
@@ -23,7 +23,7 @@ static int ksym_cmp(const void *p1, const void *p2)
return ((struct ksym *)p1)->addr - ((struct ksym *)p2)->addr;
}
-int load_kallsyms(void)
+int load_kallsyms_refresh(void)
{
FILE *f;
char func[256], buf[256];
@@ -31,12 +31,7 @@ int load_kallsyms(void)
void *addr;
int i = 0;
- /*
- * This is called/used from multiplace places,
- * load symbols just once.
- */
- if (sym_cnt)
- return 0;
+ sym_cnt = 0;
f = fopen("/proc/kallsyms", "r");
if (!f)
@@ -57,6 +52,17 @@ int load_kallsyms(void)
return 0;
}
+int load_kallsyms(void)
+{
+ /*
+ * This is called/used from multiple places,
+ * so load the symbols just once.
+ */
+ if (sym_cnt)
+ return 0;
+ return load_kallsyms_refresh();
+}
+
struct ksym *ksym_search(long key)
{
int start = 0, end = sym_cnt;
diff --git a/tools/testing/selftests/bpf/trace_helpers.h b/tools/testing/selftests/bpf/trace_helpers.h
index 238a9c98cde2..53efde0e2998 100644
--- a/tools/testing/selftests/bpf/trace_helpers.h
+++ b/tools/testing/selftests/bpf/trace_helpers.h
@@ -10,6 +10,8 @@ struct ksym {
};
int load_kallsyms(void);
+int load_kallsyms_refresh(void);
+
struct ksym *ksym_search(long key);
long ksym_get_addr(const char *name);
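
A sketch of how the split is meant to be used by callers (the helpers are from the header above; the wrapper function is illustrative): load_kallsyms() keeps the original load-once behavior, while load_kallsyms_refresh() re-parses /proc/kallsyms when the symbol set may have changed, e.g. after loading a kernel module.

#include <stdbool.h>
#include "trace_helpers.h"

static long addr_of(const char *name, bool symbols_may_have_changed)
{
	int err;

	/* cached parse on first call vs. forced re-parse */
	err = symbols_may_have_changed ? load_kallsyms_refresh()
				       : load_kallsyms();
	if (err)
		return 0;
	return ksym_get_addr(name);
}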
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
index e1a937277b54..3193915c5ee6 100644
--- a/tools/testing/selftests/bpf/verifier/calls.c
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -109,7 +109,7 @@
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
- .errstr = "arg#0 pointer type STRUCT prog_test_ref_kfunc must point",
+ .errstr = "arg#0 is ptr_or_null_ expected ptr_ or socket",
.fixup_kfunc_btf_id = {
{ "bpf_kfunc_call_test_acquire", 3 },
{ "bpf_kfunc_call_test_release", 5 },
diff --git a/tools/testing/selftests/bpf/verifier/jeq_infer_not_null.c b/tools/testing/selftests/bpf/verifier/jeq_infer_not_null.c
new file mode 100644
index 000000000000..67a1c07ead34
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/jeq_infer_not_null.c
@@ -0,0 +1,174 @@
+{
+ /* This is equivalent to the following program:
+ *
+ * r6 = skb->sk;
+ * r7 = sk_fullsock(r6);
+ * r0 = sk_fullsock(r6);
+ * if (r0 == 0) return 0; (a)
+ * if (r0 != r7) return 0; (b)
+ * *r7->type; (c)
+ * return 0;
+ *
+ * It is safe to dereference r7 at point (c), because of (a) and (b).
+ * The test verifies that the relation r0 == r7 is propagated from (b) to (c).
+ */
+ "jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL -> PTR_TO_SOCKET for JNE false branch",
+ .insns = {
+ /* r6 = skb->sk; */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ /* if (r6 == 0) return 0; */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 8),
+ /* r7 = sk_fullsock(skb); */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ /* r0 = sk_fullsock(skb); */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ /* if (r0 == null) return 0; */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ /* if (r0 == r7) r0 = *(r7->type); */
+ BPF_JMP_REG(BPF_JNE, BPF_REG_0, BPF_REG_7, 1), /* Use ! JNE ! */
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_sock, type)),
+ /* return 0 */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R7 pointer comparison",
+},
+{
+ /* Same as above, but verify that the other branch of JNE still
+ * prohibits access to PTR_MAYBE_NULL.
+ */
+ "jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL unchanged for JNE true branch",
+ .insns = {
+ /* r6 = skb->sk */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ /* if (r6 == 0) return 0; */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 9),
+ /* r7 = sk_fullsock(skb); */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ /* r0 = sk_fullsock(skb); */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ /* if (r0 == null) return 0; */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
+ /* if (r0 == r7) return 0; */
+ BPF_JMP_REG(BPF_JNE, BPF_REG_0, BPF_REG_7, 1), /* Use ! JNE ! */
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ /* r0 = *(r7->type); */
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_sock, type)),
+ /* return 0 */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "R7 invalid mem access 'sock_or_null'",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R7 pointer comparison",
+},
+{
+ /* Same as the first test, but not-null should be inferred for the JEQ branch */
+ "jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL -> PTR_TO_SOCKET for JEQ true branch",
+ .insns = {
+ /* r6 = skb->sk; */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ /* if (r6 == null) return 0; */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 9),
+ /* r7 = sk_fullsock(skb); */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ /* r0 = sk_fullsock(skb); */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ /* if (r0 == null) return 0; */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ /* if (r0 != r7) return 0; */
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_7, 1), /* Use ! JEQ ! */
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ /* r0 = *(r7->type); */
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_sock, type)),
+ /* return 0; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R7 pointer comparison",
+},
+{
+ /* Same as above, but verify that the other branch of JEQ still
+ * prohibits access to PTR_MAYBE_NULL.
+ */
+ "jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL unchanged for JEQ false branch",
+ .insns = {
+ /* r6 = skb->sk; */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ /* if (r6 == null) return 0; */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 8),
+ /* r7 = sk_fullsock(skb); */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ /* r0 = sk_fullsock(skb); */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ /* if (r0 == null) return 0; */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ /* if (r0 != r7) r0 = *(r7->type); */
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_7, 1), /* Use ! JEQ ! */
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_sock, type)),
+ /* return 0; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "R7 invalid mem access 'sock_or_null'",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R7 pointer comparison",
+},
+{
+ /* Maps are treated in a different branch of `mark_ptr_not_null_reg`,
+ * so the map case gets its own test.
+ */
+ "jne/jeq infer not null, PTR_TO_MAP_VALUE_OR_NULL -> PTR_TO_MAP_VALUE",
+ .insns = {
+ /* r9 = &some stack to use as key */
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_9, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_9, -8),
+ /* r8 = process local map */
+ BPF_LD_MAP_FD(BPF_REG_8, 0),
+ /* r6 = map_lookup_elem(r8, r9); */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_9),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ /* r7 = map_lookup_elem(r8, r9); */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_9),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ /* if (r6 == 0) return 0; */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 2),
+ /* if (r6 != r7) return 0; */
+ BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_7, 1),
+ /* read *r7; */
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_xdp_sock, queue_id)),
+ /* return 0; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_xskmap = { 3 },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = ACCEPT,
+},
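
For readers who prefer C over raw instructions, a rough rendering of the pattern the ACCEPT tests above encode (a sketch, not one of the selftests; assumes vmlinux.h and libbpf's helper headers):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

SEC("cgroup_skb/ingress")
int infer_not_null(struct __sk_buff *skb)
{
	struct bpf_sock *sk = skb->sk;
	struct bpf_sock *a, *b;

	if (!sk)
		return 1;
	a = bpf_sk_fullsock(sk);
	b = bpf_sk_fullsock(sk);
	if (!b)
		return 1;		/* (a): b is non-NULL past this */
	if (b != a)
		return 1;		/* (b): fall-through implies a == b */
	return a->type == SOCK_STREAM;	/* (c): a is provably non-NULL */
}

char _license[] SEC("license") = "GPL";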
diff --git a/tools/testing/selftests/bpf/verifier/jit.c b/tools/testing/selftests/bpf/verifier/jit.c
index 79021c30e51e..8bf37e5207f1 100644
--- a/tools/testing/selftests/bpf/verifier/jit.c
+++ b/tools/testing/selftests/bpf/verifier/jit.c
@@ -21,6 +21,30 @@
.retval = 2,
},
{
+ "jit: lsh, rsh, arsh by reg",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_4, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 0xff),
+ BPF_ALU64_REG(BPF_LSH, BPF_REG_1, BPF_REG_0),
+ BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_RSH, BPF_REG_1, BPF_REG_4),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_1),
+ BPF_ALU32_REG(BPF_RSH, BPF_REG_4, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 0xff, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ARSH, BPF_REG_4, BPF_REG_4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
"jit: mov32 for ldimm64, 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 2),
diff --git a/tools/testing/selftests/bpf/verifier/ref_tracking.c b/tools/testing/selftests/bpf/verifier/ref_tracking.c
index fd683a32a276..9540164712b7 100644
--- a/tools/testing/selftests/bpf/verifier/ref_tracking.c
+++ b/tools/testing/selftests/bpf/verifier/ref_tracking.c
@@ -142,7 +142,7 @@
.kfunc = "bpf",
.expected_attach_type = BPF_LSM_MAC,
.flags = BPF_F_SLEEPABLE,
- .errstr = "arg#0 pointer type STRUCT bpf_key must point to scalar, or struct with scalar",
+ .errstr = "arg#0 is ptr_or_null_ expected ptr_ or socket",
.fixup_kfunc_btf_id = {
{ "bpf_lookup_user_key", 2 },
{ "bpf_key_put", 4 },
@@ -163,7 +163,7 @@
.kfunc = "bpf",
.expected_attach_type = BPF_LSM_MAC,
.flags = BPF_F_SLEEPABLE,
- .errstr = "arg#0 pointer type STRUCT bpf_key must point to scalar, or struct with scalar",
+ .errstr = "arg#0 is ptr_or_null_ expected ptr_ or socket",
.fixup_kfunc_btf_id = {
{ "bpf_lookup_system_key", 1 },
{ "bpf_key_put", 3 },
diff --git a/tools/testing/selftests/bpf/verifier/ringbuf.c b/tools/testing/selftests/bpf/verifier/ringbuf.c
index b64d33e4833c..84838feba47f 100644
--- a/tools/testing/selftests/bpf/verifier/ringbuf.c
+++ b/tools/testing/selftests/bpf/verifier/ringbuf.c
@@ -28,7 +28,7 @@
},
.fixup_map_ringbuf = { 1 },
.result = REJECT,
- .errstr = "dereference of modified alloc_mem ptr R1",
+ .errstr = "dereference of modified ringbuf_mem ptr R1",
},
{
"ringbuf: invalid reservation offset 2",
diff --git a/tools/testing/selftests/bpf/verifier/spill_fill.c b/tools/testing/selftests/bpf/verifier/spill_fill.c
index e23f07175e1b..9bb302dade23 100644
--- a/tools/testing/selftests/bpf/verifier/spill_fill.c
+++ b/tools/testing/selftests/bpf/verifier/spill_fill.c
@@ -84,7 +84,7 @@
},
.fixup_map_ringbuf = { 1 },
.result = REJECT,
- .errstr = "R0 pointer arithmetic on alloc_mem_or_null prohibited",
+ .errstr = "R0 pointer arithmetic on ringbuf_mem_or_null prohibited",
},
{
"check corrupted spill/fill",
diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c
index b0d83a28e348..f961b49b8ef4 100644
--- a/tools/testing/selftests/bpf/veristat.c
+++ b/tools/testing/selftests/bpf/veristat.c
@@ -17,6 +17,7 @@
#include <bpf/libbpf.h>
#include <libelf.h>
#include <gelf.h>
+#include <float.h>
enum stat_id {
VERDICT,
@@ -34,6 +35,45 @@ enum stat_id {
NUM_STATS_CNT = FILE_NAME - VERDICT,
};
+/* In comparison mode each stat can specify up to four different values:
+ * - A side value;
+ * - B side value;
+ * - absolute diff value;
+ * - relative (percentage) diff value.
+ *
+ * When specifying stat specs in comparison mode, the user can use one of the
+ * following variant suffixes to specify which exact variant should be used for
+ * ordering or filtering:
+ * - `_a` for A side value;
+ * - `_b` for B side value;
+ * - `_diff` for absolute diff value;
+ * - `_pct` for relative (percentage) diff value.
+ *
+ * If no variant suffix is provided, then `_b` (control data) is assumed.
+ *
+ * As an example, let's say the instructions stat has the following output:
+ *
+ * Insns (A) Insns (B) Insns (DIFF)
+ * --------- --------- --------------
+ * 21547 20920 -627 (-2.91%)
+ *
+ * Then:
+ * - 21547 is A side value (insns_a);
+ * - 20920 is B side value (insns_b);
+ * - -627 is absolute diff value (insns_diff);
+ * - -2.91% is relative diff value (insns_pct).
+ *
+ * For verdict there is no verdict_pct variant.
+ * For file and program name, _a and _b variants are equivalent and there are
+ * no _diff or _pct variants.
+ */
+enum stat_variant {
+ VARIANT_A,
+ VARIANT_B,
+ VARIANT_DIFF,
+ VARIANT_PCT,
+};
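
The relative diff shown above is plain percentage arithmetic against the A (baseline) side; a standalone sketch reproducing the example numbers:

#include <stdio.h>

int main(void)
{
	long insns_a = 21547, insns_b = 20920;
	long insns_diff = insns_b - insns_a;		 /* -627 */
	double insns_pct = insns_diff * 100.0 / insns_a; /* ~ -2.91 */

	printf("%+ld (%+.2lf%%)\n", insns_diff, insns_pct);
	return 0;
}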
+
struct verif_stats {
char *file_name;
char *prog_name;
@@ -41,9 +81,19 @@ struct verif_stats {
long stats[NUM_STATS_CNT];
};
+/* joined comparison mode stats */
+struct verif_stats_join {
+ char *file_name;
+ char *prog_name;
+
+ const struct verif_stats *stats_a;
+ const struct verif_stats *stats_b;
+};
+
struct stat_specs {
int spec_cnt;
enum stat_id ids[ALL_STATS_CNT];
+ enum stat_variant variants[ALL_STATS_CNT];
bool asc[ALL_STATS_CNT];
int lens[ALL_STATS_CNT * 3]; /* 3x for comparison mode */
};
@@ -54,9 +104,31 @@ enum resfmt {
RESFMT_CSV,
};
+enum filter_kind {
+ FILTER_NAME,
+ FILTER_STAT,
+};
+
+enum operator_kind {
+ OP_EQ, /* == or = */
+ OP_NEQ, /* != or <> */
+ OP_LT, /* < */
+ OP_LE, /* <= */
+ OP_GT, /* > */
+ OP_GE, /* >= */
+};
+
struct filter {
+ enum filter_kind kind;
+ /* FILTER_NAME */
+ char *any_glob;
char *file_glob;
char *prog_glob;
+ /* FILTER_STAT */
+ enum operator_kind op;
+ int stat_id;
+ enum stat_variant stat_var;
+ long value;
};
static struct env {
@@ -67,6 +139,7 @@ static struct env {
int log_level;
enum resfmt out_fmt;
bool comparison_mode;
+ bool replay_mode;
struct verif_stats *prog_stats;
int prog_stat_cnt;
@@ -75,6 +148,9 @@ static struct env {
struct verif_stats *baseline_stats;
int baseline_stat_cnt;
+ struct verif_stats_join *join_stats;
+ int join_stat_cnt;
+
struct stat_specs output_spec;
struct stat_specs sort_spec;
@@ -115,6 +191,7 @@ static const struct argp_option opts[] = {
{ "sort", 's', "SPEC", 0, "Specify sort order" },
{ "output-format", 'o', "FMT", 0, "Result output format (table, csv), default is table." },
{ "compare", 'C', NULL, 0, "Comparison mode" },
+ { "replay", 'R', NULL, 0, "Replay mode" },
{ "filter", 'f', "FILTER", 0, "Filter expressions (or @filename for file with expressions)." },
{},
};
@@ -169,6 +246,9 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
case 'C':
env.comparison_mode = true;
break;
+ case 'R':
+ env.replay_mode = true;
+ break;
case 'f':
if (arg[0] == '@')
err = append_filter_file(arg + 1);
@@ -226,28 +306,6 @@ static bool glob_matches(const char *str, const char *pat)
return !*str && !*pat;
}
-static bool should_process_file(const char *filename)
-{
- int i;
-
- if (env.deny_filter_cnt > 0) {
- for (i = 0; i < env.deny_filter_cnt; i++) {
- if (glob_matches(filename, env.deny_filters[i].file_glob))
- return false;
- }
- }
-
- if (env.allow_filter_cnt == 0)
- return true;
-
- for (i = 0; i < env.allow_filter_cnt; i++) {
- if (glob_matches(filename, env.allow_filters[i].file_glob))
- return true;
- }
-
- return false;
-}
-
static bool is_bpf_obj_file(const char *path) {
Elf64_Ehdr *ehdr;
int fd, err = -EINVAL;
@@ -280,45 +338,84 @@ cleanup:
return err == 0;
}
-static bool should_process_prog(const char *path, const char *prog_name)
+static bool should_process_file_prog(const char *filename, const char *prog_name)
{
- const char *filename = basename(path);
- int i;
+ struct filter *f;
+ int i, allow_cnt = 0;
- if (env.deny_filter_cnt > 0) {
- for (i = 0; i < env.deny_filter_cnt; i++) {
- if (glob_matches(filename, env.deny_filters[i].file_glob))
- return false;
- if (!env.deny_filters[i].prog_glob)
- continue;
- if (glob_matches(prog_name, env.deny_filters[i].prog_glob))
- return false;
- }
- }
+ for (i = 0; i < env.deny_filter_cnt; i++) {
+ f = &env.deny_filters[i];
+ if (f->kind != FILTER_NAME)
+ continue;
- if (env.allow_filter_cnt == 0)
- return true;
+ if (f->any_glob && glob_matches(filename, f->any_glob))
+ return false;
+ if (f->any_glob && prog_name && glob_matches(prog_name, f->any_glob))
+ return false;
+ if (f->file_glob && glob_matches(filename, f->file_glob))
+ return false;
+ if (f->prog_glob && prog_name && glob_matches(prog_name, f->prog_glob))
+ return false;
+ }
for (i = 0; i < env.allow_filter_cnt; i++) {
- if (!glob_matches(filename, env.allow_filters[i].file_glob))
+ f = &env.allow_filters[i];
+ if (f->kind != FILTER_NAME)
continue;
- /* if filter specifies only filename glob part, it implicitly
- * allows all progs within that file
- */
- if (!env.allow_filters[i].prog_glob)
- return true;
- if (glob_matches(prog_name, env.allow_filters[i].prog_glob))
+
+ allow_cnt++;
+ if (f->any_glob) {
+ if (glob_matches(filename, f->any_glob))
+ return true;
+ /* If we don't know the program name yet, an any_glob filter
+ * has to assume that the current BPF object file might be
+ * relevant; we'll check again after opening the BPF object
+ * file, at which point the program name is finally known.
+ */
+ if (!prog_name || glob_matches(prog_name, f->any_glob))
+ return true;
+ } else {
+ if (f->file_glob && !glob_matches(filename, f->file_glob))
+ continue;
+ if (f->prog_glob && prog_name && !glob_matches(prog_name, f->prog_glob))
+ continue;
return true;
+ }
}
- return false;
+ /* if there are no file/prog name allow filters, allow all progs,
+ * unless they were explicitly denied earlier
+ */
+ return allow_cnt == 0;
}
+static struct {
+ enum operator_kind op_kind;
+ const char *op_str;
+} operators[] = {
+ /* The order of these definitions matters to avoid situations like '<'
+ * matching part of what is actually a '<>' operator. That is,
+ * substrings should go last.
+ */
+ { OP_EQ, "==" },
+ { OP_NEQ, "!=" },
+ { OP_NEQ, "<>" },
+ { OP_LE, "<=" },
+ { OP_LT, "<" },
+ { OP_GE, ">=" },
+ { OP_GT, ">" },
+ { OP_EQ, "=" },
+};
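
To see why the ordering matters, trace a filter like "insns<=100" through this table: because "<=" is tried before "<", the two-character operator wins instead of "<" plus a stray '='. A standalone sketch of the same scan:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *ops[] = { "==", "!=", "<>", "<=", "<", ">=", ">", "=" };
	const char *expr = "insns<=100";
	size_t i;

	for (i = 0; i < sizeof(ops) / sizeof(ops[0]); i++) {
		const char *p = strstr(expr, ops[i]);

		if (p) {
			printf("matched '%s' at offset %td\n", ops[i], p - expr);
			break;
		}
	}
	return 0;
}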
+
+static bool parse_stat_id_var(const char *name, size_t len, int *id, enum stat_variant *var);
+
static int append_filter(struct filter **filters, int *cnt, const char *str)
{
struct filter *f;
void *tmp;
const char *p;
+ int i;
tmp = realloc(*filters, (*cnt + 1) * sizeof(**filters));
if (!tmp)
@@ -326,26 +423,108 @@ static int append_filter(struct filter **filters, int *cnt, const char *str)
*filters = tmp;
f = &(*filters)[*cnt];
- f->file_glob = f->prog_glob = NULL;
+ memset(f, 0, sizeof(*f));
+
+ /* First, let's check if it's a stats filter of the following form:
+ * <stat><op><value>, where:
+ * - <stat> is one of supported numerical stats (verdict is also
+ * considered numerical, failure == 0, success == 1);
+ * - <op> is comparison operator (see `operators` definitions);
+ * - <value> is an integer (or failure/success, or false/true as
+ * special aliases for 0 and 1, respectively).
+ * If the form doesn't match what the user provided, we assume a
+ * file/prog glob filter.
+ */
+ for (i = 0; i < ARRAY_SIZE(operators); i++) {
+ enum stat_variant var;
+ int id;
+ long val;
+ const char *end = str;
+ const char *op_str;
+
+ op_str = operators[i].op_str;
+ p = strstr(str, op_str);
+ if (!p)
+ continue;
+
+ if (!parse_stat_id_var(str, p - str, &id, &var)) {
+ fprintf(stderr, "Unrecognized stat name in '%s'!\n", str);
+ return -EINVAL;
+ }
+ if (id >= FILE_NAME) {
+ fprintf(stderr, "Non-integer stat is specified in '%s'!\n", str);
+ return -EINVAL;
+ }
+
+ p += strlen(op_str);
+
+ if (strcasecmp(p, "true") == 0 ||
+ strcasecmp(p, "t") == 0 ||
+ strcasecmp(p, "success") == 0 ||
+ strcasecmp(p, "succ") == 0 ||
+ strcasecmp(p, "s") == 0 ||
+ strcasecmp(p, "match") == 0 ||
+ strcasecmp(p, "m") == 0) {
+ val = 1;
+ } else if (strcasecmp(p, "false") == 0 ||
+ strcasecmp(p, "f") == 0 ||
+ strcasecmp(p, "failure") == 0 ||
+ strcasecmp(p, "fail") == 0 ||
+ strcasecmp(p, "mismatch") == 0 ||
+ strcasecmp(p, "mis") == 0) {
+ val = 0;
+ } else {
+ errno = 0;
+ val = strtol(p, (char **)&end, 10);
+ if (errno || end == p || *end != '\0') {
+ fprintf(stderr, "Invalid integer value in '%s'!\n", str);
+ return -EINVAL;
+ }
+ }
+
+ f->kind = FILTER_STAT;
+ f->stat_id = id;
+ f->stat_var = var;
+ f->op = operators[i].op_kind;
+ f->value = val;
+
+ *cnt += 1;
+ return 0;
+ }
- /* filter can be specified either as "<obj-glob>" or "<obj-glob>/<prog-glob>" */
+ /* File/prog filter can be specified either as '<glob>' or
+ * '<file-glob>/<prog-glob>'. In the former case <glob> is applied to
+ * both file and program names. This seems to be way more useful in
+ * practice. If user needs full control, they can use '/<prog-glob>'
+ * form to glob just program name, or '<file-glob>/' to glob only file
+ * name. But usually common <glob> seems to be the most useful and
+ * ergonomic way.
+ */
+ f->kind = FILTER_NAME;
p = strchr(str, '/');
if (!p) {
- f->file_glob = strdup(str);
- if (!f->file_glob)
+ f->any_glob = strdup(str);
+ if (!f->any_glob)
return -ENOMEM;
} else {
- f->file_glob = strndup(str, p - str);
- f->prog_glob = strdup(p + 1);
- if (!f->file_glob || !f->prog_glob) {
- free(f->file_glob);
- free(f->prog_glob);
- f->file_glob = f->prog_glob = NULL;
- return -ENOMEM;
+ if (str != p) {
+ /* non-empty file glob */
+ f->file_glob = strndup(str, p - str);
+ if (!f->file_glob)
+ return -ENOMEM;
+ }
+ if (strlen(p + 1) > 0) {
+ /* non-empty prog glob */
+ f->prog_glob = strdup(p + 1);
+ if (!f->prog_glob) {
+ free(f->file_glob);
+ f->file_glob = NULL;
+ return -ENOMEM;
+ }
}
}
- *cnt = *cnt + 1;
+ *cnt += 1;
return 0;
}
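
The three accepted name-filter shapes, sketched with POSIX fnmatch() standing in for the internal glob_matches() (the file and program names below are made up):

#include <fnmatch.h>
#include <stdio.h>

int main(void)
{
	/* '-f loop*' -> any_glob: matched against file AND prog names */
	printf("%d\n", fnmatch("loop*", "loops.bpf.o", 0) == 0);	/* 1 */
	printf("%d\n", fnmatch("loop*", "on_event", 0) == 0);		/* 0 */
	/* '-f /on_ev*' -> prog_glob only (empty file part) */
	printf("%d\n", fnmatch("on_ev*", "on_event", 0) == 0);		/* 1 */
	/* '-f *.bpf.o/' -> file_glob only (empty prog part) */
	printf("%d\n", fnmatch("*.bpf.o", "pyperf600.bpf.o", 0) == 0);	/* 1 */
	return 0;
}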
@@ -388,6 +567,15 @@ static const struct stat_specs default_output_spec = {
},
};
+static const struct stat_specs default_csv_output_spec = {
+ .spec_cnt = 9,
+ .ids = {
+ FILE_NAME, PROG_NAME, VERDICT, DURATION,
+ TOTAL_INSNS, TOTAL_STATES, PEAK_STATES,
+ MAX_STATES_PER_INSN, MARK_READ_MAX_LEN,
+ },
+};
+
static const struct stat_specs default_sort_spec = {
.spec_cnt = 2,
.ids = {
@@ -396,48 +584,123 @@ static const struct stat_specs default_sort_spec = {
.asc = { true, true, },
};
+/* sorting for comparison mode to join two data sets */
+static const struct stat_specs join_sort_spec = {
+ .spec_cnt = 2,
+ .ids = {
+ FILE_NAME, PROG_NAME,
+ },
+ .asc = { true, true, },
+};
+
static struct stat_def {
const char *header;
const char *names[4];
bool asc_by_default;
+ bool left_aligned;
} stat_defs[] = {
- [FILE_NAME] = { "File", {"file_name", "filename", "file"}, true /* asc */ },
- [PROG_NAME] = { "Program", {"prog_name", "progname", "prog"}, true /* asc */ },
- [VERDICT] = { "Verdict", {"verdict"}, true /* asc: failure, success */ },
+ [FILE_NAME] = { "File", {"file_name", "filename", "file"}, true /* asc */, true /* left */ },
+ [PROG_NAME] = { "Program", {"prog_name", "progname", "prog"}, true /* asc */, true /* left */ },
+ [VERDICT] = { "Verdict", {"verdict"}, true /* asc: failure, success */, true /* left */ },
[DURATION] = { "Duration (us)", {"duration", "dur"}, },
- [TOTAL_INSNS] = { "Total insns", {"total_insns", "insns"}, },
- [TOTAL_STATES] = { "Total states", {"total_states", "states"}, },
+ [TOTAL_INSNS] = { "Insns", {"total_insns", "insns"}, },
+ [TOTAL_STATES] = { "States", {"total_states", "states"}, },
[PEAK_STATES] = { "Peak states", {"peak_states"}, },
[MAX_STATES_PER_INSN] = { "Max states per insn", {"max_states_per_insn"}, },
[MARK_READ_MAX_LEN] = { "Max mark read length", {"max_mark_read_len", "mark_read"}, },
};
+static bool parse_stat_id_var(const char *name, size_t len, int *id, enum stat_variant *var)
+{
+ static const char *var_sfxs[] = {
+ [VARIANT_A] = "_a",
+ [VARIANT_B] = "_b",
+ [VARIANT_DIFF] = "_diff",
+ [VARIANT_PCT] = "_pct",
+ };
+ int i, j, k;
+
+ for (i = 0; i < ARRAY_SIZE(stat_defs); i++) {
+ struct stat_def *def = &stat_defs[i];
+ size_t alias_len, sfx_len;
+ const char *alias;
+
+ for (j = 0; j < ARRAY_SIZE(stat_defs[i].names); j++) {
+ alias = def->names[j];
+ if (!alias)
+ continue;
+
+ alias_len = strlen(alias);
+ if (strncmp(name, alias, alias_len) != 0)
+ continue;
+
+ if (alias_len == len) {
+ /* If no variant suffix is specified, we
+ * assume the control group (in case we are
+ * in comparison mode); the variant is ignored
+ * in non-comparison mode.
+ */
+ *var = VARIANT_B;
+ *id = i;
+ return true;
+ }
+
+ for (k = 0; k < ARRAY_SIZE(var_sfxs); k++) {
+ sfx_len = strlen(var_sfxs[k]);
+ if (alias_len + sfx_len != len)
+ continue;
+
+ if (strncmp(name + alias_len, var_sfxs[k], sfx_len) == 0) {
+ *var = (enum stat_variant)k;
+ *id = i;
+ return true;
+ }
+ }
+ }
+ }
+
+ return false;
+}
+
+static bool is_asc_sym(char c)
+{
+ return c == '^';
+}
+
+static bool is_desc_sym(char c)
+{
+ return c == 'v' || c == 'V' || c == '.' || c == '!' || c == '_';
+}
+
static int parse_stat(const char *stat_name, struct stat_specs *specs)
{
- int id, i;
+ int id;
+ bool has_order = false, is_asc = false;
+ size_t len = strlen(stat_name);
+ enum stat_variant var;
if (specs->spec_cnt >= ARRAY_SIZE(specs->ids)) {
fprintf(stderr, "Can't specify more than %zd stats\n", ARRAY_SIZE(specs->ids));
return -E2BIG;
}
- for (id = 0; id < ARRAY_SIZE(stat_defs); id++) {
- struct stat_def *def = &stat_defs[id];
-
- for (i = 0; i < ARRAY_SIZE(stat_defs[id].names); i++) {
- if (!def->names[i] || strcmp(def->names[i], stat_name) != 0)
- continue;
-
- specs->ids[specs->spec_cnt] = id;
- specs->asc[specs->spec_cnt] = def->asc_by_default;
- specs->spec_cnt++;
+ if (len > 1 && (is_asc_sym(stat_name[len - 1]) || is_desc_sym(stat_name[len - 1]))) {
+ has_order = true;
+ is_asc = is_asc_sym(stat_name[len - 1]);
+ len -= 1;
+ }
- return 0;
- }
+ if (!parse_stat_id_var(stat_name, len, &id, &var)) {
+ fprintf(stderr, "Unrecognized stat name '%s'\n", stat_name);
+ return -ESRCH;
}
- fprintf(stderr, "Unrecognized stat name '%s'\n", stat_name);
- return -ESRCH;
+ specs->ids[specs->spec_cnt] = id;
+ specs->variants[specs->spec_cnt] = var;
+ specs->asc[specs->spec_cnt] = has_order ? is_asc : stat_defs[id].asc_by_default;
+ specs->spec_cnt++;
+
+ return 0;
}
static int parse_stats(const char *stats_str, struct stat_specs *specs)
@@ -509,6 +772,28 @@ static int parse_verif_log(char * const buf, size_t buf_sz, struct verif_stats *
return 0;
}
+static void fixup_obj(struct bpf_object *obj)
+{
+ struct bpf_map *map;
+
+ bpf_object__for_each_map(map, obj) {
+ /* disable pinning */
+ bpf_map__set_pin_path(map, NULL);
+
+ /* fix up map size, if necessary; local storage map types
+ * reject a non-zero max_entries, so they are left alone
+ */
+ switch (bpf_map__type(map)) {
+ case BPF_MAP_TYPE_SK_STORAGE:
+ case BPF_MAP_TYPE_TASK_STORAGE:
+ case BPF_MAP_TYPE_INODE_STORAGE:
+ case BPF_MAP_TYPE_CGROUP_STORAGE:
+ break;
+ default:
+ if (bpf_map__max_entries(map) == 0)
+ bpf_map__set_max_entries(map, 1);
+ }
+ }
+}
+
static int process_prog(const char *filename, struct bpf_object *obj, struct bpf_program *prog)
{
const char *prog_name = bpf_program__name(prog);
@@ -518,7 +803,7 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf
int err = 0;
void *tmp;
- if (!should_process_prog(filename, bpf_program__name(prog))) {
+ if (!should_process_file_prog(basename(filename), bpf_program__name(prog))) {
env.progs_skipped++;
return 0;
}
@@ -543,6 +828,9 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf
}
verif_log_buf[0] = '\0';
+ /* increase chances of successful BPF object loading */
+ fixup_obj(obj);
+
err = bpf_object__load(obj);
env.progs_processed++;
@@ -571,7 +859,7 @@ static int process_obj(const char *filename)
LIBBPF_OPTS(bpf_object_open_opts, opts);
int err = 0, prog_cnt = 0;
- if (!should_process_file(basename(filename))) {
+ if (!should_process_file_prog(basename(filename), NULL)) {
if (env.verbose)
printf("Skipping '%s' due to filters...\n", filename);
env.files_skipped++;
@@ -691,7 +979,106 @@ static int cmp_prog_stats(const void *v1, const void *v2)
return cmp;
}
- return 0;
+ /* always disambiguate with file+prog, which are unique */
+ cmp = strcmp(s1->file_name, s2->file_name);
+ if (cmp != 0)
+ return cmp;
+ return strcmp(s1->prog_name, s2->prog_name);
+}
+
+static void fetch_join_stat_value(const struct verif_stats_join *s,
+ enum stat_id id, enum stat_variant var,
+ const char **str_val,
+ double *num_val)
+{
+ long v1, v2;
+
+ if (id == FILE_NAME) {
+ *str_val = s->file_name;
+ return;
+ }
+ if (id == PROG_NAME) {
+ *str_val = s->prog_name;
+ return;
+ }
+
+ v1 = s->stats_a ? s->stats_a->stats[id] : 0;
+ v2 = s->stats_b ? s->stats_b->stats[id] : 0;
+
+ switch (var) {
+ case VARIANT_A:
+ if (!s->stats_a)
+ *num_val = -DBL_MAX;
+ else
+ *num_val = s->stats_a->stats[id];
+ return;
+ case VARIANT_B:
+ if (!s->stats_b)
+ *num_val = -DBL_MAX;
+ else
+ *num_val = s->stats_b->stats[id];
+ return;
+ case VARIANT_DIFF:
+ if (!s->stats_a || !s->stats_b)
+ *num_val = -DBL_MAX;
+ else if (id == VERDICT)
+ *num_val = v1 == v2 ? 1.0 /* MATCH */ : 0.0 /* MISMATCH */;
+ else
+ *num_val = (double)(v2 - v1);
+ return;
+ case VARIANT_PCT:
+ if (!s->stats_a || !s->stats_b) {
+ *num_val = -DBL_MAX;
+ } else if (v1 == 0) {
+ if (v1 == v2)
+ *num_val = 0.0;
+ else
+ *num_val = v2 < v1 ? -100.0 : 100.0;
+ } else {
+ *num_val = (v2 - v1) * 100.0 / v1;
+ }
+ return;
+ }
+}
+
+static int cmp_join_stat(const struct verif_stats_join *s1,
+ const struct verif_stats_join *s2,
+ enum stat_id id, enum stat_variant var, bool asc)
+{
+ const char *str1 = NULL, *str2 = NULL;
+ double v1, v2;
+ int cmp = 0;
+
+ fetch_join_stat_value(s1, id, var, &str1, &v1);
+ fetch_join_stat_value(s2, id, var, &str2, &v2);
+
+ if (str1)
+ cmp = strcmp(str1, str2);
+ else if (v1 != v2)
+ cmp = v1 < v2 ? -1 : 1;
+
+ return asc ? cmp : -cmp;
+}
+
+static int cmp_join_stats(const void *v1, const void *v2)
+{
+ const struct verif_stats_join *s1 = v1, *s2 = v2;
+ int i, cmp;
+
+ for (i = 0; i < env.sort_spec.spec_cnt; i++) {
+ cmp = cmp_join_stat(s1, s2,
+ env.sort_spec.ids[i],
+ env.sort_spec.variants[i],
+ env.sort_spec.asc[i]);
+ if (cmp != 0)
+ return cmp;
+ }
+
+ /* always disambiguate with file+prog, which are unique */
+ cmp = strcmp(s1->file_name, s2->file_name);
+ if (cmp != 0)
+ return cmp;
+ return strcmp(s1->prog_name, s2->prog_name);
}
#define HEADER_CHAR '-'
@@ -713,6 +1100,7 @@ static void output_header_underlines(void)
static void output_headers(enum resfmt fmt)
{
+ const char *fmt_str;
int i, len;
for (i = 0; i < env.output_spec.spec_cnt; i++) {
@@ -726,7 +1114,8 @@ static void output_headers(enum resfmt fmt)
*max_len = len;
break;
case RESFMT_TABLE:
- printf("%s%-*s", i == 0 ? "" : COLUMN_SEP, *max_len, stat_defs[id].header);
+ fmt_str = stat_defs[id].left_aligned ? "%s%-*s" : "%s%*s";
+ printf(fmt_str, i == 0 ? "" : COLUMN_SEP, *max_len, stat_defs[id].header);
if (i == env.output_spec.spec_cnt - 1)
printf("\n");
break;
@@ -747,13 +1136,16 @@ static void prepare_value(const struct verif_stats *s, enum stat_id id,
{
switch (id) {
case FILE_NAME:
- *str = s->file_name;
+ *str = s ? s->file_name : "N/A";
break;
case PROG_NAME:
- *str = s->prog_name;
+ *str = s ? s->prog_name : "N/A";
break;
case VERDICT:
- *str = s->stats[VERDICT] ? "success" : "failure";
+ if (!s)
+ *str = "N/A";
+ else
+ *str = s->stats[VERDICT] ? "success" : "failure";
break;
case DURATION:
case TOTAL_INSNS:
@@ -761,7 +1153,7 @@ static void prepare_value(const struct verif_stats *s, enum stat_id id,
case PEAK_STATES:
case MAX_STATES_PER_INSN:
case MARK_READ_MAX_LEN:
- *val = s->stats[id];
+ *val = s ? s->stats[id] : 0;
break;
default:
fprintf(stderr, "Unrecognized stat #%d\n", id);
@@ -816,42 +1208,6 @@ static void output_stats(const struct verif_stats *s, enum resfmt fmt, bool last
}
}
-static int handle_verif_mode(void)
-{
- int i, err;
-
- if (env.filename_cnt == 0) {
- fprintf(stderr, "Please provide path to BPF object file!\n");
- argp_help(&argp, stderr, ARGP_HELP_USAGE, "veristat");
- return -EINVAL;
- }
-
- for (i = 0; i < env.filename_cnt; i++) {
- err = process_obj(env.filenames[i]);
- if (err) {
- fprintf(stderr, "Failed to process '%s': %d\n", env.filenames[i], err);
- return err;
- }
- }
-
- qsort(env.prog_stats, env.prog_stat_cnt, sizeof(*env.prog_stats), cmp_prog_stats);
-
- if (env.out_fmt == RESFMT_TABLE) {
- /* calculate column widths */
- output_headers(RESFMT_TABLE_CALCLEN);
- for (i = 0; i < env.prog_stat_cnt; i++)
- output_stats(&env.prog_stats[i], RESFMT_TABLE_CALCLEN, false);
- }
-
- /* actually output the table */
- output_headers(env.out_fmt);
- for (i = 0; i < env.prog_stat_cnt; i++) {
- output_stats(&env.prog_stats[i], env.out_fmt, i == env.prog_stat_cnt - 1);
- }
-
- return 0;
-}
-
static int parse_stat_value(const char *str, enum stat_id id, struct verif_stats *st)
{
switch (id) {
@@ -983,7 +1339,7 @@ static int parse_stats_csv(const char *filename, struct stat_specs *specs,
* parsed entire line; if row should be ignored we pretend we
* never parsed it
*/
- if (!should_process_prog(st->file_name, st->prog_name)) {
+ if (!should_process_file_prog(st->file_name, st->prog_name)) {
free(st->file_name);
free(st->prog_name);
*stat_cntp -= 1;
@@ -1072,9 +1428,11 @@ static void output_comp_headers(enum resfmt fmt)
output_comp_header_underlines();
}
-static void output_comp_stats(const struct verif_stats *base, const struct verif_stats *comp,
+static void output_comp_stats(const struct verif_stats_join *join_stats,
enum resfmt fmt, bool last)
{
+ const struct verif_stats *base = join_stats->stats_a;
+ const struct verif_stats *comp = join_stats->stats_b;
char base_buf[1024] = {}, comp_buf[1024] = {}, diff_buf[1024] = {};
int i;
@@ -1092,28 +1450,44 @@ static void output_comp_stats(const struct verif_stats *base, const struct verif
/* normalize all the outputs to be in string buffers for simplicity */
if (is_key_stat(id)) {
/* key stats (file and program name) are always strings */
- if (base != &fallback_stats)
+ if (base)
snprintf(base_buf, sizeof(base_buf), "%s", base_str);
else
snprintf(base_buf, sizeof(base_buf), "%s", comp_str);
} else if (base_str) {
snprintf(base_buf, sizeof(base_buf), "%s", base_str);
snprintf(comp_buf, sizeof(comp_buf), "%s", comp_str);
- if (strcmp(base_str, comp_str) == 0)
+ if (!base || !comp)
+ snprintf(diff_buf, sizeof(diff_buf), "%s", "N/A");
+ else if (strcmp(base_str, comp_str) == 0)
snprintf(diff_buf, sizeof(diff_buf), "%s", "MATCH");
else
snprintf(diff_buf, sizeof(diff_buf), "%s", "MISMATCH");
} else {
- snprintf(base_buf, sizeof(base_buf), "%ld", base_val);
- snprintf(comp_buf, sizeof(comp_buf), "%ld", comp_val);
+ double p = 0.0;
+
+ if (base)
+ snprintf(base_buf, sizeof(base_buf), "%ld", base_val);
+ else
+ snprintf(base_buf, sizeof(base_buf), "%s", "N/A");
+ if (comp)
+ snprintf(comp_buf, sizeof(comp_buf), "%ld", comp_val);
+ else
+ snprintf(comp_buf, sizeof(comp_buf), "%s", "N/A");
diff_val = comp_val - base_val;
- if (base == &fallback_stats || comp == &fallback_stats || base_val == 0) {
- snprintf(diff_buf, sizeof(diff_buf), "%+ld (%+.2lf%%)",
- diff_val, comp_val < base_val ? -100.0 : 100.0);
+ if (!base || !comp) {
+ snprintf(diff_buf, sizeof(diff_buf), "%s", "N/A");
} else {
- snprintf(diff_buf, sizeof(diff_buf), "%+ld (%+.2lf%%)",
- diff_val, diff_val * 100.0 / base_val);
+ if (base_val == 0) {
+ if (comp_val == base_val)
+ p = 0.0; /* avoid +0 (+100%) case */
+ else
+ p = comp_val < base_val ? -100.0 : 100.0;
+ } else {
+ p = diff_val * 100.0 / base_val;
+ }
+ snprintf(diff_buf, sizeof(diff_buf), "%+ld (%+.2lf%%)", diff_val, p);
}
}
@@ -1170,14 +1544,64 @@ static int cmp_stats_key(const struct verif_stats *base, const struct verif_stat
return strcmp(base->prog_name, comp->prog_name);
}
+static bool is_join_stat_filter_matched(struct filter *f, const struct verif_stats_join *stats)
+{
+ static const double eps = 1e-9;
+ const char *str = NULL;
+ double value = 0.0;
+
+ fetch_join_stat_value(stats, f->stat_id, f->stat_var, &str, &value);
+
+ switch (f->op) {
+ case OP_EQ: return value > f->value - eps && value < f->value + eps;
+ case OP_NEQ: return value < f->value - eps || value > f->value + eps;
+ case OP_LT: return value < f->value - eps;
+ case OP_LE: return value <= f->value + eps;
+ case OP_GT: return value > f->value + eps;
+ case OP_GE: return value >= f->value - eps;
+ }
+
+ fprintf(stderr, "BUG: unknown filter op %d!\n", f->op);
+ return false;
+}
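
The eps band is there because _pct values are floating point, and exact equality on doubles is brittle; a minimal standalone demonstration:

#include <stdbool.h>
#include <stdio.h>

static bool approx_eq(double a, double b)
{
	static const double eps = 1e-9;

	return a > b - eps && a < b + eps;
}

int main(void)
{
	double pct = 0.1 + 0.2;	/* stand-in for a computed _pct value */

	printf("%d\n", pct == 0.3);		/* 0: exact compare misses */
	printf("%d\n", approx_eq(pct, 0.3));	/* 1: eps compare matches */
	return 0;
}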
+
+static bool should_output_join_stats(const struct verif_stats_join *stats)
+{
+ struct filter *f;
+ int i, allow_cnt = 0;
+
+ for (i = 0; i < env.deny_filter_cnt; i++) {
+ f = &env.deny_filters[i];
+ if (f->kind != FILTER_STAT)
+ continue;
+
+ if (is_join_stat_filter_matched(f, stats))
+ return false;
+ }
+
+ for (i = 0; i < env.allow_filter_cnt; i++) {
+ f = &env.allow_filters[i];
+ if (f->kind != FILTER_STAT)
+ continue;
+ allow_cnt++;
+
+ if (is_join_stat_filter_matched(f, stats))
+ return true;
+ }
+
+ /* if there are no stat allow filters, pass everything through */
+ return allow_cnt == 0;
+}
+
static int handle_comparison_mode(void)
{
struct stat_specs base_specs = {}, comp_specs = {};
+ struct stat_specs tmp_sort_spec;
enum resfmt cur_fmt;
- int err, i, j;
+ int err, i, j, last_idx = 0;
if (env.filename_cnt != 2) {
- fprintf(stderr, "Comparison mode expects exactly two input CSV files!\n");
+ fprintf(stderr, "Comparison mode expects exactly two input CSV files!\n\n");
argp_help(&argp, stderr, ARGP_HELP_USAGE, "veristat");
return -EINVAL;
}
@@ -1215,31 +1639,26 @@ static int handle_comparison_mode(void)
}
}
+ /* Replace the user-specified sorting spec with the file+prog sorting
+ * rule so that the two datasets can be joined correctly. Once we are
+ * done, we restore the original sort spec.
+ */
+ tmp_sort_spec = env.sort_spec;
+ env.sort_spec = join_sort_spec;
qsort(env.prog_stats, env.prog_stat_cnt, sizeof(*env.prog_stats), cmp_prog_stats);
qsort(env.baseline_stats, env.baseline_stat_cnt, sizeof(*env.baseline_stats), cmp_prog_stats);
+ env.sort_spec = tmp_sort_spec;
- /* for human-readable table output we need to do extra pass to
- * calculate column widths, so we substitute current output format
- * with RESFMT_TABLE_CALCLEN and later revert it back to RESFMT_TABLE
- * and do everything again.
- */
- if (env.out_fmt == RESFMT_TABLE)
- cur_fmt = RESFMT_TABLE_CALCLEN;
- else
- cur_fmt = env.out_fmt;
-
-one_more_time:
- output_comp_headers(cur_fmt);
-
- /* If baseline and comparison datasets have different subset of rows
- * (we match by 'object + prog' as a unique key) then assume
- * empty/missing/zero value for rows that are missing in the opposite
- * data set
+ /* Join the two datasets together. If the baseline and comparison
+ * datasets have different subsets of rows (we match by 'object + prog'
+ * as a unique key), then assume an empty/missing/zero value for rows
+ * that are missing in the opposite data set.
*/
i = j = 0;
while (i < env.baseline_stat_cnt || j < env.prog_stat_cnt) {
- bool last = (i == env.baseline_stat_cnt - 1) || (j == env.prog_stat_cnt - 1);
const struct verif_stats *base, *comp;
+ struct verif_stats_join *join;
+ void *tmp;
int r;
base = i < env.baseline_stat_cnt ? &env.baseline_stats[i] : &fallback_stats;
@@ -1256,18 +1675,64 @@ one_more_time:
return -EINVAL;
}
+ tmp = realloc(env.join_stats, (env.join_stat_cnt + 1) * sizeof(*env.join_stats));
+ if (!tmp)
+ return -ENOMEM;
+ env.join_stats = tmp;
+
+ join = &env.join_stats[env.join_stat_cnt];
+ memset(join, 0, sizeof(*join));
+
r = cmp_stats_key(base, comp);
if (r == 0) {
- output_comp_stats(base, comp, cur_fmt, last);
+ join->file_name = base->file_name;
+ join->prog_name = base->prog_name;
+ join->stats_a = base;
+ join->stats_b = comp;
i++;
j++;
} else if (comp == &fallback_stats || r < 0) {
- output_comp_stats(base, &fallback_stats, cur_fmt, last);
+ join->file_name = base->file_name;
+ join->prog_name = base->prog_name;
+ join->stats_a = base;
+ join->stats_b = NULL;
i++;
} else {
- output_comp_stats(&fallback_stats, comp, cur_fmt, last);
+ join->file_name = comp->file_name;
+ join->prog_name = comp->prog_name;
+ join->stats_a = NULL;
+ join->stats_b = comp;
j++;
}
+ env.join_stat_cnt += 1;
+ }
+
+ /* now sort joined results according to the sort spec */
+ qsort(env.join_stats, env.join_stat_cnt, sizeof(*env.join_stats), cmp_join_stats);
+
+ /* for human-readable table output we need to do an extra pass to
+ * calculate column widths, so we substitute the current output format
+ * with RESFMT_TABLE_CALCLEN and later revert it back to RESFMT_TABLE
+ * and do everything again.
+ */
+ if (env.out_fmt == RESFMT_TABLE)
+ cur_fmt = RESFMT_TABLE_CALCLEN;
+ else
+ cur_fmt = env.out_fmt;
+
+one_more_time:
+ output_comp_headers(cur_fmt);
+
+ for (i = 0; i < env.join_stat_cnt; i++) {
+ const struct verif_stats_join *join = &env.join_stats[i];
+
+ if (!should_output_join_stats(join))
+ continue;
+
+ if (cur_fmt == RESFMT_TABLE_CALCLEN)
+ last_idx = i;
+
+ output_comp_stats(join, cur_fmt, i == last_idx);
}
if (cur_fmt == RESFMT_TABLE_CALCLEN) {
@@ -1278,6 +1743,128 @@ one_more_time:
return 0;
}
+static bool is_stat_filter_matched(struct filter *f, const struct verif_stats *stats)
+{
+ long value = stats->stats[f->stat_id];
+
+ switch (f->op) {
+ case OP_EQ: return value == f->value;
+ case OP_NEQ: return value != f->value;
+ case OP_LT: return value < f->value;
+ case OP_LE: return value <= f->value;
+ case OP_GT: return value > f->value;
+ case OP_GE: return value >= f->value;
+ }
+
+ fprintf(stderr, "BUG: unknown filter op %d!\n", f->op);
+ return false;
+}
+
+static bool should_output_stats(const struct verif_stats *stats)
+{
+ struct filter *f;
+ int i, allow_cnt = 0;
+
+ for (i = 0; i < env.deny_filter_cnt; i++) {
+ f = &env.deny_filters[i];
+ if (f->kind != FILTER_STAT)
+ continue;
+
+ if (is_stat_filter_matched(f, stats))
+ return false;
+ }
+
+ for (i = 0; i < env.allow_filter_cnt; i++) {
+ f = &env.allow_filters[i];
+ if (f->kind != FILTER_STAT)
+ continue;
+ allow_cnt++;
+
+ if (is_stat_filter_matched(f, stats))
+ return true;
+ }
+
+ /* if there are no allow filters for stats, pass everything through */
+ return allow_cnt == 0;
+}
+
+static void output_prog_stats(void)
+{
+ const struct verif_stats *stats;
+ int i, last_stat_idx = 0;
+
+ if (env.out_fmt == RESFMT_TABLE) {
+ /* calculate column widths */
+ output_headers(RESFMT_TABLE_CALCLEN);
+ for (i = 0; i < env.prog_stat_cnt; i++) {
+ stats = &env.prog_stats[i];
+ if (!should_output_stats(stats))
+ continue;
+ output_stats(stats, RESFMT_TABLE_CALCLEN, false);
+ last_stat_idx = i;
+ }
+ }
+
+ /* actually output the table */
+ output_headers(env.out_fmt);
+ for (i = 0; i < env.prog_stat_cnt; i++) {
+ stats = &env.prog_stats[i];
+ if (!should_output_stats(stats))
+ continue;
+ output_stats(stats, env.out_fmt, i == last_stat_idx);
+ }
+}
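
output_prog_stats() renders the table in two passes: a RESFMT_TABLE_CALCLEN pass that only records the widest cell seen in each column, then a real output pass that pads to those widths. A compact sketch of the same two-pass idea, with made-up table contents:

    #include <stdio.h>
    #include <string.h>

    /* Pass 1: measure the widest cell per column. Pass 2: print padded. */
    static void print_table(const char *cells[][2], int rows)
    {
        int w[2] = { 0, 0 }, i, c, len;

        for (i = 0; i < rows; i++)              /* "calclen" pass */
            for (c = 0; c < 2; c++) {
                len = strlen(cells[i][c]);
                if (len > w[c])
                    w[c] = len;
            }

        for (i = 0; i < rows; i++)              /* output pass */
            printf("%-*s  %*s\n", w[0], cells[i][0], w[1], cells[i][1]);
    }

    int main(void)
    {
        const char *cells[][2] = {
            { "loop1.bpf.o", "123" },
            { "pyperf600.bpf.o", "51467" },
        };

        print_table(cells, 2);
        return 0;
    }
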
+
+static int handle_verif_mode(void)
+{
+ int i, err;
+
+ if (env.filename_cnt == 0) {
+ fprintf(stderr, "Please provide path to BPF object file!\n\n");
+ argp_help(&argp, stderr, ARGP_HELP_USAGE, "veristat");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < env.filename_cnt; i++) {
+ err = process_obj(env.filenames[i]);
+ if (err) {
+ fprintf(stderr, "Failed to process '%s': %d\n", env.filenames[i], err);
+ return err;
+ }
+ }
+
+ qsort(env.prog_stats, env.prog_stat_cnt, sizeof(*env.prog_stats), cmp_prog_stats);
+
+ output_prog_stats();
+
+ return 0;
+}
+
+static int handle_replay_mode(void)
+{
+ struct stat_specs specs = {};
+ int err;
+
+ if (env.filename_cnt != 1) {
+ fprintf(stderr, "Replay mode expects exactly one input CSV file!\n\n");
+ argp_help(&argp, stderr, ARGP_HELP_USAGE, "veristat");
+ return -EINVAL;
+ }
+
+ err = parse_stats_csv(env.filenames[0], &specs,
+ &env.prog_stats, &env.prog_stat_cnt);
+ if (err) {
+ fprintf(stderr, "Failed to parse stats from '%s': %d\n", env.filenames[0], err);
+ return err;
+ }
+
+ qsort(env.prog_stats, env.prog_stat_cnt, sizeof(*env.prog_stats), cmp_prog_stats);
+
+ output_prog_stats();
+
+ return 0;
+}
+
int main(int argc, char **argv)
{
int err = 0, i;
@@ -1286,34 +1873,49 @@ int main(int argc, char **argv)
return 1;
if (env.verbose && env.quiet) {
- fprintf(stderr, "Verbose and quiet modes are incompatible, please specify just one or neither!\n");
+ fprintf(stderr, "Verbose and quiet modes are incompatible, please specify just one or neither!\n\n");
argp_help(&argp, stderr, ARGP_HELP_USAGE, "veristat");
return 1;
}
if (env.verbose && env.log_level == 0)
env.log_level = 1;
- if (env.output_spec.spec_cnt == 0)
- env.output_spec = default_output_spec;
+ if (env.output_spec.spec_cnt == 0) {
+ if (env.out_fmt == RESFMT_CSV)
+ env.output_spec = default_csv_output_spec;
+ else
+ env.output_spec = default_output_spec;
+ }
if (env.sort_spec.spec_cnt == 0)
env.sort_spec = default_sort_spec;
+ if (env.comparison_mode && env.replay_mode) {
+ fprintf(stderr, "Can't specify replay and comparison mode at the same time!\n\n");
+ argp_help(&argp, stderr, ARGP_HELP_USAGE, "veristat");
+ return 1;
+ }
+
if (env.comparison_mode)
err = handle_comparison_mode();
+ else if (env.replay_mode)
+ err = handle_replay_mode();
else
err = handle_verif_mode();
free_verif_stats(env.prog_stats, env.prog_stat_cnt);
free_verif_stats(env.baseline_stats, env.baseline_stat_cnt);
+ free(env.join_stats);
for (i = 0; i < env.filename_cnt; i++)
free(env.filenames[i]);
free(env.filenames);
for (i = 0; i < env.allow_filter_cnt; i++) {
+ free(env.allow_filters[i].any_glob);
free(env.allow_filters[i].file_glob);
free(env.allow_filters[i].prog_glob);
}
free(env.allow_filters);
for (i = 0; i < env.deny_filter_cnt; i++) {
+ free(env.deny_filters[i].any_glob);
free(env.deny_filters[i].file_glob);
free(env.deny_filters[i].prog_glob);
}
diff --git a/tools/testing/selftests/bpf/vmtest.sh b/tools/testing/selftests/bpf/vmtest.sh
index a29aa05ebb3e..316a56d680f2 100755
--- a/tools/testing/selftests/bpf/vmtest.sh
+++ b/tools/testing/selftests/bpf/vmtest.sh
@@ -21,6 +21,12 @@ x86_64)
QEMU_FLAGS=(-cpu host -smp 8)
BZIMAGE="arch/x86/boot/bzImage"
;;
+aarch64)
+ QEMU_BINARY=qemu-system-aarch64
+ QEMU_CONSOLE="ttyAMA0,115200"
+ QEMU_FLAGS=(-M virt,gic-version=3 -cpu host -smp 8)
+ BZIMAGE="arch/arm64/boot/Image"
+ ;;
*)
echo "Unsupported architecture"
exit 1
diff --git a/tools/testing/selftests/bpf/xdp_synproxy.c b/tools/testing/selftests/bpf/xdp_synproxy.c
index ff35320d2be9..410a1385a01d 100644
--- a/tools/testing/selftests/bpf/xdp_synproxy.c
+++ b/tools/testing/selftests/bpf/xdp_synproxy.c
@@ -104,7 +104,8 @@ static void parse_options(int argc, char *argv[], unsigned int *ifindex, __u32 *
{ "tc", no_argument, NULL, 'c' },
{ NULL, 0, NULL, 0 },
};
- unsigned long mss4, mss6, wscale, ttl;
+ unsigned long mss4, wscale, ttl;
+ unsigned long long mss6;
unsigned int tcpipopts_mask = 0;
if (argc < 2)
@@ -286,7 +287,7 @@ static int syncookie_open_bpf_maps(__u32 prog_id, int *values_map_fd, int *ports
prog_info = (struct bpf_prog_info) {
.nr_map_ids = 8,
- .map_ids = (__u64)map_ids,
+ .map_ids = (__u64)(unsigned long)map_ids,
};
info_len = sizeof(prog_info);
diff --git a/tools/testing/selftests/bpf/xsk.c b/tools/testing/selftests/bpf/xsk.c
index 0b3ff49c740d..39d349509ba4 100644
--- a/tools/testing/selftests/bpf/xsk.c
+++ b/tools/testing/selftests/bpf/xsk.c
@@ -33,6 +33,7 @@
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "xsk.h"
+#include "bpf_util.h"
#ifndef SOL_XDP
#define SOL_XDP 283
@@ -521,25 +522,6 @@ static int xsk_create_bpf_link(struct xsk_socket *xsk)
return 0;
}
-/* Copy up to sz - 1 bytes from zero-terminated src string and ensure that dst
- * is zero-terminated string no matter what (unless sz == 0, in which case
- * it's a no-op). It's conceptually close to FreeBSD's strlcpy(), but differs
- * in what is returned. Given this is internal helper, it's trivial to extend
- * this, when necessary. Use this instead of strncpy inside libbpf source code.
- */
-static inline void libbpf_strlcpy(char *dst, const char *src, size_t sz)
-{
- size_t i;
-
- if (sz == 0)
- return;
-
- sz--;
- for (i = 0; i < sz && src[i]; i++)
- dst[i] = src[i];
- dst[i] = '\0';
-}
-
static int xsk_get_max_queues(struct xsk_socket *xsk)
{
struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS };
@@ -552,7 +534,7 @@ static int xsk_get_max_queues(struct xsk_socket *xsk)
return -errno;
ifr.ifr_data = (void *)&channels;
- libbpf_strlcpy(ifr.ifr_name, ctx->ifname, IFNAMSIZ);
+ bpf_strlcpy(ifr.ifr_name, ctx->ifname, IFNAMSIZ);
err = ioctl(fd, SIOCETHTOOL, &ifr);
if (err && errno != EOPNOTSUPP) {
ret = -errno;
@@ -771,7 +753,7 @@ static int xsk_create_xsk_struct(int ifindex, struct xsk_socket *xsk)
}
ctx->ifindex = ifindex;
- libbpf_strlcpy(ctx->ifname, ifname, IFNAMSIZ);
+ bpf_strlcpy(ctx->ifname, ifname, IFNAMSIZ);
xsk->ctx = ctx;
xsk->ctx->has_bpf_link = xsk_probe_bpf_link();
@@ -958,7 +940,7 @@ static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk,
ctx->refcount = 1;
ctx->umem = umem;
ctx->queue_id = queue_id;
- libbpf_strlcpy(ctx->ifname, ifname, IFNAMSIZ);
+ bpf_strlcpy(ctx->ifname, ifname, IFNAMSIZ);
ctx->fill = fill;
ctx->comp = comp;
diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
index 681a5db80dae..162d3a516f2c 100644
--- a/tools/testing/selftests/bpf/xskxceiver.c
+++ b/tools/testing/selftests/bpf/xskxceiver.c
@@ -1006,7 +1006,8 @@ static int __send_pkts(struct ifobject *ifobject, u32 *pkt_nb, struct pollfd *fd
{
struct xsk_socket_info *xsk = ifobject->xsk;
bool use_poll = ifobject->use_poll;
- u32 i, idx = 0, ret, valid_pkts = 0;
+ u32 i, idx = 0, valid_pkts = 0;
+ int ret;
while (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) < BATCH_SIZE) {
if (use_poll) {
diff --git a/tools/testing/selftests/drivers/net/bonding/Makefile b/tools/testing/selftests/drivers/net/bonding/Makefile
index 6b8d2e2f23c2..0f3921908b07 100644
--- a/tools/testing/selftests/drivers/net/bonding/Makefile
+++ b/tools/testing/selftests/drivers/net/bonding/Makefile
@@ -5,7 +5,9 @@ TEST_PROGS := \
bond-arp-interval-causes-panic.sh \
bond-break-lacpdu-tx.sh \
bond-lladdr-target.sh \
- dev_addr_lists.sh
+ dev_addr_lists.sh \
+ mode-1-recovery-updelay.sh \
+ mode-2-recovery-updelay.sh
TEST_FILES := \
lag_lib.sh \
diff --git a/tools/testing/selftests/drivers/net/bonding/lag_lib.sh b/tools/testing/selftests/drivers/net/bonding/lag_lib.sh
index 16c7fb858ac1..2a268b17b61f 100644
--- a/tools/testing/selftests/drivers/net/bonding/lag_lib.sh
+++ b/tools/testing/selftests/drivers/net/bonding/lag_lib.sh
@@ -1,6 +1,8 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
+NAMESPACES=""
+
# Test that a link aggregation device (bonding, team) removes the hardware
# addresses that it adds on its underlying devices.
test_LAG_cleanup()
@@ -59,3 +61,107 @@ test_LAG_cleanup()
log_test "$driver cleanup mode $mode"
}
+
+# Build a generic 2 node net namespace with 2 connections
+# between the namespaces
+#
+# +-----------+ +-----------+
+# | node1 | | node2 |
+# | | | |
+# | | | |
+# | eth0 +-------+ eth0 |
+# | | | |
+# | eth1 +-------+ eth1 |
+# | | | |
+# +-----------+ +-----------+
+lag_setup2x2()
+{
+ local state=${1:-down}
+ local namespaces="lag_node1 lag_node2"
+
+ # create namespaces
+ for n in ${namespaces}; do
+ ip netns add ${n}
+ done
+
+ # wire up namespaces
+ ip link add name lag1 type veth peer name lag1-end
+ ip link set dev lag1 netns lag_node1 $state name eth0
+ ip link set dev lag1-end netns lag_node2 $state name eth0
+
+ ip link add name lag1 type veth peer name lag1-end
+ ip link set dev lag1 netns lag_node1 $state name eth1
+ ip link set dev lag1-end netns lag_node2 $state name eth1
+
+ NAMESPACES="${namespaces}"
+}
+
+# cleanup all lag related namespaces and remove the bonding module
+lag_cleanup()
+{
+ for n in ${NAMESPACES}; do
+ ip netns delete ${n} >/dev/null 2>&1 || true
+ done
+ modprobe -r bonding
+}
+
+SWITCH="lag_node1"
+CLIENT="lag_node2"
+CLIENTIP="172.20.2.1"
+SWITCHIP="172.20.2.2"
+
+lag_setup_network()
+{
+ lag_setup2x2 "down"
+
+ # create switch
+ ip netns exec ${SWITCH} ip link add br0 up type bridge
+ ip netns exec ${SWITCH} ip link set eth0 master br0 up
+ ip netns exec ${SWITCH} ip link set eth1 master br0 up
+ ip netns exec ${SWITCH} ip addr add ${SWITCHIP}/24 dev br0
+}
+
+lag_reset_network()
+{
+ ip netns exec ${CLIENT} ip link del bond0
+ ip netns exec ${SWITCH} ip link set eth0 up
+ ip netns exec ${SWITCH} ip link set eth1 up
+}
+
+create_bond()
+{
+ # create client
+ ip netns exec ${CLIENT} ip link set eth0 down
+ ip netns exec ${CLIENT} ip link set eth1 down
+
+ ip netns exec ${CLIENT} ip link add bond0 type bond $@
+ ip netns exec ${CLIENT} ip link set eth0 master bond0
+ ip netns exec ${CLIENT} ip link set eth1 master bond0
+ ip netns exec ${CLIENT} ip link set bond0 up
+ ip netns exec ${CLIENT} ip addr add ${CLIENTIP}/24 dev bond0
+}
+
+test_bond_recovery()
+{
+ RET=0
+
+ create_bond $@
+
+ # verify connectivity
+ ip netns exec ${CLIENT} ping ${SWITCHIP} -c 2 >/dev/null 2>&1
+ check_err $? "No connectivity"
+
+ # force the links of the bond down
+ ip netns exec ${SWITCH} ip link set eth0 down
+ sleep 2
+ ip netns exec ${SWITCH} ip link set eth0 up
+ ip netns exec ${SWITCH} ip link set eth1 down
+
+ # re-verify connectivity
+ ip netns exec ${CLIENT} ping ${SWITCHIP} -c 2 >/dev/null 2>&1
+
+ local rc=$?
+ check_err $rc "Bond failed to recover"
+ log_test "$1 ($2) bond recovery"
+ lag_reset_network
+}
diff --git a/tools/testing/selftests/drivers/net/bonding/mode-1-recovery-updelay.sh b/tools/testing/selftests/drivers/net/bonding/mode-1-recovery-updelay.sh
new file mode 100755
index 000000000000..ad4c845a4ac7
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/bonding/mode-1-recovery-updelay.sh
@@ -0,0 +1,45 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+# Regression Test:
+# When the bond is configured with down/updelay and the link state of
+# slave members flaps, if there are no remaining members up, the bond
+# should immediately select a member to bring up. (from bonding.txt
+# section 13.1 paragraph 4)
+#
+# +-------------+ +-----------+
+# | client | | switch |
+# | | | |
+# | +--------| link1 |-----+ |
+# | | +-------+ | |
+# | | | | | |
+# | | +-------+ | |
+# | | bond | link2 | Br0 | |
+# +-------------+ +-----------+
+# 172.20.2.1 172.20.2.2
+
+
+REQUIRE_MZ=no
+REQUIRE_JQ=no
+NUM_NETIFS=0
+lib_dir=$(dirname "$0")
+source "$lib_dir"/net_forwarding_lib.sh
+source "$lib_dir"/lag_lib.sh
+
+cleanup()
+{
+ lag_cleanup
+}
+
+trap cleanup 0 1 2
+
+lag_setup_network
+test_bond_recovery mode 1 miimon 100 updelay 0
+test_bond_recovery mode 1 miimon 100 updelay 200
+test_bond_recovery mode 1 miimon 100 updelay 500
+test_bond_recovery mode 1 miimon 100 updelay 1000
+test_bond_recovery mode 1 miimon 100 updelay 2000
+test_bond_recovery mode 1 miimon 100 updelay 5000
+test_bond_recovery mode 1 miimon 100 updelay 10000
+
+exit "$EXIT_STATUS"
diff --git a/tools/testing/selftests/drivers/net/bonding/mode-2-recovery-updelay.sh b/tools/testing/selftests/drivers/net/bonding/mode-2-recovery-updelay.sh
new file mode 100755
index 000000000000..2330d37453f9
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/bonding/mode-2-recovery-updelay.sh
@@ -0,0 +1,45 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+# Regression Test:
+# When the bond is configured with down/updelay and the link state of
+# slave members flaps, if there are no remaining members up, the bond
+# should immediately select a member to bring up. (from bonding.txt
+# section 13.1 paragraph 4)
+#
+# +-------------+ +-----------+
+# | client | | switch |
+# | | | |
+# | +--------| link1 |-----+ |
+# | | +-------+ | |
+# | | | | | |
+# | | +-------+ | |
+# | | bond | link2 | Br0 | |
+# +-------------+ +-----------+
+# 172.20.2.1 172.20.2.2
+
+
+REQUIRE_MZ=no
+REQUIRE_JQ=no
+NUM_NETIFS=0
+lib_dir=$(dirname "$0")
+source "$lib_dir"/net_forwarding_lib.sh
+source "$lib_dir"/lag_lib.sh
+
+cleanup()
+{
+ lag_cleanup
+}
+
+trap cleanup 0 1 2
+
+lag_setup_network
+test_bond_recovery mode 2 miimon 100 updelay 0
+test_bond_recovery mode 2 miimon 100 updelay 200
+test_bond_recovery mode 2 miimon 100 updelay 500
+test_bond_recovery mode 2 miimon 100 updelay 1000
+test_bond_recovery mode 2 miimon 100 updelay 2000
+test_bond_recovery mode 2 miimon 100 updelay 5000
+test_bond_recovery mode 2 miimon 100 updelay 10000
+
+exit "$EXIT_STATUS"
diff --git a/tools/testing/selftests/drivers/net/bonding/settings b/tools/testing/selftests/drivers/net/bonding/settings
index 867e118223cd..6091b45d226b 100644
--- a/tools/testing/selftests/drivers/net/bonding/settings
+++ b/tools/testing/selftests/drivers/net/bonding/settings
@@ -1 +1 @@
-timeout=60
+timeout=120
diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_control.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_control.sh
index d3a891d421ab..64153bbf95df 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_control.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_control.sh
@@ -83,6 +83,7 @@ ALL_TESTS="
ptp_general_test
flow_action_sample_test
flow_action_trap_test
+ eapol_test
"
NUM_NETIFS=4
source $lib_dir/lib.sh
@@ -677,6 +678,27 @@ flow_action_trap_test()
tc qdisc del dev $rp1 clsact
}
+eapol_payload_get()
+{
+ local source_mac=$1; shift
+ local p
+
+ p=$(:
+ )"01:80:C2:00:00:03:"$( : ETH daddr
+ )"$source_mac:"$( : ETH saddr
+ )"88:8E:"$( : ETH type
+ )
+ echo $p
+}
+
+eapol_test()
+{
+ local h1mac=$(mac_get $h1)
+
+ devlink_trap_stats_test "EAPOL" "eapol" $MZ $h1 -c 1 \
+ $(eapol_payload_get $h1mac) -p 100 -q
+}
+
trap cleanup EXIT
setup_prepare
diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l2_drops.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l2_drops.sh
index a4c2812e9807..8d4b2c6265b3 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l2_drops.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l2_drops.sh
@@ -14,6 +14,7 @@ ALL_TESTS="
ingress_stp_filter_test
port_list_is_empty_test
port_loopback_filter_test
+ locked_port_test
"
NUM_NETIFS=4
source $lib_dir/tc_common.sh
@@ -420,6 +421,110 @@ port_loopback_filter_test()
port_loopback_filter_uc_test
}
+locked_port_miss_test()
+{
+ local trap_name="locked_port"
+ local smac=00:11:22:33:44:55
+
+ bridge link set dev $swp1 learning off
+ bridge link set dev $swp1 locked on
+
+ RET=0
+
+ devlink_trap_stats_check $trap_name $MZ $h1 -c 1 \
+ -a $smac -b $(mac_get $h2) -A 192.0.2.1 -B 192.0.2.2 -p 100 -q
+ check_fail $? "Trap stats increased before setting action to \"trap\""
+
+ devlink_trap_action_set $trap_name "trap"
+
+ devlink_trap_stats_check $trap_name $MZ $h1 -c 1 \
+ -a $smac -b $(mac_get $h2) -A 192.0.2.1 -B 192.0.2.2 -p 100 -q
+ check_err $? "Trap stats did not increase when should"
+
+ devlink_trap_action_set $trap_name "drop"
+
+ devlink_trap_stats_check $trap_name $MZ $h1 -c 1 \
+ -a $smac -b $(mac_get $h2) -A 192.0.2.1 -B 192.0.2.2 -p 100 -q
+ check_fail $? "Trap stats increased after setting action to \"drop\""
+
+ devlink_trap_action_set $trap_name "trap"
+
+ bridge fdb replace $smac dev $swp1 master static vlan 1
+
+ devlink_trap_stats_check $trap_name $MZ $h1 -c 1 \
+ -a $smac -b $(mac_get $h2) -A 192.0.2.1 -B 192.0.2.2 -p 100 -q
+ check_fail $? "Trap stats increased after adding an FDB entry"
+
+ bridge fdb del $smac dev $swp1 master static vlan 1
+ bridge link set dev $swp1 locked off
+
+ devlink_trap_stats_check $trap_name $MZ $h1 -c 1 \
+ -a $smac -b $(mac_get $h2) -A 192.0.2.1 -B 192.0.2.2 -p 100 -q
+ check_fail $? "Trap stats increased after unlocking port"
+
+ log_test "Locked port - FDB miss"
+
+ devlink_trap_action_set $trap_name "drop"
+ bridge link set dev $swp1 learning on
+}
+
+locked_port_mismatch_test()
+{
+ local trap_name="locked_port"
+ local smac=00:11:22:33:44:55
+
+ bridge link set dev $swp1 learning off
+ bridge link set dev $swp1 locked on
+
+ RET=0
+
+ bridge fdb replace $smac dev $swp2 master static vlan 1
+
+ devlink_trap_stats_check $trap_name $MZ $h1 -c 1 \
+ -a $smac -b $(mac_get $h2) -A 192.0.2.1 -B 192.0.2.2 -p 100 -q
+ check_fail $? "Trap stats increased before setting action to \"trap\""
+
+ devlink_trap_action_set $trap_name "trap"
+
+ devlink_trap_stats_check $trap_name $MZ $h1 -c 1 \
+ -a $smac -b $(mac_get $h2) -A 192.0.2.1 -B 192.0.2.2 -p 100 -q
+ check_err $? "Trap stats did not increase when should"
+
+ devlink_trap_action_set $trap_name "drop"
+
+ devlink_trap_stats_check $trap_name $MZ $h1 -c 1 \
+ -a $smac -b $(mac_get $h2) -A 192.0.2.1 -B 192.0.2.2 -p 100 -q
+ check_fail $? "Trap stats increased after setting action to \"drop\""
+
+ devlink_trap_action_set $trap_name "trap"
+ bridge link set dev $swp1 locked off
+
+ devlink_trap_stats_check $trap_name $MZ $h1 -c 1 \
+ -a $smac -b $(mac_get $h2) -A 192.0.2.1 -B 192.0.2.2 -p 100 -q
+ check_fail $? "Trap stats increased after unlocking port"
+
+ bridge link set dev $swp1 locked on
+ bridge fdb replace $smac dev $swp1 master static vlan 1
+
+ devlink_trap_stats_check $trap_name $MZ $h1 -c 1 \
+ -a $smac -b $(mac_get $h2) -A 192.0.2.1 -B 192.0.2.2 -p 100 -q
+ check_fail $? "Trap stats increased after replacing an FDB entry"
+
+ bridge fdb del $smac dev $swp1 master static vlan 1
+ devlink_trap_action_set $trap_name "drop"
+
+ log_test "Locked port - FDB mismatch"
+
+ bridge link set dev $swp1 locked off
+ bridge link set dev $swp1 learning on
+}
+
+locked_port_test()
+{
+ locked_port_miss_test
+ locked_port_mismatch_test
+}
+
trap cleanup EXIT
setup_prepare
diff --git a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
index 04f03ae9d8fb..5e89657857c7 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
@@ -34,6 +34,7 @@ ALL_TESTS="
nexthop_obj_bucket_offload_test
nexthop_obj_blackhole_offload_test
nexthop_obj_route_offload_test
+ bridge_locked_port_test
devlink_reload_test
"
NUM_NETIFS=2
@@ -917,6 +918,36 @@ nexthop_obj_route_offload_test()
simple_if_fini $swp1 192.0.2.1/24 2001:db8:1::1/64
}
+bridge_locked_port_test()
+{
+ RET=0
+
+ ip link add name br1 up type bridge vlan_filtering 0
+
+ ip link add link $swp1 name $swp1.10 type vlan id 10
+ ip link set dev $swp1.10 master br1
+
+ bridge link set dev $swp1.10 locked on
+ check_fail $? "managed to set locked flag on a VLAN upper"
+
+ ip link set dev $swp1.10 nomaster
+ ip link set dev $swp1 master br1
+
+ bridge link set dev $swp1 locked on
+ check_fail $? "managed to set locked flag on a bridge port that has a VLAN upper"
+
+ ip link del dev $swp1.10
+ bridge link set dev $swp1 locked on
+
+ ip link add link $swp1 name $swp1.10 type vlan id 10
+ check_fail $? "managed to configure a VLAN upper on a locked port"
+
+ log_test "bridge locked port"
+
+ ip link del dev $swp1.10 &> /dev/null
+ ip link del dev br1
+}
+
devlink_reload_test()
{
# Test that after executing all the above configuration tests, a
diff --git a/tools/testing/selftests/nci/nci_dev.c b/tools/testing/selftests/nci/nci_dev.c
index 162c41e9bcae..1562aa7d60b0 100644
--- a/tools/testing/selftests/nci/nci_dev.c
+++ b/tools/testing/selftests/nci/nci_dev.c
@@ -888,6 +888,17 @@ TEST_F(NCI, deinit)
&msg);
ASSERT_EQ(rc, 0);
EXPECT_EQ(get_dev_enable_state(&msg), 0);
+
+ /* Test that operations that normally send packets to the driver
+ * don't cause issues when the device is already closed.
+ * Note: the send of NFC_CMD_DEV_UP itself still succeeds; it's just
+ * that the device won't actually be up.
+ */
+ close(self->virtual_nci_fd);
+ self->virtual_nci_fd = -1;
+ rc = send_cmd_with_idx(self->sd, self->fid, self->pid,
+ NFC_CMD_DEV_UP, self->dev_idex);
+ EXPECT_EQ(rc, 0);
}
TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index ff8fe93f679c..9cc84114741d 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
bind_bhash
+csum
cmsg_sender
diag_uid
fin_ack_lat
@@ -26,6 +27,7 @@ rxtimestamp
sk_bind_sendto_listen
sk_connect_zero_addr
socket
+so_incoming_cpu
so_netns_cookie
so_txtime
stress_reuseport_listen
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 69c58362c0ed..3007e98a6d64 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -71,6 +71,10 @@ TEST_GEN_FILES += bind_bhash
TEST_GEN_PROGS += sk_bind_sendto_listen
TEST_GEN_PROGS += sk_connect_zero_addr
TEST_PROGS += test_ingress_egress_chaining.sh
+TEST_GEN_PROGS += so_incoming_cpu
+TEST_PROGS += sctp_vrf.sh
+TEST_GEN_FILES += sctp_hello
+TEST_GEN_FILES += csum
TEST_FILES := settings
diff --git a/tools/testing/selftests/net/bpf/Makefile b/tools/testing/selftests/net/bpf/Makefile
index 8ccaf8732eb2..a26cb94354f6 100644
--- a/tools/testing/selftests/net/bpf/Makefile
+++ b/tools/testing/selftests/net/bpf/Makefile
@@ -1,14 +1,51 @@
# SPDX-License-Identifier: GPL-2.0
CLANG ?= clang
+SCRATCH_DIR := $(OUTPUT)/tools
+BUILD_DIR := $(SCRATCH_DIR)/build
+BPFDIR := $(abspath ../../../lib/bpf)
+APIDIR := $(abspath ../../../include/uapi)
+
CCINCLUDE += -I../../bpf
-CCINCLUDE += -I../../../../lib
CCINCLUDE += -I../../../../../usr/include/
+CCINCLUDE += -I$(SCRATCH_DIR)/include
+
+BPFOBJ := $(BUILD_DIR)/libbpf/libbpf.a
+
+MAKE_DIRS := $(BUILD_DIR)/libbpf
+$(MAKE_DIRS):
+ mkdir -p $@
TEST_CUSTOM_PROGS = $(OUTPUT)/bpf/nat6to4.o
all: $(TEST_CUSTOM_PROGS)
-$(OUTPUT)/%.o: %.c
- $(CLANG) -O2 -target bpf -c $< $(CCINCLUDE) -o $@
+# Get Clang's default includes on this system, as opposed to those seen by
+# '-target bpf'. This fixes "missing" files on some architectures/distros,
+# such as asm/byteorder.h, asm/socket.h, asm/sockios.h, sys/cdefs.h etc.
+#
+# Use '-idirafter': Don't interfere with include mechanics except where the
+# build would have failed anyways.
+define get_sys_includes
+$(shell $(1) $(2) -v -E - </dev/null 2>&1 \
+ | sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') \
+$(shell $(1) $(2) -dM -E - </dev/null | grep '__riscv_xlen ' | awk '{printf("-D__riscv_xlen=%d -D__BITS_PER_LONG=%d", $$3, $$3)}')
+endef
+
+ifneq ($(CROSS_COMPILE),)
+CLANG_TARGET_ARCH = --target=$(notdir $(CROSS_COMPILE:%-=%))
+endif
+
+CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG),$(CLANG_TARGET_ARCH))
+
+$(TEST_CUSTOM_PROGS): $(BPFOBJ)
+ $(CLANG) -O2 -target bpf -c $(@:.o=.c) $(CCINCLUDE) $(CLANG_SYS_INCLUDES) -o $@
+
+$(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
+ $(APIDIR)/linux/bpf.h \
+ | $(BUILD_DIR)/libbpf
+ $(MAKE) $(submake_extras) -C $(BPFDIR) OUTPUT=$(BUILD_DIR)/libbpf/ \
+ EXTRA_CFLAGS='-g -O0' \
+ DESTDIR=$(SCRATCH_DIR) prefix= all install_headers
+
+EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR)
-EXTRA_CLEAN := $(TEST_CUSTOM_PROGS)
diff --git a/tools/testing/selftests/net/csum.c b/tools/testing/selftests/net/csum.c
new file mode 100644
index 000000000000..82a1c1839da6
--- /dev/null
+++ b/tools/testing/selftests/net/csum.c
@@ -0,0 +1,986 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Test hardware checksum offload: Rx + Tx, IPv4 + IPv6, TCP + UDP.
+ *
+ * The test runs on two machines to exercise the NIC. For this reason it
+ * is not integrated in kselftests.
+ *
+ * CMD="./csum -[46] -[tu] -S $SADDR -D $DADDR -[RT] -r 1 $EXTRA_ARGS"
+ *
+ * Rx:
+ *
+ * The sender sends packets with a known checksum field using PF_INET(6)
+ * SOCK_RAW sockets.
+ *
+ * good packet: $CMD [-t]
+ * bad packet: $CMD [-t] -E
+ *
+ * The receiver reads UDP packets with a UDP socket. This is not an
+ * option for TCP packets ('-t'). Optionally insert an iptables filter
+ * to avoid these entering the real protocol stack.
+ *
+ * The receiver also reads all packets with a PF_PACKET socket, to
+ * observe whether both good and bad packets arrive on the host, and to
+ * read the optional TP_STATUS_CSUM_VALID bit. This requires setting
+ * option PACKET_AUXDATA, and works only for CHECKSUM_UNNECESSARY.
+ *
+ * Tx:
+ *
+ * The sender needs to build CHECKSUM_PARTIAL packets to exercise tx
+ * checksum offload.
+ *
+ * The sender can send packets with a UDP socket.
+ *
+ * Optionally crafts a packet that sums up to zero to verify that the
+ * device writes negative zero 0xFFFF in this case to distinguish from
+ * 0x0000 (checksum disabled), as required by RFC 768. Hit this case
+ * by choosing a specific source port.
+ *
+ * good packet: $CMD -U
+ * zero csum: $CMD -U -Z
+ *
+ * The sender can also build packets with PF_PACKET with PACKET_VNET_HDR,
+ * to cover more protocols. PF_PACKET requires passing src and dst mac
+ * addresses.
+ *
+ * good packet: $CMD -s $smac -d $dmac -p [-t]
+ *
+ * Argument '-z' sends UDP packets with a 0x0000 checksum-disabled field,
+ * to verify that the NIC passes these packets unmodified.
+ *
+ * Argument '-e' adds a transport mode encapsulation header between
+ * network and transport header. This will fail for devices that parse
+ * headers. Should work on devices that implement protocol agnostic tx
+ * checksum offload (NETIF_F_HW_CSUM).
+ *
+ * Argument '-r $SEED' optionally randomizes header, payload and length
+ * to increase coverage between packets sent. SEED 1 further chooses a
+ * different seed for each run (and logs this for reproducibility). It
+ * is advised to enable this for extra coverage in continuous testing.
+ */
+
+#define _GNU_SOURCE
+
+#include <arpa/inet.h>
+#include <asm/byteorder.h>
+#include <errno.h>
+#include <error.h>
+#include <linux/filter.h>
+#include <linux/if_packet.h>
+#include <linux/ipv6.h>
+#include <linux/virtio_net.h>
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <netinet/if_ether.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet/tcp.h>
+#include <netinet/udp.h>
+#include <poll.h>
+#include <sched.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+static bool cfg_bad_csum;
+static int cfg_family = PF_INET6;
+static int cfg_num_pkt = 4;
+static bool cfg_do_rx = true;
+static bool cfg_do_tx = true;
+static bool cfg_encap;
+static char *cfg_ifname = "eth0";
+static char *cfg_mac_dst;
+static char *cfg_mac_src;
+static int cfg_proto = IPPROTO_UDP;
+static int cfg_payload_char = 'a';
+static int cfg_payload_len = 100;
+static uint16_t cfg_port_dst = 34000;
+static uint16_t cfg_port_src = 33000;
+static uint16_t cfg_port_src_encap = 33001;
+static unsigned int cfg_random_seed;
+static int cfg_rcvbuf = 1 << 22; /* be able to queue large cfg_num_pkt */
+static bool cfg_send_pfpacket;
+static bool cfg_send_udp;
+static int cfg_timeout_ms = 2000;
+static bool cfg_zero_disable; /* skip checksum: set to zero (udp only) */
+static bool cfg_zero_sum; /* create packet that adds up to zero */
+
+static struct sockaddr_in cfg_daddr4 = {.sin_family = AF_INET};
+static struct sockaddr_in cfg_saddr4 = {.sin_family = AF_INET};
+static struct sockaddr_in6 cfg_daddr6 = {.sin6_family = AF_INET6};
+static struct sockaddr_in6 cfg_saddr6 = {.sin6_family = AF_INET6};
+
+#define ENC_HEADER_LEN (sizeof(struct udphdr) + sizeof(struct udp_encap_hdr))
+#define MAX_HEADER_LEN (sizeof(struct ipv6hdr) + ENC_HEADER_LEN + sizeof(struct tcphdr))
+#define MAX_PAYLOAD_LEN 1024
+
+/* Trivial demo encap. Stand-in for transport layer protocols like ESP or PSP */
+struct udp_encap_hdr {
+ uint8_t nexthdr;
+ uint8_t padding[3];
+};
+
+/* IP addresses, for the pseudo header csum. A global var is ugly, but
+ * passing it through all the functions was worse.
+ */
+static void *iph_addr_p;
+
+static unsigned long gettimeofday_ms(void)
+{
+ struct timeval tv;
+
+ gettimeofday(&tv, NULL);
+ return (tv.tv_sec * 1000UL) + (tv.tv_usec / 1000UL);
+}
+
+static uint32_t checksum_nofold(char *data, size_t len, uint32_t sum)
+{
+ uint16_t *words = (uint16_t *)data;
+ int i;
+
+ for (i = 0; i < len / 2; i++)
+ sum += words[i];
+
+ if (len & 1)
+ sum += ((unsigned char *)data)[len - 1];
+
+ return sum;
+}
+
+static uint16_t checksum_fold(void *data, size_t len, uint32_t sum)
+{
+ sum = checksum_nofold(data, len, sum);
+
+ while (sum > 0xFFFF)
+ sum = (sum & 0xFFFF) + (sum >> 16);
+
+ return ~sum;
+}
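
checksum_nofold() accumulates 16-bit words into a 32-bit sum; checksum_fold() then folds the carries back in and inverts, which is the standard Internet (ones' complement) checksum. A small worked example of the fold, including the RFC 768 rule that a computed UDP checksum of 0x0000 must be transmitted as 0xFFFF (since 0x0000 means "no checksum"); this is a standalone sketch, not part of the test:

    #include <assert.h>
    #include <stdint.h>

    /* Fold a 32-bit accumulator into 16 bits and invert (ones' complement). */
    static uint16_t fold(uint32_t sum)
    {
        while (sum > 0xFFFF)
            sum = (sum & 0xFFFF) + (sum >> 16);
        return ~sum & 0xFFFF;
    }

    int main(void)
    {
        /* 0x0001 + 0xF203 = 0xF204; inverted -> 0x0DFB */
        assert(fold(0x0001 + 0xF203) == 0x0DFB);

        /* carry wraps around: 0xFFFF + 0x0002 = 0x10001 -> 0x0002 -> 0xFFFD */
        assert(fold(0xFFFFu + 0x0002) == 0xFFFD);

        /* RFC 768: if the computed UDP checksum is 0, send 0xFFFF instead */
        uint16_t udp_csum = fold(0xFFFFu);     /* ~0xFFFF == 0x0000 */
        if (udp_csum == 0)
            udp_csum = 0xFFFF;
        assert(udp_csum == 0xFFFF);
        return 0;
    }
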
+
+static uint16_t checksum(void *th, uint16_t proto, size_t len)
+{
+ uint32_t sum;
+ int alen;
+
+ alen = cfg_family == PF_INET6 ? 32 : 8;
+
+ sum = checksum_nofold(iph_addr_p, alen, 0);
+ sum += htons(proto);
+ sum += htons(len);
+
+ /* With CHECKSUM_PARTIAL kernel expects non-inverted pseudo csum */
+ if (cfg_do_tx && cfg_send_pfpacket)
+ return ~checksum_fold(NULL, 0, sum);
+ else
+ return checksum_fold(th, len, sum);
+}
+
+static void *build_packet_ipv4(void *_iph, uint8_t proto, unsigned int len)
+{
+ struct iphdr *iph = _iph;
+
+ memset(iph, 0, sizeof(*iph));
+
+ iph->version = 4;
+ iph->ihl = 5;
+ iph->ttl = 8;
+ iph->protocol = proto;
+ iph->saddr = cfg_saddr4.sin_addr.s_addr;
+ iph->daddr = cfg_daddr4.sin_addr.s_addr;
+ iph->tot_len = htons(sizeof(*iph) + len);
+ iph->check = checksum_fold(iph, sizeof(*iph), 0);
+
+ iph_addr_p = &iph->saddr;
+
+ return iph + 1;
+}
+
+static void *build_packet_ipv6(void *_ip6h, uint8_t proto, unsigned int len)
+{
+ struct ipv6hdr *ip6h = _ip6h;
+
+ memset(ip6h, 0, sizeof(*ip6h));
+
+ ip6h->version = 6;
+ ip6h->payload_len = htons(len);
+ ip6h->nexthdr = proto;
+ ip6h->hop_limit = 64;
+ ip6h->saddr = cfg_saddr6.sin6_addr;
+ ip6h->daddr = cfg_daddr6.sin6_addr;
+
+ iph_addr_p = &ip6h->saddr;
+
+ return ip6h + 1;
+}
+
+static void *build_packet_udp(void *_uh)
+{
+ struct udphdr *uh = _uh;
+
+ uh->source = htons(cfg_port_src);
+ uh->dest = htons(cfg_port_dst);
+ uh->len = htons(sizeof(*uh) + cfg_payload_len);
+ uh->check = 0;
+
+ /* choose source port so that uh->check adds up to zero */
+ if (cfg_zero_sum) {
+ uh->source = 0;
+ uh->source = checksum(uh, IPPROTO_UDP, sizeof(*uh) + cfg_payload_len);
+
+ fprintf(stderr, "tx: changing sport: %hu -> %hu\n",
+ cfg_port_src, ntohs(uh->source));
+ cfg_port_src = ntohs(uh->source);
+ }
+
+ if (cfg_zero_disable)
+ uh->check = 0;
+ else
+ uh->check = checksum(uh, IPPROTO_UDP, sizeof(*uh) + cfg_payload_len);
+
+ if (cfg_bad_csum)
+ uh->check = ~uh->check;
+
+ fprintf(stderr, "tx: sending checksum: 0x%x\n", uh->check);
+ return uh + 1;
+}
+
+static void *build_packet_tcp(void *_th)
+{
+ struct tcphdr *th = _th;
+
+ th->source = htons(cfg_port_src);
+ th->dest = htons(cfg_port_dst);
+ th->doff = 5;
+ th->check = 0;
+
+ th->check = checksum(th, IPPROTO_TCP, sizeof(*th) + cfg_payload_len);
+
+ if (cfg_bad_csum)
+ th->check = ~th->check;
+
+ fprintf(stderr, "tx: sending checksum: 0x%x\n", th->check);
+ return th + 1;
+}
+
+static char *build_packet_udp_encap(void *_uh)
+{
+ struct udphdr *uh = _uh;
+ struct udp_encap_hdr *eh = _uh + sizeof(*uh);
+
+ /* outer dst == inner dst, to simplify BPF filter
+ * outer src != inner src, to demultiplex on recv
+ */
+ uh->dest = htons(cfg_port_dst);
+ uh->source = htons(cfg_port_src_encap);
+ uh->check = 0;
+ uh->len = htons(sizeof(*uh) +
+ sizeof(*eh) +
+ sizeof(struct tcphdr) +
+ cfg_payload_len);
+
+ eh->nexthdr = IPPROTO_TCP;
+
+ return build_packet_tcp(eh + 1);
+}
+
+static char *build_packet(char *buf, int max_len, int *len)
+{
+ uint8_t proto;
+ char *off;
+ int tlen;
+
+ if (cfg_random_seed) {
+ int *buf32 = (void *)buf;
+ int i;
+
+ for (i = 0; i < (max_len / sizeof(int)); i++)
+ buf32[i] = rand();
+ } else {
+ memset(buf, cfg_payload_char, max_len);
+ }
+
+ if (cfg_proto == IPPROTO_UDP)
+ tlen = sizeof(struct udphdr) + cfg_payload_len;
+ else
+ tlen = sizeof(struct tcphdr) + cfg_payload_len;
+
+ if (cfg_encap) {
+ proto = IPPROTO_UDP;
+ tlen += ENC_HEADER_LEN;
+ } else {
+ proto = cfg_proto;
+ }
+
+ if (cfg_family == PF_INET)
+ off = build_packet_ipv4(buf, proto, tlen);
+ else
+ off = build_packet_ipv6(buf, proto, tlen);
+
+ if (cfg_encap)
+ off = build_packet_udp_encap(off);
+ else if (cfg_proto == IPPROTO_UDP)
+ off = build_packet_udp(off);
+ else
+ off = build_packet_tcp(off);
+
+ /* only pass the payload, but still compute headers for cfg_zero_sum */
+ if (cfg_send_udp) {
+ *len = cfg_payload_len;
+ return off;
+ }
+
+ *len = off - buf + cfg_payload_len;
+ return buf;
+}
+
+static int open_inet(int ipproto, int protocol)
+{
+ int fd;
+
+ fd = socket(cfg_family, ipproto, protocol);
+ if (fd == -1)
+ error(1, errno, "socket inet");
+
+ if (cfg_family == PF_INET6) {
+ /* may have been updated by cfg_zero_sum */
+ cfg_saddr6.sin6_port = htons(cfg_port_src);
+
+ if (bind(fd, (void *)&cfg_saddr6, sizeof(cfg_saddr6)))
+ error(1, errno, "bind dgram 6");
+ if (connect(fd, (void *)&cfg_daddr6, sizeof(cfg_daddr6)))
+ error(1, errno, "connect dgram 6");
+ } else {
+ /* may have been updated by cfg_zero_sum */
+ cfg_saddr4.sin_port = htons(cfg_port_src);
+
+ if (bind(fd, (void *)&cfg_saddr4, sizeof(cfg_saddr4)))
+ error(1, errno, "bind dgram 4");
+ if (connect(fd, (void *)&cfg_daddr4, sizeof(cfg_daddr4)))
+ error(1, errno, "connect dgram 4");
+ }
+
+ return fd;
+}
+
+static int open_packet(void)
+{
+ int fd, one = 1;
+
+ fd = socket(PF_PACKET, SOCK_RAW, 0);
+ if (fd == -1)
+ error(1, errno, "socket packet");
+
+ if (setsockopt(fd, SOL_PACKET, PACKET_VNET_HDR, &one, sizeof(one)))
+ error(1, errno, "setsockopt packet_vnet_ndr");
+
+ return fd;
+}
+
+static void send_inet(int fd, const char *buf, int len)
+{
+ int ret;
+
+ ret = write(fd, buf, len);
+ if (ret == -1)
+ error(1, errno, "write");
+ if (ret != len)
+ error(1, 0, "write: %d", ret);
+}
+
+static void eth_str_to_addr(const char *str, unsigned char *eth)
+{
+ if (sscanf(str, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
+ &eth[0], &eth[1], &eth[2], &eth[3], &eth[4], &eth[5]) != 6)
+ error(1, 0, "cannot parse mac addr %s", str);
+}
+
+static void send_packet(int fd, const char *buf, int len)
+{
+ struct virtio_net_hdr vh = {0};
+ struct sockaddr_ll addr = {0};
+ struct msghdr msg = {0};
+ struct ethhdr eth;
+ struct iovec iov[3];
+ int ret;
+
+ addr.sll_family = AF_PACKET;
+ addr.sll_halen = ETH_ALEN;
+ addr.sll_ifindex = if_nametoindex(cfg_ifname);
+ if (!addr.sll_ifindex)
+ error(1, errno, "if_nametoindex %s", cfg_ifname);
+
+ vh.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+ if (cfg_family == PF_INET6) {
+ vh.csum_start = sizeof(struct ethhdr) + sizeof(struct ipv6hdr);
+ addr.sll_protocol = htons(ETH_P_IPV6);
+ } else {
+ vh.csum_start = sizeof(struct ethhdr) + sizeof(struct iphdr);
+ addr.sll_protocol = htons(ETH_P_IP);
+ }
+
+ if (cfg_encap)
+ vh.csum_start += ENC_HEADER_LEN;
+
+ if (cfg_proto == IPPROTO_TCP) {
+ vh.csum_offset = __builtin_offsetof(struct tcphdr, check);
+ vh.hdr_len = vh.csum_start + sizeof(struct tcphdr);
+ } else {
+ vh.csum_offset = __builtin_offsetof(struct udphdr, check);
+ vh.hdr_len = vh.csum_start + sizeof(struct udphdr);
+ }
+
+ eth_str_to_addr(cfg_mac_src, eth.h_source);
+ eth_str_to_addr(cfg_mac_dst, eth.h_dest);
+ eth.h_proto = addr.sll_protocol;
+
+ iov[0].iov_base = &vh;
+ iov[0].iov_len = sizeof(vh);
+
+ iov[1].iov_base = &eth;
+ iov[1].iov_len = sizeof(eth);
+
+ iov[2].iov_base = (void *)buf;
+ iov[2].iov_len = len;
+
+ msg.msg_iov = iov;
+ msg.msg_iovlen = sizeof(iov) / sizeof(iov[0]);
+
+ msg.msg_name = &addr;
+ msg.msg_namelen = sizeof(addr);
+
+ ret = sendmsg(fd, &msg, 0);
+ if (ret == -1)
+ error(1, errno, "sendmsg packet");
+ if (ret != sizeof(vh) + sizeof(eth) + len)
+ error(1, errno, "sendmsg packet: %u", ret);
+}
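
With VIRTIO_NET_HDR_F_NEEDS_CSUM the kernel treats the frame as CHECKSUM_PARTIAL: the stack (or device) checksums from csum_start to the end of the packet and stores the result at csum_start + csum_offset. A sketch of how those values work out for the common non-encap cases above; the concrete numbers assume the fixed glibc/Linux header layouts (14-byte ethhdr, 20-byte option-less iphdr):

    #include <assert.h>
    #include <net/ethernet.h>
    #include <netinet/ip.h>
    #include <netinet/tcp.h>
    #include <netinet/udp.h>
    #include <stddef.h>

    int main(void)
    {
        /* IPv4 + TCP, no encap: transport header starts after eth + ip */
        size_t csum_start = sizeof(struct ethhdr) + sizeof(struct iphdr);
        size_t csum_offset = offsetof(struct tcphdr, check);

        assert(csum_start == 14 + 20);    /* checksum coverage begins at 34 */
        assert(csum_offset == 16);        /* tcph->check within the header */

        /* UDP stores its checksum 6 bytes into the header instead */
        assert(offsetof(struct udphdr, check) == 6);
        return 0;
    }
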
+
+static int recv_prepare_udp(void)
+{
+ int fd;
+
+ fd = socket(cfg_family, SOCK_DGRAM, 0);
+ if (fd == -1)
+ error(1, errno, "socket r");
+
+ if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF,
+ &cfg_rcvbuf, sizeof(cfg_rcvbuf)))
+ error(1, errno, "setsockopt SO_RCVBUF r");
+
+ if (cfg_family == PF_INET6) {
+ if (bind(fd, (void *)&cfg_daddr6, sizeof(cfg_daddr6)))
+ error(1, errno, "bind r");
+ } else {
+ if (bind(fd, (void *)&cfg_daddr4, sizeof(cfg_daddr4)))
+ error(1, errno, "bind r");
+ }
+
+ return fd;
+}
+
+/* Filter out all traffic that is not cfg_proto with our destination port.
+ *
+ * Otherwise background noise may cause PF_PACKET receive queue overflow,
+ * dropping the expected packets and failing the test.
+ */
+static void __recv_prepare_packet_filter(int fd, int off_nexthdr, int off_dport)
+{
+ struct sock_filter filter[] = {
+ BPF_STMT(BPF_LD + BPF_B + BPF_ABS, SKF_AD_OFF + SKF_AD_PKTTYPE),
+ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, PACKET_HOST, 0, 4),
+ BPF_STMT(BPF_LD + BPF_B + BPF_ABS, off_nexthdr),
+ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, cfg_encap ? IPPROTO_UDP : cfg_proto, 0, 2),
+ BPF_STMT(BPF_LD + BPF_H + BPF_ABS, off_dport),
+ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, cfg_port_dst, 1, 0),
+ BPF_STMT(BPF_RET + BPF_K, 0),
+ BPF_STMT(BPF_RET + BPF_K, 0xFFFF),
+ };
+ struct sock_fprog prog = {};
+
+ prog.filter = filter;
+ prog.len = sizeof(filter) / sizeof(struct sock_filter);
+ if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)))
+ error(1, errno, "setsockopt filter");
+}
+
+static void recv_prepare_packet_filter(int fd)
+{
+ const int off_dport = offsetof(struct tcphdr, dest); /* same for udp */
+
+ if (cfg_family == AF_INET)
+ __recv_prepare_packet_filter(fd, offsetof(struct iphdr, protocol),
+ sizeof(struct iphdr) + off_dport);
+ else
+ __recv_prepare_packet_filter(fd, offsetof(struct ipv6hdr, nexthdr),
+ sizeof(struct ipv6hdr) + off_dport);
+}
+
+static void recv_prepare_packet_bind(int fd)
+{
+ struct sockaddr_ll laddr = {0};
+
+ laddr.sll_family = AF_PACKET;
+
+ if (cfg_family == PF_INET)
+ laddr.sll_protocol = htons(ETH_P_IP);
+ else
+ laddr.sll_protocol = htons(ETH_P_IPV6);
+
+ laddr.sll_ifindex = if_nametoindex(cfg_ifname);
+ if (!laddr.sll_ifindex)
+ error(1, 0, "if_nametoindex %s", cfg_ifname);
+
+ if (bind(fd, (void *)&laddr, sizeof(laddr)))
+ error(1, errno, "bind pf_packet");
+}
+
+static int recv_prepare_packet(void)
+{
+ int fd, one = 1;
+
+ fd = socket(PF_PACKET, SOCK_DGRAM, 0);
+ if (fd == -1)
+ error(1, errno, "socket p");
+
+ if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF,
+ &cfg_rcvbuf, sizeof(cfg_rcvbuf)))
+ error(1, errno, "setsockopt SO_RCVBUF p");
+
+ /* enable auxdata to recv checksum status (valid vs unknown) */
+ if (setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &one, sizeof(one)))
+ error(1, errno, "setsockopt auxdata");
+
+ /* install filter to restrict packet flow to match */
+ recv_prepare_packet_filter(fd);
+
+ /* bind to address family to start packet flow */
+ recv_prepare_packet_bind(fd);
+
+ return fd;
+}
+
+static int recv_udp(int fd)
+{
+ static char buf[MAX_PAYLOAD_LEN];
+ int ret, count = 0;
+
+ while (1) {
+ ret = recv(fd, buf, sizeof(buf), MSG_DONTWAIT);
+ if (ret == -1 && errno == EAGAIN)
+ break;
+ if (ret == -1)
+ error(1, errno, "recv r");
+
+ fprintf(stderr, "rx: udp: len=%u\n", ret);
+ count++;
+ }
+
+ return count;
+}
+
+static int recv_verify_csum(void *th, int len, uint16_t sport, uint16_t csum_field)
+{
+ uint16_t csum;
+
+ csum = checksum(th, cfg_proto, len);
+
+ fprintf(stderr, "rx: pkt: sport=%hu len=%u csum=0x%hx verify=0x%hx\n",
+ sport, len, csum_field, csum);
+
+ /* csum must be zero unless cfg_bad_csum indicates bad csum */
+ if (csum && !cfg_bad_csum) {
+ fprintf(stderr, "pkt: bad csum\n");
+ return 1;
+ } else if (cfg_bad_csum && !csum) {
+ fprintf(stderr, "pkt: good csum, while bad expected\n");
+ return 1;
+ }
+
+ if (cfg_zero_sum && csum_field != 0xFFFF) {
+ fprintf(stderr, "pkt: zero csum: field should be 0xFFFF, is 0x%hx\n", csum_field);
+ return 1;
+ }
+
+ return 0;
+}
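
recv_verify_csum() relies on a convenient ones' complement property: re-summing a segment that already contains a correct checksum (pseudo header included) folds to zero, so `csum == 0` means "valid" without ever reconstructing the expected value. A minimal standalone demonstration of that property:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t sum16(const uint16_t *w, int n, uint32_t sum)
    {
        while (n--)
            sum += *w++;
        return sum;
    }

    static uint16_t fold(uint32_t sum)
    {
        while (sum > 0xFFFF)
            sum = (sum & 0xFFFF) + (sum >> 16);
        return ~sum & 0xFFFF;
    }

    int main(void)
    {
        uint16_t seg[4] = { 0x1122, 0x3344, 0 /* checksum field */, 0x5566 };

        /* sender: compute the checksum with the field zeroed */
        seg[2] = fold(sum16(seg, 4, 0));

        /* receiver: summing over the filled-in field must fold to zero */
        assert(fold(sum16(seg, 4, 0)) == 0);
        return 0;
    }
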
+
+static int recv_verify_packet_tcp(void *th, int len)
+{
+ struct tcphdr *tcph = th;
+
+ if (len < sizeof(*tcph) || tcph->dest != htons(cfg_port_dst))
+ return -1;
+
+ return recv_verify_csum(th, len, ntohs(tcph->source), tcph->check);
+}
+
+static int recv_verify_packet_udp_encap(void *th, int len)
+{
+ struct udp_encap_hdr *eh = th;
+
+ if (len < sizeof(*eh) || eh->nexthdr != IPPROTO_TCP)
+ return -1;
+
+ return recv_verify_packet_tcp(eh + 1, len - sizeof(*eh));
+}
+
+static int recv_verify_packet_udp(void *th, int len)
+{
+ struct udphdr *udph = th;
+
+ if (len < sizeof(*udph))
+ return -1;
+
+ if (udph->dest != htons(cfg_port_dst))
+ return -1;
+
+ if (udph->source == htons(cfg_port_src_encap))
+ return recv_verify_packet_udp_encap(udph + 1,
+ len - sizeof(*udph));
+
+ return recv_verify_csum(th, len, ntohs(udph->source), udph->check);
+}
+
+static int recv_verify_packet_ipv4(void *nh, int len)
+{
+ struct iphdr *iph = nh;
+ uint16_t proto = cfg_encap ? IPPROTO_UDP : cfg_proto;
+
+ if (len < sizeof(*iph) || iph->protocol != proto)
+ return -1;
+
+ iph_addr_p = &iph->saddr;
+ if (proto == IPPROTO_TCP)
+ return recv_verify_packet_tcp(iph + 1, len - sizeof(*iph));
+ else
+ return recv_verify_packet_udp(iph + 1, len - sizeof(*iph));
+}
+
+static int recv_verify_packet_ipv6(void *nh, int len)
+{
+ struct ipv6hdr *ip6h = nh;
+ uint16_t proto = cfg_encap ? IPPROTO_UDP : cfg_proto;
+
+ if (len < sizeof(*ip6h) || ip6h->nexthdr != proto)
+ return -1;
+
+ iph_addr_p = &ip6h->saddr;
+
+ if (proto == IPPROTO_TCP)
+ return recv_verify_packet_tcp(ip6h + 1, len - sizeof(*ip6h));
+ else
+ return recv_verify_packet_udp(ip6h + 1, len - sizeof(*ip6h));
+}
+
+/* return whether auxdata includes TP_STATUS_CSUM_VALID */
+static bool recv_verify_packet_csum(struct msghdr *msg)
+{
+ struct tpacket_auxdata *aux = NULL;
+ struct cmsghdr *cm;
+
+ if (msg->msg_flags & MSG_CTRUNC)
+ error(1, 0, "cmsg: truncated");
+
+ for (cm = CMSG_FIRSTHDR(msg); cm; cm = CMSG_NXTHDR(msg, cm)) {
+ if (cm->cmsg_level != SOL_PACKET ||
+ cm->cmsg_type != PACKET_AUXDATA)
+ error(1, 0, "cmsg: level=%d type=%d\n",
+ cm->cmsg_level, cm->cmsg_type);
+
+ if (cm->cmsg_len != CMSG_LEN(sizeof(struct tpacket_auxdata)))
+ error(1, 0, "cmsg: len=%lu expected=%lu",
+ cm->cmsg_len, CMSG_LEN(sizeof(struct tpacket_auxdata)));
+
+ aux = (void *)CMSG_DATA(cm);
+ }
+
+ if (!aux)
+ error(1, 0, "cmsg: no auxdata");
+
+ return aux->tp_status & TP_STATUS_CSUM_VALID;
+}
+
+static int recv_packet(int fd)
+{
+ static char _buf[MAX_HEADER_LEN + MAX_PAYLOAD_LEN];
+ unsigned long total = 0, bad_csums = 0, bad_validations = 0;
+ char ctrl[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
+ struct pkt *buf = (void *)_buf;
+ struct msghdr msg = {0};
+ struct iovec iov;
+ int len, ret;
+
+ iov.iov_base = _buf;
+ iov.iov_len = sizeof(_buf);
+
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+
+ msg.msg_control = ctrl;
+ msg.msg_controllen = sizeof(ctrl);
+
+ while (1) {
+ msg.msg_flags = 0;
+
+ len = recvmsg(fd, &msg, MSG_DONTWAIT);
+ if (len == -1 && errno == EAGAIN)
+ break;
+ if (len == -1)
+ error(1, errno, "recv p");
+
+ if (cfg_family == PF_INET6)
+ ret = recv_verify_packet_ipv6(buf, len);
+ else
+ ret = recv_verify_packet_ipv4(buf, len);
+
+ if (ret == -1 /* skip: non-matching */)
+ continue;
+
+ total++;
+ if (ret == 1)
+ bad_csums++;
+
+ /* Fail if kernel returns valid for known bad csum.
+ * Do not fail if kernel does not validate a good csum:
+ * Absence of validation does not imply invalid.
+ */
+ if (recv_verify_packet_csum(&msg) && cfg_bad_csum) {
+ fprintf(stderr, "cmsg: expected bad csum, pf_packet returns valid\n");
+ bad_validations++;
+ }
+ }
+
+ if (bad_csums || bad_validations)
+ error(1, 0, "rx: errors at pf_packet: total=%lu bad_csums=%lu bad_valids=%lu\n",
+ total, bad_csums, bad_validations);
+
+ return total;
+}
+
+static void parse_args(int argc, char *const argv[])
+{
+ const char *daddr = NULL, *saddr = NULL;
+ int c;
+
+ while ((c = getopt(argc, argv, "46d:D:eEi:l:L:n:r:PRs:S:tTuUzZ")) != -1) {
+ switch (c) {
+ case '4':
+ cfg_family = PF_INET;
+ break;
+ case '6':
+ cfg_family = PF_INET6;
+ break;
+ case 'd':
+ cfg_mac_dst = optarg;
+ break;
+ case 'D':
+ daddr = optarg;
+ break;
+ case 'e':
+ cfg_encap = true;
+ break;
+ case 'E':
+ cfg_bad_csum = true;
+ break;
+ case 'i':
+ cfg_ifname = optarg;
+ break;
+ case 'l':
+ cfg_payload_len = strtol(optarg, NULL, 0);
+ break;
+ case 'L':
+ cfg_timeout_ms = strtol(optarg, NULL, 0) * 1000;
+ break;
+ case 'n':
+ cfg_num_pkt = strtol(optarg, NULL, 0);
+ break;
+ case 'r':
+ cfg_random_seed = strtol(optarg, NULL, 0);
+ break;
+ case 'P':
+ cfg_send_pfpacket = true;
+ break;
+ case 'R':
+ /* only Rx: used with two machine tests */
+ cfg_do_tx = false;
+ break;
+ case 's':
+ cfg_mac_src = optarg;
+ break;
+ case 'S':
+ saddr = optarg;
+ break;
+ case 't':
+ cfg_proto = IPPROTO_TCP;
+ break;
+ case 'T':
+ /* only Tx: used with two machine tests */
+ cfg_do_rx = false;
+ break;
+ case 'u':
+ cfg_proto = IPPROTO_UDP;
+ break;
+ case 'U':
+ /* send using real udp socket,
+ * to exercise tx checksum offload
+ */
+ cfg_send_udp = true;
+ break;
+ case 'z':
+ cfg_zero_disable = true;
+ break;
+ case 'Z':
+ cfg_zero_sum = true;
+ break;
+ default:
+ error(1, 0, "unknown arg %c", c);
+ }
+ }
+
+ if (!daddr || !saddr)
+ error(1, 0, "Must pass -D <daddr> and -S <saddr>");
+
+ if (cfg_do_tx && cfg_send_pfpacket && (!cfg_mac_src || !cfg_mac_dst))
+ error(1, 0, "Transmit with pf_packet requires mac addresses");
+
+ if (cfg_payload_len > MAX_PAYLOAD_LEN)
+ error(1, 0, "Payload length exceeds max");
+
+ if (cfg_proto != IPPROTO_UDP && (cfg_zero_sum || cfg_zero_disable))
+ error(1, 0, "Only UDP supports zero csum");
+
+ if (cfg_zero_sum && !cfg_send_udp)
+ error(1, 0, "Zero checksum conversion requires -U for tx csum offload");
+ if (cfg_zero_sum && cfg_bad_csum)
+ error(1, 0, "Cannot combine zero checksum conversion and invalid checksum");
+ if (cfg_zero_sum && cfg_random_seed)
+ error(1, 0, "Cannot combine zero checksum conversion with randomization");
+
+ if (cfg_family == PF_INET6) {
+ cfg_saddr6.sin6_port = htons(cfg_port_src);
+ cfg_daddr6.sin6_port = htons(cfg_port_dst);
+
+ if (inet_pton(cfg_family, daddr, &cfg_daddr6.sin6_addr) != 1)
+ error(1, errno, "Cannot parse ipv6 -D");
+ if (inet_pton(cfg_family, saddr, &cfg_saddr6.sin6_addr) != 1)
+ error(1, errno, "Cannot parse ipv6 -S");
+ } else {
+ cfg_saddr4.sin_port = htons(cfg_port_src);
+ cfg_daddr4.sin_port = htons(cfg_port_dst);
+
+ if (inet_pton(cfg_family, daddr, &cfg_daddr4.sin_addr) != 1)
+ error(1, errno, "Cannot parse ipv4 -D");
+ if (inet_pton(cfg_family, saddr, &cfg_saddr4.sin_addr) != 1)
+ error(1, errno, "Cannot parse ipv4 -S");
+ }
+
+ if (cfg_do_tx && cfg_random_seed) {
+ /* special case: time-based seed */
+ if (cfg_random_seed == 1)
+ cfg_random_seed = (unsigned int)gettimeofday_ms();
+ srand(cfg_random_seed);
+ fprintf(stderr, "randomization seed: %u\n", cfg_random_seed);
+ }
+}
+
+static void do_tx(void)
+{
+ static char _buf[MAX_HEADER_LEN + MAX_PAYLOAD_LEN];
+ char *buf;
+ int fd, len, i;
+
+ buf = build_packet(_buf, sizeof(_buf), &len);
+
+ if (cfg_send_pfpacket)
+ fd = open_packet();
+ else if (cfg_send_udp)
+ fd = open_inet(SOCK_DGRAM, 0);
+ else
+ fd = open_inet(SOCK_RAW, IPPROTO_RAW);
+
+ for (i = 0; i < cfg_num_pkt; i++) {
+ if (cfg_send_pfpacket)
+ send_packet(fd, buf, len);
+ else
+ send_inet(fd, buf, len);
+
+ /* randomize each packet individually to increase coverage */
+ if (cfg_random_seed) {
+ cfg_payload_len = rand() % MAX_PAYLOAD_LEN;
+ buf = build_packet(_buf, sizeof(_buf), &len);
+ }
+ }
+
+ if (close(fd))
+ error(1, errno, "close tx");
+}
+
+static void do_rx(int fdp, int fdr)
+{
+ unsigned long count_udp = 0, count_pkt = 0;
+ long tleft, tstop;
+ struct pollfd pfd;
+
+ tstop = gettimeofday_ms() + cfg_timeout_ms;
+ tleft = cfg_timeout_ms;
+
+ do {
+ pfd.events = POLLIN;
+ pfd.fd = fdp;
+ if (poll(&pfd, 1, tleft) == -1)
+ error(1, errno, "poll");
+
+ if (pfd.revents & POLLIN)
+ count_pkt += recv_packet(fdp);
+
+ if (cfg_proto == IPPROTO_UDP)
+ count_udp += recv_udp(fdr);
+
+ tleft = tstop - gettimeofday_ms();
+ } while (tleft > 0);
+
+ if (close(fdr))
+ error(1, errno, "close r");
+ if (close(fdp))
+ error(1, errno, "close p");
+
+ if (count_pkt < cfg_num_pkt)
+ error(1, 0, "rx: missing packets at pf_packet: %lu < %u",
+ count_pkt, cfg_num_pkt);
+
+ if (cfg_proto == IPPROTO_UDP) {
+ if (cfg_bad_csum && count_udp)
+ error(1, 0, "rx: unexpected packets at udp");
+ if (!cfg_bad_csum && !count_udp)
+ error(1, 0, "rx: missing packets at udp");
+ }
+}
+
+int main(int argc, char *const argv[])
+{
+ int fdp = -1, fdr = -1; /* -1 to silence -Wmaybe-uninitialized */
+
+ parse_args(argc, argv);
+
+ /* open receive sockets before transmitting */
+ if (cfg_do_rx) {
+ fdp = recv_prepare_packet();
+ fdr = recv_prepare_udp();
+ }
+
+ if (cfg_do_tx)
+ do_tx();
+
+ if (cfg_do_rx)
+ do_rx(fdp, fdr);
+
+ fprintf(stderr, "OK\n");
+ return 0;
+}
diff --git a/tools/testing/selftests/net/forwarding/bridge_igmp.sh b/tools/testing/selftests/net/forwarding/bridge_igmp.sh
index 1162836f8f32..2aa66d2a1702 100755
--- a/tools/testing/selftests/net/forwarding/bridge_igmp.sh
+++ b/tools/testing/selftests/net/forwarding/bridge_igmp.sh
@@ -96,9 +96,6 @@ cleanup()
switch_destroy
- # Always cleanup the mcast group
- ip address del dev $h2 $TEST_GROUP/32 2>&1 1>/dev/null
-
h2_destroy
h1_destroy
diff --git a/tools/testing/selftests/net/forwarding/bridge_locked_port.sh b/tools/testing/selftests/net/forwarding/bridge_locked_port.sh
index 5b02b6b60ce7..dc92d32464f6 100755
--- a/tools/testing/selftests/net/forwarding/bridge_locked_port.sh
+++ b/tools/testing/selftests/net/forwarding/bridge_locked_port.sh
@@ -1,7 +1,16 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
-ALL_TESTS="locked_port_ipv4 locked_port_ipv6 locked_port_vlan"
+ALL_TESTS="
+ locked_port_ipv4
+ locked_port_ipv6
+ locked_port_vlan
+ locked_port_mab
+ locked_port_mab_roam
+ locked_port_mab_config
+ locked_port_mab_flush
+"
+
NUM_NETIFS=4
CHECK_TC="no"
source lib.sh
@@ -166,6 +175,150 @@ locked_port_ipv6()
log_test "Locked port ipv6"
}
+locked_port_mab()
+{
+ RET=0
+ check_port_mab_support || return 0
+
+ ping_do $h1 192.0.2.2
+ check_err $? "Ping did not work before locking port"
+
+ bridge link set dev $swp1 learning on locked on
+
+ ping_do $h1 192.0.2.2
+ check_fail $? "Ping worked on a locked port without an FDB entry"
+
+ bridge fdb get `mac_get $h1` br br0 vlan 1 &> /dev/null
+ check_fail $? "FDB entry created before enabling MAB"
+
+ bridge link set dev $swp1 learning on locked on mab on
+
+ ping_do $h1 192.0.2.2
+ check_fail $? "Ping worked on MAB enabled port without an FDB entry"
+
+ bridge fdb get `mac_get $h1` br br0 vlan 1 | grep "dev $swp1" | grep -q "locked"
+ check_err $? "Locked FDB entry not created"
+
+ bridge fdb replace `mac_get $h1` dev $swp1 master static
+
+ ping_do $h1 192.0.2.2
+ check_err $? "Ping did not work after replacing FDB entry"
+
+ bridge fdb get `mac_get $h1` br br0 vlan 1 | grep "dev $swp1" | grep -q "locked"
+ check_fail $? "FDB entry marked as locked after replacement"
+
+ bridge fdb del `mac_get $h1` dev $swp1 master
+ bridge link set dev $swp1 learning off locked off mab off
+
+ log_test "Locked port MAB"
+}
+
+# Check that entries cannot roam to a locked port, but that entries can roam
+# to an unlocked port.
+locked_port_mab_roam()
+{
+ local mac=a0:b0:c0:c0:b0:a0
+
+ RET=0
+ check_port_mab_support || return 0
+
+ bridge link set dev $swp1 learning on locked on mab on
+
+ $MZ $h1 -q -c 5 -d 100msec -t udp -a $mac -b rand
+ bridge fdb get $mac br br0 vlan 1 | grep "dev $swp1" | grep -q "locked"
+ check_err $? "No locked entry on first injection"
+
+ $MZ $h2 -q -c 5 -d 100msec -t udp -a $mac -b rand
+ bridge fdb get $mac br br0 vlan 1 | grep -q "dev $swp2"
+ check_err $? "Entry did not roam to an unlocked port"
+
+ bridge fdb get $mac br br0 vlan 1 | grep -q "locked"
+ check_fail $? "Entry roamed with locked flag on"
+
+ $MZ $h1 -q -c 5 -d 100msec -t udp -a $mac -b rand
+ bridge fdb get $mac br br0 vlan 1 | grep -q "dev $swp1"
+ check_fail $? "Entry roamed back to locked port"
+
+ bridge fdb del $mac vlan 1 dev $swp2 master
+ bridge link set dev $swp1 learning off locked off mab off
+
+ log_test "Locked port MAB roam"
+}
+
+# Check that MAB can only be enabled on a port that is both locked and has
+# learning enabled.
+locked_port_mab_config()
+{
+ RET=0
+ check_port_mab_support || return 0
+
+ bridge link set dev $swp1 learning on locked off mab on &> /dev/null
+ check_fail $? "MAB enabled while port is unlocked"
+
+ bridge link set dev $swp1 learning off locked on mab on &> /dev/null
+ check_fail $? "MAB enabled while port has learning disabled"
+
+ bridge link set dev $swp1 learning on locked on mab on
+ check_err $? "Failed to enable MAB when port is locked and has learning enabled"
+
+ bridge link set dev $swp1 learning off locked off mab off
+
+ log_test "Locked port MAB configuration"
+}
+
+# Check that locked FDB entries are flushed from a port when MAB is disabled.
+locked_port_mab_flush()
+{
+ local locked_mac1=00:01:02:03:04:05
+ local unlocked_mac1=00:01:02:03:04:06
+ local locked_mac2=00:01:02:03:04:07
+ local unlocked_mac2=00:01:02:03:04:08
+
+ RET=0
+ check_port_mab_support || return 0
+
+ bridge link set dev $swp1 learning on locked on mab on
+ bridge link set dev $swp2 learning on locked on mab on
+
+ # Create regular and locked FDB entries on each port.
+ bridge fdb add $unlocked_mac1 dev $swp1 vlan 1 master static
+ bridge fdb add $unlocked_mac2 dev $swp2 vlan 1 master static
+
+ $MZ $h1 -q -c 5 -d 100msec -t udp -a $locked_mac1 -b rand
+ bridge fdb get $locked_mac1 br br0 vlan 1 | grep "dev $swp1" | \
+ grep -q "locked"
+ check_err $? "Failed to create locked FDB entry on first port"
+
+ $MZ $h2 -q -c 5 -d 100msec -t udp -a $locked_mac2 -b rand
+ bridge fdb get $locked_mac2 br br0 vlan 1 | grep "dev $swp2" | \
+ grep -q "locked"
+ check_err $? "Failed to create locked FDB entry on second port"
+
+ # Disable MAB on the first port and check that only the first locked
+ # FDB entry was flushed.
+ bridge link set dev $swp1 mab off
+
+ bridge fdb get $unlocked_mac1 br br0 vlan 1 &> /dev/null
+ check_err $? "Regular FDB entry on first port was flushed after disabling MAB"
+
+ bridge fdb get $unlocked_mac2 br br0 vlan 1 &> /dev/null
+ check_err $? "Regular FDB entry on second port was flushed after disabling MAB"
+
+ bridge fdb get $locked_mac1 br br0 vlan 1 &> /dev/null
+ check_fail $? "Locked FDB entry on first port was not flushed after disabling MAB"
+
+ bridge fdb get $locked_mac2 br br0 vlan 1 &> /dev/null
+ check_err $? "Locked FDB entry on second port was flushed after disabling MAB"
+
+ bridge fdb del $unlocked_mac2 dev $swp2 vlan 1 master static
+ bridge fdb del $unlocked_mac1 dev $swp1 vlan 1 master static
+
+ bridge link set dev $swp2 learning on locked off mab off
+ bridge link set dev $swp1 learning off locked off mab off
+
+ log_test "Locked port MAB FDB flush"
+}
+
trap cleanup EXIT
setup_prepare
diff --git a/tools/testing/selftests/net/forwarding/bridge_vlan_mcast.sh b/tools/testing/selftests/net/forwarding/bridge_vlan_mcast.sh
index 8748d1b1d95b..72dfbeaf56b9 100755
--- a/tools/testing/selftests/net/forwarding/bridge_vlan_mcast.sh
+++ b/tools/testing/selftests/net/forwarding/bridge_vlan_mcast.sh
@@ -59,6 +59,9 @@ switch_create()
switch_destroy()
{
+ tc qdisc del dev $swp2 clsact
+ tc qdisc del dev $swp1 clsact
+
ip link set dev $swp2 down
ip link set dev $swp1 down
diff --git a/tools/testing/selftests/net/forwarding/devlink_lib.sh b/tools/testing/selftests/net/forwarding/devlink_lib.sh
index 601990c6881b..f1de525cfa55 100644
--- a/tools/testing/selftests/net/forwarding/devlink_lib.sh
+++ b/tools/testing/selftests/net/forwarding/devlink_lib.sh
@@ -503,25 +503,30 @@ devlink_trap_drop_cleanup()
tc filter del dev $dev egress protocol $proto pref $pref handle $handle flower
}
-devlink_trap_stats_test()
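+# Run the "send one" command once and return 0 if the trap's rx packet
+# counter increased as a result.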
+devlink_trap_stats_check()
{
- local test_name=$1; shift
local trap_name=$1; shift
local send_one="$@"
local t0_packets
local t1_packets
- RET=0
-
t0_packets=$(devlink_trap_rx_packets_get $trap_name)
$send_one && sleep 1
t1_packets=$(devlink_trap_rx_packets_get $trap_name)
- if [[ $t1_packets -eq $t0_packets ]]; then
- check_err 1 "Trap stats did not increase"
- fi
+ [[ $t1_packets -ne $t0_packets ]]
+}
+
+devlink_trap_stats_test()
+{
+ local test_name=$1; shift
+
+ RET=0
+
+ devlink_trap_stats_check "$@"
+ check_err $? "Trap stats did not increase"
log_test "$test_name"
}
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index 3ffb9d6c0950..1c4f866de7d7 100755
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -137,6 +137,14 @@ check_locked_port_support()
fi
}
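+# Let callers skip gracefully when the installed iproute2 cannot configure
+# MAB (used as "check_port_mab_support || return 0").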
+check_port_mab_support()
+{
+ if ! bridge -d link show | grep -q "mab"; then
+ echo "SKIP: iproute2 too old; MacAuth feature not supported."
+ return $ksft_skip
+ fi
+}
+
if [[ "$(id -u)" -ne 0 ]]; then
echo "SKIP: need root privileges"
exit $ksft_skip
diff --git a/tools/testing/selftests/net/hsr/Makefile b/tools/testing/selftests/net/hsr/Makefile
new file mode 100644
index 000000000000..92c1d9d080cd
--- /dev/null
+++ b/tools/testing/selftests/net/hsr/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+top_srcdir = ../../../../..
+
+TEST_PROGS := hsr_ping.sh
+
+include ../../lib.mk
diff --git a/tools/testing/selftests/net/hsr/config b/tools/testing/selftests/net/hsr/config
new file mode 100644
index 000000000000..22061204fb69
--- /dev/null
+++ b/tools/testing/selftests/net/hsr/config
@@ -0,0 +1,4 @@
+CONFIG_IPV6=y
+CONFIG_NET_SCH_NETEM=m
+CONFIG_HSR=y
+CONFIG_VETH=y
diff --git a/tools/testing/selftests/net/hsr/hsr_ping.sh b/tools/testing/selftests/net/hsr/hsr_ping.sh
new file mode 100755
index 000000000000..df9143538708
--- /dev/null
+++ b/tools/testing/selftests/net/hsr/hsr_ping.sh
@@ -0,0 +1,256 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ret=0
+ksft_skip=4
+ipv6=true
+
+optstring="h4"
+usage() {
+ echo "Usage: $0 [OPTION]"
+ echo -e "\t-4: IPv4 only: disable IPv6 tests (default: test both IPv4 and IPv6)"
+}
+
+while getopts "$optstring" option;do
+ case "$option" in
+ "h")
+ usage $0
+ exit 0
+ ;;
+ "4")
+ ipv6=false
+ ;;
+ "?")
+ usage $0
+ exit 1
+ ;;
+esac
+done
+
+sec=$(date +%s)
+rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
+ns1="ns1-$rndh"
+ns2="ns2-$rndh"
+ns3="ns3-$rndh"
+
+cleanup()
+{
+ local netns
+ for netns in "$ns1" "$ns2" "$ns3" ;do
+ ip netns del $netns
+ done
+}
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+trap cleanup EXIT
+
+for i in "$ns1" "$ns2" "$ns3" ;do
+ ip netns add $i || exit $ksft_skip
+ ip -net $i link set lo up
+done
+
+echo "INFO: preparing interfaces."
+# Three HSR nodes. Each node has one link to each of its neighbours, two links in total.
+#
+# ns1eth1 ----- ns2eth1
+# hsr1 hsr2
+# ns1eth2 ns2eth2
+# | |
+# ns3eth1 ns3eth2
+# \ /
+# hsr3
+#
+# Interfaces
+ip link add ns1eth1 netns "$ns1" type veth peer name ns2eth1 netns "$ns2"
+ip link add ns1eth2 netns "$ns1" type veth peer name ns3eth1 netns "$ns3"
+ip link add ns3eth2 netns "$ns3" type veth peer name ns2eth2 netns "$ns2"
+
+# HSRv0.
+ip -net "$ns1" link add name hsr1 type hsr slave1 ns1eth1 slave2 ns1eth2 supervision 45 version 0 proto 0
+ip -net "$ns2" link add name hsr2 type hsr slave1 ns2eth1 slave2 ns2eth2 supervision 45 version 0 proto 0
+ip -net "$ns3" link add name hsr3 type hsr slave1 ns3eth1 slave2 ns3eth2 supervision 45 version 0 proto 0
+
+# IP for HSR
+ip -net "$ns1" addr add 100.64.0.1/24 dev hsr1
+ip -net "$ns1" addr add dead:beef:1::1/64 dev hsr1 nodad
+ip -net "$ns2" addr add 100.64.0.2/24 dev hsr2
+ip -net "$ns2" addr add dead:beef:1::2/64 dev hsr2 nodad
+ip -net "$ns3" addr add 100.64.0.3/24 dev hsr3
+ip -net "$ns3" addr add dead:beef:1::3/64 dev hsr3 nodad
+
+# All Links up
+ip -net "$ns1" link set ns1eth1 up
+ip -net "$ns1" link set ns1eth2 up
+ip -net "$ns1" link set hsr1 up
+
+ip -net "$ns2" link set ns2eth1 up
+ip -net "$ns2" link set ns2eth2 up
+ip -net "$ns2" link set hsr2 up
+
+ip -net "$ns3" link set ns3eth1 up
+ip -net "$ns3" link set ns3eth2 up
+ip -net "$ns3" link set hsr3 up
+
+# $1: IP address
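+# Succeeds iff the address contains a colon: "${1##*:*}" expands to the
+# empty string only when a ':' is present, e.g. is_v6 dead:beef::1 returns
+# 0 while is_v6 100.64.0.1 returns 1.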
+is_v6()
+{
+ [ -z "${1##*:*}" ]
+}
+
+do_ping()
+{
+ local netns="$1"
+ local connect_addr="$2"
+ local ping_args="-q -c 2"
+
+ if is_v6 "${connect_addr}"; then
+ $ipv6 || return 0
+ ping_args="${ping_args} -6"
+ fi
+
+ ip netns exec ${netns} ping ${ping_args} $connect_addr >/dev/null
+ if [ $? -ne 0 ] ; then
+ echo "$netns -> $connect_addr connectivity [ FAIL ]" 1>&2
+ ret=1
+ return 1
+ fi
+
+ return 0
+}
+
+do_ping_long()
+{
+ local netns="$1"
+ local connect_addr="$2"
+ local ping_args="-q -c 10"
+
+ if is_v6 "${connect_addr}"; then
+ $ipv6 || return 0
+ ping_args="${ping_args} -6"
+ fi
+
+ OUT="$(LANG=C ip netns exec ${netns} ping ${ping_args} $connect_addr | grep received)"
+ if [ $? -ne 0 ] ; then
+ echo "$netns -> $connect_addr ping [ FAIL ]" 1>&2
+ ret=1
+ return 1
+ fi
+
+ VAL="$(echo $OUT | cut -d' ' -f1-8)"
+ if [ "$VAL" != "10 packets transmitted, 10 received, 0% packet loss," ]
+ then
+ echo "$netns -> $connect_addr ping TEST [ FAIL ]"
+ echo "Expect to send and receive 10 packets and no duplicates."
+ echo "Full message: ${OUT}."
+ ret=1
+ return 1
+ fi
+
+ return 0
+}
+
+stop_if_error()
+{
+ local msg="$1"
+
+ if [ ${ret} -ne 0 ]; then
+ echo "FAIL: ${msg}" 1>&2
+ exit ${ret}
+ fi
+}
+
+
+echo "INFO: Initial validation ping."
+# Each node has to be able to reach every other node.
+do_ping "$ns1" 100.64.0.2
+do_ping "$ns2" 100.64.0.1
+do_ping "$ns3" 100.64.0.1
+stop_if_error "Initial validation failed."
+
+do_ping "$ns1" 100.64.0.3
+do_ping "$ns2" 100.64.0.3
+do_ping "$ns3" 100.64.0.2
+
+do_ping "$ns1" dead:beef:1::2
+do_ping "$ns1" dead:beef:1::3
+do_ping "$ns2" dead:beef:1::1
+do_ping "$ns2" dead:beef:1::2
+do_ping "$ns3" dead:beef:1::1
+do_ping "$ns3" dead:beef:1::2
+
+stop_if_error "Initial validation failed."
+
+# Wait until all supervision frames have been processed and the node
+# entries have been merged. Otherwise duplicate frames will be observed,
+# which is valid at this stage.
+WAIT=5
+while [ ${WAIT} -gt 0 ]
+do
+ grep 00:00:00:00:00:00 /sys/kernel/debug/hsr/hsr*/node_table
+ if [ $? -ne 0 ]
+ then
+ break
+ fi
+ sleep 1
+	WAIT=$((WAIT - 1))
+done
+
+# Just a safety delay in case the above check didn't handle it.
+sleep 1
+
+echo "INFO: Longer ping test."
+do_ping_long "$ns1" 100.64.0.2
+do_ping_long "$ns1" dead:beef:1::2
+do_ping_long "$ns1" 100.64.0.3
+do_ping_long "$ns1" dead:beef:1::3
+
+stop_if_error "Longer ping test failed."
+
+do_ping_long "$ns2" 100.64.0.1
+do_ping_long "$ns2" dead:beef:1::1
+do_ping_long "$ns2" 100.64.0.3
+do_ping_long "$ns2" dead:beef:1::2
+stop_if_error "Longer ping test failed."
+
+do_ping_long "$ns3" 100.64.0.1
+do_ping_long "$ns3" dead:beef:1::1
+do_ping_long "$ns3" 100.64.0.2
+do_ping_long "$ns3" dead:beef:1::2
+stop_if_error "Longer ping test failed."
+
+echo "INFO: Cutting one link."
+do_ping_long "$ns1" 100.64.0.3 &
+
+sleep 3
+ip -net "$ns3" link set ns3eth1 down
+wait
+
+ip -net "$ns3" link set ns3eth1 up
+
+stop_if_error "Failed with one link down."
+
+echo "INFO: Delay the link and drop a few packages."
+tc -net "$ns3" qdisc add dev ns3eth1 root netem delay 50ms
+tc -net "$ns2" qdisc add dev ns2eth1 root netem delay 5ms loss 25%
+
+do_ping_long "$ns1" 100.64.0.2
+do_ping_long "$ns1" 100.64.0.3
+
+stop_if_error "Failed with delay and packetloss."
+
+do_ping_long "$ns2" 100.64.0.1
+do_ping_long "$ns2" 100.64.0.3
+
+stop_if_error "Failed with delay and packetloss."
+
+do_ping_long "$ns3" 100.64.0.1
+do_ping_long "$ns3" 100.64.0.2
+stop_if_error "Failed with delay and packetloss."
+
+echo "INFO: All good."
+exit $ret
diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh
index 515859a5168b..24bcd7b9bdb2 100755
--- a/tools/testing/selftests/net/mptcp/diag.sh
+++ b/tools/testing/selftests/net/mptcp/diag.sh
@@ -1,6 +1,7 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
+sec=$(date +%s)
rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
ns="ns1-$rndh"
ksft_skip=4
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
index e54653ea2ed4..8a8266957bc5 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
@@ -83,6 +83,7 @@ struct cfg_cmsg_types {
struct cfg_sockopt_types {
unsigned int transparent:1;
+ unsigned int mptfo:1;
};
struct tcp_inq_state {
@@ -90,6 +91,13 @@ struct tcp_inq_state {
bool expect_eof;
};
+struct wstate {
+ char buf[8192];
+ unsigned int len;
+ unsigned int off;
+ unsigned int total_len;
+};
+
static struct tcp_inq_state tcp_inq;
static struct cfg_cmsg_types cfg_cmsg_types;
@@ -232,6 +240,14 @@ static void set_transparent(int fd, int pf)
}
}
+static void set_mptfo(int fd, int pf)
+{
+ int qlen = 25;
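+	/* For a listener, the TCP_FASTOPEN value is the maximum number of
+	 * pending TFO requests to queue; 25 is simply large enough for this
+	 * test.
+	 */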
+
+ if (setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen)) == -1)
+ perror("TCP_FASTOPEN");
+}
+
static int do_ulp_so(int sock, const char *name)
{
return setsockopt(sock, IPPROTO_TCP, TCP_ULP, name, strlen(name));
@@ -300,6 +316,9 @@ static int sock_listen_mptcp(const char * const listenaddr,
if (cfg_sockopt_types.transparent)
set_transparent(sock, pf);
+ if (cfg_sockopt_types.mptfo)
+ set_mptfo(sock, pf);
+
if (bind(sock, a->ai_addr, a->ai_addrlen) == 0)
break; /* success */
@@ -330,13 +349,15 @@ static int sock_listen_mptcp(const char * const listenaddr,
static int sock_connect_mptcp(const char * const remoteaddr,
const char * const port, int proto,
- struct addrinfo **peer)
+ struct addrinfo **peer,
+ int infd, struct wstate *winfo)
{
struct addrinfo hints = {
.ai_protocol = IPPROTO_TCP,
.ai_socktype = SOCK_STREAM,
};
struct addrinfo *a, *addr;
+ int syn_copied = 0;
int sock = -1;
hints.ai_family = pf;
@@ -354,14 +375,34 @@ static int sock_connect_mptcp(const char * const remoteaddr,
if (cfg_mark)
set_mark(sock, cfg_mark);
- if (connect(sock, a->ai_addr, a->ai_addrlen) == 0) {
- *peer = a;
- break; /* success */
+ if (cfg_sockopt_types.mptfo) {
+ if (!winfo->total_len)
+ winfo->total_len = winfo->len = read(infd, winfo->buf,
+ sizeof(winfo->buf));
+
+ syn_copied = sendto(sock, winfo->buf, winfo->len, MSG_FASTOPEN,
+ a->ai_addr, a->ai_addrlen);
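+			/* MSG_FASTOPEN connects implicitly and carries up to
+			 * syn_copied bytes of data in the SYN; the unsent
+			 * remainder stays in winfo and is flushed later.
+			 */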
+ if (syn_copied >= 0) {
+ winfo->off = syn_copied;
+ winfo->len -= syn_copied;
+ *peer = a;
+ break; /* success */
+ }
+ } else {
+ if (connect(sock, a->ai_addr, a->ai_addrlen) == 0) {
+ *peer = a;
+ break; /* success */
+ }
+ }
+ if (cfg_sockopt_types.mptfo) {
+ perror("sendto()");
+ close(sock);
+ sock = -1;
+ } else {
+ perror("connect()");
+ close(sock);
+ sock = -1;
}
-
- perror("connect()");
- close(sock);
- sock = -1;
}
freeaddrinfo(addr);
@@ -571,14 +612,14 @@ static void shut_wr(int fd)
shutdown(fd, SHUT_WR);
}
-static int copyfd_io_poll(int infd, int peerfd, int outfd, bool *in_closed_after_out)
+static int copyfd_io_poll(int infd, int peerfd, int outfd,
+ bool *in_closed_after_out, struct wstate *winfo)
{
struct pollfd fds = {
.fd = peerfd,
.events = POLLIN | POLLOUT,
};
- unsigned int woff = 0, wlen = 0, total_wlen = 0, total_rlen = 0;
- char wbuf[8192];
+ unsigned int total_wlen = 0, total_rlen = 0;
set_nonblock(peerfd, true);
@@ -638,19 +679,19 @@ static int copyfd_io_poll(int infd, int peerfd, int outfd, bool *in_closed_after
}
if (fds.revents & POLLOUT) {
- if (wlen == 0) {
- woff = 0;
- wlen = read(infd, wbuf, sizeof(wbuf));
+ if (winfo->len == 0) {
+ winfo->off = 0;
+ winfo->len = read(infd, winfo->buf, sizeof(winfo->buf));
}
- if (wlen > 0) {
+ if (winfo->len > 0) {
ssize_t bw;
/* limit the total amount of written data to the trunc value */
- if (cfg_truncate > 0 && wlen + total_wlen > cfg_truncate)
- wlen = cfg_truncate - total_wlen;
+ if (cfg_truncate > 0 && winfo->len + total_wlen > cfg_truncate)
+ winfo->len = cfg_truncate - total_wlen;
- bw = do_rnd_write(peerfd, wbuf + woff, wlen);
+ bw = do_rnd_write(peerfd, winfo->buf + winfo->off, winfo->len);
if (bw < 0) {
if (cfg_rcv_trunc)
return 0;
@@ -658,10 +699,10 @@ static int copyfd_io_poll(int infd, int peerfd, int outfd, bool *in_closed_after
return 111;
}
- woff += bw;
- wlen -= bw;
+ winfo->off += bw;
+ winfo->len -= bw;
total_wlen += bw;
- } else if (wlen == 0) {
+ } else if (winfo->len == 0) {
/* We have no more data to send. */
fds.events &= ~POLLOUT;
@@ -717,10 +758,26 @@ static int do_recvfile(int infd, int outfd)
return (int)r;
}
-static int do_mmap(int infd, int outfd, unsigned int size)
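+/* Write out whatever is still pending in winfo (data read for the TFO SYN
+ * but not yet transmitted) before a bulk transfer starts. Returns 0 on
+ * success or a positive error code, hence the "if (ret)" checks below.
+ */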
+static int spool_buf(int fd, struct wstate *winfo)
+{
+ while (winfo->len) {
+ int ret = write(fd, winfo->buf + winfo->off, winfo->len);
+
+ if (ret < 0) {
+ perror("write");
+ return 4;
+ }
+ winfo->off += ret;
+ winfo->len -= ret;
+ }
+ return 0;
+}
+
+static int do_mmap(int infd, int outfd, unsigned int size,
+ struct wstate *winfo)
{
char *inbuf = mmap(NULL, size, PROT_READ, MAP_SHARED, infd, 0);
- ssize_t ret = 0, off = 0;
+ ssize_t ret = 0, off = winfo->total_len;
size_t rem;
if (inbuf == MAP_FAILED) {
@@ -728,7 +785,11 @@ static int do_mmap(int infd, int outfd, unsigned int size)
return 1;
}
- rem = size;
+ ret = spool_buf(outfd, winfo);
+	if (ret)
+ return ret;
+
+ rem = size - winfo->total_len;
while (rem > 0) {
ret = write(outfd, inbuf + off, rem);
@@ -772,8 +833,16 @@ static int get_infd_size(int fd)
return (int)count;
}
-static int do_sendfile(int infd, int outfd, unsigned int count)
+static int do_sendfile(int infd, int outfd, unsigned int count,
+ struct wstate *winfo)
{
+ int ret = spool_buf(outfd, winfo);
+
+	if (ret)
+ return ret;
+
+ count -= winfo->total_len;
+
while (count > 0) {
ssize_t r;
@@ -790,7 +859,8 @@ static int do_sendfile(int infd, int outfd, unsigned int count)
}
static int copyfd_io_mmap(int infd, int peerfd, int outfd,
- unsigned int size, bool *in_closed_after_out)
+ unsigned int size, bool *in_closed_after_out,
+ struct wstate *winfo)
{
int err;
@@ -799,9 +869,9 @@ static int copyfd_io_mmap(int infd, int peerfd, int outfd,
if (err)
return err;
- err = do_mmap(infd, peerfd, size);
+ err = do_mmap(infd, peerfd, size, winfo);
} else {
- err = do_mmap(infd, peerfd, size);
+ err = do_mmap(infd, peerfd, size, winfo);
if (err)
return err;
@@ -815,7 +885,7 @@ static int copyfd_io_mmap(int infd, int peerfd, int outfd,
}
static int copyfd_io_sendfile(int infd, int peerfd, int outfd,
- unsigned int size, bool *in_closed_after_out)
+ unsigned int size, bool *in_closed_after_out, struct wstate *winfo)
{
int err;
@@ -824,9 +894,9 @@ static int copyfd_io_sendfile(int infd, int peerfd, int outfd,
if (err)
return err;
- err = do_sendfile(infd, peerfd, size);
+ err = do_sendfile(infd, peerfd, size, winfo);
} else {
- err = do_sendfile(infd, peerfd, size);
+ err = do_sendfile(infd, peerfd, size, winfo);
if (err)
return err;
@@ -839,7 +909,7 @@ static int copyfd_io_sendfile(int infd, int peerfd, int outfd,
return err;
}
-static int copyfd_io(int infd, int peerfd, int outfd, bool close_peerfd)
+static int copyfd_io(int infd, int peerfd, int outfd, bool close_peerfd, struct wstate *winfo)
{
bool in_closed_after_out = false;
struct timespec start, end;
@@ -851,21 +921,24 @@ static int copyfd_io(int infd, int peerfd, int outfd, bool close_peerfd)
switch (cfg_mode) {
case CFG_MODE_POLL:
- ret = copyfd_io_poll(infd, peerfd, outfd, &in_closed_after_out);
+ ret = copyfd_io_poll(infd, peerfd, outfd, &in_closed_after_out,
+ winfo);
break;
case CFG_MODE_MMAP:
file_size = get_infd_size(infd);
if (file_size < 0)
return file_size;
- ret = copyfd_io_mmap(infd, peerfd, outfd, file_size, &in_closed_after_out);
+ ret = copyfd_io_mmap(infd, peerfd, outfd, file_size,
+ &in_closed_after_out, winfo);
break;
case CFG_MODE_SENDFILE:
file_size = get_infd_size(infd);
if (file_size < 0)
return file_size;
- ret = copyfd_io_sendfile(infd, peerfd, outfd, file_size, &in_closed_after_out);
+ ret = copyfd_io_sendfile(infd, peerfd, outfd, file_size,
+ &in_closed_after_out, winfo);
break;
default:
@@ -999,6 +1072,7 @@ static void maybe_close(int fd)
int main_loop_s(int listensock)
{
struct sockaddr_storage ss;
+ struct wstate winfo;
struct pollfd polls;
socklen_t salen;
int remotesock;
@@ -1033,7 +1107,8 @@ again:
SOCK_TEST_TCPULP(remotesock, 0);
- copyfd_io(fd, remotesock, 1, true);
+ memset(&winfo, 0, sizeof(winfo));
+ copyfd_io(fd, remotesock, 1, true, &winfo);
} else {
perror("accept");
return 1;
@@ -1130,6 +1205,11 @@ static void parse_setsock_options(const char *name)
return;
}
+ if (strncmp(name, "MPTFO", len) == 0) {
+ cfg_sockopt_types.mptfo = 1;
+ return;
+ }
+
fprintf(stderr, "Unrecognized setsockopt option %s\n", name);
exit(1);
}
@@ -1166,11 +1246,18 @@ void xdisconnect(int fd, int addrlen)
int main_loop(void)
{
- int fd, ret, fd_in = 0;
+ int fd = 0, ret, fd_in = 0;
struct addrinfo *peer;
+ struct wstate winfo;
+
+ if (cfg_input && cfg_sockopt_types.mptfo) {
+ fd_in = open(cfg_input, O_RDONLY);
+	if (fd_in < 0)
+ xerror("can't open %s:%d", cfg_input, errno);
+ }
- /* listener is ready. */
- fd = sock_connect_mptcp(cfg_host, cfg_port, cfg_sock_proto, &peer);
+ memset(&winfo, 0, sizeof(winfo));
+ fd = sock_connect_mptcp(cfg_host, cfg_port, cfg_sock_proto, &peer, fd_in, &winfo);
if (fd < 0)
return 2;
@@ -1186,14 +1273,13 @@ again:
if (cfg_cmsg_types.cmsg_enabled)
apply_cmsg_types(fd, &cfg_cmsg_types);
- if (cfg_input) {
+ if (cfg_input && !cfg_sockopt_types.mptfo) {
fd_in = open(cfg_input, O_RDONLY);
if (fd < 0)
xerror("can't open %s:%d", cfg_input, errno);
}
- /* close the client socket open only if we are not going to reconnect */
- ret = copyfd_io(fd_in, fd, 1, 0);
+ ret = copyfd_io(fd_in, fd, 1, 0, &winfo);
if (ret)
return ret;
@@ -1210,6 +1296,7 @@ again:
xerror("can't reconnect: %d", errno);
if (cfg_input)
close(fd_in);
+ memset(&winfo, 0, sizeof(winfo));
goto again;
} else {
close(fd);
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
index 621af6895f4d..a43d3e2f59bb 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
@@ -274,8 +274,7 @@ check_transfer()
check_mptcp_disabled()
{
- local disabled_ns
- disabled_ns="ns_disabled-$sech-$(mktemp -u XXXXXX)"
+ local disabled_ns="ns_disabled-$rndh"
ip netns add ${disabled_ns} || exit $ksft_skip
# net.mptcp.enabled should be enabled by default
@@ -762,9 +761,25 @@ run_tests_peekmode()
run_tests_lo "$ns1" "$ns1" dead:beef:1::1 1 "-P ${peekmode}"
}
+run_tests_mptfo()
+{
+ echo "INFO: with MPTFO start"
+ ip netns exec "$ns1" sysctl -q net.ipv4.tcp_fastopen=2
+ ip netns exec "$ns2" sysctl -q net.ipv4.tcp_fastopen=1
+
+ run_tests_lo "$ns1" "$ns2" 10.0.1.1 0 "-o MPTFO"
+ run_tests_lo "$ns1" "$ns2" 10.0.1.1 0 "-o MPTFO"
+
+ run_tests_lo "$ns1" "$ns2" dead:beef:1::1 0 "-o MPTFO"
+ run_tests_lo "$ns1" "$ns2" dead:beef:1::1 0 "-o MPTFO"
+
+ ip netns exec "$ns1" sysctl -q net.ipv4.tcp_fastopen=0
+ ip netns exec "$ns2" sysctl -q net.ipv4.tcp_fastopen=0
+ echo "INFO: with MPTFO end"
+}
+
run_tests_disconnect()
{
- local peekmode="$1"
local old_cin=$cin
local old_sin=$sin
@@ -772,7 +787,6 @@ run_tests_disconnect()
# force do_transfer to cope with the multiple transmissions
sin="$cin.disconnect"
- sin_disconnect=$old_sin
cin="$cin.disconnect"
cin_disconnect="$old_cin"
connect_per_transfer=3
@@ -783,7 +797,6 @@ run_tests_disconnect()
# restore previous status
sin=$old_sin
- sin_disconnect="$cout".disconnect
cin=$old_cin
cin_disconnect="$cin".disconnect
connect_per_transfer=1
@@ -901,6 +914,10 @@ run_tests_peekmode "saveWithPeek"
run_tests_peekmode "saveAfterPeek"
stop_if_error "Tests with peek mode have failed"
+# MPTFO (Multipath TCP Fastopen) tests
+run_tests_mptfo
+stop_if_error "Tests with MPTFO have failed"
+
# connect to ns4 ip address, ns2 should intercept/proxy
run_test_transparent 10.0.3.1 "tproxy ipv4"
run_test_transparent dead:beef:3::1 "tproxy ipv6"
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index 2eeaf4aca644..d11d3d566608 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -26,6 +26,10 @@ ip_mptcp=0
check_invert=0
validate_checksum=0
init=0
+evts_ns1=""
+evts_ns2=""
+evts_ns1_pid=0
+evts_ns2_pid=0
declare -A all_tests
declare -a only_tests_ids
@@ -59,8 +63,9 @@ init_partial()
{
capout=$(mktemp)
- local rndh
- rndh=$(mktemp -u XXXXXX)
+ local sec rndh
+ sec=$(date +%s)
+ rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
ns1="ns1-$rndh"
ns2="ns2-$rndh"
@@ -153,6 +158,8 @@ init() {
cin=$(mktemp)
cinsent=$(mktemp)
cout=$(mktemp)
+ evts_ns1=$(mktemp)
+ evts_ns2=$(mktemp)
trap cleanup EXIT
@@ -164,6 +171,7 @@ cleanup()
{
rm -f "$cin" "$cout" "$sinfail"
rm -f "$sin" "$sout" "$cinsent" "$cinfail"
+ rm -rf $evts_ns1 $evts_ns2
cleanup_partial
}
@@ -319,6 +327,18 @@ reset_with_fail()
index 100 || exit 1
}
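+# Like reset(), but also start a pm_nl_ctl event monitor in each namespace;
+# the pids are recorded so that kill_events_pids() can stop them later.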
+reset_with_events()
+{
+ reset "${1}" || return 1
+
+ :> "$evts_ns1"
+ :> "$evts_ns2"
+ ip netns exec $ns1 ./pm_nl_ctl events >> "$evts_ns1" 2>&1 &
+ evts_ns1_pid=$!
+ ip netns exec $ns2 ./pm_nl_ctl events >> "$evts_ns2" 2>&1 &
+ evts_ns2_pid=$!
+}
+
fail_test()
{
ret=1
@@ -472,6 +492,12 @@ kill_wait()
wait $1 2>/dev/null
}
+kill_events_pids()
+{
+ kill_wait $evts_ns1_pid
+ kill_wait $evts_ns2_pid
+}
+
pm_nl_set_limits()
{
local ns=$1
@@ -672,10 +698,6 @@ do_transfer()
local port=$((10000 + TEST_COUNT - 1))
local cappid
local userspace_pm=0
- local evts_ns1
- local evts_ns1_pid
- local evts_ns2
- local evts_ns2_pid
:> "$cout"
:> "$sout"
@@ -752,17 +774,6 @@ do_transfer()
addr_nr_ns2=${addr_nr_ns2:9}
fi
- if [ $userspace_pm -eq 1 ]; then
- evts_ns1=$(mktemp)
- evts_ns2=$(mktemp)
- :> "$evts_ns1"
- :> "$evts_ns2"
- ip netns exec ${listener_ns} ./pm_nl_ctl events >> "$evts_ns1" 2>&1 &
- evts_ns1_pid=$!
- ip netns exec ${connector_ns} ./pm_nl_ctl events >> "$evts_ns2" 2>&1 &
- evts_ns2_pid=$!
- fi
-
local local_addr
if is_v6 "${connect_addr}"; then
local_addr="::"
@@ -829,7 +840,8 @@ do_transfer()
if [ $userspace_pm -eq 0 ]; then
pm_nl_add_endpoint $ns1 $addr flags signal
else
- tk=$(sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$evts_ns1")
+ tk=$(grep "type:1," "$evts_ns1" |
+ sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q')
ip netns exec ${listener_ns} ./pm_nl_ctl ann $addr token $tk id $id
sleep 1
ip netns exec ${listener_ns} ./pm_nl_ctl rem token $tk id $id
@@ -980,12 +992,6 @@ do_transfer()
kill $cappid
fi
- if [ $userspace_pm -eq 1 ]; then
- kill_wait $evts_ns1_pid
- kill_wait $evts_ns2_pid
- rm -rf $evts_ns1 $evts_ns2
- fi
-
NSTAT_HISTORY=/tmp/${listener_ns}.nstat ip netns exec ${listener_ns} \
nstat | grep Tcp > /tmp/${listener_ns}.out
NSTAT_HISTORY=/tmp/${connector_ns}.nstat ip netns exec ${connector_ns} \
@@ -2507,6 +2513,57 @@ backup_tests()
fi
}
+LISTENER_CREATED=15 #MPTCP_EVENT_LISTENER_CREATED
+LISTENER_CLOSED=16 #MPTCP_EVENT_LISTENER_CLOSED
+
+AF_INET=2
+AF_INET6=10
+
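+# Check that $evt contains a listener event of type $e_type and that its
+# family, source address and source port match the expected values.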
+verify_listener_events()
+{
+ local evt=$1
+ local e_type=$2
+ local e_family=$3
+ local e_saddr=$4
+ local e_sport=$5
+ local type
+ local family
+ local saddr
+ local sport
+
+ if [ $e_type = $LISTENER_CREATED ]; then
+ stdbuf -o0 -e0 printf "\t\t\t\t\t CREATE_LISTENER %s:%s"\
+ $e_saddr $e_sport
+ elif [ $e_type = $LISTENER_CLOSED ]; then
+ stdbuf -o0 -e0 printf "\t\t\t\t\t CLOSE_LISTENER %s:%s "\
+ $e_saddr $e_sport
+ fi
+
+ type=$(grep "type:$e_type," $evt |
+ sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q')
+ family=$(grep "type:$e_type," $evt |
+ sed --unbuffered -n 's/.*\(family:\)\([[:digit:]]*\).*$/\2/p;q')
+ sport=$(grep "type:$e_type," $evt |
+ sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q')
+ if [ $family ] && [ $family = $AF_INET6 ]; then
+ saddr=$(grep "type:$e_type," $evt |
+ sed --unbuffered -n 's/.*\(saddr6:\)\([0-9a-f:.]*\).*$/\2/p;q')
+ else
+ saddr=$(grep "type:$e_type," $evt |
+ sed --unbuffered -n 's/.*\(saddr4:\)\([0-9.]*\).*$/\2/p;q')
+ fi
+
+ if [ $type ] && [ $type = $e_type ] &&
+ [ $family ] && [ $family = $e_family ] &&
+ [ $saddr ] && [ $saddr = $e_saddr ] &&
+ [ $sport ] && [ $sport = $e_sport ]; then
+ stdbuf -o0 -e0 printf "[ ok ]\n"
+ return 0
+ fi
+ fail_test
+ stdbuf -o0 -e0 printf "[fail]\n"
+}
+
add_addr_ports_tests()
{
# signal address with port
@@ -2531,7 +2588,8 @@ add_addr_ports_tests()
fi
# single address with port, remove
- if reset "remove single address with port"; then
+ # pm listener events
+ if reset_with_events "remove single address with port"; then
pm_nl_set_limits $ns1 0 1
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal port 10100
pm_nl_set_limits $ns2 1 1
@@ -2539,6 +2597,10 @@ add_addr_ports_tests()
chk_join_nr 1 1 1
chk_add_nr 1 1 1
chk_rm_nr 1 1 invert
+
+ verify_listener_events $evts_ns1 $LISTENER_CREATED $AF_INET 10.0.2.1 10100
+ verify_listener_events $evts_ns1 $LISTENER_CLOSED $AF_INET 10.0.2.1 10100
+ kill_events_pids
fi
# subflow and signal with port, remove
@@ -2959,22 +3021,24 @@ userspace_tests()
fi
# userspace pm add & remove address
- if reset "userspace pm add & remove address"; then
+ if reset_with_events "userspace pm add & remove address"; then
set_userspace_pm $ns1
pm_nl_set_limits $ns2 1 1
run_tests $ns1 $ns2 10.0.1.1 0 userspace_1 0 slow
chk_join_nr 1 1 1
chk_add_nr 1 1
chk_rm_nr 1 1 invert
+ kill_events_pids
fi
# userspace pm create destroy subflow
- if reset "userspace pm create destroy subflow"; then
+ if reset_with_events "userspace pm create destroy subflow"; then
set_userspace_pm $ns2
pm_nl_set_limits $ns1 0 1
run_tests $ns1 $ns2 10.0.1.1 0 0 userspace_1 slow
chk_join_nr 1 1 1
chk_rm_nr 0 1
+ kill_events_pids
fi
}
diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
index 80d36f7cfee8..1b70c0a304ce 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
@@ -10,13 +10,19 @@ ksft_skip=4
timeout_poll=30
timeout_test=$((timeout_poll * 2 + 1))
mptcp_connect=""
-do_all_tests=1
+
+sec=$(date +%s)
+rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
+ns1="ns1-$rndh"
+ns2="ns2-$rndh"
+ns_sbox="ns_sbox-$rndh"
add_mark_rules()
{
local ns=$1
local m=$2
+ local t
for t in iptables ip6tables; do
# just to debug: check we have multiple subflows connection requests
ip netns exec $ns $t -A OUTPUT -p tcp --syn -m mark --mark $m -j ACCEPT
@@ -31,12 +37,7 @@ add_mark_rules()
init()
{
- rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
-
- ns1="ns1-$rndh"
- ns2="ns2-$rndh"
- ns_sbox="ns_sbox-$rndh"
-
+ local netns
for netns in "$ns1" "$ns2" "$ns_sbox";do
ip netns add $netns || exit $ksft_skip
ip -net $netns link set lo up
@@ -45,6 +46,7 @@ init()
ip netns exec $netns sysctl -q net.ipv4.conf.default.rp_filter=0
done
+ local i
for i in `seq 1 4`; do
ip link add ns1eth$i netns "$ns1" type veth peer name ns2eth$i netns "$ns2"
ip -net "$ns1" addr add 10.0.$i.1/24 dev ns1eth$i
@@ -74,6 +76,7 @@ init()
cleanup()
{
+ local netns
for netns in "$ns1" "$ns2" "$ns_sbox"; do
ip netns del $netns
done
@@ -104,15 +107,17 @@ check_mark()
local ns=$1
local af=$2
- tables=iptables
+ local tables=iptables
if [ $af -eq 6 ];then
tables=ip6tables
fi
+ local counters values
counters=$(ip netns exec $ns $tables -v -L OUTPUT | grep DROP)
values=${counters%DROP*}
+ local v
for v in $values; do
if [ $v -ne 0 ]; then
echo "FAIL: got $tables $values in ns $ns , not 0 - not all expected packets marked" 1>&2
@@ -132,9 +137,9 @@ print_file_err()
check_transfer()
{
- in=$1
- out=$2
- what=$3
+ local in=$1
+ local out=$2
+ local what=$3
cmp "$in" "$out" > /dev/null 2>&1
if [ $? -ne 0 ] ;then
@@ -157,18 +162,18 @@ is_v6()
do_transfer()
{
- listener_ns="$1"
- connector_ns="$2"
- cl_proto="$3"
- srv_proto="$4"
- connect_addr="$5"
+ local listener_ns="$1"
+ local connector_ns="$2"
+ local cl_proto="$3"
+ local srv_proto="$4"
+ local connect_addr="$5"
- port=12001
+ local port=12001
:> "$cout"
:> "$sout"
- mptcp_connect="./mptcp_connect -r 20"
+ local mptcp_connect="./mptcp_connect -r 20"
local local_addr
if is_v6 "${connect_addr}"; then
@@ -181,7 +186,7 @@ do_transfer()
ip netns exec ${listener_ns} \
$mptcp_connect -t ${timeout_poll} -l -M 1 -p $port -s ${srv_proto} -c TIMESTAMPNS,TCPINQ \
${local_addr} < "$sin" > "$sout" &
- spid=$!
+ local spid=$!
sleep 1
@@ -190,12 +195,12 @@ do_transfer()
$mptcp_connect -t ${timeout_poll} -M 2 -p $port -s ${cl_proto} -c TIMESTAMPNS,TCPINQ \
$connect_addr < "$cin" > "$cout" &
- cpid=$!
+ local cpid=$!
wait $cpid
- retc=$?
+ local retc=$?
wait $spid
- rets=$?
+ local rets=$?
if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ]; then
echo " client exit code $retc, server $rets" 1>&2
@@ -230,9 +235,9 @@ do_transfer()
make_file()
{
- name=$1
- who=$2
- size=$3
+ local name=$1
+ local who=$2
+ local size=$3
dd if=/dev/urandom of="$name" bs=1024 count=$size 2> /dev/null
echo -e "\nMPTCP_TEST_FILE_END_MARKER" >> "$name"
@@ -265,9 +270,9 @@ do_mptcp_sockopt_tests()
run_tests()
{
- listener_ns="$1"
- connector_ns="$2"
- connect_addr="$3"
+ local listener_ns="$1"
+ local connector_ns="$2"
+ local connect_addr="$3"
local lret=0
do_transfer ${listener_ns} ${connector_ns} MPTCP MPTCP ${connect_addr}
@@ -282,8 +287,8 @@ run_tests()
do_tcpinq_test()
{
- ip netns exec "$ns1" ./mptcp_inq "$@"
- lret=$?
+ ip netns exec "$ns_sbox" ./mptcp_inq "$@"
+ local lret=$?
if [ $lret -ne 0 ];then
ret=$lret
echo "FAIL: mptcp_inq $@" 1>&2
@@ -298,9 +303,7 @@ do_tcpinq_tests()
{
local lret=0
- ip netns exec "$ns1" iptables -F
- ip netns exec "$ns1" ip6tables -F
-
+ local args
for args in "-t tcp" "-r tcp"; do
do_tcpinq_test $args
lret=$?
diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh
index 40aeb5a71a2a..9f22f7e5027d 100755
--- a/tools/testing/selftests/net/mptcp/simult_flows.sh
+++ b/tools/testing/selftests/net/mptcp/simult_flows.sh
@@ -1,6 +1,7 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
+sec=$(date +%s)
rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
ns1="ns1-$rndh"
ns2="ns2-$rndh"
@@ -148,9 +149,6 @@ do_transfer()
:> "$sout"
:> "$capout"
- local addr_port
- addr_port=$(printf "%s:%d" ${connect_addr} ${port})
-
if $capture; then
local capuser
if [ -z $SUDO_USER ] ; then
@@ -173,7 +171,7 @@ do_transfer()
timeout ${timeout_test} \
ip netns exec ${ns3} \
- ./mptcp_connect -jt ${timeout_poll} -l -p $port -T $time \
+ ./mptcp_connect -jt ${timeout_poll} -l -p $port -T $max_time \
0.0.0.0 < "$sin" > "$sout" &
local spid=$!
@@ -181,7 +179,7 @@ do_transfer()
timeout ${timeout_test} \
ip netns exec ${ns1} \
- ./mptcp_connect -jt ${timeout_poll} -p $port -T $time \
+ ./mptcp_connect -jt ${timeout_poll} -p $port -T $max_time \
10.0.3.3 < "$cin" > "$cout" &
local cpid=$!
diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh
index 3229725b64b0..a29deb9fa024 100755
--- a/tools/testing/selftests/net/mptcp/userspace_pm.sh
+++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh
@@ -11,11 +11,17 @@ ANNOUNCED=6 # MPTCP_EVENT_ANNOUNCED
REMOVED=7 # MPTCP_EVENT_REMOVED
SUB_ESTABLISHED=10 # MPTCP_EVENT_SUB_ESTABLISHED
SUB_CLOSED=11 # MPTCP_EVENT_SUB_CLOSED
+LISTENER_CREATED=15 #MPTCP_EVENT_LISTENER_CREATED
+LISTENER_CLOSED=16 #MPTCP_EVENT_LISTENER_CLOSED
AF_INET=2
AF_INET6=10
-evts_pid=0
+file=""
+server_evts=""
+client_evts=""
+server_evts_pid=0
+client_evts_pid=0
client4_pid=0
server4_pid=0
client6_pid=0
@@ -33,7 +39,7 @@ client_addr_id=${RANDOM:0:2}
server_addr_id=${RANDOM:0:2}
sec=$(date +%s)
-rndh=$(stdbuf -o0 -e0 printf %x "$sec")-$(mktemp -u XXXXXX)
+rndh=$(printf %x "$sec")-$(mktemp -u XXXXXX)
ns1="ns1-$rndh"
ns2="ns2-$rndh"
@@ -47,7 +53,7 @@ cleanup()
{
echo "cleanup"
- rm -rf $file
+ rm -rf $file $client_evts $server_evts
# Terminate the MPTCP connection and related processes
if [ $client4_pid -ne 0 ]; then
@@ -62,8 +68,11 @@ cleanup()
if [ $server6_pid -ne 0 ]; then
kill_wait $server6_pid
fi
- if [ $evts_pid -ne 0 ]; then
- kill_wait $evts_pid
+ if [ $server_evts_pid -ne 0 ]; then
+ kill_wait $server_evts_pid
+ fi
+ if [ $client_evts_pid -ne 0 ]; then
+ kill_wait $client_evts_pid
fi
local netns
for netns in "$ns1" "$ns2" ;do
@@ -113,8 +122,9 @@ make_file()
make_connection()
{
- local file
- file=$(mktemp)
+ if [ -z "$file" ]; then
+ file=$(mktemp)
+ fi
make_file "$file" "client"
local is_v6=$1
@@ -132,16 +142,24 @@ make_connection()
# Capture netlink events over the two network namespaces running
# the MPTCP client and server
- local client_evts
- client_evts=$(mktemp)
+ if [ -z "$client_evts" ]; then
+ client_evts=$(mktemp)
+ fi
:>"$client_evts"
+ if [ $client_evts_pid -ne 0 ]; then
+ kill_wait $client_evts_pid
+ fi
ip netns exec "$ns2" ./pm_nl_ctl events >> "$client_evts" 2>&1 &
- local client_evts_pid=$!
- local server_evts
- server_evts=$(mktemp)
+ client_evts_pid=$!
+ if [ -z "$server_evts" ]; then
+ server_evts=$(mktemp)
+ fi
:>"$server_evts"
+ if [ $server_evts_pid -ne 0 ]; then
+ kill_wait $server_evts_pid
+ fi
ip netns exec "$ns1" ./pm_nl_ctl events >> "$server_evts" 2>&1 &
- local server_evts_pid=$!
+ server_evts_pid=$!
sleep 0.5
# Run the server
@@ -159,7 +177,6 @@ make_connection()
sleep 1
# Capture client/server attributes from MPTCP connection netlink events
- kill_wait $client_evts_pid
local client_token
local client_port
@@ -171,11 +188,10 @@ make_connection()
client_port=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$client_evts")
client_serverside=$(sed --unbuffered -n 's/.*\(server_side:\)\([[:digit:]]*\).*$/\2/p;q'\
"$client_evts")
- kill_wait $server_evts_pid
- server_token=$(sed --unbuffered -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$server_evts")
- server_serverside=$(sed --unbuffered -n 's/.*\(server_side:\)\([[:digit:]]*\).*$/\2/p;q'\
- "$server_evts")
- rm -f "$client_evts" "$server_evts" "$file"
+ server_token=$(grep "type:1," "$server_evts" |
+ sed --unbuffered -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q')
+ server_serverside=$(grep "type:1," "$server_evts" |
+ sed --unbuffered -n 's/.*\(server_side:\)\([[:digit:]]*\).*$/\2/p;q')
if [ "$client_token" != "" ] && [ "$server_token" != "" ] && [ "$client_serverside" = 0 ] &&
[ "$server_serverside" = 1 ]
@@ -239,13 +255,8 @@ verify_announce_event()
test_announce()
{
- local evts
- evts=$(mktemp)
# Capture events on the network namespace running the server
- :>"$evts"
- ip netns exec "$ns1" ./pm_nl_ctl events >> "$evts" 2>&1 &
- evts_pid=$!
- sleep 0.5
+ :>"$server_evts"
# ADD_ADDR using an invalid token should result in no action
local invalid_token=$(( client4_token - 1))
@@ -253,7 +264,7 @@ test_announce()
$client_addr_id dev ns2eth1 > /dev/null 2>&1
local type
- type=$(sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q' "$evts")
+ type=$(sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q' "$server_evts")
stdbuf -o0 -e0 printf "ADD_ADDR 10.0.2.2 (ns2) => ns1, invalid token \t\t"
if [ "$type" = "" ]
then
@@ -264,71 +275,63 @@ test_announce()
fi
# ADD_ADDR from the client to server machine reusing the subflow port
- :>"$evts"
+ :>"$server_evts"
ip netns exec "$ns2"\
./pm_nl_ctl ann 10.0.2.2 token "$client4_token" id $client_addr_id dev\
ns2eth1 > /dev/null 2>&1
stdbuf -o0 -e0 printf "ADD_ADDR id:%d 10.0.2.2 (ns2) => ns1, reuse port \t\t" $client_addr_id
sleep 0.5
- verify_announce_event "$evts" "$ANNOUNCED" "$server4_token" "10.0.2.2" "$client_addr_id"\
+ verify_announce_event $server_evts $ANNOUNCED $server4_token "10.0.2.2" $client_addr_id \
"$client4_port"
# ADD_ADDR6 from the client to server machine reusing the subflow port
- :>"$evts"
+ :>"$server_evts"
ip netns exec "$ns2" ./pm_nl_ctl ann\
dead:beef:2::2 token "$client6_token" id $client_addr_id dev ns2eth1 > /dev/null 2>&1
stdbuf -o0 -e0 printf "ADD_ADDR6 id:%d dead:beef:2::2 (ns2) => ns1, reuse port\t\t" $client_addr_id
sleep 0.5
- verify_announce_event "$evts" "$ANNOUNCED" "$server6_token" "dead:beef:2::2"\
+ verify_announce_event "$server_evts" "$ANNOUNCED" "$server6_token" "dead:beef:2::2"\
"$client_addr_id" "$client6_port" "v6"
# ADD_ADDR from the client to server machine using a new port
- :>"$evts"
+ :>"$server_evts"
client_addr_id=$((client_addr_id+1))
ip netns exec "$ns2" ./pm_nl_ctl ann 10.0.2.2 token "$client4_token" id\
$client_addr_id dev ns2eth1 port $new4_port > /dev/null 2>&1
stdbuf -o0 -e0 printf "ADD_ADDR id:%d 10.0.2.2 (ns2) => ns1, new port \t\t\t" $client_addr_id
sleep 0.5
- verify_announce_event "$evts" "$ANNOUNCED" "$server4_token" "10.0.2.2"\
+ verify_announce_event "$server_evts" "$ANNOUNCED" "$server4_token" "10.0.2.2"\
"$client_addr_id" "$new4_port"
- kill_wait $evts_pid
-
# Capture events on the network namespace running the client
- :>"$evts"
- ip netns exec "$ns2" ./pm_nl_ctl events >> "$evts" 2>&1 &
- evts_pid=$!
- sleep 0.5
+ :>"$client_evts"
# ADD_ADDR from the server to client machine reusing the subflow port
ip netns exec "$ns1" ./pm_nl_ctl ann 10.0.2.1 token "$server4_token" id\
$server_addr_id dev ns1eth2 > /dev/null 2>&1
stdbuf -o0 -e0 printf "ADD_ADDR id:%d 10.0.2.1 (ns1) => ns2, reuse port \t\t" $server_addr_id
sleep 0.5
- verify_announce_event "$evts" "$ANNOUNCED" "$client4_token" "10.0.2.1"\
+ verify_announce_event "$client_evts" "$ANNOUNCED" "$client4_token" "10.0.2.1"\
"$server_addr_id" "$app4_port"
# ADD_ADDR6 from the server to client machine reusing the subflow port
- :>"$evts"
+ :>"$client_evts"
ip netns exec "$ns1" ./pm_nl_ctl ann dead:beef:2::1 token "$server6_token" id\
$server_addr_id dev ns1eth2 > /dev/null 2>&1
stdbuf -o0 -e0 printf "ADD_ADDR6 id:%d dead:beef:2::1 (ns1) => ns2, reuse port\t\t" $server_addr_id
sleep 0.5
- verify_announce_event "$evts" "$ANNOUNCED" "$client6_token" "dead:beef:2::1"\
+ verify_announce_event "$client_evts" "$ANNOUNCED" "$client6_token" "dead:beef:2::1"\
"$server_addr_id" "$app6_port" "v6"
# ADD_ADDR from the server to client machine using a new port
- :>"$evts"
+ :>"$client_evts"
server_addr_id=$((server_addr_id+1))
ip netns exec "$ns1" ./pm_nl_ctl ann 10.0.2.1 token "$server4_token" id\
$server_addr_id dev ns1eth2 port $new4_port > /dev/null 2>&1
stdbuf -o0 -e0 printf "ADD_ADDR id:%d 10.0.2.1 (ns1) => ns2, new port \t\t\t" $server_addr_id
sleep 0.5
- verify_announce_event "$evts" "$ANNOUNCED" "$client4_token" "10.0.2.1"\
+ verify_announce_event "$client_evts" "$ANNOUNCED" "$client4_token" "10.0.2.1"\
"$server_addr_id" "$new4_port"
-
- kill_wait $evts_pid
- rm -f "$evts"
}
verify_remove_event()
@@ -356,14 +359,8 @@ verify_remove_event()
test_remove()
{
- local evts
- evts=$(mktemp)
-
# Capture events on the network namespace running the server
- :>"$evts"
- ip netns exec "$ns1" ./pm_nl_ctl events >> "$evts" 2>&1 &
- evts_pid=$!
- sleep 0.5
+ :>"$server_evts"
# RM_ADDR using an invalid token should result in no action
local invalid_token=$(( client4_token - 1 ))
@@ -372,7 +369,7 @@ test_remove()
stdbuf -o0 -e0 printf "RM_ADDR id:%d ns2 => ns1, invalid token \t"\
$client_addr_id
local type
- type=$(sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q' "$evts")
+ type=$(sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q' "$server_evts")
if [ "$type" = "" ]
then
stdbuf -o0 -e0 printf "[OK]\n"
@@ -386,7 +383,7 @@ test_remove()
$invalid_id > /dev/null 2>&1
stdbuf -o0 -e0 printf "RM_ADDR id:%d ns2 => ns1, invalid id \t"\
$invalid_id
- type=$(sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q' "$evts")
+ type=$(sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q' "$server_evts")
if [ "$type" = "" ]
then
stdbuf -o0 -e0 printf "[OK]\n"
@@ -395,40 +392,35 @@ test_remove()
fi
# RM_ADDR from the client to server machine
- :>"$evts"
+ :>"$server_evts"
ip netns exec "$ns2" ./pm_nl_ctl rem token "$client4_token" id\
$client_addr_id > /dev/null 2>&1
stdbuf -o0 -e0 printf "RM_ADDR id:%d ns2 => ns1 \t"\
$client_addr_id
sleep 0.5
- verify_remove_event "$evts" "$REMOVED" "$server4_token" "$client_addr_id"
+ verify_remove_event "$server_evts" "$REMOVED" "$server4_token" "$client_addr_id"
# RM_ADDR from the client to server machine
- :>"$evts"
+ :>"$server_evts"
client_addr_id=$(( client_addr_id - 1 ))
ip netns exec "$ns2" ./pm_nl_ctl rem token "$client4_token" id\
$client_addr_id > /dev/null 2>&1
stdbuf -o0 -e0 printf "RM_ADDR id:%d ns2 => ns1 \t"\
$client_addr_id
sleep 0.5
- verify_remove_event "$evts" "$REMOVED" "$server4_token" "$client_addr_id"
+ verify_remove_event "$server_evts" "$REMOVED" "$server4_token" "$client_addr_id"
# RM_ADDR6 from the client to server machine
- :>"$evts"
+ :>"$server_evts"
ip netns exec "$ns2" ./pm_nl_ctl rem token "$client6_token" id\
$client_addr_id > /dev/null 2>&1
stdbuf -o0 -e0 printf "RM_ADDR6 id:%d ns2 => ns1 \t"\
$client_addr_id
sleep 0.5
- verify_remove_event "$evts" "$REMOVED" "$server6_token" "$client_addr_id"
-
- kill_wait $evts_pid
+ verify_remove_event "$server_evts" "$REMOVED" "$server6_token" "$client_addr_id"
# Capture events on the network namespace running the client
- :>"$evts"
- ip netns exec "$ns2" ./pm_nl_ctl events >> "$evts" 2>&1 &
- evts_pid=$!
- sleep 0.5
+ :>"$client_evts"
# RM_ADDR from the server to client machine
ip netns exec "$ns1" ./pm_nl_ctl rem token "$server4_token" id\
@@ -436,27 +428,24 @@ test_remove()
stdbuf -o0 -e0 printf "RM_ADDR id:%d ns1 => ns2 \t"\
$server_addr_id
sleep 0.5
- verify_remove_event "$evts" "$REMOVED" "$client4_token" "$server_addr_id"
+ verify_remove_event "$client_evts" "$REMOVED" "$client4_token" "$server_addr_id"
# RM_ADDR from the server to client machine
- :>"$evts"
+ :>"$client_evts"
server_addr_id=$(( server_addr_id - 1 ))
ip netns exec "$ns1" ./pm_nl_ctl rem token "$server4_token" id\
$server_addr_id > /dev/null 2>&1
stdbuf -o0 -e0 printf "RM_ADDR id:%d ns1 => ns2 \t" $server_addr_id
sleep 0.5
- verify_remove_event "$evts" "$REMOVED" "$client4_token" "$server_addr_id"
+ verify_remove_event "$client_evts" "$REMOVED" "$client4_token" "$server_addr_id"
# RM_ADDR6 from the server to client machine
- :>"$evts"
+ :>"$client_evts"
ip netns exec "$ns1" ./pm_nl_ctl rem token "$server6_token" id\
$server_addr_id > /dev/null 2>&1
stdbuf -o0 -e0 printf "RM_ADDR6 id:%d ns1 => ns2 \t" $server_addr_id
sleep 0.5
- verify_remove_event "$evts" "$REMOVED" "$client6_token" "$server_addr_id"
-
- kill_wait $evts_pid
- rm -f "$evts"
+ verify_remove_event "$client_evts" "$REMOVED" "$client6_token" "$server_addr_id"
}
verify_subflow_events()
@@ -532,13 +521,8 @@ verify_subflow_events()
test_subflows()
{
- local evts
- evts=$(mktemp)
# Capture events on the network namespace running the server
- :>"$evts"
- ip netns exec "$ns1" ./pm_nl_ctl events >> "$evts" 2>&1 &
- evts_pid=$!
- sleep 0.5
+ :>"$server_evts"
# Attempt to add a listener at 10.0.2.2:<subflow-port>
ip netns exec "$ns2" ./pm_nl_ctl listen 10.0.2.2\
@@ -551,25 +535,25 @@ test_subflows()
sleep 0.5
# CREATE_SUBFLOW from server to client machine
- :>"$evts"
+ :>"$server_evts"
ip netns exec "$ns1" ./pm_nl_ctl csf lip 10.0.2.1 lid 23 rip 10.0.2.2\
rport "$client4_port" token "$server4_token" > /dev/null 2>&1
sleep 0.5
- verify_subflow_events "$evts" "$SUB_ESTABLISHED" "$server4_token" "$AF_INET" "10.0.2.1"\
+ verify_subflow_events $server_evts $SUB_ESTABLISHED $server4_token $AF_INET "10.0.2.1" \
"10.0.2.2" "$client4_port" "23" "$client_addr_id" "ns1" "ns2"
# Delete the listener from the client ns, if one was created
kill_wait $listener_pid
local sport
- sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$evts")
+ sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$server_evts")
# DESTROY_SUBFLOW from server to client machine
- :>"$evts"
+ :>"$server_evts"
ip netns exec "$ns1" ./pm_nl_ctl dsf lip 10.0.2.1 lport "$sport" rip 10.0.2.2 rport\
"$client4_port" token "$server4_token" > /dev/null 2>&1
sleep 0.5
- verify_subflow_events "$evts" "$SUB_CLOSED" "$server4_token" "$AF_INET" "10.0.2.1"\
+ verify_subflow_events "$server_evts" "$SUB_CLOSED" "$server4_token" "$AF_INET" "10.0.2.1"\
"10.0.2.2" "$client4_port" "23" "$client_addr_id" "ns1" "ns2"
# RM_ADDR from client to server machine
@@ -583,31 +567,31 @@ test_subflows()
listener_pid=$!
# ADD_ADDR6 from client to server machine reusing the subflow port
- :>"$evts"
+ :>"$server_evts"
ip netns exec "$ns2" ./pm_nl_ctl ann dead:beef:2::2 token "$client6_token" id\
$client_addr_id > /dev/null 2>&1
sleep 0.5
# CREATE_SUBFLOW6 from server to client machine
- :>"$evts"
+ :>"$server_evts"
ip netns exec "$ns1" ./pm_nl_ctl csf lip dead:beef:2::1 lid 23 rip\
dead:beef:2::2 rport "$client6_port" token "$server6_token" > /dev/null 2>&1
sleep 0.5
- verify_subflow_events "$evts" "$SUB_ESTABLISHED" "$server6_token" "$AF_INET6"\
+ verify_subflow_events "$server_evts" "$SUB_ESTABLISHED" "$server6_token" "$AF_INET6"\
"dead:beef:2::1" "dead:beef:2::2" "$client6_port" "23"\
"$client_addr_id" "ns1" "ns2"
# Delete the listener from the client ns, if one was created
kill_wait $listener_pid
- sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$evts")
+ sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$server_evts")
# DESTROY_SUBFLOW6 from server to client machine
- :>"$evts"
+ :>"$server_evts"
ip netns exec "$ns1" ./pm_nl_ctl dsf lip dead:beef:2::1 lport "$sport" rip\
dead:beef:2::2 rport "$client6_port" token "$server6_token" > /dev/null 2>&1
sleep 0.5
- verify_subflow_events "$evts" "$SUB_CLOSED" "$server6_token" "$AF_INET6"\
+ verify_subflow_events "$server_evts" "$SUB_CLOSED" "$server6_token" "$AF_INET6"\
"dead:beef:2::1" "dead:beef:2::2" "$client6_port" "23"\
"$client_addr_id" "ns1" "ns2"
@@ -622,44 +606,39 @@ test_subflows()
listener_pid=$!
# ADD_ADDR from client to server machine using a new port
- :>"$evts"
+ :>"$server_evts"
ip netns exec "$ns2" ./pm_nl_ctl ann 10.0.2.2 token "$client4_token" id\
$client_addr_id port $new4_port > /dev/null 2>&1
sleep 0.5
# CREATE_SUBFLOW from server to client machine
- :>"$evts"
+ :>"$server_evts"
ip netns exec "$ns1" ./pm_nl_ctl csf lip 10.0.2.1 lid 23 rip 10.0.2.2 rport\
$new4_port token "$server4_token" > /dev/null 2>&1
sleep 0.5
- verify_subflow_events "$evts" "$SUB_ESTABLISHED" "$server4_token" "$AF_INET"\
+ verify_subflow_events "$server_evts" "$SUB_ESTABLISHED" "$server4_token" "$AF_INET"\
"10.0.2.1" "10.0.2.2" "$new4_port" "23"\
"$client_addr_id" "ns1" "ns2"
# Delete the listener from the client ns, if one was created
kill_wait $listener_pid
- sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$evts")
+ sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$server_evts")
# DESTROY_SUBFLOW from server to client machine
- :>"$evts"
+ :>"$server_evts"
ip netns exec "$ns1" ./pm_nl_ctl dsf lip 10.0.2.1 lport "$sport" rip 10.0.2.2 rport\
$new4_port token "$server4_token" > /dev/null 2>&1
sleep 0.5
- verify_subflow_events "$evts" "$SUB_CLOSED" "$server4_token" "$AF_INET" "10.0.2.1"\
+ verify_subflow_events "$server_evts" "$SUB_CLOSED" "$server4_token" "$AF_INET" "10.0.2.1"\
"10.0.2.2" "$new4_port" "23" "$client_addr_id" "ns1" "ns2"
# RM_ADDR from client to server machine
ip netns exec "$ns2" ./pm_nl_ctl rem id $client_addr_id token\
"$client4_token" > /dev/null 2>&1
- kill_wait $evts_pid
-
# Capture events on the network namespace running the client
- :>"$evts"
- ip netns exec "$ns2" ./pm_nl_ctl events >> "$evts" 2>&1 &
- evts_pid=$!
- sleep 0.5
+ :>"$client_evts"
# Attempt to add a listener at 10.0.2.1:<subflow-port>
ip netns exec "$ns1" ./pm_nl_ctl listen 10.0.2.1\
@@ -672,24 +651,24 @@ test_subflows()
sleep 0.5
# CREATE_SUBFLOW from client to server machine
- :>"$evts"
+ :>"$client_evts"
ip netns exec "$ns2" ./pm_nl_ctl csf lip 10.0.2.2 lid 23 rip 10.0.2.1 rport\
$app4_port token "$client4_token" > /dev/null 2>&1
sleep 0.5
- verify_subflow_events "$evts" "$SUB_ESTABLISHED" "$client4_token" "$AF_INET" "10.0.2.2"\
+ verify_subflow_events $client_evts $SUB_ESTABLISHED $client4_token $AF_INET "10.0.2.2"\
"10.0.2.1" "$app4_port" "23" "$server_addr_id" "ns2" "ns1"
# Delete the listener from the server ns, if one was created
kill_wait $listener_pid
- sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$evts")
+ sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$client_evts")
# DESTROY_SUBFLOW from client to server machine
- :>"$evts"
+ :>"$client_evts"
ip netns exec "$ns2" ./pm_nl_ctl dsf lip 10.0.2.2 lport "$sport" rip 10.0.2.1 rport\
$app4_port token "$client4_token" > /dev/null 2>&1
sleep 0.5
- verify_subflow_events "$evts" "$SUB_CLOSED" "$client4_token" "$AF_INET" "10.0.2.2"\
+ verify_subflow_events "$client_evts" "$SUB_CLOSED" "$client4_token" "$AF_INET" "10.0.2.2"\
"10.0.2.1" "$app4_port" "23" "$server_addr_id" "ns2" "ns1"
# RM_ADDR from server to client machine
@@ -703,17 +682,17 @@ test_subflows()
listener_pid=$!
# ADD_ADDR6 from server to client machine reusing the subflow port
- :>"$evts"
+ :>"$client_evts"
ip netns exec "$ns1" ./pm_nl_ctl ann dead:beef:2::1 token "$server6_token" id\
$server_addr_id > /dev/null 2>&1
sleep 0.5
# CREATE_SUBFLOW6 from client to server machine
- :>"$evts"
+ :>"$client_evts"
ip netns exec "$ns2" ./pm_nl_ctl csf lip dead:beef:2::2 lid 23 rip\
dead:beef:2::1 rport $app6_port token "$client6_token" > /dev/null 2>&1
sleep 0.5
- verify_subflow_events "$evts" "$SUB_ESTABLISHED" "$client6_token"\
+ verify_subflow_events "$client_evts" "$SUB_ESTABLISHED" "$client6_token"\
"$AF_INET6" "dead:beef:2::2"\
"dead:beef:2::1" "$app6_port" "23"\
"$server_addr_id" "ns2" "ns1"
@@ -721,14 +700,14 @@ test_subflows()
# Delete the listener from the server ns, if one was created
kill_wait $listener_pid
- sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$evts")
+ sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$client_evts")
# DESTROY_SUBFLOW6 from client to server machine
- :>"$evts"
+ :>"$client_evts"
ip netns exec "$ns2" ./pm_nl_ctl dsf lip dead:beef:2::2 lport "$sport" rip\
dead:beef:2::1 rport $app6_port token "$client6_token" > /dev/null 2>&1
sleep 0.5
- verify_subflow_events "$evts" "$SUB_CLOSED" "$client6_token" "$AF_INET6" "dead:beef:2::2"\
+ verify_subflow_events $client_evts $SUB_CLOSED $client6_token $AF_INET6 "dead:beef:2::2"\
"dead:beef:2::1" "$app6_port" "23" "$server_addr_id" "ns2" "ns1"
# RM_ADDR6 from server to client machine
@@ -742,38 +721,35 @@ test_subflows()
listener_pid=$!
# ADD_ADDR from server to client machine using a new port
- :>"$evts"
+ :>"$client_evts"
ip netns exec "$ns1" ./pm_nl_ctl ann 10.0.2.1 token "$server4_token" id\
$server_addr_id port $new4_port > /dev/null 2>&1
sleep 0.5
# CREATE_SUBFLOW from client to server machine
- :>"$evts"
+ :>"$client_evts"
ip netns exec "$ns2" ./pm_nl_ctl csf lip 10.0.2.2 lid 23 rip 10.0.2.1 rport\
$new4_port token "$client4_token" > /dev/null 2>&1
sleep 0.5
- verify_subflow_events "$evts" "$SUB_ESTABLISHED" "$client4_token" "$AF_INET"\
+ verify_subflow_events "$client_evts" "$SUB_ESTABLISHED" "$client4_token" "$AF_INET"\
"10.0.2.2" "10.0.2.1" "$new4_port" "23" "$server_addr_id" "ns2" "ns1"
# Delete the listener from the server ns, if one was created
kill_wait $listener_pid
- sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$evts")
+ sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$client_evts")
# DESTROY_SUBFLOW from client to server machine
- :>"$evts"
+ :>"$client_evts"
ip netns exec "$ns2" ./pm_nl_ctl dsf lip 10.0.2.2 lport "$sport" rip 10.0.2.1 rport\
$new4_port token "$client4_token" > /dev/null 2>&1
sleep 0.5
- verify_subflow_events "$evts" "$SUB_CLOSED" "$client4_token" "$AF_INET" "10.0.2.2"\
+ verify_subflow_events "$client_evts" "$SUB_CLOSED" "$client4_token" "$AF_INET" "10.0.2.2"\
"10.0.2.1" "$new4_port" "23" "$server_addr_id" "ns2" "ns1"
# RM_ADDR from server to client machine
ip netns exec "$ns1" ./pm_nl_ctl rem id $server_addr_id token\
"$server4_token" > /dev/null 2>&1
-
- kill_wait $evts_pid
- rm -f "$evts"
}
test_prio()
@@ -807,11 +783,85 @@ test_prio()
fi
}
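+# Same listener-event check as in mptcp_join.sh, but a mismatch aborts the
+# whole script instead of only marking the test as failed.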
+verify_listener_events()
+{
+ local evt=$1
+ local e_type=$2
+ local e_family=$3
+ local e_saddr=$4
+ local e_sport=$5
+ local type
+ local family
+ local saddr
+ local sport
+
+ if [ $e_type = $LISTENER_CREATED ]; then
+ stdbuf -o0 -e0 printf "CREATE_LISTENER %s:%s\t\t\t\t\t"\
+ $e_saddr $e_sport
+ elif [ $e_type = $LISTENER_CLOSED ]; then
+ stdbuf -o0 -e0 printf "CLOSE_LISTENER %s:%s\t\t\t\t\t"\
+ $e_saddr $e_sport
+ fi
+
+ type=$(grep "type:$e_type," $evt |
+ sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q')
+ family=$(grep "type:$e_type," $evt |
+ sed --unbuffered -n 's/.*\(family:\)\([[:digit:]]*\).*$/\2/p;q')
+ sport=$(grep "type:$e_type," $evt |
+ sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q')
+ if [ $family ] && [ $family = $AF_INET6 ]; then
+ saddr=$(grep "type:$e_type," $evt |
+ sed --unbuffered -n 's/.*\(saddr6:\)\([0-9a-f:.]*\).*$/\2/p;q')
+ else
+ saddr=$(grep "type:$e_type," $evt |
+ sed --unbuffered -n 's/.*\(saddr4:\)\([0-9.]*\).*$/\2/p;q')
+ fi
+
+ if [ $type ] && [ $type = $e_type ] &&
+ [ $family ] && [ $family = $e_family ] &&
+ [ $saddr ] && [ $saddr = $e_saddr ] &&
+ [ $sport ] && [ $sport = $e_sport ]; then
+ stdbuf -o0 -e0 printf "[OK]\n"
+ return 0
+ fi
+ stdbuf -o0 -e0 printf "[FAIL]\n"
+ exit 1
+}
+
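+# Open and close a listening socket via pm_nl_ctl and verify that matching
+# LISTENER_CREATED and LISTENER_CLOSED events are generated.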
+test_listener()
+{
+ # Capture events on the network namespace running the client
+ :>$client_evts
+
+ # Attempt to add a listener at 10.0.2.2:<subflow-port>
+ ip netns exec $ns2 ./pm_nl_ctl listen 10.0.2.2\
+ $client4_port > /dev/null 2>&1 &
+ local listener_pid=$!
+
+ verify_listener_events $client_evts $LISTENER_CREATED $AF_INET 10.0.2.2 $client4_port
+
+ # ADD_ADDR from client to server machine reusing the subflow port
+ ip netns exec $ns2 ./pm_nl_ctl ann 10.0.2.2 token $client4_token id\
+ $client_addr_id > /dev/null 2>&1
+ sleep 0.5
+
+ # CREATE_SUBFLOW from server to client machine
+ ip netns exec $ns1 ./pm_nl_ctl csf lip 10.0.2.1 lid 23 rip 10.0.2.2\
+ rport $client4_port token $server4_token > /dev/null 2>&1
+ sleep 0.5
+
+ # Delete the listener from the client ns, if one was created
+ kill_wait $listener_pid
+
+ verify_listener_events $client_evts $LISTENER_CLOSED $AF_INET 10.0.2.2 $client4_port
+}
+
make_connection
make_connection "v6"
test_announce
test_remove
test_subflows
test_prio
+test_listener
exit 0
diff --git a/tools/testing/selftests/net/sctp_hello.c b/tools/testing/selftests/net/sctp_hello.c
new file mode 100644
index 000000000000..f02f1f95d227
--- /dev/null
+++ b/tools/testing/selftests/net/sctp_hello.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
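+/* Fill *ss and *len from ip/port strings; ss->ss_family must be set by the
+ * caller beforehand (for AF_INET it overlays sin_family, so the v4 branch
+ * does not set the family again).
+ */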
+static void set_addr(struct sockaddr_storage *ss, char *ip, char *port, int *len)
+{
+ if (ss->ss_family == AF_INET) {
+ struct sockaddr_in *a = (struct sockaddr_in *)ss;
+
+ a->sin_addr.s_addr = inet_addr(ip);
+ a->sin_port = htons(atoi(port));
+ *len = sizeof(*a);
+ } else {
+ struct sockaddr_in6 *a = (struct sockaddr_in6 *)ss;
+
+ a->sin6_family = AF_INET6;
+ inet_pton(AF_INET6, ip, &a->sin6_addr);
+ a->sin6_port = htons(atoi(port));
+ *len = sizeof(*a);
+ }
+}
+
+static int do_client(int argc, char *argv[])
+{
+ struct sockaddr_storage ss;
+ char buf[] = "hello";
+ int csk, ret, len;
+
+ if (argc < 5) {
+ printf("%s client -4|6 IP PORT [IP PORT]\n", argv[0]);
+ return -1;
+ }
+
+ bzero((void *)&ss, sizeof(ss));
+ ss.ss_family = !strcmp(argv[2], "-4") ? AF_INET : AF_INET6;
+ csk = socket(ss.ss_family, SOCK_STREAM, IPPROTO_SCTP);
+ if (csk < 0) {
+ printf("failed to create socket\n");
+ return -1;
+ }
+
+ if (argc >= 7) {
+ set_addr(&ss, argv[5], argv[6], &len);
+ ret = bind(csk, (struct sockaddr *)&ss, len);
+ if (ret < 0) {
+ printf("failed to bind to address\n");
+ return -1;
+ }
+ }
+
+ set_addr(&ss, argv[3], argv[4], &len);
+ ret = connect(csk, (struct sockaddr *)&ss, len);
+ if (ret < 0) {
+ printf("failed to connect to peer\n");
+ return -1;
+ }
+
+ ret = send(csk, buf, strlen(buf) + 1, 0);
+ if (ret < 0) {
+ printf("failed to send msg %d\n", ret);
+ return -1;
+ }
+ close(csk);
+
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ struct sockaddr_storage ss;
+ int lsk, csk, ret, len;
+ char buf[20];
+
+ if (argc < 2 || (strcmp(argv[1], "server") && strcmp(argv[1], "client"))) {
+ printf("%s server|client ...\n", argv[0]);
+ return -1;
+ }
+
+ if (!strcmp(argv[1], "client"))
+ return do_client(argc, argv);
+
+ if (argc < 5) {
+ printf("%s server -4|6 IP PORT [IFACE]\n", argv[0]);
+ return -1;
+ }
+
+ ss.ss_family = !strcmp(argv[2], "-4") ? AF_INET : AF_INET6;
+ lsk = socket(ss.ss_family, SOCK_STREAM, IPPROTO_SCTP);
+ if (lsk < 0) {
+ printf("failed to create lsk\n");
+ return -1;
+ }
+
+ if (argc >= 6) {
+ ret = setsockopt(lsk, SOL_SOCKET, SO_BINDTODEVICE,
+ argv[5], strlen(argv[5]) + 1);
+ if (ret < 0) {
+ printf("failed to bind to device\n");
+ return -1;
+ }
+ }
+
+ set_addr(&ss, argv[3], argv[4], &len);
+ ret = bind(lsk, (struct sockaddr *)&ss, len);
+ if (ret < 0) {
+ printf("failed to bind to address\n");
+ return -1;
+ }
+
+ ret = listen(lsk, 5);
+ if (ret < 0) {
+ printf("failed to listen on port\n");
+ return -1;
+ }
+
+ csk = accept(lsk, (struct sockaddr *)NULL, (socklen_t *)NULL);
+ if (csk < 0) {
+ printf("failed to accept new client\n");
+ return -1;
+ }
+
+ ret = recv(csk, buf, sizeof(buf), 0);
+ if (ret <= 0) {
+ printf("failed to recv msg %d\n", ret);
+ return -1;
+ }
+ close(csk);
+ close(lsk);
+
+ return 0;
+}
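
sctp_hello.c above is a one-shot SCTP client/server pair. As a cross-check of the flow it implements (socket, bind, listen, accept, recv on the server; socket, optional bind, connect, send on the client), here is a rough Python sketch of the same exchange. It assumes a kernel with SCTP support and relies on Python's IPPROTO_SCTP constant; it is an illustration, not part of the patch.

import socket

def server(ip="127.0.0.1", port=1234):
    lsk = socket.socket(socket.AF_INET, socket.SOCK_STREAM,
                        socket.IPPROTO_SCTP)
    lsk.bind((ip, port))
    lsk.listen(5)
    csk, _ = lsk.accept()
    data = csk.recv(20)      # expect b"hello\x00" (strlen + 1, as in the C code)
    csk.close()
    lsk.close()
    return data

def client(ip="127.0.0.1", port=1234):
    csk = socket.socket(socket.AF_INET, socket.SOCK_STREAM,
                        socket.IPPROTO_SCTP)
    csk.connect((ip, port))
    csk.send(b"hello\x00")
    csk.close()
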
diff --git a/tools/testing/selftests/net/sctp_vrf.sh b/tools/testing/selftests/net/sctp_vrf.sh
new file mode 100755
index 000000000000..c721e952e5f3
--- /dev/null
+++ b/tools/testing/selftests/net/sctp_vrf.sh
@@ -0,0 +1,178 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Testing for SCTP VRF.
+# TOPO: CLIENT_NS1 (veth1) <---> (veth1) -> vrf-1
+#                                SERVER_NS
+#       CLIENT_NS2 (veth1) <---> (veth2) -> vrf-2
+
+CLIENT_NS1="client-ns1"
+CLIENT_NS2="client-ns2"
+CLIENT_IP4="10.0.0.1"
+CLIENT_IP6="2000::1"
+CLIENT_PORT=1234
+
+SERVER_NS="server-ns"
+SERVER_IP4="10.0.0.2"
+SERVER_IP6="2000::2"
+SERVER_PORT=1234
+
+setup() {
+ modprobe sctp
+ modprobe sctp_diag
+ ip netns add $CLIENT_NS1
+ ip netns add $CLIENT_NS2
+ ip netns add $SERVER_NS
+
+ ip net exec $CLIENT_NS1 sysctl -w net.ipv6.conf.default.accept_dad=0 >/dev/null 2>&1
+ ip net exec $CLIENT_NS2 sysctl -w net.ipv6.conf.default.accept_dad=0 >/dev/null 2>&1
+ ip net exec $SERVER_NS sysctl -w net.ipv6.conf.default.accept_dad=0 >/dev/null 2>&1
+
+ ip -n $SERVER_NS link add veth1 type veth peer name veth1 netns $CLIENT_NS1
+ ip -n $SERVER_NS link add veth2 type veth peer name veth1 netns $CLIENT_NS2
+
+ ip -n $CLIENT_NS1 link set veth1 up
+ ip -n $CLIENT_NS1 addr add $CLIENT_IP4/24 dev veth1
+ ip -n $CLIENT_NS1 addr add $CLIENT_IP6/24 dev veth1
+
+ ip -n $CLIENT_NS2 link set veth1 up
+ ip -n $CLIENT_NS2 addr add $CLIENT_IP4/24 dev veth1
+ ip -n $CLIENT_NS2 addr add $CLIENT_IP6/24 dev veth1
+
+ ip -n $SERVER_NS link add dummy1 type dummy
+ ip -n $SERVER_NS link set dummy1 up
+ ip -n $SERVER_NS link add vrf-1 type vrf table 10
+ ip -n $SERVER_NS link add vrf-2 type vrf table 20
+ ip -n $SERVER_NS link set vrf-1 up
+ ip -n $SERVER_NS link set vrf-2 up
+ ip -n $SERVER_NS link set veth1 master vrf-1
+ ip -n $SERVER_NS link set veth2 master vrf-2
+
+ ip -n $SERVER_NS addr add $SERVER_IP4/24 dev dummy1
+ ip -n $SERVER_NS addr add $SERVER_IP4/24 dev veth1
+ ip -n $SERVER_NS addr add $SERVER_IP4/24 dev veth2
+ ip -n $SERVER_NS addr add $SERVER_IP6/24 dev dummy1
+ ip -n $SERVER_NS addr add $SERVER_IP6/24 dev veth1
+ ip -n $SERVER_NS addr add $SERVER_IP6/24 dev veth2
+
+ ip -n $SERVER_NS link set veth1 up
+ ip -n $SERVER_NS link set veth2 up
+ ip -n $SERVER_NS route add table 10 $CLIENT_IP4 dev veth1 src $SERVER_IP4
+ ip -n $SERVER_NS route add table 20 $CLIENT_IP4 dev veth2 src $SERVER_IP4
+ ip -n $SERVER_NS route add $CLIENT_IP4 dev veth1 src $SERVER_IP4
+ ip -n $SERVER_NS route add table 10 $CLIENT_IP6 dev veth1 src $SERVER_IP6
+ ip -n $SERVER_NS route add table 20 $CLIENT_IP6 dev veth2 src $SERVER_IP6
+ ip -n $SERVER_NS route add $CLIENT_IP6 dev veth1 src $SERVER_IP6
+}
+
+cleanup() {
+ ip netns exec $SERVER_NS pkill sctp_hello >/dev/null 2>&1
+ ip netns del "$CLIENT_NS1"
+ ip netns del "$CLIENT_NS2"
+ ip netns del "$SERVER_NS"
+}
+
+wait_server() {
+ local IFACE=$1
+ local CNT=0
+
+ until ip netns exec $SERVER_NS ss -lS src $SERVER_IP:$SERVER_PORT | \
+ grep LISTEN | grep "$IFACE" >/dev/null 2>&1; do
+ [ $((CNT++)) = "20" ] && { RET=3; return $RET; }
+ sleep 0.1
+ done
+}
+
+do_test() {
+ local CLIENT_NS=$1
+ local IFACE=$2
+
+ ip netns exec $SERVER_NS pkill sctp_hello >/dev/null 2>&1
+ ip netns exec $SERVER_NS ./sctp_hello server $AF $SERVER_IP \
+ $SERVER_PORT $IFACE >/dev/null 2>&1 &
+ disown
+ wait_server $IFACE || return $RET
+ timeout 3 ip netns exec $CLIENT_NS ./sctp_hello client $AF \
+ $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT >/dev/null 2>&1
+ RET=$?
+ return $RET
+}
+
+do_testx() {
+ local IFACE1=$1
+ local IFACE2=$2
+
+ ip netns exec $SERVER_NS pkill sctp_hello >/dev/null 2>&1
+ ip netns exec $SERVER_NS ./sctp_hello server $AF $SERVER_IP \
+ $SERVER_PORT $IFACE1 >/dev/null 2>&1 &
+ disown
+ wait_server $IFACE1 || return $RET
+ ip netns exec $SERVER_NS ./sctp_hello server $AF $SERVER_IP \
+ $SERVER_PORT $IFACE2 >/dev/null 2>&1 &
+ disown
+ wait_server $IFACE2 || return $RET
+ timeout 3 ip netns exec $CLIENT_NS1 ./sctp_hello client $AF \
+ $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT >/dev/null 2>&1 && \
+ timeout 3 ip netns exec $CLIENT_NS2 ./sctp_hello client $AF \
+ $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT >/dev/null 2>&1
+ RET=$?
+ return $RET
+}
+
+testup() {
+ ip netns exec $SERVER_NS sysctl -w net.sctp.l3mdev_accept=1 >/dev/null 2>&1
+ echo -n "TEST 01: nobind, connect from client 1, l3mdev_accept=1, Y "
+ do_test $CLIENT_NS1 || { echo "[FAIL]"; return $RET; }
+ echo "[PASS]"
+
+ echo -n "TEST 02: nobind, connect from client 2, l3mdev_accept=1, N "
+ do_test $CLIENT_NS2 && { echo "[FAIL]"; return $RET; }
+ echo "[PASS]"
+
+ ip netns exec $SERVER_NS sysctl -w net.sctp.l3mdev_accept=0 >/dev/null 2>&1
+ echo -n "TEST 03: nobind, connect from client 1, l3mdev_accept=0, N "
+ do_test $CLIENT_NS1 && { echo "[FAIL]"; return $RET; }
+ echo "[PASS]"
+
+ echo -n "TEST 04: nobind, connect from client 2, l3mdev_accept=0, N "
+ do_test $CLIENT_NS2 && { echo "[FAIL]"; return $RET; }
+ echo "[PASS]"
+
+ echo -n "TEST 05: bind veth2 in server, connect from client 1, N "
+ do_test $CLIENT_NS1 veth2 && { echo "[FAIL]"; return $RET; }
+ echo "[PASS]"
+
+ echo -n "TEST 06: bind veth1 in server, connect from client 1, Y "
+ do_test $CLIENT_NS1 veth1 || { echo "[FAIL]"; return $RET; }
+ echo "[PASS]"
+
+ echo -n "TEST 07: bind vrf-1 in server, connect from client 1, Y "
+ do_test $CLIENT_NS1 vrf-1 || { echo "[FAIL]"; return $RET; }
+ echo "[PASS]"
+
+ echo -n "TEST 08: bind vrf-2 in server, connect from client 1, N "
+ do_test $CLIENT_NS1 vrf-2 && { echo "[FAIL]"; return $RET; }
+ echo "[PASS]"
+
+ echo -n "TEST 09: bind vrf-2 in server, connect from client 2, Y "
+ do_test $CLIENT_NS2 vrf-2 || { echo "[FAIL]"; return $RET; }
+ echo "[PASS]"
+
+ echo -n "TEST 10: bind vrf-1 in server, connect from client 2, N "
+ do_test $CLIENT_NS2 vrf-1 && { echo "[FAIL]"; return $RET; }
+ echo "[PASS]"
+
+ echo -n "TEST 11: bind vrf-1 & 2 in server, connect from client 1 & 2, Y "
+ do_testx vrf-1 vrf-2 || { echo "[FAIL]"; return $RET; }
+ echo "[PASS]"
+
+ echo -n "TEST 12: bind vrf-2 & 1 in server, connect from client 1 & 2, N "
+ do_testx vrf-2 vrf-1 || { echo "[FAIL]"; return $RET; }
+ echo "[PASS]"
+}
+
+trap cleanup EXIT
+setup && echo "Testing for SCTP VRF:" && \
+CLIENT_IP=$CLIENT_IP4 SERVER_IP=$SERVER_IP4 AF="-4" testup && echo "***v4 Tests Done***" &&
+CLIENT_IP=$CLIENT_IP6 SERVER_IP=$SERVER_IP6 AF="-6" testup && echo "***v6 Tests Done***"
+exit $?
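
The heart of do_testx is two listeners sharing one IP:port, each tied to its own VRF with SO_BINDTODEVICE before bind(). A minimal Python sketch of that setup follows; it assumes the vrf-1/vrf-2 devices from setup() exist, root privileges, and a kernel with the VRF support this series adds (without it, the second bind() is expected to fail with EADDRINUSE).

import socket

def vrf_listener(dev, ip="10.0.0.2", port=1234):
    sk = socket.socket(socket.AF_INET, socket.SOCK_STREAM,
                       socket.IPPROTO_SCTP)
    # Tie the endpoint to one L3 domain before bind(), as
    # sctp_hello.c does with its optional IFACE argument.
    sk.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE,
                  dev.encode() + b"\0")
    sk.bind((ip, port))
    sk.listen(5)
    return sk

listeners = [vrf_listener("vrf-1"), vrf_listener("vrf-2")]
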
diff --git a/tools/testing/selftests/net/so_incoming_cpu.c b/tools/testing/selftests/net/so_incoming_cpu.c
new file mode 100644
index 000000000000..0e04f9fef986
--- /dev/null
+++ b/tools/testing/selftests/net/so_incoming_cpu.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Amazon.com Inc. or its affiliates. */
+#define _GNU_SOURCE
+#include <sched.h>
+
+#include <netinet/in.h>
+#include <sys/socket.h>
+#include <sys/sysinfo.h>
+
+#include "../kselftest_harness.h"
+
+#define CLIENT_PER_SERVER 32 /* More sockets, more reliable */
+#define NR_SERVER self->nproc
+#define NR_CLIENT (CLIENT_PER_SERVER * NR_SERVER)
+
+FIXTURE(so_incoming_cpu)
+{
+ int nproc;
+ int *servers;
+ union {
+ struct sockaddr addr;
+ struct sockaddr_in in_addr;
+ };
+ socklen_t addrlen;
+};
+
+enum when_to_set {
+ BEFORE_REUSEPORT,
+ BEFORE_LISTEN,
+ AFTER_LISTEN,
+ AFTER_ALL_LISTEN,
+};
+
+FIXTURE_VARIANT(so_incoming_cpu)
+{
+ int when_to_set;
+};
+
+FIXTURE_VARIANT_ADD(so_incoming_cpu, before_reuseport)
+{
+ .when_to_set = BEFORE_REUSEPORT,
+};
+
+FIXTURE_VARIANT_ADD(so_incoming_cpu, before_listen)
+{
+ .when_to_set = BEFORE_LISTEN,
+};
+
+FIXTURE_VARIANT_ADD(so_incoming_cpu, after_listen)
+{
+ .when_to_set = AFTER_LISTEN,
+};
+
+FIXTURE_VARIANT_ADD(so_incoming_cpu, after_all_listen)
+{
+ .when_to_set = AFTER_ALL_LISTEN,
+};
+
+FIXTURE_SETUP(so_incoming_cpu)
+{
+ self->nproc = get_nprocs();
+ ASSERT_LE(2, self->nproc);
+
+ self->servers = malloc(sizeof(int) * NR_SERVER);
+ ASSERT_NE(self->servers, NULL);
+
+ self->in_addr.sin_family = AF_INET;
+ self->in_addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ self->in_addr.sin_port = htons(0);
+ self->addrlen = sizeof(struct sockaddr_in);
+}
+
+FIXTURE_TEARDOWN(so_incoming_cpu)
+{
+ int i;
+
+ for (i = 0; i < NR_SERVER; i++)
+ close(self->servers[i]);
+
+ free(self->servers);
+}
+
+void set_so_incoming_cpu(struct __test_metadata *_metadata, int fd, int cpu)
+{
+ int ret;
+
+ ret = setsockopt(fd, SOL_SOCKET, SO_INCOMING_CPU, &cpu, sizeof(int));
+ ASSERT_EQ(ret, 0);
+}
+
+int create_server(struct __test_metadata *_metadata,
+ FIXTURE_DATA(so_incoming_cpu) *self,
+ const FIXTURE_VARIANT(so_incoming_cpu) *variant,
+ int cpu)
+{
+ int fd, ret;
+
+ fd = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);
+ ASSERT_NE(fd, -1);
+
+ if (variant->when_to_set == BEFORE_REUSEPORT)
+ set_so_incoming_cpu(_metadata, fd, cpu);
+
+ ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &(int){1}, sizeof(int));
+ ASSERT_EQ(ret, 0);
+
+ ret = bind(fd, &self->addr, self->addrlen);
+ ASSERT_EQ(ret, 0);
+
+ if (variant->when_to_set == BEFORE_LISTEN)
+ set_so_incoming_cpu(_metadata, fd, cpu);
+
+ /* Use NR_CLIENT rather than CLIENT_PER_SERVER as the backlog so
+ * that connect() does not block if SO_INCOMING_CPU steering is
+ * broken and every client lands on the same listener.
+ */
+ ret = listen(fd, NR_CLIENT);
+ ASSERT_EQ(ret, 0);
+
+ if (variant->when_to_set == AFTER_LISTEN)
+ set_so_incoming_cpu(_metadata, fd, cpu);
+
+ return fd;
+}
+
+void create_servers(struct __test_metadata *_metadata,
+ FIXTURE_DATA(so_incoming_cpu) *self,
+ const FIXTURE_VARIANT(so_incoming_cpu) *variant)
+{
+ int i, ret;
+
+ for (i = 0; i < NR_SERVER; i++) {
+ self->servers[i] = create_server(_metadata, self, variant, i);
+
+ if (i == 0) {
+ ret = getsockname(self->servers[i], &self->addr, &self->addrlen);
+ ASSERT_EQ(ret, 0);
+ }
+ }
+
+ if (variant->when_to_set == AFTER_ALL_LISTEN) {
+ for (i = 0; i < NR_SERVER; i++)
+ set_so_incoming_cpu(_metadata, self->servers[i], i);
+ }
+}
+
+void create_clients(struct __test_metadata *_metadata,
+ FIXTURE_DATA(so_incoming_cpu) *self)
+{
+ cpu_set_t cpu_set;
+ int i, j, fd, ret;
+
+ for (i = 0; i < NR_SERVER; i++) {
+ CPU_ZERO(&cpu_set);
+
+ CPU_SET(i, &cpu_set);
+ ASSERT_EQ(CPU_COUNT(&cpu_set), 1);
+ ASSERT_NE(CPU_ISSET(i, &cpu_set), 0);
+
+ /* Make sure SYN will be processed on the i-th CPU
+ * and finally distributed to the i-th listener.
+ */
+ ret = sched_setaffinity(0, sizeof(cpu_set), &cpu_set);
+ ASSERT_EQ(ret, 0);
+
+ for (j = 0; j < CLIENT_PER_SERVER; j++) {
+ fd = socket(AF_INET, SOCK_STREAM, 0);
+ ASSERT_NE(fd, -1);
+
+ ret = connect(fd, &self->addr, self->addrlen);
+ ASSERT_EQ(ret, 0);
+
+ close(fd);
+ }
+ }
+}
+
+void verify_incoming_cpu(struct __test_metadata *_metadata,
+ FIXTURE_DATA(so_incoming_cpu) *self)
+{
+ int i, j, fd, cpu, ret, total = 0;
+ socklen_t len = sizeof(int);
+
+ for (i = 0; i < NR_SERVER; i++) {
+ for (j = 0; j < CLIENT_PER_SERVER; j++) {
+ /* If we see -EAGAIN here, SO_INCOMING_CPU is broken */
+ fd = accept(self->servers[i], &self->addr, &self->addrlen);
+ ASSERT_NE(fd, -1);
+
+ ret = getsockopt(fd, SOL_SOCKET, SO_INCOMING_CPU, &cpu, &len);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(cpu, i);
+
+ close(fd);
+ total++;
+ }
+ }
+
+ ASSERT_EQ(total, NR_CLIENT);
+ TH_LOG("SO_INCOMING_CPU is very likely to be "
+ "working correctly with %d sockets.", total);
+}
+
+TEST_F(so_incoming_cpu, test1)
+{
+ create_servers(_metadata, self, variant);
+ create_clients(_metadata, self);
+ verify_incoming_cpu(_metadata, self);
+}
+
+TEST_F(so_incoming_cpu, test2)
+{
+ int server;
+
+ create_servers(_metadata, self, variant);
+
+ /* No CPU specified */
+ server = create_server(_metadata, self, variant, -1);
+ close(server);
+
+ create_clients(_metadata, self);
+ verify_incoming_cpu(_metadata, self);
+}
+
+TEST_F(so_incoming_cpu, test3)
+{
+ int server, client;
+
+ create_servers(_metadata, self, variant);
+
+ /* No CPU specified */
+ server = create_server(_metadata, self, variant, -1);
+
+ create_clients(_metadata, self);
+
+ /* Never receive any requests */
+ client = accept(server, &self->addr, &self->addrlen);
+ ASSERT_EQ(client, -1);
+
+ verify_incoming_cpu(_metadata, self);
+}
+
+TEST_HARNESS_MAIN
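
Stripped of the kselftest harness, the property the fixture checks is: a connection whose SYN is processed on CPU i should be steered to the reuseport listener whose SO_INCOMING_CPU is i. Below is a condensed Python sketch of that loop; servers is assumed to be a list of blocking listeners prepared as in create_servers(), and SO_INCOMING_CPU is option 49 on Linux (exposed by name only in newer Python releases, hence the getattr fallback).

import os
import socket

SO_INCOMING_CPU = getattr(socket, "SO_INCOMING_CPU", 49)

def check_steering(servers, addr):
    for i, srv in enumerate(servers):
        os.sched_setaffinity(0, {i})   # SYN is sent, and processed, on CPU i
        c = socket.create_connection(addr)
        c.close()
        a, _ = srv.accept()            # blocks forever if steering is broken
        cpu = a.getsockopt(socket.SOL_SOCKET, SO_INCOMING_CPU)
        assert cpu == i
        a.close()
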
diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py
index ee22e3447ec7..7bd94f8e490a 100755
--- a/tools/testing/selftests/tc-testing/tdc.py
+++ b/tools/testing/selftests/tc-testing/tdc.py
@@ -246,6 +246,110 @@ def prepare_env(args, pm, stage, prefix, cmdlist, output = None):
stage, output,
'"{}" did not complete successfully'.format(prefix))
+def verify_by_json(procout, res, tidx, args, pm):
+ try:
+ outputJSON = json.loads(procout)
+ except json.JSONDecodeError:
+ res.set_result(ResultState.fail)
+ res.set_failmsg('Cannot decode verify command\'s output. Is it JSON?')
+ return res
+
+ matchJSON = json.loads(json.dumps(tidx['matchJSON']))
+
+ if type(outputJSON) != type(matchJSON):
+ failmsg = 'Original output and matchJSON value are not the same type: output: {} != matchJSON: {} '
+ failmsg = failmsg.format(type(outputJSON).__name__, type(matchJSON).__name__)
+ res.set_result(ResultState.fail)
+ res.set_failmsg(failmsg)
+ return res
+
+ if type(matchJSON) == list and len(matchJSON) > len(outputJSON):
+ failmsg = "Your matchJSON value is an array, and it contains more elements than the command under test\'s output:\ncommand output (length: {}):\n{}\nmatchJSON value (length: {}):\n{}"
+ failmsg = failmsg.format(len(outputJSON), outputJSON, len(matchJSON), matchJSON)
+ res.set_result(ResultState.fail)
+ res.set_failmsg(failmsg)
+ return res
+ res = find_in_json(res, outputJSON, matchJSON, 0)
+
+ return res
+
+def find_in_json(res, outputJSONVal, matchJSONVal, matchJSONKey=None):
+ if res.get_result() == ResultState.fail:
+ return res
+
+ if type(matchJSONVal) == list:
+ res = find_in_json_list(res, outputJSONVal, matchJSONVal, matchJSONKey)
+
+ elif type(matchJSONVal) == dict:
+ res = find_in_json_dict(res, outputJSONVal, matchJSONVal)
+ else:
+ res = find_in_json_other(res, outputJSONVal, matchJSONVal, matchJSONKey)
+
+ if res.get_result() != ResultState.fail:
+ res.set_result(ResultState.success)
+ return res
+
+ return res
+
+def find_in_json_list(res, outputJSONVal, matchJSONVal, matchJSONKey=None):
+ if (type(matchJSONVal) != type(outputJSONVal)):
+ failmsg = 'Original output and matchJSON value are not the same type: output: {} != matchJSON: {}'
+ failmsg = failmsg.format(outputJSONVal, matchJSONVal)
+ res.set_result(ResultState.fail)
+ res.set_failmsg(failmsg)
+ return res
+
+ if len(matchJSONVal) > len(outputJSONVal):
+ failmsg = "Your matchJSON value is an array, and it contains more elements than the command under test\'s output:\ncommand output (length: {}):\n{}\nmatchJSON value (length: {}):\n{}"
+ failmsg = failmsg.format(len(outputJSONVal), outputJSONVal, len(matchJSONVal), matchJSONVal)
+ res.set_result(ResultState.fail)
+ res.set_failmsg(failmsg)
+ return res
+
+ for matchJSONIdx, matchJSONVal in enumerate(matchJSONVal):
+ res = find_in_json(res, outputJSONVal[matchJSONIdx], matchJSONVal,
+ matchJSONKey)
+ return res
+
+def find_in_json_dict(res, outputJSONVal, matchJSONVal):
+ for matchJSONKey, matchJSONVal in matchJSONVal.items():
+ if type(outputJSONVal) == dict:
+ if matchJSONKey not in outputJSONVal:
+ failmsg = 'Key not found in json output: {}: {}\nMatching against output: {}'
+ failmsg = failmsg.format(matchJSONKey, matchJSONVal, outputJSONVal)
+ res.set_result(ResultState.fail)
+ res.set_failmsg(failmsg)
+ return res
+
+ else:
+ failmsg = 'Original output and matchJSON value are not the same type: output: {} != matchJSON: {}'
+ failmsg = failmsg.format(type(outputJSONVal).__name__, type(matchJSONVal).__name__)
+ res.set_result(ResultState.fail)
+ res.set_failmsg(failmsg)
+ return res
+
+ if type(outputJSONVal) == dict and (type(outputJSONVal[matchJSONKey]) == dict or
+ type(outputJSONVal[matchJSONKey]) == list):
+ if len(matchJSONVal) > 0:
+ res = find_in_json(res, outputJSONVal[matchJSONKey], matchJSONVal, matchJSONKey)
+ # handling corner case where matchJSONVal == [] or matchJSONVal == {}
+ else:
+ res = find_in_json_other(res, outputJSONVal, matchJSONVal, matchJSONKey)
+ else:
+ res = find_in_json(res, outputJSONVal, matchJSONVal, matchJSONKey)
+ return res
+
+def find_in_json_other(res, outputJSONVal, matchJSONVal, matchJSONKey=None):
+ if matchJSONKey in outputJSONVal:
+ if matchJSONVal != outputJSONVal[matchJSONKey]:
+ failmsg = 'Value doesn\'t match: {}: {} != {}\nMatching against output: {}'
+ failmsg = failmsg.format(matchJSONKey, matchJSONVal, outputJSONVal[matchJSONKey], outputJSONVal)
+ res.set_result(ResultState.fail)
+ res.set_failmsg(failmsg)
+ return res
+
+ return res
+
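
The helpers above give tdc a structural alternative to matchPattern: a matchJSON value is matched as a subset of the verify command's JSON output, key by key for objects (extra keys in the output are ignored) and index by index for arrays (the output may be longer than the match). A hypothetical test entry using it might look like this; the id and commands are placeholders, not taken from the tree.

TEST_CASE = {
    "id": "0000",
    "name": "illustrative matchJSON usage",
    "verifyCmd": "$TC -j qdisc show dev $DEV1",
    # Passes if output[0] contains these two key/value pairs,
    # whatever other keys the first qdisc object carries.
    "matchJSON": [
        {"kind": "mq", "handle": "0:"},
    ],
}
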
def run_one_test(pm, args, index, tidx):
global NAMES
result = True
@@ -292,16 +396,22 @@ def run_one_test(pm, args, index, tidx):
else:
if args.verbose > 0:
print('-----> verify stage')
- match_pattern = re.compile(
- str(tidx["matchPattern"]), re.DOTALL | re.MULTILINE)
(p, procout) = exec_cmd(args, pm, 'verify', tidx["verifyCmd"])
if procout:
- match_index = re.findall(match_pattern, procout)
- if len(match_index) != int(tidx["matchCount"]):
- res.set_result(ResultState.fail)
- res.set_failmsg('Could not match regex pattern. Verify command output:\n{}'.format(procout))
+ if 'matchJSON' in tidx:
+ verify_by_json(procout, res, tidx, args, pm)
+ elif 'matchPattern' in tidx:
+ match_pattern = re.compile(
+ str(tidx["matchPattern"]), re.DOTALL | re.MULTILINE)
+ match_index = re.findall(match_pattern, procout)
+ if len(match_index) != int(tidx["matchCount"]):
+ res.set_result(ResultState.fail)
+ res.set_failmsg('Could not match regex pattern. Verify command output:\n{}'.format(procout))
+ else:
+ res.set_result(ResultState.success)
else:
- res.set_result(ResultState.success)
+ res.set_result(ResultState.fail)
+ res.set_failmsg('Must specify a match option: matchJSON or matchPattern\n{}'.format(procout))
elif int(tidx["matchCount"]) != 0:
res.set_result(ResultState.fail)
res.set_failmsg('No output generated by verify command.')
@@ -365,6 +475,7 @@ def test_runner(pm, args, filtered_tests):
res.set_result(ResultState.skip)
res.set_errormsg(errmsg)
tsr.add_resultdata(res)
+ index += 1
continue
try:
badtest = tidx # in case it goes bad