Diffstat (limited to 'tools/testing/selftests/bpf')
376 files changed, 25899 insertions, 5638 deletions
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore index e9c377001f93..3d8378972d26 100644 --- a/tools/testing/selftests/bpf/.gitignore +++ b/tools/testing/selftests/bpf/.gitignore @@ -18,11 +18,9 @@ feature urandom_read test_sockmap test_lirc_mode2_user -test_flow_dissector flow_dissector_load test_tcpnotify_user test_libbpf -test_sysctl xdping test_cpp *.d diff --git a/tools/testing/selftests/bpf/DENYLIST.aarch64 b/tools/testing/selftests/bpf/DENYLIST.aarch64 deleted file mode 100644 index 901349da680f..000000000000 --- a/tools/testing/selftests/bpf/DENYLIST.aarch64 +++ /dev/null @@ -1,12 +0,0 @@ -bpf_cookie/multi_kprobe_attach_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3 -bpf_cookie/multi_kprobe_link_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3 -kprobe_multi_bench_attach # needs CONFIG_FPROBE -kprobe_multi_test # needs CONFIG_FPROBE -module_attach # prog 'kprobe_multi': failed to auto-attach: -95 -fentry_test/fentry_many_args # fentry_many_args:FAIL:fentry_many_args_attach unexpected error: -524 -fexit_test/fexit_many_args # fexit_many_args:FAIL:fexit_many_args_attach unexpected error: -524 -tracing_struct/struct_many_args # struct_many_args:FAIL:tracing_struct_many_args__attach unexpected error: -524 -fill_link_info/kprobe_multi_link_info # bpf_program__attach_kprobe_multi_opts unexpected error: -95 -fill_link_info/kretprobe_multi_link_info # bpf_program__attach_kprobe_multi_opts unexpected error: -95 -fill_link_info/kprobe_multi_invalid_ubuff # bpf_program__attach_kprobe_multi_opts unexpected error: -95 -missed/kprobe_recursion # missed_kprobe_recursion__attach unexpected error: -95 (errno 95) diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 7eeb3cbe18c7..4863106034df 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -34,6 +34,9 @@ OPT_FLAGS ?= $(if $(RELEASE),-O2,-O0) LIBELF_CFLAGS := $(shell $(PKG_CONFIG) libelf --cflags 2>/dev/null) LIBELF_LIBS := $(shell $(PKG_CONFIG) libelf --libs 2>/dev/null || echo -lelf) +SKIP_DOCS ?= +SKIP_LLVM ?= + ifeq ($(srctree),) srctree := $(patsubst %/,%,$(dir $(CURDIR))) srctree := $(patsubst %/,%,$(dir $(srctree))) @@ -41,7 +44,7 @@ srctree := $(patsubst %/,%,$(dir $(srctree))) srctree := $(patsubst %/,%,$(dir $(srctree))) endif -CFLAGS += -g $(OPT_FLAGS) -rdynamic \ +CFLAGS += -g $(OPT_FLAGS) -rdynamic -std=gnu11 \ -Wall -Werror -fno-omit-frame-pointer \ $(GENFLAGS) $(SAN_CFLAGS) $(LIBELF_CFLAGS) \ -I$(CURDIR) -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) \ @@ -54,21 +57,6 @@ PCAP_LIBS := $(shell $(PKG_CONFIG) --libs libpcap 2>/dev/null) LDLIBS += $(PCAP_LIBS) CFLAGS += $(PCAP_CFLAGS) -# The following tests perform type punning and they may break strict -# aliasing rules, which are exploited by both GCC and clang by default -# while optimizing. This can lead to broken programs. 
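For reference, the type-punning hazard described in the comment above can be reproduced in a few lines of C. A minimal sketch, illustrative only and not part of the patch:

    /* With -O2 and strict aliasing enabled, the compiler may assume an
     * int * and a float * never point at the same object, so it can
     * return the constant 1 here without re-reading memory. Building
     * with -fno-strict-aliasing forces the re-read instead. */
    #include <stdio.h>

    static int punned_store(int *i, float *f)
    {
            *i = 1;
            *f = 2.0f;      /* may alias *i in the caller below */
            return *i;      /* folded to 1 under strict aliasing */
    }

    int main(void)
    {
            int x = 0;

            /* Casting &x to float * and writing through it is exactly
             * the kind of type punning the comment above describes. */
            printf("%d\n", punned_store(&x, (float *)&x));
            return 0;
    }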
-progs/bind4_prog.c-CFLAGS := -fno-strict-aliasing -progs/bind6_prog.c-CFLAGS := -fno-strict-aliasing -progs/dynptr_fail.c-CFLAGS := -fno-strict-aliasing -progs/linked_list_fail.c-CFLAGS := -fno-strict-aliasing -progs/map_kptr_fail.c-CFLAGS := -fno-strict-aliasing -progs/syscall.c-CFLAGS := -fno-strict-aliasing -progs/test_pkt_md_access.c-CFLAGS := -fno-strict-aliasing -progs/test_sk_lookup.c-CFLAGS := -fno-strict-aliasing -progs/timer_crash.c-CFLAGS := -fno-strict-aliasing -progs/test_global_func9.c-CFLAGS := -fno-strict-aliasing -progs/verifier_nocsr.c-CFLAGS := -fno-strict-aliasing - # Some utility functions use LLVM libraries jit_disasm_helpers.c-CFLAGS = $(LLVM_CFLAGS) @@ -85,7 +73,7 @@ endif # Order correspond to 'make run_tests' order TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_progs \ test_sockmap \ - test_tcpnotify_user test_sysctl \ + test_tcpnotify_user \ test_progs-no_alu32 TEST_INST_SUBDIRS := no_alu32 @@ -103,18 +91,6 @@ progs/btf_dump_test_case_packing.c-bpf_gcc-CFLAGS := -Wno-error progs/btf_dump_test_case_padding.c-bpf_gcc-CFLAGS := -Wno-error progs/btf_dump_test_case_syntax.c-bpf_gcc-CFLAGS := -Wno-error -# The following tests do type-punning, via the __imm_insn macro, from -# `struct bpf_insn' to long and then uses the value. This triggers an -# "is used uninitialized" warning in GCC due to strict-aliasing -# rules. -progs/verifier_ref_tracking.c-bpf_gcc-CFLAGS := -fno-strict-aliasing -progs/verifier_unpriv.c-bpf_gcc-CFLAGS := -fno-strict-aliasing -progs/verifier_cgroup_storage.c-bpf_gcc-CFLAGS := -fno-strict-aliasing -progs/verifier_ld_ind.c-bpf_gcc-CFLAGS := -fno-strict-aliasing -progs/verifier_map_ret_val.c-bpf_gcc-CFLAGS := -fno-strict-aliasing -progs/verifier_spill_fill.c-bpf_gcc-CFLAGS := -fno-strict-aliasing -progs/verifier_subprog_precision.c-bpf_gcc-CFLAGS := -fno-strict-aliasing -progs/verifier_uninit.c-bpf_gcc-CFLAGS := -fno-strict-aliasing endif ifneq ($(CLANG_CPUV4),) @@ -122,51 +98,41 @@ TEST_GEN_PROGS += test_progs-cpuv4 TEST_INST_SUBDIRS += cpuv4 endif -TEST_GEN_FILES = test_lwt_ip_encap.bpf.o test_tc_edt.bpf.o +TEST_GEN_FILES = test_tc_edt.bpf.o TEST_FILES = xsk_prereqs.sh $(wildcard progs/btf_dump_test_case_*.c) # Order correspond to 'make run_tests' order TEST_PROGS := test_kmod.sh \ - test_xdp_redirect.sh \ - test_xdp_redirect_multi.sh \ - test_xdp_meta.sh \ - test_tunnel.sh \ - test_lwt_seg6local.sh \ test_lirc_mode2.sh \ - test_flow_dissector.sh \ - test_xdp_vlan_mode_generic.sh \ - test_xdp_vlan_mode_native.sh \ - test_lwt_ip_encap.sh \ test_tc_tunnel.sh \ test_tc_edt.sh \ test_xdping.sh \ test_bpftool_build.sh \ test_bpftool.sh \ + test_bpftool_map.sh \ test_bpftool_metadata.sh \ test_doc_build.sh \ test_xsk.sh \ test_xdp_features.sh -TEST_PROGS_EXTENDED := with_addr.sh \ - with_tunnels.sh ima_setup.sh verify_sig_setup.sh \ - test_xdp_vlan.sh test_bpftool.py +TEST_PROGS_EXTENDED := \ + ima_setup.sh verify_sig_setup.sh \ + test_bpftool.py + +TEST_KMODS := bpf_testmod.ko bpf_test_no_cfi.ko bpf_test_modorder_x.ko \ + bpf_test_modorder_y.ko +TEST_KMOD_TARGETS = $(addprefix $(OUTPUT)/,$(TEST_KMODS)) # Compile but not part of 'make run_tests' TEST_GEN_PROGS_EXTENDED = \ bench \ - bpf_testmod.ko \ - bpf_test_modorder_x.ko \ - bpf_test_modorder_y.ko \ - bpf_test_no_cfi.ko \ flow_dissector_load \ runqslower \ test_cpp \ - test_flow_dissector \ test_lirc_mode2_user \ veristat \ xdp_features \ xdp_hw_metadata \ - xdp_redirect_multi \ xdp_synproxy \ xdping \ xskxceiver @@ -184,8 +150,9 @@ override define CLEAN $(Q)$(RM) -r 
$(TEST_GEN_PROGS) $(Q)$(RM) -r $(TEST_GEN_PROGS_EXTENDED) $(Q)$(RM) -r $(TEST_GEN_FILES) + $(Q)$(RM) -r $(TEST_KMODS) $(Q)$(RM) -r $(EXTRA_CLEAN) - $(Q)$(MAKE) -C bpf_testmod clean + $(Q)$(MAKE) -C test_kmods clean $(Q)$(MAKE) docs-clean endef @@ -203,22 +170,29 @@ ifeq ($(shell expr $(MAKE_VERSION) \>= 4.4), 1) $(let OUTPUT,$(OUTPUT)/,\ $(eval include ../../../build/Makefile.feature)) else -OUTPUT := $(OUTPUT)/ +override OUTPUT := $(OUTPUT)/ $(eval include ../../../build/Makefile.feature) -OUTPUT := $(patsubst %/,%,$(OUTPUT)) +override OUTPUT := $(patsubst %/,%,$(OUTPUT)) endif endif +ifneq ($(SKIP_LLVM),1) ifeq ($(feature-llvm),1) LLVM_CFLAGS += -DHAVE_LLVM_SUPPORT LLVM_CONFIG_LIB_COMPONENTS := mcdisassembler all-targets # both llvm-config and lib.mk add -D_GNU_SOURCE, which ends up as conflict LLVM_CFLAGS += $(filter-out -D_GNU_SOURCE,$(shell $(LLVM_CONFIG) --cflags)) - LLVM_LDLIBS += $(shell $(LLVM_CONFIG) --link-static --libs $(LLVM_CONFIG_LIB_COMPONENTS)) - LLVM_LDLIBS += $(shell $(LLVM_CONFIG) --link-static --system-libs $(LLVM_CONFIG_LIB_COMPONENTS)) - LLVM_LDLIBS += -lstdc++ + # Prefer linking statically if it's available, otherwise fallback to shared + ifeq ($(shell $(LLVM_CONFIG) --link-static --libs >/dev/null 2>&1 && echo static),static) + LLVM_LDLIBS += $(shell $(LLVM_CONFIG) --link-static --libs $(LLVM_CONFIG_LIB_COMPONENTS)) + LLVM_LDLIBS += $(filter-out -lxml2,$(shell $(LLVM_CONFIG) --link-static --system-libs $(LLVM_CONFIG_LIB_COMPONENTS))) + LLVM_LDLIBS += -lstdc++ + else + LLVM_LDLIBS += $(shell $(LLVM_CONFIG) --link-shared --libs $(LLVM_CONFIG_LIB_COMPONENTS)) + endif LLVM_LDFLAGS += $(shell $(LLVM_CONFIG) --ldflags) endif +endif SCRATCH_DIR := $(OUTPUT)/tools BUILD_DIR := $(SCRATCH_DIR)/build @@ -247,11 +221,11 @@ ifeq ($(VMLINUX_BTF),) $(error Cannot find a vmlinux for VMLINUX_BTF at any of "$(VMLINUX_BTF_PATHS)") endif -# Define simple and short `make test_progs`, `make test_sysctl`, etc targets +# Define simple and short `make test_progs`, `make test_maps`, etc targets # to build individual tests. # NOTE: Semicolon at the end is critical to override lib.mk's default static # rule for binaries. -$(notdir $(TEST_GEN_PROGS) \ +$(notdir $(TEST_GEN_PROGS) $(TEST_KMODS) \ $(TEST_GEN_PROGS_EXTENDED)): %: $(OUTPUT)/% ; # sort removes libbpf duplicates when not cross-building @@ -305,37 +279,19 @@ $(OUTPUT)/sign-file: ../../../../scripts/sign-file.c $< -o $@ \ $(shell $(PKG_CONFIG) --libs libcrypto 2> /dev/null || echo -lcrypto) -$(OUTPUT)/bpf_testmod.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_testmod/Makefile bpf_testmod/*.[ch]) - $(call msg,MOD,,$@) - $(Q)$(RM) bpf_testmod/bpf_testmod.ko # force re-compilation - $(Q)$(MAKE) $(submake_extras) -C bpf_testmod \ - RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) \ +# This should really be a grouped target, but make versions before 4.3 don't +# support that for regular rules. 
However, pattern matching rules are implicitly +# treated as grouped even with older versions of make, so as a workaround, the +# subst() turns the rule into a pattern matching rule +$(addprefix test_kmods/,$(subst .ko,%ko,$(TEST_KMODS))): $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard test_kmods/Makefile test_kmods/*.[ch]) + $(Q)$(RM) test_kmods/*.ko test_kmods/*.mod.o # force re-compilation + $(Q)$(MAKE) $(submake_extras) -C test_kmods \ + RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) \ EXTRA_CFLAGS='' EXTRA_LDFLAGS='' - $(Q)cp bpf_testmod/bpf_testmod.ko $@ -$(OUTPUT)/bpf_test_no_cfi.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_test_no_cfi/Makefile bpf_test_no_cfi/*.[ch]) +$(TEST_KMOD_TARGETS): $(addprefix test_kmods/,$(TEST_KMODS)) $(call msg,MOD,,$@) - $(Q)$(RM) bpf_test_no_cfi/bpf_test_no_cfi.ko # force re-compilation - $(Q)$(MAKE) $(submake_extras) -C bpf_test_no_cfi \ - RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) \ - EXTRA_CFLAGS='' EXTRA_LDFLAGS='' - $(Q)cp bpf_test_no_cfi/bpf_test_no_cfi.ko $@ - -$(OUTPUT)/bpf_test_modorder_x.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_test_modorder_x/Makefile bpf_test_modorder_x/*.[ch]) - $(call msg,MOD,,$@) - $(Q)$(RM) bpf_test_modorder_x/bpf_test_modorder_x.ko # force re-compilation - $(Q)$(MAKE) $(submake_extras) -C bpf_test_modorder_x \ - RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) \ - EXTRA_CFLAGS='' EXTRA_LDFLAGS='' - $(Q)cp bpf_test_modorder_x/bpf_test_modorder_x.ko $@ - -$(OUTPUT)/bpf_test_modorder_y.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_test_modorder_y/Makefile bpf_test_modorder_y/*.[ch]) - $(call msg,MOD,,$@) - $(Q)$(RM) bpf_test_modorder_y/bpf_test_modorder_y.ko # force re-compilation - $(Q)$(MAKE) $(submake_extras) -C bpf_test_modorder_y \ - RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) \ - EXTRA_CFLAGS='' EXTRA_LDFLAGS='' - $(Q)cp bpf_test_modorder_y/bpf_test_modorder_y.ko $@ + $(Q)cp test_kmods/$(@F) $@ DEFAULT_BPFTOOL := $(HOST_SCRATCH_DIR)/sbin/bpftool @@ -354,6 +310,7 @@ $(OUTPUT)/runqslower: $(BPFOBJ) | $(DEFAULT_BPFTOOL) $(RUNQSLOWER_OUTPUT) BPFTOOL_OUTPUT=$(HOST_BUILD_DIR)/bpftool/ \ BPFOBJ_OUTPUT=$(BUILD_DIR)/libbpf/ \ BPFOBJ=$(BPFOBJ) BPF_INCLUDE=$(INCLUDE_DIR) \ + BPF_TARGET_ENDIAN=$(BPF_TARGET_ENDIAN) \ EXTRA_CFLAGS='-g $(OPT_FLAGS) $(SAN_CFLAGS) $(EXTRA_CFLAGS)' \ EXTRA_LDFLAGS='$(SAN_LDFLAGS) $(EXTRA_LDFLAGS)' && \ cp $(RUNQSLOWER_OUTPUT)runqslower $@ @@ -373,7 +330,6 @@ NETWORK_HELPERS := $(OUTPUT)/network_helpers.o $(OUTPUT)/test_sockmap: $(CGROUP_HELPERS) $(TESTING_HELPERS) $(OUTPUT)/test_tcpnotify_user: $(CGROUP_HELPERS) $(TESTING_HELPERS) $(TRACE_HELPERS) $(OUTPUT)/test_sock_fields: $(CGROUP_HELPERS) $(TESTING_HELPERS) -$(OUTPUT)/test_sysctl: $(CGROUP_HELPERS) $(TESTING_HELPERS) $(OUTPUT)/test_tag: $(TESTING_HELPERS) $(OUTPUT)/test_lirc_mode2_user: $(TESTING_HELPERS) $(OUTPUT)/xdping: $(TESTING_HELPERS) @@ -407,7 +363,9 @@ $(CROSS_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \ prefix= DESTDIR=$(SCRATCH_DIR)/ install-bin endif +ifneq ($(SKIP_DOCS),1) all: docs +endif docs: $(Q)RST2MAN_OPTS="--exit-status=1" $(MAKE) $(submake_extras) \ @@ -480,10 +438,10 @@ $(shell $(1) $(2) -dM -E - </dev/null | grep -E 'MIPS(EL|EB)|_MIPS_SZ(PTR|LONG) endef # Determine target endianness. 
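Stepping back to the test_kmods rule above: the pattern-rule trick it relies on is easiest to see in isolation. A minimal sketch, with hypothetical module and directory names, not part of the patch:

    # GNU make < 4.3 has no grouped targets (&:) for explicit rules, but
    # a pattern rule with several targets already runs its recipe only
    # once for all of them. subst rewrites "foo.ko bar.ko" into the
    # patterns "foo%ko bar%ko"; each .ko matches with "." as the stem.
    MODS := foo.ko bar.ko

    $(subst .ko,%ko,$(MODS)): modsrc/Makefile
            $(MAKE) -C modsrc   # one invocation builds every module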
-IS_LITTLE_ENDIAN = $(shell $(CC) -dM -E - </dev/null | \ +IS_LITTLE_ENDIAN := $(shell $(CC) -dM -E - </dev/null | \ grep 'define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__') -MENDIAN=$(if $(IS_LITTLE_ENDIAN),-mlittle-endian,-mbig-endian) -BPF_TARGET_ENDIAN=$(if $(IS_LITTLE_ENDIAN),--target=bpfel,--target=bpfeb) +MENDIAN:=$(if $(IS_LITTLE_ENDIAN),-mlittle-endian,-mbig-endian) +BPF_TARGET_ENDIAN:=$(if $(IS_LITTLE_ENDIAN),--target=bpfel,--target=bpfeb) ifneq ($(CROSS_COMPILE),) CLANG_TARGET_ARCH = --target=$(notdir $(CROSS_COMPILE:%-=%)) @@ -493,6 +451,8 @@ CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG),$(CLANG_TARGET_ARCH)) BPF_CFLAGS = -g -Wall -Werror -D__TARGET_ARCH_$(SRCARCH) $(MENDIAN) \ -I$(INCLUDE_DIR) -I$(CURDIR) -I$(APIDIR) \ -I$(abspath $(OUTPUT)/../usr/include) \ + -std=gnu11 \ + -fno-strict-aliasing \ -Wno-compare-distinct-pointer-types # TODO: enable me -Wsign-compare @@ -720,9 +680,6 @@ ifneq ($2:$(OUTPUT),:$(shell pwd)) $(Q)rsync -aq $$^ $(TRUNNER_OUTPUT)/ endif -$(OUTPUT)/$(TRUNNER_BINARY): LDLIBS += $$(LLVM_LDLIBS) -$(OUTPUT)/$(TRUNNER_BINARY): LDFLAGS += $$(LLVM_LDFLAGS) - # some X.test.o files have runtime dependencies on Y.bpf.o files $(OUTPUT)/$(TRUNNER_BINARY): | $(TRUNNER_BPF_OBJS) @@ -730,9 +687,10 @@ $(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \ $(TRUNNER_EXTRA_OBJS) $$(BPFOBJ) \ $(RESOLVE_BTFIDS) \ $(TRUNNER_BPFTOOL) \ + $(OUTPUT)/veristat \ | $(TRUNNER_BINARY)-extras $$(call msg,BINARY,,$$@) - $(Q)$$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) $$(LDFLAGS) -o $$@ + $(Q)$$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) $$(LLVM_LDLIBS) $$(LDFLAGS) $$(LLVM_LDFLAGS) -o $$@ $(Q)$(RESOLVE_BTFIDS) --btf $(TRUNNER_OUTPUT)/btf_data.bpf.o $$@ $(Q)ln -sf $(if $2,..,.)/tools/build/bpftool/$(USE_BOOTSTRAP)bpftool \ $(OUTPUT)/$(if $2,$2/)bpftool @@ -760,14 +718,12 @@ TRUNNER_EXTRA_SOURCES := test_progs.c \ json_writer.c \ flow_dissector_load.h \ ip_check_defrag_frags.h -TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \ - $(OUTPUT)/bpf_test_no_cfi.ko \ - $(OUTPUT)/bpf_test_modorder_x.ko \ - $(OUTPUT)/bpf_test_modorder_y.ko \ +TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read \ $(OUTPUT)/liburandom_read.so \ $(OUTPUT)/xdp_synproxy \ $(OUTPUT)/sign-file \ $(OUTPUT)/uprobe_multi \ + $(TEST_KMOD_TARGETS) \ ima_setup.sh \ verify_sig_setup.sh \ $(wildcard progs/btf_dump_test_case_*.c) \ @@ -834,9 +790,12 @@ $(OUTPUT)/xdp_features: xdp_features.c $(OUTPUT)/network_helpers.o $(OUTPUT)/xdp $(Q)$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@ # Make sure we are able to include and link libbpf against c++. 
+CXXFLAGS += $(CFLAGS) +CXXFLAGS := $(subst -D_GNU_SOURCE=,,$(CXXFLAGS)) +CXXFLAGS := $(subst -std=gnu11,-std=gnu++11,$(CXXFLAGS)) $(OUTPUT)/test_cpp: test_cpp.cpp $(OUTPUT)/test_core_extern.skel.h $(BPFOBJ) $(call msg,CXX,,$@) - $(Q)$(CXX) $(subst -D_GNU_SOURCE=,,$(CFLAGS)) $(filter %.a %.o %.cpp,$^) $(LDLIBS) -o $@ + $(Q)$(CXX) $(CXXFLAGS) $(filter %.a %.o %.cpp,$^) $(LDLIBS) -o $@ # Benchmark runner $(OUTPUT)/bench_%.o: benchs/bench_%.c bench.h $(BPFOBJ) @@ -856,6 +815,7 @@ $(OUTPUT)/bench_local_storage_create.o: $(OUTPUT)/bench_local_storage_create.ske $(OUTPUT)/bench_bpf_hashmap_lookup.o: $(OUTPUT)/bpf_hashmap_lookup.skel.h $(OUTPUT)/bench_htab_mem.o: $(OUTPUT)/htab_mem_bench.skel.h $(OUTPUT)/bench_bpf_crypto.o: $(OUTPUT)/crypto_bench.skel.h +$(OUTPUT)/bench_sockmap.o: $(OUTPUT)/bench_sockmap_prog.skel.h $(OUTPUT)/bench.o: bench.h testing_helpers.h $(BPFOBJ) $(OUTPUT)/bench: LDLIBS += -lm $(OUTPUT)/bench: $(OUTPUT)/bench.o \ @@ -876,10 +836,16 @@ $(OUTPUT)/bench: $(OUTPUT)/bench.o \ $(OUTPUT)/bench_local_storage_create.o \ $(OUTPUT)/bench_htab_mem.o \ $(OUTPUT)/bench_bpf_crypto.o \ + $(OUTPUT)/bench_sockmap.o \ # $(call msg,BINARY,,$@) $(Q)$(CC) $(CFLAGS) $(LDFLAGS) $(filter %.a %.o,$^) $(LDLIBS) -o $@ +# This works around GCC warning about snprintf truncating strings like: +# +# char a[PATH_MAX], b[PATH_MAX]; +# snprintf(a, "%s/foo", b); // triggers -Wformat-truncation +$(OUTPUT)/veristat.o: CFLAGS += -Wno-format-truncation $(OUTPUT)/veristat.o: $(BPFOBJ) $(OUTPUT)/veristat: $(OUTPUT)/veristat.o $(call msg,BINARY,,$@) @@ -894,12 +860,9 @@ $(OUTPUT)/uprobe_multi: uprobe_multi.c uprobe_multi.ld EXTRA_CLEAN := $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \ prog_tests/tests.h map_tests/tests.h verifier/tests.h \ - feature bpftool \ + feature bpftool $(TEST_KMOD_TARGETS) \ $(addprefix $(OUTPUT)/,*.o *.d *.skel.h *.lskel.h *.subskel.h \ - no_alu32 cpuv4 bpf_gcc bpf_testmod.ko \ - bpf_test_no_cfi.ko \ - bpf_test_modorder_x.ko \ - bpf_test_modorder_y.ko \ + no_alu32 cpuv4 bpf_gcc \ liburandom_read.so) \ $(OUTPUT)/FEATURE-DUMP.selftests diff --git a/tools/testing/selftests/bpf/Makefile.docs b/tools/testing/selftests/bpf/Makefile.docs index eb6a4fea8c79..f7f9e7088bb3 100644 --- a/tools/testing/selftests/bpf/Makefile.docs +++ b/tools/testing/selftests/bpf/Makefile.docs @@ -7,12 +7,6 @@ INSTALL ?= install RM ?= rm -f RMDIR ?= rmdir --ignore-fail-on-non-empty -ifeq ($(V),1) - Q = -else - Q = @ -endif - prefix ?= /usr/local mandir ?= $(prefix)/man man2dir = $(mandir)/man2 diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c index 1bd403a5ef7b..ddd73d06a1eb 100644 --- a/tools/testing/selftests/bpf/bench.c +++ b/tools/testing/selftests/bpf/bench.c @@ -283,6 +283,7 @@ extern struct argp bench_local_storage_create_argp; extern struct argp bench_htab_mem_argp; extern struct argp bench_trigger_batch_argp; extern struct argp bench_crypto_argp; +extern struct argp bench_sockmap_argp; static const struct argp_child bench_parsers[] = { { &bench_ringbufs_argp, 0, "Ring buffers benchmark", 0 }, @@ -297,6 +298,7 @@ static const struct argp_child bench_parsers[] = { { &bench_htab_mem_argp, 0, "hash map memory benchmark", 0 }, { &bench_trigger_batch_argp, 0, "BPF triggering benchmark", 0 }, { &bench_crypto_argp, 0, "bpf crypto benchmark", 0 }, + { &bench_sockmap_argp, 0, "bpf sockmap benchmark", 0 }, {}, }; @@ -526,6 +528,12 @@ extern const struct bench bench_trig_uprobe_multi_push; extern const struct bench bench_trig_uretprobe_multi_push; extern const struct bench 
bench_trig_uprobe_multi_ret; extern const struct bench bench_trig_uretprobe_multi_ret; +#ifdef __x86_64__ +extern const struct bench bench_trig_uprobe_nop5; +extern const struct bench bench_trig_uretprobe_nop5; +extern const struct bench bench_trig_uprobe_multi_nop5; +extern const struct bench bench_trig_uretprobe_multi_nop5; +#endif extern const struct bench bench_rb_libbpf; extern const struct bench bench_rb_custom; @@ -549,6 +557,7 @@ extern const struct bench bench_local_storage_create; extern const struct bench bench_htab_mem; extern const struct bench bench_crypto_encrypt; extern const struct bench bench_crypto_decrypt; +extern const struct bench bench_sockmap; static const struct bench *benchs[] = { &bench_count_global, @@ -586,6 +595,12 @@ static const struct bench *benchs[] = { &bench_trig_uretprobe_multi_push, &bench_trig_uprobe_multi_ret, &bench_trig_uretprobe_multi_ret, +#ifdef __x86_64__ + &bench_trig_uprobe_nop5, + &bench_trig_uretprobe_nop5, + &bench_trig_uprobe_multi_nop5, + &bench_trig_uretprobe_multi_nop5, +#endif /* ringbuf/perfbuf benchmarks */ &bench_rb_libbpf, &bench_rb_custom, @@ -609,6 +624,7 @@ static const struct bench *benchs[] = { &bench_htab_mem, &bench_crypto_encrypt, &bench_crypto_decrypt, + &bench_sockmap, }; static void find_benchmark(void) diff --git a/tools/testing/selftests/bpf/benchs/bench_htab_mem.c b/tools/testing/selftests/bpf/benchs/bench_htab_mem.c index 926ee822143e..297e32390cd1 100644 --- a/tools/testing/selftests/bpf/benchs/bench_htab_mem.c +++ b/tools/testing/selftests/bpf/benchs/bench_htab_mem.c @@ -279,6 +279,7 @@ static void htab_mem_read_mem_cgrp_file(const char *name, unsigned long *value) } got = read(fd, buf, sizeof(buf) - 1); + close(fd); if (got <= 0) { *value = 0; return; @@ -286,8 +287,6 @@ static void htab_mem_read_mem_cgrp_file(const char *name, unsigned long *value) buf[got] = 0; *value = strtoull(buf, NULL, 0); - - close(fd); } static void htab_mem_measure(struct bench_res *res) diff --git a/tools/testing/selftests/bpf/benchs/bench_sockmap.c b/tools/testing/selftests/bpf/benchs/bench_sockmap.c new file mode 100644 index 000000000000..8ebf563a67a2 --- /dev/null +++ b/tools/testing/selftests/bpf/benchs/bench_sockmap.c @@ -0,0 +1,598 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <error.h> +#include <sys/types.h> +#include <sys/socket.h> +#include <netinet/in.h> +#include <sys/sendfile.h> +#include <arpa/inet.h> +#include <fcntl.h> +#include <argp.h> +#include "bench.h" +#include "bench_sockmap_prog.skel.h" + +#define FILE_SIZE (128 * 1024) +#define DATA_REPEAT_SIZE 10 + +static const char snd_data[DATA_REPEAT_SIZE] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; + +/* c1 <-> [p1, p2] <-> c2 + * RX bench(BPF_SK_SKB_STREAM_VERDICT): + * ARG_FW_RX_PASS: + * send(p2) -> recv(c2) -> bpf skb passthrough -> recv(c2) + * ARG_FW_RX_VERDICT_EGRESS: + * send(c1) -> verdict skb to tx queuec of p2 -> recv(c2) + * ARG_FW_RX_VERDICT_INGRESS: + * send(c1) -> verdict skb to rx queuec of c2 -> recv(c2) + * + * TX bench(BPF_SK_MSG_VERDIC): + * ARG_FW_TX_PASS: + * send(p2) -> bpf msg passthrough -> send(p2) -> recv(c2) + * ARG_FW_TX_VERDICT_INGRESS: + * send(p2) -> verdict msg to rx queue of c2 -> recv(c2) + * ARG_FW_TX_VERDICT_EGRESS: + * send(p1) -> verdict msg to tx queue of p2 -> recv(c2) + */ +enum SOCKMAP_ARG_FLAG { + ARG_FW_RX_NORMAL = 11000, + ARG_FW_RX_PASS, + ARG_FW_RX_VERDICT_EGRESS, + ARG_FW_RX_VERDICT_INGRESS, + ARG_FW_TX_NORMAL, + ARG_FW_TX_PASS, + ARG_FW_TX_VERDICT_INGRESS, + ARG_FW_TX_VERDICT_EGRESS, + ARG_CTL_RX_STRP, + 
ARG_CONSUMER_DELAY_TIME, + ARG_PRODUCER_DURATION, +}; + +#define TXMODE_NORMAL() \ + ((ctx.mode) == ARG_FW_TX_NORMAL) + +#define TXMODE_BPF_INGRESS() \ + ((ctx.mode) == ARG_FW_TX_VERDICT_INGRESS) + +#define TXMODE_BPF_EGRESS() \ + ((ctx.mode) == ARG_FW_TX_VERDICT_EGRESS) + +#define TXMODE_BPF_PASS() \ + ((ctx.mode) == ARG_FW_TX_PASS) + +#define TXMODE_BPF() ( \ + TXMODE_BPF_PASS() || \ + TXMODE_BPF_INGRESS() || \ + TXMODE_BPF_EGRESS()) + +#define TXMODE() ( \ + TXMODE_NORMAL() || \ + TXMODE_BPF()) + +#define RXMODE_NORMAL() \ + ((ctx.mode) == ARG_FW_RX_NORMAL) + +#define RXMODE_BPF_PASS() \ + ((ctx.mode) == ARG_FW_RX_PASS) + +#define RXMODE_BPF_VERDICT_EGRESS() \ + ((ctx.mode) == ARG_FW_RX_VERDICT_EGRESS) + +#define RXMODE_BPF_VERDICT_INGRESS() \ + ((ctx.mode) == ARG_FW_RX_VERDICT_INGRESS) + +#define RXMODE_BPF_VERDICT() ( \ + RXMODE_BPF_VERDICT_INGRESS() || \ + RXMODE_BPF_VERDICT_EGRESS()) + +#define RXMODE_BPF() ( \ + RXMODE_BPF_PASS() || \ + RXMODE_BPF_VERDICT()) + +#define RXMODE() ( \ + RXMODE_NORMAL() || \ + RXMODE_BPF()) + +static struct socmap_ctx { + struct bench_sockmap_prog *skel; + enum SOCKMAP_ARG_FLAG mode; + #define c1 fds[0] + #define p1 fds[1] + #define c2 fds[2] + #define p2 fds[3] + #define sfd fds[4] + int fds[5]; + long send_calls; + long read_calls; + long prod_send; + long user_read; + int file_size; + int delay_consumer; + int prod_run_time; + int strp_size; +} ctx = { + .prod_send = 0, + .user_read = 0, + .file_size = FILE_SIZE, + .mode = ARG_FW_RX_VERDICT_EGRESS, + .fds = {0}, + .delay_consumer = 0, + .prod_run_time = 0, + .strp_size = 0, +}; + +static void bench_sockmap_prog_destroy(void) +{ + int i; + + for (i = 0; i < sizeof(ctx.fds); i++) { + if (ctx.fds[0] > 0) + close(ctx.fds[i]); + } + + bench_sockmap_prog__destroy(ctx.skel); +} + +static void init_addr(struct sockaddr_storage *ss, + socklen_t *len) +{ + struct sockaddr_in *addr4 = memset(ss, 0, sizeof(*ss)); + + addr4->sin_family = AF_INET; + addr4->sin_port = 0; + addr4->sin_addr.s_addr = htonl(INADDR_LOOPBACK); + *len = sizeof(*addr4); +} + +static bool set_non_block(int fd, bool blocking) +{ + int flags = fcntl(fd, F_GETFL, 0); + + if (flags == -1) + return false; + flags = blocking ? 
(flags | O_NONBLOCK) : (flags & ~O_NONBLOCK); + return (fcntl(fd, F_SETFL, flags) == 0); +} + +static int create_pair(int *c, int *p, int type) +{ + struct sockaddr_storage addr; + int err, cfd, pfd; + socklen_t addr_len = sizeof(struct sockaddr_storage); + + err = getsockname(ctx.sfd, (struct sockaddr *)&addr, &addr_len); + if (err) { + fprintf(stderr, "getsockname error %d\n", errno); + return err; + } + cfd = socket(AF_INET, type, 0); + if (cfd < 0) { + fprintf(stderr, "socket error %d\n", errno); + return err; + } + + err = connect(cfd, (struct sockaddr *)&addr, addr_len); + if (err && errno != EINPROGRESS) { + fprintf(stderr, "connect error %d\n", errno); + return err; + } + + pfd = accept(ctx.sfd, NULL, NULL); + if (pfd < 0) { + fprintf(stderr, "accept error %d\n", errno); + return err; + } + *c = cfd; + *p = pfd; + return 0; +} + +static int create_sockets(void) +{ + struct sockaddr_storage addr; + int err, one = 1; + socklen_t addr_len; + + init_addr(&addr, &addr_len); + ctx.sfd = socket(AF_INET, SOCK_STREAM, 0); + if (ctx.sfd < 0) { + fprintf(stderr, "socket error:%d\n", errno); + return ctx.sfd; + } + err = setsockopt(ctx.sfd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)); + if (err) { + fprintf(stderr, "setsockopt error:%d\n", errno); + return err; + } + + err = bind(ctx.sfd, (struct sockaddr *)&addr, addr_len); + if (err) { + fprintf(stderr, "bind error:%d\n", errno); + return err; + } + + err = listen(ctx.sfd, SOMAXCONN); + if (err) { + fprintf(stderr, "listen error:%d\n", errno); + return err; + } + + err = create_pair(&ctx.c1, &ctx.p1, SOCK_STREAM); + if (err) { + fprintf(stderr, "create_pair 1 error\n"); + return err; + } + + err = create_pair(&ctx.c2, &ctx.p2, SOCK_STREAM); + if (err) { + fprintf(stderr, "create_pair 2 error\n"); + return err; + } + printf("create socket fd c1:%d p1:%d c2:%d p2:%d\n", + ctx.c1, ctx.p1, ctx.c2, ctx.p2); + return 0; +} + +static void validate(void) +{ + if (env.consumer_cnt != 2 || env.producer_cnt != 1 || + !env.affinity) + goto err; + return; +err: + fprintf(stderr, "argument '-c 2 -p 1 -a' is necessary"); + exit(1); +} + +static int setup_rx_sockmap(void) +{ + int verdict, pass, parser, map; + int zero = 0, one = 1; + int err; + + parser = bpf_program__fd(ctx.skel->progs.prog_skb_parser); + verdict = bpf_program__fd(ctx.skel->progs.prog_skb_verdict); + pass = bpf_program__fd(ctx.skel->progs.prog_skb_pass); + map = bpf_map__fd(ctx.skel->maps.sock_map_rx); + + if (ctx.strp_size != 0) { + ctx.skel->bss->pkt_size = ctx.strp_size; + err = bpf_prog_attach(parser, map, BPF_SK_SKB_STREAM_PARSER, 0); + if (err) + return err; + } + + if (RXMODE_BPF_VERDICT()) + err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0); + else if (RXMODE_BPF_PASS()) + err = bpf_prog_attach(pass, map, BPF_SK_SKB_STREAM_VERDICT, 0); + if (err) + return err; + + if (RXMODE_BPF_PASS()) + return bpf_map_update_elem(map, &zero, &ctx.c2, BPF_NOEXIST); + + err = bpf_map_update_elem(map, &zero, &ctx.p1, BPF_NOEXIST); + if (err < 0) + return err; + + if (RXMODE_BPF_VERDICT_INGRESS()) { + ctx.skel->bss->verdict_dir = BPF_F_INGRESS; + err = bpf_map_update_elem(map, &one, &ctx.c2, BPF_NOEXIST); + } else { + err = bpf_map_update_elem(map, &one, &ctx.p2, BPF_NOEXIST); + } + if (err < 0) + return err; + + return 0; +} + +static int setup_tx_sockmap(void) +{ + int zero = 0, one = 1; + int prog, map; + int err; + + map = bpf_map__fd(ctx.skel->maps.sock_map_tx); + prog = TXMODE_BPF_PASS() ? 
+ bpf_program__fd(ctx.skel->progs.prog_skmsg_pass) : + bpf_program__fd(ctx.skel->progs.prog_skmsg_verdict); + + err = bpf_prog_attach(prog, map, BPF_SK_MSG_VERDICT, 0); + if (err) + return err; + + if (TXMODE_BPF_EGRESS()) { + err = bpf_map_update_elem(map, &zero, &ctx.p1, BPF_NOEXIST); + err |= bpf_map_update_elem(map, &one, &ctx.p2, BPF_NOEXIST); + } else { + ctx.skel->bss->verdict_dir = BPF_F_INGRESS; + err = bpf_map_update_elem(map, &zero, &ctx.p2, BPF_NOEXIST); + err |= bpf_map_update_elem(map, &one, &ctx.c2, BPF_NOEXIST); + } + + if (err < 0) + return err; + + return 0; +} + +static void setup(void) +{ + int err; + + ctx.skel = bench_sockmap_prog__open_and_load(); + if (!ctx.skel) { + fprintf(stderr, "error loading skel\n"); + exit(1); + } + + if (create_sockets()) { + fprintf(stderr, "create_net_mode error\n"); + goto err; + } + + if (RXMODE_BPF()) { + err = setup_rx_sockmap(); + if (err) { + fprintf(stderr, "setup_rx_sockmap error:%d\n", err); + goto err; + } + } else if (TXMODE_BPF()) { + err = setup_tx_sockmap(); + if (err) { + fprintf(stderr, "setup_tx_sockmap error:%d\n", err); + goto err; + } + } else { + fprintf(stderr, "unknown sockmap bench mode: %d\n", ctx.mode); + goto err; + } + + return; + +err: + bench_sockmap_prog_destroy(); + exit(1); +} + +static void measure(struct bench_res *res) +{ + res->drops = atomic_swap(&ctx.prod_send, 0); + res->hits = atomic_swap(&ctx.skel->bss->process_byte, 0); + res->false_hits = atomic_swap(&ctx.user_read, 0); + res->important_hits = atomic_swap(&ctx.send_calls, 0); + res->important_hits |= atomic_swap(&ctx.read_calls, 0) << 32; +} + +static void verify_data(int *check_pos, char *buf, int rcv) +{ + for (int i = 0 ; i < rcv; i++) { + if (buf[i] != snd_data[(*check_pos) % DATA_REPEAT_SIZE]) { + fprintf(stderr, "verify data fail"); + exit(1); + } + (*check_pos)++; + if (*check_pos >= FILE_SIZE) + *check_pos = 0; + } +} + +static void *consumer(void *input) +{ + int rcv, sent; + int check_pos = 0; + int tid = (long)input; + int recv_buf_size = FILE_SIZE; + char *buf = malloc(recv_buf_size); + int delay_read = ctx.delay_consumer; + + if (!buf) { + fprintf(stderr, "fail to init read buffer"); + return NULL; + } + + while (true) { + if (tid == 1) { + /* consumer 1 is unused for tx test and stream verdict test */ + if (RXMODE_BPF() || TXMODE()) + return NULL; + /* it's only for RX_NORMAL which service as reserve-proxy mode */ + rcv = read(ctx.p1, buf, recv_buf_size); + if (rcv < 0) { + fprintf(stderr, "fail to read p1"); + return NULL; + } + + sent = send(ctx.p2, buf, recv_buf_size, 0); + if (sent < 0) { + fprintf(stderr, "fail to send p2"); + return NULL; + } + } else { + if (delay_read != 0) { + if (delay_read < 0) + return NULL; + sleep(delay_read); + delay_read = 0; + } + /* read real endpoint by consumer 0 */ + atomic_inc(&ctx.read_calls); + rcv = read(ctx.c2, buf, recv_buf_size); + if (rcv < 0 && errno != EAGAIN) { + fprintf(stderr, "%s fail to read c2 %d\n", __func__, errno); + return NULL; + } + verify_data(&check_pos, buf, rcv); + atomic_add(&ctx.user_read, rcv); + } + } + + return NULL; +} + +static void *producer(void *input) +{ + int off = 0, fp, need_sent, sent; + int file_size = ctx.file_size; + struct timespec ts1, ts2; + int target; + FILE *file; + + file = tmpfile(); + if (!file) { + fprintf(stderr, "create file for sendfile"); + return NULL; + } + + /* we need simple verify */ + for (int i = 0; i < file_size; i++) { + if (fwrite(&snd_data[off], sizeof(char), 1, file) != 1) { + fprintf(stderr, "init tmpfile error"); + return 
NULL; + } + if (++off >= sizeof(snd_data)) + off = 0; + } + fflush(file); + fseek(file, 0, SEEK_SET); + + fp = fileno(file); + need_sent = file_size; + clock_gettime(CLOCK_MONOTONIC, &ts1); + + if (RXMODE_BPF_VERDICT()) + target = ctx.c1; + else if (TXMODE_BPF_EGRESS()) + target = ctx.p1; + else + target = ctx.p2; + set_non_block(target, true); + while (true) { + if (ctx.prod_run_time) { + clock_gettime(CLOCK_MONOTONIC, &ts2); + if (ts2.tv_sec - ts1.tv_sec > ctx.prod_run_time) + return NULL; + } + + errno = 0; + atomic_inc(&ctx.send_calls); + sent = sendfile(target, fp, NULL, need_sent); + if (sent < 0) { + if (errno != EAGAIN && errno != ENOMEM && errno != ENOBUFS) { + fprintf(stderr, "sendfile return %d, errorno %d:%s\n", + sent, errno, strerror(errno)); + return NULL; + } + continue; + } else if (sent < need_sent) { + need_sent -= sent; + atomic_add(&ctx.prod_send, sent); + continue; + } + atomic_add(&ctx.prod_send, need_sent); + need_sent = file_size; + lseek(fp, 0, SEEK_SET); + } + + return NULL; +} + +static void report_progress(int iter, struct bench_res *res, long delta_ns) +{ + double speed_mbs, prod_mbs, bpf_mbs, send_hz, read_hz; + + prod_mbs = res->drops / 1000000.0 / (delta_ns / 1000000000.0); + speed_mbs = res->false_hits / 1000000.0 / (delta_ns / 1000000000.0); + bpf_mbs = res->hits / 1000000.0 / (delta_ns / 1000000000.0); + send_hz = (res->important_hits & 0xFFFFFFFF) / (delta_ns / 1000000000.0); + read_hz = (res->important_hits >> 32) / (delta_ns / 1000000000.0); + + printf("Iter %3d (%7.3lfus): ", + iter, (delta_ns - 1000000000) / 1000.0); + printf("Send Speed %8.3lf MB/s (%8.3lf calls/s), BPF Speed %8.3lf MB/s, " + "Rcv Speed %8.3lf MB/s (%8.3lf calls/s)\n", + prod_mbs, send_hz, bpf_mbs, speed_mbs, read_hz); +} + +static void report_final(struct bench_res res[], int res_cnt) +{ + double verdict_mbs_mean = 0.0; + long verdict_total = 0; + int i; + + for (i = 0; i < res_cnt; i++) { + verdict_mbs_mean += res[i].hits / 1000000.0 / (0.0 + res_cnt); + verdict_total += res[i].hits / 1000000.0; + } + + printf("Summary: total trans %8.3lu MB \u00B1 %5.3lf MB/s\n", + verdict_total, verdict_mbs_mean); +} + +static const struct argp_option opts[] = { + { "rx-normal", ARG_FW_RX_NORMAL, NULL, 0, + "simple reserve-proxy mode, no bfp enabled"}, + { "rx-pass", ARG_FW_RX_PASS, NULL, 0, + "run bpf prog but no redir applied"}, + { "rx-strp", ARG_CTL_RX_STRP, "Byte", 0, + "enable strparser and set the encapsulation size"}, + { "rx-verdict-egress", ARG_FW_RX_VERDICT_EGRESS, NULL, 0, + "forward data with bpf(stream verdict)"}, + { "rx-verdict-ingress", ARG_FW_RX_VERDICT_INGRESS, NULL, 0, + "forward data with bpf(stream verdict)"}, + { "tx-normal", ARG_FW_TX_NORMAL, NULL, 0, + "simple c-s mode, no bfp enabled"}, + { "tx-pass", ARG_FW_TX_PASS, NULL, 0, + "run bpf prog but no redir applied"}, + { "tx-verdict-ingress", ARG_FW_TX_VERDICT_INGRESS, NULL, 0, + "forward msg to ingress queue of another socket"}, + { "tx-verdict-egress", ARG_FW_TX_VERDICT_EGRESS, NULL, 0, + "forward msg to egress queue of another socket"}, + { "delay-consumer", ARG_CONSUMER_DELAY_TIME, "SEC", 0, + "delay consumer start"}, + { "producer-duration", ARG_PRODUCER_DURATION, "SEC", 0, + "producer duration"}, + {}, +}; + +static error_t parse_arg(int key, char *arg, struct argp_state *state) +{ + switch (key) { + case ARG_FW_RX_NORMAL...ARG_FW_TX_VERDICT_EGRESS: + ctx.mode = key; + break; + case ARG_CONSUMER_DELAY_TIME: + ctx.delay_consumer = strtol(arg, NULL, 10); + break; + case ARG_PRODUCER_DURATION: + ctx.prod_run_time = 
strtol(arg, NULL, 10); + break; + case ARG_CTL_RX_STRP: + ctx.strp_size = strtol(arg, NULL, 10); + break; + default: + return ARGP_ERR_UNKNOWN; + } + + return 0; +} + +/* exported into benchmark runner */ +const struct argp bench_sockmap_argp = { + .options = opts, + .parser = parse_arg, +}; + +/* Benchmark performance of creating bpf local storage */ +const struct bench bench_sockmap = { + .name = "sockmap", + .argp = &bench_sockmap_argp, + .validate = validate, + .setup = setup, + .producer_thread = producer, + .consumer_thread = consumer, + .measure = measure, + .report_progress = report_progress, + .report_final = report_final, +}; diff --git a/tools/testing/selftests/bpf/benchs/bench_trigger.c b/tools/testing/selftests/bpf/benchs/bench_trigger.c index 32e9f194d449..82327657846e 100644 --- a/tools/testing/selftests/bpf/benchs/bench_trigger.c +++ b/tools/testing/selftests/bpf/benchs/bench_trigger.c @@ -333,6 +333,20 @@ static void *uprobe_producer_ret(void *input) return NULL; } +#ifdef __x86_64__ +__nocf_check __weak void uprobe_target_nop5(void) +{ + asm volatile (".byte 0x0f, 0x1f, 0x44, 0x00, 0x00"); +} + +static void *uprobe_producer_nop5(void *input) +{ + while (true) + uprobe_target_nop5(); + return NULL; +} +#endif + static void usetup(bool use_retprobe, bool use_multi, void *target_addr) { size_t uprobe_offset; @@ -448,6 +462,28 @@ static void uretprobe_multi_ret_setup(void) usetup(true, true /* use_multi */, &uprobe_target_ret); } +#ifdef __x86_64__ +static void uprobe_nop5_setup(void) +{ + usetup(false, false /* !use_multi */, &uprobe_target_nop5); +} + +static void uretprobe_nop5_setup(void) +{ + usetup(true, false /* !use_multi */, &uprobe_target_nop5); +} + +static void uprobe_multi_nop5_setup(void) +{ + usetup(false, true /* use_multi */, &uprobe_target_nop5); +} + +static void uretprobe_multi_nop5_setup(void) +{ + usetup(true, true /* use_multi */, &uprobe_target_nop5); +} +#endif + const struct bench bench_trig_syscall_count = { .name = "trig-syscall-count", .validate = trigger_validate, @@ -506,3 +542,9 @@ BENCH_TRIG_USERMODE(uprobe_multi_ret, ret, "uprobe-multi-ret"); BENCH_TRIG_USERMODE(uretprobe_multi_nop, nop, "uretprobe-multi-nop"); BENCH_TRIG_USERMODE(uretprobe_multi_push, push, "uretprobe-multi-push"); BENCH_TRIG_USERMODE(uretprobe_multi_ret, ret, "uretprobe-multi-ret"); +#ifdef __x86_64__ +BENCH_TRIG_USERMODE(uprobe_nop5, nop5, "uprobe-nop5"); +BENCH_TRIG_USERMODE(uretprobe_nop5, nop5, "uretprobe-nop5"); +BENCH_TRIG_USERMODE(uprobe_multi_nop5, nop5, "uprobe-multi-nop5"); +BENCH_TRIG_USERMODE(uretprobe_multi_nop5, nop5, "uretprobe-multi-nop5"); +#endif diff --git a/tools/testing/selftests/bpf/benchs/run_bench_uprobes.sh b/tools/testing/selftests/bpf/benchs/run_bench_uprobes.sh index af169f831f2f..03f55405484b 100755 --- a/tools/testing/selftests/bpf/benchs/run_bench_uprobes.sh +++ b/tools/testing/selftests/bpf/benchs/run_bench_uprobes.sh @@ -2,7 +2,7 @@ set -eufo pipefail -for i in usermode-count syscall-count {uprobe,uretprobe}-{nop,push,ret} +for i in usermode-count syscall-count {uprobe,uretprobe}-{nop,push,ret,nop5} do summary=$(sudo ./bench -w2 -d5 -a trig-$i | tail -n1 | cut -d'(' -f1 | cut -d' ' -f3-) printf "%-15s: %s\n" $i "$summary" diff --git a/tools/testing/selftests/bpf/bpf_arena_common.h b/tools/testing/selftests/bpf/bpf_arena_common.h index 68a51dcc0669..16f8ce832004 100644 --- a/tools/testing/selftests/bpf/bpf_arena_common.h +++ b/tools/testing/selftests/bpf/bpf_arena_common.h @@ -46,8 +46,11 @@ void __arena* bpf_arena_alloc_pages(void *map, void 
__arena *addr, __u32 page_cnt, int node_id, __u64 flags) __ksym __weak; +int bpf_arena_reserve_pages(void *map, void __arena *addr, __u32 page_cnt) __ksym __weak; void bpf_arena_free_pages(void *map, void __arena *ptr, __u32 page_cnt) __ksym __weak; +#define arena_base(map) ((void __arena *)((struct bpf_arena *)(map))->user_vm_start) + #else /* when compiled as user space code */ #define __arena diff --git a/tools/testing/selftests/bpf/bpf_atomic.h b/tools/testing/selftests/bpf/bpf_atomic.h new file mode 100644 index 000000000000..c550e5711967 --- /dev/null +++ b/tools/testing/selftests/bpf/bpf_atomic.h @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */ +#ifndef BPF_ATOMIC_H +#define BPF_ATOMIC_H + +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include "bpf_experimental.h" + +extern bool CONFIG_X86_64 __kconfig __weak; + +/* + * __unqual_typeof(x) - Declare an unqualified scalar type, leaving + * non-scalar types unchanged, + * + * Prefer C11 _Generic for better compile-times and simpler code. Note: 'char' + * is not type-compatible with 'signed char', and we define a separate case. + * + * This is copied verbatim from kernel's include/linux/compiler_types.h, but + * with default expression (for pointers) changed from (x) to (typeof(x)0). + * + * This is because LLVM has a bug where for lvalue (x), it does not get rid of + * an extra address_space qualifier, but does in case of rvalue (typeof(x)0). + * Hence, for pointers, we need to create an rvalue expression to get the + * desired type. See https://github.com/llvm/llvm-project/issues/53400. + */ +#define __scalar_type_to_expr_cases(type) \ + unsigned type : (unsigned type)0, signed type : (signed type)0 + +#define __unqual_typeof(x) \ + typeof(_Generic((x), \ + char: (char)0, \ + __scalar_type_to_expr_cases(char), \ + __scalar_type_to_expr_cases(short), \ + __scalar_type_to_expr_cases(int), \ + __scalar_type_to_expr_cases(long), \ + __scalar_type_to_expr_cases(long long), \ + default: (typeof(x))0)) + +/* No-op for BPF */ +#define cpu_relax() ({}) + +#define READ_ONCE(x) (*(volatile typeof(x) *)&(x)) + +#define WRITE_ONCE(x, val) ((*(volatile typeof(x) *)&(x)) = (val)) + +#define cmpxchg(p, old, new) __sync_val_compare_and_swap((p), old, new) + +#define try_cmpxchg(p, pold, new) \ + ({ \ + __unqual_typeof(*(pold)) __o = *(pold); \ + __unqual_typeof(*(p)) __r = cmpxchg(p, __o, new); \ + if (__r != __o) \ + *(pold) = __r; \ + __r == __o; \ + }) + +#define try_cmpxchg_relaxed(p, pold, new) try_cmpxchg(p, pold, new) + +#define try_cmpxchg_acquire(p, pold, new) try_cmpxchg(p, pold, new) + +#define smp_mb() \ + ({ \ + volatile unsigned long __val; \ + __sync_fetch_and_add(&__val, 0); \ + }) + +#define smp_rmb() \ + ({ \ + if (!CONFIG_X86_64) \ + smp_mb(); \ + else \ + barrier(); \ + }) + +#define smp_wmb() \ + ({ \ + if (!CONFIG_X86_64) \ + smp_mb(); \ + else \ + barrier(); \ + }) + +/* Control dependency provides LOAD->STORE, provide LOAD->LOAD */ +#define smp_acquire__after_ctrl_dep() ({ smp_rmb(); }) + +#define smp_load_acquire(p) \ + ({ \ + __unqual_typeof(*(p)) __v = READ_ONCE(*(p)); \ + if (!CONFIG_X86_64) \ + smp_mb(); \ + barrier(); \ + __v; \ + }) + +#define smp_store_release(p, val) \ + ({ \ + if (!CONFIG_X86_64) \ + smp_mb(); \ + barrier(); \ + WRITE_ONCE(*(p), val); \ + }) + +#define smp_cond_load_relaxed_label(p, cond_expr, label) \ + ({ \ + typeof(p) __ptr = (p); \ + __unqual_typeof(*(p)) VAL; \ + for (;;) { \ + VAL = 
(__unqual_typeof(*(p)))READ_ONCE(*__ptr); \ + if (cond_expr) \ + break; \ + cond_break_label(label); \ + cpu_relax(); \ + } \ + (typeof(*(p)))VAL; \ + }) + +#define smp_cond_load_acquire_label(p, cond_expr, label) \ + ({ \ + __unqual_typeof(*p) __val = \ + smp_cond_load_relaxed_label(p, cond_expr, label); \ + smp_acquire__after_ctrl_dep(); \ + (typeof(*(p)))__val; \ + }) + +#define atomic_read(p) READ_ONCE((p)->counter) + +#define atomic_cond_read_relaxed_label(p, cond_expr, label) \ + smp_cond_load_relaxed_label(&(p)->counter, cond_expr, label) + +#define atomic_cond_read_acquire_label(p, cond_expr, label) \ + smp_cond_load_acquire_label(&(p)->counter, cond_expr, label) + +#define atomic_try_cmpxchg_relaxed(p, pold, new) \ + try_cmpxchg_relaxed(&(p)->counter, pold, new) + +#define atomic_try_cmpxchg_acquire(p, pold, new) \ + try_cmpxchg_acquire(&(p)->counter, pold, new) + +#endif /* BPF_ATOMIC_H */ diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h index cd8ecd39c3f3..da7e230f2781 100644 --- a/tools/testing/selftests/bpf/bpf_experimental.h +++ b/tools/testing/selftests/bpf/bpf_experimental.h @@ -368,12 +368,12 @@ l_true: \ ret; \ }) -#define cond_break \ +#define __cond_break(expr) \ ({ __label__ l_break, l_continue; \ asm volatile goto("may_goto %l[l_break]" \ :::: l_break); \ goto l_continue; \ - l_break: break; \ + l_break: expr; \ l_continue:; \ }) #else @@ -392,7 +392,7 @@ l_true: \ ret; \ }) -#define cond_break \ +#define __cond_break(expr) \ ({ __label__ l_break, l_continue; \ asm volatile goto("1:.byte 0xe5; \ .byte 0; \ @@ -400,7 +400,7 @@ l_true: \ .short 0" \ :::: l_break); \ goto l_continue; \ - l_break: break; \ + l_break: expr; \ l_continue:; \ }) #else @@ -418,7 +418,7 @@ l_true: \ ret; \ }) -#define cond_break \ +#define __cond_break(expr) \ ({ __label__ l_break, l_continue; \ asm volatile goto("1:.byte 0xe5; \ .byte 0; \ @@ -426,12 +426,15 @@ l_true: \ .short 0" \ :::: l_break); \ goto l_continue; \ - l_break: break; \ + l_break: expr; \ l_continue:; \ }) #endif #endif +#define cond_break __cond_break(break) +#define cond_break_label(label) __cond_break(goto label) + #ifndef bpf_nop_mov #define bpf_nop_mov(var) \ asm volatile("%[reg]=%[reg]"::[reg]"r"((short)var)) @@ -588,4 +591,12 @@ extern int bpf_iter_kmem_cache_new(struct bpf_iter_kmem_cache *it) __weak __ksym extern struct kmem_cache *bpf_iter_kmem_cache_next(struct bpf_iter_kmem_cache *it) __weak __ksym; extern void bpf_iter_kmem_cache_destroy(struct bpf_iter_kmem_cache *it) __weak __ksym; +struct bpf_iter_dmabuf; +extern int bpf_iter_dmabuf_new(struct bpf_iter_dmabuf *it) __weak __ksym; +extern struct dma_buf *bpf_iter_dmabuf_next(struct bpf_iter_dmabuf *it) __weak __ksym; +extern void bpf_iter_dmabuf_destroy(struct bpf_iter_dmabuf *it) __weak __ksym; + +extern int bpf_cgroup_read_xattr(struct cgroup *cgroup, const char *name__str, + struct bpf_dynptr *value_p) __weak __ksym; + #endif diff --git a/tools/testing/selftests/bpf/bpf_kfuncs.h b/tools/testing/selftests/bpf/bpf_kfuncs.h index 2eb3483f2fb0..9386dfe8b884 100644 --- a/tools/testing/selftests/bpf/bpf_kfuncs.h +++ b/tools/testing/selftests/bpf/bpf_kfuncs.h @@ -69,7 +69,7 @@ extern int bpf_get_file_xattr(struct file *file, const char *name, struct bpf_dynptr *value_ptr) __ksym; extern int bpf_get_fsverity_digest(struct file *file, struct bpf_dynptr *digest_ptr) __ksym; -extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym; +extern struct bpf_key *bpf_lookup_user_key(__s32 serial, 
__u64 flags) __ksym; extern struct bpf_key *bpf_lookup_system_key(__u64 id) __ksym; extern void bpf_key_put(struct bpf_key *key) __ksym; extern int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_ptr, @@ -87,4 +87,9 @@ struct dentry; */ extern int bpf_get_dentry_xattr(struct dentry *dentry, const char *name, struct bpf_dynptr *value_ptr) __ksym __weak; + +extern int bpf_set_dentry_xattr(struct dentry *dentry, const char *name__str, + const struct bpf_dynptr *value_p, int flags) __ksym __weak; +extern int bpf_remove_dentry_xattr(struct dentry *dentry, const char *name__str) __ksym __weak; + #endif diff --git a/tools/testing/selftests/bpf/bpf_test_modorder_x/Makefile b/tools/testing/selftests/bpf/bpf_test_modorder_x/Makefile deleted file mode 100644 index 40b25b98ad1b..000000000000 --- a/tools/testing/selftests/bpf/bpf_test_modorder_x/Makefile +++ /dev/null @@ -1,19 +0,0 @@ -BPF_TESTMOD_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST))))) -KDIR ?= $(abspath $(BPF_TESTMOD_DIR)/../../../../..) - -ifeq ($(V),1) -Q = -else -Q = @ -endif - -MODULES = bpf_test_modorder_x.ko - -obj-m += bpf_test_modorder_x.o - -all: - +$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) modules - -clean: - +$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) clean - diff --git a/tools/testing/selftests/bpf/bpf_test_modorder_y/Makefile b/tools/testing/selftests/bpf/bpf_test_modorder_y/Makefile deleted file mode 100644 index 52c3ab9d84e2..000000000000 --- a/tools/testing/selftests/bpf/bpf_test_modorder_y/Makefile +++ /dev/null @@ -1,19 +0,0 @@ -BPF_TESTMOD_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST))))) -KDIR ?= $(abspath $(BPF_TESTMOD_DIR)/../../../../..) - -ifeq ($(V),1) -Q = -else -Q = @ -endif - -MODULES = bpf_test_modorder_y.ko - -obj-m += bpf_test_modorder_y.o - -all: - +$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) modules - -clean: - +$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) clean - diff --git a/tools/testing/selftests/bpf/bpf_test_no_cfi/Makefile b/tools/testing/selftests/bpf/bpf_test_no_cfi/Makefile deleted file mode 100644 index ed5143b79edf..000000000000 --- a/tools/testing/selftests/bpf/bpf_test_no_cfi/Makefile +++ /dev/null @@ -1,19 +0,0 @@ -BPF_TEST_NO_CFI_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST))))) -KDIR ?= $(abspath $(BPF_TEST_NO_CFI_DIR)/../../../../..) - -ifeq ($(V),1) -Q = -else -Q = @ -endif - -MODULES = bpf_test_no_cfi.ko - -obj-m += bpf_test_no_cfi.o - -all: - +$(Q)make -C $(KDIR) M=$(BPF_TEST_NO_CFI_DIR) modules - -clean: - +$(Q)make -C $(KDIR) M=$(BPF_TEST_NO_CFI_DIR) clean - diff --git a/tools/testing/selftests/bpf/bpf_testmod/Makefile b/tools/testing/selftests/bpf/bpf_testmod/Makefile deleted file mode 100644 index 15cb36c4483a..000000000000 --- a/tools/testing/selftests/bpf/bpf_testmod/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -BPF_TESTMOD_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST))))) -KDIR ?= $(abspath $(BPF_TESTMOD_DIR)/../../../../..) 
- -ifeq ($(V),1) -Q = -else -Q = @ -endif - -MODULES = bpf_testmod.ko - -obj-m += bpf_testmod.o -CFLAGS_bpf_testmod.o = -I$(src) - -all: - +$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) modules - -clean: - +$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) clean - diff --git a/tools/testing/selftests/bpf/cap_helpers.c b/tools/testing/selftests/bpf/cap_helpers.c index d5ac507401d7..98f840c3a38f 100644 --- a/tools/testing/selftests/bpf/cap_helpers.c +++ b/tools/testing/selftests/bpf/cap_helpers.c @@ -19,7 +19,7 @@ int cap_enable_effective(__u64 caps, __u64 *old_caps) err = capget(&hdr, data); if (err) - return err; + return -errno; if (old_caps) *old_caps = (__u64)(data[1].effective) << 32 | data[0].effective; @@ -32,7 +32,7 @@ int cap_enable_effective(__u64 caps, __u64 *old_caps) data[1].effective |= cap1; err = capset(&hdr, data); if (err) - return err; + return -errno; return 0; } @@ -49,7 +49,7 @@ int cap_disable_effective(__u64 caps, __u64 *old_caps) err = capget(&hdr, data); if (err) - return err; + return -errno; if (old_caps) *old_caps = (__u64)(data[1].effective) << 32 | data[0].effective; @@ -61,7 +61,7 @@ int cap_disable_effective(__u64 caps, __u64 *old_caps) data[1].effective &= ~cap1; err = capset(&hdr, data); if (err) - return err; + return -errno; return 0; } diff --git a/tools/testing/selftests/bpf/cap_helpers.h b/tools/testing/selftests/bpf/cap_helpers.h index 6d163530cb0f..8dcb28557f76 100644 --- a/tools/testing/selftests/bpf/cap_helpers.h +++ b/tools/testing/selftests/bpf/cap_helpers.h @@ -4,6 +4,7 @@ #include <linux/types.h> #include <linux/capability.h> +#include <errno.h> #ifndef CAP_PERFMON #define CAP_PERFMON 38 diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c index e4535451322e..15f626014872 100644 --- a/tools/testing/selftests/bpf/cgroup_helpers.c +++ b/tools/testing/selftests/bpf/cgroup_helpers.c @@ -4,6 +4,7 @@ #include <sys/mount.h> #include <sys/stat.h> #include <sys/types.h> +#include <sys/xattr.h> #include <linux/limits.h> #include <stdio.h> #include <stdlib.h> @@ -319,6 +320,26 @@ int join_parent_cgroup(const char *relative_path) } /** + * set_cgroup_xattr() - Set xattr on a cgroup dir + * @relative_path: The cgroup path, relative to the workdir, to set xattr + * @name: xattr name + * @value: xattr value + * + * This function set xattr on cgroup dir. + * + * On success, it returns 0, otherwise on failure it returns -1. 
+ */ +int set_cgroup_xattr(const char *relative_path, + const char *name, + const char *value) +{ + char cgroup_path[PATH_MAX + 1]; + + format_cgroup_path(cgroup_path, relative_path); + return setxattr(cgroup_path, name, value, strlen(value) + 1, 0); +} + +/** * __cleanup_cgroup_environment() - Delete temporary cgroups * * This is a helper for cleanup_cgroup_environment() that is responsible for diff --git a/tools/testing/selftests/bpf/cgroup_helpers.h b/tools/testing/selftests/bpf/cgroup_helpers.h index 502845160d88..182e1ac36c95 100644 --- a/tools/testing/selftests/bpf/cgroup_helpers.h +++ b/tools/testing/selftests/bpf/cgroup_helpers.h @@ -26,6 +26,10 @@ int join_cgroup(const char *relative_path); int join_root_cgroup(void); int join_parent_cgroup(const char *relative_path); +int set_cgroup_xattr(const char *relative_path, + const char *name, + const char *value); + int setup_cgroup_environment(void); void cleanup_cgroup_environment(void); diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config index 4ca84c8d9116..8916ab814a3e 100644 --- a/tools/testing/selftests/bpf/config +++ b/tools/testing/selftests/bpf/config @@ -22,6 +22,8 @@ CONFIG_CRYPTO_AES=y CONFIG_DEBUG_INFO=y CONFIG_DEBUG_INFO_BTF=y CONFIG_DEBUG_INFO_DWARF4=y +CONFIG_DMABUF_HEAPS=y +CONFIG_DMABUF_HEAPS_SYSTEM=y CONFIG_DUMMY=y CONFIG_DYNAMIC_FTRACE=y CONFIG_FPROBE=y @@ -58,6 +60,7 @@ CONFIG_MPLS=y CONFIG_MPLS_IPTUNNEL=y CONFIG_MPLS_ROUTING=y CONFIG_MPTCP=y +CONFIG_NET_ACT_GACT=y CONFIG_NET_ACT_SKBMOD=y CONFIG_NET_CLS=y CONFIG_NET_CLS_ACT=y @@ -70,8 +73,10 @@ CONFIG_NET_IPGRE=y CONFIG_NET_IPGRE_DEMUX=y CONFIG_NET_IPIP=y CONFIG_NET_MPLS_GSO=y +CONFIG_NET_SCH_BPF=y CONFIG_NET_SCH_FQ=y CONFIG_NET_SCH_INGRESS=y +CONFIG_NET_SCH_HTB=y CONFIG_NET_SCHED=y CONFIG_NETDEVSIM=y CONFIG_NETFILTER=y @@ -92,6 +97,9 @@ CONFIG_NF_TABLES_NETDEV=y CONFIG_NF_TABLES_IPV4=y CONFIG_NF_TABLES_IPV6=y CONFIG_NETFILTER_INGRESS=y +CONFIG_IP_NF_IPTABLES_LEGACY=y +CONFIG_IP6_NF_IPTABLES_LEGACY=y +CONFIG_NETFILTER_XTABLES_LEGACY=y CONFIG_NF_FLOW_TABLE=y CONFIG_NF_FLOW_TABLE_INET=y CONFIG_NETFILTER_NETLINK=y @@ -100,11 +108,13 @@ CONFIG_IP_NF_IPTABLES=y CONFIG_IP6_NF_IPTABLES=y CONFIG_IP6_NF_FILTER=y CONFIG_NF_NAT=y +CONFIG_PACKET=y CONFIG_RC_CORE=y CONFIG_SECURITY=y CONFIG_SECURITYFS=y CONFIG_SYN_COOKIES=y CONFIG_TEST_BPF=m +CONFIG_UDMABUF=y CONFIG_USERFAULTFD=y CONFIG_VSOCKETS=y CONFIG_VXLAN=y diff --git a/tools/testing/selftests/bpf/config.aarch64 b/tools/testing/selftests/bpf/config.aarch64 index 3720b7611523..e1495a4bbc99 100644 --- a/tools/testing/selftests/bpf/config.aarch64 +++ b/tools/testing/selftests/bpf/config.aarch64 @@ -158,7 +158,6 @@ CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_TUN=y CONFIG_UNIX=y CONFIG_UPROBES=y -CONFIG_USELIB=y CONFIG_USER_NS=y CONFIG_VETH=y CONFIG_VLAN_8021Q=y diff --git a/tools/testing/selftests/bpf/config.ppc64el b/tools/testing/selftests/bpf/config.ppc64el new file mode 100644 index 000000000000..9acf389dc4ce --- /dev/null +++ b/tools/testing/selftests/bpf/config.ppc64el @@ -0,0 +1,93 @@ +CONFIG_ALTIVEC=y +CONFIG_AUDIT=y +CONFIG_BLK_CGROUP=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BONDING=y +CONFIG_BPF_JIT_ALWAYS_ON=y +CONFIG_BPF_PRELOAD_UMD=y +CONFIG_BPF_PRELOAD=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_SCHED=y +CONFIG_CGROUPS=y +CONFIG_CMDLINE_BOOL=y +CONFIG_CMDLINE="console=hvc0 wg.success=hvc1 panic_on_warn=1" +CONFIG_CPU_LITTLE_ENDIAN=y 
+CONFIG_CPUSETS=y +CONFIG_DEBUG_ATOMIC_SLEEP=y +CONFIG_DEBUG_FS=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_DEVTMPFS=y +CONFIG_EXPERT=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_FRAME_POINTER=y +CONFIG_FRAME_WARN=1280 +CONFIG_HARDLOCKUP_DETECTOR=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_HUGETLBFS=y +CONFIG_HVC_CONSOLE=y +CONFIG_INET=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_JUMP_LABEL=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KPROBES=y +CONFIG_MEMCG=y +CONFIG_NAMESPACES=y +CONFIG_NET_ACT_BPF=y +CONFIG_NETDEVICES=y +CONFIG_NETFILTER_XT_MATCH_BPF=y +CONFIG_NET_L3_MASTER_DEV=y +CONFIG_NET_VRF=y +CONFIG_NET=y +CONFIG_NO_HZ_IDLE=y +CONFIG_NONPORTABLE=y +CONFIG_NR_CPUS=256 +CONFIG_PACKET=y +CONFIG_PANIC_ON_OOPS=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_PCI_HOST_GENERIC=y +CONFIG_PCI=y +CONFIG_POSIX_MQUEUE=y +CONFIG_PPC64=y +CONFIG_PPC_OF_BOOT_TRAMPOLINE=y +CONFIG_PPC_PSERIES=y +CONFIG_PPC_RADIX_MMU=y +CONFIG_PRINTK_TIME=y +CONFIG_PROC_KCORE=y +CONFIG_PROFILING=y +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +CONFIG_RT_GROUP_SCHED=y +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_SMP=y +CONFIG_SOC_VIRT=y +CONFIG_SYSVIPC=y +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_THREAD_SHIFT=14 +CONFIG_TLS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS=y +CONFIG_TUN=y +CONFIG_UNIX=y +CONFIG_UPROBES=y +CONFIG_USER_NS=y +CONFIG_VETH=y +CONFIG_VLAN_8021Q=y +CONFIG_VSOCKETS_LOOPBACK=y +CONFIG_VSX=y +CONFIG_XFRM_USER=y diff --git a/tools/testing/selftests/bpf/config.s390x b/tools/testing/selftests/bpf/config.s390x index 706931a8c2c6..26c3bc2ce11d 100644 --- a/tools/testing/selftests/bpf/config.s390x +++ b/tools/testing/selftests/bpf/config.s390x @@ -128,7 +128,6 @@ CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_TUN=y CONFIG_UNIX=y CONFIG_UPROBES=y -CONFIG_USELIB=y CONFIG_USER_NS=y CONFIG_VETH=y CONFIG_VLAN_8021Q=y diff --git a/tools/testing/selftests/bpf/config.x86_64 b/tools/testing/selftests/bpf/config.x86_64 index 5680befae8c6..5e713ef7caa3 100644 --- a/tools/testing/selftests/bpf/config.x86_64 +++ b/tools/testing/selftests/bpf/config.x86_64 @@ -39,7 +39,6 @@ CONFIG_CPU_FREQ_GOV_USERSPACE=y CONFIG_CPU_FREQ_STAT=y CONFIG_CPU_IDLE_GOV_LADDER=y CONFIG_CPUSETS=y -CONFIG_CRC_T10DIF=y CONFIG_CRYPTO_BLAKE2B=y CONFIG_CRYPTO_SEQIV=y CONFIG_CRYPTO_XXHASH=y diff --git a/tools/testing/selftests/bpf/map_tests/map_in_map_batch_ops.c b/tools/testing/selftests/bpf/map_tests/map_in_map_batch_ops.c index 66191ae9863c..79c3ccadb962 100644 --- a/tools/testing/selftests/bpf/map_tests/map_in_map_batch_ops.c +++ b/tools/testing/selftests/bpf/map_tests/map_in_map_batch_ops.c @@ -120,11 +120,12 @@ static void validate_fetch_results(int outer_map_fd, static void fetch_and_validate(int outer_map_fd, struct bpf_map_batch_opts *opts, - __u32 batch_size, bool delete_entries) + __u32 batch_size, bool delete_entries, + bool has_holes) { - __u32 *fetched_keys, *fetched_values, total_fetched = 0; - __u32 batch_key = 0, fetch_count, step_size; - int err, max_entries = OUTER_MAP_ENTRIES; + int err, max_entries = OUTER_MAP_ENTRIES - !!has_holes; + __u32 *fetched_keys, *fetched_values, total_fetched = 0, i; + __u32 batch_key = 0, fetch_count, step_size = batch_size; __u32 value_size = sizeof(__u32); /* Total entries needs to be fetched */ @@ -134,9 +135,8 @@ static void fetch_and_validate(int outer_map_fd, "Memory allocation failed for fetched_keys or 
fetched_values", "error=%s\n", strerror(errno)); - for (step_size = batch_size; - step_size <= max_entries; - step_size += batch_size) { + /* hash map may not always return full batch */ + for (i = 0; i < OUTER_MAP_ENTRIES; i++) { fetch_count = step_size; err = delete_entries ? bpf_map_lookup_and_delete_batch(outer_map_fd, @@ -155,6 +155,7 @@ static void fetch_and_validate(int outer_map_fd, if (err && errno == ENOSPC) { /* Fetch again with higher batch size */ total_fetched = 0; + step_size += batch_size; continue; } @@ -184,18 +185,19 @@ static void fetch_and_validate(int outer_map_fd, } static void _map_in_map_batch_ops(enum bpf_map_type outer_map_type, - enum bpf_map_type inner_map_type) + enum bpf_map_type inner_map_type, + bool has_holes) { + __u32 max_entries = OUTER_MAP_ENTRIES - !!has_holes; __u32 *outer_map_keys, *inner_map_fds; - __u32 max_entries = OUTER_MAP_ENTRIES; LIBBPF_OPTS(bpf_map_batch_opts, opts); __u32 value_size = sizeof(__u32); int batch_size[2] = {5, 10}; __u32 map_index, op_index; int outer_map_fd, ret; - outer_map_keys = calloc(max_entries, value_size); - inner_map_fds = calloc(max_entries, value_size); + outer_map_keys = calloc(OUTER_MAP_ENTRIES, value_size); + inner_map_fds = calloc(OUTER_MAP_ENTRIES, value_size); CHECK((!outer_map_keys || !inner_map_fds), "Memory allocation failed for outer_map_keys or inner_map_fds", "error=%s\n", strerror(errno)); @@ -209,6 +211,24 @@ static void _map_in_map_batch_ops(enum bpf_map_type outer_map_type, ((outer_map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) ? 9 : 1000) - map_index; + /* This condition is only meaningful for array of maps. + * + * max_entries == OUTER_MAP_ENTRIES - 1 if it is true. Say + * max_entries is short for n, then outer_map_keys looks like: + * + * [n, n-1, ... 2, 1] + * + * We change it to + * + * [n, n-1, ... 2, 0] + * + * So it will leave key 1 as a hole. It will serve to test the + * correctness when batch on an array: a "non-exist" key might be + * actually allocated and returned from key iteration. 
+ */ + if (has_holes) + outer_map_keys[max_entries - 1]--; + /* batch operation - map_update */ ret = bpf_map_update_batch(outer_map_fd, outer_map_keys, inner_map_fds, &max_entries, &opts); @@ -219,15 +239,17 @@ static void _map_in_map_batch_ops(enum bpf_map_type outer_map_type, /* batch operation - map_lookup */ for (op_index = 0; op_index < 2; ++op_index) fetch_and_validate(outer_map_fd, &opts, - batch_size[op_index], false); + batch_size[op_index], false, + has_holes); /* batch operation - map_lookup_delete */ if (outer_map_type == BPF_MAP_TYPE_HASH_OF_MAPS) fetch_and_validate(outer_map_fd, &opts, - max_entries, true /*delete*/); + max_entries, true /*delete*/, + has_holes); /* close all map fds */ - for (map_index = 0; map_index < max_entries; map_index++) + for (map_index = 0; map_index < OUTER_MAP_ENTRIES; map_index++) close(inner_map_fds[map_index]); close(outer_map_fd); @@ -237,16 +259,20 @@ static void _map_in_map_batch_ops(enum bpf_map_type outer_map_type, void test_map_in_map_batch_ops_array(void) { - _map_in_map_batch_ops(BPF_MAP_TYPE_ARRAY_OF_MAPS, BPF_MAP_TYPE_ARRAY); + _map_in_map_batch_ops(BPF_MAP_TYPE_ARRAY_OF_MAPS, BPF_MAP_TYPE_ARRAY, false); printf("%s:PASS with inner ARRAY map\n", __func__); - _map_in_map_batch_ops(BPF_MAP_TYPE_ARRAY_OF_MAPS, BPF_MAP_TYPE_HASH); + _map_in_map_batch_ops(BPF_MAP_TYPE_ARRAY_OF_MAPS, BPF_MAP_TYPE_HASH, false); printf("%s:PASS with inner HASH map\n", __func__); + _map_in_map_batch_ops(BPF_MAP_TYPE_ARRAY_OF_MAPS, BPF_MAP_TYPE_ARRAY, true); + printf("%s:PASS with inner ARRAY map with holes\n", __func__); + _map_in_map_batch_ops(BPF_MAP_TYPE_ARRAY_OF_MAPS, BPF_MAP_TYPE_HASH, true); + printf("%s:PASS with inner HASH map with holes\n", __func__); } void test_map_in_map_batch_ops_hash(void) { - _map_in_map_batch_ops(BPF_MAP_TYPE_HASH_OF_MAPS, BPF_MAP_TYPE_ARRAY); + _map_in_map_batch_ops(BPF_MAP_TYPE_HASH_OF_MAPS, BPF_MAP_TYPE_ARRAY, false); printf("%s:PASS with inner ARRAY map\n", __func__); - _map_in_map_batch_ops(BPF_MAP_TYPE_HASH_OF_MAPS, BPF_MAP_TYPE_HASH); + _map_in_map_batch_ops(BPF_MAP_TYPE_HASH_OF_MAPS, BPF_MAP_TYPE_HASH, false); printf("%s:PASS with inner HASH map\n", __func__); } diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c index 27784946b01b..72b5c174ab3b 100644 --- a/tools/testing/selftests/bpf/network_helpers.c +++ b/tools/testing/selftests/bpf/network_helpers.c @@ -21,7 +21,7 @@ #include <linux/limits.h> #include <linux/ip.h> -#include <linux/udp.h> +#include <netinet/udp.h> #include <netinet/tcp.h> #include <net/if.h> @@ -446,6 +446,23 @@ char *ping_command(int family) return "ping"; } +int append_tid(char *str, size_t sz) +{ + size_t end; + + if (!str) + return -1; + + end = strlen(str); + if (end + 8 > sz) + return -1; + + sprintf(&str[end], "%07d", gettid()); + str[end + 7] = '\0'; + + return 0; +} + int remove_netns(const char *name) { char *cmd; @@ -548,6 +565,34 @@ void close_netns(struct nstoken *token) free(token); } +int open_tuntap(const char *dev_name, bool need_mac) +{ + int err = 0; + struct ifreq ifr; + int fd = open("/dev/net/tun", O_RDWR); + + if (!ASSERT_GE(fd, 0, "open(/dev/net/tun)")) + return -1; + + ifr.ifr_flags = IFF_NO_PI | (need_mac ? 
IFF_TAP : IFF_TUN); + strncpy(ifr.ifr_name, dev_name, IFNAMSIZ - 1); + ifr.ifr_name[IFNAMSIZ - 1] = '\0'; + + err = ioctl(fd, TUNSETIFF, &ifr); + if (!ASSERT_OK(err, "ioctl(TUNSETIFF)")) { + close(fd); + return -1; + } + + err = fcntl(fd, F_SETFL, O_NONBLOCK); + if (!ASSERT_OK(err, "fcntl(O_NONBLOCK)")) { + close(fd); + return -1; + } + + return fd; +} + int get_socket_local_port(int sock_fd) { struct sockaddr_storage addr; @@ -733,6 +778,36 @@ struct tmonitor_ctx { int pcap_fd; }; +static int __base_pr(const char *format, va_list args) +{ + return vfprintf(stdout, format, args); +} + +static tm_print_fn_t __tm_pr = __base_pr; + +tm_print_fn_t traffic_monitor_set_print(tm_print_fn_t fn) +{ + tm_print_fn_t old_print_fn; + + old_print_fn = __atomic_exchange_n(&__tm_pr, fn, __ATOMIC_RELAXED); + + return old_print_fn; +} + +void tm_print(const char *format, ...) +{ + tm_print_fn_t print_fn; + va_list args; + + print_fn = __atomic_load_n(&__tm_pr, __ATOMIC_RELAXED); + if (!print_fn) + return; + + va_start(args, format); + print_fn(format, args); + va_end(args); +} + /* Is this packet captured with a Ethernet protocol type? */ static bool is_ethernet(const u_char *packet) { @@ -750,7 +825,7 @@ static bool is_ethernet(const u_char *packet) case 770: /* ARPHRD_FRAD */ case 778: /* ARPHDR_IPGRE */ case 803: /* ARPHRD_IEEE80211_RADIOTAP */ - printf("Packet captured: arphdr_type=%d\n", arphdr_type); + tm_print("Packet captured: arphdr_type=%d\n", arphdr_type); return false; } return true; @@ -771,12 +846,13 @@ static const char *pkt_type_str(u16 pkt_type) return "Unknown"; } +#define MAX_FLAGS_STRLEN 21 /* Show the information of the transport layer in the packet */ static void show_transport(const u_char *packet, u16 len, u32 ifindex, const char *src_addr, const char *dst_addr, u16 proto, bool ipv6, u8 pkt_type) { - char *ifname, _ifname[IF_NAMESIZE]; + char *ifname, _ifname[IF_NAMESIZE], flags[MAX_FLAGS_STRLEN] = ""; const char *transport_str; u16 src_port, dst_port; struct udphdr *udp; @@ -799,47 +875,39 @@ static void show_transport(const u_char *packet, u16 len, u32 ifindex, dst_port = ntohs(tcp->dest); transport_str = "TCP"; } else if (proto == IPPROTO_ICMP) { - printf("%-7s %-3s IPv4 %s > %s: ICMP, length %d, type %d, code %d\n", - ifname, pkt_type_str(pkt_type), src_addr, dst_addr, len, - packet[0], packet[1]); + tm_print("%-7s %-3s IPv4 %s > %s: ICMP, length %d, type %d, code %d\n", + ifname, pkt_type_str(pkt_type), src_addr, dst_addr, len, + packet[0], packet[1]); return; } else if (proto == IPPROTO_ICMPV6) { - printf("%-7s %-3s IPv6 %s > %s: ICMPv6, length %d, type %d, code %d\n", - ifname, pkt_type_str(pkt_type), src_addr, dst_addr, len, - packet[0], packet[1]); + tm_print("%-7s %-3s IPv6 %s > %s: ICMPv6, length %d, type %d, code %d\n", + ifname, pkt_type_str(pkt_type), src_addr, dst_addr, len, + packet[0], packet[1]); return; } else { - printf("%-7s %-3s %s %s > %s: protocol %d\n", - ifname, pkt_type_str(pkt_type), ipv6 ? "IPv6" : "IPv4", - src_addr, dst_addr, proto); + tm_print("%-7s %-3s %s %s > %s: protocol %d\n", + ifname, pkt_type_str(pkt_type), ipv6 ? "IPv6" : "IPv4", + src_addr, dst_addr, proto); return; } /* TCP or UDP*/ - flockfile(stdout); + if (proto == IPPROTO_TCP) + snprintf(flags, MAX_FLAGS_STRLEN, "%s%s%s%s", + tcp->fin ? ", FIN" : "", + tcp->syn ? ", SYN" : "", + tcp->rst ? ", RST" : "", + tcp->ack ? 
", ACK" : ""); + if (ipv6) - printf("%-7s %-3s IPv6 %s.%d > %s.%d: %s, length %d", - ifname, pkt_type_str(pkt_type), src_addr, src_port, - dst_addr, dst_port, transport_str, len); + tm_print("%-7s %-3s IPv6 %s.%d > %s.%d: %s, length %d%s\n", + ifname, pkt_type_str(pkt_type), src_addr, src_port, + dst_addr, dst_port, transport_str, len, flags); else - printf("%-7s %-3s IPv4 %s:%d > %s:%d: %s, length %d", - ifname, pkt_type_str(pkt_type), src_addr, src_port, - dst_addr, dst_port, transport_str, len); - - if (proto == IPPROTO_TCP) { - if (tcp->fin) - printf(", FIN"); - if (tcp->syn) - printf(", SYN"); - if (tcp->rst) - printf(", RST"); - if (tcp->ack) - printf(", ACK"); - } - - printf("\n"); - funlockfile(stdout); + tm_print("%-7s %-3s IPv4 %s:%d > %s:%d: %s, length %d%s\n", + ifname, pkt_type_str(pkt_type), src_addr, src_port, + dst_addr, dst_port, transport_str, len, flags); } static void show_ipv6_packet(const u_char *packet, u32 ifindex, u8 pkt_type) @@ -954,8 +1022,8 @@ static void *traffic_monitor_thread(void *arg) ifname = _ifname; } - printf("%-7s %-3s Unknown network protocol type 0x%x\n", - ifname, pkt_type_str(ptype), proto); + tm_print("%-7s %-3s Unknown network protocol type 0x%x\n", + ifname, pkt_type_str(ptype), proto); } } @@ -1155,8 +1223,9 @@ void traffic_monitor_stop(struct tmonitor_ctx *ctx) write(ctx->wake_fd, &w, sizeof(w)); pthread_join(ctx->thread, NULL); - printf("Packet file: %s\n", strrchr(ctx->pkt_fname, '/') + 1); + tm_print("Packet file: %s\n", strrchr(ctx->pkt_fname, '/') + 1); traffic_monitor_release(ctx); } + #endif /* TRAFFIC_MONITOR */ diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h index 5764155b6d25..ef208eefd571 100644 --- a/tools/testing/selftests/bpf/network_helpers.h +++ b/tools/testing/selftests/bpf/network_helpers.h @@ -8,14 +8,17 @@ typedef __u16 __sum16; #include <linux/if_ether.h> #include <linux/if_packet.h> +#include <linux/if_tun.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/ethtool.h> #include <linux/sockios.h> #include <linux/err.h> #include <netinet/tcp.h> +#include <netinet/udp.h> #include <bpf/bpf_endian.h> #include <net/if.h> +#include <stdio.h> #define MAGIC_VAL 0x1234 #define NUM_ITER 100000 @@ -84,6 +87,8 @@ int get_socket_local_port(int sock_fd); int get_hw_ring_size(char *ifname, struct ethtool_ringparam *ring_param); int set_hw_ring_size(char *ifname, struct ethtool_ringparam *ring_param); +int open_tuntap(const char *dev_name, bool need_mac); + struct nstoken; /** * open_netns() - Switch to specified network namespace by name. @@ -97,6 +102,18 @@ int send_recv_data(int lfd, int fd, uint32_t total_bytes); int make_netns(const char *name); int remove_netns(const char *name); +/** + * append_tid() - Append thread ID to the given string. 
+ * + * @str: string to extend + * @sz: string's size + * + * 8 characters are used to append the thread ID (7 digits + '\0') + * + * Returns -1 on errors, 0 otherwise + */ +int append_tid(char *str, size_t sz); + static __u16 csum_fold(__u32 csum) { csum = (csum & 0xffff) + (csum >> 16); @@ -105,6 +122,45 @@ static __u16 csum_fold(__u32 csum) return (__u16)~csum; } +static __wsum csum_partial(const void *buf, int len, __wsum sum) +{ + __u16 *p = (__u16 *)buf; + int num_u16 = len >> 1; + int i; + + for (i = 0; i < num_u16; i++) + sum += p[i]; + + return sum; +} + +static inline __sum16 build_ip_csum(struct iphdr *iph) +{ + __u32 sum = 0; + __u16 *p; + + iph->check = 0; + p = (void *)iph; + sum = csum_partial(p, iph->ihl << 2, 0); + + return csum_fold(sum); +} + +/** + * csum_tcpudp_magic - compute IP pseudo-header checksum + * + * Compute the IPv4 pseudo header checksum. The helper can take a + * accumulated sum from the transport layer to accumulate it and directly + * return the transport layer + * + * @saddr: IP source address + * @daddr: IP dest address + * @len: IP data size + * @proto: transport layer protocol + * @csum: The accumulated partial sum to add to the computation + * + * Returns the folded sum + */ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __wsum csum) @@ -120,6 +176,21 @@ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, return csum_fold((__u32)s); } +/** + * csum_ipv6_magic - compute IPv6 pseudo-header checksum + * + * Compute the ipv6 pseudo header checksum. The helper can take a + * accumulated sum from the transport layer to accumulate it and directly + * return the transport layer + * + * @saddr: IPv6 source address + * @daddr: IPv6 dest address + * @len: IPv6 data size + * @proto: transport layer protocol + * @csum: The accumulated partial sum to add to the computation + * + * Returns the folded sum + */ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, __u32 len, __u8 proto, @@ -139,12 +210,56 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr, return csum_fold((__u32)s); } +/** + * build_udp_v4_csum - compute UDP checksum for UDP over IPv4 + * + * Compute the checksum to embed in UDP header, composed of the sum of IP + * pseudo-header checksum, UDP header checksum and UDP data checksum + * @iph IP header + * @udph UDP header, which must be immediately followed by UDP data + * + * Returns the total checksum + */ + +static inline __sum16 build_udp_v4_csum(const struct iphdr *iph, + const struct udphdr *udph) +{ + unsigned long sum; + + sum = csum_partial(udph, ntohs(udph->len), 0); + return csum_tcpudp_magic(iph->saddr, iph->daddr, ntohs(udph->len), + IPPROTO_UDP, sum); +} + +/** + * build_udp_v6_csum - compute UDP checksum for UDP over IPv6 + * + * Compute the checksum to embed in UDP header, composed of the sum of IPv6 + * pseudo-header checksum, UDP header checksum and UDP data checksum + * @ip6h IPv6 header + * @udph UDP header, which must be immediately followed by UDP data + * + * Returns the total checksum + */ +static inline __sum16 build_udp_v6_csum(const struct ipv6hdr *ip6h, + const struct udphdr *udph) +{ + unsigned long sum; + + sum = csum_partial(udph, ntohs(udph->len), 0); + return csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ntohs(udph->len), + IPPROTO_UDP, sum); +} + struct tmonitor_ctx; +typedef int (*tm_print_fn_t)(const char *format, va_list args); + #ifdef TRAFFIC_MONITOR struct tmonitor_ctx 
*traffic_monitor_start(const char *netns, const char *test_name, const char *subtest_name); void traffic_monitor_stop(struct tmonitor_ctx *ctx); +tm_print_fn_t traffic_monitor_set_print(tm_print_fn_t fn); #else static inline struct tmonitor_ctx *traffic_monitor_start(const char *netns, const char *test_name, const char *subtest_name) @@ -155,6 +270,11 @@ static inline struct tmonitor_ctx *traffic_monitor_start(const char *netns, cons static inline void traffic_monitor_stop(struct tmonitor_ctx *ctx) { } + +static inline tm_print_fn_t traffic_monitor_set_print(tm_print_fn_t fn) +{ + return NULL; +} #endif #endif diff --git a/tools/testing/selftests/bpf/prog_tests/align.c b/tools/testing/selftests/bpf/prog_tests/align.c index 4ebd0da898f5..1d53a8561ee2 100644 --- a/tools/testing/selftests/bpf/prog_tests/align.c +++ b/tools/testing/selftests/bpf/prog_tests/align.c @@ -610,9 +610,11 @@ static int do_test_single(struct bpf_align_test *test) .log_size = sizeof(bpf_vlog), .log_level = 2, ); + const char *main_pass_start = "0: R1=ctx() R10=fp0"; const char *line_ptr; int cur_line = -1; int prog_len, i; + char *start; int fd_prog; int ret; @@ -632,7 +634,13 @@ static int do_test_single(struct bpf_align_test *test) ret = 0; /* We make a local copy so that we can strtok() it */ strncpy(bpf_vlog_copy, bpf_vlog, sizeof(bpf_vlog_copy)); - line_ptr = strtok(bpf_vlog_copy, "\n"); + start = strstr(bpf_vlog_copy, main_pass_start); + if (!start) { + ret = 1; + printf("Can't find initial line '%s'\n", main_pass_start); + goto out; + } + line_ptr = strtok(start, "\n"); for (i = 0; i < MAX_MATCHES; i++) { struct bpf_reg_match m = test->matches[i]; const char *p; @@ -682,6 +690,7 @@ static int do_test_single(struct bpf_align_test *test) break; } } +out: if (fd_prog >= 0) close(fd_prog); } diff --git a/tools/testing/selftests/bpf/prog_tests/arena_atomics.c b/tools/testing/selftests/bpf/prog_tests/arena_atomics.c index 26e7c06c6cb4..d98577a6babc 100644 --- a/tools/testing/selftests/bpf/prog_tests/arena_atomics.c +++ b/tools/testing/selftests/bpf/prog_tests/arena_atomics.c @@ -162,6 +162,66 @@ static void test_uaf(struct arena_atomics *skel) ASSERT_EQ(skel->arena->uaf_recovery_fails, 0, "uaf_recovery_fails"); } +static void test_load_acquire(struct arena_atomics *skel) +{ + LIBBPF_OPTS(bpf_test_run_opts, topts); + int err, prog_fd; + + if (skel->data->skip_lacq_srel_tests) { + printf("%s:SKIP: ENABLE_ATOMICS_TESTS not defined, Clang doesn't support addr_space_cast, and/or JIT doesn't support load-acquire\n", + __func__); + test__skip(); + return; + } + + /* No need to attach it, just run it directly */ + prog_fd = bpf_program__fd(skel->progs.load_acquire); + err = bpf_prog_test_run_opts(prog_fd, &topts); + if (!ASSERT_OK(err, "test_run_opts err")) + return; + if (!ASSERT_OK(topts.retval, "test_run_opts retval")) + return; + + ASSERT_EQ(skel->arena->load_acquire8_result, 0x12, + "load_acquire8_result"); + ASSERT_EQ(skel->arena->load_acquire16_result, 0x1234, + "load_acquire16_result"); + ASSERT_EQ(skel->arena->load_acquire32_result, 0x12345678, + "load_acquire32_result"); + ASSERT_EQ(skel->arena->load_acquire64_result, 0x1234567890abcdef, + "load_acquire64_result"); +} + +static void test_store_release(struct arena_atomics *skel) +{ + LIBBPF_OPTS(bpf_test_run_opts, topts); + int err, prog_fd; + + if (skel->data->skip_lacq_srel_tests) { + printf("%s:SKIP: ENABLE_ATOMICS_TESTS not defined, Clang doesn't support addr_space_cast, and/or JIT doesn't support store-release\n", + __func__); + test__skip(); + return; + } + + 
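(For readers unfamiliar with the semantics these new subtests check: BPF load-acquire and store-release follow the usual acquire/release ordering model. A plain C11 analogue of the pairing is sketched below; this is illustrative only, since the selftests exercise the BPF instructions themselves.)

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int ready;
static int payload;

void producer(void)
{
	payload = 0x12345678;			/* plain store */
	atomic_store_explicit(&ready, 1,
			      memory_order_release);	/* store-release */
}

void consumer(void)
{
	if (atomic_load_explicit(&ready,
				 memory_order_acquire))	/* load-acquire */
		printf("payload=%#x\n", payload);	/* guaranteed visible */
}

int main(void)
{
	producer();
	consumer();	/* single-threaded here; real use pairs across threads */
	return 0;
}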
/* No need to attach it, just run it directly */ + prog_fd = bpf_program__fd(skel->progs.store_release); + err = bpf_prog_test_run_opts(prog_fd, &topts); + if (!ASSERT_OK(err, "test_run_opts err")) + return; + if (!ASSERT_OK(topts.retval, "test_run_opts retval")) + return; + + ASSERT_EQ(skel->arena->store_release8_result, 0x12, + "store_release8_result"); + ASSERT_EQ(skel->arena->store_release16_result, 0x1234, + "store_release16_result"); + ASSERT_EQ(skel->arena->store_release32_result, 0x12345678, + "store_release32_result"); + ASSERT_EQ(skel->arena->store_release64_result, 0x1234567890abcdef, + "store_release64_result"); +} + void test_arena_atomics(void) { struct arena_atomics *skel; @@ -171,7 +231,7 @@ void test_arena_atomics(void) if (!ASSERT_OK_PTR(skel, "arena atomics skeleton open")) return; - if (skel->data->skip_tests) { + if (skel->data->skip_all_tests) { printf("%s:SKIP:no ENABLE_ATOMICS_TESTS or no addr_space_cast support in clang", __func__); test__skip(); @@ -198,6 +258,10 @@ void test_arena_atomics(void) test_xchg(skel); if (test__start_subtest("uaf")) test_uaf(skel); + if (test__start_subtest("load_acquire")) + test_load_acquire(skel); + if (test__start_subtest("store_release")) + test_store_release(skel); cleanup: arena_atomics__destroy(skel); diff --git a/tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c b/tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c new file mode 100644 index 000000000000..0223fce4db2b --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */ +#include <test_progs.h> +#include <network_helpers.h> +#include <sys/sysinfo.h> + +struct __qspinlock { int val; }; +typedef struct __qspinlock arena_spinlock_t; + +struct arena_qnode { + unsigned long next; + int count; + int locked; +}; + +#include "arena_spin_lock.skel.h" + +static long cpu; +static int repeat; + +pthread_barrier_t barrier; + +static void *spin_lock_thread(void *arg) +{ + int err, prog_fd = *(u32 *)arg; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = repeat, + ); + cpu_set_t cpuset; + + CPU_ZERO(&cpuset); + CPU_SET(__sync_fetch_and_add(&cpu, 1), &cpuset); + ASSERT_OK(pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset), "cpu affinity"); + + err = pthread_barrier_wait(&barrier); + if (err != PTHREAD_BARRIER_SERIAL_THREAD && err != 0) + ASSERT_FALSE(true, "pthread_barrier"); + + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run err"); + ASSERT_EQ((int)topts.retval, 0, "test_run retval"); + + pthread_exit(arg); +} + +static void test_arena_spin_lock_size(int size) +{ + LIBBPF_OPTS(bpf_test_run_opts, topts); + struct arena_spin_lock *skel; + pthread_t thread_id[16]; + int prog_fd, i, err; + int nthreads; + void *ret; + + nthreads = MIN(get_nprocs(), ARRAY_SIZE(thread_id)); + if (nthreads < 2) { + test__skip(); + return; + } + + skel = arena_spin_lock__open_and_load(); + if (!ASSERT_OK_PTR(skel, "arena_spin_lock__open_and_load")) + return; + if (skel->data->test_skip == 2) { + test__skip(); + goto end; + } + skel->bss->cs_count = size; + skel->bss->limit = repeat * nthreads; + + ASSERT_OK(pthread_barrier_init(&barrier, NULL, nthreads), "barrier init"); + + prog_fd = bpf_program__fd(skel->progs.prog); + for (i = 0; i < nthreads; i++) { + err = pthread_create(&thread_id[i], NULL, &spin_lock_thread, &prog_fd); + if (!ASSERT_OK(err, "pthread_create")) + 
goto end_barrier; + } + + for (i = 0; i < nthreads; i++) { + if (!ASSERT_OK(pthread_join(thread_id[i], &ret), "pthread_join")) + goto end_barrier; + if (!ASSERT_EQ(ret, &prog_fd, "ret == prog_fd")) + goto end_barrier; + } + + ASSERT_EQ(skel->bss->counter, repeat * nthreads, "check counter value"); + +end_barrier: + pthread_barrier_destroy(&barrier); +end: + arena_spin_lock__destroy(skel); + return; +} + +void test_arena_spin_lock(void) +{ + repeat = 1000; + if (test__start_subtest("arena_spin_lock_1")) + test_arena_spin_lock_size(1); + cpu = 0; + if (test__start_subtest("arena_spin_lock_1000")) + test_arena_spin_lock_size(1000); + cpu = 0; + repeat = 100; + if (test__start_subtest("arena_spin_lock_50000")) + test_arena_spin_lock_size(50000); +} diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c index 329c7862b52d..cabc51c2ca6b 100644 --- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c +++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c @@ -122,6 +122,85 @@ cleanup: test_attach_probe_manual__destroy(skel); } +/* attach uprobe/uretprobe long event name testings */ +static void test_attach_uprobe_long_event_name(void) +{ + DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts); + struct bpf_link *uprobe_link, *uretprobe_link; + struct test_attach_probe_manual *skel; + ssize_t uprobe_offset; + char path[PATH_MAX] = {0}; + + skel = test_attach_probe_manual__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load")) + return; + + uprobe_offset = get_uprobe_offset(&trigger_func); + if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset")) + goto cleanup; + + if (!ASSERT_GT(readlink("/proc/self/exe", path, PATH_MAX - 1), 0, "readlink")) + goto cleanup; + + /* manual-attach uprobe/uretprobe */ + uprobe_opts.attach_mode = PROBE_ATTACH_MODE_LEGACY; + uprobe_opts.ref_ctr_offset = 0; + uprobe_opts.retprobe = false; + uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, + 0 /* self pid */, + path, + uprobe_offset, + &uprobe_opts); + if (!ASSERT_OK_PTR(uprobe_link, "attach_uprobe_long_event_name")) + goto cleanup; + skel->links.handle_uprobe = uprobe_link; + + uprobe_opts.retprobe = true; + uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe, + -1 /* any pid */, + path, + uprobe_offset, &uprobe_opts); + if (!ASSERT_OK_PTR(uretprobe_link, "attach_uretprobe_long_event_name")) + goto cleanup; + skel->links.handle_uretprobe = uretprobe_link; + +cleanup: + test_attach_probe_manual__destroy(skel); +} + +/* attach kprobe/kretprobe long event name testings */ +static void test_attach_kprobe_long_event_name(void) +{ + DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts); + struct bpf_link *kprobe_link, *kretprobe_link; + struct test_attach_probe_manual *skel; + + skel = test_attach_probe_manual__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load")) + return; + + /* manual-attach kprobe/kretprobe */ + kprobe_opts.attach_mode = PROBE_ATTACH_MODE_LEGACY; + kprobe_opts.retprobe = false; + kprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe, + "bpf_testmod_looooooooooooooooooooooooooooooong_name", + &kprobe_opts); + if (!ASSERT_OK_PTR(kprobe_link, "attach_kprobe_long_event_name")) + goto cleanup; + skel->links.handle_kprobe = kprobe_link; + + kprobe_opts.retprobe = true; + kretprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe, + "bpf_testmod_looooooooooooooooooooooooooooooong_name", + &kprobe_opts); + if 
(!ASSERT_OK_PTR(kretprobe_link, "attach_kretprobe_long_event_name")) + goto cleanup; + skel->links.handle_kretprobe = kretprobe_link; + +cleanup: + test_attach_probe_manual__destroy(skel); +} + static void test_attach_probe_auto(struct test_attach_probe *skel) { struct bpf_link *uprobe_err_link; @@ -323,6 +402,11 @@ void test_attach_probe(void) if (test__start_subtest("uprobe-ref_ctr")) test_uprobe_ref_ctr(skel); + if (test__start_subtest("uprobe-long_name")) + test_attach_uprobe_long_event_name(); + if (test__start_subtest("kprobe-long_name")) + test_attach_kprobe_long_event_name(); + cleanup: test_attach_probe__destroy(skel); ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_cleanup"); diff --git a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c index cc184e4420f6..42b49870e520 100644 --- a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c +++ b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c @@ -6,10 +6,14 @@ #include <test_progs.h> #include "bloom_filter_map.skel.h" +#ifndef NUMA_NO_NODE +#define NUMA_NO_NODE (-1) +#endif + static void test_fail_cases(void) { LIBBPF_OPTS(bpf_map_create_opts, opts); - __u32 value; + __u32 value = 0; int fd, err; /* Invalid key size */ @@ -69,6 +73,7 @@ static void test_success_cases(void) /* Create a map */ opts.map_flags = BPF_F_ZERO_SEED | BPF_F_NUMA_NODE; + opts.numa_node = NUMA_NO_NODE; fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(value), 100, &opts); if (!ASSERT_GE(fd, 0, "bpf_map_create bloom filter success case")) return; diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c index 6befa870434b..4a0670c056ba 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c @@ -489,10 +489,28 @@ cleanup: bpf_link__destroy(link); } +static int verify_tracing_link_info(int fd, u64 cookie) +{ + struct bpf_link_info info; + int err; + u32 len = sizeof(info); + + err = bpf_link_get_info_by_fd(fd, &info, &len); + if (!ASSERT_OK(err, "get_link_info")) + return -1; + + if (!ASSERT_EQ(info.type, BPF_LINK_TYPE_TRACING, "link_type")) + return -1; + + ASSERT_EQ(info.tracing.cookie, cookie, "tracing_cookie"); + + return 0; +} + static void tracing_subtest(struct test_bpf_cookie *skel) { __u64 cookie; - int prog_fd; + int prog_fd, err; int fentry_fd = -1, fexit_fd = -1, fmod_ret_fd = -1; LIBBPF_OPTS(bpf_test_run_opts, opts); LIBBPF_OPTS(bpf_link_create_opts, link_opts); @@ -507,6 +525,10 @@ static void tracing_subtest(struct test_bpf_cookie *skel) if (!ASSERT_GE(fentry_fd, 0, "fentry.link_create")) goto cleanup; + err = verify_tracing_link_info(fentry_fd, cookie); + if (!ASSERT_OK(err, "verify_tracing_link_info")) + goto cleanup; + cookie = 0x20000000000000L; prog_fd = bpf_program__fd(skel->progs.fexit_test1); link_opts.tracing.cookie = cookie; @@ -635,10 +657,29 @@ cleanup: bpf_link__destroy(link); } +static int verify_raw_tp_link_info(int fd, u64 cookie) +{ + struct bpf_link_info info; + int err; + u32 len = sizeof(info); + + memset(&info, 0, sizeof(info)); + err = bpf_link_get_info_by_fd(fd, &info, &len); + if (!ASSERT_OK(err, "get_link_info")) + return -1; + + if (!ASSERT_EQ(info.type, BPF_LINK_TYPE_RAW_TRACEPOINT, "link_type")) + return -1; + + ASSERT_EQ(info.raw_tracepoint.cookie, cookie, "raw_tp_cookie"); + + return 0; +} + static void raw_tp_subtest(struct test_bpf_cookie *skel) { __u64 cookie; - int prog_fd, link_fd = -1; + int err, 
prog_fd, link_fd = -1; struct bpf_link *link = NULL; LIBBPF_OPTS(bpf_raw_tp_opts, raw_tp_opts); LIBBPF_OPTS(bpf_raw_tracepoint_opts, opts); @@ -656,6 +697,11 @@ static void raw_tp_subtest(struct test_bpf_cookie *skel) goto cleanup; usleep(1); /* trigger */ + + err = verify_raw_tp_link_info(link_fd, cookie); + if (!ASSERT_OK(err, "verify_raw_tp_link_info")) + goto cleanup; + close(link_fd); /* detach */ link_fd = -1; diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c index 6f1bfacd7375..5225d69bf79b 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c @@ -323,19 +323,87 @@ static void test_task_pidfd(void) static void test_task_sleepable(void) { struct bpf_iter_tasks *skel; + int pid, status, err, data_pipe[2], finish_pipe[2], c = 0; + char *test_data = NULL; + char *test_data_long = NULL; + char *data[2]; + + if (!ASSERT_OK(pipe(data_pipe), "data_pipe") || + !ASSERT_OK(pipe(finish_pipe), "finish_pipe")) + return; skel = bpf_iter_tasks__open_and_load(); if (!ASSERT_OK_PTR(skel, "bpf_iter_tasks__open_and_load")) return; + pid = fork(); + if (!ASSERT_GE(pid, 0, "fork")) + return; + + if (pid == 0) { + /* child */ + close(data_pipe[0]); + close(finish_pipe[1]); + + test_data = malloc(sizeof(char) * 10); + strncpy(test_data, "test_data", 10); + test_data[9] = '\0'; + + test_data_long = malloc(sizeof(char) * 5000); + for (int i = 0; i < 5000; ++i) { + if (i % 2 == 0) + test_data_long[i] = 'b'; + else + test_data_long[i] = 'a'; + } + test_data_long[4999] = '\0'; + + data[0] = test_data; + data[1] = test_data_long; + + write(data_pipe[1], &data, sizeof(data)); + + /* keep child alive until after the test */ + err = read(finish_pipe[0], &c, 1); + if (err != 1) + exit(-1); + + close(data_pipe[1]); + close(finish_pipe[0]); + _exit(0); + } + + /* parent */ + close(data_pipe[1]); + close(finish_pipe[0]); + + err = read(data_pipe[0], &data, sizeof(data)); + ASSERT_EQ(err, sizeof(data), "read_check"); + + skel->bss->user_ptr = data[0]; + skel->bss->user_ptr_long = data[1]; + skel->bss->pid = pid; + do_dummy_read(skel->progs.dump_task_sleepable); ASSERT_GT(skel->bss->num_expected_failure_copy_from_user_task, 0, "num_expected_failure_copy_from_user_task"); ASSERT_GT(skel->bss->num_success_copy_from_user_task, 0, "num_success_copy_from_user_task"); + ASSERT_GT(skel->bss->num_expected_failure_copy_from_user_task_str, 0, + "num_expected_failure_copy_from_user_task_str"); + ASSERT_GT(skel->bss->num_success_copy_from_user_task_str, 0, + "num_success_copy_from_user_task_str"); bpf_iter_tasks__destroy(skel); + + write(finish_pipe[1], &c, 1); + err = waitpid(pid, &status, 0); + ASSERT_EQ(err, pid, "waitpid"); + ASSERT_EQ(status, 0, "zero_child_exit"); + + close(data_pipe[0]); + close(finish_pipe[1]); } static void test_task_stack(void) diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c b/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c index fe2c502e5089..ecc3d47919ad 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c @@ -78,7 +78,7 @@ static int test_setup_uffd(void *fault_addr) } uffd_register.range.start = (unsigned long)fault_addr; - uffd_register.range.len = 4096; + uffd_register.range.len = getpagesize(); uffd_register.mode = UFFDIO_REGISTER_MODE_MISSING; if (ioctl(uffd, UFFDIO_REGISTER, &uffd_register)) { close(uffd); diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c 
b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c index a4a1f93878d4..dd6512fa652b 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c @@ -63,6 +63,12 @@ static void test_bpf_nf_ct(int mode) .repeat = 1, ); + if (SYS_NOFAIL("iptables-legacy --version")) { + fprintf(stdout, "Missing required iptables-legacy tool\n"); + test__skip(); + return; + } + skel = test_bpf_nf__open_and_load(); if (!ASSERT_OK_PTR(skel, "test_bpf_nf__open_and_load")) return; @@ -72,11 +78,14 @@ static void test_bpf_nf_ct(int mode) if (!ASSERT_OK(system(cmd), cmd)) goto end; - srv_port = (mode == TEST_XDP) ? 5005 : 5006; - srv_fd = start_server(AF_INET, SOCK_STREAM, "127.0.0.1", srv_port, TIMEOUT_MS); + srv_fd = start_server(AF_INET, SOCK_STREAM, "127.0.0.1", 0, TIMEOUT_MS); if (!ASSERT_GE(srv_fd, 0, "start_server")) goto end; + srv_port = get_socket_local_port(srv_fd); + if (!ASSERT_GE(srv_port, 0, "get_sock_local_port")) + goto end; + client_fd = connect_to_server(srv_fd); if (!ASSERT_GE(client_fd, 0, "connect_to_server")) goto end; @@ -91,7 +100,7 @@ static void test_bpf_nf_ct(int mode) skel->bss->saddr = peer_addr.sin_addr.s_addr; skel->bss->sport = peer_addr.sin_port; skel->bss->daddr = peer_addr.sin_addr.s_addr; - skel->bss->dport = htons(srv_port); + skel->bss->dport = srv_port; if (mode == TEST_XDP) prog_fd = bpf_program__fd(skel->progs.nf_xdp_ct_test); diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_qdisc.c b/tools/testing/selftests/bpf/prog_tests/bpf_qdisc.c new file mode 100644 index 000000000000..730357cd0c9a --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/bpf_qdisc.c @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/pkt_sched.h> +#include <linux/rtnetlink.h> +#include <test_progs.h> + +#include "network_helpers.h" +#include "bpf_qdisc_fifo.skel.h" +#include "bpf_qdisc_fq.skel.h" +#include "bpf_qdisc_fail__incompl_ops.skel.h" + +#define LO_IFINDEX 1 + +static const unsigned int total_bytes = 10 * 1024 * 1024; + +static void do_test(char *qdisc) +{ + DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = LO_IFINDEX, + .attach_point = BPF_TC_QDISC, + .parent = TC_H_ROOT, + .handle = 0x8000000, + .qdisc = qdisc); + int srv_fd = -1, cli_fd = -1; + int err; + + err = bpf_tc_hook_create(&hook); + if (!ASSERT_OK(err, "attach qdisc")) + return; + + srv_fd = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0); + if (!ASSERT_OK_FD(srv_fd, "start server")) + goto done; + + cli_fd = connect_to_fd(srv_fd, 0); + if (!ASSERT_OK_FD(cli_fd, "connect to client")) + goto done; + + err = send_recv_data(srv_fd, cli_fd, total_bytes); + ASSERT_OK(err, "send_recv_data"); + +done: + if (srv_fd != -1) + close(srv_fd); + if (cli_fd != -1) + close(cli_fd); + + bpf_tc_hook_destroy(&hook); +} + +static void test_fifo(void) +{ + struct bpf_qdisc_fifo *fifo_skel; + + fifo_skel = bpf_qdisc_fifo__open_and_load(); + if (!ASSERT_OK_PTR(fifo_skel, "bpf_qdisc_fifo__open_and_load")) + return; + + if (!ASSERT_OK(bpf_qdisc_fifo__attach(fifo_skel), "bpf_qdisc_fifo__attach")) + goto out; + + do_test("bpf_fifo"); +out: + bpf_qdisc_fifo__destroy(fifo_skel); +} + +static void test_fq(void) +{ + struct bpf_qdisc_fq *fq_skel; + + fq_skel = bpf_qdisc_fq__open_and_load(); + if (!ASSERT_OK_PTR(fq_skel, "bpf_qdisc_fq__open_and_load")) + return; + + if (!ASSERT_OK(bpf_qdisc_fq__attach(fq_skel), "bpf_qdisc_fq__attach")) + goto out; + + do_test("bpf_fq"); +out: + bpf_qdisc_fq__destroy(fq_skel); +} + +static void test_qdisc_attach_to_mq(void) +{ + 
DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook, + .attach_point = BPF_TC_QDISC, + .parent = TC_H_MAKE(1 << 16, 1), + .handle = 0x11 << 16, + .qdisc = "bpf_fifo"); + struct bpf_qdisc_fifo *fifo_skel; + int err; + + fifo_skel = bpf_qdisc_fifo__open_and_load(); + if (!ASSERT_OK_PTR(fifo_skel, "bpf_qdisc_fifo__open_and_load")) + return; + + if (!ASSERT_OK(bpf_qdisc_fifo__attach(fifo_skel), "bpf_qdisc_fifo__attach")) + goto out; + + SYS(out, "ip link add veth0 type veth peer veth1"); + hook.ifindex = if_nametoindex("veth0"); + SYS(out, "tc qdisc add dev veth0 root handle 1: mq"); + + err = bpf_tc_hook_create(&hook); + ASSERT_OK(err, "attach qdisc"); + + bpf_tc_hook_destroy(&hook); + + SYS(out, "tc qdisc delete dev veth0 root mq"); +out: + bpf_qdisc_fifo__destroy(fifo_skel); +} + +static void test_qdisc_attach_to_non_root(void) +{ + DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = LO_IFINDEX, + .attach_point = BPF_TC_QDISC, + .parent = TC_H_MAKE(1 << 16, 1), + .handle = 0x11 << 16, + .qdisc = "bpf_fifo"); + struct bpf_qdisc_fifo *fifo_skel; + int err; + + fifo_skel = bpf_qdisc_fifo__open_and_load(); + if (!ASSERT_OK_PTR(fifo_skel, "bpf_qdisc_fifo__open_and_load")) + return; + + if (!ASSERT_OK(bpf_qdisc_fifo__attach(fifo_skel), "bpf_qdisc_fifo__attach")) + goto out; + + SYS(out, "tc qdisc add dev lo root handle 1: htb"); + SYS(out_del_htb, "tc class add dev lo parent 1: classid 1:1 htb rate 75Kbit"); + + err = bpf_tc_hook_create(&hook); + if (!ASSERT_ERR(err, "attach qdisc")) + bpf_tc_hook_destroy(&hook); + +out_del_htb: + SYS(out, "tc qdisc delete dev lo root htb"); +out: + bpf_qdisc_fifo__destroy(fifo_skel); +} + +static void test_incompl_ops(void) +{ + struct bpf_qdisc_fail__incompl_ops *skel; + struct bpf_link *link; + + skel = bpf_qdisc_fail__incompl_ops__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_qdisc_fifo__open_and_load")) + return; + + link = bpf_map__attach_struct_ops(skel->maps.test); + if (!ASSERT_ERR_PTR(link, "bpf_map__attach_struct_ops")) + bpf_link__destroy(link); + + bpf_qdisc_fail__incompl_ops__destroy(skel); +} + +static int get_default_qdisc(char *qdisc_name) +{ + FILE *f; + int num; + + f = fopen("/proc/sys/net/core/default_qdisc", "r"); + if (!f) + return -errno; + + num = fscanf(f, "%s", qdisc_name); + fclose(f); + + return num == 1 ? 
0 : -EFAULT; +} + +static void test_default_qdisc_attach_to_mq(void) +{ + char default_qdisc[IFNAMSIZ] = {}; + struct bpf_qdisc_fifo *fifo_skel; + struct netns_obj *netns = NULL; + int err; + + fifo_skel = bpf_qdisc_fifo__open_and_load(); + if (!ASSERT_OK_PTR(fifo_skel, "bpf_qdisc_fifo__open_and_load")) + return; + + if (!ASSERT_OK(bpf_qdisc_fifo__attach(fifo_skel), "bpf_qdisc_fifo__attach")) + goto out; + + err = get_default_qdisc(default_qdisc); + if (!ASSERT_OK(err, "read sysctl net.core.default_qdisc")) + goto out; + + err = write_sysctl("/proc/sys/net/core/default_qdisc", "bpf_fifo"); + if (!ASSERT_OK(err, "write sysctl net.core.default_qdisc")) + goto out; + + netns = netns_new("bpf_qdisc_ns", true); + if (!ASSERT_OK_PTR(netns, "netns_new")) + goto out; + + SYS(out, "ip link add veth0 type veth peer veth1"); + SYS(out, "tc qdisc add dev veth0 root handle 1: mq"); + + ASSERT_EQ(fifo_skel->bss->init_called, true, "init_called"); + + SYS(out, "tc qdisc delete dev veth0 root mq"); +out: + netns_free(netns); + if (default_qdisc[0]) + write_sysctl("/proc/sys/net/core/default_qdisc", default_qdisc); + + bpf_qdisc_fifo__destroy(fifo_skel); +} + +void test_ns_bpf_qdisc(void) +{ + if (test__start_subtest("fifo")) + test_fifo(); + if (test__start_subtest("fq")) + test_fq(); + if (test__start_subtest("attach to mq")) + test_qdisc_attach_to_mq(); + if (test__start_subtest("attach to non root")) + test_qdisc_attach_to_non_root(); + if (test__start_subtest("incompl_ops")) + test_incompl_ops(); +} + +void serial_test_bpf_qdisc_default(void) +{ + test_default_qdisc_attach_to_mq(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c index e63d74ce046f..8a9ba4292109 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf.c +++ b/tools/testing/selftests/bpf/prog_tests/btf.c @@ -3866,11 +3866,11 @@ static struct btf_raw_test raw_tests[] = { .err_str = "vlen != 0", }, { - .descr = "decl_tag test #8, invalid kflag", + .descr = "decl_tag test #8, tag with kflag", .raw_types = { BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */ - BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 1, 0), 2), (-1), + BTF_DECL_ATTR_ENC(NAME_TBD, 2, -1), BTF_END_RAW, }, BTF_STR_SEC("\0local\0tag1"), @@ -3881,8 +3881,6 @@ static struct btf_raw_test raw_tests[] = { .key_type_id = 1, .value_type_id = 1, .max_entries = 1, - .btf_load_err = true, - .err_str = "Invalid btf_info kind_flag", }, { .descr = "decl_tag test #9, var, invalid component_idx", @@ -4207,6 +4205,23 @@ static struct btf_raw_test raw_tests[] = { .err_str = "Type tags don't precede modifiers", }, { + .descr = "type_tag test #7, tag with kflag", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ATTR_ENC(NAME_TBD, 1), /* [2] */ + BTF_PTR_ENC(2), /* [3] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, +}, +{ .descr = "enum64 test #1, unsigned, size 8", .raw_types = { BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c b/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c index d9024c7a892a..5bc15bb6b7ce 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c @@ -440,6 +440,105 @@ cleanup: 
btf__free(btf1); } +/* Ensure module split BTF dedup worked correctly; when dedup fails badly + * core kernel types are in split BTF also, so ensure that references to + * such types point at base - not split - BTF. + * + * bpf_testmod_test_write() has multiple core kernel type parameters; + * + * ssize_t + * bpf_testmod_test_write(struct file *file, struct kobject *kobj, + * struct bin_attribute *bin_attr, + * char *buf, loff_t off, size_t len); + * + * Ensure each of the FUNC_PROTO params is a core kernel type. + * + * Do the same for + * + * __bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk); + * + * ...and + * + * __bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb); + * + */ +const char *mod_funcs[] = { + "bpf_testmod_test_write", + "bpf_kfunc_call_test3", + "bpf_kfunc_call_test_pass_ctx" +}; + +static void test_split_module(void) +{ + struct btf *vmlinux_btf, *btf1 = NULL; + int i, nr_base_types; + + vmlinux_btf = btf__load_vmlinux_btf(); + if (!ASSERT_OK_PTR(vmlinux_btf, "vmlinux_btf")) + return; + nr_base_types = btf__type_cnt(vmlinux_btf); + if (!ASSERT_GT(nr_base_types, 0, "nr_base_types")) + goto cleanup; + + btf1 = btf__parse_split("/sys/kernel/btf/bpf_testmod", vmlinux_btf); + if (!ASSERT_OK_PTR(btf1, "split_btf")) + return; + + for (i = 0; i < ARRAY_SIZE(mod_funcs); i++) { + const struct btf_param *p; + const struct btf_type *t; + __u16 vlen; + __u32 id; + int j; + + id = btf__find_by_name_kind(btf1, mod_funcs[i], BTF_KIND_FUNC); + if (!ASSERT_GE(id, nr_base_types, "func_id")) + goto cleanup; + t = btf__type_by_id(btf1, id); + if (!ASSERT_OK_PTR(t, "func_id_type")) + goto cleanup; + t = btf__type_by_id(btf1, t->type); + if (!ASSERT_OK_PTR(t, "func_proto_id_type")) + goto cleanup; + if (!ASSERT_EQ(btf_is_func_proto(t), true, "is_func_proto")) + goto cleanup; + vlen = btf_vlen(t); + + for (j = 0, p = btf_params(t); j < vlen; j++, p++) { + /* bpf_testmod uses resilient split BTF, so any + * reference types will be added to split BTF and their + * associated targets will be base BTF types; for example + * for a "struct sock *" the PTR will be in split BTF + * while the "struct sock" will be in base. + * + * In some cases like loff_t we have to resolve + * multiple typedefs hence the while() loop below. + * + * Note that resilient split BTF generation depends + * on pahole version, so we do not assert that + * reference types are in split BTF, as if pahole + * does not support resilient split BTF they will + * also be base BTF types. 
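(The do/while loop in this test is the standard libbpf idiom for peeling pointers, modifiers, and typedefs off a type reference until the underlying type is reached. As a self-contained helper, assuming btf is an already-loaded struct btf *:)

#include <bpf/btf.h>

/* Follow a type reference down to its underlying base type id,
 * skipping pointers, modifiers and typedefs along the way. */
__u32 resolve_base_id(const struct btf *btf, __u32 id)
{
	const struct btf_type *t = btf__type_by_id(btf, id);

	while (t && (btf_is_ptr(t) || btf_is_mod(t) || btf_is_typedef(t))) {
		id = t->type;
		t = btf__type_by_id(btf, id);
	}
	return id;
}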
+ */ + id = p->type; + do { + t = btf__type_by_id(btf1, id); + if (!ASSERT_OK_PTR(t, "param_ref_type")) + goto cleanup; + if (!btf_is_mod(t) && !btf_is_ptr(t) && !btf_is_typedef(t)) + break; + id = t->type; + } while (true); + + if (!ASSERT_LT(id, nr_base_types, "verify_base_type")) + goto cleanup; + } + } +cleanup: + btf__free(btf1); + btf__free(vmlinux_btf); +} + void test_btf_dedup_split() { if (test__start_subtest("split_simple")) @@ -450,4 +549,6 @@ void test_btf_dedup_split() test_split_fwd_resolve(); if (test__start_subtest("split_dup_struct_in_cu")) test_split_dup_struct_in_cu(); + if (test__start_subtest("split_module")) + test_split_module(); } diff --git a/tools/testing/selftests/bpf/prog_tests/btf_distill.c b/tools/testing/selftests/bpf/prog_tests/btf_distill.c index ca84726d5ac1..fb67ae195a73 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_distill.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_distill.c @@ -385,7 +385,7 @@ static void test_distilled_base_missing_err(void) "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED"); btf5 = btf__new_empty(); if (!ASSERT_OK_PTR(btf5, "empty_reloc_btf")) - return; + goto cleanup; btf__add_int(btf5, "int", 4, BTF_INT_SIGNED); /* [1] int */ VALIDATE_RAW_BTF( btf5, @@ -478,7 +478,7 @@ static void test_distilled_base_multi_err2(void) "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED"); btf5 = btf__new_empty(); if (!ASSERT_OK_PTR(btf5, "empty_reloc_btf")) - return; + goto cleanup; btf__add_int(btf5, "int", 4, BTF_INT_SIGNED); /* [1] int */ btf__add_int(btf5, "int", 4, BTF_INT_SIGNED); /* [2] int */ VALIDATE_RAW_BTF( @@ -601,6 +601,76 @@ cleanup: btf__free(base); } +/* If a needed composite type, which is the member of composite type + * in the split BTF, has a different size in the base BTF we wish to + * relocate with, btf__relocate() should error out. 
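(The negative test added below drives the distill/relocate API pair. In outline the libbpf flow is as follows; this is a sketch with error handling trimmed, and `vmlinux` stands in for whatever base BTF the split BTF is relocated against.)

#include <bpf/btf.h>

int distill_and_relocate(struct btf *split, struct btf *vmlinux)
{
	struct btf *dist_base = NULL, *dist_split = NULL;
	int err;

	/* Shrink the base down to just the types the split BTF needs */
	err = btf__distill_base(split, &dist_base, &dist_split);
	if (err)
		return err;

	/* Re-attach the distilled split BTF to a (possibly newer) base;
	 * fails with -EINVAL if a needed embedded type changed size. */
	err = btf__relocate(dist_split, vmlinux);

	btf__free(dist_split);
	btf__free(dist_base);
	return err;
}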
+ */ +static void test_distilled_base_embedded_err(void) +{ + struct btf *btf1 = NULL, *btf2 = NULL, *btf3 = NULL, *btf4 = NULL, *btf5 = NULL; + + btf1 = btf__new_empty(); + if (!ASSERT_OK_PTR(btf1, "empty_main_btf")) + return; + + btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */ + btf__add_struct(btf1, "s1", 4); /* [2] struct s1 { */ + btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */ + /* } */ + VALIDATE_RAW_BTF( + btf1, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] STRUCT 's1' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0"); + + btf2 = btf__new_empty_split(btf1); + if (!ASSERT_OK_PTR(btf2, "empty_split_btf")) + goto cleanup; + + btf__add_struct(btf2, "with_embedded", 8); /* [3] struct with_embedded { */ + btf__add_field(btf2, "e1", 2, 0, 0); /* struct s1 e1; */ + /* } */ + + VALIDATE_RAW_BTF( + btf2, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] STRUCT 's1' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[3] STRUCT 'with_embedded' size=8 vlen=1\n" + "\t'e1' type_id=2 bits_offset=0"); + + if (!ASSERT_EQ(0, btf__distill_base(btf2, &btf3, &btf4), + "distilled_base") || + !ASSERT_OK_PTR(btf3, "distilled_base") || + !ASSERT_OK_PTR(btf4, "distilled_split") || + !ASSERT_EQ(2, btf__type_cnt(btf3), "distilled_base_type_cnt")) + goto cleanup; + + VALIDATE_RAW_BTF( + btf4, + "[1] STRUCT 's1' size=4 vlen=0", + "[2] STRUCT 'with_embedded' size=8 vlen=1\n" + "\t'e1' type_id=1 bits_offset=0"); + + btf5 = btf__new_empty(); + if (!ASSERT_OK_PTR(btf5, "empty_reloc_btf")) + goto cleanup; + + btf__add_int(btf5, "int", 4, BTF_INT_SIGNED); /* [1] int */ + /* struct with the same name but different size */ + btf__add_struct(btf5, "s1", 8); /* [2] struct s1 { */ + btf__add_field(btf5, "f1", 1, 0, 0); /* int f1; */ + /* } */ + + ASSERT_EQ(btf__relocate(btf4, btf5), -EINVAL, "relocate_split"); +cleanup: + btf__free(btf5); + btf__free(btf4); + btf__free(btf3); + btf__free(btf2); + btf__free(btf1); +} + void test_btf_distill(void) { if (test__start_subtest("distilled_base")) @@ -613,6 +683,8 @@ void test_btf_distill(void) test_distilled_base_multi_err(); if (test__start_subtest("distilled_base_multi_err2")) test_distilled_base_multi_err2(); + if (test__start_subtest("distilled_base_embedded_err")) + test_distilled_base_embedded_err(); if (test__start_subtest("distilled_base_vmlinux")) test_distilled_base_vmlinux(); if (test__start_subtest("distilled_endianness")) diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c index b293b8501fd6..82903585c870 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c @@ -126,26 +126,69 @@ done: return err; } -static char *dump_buf; -static size_t dump_buf_sz; -static FILE *dump_buf_file; +struct test_ctx { + struct btf *btf; + struct btf_dump *d; + char *dump_buf; + size_t dump_buf_sz; + FILE *dump_buf_file; +}; -static void test_btf_dump_incremental(void) +static void test_ctx__free(struct test_ctx *t) { - struct btf *btf = NULL; - struct btf_dump *d = NULL; - int id, err, i; + fclose(t->dump_buf_file); + free(t->dump_buf); + btf_dump__free(t->d); + btf__free(t->btf); +} - dump_buf_file = open_memstream(&dump_buf, &dump_buf_sz); - if (!ASSERT_OK_PTR(dump_buf_file, "dump_memstream")) - return; - btf = btf__new_empty(); - if (!ASSERT_OK_PTR(btf, "new_empty")) +static int test_ctx__init(struct test_ctx *t) +{ + t->dump_buf_file = open_memstream(&t->dump_buf, &t->dump_buf_sz); + 
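(The test context refactored here buffers btf_dump output through open_memstream(3). One subtlety worth spelling out: the buffer pointer and size are only guaranteed current after a flush, and not every libc NUL-terminates at the reported size, which is why the tests terminate dump_buf manually. The bare pattern, independent of BTF:)

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *buf = NULL;
	size_t sz = 0;
	FILE *f = open_memstream(&buf, &sz);

	if (!f)
		return 1;
	fprintf(f, "hello %d", 42);
	fflush(f);		/* make buf/sz current before reading them */
	printf("captured: '%s' (%zu bytes)\n", buf, sz);
	fclose(f);
	free(buf);		/* caller owns the buffer */
	return 0;
}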
if (!ASSERT_OK_PTR(t->dump_buf_file, "dump_memstream")) + return -1; + t->btf = btf__new_empty(); + if (!ASSERT_OK_PTR(t->btf, "new_empty")) goto err_out; - d = btf_dump__new(btf, btf_dump_printf, dump_buf_file, NULL); - if (!ASSERT_OK(libbpf_get_error(d), "btf_dump__new")) + t->d = btf_dump__new(t->btf, btf_dump_printf, t->dump_buf_file, NULL); + if (!ASSERT_OK(libbpf_get_error(t->d), "btf_dump__new")) goto err_out; + return 0; + +err_out: + test_ctx__free(t); + return -1; +} + +static void test_ctx__dump_and_compare(struct test_ctx *t, + const char *expected_output, + const char *message) +{ + int i, err; + + for (i = 1; i < btf__type_cnt(t->btf); i++) { + err = btf_dump__dump_type(t->d, i); + ASSERT_OK(err, "dump_type_ok"); + } + + fflush(t->dump_buf_file); + t->dump_buf[t->dump_buf_sz] = 0; /* some libc implementations don't do this */ + + ASSERT_STREQ(t->dump_buf, expected_output, message); +} + +static void test_btf_dump_incremental(void) +{ + struct test_ctx t = {}; + struct btf *btf; + int id, err; + + if (test_ctx__init(&t)) + return; + + btf = t.btf; + /* First, generate BTF corresponding to the following C code: * * enum x; @@ -182,15 +225,7 @@ static void test_btf_dump_incremental(void) err = btf__add_field(btf, "x", 4, 0, 0); ASSERT_OK(err, "field_ok"); - for (i = 1; i < btf__type_cnt(btf); i++) { - err = btf_dump__dump_type(d, i); - ASSERT_OK(err, "dump_type_ok"); - } - - fflush(dump_buf_file); - dump_buf[dump_buf_sz] = 0; /* some libc implementations don't do this */ - - ASSERT_STREQ(dump_buf, + test_ctx__dump_and_compare(&t, "enum x;\n" "\n" "enum x {\n" @@ -221,7 +256,7 @@ static void test_btf_dump_incremental(void) * enum values don't conflict; * */ - fseek(dump_buf_file, 0, SEEK_SET); + fseek(t.dump_buf_file, 0, SEEK_SET); id = btf__add_struct(btf, "s", 4); ASSERT_EQ(id, 7, "struct_id"); @@ -232,14 +267,7 @@ static void test_btf_dump_incremental(void) err = btf__add_field(btf, "s", 6, 64, 0); ASSERT_OK(err, "field_ok"); - for (i = 1; i < btf__type_cnt(btf); i++) { - err = btf_dump__dump_type(d, i); - ASSERT_OK(err, "dump_type_ok"); - } - - fflush(dump_buf_file); - dump_buf[dump_buf_sz] = 0; /* some libc implementations don't do this */ - ASSERT_STREQ(dump_buf, + test_ctx__dump_and_compare(&t, "struct s___2 {\n" " enum x x;\n" " enum {\n" @@ -248,11 +276,53 @@ static void test_btf_dump_incremental(void) " struct s s;\n" "};\n\n" , "c_dump1"); -err_out: - fclose(dump_buf_file); - free(dump_buf); - btf_dump__free(d); - btf__free(btf); + test_ctx__free(&t); +} + +static void test_btf_dump_type_tags(void) +{ + struct test_ctx t = {}; + struct btf *btf; + int id, err; + + if (test_ctx__init(&t)) + return; + + btf = t.btf; + + /* Generate BTF corresponding to the following C code: + * + * struct s { + * void __attribute__((btf_type_tag(\"void_tag\"))) *p1; + * void __attribute__((void_attr)) *p2; + * }; + * + */ + + id = btf__add_type_tag(btf, "void_tag", 0); + ASSERT_EQ(id, 1, "type_tag_id"); + id = btf__add_ptr(btf, id); + ASSERT_EQ(id, 2, "void_ptr_id1"); + + id = btf__add_type_attr(btf, "void_attr", 0); + ASSERT_EQ(id, 3, "type_attr_id"); + id = btf__add_ptr(btf, id); + ASSERT_EQ(id, 4, "void_ptr_id2"); + + id = btf__add_struct(btf, "s", 8); + ASSERT_EQ(id, 5, "struct_id"); + err = btf__add_field(btf, "p1", 2, 0, 0); + ASSERT_OK(err, "field_ok1"); + err = btf__add_field(btf, "p2", 4, 0, 0); + ASSERT_OK(err, "field_ok2"); + + test_ctx__dump_and_compare(&t, +"struct s {\n" +" void __attribute__((btf_type_tag(\"void_tag\"))) *p1;\n" +" void __attribute__((void_attr)) *p2;\n" 
+"};\n\n", "dump_and_compare"); + + test_ctx__free(&t); } #define STRSIZE 4096 @@ -809,6 +879,122 @@ static void test_btf_dump_var_data(struct btf *btf, struct btf_dump *d, "static int bpf_cgrp_storage_busy = (int)2", 2); } +struct btf_dump_string_ctx { + struct btf *btf; + struct btf_dump *d; + char *str; + struct btf_dump_type_data_opts *opts; + int array_id; +}; + +static int btf_dump_one_string(struct btf_dump_string_ctx *ctx, + char *ptr, size_t ptr_sz, + const char *expected_val) +{ + size_t type_sz; + int ret; + + ctx->str[0] = '\0'; + type_sz = btf__resolve_size(ctx->btf, ctx->array_id); + ret = btf_dump__dump_type_data(ctx->d, ctx->array_id, ptr, ptr_sz, ctx->opts); + if (type_sz <= ptr_sz) { + if (!ASSERT_EQ(ret, type_sz, "failed/unexpected type_sz")) + return -EINVAL; + } + if (!ASSERT_STREQ(ctx->str, expected_val, "ensure expected/actual match")) + return -EFAULT; + return 0; +} + +static void btf_dump_strings(struct btf_dump_string_ctx *ctx) +{ + struct btf_dump_type_data_opts *opts = ctx->opts; + + opts->emit_strings = true; + + opts->compact = true; + opts->emit_zeroes = false; + + opts->skip_names = false; + btf_dump_one_string(ctx, "foo", 4, "(char[4])\"foo\""); + + opts->skip_names = true; + btf_dump_one_string(ctx, "foo", 4, "\"foo\""); + + /* This should have no effect. */ + opts->emit_zeroes = false; + btf_dump_one_string(ctx, "foo", 4, "\"foo\""); + + /* This should have no effect. */ + opts->compact = false; + btf_dump_one_string(ctx, "foo", 4, "\"foo\""); + + /* Non-printable characters come out as hex. */ + btf_dump_one_string(ctx, "fo\xff", 4, "\"fo\\xff\""); + btf_dump_one_string(ctx, "fo\x7", 4, "\"fo\\x07\""); + + /* + * Strings that are too long for the specified type ("char[4]") + * should fall back to the current behavior. + */ + opts->compact = true; + btf_dump_one_string(ctx, "abcde", 6, "['a','b','c','d',]"); + + /* + * Strings that are too short for the specified type ("char[4]") + * should work normally. + */ + btf_dump_one_string(ctx, "ab", 3, "\"ab\""); + + /* Non-NUL-terminated arrays don't get printed as strings. */ + char food[4] = { 'f', 'o', 'o', 'd' }; + char bye[3] = { 'b', 'y', 'e' }; + + btf_dump_one_string(ctx, food, 4, "['f','o','o','d',]"); + btf_dump_one_string(ctx, bye, 3, "['b','y','e',]"); + + /* The embedded NUL should terminate the string. */ + char embed[4] = { 'f', 'o', '\0', 'd' }; + + btf_dump_one_string(ctx, embed, 4, "\"fo\""); +} + +static void test_btf_dump_string_data(void) +{ + struct test_ctx t = {}; + char str[STRSIZE]; + struct btf_dump *d; + DECLARE_LIBBPF_OPTS(btf_dump_type_data_opts, opts); + struct btf_dump_string_ctx ctx; + int char_id, int_id, array_id; + + if (test_ctx__init(&t)) + return; + + d = btf_dump__new(t.btf, btf_dump_snprintf, str, NULL); + if (!ASSERT_OK_PTR(d, "could not create BTF dump")) + return; + + /* Generate BTF for a four-element char array. 
*/ + char_id = btf__add_int(t.btf, "char", 1, BTF_INT_CHAR); + ASSERT_EQ(char_id, 1, "char_id"); + int_id = btf__add_int(t.btf, "int", 4, BTF_INT_SIGNED); + ASSERT_EQ(int_id, 2, "int_id"); + array_id = btf__add_array(t.btf, int_id, char_id, 4); + ASSERT_EQ(array_id, 3, "array_id"); + + ctx.btf = t.btf; + ctx.d = d; + ctx.str = str; + ctx.opts = &opts; + ctx.array_id = array_id; + + btf_dump_strings(&ctx); + + btf_dump__free(d); + test_ctx__free(&t); +} + static void test_btf_datasec(struct btf *btf, struct btf_dump *d, char *str, const char *name, const char *expected_val, void *data, size_t data_sz) @@ -874,6 +1060,9 @@ void test_btf_dump() { if (test__start_subtest("btf_dump: incremental")) test_btf_dump_incremental(); + if (test__start_subtest("btf_dump: type_tags")) + test_btf_dump_type_tags(); + btf = libbpf_find_kernel_btf(); if (!ASSERT_OK_PTR(btf, "no kernel BTF found")) return; @@ -897,6 +1086,8 @@ void test_btf_dump() { test_btf_dump_struct_data(btf, d, str); if (test__start_subtest("btf_dump: var_data")) test_btf_dump_var_data(btf, d, str); + if (test__start_subtest("btf_dump: string_data")) + test_btf_dump_string_data(); btf_dump__free(d); btf__free(btf); diff --git a/tools/testing/selftests/bpf/prog_tests/btf_split.c b/tools/testing/selftests/bpf/prog_tests/btf_split.c index eef1158676ed..3696fb9a05ed 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_split.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_split.c @@ -12,10 +12,11 @@ static void btf_dump_printf(void *ctx, const char *fmt, va_list args) vfprintf(ctx, fmt, args); } -void test_btf_split() { +static void __test_btf_split(bool multi) +{ struct btf_dump *d = NULL; const struct btf_type *t; - struct btf *btf1, *btf2; + struct btf *btf1, *btf2, *btf3 = NULL; int str_off, i, err; btf1 = btf__new_empty(); @@ -63,14 +64,46 @@ void test_btf_split() { ASSERT_EQ(btf_vlen(t), 3, "split_struct_vlen"); ASSERT_STREQ(btf__str_by_offset(btf2, t->name_off), "s2", "split_struct_name"); + if (multi) { + btf3 = btf__new_empty_split(btf2); + if (!ASSERT_OK_PTR(btf3, "multi_split_btf")) + goto cleanup; + } else { + btf3 = btf2; + } + + btf__add_union(btf3, "u1", 16); /* [5] union u1 { */ + btf__add_field(btf3, "f1", 4, 0, 0); /* struct s2 f1; */ + btf__add_field(btf3, "uf2", 1, 0, 0); /* int f2; */ + /* } */ + + if (multi) { + t = btf__type_by_id(btf2, 5); + ASSERT_NULL(t, "multisplit_type_in_first_split"); + } + + t = btf__type_by_id(btf3, 5); + if (!ASSERT_OK_PTR(t, "split_union_type")) + goto cleanup; + ASSERT_EQ(btf_is_union(t), true, "split_union_kind"); + ASSERT_EQ(btf_vlen(t), 2, "split_union_vlen"); + ASSERT_STREQ(btf__str_by_offset(btf3, t->name_off), "u1", "split_union_name"); + ASSERT_EQ(btf__type_cnt(btf3), 6, "split_type_cnt"); + + t = btf__type_by_id(btf3, 1); + if (!ASSERT_OK_PTR(t, "split_base_type")) + goto cleanup; + ASSERT_EQ(btf_is_int(t), true, "split_base_int"); + ASSERT_STREQ(btf__str_by_offset(btf3, t->name_off), "int", "split_base_type_name"); + /* BTF-to-C dump of split BTF */ dump_buf_file = open_memstream(&dump_buf, &dump_buf_sz); if (!ASSERT_OK_PTR(dump_buf_file, "dump_memstream")) return; - d = btf_dump__new(btf2, btf_dump_printf, dump_buf_file, NULL); + d = btf_dump__new(btf3, btf_dump_printf, dump_buf_file, NULL); if (!ASSERT_OK_PTR(d, "btf_dump__new")) goto cleanup; - for (i = 1; i < btf__type_cnt(btf2); i++) { + for (i = 1; i < btf__type_cnt(btf3); i++) { err = btf_dump__dump_type(d, i); ASSERT_OK(err, "dump_type_ok"); } @@ -79,12 +112,15 @@ void test_btf_split() { ASSERT_STREQ(dump_buf, "struct 
s1 {\n" " int f1;\n" -"};\n" -"\n" +"};\n\n" "struct s2 {\n" " struct s1 f1;\n" " int f2;\n" " int *f3;\n" +"};\n\n" +"union u1 {\n" +" struct s2 f1;\n" +" int uf2;\n" "};\n\n", "c_dump"); cleanup: @@ -94,4 +130,14 @@ cleanup: btf_dump__free(d); btf__free(btf1); btf__free(btf2); + if (btf2 != btf3) + btf__free(btf3); +} + +void test_btf_split(void) +{ + if (test__start_subtest("single_split")) + __test_btf_split(false); + if (test__start_subtest("multi_split")) + __test_btf_split(true); } diff --git a/tools/testing/selftests/bpf/prog_tests/btf_sysfs.c b/tools/testing/selftests/bpf/prog_tests/btf_sysfs.c new file mode 100644 index 000000000000..3923e64c4c1d --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/btf_sysfs.c @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright (c) 2025 Isovalent */ + +#include <test_progs.h> +#include <bpf/btf.h> +#include <sys/stat.h> +#include <sys/mman.h> +#include <fcntl.h> +#include <unistd.h> + +static void test_btf_mmap_sysfs(const char *path, struct btf *base) +{ + struct stat st; + __u64 btf_size, end; + void *raw_data = NULL; + int fd = -1; + long page_size; + struct btf *btf = NULL; + + page_size = sysconf(_SC_PAGESIZE); + if (!ASSERT_GE(page_size, 0, "get_page_size")) + goto cleanup; + + if (!ASSERT_OK(stat(path, &st), "stat_btf")) + goto cleanup; + + btf_size = st.st_size; + end = (btf_size + page_size - 1) / page_size * page_size; + + fd = open(path, O_RDONLY); + if (!ASSERT_GE(fd, 0, "open_btf")) + goto cleanup; + + raw_data = mmap(NULL, btf_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); + if (!ASSERT_EQ(raw_data, MAP_FAILED, "mmap_btf_writable")) + goto cleanup; + + raw_data = mmap(NULL, btf_size, PROT_READ, MAP_SHARED, fd, 0); + if (!ASSERT_EQ(raw_data, MAP_FAILED, "mmap_btf_shared")) + goto cleanup; + + raw_data = mmap(NULL, end + 1, PROT_READ, MAP_PRIVATE, fd, 0); + if (!ASSERT_EQ(raw_data, MAP_FAILED, "mmap_btf_invalid_size")) + goto cleanup; + + raw_data = mmap(NULL, end, PROT_READ, MAP_PRIVATE, fd, 0); + if (!ASSERT_OK_PTR(raw_data, "mmap_btf")) + goto cleanup; + + if (!ASSERT_EQ(mprotect(raw_data, btf_size, PROT_READ | PROT_WRITE), -1, + "mprotect_writable")) + goto cleanup; + + if (!ASSERT_EQ(mprotect(raw_data, btf_size, PROT_READ | PROT_EXEC), -1, + "mprotect_executable")) + goto cleanup; + + /* Check padding is zeroed */ + for (int i = btf_size; i < end; i++) { + if (((__u8 *)raw_data)[i] != 0) { + PRINT_FAIL("tail of BTF is not zero at page offset %d\n", i); + goto cleanup; + } + } + + btf = btf__new_split(raw_data, btf_size, base); + if (!ASSERT_OK_PTR(btf, "parse_btf")) + goto cleanup; + +cleanup: + btf__free(btf); + if (raw_data && raw_data != MAP_FAILED) + munmap(raw_data, btf_size); + if (fd >= 0) + close(fd); +} + +void test_btf_sysfs(void) +{ + test_btf_mmap_sysfs("/sys/kernel/btf/vmlinux", NULL); +} diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_mprog_opts.c b/tools/testing/selftests/bpf/prog_tests/cgroup_mprog_opts.c new file mode 100644 index 000000000000..bb60704a3ef9 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_mprog_opts.c @@ -0,0 +1,617 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. 
*/ +#include <test_progs.h> +#include "cgroup_helpers.h" +#include "cgroup_mprog.skel.h" + +static void assert_mprog_count(int cg, int atype, int expected) +{ + __u32 count = 0, attach_flags = 0; + int err; + + err = bpf_prog_query(cg, atype, 0, &attach_flags, + NULL, &count); + ASSERT_EQ(count, expected, "count"); + ASSERT_EQ(err, 0, "prog_query"); +} + +static void test_prog_attach_detach(int atype) +{ + LIBBPF_OPTS(bpf_prog_attach_opts, opta); + LIBBPF_OPTS(bpf_prog_detach_opts, optd); + LIBBPF_OPTS(bpf_prog_query_opts, optq); + __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4; + struct cgroup_mprog *skel; + __u32 prog_ids[10]; + int cg, err; + + cg = test__join_cgroup("/prog_attach_detach"); + if (!ASSERT_GE(cg, 0, "join_cgroup /prog_attach_detach")) + return; + + skel = cgroup_mprog__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_load")) + goto cleanup; + + fd1 = bpf_program__fd(skel->progs.getsockopt_1); + fd2 = bpf_program__fd(skel->progs.getsockopt_2); + fd3 = bpf_program__fd(skel->progs.getsockopt_3); + fd4 = bpf_program__fd(skel->progs.getsockopt_4); + + id1 = id_from_prog_fd(fd1); + id2 = id_from_prog_fd(fd2); + id3 = id_from_prog_fd(fd3); + id4 = id_from_prog_fd(fd4); + + assert_mprog_count(cg, atype, 0); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE | BPF_F_AFTER, + .expected_revision = 1, + ); + + /* ordering: [fd1] */ + err = bpf_prog_attach_opts(fd1, cg, atype, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup; + + assert_mprog_count(cg, atype, 1); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE, + .expected_revision = 2, + ); + + /* ordering: [fd2, fd1] */ + err = bpf_prog_attach_opts(fd2, cg, atype, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup1; + + assert_mprog_count(cg, atype, 2); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_ALLOW_MULTI | BPF_F_AFTER, + .relative_fd = fd2, + .expected_revision = 3, + ); + + /* ordering: [fd2, fd3, fd1] */ + err = bpf_prog_attach_opts(fd3, cg, atype, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup2; + + assert_mprog_count(cg, atype, 3); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_ALLOW_MULTI, + .expected_revision = 4, + ); + + /* ordering: [fd2, fd3, fd1, fd4] */ + err = bpf_prog_attach_opts(fd4, cg, atype, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup3; + + assert_mprog_count(cg, atype, 4); + + /* retrieve optq.prog_cnt */ + err = bpf_prog_query_opts(cg, atype, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup4; + + /* optq.prog_cnt will be used in below query */ + memset(prog_ids, 0, sizeof(prog_ids)); + optq.prog_ids = prog_ids; + err = bpf_prog_query_opts(cg, atype, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup4; + + ASSERT_EQ(optq.count, 4, "count"); + ASSERT_EQ(optq.revision, 5, "revision"); + ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], id1, "prog_ids[2]"); + ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]"); + ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); + ASSERT_EQ(optq.link_ids, NULL, "link_ids"); + +cleanup4: + optd.expected_revision = 5; + err = bpf_prog_detach_opts(fd4, cg, atype, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(cg, atype, 3); + +cleanup3: + LIBBPF_OPTS_RESET(optd); + err = bpf_prog_detach_opts(fd3, cg, atype, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(cg, atype, 2); + + /* Check revision after two detach operations */ + err = bpf_prog_query_opts(cg, 
atype, &optq); + ASSERT_OK(err, "prog_query"); + ASSERT_EQ(optq.revision, 7, "revision"); + +cleanup2: + err = bpf_prog_detach_opts(fd2, cg, atype, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(cg, atype, 1); + +cleanup1: + err = bpf_prog_detach_opts(fd1, cg, atype, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(cg, atype, 0); + +cleanup: + cgroup_mprog__destroy(skel); + close(cg); +} + +static void test_link_attach_detach(int atype) +{ + LIBBPF_OPTS(bpf_cgroup_opts, opta); + LIBBPF_OPTS(bpf_cgroup_opts, optd); + LIBBPF_OPTS(bpf_prog_query_opts, optq); + struct bpf_link *link1, *link2, *link3, *link4; + __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4; + struct cgroup_mprog *skel; + __u32 prog_ids[10]; + int cg, err; + + cg = test__join_cgroup("/link_attach_detach"); + if (!ASSERT_GE(cg, 0, "join_cgroup /link_attach_detach")) + return; + + skel = cgroup_mprog__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_load")) + goto cleanup; + + fd1 = bpf_program__fd(skel->progs.getsockopt_1); + fd2 = bpf_program__fd(skel->progs.getsockopt_2); + fd3 = bpf_program__fd(skel->progs.getsockopt_3); + fd4 = bpf_program__fd(skel->progs.getsockopt_4); + + id1 = id_from_prog_fd(fd1); + id2 = id_from_prog_fd(fd2); + id3 = id_from_prog_fd(fd3); + id4 = id_from_prog_fd(fd4); + + assert_mprog_count(cg, atype, 0); + + LIBBPF_OPTS_RESET(opta, + .expected_revision = 1, + ); + + /* ordering: [fd1] */ + link1 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_1, cg, &opta); + if (!ASSERT_OK_PTR(link1, "link_attach")) + goto cleanup; + + assert_mprog_count(cg, atype, 1); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_BEFORE | BPF_F_LINK, + .relative_id = id_from_link_fd(bpf_link__fd(link1)), + .expected_revision = 2, + ); + + /* ordering: [fd2, fd1] */ + link2 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_2, cg, &opta); + if (!ASSERT_OK_PTR(link2, "link_attach")) + goto cleanup1; + + assert_mprog_count(cg, atype, 2); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_AFTER | BPF_F_LINK, + .relative_fd = bpf_link__fd(link2), + .expected_revision = 3, + ); + + /* ordering: [fd2, fd3, fd1] */ + link3 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_3, cg, &opta); + if (!ASSERT_OK_PTR(link3, "link_attach")) + goto cleanup2; + + assert_mprog_count(cg, atype, 3); + + LIBBPF_OPTS_RESET(opta, + .expected_revision = 4, + ); + + /* ordering: [fd2, fd3, fd1, fd4] */ + link4 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_4, cg, &opta); + if (!ASSERT_OK_PTR(link4, "link_attach")) + goto cleanup3; + + assert_mprog_count(cg, atype, 4); + + /* retrieve optq.prog_cnt */ + err = bpf_prog_query_opts(cg, atype, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup4; + + /* optq.prog_cnt will be used in below query */ + memset(prog_ids, 0, sizeof(prog_ids)); + optq.prog_ids = prog_ids; + err = bpf_prog_query_opts(cg, atype, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup4; + + ASSERT_EQ(optq.count, 4, "count"); + ASSERT_EQ(optq.revision, 5, "revision"); + ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], id1, "prog_ids[2]"); + ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]"); + ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); + ASSERT_EQ(optq.link_ids, NULL, "link_ids"); + +cleanup4: + bpf_link__destroy(link4); + assert_mprog_count(cg, atype, 3); + +cleanup3: + bpf_link__destroy(link3); + assert_mprog_count(cg, atype, 2); + + /* Check revision after two detach operations */ + err = 
bpf_prog_query_opts(cg, atype, &optq);
+ ASSERT_OK(err, "prog_query");
+ ASSERT_EQ(optq.revision, 7, "revision");
+
+cleanup2:
+ bpf_link__destroy(link2);
+ assert_mprog_count(cg, atype, 1);
+
+cleanup1:
+ bpf_link__destroy(link1);
+ assert_mprog_count(cg, atype, 0);
+
+cleanup:
+ cgroup_mprog__destroy(skel);
+ close(cg);
+}
+
+static void test_preorder_prog_attach_detach(int atype)
+{
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+ __u32 fd1, fd2, fd3, fd4;
+ struct cgroup_mprog *skel;
+ int cg, err;
+
+ cg = test__join_cgroup("/preorder_prog_attach_detach");
+ if (!ASSERT_GE(cg, 0, "join_cgroup /preorder_prog_attach_detach"))
+ return;
+
+ skel = cgroup_mprog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd1 = bpf_program__fd(skel->progs.getsockopt_1);
+ fd2 = bpf_program__fd(skel->progs.getsockopt_2);
+ fd3 = bpf_program__fd(skel->progs.getsockopt_3);
+ fd4 = bpf_program__fd(skel->progs.getsockopt_4);
+
+ assert_mprog_count(cg, atype, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI,
+ .expected_revision = 1,
+ );
+
+ /* ordering: [fd1] */
+ err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup;
+
+ assert_mprog_count(cg, atype, 1);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_PREORDER,
+ .expected_revision = 2,
+ );
+
+ /* ordering: [fd1, fd2] */
+ err = bpf_prog_attach_opts(fd2, cg, atype, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup1;
+
+ assert_mprog_count(cg, atype, 2);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_AFTER,
+ .relative_fd = fd2,
+ .expected_revision = 3,
+ );
+
+ err = bpf_prog_attach_opts(fd3, cg, atype, &opta);
+ if (!ASSERT_EQ(err, -EINVAL, "prog_attach"))
+ goto cleanup2;
+
+ assert_mprog_count(cg, atype, 2);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_AFTER | BPF_F_PREORDER,
+ .relative_fd = fd2,
+ .expected_revision = 3,
+ );
+
+ /* ordering: [fd1, fd2, fd3] */
+ err = bpf_prog_attach_opts(fd3, cg, atype, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup2;
+
+ assert_mprog_count(cg, atype, 3);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI,
+ .expected_revision = 4,
+ );
+
+ /* ordering: [fd1, fd2, fd3, fd4] */
+ err = bpf_prog_attach_opts(fd4, cg, atype, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup3;
+
+ assert_mprog_count(cg, atype, 4);
+
+ err = bpf_prog_detach_opts(fd4, cg, atype, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(cg, atype, 3);
+
+cleanup3:
+ err = bpf_prog_detach_opts(fd3, cg, atype, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(cg, atype, 2);
+
+cleanup2:
+ err = bpf_prog_detach_opts(fd2, cg, atype, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(cg, atype, 1);
+
+cleanup1:
+ err = bpf_prog_detach_opts(fd1, cg, atype, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(cg, atype, 0);
+
+cleanup:
+ cgroup_mprog__destroy(skel);
+ close(cg);
+}
+
+static void test_preorder_link_attach_detach(int atype)
+{
+ LIBBPF_OPTS(bpf_cgroup_opts, opta);
+ struct bpf_link *link1, *link2, *link3, *link4;
+ struct cgroup_mprog *skel;
+ __u32 fd2;
+ int cg;
+
+ cg = test__join_cgroup("/preorder_link_attach_detach");
+ if (!ASSERT_GE(cg, 0, "join_cgroup /preorder_link_attach_detach"))
+ return;
+
+ skel = cgroup_mprog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd2 = bpf_program__fd(skel->progs.getsockopt_2);
+
+
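+ /*
+ * Only getsockopt_2's raw fd is kept around here: it serves as the
+ * relative anchor for the BPF_F_AFTER attachments below, while the
+ * programs themselves are attached and detached via bpf_link.
+ */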
assert_mprog_count(cg, atype, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .expected_revision = 1,
+ );
+
+ /* ordering: [fd1] */
+ link1 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_1, cg, &opta);
+ if (!ASSERT_OK_PTR(link1, "link_attach"))
+ goto cleanup;
+
+ assert_mprog_count(cg, atype, 1);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_PREORDER,
+ .expected_revision = 2,
+ );
+
+ /* ordering: [fd1, fd2] */
+ link2 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_2, cg, &opta);
+ if (!ASSERT_OK_PTR(link2, "link_attach"))
+ goto cleanup1;
+
+ assert_mprog_count(cg, atype, 2);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_AFTER,
+ .relative_fd = fd2,
+ .expected_revision = 3,
+ );
+
+ link3 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_3, cg, &opta);
+ if (!ASSERT_ERR_PTR(link3, "link_attach"))
+ goto cleanup2;
+
+ assert_mprog_count(cg, atype, 2);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_AFTER | BPF_F_PREORDER | BPF_F_LINK,
+ .relative_fd = bpf_link__fd(link2),
+ .expected_revision = 3,
+ );
+
+ /* ordering: [fd1, fd2, fd3] */
+ link3 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_3, cg, &opta);
+ if (!ASSERT_OK_PTR(link3, "link_attach"))
+ goto cleanup2;
+
+ assert_mprog_count(cg, atype, 3);
+
+ LIBBPF_OPTS_RESET(opta,
+ .expected_revision = 4,
+ );
+
+ /* ordering: [fd1, fd2, fd3, fd4] */
+ link4 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_4, cg, &opta);
+ if (!ASSERT_OK_PTR(link4, "link_attach"))
+ goto cleanup3;
+
+ assert_mprog_count(cg, atype, 4);
+
+ bpf_link__destroy(link4);
+ assert_mprog_count(cg, atype, 3);
+
+cleanup3:
+ bpf_link__destroy(link3);
+ assert_mprog_count(cg, atype, 2);
+
+cleanup2:
+ bpf_link__destroy(link2);
+ assert_mprog_count(cg, atype, 1);
+
+cleanup1:
+ bpf_link__destroy(link1);
+ assert_mprog_count(cg, atype, 0);
+
+cleanup:
+ cgroup_mprog__destroy(skel);
+ close(cg);
+}
+
+static void test_invalid_attach_detach(int atype)
+{
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ __u32 fd1, fd2, id2;
+ struct cgroup_mprog *skel;
+ int cg, err;
+
+ cg = test__join_cgroup("/invalid_attach_detach");
+ if (!ASSERT_GE(cg, 0, "join_cgroup /invalid_attach_detach"))
+ return;
+
+ skel = cgroup_mprog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd1 = bpf_program__fd(skel->progs.getsockopt_1);
+ fd2 = bpf_program__fd(skel->progs.getsockopt_2);
+
+ id2 = id_from_prog_fd(fd2);
+
+ assert_mprog_count(cg, atype, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE | BPF_F_AFTER,
+ .relative_id = id2,
+ );
+
+ err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+ ASSERT_EQ(err, -EINVAL, "prog_attach");
+ assert_mprog_count(cg, atype, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE | BPF_F_ID,
+ );
+
+ err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+ ASSERT_EQ(err, -ENOENT, "prog_attach");
+ assert_mprog_count(cg, atype, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_AFTER | BPF_F_ID,
+ );
+
+ err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+ ASSERT_EQ(err, -ENOENT, "prog_attach");
+ assert_mprog_count(cg, atype, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE | BPF_F_AFTER,
+ .relative_id = id2,
+ );
+
+ err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+ ASSERT_EQ(err, -EINVAL, "prog_attach");
+ assert_mprog_count(cg, atype, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_LINK,
+ .relative_id = id2,
+ );
+
+ err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+ ASSERT_EQ(err,
-EINVAL, "prog_attach"); + assert_mprog_count(cg, atype, 0); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_ALLOW_MULTI, + .relative_id = id2, + ); + + err = bpf_prog_attach_opts(fd1, cg, atype, &opta); + ASSERT_EQ(err, -EINVAL, "prog_attach"); + assert_mprog_count(cg, atype, 0); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE, + .relative_fd = fd1, + ); + + err = bpf_prog_attach_opts(fd1, cg, atype, &opta); + ASSERT_EQ(err, -ENOENT, "prog_attach"); + assert_mprog_count(cg, atype, 0); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_ALLOW_MULTI | BPF_F_AFTER, + .relative_fd = fd1, + ); + + err = bpf_prog_attach_opts(fd1, cg, atype, &opta); + ASSERT_EQ(err, -ENOENT, "prog_attach"); + assert_mprog_count(cg, atype, 0); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_ALLOW_MULTI, + ); + + err = bpf_prog_attach_opts(fd1, cg, atype, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup; + assert_mprog_count(cg, atype, 1); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_ALLOW_MULTI | BPF_F_AFTER, + ); + + err = bpf_prog_attach_opts(fd1, cg, atype, &opta); + ASSERT_EQ(err, -EINVAL, "prog_attach"); + assert_mprog_count(cg, atype, 1); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_ALLOW_MULTI | BPF_F_REPLACE | BPF_F_AFTER, + .replace_prog_fd = fd1, + ); + + err = bpf_prog_attach_opts(fd1, cg, atype, &opta); + ASSERT_EQ(err, -EINVAL, "prog_attach"); + assert_mprog_count(cg, atype, 1); +cleanup: + cgroup_mprog__destroy(skel); + close(cg); +} + +void test_cgroup_mprog_opts(void) +{ + if (test__start_subtest("prog_attach_detach")) + test_prog_attach_detach(BPF_CGROUP_GETSOCKOPT); + if (test__start_subtest("link_attach_detach")) + test_link_attach_detach(BPF_CGROUP_GETSOCKOPT); + if (test__start_subtest("preorder_prog_attach_detach")) + test_preorder_prog_attach_detach(BPF_CGROUP_GETSOCKOPT); + if (test__start_subtest("preorder_link_attach_detach")) + test_preorder_link_attach_detach(BPF_CGROUP_GETSOCKOPT); + if (test__start_subtest("invalid_attach_detach")) + test_invalid_attach_detach(BPF_CGROUP_GETSOCKOPT); +} diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_mprog_ordering.c b/tools/testing/selftests/bpf/prog_tests/cgroup_mprog_ordering.c new file mode 100644 index 000000000000..a36d2e968bc5 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_mprog_ordering.c @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. 
*/ +#include <test_progs.h> +#include "cgroup_helpers.h" +#include "cgroup_preorder.skel.h" + +static int run_getsockopt_test(int cg_parent, int sock_fd, bool has_relative_fd) +{ + LIBBPF_OPTS(bpf_prog_attach_opts, opts); + enum bpf_attach_type prog_p_atype, prog_p2_atype; + int prog_p_fd, prog_p2_fd; + struct cgroup_preorder *skel = NULL; + struct bpf_program *prog; + __u8 *result, buf; + socklen_t optlen = 1; + int err = 0; + + skel = cgroup_preorder__open_and_load(); + if (!ASSERT_OK_PTR(skel, "cgroup_preorder__open_and_load")) + return 0; + + LIBBPF_OPTS_RESET(opts); + opts.flags = BPF_F_ALLOW_MULTI; + prog = skel->progs.parent; + prog_p_fd = bpf_program__fd(prog); + prog_p_atype = bpf_program__expected_attach_type(prog); + err = bpf_prog_attach_opts(prog_p_fd, cg_parent, prog_p_atype, &opts); + if (!ASSERT_OK(err, "bpf_prog_attach_opts-parent")) + goto close_skel; + + opts.flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE; + if (has_relative_fd) + opts.relative_fd = prog_p_fd; + prog = skel->progs.parent_2; + prog_p2_fd = bpf_program__fd(prog); + prog_p2_atype = bpf_program__expected_attach_type(prog); + err = bpf_prog_attach_opts(prog_p2_fd, cg_parent, prog_p2_atype, &opts); + if (!ASSERT_OK(err, "bpf_prog_attach_opts-parent_2")) + goto detach_parent; + + err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen); + if (!ASSERT_OK(err, "getsockopt")) + goto detach_parent_2; + + result = skel->bss->result; + ASSERT_TRUE(result[0] == 4 && result[1] == 3, "result values"); + +detach_parent_2: + ASSERT_OK(bpf_prog_detach2(prog_p2_fd, cg_parent, prog_p2_atype), + "bpf_prog_detach2-parent_2"); +detach_parent: + ASSERT_OK(bpf_prog_detach2(prog_p_fd, cg_parent, prog_p_atype), + "bpf_prog_detach2-parent"); +close_skel: + cgroup_preorder__destroy(skel); + return err; +} + +void test_cgroup_mprog_ordering(void) +{ + int cg_parent = -1, sock_fd = -1; + + cg_parent = test__join_cgroup("/parent"); + if (!ASSERT_GE(cg_parent, 0, "join_cgroup /parent")) + goto out; + + sock_fd = socket(AF_INET, SOCK_STREAM, 0); + if (!ASSERT_GE(sock_fd, 0, "socket")) + goto out; + + ASSERT_OK(run_getsockopt_test(cg_parent, sock_fd, false), "getsockopt_test_1"); + ASSERT_OK(run_getsockopt_test(cg_parent, sock_fd, true), "getsockopt_test_2"); + +out: + close(sock_fd); + close(cg_parent); +} diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_preorder.c b/tools/testing/selftests/bpf/prog_tests/cgroup_preorder.c new file mode 100644 index 000000000000..d4d583872fa2 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_preorder.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. 
*/ +#include <test_progs.h> +#include "cgroup_helpers.h" +#include "cgroup_preorder.skel.h" + +static int run_getsockopt_test(int cg_parent, int cg_child, int sock_fd, bool all_preorder) +{ + LIBBPF_OPTS(bpf_prog_attach_opts, opts); + enum bpf_attach_type prog_c_atype, prog_c2_atype, prog_p_atype, prog_p2_atype; + int prog_c_fd, prog_c2_fd, prog_p_fd, prog_p2_fd; + struct cgroup_preorder *skel = NULL; + struct bpf_program *prog; + __u8 *result, buf; + socklen_t optlen; + int err = 0; + + skel = cgroup_preorder__open_and_load(); + if (!ASSERT_OK_PTR(skel, "cgroup_preorder__open_and_load")) + return 0; + + buf = 0x00; + err = setsockopt(sock_fd, SOL_IP, IP_TOS, &buf, 1); + if (!ASSERT_OK(err, "setsockopt")) + goto close_skel; + + opts.flags = BPF_F_ALLOW_MULTI; + if (all_preorder) + opts.flags |= BPF_F_PREORDER; + prog = skel->progs.child; + prog_c_fd = bpf_program__fd(prog); + prog_c_atype = bpf_program__expected_attach_type(prog); + err = bpf_prog_attach_opts(prog_c_fd, cg_child, prog_c_atype, &opts); + if (!ASSERT_OK(err, "bpf_prog_attach_opts-child")) + goto close_skel; + + opts.flags = BPF_F_ALLOW_MULTI | BPF_F_PREORDER; + prog = skel->progs.child_2; + prog_c2_fd = bpf_program__fd(prog); + prog_c2_atype = bpf_program__expected_attach_type(prog); + err = bpf_prog_attach_opts(prog_c2_fd, cg_child, prog_c2_atype, &opts); + if (!ASSERT_OK(err, "bpf_prog_attach_opts-child_2")) + goto detach_child; + + optlen = 1; + err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen); + if (!ASSERT_OK(err, "getsockopt")) + goto detach_child_2; + + result = skel->bss->result; + if (all_preorder) + ASSERT_TRUE(result[0] == 1 && result[1] == 2, "child only"); + else + ASSERT_TRUE(result[0] == 2 && result[1] == 1, "child only"); + + skel->bss->idx = 0; + memset(result, 0, 4); + + opts.flags = BPF_F_ALLOW_MULTI; + if (all_preorder) + opts.flags |= BPF_F_PREORDER; + prog = skel->progs.parent; + prog_p_fd = bpf_program__fd(prog); + prog_p_atype = bpf_program__expected_attach_type(prog); + err = bpf_prog_attach_opts(prog_p_fd, cg_parent, prog_p_atype, &opts); + if (!ASSERT_OK(err, "bpf_prog_attach_opts-parent")) + goto detach_child_2; + + opts.flags = BPF_F_ALLOW_MULTI | BPF_F_PREORDER; + prog = skel->progs.parent_2; + prog_p2_fd = bpf_program__fd(prog); + prog_p2_atype = bpf_program__expected_attach_type(prog); + err = bpf_prog_attach_opts(prog_p2_fd, cg_parent, prog_p2_atype, &opts); + if (!ASSERT_OK(err, "bpf_prog_attach_opts-parent_2")) + goto detach_parent; + + err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen); + if (!ASSERT_OK(err, "getsockopt")) + goto detach_parent_2; + + if (all_preorder) + ASSERT_TRUE(result[0] == 3 && result[1] == 4 && result[2] == 1 && result[3] == 2, + "parent and child"); + else + ASSERT_TRUE(result[0] == 4 && result[1] == 2 && result[2] == 1 && result[3] == 3, + "parent and child"); + +detach_parent_2: + ASSERT_OK(bpf_prog_detach2(prog_p2_fd, cg_parent, prog_p2_atype), + "bpf_prog_detach2-parent_2"); +detach_parent: + ASSERT_OK(bpf_prog_detach2(prog_p_fd, cg_parent, prog_p_atype), + "bpf_prog_detach2-parent"); +detach_child_2: + ASSERT_OK(bpf_prog_detach2(prog_c2_fd, cg_child, prog_c2_atype), + "bpf_prog_detach2-child_2"); +detach_child: + ASSERT_OK(bpf_prog_detach2(prog_c_fd, cg_child, prog_c_atype), + "bpf_prog_detach2-child"); +close_skel: + cgroup_preorder__destroy(skel); + return err; +} + +void test_cgroup_preorder(void) +{ + int cg_parent = -1, cg_child = -1, sock_fd = -1; + + cg_parent = test__join_cgroup("/parent"); + if (!ASSERT_GE(cg_parent, 0, "join_cgroup 
/parent")) + goto out; + + cg_child = test__join_cgroup("/parent/child"); + if (!ASSERT_GE(cg_child, 0, "join_cgroup /parent/child")) + goto out; + + sock_fd = socket(AF_INET, SOCK_STREAM, 0); + if (!ASSERT_GE(sock_fd, 0, "socket")) + goto out; + + ASSERT_OK(run_getsockopt_test(cg_parent, cg_child, sock_fd, false), "getsockopt_test_1"); + ASSERT_OK(run_getsockopt_test(cg_parent, cg_child, sock_fd, true), "getsockopt_test_2"); + +out: + close(sock_fd); + close(cg_child); + close(cg_parent); +} diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_skb_direct_packet_access.c b/tools/testing/selftests/bpf/prog_tests/cgroup_skb_direct_packet_access.c new file mode 100644 index 000000000000..e1a90c10db8c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_skb_direct_packet_access.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> +#include "cgroup_skb_direct_packet_access.skel.h" + +void test_cgroup_skb_prog_run_direct_packet_access(void) +{ + int err; + struct cgroup_skb_direct_packet_access *skel; + char test_skb[64] = {}; + + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = test_skb, + .data_size_in = sizeof(test_skb), + ); + + skel = cgroup_skb_direct_packet_access__open_and_load(); + if (!ASSERT_OK_PTR(skel, "cgroup_skb_direct_packet_access__open_and_load")) + return; + + err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.direct_packet_access), &topts); + ASSERT_OK(err, "bpf_prog_test_run_opts err"); + ASSERT_EQ(topts.retval, 1, "retval"); + + ASSERT_NEQ(skel->bss->data_end, 0, "data_end"); + + cgroup_skb_direct_packet_access__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c b/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c index 64abba72ac10..37c1cc52ed98 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c @@ -10,12 +10,18 @@ static int run_test(int cgroup_fd, int server_fd, bool classid) { struct connect4_dropper *skel; - int fd, err = 0; + int fd, err = 0, port; skel = connect4_dropper__open_and_load(); if (!ASSERT_OK_PTR(skel, "skel_open")) return -1; + port = get_socket_local_port(server_fd); + if (!ASSERT_GE(port, 0, "get_socket_local_port")) + return -1; + + skel->bss->port = ntohs(port); + skel->links.connect_v4_dropper = bpf_program__attach_cgroup(skel->progs.connect_v4_dropper, cgroup_fd); @@ -48,10 +54,9 @@ void test_cgroup_v1v2(void) { struct network_helper_opts opts = {}; int server_fd, client_fd, cgroup_fd; - static const int port = 60120; /* Step 1: Check base connectivity works without any BPF. 
*/
- server_fd = start_server(AF_INET, SOCK_STREAM, NULL, port, 0);
+ server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0);
 if (!ASSERT_GE(server_fd, 0, "server_fd"))
 return;
 client_fd = connect_to_fd_opts(server_fd, &opts);
@@ -66,7 +71,7 @@ void test_cgroup_v1v2(void)
 cgroup_fd = test__join_cgroup("/connect_dropper");
 if (!ASSERT_GE(cgroup_fd, 0, "cgroup_fd"))
 return;
- server_fd = start_server(AF_INET, SOCK_STREAM, NULL, port, 0);
+ server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0);
 if (!ASSERT_GE(server_fd, 0, "server_fd")) {
 close(cgroup_fd);
 return;
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_xattr.c b/tools/testing/selftests/bpf/prog_tests/cgroup_xattr.c
new file mode 100644
index 000000000000..e0dd966e4a3e
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_xattr.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <test_progs.h>
+#include "cgroup_helpers.h"
+
+#include "read_cgroupfs_xattr.skel.h"
+#include "cgroup_read_xattr.skel.h"
+
+#define CGROUP_FS_PARENT "foo/"
+#define CGROUP_FS_CHILD CGROUP_FS_PARENT "bar/"
+#define TMP_FILE "/tmp/selftests_cgroup_xattr"
+
+static const char xattr_value_a[] = "bpf_selftest_value_a";
+static const char xattr_value_b[] = "bpf_selftest_value_b";
+static const char xattr_name[] = "user.bpf_test";
+
+static void test_read_cgroup_xattr(void)
+{
+ int tmp_fd, parent_cgroup_fd = -1, child_cgroup_fd = -1;
+ struct read_cgroupfs_xattr *skel = NULL;
+
+ parent_cgroup_fd = test__join_cgroup(CGROUP_FS_PARENT);
+ if (!ASSERT_OK_FD(parent_cgroup_fd, "create parent cgroup"))
+ return;
+ if (!ASSERT_OK(set_cgroup_xattr(CGROUP_FS_PARENT, xattr_name, xattr_value_a),
+ "set parent xattr"))
+ goto out;
+
+ child_cgroup_fd = test__join_cgroup(CGROUP_FS_CHILD);
+ if (!ASSERT_OK_FD(child_cgroup_fd, "create child cgroup"))
+ goto out;
+ if (!ASSERT_OK(set_cgroup_xattr(CGROUP_FS_CHILD, xattr_name, xattr_value_b),
+ "set child xattr"))
+ goto out;
+
+ skel = read_cgroupfs_xattr__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "read_cgroupfs_xattr__open_and_load"))
+ goto out;
+
+ skel->bss->target_pid = gettid();
+
+ if (!ASSERT_OK(read_cgroupfs_xattr__attach(skel), "read_cgroupfs_xattr__attach"))
+ goto out;
+
+ tmp_fd = open(TMP_FILE, O_RDONLY | O_CREAT, 0644);
+ ASSERT_OK_FD(tmp_fd, "open tmp file");
+ close(tmp_fd);
+
+ ASSERT_TRUE(skel->bss->found_value_a, "found_value_a");
+ ASSERT_TRUE(skel->bss->found_value_b, "found_value_b");
+
+out:
+ close(child_cgroup_fd);
+ close(parent_cgroup_fd);
+ read_cgroupfs_xattr__destroy(skel);
+ unlink(TMP_FILE);
+}
+
+void test_cgroup_xattr(void)
+{
+ RUN_TESTS(cgroup_read_xattr);
+
+ if (test__start_subtest("read_cgroupfs_xattr"))
+ test_read_cgroup_xattr();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/compute_live_registers.c b/tools/testing/selftests/bpf/prog_tests/compute_live_registers.c
new file mode 100644
index 000000000000..285f20241fe1
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/compute_live_registers.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "compute_live_registers.skel.h"
+#include "test_progs.h"
+
+void test_compute_live_registers(void)
+{
+ RUN_TESTS(compute_live_registers);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
index
1c682550e0e7..08963c82f30b 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c +++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c @@ -2,7 +2,7 @@ #define _GNU_SOURCE #include <test_progs.h> #include "progs/core_reloc_types.h" -#include "bpf_testmod/bpf_testmod.h" +#include "test_kmods/bpf_testmod.h" #include <linux/limits.h> #include <sys/mman.h> #include <sys/syscall.h> @@ -85,11 +85,11 @@ static int duration = 0; #define NESTING_ERR_CASE(name) { \ NESTING_CASE_COMMON(name), \ .fails = true, \ - .run_btfgen_fails = true, \ + .run_btfgen_fails = true, \ } #define ARRAYS_DATA(struct_name) STRUCT_TO_CHAR_PTR(struct_name) { \ - .a = { [2] = 1 }, \ + .a = { [2] = 1, [3] = 11 }, \ .b = { [1] = { [2] = { [3] = 2 } } }, \ .c = { [1] = { .c = 3 } }, \ .d = { [0] = { [0] = { .d = 4 } } }, \ @@ -108,6 +108,7 @@ static int duration = 0; .input_len = sizeof(struct core_reloc_##name), \ .output = STRUCT_TO_CHAR_PTR(core_reloc_arrays_output) { \ .a2 = 1, \ + .a3 = 12, \ .b123 = 2, \ .c1c = 3, \ .d00d = 4, \ @@ -602,6 +603,7 @@ static const struct core_reloc_test_case test_cases[] = { ARRAYS_ERR_CASE(arrays___err_non_array), ARRAYS_ERR_CASE(arrays___err_wrong_val_type), ARRAYS_ERR_CASE(arrays___err_bad_zero_sz_arr), + ARRAYS_ERR_CASE(arrays___err_bad_signed_arr_elem_sz), /* enum/ptr/int handling scenarios */ PRIMITIVES_CASE(primitives), diff --git a/tools/testing/selftests/bpf/prog_tests/cpumask.c b/tools/testing/selftests/bpf/prog_tests/cpumask.c index e58a04654238..6c45330a5ca3 100644 --- a/tools/testing/selftests/bpf/prog_tests/cpumask.c +++ b/tools/testing/selftests/bpf/prog_tests/cpumask.c @@ -25,6 +25,10 @@ static const char * const cpumask_success_testcases[] = { "test_global_mask_nested_deep_rcu", "test_global_mask_nested_deep_array_rcu", "test_cpumask_weight", + "test_refcount_null_tracking", + "test_populate_reject_small_mask", + "test_populate_reject_unaligned", + "test_populate", }; static void verify_success(const char *prog_name) @@ -78,6 +82,5 @@ void test_cpumask(void) verify_success(cpumask_success_testcases[i]); } - RUN_TESTS(cpumask_success); RUN_TESTS(cpumask_failure); } diff --git a/tools/testing/selftests/bpf/prog_tests/dmabuf_iter.c b/tools/testing/selftests/bpf/prog_tests/dmabuf_iter.c new file mode 100644 index 000000000000..6c2b0c3dbcd8 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/dmabuf_iter.c @@ -0,0 +1,285 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Google */ + +#include <test_progs.h> +#include <bpf/libbpf.h> +#include <bpf/btf.h> +#include "dmabuf_iter.skel.h" + +#include <fcntl.h> +#include <stdbool.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/ioctl.h> +#include <sys/mman.h> +#include <unistd.h> + +#include <linux/dma-buf.h> +#include <linux/dma-heap.h> +#include <linux/udmabuf.h> + +static int udmabuf = -1; +static const char udmabuf_test_buffer_name[DMA_BUF_NAME_LEN] = "udmabuf_test_buffer_for_iter"; +static size_t udmabuf_test_buffer_size; +static int sysheap_dmabuf = -1; +static const char sysheap_test_buffer_name[DMA_BUF_NAME_LEN] = "sysheap_test_buffer_for_iter"; +static size_t sysheap_test_buffer_size; + +static int create_udmabuf(void) +{ + struct udmabuf_create create; + int dev_udmabuf, memfd, local_udmabuf; + + udmabuf_test_buffer_size = 10 * getpagesize(); + + if (!ASSERT_LE(sizeof(udmabuf_test_buffer_name), DMA_BUF_NAME_LEN, "NAMETOOLONG")) + return -1; + + memfd = memfd_create("memfd_test", MFD_ALLOW_SEALING); + if (!ASSERT_OK_FD(memfd, "memfd_create")) + return -1; + + if 
(!ASSERT_OK(ftruncate(memfd, udmabuf_test_buffer_size), "ftruncate"))
+ goto close_memfd;
+
+ if (!ASSERT_OK(fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK), "seal"))
+ goto close_memfd;
+
+ dev_udmabuf = open("/dev/udmabuf", O_RDONLY);
+ if (!ASSERT_OK_FD(dev_udmabuf, "open udmabuf"))
+ goto close_memfd;
+
+ memset(&create, 0, sizeof(create));
+ create.memfd = memfd;
+ create.flags = UDMABUF_FLAGS_CLOEXEC;
+ create.offset = 0;
+ create.size = udmabuf_test_buffer_size;
+
+ local_udmabuf = ioctl(dev_udmabuf, UDMABUF_CREATE, &create);
+ close(dev_udmabuf);
+ if (!ASSERT_OK_FD(local_udmabuf, "udmabuf_create"))
+ goto close_memfd;
+
+ if (!ASSERT_OK(ioctl(local_udmabuf, DMA_BUF_SET_NAME_B, udmabuf_test_buffer_name), "name"))
+ goto close_udmabuf;
+
+ return local_udmabuf;
+
+close_udmabuf:
+ close(local_udmabuf);
+close_memfd:
+ close(memfd);
+ return -1;
+}
+
+static int create_sys_heap_dmabuf(void)
+{
+ sysheap_test_buffer_size = 20 * getpagesize();
+
+ struct dma_heap_allocation_data data = {
+ .len = sysheap_test_buffer_size,
+ .fd = 0,
+ .fd_flags = O_RDWR | O_CLOEXEC,
+ .heap_flags = 0,
+ };
+ int heap_fd, ret;
+
+ if (!ASSERT_LE(sizeof(sysheap_test_buffer_name), DMA_BUF_NAME_LEN, "NAMETOOLONG"))
+ return -1;
+
+ heap_fd = open("/dev/dma_heap/system", O_RDONLY);
+ if (!ASSERT_OK_FD(heap_fd, "open dma heap"))
+ return -1;
+
+ ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
+ close(heap_fd);
+ if (!ASSERT_OK(ret, "sysheap alloc"))
+ return -1;
+
+ if (!ASSERT_OK(ioctl(data.fd, DMA_BUF_SET_NAME_B, sysheap_test_buffer_name), "name"))
+ goto close_sysheap_dmabuf;
+
+ return data.fd;
+
+close_sysheap_dmabuf:
+ close(data.fd);
+ return -1;
+}
+
+static int create_test_buffers(void)
+{
+ udmabuf = create_udmabuf();
+ sysheap_dmabuf = create_sys_heap_dmabuf();
+
+ if (udmabuf < 0 || sysheap_dmabuf < 0)
+ return -1;
+
+ return 0;
+}
+
+static void destroy_test_buffers(void)
+{
+ close(udmabuf);
+ udmabuf = -1;
+
+ close(sysheap_dmabuf);
+ sysheap_dmabuf = -1;
+}
+
+enum Fields { INODE, SIZE, NAME, EXPORTER, FIELD_COUNT };
+struct DmabufInfo {
+ unsigned long inode;
+ unsigned long size;
+ char name[DMA_BUF_NAME_LEN];
+ char exporter[32];
+};
+
+static bool check_dmabuf_info(const struct DmabufInfo *bufinfo,
+ unsigned long size,
+ const char *name, const char *exporter)
+{
+ return size == bufinfo->size &&
+ !strcmp(name, bufinfo->name) &&
+ !strcmp(exporter, bufinfo->exporter);
+}
+
+static void subtest_dmabuf_iter_check_no_infinite_reads(struct dmabuf_iter *skel)
+{
+ int iter_fd;
+ char buf[256];
+
+ iter_fd = bpf_iter_create(bpf_link__fd(skel->links.dmabuf_collector));
+ if (!ASSERT_OK_FD(iter_fd, "iter_create"))
+ return;
+
+ while (read(iter_fd, buf, sizeof(buf)) > 0)
+ ; /* Read out all contents */
+
+ /* Next reads should return 0 */
+ ASSERT_EQ(read(iter_fd, buf, sizeof(buf)), 0, "read");
+
+ close(iter_fd);
+}
+
+static void subtest_dmabuf_iter_check_default_iter(struct dmabuf_iter *skel)
+{
+ bool found_test_sysheap_dmabuf = false;
+ bool found_test_udmabuf = false;
+ struct DmabufInfo bufinfo;
+ size_t linesize = 0;
+ char *line = NULL;
+ FILE *iter_file;
+ int iter_fd, f = INODE;
+
+ iter_fd = bpf_iter_create(bpf_link__fd(skel->links.dmabuf_collector));
+ if (!ASSERT_OK_FD(iter_fd, "iter_create"))
+ return;
+
+ iter_file = fdopen(iter_fd, "r");
+ if (!ASSERT_OK_PTR(iter_file, "fdopen"))
+ goto close_iter_fd;
+
+ while (getline(&line, &linesize, iter_file) != -1) {
+ if (f % FIELD_COUNT == INODE) {
+ ASSERT_EQ(sscanf(line, "%lu", &bufinfo.inode), 1,
+ "read inode");
+ } else if (f %
FIELD_COUNT == SIZE) {
+ ASSERT_EQ(sscanf(line, "%lu", &bufinfo.size), 1,
+ "read size");
+ } else if (f % FIELD_COUNT == NAME) {
+ ASSERT_EQ(sscanf(line, "%31s", bufinfo.name), 1,
+ "read name");
+ } else if (f % FIELD_COUNT == EXPORTER) {
+ ASSERT_EQ(sscanf(line, "%31s", bufinfo.exporter), 1,
+ "read exporter");
+
+ if (check_dmabuf_info(&bufinfo,
+ sysheap_test_buffer_size,
+ sysheap_test_buffer_name,
+ "system"))
+ found_test_sysheap_dmabuf = true;
+ else if (check_dmabuf_info(&bufinfo,
+ udmabuf_test_buffer_size,
+ udmabuf_test_buffer_name,
+ "udmabuf"))
+ found_test_udmabuf = true;
+ }
+ ++f;
+ }
+
+ ASSERT_EQ(f % FIELD_COUNT, INODE, "number of fields");
+
+ ASSERT_TRUE(found_test_sysheap_dmabuf, "found_test_sysheap_dmabuf");
+ ASSERT_TRUE(found_test_udmabuf, "found_test_udmabuf");
+
+ free(line);
+ fclose(iter_file);
+close_iter_fd:
+ close(iter_fd);
+}
+
+static void subtest_dmabuf_iter_check_open_coded(struct dmabuf_iter *skel, int map_fd)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ char key[DMA_BUF_NAME_LEN];
+ int err, fd;
+ bool found;
+
+ /* No need to attach it, just run it directly */
+ fd = bpf_program__fd(skel->progs.iter_dmabuf_for_each);
+
+ err = bpf_prog_test_run_opts(fd, &topts);
+ if (!ASSERT_OK(err, "test_run_opts err"))
+ return;
+ if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+ return;
+
+ if (!ASSERT_OK(bpf_map_get_next_key(map_fd, NULL, key), "get next key"))
+ return;
+
+ do {
+ ASSERT_OK(bpf_map_lookup_elem(map_fd, key, &found), "lookup");
+ ASSERT_TRUE(found, "found test buffer");
+ } while (bpf_map_get_next_key(map_fd, key, key));
+}
+
+void test_dmabuf_iter(void)
+{
+ struct dmabuf_iter *skel = NULL;
+ int map_fd;
+ const bool f = false;
+
+ skel = dmabuf_iter__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "dmabuf_iter__open_and_load"))
+ return;
+
+ map_fd = bpf_map__fd(skel->maps.testbuf_hash);
+ if (!ASSERT_OK_FD(map_fd, "map_fd"))
+ goto destroy_skel;
+
+ if (!ASSERT_OK(bpf_map_update_elem(map_fd, udmabuf_test_buffer_name, &f, BPF_ANY),
+ "insert udmabuf"))
+ goto destroy_skel;
+ if (!ASSERT_OK(bpf_map_update_elem(map_fd, sysheap_test_buffer_name, &f, BPF_ANY),
+ "insert sysheap buffer"))
+ goto destroy_skel;
+
+ if (!ASSERT_OK(create_test_buffers(), "create_test_buffers"))
+ goto destroy;
+
+ if (!ASSERT_OK(dmabuf_iter__attach(skel), "skel_attach"))
+ goto destroy;
+
+ if (test__start_subtest("no_infinite_reads"))
+ subtest_dmabuf_iter_check_no_infinite_reads(skel);
+ if (test__start_subtest("default_iter"))
+ subtest_dmabuf_iter_check_default_iter(skel);
+ if (test__start_subtest("open_coded"))
+ subtest_dmabuf_iter_check_open_coded(skel, map_fd);
+
+destroy:
+ destroy_test_buffers();
+destroy_skel:
+ dmabuf_iter__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/dynptr.c b/tools/testing/selftests/bpf/prog_tests/dynptr.c
index b614a5272dfd..9b2d9ceda210 100644
--- a/tools/testing/selftests/bpf/prog_tests/dynptr.c
+++ b/tools/testing/selftests/bpf/prog_tests/dynptr.c
@@ -10,6 +10,7 @@ enum test_setup_type {
 SETUP_SYSCALL_SLEEP,
 SETUP_SKB_PROG,
 SETUP_SKB_PROG_TP,
+ SETUP_XDP_PROG,
 };
 
 static struct {
@@ -18,6 +19,16 @@ static struct {
 } success_tests[] = {
 {"test_read_write", SETUP_SYSCALL_SLEEP},
 {"test_dynptr_data", SETUP_SYSCALL_SLEEP},
+ {"test_dynptr_copy", SETUP_SYSCALL_SLEEP},
+ {"test_dynptr_copy_xdp", SETUP_XDP_PROG},
+ {"test_dynptr_memset_zero", SETUP_SYSCALL_SLEEP},
+ {"test_dynptr_memset_notzero", SETUP_SYSCALL_SLEEP},
+ {"test_dynptr_memset_zero_offset", SETUP_SYSCALL_SLEEP},
+ {"test_dynptr_memset_zero_adjusted",
SETUP_SYSCALL_SLEEP}, + {"test_dynptr_memset_overflow", SETUP_SYSCALL_SLEEP}, + {"test_dynptr_memset_overflow_offset", SETUP_SYSCALL_SLEEP}, + {"test_dynptr_memset_readonly", SETUP_SKB_PROG}, + {"test_dynptr_memset_xdp_chunks", SETUP_XDP_PROG}, {"test_ringbuf", SETUP_SYSCALL_SLEEP}, {"test_skb_readonly", SETUP_SKB_PROG}, {"test_dynptr_skb_data", SETUP_SKB_PROG}, @@ -30,10 +41,21 @@ static struct { {"test_dynptr_skb_no_buff", SETUP_SKB_PROG}, {"test_dynptr_skb_strcmp", SETUP_SKB_PROG}, {"test_dynptr_skb_tp_btf", SETUP_SKB_PROG_TP}, + {"test_probe_read_user_dynptr", SETUP_XDP_PROG}, + {"test_probe_read_kernel_dynptr", SETUP_XDP_PROG}, + {"test_probe_read_user_str_dynptr", SETUP_XDP_PROG}, + {"test_probe_read_kernel_str_dynptr", SETUP_XDP_PROG}, + {"test_copy_from_user_dynptr", SETUP_SYSCALL_SLEEP}, + {"test_copy_from_user_str_dynptr", SETUP_SYSCALL_SLEEP}, + {"test_copy_from_user_task_dynptr", SETUP_SYSCALL_SLEEP}, + {"test_copy_from_user_task_str_dynptr", SETUP_SYSCALL_SLEEP}, }; +#define PAGE_SIZE_64K 65536 + static void verify_success(const char *prog_name, enum test_setup_type setup_type) { + char user_data[384] = {[0 ... 382] = 'a', '\0'}; struct dynptr_success *skel; struct bpf_program *prog; struct bpf_link *link; @@ -55,6 +77,10 @@ static void verify_success(const char *prog_name, enum test_setup_type setup_typ if (!ASSERT_OK(err, "dynptr_success__load")) goto cleanup; + skel->bss->user_ptr = user_data; + skel->data->test_len[0] = sizeof(user_data); + memcpy(skel->bss->expected_str, user_data, sizeof(user_data)); + switch (setup_type) { case SETUP_SYSCALL_SLEEP: link = bpf_program__attach(prog); @@ -120,6 +146,28 @@ static void verify_success(const char *prog_name, enum test_setup_type setup_typ break; } + case SETUP_XDP_PROG: + { + char data[90000]; + int err, prog_fd; + LIBBPF_OPTS(bpf_test_run_opts, opts, + .data_in = &data, + .repeat = 1, + ); + + if (getpagesize() == PAGE_SIZE_64K) + opts.data_size_in = sizeof(data); + else + opts.data_size_in = 5000; + + prog_fd = bpf_program__fd(prog); + err = bpf_prog_test_run_opts(prog_fd, &opts); + + if (!ASSERT_OK(err, "test_run")) + goto cleanup; + + break; + } } ASSERT_EQ(skel->bss->err, 0, "err"); diff --git a/tools/testing/selftests/bpf/prog_tests/fd_array.c b/tools/testing/selftests/bpf/prog_tests/fd_array.c new file mode 100644 index 000000000000..241b2c8c6e0f --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/fd_array.c @@ -0,0 +1,441 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> + +#include <linux/btf.h> +#include <bpf/bpf.h> + +#include "../test_btf.h" + +static inline int new_map(void) +{ + const char *name = NULL; + __u32 max_entries = 1; + __u32 value_size = 8; + __u32 key_size = 4; + + return bpf_map_create(BPF_MAP_TYPE_ARRAY, name, + key_size, value_size, + max_entries, NULL); +} + +static int new_btf(void) +{ + struct btf_blob { + struct btf_header btf_hdr; + __u32 types[8]; + __u32 str; + } raw_btf = { + .btf_hdr = { + .magic = BTF_MAGIC, + .version = BTF_VERSION, + .hdr_len = sizeof(struct btf_header), + .type_len = sizeof(raw_btf.types), + .str_off = offsetof(struct btf_blob, str) - offsetof(struct btf_blob, types), + .str_len = sizeof(raw_btf.str), + }, + .types = { + /* long */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 64, 8), /* [1] */ + /* unsigned long */ + BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */ + }, + }; + + return bpf_btf_load(&raw_btf, sizeof(raw_btf), NULL); +} + +#define Close(FD) do { \ + if ((FD) >= 0) { \ + close(FD); \ + FD = -1; \ + } \ +} while(0) + +static bool 
map_exists(__u32 id) +{ + int fd; + + fd = bpf_map_get_fd_by_id(id); + if (fd >= 0) { + close(fd); + return true; + } + return false; +} + +static bool btf_exists(__u32 id) +{ + int fd; + + fd = bpf_btf_get_fd_by_id(id); + if (fd >= 0) { + close(fd); + return true; + } + return false; +} + +static inline int bpf_prog_get_map_ids(int prog_fd, __u32 *nr_map_ids, __u32 *map_ids) +{ + __u32 len = sizeof(struct bpf_prog_info); + struct bpf_prog_info info; + int err; + + memset(&info, 0, len); + info.nr_map_ids = *nr_map_ids; + info.map_ids = ptr_to_u64(map_ids); + + err = bpf_prog_get_info_by_fd(prog_fd, &info, &len); + if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd")) + return -1; + + *nr_map_ids = info.nr_map_ids; + + return 0; +} + +static int __load_test_prog(int map_fd, const int *fd_array, int fd_array_cnt) +{ + /* A trivial program which uses one map */ + struct bpf_insn insns[] = { + BPF_LD_MAP_FD(BPF_REG_1, map_fd), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + LIBBPF_OPTS(bpf_prog_load_opts, opts); + + opts.fd_array = fd_array; + opts.fd_array_cnt = fd_array_cnt; + + return bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, ARRAY_SIZE(insns), &opts); +} + +static int load_test_prog(const int *fd_array, int fd_array_cnt) +{ + int map_fd; + int ret; + + map_fd = new_map(); + if (!ASSERT_GE(map_fd, 0, "new_map")) + return map_fd; + + ret = __load_test_prog(map_fd, fd_array, fd_array_cnt); + close(map_fd); + return ret; +} + +static bool check_expected_map_ids(int prog_fd, int expected, __u32 *map_ids, __u32 *nr_map_ids) +{ + int err; + + err = bpf_prog_get_map_ids(prog_fd, nr_map_ids, map_ids); + if (!ASSERT_OK(err, "bpf_prog_get_map_ids")) + return false; + if (!ASSERT_EQ(*nr_map_ids, expected, "unexpected nr_map_ids")) + return false; + + return true; +} + +/* + * Load a program, which uses one map. No fd_array maps are present. + * On return only one map is expected to be bound to prog. + */ +static void check_fd_array_cnt__no_fd_array(void) +{ + __u32 map_ids[16]; + __u32 nr_map_ids; + int prog_fd = -1; + + prog_fd = load_test_prog(NULL, 0); + if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD")) + return; + nr_map_ids = ARRAY_SIZE(map_ids); + check_expected_map_ids(prog_fd, 1, map_ids, &nr_map_ids); + close(prog_fd); +} + +/* + * Load a program, which uses one map, and pass two extra, non-equal, maps in + * fd_array with fd_array_cnt=2. On return three maps are expected to be bound + * to the program. 
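+ * Neither extra map is referenced by the program text; they are bound
+ * solely because they were passed via fd_array/fd_array_cnt. A minimal
+ * sketch of that usage, mirroring __load_test_prog() below:
+ *
+ *	LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ *		.fd_array = extra_fds,
+ *		.fd_array_cnt = 2,
+ *	);
+ *	bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, &opts);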
+ */ +static void check_fd_array_cnt__fd_array_ok(void) +{ + int extra_fds[2] = { -1, -1 }; + __u32 map_ids[16]; + __u32 nr_map_ids; + int prog_fd = -1; + + extra_fds[0] = new_map(); + if (!ASSERT_GE(extra_fds[0], 0, "new_map")) + goto cleanup; + extra_fds[1] = new_map(); + if (!ASSERT_GE(extra_fds[1], 0, "new_map")) + goto cleanup; + prog_fd = load_test_prog(extra_fds, 2); + if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD")) + goto cleanup; + nr_map_ids = ARRAY_SIZE(map_ids); + if (!check_expected_map_ids(prog_fd, 3, map_ids, &nr_map_ids)) + goto cleanup; + + /* maps should still exist when original file descriptors are closed */ + Close(extra_fds[0]); + Close(extra_fds[1]); + if (!ASSERT_EQ(map_exists(map_ids[0]), true, "map_ids[0] should exist")) + goto cleanup; + if (!ASSERT_EQ(map_exists(map_ids[1]), true, "map_ids[1] should exist")) + goto cleanup; + + /* some fds might be invalid, so ignore return codes */ +cleanup: + Close(extra_fds[1]); + Close(extra_fds[0]); + Close(prog_fd); +} + +/* + * Load a program with a few extra maps duplicated in the fd_array. + * After the load maps should only be referenced once. + */ +static void check_fd_array_cnt__duplicated_maps(void) +{ + int extra_fds[4] = { -1, -1, -1, -1 }; + __u32 map_ids[16]; + __u32 nr_map_ids; + int prog_fd = -1; + + extra_fds[0] = extra_fds[2] = new_map(); + if (!ASSERT_GE(extra_fds[0], 0, "new_map")) + goto cleanup; + extra_fds[1] = extra_fds[3] = new_map(); + if (!ASSERT_GE(extra_fds[1], 0, "new_map")) + goto cleanup; + prog_fd = load_test_prog(extra_fds, 4); + if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD")) + goto cleanup; + nr_map_ids = ARRAY_SIZE(map_ids); + if (!check_expected_map_ids(prog_fd, 3, map_ids, &nr_map_ids)) + goto cleanup; + + /* maps should still exist when original file descriptors are closed */ + Close(extra_fds[0]); + Close(extra_fds[1]); + if (!ASSERT_EQ(map_exists(map_ids[0]), true, "map should exist")) + goto cleanup; + if (!ASSERT_EQ(map_exists(map_ids[1]), true, "map should exist")) + goto cleanup; + + /* some fds might be invalid, so ignore return codes */ +cleanup: + Close(extra_fds[1]); + Close(extra_fds[0]); + Close(prog_fd); +} + +/* + * Check that if maps which are referenced by a program are + * passed in fd_array, then they will be referenced only once + */ +static void check_fd_array_cnt__referenced_maps_in_fd_array(void) +{ + int extra_fds[1] = { -1 }; + __u32 map_ids[16]; + __u32 nr_map_ids; + int prog_fd = -1; + + extra_fds[0] = new_map(); + if (!ASSERT_GE(extra_fds[0], 0, "new_map")) + goto cleanup; + prog_fd = __load_test_prog(extra_fds[0], extra_fds, 1); + if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD")) + goto cleanup; + nr_map_ids = ARRAY_SIZE(map_ids); + if (!check_expected_map_ids(prog_fd, 1, map_ids, &nr_map_ids)) + goto cleanup; + + /* map should still exist when original file descriptor is closed */ + Close(extra_fds[0]); + if (!ASSERT_EQ(map_exists(map_ids[0]), true, "map should exist")) + goto cleanup; + + /* some fds might be invalid, so ignore return codes */ +cleanup: + Close(extra_fds[0]); + Close(prog_fd); +} + +static int get_btf_id_by_fd(int btf_fd, __u32 *id) +{ + struct bpf_btf_info info; + __u32 info_len = sizeof(info); + int err; + + memset(&info, 0, info_len); + err = bpf_btf_get_info_by_fd(btf_fd, &info, &info_len); + if (err) + return err; + if (id) + *id = info.id; + return 0; +} + +/* + * Check that fd_array operates properly for btfs. 
Namely, to check that
+ * passing a btf fd in fd_array increases its reference count, do the
+ * following:
+ * 1) Create a new btf; it's referenced only by a file descriptor, so refcnt=1
+ * 2) Load a BPF prog with fd_array[0] = btf_fd; now btf's refcnt=2
+ * 3) Close the btf_fd, now refcnt=1
+ * Wait and check that the BTF still exists.
+ */
+static void check_fd_array_cnt__referenced_btfs(void)
+{
+ int extra_fds[1] = { -1 };
+ int prog_fd = -1;
+ __u32 btf_id;
+ int tries;
+ int err;
+
+ extra_fds[0] = new_btf();
+ if (!ASSERT_GE(extra_fds[0], 0, "new_btf"))
+ goto cleanup;
+ prog_fd = load_test_prog(extra_fds, 1);
+ if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD"))
+ goto cleanup;
+
+ /* btf should still exist when original file descriptor is closed */
+ err = get_btf_id_by_fd(extra_fds[0], &btf_id);
+ if (!ASSERT_EQ(err, 0, "get_btf_id_by_fd"))
+ goto cleanup;
+
+ Close(extra_fds[0]);
+
+ if (!ASSERT_GE(kern_sync_rcu(), 0, "kern_sync_rcu 1"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(btf_exists(btf_id), true, "btf should exist"))
+ goto cleanup;
+
+ Close(prog_fd);
+
+ /* The program is freed by a workqueue; there is no reliable way to
+ * sync with that, so just wait a bit (max ~1 second). */
+ for (tries = 100; tries >= 0; tries--) {
+ usleep(1000);
+
+ if (!btf_exists(btf_id))
+ break;
+
+ if (tries)
+ continue;
+
+ PRINT_FAIL("btf should have been freed");
+ }
+
+ /* some fds might be invalid, so ignore return codes */
+cleanup:
+ Close(extra_fds[0]);
+ Close(prog_fd);
+}
+
+/*
+ * Test that a program with trash in fd_array can't be loaded:
+ * only map and BTF file descriptors should be accepted.
+ */
+static void check_fd_array_cnt__fd_array_with_trash(void)
+{
+ int extra_fds[3] = { -1, -1, -1 };
+ int prog_fd = -1;
+
+ extra_fds[0] = new_map();
+ if (!ASSERT_GE(extra_fds[0], 0, "new_map"))
+ goto cleanup;
+ extra_fds[1] = new_btf();
+ if (!ASSERT_GE(extra_fds[1], 0, "new_btf"))
+ goto cleanup;
+
+ /* trash 1: not a file descriptor */
+ extra_fds[2] = 0xbeef;
+ prog_fd = load_test_prog(extra_fds, 3);
+ if (!ASSERT_EQ(prog_fd, -EBADF, "prog should have been rejected with -EBADF"))
+ goto cleanup;
+
+ /* trash 2: not a map or btf */
+ extra_fds[2] = socket(AF_INET, SOCK_STREAM, 0);
+ if (!ASSERT_GE(extra_fds[2], 0, "socket"))
+ goto cleanup;
+
+ prog_fd = load_test_prog(extra_fds, 3);
+ if (!ASSERT_EQ(prog_fd, -EINVAL, "prog should have been rejected with -EINVAL"))
+ goto cleanup;
+
+ /* Validate that the prog is ok if trash is removed */
+ Close(extra_fds[2]);
+ extra_fds[2] = new_btf();
+ if (!ASSERT_GE(extra_fds[2], 0, "new_btf"))
+ goto cleanup;
+
+ prog_fd = load_test_prog(extra_fds, 3);
+ if (!ASSERT_GE(prog_fd, 0, "prog should have been loaded"))
+ goto cleanup;
+
+ /* some fds might be invalid, so ignore return codes */
+cleanup:
+ Close(extra_fds[2]);
+ Close(extra_fds[1]);
+ Close(extra_fds[0]);
+}
+
+/*
+ * Test that a program with a too-large fd_array can't be loaded.
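+ * The kernel caps the number of fd_array entries it will process; the
+ * function below passes 65 fds, which is above that cap, and expects the
+ * load to be rejected with -E2BIG.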
+ */ +static void check_fd_array_cnt__fd_array_too_big(void) +{ + int extra_fds[65]; + int prog_fd = -1; + int i; + + for (i = 0; i < 65; i++) { + extra_fds[i] = new_map(); + if (!ASSERT_GE(extra_fds[i], 0, "new_map")) + goto cleanup_fds; + } + + prog_fd = load_test_prog(extra_fds, 65); + ASSERT_EQ(prog_fd, -E2BIG, "prog should have been rejected with -E2BIG"); + +cleanup_fds: + while (i > 0) + Close(extra_fds[--i]); +} + +void test_fd_array_cnt(void) +{ + if (test__start_subtest("no-fd-array")) + check_fd_array_cnt__no_fd_array(); + + if (test__start_subtest("fd-array-ok")) + check_fd_array_cnt__fd_array_ok(); + + if (test__start_subtest("fd-array-dup-input")) + check_fd_array_cnt__duplicated_maps(); + + if (test__start_subtest("fd-array-ref-maps-in-array")) + check_fd_array_cnt__referenced_maps_in_fd_array(); + + if (test__start_subtest("fd-array-ref-btfs")) + check_fd_array_cnt__referenced_btfs(); + + if (test__start_subtest("fd-array-trash-input")) + check_fd_array_cnt__fd_array_with_trash(); + + if (test__start_subtest("fd-array-2big")) + check_fd_array_cnt__fd_array_too_big(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/fd_htab_lookup.c b/tools/testing/selftests/bpf/prog_tests/fd_htab_lookup.c new file mode 100644 index 000000000000..ca46fdd6e1ae --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/fd_htab_lookup.c @@ -0,0 +1,192 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2025. Huawei Technologies Co., Ltd */ +#define _GNU_SOURCE +#include <stdbool.h> +#include <test_progs.h> +#include "fd_htab_lookup.skel.h" + +struct htab_op_ctx { + int fd; + int loop; + unsigned int entries; + bool stop; +}; + +#define ERR_TO_RETVAL(where, err) ((void *)(long)(((where) << 12) | (-err))) + +static void *htab_lookup_fn(void *arg) +{ + struct htab_op_ctx *ctx = arg; + int i = 0; + + while (i++ < ctx->loop && !ctx->stop) { + unsigned int j; + + for (j = 0; j < ctx->entries; j++) { + unsigned int key = j, zero = 0, value; + int inner_fd, err; + + err = bpf_map_lookup_elem(ctx->fd, &key, &value); + if (err) { + ctx->stop = true; + return ERR_TO_RETVAL(1, err); + } + + inner_fd = bpf_map_get_fd_by_id(value); + if (inner_fd < 0) { + /* The old map has been freed */ + if (inner_fd == -ENOENT) + continue; + ctx->stop = true; + return ERR_TO_RETVAL(2, inner_fd); + } + + err = bpf_map_lookup_elem(inner_fd, &zero, &value); + if (err) { + close(inner_fd); + ctx->stop = true; + return ERR_TO_RETVAL(3, err); + } + close(inner_fd); + + if (value != key) { + ctx->stop = true; + return ERR_TO_RETVAL(4, -EINVAL); + } + } + } + + return NULL; +} + +static void *htab_update_fn(void *arg) +{ + struct htab_op_ctx *ctx = arg; + int i = 0; + + while (i++ < ctx->loop && !ctx->stop) { + unsigned int j; + + for (j = 0; j < ctx->entries; j++) { + unsigned int key = j, zero = 0; + int inner_fd, err; + + inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 4, 1, NULL); + if (inner_fd < 0) { + ctx->stop = true; + return ERR_TO_RETVAL(1, inner_fd); + } + + err = bpf_map_update_elem(inner_fd, &zero, &key, 0); + if (err) { + close(inner_fd); + ctx->stop = true; + return ERR_TO_RETVAL(2, err); + } + + err = bpf_map_update_elem(ctx->fd, &key, &inner_fd, BPF_EXIST); + if (err) { + close(inner_fd); + ctx->stop = true; + return ERR_TO_RETVAL(3, err); + } + close(inner_fd); + } + } + + return NULL; +} + +static int setup_htab(int fd, unsigned int entries) +{ + unsigned int i; + + for (i = 0; i < entries; i++) { + unsigned int key = i, zero = 0; + int inner_fd, err; + + inner_fd = 
bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 4, 1, NULL); + if (!ASSERT_OK_FD(inner_fd, "new array")) + return -1; + + err = bpf_map_update_elem(inner_fd, &zero, &key, 0); + if (!ASSERT_OK(err, "init array")) { + close(inner_fd); + return -1; + } + + err = bpf_map_update_elem(fd, &key, &inner_fd, 0); + if (!ASSERT_OK(err, "init outer")) { + close(inner_fd); + return -1; + } + close(inner_fd); + } + + return 0; +} + +static int get_int_from_env(const char *name, int dft) +{ + const char *value; + + value = getenv(name); + if (!value) + return dft; + + return atoi(value); +} + +void test_fd_htab_lookup(void) +{ + unsigned int i, wr_nr = 8, rd_nr = 16; + pthread_t tids[wr_nr + rd_nr]; + struct fd_htab_lookup *skel; + struct htab_op_ctx ctx; + int err; + + skel = fd_htab_lookup__open_and_load(); + if (!ASSERT_OK_PTR(skel, "fd_htab_lookup__open_and_load")) + return; + + ctx.fd = bpf_map__fd(skel->maps.outer_map); + ctx.loop = get_int_from_env("FD_HTAB_LOOP_NR", 5); + ctx.stop = false; + ctx.entries = 8; + + err = setup_htab(ctx.fd, ctx.entries); + if (err) + goto destroy; + + memset(tids, 0, sizeof(tids)); + for (i = 0; i < wr_nr; i++) { + err = pthread_create(&tids[i], NULL, htab_update_fn, &ctx); + if (!ASSERT_OK(err, "pthread_create")) { + ctx.stop = true; + goto reap; + } + } + for (i = 0; i < rd_nr; i++) { + err = pthread_create(&tids[i + wr_nr], NULL, htab_lookup_fn, &ctx); + if (!ASSERT_OK(err, "pthread_create")) { + ctx.stop = true; + goto reap; + } + } + +reap: + for (i = 0; i < wr_nr + rd_nr; i++) { + void *ret = NULL; + char desc[32]; + + if (!tids[i]) + continue; + + snprintf(desc, sizeof(desc), "thread %u", i + 1); + err = pthread_join(tids[i], &ret); + ASSERT_OK(err, desc); + ASSERT_EQ(ret, NULL, desc); + } +destroy: + fd_htab_lookup__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c index d50cbd8040d4..e40114620751 100644 --- a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c +++ b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c @@ -37,6 +37,7 @@ static noinline void uprobe_func(void) static int verify_perf_link_info(int fd, enum bpf_perf_event_type type, long addr, ssize_t offset, ssize_t entry_offset) { + ssize_t ref_ctr_offset = entry_offset /* ref_ctr_offset for uprobes */; struct bpf_link_info info; __u32 len = sizeof(info); char buf[PATH_MAX]; @@ -97,6 +98,7 @@ again: case BPF_PERF_EVENT_UPROBE: case BPF_PERF_EVENT_URETPROBE: ASSERT_EQ(info.perf_event.uprobe.offset, offset, "uprobe_offset"); + ASSERT_EQ(info.perf_event.uprobe.ref_ctr_offset, ref_ctr_offset, "uprobe_ref_ctr_offset"); ASSERT_EQ(info.perf_event.uprobe.name_len, strlen(UPROBE_FILE) + 1, "name_len"); @@ -171,6 +173,10 @@ static void test_kprobe_fill_link_info(struct test_fill_link_info *skel, /* See also arch_adjust_kprobe_addr(). 
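	 * With CONFIG_X86_KERNEL_IBT, functions start with a 4-byte endbr
	 * landing pad; ppc64 with KPROBES_ON_FTRACE (and without out-of-line
	 * ftrace) places the probe 4 bytes past the symbol as well.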
*/ if (skel->kconfig->CONFIG_X86_KERNEL_IBT) entry_offset = 4; + if (skel->kconfig->CONFIG_PPC64 && + skel->kconfig->CONFIG_KPROBES_ON_FTRACE && + !skel->kconfig->CONFIG_PPC_FTRACE_OUT_OF_LINE) + entry_offset = 4; err = verify_perf_link_info(link_fd, type, kprobe_addr, 0, entry_offset); ASSERT_OK(err, "verify_perf_link_info"); } else { @@ -237,20 +243,32 @@ static void test_uprobe_fill_link_info(struct test_fill_link_info *skel, .retprobe = type == BPF_PERF_EVENT_URETPROBE, .bpf_cookie = PERF_EVENT_COOKIE, ); + const char *sema[1] = { + "uprobe_link_info_sema_1", + }; + __u64 *ref_ctr_offset; struct bpf_link *link; int link_fd, err; + err = elf_resolve_syms_offsets("/proc/self/exe", 1, sema, + (unsigned long **) &ref_ctr_offset, STT_OBJECT); + if (!ASSERT_OK(err, "elf_resolve_syms_offsets_object")) + return; + + opts.ref_ctr_offset = *ref_ctr_offset; link = bpf_program__attach_uprobe_opts(skel->progs.uprobe_run, 0, /* self pid */ UPROBE_FILE, uprobe_offset, &opts); if (!ASSERT_OK_PTR(link, "attach_uprobe")) - return; + goto out; link_fd = bpf_link__fd(link); - err = verify_perf_link_info(link_fd, type, 0, uprobe_offset, 0); + err = verify_perf_link_info(link_fd, type, 0, uprobe_offset, *ref_ctr_offset); ASSERT_OK(err, "verify_perf_link_info"); bpf_link__destroy(link); +out: + free(ref_ctr_offset); } static int verify_kmulti_link_info(int fd, bool retprobe, bool has_cookies) diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c index cfcc90cb7ffb..08bae13248c4 100644 --- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c @@ -7,39 +7,14 @@ #include "bpf_flow.skel.h" +#define TEST_NS "flow_dissector_ns" #define FLOW_CONTINUE_SADDR 0x7f00007f /* 127.0.0.127 */ +#define TEST_NAME_MAX_LEN 64 #ifndef IP_MF #define IP_MF 0x2000 #endif -#define CHECK_FLOW_KEYS(desc, got, expected) \ - _CHECK(memcmp(&got, &expected, sizeof(got)) != 0, \ - desc, \ - topts.duration, \ - "nhoff=%u/%u " \ - "thoff=%u/%u " \ - "addr_proto=0x%x/0x%x " \ - "is_frag=%u/%u " \ - "is_first_frag=%u/%u " \ - "is_encap=%u/%u " \ - "ip_proto=0x%x/0x%x " \ - "n_proto=0x%x/0x%x " \ - "flow_label=0x%x/0x%x " \ - "sport=%u/%u " \ - "dport=%u/%u\n", \ - got.nhoff, expected.nhoff, \ - got.thoff, expected.thoff, \ - got.addr_proto, expected.addr_proto, \ - got.is_frag, expected.is_frag, \ - got.is_first_frag, expected.is_first_frag, \ - got.is_encap, expected.is_encap, \ - got.ip_proto, expected.ip_proto, \ - got.n_proto, expected.n_proto, \ - got.flow_label, expected.flow_label, \ - got.sport, expected.sport, \ - got.dport, expected.dport) - struct ipv4_pkt { struct ethhdr eth; struct iphdr iph; @@ -89,6 +64,19 @@ struct dvlan_ipv6_pkt { struct tcphdr tcp; } __packed; +struct gre_base_hdr { + __be16 flags; + __be16 protocol; +} gre_base_hdr; + +struct gre_minimal_pkt { + struct ethhdr eth; + struct iphdr iph; + struct gre_base_hdr gre_hdr; + struct iphdr iph_inner; + struct tcphdr tcp; +} __packed; + struct test { const char *name; union { @@ -98,6 +86,7 @@ struct test { struct ipv6_pkt ipv6; struct ipv6_frag_pkt ipv6_frag; struct dvlan_ipv6_pkt dvlan_ipv6; + struct gre_minimal_pkt gre_minimal; } pkt; struct bpf_flow_keys keys; __u32 flags; @@ -106,7 +95,6 @@ struct test { #define VLAN_HLEN 4 -static __u32 duration; struct test tests[] = { { .name = "ipv4", @@ -444,8 +432,137 @@ struct test tests[] = { }, .retval = BPF_FLOW_DISSECTOR_CONTINUE, }, + { + .name = "ip-gre", + .pkt.gre_minimal = { + 
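+			/* eth | outer IPv4 | base GRE header (4 bytes, no
+			 * options) | inner IPv4 | TCP
+			 */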
.eth.h_proto = __bpf_constant_htons(ETH_P_IP), + .iph.ihl = 5, + .iph.protocol = IPPROTO_GRE, + .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES), + .gre_hdr = { + .flags = 0, + .protocol = __bpf_constant_htons(ETH_P_IP), + }, + .iph_inner.ihl = 5, + .iph_inner.protocol = IPPROTO_TCP, + .iph_inner.tot_len = + __bpf_constant_htons(MAGIC_BYTES - + sizeof(struct iphdr)), + .tcp.doff = 5, + .tcp.source = 80, + .tcp.dest = 8080, + }, + .keys = { + .nhoff = ETH_HLEN, + .thoff = ETH_HLEN + sizeof(struct iphdr) * 2 + + sizeof(struct gre_base_hdr), + .addr_proto = ETH_P_IP, + .ip_proto = IPPROTO_TCP, + .n_proto = __bpf_constant_htons(ETH_P_IP), + .is_encap = true, + .sport = 80, + .dport = 8080, + }, + .retval = BPF_OK, + }, + { + .name = "ip-gre-no-encap", + .pkt.ipip = { + .eth.h_proto = __bpf_constant_htons(ETH_P_IP), + .iph.ihl = 5, + .iph.protocol = IPPROTO_GRE, + .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES), + .iph_inner.ihl = 5, + .iph_inner.protocol = IPPROTO_TCP, + .iph_inner.tot_len = + __bpf_constant_htons(MAGIC_BYTES - + sizeof(struct iphdr)), + .tcp.doff = 5, + .tcp.source = 80, + .tcp.dest = 8080, + }, + .keys = { + .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP, + .nhoff = ETH_HLEN, + .thoff = ETH_HLEN + sizeof(struct iphdr) + + sizeof(struct gre_base_hdr), + .addr_proto = ETH_P_IP, + .ip_proto = IPPROTO_GRE, + .n_proto = __bpf_constant_htons(ETH_P_IP), + .is_encap = true, + }, + .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP, + .retval = BPF_OK, + }, }; +void serial_test_flow_dissector_namespace(void) +{ + struct bpf_flow *skel; + struct nstoken *ns; + int err, prog_fd; + + skel = bpf_flow__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open/load skeleton")) + return; + + prog_fd = bpf_program__fd(skel->progs._dissect); + if (!ASSERT_OK_FD(prog_fd, "get dissector fd")) + goto out_destroy_skel; + + /* We must be able to attach a flow dissector to root namespace */ + err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0); + if (!ASSERT_OK(err, "attach on root namespace ok")) + goto out_destroy_skel; + + err = make_netns(TEST_NS); + if (!ASSERT_OK(err, "create non-root net namespace")) + goto out_destroy_skel; + + /* We must not be able to additionally attach a flow dissector to a + * non-root net namespace + */ + ns = open_netns(TEST_NS); + if (!ASSERT_OK_PTR(ns, "enter non-root net namespace")) + goto out_clean_ns; + err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0); + if (!ASSERT_ERR(err, + "refuse new flow dissector in non-root net namespace")) + bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR); + else + ASSERT_EQ(errno, EEXIST, + "refused because of already attached prog"); + close_netns(ns); + + /* If no flow dissector is attached to the root namespace, we must + * be able to attach one to a non-root net namespace + */ + bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR); + ns = open_netns(TEST_NS); + ASSERT_OK_PTR(ns, "enter non-root net namespace"); + err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0); + close_netns(ns); + ASSERT_OK(err, "accept new flow dissector in non-root net namespace"); + + /* If a flow dissector is attached to non-root net namespace, attaching + * a flow dissector to root namespace must fail + */ + err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0); + if (!ASSERT_ERR(err, "refuse new flow dissector on root namespace")) + bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR); + else + ASSERT_EQ(errno, EEXIST, + "refused because of already attached prog"); + + ns = open_netns(TEST_NS); + bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR); + 
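+	/* the detach above ran inside TEST_NS, where the last flow
+	 * dissector is still attached */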
+	close_netns(ns);
+out_clean_ns:
+	remove_netns(TEST_NS);
+out_destroy_skel:
+	bpf_flow__destroy(skel);
+}
+
 static int create_tap(const char *ifname)
 {
 	struct ifreq ifr = {
@@ -533,22 +650,27 @@ static int init_prog_array(struct bpf_object *obj, struct bpf_map *prog_array)
 	return 0;
 }
 
-static void run_tests_skb_less(int tap_fd, struct bpf_map *keys)
+static void run_tests_skb_less(int tap_fd, struct bpf_map *keys,
+			       char *test_suffix)
 {
+	char test_name[TEST_NAME_MAX_LEN];
 	int i, err, keys_fd;
 
 	keys_fd = bpf_map__fd(keys);
-	if (CHECK(keys_fd < 0, "bpf_map__fd", "err %d\n", keys_fd))
+	if (!ASSERT_OK_FD(keys_fd, "bpf_map__fd"))
 		return;
 
	for (i = 0; i < ARRAY_SIZE(tests); i++) {
 		/* Keep in sync with 'flags' from eth_get_headlen. */
 		__u32 eth_get_headlen_flags =
 			BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG;
-		LIBBPF_OPTS(bpf_test_run_opts, topts);
 		struct bpf_flow_keys flow_keys = {};
 		__u32 key = (__u32)(tests[i].keys.sport) << 16 |
 			    tests[i].keys.dport;
 
+		snprintf(test_name, TEST_NAME_MAX_LEN, "%s-%s", tests[i].name,
+			 test_suffix);
+		if (!test__start_subtest(test_name))
+			continue;
 
 		/* For skb-less case we can't pass input flags; run
 		 * only the tests that have a matching set of flags.
@@ -558,78 +680,139 @@ static void run_tests_skb_less(int tap_fd, struct bpf_map *keys)
 			continue;
 
 		err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt));
-		CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno);
+		if (!ASSERT_EQ(err, sizeof(tests[i].pkt), "tx_tap"))
+			continue;
 
 		/* check the stored flow_keys only if BPF_OK expected */
 		if (tests[i].retval != BPF_OK)
 			continue;
 
 		err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys);
-		ASSERT_OK(err, "bpf_map_lookup_elem");
+		if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
+			continue;
 
-		CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
+		ASSERT_MEMEQ(&flow_keys, &tests[i].keys,
+			     sizeof(struct bpf_flow_keys),
+			     "returned flow keys");
 
 		err = bpf_map_delete_elem(keys_fd, &key);
 		ASSERT_OK(err, "bpf_map_delete_elem");
 	}
 }
 
-static void test_skb_less_prog_attach(struct bpf_flow *skel, int tap_fd)
+void test_flow_dissector_skb_less_direct_attach(void)
 {
-	int err, prog_fd;
+	int err, prog_fd, tap_fd;
+	struct bpf_flow *skel;
+	struct netns_obj *ns;
 
-	prog_fd = bpf_program__fd(skel->progs._dissect);
-	if (CHECK(prog_fd < 0, "bpf_program__fd", "err %d\n", prog_fd))
+	ns = netns_new("flow_dissector_skb_less_direct_attach_ns", true);
+	if (!ASSERT_OK_PTR(ns, "create and open netns"))
 		return;
 
+	skel = bpf_flow__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "open/load skeleton"))
+		goto out_clean_ns;
+
+	err = init_prog_array(skel->obj, skel->maps.jmp_table);
+	if (!ASSERT_OK(err, "init_prog_array"))
+		goto out_destroy_skel;
+
+	prog_fd = bpf_program__fd(skel->progs._dissect);
+	if (!ASSERT_OK_FD(prog_fd, "bpf_program__fd"))
+		goto out_destroy_skel;
+
 	err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
-	if (CHECK(err, "bpf_prog_attach", "err %d errno %d\n", err, errno))
-		return;
+	if (!ASSERT_OK(err, "bpf_prog_attach"))
+		goto out_destroy_skel;
+
+	tap_fd = create_tap("tap0");
+	if (!ASSERT_OK_FD(tap_fd, "create_tap"))
+		goto out_destroy_skel;
+	err = ifup("tap0");
+	if (!ASSERT_OK(err, "ifup"))
+		goto out_close_tap;
 
-	run_tests_skb_less(tap_fd, skel->maps.last_dissection);
+	run_tests_skb_less(tap_fd, skel->maps.last_dissection,
+			   "non-skb-direct-attach");
 
 	err = bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
-	CHECK(err, "bpf_prog_detach2", "err %d errno %d\n", err, errno);
+	ASSERT_OK(err, "bpf_prog_detach2");
+
+out_close_tap:
+	close(tap_fd);
+out_destroy_skel:
+	bpf_flow__destroy(skel);
+out_clean_ns:
+	netns_free(ns);
 }
 
-static void test_skb_less_link_create(struct bpf_flow *skel, int tap_fd)
+void test_flow_dissector_skb_less_indirect_attach(void)
 {
+	int err, net_fd, tap_fd;
+	struct bpf_flow *skel;
 	struct bpf_link *link;
-	int err, net_fd;
+	struct netns_obj *ns;
 
-	net_fd = open("/proc/self/ns/net", O_RDONLY);
-	if (CHECK(net_fd < 0, "open(/proc/self/ns/net)", "err %d\n", errno))
+	ns = netns_new("flow_dissector_skb_less_indirect_attach_ns", true);
+	if (!ASSERT_OK_PTR(ns, "create and open netns"))
 		return;
 
+	skel = bpf_flow__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "open/load skeleton"))
+		goto out_clean_ns;
+
+	net_fd = open("/proc/self/ns/net", O_RDONLY);
+	if (!ASSERT_OK_FD(net_fd, "open(/proc/self/ns/net)"))
+		goto out_destroy_skel;
+
+	err = init_prog_array(skel->obj, skel->maps.jmp_table);
+	if (!ASSERT_OK(err, "init_prog_array"))
+		goto out_destroy_skel;
+
+	tap_fd = create_tap("tap0");
+	if (!ASSERT_OK_FD(tap_fd, "create_tap"))
+		goto out_close_ns;
+	err = ifup("tap0");
+	if (!ASSERT_OK(err, "ifup"))
+		goto out_close_tap;
+
 	link = bpf_program__attach_netns(skel->progs._dissect, net_fd);
 	if (!ASSERT_OK_PTR(link, "attach_netns"))
-		goto out_close;
+		goto out_close_tap;
 
-	run_tests_skb_less(tap_fd, skel->maps.last_dissection);
+	run_tests_skb_less(tap_fd, skel->maps.last_dissection,
+			   "non-skb-indirect-attach");
 
 	err = bpf_link__destroy(link);
-	CHECK(err, "bpf_link__destroy", "err %d\n", err);
-out_close:
+	ASSERT_OK(err, "bpf_link__destroy");
+
+out_close_tap:
+	close(tap_fd);
+out_close_ns:
 	close(net_fd);
+out_destroy_skel:
+	bpf_flow__destroy(skel);
+out_clean_ns:
+	netns_free(ns);
 }
 
-void test_flow_dissector(void)
+void test_flow_dissector_skb(void)
 {
-	int i, err, prog_fd, keys_fd = -1, tap_fd;
+	char test_name[TEST_NAME_MAX_LEN];
 	struct bpf_flow *skel;
+	int i, err, prog_fd;
 
 	skel = bpf_flow__open_and_load();
-	if (CHECK(!skel, "skel", "failed to open/load skeleton\n"))
+	if (!ASSERT_OK_PTR(skel, "open/load skeleton"))
 		return;
 
-	prog_fd = bpf_program__fd(skel->progs._dissect);
-	if (CHECK(prog_fd < 0, "bpf_program__fd", "err %d\n", prog_fd))
-		goto out_destroy_skel;
-	keys_fd = bpf_map__fd(skel->maps.last_dissection);
-	if (CHECK(keys_fd < 0, "bpf_map__fd", "err %d\n", keys_fd))
-		goto out_destroy_skel;
 	err = init_prog_array(skel->obj, skel->maps.jmp_table);
-	if (CHECK(err, "init_prog_array", "err %d\n", err))
+	if (!ASSERT_OK(err, "init_prog_array"))
+		goto out_destroy_skel;
+
+	prog_fd = bpf_program__fd(skel->progs._dissect);
+	if (!ASSERT_OK_FD(prog_fd, "bpf_program__fd"))
 		goto out_destroy_skel;
 
 	for (i = 0; i < ARRAY_SIZE(tests); i++) {
@@ -641,6 +824,10 @@ void test_flow_dissector(void)
 		);
 		static struct bpf_flow_keys ctx = {};
 
+		snprintf(test_name, TEST_NAME_MAX_LEN, "%s-skb", tests[i].name);
+		if (!test__start_subtest(test_name))
+			continue;
+
 		if (tests[i].flags) {
 			topts.ctx_in = &ctx;
 			topts.ctx_size_in = sizeof(ctx);
@@ -656,26 +843,12 @@ void test_flow_dissector(void)
 			continue;
 		ASSERT_EQ(topts.data_size_out, sizeof(flow_keys),
 			  "test_run data_size_out");
-		CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
+		ASSERT_MEMEQ(&flow_keys, &tests[i].keys,
+			     sizeof(struct bpf_flow_keys),
+			     "returned flow keys");
 	}
 
-	/* Do the same tests but for skb-less flow dissector.
-	 * We use a known path in the net/tun driver that calls
-	 * eth_get_headlen and we manually export bpf_flow_keys
-	 * via BPF map in this case.
- */ - - tap_fd = create_tap("tap0"); - CHECK(tap_fd < 0, "create_tap", "tap_fd %d errno %d\n", tap_fd, errno); - err = ifup("tap0"); - CHECK(err, "ifup", "err %d errno %d\n", err, errno); - - /* Test direct prog attachment */ - test_skb_less_prog_attach(skel, tap_fd); - /* Test indirect prog attachment via link */ - test_skb_less_link_create(skel, tap_fd); - - close(tap_fd); out_destroy_skel: bpf_flow__destroy(skel); } + diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector_classification.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector_classification.c new file mode 100644 index 000000000000..80b153d3ddec --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector_classification.c @@ -0,0 +1,797 @@ +// SPDX-License-Identifier: GPL-2.0 + +#define _GNU_SOURCE +#include <stdbool.h> +#include <stdlib.h> +#include <stdio.h> +#include <bpf/bpf.h> +#include <linux/bpf.h> +#include <bpf/libbpf.h> +#include <arpa/inet.h> +#include <asm/byteorder.h> +#include <netinet/udp.h> +#include <poll.h> +#include <string.h> +#include <sys/ioctl.h> +#include <sys/socket.h> +#include <sys/time.h> +#include <unistd.h> +#include "test_progs.h" +#include "network_helpers.h" +#include "bpf_util.h" +#include "bpf_flow.skel.h" + +#define CFG_PORT_INNER 8000 +#define CFG_PORT_GUE 6080 +#define SUBTEST_NAME_MAX_LEN 32 +#define TEST_NAME_MAX_LEN (32 + SUBTEST_NAME_MAX_LEN) +#define MAX_SOURCE_PORTS 3 +#define TEST_PACKETS_COUNT 10 +#define TEST_PACKET_LEN 100 +#define TEST_PACKET_PATTERN 'a' +#define TEST_IPV4 "192.168.0.1/32" +#define TEST_IPV6 "100::a/128" +#define TEST_TUNNEL_REMOTE "127.0.0.2" +#define TEST_TUNNEL_LOCAL "127.0.0.1" + +#define INIT_ADDR4(addr4, port) \ + { \ + .sin_family = AF_INET, \ + .sin_port = __constant_htons(port), \ + .sin_addr.s_addr = __constant_htonl(addr4), \ + } + +#define INIT_ADDR6(addr6, port) \ + { \ + .sin6_family = AF_INET6, \ + .sin6_port = __constant_htons(port), \ + .sin6_addr = addr6, \ + } +#define TEST_IN4_SRC_ADDR_DEFAULT INIT_ADDR4(INADDR_LOOPBACK + 2, 0) +#define TEST_IN4_DST_ADDR_DEFAULT INIT_ADDR4(INADDR_LOOPBACK, CFG_PORT_INNER) +#define TEST_OUT4_SRC_ADDR_DEFAULT INIT_ADDR4(INADDR_LOOPBACK + 1, 0) +#define TEST_OUT4_DST_ADDR_DEFAULT INIT_ADDR4(INADDR_LOOPBACK, 0) + +#define TEST_IN6_SRC_ADDR_DEFAULT INIT_ADDR6(IN6ADDR_LOOPBACK_INIT, 0) +#define TEST_IN6_DST_ADDR_DEFAULT \ + INIT_ADDR6(IN6ADDR_LOOPBACK_INIT, CFG_PORT_INNER) +#define TEST_OUT6_SRC_ADDR_DEFAULT INIT_ADDR6(IN6ADDR_LOOPBACK_INIT, 0) +#define TEST_OUT6_DST_ADDR_DEFAULT INIT_ADDR6(IN6ADDR_LOOPBACK_INIT, 0) + +#define TEST_IN4_SRC_ADDR_DISSECT_CONTINUE INIT_ADDR4(INADDR_LOOPBACK + 126, 0) +#define TEST_IN4_SRC_ADDR_IPIP INIT_ADDR4((in_addr_t)0x01010101, 0) +#define TEST_IN4_DST_ADDR_IPIP INIT_ADDR4((in_addr_t)0xC0A80001, CFG_PORT_INNER) + +struct grehdr { + uint16_t unused; + uint16_t protocol; +} __packed; + +struct guehdr { + union { + struct { +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 hlen : 5, control : 1, version : 2; +#elif defined(__BIG_ENDIAN_BITFIELD) + __u8 version : 2, control : 1, hlen : 5; +#else +#error "Please fix <asm/byteorder.h>" +#endif + __u8 proto_ctype; + __be16 flags; + }; + __be32 word; + }; +}; + +static char buf[ETH_DATA_LEN]; + +struct test_configuration { + char name[SUBTEST_NAME_MAX_LEN]; + int (*test_setup)(void); + void (*test_teardown)(void); + int source_ports[MAX_SOURCE_PORTS]; + int cfg_l3_inner; + struct sockaddr_in in_saddr4; + struct sockaddr_in in_daddr4; + struct sockaddr_in6 in_saddr6; + struct sockaddr_in6 in_daddr6; + int 
cfg_l3_outer; + struct sockaddr_in out_saddr4; + struct sockaddr_in out_daddr4; + struct sockaddr_in6 out_saddr6; + struct sockaddr_in6 out_daddr6; + int cfg_encap_proto; + uint8_t cfg_dsfield_inner; + uint8_t cfg_dsfield_outer; + int cfg_l3_extra; + struct sockaddr_in extra_saddr4; + struct sockaddr_in extra_daddr4; + struct sockaddr_in6 extra_saddr6; + struct sockaddr_in6 extra_daddr6; +}; + +static unsigned long util_gettime(void) +{ + struct timeval tv; + + gettimeofday(&tv, NULL); + return (tv.tv_sec * 1000) + (tv.tv_usec / 1000); +} + +static void build_ipv4_header(void *header, uint8_t proto, uint32_t src, + uint32_t dst, int payload_len, uint8_t tos) +{ + struct iphdr *iph = header; + + iph->ihl = 5; + iph->version = 4; + iph->tos = tos; + iph->ttl = 8; + iph->tot_len = htons(sizeof(*iph) + payload_len); + iph->id = htons(1337); + iph->protocol = proto; + iph->saddr = src; + iph->daddr = dst; + iph->check = build_ip_csum((void *)iph); +} + +static void ipv6_set_dsfield(struct ipv6hdr *ip6h, uint8_t dsfield) +{ + uint16_t val, *ptr = (uint16_t *)ip6h; + + val = ntohs(*ptr); + val &= 0xF00F; + val |= ((uint16_t)dsfield) << 4; + *ptr = htons(val); +} + +static void build_ipv6_header(void *header, uint8_t proto, + const struct sockaddr_in6 *src, + const struct sockaddr_in6 *dst, int payload_len, + uint8_t dsfield) +{ + struct ipv6hdr *ip6h = header; + + ip6h->version = 6; + ip6h->payload_len = htons(payload_len); + ip6h->nexthdr = proto; + ip6h->hop_limit = 8; + ipv6_set_dsfield(ip6h, dsfield); + + memcpy(&ip6h->saddr, &src->sin6_addr, sizeof(ip6h->saddr)); + memcpy(&ip6h->daddr, &dst->sin6_addr, sizeof(ip6h->daddr)); +} + +static void build_udp_header(void *header, int payload_len, uint16_t sport, + uint16_t dport, int family) +{ + struct udphdr *udph = header; + int len = sizeof(*udph) + payload_len; + + udph->source = htons(sport); + udph->dest = htons(dport); + udph->len = htons(len); + udph->check = 0; + if (family == AF_INET) + udph->check = build_udp_v4_csum(header - sizeof(struct iphdr), + udph); + else + udph->check = build_udp_v6_csum(header - sizeof(struct ipv6hdr), + udph); +} + +static void build_gue_header(void *header, uint8_t proto) +{ + struct guehdr *gueh = header; + + gueh->proto_ctype = proto; +} + +static void build_gre_header(void *header, uint16_t proto) +{ + struct grehdr *greh = header; + + greh->protocol = htons(proto); +} + +static int l3_length(int family) +{ + if (family == AF_INET) + return sizeof(struct iphdr); + else + return sizeof(struct ipv6hdr); +} + +static int build_packet(const struct test_configuration *test, uint16_t sport) +{ + int ol3_len = 0, ol4_len = 0, il3_len = 0, il4_len = 0; + int el3_len = 0, packet_len; + + memset(buf, 0, ETH_DATA_LEN); + + if (test->cfg_l3_extra) + el3_len = l3_length(test->cfg_l3_extra); + + /* calculate header offsets */ + if (test->cfg_encap_proto) { + ol3_len = l3_length(test->cfg_l3_outer); + + if (test->cfg_encap_proto == IPPROTO_GRE) + ol4_len = sizeof(struct grehdr); + else if (test->cfg_encap_proto == IPPROTO_UDP) + ol4_len = sizeof(struct udphdr) + sizeof(struct guehdr); + } + + il3_len = l3_length(test->cfg_l3_inner); + il4_len = sizeof(struct udphdr); + + packet_len = el3_len + ol3_len + ol4_len + il3_len + il4_len + + TEST_PACKET_LEN; + if (!ASSERT_LE(packet_len, sizeof(buf), "check packet size")) + return -1; + + /* + * Fill packet from inside out, to calculate correct checksums. + * But create ip before udp headers, as udp uses ip for pseudo-sum. 
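+	 *
+	 * Resulting layout, as offsets into buf:
+	 *   [extra IP: el3_len][outer IP: ol3_len][GRE or UDP+GUE: ol4_len]
+	 *   [inner IP: il3_len][inner UDP: il4_len][payload: TEST_PACKET_LEN]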
+ */ + memset(buf + el3_len + ol3_len + ol4_len + il3_len + il4_len, + TEST_PACKET_PATTERN, TEST_PACKET_LEN); + + /* add zero byte for udp csum padding */ + buf[el3_len + ol3_len + ol4_len + il3_len + il4_len + TEST_PACKET_LEN] = + 0; + + switch (test->cfg_l3_inner) { + case PF_INET: + build_ipv4_header(buf + el3_len + ol3_len + ol4_len, + IPPROTO_UDP, test->in_saddr4.sin_addr.s_addr, + test->in_daddr4.sin_addr.s_addr, + il4_len + TEST_PACKET_LEN, + test->cfg_dsfield_inner); + break; + case PF_INET6: + build_ipv6_header(buf + el3_len + ol3_len + ol4_len, + IPPROTO_UDP, &test->in_saddr6, + &test->in_daddr6, il4_len + TEST_PACKET_LEN, + test->cfg_dsfield_inner); + break; + } + + build_udp_header(buf + el3_len + ol3_len + ol4_len + il3_len, + TEST_PACKET_LEN, sport, CFG_PORT_INNER, + test->cfg_l3_inner); + + if (!test->cfg_encap_proto) + return il3_len + il4_len + TEST_PACKET_LEN; + + switch (test->cfg_l3_outer) { + case PF_INET: + build_ipv4_header(buf + el3_len, test->cfg_encap_proto, + test->out_saddr4.sin_addr.s_addr, + test->out_daddr4.sin_addr.s_addr, + ol4_len + il3_len + il4_len + TEST_PACKET_LEN, + test->cfg_dsfield_outer); + break; + case PF_INET6: + build_ipv6_header(buf + el3_len, test->cfg_encap_proto, + &test->out_saddr6, &test->out_daddr6, + ol4_len + il3_len + il4_len + TEST_PACKET_LEN, + test->cfg_dsfield_outer); + break; + } + + switch (test->cfg_encap_proto) { + case IPPROTO_UDP: + build_gue_header(buf + el3_len + ol3_len + ol4_len - + sizeof(struct guehdr), + test->cfg_l3_inner == PF_INET ? IPPROTO_IPIP : + IPPROTO_IPV6); + build_udp_header(buf + el3_len + ol3_len, + sizeof(struct guehdr) + il3_len + il4_len + + TEST_PACKET_LEN, + sport, CFG_PORT_GUE, test->cfg_l3_outer); + break; + case IPPROTO_GRE: + build_gre_header(buf + el3_len + ol3_len, + test->cfg_l3_inner == PF_INET ? ETH_P_IP : + ETH_P_IPV6); + break; + } + + switch (test->cfg_l3_extra) { + case PF_INET: + build_ipv4_header(buf, + test->cfg_l3_outer == PF_INET ? IPPROTO_IPIP : + IPPROTO_IPV6, + test->extra_saddr4.sin_addr.s_addr, + test->extra_daddr4.sin_addr.s_addr, + ol3_len + ol4_len + il3_len + il4_len + + TEST_PACKET_LEN, + 0); + break; + case PF_INET6: + build_ipv6_header(buf, + test->cfg_l3_outer == PF_INET ? 
IPPROTO_IPIP : + IPPROTO_IPV6, + &test->extra_saddr6, &test->extra_daddr6, + ol3_len + ol4_len + il3_len + il4_len + + TEST_PACKET_LEN, + 0); + break; + } + + return el3_len + ol3_len + ol4_len + il3_len + il4_len + + TEST_PACKET_LEN; +} + +/* sender transmits encapsulated over RAW or unencap'd over UDP */ +static int setup_tx(const struct test_configuration *test) +{ + int family, fd, ret; + + if (test->cfg_l3_extra) + family = test->cfg_l3_extra; + else if (test->cfg_l3_outer) + family = test->cfg_l3_outer; + else + family = test->cfg_l3_inner; + + fd = socket(family, SOCK_RAW, IPPROTO_RAW); + if (!ASSERT_OK_FD(fd, "setup tx socket")) + return fd; + + if (test->cfg_l3_extra) { + if (test->cfg_l3_extra == PF_INET) + ret = connect(fd, (void *)&test->extra_daddr4, + sizeof(test->extra_daddr4)); + else + ret = connect(fd, (void *)&test->extra_daddr6, + sizeof(test->extra_daddr6)); + if (!ASSERT_OK(ret, "connect")) { + close(fd); + return ret; + } + } else if (test->cfg_l3_outer) { + /* connect to destination if not encapsulated */ + if (test->cfg_l3_outer == PF_INET) + ret = connect(fd, (void *)&test->out_daddr4, + sizeof(test->out_daddr4)); + else + ret = connect(fd, (void *)&test->out_daddr6, + sizeof(test->out_daddr6)); + if (!ASSERT_OK(ret, "connect")) { + close(fd); + return ret; + } + } else { + /* otherwise using loopback */ + if (test->cfg_l3_inner == PF_INET) + ret = connect(fd, (void *)&test->in_daddr4, + sizeof(test->in_daddr4)); + else + ret = connect(fd, (void *)&test->in_daddr6, + sizeof(test->in_daddr6)); + if (!ASSERT_OK(ret, "connect")) { + close(fd); + return ret; + } + } + + return fd; +} + +/* receiver reads unencapsulated UDP */ +static int setup_rx(const struct test_configuration *test) +{ + int fd, ret; + + fd = socket(test->cfg_l3_inner, SOCK_DGRAM, 0); + if (!ASSERT_OK_FD(fd, "socket rx")) + return fd; + + if (test->cfg_l3_inner == PF_INET) + ret = bind(fd, (void *)&test->in_daddr4, + sizeof(test->in_daddr4)); + else + ret = bind(fd, (void *)&test->in_daddr6, + sizeof(test->in_daddr6)); + if (!ASSERT_OK(ret, "bind rx")) { + close(fd); + return ret; + } + + return fd; +} + +static int do_tx(int fd, const char *pkt, int len) +{ + int ret; + + ret = write(fd, pkt, len); + return ret != len; +} + +static int do_poll(int fd, short events, int timeout) +{ + struct pollfd pfd; + int ret; + + pfd.fd = fd; + pfd.events = events; + + ret = poll(&pfd, 1, timeout); + return ret; +} + +static int do_rx(int fd) +{ + char rbuf; + int ret, num = 0; + + while (1) { + ret = recv(fd, &rbuf, 1, MSG_DONTWAIT); + if (ret == -1 && errno == EAGAIN) + break; + if (ret < 0) + return -1; + if (!ASSERT_EQ(rbuf, TEST_PACKET_PATTERN, "check pkt pattern")) + return -1; + num++; + } + + return num; +} + +static int run_test(const struct test_configuration *test, + int source_port_index) +{ + int fdt = -1, fdr = -1, len, tx = 0, rx = 0, err; + unsigned long tstop, tcur; + + fdr = setup_rx(test); + fdt = setup_tx(test); + if (!ASSERT_OK_FD(fdr, "setup rx") || !ASSERT_OK_FD(fdt, "setup tx")) { + err = -1; + goto out_close_sockets; + } + + len = build_packet(test, + (uint16_t)test->source_ports[source_port_index]); + if (!ASSERT_GT(len, 0, "build test packet")) + return -1; + + tcur = util_gettime(); + tstop = tcur; + + while (tx < TEST_PACKETS_COUNT) { + if (!ASSERT_OK(do_tx(fdt, buf, len), "do_tx")) + break; + tx++; + err = do_rx(fdr); + if (!ASSERT_GE(err, 0, "do_rx")) + break; + rx += err; + } + + /* read straggler packets, if any */ + if (rx < tx) { + tstop = util_gettime() + 100; + while (rx < 
tx) { + tcur = util_gettime(); + if (tcur >= tstop) + break; + + err = do_poll(fdr, POLLIN, tstop - tcur); + if (err < 0) + break; + err = do_rx(fdr); + if (err >= 0) + rx += err; + } + } + +out_close_sockets: + close(fdt); + close(fdr); + return rx; +} + +static int attach_and_configure_program(struct bpf_flow *skel) +{ + struct bpf_map *prog_array = skel->maps.jmp_table; + int main_prog_fd, sub_prog_fd, map_fd, i, err; + struct bpf_program *prog; + char prog_name[32]; + + main_prog_fd = bpf_program__fd(skel->progs._dissect); + if (main_prog_fd < 0) + return main_prog_fd; + + err = bpf_prog_attach(main_prog_fd, 0, BPF_FLOW_DISSECTOR, 0); + if (err) + return err; + + map_fd = bpf_map__fd(prog_array); + if (map_fd < 0) + return map_fd; + + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { + snprintf(prog_name, sizeof(prog_name), "flow_dissector_%d", i); + + prog = bpf_object__find_program_by_name(skel->obj, prog_name); + if (!prog) + return -1; + + sub_prog_fd = bpf_program__fd(prog); + if (sub_prog_fd < 0) + return -1; + + err = bpf_map_update_elem(map_fd, &i, &sub_prog_fd, BPF_ANY); + if (err) + return -1; + } + + return main_prog_fd; +} + +static void detach_program(struct bpf_flow *skel, int prog_fd) +{ + bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR); +} + +static int set_port_drop(int pf, bool multi_port) +{ + char dst_port[16]; + + snprintf(dst_port, sizeof(dst_port), "%d", CFG_PORT_INNER); + + SYS(fail, "tc qdisc add dev lo ingress"); + SYS(fail_delete_qdisc, "tc filter add %s %s %s %s %s %s %s %s %s %s %s %s", + "dev lo", + "parent FFFF:", + "protocol", pf == PF_INET6 ? "ipv6" : "ip", + "pref 1337", + "flower", + "ip_proto udp", + "src_port", multi_port ? "8-10" : "9", + "dst_port", dst_port, + "action drop"); + return 0; + +fail_delete_qdisc: + SYS_NOFAIL("tc qdisc del dev lo ingress"); +fail: + return 1; +} + +static void remove_filter(void) +{ + SYS_NOFAIL("tc filter del dev lo ingress"); + SYS_NOFAIL("tc qdisc del dev lo ingress"); +} + +static int ipv4_setup(void) +{ + return set_port_drop(PF_INET, false); +} + +static int ipv6_setup(void) +{ + return set_port_drop(PF_INET6, false); +} + +static int port_range_setup(void) +{ + return set_port_drop(PF_INET, true); +} + +static int set_addresses(void) +{ + SYS(out, "ip -4 addr add %s dev lo", TEST_IPV4); + SYS(out_remove_ipv4, "ip -6 addr add %s dev lo", TEST_IPV6); + return 0; +out_remove_ipv4: + SYS_NOFAIL("ip -4 addr del %s dev lo", TEST_IPV4); +out: + return -1; +} + +static void unset_addresses(void) +{ + SYS_NOFAIL("ip -4 addr del %s dev lo", TEST_IPV4); + SYS_NOFAIL("ip -6 addr del %s dev lo", TEST_IPV6); +} + +static int ipip_setup(void) +{ + if (!ASSERT_OK(set_addresses(), "configure addresses")) + return -1; + if (!ASSERT_OK(set_port_drop(PF_INET, false), "set filter")) + goto out_unset_addresses; + SYS(out_remove_filter, + "ip link add ipip_test type ipip remote %s local %s dev lo", + TEST_TUNNEL_REMOTE, TEST_TUNNEL_LOCAL); + SYS(out_clean_netif, "ip link set ipip_test up"); + return 0; + +out_clean_netif: + SYS_NOFAIL("ip link del ipip_test"); +out_remove_filter: + remove_filter(); +out_unset_addresses: + unset_addresses(); + return -1; +} + +static void ipip_shutdown(void) +{ + SYS_NOFAIL("ip link del ipip_test"); + remove_filter(); + unset_addresses(); +} + +static int gre_setup(void) +{ + if (!ASSERT_OK(set_addresses(), "configure addresses")) + return -1; + if (!ASSERT_OK(set_port_drop(PF_INET, false), "set filter")) + goto out_unset_addresses; + SYS(out_remove_filter, + "ip link add gre_test type gre 
remote %s local %s dev lo",
+	    TEST_TUNNEL_REMOTE, TEST_TUNNEL_LOCAL);
+	SYS(out_clean_netif, "ip link set gre_test up");
+	return 0;
+
+out_clean_netif:
+	SYS_NOFAIL("ip link del gre_test");
+out_remove_filter:
+	remove_filter();
+out_unset_addresses:
+	unset_addresses();
+	return -1;
+}
+
+static void gre_shutdown(void)
+{
+	SYS_NOFAIL("ip link del gre_test");
+	remove_filter();
+	unset_addresses();
+}
+
+static const struct test_configuration tests_input[] = {
+	{
+		.name = "ipv4",
+		.test_setup = ipv4_setup,
+		.test_teardown = remove_filter,
+		.source_ports = { 8, 9, 10 },
+		.cfg_l3_inner = PF_INET,
+		.in_saddr4 = TEST_IN4_SRC_ADDR_DEFAULT,
+		.in_daddr4 = TEST_IN4_DST_ADDR_DEFAULT
+	},
+	{
+		.name = "ipv4_continue_dissect",
+		.test_setup = ipv4_setup,
+		.test_teardown = remove_filter,
+		.source_ports = { 8, 9, 10 },
+		.cfg_l3_inner = PF_INET,
+		.in_saddr4 = TEST_IN4_SRC_ADDR_DISSECT_CONTINUE,
+		.in_daddr4 = TEST_IN4_DST_ADDR_DEFAULT
+	},
+	{
+		.name = "ipip",
+		.test_setup = ipip_setup,
+		.test_teardown = ipip_shutdown,
+		.source_ports = { 8, 9, 10 },
+		.cfg_l3_inner = PF_INET,
+		.in_saddr4 = TEST_IN4_SRC_ADDR_IPIP,
+		.in_daddr4 = TEST_IN4_DST_ADDR_IPIP,
+		.out_saddr4 = TEST_OUT4_SRC_ADDR_DEFAULT,
+		.out_daddr4 = TEST_OUT4_DST_ADDR_DEFAULT,
+		.cfg_l3_outer = PF_INET,
+		.cfg_encap_proto = IPPROTO_IPIP,
+	},
+	{
+		.name = "gre",
+		.test_setup = gre_setup,
+		.test_teardown = gre_shutdown,
+		.source_ports = { 8, 9, 10 },
+		.cfg_l3_inner = PF_INET,
+		.in_saddr4 = TEST_IN4_SRC_ADDR_IPIP,
+		.in_daddr4 = TEST_IN4_DST_ADDR_IPIP,
+		.out_saddr4 = TEST_OUT4_SRC_ADDR_DEFAULT,
+		.out_daddr4 = TEST_OUT4_DST_ADDR_DEFAULT,
+		.cfg_l3_outer = PF_INET,
+		.cfg_encap_proto = IPPROTO_GRE,
+	},
+	{
+		.name = "port_range",
+		.test_setup = port_range_setup,
+		.test_teardown = remove_filter,
+		.source_ports = { 7, 9, 11 },
+		.cfg_l3_inner = PF_INET,
+		.in_saddr4 = TEST_IN4_SRC_ADDR_DEFAULT,
+		.in_daddr4 = TEST_IN4_DST_ADDR_DEFAULT
+	},
+	{
+		.name = "ipv6",
+		.test_setup = ipv6_setup,
+		.test_teardown = remove_filter,
+		.source_ports = { 8, 9, 10 },
+		.cfg_l3_inner = PF_INET6,
+		.in_saddr6 = TEST_IN6_SRC_ADDR_DEFAULT,
+		.in_daddr6 = TEST_IN6_DST_ADDR_DEFAULT
+	},
+};
+
+struct test_ctx {
+	struct bpf_flow *skel;
+	struct netns_obj *ns;
+	int prog_fd;
+};
+
+static int test_global_init(struct test_ctx *ctx)
+{
+	int err;
+
+	ctx->skel = bpf_flow__open_and_load();
+	if (!ASSERT_OK_PTR(ctx->skel, "open and load flow_dissector"))
+		return -1;
+
+	ctx->ns = netns_new("flow_dissector_classification", true);
+	if (!ASSERT_OK_PTR(ctx->ns, "switch ns"))
+		goto out_destroy_skel;
+
+	err = write_sysctl("/proc/sys/net/ipv4/conf/default/rp_filter", "0");
+	err |= write_sysctl("/proc/sys/net/ipv4/conf/all/rp_filter", "0");
+	err |= write_sysctl("/proc/sys/net/ipv4/conf/lo/rp_filter", "0");
+	if (!ASSERT_OK(err, "configure net tunables"))
+		goto out_clean_ns;
+
+	ctx->prog_fd = attach_and_configure_program(ctx->skel);
+	if (!ASSERT_OK_FD(ctx->prog_fd, "attach and configure program"))
+		goto out_clean_ns;
+	return 0;
+out_clean_ns:
+	netns_free(ctx->ns);
+out_destroy_skel:
+	bpf_flow__destroy(ctx->skel);
+	return -1;
+}
+
+static void test_global_shutdown(struct test_ctx *ctx)
+{
+	detach_program(ctx->skel, ctx->prog_fd);
+	netns_free(ctx->ns);
+	bpf_flow__destroy(ctx->skel);
+}
+
+void test_flow_dissector_classification(void)
+{
+	struct test_ctx ctx;
+	const struct test_configuration *test;
+	int i;
+
+	if (test_global_init(&ctx))
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(tests_input); i++) {
+		if
(!test__start_subtest(tests_input[i].name)) + continue; + test = &tests_input[i]; + /* All tests are expected to have one rx-ok port first, + * then a non-working rx port, and finally a rx-ok port + */ + if (test->test_setup && + !ASSERT_OK(test->test_setup(), "init filter")) + continue; + + ASSERT_EQ(run_test(test, 0), TEST_PACKETS_COUNT, + "test first port"); + ASSERT_EQ(run_test(test, 1), 0, "test second port"); + ASSERT_EQ(run_test(test, 2), TEST_PACKETS_COUNT, + "test third port"); + if (test->test_teardown) + test->test_teardown(); + } + test_global_shutdown(&ctx); +} diff --git a/tools/testing/selftests/bpf/prog_tests/for_each.c b/tools/testing/selftests/bpf/prog_tests/for_each.c index 09f6487f58b9..5fea3209566e 100644 --- a/tools/testing/selftests/bpf/prog_tests/for_each.c +++ b/tools/testing/selftests/bpf/prog_tests/for_each.c @@ -6,6 +6,7 @@ #include "for_each_array_map_elem.skel.h" #include "for_each_map_elem_write_key.skel.h" #include "for_each_multi_maps.skel.h" +#include "for_each_hash_modify.skel.h" static unsigned int duration; @@ -203,6 +204,40 @@ out: for_each_multi_maps__destroy(skel); } +static void test_hash_modify(void) +{ + struct for_each_hash_modify *skel; + int max_entries, i, err; + __u64 key, val; + + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1 + ); + + skel = for_each_hash_modify__open_and_load(); + if (!ASSERT_OK_PTR(skel, "for_each_hash_modify__open_and_load")) + return; + + max_entries = bpf_map__max_entries(skel->maps.hashmap); + for (i = 0; i < max_entries; i++) { + key = i; + val = i; + err = bpf_map__update_elem(skel->maps.hashmap, &key, sizeof(key), + &val, sizeof(val), BPF_ANY); + if (!ASSERT_OK(err, "map_update")) + goto out; + } + + err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts); + ASSERT_OK(err, "bpf_prog_test_run_opts"); + ASSERT_OK(topts.retval, "retval"); + +out: + for_each_hash_modify__destroy(skel); +} + void test_for_each(void) { if (test__start_subtest("hash_map")) @@ -213,4 +248,6 @@ void test_for_each(void) test_write_map_key(); if (test__start_subtest("multi_maps")) test_multi_maps(); + if (test__start_subtest("hash_modify")) + test_hash_modify(); } diff --git a/tools/testing/selftests/bpf/prog_tests/free_timer.c b/tools/testing/selftests/bpf/prog_tests/free_timer.c new file mode 100644 index 000000000000..b7b77a6b2979 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/free_timer.c @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2025. 
Huawei Technologies Co., Ltd */ +#define _GNU_SOURCE +#include <unistd.h> +#include <sys/syscall.h> +#include <test_progs.h> + +#include "free_timer.skel.h" + +struct run_ctx { + struct bpf_program *start_prog; + struct bpf_program *overwrite_prog; + pthread_barrier_t notify; + int loop; + bool start; + bool stop; +}; + +static void start_threads(struct run_ctx *ctx) +{ + ctx->start = true; +} + +static void stop_threads(struct run_ctx *ctx) +{ + ctx->stop = true; + /* Guarantee the order between ->stop and ->start */ + __atomic_store_n(&ctx->start, true, __ATOMIC_RELEASE); +} + +static int wait_for_start(struct run_ctx *ctx) +{ + while (!__atomic_load_n(&ctx->start, __ATOMIC_ACQUIRE)) + usleep(10); + + return ctx->stop; +} + +static void *overwrite_timer_fn(void *arg) +{ + struct run_ctx *ctx = arg; + int loop, fd, err; + cpu_set_t cpuset; + long ret = 0; + + /* Pin on CPU 0 */ + CPU_ZERO(&cpuset); + CPU_SET(0, &cpuset); + pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset); + + /* Is the thread being stopped ? */ + err = wait_for_start(ctx); + if (err) + return NULL; + + fd = bpf_program__fd(ctx->overwrite_prog); + loop = ctx->loop; + while (loop-- > 0) { + LIBBPF_OPTS(bpf_test_run_opts, opts); + + /* Wait for start thread to complete */ + pthread_barrier_wait(&ctx->notify); + + /* Overwrite timers */ + err = bpf_prog_test_run_opts(fd, &opts); + if (err) + ret |= 1; + else if (opts.retval) + ret |= 2; + + /* Notify start thread to start timers */ + pthread_barrier_wait(&ctx->notify); + } + + return (void *)ret; +} + +static void *start_timer_fn(void *arg) +{ + struct run_ctx *ctx = arg; + int loop, fd, err; + cpu_set_t cpuset; + long ret = 0; + + /* Pin on CPU 1 */ + CPU_ZERO(&cpuset); + CPU_SET(1, &cpuset); + pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset); + + /* Is the thread being stopped ? 
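+	 * (stop_threads() writes ->stop before its release-store to ->start,
+	 * so a thread woken by wait_for_start() sees a consistent ->stop.)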
*/ + err = wait_for_start(ctx); + if (err) + return NULL; + + fd = bpf_program__fd(ctx->start_prog); + loop = ctx->loop; + while (loop-- > 0) { + LIBBPF_OPTS(bpf_test_run_opts, opts); + + /* Run the prog to start timer */ + err = bpf_prog_test_run_opts(fd, &opts); + if (err) + ret |= 4; + else if (opts.retval) + ret |= 8; + + /* Notify overwrite thread to do overwrite */ + pthread_barrier_wait(&ctx->notify); + + /* Wait for overwrite thread to complete */ + pthread_barrier_wait(&ctx->notify); + } + + return (void *)ret; +} + +void test_free_timer(void) +{ + struct free_timer *skel; + struct bpf_program *prog; + struct run_ctx ctx; + pthread_t tid[2]; + void *ret; + int err; + + skel = free_timer__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open_load")) + return; + + memset(&ctx, 0, sizeof(ctx)); + + prog = bpf_object__find_program_by_name(skel->obj, "start_timer"); + if (!ASSERT_OK_PTR(prog, "find start prog")) + goto out; + ctx.start_prog = prog; + + prog = bpf_object__find_program_by_name(skel->obj, "overwrite_timer"); + if (!ASSERT_OK_PTR(prog, "find overwrite prog")) + goto out; + ctx.overwrite_prog = prog; + + pthread_barrier_init(&ctx.notify, NULL, 2); + ctx.loop = 10; + + err = pthread_create(&tid[0], NULL, start_timer_fn, &ctx); + if (!ASSERT_OK(err, "create start_timer")) + goto out; + + err = pthread_create(&tid[1], NULL, overwrite_timer_fn, &ctx); + if (!ASSERT_OK(err, "create overwrite_timer")) { + stop_threads(&ctx); + goto out; + } + + start_threads(&ctx); + + ret = NULL; + err = pthread_join(tid[0], &ret); + ASSERT_EQ(err | (long)ret, 0, "start_timer"); + ret = NULL; + err = pthread_join(tid[1], &ret); + ASSERT_EQ(err | (long)ret, 0, "overwrite_timer"); +out: + free_timer__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/fs_kfuncs.c b/tools/testing/selftests/bpf/prog_tests/fs_kfuncs.c index 5a0b51157451..43a26ec69a8e 100644 --- a/tools/testing/selftests/bpf/prog_tests/fs_kfuncs.c +++ b/tools/testing/selftests/bpf/prog_tests/fs_kfuncs.c @@ -8,11 +8,12 @@ #include <unistd.h> #include <test_progs.h> #include "test_get_xattr.skel.h" +#include "test_set_remove_xattr.skel.h" #include "test_fsverity.skel.h" static const char testfile[] = "/tmp/test_progs_fs_kfuncs"; -static void test_xattr(void) +static void test_get_xattr(const char *name, const char *value, bool allow_access) { struct test_get_xattr *skel = NULL; int fd = -1, err; @@ -25,7 +26,7 @@ static void test_xattr(void) close(fd); fd = -1; - err = setxattr(testfile, "user.kfuncs", "hello", sizeof("hello"), 0); + err = setxattr(testfile, name, value, strlen(value) + 1, 0); if (err && errno == EOPNOTSUPP) { printf("%s:SKIP:local fs doesn't support xattr (%d)\n" "To run this test, make sure /tmp filesystem supports xattr.\n", @@ -48,16 +49,23 @@ static void test_xattr(void) goto out; fd = open(testfile, O_RDONLY, 0644); + if (!ASSERT_GE(fd, 0, "open_file")) goto out; - ASSERT_EQ(skel->bss->found_xattr_from_file, 1, "found_xattr_from_file"); - /* Trigger security_inode_getxattr */ - err = getxattr(testfile, "user.kfuncs", v, sizeof(v)); - ASSERT_EQ(err, -1, "getxattr_return"); - ASSERT_EQ(errno, EINVAL, "getxattr_errno"); - ASSERT_EQ(skel->bss->found_xattr_from_dentry, 1, "found_xattr_from_dentry"); + err = getxattr(testfile, name, v, sizeof(v)); + + if (allow_access) { + ASSERT_EQ(err, -1, "getxattr_return"); + ASSERT_EQ(errno, EINVAL, "getxattr_errno"); + ASSERT_EQ(skel->bss->found_xattr_from_file, 1, "found_xattr_from_file"); + ASSERT_EQ(skel->bss->found_xattr_from_dentry, 1, 
"found_xattr_from_dentry"); + } else { + ASSERT_EQ(err, strlen(value) + 1, "getxattr_return"); + ASSERT_EQ(skel->bss->found_xattr_from_file, 0, "found_xattr_from_file"); + ASSERT_EQ(skel->bss->found_xattr_from_dentry, 0, "found_xattr_from_dentry"); + } out: close(fd); @@ -65,6 +73,127 @@ out: remove(testfile); } +/* xattr value we will set to security.bpf.foo */ +static const char value_foo[] = "hello"; + +static void read_and_validate_foo(struct test_set_remove_xattr *skel) +{ + char value_out[32]; + int err; + + err = getxattr(testfile, skel->rodata->xattr_foo, value_out, sizeof(value_out)); + ASSERT_EQ(err, sizeof(value_foo), "getxattr size foo"); + ASSERT_EQ(strncmp(value_out, value_foo, sizeof(value_foo)), 0, "strncmp value_foo"); +} + +static void set_foo(struct test_set_remove_xattr *skel) +{ + ASSERT_OK(setxattr(testfile, skel->rodata->xattr_foo, value_foo, strlen(value_foo) + 1, 0), + "setxattr foo"); +} + +static void validate_bar_match(struct test_set_remove_xattr *skel) +{ + char value_out[32]; + int err; + + err = getxattr(testfile, skel->rodata->xattr_bar, value_out, sizeof(value_out)); + ASSERT_EQ(err, sizeof(skel->data->value_bar), "getxattr size bar"); + ASSERT_EQ(strncmp(value_out, skel->data->value_bar, sizeof(skel->data->value_bar)), 0, + "strncmp value_bar"); +} + +static void validate_bar_removed(struct test_set_remove_xattr *skel) +{ + char value_out[32]; + int err; + + err = getxattr(testfile, skel->rodata->xattr_bar, value_out, sizeof(value_out)); + ASSERT_LT(err, 0, "getxattr size bar should fail"); +} + +static void test_set_remove_xattr(void) +{ + struct test_set_remove_xattr *skel = NULL; + int fd = -1, err; + + fd = open(testfile, O_CREAT | O_RDONLY, 0644); + if (!ASSERT_GE(fd, 0, "create_file")) + return; + + close(fd); + fd = -1; + + skel = test_set_remove_xattr__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_set_remove_xattr__open_and_load")) + return; + + /* Set security.bpf.foo to "hello" */ + err = setxattr(testfile, skel->rodata->xattr_foo, value_foo, strlen(value_foo) + 1, 0); + if (err && errno == EOPNOTSUPP) { + printf("%s:SKIP:local fs doesn't support xattr (%d)\n" + "To run this test, make sure /tmp filesystem supports xattr.\n", + __func__, errno); + test__skip(); + goto out; + } + + if (!ASSERT_OK(err, "setxattr")) + goto out; + + skel->bss->monitored_pid = getpid(); + err = test_set_remove_xattr__attach(skel); + if (!ASSERT_OK(err, "test_set_remove_xattr__attach")) + goto out; + + /* First, test not _locked version of the kfuncs with getxattr. */ + + /* Read security.bpf.foo and trigger test_inode_getxattr. This + * bpf program will set security.bpf.bar to "world". + */ + read_and_validate_foo(skel); + validate_bar_match(skel); + + /* Read security.bpf.foo and trigger test_inode_getxattr again. + * This will remove xattr security.bpf.bar. + */ + read_and_validate_foo(skel); + validate_bar_removed(skel); + + ASSERT_TRUE(skel->bss->set_security_bpf_bar_success, "set_security_bpf_bar_success"); + ASSERT_TRUE(skel->bss->remove_security_bpf_bar_success, "remove_security_bpf_bar_success"); + ASSERT_TRUE(skel->bss->set_security_selinux_fail, "set_security_selinux_fail"); + ASSERT_TRUE(skel->bss->remove_security_selinux_fail, "remove_security_selinux_fail"); + + /* Second, test _locked version of the kfuncs, with setxattr */ + + /* Set security.bpf.foo and trigger test_inode_setxattr. This + * bpf program will set security.bpf.bar to "world". 
+ */ + set_foo(skel); + validate_bar_match(skel); + + /* Set security.bpf.foo and trigger test_inode_setxattr again. + * This will remove xattr security.bpf.bar. + */ + set_foo(skel); + validate_bar_removed(skel); + + ASSERT_TRUE(skel->bss->locked_set_security_bpf_bar_success, + "locked_set_security_bpf_bar_success"); + ASSERT_TRUE(skel->bss->locked_remove_security_bpf_bar_success, + "locked_remove_security_bpf_bar_success"); + ASSERT_TRUE(skel->bss->locked_set_security_selinux_fail, + "locked_set_security_selinux_fail"); + ASSERT_TRUE(skel->bss->locked_remove_security_selinux_fail, + "locked_remove_security_selinux_fail"); + +out: + close(fd); + test_set_remove_xattr__destroy(skel); + remove(testfile); +} + #ifndef SHA256_DIGEST_SIZE #define SHA256_DIGEST_SIZE 32 #endif @@ -141,8 +270,21 @@ out: void test_fs_kfuncs(void) { - if (test__start_subtest("xattr")) - test_xattr(); + /* Matches xattr_names in progs/test_get_xattr.c */ + if (test__start_subtest("user_xattr")) + test_get_xattr("user.kfuncs", "hello", true); + + if (test__start_subtest("security_bpf_xattr")) + test_get_xattr("security.bpf.xxx", "hello", true); + + if (test__start_subtest("security_bpf_xattr_error")) + test_get_xattr("security.bpf", "hello", false); + + if (test__start_subtest("security_selinux_xattr_error")) + test_get_xattr("security.selinux", "hello", false); + + if (test__start_subtest("set_remove_xattr")) + test_set_remove_xattr(); if (test__start_subtest("fsverity")) test_fsverity(); diff --git a/tools/testing/selftests/bpf/prog_tests/kernel_flag.c b/tools/testing/selftests/bpf/prog_tests/kernel_flag.c new file mode 100644 index 000000000000..a133354ac9bc --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/kernel_flag.c @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Microsoft */ +#include <test_progs.h> +#include "kfunc_call_test.skel.h" +#include "kfunc_call_test.lskel.h" +#include "test_kernel_flag.skel.h" + +void test_kernel_flag(void) +{ + struct test_kernel_flag *lsm_skel; + struct kfunc_call_test *skel = NULL; + struct kfunc_call_test_lskel *lskel = NULL; + int ret; + + lsm_skel = test_kernel_flag__open_and_load(); + if (!ASSERT_OK_PTR(lsm_skel, "lsm_skel")) + return; + + lsm_skel->bss->monitored_tid = gettid(); + + ret = test_kernel_flag__attach(lsm_skel); + if (!ASSERT_OK(ret, "test_kernel_flag__attach")) + goto close_prog; + + /* Test with skel. This should pass the gatekeeper */ + skel = kfunc_call_test__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel")) + goto close_prog; + + /* Test with lskel. 
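+	 * A light skeleton loads its programs from kernel context via a
+	 * loader program, so the bpf() invocations carry the kernel flag.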
This should fail due to blocking kernel-based bpf() invocations */ + lskel = kfunc_call_test_lskel__open_and_load(); + if (!ASSERT_ERR_PTR(lskel, "lskel")) + goto close_prog; + +close_prog: + if (skel) + kfunc_call_test__destroy(skel); + if (lskel) + kfunc_call_test_lskel__destroy(lskel); + + lsm_skel->bss->monitored_tid = 0; + test_kernel_flag__destroy(lsm_skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c b/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c index 8e13a3416a21..1de14b111931 100644 --- a/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c +++ b/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c @@ -104,7 +104,7 @@ void test_kmem_cache_iter(void) goto destroy; memset(buf, 0, sizeof(buf)); - while (read(iter_fd, buf, sizeof(buf) > 0)) { + while (read(iter_fd, buf, sizeof(buf)) > 0) { /* Read out all contents */ printf("%s", buf); } diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c index 66ab1cae923e..e19ef509ebf8 100644 --- a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c +++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c @@ -397,6 +397,31 @@ cleanup: kprobe_multi_session_cookie__destroy(skel); } +static void test_unique_match(void) +{ + LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); + struct kprobe_multi *skel = NULL; + struct bpf_link *link = NULL; + + skel = kprobe_multi__open_and_load(); + if (!ASSERT_OK_PTR(skel, "kprobe_multi__open_and_load")) + return; + + opts.unique_match = true; + skel->bss->pid = getpid(); + link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual, + "bpf_fentry_test*", &opts); + if (!ASSERT_ERR_PTR(link, "bpf_program__attach_kprobe_multi_opts")) + bpf_link__destroy(link); + + link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual, + "bpf_fentry_test8*", &opts); + if (ASSERT_OK_PTR(link, "bpf_program__attach_kprobe_multi_opts")) + bpf_link__destroy(link); + + kprobe_multi__destroy(skel); +} + static size_t symbol_hash(long key, void *ctx __maybe_unused) { return str_hash((const char *) key); @@ -765,5 +790,7 @@ void test_kprobe_multi_test(void) test_session_skel_api(); if (test__start_subtest("session_cookie")) test_session_cookie_skel_api(); + if (test__start_subtest("unique_match")) + test_unique_match(); RUN_TESTS(kprobe_multi_verifier); } diff --git a/tools/testing/selftests/bpf/prog_tests/linked_list.c b/tools/testing/selftests/bpf/prog_tests/linked_list.c index 77d07e0a4a55..14c5a7ef0e87 100644 --- a/tools/testing/selftests/bpf/prog_tests/linked_list.c +++ b/tools/testing/selftests/bpf/prog_tests/linked_list.c @@ -7,6 +7,7 @@ #include "linked_list.skel.h" #include "linked_list_fail.skel.h" +#include "linked_list_peek.skel.h" static char log_buf[1024 * 1024]; @@ -71,7 +72,7 @@ static struct { { "new_null_ret", "R0 invalid mem access 'ptr_or_null_'" }, { "obj_new_acq", "Unreleased reference id=" }, { "use_after_drop", "invalid mem access 'scalar'" }, - { "ptr_walk_scalar", "type=scalar expected=percpu_ptr_" }, + { "ptr_walk_scalar", "type=rdonly_untrusted_mem expected=percpu_ptr_" }, { "direct_read_lock", "direct access to bpf_spin_lock is disallowed" }, { "direct_write_lock", "direct access to bpf_spin_lock is disallowed" }, { "direct_read_head", "direct access to bpf_list_head is disallowed" }, @@ -805,3 +806,8 @@ void test_linked_list(void) test_linked_list_success(LIST_IN_LIST, true); test_linked_list_success(TEST_ALL, false); } + +void 
test_linked_list_peek(void)
+{
+	RUN_TESTS(linked_list_peek);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/log_buf.c b/tools/testing/selftests/bpf/prog_tests/log_buf.c
index 169ce689b97c..d6f14a232002 100644
--- a/tools/testing/selftests/bpf/prog_tests/log_buf.c
+++ b/tools/testing/selftests/bpf/prog_tests/log_buf.c
@@ -7,6 +7,10 @@
 #include "test_log_buf.skel.h"
 #include "bpf_util.h"
 
+#if !defined(__clang__)
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
+
 static size_t libbpf_log_pos;
 static char libbpf_log_buf[1024 * 1024];
 static bool libbpf_log_error;
diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h b/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h
index fb1eb8c67361..ccec0fcdabc1 100644
--- a/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h
+++ b/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h
@@ -5,7 +5,6 @@
 #include <time.h>
 #include <net/if.h>
-#include <linux/if_tun.h>
 #include <linux/icmp.h>
 
 #include "test_progs.h"
@@ -37,34 +36,6 @@ static inline int netns_delete(void)
 	return system("ip netns del " NETNS ">/dev/null 2>&1");
 }
 
-static int open_tuntap(const char *dev_name, bool need_mac)
-{
-	int err = 0;
-	struct ifreq ifr;
-	int fd = open("/dev/net/tun", O_RDWR);
-
-	if (!ASSERT_GT(fd, 0, "open(/dev/net/tun)"))
-		return -1;
-
-	ifr.ifr_flags = IFF_NO_PI | (need_mac ? IFF_TAP : IFF_TUN);
-	strncpy(ifr.ifr_name, dev_name, IFNAMSIZ - 1);
-	ifr.ifr_name[IFNAMSIZ - 1] = '\0';
-
-	err = ioctl(fd, TUNSETIFF, &ifr);
-	if (!ASSERT_OK(err, "ioctl(TUNSETIFF)")) {
-		close(fd);
-		return -1;
-	}
-
-	err = fcntl(fd, F_SETFL, O_NONBLOCK);
-	if (!ASSERT_OK(err, "fcntl(O_NONBLOCK)")) {
-		close(fd);
-		return -1;
-	}
-
-	return fd;
-}
-
 #define ICMP_PAYLOAD_SIZE 100
 
 /* Match an ICMP packet with payload len ICMP_PAYLOAD_SIZE */
diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_ip_encap.c b/tools/testing/selftests/bpf/prog_tests/lwt_ip_encap.c
new file mode 100644
index 000000000000..b6391af5f6f9
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/lwt_ip_encap.c
@@ -0,0 +1,540 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <netinet/in.h>
+
+#include "network_helpers.h"
+#include "test_progs.h"
+
+#define BPF_FILE "test_lwt_ip_encap.bpf.o"
+
+#define NETNS_NAME_SIZE 32
+#define NETNS_BASE "ns-lwt-ip-encap"
+
+#define IP4_ADDR_1 "172.16.1.100"
+#define IP4_ADDR_2 "172.16.2.100"
+#define IP4_ADDR_3 "172.16.3.100"
+#define IP4_ADDR_4 "172.16.4.100"
+#define IP4_ADDR_5 "172.16.5.100"
+#define IP4_ADDR_6 "172.16.6.100"
+#define IP4_ADDR_7 "172.16.7.100"
+#define IP4_ADDR_8 "172.16.8.100"
+#define IP4_ADDR_GRE "172.16.16.100"
+
+#define IP4_ADDR_SRC IP4_ADDR_1
+#define IP4_ADDR_DST IP4_ADDR_4
+
+#define IP6_ADDR_1 "fb01::1"
+#define IP6_ADDR_2 "fb02::1"
+#define IP6_ADDR_3 "fb03::1"
+#define IP6_ADDR_4 "fb04::1"
+#define IP6_ADDR_5 "fb05::1"
+#define IP6_ADDR_6 "fb06::1"
+#define IP6_ADDR_7 "fb07::1"
+#define IP6_ADDR_8 "fb08::1"
+#define IP6_ADDR_GRE "fb10::1"
+
+#define IP6_ADDR_SRC IP6_ADDR_1
+#define IP6_ADDR_DST IP6_ADDR_4
+
+/* Setup/topology:
+ *
+ *    NS1             NS2             NS3
+ *    veth1 <---> veth2   veth3 <---> veth4 (the top route)
+ *    veth5 <---> veth6   veth7 <---> veth8 (the bottom route)
+ *
+ * Each vethN gets the IP[4|6]_ADDR_N address.
+ *
+ * IP*_ADDR_SRC = IP*_ADDR_1
+ * IP*_ADDR_DST = IP*_ADDR_4
+ *
+ * All tests send pings from IP*_ADDR_SRC to IP*_ADDR_DST.
+ *
+ * By default, routes are configured to allow packets to go
+ * IP*_ADDR_1 <=> IP*_ADDR_2 <=> IP*_ADDR_3 <=> IP*_ADDR_4 (the top route).
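+ *
+ * NS2 has IP forwarding enabled (see configure_ns2() below) and acts as
+ * the router on both paths.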
+ * + * A GRE device is installed in NS3 with IP*_ADDR_GRE, and + * NS1/NS2 are configured to route packets to IP*_ADDR_GRE via IP*_ADDR_8 + * (the bottom route). + * + * Tests: + * + * 1. Routes NS2->IP*_ADDR_DST are brought down, so the only way a ping + * from IP*_ADDR_SRC to IP*_ADDR_DST can work is via IP*_ADDR_GRE. + * + * 2a. In an egress test, a bpf LWT_XMIT program is installed on veth1 + * that encaps the packets with an IP/GRE header to route to IP*_ADDR_GRE. + * + * ping: SRC->[encap at veth1:egress]->GRE:decap->DST + * ping replies go DST->SRC directly + * + * 2b. In an ingress test, a bpf LWT_IN program is installed on veth2 + * that encaps the packets with an IP/GRE header to route to IP*_ADDR_GRE. + * + * ping: SRC->[encap at veth2:ingress]->GRE:decap->DST + * ping replies go DST->SRC directly + */ + +static int create_ns(char *name, size_t name_sz) +{ + if (!name) + goto fail; + + if (!ASSERT_OK(append_tid(name, name_sz), "append TID")) + goto fail; + + SYS(fail, "ip netns add %s", name); + + /* rp_filter gets confused by what these tests are doing, so disable it */ + SYS(fail, "ip netns exec %s sysctl -wq net.ipv4.conf.all.rp_filter=0", name); + SYS(fail, "ip netns exec %s sysctl -wq net.ipv4.conf.default.rp_filter=0", name); + /* Disable IPv6 DAD because it sometimes takes too long and fails tests */ + SYS(fail, "ip netns exec %s sysctl -wq net.ipv6.conf.all.accept_dad=0", name); + SYS(fail, "ip netns exec %s sysctl -wq net.ipv6.conf.default.accept_dad=0", name); + + return 0; +fail: + return -1; +} + +static int set_top_addr(const char *ns1, const char *ns2, const char *ns3) +{ + SYS(fail, "ip -n %s a add %s/24 dev veth1", ns1, IP4_ADDR_1); + SYS(fail, "ip -n %s a add %s/24 dev veth2", ns2, IP4_ADDR_2); + SYS(fail, "ip -n %s a add %s/24 dev veth3", ns2, IP4_ADDR_3); + SYS(fail, "ip -n %s a add %s/24 dev veth4", ns3, IP4_ADDR_4); + SYS(fail, "ip -n %s -6 a add %s/128 dev veth1", ns1, IP6_ADDR_1); + SYS(fail, "ip -n %s -6 a add %s/128 dev veth2", ns2, IP6_ADDR_2); + SYS(fail, "ip -n %s -6 a add %s/128 dev veth3", ns2, IP6_ADDR_3); + SYS(fail, "ip -n %s -6 a add %s/128 dev veth4", ns3, IP6_ADDR_4); + + SYS(fail, "ip -n %s link set dev veth1 up", ns1); + SYS(fail, "ip -n %s link set dev veth2 up", ns2); + SYS(fail, "ip -n %s link set dev veth3 up", ns2); + SYS(fail, "ip -n %s link set dev veth4 up", ns3); + + return 0; +fail: + return 1; +} + +static int set_bottom_addr(const char *ns1, const char *ns2, const char *ns3) +{ + SYS(fail, "ip -n %s a add %s/24 dev veth5", ns1, IP4_ADDR_5); + SYS(fail, "ip -n %s a add %s/24 dev veth6", ns2, IP4_ADDR_6); + SYS(fail, "ip -n %s a add %s/24 dev veth7", ns2, IP4_ADDR_7); + SYS(fail, "ip -n %s a add %s/24 dev veth8", ns3, IP4_ADDR_8); + SYS(fail, "ip -n %s -6 a add %s/128 dev veth5", ns1, IP6_ADDR_5); + SYS(fail, "ip -n %s -6 a add %s/128 dev veth6", ns2, IP6_ADDR_6); + SYS(fail, "ip -n %s -6 a add %s/128 dev veth7", ns2, IP6_ADDR_7); + SYS(fail, "ip -n %s -6 a add %s/128 dev veth8", ns3, IP6_ADDR_8); + + SYS(fail, "ip -n %s link set dev veth5 up", ns1); + SYS(fail, "ip -n %s link set dev veth6 up", ns2); + SYS(fail, "ip -n %s link set dev veth7 up", ns2); + SYS(fail, "ip -n %s link set dev veth8 up", ns3); + + return 0; +fail: + return 1; +} + +static int configure_vrf(const char *ns1, const char *ns2) +{ + if (!ns1 || !ns2) + goto fail; + + SYS(fail, "ip -n %s link add red type vrf table 1001", ns1); + SYS(fail, "ip -n %s link set red up", ns1); + SYS(fail, "ip -n %s route add table 1001 unreachable default metric 8192", ns1); + 
SYS(fail, "ip -n %s -6 route add table 1001 unreachable default metric 8192", ns1); + SYS(fail, "ip -n %s link set veth1 vrf red", ns1); + SYS(fail, "ip -n %s link set veth5 vrf red", ns1); + + SYS(fail, "ip -n %s link add red type vrf table 1001", ns2); + SYS(fail, "ip -n %s link set red up", ns2); + SYS(fail, "ip -n %s route add table 1001 unreachable default metric 8192", ns2); + SYS(fail, "ip -n %s -6 route add table 1001 unreachable default metric 8192", ns2); + SYS(fail, "ip -n %s link set veth2 vrf red", ns2); + SYS(fail, "ip -n %s link set veth3 vrf red", ns2); + SYS(fail, "ip -n %s link set veth6 vrf red", ns2); + SYS(fail, "ip -n %s link set veth7 vrf red", ns2); + + return 0; +fail: + return -1; +} + +static int configure_ns1(const char *ns1, const char *vrf) +{ + struct nstoken *nstoken = NULL; + + if (!ns1 || !vrf) + goto fail; + + nstoken = open_netns(ns1); + if (!ASSERT_OK_PTR(nstoken, "open ns1")) + goto fail; + + /* Top route */ + SYS(fail, "ip route add %s/32 dev veth1 %s", IP4_ADDR_2, vrf); + SYS(fail, "ip route add default dev veth1 via %s %s", IP4_ADDR_2, vrf); + SYS(fail, "ip -6 route add %s/128 dev veth1 %s", IP6_ADDR_2, vrf); + SYS(fail, "ip -6 route add default dev veth1 via %s %s", IP6_ADDR_2, vrf); + /* Bottom route */ + SYS(fail, "ip route add %s/32 dev veth5 %s", IP4_ADDR_6, vrf); + SYS(fail, "ip route add %s/32 dev veth5 via %s %s", IP4_ADDR_7, IP4_ADDR_6, vrf); + SYS(fail, "ip route add %s/32 dev veth5 via %s %s", IP4_ADDR_8, IP4_ADDR_6, vrf); + SYS(fail, "ip -6 route add %s/128 dev veth5 %s", IP6_ADDR_6, vrf); + SYS(fail, "ip -6 route add %s/128 dev veth5 via %s %s", IP6_ADDR_7, IP6_ADDR_6, vrf); + SYS(fail, "ip -6 route add %s/128 dev veth5 via %s %s", IP6_ADDR_8, IP6_ADDR_6, vrf); + + close_netns(nstoken); + return 0; +fail: + close_netns(nstoken); + return -1; +} + +static int configure_ns2(const char *ns2, const char *vrf) +{ + struct nstoken *nstoken = NULL; + + if (!ns2 || !vrf) + goto fail; + + nstoken = open_netns(ns2); + if (!ASSERT_OK_PTR(nstoken, "open ns2")) + goto fail; + + SYS(fail, "ip netns exec %s sysctl -wq net.ipv4.ip_forward=1", ns2); + SYS(fail, "ip netns exec %s sysctl -wq net.ipv6.conf.all.forwarding=1", ns2); + + /* Top route */ + SYS(fail, "ip route add %s/32 dev veth2 %s", IP4_ADDR_1, vrf); + SYS(fail, "ip route add %s/32 dev veth3 %s", IP4_ADDR_4, vrf); + SYS(fail, "ip -6 route add %s/128 dev veth2 %s", IP6_ADDR_1, vrf); + SYS(fail, "ip -6 route add %s/128 dev veth3 %s", IP6_ADDR_4, vrf); + /* Bottom route */ + SYS(fail, "ip route add %s/32 dev veth6 %s", IP4_ADDR_5, vrf); + SYS(fail, "ip route add %s/32 dev veth7 %s", IP4_ADDR_8, vrf); + SYS(fail, "ip -6 route add %s/128 dev veth6 %s", IP6_ADDR_5, vrf); + SYS(fail, "ip -6 route add %s/128 dev veth7 %s", IP6_ADDR_8, vrf); + + close_netns(nstoken); + return 0; +fail: + close_netns(nstoken); + return -1; +} + +static int configure_ns3(const char *ns3) +{ + struct nstoken *nstoken = NULL; + + if (!ns3) + goto fail; + + nstoken = open_netns(ns3); + if (!ASSERT_OK_PTR(nstoken, "open ns3")) + goto fail; + + /* Top route */ + SYS(fail, "ip route add %s/32 dev veth4", IP4_ADDR_3); + SYS(fail, "ip route add %s/32 dev veth4 via %s", IP4_ADDR_1, IP4_ADDR_3); + SYS(fail, "ip route add %s/32 dev veth4 via %s", IP4_ADDR_2, IP4_ADDR_3); + SYS(fail, "ip -6 route add %s/128 dev veth4", IP6_ADDR_3); + SYS(fail, "ip -6 route add %s/128 dev veth4 via %s", IP6_ADDR_1, IP6_ADDR_3); + SYS(fail, "ip -6 route add %s/128 dev veth4 via %s", IP6_ADDR_2, IP6_ADDR_3); + /* Bottom route */ + SYS(fail, "ip route 
add %s/32 dev veth8", IP4_ADDR_7); + SYS(fail, "ip route add %s/32 dev veth8 via %s", IP4_ADDR_5, IP4_ADDR_7); + SYS(fail, "ip route add %s/32 dev veth8 via %s", IP4_ADDR_6, IP4_ADDR_7); + SYS(fail, "ip -6 route add %s/128 dev veth8", IP6_ADDR_7); + SYS(fail, "ip -6 route add %s/128 dev veth8 via %s", IP6_ADDR_5, IP6_ADDR_7); + SYS(fail, "ip -6 route add %s/128 dev veth8 via %s", IP6_ADDR_6, IP6_ADDR_7); + + /* Configure IPv4 GRE device */ + SYS(fail, "ip tunnel add gre_dev mode gre remote %s local %s ttl 255", + IP4_ADDR_1, IP4_ADDR_GRE); + SYS(fail, "ip link set gre_dev up"); + SYS(fail, "ip a add %s dev gre_dev", IP4_ADDR_GRE); + + /* Configure IPv6 GRE device */ + SYS(fail, "ip tunnel add gre6_dev mode ip6gre remote %s local %s ttl 255", + IP6_ADDR_1, IP6_ADDR_GRE); + SYS(fail, "ip link set gre6_dev up"); + SYS(fail, "ip a add %s dev gre6_dev", IP6_ADDR_GRE); + + close_netns(nstoken); + return 0; +fail: + close_netns(nstoken); + return -1; +} + +static int setup_network(char *ns1, char *ns2, char *ns3, const char *vrf) +{ + if (!ns1 || !ns2 || !ns3 || !vrf) + goto fail; + + SYS(fail, "ip -n %s link add veth1 type veth peer name veth2 netns %s", ns1, ns2); + SYS(fail, "ip -n %s link add veth3 type veth peer name veth4 netns %s", ns2, ns3); + SYS(fail, "ip -n %s link add veth5 type veth peer name veth6 netns %s", ns1, ns2); + SYS(fail, "ip -n %s link add veth7 type veth peer name veth8 netns %s", ns2, ns3); + + if (vrf[0]) { + if (!ASSERT_OK(configure_vrf(ns1, ns2), "configure vrf")) + goto fail; + } + if (!ASSERT_OK(set_top_addr(ns1, ns2, ns3), "set top addresses")) + goto fail; + + if (!ASSERT_OK(set_bottom_addr(ns1, ns2, ns3), "set bottom addresses")) + goto fail; + + if (!ASSERT_OK(configure_ns1(ns1, vrf), "configure ns1 routes")) + goto fail; + + if (!ASSERT_OK(configure_ns2(ns2, vrf), "configure ns2 routes")) + goto fail; + + if (!ASSERT_OK(configure_ns3(ns3), "configure ns3 routes")) + goto fail; + + /* Link bottom route to the GRE tunnels */ + SYS(fail, "ip -n %s route add %s/32 dev veth5 via %s %s", + ns1, IP4_ADDR_GRE, IP4_ADDR_6, vrf); + SYS(fail, "ip -n %s route add %s/32 dev veth7 via %s %s", + ns2, IP4_ADDR_GRE, IP4_ADDR_8, vrf); + SYS(fail, "ip -n %s -6 route add %s/128 dev veth5 via %s %s", + ns1, IP6_ADDR_GRE, IP6_ADDR_6, vrf); + SYS(fail, "ip -n %s -6 route add %s/128 dev veth7 via %s %s", + ns2, IP6_ADDR_GRE, IP6_ADDR_8, vrf); + + return 0; +fail: + return -1; +} + +static int remove_routes_to_gredev(const char *ns1, const char *ns2, const char *vrf) +{ + SYS(fail, "ip -n %s route del %s dev veth5 %s", ns1, IP4_ADDR_GRE, vrf); + SYS(fail, "ip -n %s route del %s dev veth7 %s", ns2, IP4_ADDR_GRE, vrf); + SYS(fail, "ip -n %s -6 route del %s/128 dev veth5 %s", ns1, IP6_ADDR_GRE, vrf); + SYS(fail, "ip -n %s -6 route del %s/128 dev veth7 %s", ns2, IP6_ADDR_GRE, vrf); + + return 0; +fail: + return -1; +} + +static int add_unreachable_routes_to_gredev(const char *ns1, const char *ns2, const char *vrf) +{ + SYS(fail, "ip -n %s route add unreachable %s/32 %s", ns1, IP4_ADDR_GRE, vrf); + SYS(fail, "ip -n %s route add unreachable %s/32 %s", ns2, IP4_ADDR_GRE, vrf); + SYS(fail, "ip -n %s -6 route add unreachable %s/128 %s", ns1, IP6_ADDR_GRE, vrf); + SYS(fail, "ip -n %s -6 route add unreachable %s/128 %s", ns2, IP6_ADDR_GRE, vrf); + + return 0; +fail: + return -1; +} + +#define GSO_SIZE 5000 +#define GSO_TCP_PORT 9000 +/* This tests the fix from commit ea0371f78799 ("net: fix GSO in bpf_lwt_push_ip_encap") */ +static int test_gso_fix(const char *ns1, const char *ns3, int family) +{ 
+ const char *ip_addr = family == AF_INET ? IP4_ADDR_DST : IP6_ADDR_DST; + char gso_packet[GSO_SIZE] = {}; + struct nstoken *nstoken = NULL; + int sfd, cfd, afd; + ssize_t bytes; + int ret = -1; + + if (!ns1 || !ns3) + return ret; + + nstoken = open_netns(ns3); + if (!ASSERT_OK_PTR(nstoken, "open ns3")) + return ret; + + sfd = start_server_str(family, SOCK_STREAM, ip_addr, GSO_TCP_PORT, NULL); + if (!ASSERT_OK_FD(sfd, "start server")) + goto close_netns; + + close_netns(nstoken); + + nstoken = open_netns(ns1); + if (!ASSERT_OK_PTR(nstoken, "open ns1")) + goto close_server; + + cfd = connect_to_addr_str(family, SOCK_STREAM, ip_addr, GSO_TCP_PORT, NULL); + if (!ASSERT_OK_FD(cfd, "connect to server")) + goto close_server; + + close_netns(nstoken); + nstoken = NULL; + + afd = accept(sfd, NULL, NULL); + if (!ASSERT_OK_FD(afd, "accept")) + goto close_client; + + /* Send a packet larger than MTU */ + bytes = send(cfd, gso_packet, GSO_SIZE, 0); + if (!ASSERT_EQ(bytes, GSO_SIZE, "send packet")) + goto close_accept; + + /* Verify we received all expected bytes */ + bytes = read(afd, gso_packet, GSO_SIZE); + if (!ASSERT_EQ(bytes, GSO_SIZE, "receive packet")) + goto close_accept; + + ret = 0; + +close_accept: + close(afd); +close_client: + close(cfd); +close_server: + close(sfd); +close_netns: + close_netns(nstoken); + + return ret; +} + +static int check_ping_ok(const char *ns1) +{ + SYS(fail, "ip netns exec %s ping -c 1 -W1 -I veth1 %s > /dev/null", ns1, IP4_ADDR_DST); + SYS(fail, "ip netns exec %s ping6 -c 1 -W1 -I veth1 %s > /dev/null", ns1, IP6_ADDR_DST); + return 0; +fail: + return -1; +} + +static int check_ping_fails(const char *ns1) +{ + int ret; + + ret = SYS_NOFAIL("ip netns exec %s ping -c 1 -W1 -I veth1 %s", ns1, IP4_ADDR_DST); + if (!ret) + return -1; + + ret = SYS_NOFAIL("ip netns exec %s ping6 -c 1 -W1 -I veth1 %s", ns1, IP6_ADDR_DST); + if (!ret) + return -1; + + return 0; +} + +#define EGRESS true +#define INGRESS false +#define IPV4_ENCAP true +#define IPV6_ENCAP false +static void lwt_ip_encap(bool ipv4_encap, bool egress, const char *vrf) +{ + char ns1[NETNS_NAME_SIZE] = NETNS_BASE "-1-"; + char ns2[NETNS_NAME_SIZE] = NETNS_BASE "-2-"; + char ns3[NETNS_NAME_SIZE] = NETNS_BASE "-3-"; + char *sec = ipv4_encap ? 
"encap_gre" : "encap_gre6"; + + if (!vrf) + return; + + if (!ASSERT_OK(create_ns(ns1, NETNS_NAME_SIZE), "create ns1")) + goto out; + if (!ASSERT_OK(create_ns(ns2, NETNS_NAME_SIZE), "create ns2")) + goto out; + if (!ASSERT_OK(create_ns(ns3, NETNS_NAME_SIZE), "create ns3")) + goto out; + + if (!ASSERT_OK(setup_network(ns1, ns2, ns3, vrf), "setup network")) + goto out; + + /* By default, pings work */ + if (!ASSERT_OK(check_ping_ok(ns1), "ping OK")) + goto out; + + /* Remove NS2->DST routes, ping fails */ + SYS(out, "ip -n %s route del %s/32 dev veth3 %s", ns2, IP4_ADDR_DST, vrf); + SYS(out, "ip -n %s -6 route del %s/128 dev veth3 %s", ns2, IP6_ADDR_DST, vrf); + if (!ASSERT_OK(check_ping_fails(ns1), "ping expected fail")) + goto out; + + /* Install replacement routes (LWT/eBPF), pings succeed */ + if (egress) { + SYS(out, "ip -n %s route add %s encap bpf xmit obj %s sec %s dev veth1 %s", + ns1, IP4_ADDR_DST, BPF_FILE, sec, vrf); + SYS(out, "ip -n %s -6 route add %s encap bpf xmit obj %s sec %s dev veth1 %s", + ns1, IP6_ADDR_DST, BPF_FILE, sec, vrf); + } else { + SYS(out, "ip -n %s route add %s encap bpf in obj %s sec %s dev veth2 %s", + ns2, IP4_ADDR_DST, BPF_FILE, sec, vrf); + SYS(out, "ip -n %s -6 route add %s encap bpf in obj %s sec %s dev veth2 %s", + ns2, IP6_ADDR_DST, BPF_FILE, sec, vrf); + } + + if (!ASSERT_OK(check_ping_ok(ns1), "ping OK")) + goto out; + + /* Skip GSO tests with VRF: VRF routing needs properly assigned + * source IP/device, which is easy to do with ping but hard with TCP. + */ + if (egress && !vrf[0]) { + if (!ASSERT_OK(test_gso_fix(ns1, ns3, AF_INET), "test GSO")) + goto out; + } + + /* Negative test: remove routes to GRE devices: ping fails */ + if (!ASSERT_OK(remove_routes_to_gredev(ns1, ns2, vrf), "remove routes to gredev")) + goto out; + if (!ASSERT_OK(check_ping_fails(ns1), "ping expected fail")) + goto out; + + /* Another negative test */ + if (!ASSERT_OK(add_unreachable_routes_to_gredev(ns1, ns2, vrf), + "add unreachable routes")) + goto out; + ASSERT_OK(check_ping_fails(ns1), "ping expected fail"); + +out: + SYS_NOFAIL("ip netns del %s", ns1); + SYS_NOFAIL("ip netns del %s", ns2); + SYS_NOFAIL("ip netns del %s", ns3); +} + +void test_lwt_ip_encap_vrf_ipv6(void) +{ + if (test__start_subtest("egress")) + lwt_ip_encap(IPV6_ENCAP, EGRESS, "vrf red"); + + if (test__start_subtest("ingress")) + lwt_ip_encap(IPV6_ENCAP, INGRESS, "vrf red"); +} + +void test_lwt_ip_encap_vrf_ipv4(void) +{ + if (test__start_subtest("egress")) + lwt_ip_encap(IPV4_ENCAP, EGRESS, "vrf red"); + + if (test__start_subtest("ingress")) + lwt_ip_encap(IPV4_ENCAP, INGRESS, "vrf red"); +} + +void test_lwt_ip_encap_ipv6(void) +{ + if (test__start_subtest("egress")) + lwt_ip_encap(IPV6_ENCAP, EGRESS, ""); + + if (test__start_subtest("ingress")) + lwt_ip_encap(IPV6_ENCAP, INGRESS, ""); +} + +void test_lwt_ip_encap_ipv4(void) +{ + if (test__start_subtest("egress")) + lwt_ip_encap(IPV4_ENCAP, EGRESS, ""); + + if (test__start_subtest("ingress")) + lwt_ip_encap(IPV4_ENCAP, INGRESS, ""); +} diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_seg6local.c b/tools/testing/selftests/bpf/prog_tests/lwt_seg6local.c new file mode 100644 index 000000000000..3bc730b7c7fa --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/lwt_seg6local.c @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* Connects 6 network namespaces through veths. 
+ * Each NS may have different IPv6 global scope addresses: + * + * NS1 NS2 NS3 NS4 NS5 NS6 + * lo veth1 <-> veth2 veth3 <-> veth4 veth5 <-> veth6 lo veth7 <-> veth8 veth9 <-> veth10 lo + * fb00 ::1 ::12 ::21 ::34 ::43 ::56 ::65 ::78 ::87 ::910 ::109 ::6 + * fd00 ::4 + * fc42 ::1 + * + * All IPv6 packets going to fb00::/16 through NS2 will be encapsulated in an + * IPv6 header with a Segment Routing Header, with segments: + * fd00::1 -> fd00::2 -> fd00::3 -> fd00::4 + * + * Three fd00::/16 IPv6 addresses are bound to seg6local End.BPF actions: + * - fd00::1: add a TLV, change the flags and apply an End.X action to fc42::1 + * - fd00::2: remove the TLV, change the flags, add a tag + * - fd00::3: apply an End.T action to fd00::4, through routing table 117 + * + * fd00::4 is a simple Segment Routing node decapsulating the inner IPv6 packet. + * Each End.BPF action will validate the operations applied on the SRH by the + * previous BPF program in the chain, otherwise the packet is dropped. + * + * A UDP datagram is sent from fb00::1 to fb00::6. The test succeeds if this + * datagram can be read on NS6 when binding to fb00::6. + */ + +#include "network_helpers.h" +#include "test_progs.h" + +#define NETNS_BASE "lwt-seg6local-" +#define BPF_FILE "test_lwt_seg6local.bpf.o" + +static void cleanup(void) +{ + int ns; + + for (ns = 1; ns < 7; ns++) + SYS_NOFAIL("ip netns del %s%d", NETNS_BASE, ns); +} + +static int setup(void) +{ + int ns; + + for (ns = 1; ns < 7; ns++) + SYS(fail, "ip netns add %s%d", NETNS_BASE, ns); + + SYS(fail, "ip -n %s6 link set dev lo up", NETNS_BASE); + + for (ns = 1; ns < 6; ns++) { + int local_id = ns * 2 - 1; + int peer_id = ns * 2; + int next_ns = ns + 1; + + SYS(fail, "ip -n %s%d link add veth%d type veth peer name veth%d netns %s%d", + NETNS_BASE, ns, local_id, peer_id, NETNS_BASE, next_ns); + + SYS(fail, "ip -n %s%d link set dev veth%d up", NETNS_BASE, ns, local_id); + SYS(fail, "ip -n %s%d link set dev veth%d up", NETNS_BASE, next_ns, peer_id); + + /* Assign link scope addresses to the veths */ + SYS(fail, "ip -n %s%d -6 addr add fb00::%d%d/16 dev veth%d scope link", + NETNS_BASE, ns, local_id, peer_id, local_id); + SYS(fail, "ip -n %s%d -6 addr add fb00::%d%d/16 dev veth%d scope link", + NETNS_BASE, next_ns, peer_id, local_id, peer_id); + } + + SYS(fail, "ip -n %s5 -6 route add fb00::109 table 117 dev veth9 scope link", NETNS_BASE); + + SYS(fail, "ip -n %s1 -6 addr add fb00::1/16 dev lo", NETNS_BASE); + SYS(fail, "ip -n %s1 -6 route add fb00::6 dev veth1 via fb00::21", NETNS_BASE); + + SYS(fail, "ip -n %s2 -6 route add fb00::6 encap bpf in obj %s sec encap_srh dev veth2", + NETNS_BASE, BPF_FILE); + SYS(fail, "ip -n %s2 -6 route add fd00::1 dev veth3 via fb00::43 scope link", NETNS_BASE); + + SYS(fail, "ip -n %s3 -6 route add fc42::1 dev veth5 via fb00::65", NETNS_BASE); + SYS(fail, + "ip -n %s3 -6 route add fd00::1 encap seg6local action End.BPF endpoint obj %s sec add_egr_x dev veth4", + NETNS_BASE, BPF_FILE); + + SYS(fail, + "ip -n %s4 -6 route add fd00::2 encap seg6local action End.BPF endpoint obj %s sec pop_egr dev veth6", + NETNS_BASE, BPF_FILE); + SYS(fail, "ip -n %s4 -6 addr add fc42::1 dev lo", NETNS_BASE); + SYS(fail, "ip -n %s4 -6 route add fd00::3 dev veth7 via fb00::87", NETNS_BASE); + + SYS(fail, "ip -n %s5 -6 route add fd00::4 table 117 dev veth9 via fb00::109", NETNS_BASE); + SYS(fail, + "ip -n %s5 -6 route add fd00::3 encap seg6local action End.BPF endpoint obj %s sec inspect_t dev veth8", + NETNS_BASE, BPF_FILE); + + SYS(fail, "ip -n %s6 -6 addr add 
fb00::6/16 dev lo", NETNS_BASE); + SYS(fail, "ip -n %s6 -6 addr add fd00::4/16 dev lo", NETNS_BASE); + + for (ns = 1; ns < 6; ns++) + SYS(fail, "ip netns exec %s%d sysctl -wq net.ipv6.conf.all.forwarding=1", + NETNS_BASE, ns); + + SYS(fail, "ip netns exec %s6 sysctl -wq net.ipv6.conf.all.seg6_enabled=1", NETNS_BASE); + SYS(fail, "ip netns exec %s6 sysctl -wq net.ipv6.conf.lo.seg6_enabled=1", NETNS_BASE); + SYS(fail, "ip netns exec %s6 sysctl -wq net.ipv6.conf.veth10.seg6_enabled=1", NETNS_BASE); + + return 0; +fail: + return -1; +} + +#define SERVER_PORT 7330 +#define CLIENT_PORT 2121 +void test_lwt_seg6local(void) +{ + struct sockaddr_in6 server_addr = {}; + const char *ns1 = NETNS_BASE "1"; + const char *ns6 = NETNS_BASE "6"; + struct nstoken *nstoken = NULL; + const char foobar[] = "foobar"; + ssize_t bytes; + int sfd, cfd; + char buf[7]; + + if (!ASSERT_OK(setup(), "setup")) + goto out; + + nstoken = open_netns(ns6); + if (!ASSERT_OK_PTR(nstoken, "open ns6")) + goto out; + + sfd = start_server_str(AF_INET6, SOCK_DGRAM, "fb00::6", SERVER_PORT, NULL); + if (!ASSERT_OK_FD(sfd, "start server")) + goto close_netns; + + close_netns(nstoken); + + nstoken = open_netns(ns1); + if (!ASSERT_OK_PTR(nstoken, "open ns1")) + goto close_server; + + cfd = start_server_str(AF_INET6, SOCK_DGRAM, "fb00::1", CLIENT_PORT, NULL); + if (!ASSERT_OK_FD(cfd, "start client")) + goto close_server; + + close_netns(nstoken); + nstoken = NULL; + + /* Send a datagram through the chain of seg6local End.BPF actions */ + server_addr.sin6_family = AF_INET6; + server_addr.sin6_port = htons(SERVER_PORT); + if (!ASSERT_EQ(inet_pton(AF_INET6, "fb00::6", &server_addr.sin6_addr), 1, + "build target addr")) + goto close_client; + + bytes = sendto(cfd, foobar, sizeof(foobar), 0, + (struct sockaddr *)&server_addr, sizeof(server_addr)); + if (!ASSERT_EQ(bytes, sizeof(foobar), "send packet")) + goto close_client; + + /* Verify we received all expected bytes */ + bytes = read(sfd, buf, sizeof(buf)); + if (!ASSERT_EQ(bytes, sizeof(buf), "receive packet")) + goto close_client; + ASSERT_STREQ(buf, foobar, "check udp packet"); + +close_client: + close(cfd); +close_server: + close(sfd); +close_netns: + close_netns(nstoken); + +out: + cleanup(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/mem_rdonly_untrusted.c b/tools/testing/selftests/bpf/prog_tests/mem_rdonly_untrusted.c new file mode 100644 index 000000000000..40d4f687bd9c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/mem_rdonly_untrusted.c @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <test_progs.h> +#include "mem_rdonly_untrusted.skel.h" + +void test_mem_rdonly_untrusted(void) +{ + RUN_TESTS(mem_rdonly_untrusted); +} diff --git a/tools/testing/selftests/bpf/prog_tests/missed.c b/tools/testing/selftests/bpf/prog_tests/missed.c index 70d90c43537c..ed8857ae914a 100644 --- a/tools/testing/selftests/bpf/prog_tests/missed.c +++ b/tools/testing/selftests/bpf/prog_tests/missed.c @@ -85,6 +85,7 @@ static void test_missed_kprobe_recursion(void) ASSERT_GE(get_missed_count(bpf_program__fd(skel->progs.test3)), 1, "test3_recursion_misses"); ASSERT_GE(get_missed_count(bpf_program__fd(skel->progs.test4)), 1, "test4_recursion_misses"); ASSERT_GE(get_missed_count(bpf_program__fd(skel->progs.test5)), 1, "test5_recursion_misses"); + ASSERT_EQ(get_missed_count(bpf_program__fd(skel->progs.test6)), 1, "test6_recursion_misses"); cleanup: missed_kprobe_recursion__destroy(skel); diff --git a/tools/testing/selftests/bpf/prog_tests/net_timestamping.c 
b/tools/testing/selftests/bpf/prog_tests/net_timestamping.c new file mode 100644 index 000000000000..dbfd87499b6b --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/net_timestamping.c @@ -0,0 +1,239 @@ +#include <linux/net_tstamp.h> +#include <sys/time.h> +#include <linux/errqueue.h> +#include "test_progs.h" +#include "network_helpers.h" +#include "net_timestamping.skel.h" + +#define CG_NAME "/net-timestamping-test" +#define NSEC_PER_SEC 1000000000LL + +static const char addr4_str[] = "127.0.0.1"; +static const char addr6_str[] = "::1"; +static struct net_timestamping *skel; +static const int cfg_payload_len = 30; +static struct timespec usr_ts; +static u64 delay_tolerance_nsec = 10000000000; /* 10 seconds */ +int SK_TS_SCHED; +int SK_TS_TXSW; +int SK_TS_ACK; + +static int64_t timespec_to_ns64(struct timespec *ts) +{ + return ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec; +} + +static void validate_key(int tskey, int tstype) +{ + static int expected_tskey = -1; + + if (tstype == SCM_TSTAMP_SCHED) + expected_tskey = cfg_payload_len - 1; + + ASSERT_EQ(expected_tskey, tskey, "tskey mismatch"); + + expected_tskey = tskey; +} + +static void validate_timestamp(struct timespec *cur, struct timespec *prev) +{ + int64_t cur_ns, prev_ns; + + cur_ns = timespec_to_ns64(cur); + prev_ns = timespec_to_ns64(prev); + + ASSERT_LT(cur_ns - prev_ns, delay_tolerance_nsec, "latency"); +} + +static void test_socket_timestamp(struct scm_timestamping *tss, int tstype, + int tskey) +{ + static struct timespec prev_ts; + + validate_key(tskey, tstype); + + switch (tstype) { + case SCM_TSTAMP_SCHED: + validate_timestamp(&tss->ts[0], &usr_ts); + SK_TS_SCHED += 1; + break; + case SCM_TSTAMP_SND: + validate_timestamp(&tss->ts[0], &prev_ts); + SK_TS_TXSW += 1; + break; + case SCM_TSTAMP_ACK: + validate_timestamp(&tss->ts[0], &prev_ts); + SK_TS_ACK += 1; + break; + } + + prev_ts = tss->ts[0]; +} + +static void test_recv_errmsg_cmsg(struct msghdr *msg) +{ + struct sock_extended_err *serr = NULL; + struct scm_timestamping *tss = NULL; + struct cmsghdr *cm; + + for (cm = CMSG_FIRSTHDR(msg); + cm && cm->cmsg_len; + cm = CMSG_NXTHDR(msg, cm)) { + if (cm->cmsg_level == SOL_SOCKET && + cm->cmsg_type == SCM_TIMESTAMPING) { + tss = (void *)CMSG_DATA(cm); + } else if ((cm->cmsg_level == SOL_IP && + cm->cmsg_type == IP_RECVERR) || + (cm->cmsg_level == SOL_IPV6 && + cm->cmsg_type == IPV6_RECVERR) || + (cm->cmsg_level == SOL_PACKET && + cm->cmsg_type == PACKET_TX_TIMESTAMP)) { + serr = (void *)CMSG_DATA(cm); + ASSERT_EQ(serr->ee_origin, SO_EE_ORIGIN_TIMESTAMPING, + "cmsg type"); + } + + if (serr && tss) + test_socket_timestamp(tss, serr->ee_info, + serr->ee_data); + } +} + +static bool socket_recv_errmsg(int fd) +{ + static char ctrl[1024 /* overprovision*/]; + char data[cfg_payload_len]; + static struct msghdr msg; + struct iovec entry; + int n = 0; + + memset(&msg, 0, sizeof(msg)); + memset(&entry, 0, sizeof(entry)); + memset(ctrl, 0, sizeof(ctrl)); + + entry.iov_base = data; + entry.iov_len = cfg_payload_len; + msg.msg_iov = &entry; + msg.msg_iovlen = 1; + msg.msg_name = NULL; + msg.msg_namelen = 0; + msg.msg_control = ctrl; + msg.msg_controllen = sizeof(ctrl); + + n = recvmsg(fd, &msg, MSG_ERRQUEUE); + if (n == -1) + ASSERT_EQ(errno, EAGAIN, "recvmsg MSG_ERRQUEUE"); + + if (n >= 0) + test_recv_errmsg_cmsg(&msg); + + return n == -1; +} + +static void test_socket_timestamping(int fd) +{ + while (!socket_recv_errmsg(fd)); + + ASSERT_EQ(SK_TS_SCHED, 1, "SCM_TSTAMP_SCHED"); + ASSERT_EQ(SK_TS_TXSW, 1, "SCM_TSTAMP_SND"); + 
ASSERT_EQ(SK_TS_ACK, 1, "SCM_TSTAMP_ACK"); + + SK_TS_SCHED = 0; + SK_TS_TXSW = 0; + SK_TS_ACK = 0; +} + +static void test_tcp(int family, bool enable_socket_timestamping) +{ + struct net_timestamping__bss *bss; + char buf[cfg_payload_len]; + int sfd = -1, cfd = -1; + unsigned int sock_opt; + struct netns_obj *ns; + int cg_fd; + int ret; + + cg_fd = test__join_cgroup(CG_NAME); + if (!ASSERT_OK_FD(cg_fd, "join cgroup")) + return; + + ns = netns_new("net_timestamping_ns", true); + if (!ASSERT_OK_PTR(ns, "create ns")) + goto out; + + skel = net_timestamping__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open and load skel")) + goto out; + + if (!ASSERT_OK(net_timestamping__attach(skel), "attach skel")) + goto out; + + skel->links.skops_sockopt = + bpf_program__attach_cgroup(skel->progs.skops_sockopt, cg_fd); + if (!ASSERT_OK_PTR(skel->links.skops_sockopt, "attach cgroup")) + goto out; + + bss = skel->bss; + memset(bss, 0, sizeof(*bss)); + + skel->bss->monitored_pid = getpid(); + + sfd = start_server(family, SOCK_STREAM, + family == AF_INET6 ? addr6_str : addr4_str, 0, 0); + if (!ASSERT_OK_FD(sfd, "start_server")) + goto out; + + cfd = connect_to_fd(sfd, 0); + if (!ASSERT_OK_FD(cfd, "connect_to_fd_server")) + goto out; + + if (enable_socket_timestamping) { + sock_opt = SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_OPT_ID | + SOF_TIMESTAMPING_TX_SCHED | + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_TX_ACK; + ret = setsockopt(cfd, SOL_SOCKET, SO_TIMESTAMPING, + (char *) &sock_opt, sizeof(sock_opt)); + if (!ASSERT_OK(ret, "setsockopt SO_TIMESTAMPING")) + goto out; + + ret = clock_gettime(CLOCK_REALTIME, &usr_ts); + if (!ASSERT_OK(ret, "get user time")) + goto out; + } + + ret = write(cfd, buf, sizeof(buf)); + if (!ASSERT_EQ(ret, sizeof(buf), "send to server")) + goto out; + + if (enable_socket_timestamping) + test_socket_timestamping(cfd); + + ASSERT_EQ(bss->nr_active, 1, "nr_active"); + ASSERT_EQ(bss->nr_snd, 2, "nr_snd"); + ASSERT_EQ(bss->nr_sched, 1, "nr_sched"); + ASSERT_EQ(bss->nr_txsw, 1, "nr_txsw"); + ASSERT_EQ(bss->nr_ack, 1, "nr_ack"); + +out: + if (sfd >= 0) + close(sfd); + if (cfd >= 0) + close(cfd); + net_timestamping__destroy(skel); + netns_free(ns); + close(cg_fd); +} + +void test_net_timestamping(void) +{ + if (test__start_subtest("INET4: bpf timestamping")) + test_tcp(AF_INET, false); + if (test__start_subtest("INET4: bpf and socket timestamping")) + test_tcp(AF_INET, true); + if (test__start_subtest("INET6: bpf timestamping")) + test_tcp(AF_INET6, false); + if (test__start_subtest("INET6: bpf and socket timestamping")) + test_tcp(AF_INET6, true); +} diff --git a/tools/testing/selftests/bpf/prog_tests/netns_cookie.c b/tools/testing/selftests/bpf/prog_tests/netns_cookie.c index ac3c3c097c0e..e00cd34586dd 100644 --- a/tools/testing/selftests/bpf/prog_tests/netns_cookie.c +++ b/tools/testing/selftests/bpf/prog_tests/netns_cookie.c @@ -33,20 +33,25 @@ void test_netns_cookie(void) skel->links.get_netns_cookie_sockops = bpf_program__attach_cgroup( skel->progs.get_netns_cookie_sockops, cgroup_fd); - if (!ASSERT_OK_PTR(skel->links.get_netns_cookie_sockops, "prog_attach")) + if (!ASSERT_OK_PTR(skel->links.get_netns_cookie_sockops, "prog_attach_sockops")) goto done; verdict = bpf_program__fd(skel->progs.get_netns_cookie_sk_msg); map = bpf_map__fd(skel->maps.sock_map); err = bpf_prog_attach(verdict, map, BPF_SK_MSG_VERDICT, 0); - if (!ASSERT_OK(err, "prog_attach")) + if (!ASSERT_OK(err, "prog_attach_sk_msg")) goto done; tc_fd = bpf_program__fd(skel->progs.get_netns_cookie_tcx); err = 
bpf_prog_attach_opts(tc_fd, loopback, BPF_TCX_INGRESS, &opta); - if (!ASSERT_OK(err, "prog_attach")) + if (!ASSERT_OK(err, "prog_attach_tcx")) goto done; + skel->links.get_netns_cookie_cgroup_skb = bpf_program__attach_cgroup( + skel->progs.get_netns_cookie_cgroup_skb, cgroup_fd); + if (!ASSERT_OK_PTR(skel->links.get_netns_cookie_cgroup_skb, "prog_attach_cgroup_skb")) + goto cleanup_tc; + server_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0); if (CHECK(server_fd < 0, "start_server", "errno %d\n", errno)) goto cleanup_tc; @@ -69,16 +74,18 @@ void test_netns_cookie(void) if (!ASSERT_OK(err, "getsockopt")) goto cleanup_tc; - ASSERT_EQ(val, cookie_expected_value, "cookie_value"); + ASSERT_EQ(val, cookie_expected_value, "cookie_value_sockops"); err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.sk_msg_netns_cookies), &client_fd, &val); if (!ASSERT_OK(err, "map_lookup(sk_msg_netns_cookies)")) goto cleanup_tc; - ASSERT_EQ(val, cookie_expected_value, "cookie_value"); - ASSERT_EQ(skel->bss->tcx_init_netns_cookie, cookie_expected_value, "cookie_value"); - ASSERT_EQ(skel->bss->tcx_netns_cookie, cookie_expected_value, "cookie_value"); + ASSERT_EQ(val, cookie_expected_value, "cookie_value_sk_msg"); + ASSERT_EQ(skel->bss->tcx_init_netns_cookie, cookie_expected_value, "cookie_value_init_tcx"); + ASSERT_EQ(skel->bss->tcx_netns_cookie, cookie_expected_value, "cookie_value_tcx"); + ASSERT_EQ(skel->bss->cgroup_skb_init_netns_cookie, cookie_expected_value, "cookie_value_init_cgroup_skb"); + ASSERT_EQ(skel->bss->cgroup_skb_netns_cookie, cookie_expected_value, "cookie_value_cgroup_skb"); cleanup_tc: err = bpf_prog_detach_opts(tc_fd, loopback, BPF_TCX_INGRESS, &optd); diff --git a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c index 761ce24bce38..99c953f2be21 100644 --- a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c +++ b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c @@ -200,41 +200,28 @@ static void test_ns_current_pid_tgid_new_ns(int (*fn)(void *), void *arg) return; } -static void test_in_netns(int (*fn)(void *), void *arg) -{ - struct nstoken *nstoken = NULL; - - SYS(cleanup, "ip netns add ns_current_pid_tgid"); - SYS(cleanup, "ip -net ns_current_pid_tgid link set dev lo up"); - - nstoken = open_netns("ns_current_pid_tgid"); - if (!ASSERT_OK_PTR(nstoken, "open_netns")) - goto cleanup; - - test_ns_current_pid_tgid_new_ns(fn, arg); - -cleanup: - if (nstoken) - close_netns(nstoken); - SYS_NOFAIL("ip netns del ns_current_pid_tgid"); -} - /* TODO: use a different tracepoint */ -void serial_test_ns_current_pid_tgid(void) +void serial_test_current_pid_tgid(void) { if (test__start_subtest("root_ns_tp")) test_current_pid_tgid_tp(NULL); if (test__start_subtest("new_ns_tp")) test_ns_current_pid_tgid_new_ns(test_current_pid_tgid_tp, NULL); - if (test__start_subtest("new_ns_cgrp")) { - int cgroup_fd = -1; - - cgroup_fd = test__join_cgroup("/sock_addr"); - if (ASSERT_GE(cgroup_fd, 0, "join_cgroup")) { - test_in_netns(test_current_pid_tgid_cgrp, &cgroup_fd); - close(cgroup_fd); - } +} + +void test_ns_current_pid_tgid_cgrp(void) +{ + int cgroup_fd = test__join_cgroup("/sock_addr"); + + if (ASSERT_OK_FD(cgroup_fd, "join_cgroup")) { + test_ns_current_pid_tgid_new_ns(test_current_pid_tgid_cgrp, &cgroup_fd); + close(cgroup_fd); } - if (test__start_subtest("new_ns_sk_msg")) - test_in_netns(test_current_pid_tgid_sk_msg, NULL); } + +void test_ns_current_pid_tgid_sk_msg(void) +{ + 
test_ns_current_pid_tgid_new_ns(test_current_pid_tgid_sk_msg, NULL); +} + + diff --git a/tools/testing/selftests/bpf/prog_tests/prepare.c b/tools/testing/selftests/bpf/prog_tests/prepare.c new file mode 100644 index 000000000000..fb5cdad97116 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/prepare.c @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta */ + +#include <test_progs.h> +#include <network_helpers.h> +#include "prepare.skel.h" + +static bool check_prepared(struct bpf_object *obj) +{ + bool is_prepared = true; + const struct bpf_map *map; + + bpf_object__for_each_map(map, obj) { + if (bpf_map__fd(map) < 0) + is_prepared = false; + } + + return is_prepared; +} + +static void test_prepare_no_load(void) +{ + struct prepare *skel; + int err; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + ); + + skel = prepare__open(); + if (!ASSERT_OK_PTR(skel, "prepare__open")) + return; + + if (!ASSERT_FALSE(check_prepared(skel->obj), "not check_prepared")) + goto cleanup; + + err = bpf_object__prepare(skel->obj); + + if (!ASSERT_TRUE(check_prepared(skel->obj), "check_prepared")) + goto cleanup; + + if (!ASSERT_OK(err, "bpf_object__prepare")) + goto cleanup; + +cleanup: + prepare__destroy(skel); +} + +static void test_prepare_load(void) +{ + struct prepare *skel; + int err, prog_fd; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + ); + + skel = prepare__open(); + if (!ASSERT_OK_PTR(skel, "prepare__open")) + return; + + if (!ASSERT_FALSE(check_prepared(skel->obj), "not check_prepared")) + goto cleanup; + + err = bpf_object__prepare(skel->obj); + if (!ASSERT_OK(err, "bpf_object__prepare")) + goto cleanup; + + err = prepare__load(skel); + if (!ASSERT_OK(err, "prepare__load")) + goto cleanup; + + if (!ASSERT_TRUE(check_prepared(skel->obj), "check_prepared")) + goto cleanup; + + prog_fd = bpf_program__fd(skel->progs.program); + if (!ASSERT_GE(prog_fd, 0, "prog_fd")) + goto cleanup; + + err = bpf_prog_test_run_opts(prog_fd, &topts); + if (!ASSERT_OK(err, "test_run_opts err")) + goto cleanup; + + if (!ASSERT_OK(topts.retval, "test_run_opts retval")) + goto cleanup; + + ASSERT_EQ(skel->bss->err, 0, "err"); + +cleanup: + prepare__destroy(skel); +} + +void test_prepare(void) +{ + if (test__start_subtest("prepare_load")) + test_prepare_load(); + if (test__start_subtest("prepare_no_load")) + test_prepare_no_load(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/pro_epilogue.c b/tools/testing/selftests/bpf/prog_tests/pro_epilogue.c index 509883e6823a..5d3c00a08a88 100644 --- a/tools/testing/selftests/bpf/prog_tests/pro_epilogue.c +++ b/tools/testing/selftests/bpf/prog_tests/pro_epilogue.c @@ -6,6 +6,7 @@ #include "epilogue_tailcall.skel.h" #include "pro_epilogue_goto_start.skel.h" #include "epilogue_exit.skel.h" +#include "pro_epilogue_with_kfunc.skel.h" struct st_ops_args { __u64 a; @@ -55,6 +56,7 @@ void test_pro_epilogue(void) RUN_TESTS(pro_epilogue); RUN_TESTS(pro_epilogue_goto_start); RUN_TESTS(epilogue_exit); + RUN_TESTS(pro_epilogue_with_kfunc); if (test__start_subtest("tailcall")) test_tailcall(); } diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_null.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_null.c index 6fa19449297e..43676a9922dc 100644 --- a/tools/testing/selftests/bpf/prog_tests/raw_tp_null.c +++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_null.c @@ -3,11 +3,14 @@ #include <test_progs.h> #include "raw_tp_null.skel.h" +#include 
"raw_tp_null_fail.skel.h" void test_raw_tp_null(void) { struct raw_tp_null *skel; + RUN_TESTS(raw_tp_null_fail); + skel = raw_tp_null__open_and_load(); if (!ASSERT_OK_PTR(skel, "raw_tp_null__open_and_load")) return; diff --git a/tools/testing/selftests/bpf/prog_tests/rbtree.c b/tools/testing/selftests/bpf/prog_tests/rbtree.c index 9818f06c97c5..d8f3d7a45fe9 100644 --- a/tools/testing/selftests/bpf/prog_tests/rbtree.c +++ b/tools/testing/selftests/bpf/prog_tests/rbtree.c @@ -8,6 +8,7 @@ #include "rbtree_fail.skel.h" #include "rbtree_btf_fail__wrong_node_type.skel.h" #include "rbtree_btf_fail__add_wrong_type.skel.h" +#include "rbtree_search.skel.h" static void test_rbtree_add_nodes(void) { @@ -187,3 +188,8 @@ void test_rbtree_fail(void) { RUN_TESTS(rbtree_fail); } + +void test_rbtree_search(void) +{ + RUN_TESTS(rbtree_search); +} diff --git a/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c b/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c index ebe0c12b5536..c9f855e5da24 100644 --- a/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c +++ b/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c @@ -81,6 +81,9 @@ static const char * const inproper_region_tests[] = { "nested_rcu_region", "rcu_read_lock_global_subprog_lock", "rcu_read_lock_global_subprog_unlock", + "rcu_read_lock_sleepable_helper_global_subprog", + "rcu_read_lock_sleepable_kfunc_global_subprog", + "rcu_read_lock_sleepable_global_subprog_indirect", }; static void test_inproper_region(void) diff --git a/tools/testing/selftests/bpf/prog_tests/read_vsyscall.c b/tools/testing/selftests/bpf/prog_tests/read_vsyscall.c index c7b9ba8b1d06..a8d1eaa67020 100644 --- a/tools/testing/selftests/bpf/prog_tests/read_vsyscall.c +++ b/tools/testing/selftests/bpf/prog_tests/read_vsyscall.c @@ -24,6 +24,7 @@ struct read_ret_desc { { .name = "copy_from_user", .ret = -EFAULT }, { .name = "copy_from_user_task", .ret = -EFAULT }, { .name = "copy_from_user_str", .ret = -EFAULT }, + { .name = "copy_from_user_task_str", .ret = -EFAULT }, }; void test_read_vsyscall(void) diff --git a/tools/testing/selftests/bpf/prog_tests/recursive_attach.c b/tools/testing/selftests/bpf/prog_tests/recursive_attach.c index 8100509e561b..0ffa01d54ce2 100644 --- a/tools/testing/selftests/bpf/prog_tests/recursive_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/recursive_attach.c @@ -149,3 +149,70 @@ close_prog: fentry_recursive_target__destroy(target_skel); fentry_recursive__destroy(tracing_skel); } + +static void *fentry_target_test_run(void *arg) +{ + for (;;) { + int prog_fd = __atomic_load_n((int *)arg, __ATOMIC_SEQ_CST); + LIBBPF_OPTS(bpf_test_run_opts, topts); + int err; + + if (prog_fd == -1) + break; + err = bpf_prog_test_run_opts(prog_fd, &topts); + if (!ASSERT_OK(err, "fentry_target test_run")) + break; + } + + return NULL; +} + +void test_fentry_attach_stress(void) +{ + struct fentry_recursive_target *target_skel = NULL; + struct fentry_recursive *tracing_skel = NULL; + struct bpf_program *prog; + int err, i, tgt_prog_fd; + pthread_t thread; + + target_skel = fentry_recursive_target__open_and_load(); + if (!ASSERT_OK_PTR(target_skel, + "fentry_recursive_target__open_and_load")) + goto close_prog; + tgt_prog_fd = bpf_program__fd(target_skel->progs.fentry_target); + err = pthread_create(&thread, NULL, + fentry_target_test_run, &tgt_prog_fd); + if (!ASSERT_OK(err, "bpf_program__set_attach_target")) + goto close_prog; + + for (i = 0; i < 1000; i++) { + tracing_skel = fentry_recursive__open(); + if (!ASSERT_OK_PTR(tracing_skel, 
"fentry_recursive__open")) + goto stop_thread; + + prog = tracing_skel->progs.recursive_attach; + err = bpf_program__set_attach_target(prog, tgt_prog_fd, + "fentry_target"); + if (!ASSERT_OK(err, "bpf_program__set_attach_target")) + goto stop_thread; + + err = fentry_recursive__load(tracing_skel); + if (!ASSERT_OK(err, "fentry_recursive__load")) + goto stop_thread; + + err = fentry_recursive__attach(tracing_skel); + if (!ASSERT_OK(err, "fentry_recursive__attach")) + goto stop_thread; + + fentry_recursive__destroy(tracing_skel); + tracing_skel = NULL; + } + +stop_thread: + __atomic_store_n(&tgt_prog_fd, -1, __ATOMIC_SEQ_CST); + err = pthread_join(thread, NULL); + ASSERT_OK(err, "pthread_join"); +close_prog: + fentry_recursive__destroy(tracing_skel); + fentry_recursive_target__destroy(target_skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c index 39d42271cc46..e261b0e872db 100644 --- a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c +++ b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c @@ -465,6 +465,20 @@ static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t, return range_improve(x_t, x, x_swap); } + if (!t_is_32(x_t) && !t_is_32(y_t) && x_t != y_t) { + if (x_t == S64 && x.a > x.b) { + if (x.b < y.a && x.a <= y.b) + return range(x_t, x.a, y.b); + if (x.a > y.b && x.b >= y.a) + return range(x_t, y.a, x.b); + } else if (x_t == U64 && y.a > y.b) { + if (y.b < x.a && y.a <= x.b) + return range(x_t, y.a, x.b); + if (y.a > x.b && y.b >= x.a) + return range(x_t, x.a, y.b); + } + } + /* otherwise, plain range cast and intersection works */ return range_improve(x_t, x, y_cast); } diff --git a/tools/testing/selftests/bpf/prog_tests/res_spin_lock.c b/tools/testing/selftests/bpf/prog_tests/res_spin_lock.c new file mode 100644 index 000000000000..0703e987df89 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/res_spin_lock.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024-2025 Meta Platforms, Inc. and affiliates. 
*/ +#include <test_progs.h> +#include <network_helpers.h> +#include <sys/sysinfo.h> + +#include "res_spin_lock.skel.h" +#include "res_spin_lock_fail.skel.h" + +void test_res_spin_lock_failure(void) +{ + RUN_TESTS(res_spin_lock_fail); +} + +static volatile int skip; + +static void *spin_lock_thread(void *arg) +{ + int err, prog_fd = *(u32 *) arg; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 10000, + ); + + while (!READ_ONCE(skip)) { + err = bpf_prog_test_run_opts(prog_fd, &topts); + if (err || topts.retval) { + ASSERT_OK(err, "test_run"); + ASSERT_OK(topts.retval, "test_run retval"); + break; + } + } + pthread_exit(arg); +} + +void test_res_spin_lock_success(void) +{ + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + struct res_spin_lock *skel; + pthread_t thread_id[16]; + int prog_fd, i, err; + void *ret; + + if (get_nprocs() < 2) { + test__skip(); + return; + } + + skel = res_spin_lock__open_and_load(); + if (!ASSERT_OK_PTR(skel, "res_spin_lock__open_and_load")) + return; + /* AA deadlock */ + prog_fd = bpf_program__fd(skel->progs.res_spin_lock_test); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "error"); + ASSERT_OK(topts.retval, "retval"); + + prog_fd = bpf_program__fd(skel->progs.res_spin_lock_test_held_lock_max); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "error"); + ASSERT_OK(topts.retval, "retval"); + + /* Multi-threaded ABBA deadlock. */ + + prog_fd = bpf_program__fd(skel->progs.res_spin_lock_test_AB); + for (i = 0; i < 16; i++) { + int err; + + err = pthread_create(&thread_id[i], NULL, &spin_lock_thread, &prog_fd); + if (!ASSERT_OK(err, "pthread_create")) + goto end; + } + + topts.retval = 0; + topts.repeat = 1000; + int fd = bpf_program__fd(skel->progs.res_spin_lock_test_BA); + while (!topts.retval && !err && !READ_ONCE(skel->bss->err)) { + err = bpf_prog_test_run_opts(fd, &topts); + } + + WRITE_ONCE(skip, true); + + for (i = 0; i < 16; i++) { + if (!ASSERT_OK(pthread_join(thread_id[i], &ret), "pthread_join")) + goto end; + if (!ASSERT_EQ(ret, &prog_fd, "ret == prog_fd")) + goto end; + } + + ASSERT_EQ(READ_ONCE(skel->bss->err), -EDEADLK, "timeout err"); + ASSERT_OK(err, "err"); + ASSERT_EQ(topts.retval, -EDEADLK, "timeout"); +end: + res_spin_lock__destroy(skel); + return; +} diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf.c b/tools/testing/selftests/bpf/prog_tests/ringbuf.c index da430df45aa4..d1e4cb28a72c 100644 --- a/tools/testing/selftests/bpf/prog_tests/ringbuf.c +++ b/tools/testing/selftests/bpf/prog_tests/ringbuf.c @@ -97,7 +97,7 @@ static void ringbuf_write_subtest(void) if (!ASSERT_OK_PTR(skel, "skel_open")) return; - skel->maps.ringbuf.max_entries = 0x4000; + skel->maps.ringbuf.max_entries = 0x40000; err = test_ringbuf_write_lskel__load(skel); if (!ASSERT_OK(err, "skel_load")) @@ -108,7 +108,7 @@ static void ringbuf_write_subtest(void) mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rb_fd, 0); if (!ASSERT_OK_PTR(mmap_ptr, "rw_cons_pos")) goto cleanup; - *mmap_ptr = 0x3000; + *mmap_ptr = 0x30000; ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw"); skel->bss->pid = getpid(); diff --git a/tools/testing/selftests/bpf/prog_tests/setget_sockopt.c b/tools/testing/selftests/bpf/prog_tests/setget_sockopt.c index e12255121c15..e4dac529d424 100644 --- a/tools/testing/selftests/bpf/prog_tests/setget_sockopt.c +++ b/tools/testing/selftests/bpf/prog_tests/setget_sockopt.c @@ -202,7 
+202,7 @@ err_out: void test_setget_sockopt(void) { cg_fd = test__join_cgroup(CG_NAME); - if (cg_fd < 0) + if (!ASSERT_OK_FD(cg_fd, "join cgroup")) return; if (create_netns()) diff --git a/tools/testing/selftests/bpf/prog_tests/sk_assign.c b/tools/testing/selftests/bpf/prog_tests/sk_assign.c index 0b9bd1d6f7cc..10a0ab954b8a 100644 --- a/tools/testing/selftests/bpf/prog_tests/sk_assign.c +++ b/tools/testing/selftests/bpf/prog_tests/sk_assign.c @@ -37,8 +37,10 @@ configure_stack(void) tc = popen("tc -V", "r"); if (CHECK_FAIL(!tc)) return false; - if (CHECK_FAIL(!fgets(tc_version, sizeof(tc_version), tc))) + if (CHECK_FAIL(!fgets(tc_version, sizeof(tc_version), tc))) { + pclose(tc); return false; + } if (strstr(tc_version, ", libbpf ")) prog = "test_sk_assign_libbpf.bpf.o"; else diff --git a/tools/testing/selftests/bpf/prog_tests/snprintf.c b/tools/testing/selftests/bpf/prog_tests/snprintf.c index 4be6fdb78c6a..594441acb707 100644 --- a/tools/testing/selftests/bpf/prog_tests/snprintf.c +++ b/tools/testing/selftests/bpf/prog_tests/snprintf.c @@ -116,6 +116,8 @@ static void test_snprintf_negative(void) ASSERT_ERR(load_single_snprintf("%llc"), "invalid specifier 7"); ASSERT_ERR(load_single_snprintf("\x80"), "non ascii character"); ASSERT_ERR(load_single_snprintf("\x1"), "non printable character"); + ASSERT_ERR(load_single_snprintf("%p%"), "invalid specifier 8"); + ASSERT_ERR(load_single_snprintf("%s%"), "invalid specifier 9"); } void test_snprintf(void) diff --git a/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c b/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c index d56e18b25528..27781df8f2fb 100644 --- a/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c +++ b/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c @@ -1,20 +1,875 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2024 Meta +#include <poll.h> #include <test_progs.h> #include "network_helpers.h" #include "sock_iter_batch.skel.h" #define TEST_NS "sock_iter_batch_netns" +#define TEST_CHILD_NS "sock_iter_batch_child_netns" +static const int init_batch_size = 16; static const int nr_soreuse = 4; +struct iter_out { + int idx; + __u64 cookie; +} __packed; + +struct sock_count { + __u64 cookie; + int count; +}; + +static int insert(__u64 cookie, struct sock_count counts[], int counts_len) +{ + int insert = -1; + int i = 0; + + for (; i < counts_len; i++) { + if (!counts[i].cookie) { + insert = i; + } else if (counts[i].cookie == cookie) { + insert = i; + break; + } + } + if (insert < 0) + return insert; + + counts[insert].cookie = cookie; + counts[insert].count++; + + return counts[insert].count; +} + +static int read_n(int iter_fd, int n, struct sock_count counts[], + int counts_len) +{ + struct iter_out out; + int nread = 1; + int i = 0; + + for (; nread > 0 && (n < 0 || i < n); i++) { + nread = read(iter_fd, &out, sizeof(out)); + if (!nread || !ASSERT_EQ(nread, sizeof(out), "nread")) + break; + ASSERT_GE(insert(out.cookie, counts, counts_len), 0, "insert"); + } + + ASSERT_TRUE(n < 0 || i == n, "n < 0 || i == n"); + + return i; +} + +static __u64 socket_cookie(int fd) +{ + __u64 cookie; + socklen_t cookie_len = sizeof(cookie); + + if (!ASSERT_OK(getsockopt(fd, SOL_SOCKET, SO_COOKIE, &cookie, + &cookie_len), "getsockopt(SO_COOKIE)")) + return 0; + return cookie; +} + +static bool was_seen(int fd, struct sock_count counts[], int counts_len) +{ + __u64 cookie = socket_cookie(fd); + int i = 0; + + for (; cookie && i < counts_len; i++) + if (cookie == counts[i].cookie) + return true; + + return false; +} + 
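[The cookie-tracking helpers above and read_n() assume a fixed contract with the iterator program: the BPF side emits one packed {idx, cookie} record per socket via bpf_seq_write(), using the same cookie value that userspace later obtains with getsockopt(SO_COOKIE). The following is only a minimal sketch of such a producer, not part of this patch: iter_tcp_sketch is a hypothetical name, and the real progs/sock_iter_batch.c additionally filters sockets by family and port.]

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

/* Sketch only; must stay in sync with struct iter_out in the userspace test. */
struct iter_out {
	int idx;
	__u64 cookie;
} __attribute__((packed));

SEC("iter/tcp")
int iter_tcp_sketch(struct bpf_iter__tcp *ctx)
{
	struct sock_common *sk_common = ctx->sk_common;
	struct iter_out out;

	if (!sk_common)
		return 0;

	out.idx = (int)ctx->meta->seq_num;
	/* Same value userspace reads back via getsockopt(SO_COOKIE). */
	out.cookie = bpf_get_socket_cookie(sk_common);
	bpf_seq_write(ctx->meta->seq, &out, sizeof(out));
	return 0;
}

char _license[] SEC("license") = "GPL";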
+static int get_seen_socket(int *fds, struct sock_count counts[], int n) +{ + int i = 0; + + for (; i < n; i++) + if (was_seen(fds[i], counts, n)) + return i; + return -1; +} + +static int get_nth_socket(int *fds, int fds_len, struct bpf_link *link, int n) +{ + int i, nread, iter_fd; + int nth_sock_idx = -1; + struct iter_out out; + + iter_fd = bpf_iter_create(bpf_link__fd(link)); + if (!ASSERT_OK_FD(iter_fd, "bpf_iter_create")) + return -1; + + for (; n >= 0; n--) { + nread = read(iter_fd, &out, sizeof(out)); + if (!nread || !ASSERT_GE(nread, 1, "nread")) + goto done; + } + + for (i = 0; i < fds_len && nth_sock_idx < 0; i++) + if (fds[i] >= 0 && socket_cookie(fds[i]) == out.cookie) + nth_sock_idx = i; +done: + close(iter_fd); + return nth_sock_idx; +} + +static void destroy(int fd) +{ + struct sock_iter_batch *skel = NULL; + __u64 cookie = socket_cookie(fd); + struct bpf_link *link = NULL; + int iter_fd = -1; + int nread; + __u64 out; + + skel = sock_iter_batch__open(); + if (!ASSERT_OK_PTR(skel, "sock_iter_batch__open")) + goto done; + + skel->rodata->destroy_cookie = cookie; + + if (!ASSERT_OK(sock_iter_batch__load(skel), "sock_iter_batch__load")) + goto done; + + link = bpf_program__attach_iter(skel->progs.iter_tcp_destroy, NULL); + if (!ASSERT_OK_PTR(link, "bpf_program__attach_iter")) + goto done; + + iter_fd = bpf_iter_create(bpf_link__fd(link)); + if (!ASSERT_OK_FD(iter_fd, "bpf_iter_create")) + goto done; + + /* Delete matching socket. */ + nread = read(iter_fd, &out, sizeof(out)); + ASSERT_GE(nread, 0, "nread"); + if (nread) + ASSERT_EQ(out, cookie, "cookie matches"); +done: + if (iter_fd >= 0) + close(iter_fd); + bpf_link__destroy(link); + sock_iter_batch__destroy(skel); + close(fd); +} + +static int get_seen_count(int fd, struct sock_count counts[], int n) +{ + __u64 cookie = socket_cookie(fd); + int count = 0; + int i = 0; + + for (; cookie && !count && i < n; i++) + if (cookie == counts[i].cookie) + count = counts[i].count; + + return count; +} + +static void check_n_were_seen_once(int *fds, int fds_len, int n, + struct sock_count counts[], int counts_len) +{ + int seen_once = 0; + int seen_cnt; + int i = 0; + + for (; i < fds_len; i++) { + /* Skip any sockets that were closed or that weren't seen + * exactly once. 
+ */ + if (fds[i] < 0) + continue; + seen_cnt = get_seen_count(fds[i], counts, counts_len); + if (seen_cnt && ASSERT_EQ(seen_cnt, 1, "seen_cnt")) + seen_once++; + } + + ASSERT_EQ(seen_once, n, "seen_once"); +} + +static int accept_from_one(struct pollfd *server_poll_fds, + int server_poll_fds_len) +{ + static const int poll_timeout_ms = 5000; /* 5s */ + int ret; + int i; + + ret = poll(server_poll_fds, server_poll_fds_len, poll_timeout_ms); + if (!ASSERT_EQ(ret, 1, "poll")) + return -1; + + for (i = 0; i < server_poll_fds_len; i++) + if (server_poll_fds[i].revents & POLLIN) + return accept(server_poll_fds[i].fd, NULL, NULL); + + return -1; +} + +static int *connect_to_server(int family, int sock_type, const char *addr, + __u16 port, int nr_connects, int *server_fds, + int server_fds_len) +{ + struct pollfd *server_poll_fds = NULL; + int *established_socks = NULL; + int i; + + server_poll_fds = calloc(server_fds_len, sizeof(*server_poll_fds)); + if (!ASSERT_OK_PTR(server_poll_fds, "server_poll_fds")) + return NULL; + + for (i = 0; i < server_fds_len; i++) { + server_poll_fds[i].fd = server_fds[i]; + server_poll_fds[i].events = POLLIN; + } + + i = 0; + + established_socks = malloc(sizeof(*established_socks) * nr_connects*2); + if (!ASSERT_OK_PTR(established_socks, "established_socks")) + goto error; + + while (nr_connects--) { + established_socks[i] = connect_to_addr_str(family, sock_type, + addr, port, NULL); + if (!ASSERT_OK_FD(established_socks[i], "connect_to_addr_str")) + goto error; + i++; + established_socks[i] = accept_from_one(server_poll_fds, + server_fds_len); + if (!ASSERT_OK_FD(established_socks[i], "accept_from_one")) + goto error; + i++; + } + + free(server_poll_fds); + return established_socks; +error: + free_fds(established_socks, i); + free(server_poll_fds); + return NULL; +} + +static void remove_seen(int family, int sock_type, const char *addr, __u16 port, + int *socks, int socks_len, int *established_socks, + int established_socks_len, struct sock_count *counts, + int counts_len, struct bpf_link *link, int iter_fd) +{ + int close_idx; + + /* Iterate through the first socks_len - 1 sockets. */ + read_n(iter_fd, socks_len - 1, counts, counts_len); + + /* Make sure we saw socks_len - 1 sockets exactly once. */ + check_n_were_seen_once(socks, socks_len, socks_len - 1, counts, + counts_len); + + /* Close a socket we've already seen to remove it from the bucket. */ + close_idx = get_seen_socket(socks, counts, counts_len); + if (!ASSERT_GE(close_idx, 0, "close_idx")) + return; + close(socks[close_idx]); + socks[close_idx] = -1; + + /* Iterate through the rest of the sockets. */ + read_n(iter_fd, -1, counts, counts_len); + + /* Make sure the last socket wasn't skipped and that there were no + * repeats. + */ + check_n_were_seen_once(socks, socks_len, socks_len - 1, counts, + counts_len); +} + +static void remove_seen_established(int family, int sock_type, const char *addr, + __u16 port, int *listen_socks, + int listen_socks_len, int *established_socks, + int established_socks_len, + struct sock_count *counts, int counts_len, + struct bpf_link *link, int iter_fd) +{ + int close_idx; + + /* Iterate through all listening sockets. */ + read_n(iter_fd, listen_socks_len, counts, counts_len); + + /* Make sure we saw all listening sockets exactly once. */ + check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len, + counts, counts_len); + + /* Leave one established socket. 
*/ + read_n(iter_fd, established_socks_len - 1, counts, counts_len); + + /* Close a socket we've already seen to remove it from the bucket. */ + close_idx = get_nth_socket(established_socks, established_socks_len, + link, listen_socks_len + 1); + if (!ASSERT_GE(close_idx, 0, "close_idx")) + return; + destroy(established_socks[close_idx]); + established_socks[close_idx] = -1; + + /* Iterate through the rest of the sockets. */ + read_n(iter_fd, -1, counts, counts_len); + + /* Make sure the last socket wasn't skipped and that there were no + * repeats. + */ + check_n_were_seen_once(established_socks, established_socks_len, + established_socks_len - 1, counts, counts_len); +} + +static void remove_unseen(int family, int sock_type, const char *addr, + __u16 port, int *socks, int socks_len, + int *established_socks, int established_socks_len, + struct sock_count *counts, int counts_len, + struct bpf_link *link, int iter_fd) +{ + int close_idx; + + /* Iterate through the first socket. */ + read_n(iter_fd, 1, counts, counts_len); + + /* Make sure we saw a socket from fds. */ + check_n_were_seen_once(socks, socks_len, 1, counts, counts_len); + + /* Close what would be the next socket in the bucket to exercise the + * condition where we need to skip past the first cookie we remembered. + */ + close_idx = get_nth_socket(socks, socks_len, link, 1); + if (!ASSERT_GE(close_idx, 0, "close_idx")) + return; + close(socks[close_idx]); + socks[close_idx] = -1; + + /* Iterate through the rest of the sockets. */ + read_n(iter_fd, -1, counts, counts_len); + + /* Make sure the remaining sockets were seen exactly once and that we + * didn't repeat the socket that was already seen. + */ + check_n_were_seen_once(socks, socks_len, socks_len - 1, counts, + counts_len); +} + +static void remove_unseen_established(int family, int sock_type, + const char *addr, __u16 port, + int *listen_socks, int listen_socks_len, + int *established_socks, + int established_socks_len, + struct sock_count *counts, int counts_len, + struct bpf_link *link, int iter_fd) +{ + int close_idx; + + /* Iterate through all listening sockets. */ + read_n(iter_fd, listen_socks_len, counts, counts_len); + + /* Make sure we saw all listening sockets exactly once. */ + check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len, + counts, counts_len); + + /* Iterate through the first established socket. */ + read_n(iter_fd, 1, counts, counts_len); + + /* Make sure we saw one established socket. */ + check_n_were_seen_once(established_socks, established_socks_len, 1, + counts, counts_len); + + /* Close what would be the next socket in the bucket to exercise the + * condition where we need to skip past the first cookie we remembered. + */ + close_idx = get_nth_socket(established_socks, established_socks_len, + link, listen_socks_len + 1); + if (!ASSERT_GE(close_idx, 0, "close_idx")) + return; + + destroy(established_socks[close_idx]); + established_socks[close_idx] = -1; + + /* Iterate through the rest of the sockets. */ + read_n(iter_fd, -1, counts, counts_len); + + /* Make sure the remaining sockets were seen exactly once and that we + * didn't repeat the socket that was already seen. 
+ */
+ check_n_were_seen_once(established_socks, established_socks_len,
+ established_socks_len - 1, counts, counts_len);
+}
+
+static void remove_all(int family, int sock_type, const char *addr,
+ __u16 port, int *socks, int socks_len,
+ int *established_socks, int established_socks_len,
+ struct sock_count *counts, int counts_len,
+ struct bpf_link *link, int iter_fd)
+{
+ int close_idx, i;
+
+ /* Iterate through the first socket. */
+ read_n(iter_fd, 1, counts, counts_len);
+
+ /* Make sure we saw a socket from fds. */
+ check_n_were_seen_once(socks, socks_len, 1, counts, counts_len);
+
+ /* Close all remaining sockets to exhaust the list of saved cookies and
+ * exit without putting any sockets into the batch on the next read.
+ */
+ for (i = 0; i < socks_len - 1; i++) {
+ close_idx = get_nth_socket(socks, socks_len, link, 1);
+ if (!ASSERT_GE(close_idx, 0, "close_idx"))
+ return;
+ close(socks[close_idx]);
+ socks[close_idx] = -1;
+ }
+
+ /* Make sure there are no more sockets returned. */
+ ASSERT_EQ(read_n(iter_fd, -1, counts, counts_len), 0, "read_n");
+}
+
+static void remove_all_established(int family, int sock_type, const char *addr,
+ __u16 port, int *listen_socks,
+ int listen_socks_len, int *established_socks,
+ int established_socks_len,
+ struct sock_count *counts, int counts_len,
+ struct bpf_link *link, int iter_fd)
+{
+ int *close_idx = NULL;
+ int i;
+
+ /* Iterate through all listening sockets. */
+ read_n(iter_fd, listen_socks_len, counts, counts_len);
+
+ /* Make sure we saw all listening sockets exactly once. */
+ check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len,
+ counts, counts_len);
+
+ /* Iterate through the first established socket. */
+ read_n(iter_fd, 1, counts, counts_len);
+
+ /* Make sure we saw one established socket. */
+ check_n_were_seen_once(established_socks, established_socks_len, 1,
+ counts, counts_len);
+
+ /* Close all remaining sockets to exhaust the list of saved cookies and
+ * exit without putting any sockets into the batch on the next read.
+ */
+ close_idx = malloc(sizeof(int) * (established_socks_len - 1));
+ if (!ASSERT_OK_PTR(close_idx, "close_idx malloc"))
+ return;
+ for (i = 0; i < established_socks_len - 1; i++) {
+ close_idx[i] = get_nth_socket(established_socks,
+ established_socks_len, link,
+ listen_socks_len + i);
+ if (!ASSERT_GE(close_idx[i], 0, "close_idx"))
+ return;
+ }
+
+ for (i = 0; i < established_socks_len - 1; i++) {
+ destroy(established_socks[close_idx[i]]);
+ established_socks[close_idx[i]] = -1;
+ }
+
+ /* Make sure there are no more sockets returned. */
+ ASSERT_EQ(read_n(iter_fd, -1, counts, counts_len), 0, "read_n");
+ free(close_idx);
+}
+
+static void add_some(int family, int sock_type, const char *addr, __u16 port,
+ int *socks, int socks_len, int *established_socks,
+ int established_socks_len, struct sock_count *counts,
+ int counts_len, struct bpf_link *link, int iter_fd)
+{
+ int *new_socks = NULL;
+
+ /* Iterate through the first socks_len - 1 sockets. */
+ read_n(iter_fd, socks_len - 1, counts, counts_len);
+
+ /* Make sure we saw socks_len - 1 sockets exactly once. */
+ check_n_were_seen_once(socks, socks_len, socks_len - 1, counts,
+ counts_len);
+
+ /* Double the number of sockets in the bucket. */
+ new_socks = start_reuseport_server(family, sock_type, addr, port, 0,
+ socks_len);
+ if (!ASSERT_OK_PTR(new_socks, "start_reuseport_server"))
+ goto done;
+
+ /* Iterate through the rest of the sockets.
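+ * The new sockets were inserted at the head of the bucket's list,
+ * behind the iterator's saved position, so only the original sockets
+ * are expected to be returned below.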
*/ + read_n(iter_fd, -1, counts, counts_len); + + /* Make sure each of the original sockets was seen exactly once. */ + check_n_were_seen_once(socks, socks_len, socks_len, counts, + counts_len); +done: + free_fds(new_socks, socks_len); +} + +static void add_some_established(int family, int sock_type, const char *addr, + __u16 port, int *listen_socks, + int listen_socks_len, int *established_socks, + int established_socks_len, + struct sock_count *counts, + int counts_len, struct bpf_link *link, + int iter_fd) +{ + int *new_socks = NULL; + + /* Iterate through all listening sockets. */ + read_n(iter_fd, listen_socks_len, counts, counts_len); + + /* Make sure we saw all listening sockets exactly once. */ + check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len, + counts, counts_len); + + /* Iterate through the first established_socks_len - 1 sockets. */ + read_n(iter_fd, established_socks_len - 1, counts, counts_len); + + /* Make sure we saw established_socks_len - 1 sockets exactly once. */ + check_n_were_seen_once(established_socks, established_socks_len, + established_socks_len - 1, counts, counts_len); + + /* Double the number of established sockets in the bucket. */ + new_socks = connect_to_server(family, sock_type, addr, port, + established_socks_len / 2, listen_socks, + listen_socks_len); + if (!ASSERT_OK_PTR(new_socks, "connect_to_server")) + goto done; + + /* Iterate through the rest of the sockets. */ + read_n(iter_fd, -1, counts, counts_len); + + /* Make sure each of the original sockets was seen exactly once. */ + check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len, + counts, counts_len); + check_n_were_seen_once(established_socks, established_socks_len, + established_socks_len, counts, counts_len); +done: + free_fds(new_socks, established_socks_len); +} + +static void force_realloc(int family, int sock_type, const char *addr, + __u16 port, int *socks, int socks_len, + int *established_socks, int established_socks_len, + struct sock_count *counts, int counts_len, + struct bpf_link *link, int iter_fd) +{ + int *new_socks = NULL; + + /* Iterate through the first socket just to initialize the batch. */ + read_n(iter_fd, 1, counts, counts_len); + + /* Double the number of sockets in the bucket to force a realloc on the + * next read. + */ + new_socks = start_reuseport_server(family, sock_type, addr, port, 0, + socks_len); + if (!ASSERT_OK_PTR(new_socks, "start_reuseport_server")) + goto done; + + /* Iterate through the rest of the sockets. */ + read_n(iter_fd, -1, counts, counts_len); + + /* Make sure each socket from the first set was seen exactly once. */ + check_n_were_seen_once(socks, socks_len, socks_len, counts, + counts_len); +done: + free_fds(new_socks, socks_len); +} + +static void force_realloc_established(int family, int sock_type, + const char *addr, __u16 port, + int *listen_socks, int listen_socks_len, + int *established_socks, + int established_socks_len, + struct sock_count *counts, int counts_len, + struct bpf_link *link, int iter_fd) +{ + /* Iterate through all sockets to trigger a realloc. */ + read_n(iter_fd, -1, counts, counts_len); + + /* Make sure each socket was seen exactly once. 
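+ * The batch has to grow mid-iteration to hold the established sockets,
+ * but the realloc must not cause any socket to be skipped or repeated.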
*/ + check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len, + counts, counts_len); + check_n_were_seen_once(established_socks, established_socks_len, + established_socks_len, counts, counts_len); +} + +struct test_case { + void (*test)(int family, int sock_type, const char *addr, __u16 port, + int *socks, int socks_len, int *established_socks, + int established_socks_len, struct sock_count *counts, + int counts_len, struct bpf_link *link, int iter_fd); + const char *description; + int ehash_buckets; + int connections; + int init_socks; + int max_socks; + int sock_type; + int family; +}; + +static struct test_case resume_tests[] = { + { + .description = "udp: resume after removing a seen socket", + .init_socks = nr_soreuse, + .max_socks = nr_soreuse, + .sock_type = SOCK_DGRAM, + .family = AF_INET6, + .test = remove_seen, + }, + { + .description = "udp: resume after removing one unseen socket", + .init_socks = nr_soreuse, + .max_socks = nr_soreuse, + .sock_type = SOCK_DGRAM, + .family = AF_INET6, + .test = remove_unseen, + }, + { + .description = "udp: resume after removing all unseen sockets", + .init_socks = nr_soreuse, + .max_socks = nr_soreuse, + .sock_type = SOCK_DGRAM, + .family = AF_INET6, + .test = remove_all, + }, + { + .description = "udp: resume after adding a few sockets", + .init_socks = nr_soreuse, + .max_socks = nr_soreuse, + .sock_type = SOCK_DGRAM, + /* Use AF_INET so that new sockets are added to the head of the + * bucket's list. + */ + .family = AF_INET, + .test = add_some, + }, + { + .description = "udp: force a realloc to occur", + .init_socks = init_batch_size, + .max_socks = init_batch_size * 2, + .sock_type = SOCK_DGRAM, + /* Use AF_INET6 so that new sockets are added to the tail of the + * bucket's list, needing to be added to the next batch to force + * a realloc. + */ + .family = AF_INET6, + .test = force_realloc, + }, + { + .description = "tcp: resume after removing a seen socket (listening)", + .init_socks = nr_soreuse, + .max_socks = nr_soreuse, + .sock_type = SOCK_STREAM, + .family = AF_INET6, + .test = remove_seen, + }, + { + .description = "tcp: resume after removing one unseen socket (listening)", + .init_socks = nr_soreuse, + .max_socks = nr_soreuse, + .sock_type = SOCK_STREAM, + .family = AF_INET6, + .test = remove_unseen, + }, + { + .description = "tcp: resume after removing all unseen sockets (listening)", + .init_socks = nr_soreuse, + .max_socks = nr_soreuse, + .sock_type = SOCK_STREAM, + .family = AF_INET6, + .test = remove_all, + }, + { + .description = "tcp: resume after adding a few sockets (listening)", + .init_socks = nr_soreuse, + .max_socks = nr_soreuse, + .sock_type = SOCK_STREAM, + /* Use AF_INET so that new sockets are added to the head of the + * bucket's list. + */ + .family = AF_INET, + .test = add_some, + }, + { + .description = "tcp: force a realloc to occur (listening)", + .init_socks = init_batch_size, + .max_socks = init_batch_size * 2, + .sock_type = SOCK_STREAM, + /* Use AF_INET6 so that new sockets are added to the tail of the + * bucket's list, needing to be added to the next batch to force + * a realloc. 
+ */ + .family = AF_INET6, + .test = force_realloc, + }, + { + .description = "tcp: resume after removing a seen socket (established)", + /* Force all established sockets into one bucket */ + .ehash_buckets = 1, + .connections = nr_soreuse, + .init_socks = nr_soreuse, + /* Room for connect()ed and accept()ed sockets */ + .max_socks = nr_soreuse * 3, + .sock_type = SOCK_STREAM, + .family = AF_INET6, + .test = remove_seen_established, + }, + { + .description = "tcp: resume after removing one unseen socket (established)", + /* Force all established sockets into one bucket */ + .ehash_buckets = 1, + .connections = nr_soreuse, + .init_socks = nr_soreuse, + /* Room for connect()ed and accept()ed sockets */ + .max_socks = nr_soreuse * 3, + .sock_type = SOCK_STREAM, + .family = AF_INET6, + .test = remove_unseen_established, + }, + { + .description = "tcp: resume after removing all unseen sockets (established)", + /* Force all established sockets into one bucket */ + .ehash_buckets = 1, + .connections = nr_soreuse, + .init_socks = nr_soreuse, + /* Room for connect()ed and accept()ed sockets */ + .max_socks = nr_soreuse * 3, + .sock_type = SOCK_STREAM, + .family = AF_INET6, + .test = remove_all_established, + }, + { + .description = "tcp: resume after adding a few sockets (established)", + /* Force all established sockets into one bucket */ + .ehash_buckets = 1, + .connections = nr_soreuse, + .init_socks = nr_soreuse, + /* Room for connect()ed and accept()ed sockets */ + .max_socks = nr_soreuse * 3, + .sock_type = SOCK_STREAM, + .family = AF_INET6, + .test = add_some_established, + }, + { + .description = "tcp: force a realloc to occur (established)", + /* Force all established sockets into one bucket */ + .ehash_buckets = 1, + /* Bucket size will need to double when going from listening to + * established sockets. + */ + .connections = init_batch_size, + .init_socks = nr_soreuse, + /* Room for connect()ed and accept()ed sockets */ + .max_socks = nr_soreuse + (init_batch_size * 2), + .sock_type = SOCK_STREAM, + .family = AF_INET6, + .test = force_realloc_established, + }, +}; + +static void do_resume_test(struct test_case *tc) +{ + struct sock_iter_batch *skel = NULL; + struct sock_count *counts = NULL; + static const __u16 port = 10001; + struct nstoken *nstoken = NULL; + struct bpf_link *link = NULL; + int *established_fds = NULL; + int err, iter_fd = -1; + const char *addr; + int *fds = NULL; + + if (tc->ehash_buckets) { + SYS_NOFAIL("ip netns del " TEST_CHILD_NS); + SYS(done, "sysctl -wq net.ipv4.tcp_child_ehash_entries=%d", + tc->ehash_buckets); + SYS(done, "ip netns add %s", TEST_CHILD_NS); + SYS(done, "ip -net %s link set dev lo up", TEST_CHILD_NS); + nstoken = open_netns(TEST_CHILD_NS); + if (!ASSERT_OK_PTR(nstoken, "open_child_netns")) + goto done; + } + + counts = calloc(tc->max_socks, sizeof(*counts)); + if (!ASSERT_OK_PTR(counts, "counts")) + goto done; + skel = sock_iter_batch__open(); + if (!ASSERT_OK_PTR(skel, "sock_iter_batch__open")) + goto done; + + /* Prepare a bucket of sockets in the kernel hashtable */ + addr = tc->family == AF_INET6 ? 
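+ /* Pick the loopback address matching the family under test. */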
"::1" : "127.0.0.1"; + fds = start_reuseport_server(tc->family, tc->sock_type, addr, port, 0, + tc->init_socks); + if (!ASSERT_OK_PTR(fds, "start_reuseport_server")) + goto done; + if (tc->connections) { + established_fds = connect_to_server(tc->family, tc->sock_type, + addr, port, + tc->connections, fds, + tc->init_socks); + if (!ASSERT_OK_PTR(established_fds, "connect_to_server")) + goto done; + } + skel->rodata->ports[0] = 0; + skel->rodata->ports[1] = 0; + skel->rodata->sf = tc->family; + skel->rodata->ss = 0; + + err = sock_iter_batch__load(skel); + if (!ASSERT_OK(err, "sock_iter_batch__load")) + goto done; + + link = bpf_program__attach_iter(tc->sock_type == SOCK_STREAM ? + skel->progs.iter_tcp_soreuse : + skel->progs.iter_udp_soreuse, + NULL); + if (!ASSERT_OK_PTR(link, "bpf_program__attach_iter")) + goto done; + + iter_fd = bpf_iter_create(bpf_link__fd(link)); + if (!ASSERT_OK_FD(iter_fd, "bpf_iter_create")) + goto done; + + tc->test(tc->family, tc->sock_type, addr, port, fds, tc->init_socks, + established_fds, tc->connections*2, counts, tc->max_socks, + link, iter_fd); +done: + close_netns(nstoken); + SYS_NOFAIL("ip netns del " TEST_CHILD_NS); + SYS_NOFAIL("sysctl -w net.ipv4.tcp_child_ehash_entries=0"); + free(counts); + free_fds(fds, tc->init_socks); + free_fds(established_fds, tc->connections*2); + if (iter_fd >= 0) + close(iter_fd); + bpf_link__destroy(link); + sock_iter_batch__destroy(skel); +} + +static void do_resume_tests(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(resume_tests); i++) { + if (test__start_subtest(resume_tests[i].description)) { + do_resume_test(&resume_tests[i]); + } + } +} + static void do_test(int sock_type, bool onebyone) { int err, i, nread, to_read, total_read, iter_fd = -1; - int first_idx, second_idx, indices[nr_soreuse]; + struct iter_out outputs[nr_soreuse]; struct bpf_link *link = NULL; struct sock_iter_batch *skel; + int first_idx, second_idx; int *fds[2] = {}; skel = sock_iter_batch__open(); @@ -34,6 +889,9 @@ static void do_test(int sock_type, bool onebyone) goto done; skel->rodata->ports[i] = ntohs(local_port); } + skel->rodata->sf = AF_INET6; + if (sock_type == SOCK_STREAM) + skel->rodata->ss = TCP_LISTEN; err = sock_iter_batch__load(skel); if (!ASSERT_OK(err, "sock_iter_batch__load")) @@ -55,38 +913,38 @@ static void do_test(int sock_type, bool onebyone) * from a bucket and leave one socket out from * that bucket on purpose. */ - to_read = (nr_soreuse - 1) * sizeof(*indices); + to_read = (nr_soreuse - 1) * sizeof(*outputs); total_read = 0; first_idx = -1; do { - nread = read(iter_fd, indices, onebyone ? sizeof(*indices) : to_read); - if (nread <= 0 || nread % sizeof(*indices)) + nread = read(iter_fd, outputs, onebyone ? sizeof(*outputs) : to_read); + if (nread <= 0 || nread % sizeof(*outputs)) break; total_read += nread; if (first_idx == -1) - first_idx = indices[0]; - for (i = 0; i < nread / sizeof(*indices); i++) - ASSERT_EQ(indices[i], first_idx, "first_idx"); + first_idx = outputs[0].idx; + for (i = 0; i < nread / sizeof(*outputs); i++) + ASSERT_EQ(outputs[i].idx, first_idx, "first_idx"); } while (total_read < to_read); - ASSERT_EQ(nread, onebyone ? sizeof(*indices) : to_read, "nread"); + ASSERT_EQ(nread, onebyone ? 
sizeof(*outputs) : to_read, "nread"); ASSERT_EQ(total_read, to_read, "total_read"); free_fds(fds[first_idx], nr_soreuse); fds[first_idx] = NULL; /* Read the "whole" second bucket */ - to_read = nr_soreuse * sizeof(*indices); + to_read = nr_soreuse * sizeof(*outputs); total_read = 0; second_idx = !first_idx; do { - nread = read(iter_fd, indices, onebyone ? sizeof(*indices) : to_read); - if (nread <= 0 || nread % sizeof(*indices)) + nread = read(iter_fd, outputs, onebyone ? sizeof(*outputs) : to_read); + if (nread <= 0 || nread % sizeof(*outputs)) break; total_read += nread; - for (i = 0; i < nread / sizeof(*indices); i++) - ASSERT_EQ(indices[i], second_idx, "second_idx"); + for (i = 0; i < nread / sizeof(*outputs); i++) + ASSERT_EQ(outputs[i].idx, second_idx, "second_idx"); } while (total_read <= to_read); ASSERT_EQ(nread, 0, "nread"); /* Both so_reuseport ports should be in different buckets, so @@ -128,6 +986,7 @@ void test_sock_iter_batch(void) do_test(SOCK_DGRAM, true); do_test(SOCK_DGRAM, false); } + do_resume_tests(); close_netns(nstoken); done: diff --git a/tools/testing/selftests/bpf/prog_tests/socket_helpers.h b/tools/testing/selftests/bpf/prog_tests/socket_helpers.h new file mode 100644 index 000000000000..e02cabcc814e --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/socket_helpers.h @@ -0,0 +1,468 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __SOCKET_HELPERS__ +#define __SOCKET_HELPERS__ + +#include <sys/un.h> +#include <linux/vm_sockets.h> + +/* include/linux/net.h */ +#define SOCK_TYPE_MASK 0xf + +#define IO_TIMEOUT_SEC 30 +#define MAX_STRERR_LEN 256 + +/* workaround for older vm_sockets.h */ +#ifndef VMADDR_CID_LOCAL +#define VMADDR_CID_LOCAL 1 +#endif + +/* include/linux/cleanup.h */ +#define __get_and_null(p, nullvalue) \ + ({ \ + __auto_type __ptr = &(p); \ + __auto_type __val = *__ptr; \ + *__ptr = nullvalue; \ + __val; \ + }) + +#define take_fd(fd) __get_and_null(fd, -EBADF) + +/* Wrappers that fail the test on error and report it. */ + +#define _FAIL(errnum, fmt...) \ + ({ \ + error_at_line(0, (errnum), __func__, __LINE__, fmt); \ + CHECK_FAIL(true); \ + }) +#define FAIL(fmt...) _FAIL(0, fmt) +#define FAIL_ERRNO(fmt...) 
_FAIL(errno, fmt) +#define FAIL_LIBBPF(err, msg) \ + ({ \ + char __buf[MAX_STRERR_LEN]; \ + libbpf_strerror((err), __buf, sizeof(__buf)); \ + FAIL("%s: %s", (msg), __buf); \ + }) + + +#define xaccept_nonblock(fd, addr, len) \ + ({ \ + int __ret = \ + accept_timeout((fd), (addr), (len), IO_TIMEOUT_SEC); \ + if (__ret == -1) \ + FAIL_ERRNO("accept"); \ + __ret; \ + }) + +#define xbind(fd, addr, len) \ + ({ \ + int __ret = bind((fd), (addr), (len)); \ + if (__ret == -1) \ + FAIL_ERRNO("bind"); \ + __ret; \ + }) + +#define xclose(fd) \ + ({ \ + int __ret = close((fd)); \ + if (__ret == -1) \ + FAIL_ERRNO("close"); \ + __ret; \ + }) + +#define xconnect(fd, addr, len) \ + ({ \ + int __ret = connect((fd), (addr), (len)); \ + if (__ret == -1) \ + FAIL_ERRNO("connect"); \ + __ret; \ + }) + +#define xgetsockname(fd, addr, len) \ + ({ \ + int __ret = getsockname((fd), (addr), (len)); \ + if (__ret == -1) \ + FAIL_ERRNO("getsockname"); \ + __ret; \ + }) + +#define xgetsockopt(fd, level, name, val, len) \ + ({ \ + int __ret = getsockopt((fd), (level), (name), (val), (len)); \ + if (__ret == -1) \ + FAIL_ERRNO("getsockopt(" #name ")"); \ + __ret; \ + }) + +#define xlisten(fd, backlog) \ + ({ \ + int __ret = listen((fd), (backlog)); \ + if (__ret == -1) \ + FAIL_ERRNO("listen"); \ + __ret; \ + }) + +#define xsetsockopt(fd, level, name, val, len) \ + ({ \ + int __ret = setsockopt((fd), (level), (name), (val), (len)); \ + if (__ret == -1) \ + FAIL_ERRNO("setsockopt(" #name ")"); \ + __ret; \ + }) + +#define xsend(fd, buf, len, flags) \ + ({ \ + ssize_t __ret = send((fd), (buf), (len), (flags)); \ + if (__ret == -1) \ + FAIL_ERRNO("send"); \ + __ret; \ + }) + +#define xrecv_nonblock(fd, buf, len, flags) \ + ({ \ + ssize_t __ret = recv_timeout((fd), (buf), (len), (flags), \ + IO_TIMEOUT_SEC); \ + if (__ret == -1) \ + FAIL_ERRNO("recv"); \ + __ret; \ + }) + +#define xsocket(family, sotype, flags) \ + ({ \ + int __ret = socket(family, sotype, flags); \ + if (__ret == -1) \ + FAIL_ERRNO("socket"); \ + __ret; \ + }) + +static inline void close_fd(int *fd) +{ + if (*fd >= 0) + xclose(*fd); +} + +#define __close_fd __attribute__((cleanup(close_fd))) + +static inline struct sockaddr *sockaddr(struct sockaddr_storage *ss) +{ + return (struct sockaddr *)ss; +} + +static inline void init_addr_loopback4(struct sockaddr_storage *ss, + socklen_t *len) +{ + struct sockaddr_in *addr4 = memset(ss, 0, sizeof(*ss)); + + addr4->sin_family = AF_INET; + addr4->sin_port = 0; + addr4->sin_addr.s_addr = htonl(INADDR_LOOPBACK); + *len = sizeof(*addr4); +} + +static inline void init_addr_loopback6(struct sockaddr_storage *ss, + socklen_t *len) +{ + struct sockaddr_in6 *addr6 = memset(ss, 0, sizeof(*ss)); + + addr6->sin6_family = AF_INET6; + addr6->sin6_port = 0; + addr6->sin6_addr = in6addr_loopback; + *len = sizeof(*addr6); +} + +static inline void init_addr_loopback_unix(struct sockaddr_storage *ss, + socklen_t *len) +{ + struct sockaddr_un *addr = memset(ss, 0, sizeof(*ss)); + + addr->sun_family = AF_UNIX; + *len = sizeof(sa_family_t); +} + +static inline void init_addr_loopback_vsock(struct sockaddr_storage *ss, + socklen_t *len) +{ + struct sockaddr_vm *addr = memset(ss, 0, sizeof(*ss)); + + addr->svm_family = AF_VSOCK; + addr->svm_port = VMADDR_PORT_ANY; + addr->svm_cid = VMADDR_CID_LOCAL; + *len = sizeof(*addr); +} + +static inline void init_addr_loopback(int family, struct sockaddr_storage *ss, + socklen_t *len) +{ + switch (family) { + case AF_INET: + init_addr_loopback4(ss, len); + return; + case AF_INET6: + 
init_addr_loopback6(ss, len); + return; + case AF_UNIX: + init_addr_loopback_unix(ss, len); + return; + case AF_VSOCK: + init_addr_loopback_vsock(ss, len); + return; + default: + FAIL("unsupported address family %d", family); + } +} + +static inline int enable_reuseport(int s, int progfd) +{ + int err, one = 1; + + err = xsetsockopt(s, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)); + if (err) + return -1; + err = xsetsockopt(s, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF, &progfd, + sizeof(progfd)); + if (err) + return -1; + + return 0; +} + +static inline int socket_loopback_reuseport(int family, int sotype, int progfd) +{ + struct sockaddr_storage addr; + socklen_t len = 0; + int err, s; + + init_addr_loopback(family, &addr, &len); + + s = xsocket(family, sotype, 0); + if (s == -1) + return -1; + + if (progfd >= 0) + enable_reuseport(s, progfd); + + err = xbind(s, sockaddr(&addr), len); + if (err) + goto close; + + if (sotype & SOCK_DGRAM) + return s; + + err = xlisten(s, SOMAXCONN); + if (err) + goto close; + + return s; +close: + xclose(s); + return -1; +} + +static inline int socket_loopback(int family, int sotype) +{ + return socket_loopback_reuseport(family, sotype, -1); +} + +static inline int poll_connect(int fd, unsigned int timeout_sec) +{ + struct timeval timeout = { .tv_sec = timeout_sec }; + fd_set wfds; + int r, eval; + socklen_t esize = sizeof(eval); + + FD_ZERO(&wfds); + FD_SET(fd, &wfds); + + r = select(fd + 1, NULL, &wfds, NULL, &timeout); + if (r == 0) + errno = ETIME; + if (r != 1) + return -1; + + if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &eval, &esize) < 0) + return -1; + if (eval != 0) { + errno = eval; + return -1; + } + + return 0; +} + +static inline int poll_read(int fd, unsigned int timeout_sec) +{ + struct timeval timeout = { .tv_sec = timeout_sec }; + fd_set rfds; + int r; + + FD_ZERO(&rfds); + FD_SET(fd, &rfds); + + r = select(fd + 1, &rfds, NULL, NULL, &timeout); + if (r == 0) + errno = ETIME; + + return r == 1 ? 
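+ /* One descriptor became readable before the timeout. */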
0 : -1; +} + +static inline int accept_timeout(int fd, struct sockaddr *addr, socklen_t *len, + unsigned int timeout_sec) +{ + if (poll_read(fd, timeout_sec)) + return -1; + + return accept(fd, addr, len); +} + +static inline int recv_timeout(int fd, void *buf, size_t len, int flags, + unsigned int timeout_sec) +{ + if (poll_read(fd, timeout_sec)) + return -1; + + return recv(fd, buf, len, flags); +} + + +static inline int create_pair(int family, int sotype, int *p0, int *p1) +{ + __close_fd int s, c = -1, p = -1; + struct sockaddr_storage addr; + socklen_t len; + int err; + + s = socket_loopback(family, sotype); + if (s < 0) + return s; + + c = xsocket(family, sotype, 0); + if (c < 0) + return c; + + init_addr_loopback(family, &addr, &len); + err = xbind(c, sockaddr(&addr), len); + if (err) + return err; + + len = sizeof(addr); + err = xgetsockname(s, sockaddr(&addr), &len); + if (err) + return err; + + err = connect(c, sockaddr(&addr), len); + if (err) { + if (errno != EINPROGRESS) { + FAIL_ERRNO("connect"); + return err; + } + + err = poll_connect(c, IO_TIMEOUT_SEC); + if (err) { + FAIL_ERRNO("poll_connect"); + return err; + } + } + + switch (sotype & SOCK_TYPE_MASK) { + case SOCK_DGRAM: + err = xgetsockname(c, sockaddr(&addr), &len); + if (err) + return err; + + err = xconnect(s, sockaddr(&addr), len); + if (err) + return err; + + *p0 = take_fd(s); + break; + case SOCK_STREAM: + case SOCK_SEQPACKET: + p = xaccept_nonblock(s, NULL, NULL); + if (p < 0) + return p; + + *p0 = take_fd(p); + break; + default: + FAIL("Unsupported socket type %#x", sotype); + return -EOPNOTSUPP; + } + + *p1 = take_fd(c); + return 0; +} + +static inline int create_socket_pairs(int family, int sotype, int *c0, int *c1, + int *p0, int *p1) +{ + int err; + + err = create_pair(family, sotype, c0, p0); + if (err) + return err; + + err = create_pair(family, sotype, c1, p1); + if (err) { + close(*c0); + close(*p0); + } + + return err; +} + +static inline const char *socket_kind_to_str(int sock_fd) +{ + socklen_t opt_len; + int domain, type; + + opt_len = sizeof(domain); + if (getsockopt(sock_fd, SOL_SOCKET, SO_DOMAIN, &domain, &opt_len)) + FAIL_ERRNO("getsockopt(SO_DOMAIN)"); + + opt_len = sizeof(type); + if (getsockopt(sock_fd, SOL_SOCKET, SO_TYPE, &type, &opt_len)) + FAIL_ERRNO("getsockopt(SO_TYPE)"); + + switch (domain) { + case AF_INET: + switch (type) { + case SOCK_STREAM: + return "tcp4"; + case SOCK_DGRAM: + return "udp4"; + } + break; + case AF_INET6: + switch (type) { + case SOCK_STREAM: + return "tcp6"; + case SOCK_DGRAM: + return "udp6"; + } + break; + case AF_UNIX: + switch (type) { + case SOCK_STREAM: + return "u_str"; + case SOCK_DGRAM: + return "u_dgr"; + case SOCK_SEQPACKET: + return "u_seq"; + } + break; + case AF_VSOCK: + switch (type) { + case SOCK_STREAM: + return "v_str"; + case SOCK_DGRAM: + return "v_dgr"; + case SOCK_SEQPACKET: + return "v_seq"; + } + break; + } + + return "???"; +} + +#endif // __SOCKET_HELPERS__ diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c index fdff0652d7ef..1e3e4392dcca 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c @@ -12,6 +12,7 @@ #include "test_sockmap_progs_query.skel.h" #include "test_sockmap_pass_prog.skel.h" #include "test_sockmap_drop_prog.skel.h" +#include "test_sockmap_change_tail.skel.h" #include "bpf_iter_sockmap.skel.h" #include "sockmap_helpers.h" @@ -110,31 +111,35 @@ out: static void 
test_sockmap_vsock_delete_on_close(void) { - int err, c, p, map; - const int zero = 0; - - err = create_pair(AF_VSOCK, SOCK_STREAM, &c, &p); - if (!ASSERT_OK(err, "create_pair(AF_VSOCK)")) - return; + int map, c, p, err, zero = 0; map = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, NULL, sizeof(int), sizeof(int), 1, NULL); - if (!ASSERT_GE(map, 0, "bpf_map_create")) { - close(c); - goto out; - } + if (!ASSERT_OK_FD(map, "bpf_map_create")) + return; - err = bpf_map_update_elem(map, &zero, &c, BPF_NOEXIST); - close(c); - if (!ASSERT_OK(err, "bpf_map_update")) - goto out; + err = create_pair(AF_VSOCK, SOCK_STREAM, &c, &p); + if (!ASSERT_OK(err, "create_pair")) + goto close_map; - err = bpf_map_update_elem(map, &zero, &p, BPF_NOEXIST); + if (xbpf_map_update_elem(map, &zero, &c, BPF_NOEXIST)) + goto close_socks; + + xclose(c); + xclose(p); + + err = create_pair(AF_VSOCK, SOCK_STREAM, &c, &p); + if (!ASSERT_OK(err, "create_pair")) + goto close_map; + + err = bpf_map_update_elem(map, &zero, &c, BPF_NOEXIST); ASSERT_OK(err, "after close(), bpf_map_update"); -out: - close(p); - close(map); +close_socks: + xclose(c); + xclose(p); +close_map: + xclose(map); } static void test_skmsg_helpers(enum bpf_map_type map_type) @@ -521,8 +526,8 @@ static void test_sockmap_skb_verdict_shutdown(void) if (!ASSERT_EQ(err, 1, "epoll_wait(fd)")) goto out_close; - n = recv(c1, &b, 1, SOCK_NONBLOCK); - ASSERT_EQ(n, 0, "recv_timeout(fin)"); + n = recv(c1, &b, 1, MSG_DONTWAIT); + ASSERT_EQ(n, 0, "recv(fin)"); out_close: close(c1); close(p1); @@ -530,57 +535,6 @@ out: test_sockmap_pass_prog__destroy(skel); } -static void test_sockmap_stream_pass(void) -{ - int zero = 0, sent, recvd; - int verdict, parser; - int err, map; - int c = -1, p = -1; - struct test_sockmap_pass_prog *pass = NULL; - char snd[256] = "0123456789"; - char rcv[256] = "0"; - - pass = test_sockmap_pass_prog__open_and_load(); - verdict = bpf_program__fd(pass->progs.prog_skb_verdict); - parser = bpf_program__fd(pass->progs.prog_skb_parser); - map = bpf_map__fd(pass->maps.sock_map_rx); - - err = bpf_prog_attach(parser, map, BPF_SK_SKB_STREAM_PARSER, 0); - if (!ASSERT_OK(err, "bpf_prog_attach stream parser")) - goto out; - - err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0); - if (!ASSERT_OK(err, "bpf_prog_attach stream verdict")) - goto out; - - err = create_pair(AF_INET, SOCK_STREAM, &c, &p); - if (err) - goto out; - - /* sk_data_ready of 'p' will be replaced by strparser handler */ - err = bpf_map_update_elem(map, &zero, &p, BPF_NOEXIST); - if (!ASSERT_OK(err, "bpf_map_update_elem(p)")) - goto out_close; - - /* - * as 'prog_skb_parser' return the original skb len and - * 'prog_skb_verdict' return SK_PASS, the kernel will just - * pass it through to original socket 'p' - */ - sent = xsend(c, snd, sizeof(snd), 0); - ASSERT_EQ(sent, sizeof(snd), "xsend(c)"); - - recvd = recv_timeout(p, rcv, sizeof(rcv), SOCK_NONBLOCK, - IO_TIMEOUT_SEC); - ASSERT_EQ(recvd, sizeof(rcv), "recv_timeout(p)"); - -out_close: - close(c); - close(p); - -out: - test_sockmap_pass_prog__destroy(pass); -} static void test_sockmap_skb_verdict_fionread(bool pass_prog) { @@ -627,7 +581,7 @@ static void test_sockmap_skb_verdict_fionread(bool pass_prog) ASSERT_EQ(avail, expected, "ioctl(FIONREAD)"); /* On DROP test there will be no data to read */ if (pass_prog) { - recvd = recv_timeout(c1, &buf, sizeof(buf), SOCK_NONBLOCK, IO_TIMEOUT_SEC); + recvd = recv_timeout(c1, &buf, sizeof(buf), MSG_DONTWAIT, IO_TIMEOUT_SEC); ASSERT_EQ(recvd, sizeof(buf), "recv_timeout(c0)"); } @@ -643,6 +597,54 
@@ out: test_sockmap_drop_prog__destroy(drop); } +static void test_sockmap_skb_verdict_change_tail(void) +{ + struct test_sockmap_change_tail *skel; + int err, map, verdict; + int c1, p1, sent, recvd; + int zero = 0; + char buf[2]; + + skel = test_sockmap_change_tail__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open_and_load")) + return; + verdict = bpf_program__fd(skel->progs.prog_skb_verdict); + map = bpf_map__fd(skel->maps.sock_map_rx); + + err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0); + if (!ASSERT_OK(err, "bpf_prog_attach")) + goto out; + err = create_pair(AF_INET, SOCK_STREAM, &c1, &p1); + if (!ASSERT_OK(err, "create_pair()")) + goto out; + err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST); + if (!ASSERT_OK(err, "bpf_map_update_elem(c1)")) + goto out_close; + sent = xsend(p1, "Tr", 2, 0); + ASSERT_EQ(sent, 2, "xsend(p1)"); + recvd = recv(c1, buf, 2, 0); + ASSERT_EQ(recvd, 1, "recv(c1)"); + ASSERT_EQ(skel->data->change_tail_ret, 0, "change_tail_ret"); + + sent = xsend(p1, "G", 1, 0); + ASSERT_EQ(sent, 1, "xsend(p1)"); + recvd = recv(c1, buf, 2, 0); + ASSERT_EQ(recvd, 2, "recv(c1)"); + ASSERT_EQ(skel->data->change_tail_ret, 0, "change_tail_ret"); + + sent = xsend(p1, "E", 1, 0); + ASSERT_EQ(sent, 1, "xsend(p1)"); + recvd = recv(c1, buf, 1, 0); + ASSERT_EQ(recvd, 1, "recv(c1)"); + ASSERT_EQ(skel->data->change_tail_ret, -EINVAL, "change_tail_ret"); + +out_close: + close(c1); + close(p1); +out: + test_sockmap_change_tail__destroy(skel); +} + static void test_sockmap_skb_verdict_peek_helper(int map) { int err, c1, p1, zero = 0, sent, recvd, avail; @@ -934,8 +936,10 @@ static void test_sockmap_same_sock(void) err = socketpair(AF_UNIX, SOCK_STREAM, 0, stream); ASSERT_OK(err, "socketpair(af_unix, sock_stream)"); - if (err) + if (err) { + close(tcp); goto out; + } for (i = 0; i < 2; i++) { err = bpf_map_update_elem(map, &zero, &stream[0], BPF_ANY); @@ -954,14 +958,14 @@ static void test_sockmap_same_sock(void) ASSERT_OK(err, "bpf_map_update_elem(tcp)"); } + close(tcp); err = bpf_map_delete_elem(map, &zero); - ASSERT_OK(err, "bpf_map_delete_elem(entry)"); + ASSERT_ERR(err, "bpf_map_delete_elem(entry)"); close(stream[0]); close(stream[1]); out: close(dgram); - close(tcp); close(udp); test_sockmap_pass_prog__destroy(skel); } @@ -1010,6 +1014,34 @@ destroy: test_sockmap_pass_prog__destroy(skel); } +static void test_sockmap_vsock_unconnected(void) +{ + struct sockaddr_storage addr; + int map, s, zero = 0; + socklen_t alen; + + map = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, NULL, sizeof(int), + sizeof(int), 1, NULL); + if (!ASSERT_OK_FD(map, "bpf_map_create")) + return; + + s = xsocket(AF_VSOCK, SOCK_STREAM, 0); + if (s < 0) + goto close_map; + + /* Fail connect(), but trigger transport assignment. 
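+ * Connecting to the local CID fails without a listener, but the
+ * attempt is enough to assign a transport, which lets the sockmap
+ * update below exercise the unconnected-socket check.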
*/ + init_addr_loopback(AF_VSOCK, &addr, &alen); + if (!ASSERT_ERR(connect(s, sockaddr(&addr), alen), "connect")) + goto close_sock; + + ASSERT_ERR(bpf_map_update_elem(map, &zero, &s, BPF_ANY), "map_update"); + +close_sock: + xclose(s); +close_map: + xclose(map); +} + void test_sockmap_basic(void) { if (test__start_subtest("sockmap create_update_free")) @@ -1050,12 +1082,12 @@ void test_sockmap_basic(void) test_sockmap_progs_query(BPF_SK_SKB_VERDICT); if (test__start_subtest("sockmap skb_verdict shutdown")) test_sockmap_skb_verdict_shutdown(); - if (test__start_subtest("sockmap stream parser and verdict pass")) - test_sockmap_stream_pass(); if (test__start_subtest("sockmap skb_verdict fionread")) test_sockmap_skb_verdict_fionread(true); if (test__start_subtest("sockmap skb_verdict fionread on drop")) test_sockmap_skb_verdict_fionread(false); + if (test__start_subtest("sockmap skb_verdict change tail")) + test_sockmap_skb_verdict_change_tail(); if (test__start_subtest("sockmap skb_verdict msg_f_peek")) test_sockmap_skb_verdict_peek(); if (test__start_subtest("sockmap skb_verdict msg_f_peek with link")) @@ -1074,4 +1106,6 @@ void test_sockmap_basic(void) test_skmsg_helpers_with_link(BPF_MAP_TYPE_SOCKHASH); if (test__start_subtest("sockmap skb_verdict vsock poll")) test_sockmap_skb_verdict_vsock_poll(); + if (test__start_subtest("sockmap vsock unconnected")) + test_sockmap_vsock_unconnected(); } diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h b/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h index 38e35c72bdaa..d815efac52fd 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h @@ -1,143 +1,19 @@ #ifndef __SOCKMAP_HELPERS__ #define __SOCKMAP_HELPERS__ -#include <linux/vm_sockets.h> +#include "socket_helpers.h" -/* include/linux/net.h */ -#define SOCK_TYPE_MASK 0xf - -#define IO_TIMEOUT_SEC 30 -#define MAX_STRERR_LEN 256 #define MAX_TEST_NAME 80 -/* workaround for older vm_sockets.h */ -#ifndef VMADDR_CID_LOCAL -#define VMADDR_CID_LOCAL 1 -#endif +#define u32(v) ((u32){(v)}) +#define u64(v) ((u64){(v)}) #define __always_unused __attribute__((__unused__)) -/* include/linux/cleanup.h */ -#define __get_and_null(p, nullvalue) \ - ({ \ - __auto_type __ptr = &(p); \ - __auto_type __val = *__ptr; \ - *__ptr = nullvalue; \ - __val; \ - }) - -#define take_fd(fd) __get_and_null(fd, -EBADF) - -#define _FAIL(errnum, fmt...) \ - ({ \ - error_at_line(0, (errnum), __func__, __LINE__, fmt); \ - CHECK_FAIL(true); \ - }) -#define FAIL(fmt...) _FAIL(0, fmt) -#define FAIL_ERRNO(fmt...) _FAIL(errno, fmt) -#define FAIL_LIBBPF(err, msg) \ - ({ \ - char __buf[MAX_STRERR_LEN]; \ - libbpf_strerror((err), __buf, sizeof(__buf)); \ - FAIL("%s: %s", (msg), __buf); \ - }) - -/* Wrappers that fail the test on error and report it. 
*/ - -#define xaccept_nonblock(fd, addr, len) \ - ({ \ - int __ret = \ - accept_timeout((fd), (addr), (len), IO_TIMEOUT_SEC); \ - if (__ret == -1) \ - FAIL_ERRNO("accept"); \ - __ret; \ - }) - -#define xbind(fd, addr, len) \ - ({ \ - int __ret = bind((fd), (addr), (len)); \ - if (__ret == -1) \ - FAIL_ERRNO("bind"); \ - __ret; \ - }) - -#define xclose(fd) \ - ({ \ - int __ret = close((fd)); \ - if (__ret == -1) \ - FAIL_ERRNO("close"); \ - __ret; \ - }) - -#define xconnect(fd, addr, len) \ - ({ \ - int __ret = connect((fd), (addr), (len)); \ - if (__ret == -1) \ - FAIL_ERRNO("connect"); \ - __ret; \ - }) - -#define xgetsockname(fd, addr, len) \ - ({ \ - int __ret = getsockname((fd), (addr), (len)); \ - if (__ret == -1) \ - FAIL_ERRNO("getsockname"); \ - __ret; \ - }) - -#define xgetsockopt(fd, level, name, val, len) \ - ({ \ - int __ret = getsockopt((fd), (level), (name), (val), (len)); \ - if (__ret == -1) \ - FAIL_ERRNO("getsockopt(" #name ")"); \ - __ret; \ - }) - -#define xlisten(fd, backlog) \ - ({ \ - int __ret = listen((fd), (backlog)); \ - if (__ret == -1) \ - FAIL_ERRNO("listen"); \ - __ret; \ - }) - -#define xsetsockopt(fd, level, name, val, len) \ - ({ \ - int __ret = setsockopt((fd), (level), (name), (val), (len)); \ - if (__ret == -1) \ - FAIL_ERRNO("setsockopt(" #name ")"); \ - __ret; \ - }) - -#define xsend(fd, buf, len, flags) \ - ({ \ - ssize_t __ret = send((fd), (buf), (len), (flags)); \ - if (__ret == -1) \ - FAIL_ERRNO("send"); \ - __ret; \ - }) - -#define xrecv_nonblock(fd, buf, len, flags) \ - ({ \ - ssize_t __ret = recv_timeout((fd), (buf), (len), (flags), \ - IO_TIMEOUT_SEC); \ - if (__ret == -1) \ - FAIL_ERRNO("recv"); \ - __ret; \ - }) - -#define xsocket(family, sotype, flags) \ - ({ \ - int __ret = socket(family, sotype, flags); \ - if (__ret == -1) \ - FAIL_ERRNO("socket"); \ - __ret; \ - }) - #define xbpf_map_delete_elem(fd, key) \ ({ \ int __ret = bpf_map_delete_elem((fd), (key)); \ - if (__ret < 0) \ + if (__ret < 0) \ FAIL_ERRNO("map_delete"); \ __ret; \ }) @@ -145,7 +21,7 @@ #define xbpf_map_lookup_elem(fd, key, val) \ ({ \ int __ret = bpf_map_lookup_elem((fd), (key), (val)); \ - if (__ret < 0) \ + if (__ret < 0) \ FAIL_ERRNO("map_lookup"); \ __ret; \ }) @@ -153,7 +29,7 @@ #define xbpf_map_update_elem(fd, key, val, flags) \ ({ \ int __ret = bpf_map_update_elem((fd), (key), (val), (flags)); \ - if (__ret < 0) \ + if (__ret < 0) \ FAIL_ERRNO("map_update"); \ __ret; \ }) @@ -162,7 +38,7 @@ ({ \ int __ret = \ bpf_prog_attach((prog), (target), (type), (flags)); \ - if (__ret < 0) \ + if (__ret < 0) \ FAIL_ERRNO("prog_attach(" #type ")"); \ __ret; \ }) @@ -170,7 +46,7 @@ #define xbpf_prog_detach2(prog, target, type) \ ({ \ int __ret = bpf_prog_detach2((prog), (target), (type)); \ - if (__ret < 0) \ + if (__ret < 0) \ FAIL_ERRNO("prog_detach2(" #type ")"); \ __ret; \ }) @@ -193,277 +69,15 @@ __ret; \ }) -static inline void close_fd(int *fd) -{ - if (*fd >= 0) - xclose(*fd); -} - -#define __close_fd __attribute__((cleanup(close_fd))) - -static inline int poll_connect(int fd, unsigned int timeout_sec) -{ - struct timeval timeout = { .tv_sec = timeout_sec }; - fd_set wfds; - int r, eval; - socklen_t esize = sizeof(eval); - - FD_ZERO(&wfds); - FD_SET(fd, &wfds); - - r = select(fd + 1, NULL, &wfds, NULL, &timeout); - if (r == 0) - errno = ETIME; - if (r != 1) - return -1; - - if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &eval, &esize) < 0) - return -1; - if (eval != 0) { - errno = eval; - return -1; - } - - return 0; -} - -static inline int poll_read(int fd, unsigned int 
timeout_sec) -{ - struct timeval timeout = { .tv_sec = timeout_sec }; - fd_set rfds; - int r; - - FD_ZERO(&rfds); - FD_SET(fd, &rfds); - - r = select(fd + 1, &rfds, NULL, NULL, &timeout); - if (r == 0) - errno = ETIME; - - return r == 1 ? 0 : -1; -} - -static inline int accept_timeout(int fd, struct sockaddr *addr, socklen_t *len, - unsigned int timeout_sec) +static inline int add_to_sockmap(int mapfd, int fd1, int fd2) { - if (poll_read(fd, timeout_sec)) - return -1; - - return accept(fd, addr, len); -} - -static inline int recv_timeout(int fd, void *buf, size_t len, int flags, - unsigned int timeout_sec) -{ - if (poll_read(fd, timeout_sec)) - return -1; - - return recv(fd, buf, len, flags); -} - -static inline void init_addr_loopback4(struct sockaddr_storage *ss, - socklen_t *len) -{ - struct sockaddr_in *addr4 = memset(ss, 0, sizeof(*ss)); - - addr4->sin_family = AF_INET; - addr4->sin_port = 0; - addr4->sin_addr.s_addr = htonl(INADDR_LOOPBACK); - *len = sizeof(*addr4); -} - -static inline void init_addr_loopback6(struct sockaddr_storage *ss, - socklen_t *len) -{ - struct sockaddr_in6 *addr6 = memset(ss, 0, sizeof(*ss)); - - addr6->sin6_family = AF_INET6; - addr6->sin6_port = 0; - addr6->sin6_addr = in6addr_loopback; - *len = sizeof(*addr6); -} - -static inline void init_addr_loopback_vsock(struct sockaddr_storage *ss, - socklen_t *len) -{ - struct sockaddr_vm *addr = memset(ss, 0, sizeof(*ss)); - - addr->svm_family = AF_VSOCK; - addr->svm_port = VMADDR_PORT_ANY; - addr->svm_cid = VMADDR_CID_LOCAL; - *len = sizeof(*addr); -} - -static inline void init_addr_loopback(int family, struct sockaddr_storage *ss, - socklen_t *len) -{ - switch (family) { - case AF_INET: - init_addr_loopback4(ss, len); - return; - case AF_INET6: - init_addr_loopback6(ss, len); - return; - case AF_VSOCK: - init_addr_loopback_vsock(ss, len); - return; - default: - FAIL("unsupported address family %d", family); - } -} - -static inline struct sockaddr *sockaddr(struct sockaddr_storage *ss) -{ - return (struct sockaddr *)ss; -} - -static inline int add_to_sockmap(int sock_mapfd, int fd1, int fd2) -{ - u64 value; - u32 key; int err; - key = 0; - value = fd1; - err = xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST); + err = xbpf_map_update_elem(mapfd, &u32(0), &u64(fd1), BPF_NOEXIST); if (err) return err; - key = 1; - value = fd2; - return xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST); -} - -static inline int enable_reuseport(int s, int progfd) -{ - int err, one = 1; - - err = xsetsockopt(s, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)); - if (err) - return -1; - err = xsetsockopt(s, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF, &progfd, - sizeof(progfd)); - if (err) - return -1; - - return 0; -} - -static inline int socket_loopback_reuseport(int family, int sotype, int progfd) -{ - struct sockaddr_storage addr; - socklen_t len = 0; - int err, s; - - init_addr_loopback(family, &addr, &len); - - s = xsocket(family, sotype, 0); - if (s == -1) - return -1; - - if (progfd >= 0) - enable_reuseport(s, progfd); - - err = xbind(s, sockaddr(&addr), len); - if (err) - goto close; - - if (sotype & SOCK_DGRAM) - return s; - - err = xlisten(s, SOMAXCONN); - if (err) - goto close; - - return s; -close: - xclose(s); - return -1; -} - -static inline int socket_loopback(int family, int sotype) -{ - return socket_loopback_reuseport(family, sotype, -1); -} - -static inline int create_pair(int family, int sotype, int *p0, int *p1) -{ - __close_fd int s, c = -1, p = -1; - struct sockaddr_storage addr; - socklen_t len = 
sizeof(addr); - int err; - - s = socket_loopback(family, sotype); - if (s < 0) - return s; - - err = xgetsockname(s, sockaddr(&addr), &len); - if (err) - return err; - - c = xsocket(family, sotype, 0); - if (c < 0) - return c; - - err = connect(c, sockaddr(&addr), len); - if (err) { - if (errno != EINPROGRESS) { - FAIL_ERRNO("connect"); - return err; - } - - err = poll_connect(c, IO_TIMEOUT_SEC); - if (err) { - FAIL_ERRNO("poll_connect"); - return err; - } - } - - switch (sotype & SOCK_TYPE_MASK) { - case SOCK_DGRAM: - err = xgetsockname(c, sockaddr(&addr), &len); - if (err) - return err; - - err = xconnect(s, sockaddr(&addr), len); - if (err) - return err; - - *p0 = take_fd(s); - break; - case SOCK_STREAM: - case SOCK_SEQPACKET: - p = xaccept_nonblock(s, NULL, NULL); - if (p < 0) - return p; - - *p0 = take_fd(p); - break; - default: - FAIL("Unsupported socket type %#x", sotype); - return -EOPNOTSUPP; - } - - *p1 = take_fd(c); - return 0; -} - -static inline int create_socket_pairs(int family, int sotype, int *c0, int *c1, - int *p0, int *p1) -{ - int err; - - err = create_pair(family, sotype, c0, p0); - if (err) - return err; - - err = create_pair(family, sotype, c1, p1); - if (err) { - close(*c0); - close(*p0); - } - - return err; + return xbpf_map_update_elem(mapfd, &u32(1), &u64(fd2), BPF_NOEXIST); } #endif // __SOCKMAP_HELPERS__ diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c index 2d0796314862..b87e7f39e15a 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c @@ -3,77 +3,62 @@ /* * Tests for sockmap/sockhash holding kTLS sockets. */ - +#include <error.h> #include <netinet/tcp.h> +#include <linux/tls.h> #include "test_progs.h" +#include "sockmap_helpers.h" +#include "test_skmsg_load_helpers.skel.h" +#include "test_sockmap_ktls.skel.h" #define MAX_TEST_NAME 80 #define TCP_ULP 31 -static int tcp_server(int family) +static int init_ktls_pairs(int c, int p) { - int err, s; - - s = socket(family, SOCK_STREAM, 0); - if (!ASSERT_GE(s, 0, "socket")) - return -1; - - err = listen(s, SOMAXCONN); - if (!ASSERT_OK(err, "listen")) - return -1; - - return s; -} + int err; + struct tls12_crypto_info_aes_gcm_128 crypto_rx; + struct tls12_crypto_info_aes_gcm_128 crypto_tx; -static int disconnect(int fd) -{ - struct sockaddr unspec = { AF_UNSPEC }; + err = setsockopt(c, IPPROTO_TCP, TCP_ULP, "tls", strlen("tls")); + if (!ASSERT_OK(err, "setsockopt(TCP_ULP)")) + goto out; - return connect(fd, &unspec, sizeof(unspec)); + err = setsockopt(p, IPPROTO_TCP, TCP_ULP, "tls", strlen("tls")); + if (!ASSERT_OK(err, "setsockopt(TCP_ULP)")) + goto out; + + memset(&crypto_rx, 0, sizeof(crypto_rx)); + memset(&crypto_tx, 0, sizeof(crypto_tx)); + crypto_rx.info.version = TLS_1_2_VERSION; + crypto_tx.info.version = TLS_1_2_VERSION; + crypto_rx.info.cipher_type = TLS_CIPHER_AES_GCM_128; + crypto_tx.info.cipher_type = TLS_CIPHER_AES_GCM_128; + + err = setsockopt(c, SOL_TLS, TLS_TX, &crypto_tx, sizeof(crypto_tx)); + if (!ASSERT_OK(err, "setsockopt(TLS_TX)")) + goto out; + + err = setsockopt(p, SOL_TLS, TLS_RX, &crypto_rx, sizeof(crypto_rx)); + if (!ASSERT_OK(err, "setsockopt(TLS_RX)")) + goto out; + return 0; +out: + return -1; } -/* Disconnect (unhash) a kTLS socket after removing it from sockmap. 
*/ -static void test_sockmap_ktls_disconnect_after_delete(int family, int map) +static int create_ktls_pairs(int family, int sotype, int *c, int *p) { - struct sockaddr_storage addr = {0}; - socklen_t len = sizeof(addr); - int err, cli, srv, zero = 0; - - srv = tcp_server(family); - if (srv == -1) - return; - - err = getsockname(srv, (struct sockaddr *)&addr, &len); - if (!ASSERT_OK(err, "getsockopt")) - goto close_srv; - - cli = socket(family, SOCK_STREAM, 0); - if (!ASSERT_GE(cli, 0, "socket")) - goto close_srv; + int err; - err = connect(cli, (struct sockaddr *)&addr, len); - if (!ASSERT_OK(err, "connect")) - goto close_cli; - - err = bpf_map_update_elem(map, &zero, &cli, 0); - if (!ASSERT_OK(err, "bpf_map_update_elem")) - goto close_cli; - - err = setsockopt(cli, IPPROTO_TCP, TCP_ULP, "tls", strlen("tls")); - if (!ASSERT_OK(err, "setsockopt(TCP_ULP)")) - goto close_cli; - - err = bpf_map_delete_elem(map, &zero); - if (!ASSERT_OK(err, "bpf_map_delete_elem")) - goto close_cli; - - err = disconnect(cli); - ASSERT_OK(err, "disconnect"); + err = create_pair(family, sotype, c, p); + if (!ASSERT_OK(err, "create_pair()")) + return -1; -close_cli: - close(cli); -close_srv: - close(srv); + err = init_ktls_pairs(*c, *p); + if (!ASSERT_OK(err, "init_ktls_pairs(c, p)")) + return -1; + return 0; } static void test_sockmap_ktls_update_fails_when_sock_has_ulp(int family, int map) @@ -146,6 +131,278 @@ static const char *fmt_test_name(const char *subtest_name, int family, return test_name; } +static void test_sockmap_ktls_offload(int family, int sotype) +{ + int err; + int c = 0, p = 0, sent, recvd; + char msg[12] = "hello world\0"; + char rcv[13]; + + err = create_ktls_pairs(family, sotype, &c, &p); + if (!ASSERT_OK(err, "create_ktls_pairs()")) + goto out; + + sent = send(c, msg, sizeof(msg), 0); + if (!ASSERT_OK(err, "send(msg)")) + goto out; + + recvd = recv(p, rcv, sizeof(rcv), 0); + if (!ASSERT_OK(err, "recv(msg)") || + !ASSERT_EQ(recvd, sent, "length mismatch")) + goto out; + + ASSERT_OK(memcmp(msg, rcv, sizeof(msg)), "data mismatch"); + +out: + if (c) + close(c); + if (p) + close(p); +} + +static void test_sockmap_ktls_tx_cork(int family, int sotype, bool push) +{ + int err, off; + int i, j; + int start_push = 0, push_len = 0; + int c = 0, p = 0, one = 1, sent, recvd; + int prog_fd, map_fd; + char msg[12] = "hello world\0"; + char rcv[20] = {0}; + struct test_sockmap_ktls *skel; + + skel = test_sockmap_ktls__open_and_load(); + if (!ASSERT_TRUE(skel, "open ktls skel")) + return; + + err = create_pair(family, sotype, &c, &p); + if (!ASSERT_OK(err, "create_pair()")) + goto out; + + prog_fd = bpf_program__fd(skel->progs.prog_sk_policy); + map_fd = bpf_map__fd(skel->maps.sock_map); + + err = bpf_prog_attach(prog_fd, map_fd, BPF_SK_MSG_VERDICT, 0); + if (!ASSERT_OK(err, "bpf_prog_attach sk msg")) + goto out; + + err = bpf_map_update_elem(map_fd, &one, &c, BPF_NOEXIST); + if (!ASSERT_OK(err, "bpf_map_update_elem(c)")) + goto out; + + err = init_ktls_pairs(c, p); + if (!ASSERT_OK(err, "init_ktls_pairs(c, p)")) + goto out; + + skel->bss->cork_byte = sizeof(msg); + if (push) { + start_push = 1; + push_len = 2; + } + skel->bss->push_start = start_push; + skel->bss->push_end = push_len; + + off = sizeof(msg) / 2; + sent = send(c, msg, off, 0); + if (!ASSERT_EQ(sent, off, "send(msg)")) + goto out; + + recvd = recv_timeout(p, rcv, sizeof(rcv), MSG_DONTWAIT, 1); + if (!ASSERT_EQ(-1, recvd, "expected no data")) + goto out; + + /* send remaining msg */ + sent = send(c, msg + off, sizeof(msg) - off, 0); + if 
(!ASSERT_EQ(sent, sizeof(msg) - off, "send remaining data")) + goto out; + + recvd = recv_timeout(p, rcv, sizeof(rcv), MSG_DONTWAIT, 1); + if (!ASSERT_OK(err, "recv(msg)") || + !ASSERT_EQ(recvd, sizeof(msg) + push_len, "check length mismatch")) + goto out; + + for (i = 0, j = 0; i < recvd;) { + /* skip checking the data that has been pushed in */ + if (i >= start_push && i <= start_push + push_len - 1) { + i++; + continue; + } + if (!ASSERT_EQ(rcv[i], msg[j], "data mismatch")) + goto out; + i++; + j++; + } +out: + if (c) + close(c); + if (p) + close(p); + test_sockmap_ktls__destroy(skel); +} + +static void test_sockmap_ktls_tx_no_buf(int family, int sotype, bool push) +{ + int c = -1, p = -1, one = 1, two = 2; + struct test_sockmap_ktls *skel; + unsigned char *data = NULL; + struct msghdr msg = {0}; + struct iovec iov[2]; + int prog_fd, map_fd; + int txrx_buf = 1024; + int iov_length = 8192; + int err; + + skel = test_sockmap_ktls__open_and_load(); + if (!ASSERT_TRUE(skel, "open ktls skel")) + return; + + err = create_pair(family, sotype, &c, &p); + if (!ASSERT_OK(err, "create_pair()")) + goto out; + + err = setsockopt(c, SOL_SOCKET, SO_RCVBUFFORCE, &txrx_buf, sizeof(int)); + err |= setsockopt(p, SOL_SOCKET, SO_SNDBUFFORCE, &txrx_buf, sizeof(int)); + if (!ASSERT_OK(err, "set buf limit")) + goto out; + + prog_fd = bpf_program__fd(skel->progs.prog_sk_policy_redir); + map_fd = bpf_map__fd(skel->maps.sock_map); + + err = bpf_prog_attach(prog_fd, map_fd, BPF_SK_MSG_VERDICT, 0); + if (!ASSERT_OK(err, "bpf_prog_attach sk msg")) + goto out; + + err = bpf_map_update_elem(map_fd, &one, &c, BPF_NOEXIST); + if (!ASSERT_OK(err, "bpf_map_update_elem(c)")) + goto out; + + err = bpf_map_update_elem(map_fd, &two, &p, BPF_NOEXIST); + if (!ASSERT_OK(err, "bpf_map_update_elem(p)")) + goto out; + + skel->bss->apply_bytes = 1024; + + err = init_ktls_pairs(c, p); + if (!ASSERT_OK(err, "init_ktls_pairs(c, p)")) + goto out; + + data = calloc(iov_length, sizeof(char)); + if (!data) + goto out; + + iov[0].iov_base = data; + iov[0].iov_len = iov_length; + iov[1].iov_base = data; + iov[1].iov_len = iov_length; + msg.msg_iov = iov; + msg.msg_iovlen = 2; + + for (;;) { + err = sendmsg(c, &msg, MSG_DONTWAIT); + if (err <= 0) + break; + } + +out: + if (data) + free(data); + if (c != -1) + close(c); + if (p != -1) + close(p); + + test_sockmap_ktls__destroy(skel); +} + +static void test_sockmap_ktls_tx_pop(int family, int sotype) +{ + char msg[37] = "0123456789abcdefghijklmnopqrstuvwxyz\0"; + int c = 0, p = 0, one = 1, sent, recvd; + struct test_sockmap_ktls *skel; + int prog_fd, map_fd; + char rcv[50] = {0}; + int err; + int i, m, r; + + skel = test_sockmap_ktls__open_and_load(); + if (!ASSERT_TRUE(skel, "open ktls skel")) + return; + + err = create_pair(family, sotype, &c, &p); + if (!ASSERT_OK(err, "create_pair()")) + goto out; + + prog_fd = bpf_program__fd(skel->progs.prog_sk_policy); + map_fd = bpf_map__fd(skel->maps.sock_map); + + err = bpf_prog_attach(prog_fd, map_fd, BPF_SK_MSG_VERDICT, 0); + if (!ASSERT_OK(err, "bpf_prog_attach sk msg")) + goto out; + + err = bpf_map_update_elem(map_fd, &one, &c, BPF_NOEXIST); + if (!ASSERT_OK(err, "bpf_map_update_elem(c)")) + goto out; + + err = init_ktls_pairs(c, p); + if (!ASSERT_OK(err, "init_ktls_pairs(c, p)")) + goto out; + + struct { + int pop_start; + int pop_len; + } pop_policy[] = { + /* trim the start */ + {0, 2}, + {0, 10}, + {1, 2}, + {1, 10}, + /* trim the end */ + {35, 2}, + /* New entries should be added before this line */ + {-1, -1}, + }; + + i = 0; + while 
(pop_policy[i].pop_start >= 0) { + skel->bss->pop_start = pop_policy[i].pop_start; + skel->bss->pop_end = pop_policy[i].pop_len; + + sent = send(c, msg, sizeof(msg), 0); + if (!ASSERT_EQ(sent, sizeof(msg), "send(msg)")) + goto out; + + recvd = recv_timeout(p, rcv, sizeof(rcv), MSG_DONTWAIT, 1); + if (!ASSERT_EQ(recvd, sizeof(msg) - pop_policy[i].pop_len, "pop len mismatch")) + goto out; + + /* verify the data + * msg: 0123456789a bcdefghij klmnopqrstuvwxyz + * | | + * popped data + */ + for (m = 0, r = 0; m < sizeof(msg);) { + /* skip checking the data that has been popped */ + if (m >= pop_policy[i].pop_start && + m <= pop_policy[i].pop_start + pop_policy[i].pop_len - 1) { + m++; + continue; + } + + if (!ASSERT_EQ(msg[m], rcv[r], "data mismatch")) + goto out; + m++; + r++; + } + i++; + } +out: + if (c) + close(c); + if (p) + close(p); + test_sockmap_ktls__destroy(skel); +} + static void run_tests(int family, enum bpf_map_type map_type) { int map; @@ -154,18 +411,32 @@ static void run_tests(int family, enum bpf_map_type map_type) if (!ASSERT_GE(map, 0, "bpf_map_create")) return; - if (test__start_subtest(fmt_test_name("disconnect_after_delete", family, map_type))) - test_sockmap_ktls_disconnect_after_delete(family, map); if (test__start_subtest(fmt_test_name("update_fails_when_sock_has_ulp", family, map_type))) test_sockmap_ktls_update_fails_when_sock_has_ulp(family, map); close(map); } +static void run_ktls_test(int family, int sotype) +{ + if (test__start_subtest("tls simple offload")) + test_sockmap_ktls_offload(family, sotype); + if (test__start_subtest("tls tx cork")) + test_sockmap_ktls_tx_cork(family, sotype, false); + if (test__start_subtest("tls tx cork with push")) + test_sockmap_ktls_tx_cork(family, sotype, true); + if (test__start_subtest("tls tx egress with no buf")) + test_sockmap_ktls_tx_no_buf(family, sotype, true); + if (test__start_subtest("tls tx with pop")) + test_sockmap_ktls_tx_pop(family, sotype); +} + void test_sockmap_ktls(void) { run_tests(AF_INET, BPF_MAP_TYPE_SOCKMAP); run_tests(AF_INET, BPF_MAP_TYPE_SOCKHASH); run_tests(AF_INET6, BPF_MAP_TYPE_SOCKMAP); run_tests(AF_INET6, BPF_MAP_TYPE_SOCKHASH); + run_ktls_test(AF_INET, SOCK_STREAM); + run_ktls_test(AF_INET6, SOCK_STREAM); } diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c index 4ee1148d22be..f1bdccc7e4e7 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c @@ -924,6 +924,8 @@ static void redir_partial(int family, int sotype, int sock_map, int parser_map) goto close; n = xsend(c1, buf, sizeof(buf), 0); + if (n == -1) + goto close; if (n < sizeof(buf)) FAIL("incomplete write"); @@ -1366,237 +1368,6 @@ static void test_redir(struct test_sockmap_listen *skel, struct bpf_map *map, } } -static void pairs_redir_to_connected(int cli0, int peer0, int cli1, int peer1, - int sock_mapfd, int nop_mapfd, - int verd_mapfd, enum redir_mode mode, - int send_flags) -{ - const char *log_prefix = redir_mode_str(mode); - unsigned int pass; - int err, n; - u32 key; - char b; - - zero_verdict_count(verd_mapfd); - - err = add_to_sockmap(sock_mapfd, peer0, peer1); - if (err) - return; - - if (nop_mapfd >= 0) { - err = add_to_sockmap(nop_mapfd, cli0, cli1); - if (err) - return; - } - - /* Last byte is OOB data when send_flags has MSG_OOB bit set */ - n = xsend(cli1, "ab", 2, send_flags); - if (n >= 0 && n < 2) - FAIL("%s: incomplete send", log_prefix); - if (n < 2) - return; - - key 
= SK_PASS; - err = xbpf_map_lookup_elem(verd_mapfd, &key, &pass); - if (err) - return; - if (pass != 1) - FAIL("%s: want pass count 1, have %d", log_prefix, pass); - - n = recv_timeout(mode == REDIR_INGRESS ? peer0 : cli0, &b, 1, 0, IO_TIMEOUT_SEC); - if (n < 0) - FAIL_ERRNO("%s: recv_timeout", log_prefix); - if (n == 0) - FAIL("%s: incomplete recv", log_prefix); - - if (send_flags & MSG_OOB) { - /* Check that we can't read OOB while in sockmap */ - errno = 0; - n = recv(peer1, &b, 1, MSG_OOB | MSG_DONTWAIT); - if (n != -1 || errno != EOPNOTSUPP) - FAIL("%s: recv(MSG_OOB): expected EOPNOTSUPP: retval=%d errno=%d", - log_prefix, n, errno); - - /* Remove peer1 from sockmap */ - xbpf_map_delete_elem(sock_mapfd, &(int){ 1 }); - - /* Check that OOB was dropped on redirect */ - errno = 0; - n = recv(peer1, &b, 1, MSG_OOB | MSG_DONTWAIT); - if (n != -1 || errno != EINVAL) - FAIL("%s: recv(MSG_OOB): expected EINVAL: retval=%d errno=%d", - log_prefix, n, errno); - } -} - -static void unix_redir_to_connected(int sotype, int sock_mapfd, - int verd_mapfd, enum redir_mode mode) -{ - int c0, c1, p0, p1; - int sfd[2]; - - if (socketpair(AF_UNIX, sotype | SOCK_NONBLOCK, 0, sfd)) - return; - c0 = sfd[0], p0 = sfd[1]; - - if (socketpair(AF_UNIX, sotype | SOCK_NONBLOCK, 0, sfd)) - goto close0; - c1 = sfd[0], p1 = sfd[1]; - - pairs_redir_to_connected(c0, p0, c1, p1, sock_mapfd, -1, verd_mapfd, - mode, NO_FLAGS); - - xclose(c1); - xclose(p1); -close0: - xclose(c0); - xclose(p0); -} - -static void unix_skb_redir_to_connected(struct test_sockmap_listen *skel, - struct bpf_map *inner_map, int sotype) -{ - int verdict = bpf_program__fd(skel->progs.prog_skb_verdict); - int verdict_map = bpf_map__fd(skel->maps.verdict_map); - int sock_map = bpf_map__fd(inner_map); - int err; - - err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_VERDICT, 0); - if (err) - return; - - skel->bss->test_ingress = false; - unix_redir_to_connected(sotype, sock_map, verdict_map, REDIR_EGRESS); - skel->bss->test_ingress = true; - unix_redir_to_connected(sotype, sock_map, verdict_map, REDIR_INGRESS); - - xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_VERDICT); -} - -static void test_unix_redir(struct test_sockmap_listen *skel, struct bpf_map *map, - int sotype) -{ - const char *family_name, *map_name; - char s[MAX_TEST_NAME]; - - family_name = family_str(AF_UNIX); - map_name = map_type_str(map); - snprintf(s, sizeof(s), "%s %s %s", map_name, family_name, __func__); - if (!test__start_subtest(s)) - return; - unix_skb_redir_to_connected(skel, map, sotype); -} - -/* Returns two connected loopback vsock sockets */ -static int vsock_socketpair_connectible(int sotype, int *v0, int *v1) -{ - return create_pair(AF_VSOCK, sotype | SOCK_NONBLOCK, v0, v1); -} - -static void vsock_unix_redir_connectible(int sock_mapfd, int verd_mapfd, - enum redir_mode mode, int sotype) -{ - const char *log_prefix = redir_mode_str(mode); - char a = 'a', b = 'b'; - int u0, u1, v0, v1; - int sfd[2]; - unsigned int pass; - int err, n; - u32 key; - - zero_verdict_count(verd_mapfd); - - if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_NONBLOCK, 0, sfd)) - return; - - u0 = sfd[0]; - u1 = sfd[1]; - - err = vsock_socketpair_connectible(sotype, &v0, &v1); - if (err) { - FAIL("vsock_socketpair_connectible() failed"); - goto close_uds; - } - - err = add_to_sockmap(sock_mapfd, u0, v0); - if (err) { - FAIL("add_to_sockmap failed"); - goto close_vsock; - } - - n = write(v1, &a, sizeof(a)); - if (n < 0) - FAIL_ERRNO("%s: write", log_prefix); - if (n == 0) - FAIL("%s: incomplete write", 
log_prefix); - if (n < 1) - goto out; - - n = xrecv_nonblock(mode == REDIR_INGRESS ? u0 : u1, &b, sizeof(b), 0); - if (n < 0) - FAIL("%s: recv() err, errno=%d", log_prefix, errno); - if (n == 0) - FAIL("%s: incomplete recv", log_prefix); - if (b != a) - FAIL("%s: vsock socket map failed, %c != %c", log_prefix, a, b); - - key = SK_PASS; - err = xbpf_map_lookup_elem(verd_mapfd, &key, &pass); - if (err) - goto out; - if (pass != 1) - FAIL("%s: want pass count 1, have %d", log_prefix, pass); -out: - key = 0; - bpf_map_delete_elem(sock_mapfd, &key); - key = 1; - bpf_map_delete_elem(sock_mapfd, &key); - -close_vsock: - close(v0); - close(v1); - -close_uds: - close(u0); - close(u1); -} - -static void vsock_unix_skb_redir_connectible(struct test_sockmap_listen *skel, - struct bpf_map *inner_map, - int sotype) -{ - int verdict = bpf_program__fd(skel->progs.prog_skb_verdict); - int verdict_map = bpf_map__fd(skel->maps.verdict_map); - int sock_map = bpf_map__fd(inner_map); - int err; - - err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_VERDICT, 0); - if (err) - return; - - skel->bss->test_ingress = false; - vsock_unix_redir_connectible(sock_map, verdict_map, REDIR_EGRESS, sotype); - skel->bss->test_ingress = true; - vsock_unix_redir_connectible(sock_map, verdict_map, REDIR_INGRESS, sotype); - - xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_VERDICT); -} - -static void test_vsock_redir(struct test_sockmap_listen *skel, struct bpf_map *map) -{ - const char *family_name, *map_name; - char s[MAX_TEST_NAME]; - - family_name = family_str(AF_VSOCK); - map_name = map_type_str(map); - snprintf(s, sizeof(s), "%s %s %s", map_name, family_name, __func__); - if (!test__start_subtest(s)) - return; - - vsock_unix_skb_redir_connectible(skel, map, SOCK_STREAM); - vsock_unix_skb_redir_connectible(skel, map, SOCK_SEQPACKET); -} - static void test_reuseport(struct test_sockmap_listen *skel, struct bpf_map *map, int family, int sotype) { @@ -1637,224 +1408,6 @@ static void test_reuseport(struct test_sockmap_listen *skel, } } -static int inet_socketpair(int family, int type, int *s, int *c) -{ - return create_pair(family, type | SOCK_NONBLOCK, s, c); -} - -static void udp_redir_to_connected(int family, int sock_mapfd, int verd_mapfd, - enum redir_mode mode) -{ - int c0, c1, p0, p1; - int err; - - err = inet_socketpair(family, SOCK_DGRAM, &p0, &c0); - if (err) - return; - err = inet_socketpair(family, SOCK_DGRAM, &p1, &c1); - if (err) - goto close_cli0; - - pairs_redir_to_connected(c0, p0, c1, p1, sock_mapfd, -1, verd_mapfd, - mode, NO_FLAGS); - - xclose(c1); - xclose(p1); -close_cli0: - xclose(c0); - xclose(p0); -} - -static void udp_skb_redir_to_connected(struct test_sockmap_listen *skel, - struct bpf_map *inner_map, int family) -{ - int verdict = bpf_program__fd(skel->progs.prog_skb_verdict); - int verdict_map = bpf_map__fd(skel->maps.verdict_map); - int sock_map = bpf_map__fd(inner_map); - int err; - - err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_VERDICT, 0); - if (err) - return; - - skel->bss->test_ingress = false; - udp_redir_to_connected(family, sock_map, verdict_map, REDIR_EGRESS); - skel->bss->test_ingress = true; - udp_redir_to_connected(family, sock_map, verdict_map, REDIR_INGRESS); - - xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_VERDICT); -} - -static void test_udp_redir(struct test_sockmap_listen *skel, struct bpf_map *map, - int family) -{ - const char *family_name, *map_name; - char s[MAX_TEST_NAME]; - - family_name = family_str(family); - map_name = map_type_str(map); - snprintf(s, 
sizeof(s), "%s %s %s", map_name, family_name, __func__); - if (!test__start_subtest(s)) - return; - udp_skb_redir_to_connected(skel, map, family); -} - -static void inet_unix_redir_to_connected(int family, int type, int sock_mapfd, - int verd_mapfd, enum redir_mode mode) -{ - int c0, c1, p0, p1; - int sfd[2]; - int err; - - if (socketpair(AF_UNIX, type | SOCK_NONBLOCK, 0, sfd)) - return; - c0 = sfd[0], p0 = sfd[1]; - - err = inet_socketpair(family, type, &p1, &c1); - if (err) - goto close; - - pairs_redir_to_connected(c0, p0, c1, p1, sock_mapfd, -1, verd_mapfd, - mode, NO_FLAGS); - - xclose(c1); - xclose(p1); -close: - xclose(c0); - xclose(p0); -} - -static void inet_unix_skb_redir_to_connected(struct test_sockmap_listen *skel, - struct bpf_map *inner_map, int family) -{ - int verdict = bpf_program__fd(skel->progs.prog_skb_verdict); - int verdict_map = bpf_map__fd(skel->maps.verdict_map); - int sock_map = bpf_map__fd(inner_map); - int err; - - err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_VERDICT, 0); - if (err) - return; - - skel->bss->test_ingress = false; - inet_unix_redir_to_connected(family, SOCK_DGRAM, sock_map, verdict_map, - REDIR_EGRESS); - inet_unix_redir_to_connected(family, SOCK_STREAM, sock_map, verdict_map, - REDIR_EGRESS); - skel->bss->test_ingress = true; - inet_unix_redir_to_connected(family, SOCK_DGRAM, sock_map, verdict_map, - REDIR_INGRESS); - inet_unix_redir_to_connected(family, SOCK_STREAM, sock_map, verdict_map, - REDIR_INGRESS); - - xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_VERDICT); -} - -static void unix_inet_redir_to_connected(int family, int type, int sock_mapfd, - int nop_mapfd, int verd_mapfd, - enum redir_mode mode, int send_flags) -{ - int c0, c1, p0, p1; - int sfd[2]; - int err; - - err = inet_socketpair(family, type, &p0, &c0); - if (err) - return; - - if (socketpair(AF_UNIX, type | SOCK_NONBLOCK, 0, sfd)) - goto close_cli0; - c1 = sfd[0], p1 = sfd[1]; - - pairs_redir_to_connected(c0, p0, c1, p1, sock_mapfd, nop_mapfd, - verd_mapfd, mode, send_flags); - - xclose(c1); - xclose(p1); -close_cli0: - xclose(c0); - xclose(p0); -} - -static void unix_inet_skb_redir_to_connected(struct test_sockmap_listen *skel, - struct bpf_map *inner_map, int family) -{ - int verdict = bpf_program__fd(skel->progs.prog_skb_verdict); - int nop_map = bpf_map__fd(skel->maps.nop_map); - int verdict_map = bpf_map__fd(skel->maps.verdict_map); - int sock_map = bpf_map__fd(inner_map); - int err; - - err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_VERDICT, 0); - if (err) - return; - - skel->bss->test_ingress = false; - unix_inet_redir_to_connected(family, SOCK_DGRAM, - sock_map, -1, verdict_map, - REDIR_EGRESS, NO_FLAGS); - unix_inet_redir_to_connected(family, SOCK_STREAM, - sock_map, -1, verdict_map, - REDIR_EGRESS, NO_FLAGS); - - unix_inet_redir_to_connected(family, SOCK_DGRAM, - sock_map, nop_map, verdict_map, - REDIR_EGRESS, NO_FLAGS); - unix_inet_redir_to_connected(family, SOCK_STREAM, - sock_map, nop_map, verdict_map, - REDIR_EGRESS, NO_FLAGS); - - /* MSG_OOB not supported by AF_UNIX SOCK_DGRAM */ - unix_inet_redir_to_connected(family, SOCK_STREAM, - sock_map, nop_map, verdict_map, - REDIR_EGRESS, MSG_OOB); - - skel->bss->test_ingress = true; - unix_inet_redir_to_connected(family, SOCK_DGRAM, - sock_map, -1, verdict_map, - REDIR_INGRESS, NO_FLAGS); - unix_inet_redir_to_connected(family, SOCK_STREAM, - sock_map, -1, verdict_map, - REDIR_INGRESS, NO_FLAGS); - - unix_inet_redir_to_connected(family, SOCK_DGRAM, - sock_map, nop_map, verdict_map, - REDIR_INGRESS, 
NO_FLAGS); - unix_inet_redir_to_connected(family, SOCK_STREAM, - sock_map, nop_map, verdict_map, - REDIR_INGRESS, NO_FLAGS); - - /* MSG_OOB not supported by AF_UNIX SOCK_DGRAM */ - unix_inet_redir_to_connected(family, SOCK_STREAM, - sock_map, nop_map, verdict_map, - REDIR_INGRESS, MSG_OOB); - - xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_VERDICT); -} - -static void test_udp_unix_redir(struct test_sockmap_listen *skel, struct bpf_map *map, - int family) -{ - const char *family_name, *map_name; - struct netns_obj *netns; - char s[MAX_TEST_NAME]; - - family_name = family_str(family); - map_name = map_type_str(map); - snprintf(s, sizeof(s), "%s %s %s", map_name, family_name, __func__); - if (!test__start_subtest(s)) - return; - - netns = netns_new("sockmap_listen", true); - if (!ASSERT_OK_PTR(netns, "netns_new")) - return; - - inet_unix_skb_redir_to_connected(skel, map, family); - unix_inet_skb_redir_to_connected(skel, map, family); - - netns_free(netns); -} - static void run_tests(struct test_sockmap_listen *skel, struct bpf_map *map, int family) { @@ -1863,8 +1416,6 @@ static void run_tests(struct test_sockmap_listen *skel, struct bpf_map *map, test_redir(skel, map, family, SOCK_STREAM); test_reuseport(skel, map, family, SOCK_STREAM); test_reuseport(skel, map, family, SOCK_DGRAM); - test_udp_redir(skel, map, family); - test_udp_unix_redir(skel, map, family); } void serial_test_sockmap_listen(void) @@ -1880,16 +1431,10 @@ void serial_test_sockmap_listen(void) skel->bss->test_sockmap = true; run_tests(skel, skel->maps.sock_map, AF_INET); run_tests(skel, skel->maps.sock_map, AF_INET6); - test_unix_redir(skel, skel->maps.sock_map, SOCK_DGRAM); - test_unix_redir(skel, skel->maps.sock_map, SOCK_STREAM); - test_vsock_redir(skel, skel->maps.sock_map); skel->bss->test_sockmap = false; run_tests(skel, skel->maps.sock_hash, AF_INET); run_tests(skel, skel->maps.sock_hash, AF_INET6); - test_unix_redir(skel, skel->maps.sock_hash, SOCK_DGRAM); - test_unix_redir(skel, skel->maps.sock_hash, SOCK_STREAM); - test_vsock_redir(skel, skel->maps.sock_hash); test_sockmap_listen__destroy(skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_redir.c b/tools/testing/selftests/bpf/prog_tests/sockmap_redir.c new file mode 100644 index 000000000000..9c461d93113d --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_redir.c @@ -0,0 +1,465 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Test for sockmap/sockhash redirection. + * + * BPF_MAP_TYPE_SOCKMAP + * BPF_MAP_TYPE_SOCKHASH + * x + * sk_msg-to-egress + * sk_msg-to-ingress + * sk_skb-to-egress + * sk_skb-to-ingress + * x + * AF_INET, SOCK_STREAM + * AF_INET6, SOCK_STREAM + * AF_INET, SOCK_DGRAM + * AF_INET6, SOCK_DGRAM + * AF_UNIX, SOCK_STREAM + * AF_UNIX, SOCK_DGRAM + * AF_VSOCK, SOCK_STREAM + * AF_VSOCK, SOCK_SEQPACKET + */ + +#include <errno.h> +#include <error.h> +#include <sched.h> +#include <stdio.h> +#include <unistd.h> + +#include <netinet/in.h> +#include <sys/socket.h> +#include <sys/types.h> +#include <sys/un.h> +#include <linux/string.h> +#include <linux/vm_sockets.h> + +#include <bpf/bpf.h> +#include <bpf/libbpf.h> + +#include "linux/const.h" +#include "test_progs.h" +#include "sockmap_helpers.h" +#include "test_sockmap_redir.skel.h" + +/* The meaning of SUPPORTED is "will redirect packet as expected". + */ +#define SUPPORTED _BITUL(0) + +/* Note on sk_skb-to-ingress ->af_vsock: + * + * Peer socket may receive the packet some time after the return from sendmsg(). 
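+ * (e.g. the vsock loopback transport delivers packets from a workqueue,
+ * not from the sender's context, so delivery is asynchronous.)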
+ * In a typical usage scenario, recvmsg() will block until the redirected packet
+ * appears in the destination queue, or time out if the packet was dropped. By
+ * that point, the verdict map has already been updated to reflect what has
+ * happened.
+ *
+ * But sk_skb-to-ingress/af_vsock is an unsupported combination, so no recvmsg()
+ * takes place, which means we may race the execution of the verdict logic and
+ * read map_verd before it has been updated, i.e. we might observe
+ * map_verd[SK_DROP]=0 instead of map_verd[SK_DROP]=1.
+ *
+ * This confuses the selftest logic: if there was no packet dropped, where's the
+ * packet? So here's a heuristic: on map_verd[SK_DROP]=map_verd[SK_PASS]=0
+ * (which implies the verdict program has not been run) just re-read the
+ * verdict map.
+ */
+#define UNSUPPORTED_RACY_VERD	_BITUL(1)
+
+enum prog_type {
+	SK_MSG_EGRESS,
+	SK_MSG_INGRESS,
+	SK_SKB_EGRESS,
+	SK_SKB_INGRESS,
+};
+
+enum {
+	SEND_INNER = 0,
+	SEND_OUTER,
+};
+
+enum {
+	RECV_INNER = 0,
+	RECV_OUTER,
+};
+
+struct maps {
+	int in;
+	int out;
+	int verd;
+};
+
+struct combo_spec {
+	enum prog_type prog_type;
+	const char *in, *out;
+};
+
+struct redir_spec {
+	const char *name;
+	int idx_send;
+	int idx_recv;
+	enum prog_type prog_type;
+};
+
+struct socket_spec {
+	int family;
+	int sotype;
+	int send_flags;
+	int in[2];
+	int out[2];
+};
+
+static int socket_spec_pairs(struct socket_spec *s)
+{
+	return create_socket_pairs(s->family, s->sotype,
+				   &s->in[0], &s->out[0],
+				   &s->in[1], &s->out[1]);
+}
+
+static void socket_spec_close(struct socket_spec *s)
+{
+	xclose(s->in[0]);
+	xclose(s->in[1]);
+	xclose(s->out[0]);
+	xclose(s->out[1]);
+}
+
+static void get_redir_params(struct redir_spec *redir,
+			     struct test_sockmap_redir *skel, int *prog_fd,
+			     enum bpf_attach_type *attach_type,
+			     int *redirect_flags)
+{
+	enum prog_type type = redir->prog_type;
+	struct bpf_program *prog;
+	bool sk_msg;
+
+	sk_msg = type == SK_MSG_INGRESS || type == SK_MSG_EGRESS;
+	prog = sk_msg ? skel->progs.prog_msg_verdict : skel->progs.prog_skb_verdict;
+
+	*prog_fd = bpf_program__fd(prog);
+	*attach_type = sk_msg ?
BPF_SK_MSG_VERDICT : BPF_SK_SKB_VERDICT;
+
+	if (type == SK_MSG_INGRESS || type == SK_SKB_INGRESS)
+		*redirect_flags = BPF_F_INGRESS;
+	else
+		*redirect_flags = 0;
+}
+
+static void try_recv(const char *prefix, int fd, int flags, bool expect_success)
+{
+	ssize_t n;
+	char buf;
+
+	errno = 0;
+	n = recv(fd, &buf, 1, flags);
+	if (n < 0 && expect_success)
+		FAIL_ERRNO("%s: unexpected failure: retval=%zd", prefix, n);
+	/* When a failure is expected, any successful read (n >= 0) is an error */
+	if (n >= 0 && !expect_success)
+		FAIL("%s: expected failure: retval=%zd", prefix, n);
+}
+
+static void handle_unsupported(int sd_send, int sd_peer, int sd_in, int sd_out,
+			       int sd_recv, int map_verd, int status)
+{
+	unsigned int drop, pass;
+	char recv_buf;
+	ssize_t n;
+
+get_verdict:
+	if (xbpf_map_lookup_elem(map_verd, &u32(SK_DROP), &drop) ||
+	    xbpf_map_lookup_elem(map_verd, &u32(SK_PASS), &pass))
+		return;
+
+	if (pass == 0 && drop == 0 && (status & UNSUPPORTED_RACY_VERD)) {
+		sched_yield();
+		goto get_verdict;
+	}
+
+	if (pass != 0) {
+		FAIL("unsupported: wanted verdict pass 0, have %u", pass);
+		return;
+	}
+
+	/* If nothing was dropped, packet should have reached the peer */
+	if (drop == 0) {
+		errno = 0;
+		n = recv_timeout(sd_peer, &recv_buf, 1, 0, IO_TIMEOUT_SEC);
+		if (n != 1)
+			FAIL_ERRNO("unsupported: packet missing, retval=%zd", n);
+	}
+
+	/* Ensure queues are empty */
+	try_recv("bpf.recv(sd_send)", sd_send, MSG_DONTWAIT, false);
+	if (sd_in != sd_send)
+		try_recv("bpf.recv(sd_in)", sd_in, MSG_DONTWAIT, false);
+
+	try_recv("bpf.recv(sd_out)", sd_out, MSG_DONTWAIT, false);
+	if (sd_recv != sd_out)
+		try_recv("bpf.recv(sd_recv)", sd_recv, MSG_DONTWAIT, false);
+}
+
+static void test_send_redir_recv(int sd_send, int send_flags, int sd_peer,
+				 int sd_in, int sd_out, int sd_recv,
+				 struct maps *maps, int status)
+{
+	unsigned int drop, pass;
+	char *send_buf = "ab";
+	char recv_buf = '\0';
+	ssize_t n, len = 1;
+
+	/* Zero out the verdict map */
+	if (xbpf_map_update_elem(maps->verd, &u32(SK_DROP), &u32(0), BPF_ANY) ||
+	    xbpf_map_update_elem(maps->verd, &u32(SK_PASS), &u32(0), BPF_ANY))
+		return;
+
+	if (xbpf_map_update_elem(maps->in, &u32(0), &u64(sd_in), BPF_NOEXIST))
+		return;
+
+	if (xbpf_map_update_elem(maps->out, &u32(0), &u64(sd_out), BPF_NOEXIST))
+		goto del_in;
+
+	/* Last byte is OOB data when send_flags has MSG_OOB bit set */
+	if (send_flags & MSG_OOB)
+		len++;
+	n = send(sd_send, send_buf, len, send_flags);
+	if (n >= 0 && n < len)
+		FAIL("incomplete send");
+	if (n < 0) {
+		/* sk_msg redirect combo not supported?
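+		 * e.g. sk_msg redirection into a vsock socket may fail
+		 * the send() itself with EACCES.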
*/ + if (status & SUPPORTED || errno != EACCES) + FAIL_ERRNO("send"); + goto out; + } + + if (!(status & SUPPORTED)) { + handle_unsupported(sd_send, sd_peer, sd_in, sd_out, sd_recv, + maps->verd, status); + goto out; + } + + errno = 0; + n = recv_timeout(sd_recv, &recv_buf, 1, 0, IO_TIMEOUT_SEC); + if (n != 1) { + FAIL_ERRNO("recv_timeout()"); + goto out; + } + + /* Check verdict _after_ recv(); af_vsock may need time to catch up */ + if (xbpf_map_lookup_elem(maps->verd, &u32(SK_DROP), &drop) || + xbpf_map_lookup_elem(maps->verd, &u32(SK_PASS), &pass)) + goto out; + + if (drop != 0 || pass != 1) + FAIL("unexpected verdict drop/pass: wanted 0/1, have %u/%u", + drop, pass); + + if (recv_buf != send_buf[0]) + FAIL("recv(): payload check, %02x != %02x", recv_buf, send_buf[0]); + + if (send_flags & MSG_OOB) { + /* Fail reading OOB while in sockmap */ + try_recv("bpf.recv(sd_out, MSG_OOB)", sd_out, + MSG_OOB | MSG_DONTWAIT, false); + + /* Remove sd_out from sockmap */ + xbpf_map_delete_elem(maps->out, &u32(0)); + + /* Check that OOB was dropped on redirect */ + try_recv("recv(sd_out, MSG_OOB)", sd_out, + MSG_OOB | MSG_DONTWAIT, false); + + goto del_in; + } +out: + xbpf_map_delete_elem(maps->out, &u32(0)); +del_in: + xbpf_map_delete_elem(maps->in, &u32(0)); +} + +static int is_redir_supported(enum prog_type type, const char *in, + const char *out) +{ + /* Matching based on strings returned by socket_kind_to_str(): + * tcp4, udp4, tcp6, udp6, u_str, u_dgr, v_str, v_seq + * Plus a wildcard: any + * Not in use: u_seq, v_dgr + */ + struct combo_spec *c, combos[] = { + /* Send to local: TCP -> any, but vsock */ + { SK_MSG_INGRESS, "tcp", "tcp" }, + { SK_MSG_INGRESS, "tcp", "udp" }, + { SK_MSG_INGRESS, "tcp", "u_str" }, + { SK_MSG_INGRESS, "tcp", "u_dgr" }, + + /* Send to egress: TCP -> TCP */ + { SK_MSG_EGRESS, "tcp", "tcp" }, + + /* Ingress to egress: any -> any */ + { SK_SKB_EGRESS, "any", "any" }, + + /* Ingress to local: any -> any, but vsock */ + { SK_SKB_INGRESS, "any", "tcp" }, + { SK_SKB_INGRESS, "any", "udp" }, + { SK_SKB_INGRESS, "any", "u_str" }, + { SK_SKB_INGRESS, "any", "u_dgr" }, + }; + + for (c = combos; c < combos + ARRAY_SIZE(combos); c++) { + if (c->prog_type == type && + (!strcmp(c->in, "any") || strstarts(in, c->in)) && + (!strcmp(c->out, "any") || strstarts(out, c->out))) + return SUPPORTED; + } + + return 0; +} + +static int get_support_status(enum prog_type type, const char *in, + const char *out) +{ + int status = is_redir_supported(type, in, out); + + if (type == SK_SKB_INGRESS && strstarts(out, "v_")) + status |= UNSUPPORTED_RACY_VERD; + + return status; +} + +static void test_socket(enum bpf_map_type type, struct redir_spec *redir, + struct maps *maps, struct socket_spec *s_in, + struct socket_spec *s_out) +{ + int fd_in, fd_out, fd_send, fd_peer, fd_recv, flags, status; + const char *in_str, *out_str; + char s[MAX_TEST_NAME]; + + fd_in = s_in->in[0]; + fd_out = s_out->out[0]; + fd_send = s_in->in[redir->idx_send]; + fd_peer = s_in->in[redir->idx_send ^ 1]; + fd_recv = s_out->out[redir->idx_recv]; + flags = s_in->send_flags; + + in_str = socket_kind_to_str(fd_in); + out_str = socket_kind_to_str(fd_out); + status = get_support_status(redir->prog_type, in_str, out_str); + + snprintf(s, sizeof(s), + "%-4s %-17s %-5s %s %-5s%6s", + /* hash sk_skb-to-ingress u_str → v_str (OOB) */ + type == BPF_MAP_TYPE_SOCKMAP ? "map" : "hash", + redir->name, + in_str, + status & SUPPORTED ? "→" : " ", + out_str, + (flags & MSG_OOB) ? 
"(OOB)" : ""); + + if (!test__start_subtest(s)) + return; + + test_send_redir_recv(fd_send, flags, fd_peer, fd_in, fd_out, fd_recv, + maps, status); +} + +static void test_redir(enum bpf_map_type type, struct redir_spec *redir, + struct maps *maps) +{ + struct socket_spec *s, sockets[] = { + { AF_INET, SOCK_STREAM }, + // { AF_INET, SOCK_STREAM, MSG_OOB }, /* Known to be broken */ + { AF_INET6, SOCK_STREAM }, + { AF_INET, SOCK_DGRAM }, + { AF_INET6, SOCK_DGRAM }, + { AF_UNIX, SOCK_STREAM }, + { AF_UNIX, SOCK_STREAM, MSG_OOB }, + { AF_UNIX, SOCK_DGRAM }, + // { AF_UNIX, SOCK_SEQPACKET}, /* Unsupported BPF_MAP_UPDATE_ELEM */ + { AF_VSOCK, SOCK_STREAM }, + // { AF_VSOCK, SOCK_DGRAM }, /* Unsupported socket() */ + { AF_VSOCK, SOCK_SEQPACKET }, + }; + + for (s = sockets; s < sockets + ARRAY_SIZE(sockets); s++) + if (socket_spec_pairs(s)) + goto out; + + /* Intra-proto */ + for (s = sockets; s < sockets + ARRAY_SIZE(sockets); s++) + test_socket(type, redir, maps, s, s); + + /* Cross-proto */ + for (int i = 0; i < ARRAY_SIZE(sockets); i++) { + for (int j = 0; j < ARRAY_SIZE(sockets); j++) { + struct socket_spec *out = &sockets[j]; + struct socket_spec *in = &sockets[i]; + + /* Skip intra-proto and between variants */ + if (out->send_flags || + (in->family == out->family && + in->sotype == out->sotype)) + continue; + + test_socket(type, redir, maps, in, out); + } + } +out: + while (--s >= sockets) + socket_spec_close(s); +} + +static void test_map(enum bpf_map_type type) +{ + struct redir_spec *r, redirs[] = { + { "sk_msg-to-ingress", SEND_INNER, RECV_INNER, SK_MSG_INGRESS }, + { "sk_msg-to-egress", SEND_INNER, RECV_OUTER, SK_MSG_EGRESS }, + { "sk_skb-to-egress", SEND_OUTER, RECV_OUTER, SK_SKB_EGRESS }, + { "sk_skb-to-ingress", SEND_OUTER, RECV_INNER, SK_SKB_INGRESS }, + }; + + for (r = redirs; r < redirs + ARRAY_SIZE(redirs); r++) { + enum bpf_attach_type attach_type; + struct test_sockmap_redir *skel; + struct maps maps; + int prog_fd; + + skel = test_sockmap_redir__open_and_load(); + if (!skel) { + FAIL("open_and_load"); + return; + } + + switch (type) { + case BPF_MAP_TYPE_SOCKMAP: + maps.in = bpf_map__fd(skel->maps.nop_map); + maps.out = bpf_map__fd(skel->maps.sock_map); + break; + case BPF_MAP_TYPE_SOCKHASH: + maps.in = bpf_map__fd(skel->maps.nop_hash); + maps.out = bpf_map__fd(skel->maps.sock_hash); + break; + default: + FAIL("Unsupported bpf_map_type"); + return; + } + + skel->bss->redirect_type = type; + maps.verd = bpf_map__fd(skel->maps.verdict_map); + get_redir_params(r, skel, &prog_fd, &attach_type, + &skel->bss->redirect_flags); + + if (xbpf_prog_attach(prog_fd, maps.in, attach_type, 0)) + return; + + test_redir(type, r, &maps); + + if (xbpf_prog_detach2(prog_fd, maps.in, attach_type)) + return; + + test_sockmap_redir__destroy(skel); + } +} + +void serial_test_sockmap_redir(void) +{ + test_map(BPF_MAP_TYPE_SOCKMAP); + test_map(BPF_MAP_TYPE_SOCKHASH); +} diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_strp.c b/tools/testing/selftests/bpf/prog_tests/sockmap_strp.c new file mode 100644 index 000000000000..621b3b71888e --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_strp.c @@ -0,0 +1,454 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <error.h> +#include <netinet/tcp.h> +#include <test_progs.h> +#include "sockmap_helpers.h" +#include "test_skmsg_load_helpers.skel.h" +#include "test_sockmap_strp.skel.h" + +#define STRP_PKT_HEAD_LEN 4 +#define STRP_PKT_BODY_LEN 6 +#define STRP_PKT_FULL_LEN (STRP_PKT_HEAD_LEN + STRP_PKT_BODY_LEN) + +static const char 
packet[STRP_PKT_FULL_LEN] = "head+body\0"; /* 4-byte "head" + 6-byte "+body\0" */
+static const int test_packet_num = 100;
+
+/* The current implementation of tcp_bpf_recvmsg_parser() invokes data_ready
+ * with sk held if an skb exists in sk_receive_queue. The data_ready
+ * implementation of strparser then delays the read operation while sk is
+ * held, and EAGAIN is returned to the caller.
+ */
+static int sockmap_strp_consume_pre_data(int p)
+{
+	int recvd;
+	bool retried = false;
+	char rcv[10];
+
+retry:
+	errno = 0;
+	recvd = recv_timeout(p, rcv, sizeof(rcv), 0, 1);
+	if (recvd < 0 && errno == EAGAIN && retried == false) {
+		/* On the first call, EAGAIN will certainly be returned.
+		 * A 1-second wait is enough for the workqueue to finish.
+		 */
+		sleep(1);
+		retried = true;
+		goto retry;
+	}
+
+	if (!ASSERT_EQ(recvd, STRP_PKT_FULL_LEN, "recv error or truncated data") ||
+	    !ASSERT_OK(memcmp(packet, rcv, STRP_PKT_FULL_LEN),
+		       "data mismatch"))
+		return -1;
+	return 0;
+}
+
+static struct test_sockmap_strp *sockmap_strp_init(int *out_map, bool pass,
+						   bool need_parser)
+{
+	struct test_sockmap_strp *strp = NULL;
+	int verdict, parser;
+	int err;
+
+	strp = test_sockmap_strp__open_and_load();
+	if (!ASSERT_OK_PTR(strp, "open_and_load"))
+		return NULL;
+
+	*out_map = bpf_map__fd(strp->maps.sock_map);
+
+	if (need_parser)
+		parser = bpf_program__fd(strp->progs.prog_skb_parser_partial);
+	else
+		parser = bpf_program__fd(strp->progs.prog_skb_parser);
+
+	if (pass)
+		verdict = bpf_program__fd(strp->progs.prog_skb_verdict_pass);
+	else
+		verdict = bpf_program__fd(strp->progs.prog_skb_verdict);
+
+	err = bpf_prog_attach(parser, *out_map, BPF_SK_SKB_STREAM_PARSER, 0);
+	if (!ASSERT_OK(err, "bpf_prog_attach stream parser"))
+		goto err;
+
+	err = bpf_prog_attach(verdict, *out_map, BPF_SK_SKB_STREAM_VERDICT, 0);
+	if (!ASSERT_OK(err, "bpf_prog_attach stream verdict"))
+		goto err;
+
+	return strp;
+err:
+	test_sockmap_strp__destroy(strp);
+	return NULL;
+}
+
+/* Dispatch packets to different sockets by packet size:
+ *
+ *      ------   ------
+ *     | pkt4 || pkt1 |... > remote socket
+ *      ------   ------  /  ------   ------
+ *                         | pkt8 | pkt7 |...
+ *      ------   ------  \  ------   ------
+ *     | pkt3 || pkt2 |...
> local socket + * ------ ------ + */ +static void test_sockmap_strp_dispatch_pkt(int family, int sotype) +{ + int i, j, zero = 0, one = 1, recvd; + int err, map; + int c0 = -1, p0 = -1, c1 = -1, p1 = -1; + struct test_sockmap_strp *strp = NULL; + int test_cnt = 6; + char rcv[10]; + struct { + char data[7]; + int data_len; + int send_cnt; + int *receiver; + } send_dir[2] = { + /* data expected to deliver to local */ + {"llllll", 6, 0, &p0}, + /* data expected to deliver to remote */ + {"rrrrr", 5, 0, &c1} + }; + + strp = sockmap_strp_init(&map, false, false); + if (!ASSERT_TRUE(strp, "sockmap_strp_init")) + return; + + err = create_socket_pairs(family, sotype, &c0, &c1, &p0, &p1); + if (!ASSERT_OK(err, "create_socket_pairs()")) + goto out; + + err = bpf_map_update_elem(map, &zero, &p0, BPF_NOEXIST); + if (!ASSERT_OK(err, "bpf_map_update_elem(p0)")) + goto out_close; + + err = bpf_map_update_elem(map, &one, &p1, BPF_NOEXIST); + if (!ASSERT_OK(err, "bpf_map_update_elem(p1)")) + goto out_close; + + err = setsockopt(c1, IPPROTO_TCP, TCP_NODELAY, &zero, sizeof(zero)); + if (!ASSERT_OK(err, "setsockopt(TCP_NODELAY)")) + goto out_close; + + /* deliver data with data size greater than 5 to local */ + strp->data->verdict_max_size = 5; + + for (i = 0; i < test_cnt; i++) { + int d = i % 2; + + xsend(c0, send_dir[d].data, send_dir[d].data_len, 0); + send_dir[d].send_cnt++; + } + + for (i = 0; i < 2; i++) { + for (j = 0; j < send_dir[i].send_cnt; j++) { + int expected = send_dir[i].data_len; + + recvd = recv_timeout(*send_dir[i].receiver, rcv, + expected, MSG_DONTWAIT, + IO_TIMEOUT_SEC); + if (!ASSERT_EQ(recvd, expected, "recv_timeout()")) + goto out_close; + if (!ASSERT_OK(memcmp(send_dir[i].data, rcv, recvd), + "data mismatch")) + goto out_close; + } + } +out_close: + close(c0); + close(c1); + close(p0); + close(p1); +out: + test_sockmap_strp__destroy(strp); +} + +/* We have multiple packets in one skb + * ------------ ------------ ------------ + * | packet1 | packet2 | ... 
+ * ------------ ------------ ------------
+ */
+static void test_sockmap_strp_multiple_pkt(int family, int sotype)
+{
+	int i, zero = 0;
+	int sent, recvd, total;
+	int err, map;
+	int c = -1, p = -1;
+	struct test_sockmap_strp *strp = NULL;
+	char *snd = NULL, *rcv = NULL;
+
+	strp = sockmap_strp_init(&map, true, true);
+	if (!ASSERT_TRUE(strp, "sockmap_strp_init"))
+		return;
+
+	err = create_pair(family, sotype, &c, &p);
+	if (err)
+		goto out;
+
+	err = bpf_map_update_elem(map, &zero, &p, BPF_NOEXIST);
+	if (!ASSERT_OK(err, "bpf_map_update_elem(zero, p)"))
+		goto out_close;
+
+	/* construct multiple packets in one buffer */
+	total = test_packet_num * STRP_PKT_FULL_LEN;
+	snd = malloc(total);
+	rcv = malloc(total + 1);
+	if (!ASSERT_TRUE(snd, "malloc(snd)") ||
+	    !ASSERT_TRUE(rcv, "malloc(rcv)"))
+		goto out_close;
+
+	for (i = 0; i < test_packet_num; i++) {
+		memcpy(snd + i * STRP_PKT_FULL_LEN,
+		       packet, STRP_PKT_FULL_LEN);
+	}
+
+	sent = xsend(c, snd, total, 0);
+	if (!ASSERT_EQ(sent, total, "xsend(c)"))
+		goto out_close;
+
+	/* ask for one extra byte so that any trailing unexpected data
+	 * shows up as a length mismatch
+	 */
+	recvd = recv_timeout(p, rcv, total + 1, MSG_DONTWAIT, IO_TIMEOUT_SEC);
+	if (!ASSERT_EQ(recvd, total, "recv(rcv)"))
+		goto out_close;
+
+	/* we sent a single TCP stream carrying multiple packets;
+	 * check that they were all passed through intact
+	 */
+	if (!ASSERT_OK(memcmp(snd, rcv, total), "data mismatch"))
+		goto out_close;
+
+out_close:
+	close(c);
+	close(p);
+	if (snd)
+		free(snd);
+	if (rcv)
+		free(rcv);
+out:
+	test_sockmap_strp__destroy(strp);
+}
+
+/* Test strparser with partial read */
+static void test_sockmap_strp_partial_read(int family, int sotype)
+{
+	int zero = 0, recvd, off;
+	int err, map;
+	int c = -1, p = -1;
+	struct test_sockmap_strp *strp = NULL;
+	char rcv[STRP_PKT_FULL_LEN + 1] = "0";
+
+	strp = sockmap_strp_init(&map, true, true);
+	if (!ASSERT_TRUE(strp, "sockmap_strp_init"))
+		return;
+
+	err = create_pair(family, sotype, &c, &p);
+	if (err)
+		goto out;
+
+	/* sk_data_ready of 'p' will be replaced by strparser handler */
+	err = bpf_map_update_elem(map, &zero, &p, BPF_NOEXIST);
+	if (!ASSERT_OK(err, "bpf_map_update_elem(zero, p)"))
+		goto out_close;
+
+	/* 1.1 send partial head, 1 byte header left */
+	off = STRP_PKT_HEAD_LEN - 1;
+	xsend(c, packet, off, 0);
+	recvd = recv_timeout(p, rcv, sizeof(rcv), MSG_DONTWAIT, 1);
+	if (!ASSERT_EQ(-1, recvd, "partial head sent, expected no data"))
+		goto out_close;
+
+	/* 1.2 send remaining head and body */
+	xsend(c, packet + off, STRP_PKT_FULL_LEN - off, 0);
+	recvd = recv_timeout(p, rcv, sizeof(rcv), MSG_DONTWAIT, IO_TIMEOUT_SEC);
+	if (!ASSERT_EQ(recvd, STRP_PKT_FULL_LEN, "expected full data"))
+		goto out_close;
+
+	/* 2.1 send partial head, 1 byte header left */
+	off = STRP_PKT_HEAD_LEN - 1;
+	xsend(c, packet, off, 0);
+
+	/* 2.2 send remaining head and partial body, 1 byte body left */
+	xsend(c, packet + off, STRP_PKT_FULL_LEN - off - 1, 0);
+	off = STRP_PKT_FULL_LEN - 1;
+	recvd = recv_timeout(p, rcv, sizeof(rcv), MSG_DONTWAIT, 1);
+	if (!ASSERT_EQ(-1, recvd, "partial body sent, expected no data"))
+		goto out_close;
+
+	/* 2.3 send remaining body */
+	xsend(c, packet + off, STRP_PKT_FULL_LEN - off, 0);
+	recvd = recv_timeout(p, rcv, sizeof(rcv), MSG_DONTWAIT, IO_TIMEOUT_SEC);
+	if (!ASSERT_EQ(recvd, STRP_PKT_FULL_LEN, "expected full data"))
+		goto out_close;
+
+out_close:
+	close(c);
+	close(p);
+
+out:
+	test_sockmap_strp__destroy(strp);
+}
+
+/* Test simple socket read/write with strparser + FIONREAD */
+static void
test_sockmap_strp_pass(int family, int sotype, bool fionread)
+{
+	int zero = 0, pkt_size = STRP_PKT_FULL_LEN, sent, recvd, avail;
+	int err, map;
+	int c = -1, p = -1;
+	int test_cnt = 10, i;
+	struct test_sockmap_strp *strp = NULL;
+	char rcv[STRP_PKT_FULL_LEN + 1] = "0";
+
+	strp = sockmap_strp_init(&map, true, true);
+	if (!ASSERT_TRUE(strp, "sockmap_strp_init"))
+		return;
+
+	err = create_pair(family, sotype, &c, &p);
+	if (err)
+		goto out;
+
+	/* inject some data before the socket is added to the sockmap;
+	 * it should still be read correctly because
+	 * tcp_bpf_recvmsg_parser() checks sk_receive_queue.
+	 */
+	sent = xsend(c, packet, pkt_size, 0);
+	if (!ASSERT_EQ(sent, pkt_size, "xsend(pre-data)"))
+		goto out_close;
+
+	/* sk_data_ready of 'p' will be replaced by strparser handler */
+	err = bpf_map_update_elem(map, &zero, &p, BPF_NOEXIST);
+	if (!ASSERT_OK(err, "bpf_map_update_elem(p)"))
+		goto out_close;
+
+	/* consume previous data we injected */
+	if (sockmap_strp_consume_pre_data(p))
+		goto out_close;
+
+	/* Previously we hit deadlocks and sequencing errors that broke
+	 * continuous reads, so run the send/recv cycle for multiple
+	 * iterations here.
+	 */
+	for (i = 0; i < test_cnt; i++) {
+		sent = xsend(c, packet, pkt_size, 0);
+		if (!ASSERT_EQ(sent, pkt_size, "xsend(c)"))
+			goto out_close;
+
+		recvd = recv_timeout(p, rcv, sizeof(rcv), MSG_DONTWAIT,
+				     IO_TIMEOUT_SEC);
+		if (!ASSERT_EQ(recvd, pkt_size, "recv_timeout(p)") ||
+		    !ASSERT_OK(memcmp(packet, rcv, pkt_size),
+			       "memcmp, data mismatch"))
+			goto out_close;
+	}
+
+	if (fionread) {
+		sent = xsend(c, packet, pkt_size, 0);
+		if (!ASSERT_EQ(sent, pkt_size, "second xsend(c)"))
+			goto out_close;
+
+		err = ioctl(p, FIONREAD, &avail);
+		if (!ASSERT_OK(err, "ioctl(FIONREAD) error") ||
+		    !ASSERT_EQ(avail, pkt_size, "ioctl(FIONREAD)"))
+			goto out_close;
+
+		recvd = recv_timeout(p, rcv, sizeof(rcv), MSG_DONTWAIT,
+				     IO_TIMEOUT_SEC);
+		if (!ASSERT_EQ(recvd, pkt_size, "second recv_timeout(p)") ||
+		    !ASSERT_OK(memcmp(packet, rcv, pkt_size),
+			       "second memcmp, data mismatch"))
+			goto out_close;
+	}
+
+out_close:
+	close(c);
+	close(p);
+
+out:
+	test_sockmap_strp__destroy(strp);
+}
+
+/* Test strparser with verdict mode */
+static void test_sockmap_strp_verdict(int family, int sotype)
+{
+	int zero = 0, one = 1, sent, recvd, off;
+	int err, map;
+	int c0 = -1, p0 = -1, c1 = -1, p1 = -1;
+	struct test_sockmap_strp *strp = NULL;
+	char rcv[STRP_PKT_FULL_LEN + 1] = "0";
+
+	strp = sockmap_strp_init(&map, false, true);
+	if (!ASSERT_TRUE(strp, "sockmap_strp_init"))
+		return;
+
+	/* We simulate a reverse proxy server.
+	 * When p0 receives data from c0, we forward it to c1.
+	 * From c1's perspective, the data appears to have been
+	 * sent by p1.
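+	 * (This relies on the verdict program redirecting each record
+	 * parsed on p0 to the socket at map index 1, i.e. p1, so that
+	 * the payload egresses towards c1.)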
+ */ + err = create_socket_pairs(family, sotype, &c0, &c1, &p0, &p1); + if (!ASSERT_OK(err, "create_socket_pairs()")) + goto out; + + err = bpf_map_update_elem(map, &zero, &p0, BPF_NOEXIST); + if (!ASSERT_OK(err, "bpf_map_update_elem(p0)")) + goto out_close; + + err = bpf_map_update_elem(map, &one, &p1, BPF_NOEXIST); + if (!ASSERT_OK(err, "bpf_map_update_elem(p1)")) + goto out_close; + + sent = xsend(c0, packet, STRP_PKT_FULL_LEN, 0); + if (!ASSERT_EQ(sent, STRP_PKT_FULL_LEN, "xsend(c0)")) + goto out_close; + + recvd = recv_timeout(c1, rcv, sizeof(rcv), MSG_DONTWAIT, + IO_TIMEOUT_SEC); + if (!ASSERT_EQ(recvd, STRP_PKT_FULL_LEN, "recv_timeout(c1)") || + !ASSERT_OK(memcmp(packet, rcv, STRP_PKT_FULL_LEN), + "received data does not match the sent data")) + goto out_close; + + /* send again to ensure the stream is functioning correctly. */ + sent = xsend(c0, packet, STRP_PKT_FULL_LEN, 0); + if (!ASSERT_EQ(sent, STRP_PKT_FULL_LEN, "second xsend(c0)")) + goto out_close; + + /* partial read */ + off = STRP_PKT_FULL_LEN / 2; + recvd = recv_timeout(c1, rcv, off, MSG_DONTWAIT, + IO_TIMEOUT_SEC); + recvd += recv_timeout(c1, rcv + off, sizeof(rcv) - off, MSG_DONTWAIT, + IO_TIMEOUT_SEC); + + if (!ASSERT_EQ(recvd, STRP_PKT_FULL_LEN, "partial recv_timeout(c1)") || + !ASSERT_OK(memcmp(packet, rcv, STRP_PKT_FULL_LEN), + "partial received data does not match the sent data")) + goto out_close; + +out_close: + close(c0); + close(c1); + close(p0); + close(p1); +out: + test_sockmap_strp__destroy(strp); +} + +void test_sockmap_strp(void) +{ + if (test__start_subtest("sockmap strp tcp pass")) + test_sockmap_strp_pass(AF_INET, SOCK_STREAM, false); + if (test__start_subtest("sockmap strp tcp v6 pass")) + test_sockmap_strp_pass(AF_INET6, SOCK_STREAM, false); + if (test__start_subtest("sockmap strp tcp pass fionread")) + test_sockmap_strp_pass(AF_INET, SOCK_STREAM, true); + if (test__start_subtest("sockmap strp tcp v6 pass fionread")) + test_sockmap_strp_pass(AF_INET6, SOCK_STREAM, true); + if (test__start_subtest("sockmap strp tcp verdict")) + test_sockmap_strp_verdict(AF_INET, SOCK_STREAM); + if (test__start_subtest("sockmap strp tcp v6 verdict")) + test_sockmap_strp_verdict(AF_INET6, SOCK_STREAM); + if (test__start_subtest("sockmap strp tcp partial read")) + test_sockmap_strp_partial_read(AF_INET, SOCK_STREAM); + if (test__start_subtest("sockmap strp tcp multiple packets")) + test_sockmap_strp_multiple_pkt(AF_INET, SOCK_STREAM); + if (test__start_subtest("sockmap strp tcp dispatch")) + test_sockmap_strp_dispatch_pkt(AF_INET, SOCK_STREAM); +} diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c index 05d0e07da394..ba6b3ec1156a 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c @@ -2,7 +2,7 @@ #include <test_progs.h> #include "cgroup_helpers.h" -#include <linux/tcp.h> +#include <netinet/tcp.h> #include <linux/netlink.h> #include "sockopt_sk.skel.h" diff --git a/tools/testing/selftests/bpf/prog_tests/spin_lock.c b/tools/testing/selftests/bpf/prog_tests/spin_lock.c index 2b0068742ef9..e3ea5dc2f697 100644 --- a/tools/testing/selftests/bpf/prog_tests/spin_lock.c +++ b/tools/testing/selftests/bpf/prog_tests/spin_lock.c @@ -50,6 +50,9 @@ static struct { { "lock_id_mismatch_innermapval_mapval", "bpf_spin_unlock of different lock" }, { "lock_global_subprog_call1", "global function calls are not allowed while holding a lock" }, { "lock_global_subprog_call2", "global function calls are not 
allowed while holding a lock" }, + { "lock_global_sleepable_helper_subprog", "global function calls are not allowed while holding a lock" }, + { "lock_global_sleepable_kfunc_subprog", "global function calls are not allowed while holding a lock" }, + { "lock_global_sleepable_subprog_indirect", "global function calls are not allowed while holding a lock" }, }; static int match_regex(const char *pattern, const char *string) diff --git a/tools/testing/selftests/bpf/prog_tests/stream.c b/tools/testing/selftests/bpf/prog_tests/stream.c new file mode 100644 index 000000000000..d9f0185dca61 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/stream.c @@ -0,0 +1,141 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */ +#include <test_progs.h> +#include <sys/mman.h> +#include <regex.h> + +#include "stream.skel.h" +#include "stream_fail.skel.h" + +void test_stream_failure(void) +{ + RUN_TESTS(stream_fail); +} + +void test_stream_success(void) +{ + RUN_TESTS(stream); + return; +} + +struct { + int prog_off; + const char *errstr; +} stream_error_arr[] = { + { + offsetof(struct stream, progs.stream_cond_break), + "ERROR: Timeout detected for may_goto instruction\n" + "CPU: [0-9]+ UID: 0 PID: [0-9]+ Comm: .*\n" + "Call trace:\n" + "([a-zA-Z_][a-zA-Z0-9_]*\\+0x[0-9a-fA-F]+/0x[0-9a-fA-F]+\n" + "|[ \t]+[^\n]+\n)*", + }, + { + offsetof(struct stream, progs.stream_deadlock), + "ERROR: AA or ABBA deadlock detected for bpf_res_spin_lock\n" + "Attempted lock = (0x[0-9a-fA-F]+)\n" + "Total held locks = 1\n" + "Held lock\\[ 0\\] = \\1\n" // Lock address must match + "CPU: [0-9]+ UID: 0 PID: [0-9]+ Comm: .*\n" + "Call trace:\n" + "([a-zA-Z_][a-zA-Z0-9_]*\\+0x[0-9a-fA-F]+/0x[0-9a-fA-F]+\n" + "|[ \t]+[^\n]+\n)*", + }, +}; + +static int match_regex(const char *pattern, const char *string) +{ + int err, rc; + regex_t re; + + err = regcomp(&re, pattern, REG_EXTENDED | REG_NEWLINE); + if (err) + return -1; + rc = regexec(&re, string, 0, NULL, 0); + regfree(&re); + return rc == 0 ? 
1 : 0; +} + +void test_stream_errors(void) +{ + LIBBPF_OPTS(bpf_test_run_opts, opts); + LIBBPF_OPTS(bpf_prog_stream_read_opts, ropts); + struct stream *skel; + int ret, prog_fd; + char buf[1024]; + + skel = stream__open_and_load(); + if (!ASSERT_OK_PTR(skel, "stream__open_and_load")) + return; + + for (int i = 0; i < ARRAY_SIZE(stream_error_arr); i++) { + struct bpf_program **prog; + + prog = (struct bpf_program **)(((char *)skel) + stream_error_arr[i].prog_off); + prog_fd = bpf_program__fd(*prog); + ret = bpf_prog_test_run_opts(prog_fd, &opts); + ASSERT_OK(ret, "ret"); + ASSERT_OK(opts.retval, "retval"); + +#if !defined(__x86_64__) + ASSERT_TRUE(1, "Timed may_goto unsupported, skip."); + if (i == 0) { + ret = bpf_prog_stream_read(prog_fd, 2, buf, sizeof(buf), &ropts); + ASSERT_EQ(ret, 0, "stream read"); + continue; + } +#endif + + ret = bpf_prog_stream_read(prog_fd, BPF_STREAM_STDERR, buf, sizeof(buf), &ropts); + ASSERT_GT(ret, 0, "stream read"); + ASSERT_LE(ret, 1023, "len for buf"); + buf[ret] = '\0'; + + ret = match_regex(stream_error_arr[i].errstr, buf); + if (!ASSERT_TRUE(ret == 1, "regex match")) + fprintf(stderr, "Output from stream:\n%s\n", buf); + } + + stream__destroy(skel); +} + +void test_stream_syscall(void) +{ + LIBBPF_OPTS(bpf_test_run_opts, opts); + LIBBPF_OPTS(bpf_prog_stream_read_opts, ropts); + struct stream *skel; + int ret, prog_fd; + char buf[64]; + + skel = stream__open_and_load(); + if (!ASSERT_OK_PTR(skel, "stream__open_and_load")) + return; + + prog_fd = bpf_program__fd(skel->progs.stream_syscall); + ret = bpf_prog_test_run_opts(prog_fd, &opts); + ASSERT_OK(ret, "ret"); + ASSERT_OK(opts.retval, "retval"); + + ASSERT_LT(bpf_prog_stream_read(0, BPF_STREAM_STDOUT, buf, sizeof(buf), &ropts), 0, "error"); + ret = -errno; + ASSERT_EQ(ret, -EINVAL, "bad prog_fd"); + + ASSERT_LT(bpf_prog_stream_read(prog_fd, 0, buf, sizeof(buf), &ropts), 0, "error"); + ret = -errno; + ASSERT_EQ(ret, -ENOENT, "bad stream id"); + + ASSERT_LT(bpf_prog_stream_read(prog_fd, BPF_STREAM_STDOUT, NULL, sizeof(buf), NULL), 0, "error"); + ret = -errno; + ASSERT_EQ(ret, -EFAULT, "bad stream buf"); + + ret = bpf_prog_stream_read(prog_fd, BPF_STREAM_STDOUT, buf, 2, NULL); + ASSERT_EQ(ret, 2, "bytes"); + ret = bpf_prog_stream_read(prog_fd, BPF_STREAM_STDOUT, buf, 2, NULL); + ASSERT_EQ(ret, 1, "bytes"); + ret = bpf_prog_stream_read(prog_fd, BPF_STREAM_STDOUT, buf, 1, &ropts); + ASSERT_EQ(ret, 0, "no bytes stdout"); + ret = bpf_prog_stream_read(prog_fd, BPF_STREAM_STDERR, buf, 1, &ropts); + ASSERT_EQ(ret, 0, "no bytes stderr"); + + stream__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/string_kfuncs.c b/tools/testing/selftests/bpf/prog_tests/string_kfuncs.c new file mode 100644 index 000000000000..35af8044d059 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/string_kfuncs.c @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2025 Red Hat, Inc.*/ +#include <test_progs.h> +#include "string_kfuncs_success.skel.h" +#include "string_kfuncs_failure1.skel.h" +#include "string_kfuncs_failure2.skel.h" +#include <sys/mman.h> + +static const char * const test_cases[] = { + "strcmp", + "strchr", + "strchrnul", + "strnchr", + "strrchr", + "strlen", + "strnlen", + "strspn_str", + "strspn_accept", + "strcspn_str", + "strcspn_reject", + "strstr", + "strnstr", +}; + +void run_too_long_tests(void) +{ + struct string_kfuncs_failure2 *skel; + struct bpf_program *prog; + char test_name[256]; + int err, i; + + skel = string_kfuncs_failure2__open_and_load(); + if 
(!ASSERT_OK_PTR(skel, "string_kfuncs_failure2__open_and_load")) + return; + + memset(skel->bss->long_str, 'a', sizeof(skel->bss->long_str)); + + for (i = 0; i < ARRAY_SIZE(test_cases); i++) { + sprintf(test_name, "test_%s_too_long", test_cases[i]); + if (!test__start_subtest(test_name)) + continue; + + prog = bpf_object__find_program_by_name(skel->obj, test_name); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto cleanup; + + LIBBPF_OPTS(bpf_test_run_opts, topts); + err = bpf_prog_test_run_opts(bpf_program__fd(prog), &topts); + if (!ASSERT_OK(err, "bpf_prog_test_run")) + goto cleanup; + + ASSERT_EQ(topts.retval, -E2BIG, "reading too long string fails with -E2BIG"); + } + +cleanup: + string_kfuncs_failure2__destroy(skel); +} + +void test_string_kfuncs(void) +{ + RUN_TESTS(string_kfuncs_success); + RUN_TESTS(string_kfuncs_failure1); + + run_too_long_tests(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/summarization.c b/tools/testing/selftests/bpf/prog_tests/summarization.c new file mode 100644 index 000000000000..5dd6c120a838 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/summarization.c @@ -0,0 +1,144 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "bpf/libbpf.h" +#include "summarization_freplace.skel.h" +#include "summarization.skel.h" +#include <test_progs.h> + +static void print_verifier_log(const char *log) +{ + if (env.verbosity >= VERBOSE_VERY) + fprintf(stdout, "VERIFIER LOG:\n=============\n%s=============\n", log); +} + +static void test_aux(const char *main_prog_name, + const char *to_be_replaced, + const char *replacement, + bool expect_load, + const char *err_msg) +{ + struct summarization_freplace *freplace = NULL; + struct bpf_program *freplace_prog = NULL; + struct bpf_program *main_prog = NULL; + LIBBPF_OPTS(bpf_object_open_opts, opts); + struct summarization *main = NULL; + char log[16*1024]; + int err; + + opts.kernel_log_buf = log; + opts.kernel_log_size = sizeof(log); + if (env.verbosity >= VERBOSE_SUPER) + opts.kernel_log_level = 1 | 2 | 4; + main = summarization__open_opts(&opts); + if (!ASSERT_OK_PTR(main, "summarization__open")) + goto out; + main_prog = bpf_object__find_program_by_name(main->obj, main_prog_name); + if (!ASSERT_OK_PTR(main_prog, "main_prog")) + goto out; + bpf_program__set_autoload(main_prog, true); + err = summarization__load(main); + print_verifier_log(log); + if (!ASSERT_OK(err, "summarization__load")) + goto out; + freplace = summarization_freplace__open_opts(&opts); + if (!ASSERT_OK_PTR(freplace, "summarization_freplace__open")) + goto out; + freplace_prog = bpf_object__find_program_by_name(freplace->obj, replacement); + if (!ASSERT_OK_PTR(freplace_prog, "freplace_prog")) + goto out; + bpf_program__set_autoload(freplace_prog, true); + bpf_program__set_autoattach(freplace_prog, true); + bpf_program__set_attach_target(freplace_prog, + bpf_program__fd(main_prog), + to_be_replaced); + err = summarization_freplace__load(freplace); + print_verifier_log(log); + + /* The might_sleep extension doesn't work yet as sleepable calls are not + * allowed, but preserve the check in case it's supported later and then + * this particular combination can be enabled. 
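+	 * (If that support lands, the load above will succeed, this branch
+	 * will no longer trigger, and the regular expect_load checks below
+	 * will apply.)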
+	 */
+	if (!strcmp("might_sleep", replacement) && err) {
+		ASSERT_HAS_SUBSTR(log, "helper call might sleep in a non-sleepable prog", "error log");
+		ASSERT_EQ(err, -EINVAL, "err");
+		test__skip();
+		goto out;
+	}
+
+	if (expect_load) {
+		ASSERT_OK(err, "summarization_freplace__load");
+	} else {
+		ASSERT_ERR(err, "summarization_freplace__load");
+		ASSERT_HAS_SUBSTR(log, err_msg, "error log");
+	}
+
+out:
+	summarization_freplace__destroy(freplace);
+	summarization__destroy(main);
+}
+
+/* There are two global subprograms in summarization.skel.h:
+ * - one changes packet data;
+ * - another does not.
+ * It is ok to freplace subprograms that change packet data with those
+ * that either do or do not. It is only ok to freplace subprograms
+ * that do not change packet data with those that do not as well.
+ * The tests below check the outcome of each such freplace combination.
+ * Also test the case when the main subprogram itself is replaced and is
+ * the only subprogram in a program.
+ *
+ * The same rules hold for might_sleep programs. It is ok to replace
+ * might_sleep with might_sleep and with does_not_sleep, but does_not_sleep
+ * cannot be replaced with might_sleep.
+ */
+void test_summarization_freplace(void)
+{
+	struct {
+		const char *main;
+		const char *to_be_replaced;
+		bool has_side_effect;
+	} mains[2][4] = {
+		{
+			{ "main_changes_with_subprogs", "changes_pkt_data", true },
+			{ "main_changes_with_subprogs", "does_not_change_pkt_data", false },
+			{ "main_changes", "main_changes", true },
+			{ "main_does_not_change", "main_does_not_change", false },
+		},
+		{
+			{ "main_might_sleep_with_subprogs", "might_sleep", true },
+			{ "main_might_sleep_with_subprogs", "does_not_sleep", false },
+			{ "main_might_sleep", "main_might_sleep", true },
+			{ "main_does_not_sleep", "main_does_not_sleep", false },
+		},
+	};
+	const char *pkt_err = "Extension program changes packet data";
+	const char *slp_err = "Extension program may sleep";
+	struct {
+		const char *func;
+		bool has_side_effect;
+		const char *err_msg;
+	} replacements[2][2] = {
+		{
+			{ "changes_pkt_data", true, pkt_err },
+			{ "does_not_change_pkt_data", false, pkt_err },
+		},
+		{
+			{ "might_sleep", true, slp_err },
+			{ "does_not_sleep", false, slp_err },
+		},
+	};
+	char buf[64];
+
+	for (int t = 0; t < 2; t++) {
+		for (int i = 0; i < ARRAY_SIZE(mains[t]); ++i) {
+			for (int j = 0; j < ARRAY_SIZE(replacements[t]); ++j) {
+				snprintf(buf, sizeof(buf), "%s_with_%s",
+					 mains[t][i].to_be_replaced, replacements[t][j].func);
+				if (!test__start_subtest(buf))
+					continue;
+				test_aux(mains[t][i].main, mains[t][i].to_be_replaced, replacements[t][j].func,
+					 mains[t][i].has_side_effect || !replacements[t][j].has_side_effect,
+					 replacements[t][j].err_msg);
+			}
+		}
+	}
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
index 544144620ca6..0ab36503c3b2 100644
--- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
@@ -1195,7 +1195,7 @@ static void test_tailcall_hierarchy_count(const char *which, bool test_fentry,
 					  bool test_fexit,
 					  bool test_fentry_entry)
 {
-	int err, map_fd, prog_fd, main_data_fd, fentry_data_fd, fexit_data_fd, i, val;
+	int err, map_fd, prog_fd, main_data_fd, fentry_data_fd = 0, fexit_data_fd = 0, i, val;
 	struct bpf_object *obj = NULL, *fentry_obj = NULL, *fexit_obj = NULL;
 	struct bpf_link *fentry_link = NULL, *fexit_link = NULL;
 	struct bpf_program *prog, *fentry_prog;
@@ -1600,6 +1600,7 @@ static void
test_tailcall_bpf2bpf_freplace(void) goto out; err = bpf_link__destroy(freplace_link); + freplace_link = NULL; if (!ASSERT_OK(err, "destroy link")) goto out; diff --git a/tools/testing/selftests/bpf/prog_tests/tc_change_tail.c b/tools/testing/selftests/bpf/prog_tests/tc_change_tail.c new file mode 100644 index 000000000000..74752233e779 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/tc_change_tail.c @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <error.h> +#include <test_progs.h> +#include <linux/pkt_cls.h> + +#include "test_tc_change_tail.skel.h" +#include "socket_helpers.h" + +#define LO_IFINDEX 1 + +void test_tc_change_tail(void) +{ + LIBBPF_OPTS(bpf_tcx_opts, tcx_opts); + struct test_tc_change_tail *skel = NULL; + struct bpf_link *link; + int c1, p1; + char buf[2]; + int ret; + + skel = test_tc_change_tail__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_tc_change_tail__open_and_load")) + return; + + link = bpf_program__attach_tcx(skel->progs.change_tail, LO_IFINDEX, + &tcx_opts); + if (!ASSERT_OK_PTR(link, "bpf_program__attach_tcx")) + goto destroy; + + skel->links.change_tail = link; + ret = create_pair(AF_INET, SOCK_DGRAM, &c1, &p1); + if (!ASSERT_OK(ret, "create_pair")) + goto destroy; + + ret = xsend(p1, "Tr", 2, 0); + ASSERT_EQ(ret, 2, "xsend(p1)"); + ret = recv(c1, buf, 2, 0); + ASSERT_EQ(ret, 2, "recv(c1)"); + ASSERT_EQ(skel->data->change_tail_ret, 0, "change_tail_ret"); + + ret = xsend(p1, "G", 1, 0); + ASSERT_EQ(ret, 1, "xsend(p1)"); + ret = recv(c1, buf, 2, 0); + ASSERT_EQ(ret, 1, "recv(c1)"); + ASSERT_EQ(skel->data->change_tail_ret, 0, "change_tail_ret"); + + ret = xsend(p1, "E", 1, 0); + ASSERT_EQ(ret, 1, "xsend(p1)"); + ret = recv(c1, buf, 1, 0); + ASSERT_EQ(ret, 1, "recv(c1)"); + ASSERT_EQ(skel->data->change_tail_ret, -EINVAL, "change_tail_ret"); + + ret = xsend(p1, "Z", 1, 0); + ASSERT_EQ(ret, 1, "xsend(p1)"); + ret = recv(c1, buf, 1, 0); + ASSERT_EQ(ret, 1, "recv(c1)"); + ASSERT_EQ(skel->data->change_tail_ret, -EINVAL, "change_tail_ret"); + + close(c1); + close(p1); +destroy: + test_tc_change_tail__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/tc_helpers.h b/tools/testing/selftests/bpf/prog_tests/tc_helpers.h index 924d0e25320c..d52a62af77bf 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_helpers.h +++ b/tools/testing/selftests/bpf/prog_tests/tc_helpers.h @@ -8,34 +8,6 @@ # define loopback 1 #endif -static inline __u32 id_from_prog_fd(int fd) -{ - struct bpf_prog_info prog_info = {}; - __u32 prog_info_len = sizeof(prog_info); - int err; - - err = bpf_obj_get_info_by_fd(fd, &prog_info, &prog_info_len); - if (!ASSERT_OK(err, "id_from_prog_fd")) - return 0; - - ASSERT_NEQ(prog_info.id, 0, "prog_info.id"); - return prog_info.id; -} - -static inline __u32 id_from_link_fd(int fd) -{ - struct bpf_link_info link_info = {}; - __u32 link_info_len = sizeof(link_info); - int err; - - err = bpf_link_get_info_by_fd(fd, &link_info, &link_info_len); - if (!ASSERT_OK(err, "id_from_link_fd")) - return 0; - - ASSERT_NEQ(link_info.id, 0, "link_info.id"); - return link_info.id; -} - static inline __u32 ifindex_from_link_fd(int fd) { struct bpf_link_info link_info = {}; diff --git a/tools/testing/selftests/bpf/prog_tests/tc_links.c b/tools/testing/selftests/bpf/prog_tests/tc_links.c index 1af9ec1149aa..2186a24e7d8a 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_links.c +++ b/tools/testing/selftests/bpf/prog_tests/tc_links.c @@ -13,7 +13,7 @@ #include "netlink_helpers.h" #include "tc_helpers.h" -void 
serial_test_tc_links_basic(void) +void test_ns_tc_links_basic(void) { LIBBPF_OPTS(bpf_prog_query_opts, optq); LIBBPF_OPTS(bpf_tcx_opts, optl); @@ -260,7 +260,7 @@ cleanup: assert_mprog_count(target, 0); } -void serial_test_tc_links_before(void) +void test_ns_tc_links_before(void) { test_tc_links_before_target(BPF_TCX_INGRESS); test_tc_links_before_target(BPF_TCX_EGRESS); @@ -414,7 +414,7 @@ cleanup: assert_mprog_count(target, 0); } -void serial_test_tc_links_after(void) +void test_ns_tc_links_after(void) { test_tc_links_after_target(BPF_TCX_INGRESS); test_tc_links_after_target(BPF_TCX_EGRESS); @@ -514,7 +514,7 @@ cleanup: assert_mprog_count(target, 0); } -void serial_test_tc_links_revision(void) +void test_ns_tc_links_revision(void) { test_tc_links_revision_target(BPF_TCX_INGRESS); test_tc_links_revision_target(BPF_TCX_EGRESS); @@ -618,7 +618,7 @@ cleanup: assert_mprog_count(target, 0); } -void serial_test_tc_links_chain_classic(void) +void test_ns_tc_links_chain_classic(void) { test_tc_chain_classic(BPF_TCX_INGRESS, false); test_tc_chain_classic(BPF_TCX_EGRESS, false); @@ -846,7 +846,7 @@ cleanup: assert_mprog_count(target, 0); } -void serial_test_tc_links_replace(void) +void test_ns_tc_links_replace(void) { test_tc_links_replace_target(BPF_TCX_INGRESS); test_tc_links_replace_target(BPF_TCX_EGRESS); @@ -1158,7 +1158,7 @@ cleanup: assert_mprog_count(target, 0); } -void serial_test_tc_links_invalid(void) +void test_ns_tc_links_invalid(void) { test_tc_links_invalid_target(BPF_TCX_INGRESS); test_tc_links_invalid_target(BPF_TCX_EGRESS); @@ -1314,7 +1314,7 @@ cleanup: assert_mprog_count(target, 0); } -void serial_test_tc_links_prepend(void) +void test_ns_tc_links_prepend(void) { test_tc_links_prepend_target(BPF_TCX_INGRESS); test_tc_links_prepend_target(BPF_TCX_EGRESS); @@ -1470,7 +1470,7 @@ cleanup: assert_mprog_count(target, 0); } -void serial_test_tc_links_append(void) +void test_ns_tc_links_append(void) { test_tc_links_append_target(BPF_TCX_INGRESS); test_tc_links_append_target(BPF_TCX_EGRESS); @@ -1568,7 +1568,7 @@ cleanup: ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed"); } -void serial_test_tc_links_dev_cleanup(void) +void test_ns_tc_links_dev_cleanup(void) { test_tc_links_dev_cleanup_target(BPF_TCX_INGRESS); test_tc_links_dev_cleanup_target(BPF_TCX_EGRESS); @@ -1672,7 +1672,7 @@ cleanup: test_tc_link__destroy(skel); } -void serial_test_tc_links_chain_mixed(void) +void test_ns_tc_links_chain_mixed(void) { test_tc_chain_mixed(BPF_TCX_INGRESS); test_tc_chain_mixed(BPF_TCX_EGRESS); @@ -1782,7 +1782,7 @@ cleanup: assert_mprog_count(target, 0); } -void serial_test_tc_links_ingress(void) +void test_ns_tc_links_ingress(void) { test_tc_links_ingress(BPF_TCX_INGRESS, true, true); test_tc_links_ingress(BPF_TCX_INGRESS, true, false); @@ -1823,7 +1823,7 @@ static int qdisc_replace(int ifindex, const char *kind, bool block) return err; } -void serial_test_tc_links_dev_chain0(void) +void test_ns_tc_links_dev_chain0(void) { int err, ifindex; @@ -1955,7 +1955,7 @@ cleanup: ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed"); } -void serial_test_tc_links_dev_mixed(void) +void test_ns_tc_links_dev_mixed(void) { test_tc_links_dev_mixed(BPF_TCX_INGRESS); test_tc_links_dev_mixed(BPF_TCX_EGRESS); diff --git a/tools/testing/selftests/bpf/prog_tests/tc_netkit.c b/tools/testing/selftests/bpf/prog_tests/tc_netkit.c index 151a4210028f..2461d183dee5 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_netkit.c +++ b/tools/testing/selftests/bpf/prog_tests/tc_netkit.c @@ -14,10 +14,16 @@ #include 
"netlink_helpers.h" #include "tc_helpers.h" +#define NETKIT_HEADROOM 32 +#define NETKIT_TAILROOM 8 + #define MARK 42 #define PRIO 0xeb9f #define ICMP_ECHO 8 +#define FLAG_ADJUST_ROOM (1 << 0) +#define FLAG_SAME_NETNS (1 << 1) + struct icmphdr { __u8 type; __u8 code; @@ -35,7 +41,7 @@ struct iplink_req { }; static int create_netkit(int mode, int policy, int peer_policy, int *ifindex, - bool same_netns, int scrub, int peer_scrub) + int scrub, int peer_scrub, __u32 flags) { struct rtnl_handle rth = { .fd = -1 }; struct iplink_req req = {}; @@ -63,6 +69,10 @@ static int create_netkit(int mode, int policy, int peer_policy, int *ifindex, addattr32(&req.n, sizeof(req), IFLA_NETKIT_SCRUB, scrub); addattr32(&req.n, sizeof(req), IFLA_NETKIT_PEER_SCRUB, peer_scrub); addattr32(&req.n, sizeof(req), IFLA_NETKIT_MODE, mode); + if (flags & FLAG_ADJUST_ROOM) { + addattr16(&req.n, sizeof(req), IFLA_NETKIT_HEADROOM, NETKIT_HEADROOM); + addattr16(&req.n, sizeof(req), IFLA_NETKIT_TAILROOM, NETKIT_TAILROOM); + } addattr_nest_end(&req.n, data); addattr_nest_end(&req.n, linkinfo); @@ -87,7 +97,7 @@ static int create_netkit(int mode, int policy, int peer_policy, int *ifindex, " addr ee:ff:bb:cc:aa:dd"), "set hwaddress"); } - if (same_netns) { + if (flags & FLAG_SAME_NETNS) { ASSERT_OK(system("ip link set dev " netkit_peer " up"), "up peer"); ASSERT_OK(system("ip addr add dev " netkit_peer " 10.0.0.2/24"), @@ -184,8 +194,8 @@ void serial_test_tc_netkit_basic(void) int err, ifindex; err = create_netkit(NETKIT_L2, NETKIT_PASS, NETKIT_PASS, - &ifindex, false, NETKIT_SCRUB_DEFAULT, - NETKIT_SCRUB_DEFAULT); + &ifindex, NETKIT_SCRUB_DEFAULT, + NETKIT_SCRUB_DEFAULT, 0); if (err) return; @@ -299,8 +309,8 @@ static void serial_test_tc_netkit_multi_links_target(int mode, int target) int err, ifindex; err = create_netkit(mode, NETKIT_PASS, NETKIT_PASS, - &ifindex, false, NETKIT_SCRUB_DEFAULT, - NETKIT_SCRUB_DEFAULT); + &ifindex, NETKIT_SCRUB_DEFAULT, + NETKIT_SCRUB_DEFAULT, 0); if (err) return; @@ -428,8 +438,8 @@ static void serial_test_tc_netkit_multi_opts_target(int mode, int target) int err, ifindex; err = create_netkit(mode, NETKIT_PASS, NETKIT_PASS, - &ifindex, false, NETKIT_SCRUB_DEFAULT, - NETKIT_SCRUB_DEFAULT); + &ifindex, NETKIT_SCRUB_DEFAULT, + NETKIT_SCRUB_DEFAULT, 0); if (err) return; @@ -543,8 +553,8 @@ void serial_test_tc_netkit_device(void) int err, ifindex, ifindex2; err = create_netkit(NETKIT_L3, NETKIT_PASS, NETKIT_PASS, - &ifindex, true, NETKIT_SCRUB_DEFAULT, - NETKIT_SCRUB_DEFAULT); + &ifindex, NETKIT_SCRUB_DEFAULT, + NETKIT_SCRUB_DEFAULT, FLAG_SAME_NETNS); if (err) return; @@ -655,8 +665,8 @@ static void serial_test_tc_netkit_neigh_links_target(int mode, int target) int err, ifindex; err = create_netkit(mode, NETKIT_PASS, NETKIT_PASS, - &ifindex, false, NETKIT_SCRUB_DEFAULT, - NETKIT_SCRUB_DEFAULT); + &ifindex, NETKIT_SCRUB_DEFAULT, + NETKIT_SCRUB_DEFAULT, 0); if (err) return; @@ -733,8 +743,8 @@ static void serial_test_tc_netkit_pkt_type_mode(int mode) struct bpf_link *link; err = create_netkit(mode, NETKIT_PASS, NETKIT_PASS, - &ifindex, true, NETKIT_SCRUB_DEFAULT, - NETKIT_SCRUB_DEFAULT); + &ifindex, NETKIT_SCRUB_DEFAULT, + NETKIT_SCRUB_DEFAULT, FLAG_SAME_NETNS); if (err) return; @@ -799,7 +809,7 @@ void serial_test_tc_netkit_pkt_type(void) serial_test_tc_netkit_pkt_type_mode(NETKIT_L3); } -static void serial_test_tc_netkit_scrub_type(int scrub) +static void serial_test_tc_netkit_scrub_type(int scrub, bool room) { LIBBPF_OPTS(bpf_netkit_opts, optl); struct test_tc_link *skel; @@ -807,7 +817,8 @@ static 
void serial_test_tc_netkit_scrub_type(int scrub) int err, ifindex; err = create_netkit(NETKIT_L2, NETKIT_PASS, NETKIT_PASS, - &ifindex, false, scrub, scrub); + &ifindex, scrub, scrub, + room ? FLAG_ADJUST_ROOM : 0); if (err) return; @@ -842,6 +853,8 @@ static void serial_test_tc_netkit_scrub_type(int scrub) ASSERT_EQ(skel->bss->seen_tc8, true, "seen_tc8"); ASSERT_EQ(skel->bss->mark, scrub == NETKIT_SCRUB_NONE ? MARK : 0, "mark"); ASSERT_EQ(skel->bss->prio, scrub == NETKIT_SCRUB_NONE ? PRIO : 0, "prio"); + ASSERT_EQ(skel->bss->headroom, room ? NETKIT_HEADROOM : 0, "headroom"); + ASSERT_EQ(skel->bss->tailroom, room ? NETKIT_TAILROOM : 0, "tailroom"); cleanup: test_tc_link__destroy(skel); @@ -852,6 +865,6 @@ cleanup: void serial_test_tc_netkit_scrub(void) { - serial_test_tc_netkit_scrub_type(NETKIT_SCRUB_DEFAULT); - serial_test_tc_netkit_scrub_type(NETKIT_SCRUB_NONE); + serial_test_tc_netkit_scrub_type(NETKIT_SCRUB_DEFAULT, false); + serial_test_tc_netkit_scrub_type(NETKIT_SCRUB_NONE, true); } diff --git a/tools/testing/selftests/bpf/prog_tests/tc_opts.c b/tools/testing/selftests/bpf/prog_tests/tc_opts.c index f77f604389aa..dd7a138d8c3d 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_opts.c +++ b/tools/testing/selftests/bpf/prog_tests/tc_opts.c @@ -10,7 +10,7 @@ #include "test_tc_link.skel.h" #include "tc_helpers.h" -void serial_test_tc_opts_basic(void) +void test_ns_tc_opts_basic(void) { LIBBPF_OPTS(bpf_prog_attach_opts, opta); LIBBPF_OPTS(bpf_prog_detach_opts, optd); @@ -254,7 +254,7 @@ cleanup: test_tc_link__destroy(skel); } -void serial_test_tc_opts_before(void) +void test_ns_tc_opts_before(void) { test_tc_opts_before_target(BPF_TCX_INGRESS); test_tc_opts_before_target(BPF_TCX_EGRESS); @@ -445,7 +445,7 @@ cleanup: test_tc_link__destroy(skel); } -void serial_test_tc_opts_after(void) +void test_ns_tc_opts_after(void) { test_tc_opts_after_target(BPF_TCX_INGRESS); test_tc_opts_after_target(BPF_TCX_EGRESS); @@ -554,7 +554,7 @@ cleanup: test_tc_link__destroy(skel); } -void serial_test_tc_opts_revision(void) +void test_ns_tc_opts_revision(void) { test_tc_opts_revision_target(BPF_TCX_INGRESS); test_tc_opts_revision_target(BPF_TCX_EGRESS); @@ -655,7 +655,7 @@ cleanup: assert_mprog_count(target, 0); } -void serial_test_tc_opts_chain_classic(void) +void test_ns_tc_opts_chain_classic(void) { test_tc_chain_classic(BPF_TCX_INGRESS, false); test_tc_chain_classic(BPF_TCX_EGRESS, false); @@ -864,7 +864,7 @@ cleanup: test_tc_link__destroy(skel); } -void serial_test_tc_opts_replace(void) +void test_ns_tc_opts_replace(void) { test_tc_opts_replace_target(BPF_TCX_INGRESS); test_tc_opts_replace_target(BPF_TCX_EGRESS); @@ -1017,7 +1017,7 @@ cleanup: test_tc_link__destroy(skel); } -void serial_test_tc_opts_invalid(void) +void test_ns_tc_opts_invalid(void) { test_tc_opts_invalid_target(BPF_TCX_INGRESS); test_tc_opts_invalid_target(BPF_TCX_EGRESS); @@ -1157,7 +1157,7 @@ cleanup: test_tc_link__destroy(skel); } -void serial_test_tc_opts_prepend(void) +void test_ns_tc_opts_prepend(void) { test_tc_opts_prepend_target(BPF_TCX_INGRESS); test_tc_opts_prepend_target(BPF_TCX_EGRESS); @@ -1297,7 +1297,7 @@ cleanup: test_tc_link__destroy(skel); } -void serial_test_tc_opts_append(void) +void test_ns_tc_opts_append(void) { test_tc_opts_append_target(BPF_TCX_INGRESS); test_tc_opts_append_target(BPF_TCX_EGRESS); @@ -1387,7 +1387,7 @@ cleanup: ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed"); } -void serial_test_tc_opts_dev_cleanup(void) +void test_ns_tc_opts_dev_cleanup(void) { 
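/* Descriptive note on the dev_cleanup cases: each *_dev_cleanup_target() helper runs the scenario on a throwaway veth pair for the given tcx attach point, then deletes the devices; the if_nametoindex("tcx_opts2") == 0 ("dev2_removed") assertions in the neighbouring hunks check that the devices, and with them the attachments, are gone afterwards. */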
test_tc_opts_dev_cleanup_target(BPF_TCX_INGRESS); test_tc_opts_dev_cleanup_target(BPF_TCX_EGRESS); @@ -1563,7 +1563,7 @@ cleanup: assert_mprog_count(target, 0); } -void serial_test_tc_opts_mixed(void) +void test_ns_tc_opts_mixed(void) { test_tc_opts_mixed_target(BPF_TCX_INGRESS); test_tc_opts_mixed_target(BPF_TCX_EGRESS); @@ -1642,7 +1642,7 @@ cleanup: assert_mprog_count(target, 0); } -void serial_test_tc_opts_demixed(void) +void test_ns_tc_opts_demixed(void) { test_tc_opts_demixed_target(BPF_TCX_INGRESS); test_tc_opts_demixed_target(BPF_TCX_EGRESS); @@ -1813,7 +1813,7 @@ cleanup: test_tc_link__destroy(skel); } -void serial_test_tc_opts_detach(void) +void test_ns_tc_opts_detach(void) { test_tc_opts_detach_target(BPF_TCX_INGRESS); test_tc_opts_detach_target(BPF_TCX_EGRESS); @@ -2020,7 +2020,7 @@ cleanup: test_tc_link__destroy(skel); } -void serial_test_tc_opts_detach_before(void) +void test_ns_tc_opts_detach_before(void) { test_tc_opts_detach_before_target(BPF_TCX_INGRESS); test_tc_opts_detach_before_target(BPF_TCX_EGRESS); @@ -2236,7 +2236,7 @@ cleanup: test_tc_link__destroy(skel); } -void serial_test_tc_opts_detach_after(void) +void test_ns_tc_opts_detach_after(void) { test_tc_opts_detach_after_target(BPF_TCX_INGRESS); test_tc_opts_detach_after_target(BPF_TCX_EGRESS); @@ -2265,7 +2265,7 @@ static void test_tc_opts_delete_empty(int target, bool chain_tc_old) assert_mprog_count(target, 0); } -void serial_test_tc_opts_delete_empty(void) +void test_ns_tc_opts_delete_empty(void) { test_tc_opts_delete_empty(BPF_TCX_INGRESS, false); test_tc_opts_delete_empty(BPF_TCX_EGRESS, false); @@ -2372,7 +2372,7 @@ cleanup: test_tc_link__destroy(skel); } -void serial_test_tc_opts_chain_mixed(void) +void test_ns_tc_opts_chain_mixed(void) { test_tc_chain_mixed(BPF_TCX_INGRESS); test_tc_chain_mixed(BPF_TCX_EGRESS); @@ -2446,7 +2446,7 @@ cleanup: ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed"); } -void serial_test_tc_opts_max(void) +void test_ns_tc_opts_max(void) { test_tc_opts_max_target(BPF_TCX_INGRESS, 0, false); test_tc_opts_max_target(BPF_TCX_EGRESS, 0, false); @@ -2748,7 +2748,7 @@ cleanup: test_tc_link__destroy(skel); } -void serial_test_tc_opts_query(void) +void test_ns_tc_opts_query(void) { test_tc_opts_query_target(BPF_TCX_INGRESS); test_tc_opts_query_target(BPF_TCX_EGRESS); @@ -2807,7 +2807,7 @@ cleanup: test_tc_link__destroy(skel); } -void serial_test_tc_opts_query_attach(void) +void test_ns_tc_opts_query_attach(void) { test_tc_opts_query_attach_target(BPF_TCX_INGRESS); test_tc_opts_query_attach_target(BPF_TCX_EGRESS); diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c index c85798966aec..76d72a59365e 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c @@ -56,6 +56,8 @@ #define MAC_DST_FWD "00:11:22:33:44:55" #define MAC_DST "00:22:33:44:55:66" +#define MAC_SRC_FWD "00:33:44:55:66:77" +#define MAC_SRC "00:44:55:66:77:88" #define IFADDR_STR_LEN 18 #define PING_ARGS "-i 0.2 -c 3 -w 10 -q" @@ -207,11 +209,10 @@ static int netns_setup_links_and_routes(struct netns_setup_result *result) int err; if (result->dev_mode == MODE_VETH) { - SYS(fail, "ip link add src type veth peer name src_fwd"); - SYS(fail, "ip link add dst type veth peer name dst_fwd"); - - SYS(fail, "ip link set dst_fwd address " MAC_DST_FWD); - SYS(fail, "ip link set dst address " MAC_DST); + SYS(fail, "ip link add src address " MAC_SRC " type veth " + "peer name src_fwd address " 
MAC_SRC_FWD); + SYS(fail, "ip link add dst address " MAC_DST " type veth " + "peer name dst_fwd address " MAC_DST_FWD); } else if (result->dev_mode == MODE_NETKIT) { err = create_netkit(NETKIT_L3, "src", "src_fwd"); if (!ASSERT_OK(err, "create_ifindex_src")) diff --git a/tools/testing/selftests/bpf/prog_tests/test_btf_ext.c b/tools/testing/selftests/bpf/prog_tests/test_btf_ext.c new file mode 100644 index 000000000000..7d1b478c99a0 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_btf_ext.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms Inc. */ +#include <test_progs.h> +#include "test_btf_ext.skel.h" +#include "btf_helpers.h" + +static void subtest_line_func_info(void) +{ + struct test_btf_ext *skel; + struct bpf_prog_info info; + struct bpf_line_info line_info[128], *libbpf_line_info; + struct bpf_func_info func_info[128], *libbpf_func_info; + __u32 info_len = sizeof(info), libbpf_line_info_cnt, libbpf_func_info_cnt; + int err, fd; + + skel = test_btf_ext__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) + return; + + fd = bpf_program__fd(skel->progs.global_func); + + memset(&info, 0, sizeof(info)); + info.line_info = ptr_to_u64(&line_info); + info.nr_line_info = ARRAY_SIZE(line_info); + info.line_info_rec_size = sizeof(*line_info); + err = bpf_prog_get_info_by_fd(fd, &info, &info_len); + if (!ASSERT_OK(err, "prog_line_info")) + goto out; + + libbpf_line_info = bpf_program__line_info(skel->progs.global_func); + libbpf_line_info_cnt = bpf_program__line_info_cnt(skel->progs.global_func); + + memset(&info, 0, sizeof(info)); + info.func_info = ptr_to_u64(&func_info); + info.nr_func_info = ARRAY_SIZE(func_info); + info.func_info_rec_size = sizeof(*func_info); + err = bpf_prog_get_info_by_fd(fd, &info, &info_len); + if (!ASSERT_OK(err, "prog_func_info")) + goto out; + + libbpf_func_info = bpf_program__func_info(skel->progs.global_func); + libbpf_func_info_cnt = bpf_program__func_info_cnt(skel->progs.global_func); + + if (!ASSERT_OK_PTR(libbpf_line_info, "bpf_program__line_info")) + goto out; + if (!ASSERT_EQ(libbpf_line_info_cnt, info.nr_line_info, "line_info_cnt")) + goto out; + if (!ASSERT_OK_PTR(libbpf_func_info, "bpf_program__func_info")) + goto out; + if (!ASSERT_EQ(libbpf_func_info_cnt, info.nr_func_info, "func_info_cnt")) + goto out; + ASSERT_MEMEQ(libbpf_line_info, line_info, libbpf_line_info_cnt * sizeof(*line_info), + "line_info"); + ASSERT_MEMEQ(libbpf_func_info, func_info, libbpf_func_info_cnt * sizeof(*func_info), + "func_info"); +out: + test_btf_ext__destroy(skel); +} + +void test_btf_ext(void) +{ + if (test__start_subtest("line_func_info")) + subtest_line_func_info(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_kptr_return.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_kptr_return.c new file mode 100644 index 000000000000..467cc72a3588 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_kptr_return.c @@ -0,0 +1,16 @@ +#include <test_progs.h> + +#include "struct_ops_kptr_return.skel.h" +#include "struct_ops_kptr_return_fail__wrong_type.skel.h" +#include "struct_ops_kptr_return_fail__invalid_scalar.skel.h" +#include "struct_ops_kptr_return_fail__nonzero_offset.skel.h" +#include "struct_ops_kptr_return_fail__local_kptr.skel.h" + +void test_struct_ops_kptr_return(void) +{ + RUN_TESTS(struct_ops_kptr_return); + RUN_TESTS(struct_ops_kptr_return_fail__wrong_type); + RUN_TESTS(struct_ops_kptr_return_fail__invalid_scalar); + 
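/* The _fail__ variants here and below are negative tests: RUN_TESTS feeds each skeleton to the test_loader harness, which compares the verifier's verdict against the __failure/__msg annotations embedded in the BPF programs, so these skeletons are expected to be rejected at load time. */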
RUN_TESTS(struct_ops_kptr_return_fail__nonzero_offset); + RUN_TESTS(struct_ops_kptr_return_fail__local_kptr); +} diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_refcounted.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_refcounted.c new file mode 100644 index 000000000000..da60c715fc59 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_refcounted.c @@ -0,0 +1,14 @@ +#include <test_progs.h> + +#include "struct_ops_refcounted.skel.h" +#include "struct_ops_refcounted_fail__ref_leak.skel.h" +#include "struct_ops_refcounted_fail__global_subprog.skel.h" +#include "struct_ops_refcounted_fail__tail_call.skel.h" + +void test_struct_ops_refcounted(void) +{ + RUN_TESTS(struct_ops_refcounted); + RUN_TESTS(struct_ops_refcounted_fail__ref_leak); + RUN_TESTS(struct_ops_refcounted_fail__global_subprog); + RUN_TESTS(struct_ops_refcounted_fail__tail_call); +} diff --git a/tools/testing/selftests/bpf/test_sysctl.c b/tools/testing/selftests/bpf/prog_tests/test_sysctl.c index bcdbd27f22f0..273dd41ca09e 100644 --- a/tools/testing/selftests/bpf/test_sysctl.c +++ b/tools/testing/selftests/bpf/prog_tests/test_sysctl.c @@ -1,22 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2019 Facebook -#include <fcntl.h> -#include <stdint.h> -#include <stdio.h> -#include <stdlib.h> -#include <string.h> -#include <unistd.h> - -#include <linux/filter.h> - -#include <bpf/bpf.h> -#include <bpf/libbpf.h> - -#include <bpf/bpf_endian.h> -#include "bpf_util.h" +#include "test_progs.h" #include "cgroup_helpers.h" -#include "testing_helpers.h" #define CG_PATH "/foo" #define MAX_INSNS 512 @@ -1608,26 +1594,19 @@ static int run_tests(int cgfd) return fails ? -1 : 0; } -int main(int argc, char **argv) +void test_sysctl(void) { - int cgfd = -1; - int err = 0; + int cgfd; cgfd = cgroup_setup_and_join(CG_PATH); - if (cgfd < 0) - goto err; + if (!ASSERT_OK_FD(cgfd, "create_cgroup")) + goto out; - /* Use libbpf 1.0 API mode */ - libbpf_set_strict_mode(LIBBPF_STRICT_ALL); + if (!ASSERT_OK(run_tests(cgfd), "run_tests")) + goto out; - if (run_tests(cgfd)) - goto err; - - goto out; -err: - err = -1; out: close(cgfd); cleanup_cgroup_environment(); - return err; + return; } diff --git a/tools/testing/selftests/bpf/prog_tests/test_tunnel.c b/tools/testing/selftests/bpf/prog_tests/test_tunnel.c index cec746e77cd3..bae0e9de277d 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_tunnel.c +++ b/tools/testing/selftests/bpf/prog_tests/test_tunnel.c @@ -71,6 +71,8 @@ #define IP4_ADDR2_VETH1 "172.16.1.20" #define IP4_ADDR_TUNL_DEV0 "10.1.1.100" #define IP4_ADDR_TUNL_DEV1 "10.1.1.200" +#define IP6_ADDR_TUNL_DEV0 "fc80::100" +#define IP6_ADDR_TUNL_DEV1 "fc80::200" #define IP6_ADDR_VETH0 "::11" #define IP6_ADDR1_VETH1 "::22" @@ -98,6 +100,27 @@ #define XFRM_SPI_IN_TO_OUT 0x1 #define XFRM_SPI_OUT_TO_IN 0x2 +#define GRE_TUNL_DEV0 "gre00" +#define GRE_TUNL_DEV1 "gre11" + +#define IP6GRE_TUNL_DEV0 "ip6gre00" +#define IP6GRE_TUNL_DEV1 "ip6gre11" + +#define ERSPAN_TUNL_DEV0 "erspan00" +#define ERSPAN_TUNL_DEV1 "erspan11" + +#define IP6ERSPAN_TUNL_DEV0 "ip6erspan00" +#define IP6ERSPAN_TUNL_DEV1 "ip6erspan11" + +#define GENEVE_TUNL_DEV0 "geneve00" +#define GENEVE_TUNL_DEV1 "geneve11" + +#define IP6GENEVE_TUNL_DEV0 "ip6geneve00" +#define IP6GENEVE_TUNL_DEV1 "ip6geneve11" + +#define IP6TNL_TUNL_DEV0 "ip6tnl00" +#define IP6TNL_TUNL_DEV1 "ip6tnl11" + #define PING_ARGS "-i 0.01 -c 3 -w 10 -q" static int config_device(void) @@ -216,6 +239,18 @@ fail: return -1; } +static int set_ipv4_addr(const char 
*dev0, const char *dev1) +{ + SYS(fail, "ip -n at_ns0 link set dev %s up", dev0); + SYS(fail, "ip -n at_ns0 addr add dev %s %s/24", dev0, IP4_ADDR_TUNL_DEV0); + SYS(fail, "ip link set dev %s up", dev1); + SYS(fail, "ip addr add dev %s %s/24", dev1, IP4_ADDR_TUNL_DEV1); + + return 0; +fail: + return 1; +} + static int add_ipip_tunnel(enum ipip_encap encap) { int err; @@ -356,6 +391,99 @@ static void delete_xfrm_tunnel(void) IP4_ADDR1_VETH1, IP4_ADDR_VETH0, XFRM_SPI_OUT_TO_IN); } +static int add_ipv4_tunnel(const char *dev0, const char *dev1, + const char *type, const char *opt) +{ + if (!type || !opt || !dev0 || !dev1) + return -1; + + SYS(fail, "ip -n at_ns0 link add dev %s type %s %s local %s remote %s", + dev0, type, opt, IP4_ADDR_VETH0, IP4_ADDR1_VETH1); + + SYS(fail, "ip link add dev %s type %s external", dev1, type); + + return set_ipv4_addr(dev0, dev1); +fail: + return -1; +} + +static void delete_tunnel(const char *dev0, const char *dev1) +{ + if (!dev0 || !dev1) + return; + + SYS_NOFAIL("ip netns exec at_ns0 ip link delete dev %s", dev0); + SYS_NOFAIL("ip link delete dev %s", dev1); +} + +static int set_ipv6_addr(const char *dev0, const char *dev1) +{ + /* disable IPv6 DAD because it might take too long and fail tests */ + SYS(fail, "ip -n at_ns0 addr add %s/96 dev veth0 nodad", IP6_ADDR_VETH0); + SYS(fail, "ip -n at_ns0 link set dev veth0 up"); + SYS(fail, "ip addr add %s/96 dev veth1 nodad", IP6_ADDR1_VETH1); + SYS(fail, "ip link set dev veth1 up"); + + SYS(fail, "ip -n at_ns0 addr add dev %s %s/24", dev0, IP4_ADDR_TUNL_DEV0); + SYS(fail, "ip -n at_ns0 addr add dev %s %s/96 nodad", dev0, IP6_ADDR_TUNL_DEV0); + SYS(fail, "ip -n at_ns0 link set dev %s up", dev0); + + SYS(fail, "ip addr add dev %s %s/24", dev1, IP4_ADDR_TUNL_DEV1); + SYS(fail, "ip addr add dev %s %s/96 nodad", dev1, IP6_ADDR_TUNL_DEV1); + SYS(fail, "ip link set dev %s up", dev1); + return 0; +fail: + return 1; +} + +static int add_ipv6_tunnel(const char *dev0, const char *dev1, + const char *type, const char *opt) +{ + if (!type || !opt || !dev0 || !dev1) + return -1; + + SYS(fail, "ip -n at_ns0 link add dev %s type %s %s local %s remote %s", + dev0, type, opt, IP6_ADDR_VETH0, IP6_ADDR1_VETH1); + + SYS(fail, "ip link add dev %s type %s external", dev1, type); + + return set_ipv6_addr(dev0, dev1); +fail: + return -1; +} + +static int add_geneve_tunnel(const char *dev0, const char *dev1, + const char *type, const char *opt) +{ + if (!type || !opt || !dev0 || !dev1) + return -1; + + SYS(fail, "ip -n at_ns0 link add dev %s type %s id 2 %s remote %s", + dev0, type, opt, IP4_ADDR1_VETH1); + + SYS(fail, "ip link add dev %s type %s %s external", dev1, type, opt); + + return set_ipv4_addr(dev0, dev1); +fail: + return -1; +} + +static int add_ip6geneve_tunnel(const char *dev0, const char *dev1, + const char *type, const char *opt) +{ + if (!type || !opt || !dev0 || !dev1) + return -1; + + SYS(fail, "ip -n at_ns0 link add dev %s type %s id 22 %s remote %s", + dev0, type, opt, IP6_ADDR1_VETH1); + + SYS(fail, "ip link add dev %s type %s %s external", dev1, type, opt); + + return set_ipv6_addr(dev0, dev1); +fail: + return -1; +} + static int test_ping(int family, const char *addr) { SYS(fail, "%s %s %s > /dev/null", ping_command(family), PING_ARGS, addr); @@ -364,32 +492,76 @@ fail: return -1; } -static int attach_tc_prog(struct bpf_tc_hook *hook, int igr_fd, int egr_fd) +static void ping_dev0(void) { + /* ping from root namespace test */ + test_ping(AF_INET, IP4_ADDR_TUNL_DEV0); +} + +static void ping_dev1(void) +{ + struct 
nstoken *nstoken; + + /* ping from at_ns0 namespace test */ + nstoken = open_netns("at_ns0"); + if (!ASSERT_OK_PTR(nstoken, "setns")) + return; + + test_ping(AF_INET, IP4_ADDR_TUNL_DEV1); + close_netns(nstoken); +} + +static void ping6_veth0(void) +{ + test_ping(AF_INET6, IP6_ADDR_VETH0); +} + +static void ping6_dev0(void) +{ + test_ping(AF_INET6, IP6_ADDR_TUNL_DEV0); +} + +static void ping6_dev1(void) +{ + struct nstoken *nstoken; + + /* ping from at_ns0 namespace test */ + nstoken = open_netns("at_ns0"); + if (!ASSERT_OK_PTR(nstoken, "setns")) + return; + + test_ping(AF_INET6, IP6_ADDR_TUNL_DEV1); + close_netns(nstoken); +} + +static int attach_tc_prog(int ifindex, int igr_fd, int egr_fd) +{ + DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = ifindex, + .attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS); DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts1, .handle = 1, .priority = 1, .prog_fd = igr_fd); DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts2, .handle = 1, .priority = 1, .prog_fd = egr_fd); int ret; - ret = bpf_tc_hook_create(hook); + ret = bpf_tc_hook_create(&hook); if (!ASSERT_OK(ret, "create tc hook")) return ret; if (igr_fd >= 0) { - hook->attach_point = BPF_TC_INGRESS; - ret = bpf_tc_attach(hook, &opts1); + hook.attach_point = BPF_TC_INGRESS; + ret = bpf_tc_attach(&hook, &opts1); if (!ASSERT_OK(ret, "bpf_tc_attach")) { - bpf_tc_hook_destroy(hook); + bpf_tc_hook_destroy(&hook); return ret; } } if (egr_fd >= 0) { - hook->attach_point = BPF_TC_EGRESS; - ret = bpf_tc_attach(hook, &opts2); + hook.attach_point = BPF_TC_EGRESS; + ret = bpf_tc_attach(&hook, &opts2); if (!ASSERT_OK(ret, "bpf_tc_attach")) { - bpf_tc_hook_destroy(hook); + bpf_tc_hook_destroy(&hook); return ret; } } @@ -397,6 +569,50 @@ static int attach_tc_prog(struct bpf_tc_hook *hook, int igr_fd, int egr_fd) return 0; } +static int generic_attach(const char *dev, int igr_fd, int egr_fd) +{ + int ifindex; + + if (!ASSERT_OK_FD(igr_fd, "check ingress fd")) + return -1; + if (!ASSERT_OK_FD(egr_fd, "check egress fd")) + return -1; + + ifindex = if_nametoindex(dev); + if (!ASSERT_NEQ(ifindex, 0, "get ifindex")) + return -1; + + return attach_tc_prog(ifindex, igr_fd, egr_fd); +} + +static int generic_attach_igr(const char *dev, int igr_fd) +{ + int ifindex; + + if (!ASSERT_OK_FD(igr_fd, "check ingress fd")) + return -1; + + ifindex = if_nametoindex(dev); + if (!ASSERT_NEQ(ifindex, 0, "get ifindex")) + return -1; + + return attach_tc_prog(ifindex, igr_fd, -1); +} + +static int generic_attach_egr(const char *dev, int egr_fd) +{ + int ifindex; + + if (!ASSERT_OK_FD(egr_fd, "check egress fd")) + return -1; + + ifindex = if_nametoindex(dev); + if (!ASSERT_NEQ(ifindex, 0, "get ifindex")) + return -1; + + return attach_tc_prog(ifindex, -1, egr_fd); +} + static void test_vxlan_tunnel(void) { struct test_tunnel_kern *skel = NULL; @@ -404,11 +620,9 @@ static void test_vxlan_tunnel(void) int local_ip_map_fd = -1; int set_src_prog_fd, get_src_prog_fd; int set_dst_prog_fd; - int key = 0, ifindex = -1; + int key = 0; uint local_ip; int err; - DECLARE_LIBBPF_OPTS(bpf_tc_hook, tc_hook, - .attach_point = BPF_TC_INGRESS); /* add vxlan tunnel */ err = add_vxlan_tunnel(); @@ -419,42 +633,22 @@ static void test_vxlan_tunnel(void) skel = test_tunnel_kern__open_and_load(); if (!ASSERT_OK_PTR(skel, "test_tunnel_kern__open_and_load")) goto done; - ifindex = if_nametoindex(VXLAN_TUNL_DEV1); - if (!ASSERT_NEQ(ifindex, 0, "vxlan11 ifindex")) - goto done; - tc_hook.ifindex = ifindex; get_src_prog_fd = bpf_program__fd(skel->progs.vxlan_get_tunnel_src); set_src_prog_fd = 
bpf_program__fd(skel->progs.vxlan_set_tunnel_src); - if (!ASSERT_GE(get_src_prog_fd, 0, "bpf_program__fd")) - goto done; - if (!ASSERT_GE(set_src_prog_fd, 0, "bpf_program__fd")) - goto done; - if (attach_tc_prog(&tc_hook, get_src_prog_fd, set_src_prog_fd)) + if (generic_attach(VXLAN_TUNL_DEV1, get_src_prog_fd, set_src_prog_fd)) goto done; /* load and attach bpf prog to veth dev tc hook point */ - ifindex = if_nametoindex("veth1"); - if (!ASSERT_NEQ(ifindex, 0, "veth1 ifindex")) - goto done; - tc_hook.ifindex = ifindex; set_dst_prog_fd = bpf_program__fd(skel->progs.veth_set_outer_dst); - if (!ASSERT_GE(set_dst_prog_fd, 0, "bpf_program__fd")) - goto done; - if (attach_tc_prog(&tc_hook, set_dst_prog_fd, -1)) + if (generic_attach_igr("veth1", set_dst_prog_fd)) goto done; /* load and attach prog set_md to tunnel dev tc hook point at_ns0 */ nstoken = open_netns("at_ns0"); if (!ASSERT_OK_PTR(nstoken, "setns src")) goto done; - ifindex = if_nametoindex(VXLAN_TUNL_DEV0); - if (!ASSERT_NEQ(ifindex, 0, "vxlan00 ifindex")) - goto done; - tc_hook.ifindex = ifindex; set_dst_prog_fd = bpf_program__fd(skel->progs.vxlan_set_tunnel_dst); - if (!ASSERT_GE(set_dst_prog_fd, 0, "bpf_program__fd")) - goto done; - if (attach_tc_prog(&tc_hook, -1, set_dst_prog_fd)) + if (generic_attach_egr(VXLAN_TUNL_DEV0, set_dst_prog_fd)) goto done; close_netns(nstoken); @@ -468,9 +662,7 @@ static void test_vxlan_tunnel(void) goto done; /* ping test */ - err = test_ping(AF_INET, IP4_ADDR_TUNL_DEV0); - if (!ASSERT_OK(err, "test_ping")) - goto done; + ping_dev0(); done: /* delete vxlan tunnel */ @@ -488,11 +680,9 @@ static void test_ip6vxlan_tunnel(void) int local_ip_map_fd = -1; int set_src_prog_fd, get_src_prog_fd; int set_dst_prog_fd; - int key = 0, ifindex = -1; + int key = 0; uint local_ip; int err; - DECLARE_LIBBPF_OPTS(bpf_tc_hook, tc_hook, - .attach_point = BPF_TC_INGRESS); /* add vxlan tunnel */ err = add_ip6vxlan_tunnel(); @@ -503,31 +693,17 @@ static void test_ip6vxlan_tunnel(void) skel = test_tunnel_kern__open_and_load(); if (!ASSERT_OK_PTR(skel, "test_tunnel_kern__open_and_load")) goto done; - ifindex = if_nametoindex(IP6VXLAN_TUNL_DEV1); - if (!ASSERT_NEQ(ifindex, 0, "ip6vxlan11 ifindex")) - goto done; - tc_hook.ifindex = ifindex; get_src_prog_fd = bpf_program__fd(skel->progs.ip6vxlan_get_tunnel_src); set_src_prog_fd = bpf_program__fd(skel->progs.ip6vxlan_set_tunnel_src); - if (!ASSERT_GE(set_src_prog_fd, 0, "bpf_program__fd")) - goto done; - if (!ASSERT_GE(get_src_prog_fd, 0, "bpf_program__fd")) - goto done; - if (attach_tc_prog(&tc_hook, get_src_prog_fd, set_src_prog_fd)) + if (generic_attach(IP6VXLAN_TUNL_DEV1, get_src_prog_fd, set_src_prog_fd)) goto done; /* load and attach prog set_md to tunnel dev tc hook point at_ns0 */ nstoken = open_netns("at_ns0"); if (!ASSERT_OK_PTR(nstoken, "setns src")) goto done; - ifindex = if_nametoindex(IP6VXLAN_TUNL_DEV0); - if (!ASSERT_NEQ(ifindex, 0, "ip6vxlan00 ifindex")) - goto done; - tc_hook.ifindex = ifindex; set_dst_prog_fd = bpf_program__fd(skel->progs.ip6vxlan_set_tunnel_dst); - if (!ASSERT_GE(set_dst_prog_fd, 0, "bpf_program__fd")) - goto done; - if (attach_tc_prog(&tc_hook, -1, set_dst_prog_fd)) + if (generic_attach_egr(IP6VXLAN_TUNL_DEV0, set_dst_prog_fd)) goto done; close_netns(nstoken); @@ -541,9 +717,7 @@ static void test_ip6vxlan_tunnel(void) goto done; /* ping test */ - err = test_ping(AF_INET, IP4_ADDR_TUNL_DEV0); - if (!ASSERT_OK(err, "test_ping")) - goto done; + ping_dev0(); done: /* delete ipv6 vxlan tunnel */ @@ -557,12 +731,8 @@ done: static void 
test_ipip_tunnel(enum ipip_encap encap) { struct test_tunnel_kern *skel = NULL; - struct nstoken *nstoken; int set_src_prog_fd, get_src_prog_fd; - int ifindex = -1; int err; - DECLARE_LIBBPF_OPTS(bpf_tc_hook, tc_hook, - .attach_point = BPF_TC_INGRESS); /* add ipip tunnel */ err = add_ipip_tunnel(encap); @@ -573,10 +743,6 @@ static void test_ipip_tunnel(enum ipip_encap encap) skel = test_tunnel_kern__open_and_load(); if (!ASSERT_OK_PTR(skel, "test_tunnel_kern__open_and_load")) goto done; - ifindex = if_nametoindex(IPIP_TUNL_DEV1); - if (!ASSERT_NEQ(ifindex, 0, "ipip11 ifindex")) - goto done; - tc_hook.ifindex = ifindex; switch (encap) { case FOU: @@ -598,26 +764,11 @@ static void test_ipip_tunnel(enum ipip_encap encap) skel->progs.ipip_set_tunnel); } - if (!ASSERT_GE(set_src_prog_fd, 0, "bpf_program__fd")) - goto done; - if (!ASSERT_GE(get_src_prog_fd, 0, "bpf_program__fd")) - goto done; - if (attach_tc_prog(&tc_hook, get_src_prog_fd, set_src_prog_fd)) + if (generic_attach(IPIP_TUNL_DEV1, get_src_prog_fd, set_src_prog_fd)) goto done; - /* ping from root namespace test */ - err = test_ping(AF_INET, IP4_ADDR_TUNL_DEV0); - if (!ASSERT_OK(err, "test_ping")) - goto done; - - /* ping from at_ns0 namespace test */ - nstoken = open_netns("at_ns0"); - if (!ASSERT_OK_PTR(nstoken, "setns")) - goto done; - err = test_ping(AF_INET, IP4_ADDR_TUNL_DEV1); - if (!ASSERT_OK(err, "test_ping")) - goto done; - close_netns(nstoken); + ping_dev0(); + ping_dev1(); done: /* delete ipip tunnel */ @@ -628,11 +779,8 @@ done: static void test_xfrm_tunnel(void) { - DECLARE_LIBBPF_OPTS(bpf_tc_hook, tc_hook, - .attach_point = BPF_TC_INGRESS); LIBBPF_OPTS(bpf_xdp_attach_opts, opts); struct test_tunnel_kern *skel = NULL; - struct nstoken *nstoken; int xdp_prog_fd; int tc_prog_fd; int ifindex; @@ -646,19 +794,16 @@ static void test_xfrm_tunnel(void) if (!ASSERT_OK_PTR(skel, "test_tunnel_kern__open_and_load")) goto done; - ifindex = if_nametoindex("veth1"); - if (!ASSERT_NEQ(ifindex, 0, "veth1 ifindex")) - goto done; /* attach tc prog to tunnel dev */ - tc_hook.ifindex = ifindex; tc_prog_fd = bpf_program__fd(skel->progs.xfrm_get_state); - if (!ASSERT_GE(tc_prog_fd, 0, "bpf_program__fd")) - goto done; - if (attach_tc_prog(&tc_hook, tc_prog_fd, -1)) + if (generic_attach_igr("veth1", tc_prog_fd)) goto done; /* attach xdp prog to tunnel dev */ + ifindex = if_nametoindex("veth1"); + if (!ASSERT_NEQ(ifindex, 0, "veth1 ifindex")) + goto done; xdp_prog_fd = bpf_program__fd(skel->progs.xfrm_get_state_xdp); if (!ASSERT_GE(xdp_prog_fd, 0, "bpf_program__fd")) goto done; @@ -666,14 +811,7 @@ static void test_xfrm_tunnel(void) if (!ASSERT_OK(err, "bpf_xdp_attach")) goto done; - /* ping from at_ns0 namespace test */ - nstoken = open_netns("at_ns0"); - if (!ASSERT_OK_PTR(nstoken, "setns")) - goto done; - err = test_ping(AF_INET, IP4_ADDR_TUNL_DEV1); - close_netns(nstoken); - if (!ASSERT_OK(err, "test_ping")) - goto done; + ping_dev1(); if (!ASSERT_EQ(skel->bss->xfrm_reqid, 1, "req_id")) goto done; @@ -690,6 +828,281 @@ done: test_tunnel_kern__destroy(skel); } +enum gre_test { + GRE, + GRE_NOKEY, + GRETAP, + GRETAP_NOKEY, +}; + +static void test_gre_tunnel(enum gre_test test) +{ + struct test_tunnel_kern *skel; + int set_fd, get_fd; + int err; + + skel = test_tunnel_kern__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_tunnel_kern__open_and_load")) + return; + + switch (test) { + case GRE: + err = add_ipv4_tunnel(GRE_TUNL_DEV0, GRE_TUNL_DEV1, "gre", "seq"); + set_fd = bpf_program__fd(skel->progs.gre_set_tunnel_no_key); + get_fd = 
bpf_program__fd(skel->progs.gre_get_tunnel); + break; + case GRE_NOKEY: + err = add_ipv4_tunnel(GRE_TUNL_DEV0, GRE_TUNL_DEV1, "gre", "seq key 2"); + set_fd = bpf_program__fd(skel->progs.gre_set_tunnel); + get_fd = bpf_program__fd(skel->progs.gre_get_tunnel); + break; + case GRETAP: + err = add_ipv4_tunnel(GRE_TUNL_DEV0, GRE_TUNL_DEV1, "gretap", "seq"); + set_fd = bpf_program__fd(skel->progs.gre_set_tunnel_no_key); + get_fd = bpf_program__fd(skel->progs.gre_get_tunnel); + break; + case GRETAP_NOKEY: + err = add_ipv4_tunnel(GRE_TUNL_DEV0, GRE_TUNL_DEV1, "gretap", "seq key 2"); + set_fd = bpf_program__fd(skel->progs.gre_set_tunnel); + get_fd = bpf_program__fd(skel->progs.gre_get_tunnel); + break; + } + if (!ASSERT_OK(err, "add tunnel")) + goto done; + + if (generic_attach(GRE_TUNL_DEV1, get_fd, set_fd)) + goto done; + + ping_dev0(); + ping_dev1(); + +done: + delete_tunnel(GRE_TUNL_DEV0, GRE_TUNL_DEV1); + test_tunnel_kern__destroy(skel); +} + +enum ip6gre_test { + IP6GRE, + IP6GRETAP +}; + +static void test_ip6gre_tunnel(enum ip6gre_test test) +{ + struct test_tunnel_kern *skel; + int set_fd, get_fd; + int err; + + skel = test_tunnel_kern__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_tunnel_kern__open_and_load")) + return; + + switch (test) { + case IP6GRE: + err = add_ipv6_tunnel(IP6GRE_TUNL_DEV0, IP6GRE_TUNL_DEV1, + "ip6gre", "flowlabel 0xbcdef key 2"); + break; + case IP6GRETAP: + err = add_ipv6_tunnel(IP6GRE_TUNL_DEV0, IP6GRE_TUNL_DEV1, + "ip6gretap", "flowlabel 0xbcdef key 2"); + break; + } + if (!ASSERT_OK(err, "add tunnel")) + goto done; + + set_fd = bpf_program__fd(skel->progs.ip6gretap_set_tunnel); + get_fd = bpf_program__fd(skel->progs.ip6gretap_get_tunnel); + if (generic_attach(IP6GRE_TUNL_DEV1, get_fd, set_fd)) + goto done; + + ping6_veth0(); + ping6_dev1(); + ping_dev0(); + ping_dev1(); +done: + delete_tunnel(IP6GRE_TUNL_DEV0, IP6GRE_TUNL_DEV1); + test_tunnel_kern__destroy(skel); +} + +enum erspan_test { + V1, + V2 +}; + +static void test_erspan_tunnel(enum erspan_test test) +{ + struct test_tunnel_kern *skel; + int set_fd, get_fd; + int err; + + skel = test_tunnel_kern__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_tunnel_kern__open_and_load")) + return; + + switch (test) { + case V1: + err = add_ipv4_tunnel(ERSPAN_TUNL_DEV0, ERSPAN_TUNL_DEV1, + "erspan", "seq key 2 erspan_ver 1 erspan 123"); + break; + case V2: + err = add_ipv4_tunnel(ERSPAN_TUNL_DEV0, ERSPAN_TUNL_DEV1, + "erspan", + "seq key 2 erspan_ver 2 erspan_dir egress erspan_hwid 3"); + break; + } + if (!ASSERT_OK(err, "add tunnel")) + goto done; + + set_fd = bpf_program__fd(skel->progs.erspan_set_tunnel); + get_fd = bpf_program__fd(skel->progs.erspan_get_tunnel); + if (generic_attach(ERSPAN_TUNL_DEV1, get_fd, set_fd)) + goto done; + + ping_dev0(); + ping_dev1(); +done: + delete_tunnel(ERSPAN_TUNL_DEV0, ERSPAN_TUNL_DEV1); + test_tunnel_kern__destroy(skel); +} + +static void test_ip6erspan_tunnel(enum erspan_test test) +{ + struct test_tunnel_kern *skel; + int set_fd, get_fd; + int err; + + skel = test_tunnel_kern__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_tunnel_kern__open_and_load")) + return; + + switch (test) { + case V1: + err = add_ipv6_tunnel(IP6ERSPAN_TUNL_DEV0, IP6ERSPAN_TUNL_DEV1, + "ip6erspan", "seq key 2 erspan_ver 1 erspan 123"); + break; + case V2: + err = add_ipv6_tunnel(IP6ERSPAN_TUNL_DEV0, IP6ERSPAN_TUNL_DEV1, + "ip6erspan", + "seq key 2 erspan_ver 2 erspan_dir egress erspan_hwid 7"); + break; + } + if (!ASSERT_OK(err, "add tunnel")) + goto done; + + set_fd = 
bpf_program__fd(skel->progs.ip4ip6erspan_set_tunnel); + get_fd = bpf_program__fd(skel->progs.ip4ip6erspan_get_tunnel); + if (generic_attach(IP6ERSPAN_TUNL_DEV1, get_fd, set_fd)) + goto done; + + ping6_veth0(); + ping_dev1(); +done: + delete_tunnel(IP6ERSPAN_TUNL_DEV0, IP6ERSPAN_TUNL_DEV1); + test_tunnel_kern__destroy(skel); +} + +static void test_geneve_tunnel(void) +{ + struct test_tunnel_kern *skel; + int set_fd, get_fd; + int err; + + skel = test_tunnel_kern__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_tunnel_kern__open_and_load")) + return; + + err = add_geneve_tunnel(GENEVE_TUNL_DEV0, GENEVE_TUNL_DEV1, + "geneve", "dstport 6081"); + if (!ASSERT_OK(err, "add tunnel")) + goto done; + + set_fd = bpf_program__fd(skel->progs.geneve_set_tunnel); + get_fd = bpf_program__fd(skel->progs.geneve_get_tunnel); + if (generic_attach(GENEVE_TUNL_DEV1, get_fd, set_fd)) + goto done; + + ping_dev0(); + ping_dev1(); +done: + delete_tunnel(GENEVE_TUNL_DEV0, GENEVE_TUNL_DEV1); + test_tunnel_kern__destroy(skel); +} + +static void test_ip6geneve_tunnel(void) +{ + struct test_tunnel_kern *skel; + int set_fd, get_fd; + int err; + + skel = test_tunnel_kern__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_tunnel_kern__open_and_load")) + return; + + err = add_ip6geneve_tunnel(IP6GENEVE_TUNL_DEV0, IP6GENEVE_TUNL_DEV1, + "geneve", ""); + if (!ASSERT_OK(err, "add tunnel")) + goto done; + + set_fd = bpf_program__fd(skel->progs.ip6geneve_set_tunnel); + get_fd = bpf_program__fd(skel->progs.ip6geneve_get_tunnel); + if (generic_attach(IP6GENEVE_TUNL_DEV1, get_fd, set_fd)) + goto done; + + ping_dev0(); + ping_dev1(); +done: + delete_tunnel(IP6GENEVE_TUNL_DEV0, IP6GENEVE_TUNL_DEV1); + test_tunnel_kern__destroy(skel); +} + +enum ip6tnl_test { + IPIP6, + IP6IP6 +}; + +static void test_ip6tnl_tunnel(enum ip6tnl_test test) +{ + struct test_tunnel_kern *skel; + int set_fd, get_fd; + int err; + + skel = test_tunnel_kern__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_tunnel_kern__open_and_load")) + return; + + err = add_ipv6_tunnel(IP6TNL_TUNL_DEV0, IP6TNL_TUNL_DEV1, "ip6tnl", ""); + if (!ASSERT_OK(err, "add tunnel")) + goto done; + + switch (test) { + case IPIP6: + set_fd = bpf_program__fd(skel->progs.ipip6_set_tunnel); + get_fd = bpf_program__fd(skel->progs.ipip6_get_tunnel); + break; + case IP6IP6: + set_fd = bpf_program__fd(skel->progs.ip6ip6_set_tunnel); + get_fd = bpf_program__fd(skel->progs.ip6ip6_get_tunnel); + break; + } + if (generic_attach(IP6TNL_TUNL_DEV1, get_fd, set_fd)) + goto done; + + ping6_veth0(); + switch (test) { + case IPIP6: + ping_dev0(); + ping_dev1(); + break; + case IP6IP6: + ping6_dev0(); + ping6_dev1(); + break; + } + +done: + delete_tunnel(IP6TNL_TUNL_DEV0, IP6TNL_TUNL_DEV1); + test_tunnel_kern__destroy(skel); +} + #define RUN_TEST(name, ...) 
\ ({ \ if (test__start_subtest(#name)) { \ @@ -707,6 +1120,20 @@ static void *test_tunnel_run_tests(void *arg) RUN_TEST(ipip_tunnel, FOU); RUN_TEST(ipip_tunnel, GUE); RUN_TEST(xfrm_tunnel); + RUN_TEST(gre_tunnel, GRE); + RUN_TEST(gre_tunnel, GRE_NOKEY); + RUN_TEST(gre_tunnel, GRETAP); + RUN_TEST(gre_tunnel, GRETAP_NOKEY); + RUN_TEST(ip6gre_tunnel, IP6GRE); + RUN_TEST(ip6gre_tunnel, IP6GRETAP); + RUN_TEST(erspan_tunnel, V1); + RUN_TEST(erspan_tunnel, V2); + RUN_TEST(ip6erspan_tunnel, V1); + RUN_TEST(ip6erspan_tunnel, V2); + RUN_TEST(geneve_tunnel); + RUN_TEST(ip6geneve_tunnel); + RUN_TEST(ip6tnl_tunnel, IPIP6); + RUN_TEST(ip6tnl_tunnel, IP6IP6); return NULL; } diff --git a/tools/testing/selftests/bpf/prog_tests/test_veristat.c b/tools/testing/selftests/bpf/prog_tests/test_veristat.c new file mode 100644 index 000000000000..367f47e4a936 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_veristat.c @@ -0,0 +1,261 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */ +#include <test_progs.h> +#include <string.h> +#include <stdio.h> + +#define __CHECK_STR(str, name) \ + do { \ + if (!ASSERT_HAS_SUBSTR(fix->output, (str), (name))) \ + goto out; \ + } while (0) + +struct fixture { + char tmpfile[80]; + int fd; + char *output; + size_t sz; + char veristat[80]; +}; + +static struct fixture *init_fixture(void) +{ + struct fixture *fix = malloc(sizeof(struct fixture)); + + /* for no_alu32 and cpuv4 veristat is in parent folder */ + if (access("./veristat", F_OK) == 0) + strcpy(fix->veristat, "./veristat"); + else if (access("../veristat", F_OK) == 0) + strcpy(fix->veristat, "../veristat"); + else + PRINT_FAIL("Can't find veristat binary"); + + snprintf(fix->tmpfile, sizeof(fix->tmpfile), "/tmp/test_veristat.XXXXXX"); + fix->fd = mkstemp(fix->tmpfile); + fix->sz = 1000000; + fix->output = malloc(fix->sz); + return fix; +} + +static void teardown_fixture(struct fixture *fix) +{ + free(fix->output); + close(fix->fd); + remove(fix->tmpfile); + free(fix); +} + +static void test_set_global_vars_succeeds(void) +{ + struct fixture *fix = init_fixture(); + + SYS(out, + "%s set_global_vars.bpf.o"\ + " -G \"var_s64 = 0xf000000000000001\" "\ + " -G \"var_u64 = 0xfedcba9876543210\" "\ + " -G \"var_s32 = -0x80000000\" "\ + " -G \"var_u32 = 0x76543210\" "\ + " -G \"var_s16 = -32768\" "\ + " -G \"var_u16 = 60652\" "\ + " -G \"var_s8 = -128\" "\ + " -G \"var_u8 = 255\" "\ + " -G \"var_ea = EA2\" "\ + " -G \"var_eb = EB2\" "\ + " -G \"var_ec=EC2\" "\ + " -G \"var_b = 1\" "\ + " -G \"struct1[2].struct2[1][2].u.var_u8[2]=170\" "\ + " -G \"union1.struct3.var_u8_l = 0xaa\" "\ + " -G \"union1.struct3.var_u8_h = 0xaa\" "\ + " -G \"arr[3]= 171\" " \ + " -G \"arr[EA2] =172\" " \ + " -G \"enum_arr[EC2]=EA3\" " \ + " -G \"three_d[31][7][EA2]=173\"" \ + " -G \"struct1[2].struct2[1][2].u.mat[5][3]=174\" " \ + " -G \"struct11 [ 7 ] [ 5 ] .struct2[0][1].u.mat[3][0] = 175\" " \ + " -vl2 > %s", fix->veristat, fix->tmpfile); + + read(fix->fd, fix->output, fix->sz); + __CHECK_STR("_w=0xf000000000000001 ", "var_s64 = 0xf000000000000001"); + __CHECK_STR("_w=0xfedcba9876543210 ", "var_u64 = 0xfedcba9876543210"); + __CHECK_STR("_w=0x80000000 ", "var_s32 = -0x80000000"); + __CHECK_STR("_w=0x76543210 ", "var_u32 = 0x76543210"); + __CHECK_STR("_w=0x8000 ", "var_s16 = -32768"); + __CHECK_STR("_w=0xecec ", "var_u16 = 60652"); + __CHECK_STR("_w=128 ", "var_s8 = -128"); + __CHECK_STR("_w=255 ", "var_u8 = 255"); + __CHECK_STR("_w=11 ", "var_ea = EA2"); + __CHECK_STR("_w=12 ", "var_eb = 
EB2"); + __CHECK_STR("_w=13 ", "var_ec = EC2"); + __CHECK_STR("_w=1 ", "var_b = 1"); + __CHECK_STR("_w=170 ", "struct1[2].struct2[1][2].u.var_u8[2]=170"); + __CHECK_STR("_w=0xaaaa ", "union1.var_u16 = 0xaaaa"); + __CHECK_STR("_w=171 ", "arr[3]= 171"); + __CHECK_STR("_w=172 ", "arr[EA2] =172"); + __CHECK_STR("_w=10 ", "enum_arr[EC2]=EA3"); + __CHECK_STR("_w=173 ", "matrix[31][7][11]=173"); + __CHECK_STR("_w=174 ", "struct1[2].struct2[1][2].u.mat[5][3]=174"); + __CHECK_STR("_w=175 ", "struct11[7][5].struct2[0][1].u.mat[3][0]=175"); + +out: + teardown_fixture(fix); +} + +static void test_set_global_vars_from_file_succeeds(void) +{ + struct fixture *fix = init_fixture(); + char input_file[80]; + const char *vars = "var_s16 = -32768\nvar_u16 = 60652"; + int fd; + + snprintf(input_file, sizeof(input_file), "/tmp/veristat_input.XXXXXX"); + fd = mkstemp(input_file); + if (!ASSERT_GE(fd, 0, "valid fd")) + goto out; + + write(fd, vars, strlen(vars)); + syncfs(fd); + SYS(out, "%s set_global_vars.bpf.o -G \"@%s\" -vl2 > %s", + fix->veristat, input_file, fix->tmpfile); + read(fix->fd, fix->output, fix->sz); + __CHECK_STR("_w=0x8000 ", "var_s16 = -32768"); + __CHECK_STR("_w=0xecec ", "var_u16 = 60652"); + +out: + close(fd); + remove(input_file); + teardown_fixture(fix); +} + +static void test_set_global_vars_out_of_range(void) +{ + struct fixture *fix = init_fixture(); + + SYS_FAIL(out, + "%s set_global_vars.bpf.o -G \"var_s32 = 2147483648\" -vl2 2> %s", + fix->veristat, fix->tmpfile); + + read(fix->fd, fix->output, fix->sz); + __CHECK_STR("is out of range [-2147483648; 2147483647]", "out of range"); + +out: + teardown_fixture(fix); +} + +static void test_unsupported_ptr_array_type(void) +{ + struct fixture *fix = init_fixture(); + + SYS_FAIL(out, + "%s set_global_vars.bpf.o -G \"ptr_arr[0] = 0\" -vl2 2> %s", + fix->veristat, fix->tmpfile); + + read(fix->fd, fix->output, fix->sz); + __CHECK_STR("Can't set ptr_arr[0]. 
Only ints and enums are supported", "ptr_arr"); + +out: + teardown_fixture(fix); +} + +static void test_array_out_of_bounds(void) +{ + struct fixture *fix = init_fixture(); + + SYS_FAIL(out, + "%s set_global_vars.bpf.o -G \"arr[99] = 0\" -vl2 2> %s", + fix->veristat, fix->tmpfile); + + read(fix->fd, fix->output, fix->sz); + __CHECK_STR("Array index 99 is out of bounds", "arr[99]"); + +out: + teardown_fixture(fix); +} + +static void test_array_index_not_found(void) +{ + struct fixture *fix = init_fixture(); + + SYS_FAIL(out, + "%s set_global_vars.bpf.o -G \"arr[EG2] = 0\" -vl2 2> %s", + fix->veristat, fix->tmpfile); + + read(fix->fd, fix->output, fix->sz); + __CHECK_STR("Can't resolve enum value EG2", "arr[EG2]"); + +out: + teardown_fixture(fix); +} + +static void test_array_index_for_non_array(void) +{ + struct fixture *fix = init_fixture(); + + SYS_FAIL(out, + "%s set_global_vars.bpf.o -G \"var_b[0] = 1\" -vl2 2> %s", + fix->veristat, fix->tmpfile); + + pread(fix->fd, fix->output, fix->sz, 0); + __CHECK_STR("Array index is not expected for var_b", "var_b[0] = 1"); + + SYS_FAIL(out, + "%s set_global_vars.bpf.o -G \"union1.struct3[0].var_u8_l=1\" -vl2 2> %s", + fix->veristat, fix->tmpfile); + + pread(fix->fd, fix->output, fix->sz, 0); + __CHECK_STR("Array index is not expected for struct3", "union1.struct3[0].var_u8_l=1"); + +out: + teardown_fixture(fix); +} + +static void test_no_array_index_for_array(void) +{ + struct fixture *fix = init_fixture(); + + SYS_FAIL(out, + "%s set_global_vars.bpf.o -G \"arr = 1\" -vl2 2> %s", + fix->veristat, fix->tmpfile); + + pread(fix->fd, fix->output, fix->sz, 0); + __CHECK_STR("Can't set arr. Only ints and enums are supported", "arr = 1"); + + SYS_FAIL(out, + "%s set_global_vars.bpf.o -G \"struct1[0].struct2.u.var_u8[2]=1\" -vl2 2> %s", + fix->veristat, fix->tmpfile); + + pread(fix->fd, fix->output, fix->sz, 0); + __CHECK_STR("Can't resolve field u for non-composite type", "struct1[0].struct2.u.var_u8[2]=1"); + +out: + teardown_fixture(fix); +} + +void test_veristat(void) +{ + if (test__start_subtest("set_global_vars_succeeds")) + test_set_global_vars_succeeds(); + + if (test__start_subtest("set_global_vars_out_of_range")) + test_set_global_vars_out_of_range(); + + if (test__start_subtest("set_global_vars_from_file_succeeds")) + test_set_global_vars_from_file_succeeds(); + + if (test__start_subtest("test_unsupported_ptr_array_type")) + test_unsupported_ptr_array_type(); + + if (test__start_subtest("test_array_out_of_bounds")) + test_array_out_of_bounds(); + + if (test__start_subtest("test_array_index_not_found")) + test_array_index_not_found(); + + if (test__start_subtest("test_array_index_for_non_array")) + test_array_index_for_non_array(); + + if (test__start_subtest("test_no_array_index_for_array")) + test_no_array_index_for_array(); + +} + +#undef __CHECK_STR diff --git a/tools/testing/selftests/bpf/prog_tests/test_xdp_veth.c b/tools/testing/selftests/bpf/prog_tests/test_xdp_veth.c index 8d75424fe6bc..3e98a1665936 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_xdp_veth.c +++ b/tools/testing/selftests/bpf/prog_tests/test_xdp_veth.c @@ -3,17 +3,50 @@ /* Create 3 namespaces with 3 veth peers, and forward packets in-between using * native XDP * - * XDP_TX - * NS1(veth11) NS2(veth22) NS3(veth33) - * | | | - * | | | - * (veth1, (veth2, (veth3, - * id:111) id:122) id:133) - * ^ | ^ | ^ | - * | | XDP_REDIRECT | | XDP_REDIRECT | | - * | ------------------ ------------------ | - * ----------------------------------------- - * XDP_REDIRECT + * Network 
topology: + * ---------- ---------- ---------- + * | NS1 | | NS2 | | NS3 | + * | veth11 | | veth22 | | veth33 | + * ----|----- -----|---- -----|---- + * | | | + * ----|------------------|----------------|---- + * | veth1 veth2 veth3 | + * | | + * | NS0 | + * --------------------------------------------- + * + * Test cases: + * - [test_xdp_veth_redirect] : ping veth33 from veth11 + * + * veth11 veth22 veth33 + * (XDP_PASS) (XDP_TX) (XDP_PASS) + * | | | + * | | | + * veth1 veth2 veth3 + * (XDP_REDIRECT) (XDP_REDIRECT) (XDP_REDIRECT) + * ^ | ^ | ^ | + * | | | | | | + * | ------------------ ------------------ | + * ----------------------------------------- + * + * - [test_xdp_veth_broadcast_redirect]: broadcast from veth11 + * - IPv4 ping : BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS + * -> echo request received by all except veth11 + * - IPv4 ping : BPF_F_BROADCAST + * -> echo request received by all veth + * - [test_xdp_veth_egress]: + * - all src mac should be the magic mac + * + * veth11 veth22 veth33 + * (XDP_PASS) (XDP_PASS) (XDP_PASS) + * | | | + * | | | + * veth1 veth2 veth3 + * (XDP_REDIRECT) (XDP_REDIRECT) (XDP_REDIRECT) + * | ^ ^ + * | | | + * ---------------------------------------- + * */ #define _GNU_SOURCE @@ -22,192 +55,545 @@ #include "network_helpers.h" #include "xdp_dummy.skel.h" #include "xdp_redirect_map.skel.h" +#include "xdp_redirect_multi_kern.skel.h" #include "xdp_tx.skel.h" +#include <uapi/linux/if_link.h> #define VETH_PAIRS_COUNT 3 -#define NS_SUFFIX_LEN 6 -#define VETH_NAME_MAX_LEN 16 +#define VETH_NAME_MAX_LEN 32 +#define IP_MAX_LEN 16 #define IP_SRC "10.1.1.11" #define IP_DST "10.1.1.33" -#define IP_CMD_MAX_LEN 128 - -struct skeletons { - struct xdp_dummy *xdp_dummy; - struct xdp_tx *xdp_tx; - struct xdp_redirect_map *xdp_redirect_maps; -}; +#define IP_NEIGH "10.1.1.253" +#define PROG_NAME_MAX_LEN 128 +#define NS_NAME_MAX_LEN 32 struct veth_configuration { char local_veth[VETH_NAME_MAX_LEN]; /* Interface in main namespace */ char remote_veth[VETH_NAME_MAX_LEN]; /* Peer interface in dedicated namespace*/ - const char *namespace; /* Namespace for the remote veth */ - char next_veth[VETH_NAME_MAX_LEN]; /* Local interface to redirect traffic to */ - char *remote_addr; /* IP address of the remote veth */ + char namespace[NS_NAME_MAX_LEN]; /* Namespace for the remote veth */ + int next_veth; /* Local interface to redirect traffic to */ + char remote_addr[IP_MAX_LEN]; /* IP address of the remote veth */ }; -static struct veth_configuration config[VETH_PAIRS_COUNT] = { - { - .local_veth = "veth1", - .remote_veth = "veth11", - .next_veth = "veth2", - .remote_addr = IP_SRC, - .namespace = "ns-veth11" - }, - { - .local_veth = "veth2", - .remote_veth = "veth22", - .next_veth = "veth3", - .remote_addr = NULL, - .namespace = "ns-veth22" - }, +struct net_configuration { + char ns0_name[NS_NAME_MAX_LEN]; + struct veth_configuration veth_cfg[VETH_PAIRS_COUNT]; +}; + +static const struct net_configuration default_config = { + .ns0_name = "ns0-", { - .local_veth = "veth3", - .remote_veth = "veth33", - .next_veth = "veth1", - .remote_addr = IP_DST, - .namespace = "ns-veth33" + { + .local_veth = "veth1-", + .remote_veth = "veth11", + .next_veth = 1, + .remote_addr = IP_SRC, + .namespace = "ns-veth11-" + }, + { + .local_veth = "veth2-", + .remote_veth = "veth22", + .next_veth = 2, + .remote_addr = "", + .namespace = "ns-veth22-" + }, + { + .local_veth = "veth3-", + .remote_veth = "veth33", + .next_veth = 0, + .remote_addr = IP_DST, + .namespace = "ns-veth33-" + } } }; -static int 
attach_programs_to_veth_pair(struct skeletons *skeletons, int index) +struct prog_configuration { + char local_name[PROG_NAME_MAX_LEN]; /* BPF prog to attach to local_veth */ + char remote_name[PROG_NAME_MAX_LEN]; /* BPF prog to attach to remote_veth */ + u32 local_flags; /* XDP flags to use on local_veth */ + u32 remote_flags; /* XDP flags to use on remote_veth */ +}; + +static int attach_programs_to_veth_pair(struct bpf_object **objs, size_t nb_obj, + struct net_configuration *net_config, + struct prog_configuration *prog, int index) { struct bpf_program *local_prog, *remote_prog; - struct bpf_link **local_link, **remote_link; struct nstoken *nstoken; - struct bpf_link *link; - int interface; - - switch (index) { - case 0: - local_prog = skeletons->xdp_redirect_maps->progs.xdp_redirect_map_0; - local_link = &skeletons->xdp_redirect_maps->links.xdp_redirect_map_0; - remote_prog = skeletons->xdp_dummy->progs.xdp_dummy_prog; - remote_link = &skeletons->xdp_dummy->links.xdp_dummy_prog; - break; - case 1: - local_prog = skeletons->xdp_redirect_maps->progs.xdp_redirect_map_1; - local_link = &skeletons->xdp_redirect_maps->links.xdp_redirect_map_1; - remote_prog = skeletons->xdp_tx->progs.xdp_tx; - remote_link = &skeletons->xdp_tx->links.xdp_tx; - break; - case 2: - local_prog = skeletons->xdp_redirect_maps->progs.xdp_redirect_map_2; - local_link = &skeletons->xdp_redirect_maps->links.xdp_redirect_map_2; - remote_prog = skeletons->xdp_dummy->progs.xdp_dummy_prog; - remote_link = &skeletons->xdp_dummy->links.xdp_dummy_prog; - break; + int interface, ret, i; + + for (i = 0; i < nb_obj; i++) { + local_prog = bpf_object__find_program_by_name(objs[i], prog[index].local_name); + if (local_prog) + break; } - interface = if_nametoindex(config[index].local_veth); + if (!ASSERT_OK_PTR(local_prog, "find local program")) + return -1; + + for (i = 0; i < nb_obj; i++) { + remote_prog = bpf_object__find_program_by_name(objs[i], prog[index].remote_name); + if (remote_prog) + break; + } + if (!ASSERT_OK_PTR(remote_prog, "find remote program")) + return -1; + + interface = if_nametoindex(net_config->veth_cfg[index].local_veth); if (!ASSERT_NEQ(interface, 0, "non zero interface index")) return -1; - link = bpf_program__attach_xdp(local_prog, interface); - if (!ASSERT_OK_PTR(link, "attach xdp program to local veth")) + + ret = bpf_xdp_attach(interface, bpf_program__fd(local_prog), + prog[index].local_flags, NULL); + if (!ASSERT_OK(ret, "attach xdp program to local veth")) return -1; - *local_link = link; - nstoken = open_netns(config[index].namespace); + + nstoken = open_netns(net_config->veth_cfg[index].namespace); if (!ASSERT_OK_PTR(nstoken, "switch to remote veth namespace")) return -1; - interface = if_nametoindex(config[index].remote_veth); + + interface = if_nametoindex(net_config->veth_cfg[index].remote_veth); if (!ASSERT_NEQ(interface, 0, "non zero interface index")) { close_netns(nstoken); return -1; } - link = bpf_program__attach_xdp(remote_prog, interface); - *remote_link = link; - close_netns(nstoken); - if (!ASSERT_OK_PTR(link, "attach xdp program to remote veth")) + + ret = bpf_xdp_attach(interface, bpf_program__fd(remote_prog), + prog[index].remote_flags, NULL); + if (!ASSERT_OK(ret, "attach xdp program to remote veth")) { + close_netns(nstoken); return -1; + } + close_netns(nstoken); return 0; } -static int configure_network(struct skeletons *skeletons) +static int create_network(struct net_configuration *net_config) { - int interface_id; - int map_fd; - int err; - int i = 0; + struct nstoken 
*nstoken = NULL; + int i, err; + + memcpy(net_config, &default_config, sizeof(struct net_configuration)); + + /* Create unique namespaces */ + err = append_tid(net_config->ns0_name, NS_NAME_MAX_LEN); + if (!ASSERT_OK(err, "append TID to ns0 name")) + goto fail; + SYS(fail, "ip netns add %s", net_config->ns0_name); - /* First create and configure all interfaces */ for (i = 0; i < VETH_PAIRS_COUNT; i++) { - SYS(fail, "ip netns add %s", config[i].namespace); - SYS(fail, "ip link add %s type veth peer name %s netns %s", - config[i].local_veth, config[i].remote_veth, config[i].namespace); - SYS(fail, "ip link set dev %s up", config[i].local_veth); - if (config[i].remote_addr) - SYS(fail, "ip -n %s addr add %s/24 dev %s", config[i].namespace, - config[i].remote_addr, config[i].remote_veth); - SYS(fail, "ip -n %s link set dev %s up", config[i].namespace, - config[i].remote_veth); + err = append_tid(net_config->veth_cfg[i].namespace, NS_NAME_MAX_LEN); + if (!ASSERT_OK(err, "append TID to ns name")) + goto fail; + SYS(fail, "ip netns add %s", net_config->veth_cfg[i].namespace); } - /* Then configure the redirect map and attach programs to interfaces */ - map_fd = bpf_map__fd(skeletons->xdp_redirect_maps->maps.tx_port); - if (!ASSERT_GE(map_fd, 0, "open redirect map")) + /* Create interfaces */ + nstoken = open_netns(net_config->ns0_name); + if (!nstoken) goto fail; + for (i = 0; i < VETH_PAIRS_COUNT; i++) { - interface_id = if_nametoindex(config[i].next_veth); - if (!ASSERT_NEQ(interface_id, 0, "non zero interface index")) - goto fail; - err = bpf_map_update_elem(map_fd, &i, &interface_id, BPF_ANY); - if (!ASSERT_OK(err, "configure interface redirection through map")) - goto fail; - if (attach_programs_to_veth_pair(skeletons, i)) - goto fail; + SYS(fail, "ip link add %s type veth peer name %s netns %s", + net_config->veth_cfg[i].local_veth, net_config->veth_cfg[i].remote_veth, + net_config->veth_cfg[i].namespace); + SYS(fail, "ip link set dev %s up", net_config->veth_cfg[i].local_veth); + if (net_config->veth_cfg[i].remote_addr[0]) + SYS(fail, "ip -n %s addr add %s/24 dev %s", + net_config->veth_cfg[i].namespace, + net_config->veth_cfg[i].remote_addr, + net_config->veth_cfg[i].remote_veth); + SYS(fail, "ip -n %s link set dev %s up", net_config->veth_cfg[i].namespace, + net_config->veth_cfg[i].remote_veth); } + close_netns(nstoken); return 0; fail: + close_netns(nstoken); return -1; } -static void cleanup_network(void) +static void cleanup_network(struct net_configuration *net_config) { int i; - /* Deleting namespaces is enough to automatically remove veth pairs as well - */ + SYS_NOFAIL("ip netns del %s", net_config->ns0_name); for (i = 0; i < VETH_PAIRS_COUNT; i++) - SYS_NOFAIL("ip netns del %s", config[i].namespace); + SYS_NOFAIL("ip netns del %s", net_config->veth_cfg[i].namespace); } -static int check_ping(struct skeletons *skeletons) +#define VETH_REDIRECT_SKEL_NB 3 +static void xdp_veth_redirect(u32 flags) { + struct prog_configuration ping_config[VETH_PAIRS_COUNT] = { + { + .local_name = "xdp_redirect_map_0", + .remote_name = "xdp_dummy_prog", + .local_flags = flags, + .remote_flags = flags, + }, + { + .local_name = "xdp_redirect_map_1", + .remote_name = "xdp_tx", + .local_flags = flags, + .remote_flags = flags, + }, + { + .local_name = "xdp_redirect_map_2", + .remote_name = "xdp_dummy_prog", + .local_flags = flags, + .remote_flags = flags, + } + }; + struct bpf_object *bpf_objs[VETH_REDIRECT_SKEL_NB]; + struct xdp_redirect_map *xdp_redirect_map; + struct net_configuration net_config; + 
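/* Wiring of the redirect test: xdp_redirect_map_[0-2] sit on the NS0-side veths and redirect through the tx_port devmap (slot i holds the ifindex of next_veth), xdp_tx answers on veth22, and xdp_dummy_prog passes packets through on veth11/veth33, so the ping from veth11 to IP_DST at the end only succeeds if the whole chain forwards. */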
struct nstoken *nstoken = NULL; + struct xdp_dummy *xdp_dummy; + struct xdp_tx *xdp_tx; + int map_fd; + int i; + + xdp_dummy = xdp_dummy__open_and_load(); + if (!ASSERT_OK_PTR(xdp_dummy, "xdp_dummy__open_and_load")) + return; + + xdp_tx = xdp_tx__open_and_load(); + if (!ASSERT_OK_PTR(xdp_tx, "xdp_tx__open_and_load")) + goto destroy_xdp_dummy; + + xdp_redirect_map = xdp_redirect_map__open_and_load(); + if (!ASSERT_OK_PTR(xdp_redirect_map, "xdp_redirect_map__open_and_load")) + goto destroy_xdp_tx; + + if (!ASSERT_OK(create_network(&net_config), "create network")) + goto destroy_xdp_redirect_map; + + /* Then configure the redirect map and attach programs to interfaces */ + map_fd = bpf_map__fd(xdp_redirect_map->maps.tx_port); + if (!ASSERT_OK_FD(map_fd, "open redirect map")) + goto destroy_xdp_redirect_map; + + bpf_objs[0] = xdp_dummy->obj; + bpf_objs[1] = xdp_tx->obj; + bpf_objs[2] = xdp_redirect_map->obj; + + nstoken = open_netns(net_config.ns0_name); + if (!ASSERT_OK_PTR(nstoken, "open NS0")) + goto destroy_xdp_redirect_map; + + for (i = 0; i < VETH_PAIRS_COUNT; i++) { + int next_veth = net_config.veth_cfg[i].next_veth; + int interface_id; + int err; + + interface_id = if_nametoindex(net_config.veth_cfg[next_veth].local_veth); + if (!ASSERT_NEQ(interface_id, 0, "non zero interface index")) + goto destroy_xdp_redirect_map; + err = bpf_map_update_elem(map_fd, &i, &interface_id, BPF_ANY); + if (!ASSERT_OK(err, "configure interface redirection through map")) + goto destroy_xdp_redirect_map; + if (attach_programs_to_veth_pair(bpf_objs, VETH_REDIRECT_SKEL_NB, + &net_config, ping_config, i)) + goto destroy_xdp_redirect_map; + } + /* Test: if all interfaces are properly configured, we must be able to ping * veth33 from veth11 */ - return SYS_NOFAIL("ip netns exec %s ping -c 1 -W 1 %s > /dev/null", - config[0].namespace, IP_DST); + ASSERT_OK(SYS_NOFAIL("ip netns exec %s ping -c 1 -W 1 %s > /dev/null", + net_config.veth_cfg[0].namespace, IP_DST), "ping"); + +destroy_xdp_redirect_map: + close_netns(nstoken); + xdp_redirect_map__destroy(xdp_redirect_map); +destroy_xdp_tx: + xdp_tx__destroy(xdp_tx); +destroy_xdp_dummy: + xdp_dummy__destroy(xdp_dummy); + + cleanup_network(&net_config); } -void test_xdp_veth_redirect(void) +#define BROADCAST_REDIRECT_SKEL_NB 2 +static void xdp_veth_broadcast_redirect(u32 attach_flags, u64 redirect_flags) { - struct skeletons skeletons = {}; + struct prog_configuration prog_cfg[VETH_PAIRS_COUNT] = { + { + .local_name = "xdp_redirect_map_multi_prog", + .remote_name = "xdp_count_0", + .local_flags = attach_flags, + .remote_flags = attach_flags, + }, + { + .local_name = "xdp_redirect_map_multi_prog", + .remote_name = "xdp_count_1", + .local_flags = attach_flags, + .remote_flags = attach_flags, + }, + { + .local_name = "xdp_redirect_map_multi_prog", + .remote_name = "xdp_count_2", + .local_flags = attach_flags, + .remote_flags = attach_flags, + } + }; + struct bpf_object *bpf_objs[BROADCAST_REDIRECT_SKEL_NB]; + struct xdp_redirect_multi_kern *xdp_redirect_multi_kern; + struct xdp_redirect_map *xdp_redirect_map; + struct bpf_devmap_val devmap_val = {}; + struct net_configuration net_config; + struct nstoken *nstoken = NULL; + u16 protocol = ETH_P_IP; + int group_map; + int flags_map; + int cnt_map; + u64 cnt = 0; + int i, err; - skeletons.xdp_dummy = xdp_dummy__open_and_load(); - if (!ASSERT_OK_PTR(skeletons.xdp_dummy, "xdp_dummy__open_and_load")) + xdp_redirect_multi_kern = xdp_redirect_multi_kern__open_and_load(); + if (!ASSERT_OK_PTR(xdp_redirect_multi_kern,
"xdp_redirect_multi_kern__open_and_load")) return; - skeletons.xdp_tx = xdp_tx__open_and_load(); - if (!ASSERT_OK_PTR(skeletons.xdp_tx, "xdp_tx__open_and_load")) + xdp_redirect_map = xdp_redirect_map__open_and_load(); + if (!ASSERT_OK_PTR(xdp_redirect_map, "xdp_redirect_map__open_and_load")) + goto destroy_xdp_redirect_multi_kern; + + if (!ASSERT_OK(create_network(&net_config), "create network")) + goto destroy_xdp_redirect_map; + + group_map = bpf_map__fd(xdp_redirect_multi_kern->maps.map_all); + if (!ASSERT_OK_FD(group_map, "open map_all")) + goto destroy_xdp_redirect_map; + + flags_map = bpf_map__fd(xdp_redirect_multi_kern->maps.redirect_flags); + if (!ASSERT_OK_FD(flags_map, "open redirect_flags")) + goto destroy_xdp_redirect_map; + + err = bpf_map_update_elem(flags_map, &protocol, &redirect_flags, BPF_NOEXIST); + if (!ASSERT_OK(err, "init redirect flags")) + goto destroy_xdp_redirect_map; + + cnt_map = bpf_map__fd(xdp_redirect_map->maps.rxcnt); + if (!ASSERT_OK_FD(cnt_map, "open rxcnt map")) + goto destroy_xdp_redirect_map; + + bpf_objs[0] = xdp_redirect_multi_kern->obj; + bpf_objs[1] = xdp_redirect_map->obj; + + nstoken = open_netns(net_config.ns0_name); + if (!ASSERT_OK_PTR(nstoken, "open NS0")) + goto destroy_xdp_redirect_map; + + for (i = 0; i < VETH_PAIRS_COUNT; i++) { + int ifindex = if_nametoindex(net_config.veth_cfg[i].local_veth); + + if (attach_programs_to_veth_pair(bpf_objs, BROADCAST_REDIRECT_SKEL_NB, + &net_config, prog_cfg, i)) + goto destroy_xdp_redirect_map; + + SYS(destroy_xdp_redirect_map, + "ip -n %s neigh add %s lladdr 00:00:00:00:00:01 dev %s", + net_config.veth_cfg[i].namespace, IP_NEIGH, net_config.veth_cfg[i].remote_veth); + + devmap_val.ifindex = ifindex; + err = bpf_map_update_elem(group_map, &ifindex, &devmap_val, 0); + if (!ASSERT_OK(err, "bpf_map_update_elem")) + goto destroy_xdp_redirect_map; + } + + SYS_NOFAIL("ip netns exec %s ping %s -i 0.1 -c 4 -W1 > /dev/null ", + net_config.veth_cfg[0].namespace, IP_NEIGH); + + for (i = 0; i < VETH_PAIRS_COUNT; i++) { + err = bpf_map_lookup_elem(cnt_map, &i, &cnt); + if (!ASSERT_OK(err, "get IP cnt")) + goto destroy_xdp_redirect_map; + + if (redirect_flags & BPF_F_EXCLUDE_INGRESS) + /* veth11 shouldn't receive the ICMP requests; + * others should + */ + ASSERT_EQ(cnt, i ? 
4 : 0, "compare IP cnt"); + else + /* All remote veth should receive the ICMP requests */ + ASSERT_EQ(cnt, 4, "compare IP cnt"); + } + +destroy_xdp_redirect_map: + close_netns(nstoken); + xdp_redirect_map__destroy(xdp_redirect_map); +destroy_xdp_redirect_multi_kern: + xdp_redirect_multi_kern__destroy(xdp_redirect_multi_kern); + + cleanup_network(&net_config); +} + +#define VETH_EGRESS_SKEL_NB 3 +static void xdp_veth_egress(u32 flags) +{ + struct prog_configuration prog_cfg[VETH_PAIRS_COUNT] = { + { + .local_name = "xdp_redirect_map_all_prog", + .remote_name = "xdp_dummy_prog", + .local_flags = flags, + .remote_flags = flags, + }, + { + .local_name = "xdp_redirect_map_all_prog", + .remote_name = "store_mac_1", + .local_flags = flags, + .remote_flags = flags, + }, + { + .local_name = "xdp_redirect_map_all_prog", + .remote_name = "store_mac_2", + .local_flags = flags, + .remote_flags = flags, + } + }; + const char magic_mac[6] = { 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF }; + struct xdp_redirect_multi_kern *xdp_redirect_multi_kern; + struct bpf_object *bpf_objs[VETH_EGRESS_SKEL_NB]; + struct xdp_redirect_map *xdp_redirect_map; + struct bpf_devmap_val devmap_val = {}; + struct net_configuration net_config; + int mac_map, egress_map, res_map; + struct nstoken *nstoken = NULL; + struct xdp_dummy *xdp_dummy; + int err; + int i; + + xdp_dummy = xdp_dummy__open_and_load(); + if (!ASSERT_OK_PTR(xdp_dummy, "xdp_dummy__open_and_load")) + return; + + xdp_redirect_multi_kern = xdp_redirect_multi_kern__open_and_load(); + if (!ASSERT_OK_PTR(xdp_redirect_multi_kern, "xdp_redirect_multi_kern__open_and_load")) goto destroy_xdp_dummy; - skeletons.xdp_redirect_maps = xdp_redirect_map__open_and_load(); - if (!ASSERT_OK_PTR(skeletons.xdp_redirect_maps, "xdp_redirect_map__open_and_load")) - goto destroy_xdp_tx; + xdp_redirect_map = xdp_redirect_map__open_and_load(); + if (!ASSERT_OK_PTR(xdp_redirect_map, "xdp_redirect_map__open_and_load")) + goto destroy_xdp_redirect_multi_kern; - if (configure_network(&skeletons)) + if (!ASSERT_OK(create_network(&net_config), "create network")) goto destroy_xdp_redirect_map; - ASSERT_OK(check_ping(&skeletons), "ping"); + mac_map = bpf_map__fd(xdp_redirect_multi_kern->maps.mac_map); + if (!ASSERT_OK_FD(mac_map, "open mac_map")) + goto destroy_xdp_redirect_map; + + egress_map = bpf_map__fd(xdp_redirect_multi_kern->maps.map_egress); + if (!ASSERT_OK_FD(egress_map, "open map_egress")) + goto destroy_xdp_redirect_map; + + devmap_val.bpf_prog.fd = bpf_program__fd(xdp_redirect_multi_kern->progs.xdp_devmap_prog); + + bpf_objs[0] = xdp_dummy->obj; + bpf_objs[1] = xdp_redirect_multi_kern->obj; + bpf_objs[2] = xdp_redirect_map->obj; + + nstoken = open_netns(net_config.ns0_name); + if (!ASSERT_OK_PTR(nstoken, "open NS0")) + goto destroy_xdp_redirect_map; + + for (i = 0; i < VETH_PAIRS_COUNT; i++) { + int ifindex = if_nametoindex(net_config.veth_cfg[i].local_veth); + + SYS(destroy_xdp_redirect_map, + "ip -n %s neigh add %s lladdr 00:00:00:00:00:01 dev %s", + net_config.veth_cfg[i].namespace, IP_NEIGH, net_config.veth_cfg[i].remote_veth); + + if (attach_programs_to_veth_pair(bpf_objs, VETH_EGRESS_SKEL_NB, + &net_config, prog_cfg, i)) + goto destroy_xdp_redirect_map; + + err = bpf_map_update_elem(mac_map, &ifindex, magic_mac, 0); + if (!ASSERT_OK(err, "bpf_map_update_elem")) + goto destroy_xdp_redirect_map; + + devmap_val.ifindex = ifindex; + err = bpf_map_update_elem(egress_map, &ifindex, &devmap_val, 0); + if (!ASSERT_OK(err, "bpf_map_update_elem")) + goto destroy_xdp_redirect_map; + } + 
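+ /* Hedged summary of the expected egress path (inferred from the maps set + * up above, not spelled out in the original patch): the ping below should + * make xdp_redirect_map_all_prog broadcast each request through + * map_egress, the attached devmap program (xdp_devmap_prog) is expected + * to rewrite the source MAC to the magic_mac configured above, and + * store_mac_1/store_mac_2 then record the received source MAC into rx_mac + * for the checks below. + */ + 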
SYS_NOFAIL("ip netns exec %s ping %s -i 0.1 -c 4 -W1 > /dev/null ", + net_config.veth_cfg[0].namespace, IP_NEIGH); + + res_map = bpf_map__fd(xdp_redirect_map->maps.rx_mac); + if (!ASSERT_OK_FD(res_map, "open rx_mac map")) + goto destroy_xdp_redirect_map; + + for (i = 0; i < 2; i++) { + u32 key = i; + u64 res; + + err = bpf_map_lookup_elem(res_map, &key, &res); + if (!ASSERT_OK(err, "get MAC res")) + goto destroy_xdp_redirect_map; + + ASSERT_STRNEQ((const char *)&res, magic_mac, ETH_ALEN, "compare mac"); + } destroy_xdp_redirect_map: - xdp_redirect_map__destroy(skeletons.xdp_redirect_maps); -destroy_xdp_tx: - xdp_tx__destroy(skeletons.xdp_tx); + close_netns(nstoken); + xdp_redirect_map__destroy(xdp_redirect_map); +destroy_xdp_redirect_multi_kern: + xdp_redirect_multi_kern__destroy(xdp_redirect_multi_kern); destroy_xdp_dummy: - xdp_dummy__destroy(skeletons.xdp_dummy); + xdp_dummy__destroy(xdp_dummy); + + cleanup_network(&net_config); +} + +void test_xdp_veth_redirect(void) +{ + if (test__start_subtest("0")) + xdp_veth_redirect(0); + + if (test__start_subtest("DRV_MODE")) + xdp_veth_redirect(XDP_FLAGS_DRV_MODE); + + if (test__start_subtest("SKB_MODE")) + xdp_veth_redirect(XDP_FLAGS_SKB_MODE); +} + +void test_xdp_veth_broadcast_redirect(void) +{ + if (test__start_subtest("0/BROADCAST")) + xdp_veth_broadcast_redirect(0, BPF_F_BROADCAST); + + if (test__start_subtest("0/(BROADCAST | EXCLUDE_INGRESS)")) + xdp_veth_broadcast_redirect(0, BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS); + + if (test__start_subtest("DRV_MODE/BROADCAST")) + xdp_veth_broadcast_redirect(XDP_FLAGS_DRV_MODE, BPF_F_BROADCAST); + + if (test__start_subtest("DRV_MODE/(BROADCAST | EXCLUDE_INGRESS)")) + xdp_veth_broadcast_redirect(XDP_FLAGS_DRV_MODE, + BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS); + + if (test__start_subtest("SKB_MODE/BROADCAST")) + xdp_veth_broadcast_redirect(XDP_FLAGS_SKB_MODE, BPF_F_BROADCAST); + + if (test__start_subtest("SKB_MODE/(BROADCAST | EXCLUDE_INGRESS)")) + xdp_veth_broadcast_redirect(XDP_FLAGS_SKB_MODE, + BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS); +} + +void test_xdp_veth_egress(void) +{ + if (test__start_subtest("0/egress")) + xdp_veth_egress(0); + + if (test__start_subtest("DRV_MODE/egress")) + xdp_veth_egress(XDP_FLAGS_DRV_MODE); - cleanup_network(); + if (test__start_subtest("SKB_MODE/egress")) + xdp_veth_egress(XDP_FLAGS_SKB_MODE); } diff --git a/tools/testing/selftests/bpf/prog_tests/token.c b/tools/testing/selftests/bpf/prog_tests/token.c index c3ab9b6fb069..b81dde283052 100644 --- a/tools/testing/selftests/bpf/prog_tests/token.c +++ b/tools/testing/selftests/bpf/prog_tests/token.c @@ -19,6 +19,7 @@ #include "priv_prog.skel.h" #include "dummy_st_ops_success.skel.h" #include "token_lsm.skel.h" +#include "priv_freplace_prog.skel.h" static inline int sys_mount(const char *dev_name, const char *dir_name, const char *type, unsigned long flags, @@ -114,7 +115,7 @@ static int create_bpffs_fd(void) static int materialize_bpffs_fd(int fs_fd, struct bpffs_opts *opts) { - int mnt_fd, err; + int err; /* set up token delegation mount options */ err = set_delegate_mask(fs_fd, "delegate_cmds", opts->cmds, opts->cmds_str); @@ -135,12 +136,7 @@ static int materialize_bpffs_fd(int fs_fd, struct bpffs_opts *opts) if (err < 0) return -errno; - /* create O_PATH fd for detached mount */ - mnt_fd = sys_fsmount(fs_fd, 0, 0); - if (err < 0) - return -errno; - - return mnt_fd; + return 0; } /* send FD over Unix domain (AF_UNIX) socket */ @@ -286,6 +282,7 @@ static void child(int sock_fd, struct bpffs_opts *opts, child_callback_fn 
callba { int mnt_fd = -1, fs_fd = -1, err = 0, bpffs_fd = -1, token_fd = -1; struct token_lsm *lsm_skel = NULL; + char one; /* load and attach LSM "policy" before we go into unpriv userns */ lsm_skel = token_lsm__open_and_load(); @@ -332,13 +329,19 @@ static void child(int sock_fd, struct bpffs_opts *opts, child_callback_fn callba err = sendfd(sock_fd, fs_fd); if (!ASSERT_OK(err, "send_fs_fd")) goto cleanup; - zclose(fs_fd); + + /* wait for the parent to read the fd, do the fsconfig() calls + * and send us a signal that it is done + */ + err = read(sock_fd, &one, sizeof(one)); + if (!ASSERT_GE(err, 0, "read_one")) + goto cleanup; /* avoid mucking around with mount namespaces and mounting at - * well-known path, just get detach-mounted BPF FS fd back from parent + * well-known path, just create O_PATH fd for detached mount */ - err = recvfd(sock_fd, &mnt_fd); - if (!ASSERT_OK(err, "recv_mnt_fd")) + mnt_fd = sys_fsmount(fs_fd, 0, 0); + if (!ASSERT_OK_FD(mnt_fd, "mnt_fd")) goto cleanup; /* try to fspick() BPF FS and try to add some delegation options */ @@ -428,24 +431,24 @@ again: static void parent(int child_pid, struct bpffs_opts *bpffs_opts, int sock_fd) { - int fs_fd = -1, mnt_fd = -1, token_fd = -1, err; + int fs_fd = -1, token_fd = -1, err; + char one = 1; err = recvfd(sock_fd, &fs_fd); if (!ASSERT_OK(err, "recv_bpffs_fd")) goto cleanup; - mnt_fd = materialize_bpffs_fd(fs_fd, bpffs_opts); - if (!ASSERT_GE(mnt_fd, 0, "materialize_bpffs_fd")) { + err = materialize_bpffs_fd(fs_fd, bpffs_opts); + if (!ASSERT_GE(err, 0, "materialize_bpffs_fd")) { err = -EINVAL; goto cleanup; } - zclose(fs_fd); - /* pass BPF FS context object to parent */ - err = sendfd(sock_fd, mnt_fd); - if (!ASSERT_OK(err, "send_mnt_fd")) + /* notify the child that we did the fsconfig() calls and it can proceed. 
*/ + err = write(sock_fd, &one, sizeof(one)); + if (!ASSERT_EQ(err, sizeof(one), "send_one")) goto cleanup; - zclose(mnt_fd); + zclose(fs_fd); /* receive BPF token FD back from child for some extra tests */ err = recvfd(sock_fd, &token_fd); @@ -458,7 +461,6 @@ static void parent(int child_pid, struct bpffs_opts *bpffs_opts, int sock_fd) cleanup: zclose(sock_fd); zclose(fs_fd); - zclose(mnt_fd); zclose(token_fd); if (child_pid > 0) @@ -788,6 +790,84 @@ static int userns_obj_priv_prog(int mnt_fd, struct token_lsm *lsm_skel) return 0; } +static int userns_obj_priv_freplace_setup(int mnt_fd, struct priv_freplace_prog **fr_skel, + struct priv_prog **skel, int *tgt_fd) +{ + LIBBPF_OPTS(bpf_object_open_opts, opts); + int err; + char buf[256]; + + /* use bpf_token_path to provide BPF FS path */ + snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd); + opts.bpf_token_path = buf; + *skel = priv_prog__open_opts(&opts); + if (!ASSERT_OK_PTR(*skel, "priv_prog__open_opts")) + return -EINVAL; + err = priv_prog__load(*skel); + if (!ASSERT_OK(err, "priv_prog__load")) + return -EINVAL; + + *fr_skel = priv_freplace_prog__open_opts(&opts); + if (!ASSERT_OK_PTR(*fr_skel, "priv_freplace_prog__open_opts")) + return -EINVAL; + + *tgt_fd = bpf_program__fd((*skel)->progs.xdp_prog1); + return 0; +} + +/* Verify that freplace works from user namespace, because the bpf token is + * loaded in bpf_object__prepare + */ +static int userns_obj_priv_freplace_prog(int mnt_fd, struct token_lsm *lsm_skel) +{ + struct priv_freplace_prog *fr_skel = NULL; + struct priv_prog *skel = NULL; + int err, tgt_fd; + + err = userns_obj_priv_freplace_setup(mnt_fd, &fr_skel, &skel, &tgt_fd); + if (!ASSERT_OK(err, "setup")) + goto out; + + err = bpf_object__prepare(fr_skel->obj); + if (!ASSERT_OK(err, "freplace__prepare")) + goto out; + + err = bpf_program__set_attach_target(fr_skel->progs.new_xdp_prog2, tgt_fd, "xdp_prog1"); + if (!ASSERT_OK(err, "set_attach_target")) + goto out; + + err = priv_freplace_prog__load(fr_skel); + ASSERT_OK(err, "priv_freplace_prog__load"); + +out: + priv_freplace_prog__destroy(fr_skel); + priv_prog__destroy(skel); + return err; +} + +/* Verify that freplace fails to set the attach target from user namespace without a bpf token */ +static int userns_obj_priv_freplace_prog_fail(int mnt_fd, struct token_lsm *lsm_skel) +{ + struct priv_freplace_prog *fr_skel = NULL; + struct priv_prog *skel = NULL; + int err, tgt_fd; + + err = userns_obj_priv_freplace_setup(mnt_fd, &fr_skel, &skel, &tgt_fd); + if (!ASSERT_OK(err, "setup")) + goto out; + + err = bpf_program__set_attach_target(fr_skel->progs.new_xdp_prog2, tgt_fd, "xdp_prog1"); + if (ASSERT_ERR(err, "attach fails")) + err = 0; + else + err = -EINVAL; + +out: + priv_freplace_prog__destroy(fr_skel); + priv_prog__destroy(skel); + return err; +} + /* this test is called with BPF FS that doesn't delegate BPF_BTF_LOAD command, * which should cause struct_ops application to fail, as BTF won't be uploaded * into the kernel, even if STRUCT_OPS programs themselves are allowed @@ -967,6 +1047,41 @@ err_out: #define bit(n) (1ULL << (n)) +static int userns_bpf_token_info(int mnt_fd, struct token_lsm *lsm_skel) +{ + int err, token_fd = -1; + struct bpf_token_info info; + u32 len = sizeof(struct bpf_token_info); + + /* create BPF token from BPF FS mount */ + token_fd = bpf_token_create(mnt_fd, NULL); + if (!ASSERT_GT(token_fd, 0, "token_create")) { + err = -EINVAL; + goto cleanup; + } + + memset(&info, 0, len); + err = bpf_obj_get_info_by_fd(token_fd, &info, &len); + if (!ASSERT_OK(err,
"bpf_obj_get_token_info")) + goto cleanup; + if (!ASSERT_EQ(info.allowed_cmds, bit(BPF_MAP_CREATE), "token_info_cmds_map_create")) { + err = -EINVAL; + goto cleanup; + } + if (!ASSERT_EQ(info.allowed_progs, bit(BPF_PROG_TYPE_XDP), "token_info_progs_xdp")) { + err = -EINVAL; + goto cleanup; + } + + /* BPF_PROG_TYPE_EXT is not set in the token */ + if (!ASSERT_NEQ(info.allowed_progs, bit(BPF_PROG_TYPE_EXT), "token_info_progs_ext")) + err = -EINVAL; + +cleanup: + zclose(token_fd); + return err; +} + void test_token(void) { if (test__start_subtest("map_token")) { @@ -1004,12 +1119,28 @@ void test_token(void) if (test__start_subtest("obj_priv_prog")) { struct bpffs_opts opts = { .cmds = bit(BPF_PROG_LOAD), - .progs = bit(BPF_PROG_TYPE_KPROBE), + .progs = bit(BPF_PROG_TYPE_XDP), .attachs = ~0ULL, }; subtest_userns(&opts, userns_obj_priv_prog); } + if (test__start_subtest("obj_priv_freplace_prog")) { + struct bpffs_opts opts = { + .cmds = bit(BPF_BTF_LOAD) | bit(BPF_PROG_LOAD) | bit(BPF_BTF_GET_FD_BY_ID), + .progs = bit(BPF_PROG_TYPE_EXT) | bit(BPF_PROG_TYPE_XDP), + .attachs = ~0ULL, + }; + subtest_userns(&opts, userns_obj_priv_freplace_prog); + } + if (test__start_subtest("obj_priv_freplace_prog_fail")) { + struct bpffs_opts opts = { + .cmds = bit(BPF_BTF_LOAD) | bit(BPF_PROG_LOAD) | bit(BPF_BTF_GET_FD_BY_ID), + .progs = bit(BPF_PROG_TYPE_EXT) | bit(BPF_PROG_TYPE_XDP), + .attachs = ~0ULL, + }; + subtest_userns(&opts, userns_obj_priv_freplace_prog_fail); + } if (test__start_subtest("obj_priv_btf_fail")) { struct bpffs_opts opts = { /* disallow BTF loading */ @@ -1054,4 +1185,13 @@ subtest_userns(&opts, userns_obj_priv_implicit_token_envvar); } + if (test__start_subtest("bpf_token_info")) { + struct bpffs_opts opts = { + .cmds = bit(BPF_MAP_CREATE), + .progs = bit(BPF_PROG_TYPE_XDP), + .attachs = ~0ULL, + }; + + subtest_userns(&opts, userns_bpf_token_info); + } } diff --git a/tools/testing/selftests/bpf/prog_tests/tracing_failure.c b/tools/testing/selftests/bpf/prog_tests/tracing_failure.c index a222df765bc3..10e231965589 100644 --- a/tools/testing/selftests/bpf/prog_tests/tracing_failure.c +++ b/tools/testing/selftests/bpf/prog_tests/tracing_failure.c @@ -28,10 +28,62 @@ out: tracing_failure__destroy(skel); } +static void test_tracing_fail_prog(const char *prog_name, const char *exp_msg) +{ + struct tracing_failure *skel; + struct bpf_program *prog; + char log_buf[256]; + int err; + + skel = tracing_failure__open(); + if (!ASSERT_OK_PTR(skel, "tracing_failure__open")) + return; + + prog = bpf_object__find_program_by_name(skel->obj, prog_name); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto out; + + bpf_program__set_autoload(prog, true); + bpf_program__set_log_buf(prog, log_buf, sizeof(log_buf)); + + err = tracing_failure__load(skel); + if (!ASSERT_ERR(err, "tracing_failure__load")) + goto out; + + ASSERT_HAS_SUBSTR(log_buf, exp_msg, "log_buf"); +out: + tracing_failure__destroy(skel); +} + +static void test_tracing_deny(void) +{ + int btf_id; + + /* __rcu_read_lock depends on CONFIG_PREEMPT_RCU */ + btf_id = libbpf_find_vmlinux_btf_id("__rcu_read_lock", BPF_TRACE_FENTRY); + if (btf_id <= 0) { + test__skip(); + return; + } + + test_tracing_fail_prog("tracing_deny", + "Attaching tracing programs to function '__rcu_read_lock' is rejected."); +} + +static void test_fexit_noreturns(void) +{ + test_tracing_fail_prog("fexit_noreturns", + "Attaching fexit/fmod_ret to __noreturn function 'do_exit' is rejected."); +} + void test_tracing_failure(void) { if 
(test__start_subtest("bpf_spin_lock")) test_bpf_spin_lock(true); if (test__start_subtest("bpf_spin_unlock")) test_bpf_spin_lock(false); + if (test__start_subtest("tracing_deny")) + test_tracing_deny(); + if (test__start_subtest("fexit_noreturns")) + test_fexit_noreturns(); } diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c b/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c index c397336fe1ed..b17dc39a23db 100644 --- a/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c +++ b/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c @@ -251,7 +251,7 @@ static void test_uretprobe_syscall_call(void) .retprobe = true, ); struct uprobe_syscall_executed *skel; - int pid, status, err, go[2], c; + int pid, status, err, go[2], c = 0; if (!ASSERT_OK(pipe(go), "pipe")) return; diff --git a/tools/testing/selftests/bpf/prog_tests/usdt.c b/tools/testing/selftests/bpf/prog_tests/usdt.c index 56ed1eb9b527..9057e983cc54 100644 --- a/tools/testing/selftests/bpf/prog_tests/usdt.c +++ b/tools/testing/selftests/bpf/prog_tests/usdt.c @@ -45,7 +45,7 @@ static void subtest_basic_usdt(void) LIBBPF_OPTS(bpf_usdt_opts, opts); struct test_usdt *skel; struct test_usdt__bss *bss; - int err; + int err, i; skel = test_usdt__open_and_load(); if (!ASSERT_OK_PTR(skel, "skel_open")) @@ -75,6 +75,7 @@ static void subtest_basic_usdt(void) ASSERT_EQ(bss->usdt0_cookie, 0xcafedeadbeeffeed, "usdt0_cookie"); ASSERT_EQ(bss->usdt0_arg_cnt, 0, "usdt0_arg_cnt"); ASSERT_EQ(bss->usdt0_arg_ret, -ENOENT, "usdt0_arg_ret"); + ASSERT_EQ(bss->usdt0_arg_size, -ENOENT, "usdt0_arg_size"); /* auto-attached usdt3 gets default zero cookie value */ ASSERT_EQ(bss->usdt3_cookie, 0, "usdt3_cookie"); @@ -86,6 +87,9 @@ static void subtest_basic_usdt(void) ASSERT_EQ(bss->usdt3_args[0], 1, "usdt3_arg1"); ASSERT_EQ(bss->usdt3_args[1], 42, "usdt3_arg2"); ASSERT_EQ(bss->usdt3_args[2], (uintptr_t)&bla, "usdt3_arg3"); + ASSERT_EQ(bss->usdt3_arg_sizes[0], 4, "usdt3_arg1_size"); + ASSERT_EQ(bss->usdt3_arg_sizes[1], 8, "usdt3_arg2_size"); + ASSERT_EQ(bss->usdt3_arg_sizes[2], 8, "usdt3_arg3_size"); /* auto-attached usdt12 gets default zero cookie value */ ASSERT_EQ(bss->usdt12_cookie, 0, "usdt12_cookie"); @@ -104,6 +108,11 @@ static void subtest_basic_usdt(void) ASSERT_EQ(bss->usdt12_args[10], nums[idx], "usdt12_arg11"); ASSERT_EQ(bss->usdt12_args[11], t1.y, "usdt12_arg12"); + int usdt12_expected_arg_sizes[12] = { 4, 4, 8, 8, 4, 8, 8, 8, 4, 2, 2, 1 }; + + for (i = 0; i < 12; i++) + ASSERT_EQ(bss->usdt12_arg_sizes[i], usdt12_expected_arg_sizes[i], "usdt12_arg_size"); + /* trigger_func() is marked __always_inline, so USDT invocations will be * inlined in two different places, meaning that each USDT will have * at least 2 different places to be attached to. This verifies that @@ -261,8 +270,16 @@ static void subtest_multispec_usdt(void) */ trigger_300_usdts(); - /* we'll reuse usdt_100 BPF program for usdt_300 test */ bpf_link__destroy(skel->links.usdt_100); + + bss->usdt_100_called = 0; + bss->usdt_100_sum = 0; + + /* If built with arm64/clang, there will be far fewer specs + * for usdt_300 call sites. 
+ */ +#if !defined(__aarch64__) || !defined(__clang__) + /* we'll reuse usdt_100 BPF program for usdt_300 test */ skel->links.usdt_100 = bpf_program__attach_usdt(skel->progs.usdt_100, -1, "/proc/self/exe", "test", "usdt_300", NULL); err = -errno; @@ -273,13 +290,11 @@ static void subtest_multispec_usdt(void) /* let's check that there are no "dangling" BPF programs attached due * to partial success of the above test:usdt_300 attachment */ - bss->usdt_100_called = 0; - bss->usdt_100_sum = 0; - f300(777); /* this is 301st instance of usdt_300 */ ASSERT_EQ(bss->usdt_100_called, 0, "usdt_301_called"); ASSERT_EQ(bss->usdt_100_sum, 0, "usdt_301_sum"); +#endif /* This time we have USDT with 400 inlined invocations, but arg specs * should be the same across all sites, so libbpf will only need to diff --git a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c index d424e7ecbd12..9fd3ae987321 100644 --- a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c +++ b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c @@ -21,8 +21,7 @@ #include "../progs/test_user_ringbuf.h" static const long c_sample_size = sizeof(struct sample) + BPF_RINGBUF_HDR_SZ; -static const long c_ringbuf_size = 1 << 12; /* 1 small page */ -static const long c_max_entries = c_ringbuf_size / c_sample_size; +static long c_ringbuf_size, c_max_entries; static void drain_current_samples(void) { @@ -424,7 +423,9 @@ static void test_user_ringbuf_loop(void) uint32_t remaining_samples = total_samples; int err; - BUILD_BUG_ON(total_samples <= c_max_entries); + if (!ASSERT_LT(c_max_entries, total_samples, "compare_c_max_entries")) + return; + err = load_skel_create_user_ringbuf(&skel, &ringbuf); if (err) return; @@ -686,6 +687,9 @@ void test_user_ringbuf(void) { int i; + c_ringbuf_size = getpagesize(); /* 1 page */ + c_max_entries = c_ringbuf_size / c_sample_size; + for (i = 0; i < ARRAY_SIZE(success_tests); i++) { if (!test__start_subtest(success_tests[i].test_name)) continue; diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 3ee40ee9413a..77ec95d4ffaa 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -14,6 +14,7 @@ #include "verifier_bounds_deduction_non_const.skel.h" #include "verifier_bounds_mix_sign_unsign.skel.h" #include "verifier_bpf_get_stack.skel.h" +#include "verifier_bpf_trap.skel.h" #include "verifier_bswap.skel.h" #include "verifier_btf_ctx_access.skel.h" #include "verifier_btf_unreliable_prog.skel.h" @@ -45,6 +46,7 @@ #include "verifier_ldsx.skel.h" #include "verifier_leak_ptr.skel.h" #include "verifier_linked_scalars.skel.h" +#include "verifier_load_acquire.skel.h" #include "verifier_loops1.skel.h" #include "verifier_lwt.skel.h" #include "verifier_map_in_map.skel.h" @@ -52,6 +54,8 @@ #include "verifier_map_ptr_mixing.skel.h" #include "verifier_map_ret_val.skel.h" #include "verifier_masking.skel.h" +#include "verifier_may_goto_1.skel.h" +#include "verifier_may_goto_2.skel.h" #include "verifier_meta_access.skel.h" #include "verifier_movsx.skel.h" #include "verifier_mtu.skel.h" @@ -78,8 +82,10 @@ #include "verifier_spill_fill.skel.h" #include "verifier_spin_lock.skel.h" #include "verifier_stack_ptr.skel.h" +#include "verifier_store_release.skel.h" #include "verifier_subprog_precision.skel.h" #include "verifier_subreg.skel.h" +#include "verifier_tailcall.skel.h" #include "verifier_tailcall_jit.skel.h" #include 
"verifier_typedef.skel.h" #include "verifier_uninit.skel.h" @@ -98,6 +104,7 @@ #include "verifier_xdp_direct_packet_access.skel.h" #include "verifier_bits_iter.skel.h" #include "verifier_lsm.skel.h" +#include "irq.skel.h" #define MAX_ENTRIES 11 @@ -118,7 +125,7 @@ static void run_tests_aux(const char *skel_name, /* test_verifier tests are executed w/o CAP_SYS_ADMIN, do the same here */ err = cap_disable_effective(1ULL << CAP_SYS_ADMIN, &old_caps); if (err) { - PRINT_FAIL("failed to drop CAP_SYS_ADMIN: %i, %s\n", err, strerror(err)); + PRINT_FAIL("failed to drop CAP_SYS_ADMIN: %i, %s\n", err, strerror(-err)); return; } @@ -128,7 +135,7 @@ static void run_tests_aux(const char *skel_name, err = cap_enable_effective(old_caps, NULL); if (err) - PRINT_FAIL("failed to restore CAP_SYS_ADMIN: %i, %s\n", err, strerror(err)); + PRINT_FAIL("failed to restore CAP_SYS_ADMIN: %i, %s\n", err, strerror(-err)); } #define RUN(skel) run_tests_aux(#skel, skel##__elf_bytes, NULL) @@ -143,6 +150,7 @@ void test_verifier_bounds_deduction(void) { RUN(verifier_bounds_deduction); void test_verifier_bounds_deduction_non_const(void) { RUN(verifier_bounds_deduction_non_const); } void test_verifier_bounds_mix_sign_unsign(void) { RUN(verifier_bounds_mix_sign_unsign); } void test_verifier_bpf_get_stack(void) { RUN(verifier_bpf_get_stack); } +void test_verifier_bpf_trap(void) { RUN(verifier_bpf_trap); } void test_verifier_bswap(void) { RUN(verifier_bswap); } void test_verifier_btf_ctx_access(void) { RUN(verifier_btf_ctx_access); } void test_verifier_btf_unreliable_prog(void) { RUN(verifier_btf_unreliable_prog); } @@ -170,6 +178,7 @@ void test_verifier_int_ptr(void) { RUN(verifier_int_ptr); } void test_verifier_iterating_callbacks(void) { RUN(verifier_iterating_callbacks); } void test_verifier_jeq_infer_not_null(void) { RUN(verifier_jeq_infer_not_null); } void test_verifier_jit_convergence(void) { RUN(verifier_jit_convergence); } +void test_verifier_load_acquire(void) { RUN(verifier_load_acquire); } void test_verifier_ld_ind(void) { RUN(verifier_ld_ind); } void test_verifier_ldsx(void) { RUN(verifier_ldsx); } void test_verifier_leak_ptr(void) { RUN(verifier_leak_ptr); } @@ -181,6 +190,8 @@ void test_verifier_map_ptr(void) { RUN(verifier_map_ptr); } void test_verifier_map_ptr_mixing(void) { RUN(verifier_map_ptr_mixing); } void test_verifier_map_ret_val(void) { RUN(verifier_map_ret_val); } void test_verifier_masking(void) { RUN(verifier_masking); } +void test_verifier_may_goto_1(void) { RUN(verifier_may_goto_1); } +void test_verifier_may_goto_2(void) { RUN(verifier_may_goto_2); } void test_verifier_meta_access(void) { RUN(verifier_meta_access); } void test_verifier_movsx(void) { RUN(verifier_movsx); } void test_verifier_netfilter_ctx(void) { RUN(verifier_netfilter_ctx); } @@ -206,8 +217,10 @@ void test_verifier_sockmap_mutate(void) { RUN(verifier_sockmap_mutate); } void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); } void test_verifier_spin_lock(void) { RUN(verifier_spin_lock); } void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); } +void test_verifier_store_release(void) { RUN(verifier_store_release); } void test_verifier_subprog_precision(void) { RUN(verifier_subprog_precision); } void test_verifier_subreg(void) { RUN(verifier_subreg); } +void test_verifier_tailcall(void) { RUN(verifier_tailcall); } void test_verifier_tailcall_jit(void) { RUN(verifier_tailcall_jit); } void test_verifier_typedef(void) { RUN(verifier_typedef); } void test_verifier_uninit(void) { RUN(verifier_uninit); } @@ -225,6 +238,7 
@@ void test_verifier_xdp(void) { RUN(verifier_xdp); } void test_verifier_xdp_direct_packet_access(void) { RUN(verifier_xdp_direct_packet_access); } void test_verifier_bits_iter(void) { RUN(verifier_bits_iter); } void test_verifier_lsm(void) { RUN(verifier_lsm); } +void test_irq(void) { RUN(irq); } void test_verifier_mtu(void) { RUN(verifier_mtu); } static int init_test_val_map(struct bpf_object *obj, char *map_name) diff --git a/tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c b/tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c index ab0f02faa80c..4d69d9d55e17 100644 --- a/tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c +++ b/tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c @@ -268,7 +268,7 @@ static void test_verify_pkcs7_sig_from_map(void) char *tmp_dir; struct test_verify_pkcs7_sig *skel = NULL; struct bpf_map *map; - struct data data; + struct data data = {}; int ret, zero = 0; /* Trigger creation of session keyring. */ diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c index 53d6ad8c2257..43264347e7d7 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c @@ -37,21 +37,26 @@ static void test_xdp_adjust_tail_shrink(void) bpf_object__close(obj); } -static void test_xdp_adjust_tail_grow(void) +static void test_xdp_adjust_tail_grow(bool is_64k_pagesize) { const char *file = "./test_xdp_adjust_tail_grow.bpf.o"; struct bpf_object *obj; - char buf[4096]; /* avoid segfault: large buf to hold grow results */ + char buf[8192]; /* avoid segfault: large buf to hold grow results */ __u32 expect_sz; int err, prog_fd; LIBBPF_OPTS(bpf_test_run_opts, topts, .data_in = &pkt_v4, - .data_size_in = sizeof(pkt_v4), .data_out = buf, .data_size_out = sizeof(buf), .repeat = 1, ); + /* topts.data_size_in as a special signal to bpf prog */ + if (is_64k_pagesize) + topts.data_size_in = sizeof(pkt_v4) - 1; + else + topts.data_size_in = sizeof(pkt_v4); + err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); if (!ASSERT_OK(err, "test_xdp_adjust_tail_grow")) return; @@ -82,6 +87,8 @@ static void test_xdp_adjust_tail_grow2(void) /* SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */ #if defined(__s390x__) int tailroom = 512; +#elif defined(__powerpc__) + int tailroom = 384; #else int tailroom = 320; #endif @@ -206,7 +213,7 @@ out: bpf_object__close(obj); } -static void test_xdp_adjust_frags_tail_grow(void) +static void test_xdp_adjust_frags_tail_grow_4k(void) { const char *file = "./test_xdp_adjust_tail_grow.bpf.o"; __u32 exp_size; @@ -244,14 +251,20 @@ static void test_xdp_adjust_frags_tail_grow(void) ASSERT_EQ(topts.retval, XDP_TX, "9Kb+10b retval"); ASSERT_EQ(topts.data_size_out, exp_size, "9Kb+10b size"); - for (i = 0; i < 9000; i++) - ASSERT_EQ(buf[i], 1, "9Kb+10b-old"); + for (i = 0; i < 9000; i++) { + if (buf[i] != 1) + ASSERT_EQ(buf[i], 1, "9Kb+10b-old"); + } - for (i = 9000; i < 9010; i++) - ASSERT_EQ(buf[i], 0, "9Kb+10b-new"); + for (i = 9000; i < 9010; i++) { + if (buf[i] != 0) + ASSERT_EQ(buf[i], 0, "9Kb+10b-new"); + } - for (i = 9010; i < 16384; i++) - ASSERT_EQ(buf[i], 1, "9Kb+10b-untouched"); + for (i = 9010; i < 16384; i++) { + if (buf[i] != 1) + ASSERT_EQ(buf[i], 1, "9Kb+10b-untouched"); + } /* Test a too large grow */ memset(buf, 1, 16384); @@ -271,16 +284,93 @@ out: bpf_object__close(obj); } +static void test_xdp_adjust_frags_tail_grow_64k(void) +{ + const char *file = 
"./test_xdp_adjust_tail_grow.bpf.o"; + __u32 exp_size; + struct bpf_program *prog; + struct bpf_object *obj; + int err, i, prog_fd; + __u8 *buf; + LIBBPF_OPTS(bpf_test_run_opts, topts); + + obj = bpf_object__open(file); + if (libbpf_get_error(obj)) + return; + + prog = bpf_object__next_program(obj, NULL); + if (bpf_object__load(obj)) + goto out; + + prog_fd = bpf_program__fd(prog); + + buf = malloc(262144); + if (!ASSERT_OK_PTR(buf, "alloc buf 256Kb")) + goto out; + + /* Test case add 10 bytes to last frag */ + memset(buf, 1, 262144); + exp_size = 90000 + 10; + + topts.data_in = buf; + topts.data_out = buf; + topts.data_size_in = 90000; + topts.data_size_out = 262144; + err = bpf_prog_test_run_opts(prog_fd, &topts); + + ASSERT_OK(err, "90Kb+10b"); + ASSERT_EQ(topts.retval, XDP_TX, "90Kb+10b retval"); + ASSERT_EQ(topts.data_size_out, exp_size, "90Kb+10b size"); + + for (i = 0; i < 90000; i++) { + if (buf[i] != 1) + ASSERT_EQ(buf[i], 1, "90Kb+10b-old"); + } + + for (i = 90000; i < 90010; i++) { + if (buf[i] != 0) + ASSERT_EQ(buf[i], 0, "90Kb+10b-new"); + } + + for (i = 90010; i < 262144; i++) { + if (buf[i] != 1) + ASSERT_EQ(buf[i], 1, "90Kb+10b-untouched"); + } + + /* Test a too large grow */ + memset(buf, 1, 262144); + exp_size = 90001; + + topts.data_in = topts.data_out = buf; + topts.data_size_in = 90001; + topts.data_size_out = 262144; + err = bpf_prog_test_run_opts(prog_fd, &topts); + + ASSERT_OK(err, "90Kb+10b"); + ASSERT_EQ(topts.retval, XDP_DROP, "90Kb+10b retval"); + ASSERT_EQ(topts.data_size_out, exp_size, "90Kb+10b size"); + + free(buf); +out: + bpf_object__close(obj); +} + void test_xdp_adjust_tail(void) { + int page_size = getpagesize(); + if (test__start_subtest("xdp_adjust_tail_shrink")) test_xdp_adjust_tail_shrink(); if (test__start_subtest("xdp_adjust_tail_grow")) - test_xdp_adjust_tail_grow(); + test_xdp_adjust_tail_grow(page_size == 65536); if (test__start_subtest("xdp_adjust_tail_grow2")) test_xdp_adjust_tail_grow2(); if (test__start_subtest("xdp_adjust_frags_tail_shrink")) test_xdp_adjust_frags_tail_shrink(); - if (test__start_subtest("xdp_adjust_frags_tail_grow")) - test_xdp_adjust_frags_tail_grow(); + if (test__start_subtest("xdp_adjust_frags_tail_grow")) { + if (page_size == 65536) + test_xdp_adjust_frags_tail_grow_64k(); + else + test_xdp_adjust_frags_tail_grow_4k(); + } } diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c index 6d8b54124cb3..fb952703653e 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c @@ -17,7 +17,7 @@ #include "network_helpers.h" #include <linux/if_bonding.h> #include <linux/limits.h> -#include <linux/udp.h> +#include <netinet/udp.h> #include <uapi/linux/netdev.h> #include "xdp_dummy.skel.h" diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c b/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c index e6a783c7f5db..b9d9f0a502ce 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c @@ -2,6 +2,22 @@ #include <test_progs.h> #include <network_helpers.h> #include "test_xdp_context_test_run.skel.h" +#include "test_xdp_meta.skel.h" + +#define RX_NAME "veth0" +#define TX_NAME "veth1" +#define TX_NETNS "xdp_context_tx" +#define RX_NETNS "xdp_context_rx" +#define TAP_NAME "tap0" +#define TAP_NETNS "xdp_context_tuntap" + +#define TEST_PAYLOAD_LEN 32 +static const __u8 
test_payload[TEST_PAYLOAD_LEN] = { + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, +}; void test_xdp_context_error(int prog_fd, struct bpf_test_run_opts opts, __u32 data_meta, __u32 data, __u32 data_end, @@ -103,3 +119,205 @@ void test_xdp_context_test_run(void) test_xdp_context_test_run__destroy(skel); } + +static int send_test_packet(int ifindex) +{ + int n, sock = -1; + __u8 packet[sizeof(struct ethhdr) + TEST_PAYLOAD_LEN]; + + /* The ethernet header is not relevant for this test and doesn't need to + * be meaningful. + */ + struct ethhdr eth = { 0 }; + + memcpy(packet, ð, sizeof(eth)); + memcpy(packet + sizeof(eth), test_payload, TEST_PAYLOAD_LEN); + + sock = socket(AF_PACKET, SOCK_RAW, IPPROTO_RAW); + if (!ASSERT_GE(sock, 0, "socket")) + goto err; + + struct sockaddr_ll saddr = { + .sll_family = PF_PACKET, + .sll_ifindex = ifindex, + .sll_halen = ETH_ALEN + }; + n = sendto(sock, packet, sizeof(packet), 0, (struct sockaddr *)&saddr, + sizeof(saddr)); + if (!ASSERT_EQ(n, sizeof(packet), "sendto")) + goto err; + + close(sock); + return 0; + +err: + if (sock >= 0) + close(sock); + return -1; +} + +static void assert_test_result(struct test_xdp_meta *skel) +{ + int err; + __u32 map_key = 0; + __u8 map_value[TEST_PAYLOAD_LEN]; + + err = bpf_map__lookup_elem(skel->maps.test_result, &map_key, + sizeof(map_key), &map_value, + TEST_PAYLOAD_LEN, BPF_ANY); + if (!ASSERT_OK(err, "lookup test_result")) + return; + + ASSERT_MEMEQ(&map_value, &test_payload, TEST_PAYLOAD_LEN, + "test_result map contains test payload"); +} + +void test_xdp_context_veth(void) +{ + LIBBPF_OPTS(bpf_tc_hook, tc_hook, .attach_point = BPF_TC_INGRESS); + LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1); + struct netns_obj *rx_ns = NULL, *tx_ns = NULL; + struct bpf_program *tc_prog, *xdp_prog; + struct test_xdp_meta *skel = NULL; + struct nstoken *nstoken = NULL; + int rx_ifindex, tx_ifindex; + int ret; + + tx_ns = netns_new(TX_NETNS, false); + if (!ASSERT_OK_PTR(tx_ns, "create tx_ns")) + return; + + rx_ns = netns_new(RX_NETNS, false); + if (!ASSERT_OK_PTR(rx_ns, "create rx_ns")) + goto close; + + SYS(close, "ip link add " RX_NAME " netns " RX_NETNS + " type veth peer name " TX_NAME " netns " TX_NETNS); + + nstoken = open_netns(RX_NETNS); + if (!ASSERT_OK_PTR(nstoken, "setns rx_ns")) + goto close; + + SYS(close, "ip link set dev " RX_NAME " up"); + + skel = test_xdp_meta__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open and load skeleton")) + goto close; + + rx_ifindex = if_nametoindex(RX_NAME); + if (!ASSERT_GE(rx_ifindex, 0, "if_nametoindex rx")) + goto close; + + tc_hook.ifindex = rx_ifindex; + ret = bpf_tc_hook_create(&tc_hook); + if (!ASSERT_OK(ret, "bpf_tc_hook_create")) + goto close; + + tc_prog = bpf_object__find_program_by_name(skel->obj, "ing_cls"); + if (!ASSERT_OK_PTR(tc_prog, "open ing_cls prog")) + goto close; + + tc_opts.prog_fd = bpf_program__fd(tc_prog); + ret = bpf_tc_attach(&tc_hook, &tc_opts); + if (!ASSERT_OK(ret, "bpf_tc_attach")) + goto close; + + xdp_prog = bpf_object__find_program_by_name(skel->obj, "ing_xdp"); + if (!ASSERT_OK_PTR(xdp_prog, "open ing_xdp prog")) + goto close; + + ret = bpf_xdp_attach(rx_ifindex, + bpf_program__fd(xdp_prog), + 0, NULL); + if (!ASSERT_GE(ret, 0, "bpf_xdp_attach")) + goto close; + + close_netns(nstoken); + + nstoken = open_netns(TX_NETNS); + if (!ASSERT_OK_PTR(nstoken, "setns tx_ns")) + goto close; + + 
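+ /* Hedged note on what the following steps should do (inferred from the + * map checks above, not stated in the original patch): from the TX + * namespace, bring the peer up and inject one raw Ethernet frame; + * ing_xdp on the RX side is expected to move the payload into the + * metadata area, and the ing_cls TC program to copy it into the + * test_result map verified by assert_test_result(). + */ + 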
SYS(close, "ip link set dev " TX_NAME " up"); + + tx_ifindex = if_nametoindex(TX_NAME); + if (!ASSERT_GE(tx_ifindex, 0, "if_nametoindex tx")) + goto close; + + ret = send_test_packet(tx_ifindex); + if (!ASSERT_OK(ret, "send_test_packet")) + goto close; + + assert_test_result(skel); + +close: + close_netns(nstoken); + test_xdp_meta__destroy(skel); + netns_free(rx_ns); + netns_free(tx_ns); +} + +void test_xdp_context_tuntap(void) +{ + LIBBPF_OPTS(bpf_tc_hook, tc_hook, .attach_point = BPF_TC_INGRESS); + LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1); + struct netns_obj *ns = NULL; + struct test_xdp_meta *skel = NULL; + __u8 packet[sizeof(struct ethhdr) + TEST_PAYLOAD_LEN]; + int tap_fd = -1; + int tap_ifindex; + int ret; + + ns = netns_new(TAP_NETNS, true); + if (!ASSERT_OK_PTR(ns, "create and open ns")) + return; + + tap_fd = open_tuntap(TAP_NAME, true); + if (!ASSERT_GE(tap_fd, 0, "open_tuntap")) + goto close; + + SYS(close, "ip link set dev " TAP_NAME " up"); + + skel = test_xdp_meta__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open and load skeleton")) + goto close; + + tap_ifindex = if_nametoindex(TAP_NAME); + if (!ASSERT_GE(tap_ifindex, 0, "if_nametoindex")) + goto close; + + tc_hook.ifindex = tap_ifindex; + ret = bpf_tc_hook_create(&tc_hook); + if (!ASSERT_OK(ret, "bpf_tc_hook_create")) + goto close; + + tc_opts.prog_fd = bpf_program__fd(skel->progs.ing_cls); + ret = bpf_tc_attach(&tc_hook, &tc_opts); + if (!ASSERT_OK(ret, "bpf_tc_attach")) + goto close; + + ret = bpf_xdp_attach(tap_ifindex, bpf_program__fd(skel->progs.ing_xdp), + 0, NULL); + if (!ASSERT_GE(ret, 0, "bpf_xdp_attach")) + goto close; + + /* The ethernet header is not relevant for this test and doesn't need to + * be meaningful. + */ + struct ethhdr eth = { 0 }; + + memcpy(packet, ð, sizeof(eth)); + memcpy(packet + sizeof(eth), test_payload, TEST_PAYLOAD_LEN); + + ret = write(tap_fd, packet, sizeof(packet)); + if (!ASSERT_EQ(ret, sizeof(packet), "write packet")) + goto close; + + assert_test_result(skel); + +close: + if (tap_fd >= 0) + close(tap_fd); + test_xdp_meta__destroy(skel); + netns_free(ns); +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c index c7f74f068e78..df27535995af 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c @@ -52,10 +52,10 @@ static void test_xdp_with_cpumap_helpers(void) ASSERT_EQ(info.id, val.bpf_prog.id, "Match program id to cpumap entry prog_id"); /* send a packet to trigger any potential bugs in there */ - char data[10] = {}; + char data[ETH_HLEN] = {}; DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts, .data_in = &data, - .data_size_in = 10, + .data_size_in = sizeof(data), .flags = BPF_F_TEST_XDP_LIVE_FRAMES, .repeat = 1, ); diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c index 27ffed17d4be..461ab18705d5 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c @@ -23,7 +23,7 @@ static void test_xdp_with_devmap_helpers(void) __u32 len = sizeof(info); int err, dm_fd, dm_fd_redir, map_fd; struct nstoken *nstoken = NULL; - char data[10] = {}; + char data[ETH_HLEN] = {}; __u32 idx = 0; SYS(out_close, "ip netns add %s", TEST_NS); @@ -58,7 +58,7 @@ static void test_xdp_with_devmap_helpers(void) /* send a packet to trigger any potential bugs in there */ 
DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts, .data_in = &data, - .data_size_in = 10, + .data_size_in = sizeof(data), .flags = BPF_F_TEST_XDP_LIVE_FRAMES, .repeat = 1, ); @@ -158,7 +158,7 @@ static void test_xdp_with_devmap_helpers_veth(void) struct nstoken *nstoken = NULL; __u32 len = sizeof(info); int err, dm_fd, dm_fd_redir, map_fd, ifindex_dst; - char data[10] = {}; + char data[ETH_HLEN] = {}; __u32 idx = 0; SYS(out_close, "ip netns add %s", TEST_NS); @@ -208,7 +208,7 @@ static void test_xdp_with_devmap_helpers_veth(void) /* send a packet to trigger any potential bugs in there */ DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts, .data_in = &data, - .data_size_in = 10, + .data_size_in = sizeof(data), .flags = BPF_F_TEST_XDP_LIVE_FRAMES, .repeat = 1, ); diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c index bad0ea167be7..dd34b0cc4b4e 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c @@ -7,10 +7,11 @@ #include <linux/if_link.h> #include <linux/ipv6.h> #include <linux/in6.h> -#include <linux/udp.h> +#include <netinet/udp.h> #include <bpf/bpf_endian.h> #include <uapi/linux/netdev.h> #include "test_xdp_do_redirect.skel.h" +#include "xdp_dummy.skel.h" struct udp_packet { struct ethhdr eth; @@ -65,16 +66,25 @@ static int attach_tc_prog(struct bpf_tc_hook *hook, int fd) #else #define MAX_PKT_SIZE 3408 #endif + +#define PAGE_SIZE_4K 4096 +#define PAGE_SIZE_64K 65536 + static void test_max_pkt_size(int fd) { - char data[MAX_PKT_SIZE + 1] = {}; + char data[PAGE_SIZE_64K + 1] = {}; int err; DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts, .data_in = &data, - .data_size_in = MAX_PKT_SIZE, .flags = BPF_F_TEST_XDP_LIVE_FRAMES, .repeat = 1, ); + + if (getpagesize() == PAGE_SIZE_64K) + opts.data_size_in = MAX_PKT_SIZE + PAGE_SIZE_64K - PAGE_SIZE_4K; + else + opts.data_size_in = MAX_PKT_SIZE; + err = bpf_prog_test_run_opts(fd, &opts); ASSERT_OK(err, "prog_run_max_size"); @@ -246,3 +256,166 @@ out: SYS_NOFAIL("ip netns del testns"); test_xdp_do_redirect__destroy(skel); } + +#define NS_NB 3 +#define NS0 "NS0" +#define NS1 "NS1" +#define NS2 "NS2" +#define IPV4_NETWORK "10.1.1" +#define VETH1_INDEX 111 +#define VETH2_INDEX 222 + +struct test_data { + struct netns_obj *ns[NS_NB]; + u32 xdp_flags; +}; + +static void cleanup(struct test_data *data) +{ + int i; + + for (i = 0; i < NS_NB; i++) + netns_free(data->ns[i]); +} + +/** + * ping_setup - + * Create two veth peers and forward packets in-between using XDP + * + * ------------ ------------ + * | NS1 | | NS2 | + * | veth0 | | veth0 | + * | 10.1.1.1 | | 10.1.1.2 | + * -----|------ ------|----- + * | | + * | | + * -----|-----------------------|------- + * | veth1 veth2 | + * | (id:111) (id:222) | + * | | | | + * | ----- xdp forwarding ----- | + * | | + * | NS0 | + * ------------------------------------- + */ +static int ping_setup(struct test_data *data) +{ + int i; + + data->ns[0] = netns_new(NS0, false); + if (!ASSERT_OK_PTR(data->ns[0], "create ns")) + return -1; + + for (i = 1; i < NS_NB; i++) { + char ns_name[4] = {}; + + snprintf(ns_name, 4, "NS%d", i); + data->ns[i] = netns_new(ns_name, false); + if (!ASSERT_OK_PTR(data->ns[i], "create ns")) + goto fail; + + SYS(fail, + "ip -n %s link add veth%d index %d%d%d type veth peer name veth0 netns %s", + NS0, i, i, i, i, ns_name); + SYS(fail, "ip -n %s link set veth%d up", NS0, i); + + SYS(fail, "ip -n %s addr add %s.%d/24 dev veth0", ns_name, IPV4_NETWORK, 
i); + SYS(fail, "ip -n %s link set veth0 up", ns_name); + } + + return 0; + +fail: + cleanup(data); + return -1; +} + +static void ping_test(struct test_data *data) +{ + struct test_xdp_do_redirect *skel = NULL; + struct xdp_dummy *skel_dummy = NULL; + struct nstoken *nstoken = NULL; + int i, ret; + + skel_dummy = xdp_dummy__open_and_load(); + if (!ASSERT_OK_PTR(skel_dummy, "open and load xdp_dummy skeleton")) + goto close; + + for (i = 1; i < NS_NB; i++) { + char ns_name[4] = {}; + + snprintf(ns_name, 4, "NS%d", i); + nstoken = open_netns(ns_name); + if (!ASSERT_OK_PTR(nstoken, "open ns")) + goto close; + + ret = bpf_xdp_attach(if_nametoindex("veth0"), + bpf_program__fd(skel_dummy->progs.xdp_dummy_prog), + data->xdp_flags, NULL); + if (!ASSERT_GE(ret, 0, "bpf_xdp_attach dummy_prog")) + goto close; + + close_netns(nstoken); + nstoken = NULL; + } + + skel = test_xdp_do_redirect__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open and load skeleton")) + goto close; + + nstoken = open_netns(NS0); + if (!ASSERT_OK_PTR(nstoken, "open NS0")) + goto close; + + ret = bpf_xdp_attach(VETH2_INDEX, + bpf_program__fd(skel->progs.xdp_redirect_to_111), + data->xdp_flags, NULL); + if (!ASSERT_GE(ret, 0, "bpf_xdp_attach")) + goto close; + + ret = bpf_xdp_attach(VETH1_INDEX, + bpf_program__fd(skel->progs.xdp_redirect_to_222), + data->xdp_flags, NULL); + if (!ASSERT_GE(ret, 0, "bpf_xdp_attach")) + goto close; + + close_netns(nstoken); + nstoken = NULL; + + nstoken = open_netns(NS1); + if (!ASSERT_OK_PTR(nstoken, "open NS1")) + goto close; + + SYS(close, "ping -c 1 %s.2 > /dev/null", IPV4_NETWORK); + +close: + close_netns(nstoken); + xdp_dummy__destroy(skel_dummy); + test_xdp_do_redirect__destroy(skel); +} + + +static void xdp_redirect_ping(u32 xdp_flags) +{ + struct test_data data = {}; + + if (ping_setup(&data) < 0) + return; + + data.xdp_flags = xdp_flags; + ping_test(&data); + cleanup(&data); +} + +void test_xdp_index_redirect(void) +{ + if (test__start_subtest("noflag")) + xdp_redirect_ping(0); + + if (test__start_subtest("drvflag")) + xdp_redirect_ping(XDP_FLAGS_DRV_MODE); + + if (test__start_subtest("skbflag")) + xdp_redirect_ping(XDP_FLAGS_SKB_MODE); +} + diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_flowtable.c b/tools/testing/selftests/bpf/prog_tests/xdp_flowtable.c index e1bf141d3401..3f9146d83d79 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_flowtable.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_flowtable.c @@ -3,7 +3,7 @@ #include <network_helpers.h> #include <bpf/btf.h> #include <linux/if_link.h> -#include <linux/udp.h> +#include <netinet/udp.h> #include <net/if.h> #include <unistd.h> diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c b/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c index c87ee2bf558c..19f92affc2da 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c @@ -10,7 +10,7 @@ #include <linux/errqueue.h> #include <linux/if_link.h> #include <linux/net_tstamp.h> -#include <linux/udp.h> +#include <netinet/udp.h> #include <sys/mman.h> #include <net/if.h> #include <poll.h> @@ -133,23 +133,6 @@ static void close_xsk(struct xsk *xsk) munmap(xsk->umem_area, UMEM_SIZE); } -static void ip_csum(struct iphdr *iph) -{ - __u32 sum = 0; - __u16 *p; - int i; - - iph->check = 0; - p = (void *)iph; - for (i = 0; i < sizeof(*iph) / sizeof(*p); i++) - sum += p[i]; - - while (sum >> 16) - sum = (sum & 0xffff) + (sum >> 16); - - iph->check = ~sum; -} - static int generate_packet(struct xsk 
*xsk, __u16 dst_port) { struct xsk_tx_metadata *meta; @@ -192,7 +175,7 @@ static int generate_packet(struct xsk *xsk, __u16 dst_port) iph->protocol = IPPROTO_UDP; ASSERT_EQ(inet_pton(FAMILY, TX_ADDR, &iph->saddr), 1, "inet_pton(TX_ADDR)"); ASSERT_EQ(inet_pton(FAMILY, RX_ADDR, &iph->daddr), 1, "inet_pton(RX_ADDR)"); - ip_csum(iph); + iph->check = build_ip_csum(iph); udph->source = htons(UDP_SOURCE_PORT); udph->dest = htons(dst_port); @@ -368,9 +351,10 @@ void test_xdp_metadata(void) struct xdp_metadata2 *bpf_obj2 = NULL; struct xdp_metadata *bpf_obj = NULL; struct bpf_program *new_prog, *prog; + struct bpf_devmap_val devmap_e = {}; + struct bpf_map *prog_arr, *devmap; struct nstoken *tok = NULL; __u32 queue_id = QUEUE_ID; - struct bpf_map *prog_arr; struct xsk tx_xsk = {}; struct xsk rx_xsk = {}; __u32 val, key = 0; @@ -426,6 +410,13 @@ void test_xdp_metadata(void) bpf_program__set_ifindex(prog, rx_ifindex); bpf_program__set_flags(prog, BPF_F_XDP_DEV_BOUND_ONLY); + /* Make sure we can load a dev-bound program that performs + * XDP_REDIRECT into a devmap. + */ + new_prog = bpf_object__find_program_by_name(bpf_obj->obj, "redirect"); + bpf_program__set_ifindex(new_prog, rx_ifindex); + bpf_program__set_flags(new_prog, BPF_F_XDP_DEV_BOUND_ONLY); + if (!ASSERT_OK(xdp_metadata__load(bpf_obj), "load skeleton")) goto out; @@ -440,6 +431,18 @@ void test_xdp_metadata(void) "update prog_arr")) goto out; + /* Make sure we can't add dev-bound programs to devmaps. */ + devmap = bpf_object__find_map_by_name(bpf_obj->obj, "dev_map"); + if (!ASSERT_OK_PTR(devmap, "no dev_map found")) + goto out; + + devmap_e.bpf_prog.fd = val; + if (!ASSERT_ERR(bpf_map__update_elem(devmap, &key, sizeof(key), + &devmap_e, sizeof(devmap_e), + BPF_ANY), + "update dev_map")) + goto out; + /* Attach BPF program to RX interface. 
*/ ret = bpf_xdp_attach(rx_ifindex, diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_vlan.c b/tools/testing/selftests/bpf/prog_tests/xdp_vlan.c new file mode 100644 index 000000000000..18dd25344de7 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp_vlan.c @@ -0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Network topology: + * ----------- ----------- + * | NS1 | | NS2 | + * | veth0 -|--------|- veth0 | + * ----------- ----------- + * + */ + +#define _GNU_SOURCE +#include <net/if.h> +#include <uapi/linux/if_link.h> + +#include "network_helpers.h" +#include "test_progs.h" +#include "test_xdp_vlan.skel.h" + + +#define VETH_NAME "veth0" +#define NS_MAX_SIZE 32 +#define NS1_NAME "ns-xdp-vlan-1-" +#define NS2_NAME "ns-xdp-vlan-2-" +#define NS1_IP_ADDR "100.64.10.1" +#define NS2_IP_ADDR "100.64.10.2" +#define VLAN_ID 4011 + +static int setup_network(char *ns1, char *ns2) +{ + if (!ASSERT_OK(append_tid(ns1, NS_MAX_SIZE), "create ns1 name")) + goto fail; + if (!ASSERT_OK(append_tid(ns2, NS_MAX_SIZE), "create ns2 name")) + goto fail; + + SYS(fail, "ip netns add %s", ns1); + SYS(fail, "ip netns add %s", ns2); + SYS(fail, "ip -n %s link add %s type veth peer name %s netns %s", + ns1, VETH_NAME, VETH_NAME, ns2); + + /* NOTICE: XDP requires the VLAN header to be inside the packet payload, + * so disable the VLAN offloading driver features + */ + SYS(fail, "ip netns exec %s ethtool -K %s rxvlan off txvlan off", ns1, VETH_NAME); + SYS(fail, "ip netns exec %s ethtool -K %s rxvlan off txvlan off", ns2, VETH_NAME); + + /* NS1 configuration */ + SYS(fail, "ip -n %s addr add %s/24 dev %s", ns1, NS1_IP_ADDR, VETH_NAME); + SYS(fail, "ip -n %s link set %s up", ns1, VETH_NAME); + + /* NS2 configuration */ + SYS(fail, "ip -n %s link add link %s name %s.%d type vlan id %d", + ns2, VETH_NAME, VETH_NAME, VLAN_ID, VLAN_ID); + SYS(fail, "ip -n %s addr add %s/24 dev %s.%d", ns2, NS2_IP_ADDR, VETH_NAME, VLAN_ID); + SYS(fail, "ip -n %s link set %s up", ns2, VETH_NAME); + SYS(fail, "ip -n %s link set %s.%d up", ns2, VETH_NAME, VLAN_ID); + + /* At this point ping should fail because VLAN tags are only used by NS2 */ + return !SYS_NOFAIL("ip netns exec %s ping -W 1 -c1 %s", ns2, NS1_IP_ADDR); + +fail: + return -1; +} + +static void cleanup_network(const char *ns1, const char *ns2) +{ + SYS_NOFAIL("ip netns del %s", ns1); + SYS_NOFAIL("ip netns del %s", ns2); +} + +static void xdp_vlan(struct bpf_program *xdp, struct bpf_program *tc, u32 flags) +{ + LIBBPF_OPTS(bpf_tc_hook, tc_hook, .attach_point = BPF_TC_EGRESS); + LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1); + char ns1[NS_MAX_SIZE] = NS1_NAME; + char ns2[NS_MAX_SIZE] = NS2_NAME; + struct nstoken *nstoken = NULL; + int interface; + int ret; + + if (!ASSERT_OK(setup_network(ns1, ns2), "setup network")) + goto cleanup; + + nstoken = open_netns(ns1); + if (!ASSERT_OK_PTR(nstoken, "open NS1")) + goto cleanup; + + interface = if_nametoindex(VETH_NAME); + if (!ASSERT_NEQ(interface, 0, "get interface index")) + goto cleanup; + + ret = bpf_xdp_attach(interface, bpf_program__fd(xdp), flags, NULL); + if (!ASSERT_OK(ret, "attach xdp program")) + goto cleanup; + + tc_hook.ifindex = interface; + ret = bpf_tc_hook_create(&tc_hook); + if (!ASSERT_OK(ret, "bpf_tc_hook_create")) + goto detach_xdp; + + /* Now we'll use BPF programs to pop/push the VLAN tags */ + tc_opts.prog_fd = bpf_program__fd(tc); + ret = bpf_tc_attach(&tc_hook, &tc_opts); + if (!ASSERT_OK(ret, "bpf_tc_attach")) + goto detach_xdp; + + close_netns(nstoken); + nstoken = NULL; + + /* Now 
diff --git a/tools/testing/selftests/bpf/progs/arena_atomics.c b/tools/testing/selftests/bpf/progs/arena_atomics.c index 40dd57fca5cc..a52feff98112 100644 --- a/tools/testing/selftests/bpf/progs/arena_atomics.c +++ b/tools/testing/selftests/bpf/progs/arena_atomics.c @@ -6,6 +6,8 @@ #include <stdbool.h> #include <stdatomic.h> #include "bpf_arena_common.h" +#include "../../../include/linux/filter.h" +#include "bpf_misc.h" struct { __uint(type, BPF_MAP_TYPE_ARENA); @@ -19,9 +21,17 @@ } arena SEC(".maps"); #if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST) -bool skip_tests __attribute((__section__(".data"))) = false; +bool skip_all_tests __attribute((__section__(".data"))) = false; #else -bool skip_tests = true; +bool skip_all_tests = true; +#endif + +#if defined(ENABLE_ATOMICS_TESTS) && \ + defined(__BPF_FEATURE_ADDR_SPACE_CAST) && \ + (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86)) +bool skip_lacq_srel_tests __attribute((__section__(".data"))) = false; +#else +bool skip_lacq_srel_tests = true; #endif __u32 pid = 0; @@ -274,4 +284,111 @@ int uaf(const void *ctx) return 0; } +#if __clang_major__ >= 18 +__u8 __arena_global load_acquire8_value = 0x12; +__u16 __arena_global load_acquire16_value = 0x1234; +__u32 __arena_global load_acquire32_value = 0x12345678; +__u64 __arena_global load_acquire64_value = 0x1234567890abcdef; + +__u8 __arena_global load_acquire8_result = 0; +__u16 __arena_global load_acquire16_result = 0; +__u32 __arena_global load_acquire32_result = 0; +__u64 __arena_global load_acquire64_result =
0; +#else +/* clang-17 crashes if the .addr_space.1 ELF section has holes. Work around + * this issue by defining the below variables as 64-bit. + */ +__u64 __arena_global load_acquire8_value; +__u64 __arena_global load_acquire16_value; +__u64 __arena_global load_acquire32_value; +__u64 __arena_global load_acquire64_value; + +__u64 __arena_global load_acquire8_result; +__u64 __arena_global load_acquire16_result; +__u64 __arena_global load_acquire32_result; +__u64 __arena_global load_acquire64_result; +#endif + +SEC("raw_tp/sys_enter") +int load_acquire(const void *ctx) +{ +#if defined(ENABLE_ATOMICS_TESTS) && \ + defined(__BPF_FEATURE_ADDR_SPACE_CAST) && \ + (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86)) + +#define LOAD_ACQUIRE_ARENA(SIZEOP, SIZE, SRC, DST) \ + { asm volatile ( \ + "r1 = %[" #SRC "] ll;" \ + "r1 = addr_space_cast(r1, 0x0, 0x1);" \ + ".8byte %[load_acquire_insn];" \ + "r3 = %[" #DST "] ll;" \ + "r3 = addr_space_cast(r3, 0x0, 0x1);" \ + "*(" #SIZE " *)(r3 + 0) = r2;" \ + : \ + : __imm_addr(SRC), \ + __imm_insn(load_acquire_insn, \ + BPF_ATOMIC_OP(BPF_##SIZEOP, BPF_LOAD_ACQ, \ + BPF_REG_2, BPF_REG_1, 0)), \ + __imm_addr(DST) \ + : __clobber_all); } \ + + LOAD_ACQUIRE_ARENA(B, u8, load_acquire8_value, load_acquire8_result) + LOAD_ACQUIRE_ARENA(H, u16, load_acquire16_value, + load_acquire16_result) + LOAD_ACQUIRE_ARENA(W, u32, load_acquire32_value, + load_acquire32_result) + LOAD_ACQUIRE_ARENA(DW, u64, load_acquire64_value, + load_acquire64_result) +#undef LOAD_ACQUIRE_ARENA + +#endif + return 0; +} + +#if __clang_major__ >= 18 +__u8 __arena_global store_release8_result = 0; +__u16 __arena_global store_release16_result = 0; +__u32 __arena_global store_release32_result = 0; +__u64 __arena_global store_release64_result = 0; +#else +/* clang-17 crashes if the .addr_space.1 ELF section has holes. Work around + * this issue by defining the below variables as 64-bit. + */ +__u64 __arena_global store_release8_result; +__u64 __arena_global store_release16_result; +__u64 __arena_global store_release32_result; +__u64 __arena_global store_release64_result; +#endif + +SEC("raw_tp/sys_enter") +int store_release(const void *ctx) +{ +#if defined(ENABLE_ATOMICS_TESTS) && \ + defined(__BPF_FEATURE_ADDR_SPACE_CAST) && \ + (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86)) + +#define STORE_RELEASE_ARENA(SIZEOP, DST, VAL) \ + { asm volatile ( \ + "r1 = " VAL ";" \ + "r2 = %[" #DST "] ll;" \ + "r2 = addr_space_cast(r2, 0x0, 0x1);" \ + ".8byte %[store_release_insn];" \ + : \ + : __imm_addr(DST), \ + __imm_insn(store_release_insn, \ + BPF_ATOMIC_OP(BPF_##SIZEOP, BPF_STORE_REL, \ + BPF_REG_2, BPF_REG_1, 0)) \ + : __clobber_all); } \ + + STORE_RELEASE_ARENA(B, store_release8_result, "0x12") + STORE_RELEASE_ARENA(H, store_release16_result, "0x1234") + STORE_RELEASE_ARENA(W, store_release32_result, "0x12345678") + STORE_RELEASE_ARENA(DW, store_release64_result, + "0x1234567890abcdef ll") +#undef STORE_RELEASE_ARENA + +#endif + return 0; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/arena_spin_lock.c b/tools/testing/selftests/bpf/progs/arena_spin_lock.c new file mode 100644 index 000000000000..c4500c37f85e --- /dev/null +++ b/tools/testing/selftests/bpf/progs/arena_spin_lock.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. 
*/ +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" +#include "bpf_arena_spin_lock.h" + +struct { + __uint(type, BPF_MAP_TYPE_ARENA); + __uint(map_flags, BPF_F_MMAPABLE); + __uint(max_entries, 100); /* number of pages */ +#ifdef __TARGET_ARCH_arm64 + __ulong(map_extra, 0x1ull << 32); /* start of mmap() region */ +#else + __ulong(map_extra, 0x1ull << 44); /* start of mmap() region */ +#endif +} arena SEC(".maps"); + +int cs_count; + +#if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST) +arena_spinlock_t __arena lock; +int test_skip = 1; +#else +int test_skip = 2; +#endif + +int counter; +int limit; + +SEC("tc") +int prog(void *ctx) +{ + int ret = -2; + +#if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST) + unsigned long flags; + + if ((ret = arena_spin_lock_irqsave(&lock, flags))) + return ret; + if (counter != limit) + counter++; + bpf_repeat(cs_count); + ret = 0; + arena_spin_unlock_irqrestore(&lock, flags); +#endif + return ret; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/bad_struct_ops.c b/tools/testing/selftests/bpf/progs/bad_struct_ops.c index b7e175cd0af0..b3f77b4561c8 100644 --- a/tools/testing/selftests/bpf/progs/bad_struct_ops.c +++ b/tools/testing/selftests/bpf/progs/bad_struct_ops.c @@ -3,7 +3,7 @@ #include <vmlinux.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> -#include "../bpf_testmod/bpf_testmod.h" +#include "../test_kmods/bpf_testmod.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/bench_sockmap_prog.c b/tools/testing/selftests/bpf/progs/bench_sockmap_prog.c new file mode 100644 index 000000000000..079bf3794b3a --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bench_sockmap_prog.c @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> + +long process_byte = 0; +int verdict_dir = 0; +int dropped = 0; +int pkt_size = 0; +struct { + __uint(type, BPF_MAP_TYPE_SOCKMAP); + __uint(max_entries, 20); + __type(key, int); + __type(value, int); +} sock_map_rx SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_SOCKMAP); + __uint(max_entries, 20); + __type(key, int); + __type(value, int); +} sock_map_tx SEC(".maps"); + +SEC("sk_skb/stream_parser") +int prog_skb_parser(struct __sk_buff *skb) +{ + return pkt_size; +} + +SEC("sk_skb/stream_verdict") +int prog_skb_verdict(struct __sk_buff *skb) +{ + int one = 1; + int ret = bpf_sk_redirect_map(skb, &sock_map_rx, one, verdict_dir); + + if (ret == SK_DROP) + dropped++; + __sync_fetch_and_add(&process_byte, skb->len); + return ret; +} + +SEC("sk_skb/stream_verdict") +int prog_skb_pass(struct __sk_buff *skb) +{ + __sync_fetch_and_add(&process_byte, skb->len); + return SK_PASS; +} + +SEC("sk_msg") +int prog_skmsg_verdict(struct sk_msg_md *msg) +{ + int one = 1; + + __sync_fetch_and_add(&process_byte, msg->size); + return bpf_msg_redirect_map(msg, &sock_map_tx, one, verdict_dir); +} + +SEC("sk_msg") +int prog_skmsg_pass(struct sk_msg_md *msg) +{ + __sync_fetch_and_add(&process_byte, msg->size); + return SK_PASS; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h b/tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h new file mode 100644 index 000000000000..d67466c1ff77 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h @@ -0,0 +1,542 @@ +// SPDX-License-Identifier: 
GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */ +#ifndef BPF_ARENA_SPIN_LOCK_H +#define BPF_ARENA_SPIN_LOCK_H + +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include "bpf_atomic.h" + +#define arch_mcs_spin_lock_contended_label(l, label) smp_cond_load_acquire_label(l, VAL, label) +#define arch_mcs_spin_unlock_contended(l) smp_store_release((l), 1) + +#if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST) + +#define EBUSY 16 +#define EOPNOTSUPP 95 +#define ETIMEDOUT 110 + +#ifndef __arena +#define __arena __attribute__((address_space(1))) +#endif + +extern unsigned long CONFIG_NR_CPUS __kconfig; + +/* + * Typically, we'd just rely on the definition in vmlinux.h for qspinlock, but + * PowerPC overrides the definition to define lock->val as u32 instead of + * atomic_t, leading to compilation errors. Import a local definition below so + * that we don't depend on the vmlinux.h version. + */ + +struct __qspinlock { + union { + atomic_t val; +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + struct { + u8 locked; + u8 pending; + }; + struct { + u16 locked_pending; + u16 tail; + }; +#else + struct { + u16 tail; + u16 locked_pending; + }; + struct { + u8 reserved[2]; + u8 pending; + u8 locked; + }; +#endif + }; +}; + +#define arena_spinlock_t struct __qspinlock +/* FIXME: Using typedef causes CO-RE relocation error */ +/* typedef struct qspinlock arena_spinlock_t; */ + +struct arena_mcs_spinlock { + struct arena_mcs_spinlock __arena *next; + int locked; + int count; +}; + +struct arena_qnode { + struct arena_mcs_spinlock mcs; +}; + +#define _Q_MAX_NODES 4 +#define _Q_PENDING_LOOPS 1 + +/* + * Bitfields in the atomic value: + * + * 0- 7: locked byte + * 8: pending + * 9-15: not used + * 16-17: tail index + * 18-31: tail cpu (+1) + */ +#define _Q_MAX_CPUS 1024 + +#define _Q_SET_MASK(type) (((1U << _Q_ ## type ## _BITS) - 1)\ + << _Q_ ## type ## _OFFSET) +#define _Q_LOCKED_OFFSET 0 +#define _Q_LOCKED_BITS 8 +#define _Q_LOCKED_MASK _Q_SET_MASK(LOCKED) + +#define _Q_PENDING_OFFSET (_Q_LOCKED_OFFSET + _Q_LOCKED_BITS) +#define _Q_PENDING_BITS 8 +#define _Q_PENDING_MASK _Q_SET_MASK(PENDING) + +#define _Q_TAIL_IDX_OFFSET (_Q_PENDING_OFFSET + _Q_PENDING_BITS) +#define _Q_TAIL_IDX_BITS 2 +#define _Q_TAIL_IDX_MASK _Q_SET_MASK(TAIL_IDX) + +#define _Q_TAIL_CPU_OFFSET (_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS) +#define _Q_TAIL_CPU_BITS (32 - _Q_TAIL_CPU_OFFSET) +#define _Q_TAIL_CPU_MASK _Q_SET_MASK(TAIL_CPU) + +#define _Q_TAIL_OFFSET _Q_TAIL_IDX_OFFSET +#define _Q_TAIL_MASK (_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK) + +#define _Q_LOCKED_VAL (1U << _Q_LOCKED_OFFSET) +#define _Q_PENDING_VAL (1U << _Q_PENDING_OFFSET) + +struct arena_qnode __arena qnodes[_Q_MAX_CPUS][_Q_MAX_NODES]; + +static inline u32 encode_tail(int cpu, int idx) +{ + u32 tail; + + tail = (cpu + 1) << _Q_TAIL_CPU_OFFSET; + tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */ + + return tail; +} + +static inline struct arena_mcs_spinlock __arena *decode_tail(u32 tail) +{ + u32 cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1; + u32 idx = (tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET; + + return &qnodes[cpu][idx].mcs; +} + +static inline +struct arena_mcs_spinlock __arena *grab_mcs_node(struct arena_mcs_spinlock __arena *base, int idx) +{ + return &((struct arena_qnode __arena *)base + idx)->mcs; +} + +#define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK) + +/** + * xchg_tail - Put in the new queue tail code word & retrieve previous one + * @lock : Pointer to queued spinlock structure + * @tail : The new 
queue tail code word + * Return: The previous queue tail code word + * + * xchg(lock, tail) + * + * p,*,* -> n,*,* ; prev = xchg(lock, node) + */ +static __always_inline u32 xchg_tail(arena_spinlock_t __arena *lock, u32 tail) +{ + u32 old, new; + + old = atomic_read(&lock->val); + do { + new = (old & _Q_LOCKED_PENDING_MASK) | tail; + /* + * We can use relaxed semantics since the caller ensures that + * the MCS node is properly initialized before updating the + * tail. + */ + /* These loops are not expected to stall, but we still need to + * prove to the verifier they will terminate eventually. + */ + cond_break_label(out); + } while (!atomic_try_cmpxchg_relaxed(&lock->val, &old, new)); + + return old; +out: + bpf_printk("RUNTIME ERROR: %s unexpected cond_break exit!!!", __func__); + return old; +} + +/** + * clear_pending - clear the pending bit. + * @lock: Pointer to queued spinlock structure + * + * *,1,* -> *,0,* + */ +static __always_inline void clear_pending(arena_spinlock_t __arena *lock) +{ + WRITE_ONCE(lock->pending, 0); +} + +/** + * clear_pending_set_locked - take ownership and clear the pending bit. + * @lock: Pointer to queued spinlock structure + * + * *,1,0 -> *,0,1 + * + * Lock stealing is not allowed if this function is used. + */ +static __always_inline void clear_pending_set_locked(arena_spinlock_t __arena *lock) +{ + WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL); +} + +/** + * set_locked - Set the lock bit and own the lock + * @lock: Pointer to queued spinlock structure + * + * *,*,0 -> *,0,1 + */ +static __always_inline void set_locked(arena_spinlock_t __arena *lock) +{ + WRITE_ONCE(lock->locked, _Q_LOCKED_VAL); +} + +static __always_inline +u32 arena_fetch_set_pending_acquire(arena_spinlock_t __arena *lock) +{ + u32 old, new; + + old = atomic_read(&lock->val); + do { + new = old | _Q_PENDING_VAL; + /* + * These loops are not expected to stall, but we still need to + * prove to the verifier they will terminate eventually. + */ + cond_break_label(out); + } while (!atomic_try_cmpxchg_acquire(&lock->val, &old, new)); + + return old; +out: + bpf_printk("RUNTIME ERROR: %s unexpected cond_break exit!!!", __func__); + return old; +} + +/** + * arena_spin_trylock - try to acquire the queued spinlock + * @lock : Pointer to queued spinlock structure + * Return: 1 if lock acquired, 0 if failed + */ +static __always_inline int arena_spin_trylock(arena_spinlock_t __arena *lock) +{ + int val = atomic_read(&lock->val); + + if (unlikely(val)) + return 0; + + return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)); +} + +__noinline +int arena_spin_lock_slowpath(arena_spinlock_t __arena __arg_arena *lock, u32 val) +{ + struct arena_mcs_spinlock __arena *prev, *next, *node0, *node; + int ret = -ETIMEDOUT; + u32 old, tail; + int idx; + + /* + * Wait for in-progress pending->locked hand-overs with a bounded + * number of spins so that we guarantee forward progress. + * + * 0,1,0 -> 0,0,1 + */ + if (val == _Q_PENDING_VAL) { + int cnt = _Q_PENDING_LOOPS; + val = atomic_cond_read_relaxed_label(&lock->val, + (VAL != _Q_PENDING_VAL) || !cnt--, + release_err); + } + + /* + * If we observe any contention; queue. + */ + if (val & ~_Q_LOCKED_MASK) + goto queue; + + /* + * trylock || pending + * + * 0,0,* -> 0,1,* -> 0,0,1 pending, trylock + */ + val = arena_fetch_set_pending_acquire(lock); + + /* + * If we observe contention, there is a concurrent locker. 
+ * + * Undo and queue; our setting of PENDING might have made the + * n,0,0 -> 0,0,0 transition fail and it will now be waiting + * on @next to become !NULL. + */ + if (unlikely(val & ~_Q_LOCKED_MASK)) { + + /* Undo PENDING if we set it. */ + if (!(val & _Q_PENDING_MASK)) + clear_pending(lock); + + goto queue; + } + + /* + * We're pending, wait for the owner to go away. + * + * 0,1,1 -> *,1,0 + * + * this wait loop must be a load-acquire such that we match the + * store-release that clears the locked bit and create lock + * sequentiality; this is because not all + * clear_pending_set_locked() implementations imply full + * barriers. + */ + if (val & _Q_LOCKED_MASK) + smp_cond_load_acquire_label(&lock->locked, !VAL, release_err); + + /* + * take ownership and clear the pending bit. + * + * 0,1,0 -> 0,0,1 + */ + clear_pending_set_locked(lock); + return 0; + + /* + * End of pending bit optimistic spinning and beginning of MCS + * queuing. + */ +queue: + node0 = &(qnodes[bpf_get_smp_processor_id()])[0].mcs; + idx = node0->count++; + tail = encode_tail(bpf_get_smp_processor_id(), idx); + + /* + * 4 nodes are allocated based on the assumption that there will not be + * nested NMIs taking spinlocks. That may not be true in some + * architectures even though the chance of needing more than 4 nodes + * will still be extremely unlikely. When that happens, we simply return + * an error. Original qspinlock has a trylock fallback in this case. + */ + if (unlikely(idx >= _Q_MAX_NODES)) { + ret = -EBUSY; + goto release_node_err; + } + + node = grab_mcs_node(node0, idx); + + /* + * Ensure that we increment the head node->count before initialising + * the actual node. If the compiler is kind enough to reorder these + * stores, then an IRQ could overwrite our assignments. + */ + barrier(); + + node->locked = 0; + node->next = NULL; + + /* + * We touched a (possibly) cold cacheline in the per-cpu queue node; + * attempt the trylock once more in the hope someone let go while we + * weren't watching. + */ + if (arena_spin_trylock(lock)) + goto release; + + /* + * Ensure that the initialisation of @node is complete before we + * publish the updated tail via xchg_tail() and potentially link + * @node into the waitqueue via WRITE_ONCE(prev->next, node) below. + */ + smp_wmb(); + + /* + * Publish the updated tail. + * We have already touched the queueing cacheline; don't bother with + * pending stuff. + * + * p,*,* -> n,*,* + */ + old = xchg_tail(lock, tail); + next = NULL; + + /* + * if there was a previous node; link it and wait until reaching the + * head of the waitqueue. + */ + if (old & _Q_TAIL_MASK) { + prev = decode_tail(old); + + /* Link @node into the waitqueue. */ + WRITE_ONCE(prev->next, node); + + arch_mcs_spin_lock_contended_label(&node->locked, release_node_err); + + /* + * While waiting for the MCS lock, the next pointer may have + * been set by another lock waiter. We cannot prefetch here + * due to lack of equivalent instruction in BPF ISA. + */ + next = READ_ONCE(node->next); + } + + /* + * we're at the head of the waitqueue, wait for the owner & pending to + * go away. + * + * *,x,y -> *,0,0 + * + * this wait loop must use a load-acquire such that we match the + * store-release that clears the locked bit and create lock + * sequentiality; this is because the set_locked() function below + * does not imply a full barrier. 
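+ * + * In this arena port, that acquire load is atomic_cond_read_acquire_label(); + * the _label variants bail out to the given label (here release_node_err) + * if the bounded wait is cut short, which is how the verifier is convinced + * that the loop terminates.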
+ */ + val = atomic_cond_read_acquire_label(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK), + release_node_err); + + /* + * claim the lock: + * + * n,0,0 -> 0,0,1 : lock, uncontended + * *,*,0 -> *,*,1 : lock, contended + * + * If the queue head is the only one in the queue (lock value == tail) + * and nobody is pending, clear the tail code and grab the lock. + * Otherwise, we only need to grab the lock. + */ + + /* + * In the PV case we might already have _Q_LOCKED_VAL set, because + * of lock stealing; therefore we must also allow: + * + * n,0,1 -> 0,0,1 + * + * Note: at this point: (val & _Q_PENDING_MASK) == 0, because of the + * above wait condition, therefore any concurrent setting of + * PENDING will make the uncontended transition fail. + */ + if ((val & _Q_TAIL_MASK) == tail) { + if (atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL)) + goto release; /* No contention */ + } + + /* + * Either somebody is queued behind us or _Q_PENDING_VAL got set + * which will then detect the remaining tail and queue behind us + * ensuring we'll see a @next. + */ + set_locked(lock); + + /* + * contended path; wait for next if not observed yet, release. + */ + if (!next) + next = smp_cond_load_relaxed_label(&node->next, (VAL), release_node_err); + + arch_mcs_spin_unlock_contended(&next->locked); + +release:; + /* + * release the node + * + * Doing a normal dec vs this_cpu_dec is fine. An upper context always + * decrements count it incremented before returning, thus we're fine. + * For contexts interrupting us, they either observe our dec or not. + * Just ensure the compiler doesn't reorder this statement, as a + * this_cpu_dec implicitly implied that. + */ + barrier(); + node0->count--; + return 0; +release_node_err: + barrier(); + node0->count--; + goto release_err; +release_err: + return ret; +} + +/** + * arena_spin_lock - acquire a queued spinlock + * @lock: Pointer to queued spinlock structure + * + * On error, returned value will be negative. + * On success, zero is returned. + * + * The return value _must_ be tested against zero for success, + * instead of checking it against negative, for passing the + * BPF verifier. + * + * The user should do: + * if (arena_spin_lock(...) != 0) // failure + * or + * if (arena_spin_lock(...) == 0) // success + * or + * if (arena_spin_lock(...)) // failure + * or + * if (!arena_spin_lock(...)) // success + * instead of: + * if (arena_spin_lock(...) < 0) // failure + * + * The return value can still be inspected later. + */ +static __always_inline int arena_spin_lock(arena_spinlock_t __arena *lock) +{ + int val = 0; + + if (CONFIG_NR_CPUS > 1024) + return -EOPNOTSUPP; + + bpf_preempt_disable(); + if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL))) + return 0; + + val = arena_spin_lock_slowpath(lock, val); + /* FIXME: bpf_assert_range(-MAX_ERRNO, 0) once we have it working for all cases. 
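+ * + * Note: on failure, preemption is re-enabled immediately below; on + * success it stays disabled until arena_spin_unlock().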
*/ + if (val) + bpf_preempt_enable(); + return val; +} + +/** + * arena_spin_unlock - release a queued spinlock + * @lock : Pointer to queued spinlock structure + */ +static __always_inline void arena_spin_unlock(arena_spinlock_t __arena *lock) +{ + /* + * unlock() needs release semantics: + */ + smp_store_release(&lock->locked, 0); + bpf_preempt_enable(); +} + +#define arena_spin_lock_irqsave(lock, flags) \ + ({ \ + int __ret; \ + bpf_local_irq_save(&(flags)); \ + __ret = arena_spin_lock((lock)); \ + if (__ret) \ + bpf_local_irq_restore(&(flags)); \ + (__ret); \ + }) + +#define arena_spin_unlock_irqrestore(lock, flags) \ + ({ \ + arena_spin_unlock((lock)); \ + bpf_local_irq_restore(&(flags)); \ + }) + +#endif + +#endif /* BPF_ARENA_SPIN_LOCK_H */ diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_map_elem.c b/tools/testing/selftests/bpf/progs/bpf_iter_map_elem.c new file mode 100644 index 000000000000..2f20485e0de3 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_map_elem.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include "vmlinux.h" +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +__u32 value_sum = 0; + +SEC("iter/bpf_map_elem") +int dump_bpf_map_values(struct bpf_iter__bpf_map_elem *ctx) +{ + __u32 value = 0; + + if (ctx->value == (void *)0) + return 0; + + bpf_probe_read_kernel(&value, sizeof(value), ctx->value); + value_sum += value; + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_tasks.c b/tools/testing/selftests/bpf/progs/bpf_iter_tasks.c index bc10c4e4b4fa..966ee5a7b066 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_tasks.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_tasks.c @@ -9,6 +9,13 @@ char _license[] SEC("license") = "GPL"; uint32_t tid = 0; int num_unknown_tid = 0; int num_known_tid = 0; +void *user_ptr = 0; +void *user_ptr_long = 0; +uint32_t pid = 0; + +static char big_str1[5000]; +static char big_str2[5005]; +static char big_str3[4996]; SEC("iter/task") int dump_task(struct bpf_iter__task *ctx) @@ -35,7 +42,9 @@ int dump_task(struct bpf_iter__task *ctx) } int num_expected_failure_copy_from_user_task = 0; +int num_expected_failure_copy_from_user_task_str = 0; int num_success_copy_from_user_task = 0; +int num_success_copy_from_user_task_str = 0; SEC("iter.s/task") int dump_task_sleepable(struct bpf_iter__task *ctx) @@ -44,6 +53,9 @@ int dump_task_sleepable(struct bpf_iter__task *ctx) struct task_struct *task = ctx->task; static const char info[] = " === END ==="; struct pt_regs *regs; + char task_str1[10] = "aaaaaaaaaa"; + char task_str2[10], task_str3[10]; + char task_str4[20] = "aaaaaaaaaaaaaaaaaaaa"; void *ptr; uint32_t user_data = 0; int ret; @@ -78,8 +90,106 @@ int dump_task_sleepable(struct bpf_iter__task *ctx) BPF_SEQ_PRINTF(seq, "%s\n", info); return 0; } + ++num_success_copy_from_user_task; + /* Read an invalid pointer and ensure we get an error */ + ptr = NULL; + ret = bpf_copy_from_user_task_str((char *)task_str1, sizeof(task_str1), ptr, task, 0); + if (ret >= 0 || task_str1[9] != 'a' || task_str1[0] != '\0') { + BPF_SEQ_PRINTF(seq, "%s\n", info); + return 0; + } + + /* Read an invalid pointer and ensure we get error with pad zeros flag */ + ptr = NULL; + ret = bpf_copy_from_user_task_str((char *)task_str1, sizeof(task_str1), + ptr, task, BPF_F_PAD_ZEROS); + if (ret >= 0 || task_str1[9] != '\0' || task_str1[0] != '\0') { + BPF_SEQ_PRINTF(seq, "%s\n", info); + return 0; + } + + ++num_expected_failure_copy_from_user_task_str; + + 
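/* The remaining stanzas copy from valid user pointers (user_ptr and + * user_ptr_long, populated by the user-space side of this test) with buffer + * sizes equal to, shorter than, and longer than the source string, with and + * without BPF_F_PAD_ZEROS, including strings that cross a page boundary. + */ +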
/* Same length as the string */ + ret = bpf_copy_from_user_task_str((char *)task_str2, 10, user_ptr, task, 0); + /* only need to do the task pid check once */ + if (bpf_strncmp(task_str2, 10, "test_data\0") != 0 || ret != 10 || task->tgid != pid) { + BPF_SEQ_PRINTF(seq, "%s\n", info); + return 0; + } + + /* Shorter length than the string */ + ret = bpf_copy_from_user_task_str((char *)task_str3, 2, user_ptr, task, 0); + if (bpf_strncmp(task_str3, 2, "t\0") != 0 || ret != 2) { + BPF_SEQ_PRINTF(seq, "%s\n", info); + return 0; + } + + /* Longer length than the string */ + ret = bpf_copy_from_user_task_str((char *)task_str4, 20, user_ptr, task, 0); + if (bpf_strncmp(task_str4, 10, "test_data\0") != 0 || ret != 10 + || task_str4[sizeof(task_str4) - 1] != 'a') { + BPF_SEQ_PRINTF(seq, "%s\n", info); + return 0; + } + + /* Longer length than the string with pad zeros flag */ + ret = bpf_copy_from_user_task_str((char *)task_str4, 20, user_ptr, task, BPF_F_PAD_ZEROS); + if (bpf_strncmp(task_str4, 10, "test_data\0") != 0 || ret != 10 + || task_str4[sizeof(task_str4) - 1] != '\0') { + BPF_SEQ_PRINTF(seq, "%s\n", info); + return 0; + } + + /* Longer length than the string past a page boundary */ + ret = bpf_copy_from_user_task_str(big_str1, 5000, user_ptr, task, 0); + if (bpf_strncmp(big_str1, 10, "test_data\0") != 0 || ret != 10) { + BPF_SEQ_PRINTF(seq, "%s\n", info); + return 0; + } + + /* String that crosses a page boundary */ + ret = bpf_copy_from_user_task_str(big_str1, 5000, user_ptr_long, task, BPF_F_PAD_ZEROS); + if (bpf_strncmp(big_str1, 4, "baba") != 0 || ret != 5000 + || bpf_strncmp(big_str1 + 4996, 4, "bab\0") != 0) { + BPF_SEQ_PRINTF(seq, "%s\n", info); + return 0; + } + + for (int i = 0; i < 4999; ++i) { + if (i % 2 == 0) { + if (big_str1[i] != 'b') { + BPF_SEQ_PRINTF(seq, "%s\n", info); + return 0; + } + } else { + if (big_str1[i] != 'a') { + BPF_SEQ_PRINTF(seq, "%s\n", info); + return 0; + } + } + } + + /* Longer length than the string that crosses a page boundary */ + ret = bpf_copy_from_user_task_str(big_str2, 5005, user_ptr_long, task, BPF_F_PAD_ZEROS); + if (bpf_strncmp(big_str2, 4, "baba") != 0 || ret != 5000 + || bpf_strncmp(big_str2 + 4996, 5, "bab\0\0") != 0) { + BPF_SEQ_PRINTF(seq, "%s\n", info); + return 0; + } + + /* Shorter length than the string that crosses a page boundary */ + ret = bpf_copy_from_user_task_str(big_str3, 4996, user_ptr_long, task, 0); + if (bpf_strncmp(big_str3, 4, "baba") != 0 || ret != 4996 + || bpf_strncmp(big_str3 + 4992, 4, "bab\0") != 0) { + BPF_SEQ_PRINTF(seq, "%s\n", info); + return 0; + } + + ++num_success_copy_from_user_task_str; + if (ctx->meta->seq_num == 0) BPF_SEQ_PRINTF(seq, " tgid gid data\n"); diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c b/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c index d22449c69363..164640db3a29 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c @@ -99,10 +99,10 @@ static int dump_tcp_sock(struct seq_file *seq, struct tcp_sock *tp, icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { timer_active = 1; - timer_expires = icsk->icsk_timeout; + timer_expires = icsk->icsk_retransmit_timer.expires; } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { timer_active = 4; - timer_expires = icsk->icsk_timeout; + timer_expires = icsk->icsk_retransmit_timer.expires; } else if (timer_pending(&sp->sk_timer)) { timer_active = 2; timer_expires = sp->sk_timer.expires; diff --git 
a/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c b/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c index 8b072666f9d9..591c703f5032 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c @@ -99,10 +99,10 @@ static int dump_tcp6_sock(struct seq_file *seq, struct tcp6_sock *tp, icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { timer_active = 1; - timer_expires = icsk->icsk_timeout; + timer_expires = icsk->icsk_retransmit_timer.expires; } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { timer_active = 4; - timer_expires = icsk->icsk_timeout; + timer_expires = icsk->icsk_retransmit_timer.expires; } else if (timer_pending(&sp->sk_timer)) { timer_active = 2; timer_expires = sp->sk_timer.expires; diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h index f45f4352feeb..c1cfd297aabf 100644 --- a/tools/testing/selftests/bpf/progs/bpf_misc.h +++ b/tools/testing/selftests/bpf/progs/bpf_misc.h @@ -83,9 +83,11 @@ * expect return value to match passed parameter: * - a decimal number * - a hexadecimal number, when starts from 0x - * - literal INT_MIN - * - literal POINTER_VALUE (see definition below) - * - literal TEST_DATA_LEN (see definition below) + * - a macro which expands to one of the above + * - literal _INT_MIN (expands to INT_MIN) + * In addition, two special macros are defined below: + * - POINTER_VALUE + * - TEST_DATA_LEN * __retval_unpriv Same, but load program in unprivileged mode. * * __description Text to be used instead of a program name for display @@ -125,8 +127,8 @@ #define __success_unpriv __attribute__((btf_decl_tag("comment:test_expect_success_unpriv"))) #define __log_level(lvl) __attribute__((btf_decl_tag("comment:test_log_level="#lvl))) #define __flag(flag) __attribute__((btf_decl_tag("comment:test_prog_flags="#flag))) -#define __retval(val) __attribute__((btf_decl_tag("comment:test_retval="#val))) -#define __retval_unpriv(val) __attribute__((btf_decl_tag("comment:test_retval_unpriv="#val))) +#define __retval(val) __attribute__((btf_decl_tag("comment:test_retval="XSTR(val)))) +#define __retval_unpriv(val) __attribute__((btf_decl_tag("comment:test_retval_unpriv="XSTR(val)))) #define __auxiliary __attribute__((btf_decl_tag("comment:test_auxiliary"))) #define __auxiliary_unpriv __attribute__((btf_decl_tag("comment:test_auxiliary_unpriv"))) #define __btf_path(path) __attribute__((btf_decl_tag("comment:test_btf_path=" path))) @@ -135,6 +137,8 @@ #define __arch_arm64 __arch("ARM64") #define __arch_riscv64 __arch("RISCV64") #define __caps_unpriv(caps) __attribute__((btf_decl_tag("comment:test_caps_unpriv=" EXPAND_QUOTE(caps)))) +#define __load_if_JITed() __attribute__((btf_decl_tag("comment:load_mode=jited"))) +#define __load_if_no_JITed() __attribute__((btf_decl_tag("comment:load_mode=no_jited"))) /* Define common capabilities tested using __caps_unpriv */ #define CAP_NET_ADMIN 12 @@ -153,7 +157,7 @@ #define __imm_insn(name, expr) [name]"i"(*(long *)&(expr)) /* Magic constants used with __retval() */ -#define POINTER_VALUE 0xcafe4all +#define POINTER_VALUE 0xbadcafe #define TEST_DATA_LEN 64 #ifndef __used @@ -172,6 +176,9 @@ #elif defined(__TARGET_ARCH_riscv) #define SYSCALL_WRAPPER 1 #define SYS_PREFIX "__riscv_" +#elif defined(__TARGET_ARCH_powerpc) +#define SYSCALL_WRAPPER 1 +#define SYS_PREFIX "" #else #define SYSCALL_WRAPPER 0 #define SYS_PREFIX "__se_" @@ -208,4 +215,31 @@ #define ARRAY_SIZE(x) (sizeof(x) / 
sizeof((x)[0])) #endif +#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \ + (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \ + defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \ + defined(__TARGET_ARCH_loongarch)) && \ + __clang_major__ >= 18 +#define CAN_USE_GOTOL +#endif + +#if __clang_major__ >= 18 +#define CAN_USE_BPF_ST +#endif + +#if __clang_major__ >= 18 && defined(ENABLE_ATOMICS_TESTS) && \ + (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \ + (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) || \ + (defined(__TARGET_ARCH_powerpc)) +#define CAN_USE_LOAD_ACQ_STORE_REL +#endif + +#if defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) +#define SPEC_V1 +#endif + +#if defined(__TARGET_ARCH_x86) +#define SPEC_V4 +#endif + #endif diff --git a/tools/testing/selftests/bpf/progs/bpf_qdisc_common.h b/tools/testing/selftests/bpf/progs/bpf_qdisc_common.h new file mode 100644 index 000000000000..3754f581b328 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_qdisc_common.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _BPF_QDISC_COMMON_H +#define _BPF_QDISC_COMMON_H + +#define NET_XMIT_SUCCESS 0x00 +#define NET_XMIT_DROP 0x01 /* skb dropped */ +#define NET_XMIT_CN 0x02 /* congestion notification */ + +#define TC_PRIO_CONTROL 7 +#define TC_PRIO_MAX 15 + +#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8))) + +struct bpf_sk_buff_ptr; + +static struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb) +{ + return (struct qdisc_skb_cb *)skb->cb; +} + +static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb) +{ + return qdisc_skb_cb(skb)->pkt_len; +} + +#endif diff --git a/tools/testing/selftests/bpf/progs/bpf_qdisc_fail__incompl_ops.c b/tools/testing/selftests/bpf/progs/bpf_qdisc_fail__incompl_ops.c new file mode 100644 index 000000000000..f188062ed730 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_qdisc_fail__incompl_ops.c @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <vmlinux.h> +#include "bpf_experimental.h" +#include "bpf_qdisc_common.h" + +char _license[] SEC("license") = "GPL"; + +SEC("struct_ops") +int BPF_PROG(bpf_qdisc_test_enqueue, struct sk_buff *skb, struct Qdisc *sch, + struct bpf_sk_buff_ptr *to_free) +{ + bpf_qdisc_skb_drop(skb, to_free); + return NET_XMIT_DROP; +} + +SEC("struct_ops") +struct sk_buff *BPF_PROG(bpf_qdisc_test_dequeue, struct Qdisc *sch) +{ + return NULL; +} + +SEC("struct_ops") +void BPF_PROG(bpf_qdisc_test_reset, struct Qdisc *sch) +{ +} + +SEC("struct_ops") +void BPF_PROG(bpf_qdisc_test_destroy, struct Qdisc *sch) +{ +} + +SEC(".struct_ops") +struct Qdisc_ops test = { + .enqueue = (void *)bpf_qdisc_test_enqueue, + .dequeue = (void *)bpf_qdisc_test_dequeue, + .reset = (void *)bpf_qdisc_test_reset, + .destroy = (void *)bpf_qdisc_test_destroy, + .id = "bpf_qdisc_test", +}; + diff --git a/tools/testing/selftests/bpf/progs/bpf_qdisc_fifo.c b/tools/testing/selftests/bpf/progs/bpf_qdisc_fifo.c new file mode 100644 index 000000000000..1de2be3e370b --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_qdisc_fifo.c @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <vmlinux.h> +#include "bpf_experimental.h" +#include "bpf_qdisc_common.h" + +char _license[] SEC("license") = "GPL"; + +struct skb_node { + struct sk_buff __kptr * skb; + struct bpf_list_node node; +}; + +private(A) struct bpf_spin_lock q_fifo_lock; +private(A) struct bpf_list_head q_fifo __contains(skb_node, node); + +bool 
init_called; + +SEC("struct_ops/bpf_fifo_enqueue") +int BPF_PROG(bpf_fifo_enqueue, struct sk_buff *skb, struct Qdisc *sch, + struct bpf_sk_buff_ptr *to_free) +{ + struct skb_node *skbn; + u32 pkt_len; + + if (sch->q.qlen == sch->limit) + goto drop; + + skbn = bpf_obj_new(typeof(*skbn)); + if (!skbn) + goto drop; + + pkt_len = qdisc_pkt_len(skb); + + sch->q.qlen++; + skb = bpf_kptr_xchg(&skbn->skb, skb); + if (skb) + bpf_qdisc_skb_drop(skb, to_free); + + bpf_spin_lock(&q_fifo_lock); + bpf_list_push_back(&q_fifo, &skbn->node); + bpf_spin_unlock(&q_fifo_lock); + + sch->qstats.backlog += pkt_len; + return NET_XMIT_SUCCESS; +drop: + bpf_qdisc_skb_drop(skb, to_free); + return NET_XMIT_DROP; +} + +SEC("struct_ops/bpf_fifo_dequeue") +struct sk_buff *BPF_PROG(bpf_fifo_dequeue, struct Qdisc *sch) +{ + struct bpf_list_node *node; + struct sk_buff *skb = NULL; + struct skb_node *skbn; + + bpf_spin_lock(&q_fifo_lock); + node = bpf_list_pop_front(&q_fifo); + bpf_spin_unlock(&q_fifo_lock); + if (!node) + return NULL; + + skbn = container_of(node, struct skb_node, node); + skb = bpf_kptr_xchg(&skbn->skb, skb); + bpf_obj_drop(skbn); + if (!skb) + return NULL; + + sch->qstats.backlog -= qdisc_pkt_len(skb); + bpf_qdisc_bstats_update(sch, skb); + sch->q.qlen--; + + return skb; +} + +SEC("struct_ops/bpf_fifo_init") +int BPF_PROG(bpf_fifo_init, struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +{ + sch->limit = 1000; + init_called = true; + return 0; +} + +SEC("struct_ops/bpf_fifo_reset") +void BPF_PROG(bpf_fifo_reset, struct Qdisc *sch) +{ + struct bpf_list_node *node; + struct skb_node *skbn; + int i; + + bpf_for(i, 0, sch->q.qlen) { + struct sk_buff *skb = NULL; + + bpf_spin_lock(&q_fifo_lock); + node = bpf_list_pop_front(&q_fifo); + bpf_spin_unlock(&q_fifo_lock); + + if (!node) + break; + + skbn = container_of(node, struct skb_node, node); + skb = bpf_kptr_xchg(&skbn->skb, skb); + if (skb) + bpf_kfree_skb(skb); + bpf_obj_drop(skbn); + } + sch->q.qlen = 0; +} + +SEC("struct_ops") +void BPF_PROG(bpf_fifo_destroy, struct Qdisc *sch) +{ +} + +SEC(".struct_ops") +struct Qdisc_ops fifo = { + .enqueue = (void *)bpf_fifo_enqueue, + .dequeue = (void *)bpf_fifo_dequeue, + .init = (void *)bpf_fifo_init, + .reset = (void *)bpf_fifo_reset, + .destroy = (void *)bpf_fifo_destroy, + .id = "bpf_fifo", +};
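For a sense of how such a struct_ops qdisc gets wired up from a selftest rather than via bpftool, here is a plausible user-space sketch; the skeleton type and function names are assumed from the usual bpftool gen skeleton conventions for this file, and the actual prog_tests harness is not part of this diff:

#include "bpf_qdisc_fifo.skel.h"	/* generated skeleton; name assumed */

static int attach_bpf_fifo(void)
{
	struct bpf_qdisc_fifo *skel;
	struct bpf_link *link;

	skel = bpf_qdisc_fifo__open_and_load();
	if (!skel)
		return -1;

	/* Registers the Qdisc_ops whose .id is "bpf_fifo" with the kernel */
	link = bpf_map__attach_struct_ops(skel->maps.fifo);
	if (!link) {
		bpf_qdisc_fifo__destroy(skel);
		return -1;
	}

	/* From here the qdisc attaches like any other:
	 *   tc qdisc add dev lo root handle 1: bpf_fifo
	 */
	return 0;
}

diff --git a/tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c b/tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c new file mode 100644 index 000000000000..1a3233a275c7 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c @@ -0,0 +1,756 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* bpf_fq is intended for testing the bpf qdisc infrastructure and not a direct + * copy of sch_fq. bpf_fq implements the scheduling algorithm of sch_fq before + * 29f834aa326e ("net_sched: sch_fq: add 3 bands and WRR scheduling") was + * introduced. It gives each flow a fair chance to transmit packets in a + * round-robin fashion. Note that for flow pacing, bpf_fq currently only + * respects skb->tstamp but not skb->sk->sk_pacing_rate. In addition, if there + * are multiple bpf_fq instances, they will have a shared view of flows and + * configuration since some key data structures such as fq_prio_flows, + * fq_nonprio_flows, and fq_bpf_data are global. + * + * To use bpf_fq alone without running selftests, use the following commands. + * + * 1. Register bpf_fq to the kernel + * bpftool struct_ops register bpf_qdisc_fq.bpf.o /sys/fs/bpf + * 2.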
Add bpf_fq to an interface + * tc qdisc add dev <interface name> root handle <handle> bpf_fq + * 3. Delete bpf_fq attached to the interface + * tc qdisc delete dev <interface name> root + * 4. Unregister bpf_fq + * bpftool struct_ops unregister name fq + * + * The qdisc name, bpf_fq, used in tc commands is defined by Qdisc_ops.id. + * The struct_ops_map_name, fq, used in the bpftool command is the name of the + * Qdisc_ops. + * + * SEC(".struct_ops") + * struct Qdisc_ops fq = { + * ... + * .id = "bpf_fq", + * }; + */ + +#include <vmlinux.h> +#include <errno.h> +#include <bpf/bpf_helpers.h> +#include "bpf_experimental.h" +#include "bpf_qdisc_common.h" + +char _license[] SEC("license") = "GPL"; + +#define NSEC_PER_USEC 1000L +#define NSEC_PER_SEC 1000000000L + +#define NUM_QUEUE (1 << 20) + +struct fq_bpf_data { + u32 quantum; + u32 initial_quantum; + u32 flow_refill_delay; + u32 flow_plimit; + u64 horizon; + u32 orphan_mask; + u32 timer_slack; + u64 time_next_delayed_flow; + u64 unthrottle_latency_ns; + u8 horizon_drop; + u32 new_flow_cnt; + u32 old_flow_cnt; + u64 ktime_cache; +}; + +enum { + CLS_RET_PRIO = 0, + CLS_RET_NONPRIO = 1, + CLS_RET_ERR = 2, +}; + +struct skb_node { + u64 tstamp; + struct sk_buff __kptr * skb; + struct bpf_rb_node node; +}; + +struct fq_flow_node { + int credit; + u32 qlen; + u64 age; + u64 time_next_packet; + struct bpf_list_node list_node; + struct bpf_rb_node rb_node; + struct bpf_rb_root queue __contains(skb_node, node); + struct bpf_spin_lock lock; + struct bpf_refcount refcount; +}; + +struct dequeue_nonprio_ctx { + bool stop_iter; + u64 expire; + u64 now; +}; + +struct remove_flows_ctx { + bool gc_only; + u32 reset_cnt; + u32 reset_max; +}; + +struct unset_throttled_flows_ctx { + bool unset_all; + u64 now; +}; + +struct fq_stashed_flow { + struct fq_flow_node __kptr * flow; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, __u64); + __type(value, struct fq_stashed_flow); + __uint(max_entries, NUM_QUEUE); +} fq_nonprio_flows SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, __u64); + __type(value, struct fq_stashed_flow); + __uint(max_entries, 1); +} fq_prio_flows SEC(".maps"); + +private(A) struct bpf_spin_lock fq_delayed_lock; +private(A) struct bpf_rb_root fq_delayed __contains(fq_flow_node, rb_node); + +private(B) struct bpf_spin_lock fq_new_flows_lock; +private(B) struct bpf_list_head fq_new_flows __contains(fq_flow_node, list_node); + +private(C) struct bpf_spin_lock fq_old_flows_lock; +private(C) struct bpf_list_head fq_old_flows __contains(fq_flow_node, list_node); + +private(D) struct fq_bpf_data q; + +/* Wrapper for bpf_kptr_xchg that expects NULL dst */ +static void bpf_kptr_xchg_back(void *map_val, void *ptr) +{ + void *ret; + + ret = bpf_kptr_xchg(map_val, ptr); + if (ret) + bpf_obj_drop(ret); +} + +static bool skbn_tstamp_less(struct bpf_rb_node *a, const struct bpf_rb_node *b) +{ + struct skb_node *skbn_a; + struct skb_node *skbn_b; + + skbn_a = container_of(a, struct skb_node, node); + skbn_b = container_of(b, struct skb_node, node); + + return skbn_a->tstamp < skbn_b->tstamp; +} + +static bool fn_time_next_packet_less(struct bpf_rb_node *a, const struct bpf_rb_node *b) +{ + struct fq_flow_node *flow_a; + struct fq_flow_node *flow_b; + + flow_a = container_of(a, struct fq_flow_node, rb_node); + flow_b = container_of(b, struct fq_flow_node, rb_node); + + return flow_a->time_next_packet < flow_b->time_next_packet; +} + +static void +fq_flows_add_head(struct bpf_list_head *head, struct bpf_spin_lock 
*lock, + struct fq_flow_node *flow, u32 *flow_cnt) +{ + bpf_spin_lock(lock); + bpf_list_push_front(head, &flow->list_node); + bpf_spin_unlock(lock); + *flow_cnt += 1; +} + +static void +fq_flows_add_tail(struct bpf_list_head *head, struct bpf_spin_lock *lock, + struct fq_flow_node *flow, u32 *flow_cnt) +{ + bpf_spin_lock(lock); + bpf_list_push_back(head, &flow->list_node); + bpf_spin_unlock(lock); + *flow_cnt += 1; +} + +static void +fq_flows_remove_front(struct bpf_list_head *head, struct bpf_spin_lock *lock, + struct bpf_list_node **node, u32 *flow_cnt) +{ + bpf_spin_lock(lock); + *node = bpf_list_pop_front(head); + bpf_spin_unlock(lock); + *flow_cnt -= 1; +} + +static bool +fq_flows_is_empty(struct bpf_list_head *head, struct bpf_spin_lock *lock) +{ + struct bpf_list_node *node; + + bpf_spin_lock(lock); + node = bpf_list_pop_front(head); + if (node) { + bpf_list_push_front(head, node); + bpf_spin_unlock(lock); + return false; + } + bpf_spin_unlock(lock); + + return true; +} + +/* flow->age is used to denote the state of the flow (not-detached, detached, throttled) + * as well as the timestamp when the flow is detached. + * + * 0: not-detached + * 1 - (~0ULL-1): detached + * ~0ULL: throttled + */ +static void fq_flow_set_detached(struct fq_flow_node *flow) +{ + flow->age = bpf_jiffies64(); +} + +static bool fq_flow_is_detached(struct fq_flow_node *flow) +{ + return flow->age != 0 && flow->age != ~0ULL; +} + +static bool sk_listener(struct sock *sk) +{ + return (1 << sk->__sk_common.skc_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV); +} + +static void fq_gc(void); + +static int fq_new_flow(void *flow_map, struct fq_stashed_flow **sflow, u64 hash) +{ + struct fq_stashed_flow tmp = {}; + struct fq_flow_node *flow; + int ret; + + flow = bpf_obj_new(typeof(*flow)); + if (!flow) + return -ENOMEM; + + flow->credit = q.initial_quantum, + flow->qlen = 0, + flow->age = 1, + flow->time_next_packet = 0, + + ret = bpf_map_update_elem(flow_map, &hash, &tmp, 0); + if (ret == -ENOMEM || ret == -E2BIG) { + fq_gc(); + bpf_map_update_elem(&fq_nonprio_flows, &hash, &tmp, 0); + } + + *sflow = bpf_map_lookup_elem(flow_map, &hash); + if (!*sflow) { + bpf_obj_drop(flow); + return -ENOMEM; + } + + bpf_kptr_xchg_back(&(*sflow)->flow, flow); + return 0; +} + +static int +fq_classify(struct sk_buff *skb, struct fq_stashed_flow **sflow) +{ + struct sock *sk = skb->sk; + int ret = CLS_RET_NONPRIO; + u64 hash = 0; + + if ((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL) { + *sflow = bpf_map_lookup_elem(&fq_prio_flows, &hash); + ret = CLS_RET_PRIO; + } else { + if (!sk || sk_listener(sk)) { + hash = bpf_skb_get_hash(skb) & q.orphan_mask; + /* Avoid collision with an existing flow hash, which + * only uses the lower 32 bits of hash, by setting the + * upper half of hash to 1. + */ + hash |= (1ULL << 32); + } else if (sk->__sk_common.skc_state == TCP_CLOSE) { + hash = bpf_skb_get_hash(skb) & q.orphan_mask; + hash |= (1ULL << 32); + } else { + hash = sk->__sk_common.skc_hash; + } + *sflow = bpf_map_lookup_elem(&fq_nonprio_flows, &hash); + } + + if (!*sflow) + ret = fq_new_flow(&fq_nonprio_flows, sflow, hash) < 0 ? 
+ CLS_RET_ERR : CLS_RET_NONPRIO; + + return ret; +} + +static bool fq_packet_beyond_horizon(struct sk_buff *skb) +{ + return (s64)skb->tstamp > (s64)(q.ktime_cache + q.horizon); +} + +SEC("struct_ops/bpf_fq_enqueue") +int BPF_PROG(bpf_fq_enqueue, struct sk_buff *skb, struct Qdisc *sch, + struct bpf_sk_buff_ptr *to_free) +{ + struct fq_flow_node *flow = NULL, *flow_copy; + struct fq_stashed_flow *sflow; + u64 time_to_send, jiffies; + struct skb_node *skbn; + int ret; + + if (sch->q.qlen >= sch->limit) + goto drop; + + if (!skb->tstamp) { + time_to_send = q.ktime_cache = bpf_ktime_get_ns(); + } else { + if (fq_packet_beyond_horizon(skb)) { + q.ktime_cache = bpf_ktime_get_ns(); + if (fq_packet_beyond_horizon(skb)) { + if (q.horizon_drop) + goto drop; + + skb->tstamp = q.ktime_cache + q.horizon; + } + } + time_to_send = skb->tstamp; + } + + ret = fq_classify(skb, &sflow); + if (ret == CLS_RET_ERR) + goto drop; + + flow = bpf_kptr_xchg(&sflow->flow, flow); + if (!flow) + goto drop; + + if (ret == CLS_RET_NONPRIO) { + if (flow->qlen >= q.flow_plimit) { + bpf_kptr_xchg_back(&sflow->flow, flow); + goto drop; + } + + if (fq_flow_is_detached(flow)) { + flow_copy = bpf_refcount_acquire(flow); + + jiffies = bpf_jiffies64(); + if ((s64)(jiffies - (flow_copy->age + q.flow_refill_delay)) > 0) { + if (flow_copy->credit < q.quantum) + flow_copy->credit = q.quantum; + } + flow_copy->age = 0; + fq_flows_add_tail(&fq_new_flows, &fq_new_flows_lock, flow_copy, + &q.new_flow_cnt); + } + } + + skbn = bpf_obj_new(typeof(*skbn)); + if (!skbn) { + bpf_kptr_xchg_back(&sflow->flow, flow); + goto drop; + } + + skbn->tstamp = skb->tstamp = time_to_send; + + sch->qstats.backlog += qdisc_pkt_len(skb); + + skb = bpf_kptr_xchg(&skbn->skb, skb); + if (skb) + bpf_qdisc_skb_drop(skb, to_free); + + bpf_spin_lock(&flow->lock); + bpf_rbtree_add(&flow->queue, &skbn->node, skbn_tstamp_less); + bpf_spin_unlock(&flow->lock); + + flow->qlen++; + bpf_kptr_xchg_back(&sflow->flow, flow); + + sch->q.qlen++; + return NET_XMIT_SUCCESS; + +drop: + bpf_qdisc_skb_drop(skb, to_free); + sch->qstats.drops++; + return NET_XMIT_DROP; +} + +static int fq_unset_throttled_flows(u32 index, struct unset_throttled_flows_ctx *ctx) +{ + struct bpf_rb_node *node = NULL; + struct fq_flow_node *flow; + + bpf_spin_lock(&fq_delayed_lock); + + node = bpf_rbtree_first(&fq_delayed); + if (!node) { + bpf_spin_unlock(&fq_delayed_lock); + return 1; + } + + flow = container_of(node, struct fq_flow_node, rb_node); + if (!ctx->unset_all && flow->time_next_packet > ctx->now) { + q.time_next_delayed_flow = flow->time_next_packet; + bpf_spin_unlock(&fq_delayed_lock); + return 1; + } + + node = bpf_rbtree_remove(&fq_delayed, &flow->rb_node); + + bpf_spin_unlock(&fq_delayed_lock); + + if (!node) + return 1; + + flow = container_of(node, struct fq_flow_node, rb_node); + flow->age = 0; + fq_flows_add_tail(&fq_old_flows, &fq_old_flows_lock, flow, &q.old_flow_cnt); + + return 0; +} + +static void fq_flow_set_throttled(struct fq_flow_node *flow) +{ + flow->age = ~0ULL; + + if (q.time_next_delayed_flow > flow->time_next_packet) + q.time_next_delayed_flow = flow->time_next_packet; + + bpf_spin_lock(&fq_delayed_lock); + bpf_rbtree_add(&fq_delayed, &flow->rb_node, fn_time_next_packet_less); + bpf_spin_unlock(&fq_delayed_lock); +} + +static void fq_check_throttled(u64 now) +{ + struct unset_throttled_flows_ctx ctx = { + .unset_all = false, + .now = now, + }; + unsigned long sample; + + if (q.time_next_delayed_flow > now) + return; + + sample = (unsigned long)(now - 
q.time_next_delayed_flow); + q.unthrottle_latency_ns -= q.unthrottle_latency_ns >> 3; + q.unthrottle_latency_ns += sample >> 3; + + q.time_next_delayed_flow = ~0ULL; + bpf_loop(NUM_QUEUE, fq_unset_throttled_flows, &ctx, 0); +} + +static struct sk_buff* +fq_dequeue_nonprio_flows(u32 index, struct dequeue_nonprio_ctx *ctx) +{ + u64 time_next_packet, time_to_send; + struct bpf_rb_node *rb_node; + struct sk_buff *skb = NULL; + struct bpf_list_head *head; + struct bpf_list_node *node; + struct bpf_spin_lock *lock; + struct fq_flow_node *flow; + struct skb_node *skbn; + bool is_empty; + u32 *cnt; + + if (q.new_flow_cnt) { + head = &fq_new_flows; + lock = &fq_new_flows_lock; + cnt = &q.new_flow_cnt; + } else if (q.old_flow_cnt) { + head = &fq_old_flows; + lock = &fq_old_flows_lock; + cnt = &q.old_flow_cnt; + } else { + if (q.time_next_delayed_flow != ~0ULL) + ctx->expire = q.time_next_delayed_flow; + goto break_loop; + } + + fq_flows_remove_front(head, lock, &node, cnt); + if (!node) + goto break_loop; + + flow = container_of(node, struct fq_flow_node, list_node); + if (flow->credit <= 0) { + flow->credit += q.quantum; + fq_flows_add_tail(&fq_old_flows, &fq_old_flows_lock, flow, &q.old_flow_cnt); + return NULL; + } + + bpf_spin_lock(&flow->lock); + rb_node = bpf_rbtree_first(&flow->queue); + if (!rb_node) { + bpf_spin_unlock(&flow->lock); + is_empty = fq_flows_is_empty(&fq_old_flows, &fq_old_flows_lock); + if (head == &fq_new_flows && !is_empty) { + fq_flows_add_tail(&fq_old_flows, &fq_old_flows_lock, flow, &q.old_flow_cnt); + } else { + fq_flow_set_detached(flow); + bpf_obj_drop(flow); + } + return NULL; + } + + skbn = container_of(rb_node, struct skb_node, node); + time_to_send = skbn->tstamp; + + time_next_packet = (time_to_send > flow->time_next_packet) ? 
+ time_to_send : flow->time_next_packet; + if (ctx->now < time_next_packet) { + bpf_spin_unlock(&flow->lock); + flow->time_next_packet = time_next_packet; + fq_flow_set_throttled(flow); + return NULL; + } + + rb_node = bpf_rbtree_remove(&flow->queue, rb_node); + bpf_spin_unlock(&flow->lock); + + if (!rb_node) + goto add_flow_and_break; + + skbn = container_of(rb_node, struct skb_node, node); + skb = bpf_kptr_xchg(&skbn->skb, skb); + bpf_obj_drop(skbn); + + if (!skb) + goto add_flow_and_break; + + flow->credit -= qdisc_skb_cb(skb)->pkt_len; + flow->qlen--; + +add_flow_and_break: + fq_flows_add_head(head, lock, flow, cnt); + +break_loop: + ctx->stop_iter = true; + return skb; +} + +static struct sk_buff *fq_dequeue_prio(void) +{ + struct fq_flow_node *flow = NULL; + struct fq_stashed_flow *sflow; + struct bpf_rb_node *rb_node; + struct sk_buff *skb = NULL; + struct skb_node *skbn; + u64 hash = 0; + + sflow = bpf_map_lookup_elem(&fq_prio_flows, &hash); + if (!sflow) + return NULL; + + flow = bpf_kptr_xchg(&sflow->flow, flow); + if (!flow) + return NULL; + + bpf_spin_lock(&flow->lock); + rb_node = bpf_rbtree_first(&flow->queue); + if (!rb_node) { + bpf_spin_unlock(&flow->lock); + goto out; + } + + skbn = container_of(rb_node, struct skb_node, node); + rb_node = bpf_rbtree_remove(&flow->queue, &skbn->node); + bpf_spin_unlock(&flow->lock); + + if (!rb_node) + goto out; + + skbn = container_of(rb_node, struct skb_node, node); + skb = bpf_kptr_xchg(&skbn->skb, skb); + bpf_obj_drop(skbn); + +out: + bpf_kptr_xchg_back(&sflow->flow, flow); + + return skb; +} + +SEC("struct_ops/bpf_fq_dequeue") +struct sk_buff *BPF_PROG(bpf_fq_dequeue, struct Qdisc *sch) +{ + struct dequeue_nonprio_ctx cb_ctx = {}; + struct sk_buff *skb = NULL; + int i; + + if (!sch->q.qlen) + goto out; + + skb = fq_dequeue_prio(); + if (skb) + goto dequeue; + + q.ktime_cache = cb_ctx.now = bpf_ktime_get_ns(); + fq_check_throttled(q.ktime_cache); + bpf_for(i, 0, sch->limit) { + skb = fq_dequeue_nonprio_flows(i, &cb_ctx); + if (cb_ctx.stop_iter) + break; + }; + + if (skb) { +dequeue: + sch->q.qlen--; + sch->qstats.backlog -= qdisc_pkt_len(skb); + bpf_qdisc_bstats_update(sch, skb); + return skb; + } + + if (cb_ctx.expire) + bpf_qdisc_watchdog_schedule(sch, cb_ctx.expire, q.timer_slack); +out: + return NULL; +} + +static int fq_remove_flows_in_list(u32 index, void *ctx) +{ + struct bpf_list_node *node; + struct fq_flow_node *flow; + + bpf_spin_lock(&fq_new_flows_lock); + node = bpf_list_pop_front(&fq_new_flows); + bpf_spin_unlock(&fq_new_flows_lock); + if (!node) { + bpf_spin_lock(&fq_old_flows_lock); + node = bpf_list_pop_front(&fq_old_flows); + bpf_spin_unlock(&fq_old_flows_lock); + if (!node) + return 1; + } + + flow = container_of(node, struct fq_flow_node, list_node); + bpf_obj_drop(flow); + + return 0; +} + +extern unsigned CONFIG_HZ __kconfig; + +/* limit number of collected flows per round */ +#define FQ_GC_MAX 8 +#define FQ_GC_AGE (3*CONFIG_HZ) + +static bool fq_gc_candidate(struct fq_flow_node *flow) +{ + u64 jiffies = bpf_jiffies64(); + + return fq_flow_is_detached(flow) && + ((s64)(jiffies - (flow->age + FQ_GC_AGE)) > 0); +} + +static int +fq_remove_flows(struct bpf_map *flow_map, u64 *hash, + struct fq_stashed_flow *sflow, struct remove_flows_ctx *ctx) +{ + if (sflow->flow && + (!ctx->gc_only || fq_gc_candidate(sflow->flow))) { + bpf_map_delete_elem(flow_map, hash); + ctx->reset_cnt++; + } + + return ctx->reset_cnt < ctx->reset_max ? 
0 : 1; +} + +static void fq_gc(void) +{ + struct remove_flows_ctx cb_ctx = { + .gc_only = true, + .reset_cnt = 0, + .reset_max = FQ_GC_MAX, + }; + + bpf_for_each_map_elem(&fq_nonprio_flows, fq_remove_flows, &cb_ctx, 0); +} + +SEC("struct_ops/bpf_fq_reset") +void BPF_PROG(bpf_fq_reset, struct Qdisc *sch) +{ + struct unset_throttled_flows_ctx utf_ctx = { + .unset_all = true, + }; + struct remove_flows_ctx rf_ctx = { + .gc_only = false, + .reset_cnt = 0, + .reset_max = NUM_QUEUE, + }; + struct fq_stashed_flow *sflow; + u64 hash = 0; + + sch->q.qlen = 0; + sch->qstats.backlog = 0; + + bpf_for_each_map_elem(&fq_nonprio_flows, fq_remove_flows, &rf_ctx, 0); + + rf_ctx.reset_cnt = 0; + bpf_for_each_map_elem(&fq_prio_flows, fq_remove_flows, &rf_ctx, 0); + fq_new_flow(&fq_prio_flows, &sflow, hash); + + bpf_loop(NUM_QUEUE, fq_remove_flows_in_list, NULL, 0); + q.new_flow_cnt = 0; + q.old_flow_cnt = 0; + + bpf_loop(NUM_QUEUE, fq_unset_throttled_flows, &utf_ctx, 0); +} + +SEC("struct_ops/bpf_fq_init") +int BPF_PROG(bpf_fq_init, struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +{ + struct net_device *dev = sch->dev_queue->dev; + u32 psched_mtu = dev->mtu + dev->hard_header_len; + struct fq_stashed_flow *sflow; + u64 hash = 0; + + if (fq_new_flow(&fq_prio_flows, &sflow, hash) < 0) + return -ENOMEM; + + sch->limit = 10000; + q.initial_quantum = 10 * psched_mtu; + q.quantum = 2 * psched_mtu; + q.flow_refill_delay = 40; + q.flow_plimit = 100; + q.horizon = 10ULL * NSEC_PER_SEC; + q.horizon_drop = 1; + q.orphan_mask = 1024 - 1; + q.timer_slack = 10 * NSEC_PER_USEC; + q.time_next_delayed_flow = ~0ULL; + q.unthrottle_latency_ns = 0ULL; + q.new_flow_cnt = 0; + q.old_flow_cnt = 0; + + return 0; +} + +SEC("struct_ops") +void BPF_PROG(bpf_fq_destroy, struct Qdisc *sch) +{ +} + +SEC(".struct_ops") +struct Qdisc_ops fq = { + .enqueue = (void *)bpf_fq_enqueue, + .dequeue = (void *)bpf_fq_dequeue, + .reset = (void *)bpf_fq_reset, + .init = (void *)bpf_fq_init, + .destroy = (void *)bpf_fq_destroy, + .id = "bpf_fq", +}; diff --git a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h index 59843b430f76..17db400f0e0d 100644 --- a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h +++ b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h @@ -15,7 +15,11 @@ #define SO_KEEPALIVE 9 #define SO_PRIORITY 12 #define SO_REUSEPORT 15 +#if defined(__TARGET_ARCH_powerpc) +#define SO_RCVLOWAT 16 +#else #define SO_RCVLOWAT 18 +#endif #define SO_BINDTODEVICE 25 #define SO_MARK 36 #define SO_MAX_PACING_RATE 47 @@ -49,6 +53,7 @@ #define TCP_SAVED_SYN 28 #define TCP_CA_NAME_MAX 16 #define TCP_NAGLE_OFF 1 +#define TCP_RTO_MAX_MS 44 #define TCP_ECN_OK 1 #define TCP_ECN_QUEUE_CWR 2 @@ -123,6 +128,7 @@ #define sk_refcnt __sk_common.skc_refcnt #define sk_state __sk_common.skc_state #define sk_net __sk_common.skc_net +#define sk_rcv_saddr __sk_common.skc_rcv_saddr #define sk_v6_daddr __sk_common.skc_v6_daddr #define sk_v6_rcv_saddr __sk_common.skc_v6_rcv_saddr #define sk_flags __sk_common.skc_flags diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_bad_signed_arr_elem_sz.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_bad_signed_arr_elem_sz.c new file mode 100644 index 000000000000..21a560427b10 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_bad_signed_arr_elem_sz.c @@ -0,0 +1,3 @@ +#include "core_reloc_types.h" + +void f(struct core_reloc_arrays___err_bad_signed_arr_elem_sz x) {} diff 
--git a/tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c b/tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c index 38f78d9345de..d93f68024cc6 100644 --- a/tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c +++ b/tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c @@ -30,36 +30,42 @@ int BPF_PROG(test_percpu2, struct bpf_testmod_btf_type_tag_2 *arg) /* trace_cgroup_mkdir(struct cgroup *cgrp, const char *path) * - * struct cgroup_rstat_cpu { + * struct css_rstat_cpu { * ... - * struct cgroup *updated_children; + * struct cgroup_subsys_state *updated_children; * ... * }; * - * struct cgroup { + * struct cgroup_subsys_state { + * ... + * struct css_rstat_cpu __percpu *rstat_cpu; * ... - * struct cgroup_rstat_cpu __percpu *rstat_cpu; + * }; + * + * struct cgroup { + * struct cgroup_subsys_state self; * ... * }; */ SEC("tp_btf/cgroup_mkdir") int BPF_PROG(test_percpu_load, struct cgroup *cgrp, const char *path) { - g = (__u64)cgrp->rstat_cpu->updated_children; + g = (__u64)cgrp->self.rstat_cpu->updated_children; return 0; } SEC("tp_btf/cgroup_mkdir") int BPF_PROG(test_percpu_helper, struct cgroup *cgrp, const char *path) { - struct cgroup_rstat_cpu *rstat; + struct css_rstat_cpu *rstat; __u32 cpu; cpu = bpf_get_smp_processor_id(); - rstat = (struct cgroup_rstat_cpu *)bpf_per_cpu_ptr(cgrp->rstat_cpu, cpu); + rstat = (struct css_rstat_cpu *)bpf_per_cpu_ptr( + cgrp->self.rstat_cpu, cpu); if (rstat) { /* READ_ONCE */ - *(volatile int *)rstat; + *(volatile long *)rstat; } return 0; diff --git a/tools/testing/selftests/bpf/progs/cb_refs.c b/tools/testing/selftests/bpf/progs/cb_refs.c index 56c764df8196..5d6fc7f01ebb 100644 --- a/tools/testing/selftests/bpf/progs/cb_refs.c +++ b/tools/testing/selftests/bpf/progs/cb_refs.c @@ -2,7 +2,7 @@ #include <vmlinux.h> #include <bpf/bpf_tracing.h> #include <bpf/bpf_helpers.h> -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" struct map_value { struct prog_test_ref_kfunc __kptr *ptr; diff --git a/tools/testing/selftests/bpf/progs/cgroup_hierarchical_stats.c b/tools/testing/selftests/bpf/progs/cgroup_hierarchical_stats.c index c74362854948..ff189a736ad8 100644 --- a/tools/testing/selftests/bpf/progs/cgroup_hierarchical_stats.c +++ b/tools/testing/selftests/bpf/progs/cgroup_hierarchical_stats.c @@ -37,8 +37,9 @@ struct { __type(value, struct attach_counter); } attach_counters SEC(".maps"); -extern void cgroup_rstat_updated(struct cgroup *cgrp, int cpu) __ksym; -extern void cgroup_rstat_flush(struct cgroup *cgrp) __ksym; +extern void css_rstat_updated( + struct cgroup_subsys_state *css, int cpu) __ksym; +extern void css_rstat_flush(struct cgroup_subsys_state *css) __ksym; static uint64_t cgroup_id(struct cgroup *cgrp) { @@ -75,7 +76,7 @@ int BPF_PROG(counter, struct cgroup *dst_cgrp, struct task_struct *leader, else if (create_percpu_attach_counter(cg_id, 1)) return 0; - cgroup_rstat_updated(dst_cgrp, bpf_get_smp_processor_id()); + css_rstat_updated(&dst_cgrp->self, bpf_get_smp_processor_id()); return 0; } @@ -141,7 +142,7 @@ int BPF_PROG(dumper, struct bpf_iter_meta *meta, struct cgroup *cgrp) return 1; /* Flush the stats to make sure we get the most updated numbers */ - cgroup_rstat_flush(cgrp); + css_rstat_flush(&cgrp->self); total_counter = bpf_map_lookup_elem(&attach_counters, &cg_id); if (!total_counter) { diff --git a/tools/testing/selftests/bpf/progs/cgroup_mprog.c b/tools/testing/selftests/bpf/progs/cgroup_mprog.c new file mode 100644 index 000000000000..6a0ea02c4de2 --- /dev/null +++ 
b/tools/testing/selftests/bpf/progs/cgroup_mprog.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */ +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +SEC("cgroup/getsockopt") +int getsockopt_1(struct bpf_sockopt *ctx) +{ + return 1; +} + +SEC("cgroup/getsockopt") +int getsockopt_2(struct bpf_sockopt *ctx) +{ + return 1; +} + +SEC("cgroup/getsockopt") +int getsockopt_3(struct bpf_sockopt *ctx) +{ + return 1; +} + +SEC("cgroup/getsockopt") +int getsockopt_4(struct bpf_sockopt *ctx) +{ + return 1; +} diff --git a/tools/testing/selftests/bpf/progs/cgroup_preorder.c b/tools/testing/selftests/bpf/progs/cgroup_preorder.c new file mode 100644 index 000000000000..4ef6202baa0a --- /dev/null +++ b/tools/testing/selftests/bpf/progs/cgroup_preorder.c @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */ +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +unsigned int idx; +__u8 result[4]; + +SEC("cgroup/getsockopt") +int child(struct bpf_sockopt *ctx) +{ + if (idx < 4) + result[idx++] = 1; + return 1; +} + +SEC("cgroup/getsockopt") +int child_2(struct bpf_sockopt *ctx) +{ + if (idx < 4) + result[idx++] = 2; + return 1; +} + +SEC("cgroup/getsockopt") +int parent(struct bpf_sockopt *ctx) +{ + if (idx < 4) + result[idx++] = 3; + return 1; +} + +SEC("cgroup/getsockopt") +int parent_2(struct bpf_sockopt *ctx) +{ + if (idx < 4) + result[idx++] = 4; + return 1; +} diff --git a/tools/testing/selftests/bpf/progs/cgroup_read_xattr.c b/tools/testing/selftests/bpf/progs/cgroup_read_xattr.c new file mode 100644 index 000000000000..092db1d0435e --- /dev/null +++ b/tools/testing/selftests/bpf/progs/cgroup_read_xattr.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. 
*/ + +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> +#include "bpf_experimental.h" +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + +char value[16]; + +static __always_inline void read_xattr(struct cgroup *cgroup) +{ + struct bpf_dynptr value_ptr; + + bpf_dynptr_from_mem(value, sizeof(value), 0, &value_ptr); + bpf_cgroup_read_xattr(cgroup, "user.bpf_test", + &value_ptr); +} + +SEC("lsm.s/socket_connect") +__success +int BPF_PROG(trusted_cgroup_ptr_sleepable) +{ + u64 cgrp_id = bpf_get_current_cgroup_id(); + struct cgroup *cgrp; + + cgrp = bpf_cgroup_from_id(cgrp_id); + if (!cgrp) + return 0; + + read_xattr(cgrp); + bpf_cgroup_release(cgrp); + return 0; +} + +SEC("lsm/socket_connect") +__success +int BPF_PROG(trusted_cgroup_ptr_non_sleepable) +{ + u64 cgrp_id = bpf_get_current_cgroup_id(); + struct cgroup *cgrp; + + cgrp = bpf_cgroup_from_id(cgrp_id); + if (!cgrp) + return 0; + + read_xattr(cgrp); + bpf_cgroup_release(cgrp); + return 0; +} + +SEC("lsm/socket_connect") +__success +int BPF_PROG(use_css_iter_non_sleepable) +{ + u64 cgrp_id = bpf_get_current_cgroup_id(); + struct cgroup_subsys_state *css; + struct cgroup *cgrp; + + cgrp = bpf_cgroup_from_id(cgrp_id); + if (!cgrp) + return 0; + + bpf_for_each(css, css, &cgrp->self, BPF_CGROUP_ITER_ANCESTORS_UP) + read_xattr(css->cgroup); + + bpf_cgroup_release(cgrp); + return 0; +} + +SEC("lsm.s/socket_connect") +__failure __msg("expected an RCU CS") +int BPF_PROG(use_css_iter_sleepable_missing_rcu_lock) +{ + u64 cgrp_id = bpf_get_current_cgroup_id(); + struct cgroup_subsys_state *css; + struct cgroup *cgrp; + + cgrp = bpf_cgroup_from_id(cgrp_id); + if (!cgrp) + return 0; + + bpf_for_each(css, css, &cgrp->self, BPF_CGROUP_ITER_ANCESTORS_UP) + read_xattr(css->cgroup); + + bpf_cgroup_release(cgrp); + return 0; +} + +SEC("lsm.s/socket_connect") +__success +int BPF_PROG(use_css_iter_sleepable_with_rcu_lock) +{ + u64 cgrp_id = bpf_get_current_cgroup_id(); + struct cgroup_subsys_state *css; + struct cgroup *cgrp; + + bpf_rcu_read_lock(); + cgrp = bpf_cgroup_from_id(cgrp_id); + if (!cgrp) + goto out; + + bpf_for_each(css, css, &cgrp->self, BPF_CGROUP_ITER_ANCESTORS_UP) + read_xattr(css->cgroup); + + bpf_cgroup_release(cgrp); +out: + bpf_rcu_read_unlock(); + return 0; +} + +SEC("lsm/socket_connect") +__success +int BPF_PROG(use_bpf_cgroup_ancestor) +{ + u64 cgrp_id = bpf_get_current_cgroup_id(); + struct cgroup *cgrp, *ancestor; + + cgrp = bpf_cgroup_from_id(cgrp_id); + if (!cgrp) + return 0; + + ancestor = bpf_cgroup_ancestor(cgrp, 1); + if (!ancestor) + goto out; + + read_xattr(cgrp); + bpf_cgroup_release(ancestor); +out: + bpf_cgroup_release(cgrp); + return 0; +} + +SEC("cgroup/sendmsg4") +__success +int BPF_PROG(cgroup_skb) +{ + u64 cgrp_id = bpf_get_current_cgroup_id(); + struct cgroup *cgrp, *ancestor; + + cgrp = bpf_cgroup_from_id(cgrp_id); + if (!cgrp) + return 0; + + ancestor = bpf_cgroup_ancestor(cgrp, 1); + if (!ancestor) + goto out; + + read_xattr(cgrp); + bpf_cgroup_release(ancestor); +out: + bpf_cgroup_release(cgrp); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/cgroup_skb_direct_packet_access.c b/tools/testing/selftests/bpf/progs/cgroup_skb_direct_packet_access.c new file mode 100644 index 000000000000..e32b07d802bb --- /dev/null +++ b/tools/testing/selftests/bpf/progs/cgroup_skb_direct_packet_access.c @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> + +__u32 data_end; + 
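+/* Store skb->data_end in the global above and let the packet through; userspace then verifies that this direct packet access is accepted in a cgroup_skb program. */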
+SEC("cgroup_skb/ingress") +int direct_packet_access(struct __sk_buff *skb) +{ + data_end = skb->data_end; + return 1; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/compute_live_registers.c b/tools/testing/selftests/bpf/progs/compute_live_registers.c new file mode 100644 index 000000000000..6884ab99a421 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/compute_live_registers.c @@ -0,0 +1,440 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "../../../include/linux/filter.h" +#include "bpf_arena_common.h" +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u64); +} test_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARENA); + __uint(map_flags, BPF_F_MMAPABLE); + __uint(max_entries, 1); +} arena SEC(".maps"); + +SEC("socket") +__log_level(2) +__msg(" 0: .......... (b7) r0 = 42") +__msg(" 1: 0......... (bf) r1 = r0") +__msg(" 2: .1........ (bf) r2 = r1") +__msg(" 3: ..2....... (bf) r3 = r2") +__msg(" 4: ...3...... (bf) r4 = r3") +__msg(" 5: ....4..... (bf) r5 = r4") +__msg(" 6: .....5.... (bf) r6 = r5") +__msg(" 7: ......6... (bf) r7 = r6") +__msg(" 8: .......7.. (bf) r8 = r7") +__msg(" 9: ........8. (bf) r9 = r8") +__msg("10: .........9 (bf) r0 = r9") +__msg("11: 0......... (95) exit") +__naked void assign_chain(void) +{ + asm volatile ( + "r0 = 42;" + "r1 = r0;" + "r2 = r1;" + "r3 = r2;" + "r4 = r3;" + "r5 = r4;" + "r6 = r5;" + "r7 = r6;" + "r8 = r7;" + "r9 = r8;" + "r0 = r9;" + "exit;" + ::: __clobber_all); +} + +SEC("socket") +__log_level(2) +__msg("0: .......... (b7) r1 = 7") +__msg("1: .1........ (07) r1 += 7") +__msg("2: .......... (b7) r2 = 7") +__msg("3: ..2....... (b7) r3 = 42") +__msg("4: ..23...... (0f) r2 += r3") +__msg("5: .......... (b7) r0 = 0") +__msg("6: 0......... (95) exit") +__naked void arithmetics(void) +{ + asm volatile ( + "r1 = 7;" + "r1 += 7;" + "r2 = 7;" + "r3 = 42;" + "r2 += r3;" + "r0 = 0;" + "exit;" + ::: __clobber_all); +} + +#ifdef CAN_USE_BPF_ST +SEC("socket") +__log_level(2) +__msg(" 1: .1........ (07) r1 += -8") +__msg(" 2: .1........ (7a) *(u64 *)(r1 +0) = 7") +__msg(" 3: .1........ (b7) r2 = 42") +__msg(" 4: .12....... (7b) *(u64 *)(r1 +0) = r2") +__msg(" 5: .12....... (7b) *(u64 *)(r1 +0) = r2") +__msg(" 6: .......... (b7) r0 = 0") +__naked void store(void) +{ + asm volatile ( + "r1 = r10;" + "r1 += -8;" + "*(u64 *)(r1 +0) = 7;" + "r2 = 42;" + "*(u64 *)(r1 +0) = r2;" + "*(u64 *)(r1 +0) = r2;" + "r0 = 0;" + "exit;" + ::: __clobber_all); +} +#endif + +SEC("socket") +__log_level(2) +__msg("1: ....4..... (07) r4 += -8") +__msg("2: ....4..... (79) r5 = *(u64 *)(r4 +0)") +__msg("3: ....45.... (07) r4 += -8") +__naked void load(void) +{ + asm volatile ( + "r4 = r10;" + "r4 += -8;" + "r5 = *(u64 *)(r4 +0);" + "r4 += -8;" + "r0 = r5;" + "exit;" + ::: __clobber_all); +} + +SEC("socket") +__log_level(2) +__msg("0: .1........ (61) r2 = *(u32 *)(r1 +0)") +__msg("1: ..2....... (d4) r2 = le64 r2") +__msg("2: ..2....... (bf) r0 = r2") +__naked void endian(void) +{ + asm volatile ( + "r2 = *(u32 *)(r1 +0);" + "r2 = le64 r2;" + "r0 = r2;" + "exit;" + ::: __clobber_all); +} + +SEC("socket") +__log_level(2) +__msg(" 8: 0......... (b7) r1 = 1") +__msg(" 9: 01........ (db) r1 = atomic64_fetch_add((u64 *)(r0 +0), r1)") +__msg("10: 01........ (c3) lock *(u32 *)(r0 +0) += r1") +__msg("11: 01........ (db) r1 = atomic64_xchg((u64 *)(r0 +0), r1)") +__msg("12: 01........ 
(bf) r2 = r0") +__msg("13: .12....... (bf) r0 = r1") +__msg("14: 012....... (db) r0 = atomic64_cmpxchg((u64 *)(r2 +0), r0, r1)") +__naked void atomic(void) +{ + asm volatile ( + "r2 = r10;" + "r2 += -8;" + "r1 = 0;" + "*(u64 *)(r2 +0) = r1;" + "r1 = %[test_map] ll;" + "call %[bpf_map_lookup_elem];" + "if r0 == 0 goto 1f;" + "r1 = 1;" + "r1 = atomic_fetch_add((u64 *)(r0 +0), r1);" + ".8byte %[add_nofetch];" /* same as "lock *(u32 *)(r0 +0) += r1;" */ + "r1 = xchg_64(r0 + 0, r1);" + "r2 = r0;" + "r0 = r1;" + "r0 = cmpxchg_64(r2 + 0, r0, r1);" + "1: exit;" + : + : __imm(bpf_map_lookup_elem), + __imm_addr(test_map), + __imm_insn(add_nofetch, BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_0, BPF_REG_1, 0)) + : __clobber_all); +} + +#ifdef CAN_USE_LOAD_ACQ_STORE_REL + +SEC("socket") +__log_level(2) +__msg("2: .12....... (db) store_release((u64 *)(r2 -8), r1)") +__msg("3: .......... (bf) r3 = r10") +__msg("4: ...3...... (db) r4 = load_acquire((u64 *)(r3 -8))") +__naked void atomic_load_acq_store_rel(void) +{ + asm volatile ( + "r1 = 42;" + "r2 = r10;" + ".8byte %[store_release_insn];" /* store_release((u64 *)(r2 - 8), r1); */ + "r3 = r10;" + ".8byte %[load_acquire_insn];" /* r4 = load_acquire((u64 *)(r3 + 0)); */ + "r0 = r4;" + "exit;" + : + : __imm_insn(store_release_insn, + BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, BPF_REG_2, BPF_REG_1, -8)), + __imm_insn(load_acquire_insn, + BPF_ATOMIC_OP(BPF_DW, BPF_LOAD_ACQ, BPF_REG_4, BPF_REG_3, -8)) + : __clobber_all); +} + +#endif /* CAN_USE_LOAD_ACQ_STORE_REL */ + +SEC("socket") +__log_level(2) +__msg("4: .12....7.. (85) call bpf_trace_printk#6") +__msg("5: 0......7.. (0f) r0 += r7") +__naked void regular_call(void) +{ + asm volatile ( + "r7 = 1;" + "r1 = r10;" + "r1 += -8;" + "r2 = 1;" + "call %[bpf_trace_printk];" + "r0 += r7;" + "exit;" + : + : __imm(bpf_trace_printk) + : __clobber_all); +} + +SEC("socket") +__log_level(2) +__msg("2: 012....... (25) if r1 > 0x7 goto pc+1") +__msg("3: ..2....... (bf) r0 = r2") +__naked void if1(void) +{ + asm volatile ( + "r0 = 1;" + "r2 = 2;" + "if r1 > 0x7 goto +1;" + "r0 = r2;" + "exit;" + ::: __clobber_all); +} + +SEC("socket") +__log_level(2) +__msg("3: 0123...... (2d) if r1 > r3 goto pc+1") +__msg("4: ..2....... (bf) r0 = r2") +__naked void if2(void) +{ + asm volatile ( + "r0 = 1;" + "r2 = 2;" + "r3 = 7;" + "if r1 > r3 goto +1;" + "r0 = r2;" + "exit;" + ::: __clobber_all); +} + +/* Verifier misses that r2 is alive if jset is not handled properly */ +SEC("socket") +__log_level(2) +__msg("2: 012....... (45) if r1 & 0x7 goto pc+1") +__naked void if3_jset_bug(void) +{ + asm volatile ( + "r0 = 1;" + "r2 = 2;" + "if r1 & 0x7 goto +1;" + "exit;" + "r0 = r2;" + "exit;" + ::: __clobber_all); +} + +SEC("socket") +__log_level(2) +__msg("0: .......... (b7) r1 = 0") +__msg("1: .1........ (b7) r2 = 7") +__msg("2: .12....... (25) if r1 > 0x7 goto pc+4") +__msg("3: .12....... (07) r1 += 1") +__msg("4: .12....... (27) r2 *= 2") +__msg("5: .12....... (05) goto pc+0") +__msg("6: .12....... (05) goto pc-5") +__msg("7: .......... (b7) r0 = 0") +__msg("8: 0......... (95) exit") +__naked void loop(void) +{ + asm volatile ( + "r1 = 0;" + "r2 = 7;" + "if r1 > 0x7 goto +4;" + "r1 += 1;" + "r2 *= 2;" + "goto +0;" + "goto -5;" + "r0 = 0;" + "exit;" + : + : __imm(bpf_trace_printk) + : __clobber_all); +} + +#ifdef CAN_USE_GOTOL +SEC("socket") +__log_level(2) +__msg("2: .123...... (25) if r1 > 0x7 goto pc+2") +__msg("3: ..2....... (bf) r0 = r2") +__msg("4: 0......... (06) gotol pc+1") +__msg("5: ...3...... (bf) r0 = r3") +__msg("6: 0......... 
(95) exit") +__naked void gotol(void) +{ + asm volatile ( + "r2 = 42;" + "r3 = 24;" + "if r1 > 0x7 goto +2;" + "r0 = r2;" + "gotol +1;" + "r0 = r3;" + "exit;" + : + : __imm(bpf_trace_printk) + : __clobber_all); +} +#endif + +SEC("socket") +__log_level(2) +__msg("0: .......... (b7) r1 = 1") +__msg("1: .1........ (e5) may_goto pc+1") +__msg("2: .......... (05) goto pc-3") +__msg("3: .1........ (bf) r0 = r1") +__msg("4: 0......... (95) exit") +__naked void may_goto(void) +{ + asm volatile ( + "1: r1 = 1;" + ".8byte %[may_goto];" + "goto 1b;" + "r0 = r1;" + "exit;" + : + : __imm(bpf_get_smp_processor_id), + __imm_insn(may_goto, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, +1 /* offset */, 0)) + : __clobber_all); +} + +SEC("socket") +__log_level(2) +__msg("1: 0......... (18) r2 = 0x7") +__msg("3: 0.2....... (0f) r0 += r2") +__naked void ldimm64(void) +{ + asm volatile ( + "r0 = 0;" + "r2 = 0x7 ll;" + "r0 += r2;" + "exit;" + : + :: __clobber_all); +} + +/* No rules specific for LD_ABS/LD_IND, default behaviour kicks in */ +SEC("socket") +__log_level(2) +__msg("2: 0123456789 (30) r0 = *(u8 *)skb[42]") +__msg("3: 012.456789 (0f) r7 += r0") +__msg("4: 012.456789 (b7) r3 = 42") +__msg("5: 0123456789 (50) r0 = *(u8 *)skb[r3 + 0]") +__msg("6: 0......7.. (0f) r7 += r0") +__naked void ldabs(void) +{ + asm volatile ( + "r6 = r1;" + "r7 = 0;" + "r0 = *(u8 *)skb[42];" + "r7 += r0;" + "r3 = 42;" + ".8byte %[ld_ind];" /* same as "r0 = *(u8 *)skb[r3];" */ + "r7 += r0;" + "r0 = r7;" + "exit;" + : + : __imm_insn(ld_ind, BPF_LD_IND(BPF_B, BPF_REG_3, 0)) + : __clobber_all); +} + + +#ifdef __BPF_FEATURE_ADDR_SPACE_CAST +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +__log_level(2) +__msg(" 6: .12345.... (85) call bpf_arena_alloc_pages") +__msg(" 7: 0......... (bf) r1 = addr_space_cast(r0, 0, 1)") +__msg(" 8: .1........ (b7) r2 = 42") +__naked void addr_space_cast(void) +{ + asm volatile ( + "r1 = %[arena] ll;" + "r2 = 0;" + "r3 = 1;" + "r4 = 0;" + "r5 = 0;" + "call %[bpf_arena_alloc_pages];" + "r1 = addr_space_cast(r0, 0, 1);" + "r2 = 42;" + "*(u64 *)(r1 +0) = r2;" + "r0 = 0;" + "exit;" + : + : __imm(bpf_arena_alloc_pages), + __imm_addr(arena) + : __clobber_all); +} +#endif + +static __used __naked int aux1(void) +{ + asm volatile ( + "r0 = r1;" + "r0 += r2;" + "exit;" + ::: __clobber_all); +} + +SEC("socket") +__log_level(2) +__msg("0: ....45.... (b7) r1 = 1") +__msg("1: .1..45.... (b7) r2 = 2") +__msg("2: .12.45.... (b7) r3 = 3") +/* Conservative liveness for subprog parameters. */ +__msg("3: .12345.... (85) call pc+2") +__msg("4: .......... (b7) r0 = 0") +__msg("5: 0......... (95) exit") +__msg("6: .12....... (bf) r0 = r1") +__msg("7: 0.2....... (0f) r0 += r2") +/* Conservative liveness for subprog return value. */ +__msg("8: 0......... 
(95) exit") +__naked void subprog1(void) +{ + asm volatile ( + "r1 = 1;" + "r2 = 2;" + "r3 = 3;" + "call aux1;" + "r0 = 0;" + "exit;" + ::: __clobber_all); +} + +/* to retain debug info for BTF generation */ +void kfunc_root(void) +{ + bpf_arena_alloc_pages(0, 0, 0, 0, 0); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/connect4_dropper.c b/tools/testing/selftests/bpf/progs/connect4_dropper.c index d3f4c5e4fb69..a3819a5d09c8 100644 --- a/tools/testing/selftests/bpf/progs/connect4_dropper.c +++ b/tools/testing/selftests/bpf/progs/connect4_dropper.c @@ -13,12 +13,14 @@ #define VERDICT_REJECT 0 #define VERDICT_PROCEED 1 +int port; + SEC("cgroup/connect4") int connect_v4_dropper(struct bpf_sock_addr *ctx) { if (ctx->type != SOCK_STREAM) return VERDICT_PROCEED; - if (ctx->user_port == bpf_htons(60120)) + if (ctx->user_port == bpf_htons(port)) return VERDICT_REJECT; return VERDICT_PROCEED; } diff --git a/tools/testing/selftests/bpf/progs/core_reloc_types.h b/tools/testing/selftests/bpf/progs/core_reloc_types.h index fd8e1b4c6762..5760ae015e09 100644 --- a/tools/testing/selftests/bpf/progs/core_reloc_types.h +++ b/tools/testing/selftests/bpf/progs/core_reloc_types.h @@ -347,6 +347,7 @@ struct core_reloc_nesting___err_too_deep { */ struct core_reloc_arrays_output { int a2; + int a3; char b123; int c1c; int d00d; @@ -455,6 +456,15 @@ struct core_reloc_arrays___err_bad_zero_sz_arr { struct core_reloc_arrays_substruct d[1][2]; }; +struct core_reloc_arrays___err_bad_signed_arr_elem_sz { + /* int -> short (signed!): not supported case */ + short a[5]; + char b[2][3][4]; + struct core_reloc_arrays_substruct c[3]; + struct core_reloc_arrays_substruct d[1][2]; + struct core_reloc_arrays_substruct f[][2]; +}; + /* * PRIMITIVES */ diff --git a/tools/testing/selftests/bpf/progs/cpumask_common.h b/tools/testing/selftests/bpf/progs/cpumask_common.h index 4ece7873ba60..86085b79f5ca 100644 --- a/tools/testing/selftests/bpf/progs/cpumask_common.h +++ b/tools/testing/selftests/bpf/progs/cpumask_common.h @@ -61,6 +61,7 @@ u32 bpf_cpumask_any_distribute(const struct cpumask *src) __ksym __weak; u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1, const struct cpumask *src2) __ksym __weak; u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym __weak; +int bpf_cpumask_populate(struct cpumask *cpumask, void *src, size_t src__sz) __ksym __weak; void bpf_rcu_read_lock(void) __ksym __weak; void bpf_rcu_read_unlock(void) __ksym __weak; diff --git a/tools/testing/selftests/bpf/progs/cpumask_failure.c b/tools/testing/selftests/bpf/progs/cpumask_failure.c index b40b52548ffb..8a2fd596c8a3 100644 --- a/tools/testing/selftests/bpf/progs/cpumask_failure.c +++ b/tools/testing/selftests/bpf/progs/cpumask_failure.c @@ -222,3 +222,41 @@ int BPF_PROG(test_invalid_nested_array, struct task_struct *task, u64 clone_flag return 0; } + +SEC("tp_btf/task_newtask") +__failure __msg("type=scalar expected=fp") +int BPF_PROG(test_populate_invalid_destination, struct task_struct *task, u64 clone_flags) +{ + struct bpf_cpumask *invalid = (struct bpf_cpumask *)0x123456; + u64 bits; + int ret; + + ret = bpf_cpumask_populate((struct cpumask *)invalid, &bits, sizeof(bits)); + if (!ret) + err = 2; + + return 0; +} + +SEC("tp_btf/task_newtask") +__failure __msg("leads to invalid memory access") +int BPF_PROG(test_populate_invalid_source, struct task_struct *task, u64 clone_flags) +{ + void *garbage = (void *)0x123456; + struct bpf_cpumask *local; + int ret; + + local = create_cpumask(); + if 
(!local) { + err = 1; + return 0; + } + + ret = bpf_cpumask_populate((struct cpumask *)local, garbage, 8); + if (!ret) + err = 2; + + bpf_cpumask_release(local); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/cpumask_success.c b/tools/testing/selftests/bpf/progs/cpumask_success.c index 80ee469b0b60..0e04c31b91c0 100644 --- a/tools/testing/selftests/bpf/progs/cpumask_success.c +++ b/tools/testing/selftests/bpf/progs/cpumask_success.c @@ -749,7 +749,6 @@ out: } SEC("tp_btf/task_newtask") -__success int BPF_PROG(test_refcount_null_tracking, struct task_struct *task, u64 clone_flags) { struct bpf_cpumask *mask1, *mask2; @@ -770,3 +769,122 @@ free_masks_return: bpf_cpumask_release(mask2); return 0; } + +SEC("tp_btf/task_newtask") +int BPF_PROG(test_populate_reject_small_mask, struct task_struct *task, u64 clone_flags) +{ + struct bpf_cpumask *local; + u8 toofewbits; + int ret; + + if (!is_test_task()) + return 0; + + local = create_cpumask(); + if (!local) + return 0; + + /* The kfunc should prevent this operation */ + ret = bpf_cpumask_populate((struct cpumask *)local, &toofewbits, sizeof(toofewbits)); + if (ret != -EACCES) + err = 2; + + bpf_cpumask_release(local); + + return 0; +} + +/* Mask is guaranteed to be large enough for bpf_cpumask_t. */ +#define CPUMASK_TEST_MASKLEN (sizeof(cpumask_t)) + +/* Add an extra word for the test_populate_reject_unaligned test. */ +u64 bits[CPUMASK_TEST_MASKLEN / 8 + 1]; +extern bool CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS __kconfig __weak; + +SEC("tp_btf/task_newtask") +int BPF_PROG(test_populate_reject_unaligned, struct task_struct *task, u64 clone_flags) +{ + struct bpf_cpumask *mask; + char *src; + int ret; + + if (!is_test_task()) + return 0; + + /* Skip if unaligned accesses are fine for this arch. */ + if (CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + return 0; + + mask = bpf_cpumask_create(); + if (!mask) { + err = 1; + return 0; + } + + /* Misalign the source array by a byte. */ + src = &((char *)bits)[1]; + + ret = bpf_cpumask_populate((struct cpumask *)mask, src, CPUMASK_TEST_MASKLEN); + if (ret != -EINVAL) + err = 2; + + bpf_cpumask_release(mask); + + return 0; +} + + +SEC("tp_btf/task_newtask") +int BPF_PROG(test_populate, struct task_struct *task, u64 clone_flags) +{ + struct bpf_cpumask *mask; + bool bit; + int ret; + int i; + + if (!is_test_task()) + return 0; + + /* Set only odd bits. */ + __builtin_memset(bits, 0xaa, CPUMASK_TEST_MASKLEN); + + mask = bpf_cpumask_create(); + if (!mask) { + err = 1; + return 0; + } + + /* Pass the entire bits array, the kfunc will only copy the valid bits. */ + ret = bpf_cpumask_populate((struct cpumask *)mask, bits, CPUMASK_TEST_MASKLEN); + if (ret) { + err = 2; + goto out; + } + + /* + * Test is there to appease the verifier. We cannot directly + * access NR_CPUS, the upper bound for nr_cpus, so we infer + * it from the size of cpumask_t. + */ + if (nr_cpus < 0 || nr_cpus >= CPUMASK_TEST_MASKLEN * 8) { + err = 3; + goto out; + } + + bpf_for(i, 0, nr_cpus) { + /* Odd-numbered bits should be set, even ones unset. 
*/ + bit = bpf_cpumask_test_cpu(i, (const struct cpumask *)mask); + if (bit == (i % 2 != 0)) + continue; + + err = 4; + break; + } + +out: + bpf_cpumask_release(mask); + + return 0; +} + +#undef CPUMASK_TEST_MASKLEN diff --git a/tools/testing/selftests/bpf/progs/dmabuf_iter.c b/tools/testing/selftests/bpf/progs/dmabuf_iter.c new file mode 100644 index 000000000000..13cdb11fdeb2 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/dmabuf_iter.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Google LLC */ +#include <vmlinux.h> +#include <bpf/bpf_core_read.h> +#include <bpf/bpf_helpers.h> + +/* From uapi/linux/dma-buf.h */ +#define DMA_BUF_NAME_LEN 32 + +char _license[] SEC("license") = "GPL"; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(key_size, DMA_BUF_NAME_LEN); + __type(value, bool); + __uint(max_entries, 5); +} testbuf_hash SEC(".maps"); + +/* + * Fields output by this iterator are delimited by newlines. Convert any + * newlines in user-provided printed strings to spaces. + */ +static void sanitize_string(char *src, size_t size) +{ + for (char *c = src; (size_t)(c - src) < size && *c; ++c) + if (*c == '\n') + *c = ' '; +} + +SEC("iter/dmabuf") +int dmabuf_collector(struct bpf_iter__dmabuf *ctx) +{ + const struct dma_buf *dmabuf = ctx->dmabuf; + struct seq_file *seq = ctx->meta->seq; + unsigned long inode = 0; + size_t size; + const char *pname, *exporter; + char name[DMA_BUF_NAME_LEN] = {'\0'}; + + if (!dmabuf) + return 0; + + if (BPF_CORE_READ_INTO(&inode, dmabuf, file, f_inode, i_ino) || + bpf_core_read(&size, sizeof(size), &dmabuf->size) || + bpf_core_read(&pname, sizeof(pname), &dmabuf->name) || + bpf_core_read(&exporter, sizeof(exporter), &dmabuf->exp_name)) + return 1; + + /* Buffers are not required to be named */ + if (pname) { + if (bpf_probe_read_kernel(name, sizeof(name), pname)) + return 1; + + /* Name strings can be provided by userspace */ + sanitize_string(name, sizeof(name)); + } + + BPF_SEQ_PRINTF(seq, "%lu\n%llu\n%s\n%s\n", inode, size, name, exporter); + return 0; +} + +SEC("syscall") +int iter_dmabuf_for_each(const void *ctx) +{ + struct dma_buf *d; + + bpf_for_each(dmabuf, d) { + char name[DMA_BUF_NAME_LEN]; + const char *pname; + bool *found; + long len; + int i; + + if (bpf_core_read(&pname, sizeof(pname), &d->name)) + return 1; + + /* Buffers are not required to be named */ + if (!pname) + continue; + + len = bpf_probe_read_kernel_str(name, sizeof(name), pname); + if (len < 0) + return 1; + + /* + * The entire name buffer is used as a map key. + * Zeroize any uninitialized trailing bytes after the NUL. 
+ */ + bpf_for(i, len, DMA_BUF_NAME_LEN) + name[i] = 0; + + found = bpf_map_lookup_elem(&testbuf_hash, name); + if (found) { + bool t = true; + + bpf_map_update_elem(&testbuf_hash, name, &t, BPF_EXIST); + } + } + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c index dfd817d0348c..bd8f15229f5c 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_fail.c +++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c @@ -192,7 +192,7 @@ done: /* Can't add a dynptr to a map */ SEC("?raw_tp") -__failure __msg("invalid indirect read from stack") +__failure __msg("invalid read from stack") int add_dynptr_to_map1(void *ctx) { struct bpf_dynptr ptr; @@ -210,7 +210,7 @@ int add_dynptr_to_map1(void *ctx) /* Can't add a struct with an embedded dynptr to a map */ SEC("?raw_tp") -__failure __msg("invalid indirect read from stack") +__failure __msg("invalid read from stack") int add_dynptr_to_map2(void *ctx) { struct test_info x; @@ -398,7 +398,7 @@ int data_slice_missing_null_check2(void *ctx) * dynptr argument */ SEC("?raw_tp") -__failure __msg("invalid indirect read from stack") +__failure __msg("invalid read from stack") int invalid_helper1(void *ctx) { struct bpf_dynptr ptr; diff --git a/tools/testing/selftests/bpf/progs/dynptr_success.c b/tools/testing/selftests/bpf/progs/dynptr_success.c index bfcc85686cf0..8315273cb900 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_success.c +++ b/tools/testing/selftests/bpf/progs/dynptr_success.c @@ -1,20 +1,21 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2022 Facebook */ +#include <vmlinux.h> #include <string.h> #include <stdbool.h> -#include <linux/bpf.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> #include "bpf_misc.h" -#include "bpf_kfuncs.h" #include "errno.h" +#define PAGE_SIZE_64K 65536 + char _license[] SEC("license") = "GPL"; int pid, err, val; -struct sample { +struct ringbuf_sample { int pid; int seq; long value; @@ -121,7 +122,7 @@ int test_dynptr_data(void *ctx) static int ringbuf_callback(__u32 index, void *data) { - struct sample *sample; + struct ringbuf_sample *sample; struct bpf_dynptr *ptr = (struct bpf_dynptr *)data; @@ -138,7 +139,7 @@ SEC("?tp/syscalls/sys_enter_nanosleep") int test_ringbuf(void *ctx) { struct bpf_dynptr ptr; - struct sample *sample; + struct ringbuf_sample *sample; if (bpf_get_current_pid_tgid() >> 32 != pid) return 0; @@ -567,3 +568,515 @@ int BPF_PROG(test_dynptr_skb_tp_btf, void *skb, void *location) return 1; } + +static inline int bpf_memcmp(const char *a, const char *b, u32 size) +{ + int i; + + bpf_for(i, 0, size) { + if (a[i] != b[i]) + return a[i] < b[i] ? 
-1 : 1; + } + return 0; +} + +SEC("?tp/syscalls/sys_enter_nanosleep") +int test_dynptr_copy(void *ctx) +{ + char data[] = "hello there, world!!"; + char buf[32] = {'\0'}; + __u32 sz = sizeof(data); + struct bpf_dynptr src, dst; + + bpf_ringbuf_reserve_dynptr(&ringbuf, sz, 0, &src); + bpf_ringbuf_reserve_dynptr(&ringbuf, sz, 0, &dst); + + /* Test basic case of copying contiguous memory backed dynptrs */ + err = bpf_dynptr_write(&src, 0, data, sz, 0); + err = err ?: bpf_dynptr_copy(&dst, 0, &src, 0, sz); + err = err ?: bpf_dynptr_read(buf, sz, &dst, 0, 0); + err = err ?: bpf_memcmp(data, buf, sz); + + /* Test that offsets are handled correctly */ + err = err ?: bpf_dynptr_copy(&dst, 3, &src, 5, sz - 5); + err = err ?: bpf_dynptr_read(buf, sz - 5, &dst, 3, 0); + err = err ?: bpf_memcmp(data + 5, buf, sz - 5); + + bpf_ringbuf_discard_dynptr(&src, 0); + bpf_ringbuf_discard_dynptr(&dst, 0); + return 0; +} + +SEC("xdp") +int test_dynptr_copy_xdp(struct xdp_md *xdp) +{ + struct bpf_dynptr ptr_buf, ptr_xdp; + char data[] = "qwertyuiopasdfghjkl"; + char buf[32] = {'\0'}; + __u32 len = sizeof(data), xdp_data_size; + int i, chunks = 200; + + /* ptr_xdp is backed by non-contiguous memory */ + bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp); + xdp_data_size = bpf_dynptr_size(&ptr_xdp); + bpf_ringbuf_reserve_dynptr(&ringbuf, len * chunks, 0, &ptr_buf); + + /* Destination dynptr is backed by non-contiguous memory */ + bpf_for(i, 0, chunks) { + err = bpf_dynptr_write(&ptr_buf, i * len, data, len, 0); + if (err) + goto out; + } + + err = bpf_dynptr_copy(&ptr_xdp, 0, &ptr_buf, 0, len * chunks); + if (err) + goto out; + + bpf_for(i, 0, chunks) { + __builtin_memset(buf, 0, sizeof(buf)); + err = bpf_dynptr_read(&buf, len, &ptr_xdp, i * len, 0); + if (err) + goto out; + if (bpf_memcmp(data, buf, len) != 0) + goto out; + } + + /* Source dynptr is backed by non-contiguous memory */ + __builtin_memset(buf, 0, sizeof(buf)); + bpf_for(i, 0, chunks) { + err = bpf_dynptr_write(&ptr_buf, i * len, buf, len, 0); + if (err) + goto out; + } + + err = bpf_dynptr_copy(&ptr_buf, 0, &ptr_xdp, 0, len * chunks); + if (err) + goto out; + + bpf_for(i, 0, chunks) { + __builtin_memset(buf, 0, sizeof(buf)); + err = bpf_dynptr_read(&buf, len, &ptr_buf, i * len, 0); + if (err) + goto out; + if (bpf_memcmp(data, buf, len) != 0) + goto out; + } + + /* Both source and destination dynptrs are backed by non-contiguous memory */ + err = bpf_dynptr_copy(&ptr_xdp, 2, &ptr_xdp, len, len * (chunks - 1)); + if (err) + goto out; + + bpf_for(i, 0, chunks - 1) { + __builtin_memset(buf, 0, sizeof(buf)); + err = bpf_dynptr_read(&buf, len, &ptr_xdp, 2 + i * len, 0); + if (err) + goto out; + if (bpf_memcmp(data, buf, len) != 0) + goto out; + } + + if (bpf_dynptr_copy(&ptr_xdp, xdp_data_size - 3000, &ptr_xdp, 0, len * chunks) != -E2BIG) + err = 1; + +out: + bpf_ringbuf_discard_dynptr(&ptr_buf, 0); + return XDP_DROP; +} + +char memset_zero_data[] = "data to be zeroed"; + +SEC("?tp/syscalls/sys_enter_nanosleep") +int test_dynptr_memset_zero(void *ctx) +{ + __u32 data_sz = sizeof(memset_zero_data); + char zeroes[32] = {'\0'}; + struct bpf_dynptr ptr; + + err = bpf_dynptr_from_mem(memset_zero_data, data_sz, 0, &ptr); + err = err ?: bpf_dynptr_memset(&ptr, 0, data_sz, 0); + err = err ?: bpf_memcmp(zeroes, memset_zero_data, data_sz); + + return 0; +} + +#define DYNPTR_MEMSET_VAL 42 + +char memset_notzero_data[] = "data to be overwritten"; + +SEC("?tp/syscalls/sys_enter_nanosleep") +int test_dynptr_memset_notzero(void *ctx) +{ + u32 data_sz = sizeof(memset_notzero_data); 
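+ /* After the memset below, every byte of memset_notzero_data should read back as DYNPTR_MEMSET_VAL (42). */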
+ struct bpf_dynptr ptr; + char expected[32]; + + __builtin_memset(expected, DYNPTR_MEMSET_VAL, data_sz); + + err = bpf_dynptr_from_mem(memset_notzero_data, data_sz, 0, &ptr); + err = err ?: bpf_dynptr_memset(&ptr, 0, data_sz, DYNPTR_MEMSET_VAL); + err = err ?: bpf_memcmp(expected, memset_notzero_data, data_sz); + + return 0; +} + +char memset_zero_offset_data[] = "data to be zeroed partially"; + +SEC("?tp/syscalls/sys_enter_nanosleep") +int test_dynptr_memset_zero_offset(void *ctx) +{ + char expected[] = "data to \0\0\0\0eroed partially"; + __u32 data_sz = sizeof(memset_zero_offset_data); + struct bpf_dynptr ptr; + + err = bpf_dynptr_from_mem(memset_zero_offset_data, data_sz, 0, &ptr); + err = err ?: bpf_dynptr_memset(&ptr, 8, 4, 0); + err = err ?: bpf_memcmp(expected, memset_zero_offset_data, data_sz); + + return 0; +} + +char memset_zero_adjusted_data[] = "data to be zeroed partially"; + +SEC("?tp/syscalls/sys_enter_nanosleep") +int test_dynptr_memset_zero_adjusted(void *ctx) +{ + char expected[] = "data\0\0\0\0be zeroed partially"; + __u32 data_sz = sizeof(memset_zero_adjusted_data); + struct bpf_dynptr ptr; + + err = bpf_dynptr_from_mem(memset_zero_adjusted_data, data_sz, 0, &ptr); + err = err ?: bpf_dynptr_adjust(&ptr, 4, 8); + err = err ?: bpf_dynptr_memset(&ptr, 0, bpf_dynptr_size(&ptr), 0); + err = err ?: bpf_memcmp(expected, memset_zero_adjusted_data, data_sz); + + return 0; +} + +char memset_overflow_data[] = "memset overflow data"; + +SEC("?tp/syscalls/sys_enter_nanosleep") +int test_dynptr_memset_overflow(void *ctx) +{ + __u32 data_sz = sizeof(memset_overflow_data); + struct bpf_dynptr ptr; + int ret; + + err = bpf_dynptr_from_mem(memset_overflow_data, data_sz, 0, &ptr); + ret = bpf_dynptr_memset(&ptr, 0, data_sz + 1, 0); + if (ret != -E2BIG) + err = 1; + + return 0; +} + +SEC("?tp/syscalls/sys_enter_nanosleep") +int test_dynptr_memset_overflow_offset(void *ctx) +{ + __u32 data_sz = sizeof(memset_overflow_data); + struct bpf_dynptr ptr; + int ret; + + err = bpf_dynptr_from_mem(memset_overflow_data, data_sz, 0, &ptr); + ret = bpf_dynptr_memset(&ptr, 1, data_sz, 0); + if (ret != -E2BIG) + err = 1; + + return 0; +} + +SEC("?cgroup_skb/egress") +int test_dynptr_memset_readonly(struct __sk_buff *skb) +{ + struct bpf_dynptr ptr; + int ret; + + err = bpf_dynptr_from_skb(skb, 0, &ptr); + + /* cgroup skbs are read-only; memset should fail */ + ret = bpf_dynptr_memset(&ptr, 0, bpf_dynptr_size(&ptr), 0); + if (ret != -EINVAL) + err = 1; + + return 0; +} + +#define min_t(type, x, y) ({ \ + type __x = (x); \ + type __y = (y); \ + __x < __y ? __x : __y; }) + +SEC("xdp") +int test_dynptr_memset_xdp_chunks(struct xdp_md *xdp) +{ + u32 data_sz, chunk_sz, offset = 0; + const int max_chunks = 200; + struct bpf_dynptr ptr_xdp; + char expected_buf[32]; + char buf[32]; + int i; + + __builtin_memset(expected_buf, DYNPTR_MEMSET_VAL, sizeof(expected_buf)); + + /* ptr_xdp is backed by non-contiguous memory */ + bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp); + data_sz = bpf_dynptr_size(&ptr_xdp); + + err = bpf_dynptr_memset(&ptr_xdp, 0, data_sz, DYNPTR_MEMSET_VAL); + if (err) { + /* bpf_dynptr_memset() eventually calls bpf_xdp_pointer(), + * which returns -EFAULT when data_sz is greater than 0xffff. + * With a 64K page size, data_sz exceeds that limit, so the + * error is expected; zero it out and + * return success. 
+ */ + if (data_sz >= PAGE_SIZE_64K) + err = 0; + goto out; + } + + bpf_for(i, 0, max_chunks) { + offset = i * sizeof(buf); + if (offset >= data_sz) + goto out; + chunk_sz = min_t(u32, sizeof(buf), data_sz - offset); + err = bpf_dynptr_read(&buf, chunk_sz, &ptr_xdp, offset, 0); + if (err) + goto out; + err = bpf_memcmp(buf, expected_buf, sizeof(buf)); + if (err) + goto out; + } +out: + return XDP_DROP; +} + +void *user_ptr; +/* Contains the copy of the data pointed by user_ptr. + * Size 384 to make it not fit into a single kernel chunk when copying + * but less than the maximum bpf stack size (512). + */ +char expected_str[384]; +__u32 test_len[7] = {0/* placeholder */, 0, 1, 2, 255, 256, 257}; + +typedef int (*bpf_read_dynptr_fn_t)(struct bpf_dynptr *dptr, u32 off, + u32 size, const void *unsafe_ptr); + +/* Returns the offset just before the end of the maximum sized xdp fragment. + * Any write larger than 32 bytes will be split between 2 fragments. + */ +__u32 xdp_near_frag_end_offset(void) +{ + const __u32 headroom = 256; + const __u32 max_frag_size = __PAGE_SIZE - headroom - sizeof(struct skb_shared_info); + + /* 32 bytes before the approximate end of the fragment */ + return max_frag_size - 32; +} + +/* Use __always_inline on test_dynptr_probe[_str][_xdp]() and callbacks + * of type bpf_read_dynptr_fn_t to prevent compiler from generating + * indirect calls that make program fail to load with "unknown opcode" error. + */ +static __always_inline void test_dynptr_probe(void *ptr, bpf_read_dynptr_fn_t bpf_read_dynptr_fn) +{ + char buf[sizeof(expected_str)]; + struct bpf_dynptr ptr_buf; + int i; + + if (bpf_get_current_pid_tgid() >> 32 != pid) + return; + + err = bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(buf), 0, &ptr_buf); + + bpf_for(i, 0, ARRAY_SIZE(test_len)) { + __u32 len = test_len[i]; + + err = err ?: bpf_read_dynptr_fn(&ptr_buf, 0, test_len[i], ptr); + if (len > sizeof(buf)) + break; + err = err ?: bpf_dynptr_read(&buf, len, &ptr_buf, 0, 0); + + if (err || bpf_memcmp(expected_str, buf, len)) + err = 1; + + /* Reset buffer and dynptr */ + __builtin_memset(buf, 0, sizeof(buf)); + err = err ?: bpf_dynptr_write(&ptr_buf, 0, buf, len, 0); + } + bpf_ringbuf_discard_dynptr(&ptr_buf, 0); +} + +static __always_inline void test_dynptr_probe_str(void *ptr, + bpf_read_dynptr_fn_t bpf_read_dynptr_fn) +{ + char buf[sizeof(expected_str)]; + struct bpf_dynptr ptr_buf; + __u32 cnt, i; + + if (bpf_get_current_pid_tgid() >> 32 != pid) + return; + + bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(buf), 0, &ptr_buf); + + bpf_for(i, 0, ARRAY_SIZE(test_len)) { + __u32 len = test_len[i]; + + cnt = bpf_read_dynptr_fn(&ptr_buf, 0, len, ptr); + if (cnt != len) + err = 1; + + if (len > sizeof(buf)) + continue; + err = err ?: bpf_dynptr_read(&buf, len, &ptr_buf, 0, 0); + if (!len) + continue; + if (err || bpf_memcmp(expected_str, buf, len - 1) || buf[len - 1] != '\0') + err = 1; + } + bpf_ringbuf_discard_dynptr(&ptr_buf, 0); +} + +static __always_inline void test_dynptr_probe_xdp(struct xdp_md *xdp, void *ptr, + bpf_read_dynptr_fn_t bpf_read_dynptr_fn) +{ + struct bpf_dynptr ptr_xdp; + char buf[sizeof(expected_str)]; + __u32 off, i; + + if (bpf_get_current_pid_tgid() >> 32 != pid) + return; + + off = xdp_near_frag_end_offset(); + err = bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp); + + bpf_for(i, 0, ARRAY_SIZE(test_len)) { + __u32 len = test_len[i]; + + err = err ?: bpf_read_dynptr_fn(&ptr_xdp, off, len, ptr); + if (len > sizeof(buf)) + continue; + err = err ?: bpf_dynptr_read(&buf, len, &ptr_xdp, off, 0); + if (err || 
bpf_memcmp(expected_str, buf, len)) + err = 1; + /* Reset buffer and dynptr */ + __builtin_memset(buf, 0, sizeof(buf)); + err = err ?: bpf_dynptr_write(&ptr_xdp, off, buf, len, 0); + } +} + +static __always_inline void test_dynptr_probe_str_xdp(struct xdp_md *xdp, void *ptr, + bpf_read_dynptr_fn_t bpf_read_dynptr_fn) +{ + struct bpf_dynptr ptr_xdp; + char buf[sizeof(expected_str)]; + __u32 cnt, off, i; + + if (bpf_get_current_pid_tgid() >> 32 != pid) + return; + + off = xdp_near_frag_end_offset(); + err = bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp); + if (err) + return; + + bpf_for(i, 0, ARRAY_SIZE(test_len)) { + __u32 len = test_len[i]; + + cnt = bpf_read_dynptr_fn(&ptr_xdp, off, len, ptr); + if (cnt != len) + err = 1; + + if (len > sizeof(buf)) + continue; + err = err ?: bpf_dynptr_read(&buf, len, &ptr_xdp, off, 0); + + if (!len) + continue; + if (err || bpf_memcmp(expected_str, buf, len - 1) || buf[len - 1] != '\0') + err = 1; + + __builtin_memset(buf, 0, sizeof(buf)); + err = err ?: bpf_dynptr_write(&ptr_xdp, off, buf, len, 0); + } +} + +SEC("xdp") +int test_probe_read_user_dynptr(struct xdp_md *xdp) +{ + test_dynptr_probe(user_ptr, bpf_probe_read_user_dynptr); + if (!err) + test_dynptr_probe_xdp(xdp, user_ptr, bpf_probe_read_user_dynptr); + return XDP_PASS; +} + +SEC("xdp") +int test_probe_read_kernel_dynptr(struct xdp_md *xdp) +{ + test_dynptr_probe(expected_str, bpf_probe_read_kernel_dynptr); + if (!err) + test_dynptr_probe_xdp(xdp, expected_str, bpf_probe_read_kernel_dynptr); + return XDP_PASS; +} + +SEC("xdp") +int test_probe_read_user_str_dynptr(struct xdp_md *xdp) +{ + test_dynptr_probe_str(user_ptr, bpf_probe_read_user_str_dynptr); + if (!err) + test_dynptr_probe_str_xdp(xdp, user_ptr, bpf_probe_read_user_str_dynptr); + return XDP_PASS; +} + +SEC("xdp") +int test_probe_read_kernel_str_dynptr(struct xdp_md *xdp) +{ + test_dynptr_probe_str(expected_str, bpf_probe_read_kernel_str_dynptr); + if (!err) + test_dynptr_probe_str_xdp(xdp, expected_str, bpf_probe_read_kernel_str_dynptr); + return XDP_PASS; +} + +SEC("fentry.s/" SYS_PREFIX "sys_nanosleep") +int test_copy_from_user_dynptr(void *ctx) +{ + test_dynptr_probe(user_ptr, bpf_copy_from_user_dynptr); + return 0; +} + +SEC("fentry.s/" SYS_PREFIX "sys_nanosleep") +int test_copy_from_user_str_dynptr(void *ctx) +{ + test_dynptr_probe_str(user_ptr, bpf_copy_from_user_str_dynptr); + return 0; +} + +static int bpf_copy_data_from_user_task(struct bpf_dynptr *dptr, u32 off, + u32 size, const void *unsafe_ptr) +{ + struct task_struct *task = bpf_get_current_task_btf(); + + return bpf_copy_from_user_task_dynptr(dptr, off, size, unsafe_ptr, task); +} + +static int bpf_copy_data_from_user_task_str(struct bpf_dynptr *dptr, u32 off, + u32 size, const void *unsafe_ptr) +{ + struct task_struct *task = bpf_get_current_task_btf(); + + return bpf_copy_from_user_task_str_dynptr(dptr, off, size, unsafe_ptr, task); +} + +SEC("fentry.s/" SYS_PREFIX "sys_nanosleep") +int test_copy_from_user_task_dynptr(void *ctx) +{ + test_dynptr_probe(user_ptr, bpf_copy_data_from_user_task); + return 0; +} + +SEC("fentry.s/" SYS_PREFIX "sys_nanosleep") +int test_copy_from_user_task_str_dynptr(void *ctx) +{ + test_dynptr_probe_str(user_ptr, bpf_copy_data_from_user_task_str); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/epilogue_exit.c b/tools/testing/selftests/bpf/progs/epilogue_exit.c index 33d3a57bee90..35fec7c75bef 100644 --- a/tools/testing/selftests/bpf/progs/epilogue_exit.c +++ b/tools/testing/selftests/bpf/progs/epilogue_exit.c @@ -4,8 +4,8 @@ #include 
<vmlinux.h> #include <bpf/bpf_tracing.h> #include "bpf_misc.h" -#include "../bpf_testmod/bpf_testmod.h" -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod.h" +#include "../test_kmods/bpf_testmod_kfunc.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/epilogue_tailcall.c b/tools/testing/selftests/bpf/progs/epilogue_tailcall.c index 7275dd594de0..153514691ba4 100644 --- a/tools/testing/selftests/bpf/progs/epilogue_tailcall.c +++ b/tools/testing/selftests/bpf/progs/epilogue_tailcall.c @@ -4,8 +4,8 @@ #include <vmlinux.h> #include <bpf/bpf_tracing.h> #include "bpf_misc.h" -#include "../bpf_testmod/bpf_testmod.h" -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod.h" +#include "../test_kmods/bpf_testmod_kfunc.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/exceptions_fail.c b/tools/testing/selftests/bpf/progs/exceptions_fail.c index fe0f3fa5aab6..8a0fdff89927 100644 --- a/tools/testing/selftests/bpf/progs/exceptions_fail.c +++ b/tools/testing/selftests/bpf/progs/exceptions_fail.c @@ -131,7 +131,7 @@ int reject_subprog_with_lock(void *ctx) } SEC("?tc") -__failure __msg("BPF_EXIT instruction cannot be used inside bpf_rcu_read_lock-ed region") +__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_rcu_read_lock-ed region") int reject_with_rcu_read_lock(void *ctx) { bpf_rcu_read_lock(); @@ -147,7 +147,7 @@ __noinline static int throwing_subprog(struct __sk_buff *ctx) } SEC("?tc") -__failure __msg("BPF_EXIT instruction cannot be used inside bpf_rcu_read_lock-ed region") +__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_rcu_read_lock-ed region") int reject_subprog_with_rcu_read_lock(void *ctx) { bpf_rcu_read_lock(); diff --git a/tools/testing/selftests/bpf/progs/fd_htab_lookup.c b/tools/testing/selftests/bpf/progs/fd_htab_lookup.c new file mode 100644 index 000000000000..a4a9e1db626f --- /dev/null +++ b/tools/testing/selftests/bpf/progs/fd_htab_lookup.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2025. 
Huawei Technologies Co., Ltd */ +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +struct inner_map_type { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(key_size, 4); + __uint(value_size, 4); + __uint(max_entries, 1); +} inner_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS); + __uint(max_entries, 64); + __type(key, int); + __type(value, int); + __array(values, struct inner_map_type); +} outer_map SEC(".maps") = { + .values = { + [0] = &inner_map, + }, +}; diff --git a/tools/testing/selftests/bpf/progs/find_vma.c b/tools/testing/selftests/bpf/progs/find_vma.c index 38034fb82530..02b82774469c 100644 --- a/tools/testing/selftests/bpf/progs/find_vma.c +++ b/tools/testing/selftests/bpf/progs/find_vma.c @@ -25,7 +25,7 @@ static long check_vma(struct task_struct *task, struct vm_area_struct *vma, { if (vma->vm_file) bpf_probe_read_kernel_str(d_iname, DNAME_INLINE_LEN - 1, - vma->vm_file->f_path.dentry->d_iname); + vma->vm_file->f_path.dentry->d_shortname.string); /* check for VM_EXEC */ if (vma->vm_flags & VM_EXEC) diff --git a/tools/testing/selftests/bpf/progs/for_each_hash_modify.c b/tools/testing/selftests/bpf/progs/for_each_hash_modify.c new file mode 100644 index 000000000000..82307166f789 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/for_each_hash_modify.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Intel Corporation */ +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 128); + __type(key, __u64); + __type(value, __u64); +} hashmap SEC(".maps"); + +static int cb(struct bpf_map *map, __u64 *key, __u64 *val, void *arg) +{ + bpf_map_delete_elem(map, key); + bpf_map_update_elem(map, key, val, 0); + return 0; +} + +SEC("tc") +int test_pkt_access(struct __sk_buff *skb) +{ + (void)skb; + + bpf_for_each_map_elem(&hashmap, cb, NULL, 0); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/free_timer.c b/tools/testing/selftests/bpf/progs/free_timer.c new file mode 100644 index 000000000000..4501ae8fc414 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/free_timer.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2025. 
Huawei Technologies Co., Ltd */ +#include <linux/bpf.h> +#include <time.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> + +#define MAX_ENTRIES 8 + +struct map_value { + struct bpf_timer timer; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, int); + __type(value, struct map_value); + __uint(max_entries, MAX_ENTRIES); +} map SEC(".maps"); + +static int timer_cb(void *map, void *key, struct map_value *value) +{ + volatile int sum = 0; + int i; + + bpf_for(i, 0, 1024 * 1024) sum += i; + + return 0; +} + +static int start_cb(int key) +{ + struct map_value *value; + + value = bpf_map_lookup_elem(&map, (void *)&key); + if (!value) + return 0; + + bpf_timer_init(&value->timer, &map, CLOCK_MONOTONIC); + bpf_timer_set_callback(&value->timer, timer_cb); + /* Hope 100us will be enough to wake up and run the overwrite thread */ + bpf_timer_start(&value->timer, 100000, BPF_F_TIMER_CPU_PIN); + + return 0; +} + +static int overwrite_cb(int key) +{ + struct map_value zero = {}; + + /* Free the timer, which may be running on another CPU */ + bpf_map_update_elem(&map, (void *)&key, &zero, BPF_ANY); + + return 0; +} + +SEC("syscall") +int BPF_PROG(start_timer) +{ + bpf_loop(MAX_ENTRIES, start_cb, NULL, 0); + return 0; +} + +SEC("syscall") +int BPF_PROG(overwrite_timer) +{ + bpf_loop(MAX_ENTRIES, overwrite_cb, NULL, 0); + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/irq.c b/tools/testing/selftests/bpf/progs/irq.c new file mode 100644 index 000000000000..74d912b22de9 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/irq.c @@ -0,0 +1,566 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" +#include "bpf_experimental.h" + +unsigned long global_flags; + +extern void bpf_local_irq_save(unsigned long *) __weak __ksym; +extern void bpf_local_irq_restore(unsigned long *) __weak __ksym; +extern int bpf_copy_from_user_str(void *dst, u32 dst__sz, const void *unsafe_ptr__ign, u64 flags) __weak __ksym; + +struct bpf_res_spin_lock lockA __hidden SEC(".data.A"); +struct bpf_res_spin_lock lockB __hidden SEC(".data.B"); + +SEC("?tc") +__failure __msg("arg#0 doesn't point to an irq flag on stack") +int irq_save_bad_arg(struct __sk_buff *ctx) +{ + bpf_local_irq_save(&global_flags); + return 0; +} + +SEC("?tc") +__failure __msg("arg#0 doesn't point to an irq flag on stack") +int irq_restore_bad_arg(struct __sk_buff *ctx) +{ + bpf_local_irq_restore(&global_flags); + return 0; +} + +SEC("?tc") +__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region") +int irq_restore_missing_2(struct __sk_buff *ctx) +{ + unsigned long flags1; + unsigned long flags2; + + bpf_local_irq_save(&flags1); + bpf_local_irq_save(&flags2); + return 0; +} + +SEC("?tc") +__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region") +int irq_restore_missing_3(struct __sk_buff *ctx) +{ + unsigned long flags1; + unsigned long flags2; + unsigned long flags3; + + bpf_local_irq_save(&flags1); + bpf_local_irq_save(&flags2); + bpf_local_irq_save(&flags3); + return 0; +} + +SEC("?tc") +__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region") +int irq_restore_missing_3_minus_2(struct __sk_buff *ctx) +{ + unsigned long flags1; + unsigned long flags2; + unsigned long flags3; + + bpf_local_irq_save(&flags1); + 
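/* flags2 and flags3 are restored below, but flags1 never is, so the exit must still be rejected. */ +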
bpf_local_irq_save(&flags2); + bpf_local_irq_save(&flags3); + bpf_local_irq_restore(&flags3); + bpf_local_irq_restore(&flags2); + return 0; +} + +static __noinline void local_irq_save(unsigned long *flags) +{ + bpf_local_irq_save(flags); +} + +static __noinline void local_irq_restore(unsigned long *flags) +{ + bpf_local_irq_restore(flags); +} + +SEC("?tc") +__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region") +int irq_restore_missing_1_subprog(struct __sk_buff *ctx) +{ + unsigned long flags; + + local_irq_save(&flags); + return 0; +} + +SEC("?tc") +__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region") +int irq_restore_missing_2_subprog(struct __sk_buff *ctx) +{ + unsigned long flags1; + unsigned long flags2; + + local_irq_save(&flags1); + local_irq_save(&flags2); + return 0; +} + +SEC("?tc") +__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region") +int irq_restore_missing_3_subprog(struct __sk_buff *ctx) +{ + unsigned long flags1; + unsigned long flags2; + unsigned long flags3; + + local_irq_save(&flags1); + local_irq_save(&flags2); + local_irq_save(&flags3); + return 0; +} + +SEC("?tc") +__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region") +int irq_restore_missing_3_minus_2_subprog(struct __sk_buff *ctx) +{ + unsigned long flags1; + unsigned long flags2; + unsigned long flags3; + + local_irq_save(&flags1); + local_irq_save(&flags2); + local_irq_save(&flags3); + local_irq_restore(&flags3); + local_irq_restore(&flags2); + return 0; +} + +SEC("?tc") +__success +int irq_balance(struct __sk_buff *ctx) +{ + unsigned long flags; + + local_irq_save(&flags); + local_irq_restore(&flags); + return 0; +} + +SEC("?tc") +__success +int irq_balance_n(struct __sk_buff *ctx) +{ + unsigned long flags1; + unsigned long flags2; + unsigned long flags3; + + local_irq_save(&flags1); + local_irq_save(&flags2); + local_irq_save(&flags3); + local_irq_restore(&flags3); + local_irq_restore(&flags2); + local_irq_restore(&flags1); + return 0; +} + +static __noinline void local_irq_balance(void) +{ + unsigned long flags; + + local_irq_save(&flags); + local_irq_restore(&flags); +} + +static __noinline void local_irq_balance_n(void) +{ + unsigned long flags1; + unsigned long flags2; + unsigned long flags3; + + local_irq_save(&flags1); + local_irq_save(&flags2); + local_irq_save(&flags3); + local_irq_restore(&flags3); + local_irq_restore(&flags2); + local_irq_restore(&flags1); +} + +SEC("?tc") +__success +int irq_balance_subprog(struct __sk_buff *ctx) +{ + local_irq_balance(); + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +__failure __msg("sleepable helper bpf_copy_from_user#") +int irq_sleepable_helper(void *ctx) +{ + unsigned long flags; + u32 data; + + local_irq_save(&flags); + bpf_copy_from_user(&data, sizeof(data), NULL); + local_irq_restore(&flags); + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +__failure __msg("kernel func bpf_copy_from_user_str is sleepable within IRQ-disabled region") +int irq_sleepable_kfunc(void *ctx) +{ + unsigned long flags; + u32 data; + + local_irq_save(&flags); + bpf_copy_from_user_str(&data, sizeof(data), NULL, 0); + local_irq_restore(&flags); + return 0; +} + +int __noinline global_local_irq_balance(void) +{ + local_irq_balance_n(); + return 0; +} + +SEC("?tc") +__success +int irq_global_subprog(struct __sk_buff *ctx) +{ + unsigned long flags; + + 
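/* Calling a global subprog that balances its own IRQ save/restore pairs is permitted while this prog still holds IRQs disabled. */ +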
bpf_local_irq_save(&flags); + global_local_irq_balance(); + bpf_local_irq_restore(&flags); + return 0; +} + +SEC("?tc") +__failure __msg("cannot restore irq state out of order") +int irq_restore_ooo(struct __sk_buff *ctx) +{ + unsigned long flags1; + unsigned long flags2; + + bpf_local_irq_save(&flags1); + bpf_local_irq_save(&flags2); + bpf_local_irq_restore(&flags1); + bpf_local_irq_restore(&flags2); + return 0; +} + +SEC("?tc") +__failure __msg("cannot restore irq state out of order") +int irq_restore_ooo_3(struct __sk_buff *ctx) +{ + unsigned long flags1; + unsigned long flags2; + unsigned long flags3; + + bpf_local_irq_save(&flags1); + bpf_local_irq_save(&flags2); + bpf_local_irq_restore(&flags2); + bpf_local_irq_save(&flags3); + bpf_local_irq_restore(&flags1); + bpf_local_irq_restore(&flags3); + return 0; +} + +static __noinline void local_irq_save_3(unsigned long *flags1, unsigned long *flags2, + unsigned long *flags3) +{ + local_irq_save(flags1); + local_irq_save(flags2); + local_irq_save(flags3); +} + +SEC("?tc") +__success +int irq_restore_3_subprog(struct __sk_buff *ctx) +{ + unsigned long flags1; + unsigned long flags2; + unsigned long flags3; + + local_irq_save_3(&flags1, &flags2, &flags3); + bpf_local_irq_restore(&flags3); + bpf_local_irq_restore(&flags2); + bpf_local_irq_restore(&flags1); + return 0; +} + +SEC("?tc") +__failure __msg("cannot restore irq state out of order") +int irq_restore_4_subprog(struct __sk_buff *ctx) +{ + unsigned long flags1; + unsigned long flags2; + unsigned long flags3; + unsigned long flags4; + + local_irq_save_3(&flags1, &flags2, &flags3); + bpf_local_irq_restore(&flags3); + bpf_local_irq_save(&flags4); + bpf_local_irq_restore(&flags4); + bpf_local_irq_restore(&flags1); + return 0; +} + +SEC("?tc") +__failure __msg("cannot restore irq state out of order") +int irq_restore_ooo_3_subprog(struct __sk_buff *ctx) +{ + unsigned long flags1; + unsigned long flags2; + unsigned long flags3; + + local_irq_save_3(&flags1, &flags2, &flags3); + bpf_local_irq_restore(&flags3); + bpf_local_irq_restore(&flags2); + bpf_local_irq_save(&flags3); + bpf_local_irq_restore(&flags1); + return 0; +} + +SEC("?tc") +__failure __msg("expected an initialized") +int irq_restore_invalid(struct __sk_buff *ctx) +{ + unsigned long flags1; + unsigned long flags = 0xfaceb00c; + + bpf_local_irq_save(&flags1); + bpf_local_irq_restore(&flags); + return 0; +} + +SEC("?tc") +__failure __msg("expected uninitialized") +int irq_save_invalid(struct __sk_buff *ctx) +{ + unsigned long flags1; + + bpf_local_irq_save(&flags1); + bpf_local_irq_save(&flags1); + return 0; +} + +SEC("?tc") +__failure __msg("expected an initialized") +int irq_restore_iter(struct __sk_buff *ctx) +{ + struct bpf_iter_num it; + + bpf_iter_num_new(&it, 0, 42); + bpf_local_irq_restore((unsigned long *)&it); + return 0; +} + +SEC("?tc") +__failure __msg("Unreleased reference id=1") +int irq_save_iter(struct __sk_buff *ctx) +{ + struct bpf_iter_num it; + + /* Ensure same sized slot has st->ref_obj_id set, so we reject based on + * slot_type != STACK_IRQ_FLAG... 
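+	 * The _Static_assert below guarantees the iterator and an irq flag + * occupy identically sized stack slots. 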
+ */ + _Static_assert(sizeof(it) == sizeof(unsigned long), "broken iterator size"); + + bpf_iter_num_new(&it, 0, 42); + bpf_local_irq_save((unsigned long *)&it); + bpf_local_irq_restore((unsigned long *)&it); + return 0; +} + +SEC("?tc") +__failure __msg("expected an initialized") +int irq_flag_overwrite(struct __sk_buff *ctx) +{ + unsigned long flags; + + bpf_local_irq_save(&flags); + flags = 0xdeadbeef; + bpf_local_irq_restore(&flags); + return 0; +} + +SEC("?tc") +__failure __msg("expected an initialized") +int irq_flag_overwrite_partial(struct __sk_buff *ctx) +{ + unsigned long flags; + + bpf_local_irq_save(&flags); + *(((char *)&flags) + 1) = 0xff; + bpf_local_irq_restore(&flags); + return 0; +} + +SEC("?tc") +__failure __msg("cannot restore irq state out of order") +int irq_ooo_refs_array(struct __sk_buff *ctx) +{ + unsigned long flags[4]; + struct { int i; } *p; + + /* refs=1 */ + bpf_local_irq_save(&flags[0]); + + /* refs=1,2 */ + p = bpf_obj_new(typeof(*p)); + if (!p) { + bpf_local_irq_restore(&flags[0]); + return 0; + } + + /* refs=1,2,3 */ + bpf_local_irq_save(&flags[1]); + + /* refs=1,2,3,4 */ + bpf_local_irq_save(&flags[2]); + + /* Now when we remove ref=2, the verifier must not break the ordering in + * the refs array between 1,3,4. With an older implementation, the + * verifier would swap the last element with the removed element, but to + * maintain the stack property we need to use memmove. + */ + bpf_obj_drop(p); + + /* Save and restore to reset active_irq_id to 3, as the ordering is now + * refs=1,4,3. When restoring the linear scan will find prev_id in order + * as 3 instead of 4. + */ + bpf_local_irq_save(&flags[3]); + bpf_local_irq_restore(&flags[3]); + + /* With the incorrect implementation, we can release flags[1], flags[2], + * and flags[0], i.e. in the wrong order. 
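+	 * A correct implementation preserves the acquisition order and rejects + * this restore sequence. 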
+ */ + bpf_local_irq_restore(&flags[1]); + bpf_local_irq_restore(&flags[2]); + bpf_local_irq_restore(&flags[0]); + return 0; +} + +int __noinline +global_subprog(int i) +{ + if (i) + bpf_printk("%p", &i); + return i; +} + +int __noinline +global_sleepable_helper_subprog(int i) +{ + if (i) + bpf_copy_from_user(&i, sizeof(i), NULL); + return i; +} + +int __noinline +global_sleepable_kfunc_subprog(int i) +{ + if (i) + bpf_copy_from_user_str(&i, sizeof(i), NULL, 0); + global_subprog(i); + return i; +} + +int __noinline +global_subprog_calling_sleepable_global(int i) +{ + if (!i) + global_sleepable_kfunc_subprog(i); + return i; +} + +SEC("?syscall") +__success +int irq_non_sleepable_global_subprog(void *ctx) +{ + unsigned long flags; + + bpf_local_irq_save(&flags); + global_subprog(0); + bpf_local_irq_restore(&flags); + return 0; +} + +SEC("?syscall") +__failure __msg("global functions that may sleep are not allowed in non-sleepable context") +int irq_sleepable_helper_global_subprog(void *ctx) +{ + unsigned long flags; + + bpf_local_irq_save(&flags); + global_sleepable_helper_subprog(0); + bpf_local_irq_restore(&flags); + return 0; +} + +SEC("?syscall") +__failure __msg("global functions that may sleep are not allowed in non-sleepable context") +int irq_sleepable_global_subprog_indirect(void *ctx) +{ + unsigned long flags; + + bpf_local_irq_save(&flags); + global_subprog_calling_sleepable_global(0); + bpf_local_irq_restore(&flags); + return 0; +} + +SEC("?tc") +__failure __msg("cannot restore irq state out of order") +int irq_ooo_lock_cond_inv(struct __sk_buff *ctx) +{ + unsigned long flags1, flags2; + + if (bpf_res_spin_lock_irqsave(&lockA, &flags1)) + return 0; + if (bpf_res_spin_lock_irqsave(&lockB, &flags2)) { + bpf_res_spin_unlock_irqrestore(&lockA, &flags1); + return 0; + } + + bpf_res_spin_unlock_irqrestore(&lockB, &flags1); + bpf_res_spin_unlock_irqrestore(&lockA, &flags2); + return 0; +} + +SEC("?tc") +__failure __msg("function calls are not allowed") +int irq_wrong_kfunc_class_1(struct __sk_buff *ctx) +{ + unsigned long flags1; + + if (bpf_res_spin_lock_irqsave(&lockA, &flags1)) + return 0; + /* For now, bpf_local_irq_restore is not allowed in critical section, + * but this test ensures error will be caught with kfunc_class when it's + * opened up. Tested by temporarily permitting this kfunc in critical + * section. 
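+	 * Until then, the generic 'function calls are not allowed' check + * produces the failure matched above. 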
+ */ + bpf_local_irq_restore(&flags1); + bpf_res_spin_unlock_irqrestore(&lockA, &flags1); + return 0; +} + +SEC("?tc") +__failure __msg("function calls are not allowed") +int irq_wrong_kfunc_class_2(struct __sk_buff *ctx) +{ + unsigned long flags1, flags2; + + bpf_local_irq_save(&flags1); + if (bpf_res_spin_lock_irqsave(&lockA, &flags2)) + return 0; + bpf_local_irq_restore(&flags2); + bpf_res_spin_unlock_irqrestore(&lockA, &flags1); + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c index 7c969c127573..7dd92a303bf6 100644 --- a/tools/testing/selftests/bpf/progs/iters.c +++ b/tools/testing/selftests/bpf/progs/iters.c @@ -524,11 +524,11 @@ int iter_subprog_iters(const void *ctx) } struct { - __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(type, BPF_MAP_TYPE_HASH); __type(key, int); __type(value, int); __uint(max_entries, 1000); -} arr_map SEC(".maps"); +} hash_map SEC(".maps"); SEC("?raw_tp") __failure __msg("invalid mem access 'scalar'") @@ -539,7 +539,7 @@ int iter_err_too_permissive1(const void *ctx) MY_PID_GUARD(); - map_val = bpf_map_lookup_elem(&arr_map, &key); + map_val = bpf_map_lookup_elem(&hash_map, &key); if (!map_val) return 0; @@ -561,12 +561,12 @@ int iter_err_too_permissive2(const void *ctx) MY_PID_GUARD(); - map_val = bpf_map_lookup_elem(&arr_map, &key); + map_val = bpf_map_lookup_elem(&hash_map, &key); if (!map_val) return 0; bpf_repeat(1000000) { - map_val = bpf_map_lookup_elem(&arr_map, &key); + map_val = bpf_map_lookup_elem(&hash_map, &key); } *map_val = 123; @@ -585,7 +585,7 @@ int iter_err_too_permissive3(const void *ctx) MY_PID_GUARD(); bpf_repeat(1000000) { - map_val = bpf_map_lookup_elem(&arr_map, &key); + map_val = bpf_map_lookup_elem(&hash_map, &key); found = true; } @@ -606,7 +606,7 @@ int iter_tricky_but_fine(const void *ctx) MY_PID_GUARD(); bpf_repeat(1000000) { - map_val = bpf_map_lookup_elem(&arr_map, &key); + map_val = bpf_map_lookup_elem(&hash_map, &key); if (map_val) { found = true; break; @@ -1175,6 +1175,122 @@ __naked int loop_state_deps2(void) } SEC("?raw_tp") +__failure +__msg("math between fp pointer and register with unbounded") +__flag(BPF_F_TEST_STATE_FREQ) +__naked int loop_state_deps3(void) +{ + /* This is equivalent to a C program below. + * + * if (random() != 24) { // assume false branch is placed first + * i = iter_new(); // fp[-8] + * while (iter_next(i)); + * iter_destroy(i); + * return; + * } + * + * for (i = 10; i > 0; i--); // increase dfs_depth for child states + * + * i = iter_new(); // fp[-8] + * b = -24; // r8 + * for (;;) { // checkpoint (L) + * if (iter_next(i)) // checkpoint (N) + * break; + * if (random() == 77) { // assume false branch is placed first + * *(u64 *)(r10 + b) = 7; // this is not safe when b == -25 + * iter_destroy(i); + * return; + * } + * if (random() == 42) { // assume false branch is placed first + * b = -25; + * } + * } + * iter_destroy(i); + * + * In case of a buggy verifier first loop might poison + * env->cur_state->loop_entry with a state having 0 branches + * and small dfs_depth. This would trigger NOT_EXACT states + * comparison for some states within second loop. 
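+	 * (NOT_EXACT comparisons may match states whose differing registers + * are not yet marked precise.) 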
+ * Specifically, checkpoint (L) might be problematic if: + * - branch with '*(u64 *)(r10 + b) = 7' is not explored yet; + * - checkpoint (L) is first reached in state {b=-24}; + * - traversal is pruned at checkpoint (N) setting checkpoint's (L) + * branch count to 0, thus making it eligible for use in pruning; + * - checkpoint (L) is next reached in state {b=-25}, + * this would cause NOT_EXACT comparison with a state {b=-24} + * while 'b' is not marked precise yet. + */ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "if r0 == 24 goto 2f;" + "r1 = r10;" + "r1 += -8;" + "r2 = 0;" + "r3 = 5;" + "call %[bpf_iter_num_new];" + "1:" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_next];" + "if r0 != 0 goto 1b;" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_destroy];" + "r0 = 0;" + "exit;" + "2:" + /* loop to increase dfs_depth */ + "r0 = 10;" + "3:" + "r0 -= 1;" + "if r0 != 0 goto 3b;" + /* end of loop */ + "r1 = r10;" + "r1 += -8;" + "r2 = 0;" + "r3 = 10;" + "call %[bpf_iter_num_new];" + "r8 = -24;" + "main_loop_%=:" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_next];" + "if r0 == 0 goto main_loop_end_%=;" + /* first if */ + "call %[bpf_get_prandom_u32];" + "if r0 == 77 goto unsafe_write_%=;" + /* second if */ + "call %[bpf_get_prandom_u32];" + "if r0 == 42 goto poison_r8_%=;" + /* iterate */ + "goto main_loop_%=;" + "main_loop_end_%=:" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_destroy];" + "r0 = 0;" + "exit;" + + "unsafe_write_%=:" + "r0 = r10;" + "r0 += r8;" + "r1 = 7;" + "*(u64 *)(r0 + 0) = r1;" + "goto main_loop_end_%=;" + + "poison_r8_%=:" + "r8 = -25;" + "goto main_loop_%=;" + : + : __imm(bpf_get_prandom_u32), + __imm(bpf_iter_num_new), + __imm(bpf_iter_num_next), + __imm(bpf_iter_num_destroy) + : __clobber_all + ); +} + +SEC("?raw_tp") __success __naked int triple_continue(void) { @@ -1512,4 +1628,302 @@ int iter_destroy_bad_arg(const void *ctx) return 0; } +SEC("raw_tp") +__success +int clean_live_states(const void *ctx) +{ + char buf[1]; + int i, j, k, l, m, n, o; + + bpf_for(i, 0, 10) + bpf_for(j, 0, 10) + bpf_for(k, 0, 10) + bpf_for(l, 0, 10) + bpf_for(m, 0, 10) + bpf_for(n, 0, 10) + bpf_for(o, 0, 10) { + if (unlikely(bpf_get_prandom_u32())) + buf[0] = 42; + bpf_printk("%s", buf); + } + return 0; +} + +SEC("?raw_tp") +__flag(BPF_F_TEST_STATE_FREQ) +__failure __msg("misaligned stack access off 0+-31+0 size 8") +__naked int absent_mark_in_the_middle_state(void) +{ + /* This is equivalent to C program below. 
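+	 * (An 8-byte store at fp-31 is misaligned; fp-32 is the safe offset.) 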
+ * + * r8 = bpf_get_prandom_u32(); + * r6 = -32; + * bpf_iter_num_new(&fp[-8], 0, 10); + * if (unlikely(bpf_get_prandom_u32())) + * r6 = -31; + * while (bpf_iter_num_next(&fp[-8])) { + * if (unlikely(bpf_get_prandom_u32())) + * *(fp + r6) = 7; + * } + * bpf_iter_num_destroy(&fp[-8]) + * return 0 + */ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "r8 = r0;" + "r7 = 0;" + "r6 = -32;" + "r0 = 0;" + "*(u64 *)(r10 - 16) = r0;" + "r1 = r10;" + "r1 += -8;" + "r2 = 0;" + "r3 = 10;" + "call %[bpf_iter_num_new];" + "call %[bpf_get_prandom_u32];" + "if r0 == r8 goto change_r6_%=;" + "loop_%=:" + "call noop;" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_next];" + "if r0 == 0 goto loop_end_%=;" + "call %[bpf_get_prandom_u32];" + "if r0 == r8 goto use_r6_%=;" + "goto loop_%=;" + "loop_end_%=:" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_destroy];" + "r0 = 0;" + "exit;" + "use_r6_%=:" + "r0 = r10;" + "r0 += r6;" + "r1 = 7;" + "*(u64 *)(r0 + 0) = r1;" + "goto loop_%=;" + "change_r6_%=:" + "r6 = -31;" + "goto loop_%=;" + : + : __imm(bpf_iter_num_new), + __imm(bpf_iter_num_next), + __imm(bpf_iter_num_destroy), + __imm(bpf_get_prandom_u32) + : __clobber_all + ); +} + +__used __naked +static int noop(void) +{ + asm volatile ( + "r0 = 0;" + "exit;" + ); +} + +SEC("?raw_tp") +__flag(BPF_F_TEST_STATE_FREQ) +__failure __msg("misaligned stack access off 0+-31+0 size 8") +__naked int absent_mark_in_the_middle_state2(void) +{ + /* This is equivalent to C program below. + * + * r8 = bpf_get_prandom_u32(); + * r6 = -32; + * bpf_iter_num_new(&fp[-8], 0, 10); + * if (unlikely(bpf_get_prandom_u32())) { + * r6 = -31; + * jump_into_loop: + * goto +0; + * goto loop; + * } + * if (unlikely(bpf_get_prandom_u32())) + * goto jump_into_loop; + * loop: + * while (bpf_iter_num_next(&fp[-8])) { + * if (unlikely(bpf_get_prandom_u32())) + * *(fp + r6) = 7; + * } + * bpf_iter_num_destroy(&fp[-8]) + * return 0 + */ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "r8 = r0;" + "r7 = 0;" + "r6 = -32;" + "r0 = 0;" + "*(u64 *)(r10 - 16) = r0;" + "r1 = r10;" + "r1 += -8;" + "r2 = 0;" + "r3 = 10;" + "call %[bpf_iter_num_new];" + "call %[bpf_get_prandom_u32];" + "if r0 == r8 goto change_r6_%=;" + "call %[bpf_get_prandom_u32];" + "if r0 == r8 goto jump_into_loop_%=;" + "loop_%=:" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_next];" + "if r0 == 0 goto loop_end_%=;" + "call %[bpf_get_prandom_u32];" + "if r0 == r8 goto use_r6_%=;" + "goto loop_%=;" + "loop_end_%=:" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_destroy];" + "r0 = 0;" + "exit;" + "use_r6_%=:" + "r0 = r10;" + "r0 += r6;" + "r1 = 7;" + "*(u64 *)(r0 + 0) = r1;" + "goto loop_%=;" + "change_r6_%=:" + "r6 = -31;" + "jump_into_loop_%=: " + "goto +0;" + "goto loop_%=;" + : + : __imm(bpf_iter_num_new), + __imm(bpf_iter_num_next), + __imm(bpf_iter_num_destroy), + __imm(bpf_get_prandom_u32) + : __clobber_all + ); +} + +SEC("?raw_tp") +__flag(BPF_F_TEST_STATE_FREQ) +__failure __msg("misaligned stack access off 0+-31+0 size 8") +__naked int absent_mark_in_the_middle_state3(void) +{ + /* + * bpf_iter_num_new(&fp[-8], 0, 10) + * loop1(-32, &fp[-8]) + * loop1_wrapper(&fp[-8]) + * bpf_iter_num_destroy(&fp[-8]) + */ + asm volatile ( + "r1 = r10;" + "r1 += -8;" + "r2 = 0;" + "r3 = 10;" + "call %[bpf_iter_num_new];" + /* call #1 */ + "r1 = -32;" + "r2 = r10;" + "r2 += -8;" + "call loop1;" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_destroy];" + /* call #2 */ + "r1 = r10;" + "r1 += -8;" + "r2 = 0;" + "r3 = 10;" + "call %[bpf_iter_num_new];" + "r1 = r10;" + "r1 
+= -8;" + "call loop1_wrapper;" + /* return */ + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_destroy];" + "r0 = 0;" + "exit;" + : + : __imm(bpf_iter_num_new), + __imm(bpf_iter_num_destroy), + __imm(bpf_get_prandom_u32) + : __clobber_all + ); +} + +__used __naked +static int loop1(void) +{ + /* + * int loop1(num, iter) { + * r6 = num; + * r7 = iter; + * while (bpf_iter_num_next(r7)) { + * if (unlikely(bpf_get_prandom_u32())) + * *(fp + r6) = 7; + * } + * return 0 + * } + */ + asm volatile ( + "r6 = r1;" + "r7 = r2;" + "call %[bpf_get_prandom_u32];" + "r8 = r0;" + "loop_%=:" + "r1 = r7;" + "call %[bpf_iter_num_next];" + "if r0 == 0 goto loop_end_%=;" + "call %[bpf_get_prandom_u32];" + "if r0 == r8 goto use_r6_%=;" + "goto loop_%=;" + "loop_end_%=:" + "r0 = 0;" + "exit;" + "use_r6_%=:" + "r0 = r10;" + "r0 += r6;" + "r1 = 7;" + "*(u64 *)(r0 + 0) = r1;" + "goto loop_%=;" + : + : __imm(bpf_iter_num_next), + __imm(bpf_get_prandom_u32) + : __clobber_all + ); +} + +__used __naked +static int loop1_wrapper(void) +{ + /* + * int loop1_wrapper(iter) { + * r6 = -32; + * r7 = iter; + * if (unlikely(bpf_get_prandom_u32())) + * r6 = -31; + * loop1(r6, r7); + * return 0; + * } + */ + asm volatile ( + "r6 = -32;" + "r7 = r1;" + "call %[bpf_get_prandom_u32];" + "r8 = r0;" + "call %[bpf_get_prandom_u32];" + "if r0 == r8 goto change_r6_%=;" + "loop_%=:" + "r1 = r6;" + "r2 = r7;" + "call loop1;" + "r0 = 0;" + "exit;" + "change_r6_%=:" + "r6 = -31;" + "goto loop_%=;" + : + : __imm(bpf_iter_num_next), + __imm(bpf_get_prandom_u32) + : __clobber_all + ); +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/iters_testmod.c b/tools/testing/selftests/bpf/progs/iters_testmod.c index df1d3db60b1b..9e4b45201e69 100644 --- a/tools/testing/selftests/bpf/progs/iters_testmod.c +++ b/tools/testing/selftests/bpf/progs/iters_testmod.c @@ -4,7 +4,7 @@ #include "bpf_experimental.h" #include <bpf/bpf_helpers.h> #include "bpf_misc.h" -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/jit_probe_mem.c b/tools/testing/selftests/bpf/progs/jit_probe_mem.c index f9789e668297..82190d79de37 100644 --- a/tools/testing/selftests/bpf/progs/jit_probe_mem.c +++ b/tools/testing/selftests/bpf/progs/jit_probe_mem.c @@ -3,7 +3,7 @@ #include <vmlinux.h> #include <bpf/bpf_tracing.h> #include <bpf/bpf_helpers.h> -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" static struct prog_test_ref_kfunc __kptr *v; long total_sum = -1; diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_destructive.c b/tools/testing/selftests/bpf/progs/kfunc_call_destructive.c index 7632d9ecb253..b9670e9a6e3d 100644 --- a/tools/testing/selftests/bpf/progs/kfunc_call_destructive.c +++ b/tools/testing/selftests/bpf/progs/kfunc_call_destructive.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <vmlinux.h> #include <bpf/bpf_helpers.h> -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" SEC("tc") int kfunc_destructive_test(void) diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_fail.c b/tools/testing/selftests/bpf/progs/kfunc_call_fail.c index 08fae306539c..a1963497f0bf 100644 --- a/tools/testing/selftests/bpf/progs/kfunc_call_fail.c +++ b/tools/testing/selftests/bpf/progs/kfunc_call_fail.c @@ -2,7 +2,7 @@ /* Copyright (c) 2021 Facebook */ #include <vmlinux.h> #include <bpf/bpf_helpers.h> -#include 
"../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" struct syscall_test_args { __u8 data[16]; diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_race.c b/tools/testing/selftests/bpf/progs/kfunc_call_race.c index d532af07decf..48f64827cd93 100644 --- a/tools/testing/selftests/bpf/progs/kfunc_call_race.c +++ b/tools/testing/selftests/bpf/progs/kfunc_call_race.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <vmlinux.h> #include <bpf/bpf_helpers.h> -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" SEC("tc") int kfunc_call_fail(struct __sk_buff *ctx) diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test.c b/tools/testing/selftests/bpf/progs/kfunc_call_test.c index f502f755f567..8b86113a0126 100644 --- a/tools/testing/selftests/bpf/progs/kfunc_call_test.c +++ b/tools/testing/selftests/bpf/progs/kfunc_call_test.c @@ -2,7 +2,7 @@ /* Copyright (c) 2021 Facebook */ #include <vmlinux.h> #include <bpf/bpf_helpers.h> -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" SEC("tc") int kfunc_call_test4(struct __sk_buff *skb) diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c b/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c index 2380c75e74ce..8e150e85b50d 100644 --- a/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c +++ b/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2021 Facebook */ -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" extern const int bpf_prog_active __ksym; int active_res = -1; diff --git a/tools/testing/selftests/bpf/progs/linked_list_peek.c b/tools/testing/selftests/bpf/progs/linked_list_peek.c new file mode 100644 index 000000000000..264e81bfb287 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/linked_list_peek.c @@ -0,0 +1,113 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */ + +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" +#include "bpf_experimental.h" + +struct node_data { + struct bpf_list_node l; + int key; +}; + +#define private(name) SEC(".data." 
#name) __hidden __attribute__((aligned(8))) +private(A) struct bpf_spin_lock glock; +private(A) struct bpf_list_head ghead __contains(node_data, l); + +#define list_entry(ptr, type, member) container_of(ptr, type, member) +#define NR_NODES 16 + +int zero = 0; + +SEC("syscall") +__retval(0) +long list_peek(void *ctx) +{ + struct bpf_list_node *l_n; + struct node_data *n; + int i, err = 0; + + bpf_spin_lock(&glock); + l_n = bpf_list_front(&ghead); + bpf_spin_unlock(&glock); + if (l_n) + return __LINE__; + + bpf_spin_lock(&glock); + l_n = bpf_list_back(&ghead); + bpf_spin_unlock(&glock); + if (l_n) + return __LINE__; + + for (i = zero; i < NR_NODES && can_loop; i++) { + n = bpf_obj_new(typeof(*n)); + if (!n) + return __LINE__; + n->key = i; + bpf_spin_lock(&glock); + bpf_list_push_back(&ghead, &n->l); + bpf_spin_unlock(&glock); + } + + bpf_spin_lock(&glock); + + l_n = bpf_list_front(&ghead); + if (!l_n) { + err = __LINE__; + goto done; + } + + n = list_entry(l_n, struct node_data, l); + if (n->key != 0) { + err = __LINE__; + goto done; + } + + l_n = bpf_list_back(&ghead); + if (!l_n) { + err = __LINE__; + goto done; + } + + n = list_entry(l_n, struct node_data, l); + if (n->key != NR_NODES - 1) { + err = __LINE__; + goto done; + } + +done: + bpf_spin_unlock(&glock); + return err; +} + +#define TEST_FB(op, dolock) \ +SEC("syscall") \ +__failure __msg(MSG) \ +long test_##op##_spinlock_##dolock(void *ctx) \ +{ \ + struct bpf_list_node *l_n; \ + __u64 jiffies = 0; \ + \ + if (dolock) \ + bpf_spin_lock(&glock); \ + l_n = bpf_list_##op(&ghead); \ + if (l_n) \ + jiffies = bpf_jiffies64(); \ + if (dolock) \ + bpf_spin_unlock(&glock); \ + \ + return !!jiffies; \ +} + +#define MSG "call bpf_list_{{(front|back).+}}; R0{{(_w)?}}=ptr_or_null_node_data(id={{[0-9]+}},non_own_ref" +TEST_FB(front, true) +TEST_FB(back, true) +#undef MSG + +#define MSG "bpf_spin_lock at off=0 must be held for bpf_list_head" +TEST_FB(front, false) +TEST_FB(back, false) +#undef MSG + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/local_kptr_stash.c b/tools/testing/selftests/bpf/progs/local_kptr_stash.c index b092a72b2c9d..d736506a4c80 100644 --- a/tools/testing/selftests/bpf/progs/local_kptr_stash.c +++ b/tools/testing/selftests/bpf/progs/local_kptr_stash.c @@ -6,7 +6,7 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_core_read.h> #include "../bpf_experimental.h" -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" struct plain_local; diff --git a/tools/testing/selftests/bpf/progs/map_kptr.c b/tools/testing/selftests/bpf/progs/map_kptr.c index ab0ce1d01a4a..edaba481db9d 100644 --- a/tools/testing/selftests/bpf/progs/map_kptr.c +++ b/tools/testing/selftests/bpf/progs/map_kptr.c @@ -2,7 +2,7 @@ #include <vmlinux.h> #include <bpf/bpf_tracing.h> #include <bpf/bpf_helpers.h> -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" struct map_value { struct prog_test_ref_kfunc __kptr_untrusted *unref_ptr; diff --git a/tools/testing/selftests/bpf/progs/map_kptr_fail.c b/tools/testing/selftests/bpf/progs/map_kptr_fail.c index 450bb373b179..4c0ff01f1a96 100644 --- a/tools/testing/selftests/bpf/progs/map_kptr_fail.c +++ b/tools/testing/selftests/bpf/progs/map_kptr_fail.c @@ -4,7 +4,7 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_core_read.h> #include "bpf_misc.h" -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" struct map_value { char buf[8]; @@ -345,7 +345,7 @@ int 
reject_indirect_global_func_access(struct __sk_buff *ctx) } SEC("?tc") -__failure __msg("Unreleased reference id=5 alloc_insn=") +__failure __msg("Unreleased reference id=4 alloc_insn=") int kptr_xchg_ref_state(struct __sk_buff *ctx) { struct prog_test_ref_kfunc *p; diff --git a/tools/testing/selftests/bpf/progs/mem_rdonly_untrusted.c b/tools/testing/selftests/bpf/progs/mem_rdonly_untrusted.c new file mode 100644 index 000000000000..4f94c971ae86 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/mem_rdonly_untrusted.c @@ -0,0 +1,229 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <vmlinux.h> +#include <bpf/bpf_core_read.h> +#include "bpf_misc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" + +SEC("tp_btf/sys_enter") +__success +__log_level(2) +__msg("r8 = *(u64 *)(r7 +0) ; R7_w=ptr_nameidata(off={{[0-9]+}}) R8_w=rdonly_untrusted_mem(sz=0)") +__msg("r9 = *(u8 *)(r8 +0) ; R8_w=rdonly_untrusted_mem(sz=0) R9_w=scalar") +int btf_id_to_ptr_mem(void *ctx) +{ + struct task_struct *task; + struct nameidata *idata; + u64 ret, off; + + task = bpf_get_current_task_btf(); + idata = task->nameidata; + off = bpf_core_field_offset(struct nameidata, pathname); + /* + * asm block to have reliable match target for __msg, equivalent of: + * ret = task->nameidata->pathname[0]; + */ + asm volatile ( + "r7 = %[idata];" + "r7 += %[off];" + "r8 = *(u64 *)(r7 + 0);" + "r9 = *(u8 *)(r8 + 0);" + "%[ret] = r9;" + : [ret]"=r"(ret) + : [idata]"r"(idata), + [off]"r"(off) + : "r7", "r8", "r9"); + return ret; +} + +SEC("socket") +__success +__retval(0) +int ldx_is_ok_bad_addr(void *ctx) +{ + char *p; + + if (!bpf_core_enum_value_exists(enum bpf_features, BPF_FEAT_RDONLY_CAST_TO_VOID)) + return 42; + + p = bpf_rdonly_cast(0, 0); + return p[0x7fff]; +} + +SEC("socket") +__success +__retval(1) +int ldx_is_ok_good_addr(void *ctx) +{ + int v, *p; + + v = 1; + p = bpf_rdonly_cast(&v, 0); + return *p; +} + +SEC("socket") +__success +int offset_not_tracked(void *ctx) +{ + int *p, i, s; + + p = bpf_rdonly_cast(0, 0); + s = 0; + bpf_for(i, 0, 1000 * 1000 * 1000) { + p++; + s += *p; + } + return s; +} + +SEC("socket") +__failure +__msg("cannot write into rdonly_untrusted_mem") +int stx_not_ok(void *ctx) +{ + int v, *p; + + v = 1; + p = bpf_rdonly_cast(&v, 0); + *p = 1; + return 0; +} + +SEC("socket") +__failure +__msg("cannot write into rdonly_untrusted_mem") +int atomic_not_ok(void *ctx) +{ + int v, *p; + + v = 1; + p = bpf_rdonly_cast(&v, 0); + __sync_fetch_and_add(p, 1); + return 0; +} + +SEC("socket") +__failure +__msg("cannot write into rdonly_untrusted_mem") +int atomic_rmw_not_ok(void *ctx) +{ + long v, *p; + + v = 1; + p = bpf_rdonly_cast(&v, 0); + return __sync_val_compare_and_swap(p, 0, 42); +} + +SEC("socket") +__failure +__msg("invalid access to memory, mem_size=0 off=0 size=4") +__msg("R1 min value is outside of the allowed memory range") +int kfunc_param_not_ok(void *ctx) +{ + int *p; + + p = bpf_rdonly_cast(0, 0); + bpf_kfunc_trusted_num_test(p); + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +__failure +__msg("R1 type=rdonly_untrusted_mem expected=") +int helper_param_not_ok(void *ctx) +{ + char *p; + + p = bpf_rdonly_cast(0, 0); + /* + * Any helper with ARG_CONST_SIZE_OR_ZERO constraint will do, + * the most permissive constraint + */ + bpf_copy_from_user(p, 0, (void *)42); + return 0; +} + +static __noinline u64 *get_some_addr(void) +{ + if (bpf_get_prandom_u32()) + return bpf_rdonly_cast(0, bpf_core_type_id_kernel(struct sock)); + else + return bpf_rdonly_cast(0, 0); +} + +SEC("socket") 
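+/* get_some_addr() returns either a typed read-only pointer or plain + * rdonly_untrusted_mem; the dereference below must verify for both. + */ 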
+__success +__retval(0) +int mixed_mem_type(void *ctx) +{ + u64 *p; + + /* Try to avoid compiler hoisting load to if branches by using __noinline func. */ + p = get_some_addr(); + return *p; +} + +__attribute__((__aligned__(8))) +u8 global[] = { + 0x11, 0x22, 0x33, 0x44, + 0x55, 0x66, 0x77, 0x88, + 0x99 +}; + +__always_inline +static u64 combine(void *p) +{ + u64 acc; + + acc = 0; +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + acc |= (*(u64 *)p >> 56) << 24; + acc |= (*(u32 *)p >> 24) << 16; + acc |= (*(u16 *)p >> 8) << 8; + acc |= *(u8 *)p; +#else + acc |= (*(u64 *)p & 0xff) << 24; + acc |= (*(u32 *)p & 0xff) << 16; + acc |= (*(u16 *)p & 0xff) << 8; + acc |= *(u8 *)p; +#endif + return acc; +} + +SEC("socket") +__retval(0x88442211) +int diff_size_access(void *ctx) +{ + return combine(bpf_rdonly_cast(&global, 0)); +} + +SEC("socket") +__retval(0x99553322) +int misaligned_access(void *ctx) +{ + return combine(bpf_rdonly_cast(&global, 0) + 1); +} + +__weak int return_one(void) +{ + return 1; +} + +SEC("socket") +__success +__retval(1) +int null_check(void *ctx) +{ + int *p; + + p = bpf_rdonly_cast(0, 0); + if (p == 0) + /* make this a function call to avoid compiler + * moving r0 assignment before check. + */ + return return_one(); + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/missed_kprobe.c b/tools/testing/selftests/bpf/progs/missed_kprobe.c index 7f9ef701f5de..51a4fe64c917 100644 --- a/tools/testing/selftests/bpf/progs/missed_kprobe.c +++ b/tools/testing/selftests/bpf/progs/missed_kprobe.c @@ -2,7 +2,7 @@ #include "vmlinux.h" #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c b/tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c index 8ea71cbd6c45..29c18d869ec1 100644 --- a/tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c +++ b/tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c @@ -2,7 +2,7 @@ #include "vmlinux.h" #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" char _license[] SEC("license") = "GPL"; @@ -46,3 +46,9 @@ int test5(struct pt_regs *ctx) { return 0; } + +SEC("kprobe.session/bpf_kfunc_common_test") +int test6(struct pt_regs *ctx) +{ + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/nested_acquire.c b/tools/testing/selftests/bpf/progs/nested_acquire.c index 8e521a21d995..49ad7b9adf56 100644 --- a/tools/testing/selftests/bpf/progs/nested_acquire.c +++ b/tools/testing/selftests/bpf/progs/nested_acquire.c @@ -4,7 +4,7 @@ #include <bpf/bpf_tracing.h> #include <bpf/bpf_helpers.h> #include "bpf_misc.h" -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/net_timestamping.c b/tools/testing/selftests/bpf/progs/net_timestamping.c new file mode 100644 index 000000000000..b4c2f0f2be11 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/net_timestamping.c @@ -0,0 +1,248 @@ +#include "vmlinux.h" +#include "bpf_tracing_net.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include "bpf_misc.h" +#include "bpf_kfuncs.h" +#include <errno.h> + +__u32 monitored_pid = 0; + +int nr_active; +int nr_snd; +int nr_passive; +int nr_sched; +int nr_txsw; +int 
nr_ack; + +struct sk_stg { + __u64 sendmsg_ns; /* record ts when sendmsg is called */ +}; + +struct sk_tskey { + u64 cookie; + u32 tskey; +}; + +struct delay_info { + u64 sendmsg_ns; /* record ts when sendmsg is called */ + u32 sched_delay; /* SCHED_CB - sendmsg_ns */ + u32 snd_sw_delay; /* SND_SW_CB - SCHED_CB */ + u32 ack_delay; /* ACK_CB - SND_SW_CB */ +}; + +struct { + __uint(type, BPF_MAP_TYPE_SK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, struct sk_stg); +} sk_stg_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, struct sk_tskey); + __type(value, struct delay_info); + __uint(max_entries, 1024); +} time_map SEC(".maps"); + +static u64 delay_tolerance_nsec = 10000000000; /* 10 seconds as an example */ + +extern int bpf_sock_ops_enable_tx_tstamp(struct bpf_sock_ops_kern *skops, u64 flags) __ksym; + +static int bpf_test_sockopt(void *ctx, const struct sock *sk, int expected) +{ + int tmp, new = SK_BPF_CB_TX_TIMESTAMPING; + int opt = SK_BPF_CB_FLAGS; + int level = SOL_SOCKET; + + if (bpf_setsockopt(ctx, level, opt, &new, sizeof(new)) != expected) + return 1; + + if (bpf_getsockopt(ctx, level, opt, &tmp, sizeof(tmp)) != expected || + (!expected && tmp != new)) + return 1; + + return 0; +} + +static bool bpf_test_access_sockopt(void *ctx, const struct sock *sk) +{ + if (bpf_test_sockopt(ctx, sk, -EOPNOTSUPP)) + return true; + return false; +} + +static bool bpf_test_access_load_hdr_opt(struct bpf_sock_ops *skops) +{ + u8 opt[3] = {0}; + int load_flags = 0; + int ret; + + ret = bpf_load_hdr_opt(skops, opt, sizeof(opt), load_flags); + if (ret != -EOPNOTSUPP) + return true; + + return false; +} + +static bool bpf_test_access_cb_flags_set(struct bpf_sock_ops *skops) +{ + int ret; + + ret = bpf_sock_ops_cb_flags_set(skops, 0); + if (ret != -EOPNOTSUPP) + return true; + + return false; +} + +/* In the timestamping callbacks, we're not allowed to call the following + * BPF CALLs for safety reasons. Return false if they fail as expected. 
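+ * Each call below must fail with -EOPNOTSUPP in this context. 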
+ */ +static bool bpf_test_access_bpf_calls(struct bpf_sock_ops *skops, + const struct sock *sk) +{ + if (bpf_test_access_sockopt(skops, sk)) + return true; + + if (bpf_test_access_load_hdr_opt(skops)) + return true; + + if (bpf_test_access_cb_flags_set(skops)) + return true; + + return false; +} + +static bool bpf_test_delay(struct bpf_sock_ops *skops, const struct sock *sk) +{ + struct bpf_sock_ops_kern *skops_kern; + u64 timestamp = bpf_ktime_get_ns(); + struct skb_shared_info *shinfo; + struct delay_info dinfo = {0}; + struct sk_tskey key = {0}; + struct delay_info *val; + struct sk_buff *skb; + struct sk_stg *stg; + u64 prior_ts, delay; + + if (bpf_test_access_bpf_calls(skops, sk)) + return false; + + skops_kern = bpf_cast_to_kern_ctx(skops); + skb = skops_kern->skb; + shinfo = bpf_core_cast(skb->head + skb->end, struct skb_shared_info); + + key.cookie = bpf_get_socket_cookie(skops); + if (!key.cookie) + return false; + + if (skops->op == BPF_SOCK_OPS_TSTAMP_SENDMSG_CB) { + stg = bpf_sk_storage_get(&sk_stg_map, (void *)sk, 0, 0); + if (!stg) + return false; + dinfo.sendmsg_ns = stg->sendmsg_ns; + bpf_sock_ops_enable_tx_tstamp(skops_kern, 0); + key.tskey = shinfo->tskey; + if (!key.tskey) + return false; + bpf_map_update_elem(&time_map, &key, &dinfo, BPF_ANY); + return true; + } + + key.tskey = shinfo->tskey; + if (!key.tskey) + return false; + + val = bpf_map_lookup_elem(&time_map, &key); + if (!val) + return false; + + switch (skops->op) { + case BPF_SOCK_OPS_TSTAMP_SCHED_CB: + val->sched_delay = timestamp - val->sendmsg_ns; + delay = val->sched_delay; + break; + case BPF_SOCK_OPS_TSTAMP_SND_SW_CB: + prior_ts = val->sched_delay + val->sendmsg_ns; + val->snd_sw_delay = timestamp - prior_ts; + delay = val->snd_sw_delay; + break; + case BPF_SOCK_OPS_TSTAMP_ACK_CB: + prior_ts = val->snd_sw_delay + val->sched_delay + val->sendmsg_ns; + val->ack_delay = timestamp - prior_ts; + delay = val->ack_delay; + break; + } + + if (delay >= delay_tolerance_nsec) + return false; + + /* Since it's the last one, remove from the map after latency check */ + if (skops->op == BPF_SOCK_OPS_TSTAMP_ACK_CB) + bpf_map_delete_elem(&time_map, &key); + + return true; +} + +SEC("fentry/tcp_sendmsg_locked") +int BPF_PROG(trace_tcp_sendmsg_locked, struct sock *sk, struct msghdr *msg, + size_t size) +{ + __u32 pid = bpf_get_current_pid_tgid() >> 32; + u64 timestamp = bpf_ktime_get_ns(); + u32 flag = sk->sk_bpf_cb_flags; + struct sk_stg *stg; + + if (pid != monitored_pid || !flag) + return 0; + + stg = bpf_sk_storage_get(&sk_stg_map, sk, 0, + BPF_SK_STORAGE_GET_F_CREATE); + if (!stg) + return 0; + + stg->sendmsg_ns = timestamp; + nr_snd += 1; + return 0; +} + +SEC("sockops") +int skops_sockopt(struct bpf_sock_ops *skops) +{ + struct bpf_sock *bpf_sk = skops->sk; + const struct sock *sk; + + if (!bpf_sk) + return 1; + + sk = (struct sock *)bpf_skc_to_tcp_sock(bpf_sk); + if (!sk) + return 1; + + switch (skops->op) { + case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB: + nr_active += !bpf_test_sockopt(skops, sk, 0); + break; + case BPF_SOCK_OPS_TSTAMP_SENDMSG_CB: + if (bpf_test_delay(skops, sk)) + nr_snd += 1; + break; + case BPF_SOCK_OPS_TSTAMP_SCHED_CB: + if (bpf_test_delay(skops, sk)) + nr_sched += 1; + break; + case BPF_SOCK_OPS_TSTAMP_SND_SW_CB: + if (bpf_test_delay(skops, sk)) + nr_txsw += 1; + break; + case BPF_SOCK_OPS_TSTAMP_ACK_CB: + if (bpf_test_delay(skops, sk)) + nr_ack += 1; + break; + } + + return 1; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/netns_cookie_prog.c 
b/tools/testing/selftests/bpf/progs/netns_cookie_prog.c index c6edf8dbefeb..94040714af18 100644 --- a/tools/testing/selftests/bpf/progs/netns_cookie_prog.c +++ b/tools/testing/selftests/bpf/progs/netns_cookie_prog.c @@ -28,6 +28,7 @@ struct { } sock_map SEC(".maps"); int tcx_init_netns_cookie, tcx_netns_cookie; +int cgroup_skb_init_netns_cookie, cgroup_skb_netns_cookie; SEC("sockops") int get_netns_cookie_sockops(struct bpf_sock_ops *ctx) @@ -91,4 +92,12 @@ int get_netns_cookie_tcx(struct __sk_buff *skb) return TCX_PASS; } +SEC("cgroup_skb/ingress") +int get_netns_cookie_cgroup_skb(struct __sk_buff *skb) +{ + cgroup_skb_init_netns_cookie = bpf_get_netns_cookie(NULL); + cgroup_skb_netns_cookie = bpf_get_netns_cookie(skb); + return SK_PASS; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/preempt_lock.c b/tools/testing/selftests/bpf/progs/preempt_lock.c index 885377e83607..7d04254e61f1 100644 --- a/tools/testing/selftests/bpf/progs/preempt_lock.c +++ b/tools/testing/selftests/bpf/progs/preempt_lock.c @@ -5,8 +5,10 @@ #include "bpf_misc.h" #include "bpf_experimental.h" +extern int bpf_copy_from_user_str(void *dst, u32 dst__sz, const void *unsafe_ptr__ign, u64 flags) __weak __ksym; + SEC("?tc") -__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region") +__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region") int preempt_lock_missing_1(struct __sk_buff *ctx) { bpf_preempt_disable(); @@ -14,7 +16,7 @@ int preempt_lock_missing_1(struct __sk_buff *ctx) } SEC("?tc") -__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region") +__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region") int preempt_lock_missing_2(struct __sk_buff *ctx) { bpf_preempt_disable(); @@ -23,7 +25,7 @@ int preempt_lock_missing_2(struct __sk_buff *ctx) } SEC("?tc") -__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region") +__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region") int preempt_lock_missing_3(struct __sk_buff *ctx) { bpf_preempt_disable(); @@ -33,7 +35,7 @@ int preempt_lock_missing_3(struct __sk_buff *ctx) } SEC("?tc") -__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region") +__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region") int preempt_lock_missing_3_minus_2(struct __sk_buff *ctx) { bpf_preempt_disable(); @@ -55,7 +57,7 @@ static __noinline void preempt_enable(void) } SEC("?tc") -__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region") +__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region") int preempt_lock_missing_1_subprog(struct __sk_buff *ctx) { preempt_disable(); @@ -63,7 +65,7 @@ int preempt_lock_missing_1_subprog(struct __sk_buff *ctx) } SEC("?tc") -__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region") +__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region") int preempt_lock_missing_2_subprog(struct __sk_buff *ctx) { preempt_disable(); @@ -72,7 +74,7 @@ int preempt_lock_missing_2_subprog(struct __sk_buff *ctx) } SEC("?tc") -__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region") +__failure __msg("BPF_EXIT instruction in main prog 
cannot be used inside bpf_preempt_disable-ed region") int preempt_lock_missing_2_minus_1_subprog(struct __sk_buff *ctx) { preempt_disable(); @@ -113,6 +115,18 @@ int preempt_sleepable_helper(void *ctx) return 0; } +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +__failure __msg("kernel func bpf_copy_from_user_str is sleepable within non-preemptible region") +int preempt_sleepable_kfunc(void *ctx) +{ + u32 data; + + bpf_preempt_disable(); + bpf_copy_from_user_str(&data, sizeof(data), NULL, 0); + bpf_preempt_enable(); + return 0; +} + int __noinline preempt_global_subprog(void) { preempt_balance_subprog(); @@ -120,7 +134,7 @@ int __noinline preempt_global_subprog(void) } SEC("?tc") -__failure __msg("global function calls are not allowed with preemption disabled") +__success int preempt_global_subprog_test(struct __sk_buff *ctx) { preempt_disable(); @@ -129,4 +143,70 @@ int preempt_global_subprog_test(struct __sk_buff *ctx) return 0; } +int __noinline +global_subprog(int i) +{ + if (i) + bpf_printk("%p", &i); + return i; +} + +int __noinline +global_sleepable_helper_subprog(int i) +{ + if (i) + bpf_copy_from_user(&i, sizeof(i), NULL); + return i; +} + +int __noinline +global_sleepable_kfunc_subprog(int i) +{ + if (i) + bpf_copy_from_user_str(&i, sizeof(i), NULL, 0); + global_subprog(i); + return i; +} + +int __noinline +global_subprog_calling_sleepable_global(int i) +{ + if (!i) + global_sleepable_kfunc_subprog(i); + return i; +} + +SEC("?syscall") +__failure __msg("global functions that may sleep are not allowed in non-sleepable context") +int preempt_global_sleepable_helper_subprog(struct __sk_buff *ctx) +{ + preempt_disable(); + if (ctx->mark) + global_sleepable_helper_subprog(ctx->mark); + preempt_enable(); + return 0; +} + +SEC("?syscall") +__failure __msg("global functions that may sleep are not allowed in non-sleepable context") +int preempt_global_sleepable_kfunc_subprog(struct __sk_buff *ctx) +{ + preempt_disable(); + if (ctx->mark) + global_sleepable_kfunc_subprog(ctx->mark); + preempt_enable(); + return 0; +} + +SEC("?syscall") +__failure __msg("global functions that may sleep are not allowed in non-sleepable context") +int preempt_global_sleepable_subprog_indirect(struct __sk_buff *ctx) +{ + preempt_disable(); + if (ctx->mark) + global_subprog_calling_sleepable_global(ctx->mark); + preempt_enable(); + return 0; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/prepare.c b/tools/testing/selftests/bpf/progs/prepare.c new file mode 100644 index 000000000000..cfc1f48e0d28 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/prepare.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta */ +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +int err; + +struct { + __uint(type, BPF_MAP_TYPE_RINGBUF); + __uint(max_entries, 4096); +} ringbuf SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u32); +} array_map SEC(".maps"); + +SEC("cgroup_skb/egress") +int program(struct __sk_buff *skb) +{ + err = 0; + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/priv_freplace_prog.c b/tools/testing/selftests/bpf/progs/priv_freplace_prog.c new file mode 100644 index 000000000000..ccf1b04010ba --- /dev/null +++ b/tools/testing/selftests/bpf/progs/priv_freplace_prog.c @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. 
*/ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +SEC("freplace/xdp_prog1") +int new_xdp_prog2(struct xdp_md *xd) +{ + return XDP_DROP; +} diff --git a/tools/testing/selftests/bpf/progs/priv_prog.c b/tools/testing/selftests/bpf/progs/priv_prog.c index 3c7b2b618c8a..725e29595079 100644 --- a/tools/testing/selftests/bpf/progs/priv_prog.c +++ b/tools/testing/selftests/bpf/progs/priv_prog.c @@ -6,8 +6,8 @@ char _license[] SEC("license") = "GPL"; -SEC("kprobe") -int kprobe_prog(void *ctx) +SEC("xdp") +int xdp_prog1(struct xdp_md *xdp) { - return 1; + return XDP_DROP; } diff --git a/tools/testing/selftests/bpf/progs/pro_epilogue.c b/tools/testing/selftests/bpf/progs/pro_epilogue.c index 44bc3f06b4b6..d97d6e07ef5c 100644 --- a/tools/testing/selftests/bpf/progs/pro_epilogue.c +++ b/tools/testing/selftests/bpf/progs/pro_epilogue.c @@ -4,8 +4,8 @@ #include <vmlinux.h> #include <bpf/bpf_tracing.h> #include "bpf_misc.h" -#include "../bpf_testmod/bpf_testmod.h" -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod.h" +#include "../test_kmods/bpf_testmod_kfunc.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/pro_epilogue_goto_start.c b/tools/testing/selftests/bpf/progs/pro_epilogue_goto_start.c index 3529e53be355..6048d79be48b 100644 --- a/tools/testing/selftests/bpf/progs/pro_epilogue_goto_start.c +++ b/tools/testing/selftests/bpf/progs/pro_epilogue_goto_start.c @@ -4,8 +4,8 @@ #include <vmlinux.h> #include <bpf/bpf_tracing.h> #include "bpf_misc.h" -#include "../bpf_testmod/bpf_testmod.h" -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod.h" +#include "../test_kmods/bpf_testmod_kfunc.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/pro_epilogue_with_kfunc.c b/tools/testing/selftests/bpf/progs/pro_epilogue_with_kfunc.c new file mode 100644 index 000000000000..a5a8f08ac8fb --- /dev/null +++ b/tools/testing/selftests/bpf/progs/pro_epilogue_with_kfunc.c @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. 
*/ + +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include "bpf_misc.h" +#include "../test_kmods/bpf_testmod.h" +#include "../test_kmods/bpf_testmod_kfunc.h" + +char _license[] SEC("license") = "GPL"; + +void __kfunc_btf_root(void) +{ + bpf_kfunc_st_ops_inc10(NULL); +} + +static __noinline __used int subprog(struct st_ops_args *args) +{ + args->a += 1; + return args->a; +} + +__success +/* prologue */ +__xlated("0: r8 = r1") +__xlated("1: r1 = 0") +__xlated("2: call kernel-function") +__xlated("3: if r0 != 0x0 goto pc+5") +__xlated("4: r6 = *(u64 *)(r8 +0)") +__xlated("5: r7 = *(u64 *)(r6 +0)") +__xlated("6: r7 += 1000") +__xlated("7: *(u64 *)(r6 +0) = r7") +__xlated("8: goto pc+2") +__xlated("9: r1 = r0") +__xlated("10: call kernel-function") +__xlated("11: r1 = r8") +/* save __u64 *ctx to stack */ +__xlated("12: *(u64 *)(r10 -8) = r1") +/* main prog */ +__xlated("13: r1 = *(u64 *)(r1 +0)") +__xlated("14: r6 = r1") +__xlated("15: call kernel-function") +__xlated("16: r1 = r6") +__xlated("17: call pc+") +/* epilogue */ +__xlated("18: r1 = 0") +__xlated("19: r6 = 0") +__xlated("20: call kernel-function") +__xlated("21: if r0 != 0x0 goto pc+6") +__xlated("22: r1 = *(u64 *)(r10 -8)") +__xlated("23: r1 = *(u64 *)(r1 +0)") +__xlated("24: r6 = *(u64 *)(r1 +0)") +__xlated("25: r6 += 10000") +__xlated("26: *(u64 *)(r1 +0) = r6") +__xlated("27: goto pc+2") +__xlated("28: r1 = r0") +__xlated("29: call kernel-function") +__xlated("30: r0 = r6") +__xlated("31: r0 *= 2") +__xlated("32: exit") +SEC("struct_ops/test_pro_epilogue") +__naked int test_kfunc_pro_epilogue(void) +{ + asm volatile ( + "r1 = *(u64 *)(r1 +0);" + "r6 = r1;" + "call %[bpf_kfunc_st_ops_inc10];" + "r1 = r6;" + "call subprog;" + "exit;" + : + : __imm(bpf_kfunc_st_ops_inc10) + : __clobber_all); +} + +SEC("syscall") +__retval(22022) /* (PROLOGUE_A [1000] + KFUNC_INC10 + SUBPROG_A [1] + EPILOGUE_A [10000]) * 2 */ +int syscall_pro_epilogue(void *ctx) +{ + struct st_ops_args args = {}; + + return bpf_kfunc_st_ops_test_pro_epilogue(&args); +} + +SEC(".struct_ops.link") +struct bpf_testmod_st_ops pro_epilogue_with_kfunc = { + .test_pro_epilogue = (void *)test_kfunc_pro_epilogue, +}; diff --git a/tools/testing/selftests/bpf/progs/profiler.inc.h b/tools/testing/selftests/bpf/progs/profiler.inc.h index 8bd1ebd7d6af..813143b4985d 100644 --- a/tools/testing/selftests/bpf/progs/profiler.inc.h +++ b/tools/testing/selftests/bpf/progs/profiler.inc.h @@ -223,7 +223,7 @@ static INLINE void* read_full_cgroup_path(struct kernfs_node* cgroup_node, if (bpf_cmp_likely(filepart_length, <=, MAX_PATH)) { payload += filepart_length; } - cgroup_node = BPF_CORE_READ(cgroup_node, parent); + cgroup_node = BPF_CORE_READ(cgroup_node, __parent); } return payload; } diff --git a/tools/testing/selftests/bpf/progs/raw_tp_null.c b/tools/testing/selftests/bpf/progs/raw_tp_null.c index 457f34c151e3..efa416f53968 100644 --- a/tools/testing/selftests/bpf/progs/raw_tp_null.c +++ b/tools/testing/selftests/bpf/progs/raw_tp_null.c @@ -3,13 +3,14 @@ #include <vmlinux.h> #include <bpf/bpf_tracing.h> +#include "bpf_misc.h" char _license[] SEC("license") = "GPL"; int tid; int i; -SEC("tp_btf/bpf_testmod_test_raw_tp_null") +SEC("tp_btf/bpf_testmod_test_raw_tp_null_tp") int BPF_PROG(test_raw_tp_null, struct sk_buff *skb) { struct task_struct *task = bpf_get_current_task_btf(); @@ -17,16 +18,14 @@ int BPF_PROG(test_raw_tp_null, struct sk_buff *skb) if (task->pid != tid) return 0; - i = i + skb->mark + 1; - /* The compiler may move the NULL check before this deref, which causes - 
* the load to fail as deref of scalar. Prevent that by using a barrier. + /* If dead code elimination kicks in, the increment +=2 will be + * removed. For raw_tp programs attaching to tracepoints in kernel + * modules, we mark input arguments as PTR_MAYBE_NULL, so branch + * prediction should never kick in. */ - barrier(); - /* If dead code elimination kicks in, the increment below will - * be removed. For raw_tp programs, we mark input arguments as - * PTR_MAYBE_NULL, so branch prediction should never kick in. - */ - if (!skb) - i += 2; + asm volatile ("%[i] += 1; if %[ctx] != 0 goto +1; %[i] += 2;" + : [i]"+r"(i) + : [ctx]"r"(skb) + : "memory"); return 0; } diff --git a/tools/testing/selftests/bpf/progs/raw_tp_null_fail.c b/tools/testing/selftests/bpf/progs/raw_tp_null_fail.c new file mode 100644 index 000000000000..0d58114a4955 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/raw_tp_null_fail.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ + +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + +/* Ensure module parameter has PTR_MAYBE_NULL */ +SEC("tp_btf/bpf_testmod_test_raw_tp_null_tp") +__failure __msg("R1 invalid mem access 'trusted_ptr_or_null_'") +int test_raw_tp_null_bpf_testmod_test_raw_tp_null_arg_1(void *ctx) { + asm volatile("r1 = *(u64 *)(r1 +0); r1 = *(u64 *)(r1 +0);" ::: __clobber_all); + return 0; +} + +/* Check NULL marking */ +SEC("tp_btf/sched_pi_setprio") +__failure __msg("R1 invalid mem access 'trusted_ptr_or_null_'") +int test_raw_tp_null_sched_pi_setprio_arg_2(void *ctx) { + asm volatile("r1 = *(u64 *)(r1 +8); r1 = *(u64 *)(r1 +0);" ::: __clobber_all); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/rbtree.c b/tools/testing/selftests/bpf/progs/rbtree.c index a3620c15c136..49fe93d7e059 100644 --- a/tools/testing/selftests/bpf/progs/rbtree.c +++ b/tools/testing/selftests/bpf/progs/rbtree.c @@ -61,19 +61,19 @@ static long __add_three(struct bpf_rb_root *root, struct bpf_spin_lock *lock) } m->key = 1; - bpf_spin_lock(&glock); - bpf_rbtree_add(&groot, &n->node, less); - bpf_rbtree_add(&groot, &m->node, less); - bpf_spin_unlock(&glock); + bpf_spin_lock(lock); + bpf_rbtree_add(root, &n->node, less); + bpf_rbtree_add(root, &m->node, less); + bpf_spin_unlock(lock); n = bpf_obj_new(typeof(*n)); if (!n) return 3; n->key = 3; - bpf_spin_lock(&glock); - bpf_rbtree_add(&groot, &n->node, less); - bpf_spin_unlock(&glock); + bpf_spin_lock(lock); + bpf_rbtree_add(root, &n->node, less); + bpf_spin_unlock(lock); return 0; } diff --git a/tools/testing/selftests/bpf/progs/rbtree_fail.c b/tools/testing/selftests/bpf/progs/rbtree_fail.c index dbd5eee8e25e..4acb6af2dfe3 100644 --- a/tools/testing/selftests/bpf/progs/rbtree_fail.c +++ b/tools/testing/selftests/bpf/progs/rbtree_fail.c @@ -69,11 +69,11 @@ long rbtree_api_nolock_first(void *ctx) } SEC("?tc") -__failure __msg("rbtree_remove node input must be non-owning ref") +__retval(0) long rbtree_api_remove_unadded_node(void *ctx) { struct node_data *n, *m; - struct bpf_rb_node *res; + struct bpf_rb_node *res_n, *res_m; n = bpf_obj_new(typeof(*n)); if (!n) @@ -88,19 +88,20 @@ long rbtree_api_remove_unadded_node(void *ctx) bpf_spin_lock(&glock); bpf_rbtree_add(&groot, &n->node, less); - /* This remove should pass verifier */ - res = bpf_rbtree_remove(&groot, &n->node); - n = container_of(res, struct node_data, node); + res_n = bpf_rbtree_remove(&groot, &n->node); - /* This remove 
shouldn't, m isn't in an rbtree */ - res = bpf_rbtree_remove(&groot, &m->node); - m = container_of(res, struct node_data, node); + res_m = bpf_rbtree_remove(&groot, &m->node); bpf_spin_unlock(&glock); - if (n) - bpf_obj_drop(n); - if (m) - bpf_obj_drop(m); + bpf_obj_drop(m); + if (res_n) + bpf_obj_drop(container_of(res_n, struct node_data, node)); + if (res_m) { + bpf_obj_drop(container_of(res_m, struct node_data, node)); + /* m was not added to the rbtree */ + return 2; + } + return 0; } @@ -178,7 +179,7 @@ err_out: } SEC("?tc") -__failure __msg("rbtree_remove node input must be non-owning ref") +__failure __msg("bpf_rbtree_remove can only take non-owning or refcounted bpf_rb_node pointer") long rbtree_api_add_release_unlock_escape(void *ctx) { struct node_data *n; @@ -202,7 +203,7 @@ long rbtree_api_add_release_unlock_escape(void *ctx) } SEC("?tc") -__failure __msg("rbtree_remove node input must be non-owning ref") +__failure __msg("bpf_rbtree_remove can only take non-owning or refcounted bpf_rb_node pointer") long rbtree_api_first_release_unlock_escape(void *ctx) { struct bpf_rb_node *res; diff --git a/tools/testing/selftests/bpf/progs/rbtree_search.c b/tools/testing/selftests/bpf/progs/rbtree_search.c new file mode 100644 index 000000000000..098ef970fac1 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/rbtree_search.c @@ -0,0 +1,206 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */ + +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" +#include "bpf_experimental.h" + +struct node_data { + struct bpf_refcount ref; + struct bpf_rb_node r0; + struct bpf_rb_node r1; + int key0; + int key1; +}; + +#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8))) +private(A) struct bpf_spin_lock glock0; +private(A) struct bpf_rb_root groot0 __contains(node_data, r0); + +private(B) struct bpf_spin_lock glock1; +private(B) struct bpf_rb_root groot1 __contains(node_data, r1); + +#define rb_entry(ptr, type, member) container_of(ptr, type, member) +#define NR_NODES 16 + +int zero = 0; + +static bool less0(struct bpf_rb_node *a, const struct bpf_rb_node *b) +{ + struct node_data *node_a; + struct node_data *node_b; + + node_a = rb_entry(a, struct node_data, r0); + node_b = rb_entry(b, struct node_data, r0); + + return node_a->key0 < node_b->key0; +} + +static bool less1(struct bpf_rb_node *a, const struct bpf_rb_node *b) +{ + struct node_data *node_a; + struct node_data *node_b; + + node_a = rb_entry(a, struct node_data, r1); + node_b = rb_entry(b, struct node_data, r1); + + return node_a->key1 < node_b->key1; +} + +SEC("syscall") +__retval(0) +long rbtree_search(void *ctx) +{ + struct bpf_rb_node *rb_n, *rb_m, *gc_ns[NR_NODES]; + long lookup_key = NR_NODES / 2; + struct node_data *n, *m; + int i, nr_gc = 0; + + for (i = zero; i < NR_NODES && can_loop; i++) { + n = bpf_obj_new(typeof(*n)); + if (!n) + return __LINE__; + + m = bpf_refcount_acquire(n); + + n->key0 = i; + m->key1 = i; + + bpf_spin_lock(&glock0); + bpf_rbtree_add(&groot0, &n->r0, less0); + bpf_spin_unlock(&glock0); + + bpf_spin_lock(&glock1); + bpf_rbtree_add(&groot1, &m->r1, less1); + bpf_spin_unlock(&glock1); + } + + n = NULL; + bpf_spin_lock(&glock0); + rb_n = bpf_rbtree_root(&groot0); + while (can_loop) { + if (!rb_n) { + bpf_spin_unlock(&glock0); + return __LINE__; + } + + n = rb_entry(rb_n, struct node_data, r0); + if (lookup_key == n->key0) + break; + if (nr_gc < NR_NODES) + gc_ns[nr_gc++] = rb_n; + if (lookup_key < n->key0) + rb_n = 
bpf_rbtree_left(&groot0, rb_n);
+		else
+			rb_n = bpf_rbtree_right(&groot0, rb_n);
+	}
+
+	if (!n || lookup_key != n->key0) {
+		bpf_spin_unlock(&glock0);
+		return __LINE__;
+	}
+
+	for (i = 0; i < nr_gc; i++) {
+		rb_n = gc_ns[i];
+		gc_ns[i] = bpf_rbtree_remove(&groot0, rb_n);
+	}
+
+	m = bpf_refcount_acquire(n);
+	bpf_spin_unlock(&glock0);
+
+	for (i = 0; i < nr_gc; i++) {
+		rb_n = gc_ns[i];
+		if (rb_n) {
+			n = rb_entry(rb_n, struct node_data, r0);
+			bpf_obj_drop(n);
+		}
+	}
+
+	if (!m)
+		return __LINE__;
+
+	bpf_spin_lock(&glock1);
+	rb_m = bpf_rbtree_remove(&groot1, &m->r1);
+	bpf_spin_unlock(&glock1);
+	bpf_obj_drop(m);
+	if (!rb_m)
+		return __LINE__;
+	bpf_obj_drop(rb_entry(rb_m, struct node_data, r1));
+
+	return 0;
+}
+
+#define TEST_ROOT(dolock)						\
+SEC("syscall")								\
+__failure __msg(MSG)							\
+long test_root_spinlock_##dolock(void *ctx)				\
+{									\
+	struct bpf_rb_node *rb_n;					\
+	__u64 jiffies = 0;						\
+									\
+	if (dolock)							\
+		bpf_spin_lock(&glock0);					\
+	rb_n = bpf_rbtree_root(&groot0);				\
+	if (rb_n)							\
+		jiffies = bpf_jiffies64();				\
+	if (dolock)							\
+		bpf_spin_unlock(&glock0);				\
+									\
+	return !!jiffies;						\
+}
+
+#define TEST_LR(op, dolock)						\
+SEC("syscall")								\
+__failure __msg(MSG)							\
+long test_##op##_spinlock_##dolock(void *ctx)				\
+{									\
+	struct bpf_rb_node *rb_n;					\
+	struct node_data *n;						\
+	__u64 jiffies = 0;						\
+									\
+	bpf_spin_lock(&glock0);						\
+	rb_n = bpf_rbtree_root(&groot0);				\
+	if (!rb_n) {							\
+		bpf_spin_unlock(&glock0);				\
+		return 1;						\
+	}								\
+	n = rb_entry(rb_n, struct node_data, r0);			\
+	n = bpf_refcount_acquire(n);					\
+	bpf_spin_unlock(&glock0);					\
+	if (!n)								\
+		return 1;						\
+									\
+	if (dolock)							\
+		bpf_spin_lock(&glock0);					\
+	rb_n = bpf_rbtree_##op(&groot0, &n->r0);			\
+	if (rb_n)							\
+		jiffies = bpf_jiffies64();				\
+	if (dolock)							\
+		bpf_spin_unlock(&glock0);				\
+									\
+	return !!jiffies;						\
+}
+
+/*
+ * Use a separate MSG macro instead of passing to TEST_XXX(..., MSG)
+ * to ensure the message itself is not in the bpf prog lineinfo
+ * which the verifier includes in its log.
+ * Otherwise, the test_loader will incorrectly match the prog lineinfo
+ * instead of the log generated by the verifier.
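+ *
+ * Note that the {{...}} spans inside each MSG are matched by the
+ * test_loader as regular expressions against the verifier log, which
+ * is how a single MSG below covers both the left and right variants.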
+ */ +#define MSG "call bpf_rbtree_root{{.+}}; R0{{(_w)?}}=rcu_ptr_or_null_node_data(id={{[0-9]+}},non_own_ref" +TEST_ROOT(true) +#undef MSG +#define MSG "call bpf_rbtree_{{(left|right).+}}; R0{{(_w)?}}=rcu_ptr_or_null_node_data(id={{[0-9]+}},non_own_ref" +TEST_LR(left, true) +TEST_LR(right, true) +#undef MSG + +#define MSG "bpf_spin_lock at off=0 must be held for bpf_rb_root" +TEST_ROOT(false) +TEST_LR(left, false) +TEST_LR(right, false) +#undef MSG + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/rcu_read_lock.c b/tools/testing/selftests/bpf/progs/rcu_read_lock.c index ab3a532b7dd6..3a868a199349 100644 --- a/tools/testing/selftests/bpf/progs/rcu_read_lock.c +++ b/tools/testing/selftests/bpf/progs/rcu_read_lock.c @@ -16,10 +16,11 @@ struct { __type(value, long); } map_a SEC(".maps"); -__u32 user_data, key_serial, target_pid; +__u32 user_data, target_pid; +__s32 key_serial; __u64 flags, task_storage_val, cgroup_id; -struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym; +struct bpf_key *bpf_lookup_user_key(__s32 serial, __u64 flags) __ksym; void bpf_key_put(struct bpf_key *key) __ksym; void bpf_rcu_read_lock(void) __ksym; void bpf_rcu_read_unlock(void) __ksym; @@ -242,7 +243,8 @@ out: } SEC("?lsm.s/bpf") -int BPF_PROG(inproper_sleepable_kfunc, int cmd, union bpf_attr *attr, unsigned int size) +int BPF_PROG(inproper_sleepable_kfunc, int cmd, union bpf_attr *attr, unsigned int size, + bool kernel) { struct bpf_key *bkey; @@ -439,3 +441,61 @@ int rcu_read_lock_global_subprog_unlock(void *ctx) ret += global_subprog_unlock(ret); return 0; } + +int __noinline +global_sleepable_helper_subprog(int i) +{ + if (i) + bpf_copy_from_user(&i, sizeof(i), NULL); + return i; +} + +int __noinline +global_sleepable_kfunc_subprog(int i) +{ + if (i) + bpf_copy_from_user_str(&i, sizeof(i), NULL, 0); + global_subprog(i); + return i; +} + +int __noinline +global_subprog_calling_sleepable_global(int i) +{ + if (!i) + global_sleepable_kfunc_subprog(i); + return i; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +int rcu_read_lock_sleepable_helper_global_subprog(void *ctx) +{ + volatile int ret = 0; + + bpf_rcu_read_lock(); + ret += global_sleepable_helper_subprog(ret); + bpf_rcu_read_unlock(); + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +int rcu_read_lock_sleepable_kfunc_global_subprog(void *ctx) +{ + volatile int ret = 0; + + bpf_rcu_read_lock(); + ret += global_sleepable_kfunc_subprog(ret); + bpf_rcu_read_unlock(); + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +int rcu_read_lock_sleepable_global_subprog_indirect(void *ctx) +{ + volatile int ret = 0; + + bpf_rcu_read_lock(); + ret += global_subprog_calling_sleepable_global(ret); + bpf_rcu_read_unlock(); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/read_cgroupfs_xattr.c b/tools/testing/selftests/bpf/progs/read_cgroupfs_xattr.c new file mode 100644 index 000000000000..405adbe5e8b0 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/read_cgroupfs_xattr.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. 
*/ + +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> +#include "bpf_experimental.h" + +char _license[] SEC("license") = "GPL"; + +pid_t target_pid = 0; + +char xattr_value[64]; +static const char expected_value_a[] = "bpf_selftest_value_a"; +static const char expected_value_b[] = "bpf_selftest_value_b"; +bool found_value_a; +bool found_value_b; + +SEC("lsm.s/file_open") +int BPF_PROG(test_file_open) +{ + u64 cgrp_id = bpf_get_current_cgroup_id(); + struct cgroup_subsys_state *css, *tmp; + struct bpf_dynptr value_ptr; + struct cgroup *cgrp; + + if ((bpf_get_current_pid_tgid() >> 32) != target_pid) + return 0; + + bpf_rcu_read_lock(); + cgrp = bpf_cgroup_from_id(cgrp_id); + if (!cgrp) { + bpf_rcu_read_unlock(); + return 0; + } + + css = &cgrp->self; + bpf_dynptr_from_mem(xattr_value, sizeof(xattr_value), 0, &value_ptr); + bpf_for_each(css, tmp, css, BPF_CGROUP_ITER_ANCESTORS_UP) { + int ret; + + ret = bpf_cgroup_read_xattr(tmp->cgroup, "user.bpf_test", + &value_ptr); + if (ret < 0) + continue; + + if (ret == sizeof(expected_value_a) && + !bpf_strncmp(xattr_value, sizeof(expected_value_a), expected_value_a)) + found_value_a = true; + if (ret == sizeof(expected_value_b) && + !bpf_strncmp(xattr_value, sizeof(expected_value_b), expected_value_b)) + found_value_b = true; + } + + bpf_rcu_read_unlock(); + bpf_cgroup_release(cgrp); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/read_vsyscall.c b/tools/testing/selftests/bpf/progs/read_vsyscall.c index 39ebef430059..395591374d4f 100644 --- a/tools/testing/selftests/bpf/progs/read_vsyscall.c +++ b/tools/testing/selftests/bpf/progs/read_vsyscall.c @@ -8,14 +8,16 @@ int target_pid = 0; void *user_ptr = 0; -int read_ret[9]; +int read_ret[10]; char _license[] SEC("license") = "GPL"; /* - * This is the only kfunc, the others are helpers + * These are the kfuncs, the others are helpers */ int bpf_copy_from_user_str(void *dst, u32, const void *, u64) __weak __ksym; +int bpf_copy_from_user_task_str(void *dst, u32, const void *, + struct task_struct *, u64) __weak __ksym; SEC("fentry/" SYS_PREFIX "sys_nanosleep") int do_probe_read(void *ctx) @@ -47,6 +49,11 @@ int do_copy_from_user(void *ctx) read_ret[7] = bpf_copy_from_user_task(buf, sizeof(buf), user_ptr, bpf_get_current_task_btf(), 0); read_ret[8] = bpf_copy_from_user_str((char *)buf, sizeof(buf), user_ptr, 0); + read_ret[9] = bpf_copy_from_user_task_str((char *)buf, + sizeof(buf), + user_ptr, + bpf_get_current_task_btf(), + 0); return 0; } diff --git a/tools/testing/selftests/bpf/progs/res_spin_lock.c b/tools/testing/selftests/bpf/progs/res_spin_lock.c new file mode 100644 index 000000000000..22c4fb8b9266 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/res_spin_lock.c @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024-2025 Meta Platforms, Inc. and affiliates. 
*/ +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#define EDEADLK 35 +#define ETIMEDOUT 110 + +struct arr_elem { + struct bpf_res_spin_lock lock; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 64); + __type(key, int); + __type(value, struct arr_elem); +} arrmap SEC(".maps"); + +struct bpf_res_spin_lock lockA __hidden SEC(".data.A"); +struct bpf_res_spin_lock lockB __hidden SEC(".data.B"); + +SEC("tc") +int res_spin_lock_test(struct __sk_buff *ctx) +{ + struct arr_elem *elem1, *elem2; + int r; + + elem1 = bpf_map_lookup_elem(&arrmap, &(int){0}); + if (!elem1) + return -1; + elem2 = bpf_map_lookup_elem(&arrmap, &(int){0}); + if (!elem2) + return -1; + + r = bpf_res_spin_lock(&elem1->lock); + if (r) + return r; + r = bpf_res_spin_lock(&elem2->lock); + if (!r) { + bpf_res_spin_unlock(&elem2->lock); + bpf_res_spin_unlock(&elem1->lock); + return -1; + } + bpf_res_spin_unlock(&elem1->lock); + return r != -EDEADLK; +} + +SEC("tc") +int res_spin_lock_test_AB(struct __sk_buff *ctx) +{ + int r; + + r = bpf_res_spin_lock(&lockA); + if (r) + return !r; + /* Only unlock if we took the lock. */ + if (!bpf_res_spin_lock(&lockB)) + bpf_res_spin_unlock(&lockB); + bpf_res_spin_unlock(&lockA); + return 0; +} + +int err; + +SEC("tc") +int res_spin_lock_test_BA(struct __sk_buff *ctx) +{ + int r; + + r = bpf_res_spin_lock(&lockB); + if (r) + return !r; + if (!bpf_res_spin_lock(&lockA)) + bpf_res_spin_unlock(&lockA); + else + err = -EDEADLK; + bpf_res_spin_unlock(&lockB); + return err ?: 0; +} + +SEC("tc") +int res_spin_lock_test_held_lock_max(struct __sk_buff *ctx) +{ + struct bpf_res_spin_lock *locks[48] = {}; + struct arr_elem *e; + u64 time_beg, time; + int ret = 0, i; + + _Static_assert(ARRAY_SIZE(((struct rqspinlock_held){}).locks) == 31, + "RES_NR_HELD assumed to be 31"); + + for (i = 0; i < 34; i++) { + int key = i; + + /* We cannot pass in i as it will get spilled/filled by the compiler and + * loses bounds in verifier state. + */ + e = bpf_map_lookup_elem(&arrmap, &key); + if (!e) + return 1; + locks[i] = &e->lock; + } + + for (; i < 48; i++) { + int key = i - 2; + + /* We cannot pass in i as it will get spilled/filled by the compiler and + * loses bounds in verifier state. + */ + e = bpf_map_lookup_elem(&arrmap, &key); + if (!e) + return 1; + locks[i] = &e->lock; + } + + time_beg = bpf_ktime_get_ns(); + for (i = 0; i < 34; i++) { + if (bpf_res_spin_lock(locks[i])) + goto end; + } + + /* Trigger AA, after exhausting entries in the held lock table. This + * time, only the timeout can save us, as AA detection won't succeed. + */ + ret = bpf_res_spin_lock(locks[34]); + if (!ret) { + bpf_res_spin_unlock(locks[34]); + ret = 1; + goto end; + } + + ret = ret != -ETIMEDOUT ? 2 : 0; + +end: + for (i = i - 1; i >= 0; i--) + bpf_res_spin_unlock(locks[i]); + time = bpf_ktime_get_ns() - time_beg; + /* Time spent should be easily above our limit (1/4 s), since AA + * detection won't be expedited due to lack of held lock entry. + */ + return ret ?: (time > 1000000000 / 4 ? 0 : 1); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/res_spin_lock_fail.c b/tools/testing/selftests/bpf/progs/res_spin_lock_fail.c new file mode 100644 index 000000000000..330682a88c16 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/res_spin_lock_fail.c @@ -0,0 +1,244 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024-2025 Meta Platforms, Inc. and affiliates. 
*/ +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> +#include "bpf_misc.h" +#include "bpf_experimental.h" + +struct arr_elem { + struct bpf_res_spin_lock lock; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct arr_elem); +} arrmap SEC(".maps"); + +long value; + +struct bpf_spin_lock lock __hidden SEC(".data.A"); +struct bpf_res_spin_lock res_lock __hidden SEC(".data.B"); + +SEC("?tc") +__failure __msg("point to map value or allocated object") +int res_spin_lock_arg(struct __sk_buff *ctx) +{ + struct arr_elem *elem; + + elem = bpf_map_lookup_elem(&arrmap, &(int){0}); + if (!elem) + return 0; + bpf_res_spin_lock((struct bpf_res_spin_lock *)bpf_core_cast(&elem->lock, struct __sk_buff)); + bpf_res_spin_lock(&elem->lock); + return 0; +} + +SEC("?tc") +__failure __msg("AA deadlock detected") +int res_spin_lock_AA(struct __sk_buff *ctx) +{ + struct arr_elem *elem; + + elem = bpf_map_lookup_elem(&arrmap, &(int){0}); + if (!elem) + return 0; + bpf_res_spin_lock(&elem->lock); + bpf_res_spin_lock(&elem->lock); + return 0; +} + +SEC("?tc") +__failure __msg("AA deadlock detected") +int res_spin_lock_cond_AA(struct __sk_buff *ctx) +{ + struct arr_elem *elem; + + elem = bpf_map_lookup_elem(&arrmap, &(int){0}); + if (!elem) + return 0; + if (bpf_res_spin_lock(&elem->lock)) + return 0; + bpf_res_spin_lock(&elem->lock); + return 0; +} + +SEC("?tc") +__failure __msg("unlock of different lock") +int res_spin_lock_mismatch_1(struct __sk_buff *ctx) +{ + struct arr_elem *elem; + + elem = bpf_map_lookup_elem(&arrmap, &(int){0}); + if (!elem) + return 0; + if (bpf_res_spin_lock(&elem->lock)) + return 0; + bpf_res_spin_unlock(&res_lock); + return 0; +} + +SEC("?tc") +__failure __msg("unlock of different lock") +int res_spin_lock_mismatch_2(struct __sk_buff *ctx) +{ + struct arr_elem *elem; + + elem = bpf_map_lookup_elem(&arrmap, &(int){0}); + if (!elem) + return 0; + if (bpf_res_spin_lock(&res_lock)) + return 0; + bpf_res_spin_unlock(&elem->lock); + return 0; +} + +SEC("?tc") +__failure __msg("unlock of different lock") +int res_spin_lock_irq_mismatch_1(struct __sk_buff *ctx) +{ + struct arr_elem *elem; + unsigned long f1; + + elem = bpf_map_lookup_elem(&arrmap, &(int){0}); + if (!elem) + return 0; + bpf_local_irq_save(&f1); + if (bpf_res_spin_lock(&res_lock)) + return 0; + bpf_res_spin_unlock_irqrestore(&res_lock, &f1); + return 0; +} + +SEC("?tc") +__failure __msg("unlock of different lock") +int res_spin_lock_irq_mismatch_2(struct __sk_buff *ctx) +{ + struct arr_elem *elem; + unsigned long f1; + + elem = bpf_map_lookup_elem(&arrmap, &(int){0}); + if (!elem) + return 0; + if (bpf_res_spin_lock_irqsave(&res_lock, &f1)) + return 0; + bpf_res_spin_unlock(&res_lock); + return 0; +} + +SEC("?tc") +__success +int res_spin_lock_ooo(struct __sk_buff *ctx) +{ + struct arr_elem *elem; + + elem = bpf_map_lookup_elem(&arrmap, &(int){0}); + if (!elem) + return 0; + if (bpf_res_spin_lock(&res_lock)) + return 0; + if (bpf_res_spin_lock(&elem->lock)) { + bpf_res_spin_unlock(&res_lock); + return 0; + } + bpf_res_spin_unlock(&elem->lock); + bpf_res_spin_unlock(&res_lock); + return 0; +} + +SEC("?tc") +__success +int res_spin_lock_ooo_irq(struct __sk_buff *ctx) +{ + struct arr_elem *elem; + unsigned long f1, f2; + + elem = bpf_map_lookup_elem(&arrmap, &(int){0}); + if (!elem) + return 0; + if (bpf_res_spin_lock_irqsave(&res_lock, &f1)) + return 0; + if (bpf_res_spin_lock_irqsave(&elem->lock, &f2)) 
{
+		bpf_res_spin_unlock_irqrestore(&res_lock, &f1);
+		/* We won't have an unreleased IRQ flag error here. */
+		return 0;
+	}
+	bpf_res_spin_unlock_irqrestore(&elem->lock, &f2);
+	bpf_res_spin_unlock_irqrestore(&res_lock, &f1);
+	return 0;
+}
+
+struct bpf_res_spin_lock lock1 __hidden SEC(".data.OO1");
+struct bpf_res_spin_lock lock2 __hidden SEC(".data.OO2");
+
+SEC("?tc")
+__failure __msg("bpf_res_spin_unlock cannot be out of order")
+int res_spin_lock_ooo_unlock(struct __sk_buff *ctx)
+{
+	if (bpf_res_spin_lock(&lock1))
+		return 0;
+	if (bpf_res_spin_lock(&lock2)) {
+		bpf_res_spin_unlock(&lock1);
+		return 0;
+	}
+	bpf_res_spin_unlock(&lock1);
+	bpf_res_spin_unlock(&lock2);
+	return 0;
+}
+
+SEC("?tc")
+__failure __msg("off 1 doesn't point to 'struct bpf_res_spin_lock' that is at 0")
+int res_spin_lock_bad_off(struct __sk_buff *ctx)
+{
+	struct arr_elem *elem;
+
+	elem = bpf_map_lookup_elem(&arrmap, &(int){0});
+	if (!elem)
+		return 0;
+	bpf_res_spin_lock((void *)&elem->lock + 1);
+	return 0;
+}
+
+SEC("?tc")
+__failure __msg("R1 doesn't have constant offset. bpf_res_spin_lock has to be at the constant offset")
+int res_spin_lock_var_off(struct __sk_buff *ctx)
+{
+	struct arr_elem *elem;
+	u64 val = value;
+
+	elem = bpf_map_lookup_elem(&arrmap, &(int){0});
+	if (!elem) {
+		// FIXME: Only inline assembly use in assert macro doesn't emit
+		// BTF definition.
+		bpf_throw(0);
+		return 0;
+	}
+	bpf_assert_range(val, 0, 40);
+	bpf_res_spin_lock((void *)&value + val);
+	return 0;
+}
+
+SEC("?tc")
+__failure __msg("map 'res_spin.bss' has no valid bpf_res_spin_lock")
+int res_spin_lock_no_lock_map(struct __sk_buff *ctx)
+{
+	bpf_res_spin_lock((void *)&value + 1);
+	return 0;
+}
+
+SEC("?tc")
+__failure __msg("local 'kptr' has no valid bpf_res_spin_lock")
+int res_spin_lock_no_lock_kptr(struct __sk_buff *ctx)
+{
+	struct { int i; } *p = bpf_obj_new(typeof(*p));
+
+	if (!p)
+		return 0;
+	bpf_res_spin_lock((void *)p);
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/security_bpf_map.c b/tools/testing/selftests/bpf/progs/security_bpf_map.c
new file mode 100644
index 000000000000..7216b3450e96
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/security_bpf_map.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "vmlinux.h"
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+#define EPERM 1 /* Operation not permitted */
+
+/* From include/linux/fs.h. */
+#define FMODE_WRITE 0x2
+
+struct map;
+
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__type(key, __u32);
+	__type(value, __u32);
+	__uint(max_entries, 1);
+} prot_status_map SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__type(key, __u32);
+	__type(value, __u32);
+	__uint(max_entries, 3);
+} prot_map SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__type(key, __u32);
+	__type(value, __u32);
+	__uint(max_entries, 3);
+} not_prot_map SEC(".maps");
+
+SEC("fmod_ret/security_bpf_map")
+int BPF_PROG(fmod_bpf_map, struct bpf_map *map, int fmode)
+{
+	__u32 key = 0;
+	__u32 *status_ptr = bpf_map_lookup_elem(&prot_status_map, &key);
+
+	if (!status_ptr || !*status_ptr)
+		return 0;
+
+	if (map == &prot_map) {
+		/* Allow read-only access */
+		if (fmode & FMODE_WRITE)
+			return -EPERM;
+	}
+
+	return 0;
+}
+
+/*
+ * This program keeps references to maps. This is needed to prevent
+ * optimizing them out.
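+ *
+ * The values written below are irrelevant; the two updates only make
+ * sure that both prot_map and not_prot_map are referenced from a
+ * program in this object.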
+ */
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(fentry_dummy1, int a)
+{
+	__u32 key = 0;
+	__u32 val1 = a;
+	__u32 val2 = a + 1;
+
+	bpf_map_update_elem(&prot_map, &key, &val1, BPF_ANY);
+	bpf_map_update_elem(&not_prot_map, &key, &val2, BPF_ANY);
+	return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/set_global_vars.c b/tools/testing/selftests/bpf/progs/set_global_vars.c
new file mode 100644
index 000000000000..ebaef28b2cb3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/set_global_vars.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include "bpf_experimental.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include <stdbool.h>
+
+char _license[] SEC("license") = "GPL";
+
+typedef __s32 s32;
+typedef s32 i32;
+typedef __u8 u8;
+
+enum Enum { EA1 = 0, EA2 = 11, EA3 = 10 };
+enum Enumu64 {EB1 = 0llu, EB2 = 12llu };
+enum Enums64 { EC1 = 0ll, EC2 = 13ll };
+
+const volatile __s64 var_s64 = -1;
+const volatile __u64 var_u64 = 0;
+const volatile i32 var_s32 = -1;
+const volatile __u32 var_u32 = 0;
+const volatile __s16 var_s16 = -1;
+const volatile __u16 var_u16 = 0;
+const volatile __s8 var_s8 = -1;
+const volatile u8 var_u8 = 0;
+const volatile enum Enum var_ea = EA1;
+const volatile enum Enumu64 var_eb = EB1;
+const volatile enum Enums64 var_ec = EC1;
+const volatile bool var_b = false;
+const volatile i32 arr[32];
+const volatile enum Enum enum_arr[32];
+const volatile i32 three_d[47][19][17];
+const volatile i32 *ptr_arr[32];
+
+struct Struct {
+	int:16;
+	__u16 filler;
+	struct {
+		const __u16 filler2;
+	};
+	struct Struct2 {
+		__u16 filler;
+		volatile struct {
+			const int:1;
+			union {
+				const volatile u8 var_u8[3];
+				const volatile __s16 filler3;
+				const int:1;
+				s32 mat[7][5];
+			} u;
+		};
+	} struct2[2][4];
+};
+
+const volatile __u32 stru = 0; /* same prefix as below */
+const volatile struct Struct struct1[3];
+const volatile struct Struct struct11[11][7];
+
+struct Struct3 {
+	struct {
+		u8 var_u8_l;
+	};
+	struct {
+		struct {
+			u8 var_u8_h;
+		};
+	};
+};
+
+typedef struct Struct3 Struct3_t;
+
+union Union {
+	__u16 var_u16;
+	Struct3_t struct3;
+};
+
+const volatile union Union union1 = {.var_u16 = -1};
+
+SEC("socket")
+int test_set_globals(void *ctx)
+{
+	volatile __s8 a;
+
+	a = var_s64;
+	a = var_u64;
+	a = var_s32;
+	a = var_u32;
+	a = var_s16;
+	a = var_u16;
+	a = var_s8;
+	a = var_u8;
+	a = var_ea;
+	a = var_eb;
+	a = var_ec;
+	a = var_b;
+	a = struct1[2].struct2[1][2].u.var_u8[2];
+	a = union1.var_u16;
+	a = arr[3];
+	a = arr[EA2];
+	a = enum_arr[EC2];
+	a = three_d[31][7][EA2];
+	a = struct1[2].struct2[1][2].u.mat[5][3];
+	a = struct11[7][5].struct2[0][1].u.mat[3][0];
+
+	return a;
+}
diff --git a/tools/testing/selftests/bpf/progs/setget_sockopt.c b/tools/testing/selftests/bpf/progs/setget_sockopt.c
index 6dd4318debbf..d330b1511979 100644
--- a/tools/testing/selftests/bpf/progs/setget_sockopt.c
+++ b/tools/testing/selftests/bpf/progs/setget_sockopt.c
@@ -61,6 +61,9 @@ static const struct sockopt_test sol_tcp_tests[] = {
 	{ .opt = TCP_NOTSENT_LOWAT, .new = 1314, .expected = 1314, },
 	{ .opt = TCP_BPF_SOCK_OPS_CB_FLAGS, .new = BPF_SOCK_OPS_ALL_CB_FLAGS, .expected = BPF_SOCK_OPS_ALL_CB_FLAGS, },
+	{ .opt = TCP_BPF_DELACK_MAX, .new = 30000, .expected = 30000, },
+	{ .opt = TCP_BPF_RTO_MIN, .new = 30000, .expected = 30000, },
+	{ .opt = TCP_RTO_MAX_MS, .new = 2000, .expected = 2000, },
 	{ .opt = 0, },
 };
 
@@ -80,6 +83,14 @@ struct loop_ctx {
 	struct sock *sk;
 };
 
+static bool
sk_is_tcp(struct sock *sk) +{ + return (sk->__sk_common.skc_family == AF_INET || + sk->__sk_common.skc_family == AF_INET6) && + sk->sk_type == SOCK_STREAM && + sk->sk_protocol == IPPROTO_TCP; +} + static int bpf_test_sockopt_flip(void *ctx, struct sock *sk, const struct sockopt_test *t, int level) @@ -88,6 +99,9 @@ static int bpf_test_sockopt_flip(void *ctx, struct sock *sk, opt = t->opt; + if (opt == SO_TXREHASH && !sk_is_tcp(sk)) + return 0; + if (bpf_getsockopt(ctx, level, opt, &old, sizeof(old))) return 1; /* kernel initialized txrehash to 255 */ diff --git a/tools/testing/selftests/bpf/progs/sock_addr_kern.c b/tools/testing/selftests/bpf/progs/sock_addr_kern.c index 8386bb15ccdc..84ad515eafd6 100644 --- a/tools/testing/selftests/bpf/progs/sock_addr_kern.c +++ b/tools/testing/selftests/bpf/progs/sock_addr_kern.c @@ -2,7 +2,7 @@ /* Copyright (c) 2024 Google LLC */ #include <vmlinux.h> #include <bpf/bpf_helpers.h> -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" SEC("syscall") int init_sock(struct init_sock_args *args) diff --git a/tools/testing/selftests/bpf/progs/sock_iter_batch.c b/tools/testing/selftests/bpf/progs/sock_iter_batch.c index 96531b0d9d55..77966ded5467 100644 --- a/tools/testing/selftests/bpf/progs/sock_iter_batch.c +++ b/tools/testing/selftests/bpf/progs/sock_iter_batch.c @@ -17,6 +17,13 @@ static bool ipv6_addr_loopback(const struct in6_addr *a) a->s6_addr32[2] | (a->s6_addr32[3] ^ bpf_htonl(1))) == 0; } +static bool ipv4_addr_loopback(__be32 a) +{ + return a == bpf_ntohl(0x7f000001); +} + +volatile const unsigned int sf; +volatile const unsigned int ss; volatile const __u16 ports[2]; unsigned int bucket[2]; @@ -26,22 +33,28 @@ int iter_tcp_soreuse(struct bpf_iter__tcp *ctx) struct sock *sk = (struct sock *)ctx->sk_common; struct inet_hashinfo *hinfo; unsigned int hash; + __u64 sock_cookie; struct net *net; int idx; if (!sk) return 0; + sock_cookie = bpf_get_socket_cookie(sk); sk = bpf_core_cast(sk, struct sock); - if (sk->sk_family != AF_INET6 || - sk->sk_state != TCP_LISTEN || - !ipv6_addr_loopback(&sk->sk_v6_rcv_saddr)) + if (sk->sk_family != sf || + (ss && sk->sk_state != ss) || + (sk->sk_family == AF_INET6 ? 
+ !ipv6_addr_loopback(&sk->sk_v6_rcv_saddr) : + !ipv4_addr_loopback(sk->sk_rcv_saddr))) return 0; if (sk->sk_num == ports[0]) idx = 0; else if (sk->sk_num == ports[1]) idx = 1; + else if (!ports[0] && !ports[1]) + idx = 0; else return 0; @@ -52,6 +65,28 @@ int iter_tcp_soreuse(struct bpf_iter__tcp *ctx) hinfo = net->ipv4.tcp_death_row.hashinfo; bucket[idx] = hash & hinfo->lhash2_mask; bpf_seq_write(ctx->meta->seq, &idx, sizeof(idx)); + bpf_seq_write(ctx->meta->seq, &sock_cookie, sizeof(sock_cookie)); + + return 0; +} + +volatile const __u64 destroy_cookie; + +SEC("iter/tcp") +int iter_tcp_destroy(struct bpf_iter__tcp *ctx) +{ + struct sock_common *sk_common = (struct sock_common *)ctx->sk_common; + __u64 sock_cookie; + + if (!sk_common) + return 0; + + sock_cookie = bpf_get_socket_cookie(sk_common); + if (sock_cookie != destroy_cookie) + return 0; + + bpf_sock_destroy(sk_common); + bpf_seq_write(ctx->meta->seq, &sock_cookie, sizeof(sock_cookie)); return 0; } @@ -63,20 +98,26 @@ int iter_udp_soreuse(struct bpf_iter__udp *ctx) { struct sock *sk = (struct sock *)ctx->udp_sk; struct udp_table *udptable; + __u64 sock_cookie; int idx; if (!sk) return 0; + sock_cookie = bpf_get_socket_cookie(sk); sk = bpf_core_cast(sk, struct sock); - if (sk->sk_family != AF_INET6 || - !ipv6_addr_loopback(&sk->sk_v6_rcv_saddr)) + if (sk->sk_family != sf || + (sk->sk_family == AF_INET6 ? + !ipv6_addr_loopback(&sk->sk_v6_rcv_saddr) : + !ipv4_addr_loopback(sk->sk_rcv_saddr))) return 0; if (sk->sk_num == ports[0]) idx = 0; else if (sk->sk_num == ports[1]) idx = 1; + else if (!ports[0] && !ports[1]) + idx = 0; else return 0; @@ -84,6 +125,7 @@ int iter_udp_soreuse(struct bpf_iter__udp *ctx) udptable = sk->sk_net.net->ipv4.udp_table; bucket[idx] = udp_sk(sk)->udp_portaddr_hash & udptable->mask; bpf_seq_write(ctx->meta->seq, &idx, sizeof(idx)); + bpf_seq_write(ctx->meta->seq, &sock_cookie, sizeof(sock_cookie)); return 0; } diff --git a/tools/testing/selftests/bpf/progs/stream.c b/tools/testing/selftests/bpf/progs/stream.c new file mode 100644 index 000000000000..35790897dc87 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/stream.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */ +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" +#include "bpf_experimental.h" + +struct arr_elem { + struct bpf_res_spin_lock lock; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct arr_elem); +} arrmap SEC(".maps"); + +#define ENOSPC 28 +#define _STR "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + +int size; + +SEC("syscall") +__success __retval(0) +int stream_exhaust(void *ctx) +{ + /* Use global variable for loop convergence. 
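+	 * Unlike a stack variable, the exact value of a global is not
+	 * tracked by the verifier across iterations, so the loop state can
+	 * converge instead of the loop being fully unrolled.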
*/ + size = 0; + bpf_repeat(BPF_MAX_LOOPS) { + if (bpf_stream_printk(BPF_STDOUT, _STR) == -ENOSPC && size == 99954) + return 0; + size += sizeof(_STR) - 1; + } + return 1; +} + +SEC("syscall") +__success __retval(0) +int stream_cond_break(void *ctx) +{ + while (can_loop) + ; + return 0; +} + +SEC("syscall") +__success __retval(0) +int stream_deadlock(void *ctx) +{ + struct bpf_res_spin_lock *lock, *nlock; + + lock = bpf_map_lookup_elem(&arrmap, &(int){0}); + if (!lock) + return 1; + nlock = bpf_map_lookup_elem(&arrmap, &(int){0}); + if (!nlock) + return 1; + if (bpf_res_spin_lock(lock)) + return 1; + if (bpf_res_spin_lock(nlock)) { + bpf_res_spin_unlock(lock); + return 0; + } + bpf_res_spin_unlock(nlock); + bpf_res_spin_unlock(lock); + return 1; +} + +SEC("syscall") +__success __retval(0) +int stream_syscall(void *ctx) +{ + bpf_stream_printk(BPF_STDOUT, "foo"); + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/stream_fail.c b/tools/testing/selftests/bpf/progs/stream_fail.c new file mode 100644 index 000000000000..b4a0d0cc8ec8 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/stream_fail.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */ +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> +#include "bpf_misc.h" + +SEC("syscall") +__failure __msg("Possibly NULL pointer passed") +int stream_vprintk_null_arg(void *ctx) +{ + bpf_stream_vprintk(BPF_STDOUT, "", NULL, 0, NULL); + return 0; +} + +SEC("syscall") +__failure __msg("R3 type=scalar expected=") +int stream_vprintk_scalar_arg(void *ctx) +{ + bpf_stream_vprintk(BPF_STDOUT, "", (void *)46, 0, NULL); + return 0; +} + +SEC("syscall") +__failure __msg("arg#1 doesn't point to a const string") +int stream_vprintk_string_arg(void *ctx) +{ + bpf_stream_vprintk(BPF_STDOUT, ctx, NULL, 0, NULL); + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/string_kfuncs_failure1.c b/tools/testing/selftests/bpf/progs/string_kfuncs_failure1.c new file mode 100644 index 000000000000..53af438bd998 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/string_kfuncs_failure1.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2025 Red Hat, Inc.*/ +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <linux/limits.h> +#include "bpf_misc.h" +#include "errno.h" + +char *user_ptr = (char *)1; +char *invalid_kern_ptr = (char *)-1; + +/* + * When passing userspace pointers, the error code differs based on arch: + * -ERANGE on arches with non-overlapping address spaces + * -EFAULT on other arches + */ +#if defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_loongarch) || \ + defined(__TARGET_ARCH_powerpc) || defined(__TARGET_ARCH_x86) +#define USER_PTR_ERR -ERANGE +#else +#define USER_PTR_ERR -EFAULT +#endif + +/* + * On s390, __get_kernel_nofault (used in string kfuncs) returns 0 for NULL and + * user_ptr (instead of causing an exception) so the below two groups of tests + * are not applicable. 
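+ * Both the NULL-argument and the user_ptr-argument groups below are
+ * therefore compiled out when targeting s390.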
+ */
+#ifndef __TARGET_ARCH_s390
+
+/* Passing NULL to string kfuncs (treated as a userspace ptr) */
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcmp_null1(void *ctx) { return bpf_strcmp(NULL, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcmp_null2(void *ctx) { return bpf_strcmp("hello", NULL); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strchr_null(void *ctx) { return bpf_strchr(NULL, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strchrnul_null(void *ctx) { return bpf_strchrnul(NULL, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strnchr_null(void *ctx) { return bpf_strnchr(NULL, 1, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strrchr_null(void *ctx) { return bpf_strrchr(NULL, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strlen_null(void *ctx) { return bpf_strlen(NULL); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strnlen_null(void *ctx) { return bpf_strnlen(NULL, 1); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strspn_null1(void *ctx) { return bpf_strspn(NULL, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strspn_null2(void *ctx) { return bpf_strspn("hello", NULL); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcspn_null1(void *ctx) { return bpf_strcspn(NULL, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcspn_null2(void *ctx) { return bpf_strcspn("hello", NULL); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strstr_null1(void *ctx) { return bpf_strstr(NULL, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strstr_null2(void *ctx) { return bpf_strstr("hello", NULL); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strnstr_null1(void *ctx) { return bpf_strnstr(NULL, "hello", 1); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strnstr_null2(void *ctx) { return bpf_strnstr("hello", NULL, 1); }
+
+/* Passing userspace ptr to string kfuncs */
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcmp_user_ptr1(void *ctx) { return bpf_strcmp(user_ptr, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcmp_user_ptr2(void *ctx) { return bpf_strcmp("hello", user_ptr); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strchr_user_ptr(void *ctx) { return bpf_strchr(user_ptr, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strchrnul_user_ptr(void *ctx) { return bpf_strchrnul(user_ptr, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strnchr_user_ptr(void *ctx) { return bpf_strnchr(user_ptr, 1, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strrchr_user_ptr(void *ctx) { return bpf_strrchr(user_ptr, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strlen_user_ptr(void *ctx) { return bpf_strlen(user_ptr); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strnlen_user_ptr(void *ctx) { return bpf_strnlen(user_ptr, 1); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strspn_user_ptr1(void *ctx) { return bpf_strspn(user_ptr, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strspn_user_ptr2(void *ctx) { return bpf_strspn("hello", user_ptr); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcspn_user_ptr1(void *ctx) { return bpf_strcspn(user_ptr, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcspn_user_ptr2(void *ctx) { return bpf_strcspn("hello", user_ptr); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strstr_user_ptr1(void *ctx) { return bpf_strstr(user_ptr, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strstr_user_ptr2(void *ctx) { return bpf_strstr("hello", user_ptr); }
+SEC("syscall")
__retval(USER_PTR_ERR) int test_strnstr_user_ptr1(void *ctx) { return bpf_strnstr(user_ptr, "hello", 1); } +SEC("syscall") __retval(USER_PTR_ERR) int test_strnstr_user_ptr2(void *ctx) { return bpf_strnstr("hello", user_ptr, 1); } + +#endif /* __TARGET_ARCH_s390 */ + +/* Passing invalid kernel ptr to string kfuncs should always return -EFAULT */ +SEC("syscall") __retval(-EFAULT) int test_strcmp_pagefault1(void *ctx) { return bpf_strcmp(invalid_kern_ptr, "hello"); } +SEC("syscall") __retval(-EFAULT) int test_strcmp_pagefault2(void *ctx) { return bpf_strcmp("hello", invalid_kern_ptr); } +SEC("syscall") __retval(-EFAULT) int test_strchr_pagefault(void *ctx) { return bpf_strchr(invalid_kern_ptr, 'a'); } +SEC("syscall") __retval(-EFAULT) int test_strchrnul_pagefault(void *ctx) { return bpf_strchrnul(invalid_kern_ptr, 'a'); } +SEC("syscall") __retval(-EFAULT) int test_strnchr_pagefault(void *ctx) { return bpf_strnchr(invalid_kern_ptr, 1, 'a'); } +SEC("syscall") __retval(-EFAULT) int test_strrchr_pagefault(void *ctx) { return bpf_strrchr(invalid_kern_ptr, 'a'); } +SEC("syscall") __retval(-EFAULT) int test_strlen_pagefault(void *ctx) { return bpf_strlen(invalid_kern_ptr); } +SEC("syscall") __retval(-EFAULT) int test_strnlen_pagefault(void *ctx) { return bpf_strnlen(invalid_kern_ptr, 1); } +SEC("syscall") __retval(-EFAULT) int test_strspn_pagefault1(void *ctx) { return bpf_strspn(invalid_kern_ptr, "hello"); } +SEC("syscall") __retval(-EFAULT) int test_strspn_pagefault2(void *ctx) { return bpf_strspn("hello", invalid_kern_ptr); } +SEC("syscall") __retval(-EFAULT) int test_strcspn_pagefault1(void *ctx) { return bpf_strcspn(invalid_kern_ptr, "hello"); } +SEC("syscall") __retval(-EFAULT) int test_strcspn_pagefault2(void *ctx) { return bpf_strcspn("hello", invalid_kern_ptr); } +SEC("syscall") __retval(-EFAULT) int test_strstr_pagefault1(void *ctx) { return bpf_strstr(invalid_kern_ptr, "hello"); } +SEC("syscall") __retval(-EFAULT) int test_strstr_pagefault2(void *ctx) { return bpf_strstr("hello", invalid_kern_ptr); } +SEC("syscall") __retval(-EFAULT) int test_strnstr_pagefault1(void *ctx) { return bpf_strnstr(invalid_kern_ptr, "hello", 1); } +SEC("syscall") __retval(-EFAULT) int test_strnstr_pagefault2(void *ctx) { return bpf_strnstr("hello", invalid_kern_ptr, 1); } + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/string_kfuncs_failure2.c b/tools/testing/selftests/bpf/progs/string_kfuncs_failure2.c new file mode 100644 index 000000000000..89fb4669b0e9 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/string_kfuncs_failure2.c @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2025 Red Hat, Inc.*/ +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <linux/limits.h> + +char long_str[XATTR_SIZE_MAX + 1]; + +SEC("syscall") int test_strcmp_too_long(void *ctx) { return bpf_strcmp(long_str, long_str); } +SEC("syscall") int test_strchr_too_long(void *ctx) { return bpf_strchr(long_str, 'b'); } +SEC("syscall") int test_strchrnul_too_long(void *ctx) { return bpf_strchrnul(long_str, 'b'); } +SEC("syscall") int test_strnchr_too_long(void *ctx) { return bpf_strnchr(long_str, sizeof(long_str), 'b'); } +SEC("syscall") int test_strrchr_too_long(void *ctx) { return bpf_strrchr(long_str, 'b'); } +SEC("syscall") int test_strlen_too_long(void *ctx) { return bpf_strlen(long_str); } +SEC("syscall") int test_strnlen_too_long(void *ctx) { return bpf_strnlen(long_str, sizeof(long_str)); } +SEC("syscall") int test_strspn_str_too_long(void *ctx) { return 
bpf_strspn(long_str, "a"); } +SEC("syscall") int test_strspn_accept_too_long(void *ctx) { return bpf_strspn("b", long_str); } +SEC("syscall") int test_strcspn_str_too_long(void *ctx) { return bpf_strcspn(long_str, "b"); } +SEC("syscall") int test_strcspn_reject_too_long(void *ctx) { return bpf_strcspn("b", long_str); } +SEC("syscall") int test_strstr_too_long(void *ctx) { return bpf_strstr(long_str, "hello"); } +SEC("syscall") int test_strnstr_too_long(void *ctx) { return bpf_strnstr(long_str, "hello", sizeof(long_str)); } + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/string_kfuncs_success.c b/tools/testing/selftests/bpf/progs/string_kfuncs_success.c new file mode 100644 index 000000000000..46697f381878 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/string_kfuncs_success.c @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2025 Red Hat, Inc.*/ +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" +#include "errno.h" + +char str[] = "hello world"; + +#define __test(retval) SEC("syscall") __success __retval(retval) + +/* Functional tests */ +__test(0) int test_strcmp_eq(void *ctx) { return bpf_strcmp(str, "hello world"); } +__test(1) int test_strcmp_neq(void *ctx) { return bpf_strcmp(str, "hello"); } +__test(1) int test_strchr_found(void *ctx) { return bpf_strchr(str, 'e'); } +__test(11) int test_strchr_null(void *ctx) { return bpf_strchr(str, '\0'); } +__test(-ENOENT) int test_strchr_notfound(void *ctx) { return bpf_strchr(str, 'x'); } +__test(1) int test_strchrnul_found(void *ctx) { return bpf_strchrnul(str, 'e'); } +__test(11) int test_strchrnul_notfound(void *ctx) { return bpf_strchrnul(str, 'x'); } +__test(1) int test_strnchr_found(void *ctx) { return bpf_strnchr(str, 5, 'e'); } +__test(11) int test_strnchr_null(void *ctx) { return bpf_strnchr(str, 12, '\0'); } +__test(-ENOENT) int test_strnchr_notfound(void *ctx) { return bpf_strnchr(str, 5, 'w'); } +__test(9) int test_strrchr_found(void *ctx) { return bpf_strrchr(str, 'l'); } +__test(11) int test_strrchr_null(void *ctx) { return bpf_strrchr(str, '\0'); } +__test(-ENOENT) int test_strrchr_notfound(void *ctx) { return bpf_strrchr(str, 'x'); } +__test(11) int test_strlen(void *ctx) { return bpf_strlen(str); } +__test(11) int test_strnlen(void *ctx) { return bpf_strnlen(str, 12); } +__test(5) int test_strspn(void *ctx) { return bpf_strspn(str, "ehlo"); } +__test(2) int test_strcspn(void *ctx) { return bpf_strcspn(str, "lo"); } +__test(6) int test_strstr_found(void *ctx) { return bpf_strstr(str, "world"); } +__test(-ENOENT) int test_strstr_notfound(void *ctx) { return bpf_strstr(str, "hi"); } +__test(0) int test_strstr_empty(void *ctx) { return bpf_strstr(str, ""); } +__test(0) int test_strnstr_found(void *ctx) { return bpf_strnstr(str, "hello", 6); } +__test(-ENOENT) int test_strnstr_notfound(void *ctx) { return bpf_strnstr(str, "hi", 10); } +__test(0) int test_strnstr_empty(void *ctx) { return bpf_strnstr(str, "", 1); } + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/strncmp_bench.c b/tools/testing/selftests/bpf/progs/strncmp_bench.c index 18373a7df76e..f47bf88f8d2a 100644 --- a/tools/testing/selftests/bpf/progs/strncmp_bench.c +++ b/tools/testing/selftests/bpf/progs/strncmp_bench.c @@ -35,7 +35,10 @@ static __always_inline int local_strncmp(const char *s1, unsigned int sz, SEC("tp/syscalls/sys_enter_getpgid") int strncmp_no_helper(void *ctx) { - if (local_strncmp(str, cmp_str_len + 1, target) < 0) + 
const char *target_str = target;
+
+	barrier_var(target_str);
+	if (local_strncmp(str, cmp_str_len + 1, target_str) < 0)
 		__sync_add_and_fetch(&hits, 1);
 	return 0;
 }
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_detach.c b/tools/testing/selftests/bpf/progs/struct_ops_detach.c
index d7fdcabe7d90..284a5b008e0c 100644
--- a/tools/testing/selftests/bpf/progs/struct_ops_detach.c
+++ b/tools/testing/selftests/bpf/progs/struct_ops_detach.c
@@ -2,7 +2,7 @@
 /* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
 #include <vmlinux.h>
 #include <bpf/bpf_helpers.h>
-#include "../bpf_testmod/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod.h"
 
 char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_forgotten_cb.c b/tools/testing/selftests/bpf/progs/struct_ops_forgotten_cb.c
index 3c822103bd40..d8cc99f5c2e2 100644
--- a/tools/testing/selftests/bpf/progs/struct_ops_forgotten_cb.c
+++ b/tools/testing/selftests/bpf/progs/struct_ops_forgotten_cb.c
@@ -2,7 +2,7 @@
 /* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
 #include <vmlinux.h>
 #include <bpf/bpf_tracing.h>
-#include "../bpf_testmod/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod.h"
 
 char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_kptr_return.c b/tools/testing/selftests/bpf/progs/struct_ops_kptr_return.c
new file mode 100644
index 000000000000..36386b3c23a1
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_kptr_return.c
@@ -0,0 +1,30 @@
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+void bpf_task_release(struct task_struct *p) __ksym;
+
+/* This tests struct_ops BPF programs returning a referenced kptr. The verifier should
+ * allow a referenced kptr or a NULL pointer to be returned. A referenced kptr to task
+ * here is acquired automatically as the task argument is tagged with "__ref".
+ */
+SEC("struct_ops/test_return_ref_kptr")
+struct task_struct *BPF_PROG(kptr_return, int dummy,
+			     struct task_struct *task, struct cgroup *cgrp)
+{
+	if (dummy % 2) {
+		bpf_task_release(task);
+		return NULL;
+	}
+	return task;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_kptr_return = {
+	.test_return_ref_kptr = (void *)kptr_return,
+};
+
+
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__invalid_scalar.c b/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__invalid_scalar.c
new file mode 100644
index 000000000000..caeea158ef69
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__invalid_scalar.c
@@ -0,0 +1,26 @@
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct cgroup *bpf_cgroup_acquire(struct cgroup *p) __ksym;
+void bpf_task_release(struct task_struct *p) __ksym;
+
+/* This tests struct_ops BPF programs returning a referenced kptr. The verifier should
+ * reject programs returning a non-zero scalar value.
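+ * For a hook returning a pointer, the only scalar the verifier accepts
+ * is 0 (NULL), hence the [0, 0] range in the expected message.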
+ */
+SEC("struct_ops/test_return_ref_kptr")
+__failure __msg("At program exit the register R0 has smin=1 smax=1 should have been in [0, 0]")
+struct task_struct *BPF_PROG(kptr_return_fail__invalid_scalar, int dummy,
+			     struct task_struct *task, struct cgroup *cgrp)
+{
+	bpf_task_release(task);
+	return (struct task_struct *)1;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_kptr_return = {
+	.test_return_ref_kptr = (void *)kptr_return_fail__invalid_scalar,
+};
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__local_kptr.c b/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__local_kptr.c
new file mode 100644
index 000000000000..b8b4f05c3d7f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__local_kptr.c
@@ -0,0 +1,34 @@
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+#include "bpf_experimental.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct cgroup *bpf_cgroup_acquire(struct cgroup *p) __ksym;
+void bpf_task_release(struct task_struct *p) __ksym;
+
+/* This tests struct_ops BPF programs returning a referenced kptr. The verifier should
+ * reject programs returning a local kptr.
+ */
+SEC("struct_ops/test_return_ref_kptr")
+__failure __msg("At program exit the register R0 is not a known value (ptr_or_null_)")
+struct task_struct *BPF_PROG(kptr_return_fail__local_kptr, int dummy,
+			     struct task_struct *task, struct cgroup *cgrp)
+{
+	struct task_struct *t;
+
+	bpf_task_release(task);
+
+	t = bpf_obj_new(typeof(*task));
+	if (!t)
+		return NULL;
+
+	return t;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_kptr_return = {
+	.test_return_ref_kptr = (void *)kptr_return_fail__local_kptr,
+};
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__nonzero_offset.c b/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__nonzero_offset.c
new file mode 100644
index 000000000000..7ddeb28c2329
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__nonzero_offset.c
@@ -0,0 +1,25 @@
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct cgroup *bpf_cgroup_acquire(struct cgroup *p) __ksym;
+void bpf_task_release(struct task_struct *p) __ksym;
+
+/* This tests struct_ops BPF programs returning a referenced kptr. The verifier should
+ * reject programs returning a modified referenced kptr.
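+ * Taking the address of a member (&task->jobctl below) yields a trusted
+ * pointer at a non-zero offset into the object, which can no longer be
+ * returned as the referenced object itself.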
+ */
+SEC("struct_ops/test_return_ref_kptr")
+__failure __msg("dereference of modified trusted_ptr_ ptr R0 off={{[0-9]+}} disallowed")
+struct task_struct *BPF_PROG(kptr_return_fail__nonzero_offset, int dummy,
+			     struct task_struct *task, struct cgroup *cgrp)
+{
+	return (struct task_struct *)&task->jobctl;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_kptr_return = {
+	.test_return_ref_kptr = (void *)kptr_return_fail__nonzero_offset,
+};
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__wrong_type.c b/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__wrong_type.c
new file mode 100644
index 000000000000..6a2dd5367802
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__wrong_type.c
@@ -0,0 +1,30 @@
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct cgroup *bpf_cgroup_acquire(struct cgroup *p) __ksym;
+void bpf_task_release(struct task_struct *p) __ksym;
+
+/* This tests struct_ops BPF programs returning a referenced kptr. The verifier should
+ * reject programs returning a referenced kptr of the wrong type.
+ */
+SEC("struct_ops/test_return_ref_kptr")
+__failure __msg("At program exit the register R0 is not a known value (ptr_or_null_)")
+struct task_struct *BPF_PROG(kptr_return_fail__wrong_type, int dummy,
+			     struct task_struct *task, struct cgroup *cgrp)
+{
+	struct task_struct *ret;
+
+	ret = (struct task_struct *)bpf_cgroup_acquire(cgrp);
+	bpf_task_release(task);
+
+	return ret;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_kptr_return = {
+	.test_return_ref_kptr = (void *)kptr_return_fail__wrong_type,
+};
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c
index b450f72e744a..ccab3935aa42 100644
--- a/tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c
+++ b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c
@@ -2,7 +2,7 @@
 /* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
 #include <vmlinux.h>
 #include <bpf/bpf_tracing.h>
-#include "../bpf_testmod/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod.h"
 
 char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c
index 6283099ec383..8b5515f4f724 100644
--- a/tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c
+++ b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c
@@ -2,7 +2,7 @@
 /* Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
*/ #include <vmlinux.h> #include <bpf/bpf_tracing.h> -#include "../bpf_testmod/bpf_testmod.h" +#include "../test_kmods/bpf_testmod.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/struct_ops_module.c b/tools/testing/selftests/bpf/progs/struct_ops_module.c index 4c56d4a9d9f4..71c420c3a5a6 100644 --- a/tools/testing/selftests/bpf/progs/struct_ops_module.c +++ b/tools/testing/selftests/bpf/progs/struct_ops_module.c @@ -3,7 +3,7 @@ #include <vmlinux.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> -#include "../bpf_testmod/bpf_testmod.h" +#include "../test_kmods/bpf_testmod.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/struct_ops_multi_pages.c b/tools/testing/selftests/bpf/progs/struct_ops_multi_pages.c index 9efcc6e4d356..5b23ea817f1f 100644 --- a/tools/testing/selftests/bpf/progs/struct_ops_multi_pages.c +++ b/tools/testing/selftests/bpf/progs/struct_ops_multi_pages.c @@ -3,7 +3,7 @@ #include <vmlinux.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> -#include "../bpf_testmod/bpf_testmod.h" +#include "../test_kmods/bpf_testmod.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/struct_ops_nulled_out_cb.c b/tools/testing/selftests/bpf/progs/struct_ops_nulled_out_cb.c index fa2021388485..5d0937fa07be 100644 --- a/tools/testing/selftests/bpf/progs/struct_ops_nulled_out_cb.c +++ b/tools/testing/selftests/bpf/progs/struct_ops_nulled_out_cb.c @@ -2,7 +2,7 @@ /* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ #include <vmlinux.h> #include <bpf/bpf_tracing.h> -#include "../bpf_testmod/bpf_testmod.h" +#include "../test_kmods/bpf_testmod.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c b/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c index 8ea57e5348ab..dbe646013811 100644 --- a/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c +++ b/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c @@ -3,11 +3,11 @@ #include <vmlinux.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> -#include "../bpf_testmod/bpf_testmod.h" +#include "../test_kmods/bpf_testmod.h" char _license[] SEC("license") = "GPL"; -#if defined(__TARGET_ARCH_x86) +#if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64) bool skip __attribute((__section__(".data"))) = false; #else bool skip = true; diff --git a/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c index 1f55ec4cee37..3d89ad7cbe2a 100644 --- a/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c +++ b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c @@ -3,11 +3,11 @@ #include <vmlinux.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> -#include "../bpf_testmod/bpf_testmod.h" +#include "../test_kmods/bpf_testmod.h" char _license[] SEC("license") = "GPL"; -#if defined(__TARGET_ARCH_x86) +#if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64) bool skip __attribute((__section__(".data"))) = false; #else bool skip = true; diff --git a/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c index f2f300d50988..b1f6d7e5a8e5 100644 --- a/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c +++ b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c @@ -3,11 +3,11 @@ #include <vmlinux.h> 
#include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
-#include "../bpf_testmod/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod.h"
 
 char _license[] SEC("license") = "GPL";
 
-#if defined(__TARGET_ARCH_x86)
+#if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64)
 bool skip __attribute((__section__(".data"))) = false;
 #else
 bool skip = true;
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_refcounted.c b/tools/testing/selftests/bpf/progs/struct_ops_refcounted.c
new file mode 100644
index 000000000000..76dcb6089d7f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_refcounted.c
@@ -0,0 +1,31 @@
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+__attribute__((nomerge)) extern void bpf_task_release(struct task_struct *p) __ksym;
+
+/* This is a test BPF program that uses struct_ops to access a referenced
+ * kptr argument. This is a test for the verifier to ensure that it
+ * 1) recognizes the task as a referenced object (i.e., ref_obj_id > 0), and
+ * 2) allows the same reference to be acquired from multiple paths as long
+ *    as it has not been released.
+ */
+SEC("struct_ops/test_refcounted")
+int BPF_PROG(refcounted, int dummy, struct task_struct *task)
+{
+	if (dummy == 1)
+		bpf_task_release(task);
+	else
+		bpf_task_release(task);
+	return 0;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_refcounted = {
+	.test_refcounted = (void *)refcounted,
+};
+
+
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__global_subprog.c b/tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__global_subprog.c
new file mode 100644
index 000000000000..ae074aa62852
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__global_subprog.c
@@ -0,0 +1,39 @@
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+extern void bpf_task_release(struct task_struct *p) __ksym;
+
+__noinline int subprog_release(__u64 *ctx __arg_ctx)
+{
+	struct task_struct *task = (struct task_struct *)ctx[1];
+	int dummy = (int)ctx[0];
+
+	bpf_task_release(task);
+
+	return dummy + 1;
+}
+
+/* Test that the verifier rejects a program that contains a global
+ * subprogram with referenced kptr arguments
+ */
+SEC("struct_ops/test_refcounted")
+__failure __log_level(2)
+__msg("Validating subprog_release() func#1...")
+__msg("invalid bpf_context access off=8.
Reference may already be released") +int refcounted_fail__global_subprog(unsigned long long *ctx) +{ + struct task_struct *task = (struct task_struct *)ctx[1]; + + bpf_task_release(task); + + return subprog_release(ctx); +} + +SEC(".struct_ops.link") +struct bpf_testmod_ops testmod_ref_acquire = { + .test_refcounted = (void *)refcounted_fail__global_subprog, +}; diff --git a/tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__ref_leak.c b/tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__ref_leak.c new file mode 100644 index 000000000000..e945b1a04294 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__ref_leak.c @@ -0,0 +1,22 @@ +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include "../test_kmods/bpf_testmod.h" +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + +/* Test that the verifier rejects a program that acquires a referenced + * kptr through context without releasing the reference + */ +SEC("struct_ops/test_refcounted") +__failure __msg("Unreleased reference id=1 alloc_insn=0") +int BPF_PROG(refcounted_fail__ref_leak, int dummy, + struct task_struct *task) +{ + return 0; +} + +SEC(".struct_ops.link") +struct bpf_testmod_ops testmod_ref_acquire = { + .test_refcounted = (void *)refcounted_fail__ref_leak, +}; diff --git a/tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__tail_call.c b/tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__tail_call.c new file mode 100644 index 000000000000..3b125025a1f2 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__tail_call.c @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */ +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include "../test_kmods/bpf_testmod.h" +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 1); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(__u32)); +} prog_array SEC(".maps"); + +/* Test that the verifier rejects a program with referenced kptr arguments + * that tail call + */ +SEC("struct_ops/test_refcounted") +__failure __msg("program with __ref argument cannot tail call") +int refcounted_fail__tail_call(unsigned long long *ctx) +{ + struct task_struct *task = (struct task_struct *)ctx[1]; + + bpf_task_release(task); + bpf_tail_call(ctx, &prog_array, 0); + + return 0; +} + +SEC(".struct_ops.link") +struct bpf_testmod_ops testmod_ref_acquire = { + .test_refcounted = (void *)refcounted_fail__tail_call, +}; + diff --git a/tools/testing/selftests/bpf/progs/summarization.c b/tools/testing/selftests/bpf/progs/summarization.c new file mode 100644 index 000000000000..f89effe82c9e --- /dev/null +++ b/tools/testing/selftests/bpf/progs/summarization.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +__noinline +long changes_pkt_data(struct __sk_buff *sk) +{ + return bpf_skb_pull_data(sk, 0); +} + +__noinline __weak +long does_not_change_pkt_data(struct __sk_buff *sk) +{ + return 0; +} + +SEC("?tc") +int main_changes_with_subprogs(struct __sk_buff *sk) +{ + changes_pkt_data(sk); + does_not_change_pkt_data(sk); + return 0; +} + +SEC("?tc") +int main_changes(struct __sk_buff *sk) +{ + bpf_skb_pull_data(sk, 0); + return 0; +} + +SEC("?tc") +int main_does_not_change(struct __sk_buff *sk) +{ + return 0; +} + +__noinline +long might_sleep(struct pt_regs *ctx 
__arg_ctx) +{ + int i; + + bpf_copy_from_user(&i, sizeof(i), NULL); + return i; +} + +__noinline __weak +long does_not_sleep(struct pt_regs *ctx __arg_ctx) +{ + return 0; +} + +SEC("?uprobe.s") +int main_might_sleep_with_subprogs(struct pt_regs *ctx) +{ + might_sleep(ctx); + does_not_sleep(ctx); + return 0; +} + +SEC("?uprobe.s") +int main_might_sleep(struct pt_regs *ctx) +{ + int i; + + bpf_copy_from_user(&i, sizeof(i), NULL); + return i; +} + +SEC("?uprobe.s") +int main_does_not_sleep(struct pt_regs *ctx) +{ + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/summarization_freplace.c b/tools/testing/selftests/bpf/progs/summarization_freplace.c new file mode 100644 index 000000000000..935f00e0e9ea --- /dev/null +++ b/tools/testing/selftests/bpf/progs/summarization_freplace.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> + +SEC("?freplace") +long changes_pkt_data(struct __sk_buff *sk) +{ + return bpf_skb_pull_data(sk, 0); +} + +SEC("?freplace") +long does_not_change_pkt_data(struct __sk_buff *sk) +{ + return 0; +} + +SEC("?freplace") +long might_sleep(struct pt_regs *ctx) +{ + int i; + + bpf_copy_from_user(&i, sizeof(i), NULL); + return i; +} + +SEC("?freplace") +long does_not_sleep(struct pt_regs *ctx) +{ + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/syscall.c b/tools/testing/selftests/bpf/progs/syscall.c index 0f4dfb770c32..b698cc62a371 100644 --- a/tools/testing/selftests/bpf/progs/syscall.c +++ b/tools/testing/selftests/bpf/progs/syscall.c @@ -76,9 +76,9 @@ static int btf_load(void) .magic = BTF_MAGIC, .version = BTF_VERSION, .hdr_len = sizeof(struct btf_header), - .type_len = sizeof(__u32) * 8, - .str_off = sizeof(__u32) * 8, - .str_len = sizeof(__u32), + .type_len = sizeof(raw_btf.types), + .str_off = offsetof(struct btf_blob, str) - offsetof(struct btf_blob, types), + .str_len = sizeof(raw_btf.str), }, .types = { /* long */ diff --git a/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c b/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c index d1a57f7d09bd..fe6249d99b31 100644 --- a/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c +++ b/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c @@ -11,6 +11,8 @@ int subprog_tc(struct __sk_buff *skb) __sink(skb); __sink(ret); + /* let verifier know that 'subprog_tc' can change pointers to skb->data */ + bpf_skb_change_proto(skb, 0, 0); return ret; } diff --git a/tools/testing/selftests/bpf/progs/test_btf_ext.c b/tools/testing/selftests/bpf/progs/test_btf_ext.c new file mode 100644 index 000000000000..cdf20331db04 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_btf_ext.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2025 Meta Platforms Inc. 
*/ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + +__noinline static void f0(void) +{ + __u64 a = 1; + + __sink(a); +} + +SEC("xdp") +__u64 global_func(struct xdp_md *xdp) +{ + f0(); + return XDP_DROP; +} diff --git a/tools/testing/selftests/bpf/progs/test_cgroup1_hierarchy.c b/tools/testing/selftests/bpf/progs/test_cgroup1_hierarchy.c index 44628865fe1d..4fee0fdc7607 100644 --- a/tools/testing/selftests/bpf/progs/test_cgroup1_hierarchy.c +++ b/tools/testing/selftests/bpf/progs/test_cgroup1_hierarchy.c @@ -51,13 +51,13 @@ out: } SEC("lsm/bpf") -int BPF_PROG(lsm_run, int cmd, union bpf_attr *attr, unsigned int size) +int BPF_PROG(lsm_run, int cmd, union bpf_attr *attr, unsigned int size, bool kernel) { return bpf_link_create_verify(cmd); } SEC("lsm.s/bpf") -int BPF_PROG(lsm_s_run, int cmd, union bpf_attr *attr, unsigned int size) +int BPF_PROG(lsm_s_run, int cmd, union bpf_attr *attr, unsigned int size, bool kernel) { return bpf_link_create_verify(cmd); } diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect.c b/tools/testing/selftests/bpf/progs/test_cls_redirect.c index 683c8aaa63da..f344c6835e84 100644 --- a/tools/testing/selftests/bpf/progs/test_cls_redirect.c +++ b/tools/testing/selftests/bpf/progs/test_cls_redirect.c @@ -15,7 +15,7 @@ #include <linux/ipv6.h> #include <linux/pkt_cls.h> #include <linux/tcp.h> -#include <linux/udp.h> +#include <netinet/udp.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_endian.h> diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect.h b/tools/testing/selftests/bpf/progs/test_cls_redirect.h index 233b089d1fba..eb55cb8a3dbd 100644 --- a/tools/testing/selftests/bpf/progs/test_cls_redirect.h +++ b/tools/testing/selftests/bpf/progs/test_cls_redirect.h @@ -10,7 +10,7 @@ #include <linux/in.h> #include <linux/ip.h> #include <linux/ipv6.h> -#include <linux/udp.h> +#include <netinet/udp.h> /* offsetof() is used in static asserts, and the libbpf-redefined CO-RE * friendly version breaks compilation for older clang versions <= 15 diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c b/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c index 464515b824b9..d0f7670351e5 100644 --- a/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c +++ b/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c @@ -15,7 +15,7 @@ #include <linux/ipv6.h> #include <linux/pkt_cls.h> #include <linux/tcp.h> -#include <linux/udp.h> +#include <netinet/udp.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_endian.h> diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c b/tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c index 51b3f79df523..448403634eea 100644 --- a/tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c +++ b/tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c @@ -15,6 +15,7 @@ struct { struct core_reloc_arrays_output { int a2; + int a3; char b123; int c1c; int d00d; @@ -41,6 +42,7 @@ int test_core_arrays(void *ctx) { struct core_reloc_arrays *in = (void *)&data.in; struct core_reloc_arrays_output *out = (void *)&data.out; + int *a; if (CORE_READ(&out->a2, &in->a[2])) return 1; @@ -53,6 +55,9 @@ int test_core_arrays(void *ctx) if (CORE_READ(&out->f01c, &in->f[0][1].c)) return 1; + a = __builtin_preserve_access_index(({ in->a; })); + out->a3 = a[0] + a[1] + a[2] + a[3]; + return 0; } diff --git a/tools/testing/selftests/bpf/progs/test_fill_link_info.c 
b/tools/testing/selftests/bpf/progs/test_fill_link_info.c index 6afa834756e9..fac33a14f200 100644 --- a/tools/testing/selftests/bpf/progs/test_fill_link_info.c +++ b/tools/testing/selftests/bpf/progs/test_fill_link_info.c @@ -6,13 +6,20 @@ #include <stdbool.h> extern bool CONFIG_X86_KERNEL_IBT __kconfig __weak; +extern bool CONFIG_PPC_FTRACE_OUT_OF_LINE __kconfig __weak; +extern bool CONFIG_KPROBES_ON_FTRACE __kconfig __weak; +extern bool CONFIG_PPC64 __kconfig __weak; -/* This function is here to have CONFIG_X86_KERNEL_IBT - * used and added to object BTF. +/* This function is here to have CONFIG_X86_KERNEL_IBT, + * CONFIG_PPC_FTRACE_OUT_OF_LINE, CONFIG_KPROBES_ON_FTRACE, + * CONFIG_PPC64 used and added to object BTF. */ int unused(void) { - return CONFIG_X86_KERNEL_IBT ? 0 : 1; + return CONFIG_X86_KERNEL_IBT || + CONFIG_PPC_FTRACE_OUT_OF_LINE || + CONFIG_KPROBES_ON_FTRACE || + CONFIG_PPC64 ? 0 : 1; } SEC("kprobe") diff --git a/tools/testing/selftests/bpf/progs/test_get_xattr.c b/tools/testing/selftests/bpf/progs/test_get_xattr.c index 66e737720f7c..54305f4c9f2d 100644 --- a/tools/testing/selftests/bpf/progs/test_get_xattr.c +++ b/tools/testing/selftests/bpf/progs/test_get_xattr.c @@ -6,6 +6,7 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> #include "bpf_kfuncs.h" +#include "bpf_misc.h" char _license[] SEC("license") = "GPL"; @@ -17,12 +18,23 @@ static const char expected_value[] = "hello"; char value1[32]; char value2[32]; +/* Matches caller of test_get_xattr() in prog_tests/fs_kfuncs.c */ +static const char xattr_names[][64] = { + /* The following work. */ + "user.kfuncs", + "security.bpf.xxx", + + /* The following do not work. */ + "security.bpf", + "security.selinux" +}; + SEC("lsm.s/file_open") int BPF_PROG(test_file_open, struct file *f) { struct bpf_dynptr value_ptr; __u32 pid; - int ret; + int ret, i; pid = bpf_get_current_pid_tgid() >> 32; if (pid != monitored_pid) @@ -30,7 +42,11 @@ int BPF_PROG(test_file_open, struct file *f) bpf_dynptr_from_mem(value1, sizeof(value1), 0, &value_ptr); - ret = bpf_get_file_xattr(f, "user.kfuncs", &value_ptr); + for (i = 0; i < ARRAY_SIZE(xattr_names); i++) { + ret = bpf_get_file_xattr(f, xattr_names[i], &value_ptr); + if (ret == sizeof(expected_value)) + break; + } if (ret != sizeof(expected_value)) return 0; if (bpf_strncmp(value1, ret, expected_value)) @@ -44,7 +60,7 @@ int BPF_PROG(test_inode_getxattr, struct dentry *dentry, char *name) { struct bpf_dynptr value_ptr; __u32 pid; - int ret; + int ret, i; pid = bpf_get_current_pid_tgid() >> 32; if (pid != monitored_pid) @@ -52,7 +68,11 @@ int BPF_PROG(test_inode_getxattr, struct dentry *dentry, char *name) bpf_dynptr_from_mem(value2, sizeof(value2), 0, &value_ptr); - ret = bpf_get_dentry_xattr(dentry, "user.kfuncs", &value_ptr); + for (i = 0; i < ARRAY_SIZE(xattr_names); i++) { + ret = bpf_get_dentry_xattr(dentry, xattr_names[i], &value_ptr); + if (ret == sizeof(expected_value)) + break; + } if (ret != sizeof(expected_value)) return 0; if (bpf_strncmp(value2, ret, expected_value)) diff --git a/tools/testing/selftests/bpf/progs/test_global_func10.c b/tools/testing/selftests/bpf/progs/test_global_func10.c index 5da001ca57a5..09d027bd3ea8 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func10.c +++ b/tools/testing/selftests/bpf/progs/test_global_func10.c @@ -26,7 +26,7 @@ __noinline int foo(const struct Big *big) } SEC("cgroup_skb/ingress") -__failure __msg("invalid indirect access to stack") +__failure __msg("invalid read from stack") int global_func10(struct __sk_buff *skb)
{ const struct Small small = {.x = skb->len }; diff --git a/tools/testing/selftests/bpf/progs/test_global_map_resize.c b/tools/testing/selftests/bpf/progs/test_global_map_resize.c index a3f220ba7025..ee65bad0436d 100644 --- a/tools/testing/selftests/bpf/progs/test_global_map_resize.c +++ b/tools/testing/selftests/bpf/progs/test_global_map_resize.c @@ -32,6 +32,16 @@ int my_int_last SEC(".data.array_not_last"); int percpu_arr[1] SEC(".data.percpu_arr"); +/* at least one extern is included, to ensure that a specific + * regression is tested whereby resizing resulted in a use-after-free + * bug after type information is invalidated by the resize operation. + * + * There isn't a particularly good API to test for this specific condition, + * but by having externs for the resizing tests it will cover this path. + */ +extern int LINUX_KERNEL_VERSION __kconfig; +long version_sink; + SEC("tp/syscalls/sys_enter_getpid") int bss_array_sum(void *ctx) { @@ -44,6 +54,9 @@ int bss_array_sum(void *ctx) for (size_t i = 0; i < bss_array_len; ++i) sum += array[i]; + /* see above; ensure this is not optimized out */ + version_sink = LINUX_KERNEL_VERSION; + return 0; } @@ -59,6 +72,9 @@ int data_array_sum(void *ctx) for (size_t i = 0; i < data_array_len; ++i) sum += my_array[i]; + /* see above; ensure this is not optimized out */ + version_sink = LINUX_KERNEL_VERSION; + return 0; } diff --git a/tools/testing/selftests/bpf/progs/test_kernel_flag.c b/tools/testing/selftests/bpf/progs/test_kernel_flag.c new file mode 100644 index 000000000000..b45fab3be352 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_kernel_flag.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright (C) 2025 Microsoft Corporation + * + * Author: Blaise Boscaccy <bboscaccy@linux.microsoft.com> + */ + +#include "vmlinux.h" +#include <errno.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +__u32 monitored_tid; + +SEC("lsm.s/bpf") +int BPF_PROG(bpf, int cmd, union bpf_attr *attr, unsigned int size, bool kernel) +{ + __u32 tid; + + tid = bpf_get_current_pid_tgid() & 0xFFFFFFFF; + if (!kernel || tid != monitored_tid) + return 0; + else + return -EINVAL; +} diff --git a/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c b/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c index cd4d752bd089..061befb004c2 100644 --- a/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c +++ b/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c @@ -36,7 +36,7 @@ char _license[] SEC("license") = "GPL"; SEC("?lsm.s/bpf") __failure __msg("cannot pass in dynptr at an offset=-8") -int BPF_PROG(not_valid_dynptr, int cmd, union bpf_attr *attr, unsigned int size) +int BPF_PROG(not_valid_dynptr, int cmd, union bpf_attr *attr, unsigned int size, bool kernel) { unsigned long val; @@ -46,7 +46,7 @@ int BPF_PROG(not_valid_dynptr, int cmd, union bpf_attr *attr, unsigned int size) SEC("?lsm.s/bpf") __failure __msg("arg#0 expected pointer to stack or const struct bpf_dynptr") -int BPF_PROG(not_ptr_to_stack, int cmd, union bpf_attr *attr, unsigned int size) +int BPF_PROG(not_ptr_to_stack, int cmd, union bpf_attr *attr, unsigned int size, bool kernel) { unsigned long val = 0; @@ -55,7 +55,7 @@ int BPF_PROG(not_ptr_to_stack, int cmd, union bpf_attr *attr, unsigned int size) } SEC("lsm.s/bpf") -int BPF_PROG(dynptr_data_null, int cmd, union bpf_attr *attr, unsigned int size) +int BPF_PROG(dynptr_data_null, int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
{ struct bpf_key *trusted_keyring; struct bpf_dynptr ptr; diff --git a/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c b/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c index 7ac7e1de34d8..0ad1bf1ede8d 100644 --- a/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c +++ b/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c @@ -4,7 +4,7 @@ #include <bpf/bpf_helpers.h> #include "bpf_misc.h" #include "bpf_kfuncs.h" -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" SEC("tc") int kfunc_dynptr_nullable_test1(struct __sk_buff *skb) diff --git a/tools/testing/selftests/bpf/progs/test_lookup_key.c b/tools/testing/selftests/bpf/progs/test_lookup_key.c index c73776990ae3..1f7e1e59b073 100644 --- a/tools/testing/selftests/bpf/progs/test_lookup_key.c +++ b/tools/testing/selftests/bpf/progs/test_lookup_key.c @@ -14,16 +14,16 @@ char _license[] SEC("license") = "GPL"; __u32 monitored_pid; -__u32 key_serial; +__s32 key_serial; __u32 key_id; __u64 flags; -extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym; +extern struct bpf_key *bpf_lookup_user_key(__s32 serial, __u64 flags) __ksym; extern struct bpf_key *bpf_lookup_system_key(__u64 id) __ksym; extern void bpf_key_put(struct bpf_key *key) __ksym; SEC("lsm.s/bpf") -int BPF_PROG(bpf, int cmd, union bpf_attr *attr, unsigned int size) +int BPF_PROG(bpf, int cmd, union bpf_attr *attr, unsigned int size, bool kernel) { struct bpf_key *bkey; __u32 pid; diff --git a/tools/testing/selftests/bpf/progs/test_module_attach.c b/tools/testing/selftests/bpf/progs/test_module_attach.c index cc1a012d038f..03d7f89787a1 100644 --- a/tools/testing/selftests/bpf/progs/test_module_attach.c +++ b/tools/testing/selftests/bpf/progs/test_module_attach.c @@ -5,7 +5,7 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> #include <bpf/bpf_core_read.h> -#include "../bpf_testmod/bpf_testmod.h" +#include "../test_kmods/bpf_testmod.h" __u32 raw_tp_read_sz = 0; @@ -19,7 +19,7 @@ int BPF_PROG(handle_raw_tp, __u32 raw_tp_bare_write_sz = 0; -SEC("raw_tp/bpf_testmod_test_write_bare") +SEC("raw_tp/bpf_testmod_test_write_bare_tp") int BPF_PROG(handle_raw_tp_bare, struct task_struct *task, struct bpf_testmod_test_write_ctx *write_ctx) { @@ -31,7 +31,7 @@ int raw_tp_writable_bare_in_val = 0; int raw_tp_writable_bare_early_ret = 0; int raw_tp_writable_bare_out_val = 0; -SEC("raw_tp.w/bpf_testmod_test_writable_bare") +SEC("raw_tp.w/bpf_testmod_test_writable_bare_tp") int BPF_PROG(handle_raw_tp_writable_bare, struct bpf_testmod_test_writable_ctx *writable) { @@ -117,7 +117,7 @@ int BPF_PROG(handle_fexit_ret, int arg, struct file *ret) bpf_probe_read_kernel(&buf, 8, ret); bpf_probe_read_kernel(&buf, 8, (char *)ret + 256); - *(volatile long long *)ret; + *(volatile int *)ret; *(volatile int *)&ret->f_mode; return 0; } diff --git a/tools/testing/selftests/bpf/progs/test_ptr_untrusted.c b/tools/testing/selftests/bpf/progs/test_ptr_untrusted.c index 2fdc44e76624..89b0cd5a3e06 100644 --- a/tools/testing/selftests/bpf/progs/test_ptr_untrusted.c +++ b/tools/testing/selftests/bpf/progs/test_ptr_untrusted.c @@ -7,7 +7,7 @@ char tp_name[128]; SEC("lsm.s/bpf") -int BPF_PROG(lsm_run, int cmd, union bpf_attr *attr, unsigned int size) +int BPF_PROG(lsm_run, int cmd, union bpf_attr *attr, unsigned int size, bool kernel) { switch (cmd) { case BPF_RAW_TRACEPOINT_OPEN: diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf_write.c b/tools/testing/selftests/bpf/progs/test_ringbuf_write.c 
index 350513c0e4c9..f063a0013f85 100644 --- a/tools/testing/selftests/bpf/progs/test_ringbuf_write.c +++ b/tools/testing/selftests/bpf/progs/test_ringbuf_write.c @@ -26,11 +26,11 @@ int test_ringbuf_write(void *ctx) if (cur_pid != pid) return 0; - sample1 = bpf_ringbuf_reserve(&ringbuf, 0x3000, 0); + sample1 = bpf_ringbuf_reserve(&ringbuf, 0x30000, 0); if (!sample1) return 0; /* first one can pass */ - sample2 = bpf_ringbuf_reserve(&ringbuf, 0x3000, 0); + sample2 = bpf_ringbuf_reserve(&ringbuf, 0x30000, 0); if (!sample2) { bpf_ringbuf_discard(sample1, 0); __sync_fetch_and_add(&discarded, 1); diff --git a/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c index 5eb25c6ad75b..a5be3267dbb0 100644 --- a/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c +++ b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2018 Facebook */ -#include <stdlib.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/ipv6.h> diff --git a/tools/testing/selftests/bpf/progs/test_set_remove_xattr.c b/tools/testing/selftests/bpf/progs/test_set_remove_xattr.c new file mode 100644 index 000000000000..6a612cf168d3 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_set_remove_xattr.c @@ -0,0 +1,133 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ + +#include "vmlinux.h" +#include <errno.h> +#include <bpf/bpf_tracing.h> +#include "bpf_kfuncs.h" +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + +__u32 monitored_pid; + +const char xattr_foo[] = "security.bpf.foo"; +const char xattr_bar[] = "security.bpf.bar"; +static const char xattr_selinux[] = "security.selinux"; +char value_bar[] = "world"; +char read_value[32]; + +bool set_security_bpf_bar_success; +bool remove_security_bpf_bar_success; +bool set_security_selinux_fail; +bool remove_security_selinux_fail; + +char name_buf[32]; + +static inline bool name_match_foo(const char *name) +{ + bpf_probe_read_kernel(name_buf, sizeof(name_buf), name); + + return !bpf_strncmp(name_buf, sizeof(xattr_foo), xattr_foo); +} + +/* Test bpf_set_dentry_xattr and bpf_remove_dentry_xattr */ +SEC("lsm.s/inode_getxattr") +int BPF_PROG(test_inode_getxattr, struct dentry *dentry, char *name) +{ + struct bpf_dynptr value_ptr; + __u32 pid; + int ret; + + pid = bpf_get_current_pid_tgid() >> 32; + if (pid != monitored_pid) + return 0; + + /* Only do the following for security.bpf.foo */ + if (!name_match_foo(name)) + return 0; + + bpf_dynptr_from_mem(read_value, sizeof(read_value), 0, &value_ptr); + + /* read security.bpf.bar */ + ret = bpf_get_dentry_xattr(dentry, xattr_bar, &value_ptr); + + if (ret < 0) { + /* If security.bpf.bar doesn't exist, set it */ + bpf_dynptr_from_mem(value_bar, sizeof(value_bar), 0, &value_ptr); + + ret = bpf_set_dentry_xattr(dentry, xattr_bar, &value_ptr, 0); + if (!ret) + set_security_bpf_bar_success = true; + ret = bpf_set_dentry_xattr(dentry, xattr_selinux, &value_ptr, 0); + if (ret) + set_security_selinux_fail = true; + } else { + /* If security.bpf.bar exists, remove it */ + ret = bpf_remove_dentry_xattr(dentry, xattr_bar); + if (!ret) + remove_security_bpf_bar_success = true; + + ret = bpf_remove_dentry_xattr(dentry, xattr_selinux); + if (ret) + remove_security_selinux_fail = true; + } + + return 0; +} + +bool locked_set_security_bpf_bar_success; +bool locked_remove_security_bpf_bar_success; +bool 
locked_set_security_selinux_fail; +bool locked_remove_security_selinux_fail; + +/* Test bpf_set_dentry_xattr_locked and bpf_remove_dentry_xattr_locked. + * It is not necessary to differentiate the _locked version and the + * not-_locked version in the BPF program. The verifier will fix them up + * properly. + */ +SEC("lsm.s/inode_setxattr") +int BPF_PROG(test_inode_setxattr, struct mnt_idmap *idmap, + struct dentry *dentry, const char *name, + const void *value, size_t size, int flags) +{ + struct bpf_dynptr value_ptr; + __u32 pid; + int ret; + + pid = bpf_get_current_pid_tgid() >> 32; + if (pid != monitored_pid) + return 0; + + /* Only do the following for security.bpf.foo */ + if (!name_match_foo(name)) + return 0; + + bpf_dynptr_from_mem(read_value, sizeof(read_value), 0, &value_ptr); + + /* read security.bpf.bar */ + ret = bpf_get_dentry_xattr(dentry, xattr_bar, &value_ptr); + + if (ret < 0) { + /* If security.bpf.bar doesn't exist, set it */ + bpf_dynptr_from_mem(value_bar, sizeof(value_bar), 0, &value_ptr); + + ret = bpf_set_dentry_xattr(dentry, xattr_bar, &value_ptr, 0); + if (!ret) + locked_set_security_bpf_bar_success = true; + ret = bpf_set_dentry_xattr(dentry, xattr_selinux, &value_ptr, 0); + if (ret) + locked_set_security_selinux_fail = true; + } else { + /* If security.bpf.bar exists, remove it */ + ret = bpf_remove_dentry_xattr(dentry, xattr_bar); + if (!ret) + locked_remove_security_bpf_bar_success = true; + + ret = bpf_remove_dentry_xattr(dentry, xattr_selinux); + if (ret) + locked_remove_security_selinux_fail = true; + } + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/test_sig_in_xattr.c b/tools/testing/selftests/bpf/progs/test_sig_in_xattr.c index 8ef6b39335b6..34b30e2603f0 100644 --- a/tools/testing/selftests/bpf/progs/test_sig_in_xattr.c +++ b/tools/testing/selftests/bpf/progs/test_sig_in_xattr.c @@ -40,7 +40,7 @@ char digest[MAGIC_SIZE + SIZEOF_STRUCT_FSVERITY_DIGEST + SHA256_DIGEST_SIZE]; __u32 monitored_pid; char sig[MAX_SIG_SIZE]; __u32 sig_size; -__u32 user_keyring_serial; +__s32 user_keyring_serial; SEC("lsm.s/file_open") int BPF_PROG(test_file_open, struct file *f) diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_change_tail.c b/tools/testing/selftests/bpf/progs/test_sockmap_change_tail.c new file mode 100644 index 000000000000..1c7941a4ad00 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_sockmap_change_tail.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 ByteDance */ +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> + +#ifndef PAGE_SIZE +#define PAGE_SIZE __PAGE_SIZE +#endif +#define BPF_SKB_MAX_LEN (PAGE_SIZE << 2) + +struct { + __uint(type, BPF_MAP_TYPE_SOCKMAP); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); +} sock_map_rx SEC(".maps"); + +long change_tail_ret = 1; + +SEC("sk_skb") +int prog_skb_verdict(struct __sk_buff *skb) +{ + char *data, *data_end; + + bpf_skb_pull_data(skb, 1); + data = (char *)(unsigned long)skb->data; + data_end = (char *)(unsigned long)skb->data_end; + + if (data + 1 > data_end) + return SK_PASS; + + if (data[0] == 'T') { /* Trim the packet */ + change_tail_ret = bpf_skb_change_tail(skb, skb->len - 1, 0); + return SK_PASS; + } else if (data[0] == 'G') { /* Grow the packet */ + change_tail_ret = bpf_skb_change_tail(skb, skb->len + 1, 0); + return SK_PASS; + } else if (data[0] == 'E') { /* Error */ + change_tail_ret = bpf_skb_change_tail(skb, BPF_SKB_MAX_LEN, 0); + return SK_PASS; + } + return SK_PASS; +} + +char _license[] SEC("license") = "GPL";
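For context on how a verdict program like the one just added is typically exercised: the userspace half installs the program on the sockmap, places a connected socket into the map, then sends one-byte commands and reads change_tail_ret back through the skeleton. A minimal sketch of that wiring, assuming the libbpf skeleton generated from the file above; the socket setup, the run_case helper, and its error handling are illustrative, not the actual harness in prog_tests:

#include <sys/socket.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "test_sockmap_change_tail.skel.h"

static long run_case(struct test_sockmap_change_tail *skel,
		     int send_fd, int recv_fd, char cmd)
{
	int map_fd = bpf_map__fd(skel->maps.sock_map_rx);
	int key = 0;
	char buf[4];

	/* One-time setup, done elsewhere (shown for completeness):
	 * bpf_prog_attach(bpf_program__fd(skel->progs.prog_skb_verdict),
	 *                 map_fd, BPF_SK_SKB_VERDICT, 0);
	 */

	/* Ingress traffic of sockets present in the map runs through the
	 * verdict program, so insert the receiving end.
	 */
	if (bpf_map_update_elem(map_fd, &key, &recv_fd, BPF_ANY))
		return -1;

	/* The first payload byte selects the case: 'T' trim, 'G' grow,
	 * 'E' oversized bpf_skb_change_tail() request.
	 */
	buf[0] = cmd;
	if (send(send_fd, buf, 1, 0) != 1)
		return -1;
	recv(recv_fd, buf, sizeof(buf), 0);

	/* The program stored bpf_skb_change_tail()'s return value in a
	 * global; being initialized, it lives in the skeleton's .data.
	 */
	return skel->data->change_tail_ret;
}

The caller then only has to compare the returned value against the expectation for each command byte; the 'E' case in particular must come back nonzero, since BPF_SKB_MAX_LEN is far beyond what the skb can grow to.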
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_ktls.c b/tools/testing/selftests/bpf/progs/test_sockmap_ktls.c new file mode 100644 index 000000000000..83df4919c224 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_sockmap_ktls.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> + +int cork_byte; +int push_start; +int push_end; +int apply_bytes; +int pop_start; +int pop_end; + +struct { + __uint(type, BPF_MAP_TYPE_SOCKMAP); + __uint(max_entries, 20); + __type(key, int); + __type(value, int); +} sock_map SEC(".maps"); + +SEC("sk_msg") +int prog_sk_policy(struct sk_msg_md *msg) +{ + if (cork_byte > 0) + bpf_msg_cork_bytes(msg, cork_byte); + if (push_start > 0 && push_end > 0) + bpf_msg_push_data(msg, push_start, push_end, 0); + if (pop_start >= 0 && pop_end > 0) + bpf_msg_pop_data(msg, pop_start, pop_end, 0); + + return SK_PASS; +} + +SEC("sk_msg") +int prog_sk_policy_redir(struct sk_msg_md *msg) +{ + int two = 2; + + bpf_msg_apply_bytes(msg, apply_bytes); + return bpf_msg_redirect_map(msg, &sock_map, two, 0); +} diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_redir.c b/tools/testing/selftests/bpf/progs/test_sockmap_redir.c new file mode 100644 index 000000000000..34d9f4f2f0a2 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_sockmap_redir.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC(".maps") struct { + __uint(type, BPF_MAP_TYPE_SOCKMAP); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u64); +} nop_map, sock_map; + +SEC(".maps") struct { + __uint(type, BPF_MAP_TYPE_SOCKHASH); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u64); +} nop_hash, sock_hash; + +SEC(".maps") struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 2); + __type(key, int); + __type(value, unsigned int); +} verdict_map; + +/* Set by user space */ +int redirect_type; +int redirect_flags; + +#define redirect_map(__data) \ + _Generic((__data), \ + struct __sk_buff * : bpf_sk_redirect_map, \ + struct sk_msg_md * : bpf_msg_redirect_map \ + )((__data), &sock_map, (__u32){0}, redirect_flags) + +#define redirect_hash(__data) \ + _Generic((__data), \ + struct __sk_buff * : bpf_sk_redirect_hash, \ + struct sk_msg_md * : bpf_msg_redirect_hash \ + )((__data), &sock_hash, &(__u32){0}, redirect_flags) + +#define DEFINE_PROG(__type, __param) \ +SEC("sk_" XSTR(__type)) \ +int prog_ ## __type ## _verdict(__param data) \ +{ \ + unsigned int *count; \ + int verdict; \ + \ + if (redirect_type == BPF_MAP_TYPE_SOCKMAP) \ + verdict = redirect_map(data); \ + else if (redirect_type == BPF_MAP_TYPE_SOCKHASH) \ + verdict = redirect_hash(data); \ + else \ + verdict = redirect_type - __MAX_BPF_MAP_TYPE; \ + \ + count = bpf_map_lookup_elem(&verdict_map, &verdict); \ + if (count) \ + (*count)++; \ + \ + return verdict; \ +} + +DEFINE_PROG(skb, struct __sk_buff *); +DEFINE_PROG(msg, struct sk_msg_md *); + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_strp.c b/tools/testing/selftests/bpf/progs/test_sockmap_strp.c new file mode 100644 index 000000000000..dde3d5bec515 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_sockmap_strp.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> +int verdict_max_size = 10000; +struct { + __uint(type, 
BPF_MAP_TYPE_SOCKMAP); + __uint(max_entries, 20); + __type(key, int); + __type(value, int); +} sock_map SEC(".maps"); + +SEC("sk_skb/stream_verdict") +int prog_skb_verdict(struct __sk_buff *skb) +{ + __u32 one = 1; + + if (skb->len > verdict_max_size) + return SK_PASS; + + return bpf_sk_redirect_map(skb, &sock_map, one, 0); +} + +SEC("sk_skb/stream_verdict") +int prog_skb_verdict_pass(struct __sk_buff *skb) +{ + return SK_PASS; +} + +SEC("sk_skb/stream_parser") +int prog_skb_parser(struct __sk_buff *skb) +{ + return skb->len; +} + +SEC("sk_skb/stream_parser") +int prog_skb_parser_partial(struct __sk_buff *skb) +{ + /* Agreement with the test program on a 4-byte size header + * and a 6-byte body. + */ + if (skb->len < 4) { + /* need more header to determine full length */ + return 0; + } + /* Return the full length decoded from the header. + * The return value may be larger than skb->len, which + * means the framework must wait for the rest of the body + * to arrive. + */ + return 10; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c b/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c index 1c8b678e2e9a..f678ee6bd7ea 100644 --- a/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c +++ b/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c @@ -245,4 +245,73 @@ int lock_global_subprog_call2(struct __sk_buff *ctx) return ret; } +int __noinline +global_subprog_int(int i) +{ + if (i) + bpf_printk("%p", &i); + return i; +} + +int __noinline +global_sleepable_helper_subprog(int i) +{ + if (i) + bpf_copy_from_user(&i, sizeof(i), NULL); + return i; +} + +int __noinline +global_sleepable_kfunc_subprog(int i) +{ + if (i) + bpf_copy_from_user_str(&i, sizeof(i), NULL, 0); + global_subprog_int(i); + return i; +} + +int __noinline +global_subprog_calling_sleepable_global(int i) +{ + if (!i) + global_sleepable_kfunc_subprog(i); + return i; +} + +SEC("?syscall") +int lock_global_sleepable_helper_subprog(struct __sk_buff *ctx) +{ + int ret = 0; + + bpf_spin_lock(&lockA); + if (ctx->mark == 42) + ret = global_sleepable_helper_subprog(ctx->mark); + bpf_spin_unlock(&lockA); + return ret; +} + +SEC("?syscall") +int lock_global_sleepable_kfunc_subprog(struct __sk_buff *ctx) +{ + int ret = 0; + + bpf_spin_lock(&lockA); + if (ctx->mark == 42) + ret = global_sleepable_kfunc_subprog(ctx->mark); + bpf_spin_unlock(&lockA); + return ret; +} + +SEC("?syscall") +int lock_global_sleepable_subprog_indirect(struct __sk_buff *ctx) +{ + int ret = 0; + + bpf_spin_lock(&lockA); + if (ctx->mark == 42) + ret = global_subprog_calling_sleepable_global(ctx->mark); + bpf_spin_unlock(&lockA); + return ret; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_subprogs_extable.c b/tools/testing/selftests/bpf/progs/test_subprogs_extable.c index e2a21fbd4e44..dcac69f5928a 100644 --- a/tools/testing/selftests/bpf/progs/test_subprogs_extable.c +++ b/tools/testing/selftests/bpf/progs/test_subprogs_extable.c @@ -21,7 +21,7 @@ static __u64 test_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *data) SEC("fexit/bpf_testmod_return_ptr") int BPF_PROG(handle_fexit_ret_subprogs, int arg, struct file *ret) { - *(volatile long *)ret; + *(volatile int *)ret; *(volatile int *)&ret->f_mode; bpf_for_each_map_elem(&test_array, test_cb, NULL, 0); triggered++; @@ -31,7 +31,7 @@ int BPF_PROG(handle_fexit_ret_subprogs, int arg, struct file *ret) SEC("fexit/bpf_testmod_return_ptr") int BPF_PROG(handle_fexit_ret_subprogs2, int arg, struct file *ret) { - *(volatile long *)ret; +
*(volatile int *)ret; *(volatile int *)&ret->f_mode; bpf_for_each_map_elem(&test_array, test_cb, NULL, 0); triggered++; @@ -41,7 +41,7 @@ int BPF_PROG(handle_fexit_ret_subprogs2, int arg, struct file *ret) SEC("fexit/bpf_testmod_return_ptr") int BPF_PROG(handle_fexit_ret_subprogs3, int arg, struct file *ret) { - *(volatile long *)ret; + *(volatile int *)ret; *(volatile int *)&ret->f_mode; bpf_for_each_map_elem(&test_array, test_cb, NULL, 0); triggered++; diff --git a/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c b/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c index 7e750309ce27..0b74b8bd22e8 100644 --- a/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c +++ b/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c @@ -49,7 +49,7 @@ out: } SEC("lsm.s/bpf") -int BPF_PROG(lsm_run, int cmd, union bpf_attr *attr, unsigned int size) +int BPF_PROG(lsm_run, int cmd, union bpf_attr *attr, unsigned int size, bool kernel) { struct cgroup *cgrp = NULL; struct task_struct *task; diff --git a/tools/testing/selftests/bpf/progs/test_tc_change_tail.c b/tools/testing/selftests/bpf/progs/test_tc_change_tail.c new file mode 100644 index 000000000000..fcba8299f0bc --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_tc_change_tail.c @@ -0,0 +1,106 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> + +#ifndef PAGE_SIZE +#define PAGE_SIZE __PAGE_SIZE +#endif +#define BPF_SKB_MAX_LEN (PAGE_SIZE << 2) + +long change_tail_ret = 1; + +static __always_inline struct iphdr *parse_ip_header(struct __sk_buff *skb, int *ip_proto) +{ + void *data_end = (void *)(long)skb->data_end; + void *data = (void *)(long)skb->data; + struct ethhdr *eth = data; + struct iphdr *iph; + + /* Verify Ethernet header */ + if ((void *)(data + sizeof(*eth)) > data_end) + return NULL; + + /* Skip Ethernet header to get to IP header */ + iph = (void *)(data + sizeof(struct ethhdr)); + + /* Verify IP header */ + if ((void *)(data + sizeof(struct ethhdr) + sizeof(*iph)) > data_end) + return NULL; + + /* Basic IP header validation */ + if (iph->version != 4) /* Only support IPv4 */ + return NULL; + + if (iph->ihl < 5) /* Minimum IP header length */ + return NULL; + + *ip_proto = iph->protocol; + return iph; +} + +static __always_inline struct udphdr *parse_udp_header(struct __sk_buff *skb, struct iphdr *iph) +{ + void *data_end = (void *)(long)skb->data_end; + void *hdr = (void *)iph; + struct udphdr *udp; + + /* Calculate UDP header position */ + udp = hdr + (iph->ihl * 4); + hdr = (void *)udp; + + /* Verify UDP header bounds */ + if ((void *)(hdr + sizeof(*udp)) > data_end) + return NULL; + + return udp; +} + +SEC("tc/ingress") +int change_tail(struct __sk_buff *skb) +{ + int len = skb->len; + struct udphdr *udp; + struct iphdr *iph; + void *data_end; + char *payload; + int ip_proto; + + bpf_skb_pull_data(skb, len); + + data_end = (void *)(long)skb->data_end; + iph = parse_ip_header(skb, &ip_proto); + if (!iph) + return TCX_PASS; + + if (ip_proto != IPPROTO_UDP) + return TCX_PASS; + + udp = parse_udp_header(skb, iph); + if (!udp) + return TCX_PASS; + + payload = (char *)udp + (sizeof(struct udphdr)); + if (payload + 1 > (char *)data_end) + return TCX_PASS; + + if (payload[0] == 'T') { /* Trim the packet */ + change_tail_ret = bpf_skb_change_tail(skb, len - 1, 0); + if (!change_tail_ret) + bpf_skb_change_tail(skb, len, 0); + return TCX_PASS; + } else if (payload[0] == 'G') { /* Grow the packet */ + change_tail_ret = bpf_skb_change_tail(skb, len + 1, 0); + if 
(!change_tail_ret) + bpf_skb_change_tail(skb, len, 0); + return TCX_PASS; + } else if (payload[0] == 'E') { /* Error */ + change_tail_ret = bpf_skb_change_tail(skb, BPF_SKB_MAX_LEN, 0); + return TCX_PASS; + } else if (payload[0] == 'Z') { /* Zero */ + change_tail_ret = bpf_skb_change_tail(skb, 0, 0); + return TCX_PASS; + } + return TCX_DROP; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_tc_link.c b/tools/testing/selftests/bpf/progs/test_tc_link.c index 10d825928499..630f12e51b07 100644 --- a/tools/testing/selftests/bpf/progs/test_tc_link.c +++ b/tools/testing/selftests/bpf/progs/test_tc_link.c @@ -8,6 +8,7 @@ #include <linux/if_packet.h> #include <bpf/bpf_endian.h> #include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> char LICENSE[] SEC("license") = "GPL"; @@ -27,6 +28,7 @@ bool seen_host; bool seen_mcast; int mark, prio; +unsigned short headroom, tailroom; SEC("tc/ingress") int tc1(struct __sk_buff *skb) @@ -104,11 +106,24 @@ out: return TCX_PASS; } +struct sk_buff { + struct net_device *dev; +}; + +struct net_device { + unsigned short needed_headroom; + unsigned short needed_tailroom; +}; + SEC("tc/egress") int tc8(struct __sk_buff *skb) { + struct net_device *dev = BPF_CORE_READ((struct sk_buff *)skb, dev); + seen_tc8 = true; mark = skb->mark; prio = skb->priority; + headroom = BPF_CORE_READ(dev, needed_headroom); + tailroom = BPF_CORE_READ(dev, needed_tailroom); return TCX_PASS; } diff --git a/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c index eb5cca1fce16..7d5293de1952 100644 --- a/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c +++ b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c @@ -294,7 +294,9 @@ static int tcp_validate_sysctl(struct tcp_syncookie *ctx) (ctx->ipv6 && ctx->attrs.mss != MSS_LOCAL_IPV6)) goto err; - if (!ctx->attrs.wscale_ok || ctx->attrs.snd_wscale != 7) + if (!ctx->attrs.wscale_ok || + !ctx->attrs.snd_wscale || + ctx->attrs.snd_wscale >= BPF_SYNCOOKIE_WSCALE_MASK) goto err; if (!ctx->attrs.tstamp_ok) diff --git a/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c b/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c index 5aaf2b065f86..cf0547a613ff 100644 --- a/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c +++ b/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c @@ -3,21 +3,17 @@ #include "vmlinux.h" #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> -#include "../bpf_testmod/bpf_testmod.h" +#include "../test_kmods/bpf_testmod.h" #include "bpf_misc.h" -SEC("tp_btf/bpf_testmod_test_nullable_bare") -/* This used to be a failure test, but raw_tp nullable arguments can now - * directly be dereferenced, whether they have nullable annotation or not, - * and don't need to be explicitly checked. 
- */ -__success +SEC("tp_btf/bpf_testmod_test_nullable_bare_tp") +__failure __msg("R1 invalid mem access 'trusted_ptr_or_null_'") int BPF_PROG(handle_tp_btf_nullable_bare1, struct bpf_testmod_test_read_ctx *nullable_ctx) { return nullable_ctx->len; } -SEC("tp_btf/bpf_testmod_test_nullable_bare") +SEC("tp_btf/bpf_testmod_test_nullable_bare_tp") int BPF_PROG(handle_tp_btf_nullable_bare2, struct bpf_testmod_test_read_ctx *nullable_ctx) { if (nullable_ctx) diff --git a/tools/testing/selftests/bpf/progs/test_usdt.c b/tools/testing/selftests/bpf/progs/test_usdt.c index 505aab9a5234..096488f47fbc 100644 --- a/tools/testing/selftests/bpf/progs/test_usdt.c +++ b/tools/testing/selftests/bpf/progs/test_usdt.c @@ -11,6 +11,7 @@ int usdt0_called; u64 usdt0_cookie; int usdt0_arg_cnt; int usdt0_arg_ret; +int usdt0_arg_size; SEC("usdt") int usdt0(struct pt_regs *ctx) @@ -26,6 +27,7 @@ int usdt0(struct pt_regs *ctx) usdt0_arg_cnt = bpf_usdt_arg_cnt(ctx); /* should return -ENOENT for any arg_num */ usdt0_arg_ret = bpf_usdt_arg(ctx, bpf_get_prandom_u32(), &tmp); + usdt0_arg_size = bpf_usdt_arg_size(ctx, bpf_get_prandom_u32()); return 0; } @@ -34,6 +36,7 @@ u64 usdt3_cookie; int usdt3_arg_cnt; int usdt3_arg_rets[3]; u64 usdt3_args[3]; +int usdt3_arg_sizes[3]; SEC("usdt//proc/self/exe:test:usdt3") int usdt3(struct pt_regs *ctx) @@ -50,12 +53,15 @@ int usdt3(struct pt_regs *ctx) usdt3_arg_rets[0] = bpf_usdt_arg(ctx, 0, &tmp); usdt3_args[0] = (int)tmp; + usdt3_arg_sizes[0] = bpf_usdt_arg_size(ctx, 0); usdt3_arg_rets[1] = bpf_usdt_arg(ctx, 1, &tmp); usdt3_args[1] = (long)tmp; + usdt3_arg_sizes[1] = bpf_usdt_arg_size(ctx, 1); usdt3_arg_rets[2] = bpf_usdt_arg(ctx, 2, &tmp); usdt3_args[2] = (uintptr_t)tmp; + usdt3_arg_sizes[2] = bpf_usdt_arg_size(ctx, 2); return 0; } @@ -64,12 +70,15 @@ int usdt12_called; u64 usdt12_cookie; int usdt12_arg_cnt; u64 usdt12_args[12]; +int usdt12_arg_sizes[12]; SEC("usdt//proc/self/exe:test:usdt12") int BPF_USDT(usdt12, int a1, int a2, long a3, long a4, unsigned a5, long a6, __u64 a7, uintptr_t a8, int a9, short a10, short a11, signed char a12) { + int i; + if (my_pid != (bpf_get_current_pid_tgid() >> 32)) return 0; @@ -90,6 +99,11 @@ int BPF_USDT(usdt12, int a1, int a2, long a3, long a4, unsigned a5, usdt12_args[9] = a10; usdt12_args[10] = a11; usdt12_args[11] = a12; + + bpf_for(i, 0, 12) { + usdt12_arg_sizes[i] = bpf_usdt_arg_size(ctx, i); + } + return 0; } diff --git a/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c b/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c index 12034a73ee2d..ff8d755548b9 100644 --- a/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c +++ b/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c @@ -17,7 +17,7 @@ #define MAX_SIG_SIZE 1024 __u32 monitored_pid; -__u32 user_keyring_serial; +__s32 user_keyring_serial; __u64 system_keyring_id; struct data { @@ -37,7 +37,7 @@ struct { char _license[] SEC("license") = "GPL"; SEC("lsm.s/bpf") -int BPF_PROG(bpf, int cmd, union bpf_attr *attr, unsigned int size) +int BPF_PROG(bpf, int cmd, union bpf_attr *attr, unsigned int size, bool kernel) { struct bpf_dynptr data_ptr, sig_ptr; struct data *data_val; diff --git a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c index 81bb38d72ced..5904f45cfbc4 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c @@ -10,6 +10,8 @@ int _xdp_adjust_tail_grow(struct xdp_md *xdp) /* 
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */ #if defined(__TARGET_ARCH_s390) int tailroom = 512; +#elif defined(__TARGET_ARCH_powerpc) + int tailroom = 384; #else int tailroom = 320; #endif @@ -17,7 +19,9 @@ int _xdp_adjust_tail_grow(struct xdp_md *xdp) /* Data length determine test case */ if (data_len == 54) { /* sizeof(pkt_v4) */ - offset = 4096; /* test too large offset */ + offset = 4096; /* test too large offset, 4k page size */ + } else if (data_len == 53) { /* sizeof(pkt_v4) - 1 */ + offset = 65536; /* test too large offset, 64k page size */ } else if (data_len == 74) { /* sizeof(pkt_v6) */ offset = 40; } else if (data_len == 64) { @@ -29,6 +33,10 @@ int _xdp_adjust_tail_grow(struct xdp_md *xdp) offset = 10; } else if (data_len == 9001) { offset = 4096; + } else if (data_len == 90000) { + offset = 10; /* test a small offset, 64k page size */ + } else if (data_len == 90001) { + offset = 65536; /* test too large offset, 64k page size */ } else { return XDP_ABORTED; /* No matching test */ } diff --git a/tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c b/tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c index 3abf068b8446..5928ed0911ca 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c @@ -98,6 +98,18 @@ int xdp_count_pkts(struct xdp_md *xdp) return XDP_DROP; } +SEC("xdp") +int xdp_redirect_to_111(struct xdp_md *xdp) +{ + return bpf_redirect(111, 0); +} + +SEC("xdp") +int xdp_redirect_to_222(struct xdp_md *xdp) +{ + return bpf_redirect(222, 0); +} + SEC("tc") int tc_count_pkts(struct __sk_buff *skb) { diff --git a/tools/testing/selftests/bpf/progs/test_xdp_meta.c b/tools/testing/selftests/bpf/progs/test_xdp_meta.c index a7c4a7d49fe6..fcf6ca14f2ea 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_meta.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_meta.c @@ -4,37 +4,50 @@ #include <bpf/bpf_helpers.h> -#define __round_mask(x, y) ((__typeof__(x))((y) - 1)) -#define round_up(x, y) ((((x) - 1) | __round_mask(x, y)) + 1) +#define META_SIZE 32 + #define ctx_ptr(ctx, mem) (void *)(unsigned long)ctx->mem -SEC("t") +/* Demonstrates how metadata can be passed from an XDP program to a TC program + * using bpf_xdp_adjust_meta. + * For the sake of testing the metadata support in drivers, the XDP program uses + * a fixed-size payload after the Ethernet header as metadata. The TC program + * copies the metadata it receives into a map so it can be checked from + * userspace. + */ + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __uint(value_size, META_SIZE); +} test_result SEC(".maps"); + +SEC("tc") int ing_cls(struct __sk_buff *ctx) { - __u8 *data, *data_meta, *data_end; - __u32 diff = 0; + __u8 *data, *data_meta; + __u32 key = 0; data_meta = ctx_ptr(ctx, data_meta); - data_end = ctx_ptr(ctx, data_end); data = ctx_ptr(ctx, data); - if (data + ETH_ALEN > data_end || - data_meta + round_up(ETH_ALEN, 4) > data) + if (data_meta + META_SIZE > data) return TC_ACT_SHOT; - diff |= ((__u32 *)data_meta)[0] ^ ((__u32 *)data)[0]; - diff |= ((__u16 *)data_meta)[2] ^ ((__u16 *)data)[2]; + bpf_map_update_elem(&test_result, &key, data_meta, BPF_ANY); - return diff ? 
TC_ACT_SHOT : TC_ACT_OK; + return TC_ACT_SHOT; } -SEC("x") +SEC("xdp") int ing_xdp(struct xdp_md *ctx) { - __u8 *data, *data_meta, *data_end; + __u8 *data, *data_meta, *data_end, *payload; + struct ethhdr *eth; int ret; - ret = bpf_xdp_adjust_meta(ctx, -round_up(ETH_ALEN, 4)); + ret = bpf_xdp_adjust_meta(ctx, -META_SIZE); if (ret < 0) return XDP_DROP; @@ -42,11 +55,21 @@ int ing_xdp(struct xdp_md *ctx) data_end = ctx_ptr(ctx, data_end); data = ctx_ptr(ctx, data); - if (data + ETH_ALEN > data_end || - data_meta + round_up(ETH_ALEN, 4) > data) + eth = (struct ethhdr *)data; + payload = data + sizeof(struct ethhdr); + + if (payload + META_SIZE > data_end || + data_meta + META_SIZE > data) + return XDP_DROP; + + /* The Linux networking stack may send other packets on the test + * interface that interfere with the test. Just drop them. + * The test packets can be recognized by their ethertype of zero. + */ + if (eth->h_proto != 0) return XDP_DROP; - __builtin_memcpy(data_meta, data, ETH_ALEN); + __builtin_memcpy(data_meta, payload, META_SIZE); return XDP_PASS; } diff --git a/tools/testing/selftests/bpf/progs/test_xdp_redirect.c b/tools/testing/selftests/bpf/progs/test_xdp_redirect.c deleted file mode 100644 index b778cad45485..000000000000 --- a/tools/testing/selftests/bpf/progs/test_xdp_redirect.c +++ /dev/null @@ -1,26 +0,0 @@ -/* Copyright (c) 2017 VMware - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -#include <linux/bpf.h> -#include <bpf/bpf_helpers.h> - -SEC("redirect_to_111") -int xdp_redirect_to_111(struct xdp_md *xdp) -{ - return bpf_redirect(111, 0); -} -SEC("redirect_to_222") -int xdp_redirect_to_222(struct xdp_md *xdp) -{ - return bpf_redirect(222, 0); -} - -char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_xdp_vlan.c b/tools/testing/selftests/bpf/progs/test_xdp_vlan.c index a7588302268d..a80cc5f2f4f2 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_vlan.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_vlan.c @@ -102,8 +102,8 @@ bool parse_eth_frame(struct ethhdr *eth, void *data_end, struct parse_pkt *pkt) #define TESTVLAN 4011 /* 0xFAB */ // #define TO_VLAN 4000 /* 0xFA0 (hint 0xOA0 = 160) */ -SEC("xdp_drop_vlan_4011") -int xdp_prognum0(struct xdp_md *ctx) +SEC("xdp") +int xdp_drop_vlan_4011(struct xdp_md *ctx) { void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; @@ -144,8 +144,8 @@ Load prog with ip tool: /* Changing VLAN to zero, have same practical effect as removing the VLAN. 
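 * (An 802.1Q tag with VID 0 only carries priority bits; receivers treat
 * such priority-tagged frames as untagged, hence the equivalence.)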
*/ #define TO_VLAN 0 -SEC("xdp_vlan_change") -int xdp_prognum1(struct xdp_md *ctx) +SEC("xdp") +int xdp_vlan_change(struct xdp_md *ctx) { void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; @@ -178,8 +178,8 @@ int xdp_prognum1(struct xdp_md *ctx) #endif #define VLAN_HDR_SZ 4 /* bytes */ -SEC("xdp_vlan_remove_outer") -int xdp_prognum2(struct xdp_md *ctx) +SEC("xdp") +int xdp_vlan_remove_outer(struct xdp_md *ctx) { void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; @@ -224,8 +224,8 @@ void shift_mac_4bytes_32bit(void *data) p[1] = p[0]; } -SEC("xdp_vlan_remove_outer2") -int xdp_prognum3(struct xdp_md *ctx) +SEC("xdp") +int xdp_vlan_remove_outer2(struct xdp_md *ctx) { void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; @@ -254,8 +254,8 @@ int xdp_prognum3(struct xdp_md *ctx) * The TC-clsact eBPF programs (currently) need to be attach via TC commands */ -SEC("tc_vlan_push") -int _tc_progA(struct __sk_buff *ctx) +SEC("tc") +int tc_vlan_push(struct __sk_buff *ctx) { bpf_skb_vlan_push(ctx, bpf_htons(ETH_P_8021Q), TESTVLAN); diff --git a/tools/testing/selftests/bpf/progs/tracing_failure.c b/tools/testing/selftests/bpf/progs/tracing_failure.c index d41665d2ec8c..65e485c4468c 100644 --- a/tools/testing/selftests/bpf/progs/tracing_failure.c +++ b/tools/testing/selftests/bpf/progs/tracing_failure.c @@ -18,3 +18,15 @@ int BPF_PROG(test_spin_unlock, struct bpf_spin_lock *lock) { return 0; } + +SEC("?fentry/__rcu_read_lock") +int BPF_PROG(tracing_deny) +{ + return 0; +} + +SEC("?fexit/do_exit") +int BPF_PROG(fexit_noreturns) +{ + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/uninit_stack.c b/tools/testing/selftests/bpf/progs/uninit_stack.c index 8a403470e557..046a204c8fc6 100644 --- a/tools/testing/selftests/bpf/progs/uninit_stack.c +++ b/tools/testing/selftests/bpf/progs/uninit_stack.c @@ -70,7 +70,8 @@ __naked int helper_uninit_to_misc(void *ctx) r1 = r10; \ r1 += -128; \ r2 = 32; \ - call %[bpf_trace_printk]; \ + r3 = 0; \ + call %[bpf_probe_read_user]; \ /* Call to dummy() forces print_verifier_state(..., true), \ * thus showing the stack state, matched by __msg(). 
\ */ \ @@ -79,7 +80,7 @@ __naked int helper_uninit_to_misc(void *ctx) exit; \ " : - : __imm(bpf_trace_printk), + : __imm(bpf_probe_read_user), __imm(dummy) : __clobber_all); } diff --git a/tools/testing/selftests/bpf/progs/unsupported_ops.c b/tools/testing/selftests/bpf/progs/unsupported_ops.c index 9180365a3568..8aa2e0dd624e 100644 --- a/tools/testing/selftests/bpf/progs/unsupported_ops.c +++ b/tools/testing/selftests/bpf/progs/unsupported_ops.c @@ -4,7 +4,7 @@ #include <vmlinux.h> #include <bpf/bpf_tracing.h> #include "bpf_misc.h" -#include "../bpf_testmod/bpf_testmod.h" +#include "../test_kmods/bpf_testmod.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_and.c b/tools/testing/selftests/bpf/progs/verifier_and.c index e97e518516b6..2b4fdca162be 100644 --- a/tools/testing/selftests/bpf/progs/verifier_and.c +++ b/tools/testing/selftests/bpf/progs/verifier_and.c @@ -85,8 +85,14 @@ l0_%=: r0 = r0; \ SEC("socket") __description("check known subreg with unknown reg") -__success __failure_unpriv __msg_unpriv("R1 !read_ok") +__success __success_unpriv __retval(0) +#ifdef SPEC_V1 +__xlated_unpriv("if w0 < 0x1 goto pc+2") +__xlated_unpriv("nospec") /* inserted to prevent `R1 !read_ok'` */ +__xlated_unpriv("goto pc-1") /* `r1 = *(u32*)(r1 + 512)`, sanitized dead code */ +__xlated_unpriv("r0 = 0") +#endif __naked void known_subreg_with_unknown_reg(void) { asm volatile (" \ diff --git a/tools/testing/selftests/bpf/progs/verifier_arena.c b/tools/testing/selftests/bpf/progs/verifier_arena.c index 67509c5d3982..7f4827eede3c 100644 --- a/tools/testing/selftests/bpf/progs/verifier_arena.c +++ b/tools/testing/selftests/bpf/progs/verifier_arena.c @@ -3,6 +3,7 @@ #define BPF_NO_KFUNC_PROTOTYPES #include <vmlinux.h> +#include <errno.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> #include "bpf_misc.h" @@ -114,6 +115,111 @@ int basic_alloc3(void *ctx) return 0; } +SEC("syscall") +__success __retval(0) +int basic_reserve1(void *ctx) +{ +#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) + char __arena *page; + int ret; + + page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0); + if (!page) + return 1; + + page += __PAGE_SIZE; + + /* Reserve the second page */ + ret = bpf_arena_reserve_pages(&arena, page, 1); + if (ret) + return 2; + + /* Try to explicitly allocate the reserved page. */ + page = bpf_arena_alloc_pages(&arena, page, 1, NUMA_NO_NODE, 0); + if (page) + return 3; + + /* Try to implicitly allocate the page (since there's only 2 of them). */ + page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0); + if (page) + return 4; +#endif + return 0; +} + +SEC("syscall") +__success __retval(0) +int basic_reserve2(void *ctx) +{ +#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) + char __arena *page; + int ret; + + page = arena_base(&arena); + ret = bpf_arena_reserve_pages(&arena, page, 1); + if (ret) + return 1; + + page = bpf_arena_alloc_pages(&arena, page, 1, NUMA_NO_NODE, 0); + if ((u64)page) + return 2; +#endif + return 0; +} + +/* Reserve the same page twice, should return -EBUSY. */ +SEC("syscall") +__success __retval(0) +int reserve_twice(void *ctx) +{ +#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) + char __arena *page; + int ret; + + page = arena_base(&arena); + + ret = bpf_arena_reserve_pages(&arena, page, 1); + if (ret) + return 1; + + ret = bpf_arena_reserve_pages(&arena, page, 1); + if (ret != -EBUSY) + return 2; +#endif + return 0; +} + +/* Try to reserve past the end of the arena. 
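+ * Also pass a NULL region and oversized page counts; each such call is
+ * expected to fail with -EINVAL.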
*/ +SEC("syscall") +__success __retval(0) +int reserve_invalid_region(void *ctx) +{ +#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) + char __arena *page; + int ret; + + /* Try a NULL pointer. */ + ret = bpf_arena_reserve_pages(&arena, NULL, 3); + if (ret != -EINVAL) + return 1; + + page = arena_base(&arena); + + ret = bpf_arena_reserve_pages(&arena, page, 3); + if (ret != -EINVAL) + return 2; + + ret = bpf_arena_reserve_pages(&arena, page, 4096); + if (ret != -EINVAL) + return 3; + + ret = bpf_arena_reserve_pages(&arena, page, (1ULL << 32) - 1); + if (ret != -EINVAL) + return 4; +#endif + return 0; +} + SEC("iter.s/bpf_map") __success __log_level(2) int iter_maps1(struct bpf_iter__bpf_map *ctx) diff --git a/tools/testing/selftests/bpf/progs/verifier_arena_large.c b/tools/testing/selftests/bpf/progs/verifier_arena_large.c index f94f30cf1bb8..9dbdf123542d 100644 --- a/tools/testing/selftests/bpf/progs/verifier_arena_large.c +++ b/tools/testing/selftests/bpf/progs/verifier_arena_large.c @@ -67,6 +67,104 @@ int big_alloc1(void *ctx) return 0; } +/* Try to access a reserved page. Behavior should be identical with accessing unallocated pages. */ +SEC("syscall") +__success __retval(0) +int access_reserved(void *ctx) +{ +#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) + volatile char __arena *page; + char __arena *base; + const size_t len = 4; + int ret, i; + + /* Get a separate region of the arena. */ + page = base = arena_base(&arena) + 16384 * PAGE_SIZE; + + ret = bpf_arena_reserve_pages(&arena, base, len); + if (ret) + return 1; + + /* Try to dirty reserved memory. */ + for (i = 0; i < len && can_loop; i++) + *page = 0x5a; + + for (i = 0; i < len && can_loop; i++) { + page = (volatile char __arena *)(base + i * PAGE_SIZE); + + /* + * Error out in case either the write went through, + * or the address has random garbage. + */ + if (*page == 0x5a) + return 2 + 2 * i; + + if (*page) + return 2 + 2 * i + 1; + } +#endif + return 0; +} + +/* Try to allocate a region overlapping with a reservation. */ +SEC("syscall") +__success __retval(0) +int request_partially_reserved(void *ctx) +{ +#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) + volatile char __arena *page; + char __arena *base; + int ret; + + /* Add an arbitrary page offset. */ + page = base = arena_base(&arena) + 4096 * __PAGE_SIZE; + + ret = bpf_arena_reserve_pages(&arena, base + 3 * __PAGE_SIZE, 4); + if (ret) + return 1; + + page = bpf_arena_alloc_pages(&arena, base, 5, NUMA_NO_NODE, 0); + if ((u64)page != 0ULL) + return 2; +#endif + return 0; +} + +SEC("syscall") +__success __retval(0) +int free_reserved(void *ctx) +{ +#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) + char __arena *addr; + char __arena *page; + int ret; + + /* Add an arbitrary page offset. */ + addr = arena_base(&arena) + 32768 * __PAGE_SIZE; + + page = bpf_arena_alloc_pages(&arena, addr, 2, NUMA_NO_NODE, 0); + if (!page) + return 1; + + ret = bpf_arena_reserve_pages(&arena, addr + 2 * __PAGE_SIZE, 2); + if (ret) + return 2; + + /* + * Reserved and allocated pages should be interchangeable for + * bpf_arena_free_pages(). Free a reserved and an allocated + * page with a single call. + */ + bpf_arena_free_pages(&arena, addr + __PAGE_SIZE , 2); + + /* The free call above should have succeeded, so this allocation should too. 
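+	 * If any reserved page were still accounted after the free, this
+	 * allocation would fail instead.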
*/ + page = bpf_arena_alloc_pages(&arena, addr + __PAGE_SIZE, 2, NUMA_NO_NODE, 0); + if (!page) + return 3; +#endif + return 0; +} + #if defined(__BPF_FEATURE_ADDR_SPACE_CAST) #define PAGE_CNT 100 __u8 __arena * __arena page[PAGE_CNT]; /* occupies the first page */ diff --git a/tools/testing/selftests/bpf/progs/verifier_array_access.c b/tools/testing/selftests/bpf/progs/verifier_array_access.c index 4195aa824ba5..0a187ff725cc 100644 --- a/tools/testing/selftests/bpf/progs/verifier_array_access.c +++ b/tools/testing/selftests/bpf/progs/verifier_array_access.c @@ -29,6 +29,20 @@ struct { } map_array_wo SEC(".maps"); struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(max_entries, 2); + __type(key, __u32); + __type(value, struct test_val); +} map_array_pcpu SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 2); + __type(key, __u32); + __type(value, struct test_val); +} map_array SEC(".maps"); + +struct { __uint(type, BPF_MAP_TYPE_HASH); __uint(max_entries, 1); __type(key, long long); @@ -525,4 +539,193 @@ l0_%=: exit; \ : __clobber_all); } +SEC("socket") +__description("valid map access into an array using constant without nullness") +__success __retval(4) __log_level(2) +__msg("mark_precise: frame0: regs= stack=-8 before {{[0-9]}}: ({{[a-f0-9]+}}) *(u32 *)(r10 -8) = {{(1|r[0-9])}}") +unsigned int an_array_with_a_constant_no_nullness(void) +{ + /* Need 8-byte alignment for spill tracking */ + __u32 __attribute__((aligned(8))) key = 1; + struct test_val *val; + + val = bpf_map_lookup_elem(&map_array, &key); + val->index = offsetof(struct test_val, foo); + + return val->index; +} + +SEC("socket") +__description("valid multiple map access into an array using constant without nullness") +__success __retval(8) __log_level(2) +__msg("mark_precise: frame0: regs= stack=-8 before {{[0-9]}}: ({{[a-f0-9]+}}) *(u32 *)(r10 -16) = {{(0|r[0-9])}}") +__msg("mark_precise: frame0: regs= stack=-8 before {{[0-9]}}: ({{[a-f0-9]+}}) *(u32 *)(r10 -8) = {{(1|r[0-9])}}") +unsigned int multiple_array_with_a_constant_no_nullness(void) +{ + __u32 __attribute__((aligned(8))) key = 1; + __u32 __attribute__((aligned(8))) key2 = 0; + struct test_val *val, *val2; + + val = bpf_map_lookup_elem(&map_array, &key); + val->index = offsetof(struct test_val, foo); + + val2 = bpf_map_lookup_elem(&map_array, &key2); + val2->index = offsetof(struct test_val, foo); + + return val->index + val2->index; +} + +SEC("socket") +__description("valid map access into an array using natural aligned 32-bit constant 0 without nullness") +__success __retval(4) +unsigned int an_array_with_a_32bit_constant_0_no_nullness(void) +{ + /* Unlike the above tests, 32-bit zeroing is precisely tracked even + * if writes are not aligned to BPF_REG_SIZE. This tests that our + * STACK_ZERO handling functions. 
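+ * (STACK_ZERO records zeroed stack contents at byte granularity,
+ * independent of 8-byte spill slots.)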
+ */ + struct test_val *val; + __u32 key = 0; + + val = bpf_map_lookup_elem(&map_array, &key); + val->index = offsetof(struct test_val, foo); + + return val->index; +} + +SEC("socket") +__description("valid map access into a pcpu array using constant without nullness") +__success __retval(4) __log_level(2) +__msg("mark_precise: frame0: regs= stack=-8 before {{[0-9]}}: ({{[a-f0-9]+}}) *(u32 *)(r10 -8) = {{(1|r[0-9])}}") +unsigned int a_pcpu_array_with_a_constant_no_nullness(void) +{ + __u32 __attribute__((aligned(8))) key = 1; + struct test_val *val; + + val = bpf_map_lookup_elem(&map_array_pcpu, &key); + val->index = offsetof(struct test_val, foo); + + return val->index; +} + +SEC("socket") +__description("invalid map access into an array using constant without nullness") +__failure __msg("R0 invalid mem access 'map_value_or_null'") +unsigned int an_array_with_a_constant_no_nullness_out_of_bounds(void) +{ + /* Out of bounds */ + __u32 __attribute__((aligned(8))) key = 3; + struct test_val *val; + + val = bpf_map_lookup_elem(&map_array, &key); + val->index = offsetof(struct test_val, foo); + + return val->index; +} + +SEC("socket") +__description("invalid map access into an array using constant smaller than key_size") +__failure __msg("R0 invalid mem access 'map_value_or_null'") +unsigned int an_array_with_a_constant_too_small(void) +{ + __u32 __attribute__((aligned(8))) key; + struct test_val *val; + + /* Mark entire key as STACK_MISC */ + bpf_probe_read_user(&key, sizeof(key), NULL); + + /* Spilling only the bottom byte results in a tnum const of 1. + * We want to check that the verifier rejects it, as the spill is < 4B. + */ + *(__u8 *)&key = 1; + val = bpf_map_lookup_elem(&map_array, &key); + + /* Should fail, as verifier cannot prove in-bound lookup */ + val->index = offsetof(struct test_val, foo); + + return val->index; +} + +SEC("socket") +__description("invalid map access into an array using constant larger than key_size") +__failure __msg("R0 invalid mem access 'map_value_or_null'") +unsigned int an_array_with_a_constant_too_big(void) +{ + struct test_val *val; + __u64 key = 1; + + /* Even if the constant value is < max_entries, if the spill size is + * larger than the key size, the set bits may not be where we expect them + * to be on different endian architectures. + */ + val = bpf_map_lookup_elem(&map_array, &key); + val->index = offsetof(struct test_val, foo); + + return val->index; +} + +SEC("socket") +__description("invalid elided lookup using const and non-const key") +__failure __msg("R0 invalid mem access 'map_value_or_null'") +unsigned int mixed_const_and_non_const_key_lookup(void) +{ + __u32 __attribute__((aligned(8))) key; + struct test_val *val; + __u32 rand; + + rand = bpf_get_prandom_u32(); + key = rand > 42 ? 
1 : rand; + val = bpf_map_lookup_elem(&map_array, &key); + + return val->index; +} + +SEC("socket") +__failure __msg("invalid read from stack R2 off=4096 size=4") +__naked void key_lookup_at_invalid_fp(void) +{ + asm volatile (" \ + r1 = %[map_array] ll; \ + r2 = r10; \ + r2 += 4096; \ + call %[bpf_map_lookup_elem]; \ + r0 = *(u64*)(r0 + 0); \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array) + : __clobber_all); +} + +volatile __u32 __attribute__((aligned(8))) global_key; + +SEC("socket") +__description("invalid elided lookup using non-stack key") +__failure __msg("R0 invalid mem access 'map_value_or_null'") +unsigned int non_stack_key_lookup(void) +{ + struct test_val *val; + + global_key = 1; + val = bpf_map_lookup_elem(&map_array, (void *)&global_key); + val->index = offsetof(struct test_val, foo); + + return val->index; +} + +SEC("socket") +__description("doesn't reject UINT64_MAX as s64 for irrelevant maps") +__success __retval(42) +unsigned int doesnt_reject_irrelevant_maps(void) +{ + __u64 key = 0xFFFFFFFFFFFFFFFF; + struct test_val *val; + + val = bpf_map_lookup_elem(&map_hash_48b, &key); + if (val) + return val->index; + + return 42; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_basic_stack.c b/tools/testing/selftests/bpf/progs/verifier_basic_stack.c index 8d77cc5323d3..fb62e09f2114 100644 --- a/tools/testing/selftests/bpf/progs/verifier_basic_stack.c +++ b/tools/testing/selftests/bpf/progs/verifier_basic_stack.c @@ -28,7 +28,7 @@ __naked void stack_out_of_bounds(void) SEC("socket") __description("uninitialized stack1") __success __log_level(4) __msg("stack depth 8") -__failure_unpriv __msg_unpriv("invalid indirect read from stack") +__failure_unpriv __msg_unpriv("invalid read from stack") __naked void uninitialized_stack1(void) { asm volatile (" \ diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds.c b/tools/testing/selftests/bpf/progs/verifier_bounds.c index a0bb7fb40ea5..87a2c60d86e6 100644 --- a/tools/testing/selftests/bpf/progs/verifier_bounds.c +++ b/tools/testing/selftests/bpf/progs/verifier_bounds.c @@ -2,6 +2,7 @@ /* Converted from tools/testing/selftests/bpf/verifier/bounds.c */ #include <linux/bpf.h> +#include <../../../include/linux/filter.h> #include <bpf/bpf_helpers.h> #include "bpf_misc.h" @@ -620,8 +621,14 @@ l1_%=: exit; \ SEC("socket") __description("bounds check mixed 32bit and 64bit arithmetic. test1") -__success __failure_unpriv __msg_unpriv("R0 invalid mem access 'scalar'") +__success __success_unpriv __retval(0) +#ifdef SPEC_V1 +__xlated_unpriv("goto pc+2") +__xlated_unpriv("nospec") /* inserted to prevent `R0 invalid mem access 'scalar'` */ +__xlated_unpriv("goto pc-1") /* sanitized dead code */ +__xlated_unpriv("exit") +#endif __naked void _32bit_and_64bit_arithmetic_test1(void) { asm volatile (" \ @@ -643,8 +650,14 @@ l1_%=: exit; \ SEC("socket") __description("bounds check mixed 32bit and 64bit arithmetic. 
test2") -__success __failure_unpriv __msg_unpriv("R0 invalid mem access 'scalar'") +__success __success_unpriv __retval(0) +#ifdef SPEC_V1 +__xlated_unpriv("goto pc+2") +__xlated_unpriv("nospec") /* inserted to prevent `R0 invalid mem access 'scalar'` */ +__xlated_unpriv("goto pc-1") /* sanitized dead code */ +__xlated_unpriv("exit") +#endif __naked void _32bit_and_64bit_arithmetic_test2(void) { asm volatile (" \ @@ -691,9 +704,14 @@ l0_%=: r0 = 0; \ SEC("socket") __description("bounds check for reg = 0, reg xor 1") -__success __failure_unpriv -__msg_unpriv("R0 min value is outside of the allowed memory range") +__success __success_unpriv __retval(0) +#ifdef SPEC_V1 +__xlated_unpriv("if r1 != 0x0 goto pc+2") +__xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */ +__xlated_unpriv("goto pc-1") /* sanitized dead code */ +__xlated_unpriv("r0 = 0") +#endif __naked void reg_0_reg_xor_1(void) { asm volatile (" \ @@ -719,9 +737,14 @@ l1_%=: r0 = 0; \ SEC("socket") __description("bounds check for reg32 = 0, reg32 xor 1") -__success __failure_unpriv -__msg_unpriv("R0 min value is outside of the allowed memory range") +__success __success_unpriv __retval(0) +#ifdef SPEC_V1 +__xlated_unpriv("if w1 != 0x0 goto pc+2") +__xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */ +__xlated_unpriv("goto pc-1") /* sanitized dead code */ +__xlated_unpriv("r0 = 0") +#endif __naked void reg32_0_reg32_xor_1(void) { asm volatile (" \ @@ -747,9 +770,14 @@ l1_%=: r0 = 0; \ SEC("socket") __description("bounds check for reg = 2, reg xor 3") -__success __failure_unpriv -__msg_unpriv("R0 min value is outside of the allowed memory range") +__success __success_unpriv __retval(0) +#ifdef SPEC_V1 +__xlated_unpriv("if r1 > 0x0 goto pc+2") +__xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */ +__xlated_unpriv("goto pc-1") /* sanitized dead code */ +__xlated_unpriv("r0 = 0") +#endif __naked void reg_2_reg_xor_3(void) { asm volatile (" \ @@ -829,9 +857,14 @@ l1_%=: r0 = 0; \ SEC("socket") __description("bounds check for reg > 0, reg xor 3") -__success __failure_unpriv -__msg_unpriv("R0 min value is outside of the allowed memory range") +__success __success_unpriv __retval(0) +#ifdef SPEC_V1 +__xlated_unpriv("if r1 >= 0x0 goto pc+2") +__xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */ +__xlated_unpriv("goto pc-1") /* sanitized dead code */ +__xlated_unpriv("r0 = 0") +#endif __naked void reg_0_reg_xor_3(void) { asm volatile (" \ @@ -858,9 +891,14 @@ l1_%=: r0 = 0; \ SEC("socket") __description("bounds check for reg32 > 0, reg32 xor 3") -__success __failure_unpriv -__msg_unpriv("R0 min value is outside of the allowed memory range") +__success __success_unpriv __retval(0) +#ifdef SPEC_V1 +__xlated_unpriv("if w1 >= 0x0 goto pc+2") +__xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */ +__xlated_unpriv("goto pc-1") /* sanitized dead code */ +__xlated_unpriv("r0 = 0") +#endif __naked void reg32_0_reg32_xor_3(void) { asm volatile (" \ @@ -1028,7 +1066,7 @@ l0_%=: r0 = 0; \ SEC("xdp") __description("bound check with JMP_JSLT for crossing 64-bit signed boundary") __success __retval(0) -__flag(!BPF_F_TEST_REG_INVARIANTS) /* known invariants violation */ +__flag(BPF_F_TEST_REG_INVARIANTS) __naked void crossing_64_bit_signed_boundary_2(void) { asm volatile (" \ @@ -1200,4 +1238,434 @@ 
l0_%=: r0 = 0; \ : __clobber_all); } +SEC("tc") +__description("multiply mixed sign bounds. test 1") +__success __log_level(2) +__msg("r6 *= r7 {{.*}}; R6_w=scalar(smin=umin=0x1bc16d5cd4927ee1,smax=umax=0x1bc16d674ec80000,smax32=0x7ffffeff,umax32=0xfffffeff,var_off=(0x1bc16d4000000000; 0x3ffffffeff))") +__naked void mult_mixed0_sign(void) +{ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "r6 = r0;" + "call %[bpf_get_prandom_u32];" + "r7 = r0;" + "r6 &= 0xf;" + "r6 -= 1000000000;" + "r7 &= 0xf;" + "r7 -= 2000000000;" + "r6 *= r7;" + "exit" + : + : __imm(bpf_get_prandom_u32), + __imm(bpf_skb_store_bytes) + : __clobber_all); +} + +SEC("tc") +__description("multiply mixed sign bounds. test 2") +__success __log_level(2) +__msg("r6 *= r7 {{.*}}; R6_w=scalar(smin=smin32=-100,smax=smax32=200)") +__naked void mult_mixed1_sign(void) +{ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "r6 = r0;" + "call %[bpf_get_prandom_u32];" + "r7 = r0;" + "r6 &= 0xf;" + "r6 -= 0xa;" + "r7 &= 0xf;" + "r7 -= 0x14;" + "r6 *= r7;" + "exit" + : + : __imm(bpf_get_prandom_u32), + __imm(bpf_skb_store_bytes) + : __clobber_all); +} + +SEC("tc") +__description("multiply negative bounds") +__success __log_level(2) +__msg("r6 *= r7 {{.*}}; R6_w=scalar(smin=umin=smin32=umin32=0x3ff280b0,smax=umax=smax32=umax32=0x3fff0001,var_off=(0x3ff00000; 0xf81ff))") +__naked void mult_sign_bounds(void) +{ + asm volatile ( + "r8 = 0x7fff;" + "call %[bpf_get_prandom_u32];" + "r6 = r0;" + "call %[bpf_get_prandom_u32];" + "r7 = r0;" + "r6 &= 0xa;" + "r6 -= r8;" + "r7 &= 0xf;" + "r7 -= r8;" + "r6 *= r7;" + "exit" + : + : __imm(bpf_get_prandom_u32), + __imm(bpf_skb_store_bytes) + : __clobber_all); +} + +SEC("tc") +__description("multiply bounds that don't cross signed boundary") +__success __log_level(2) +__msg("r8 *= r6 {{.*}}; R6_w=scalar(smin=smin32=0,smax=umax=smax32=umax32=11,var_off=(0x0; 0xb)) R8_w=scalar(smin=0,smax=umax=0x7b96bb0a94a3a7cd,var_off=(0x0; 0x7fffffffffffffff))") +__naked void mult_no_sign_crossing(void) +{ + asm volatile ( + "r6 = 0xb;" + "r8 = 0xb3c3f8c99262687 ll;" + "call %[bpf_get_prandom_u32];" + "r7 = r0;" + "r6 &= r7;" + "r8 *= r6;" + "exit" + : + : __imm(bpf_get_prandom_u32), + __imm(bpf_skb_store_bytes) + : __clobber_all); +} + +SEC("tc") +__description("multiplication overflow, result in unbounded reg. test 1") +__success __log_level(2) +__msg("r6 *= r7 {{.*}}; R6_w=scalar()") +__naked void mult_unsign_ovf(void) +{ + asm volatile ( + "r8 = 0x7ffffffffff ll;" + "call %[bpf_get_prandom_u32];" + "r6 = r0;" + "call %[bpf_get_prandom_u32];" + "r7 = r0;" + "r6 &= 0x7fffffff;" + "r7 &= r8;" + "r6 *= r7;" + "exit" + : + : __imm(bpf_get_prandom_u32), + __imm(bpf_skb_store_bytes) + : __clobber_all); +} + +SEC("tc") +__description("multiplication overflow, result in unbounded reg. 
test 2") +__success __log_level(2) +__msg("r6 *= r7 {{.*}}; R6_w=scalar()") +__naked void mult_sign_ovf(void) +{ + asm volatile ( + "r8 = 0x7ffffffff ll;" + "call %[bpf_get_prandom_u32];" + "r6 = r0;" + "call %[bpf_get_prandom_u32];" + "r7 = r0;" + "r6 &= 0xa;" + "r6 -= r8;" + "r7 &= 0x7fffffff;" + "r6 *= r7;" + "exit" + : + : __imm(bpf_get_prandom_u32), + __imm(bpf_skb_store_bytes) + : __clobber_all); +} + +SEC("socket") +__description("64-bit addition, all outcomes overflow") +__success __log_level(2) +__msg("5: (0f) r3 += r3 {{.*}} R3_w=scalar(umin=0x4000000000000000,umax=0xfffffffffffffffe)") +__retval(0) +__naked void add64_full_overflow(void) +{ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "r4 = r0;" + "r3 = 0xa000000000000000 ll;" + "r3 |= r4;" + "r3 += r3;" + "r0 = 0;" + "exit" + : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("64-bit addition, partial overflow, result in unbounded reg") +__success __log_level(2) +__msg("4: (0f) r3 += r3 {{.*}} R3_w=scalar()") +__retval(0) +__naked void add64_partial_overflow(void) +{ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "r4 = r0;" + "r3 = 2;" + "r3 |= r4;" + "r3 += r3;" + "r0 = 0;" + "exit" + : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("32-bit addition overflow, all outcomes overflow") +__success __log_level(2) +__msg("4: (0c) w3 += w3 {{.*}} R3_w=scalar(smin=umin=umin32=0x40000000,smax=umax=umax32=0xfffffffe,var_off=(0x0; 0xffffffff))") +__retval(0) +__naked void add32_full_overflow(void) +{ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "w4 = w0;" + "w3 = 0xa0000000;" + "w3 |= w4;" + "w3 += w3;" + "r0 = 0;" + "exit" + : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("32-bit addition, partial overflow, result in unbounded u32 bounds") +__success __log_level(2) +__msg("4: (0c) w3 += w3 {{.*}} R3_w=scalar(smin=0,smax=umax=0xffffffff,var_off=(0x0; 0xffffffff))") +__retval(0) +__naked void add32_partial_overflow(void) +{ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "w4 = w0;" + "w3 = 2;" + "w3 |= w4;" + "w3 += w3;" + "r0 = 0;" + "exit" + : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("64-bit subtraction, all outcomes underflow") +__success __log_level(2) +__msg("6: (1f) r3 -= r1 {{.*}} R3_w=scalar(umin=1,umax=0x8000000000000000)") +__retval(0) +__naked void sub64_full_overflow(void) +{ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "r1 = r0;" + "r2 = 0x8000000000000000 ll;" + "r1 |= r2;" + "r3 = 0;" + "r3 -= r1;" + "r0 = 0;" + "exit" + : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("64-bit subtraction, partial overflow, result in unbounded reg") +__success __log_level(2) +__msg("3: (1f) r3 -= r2 {{.*}} R3_w=scalar()") +__retval(0) +__naked void sub64_partial_overflow(void) +{ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "r3 = r0;" + "r2 = 1;" + "r3 -= r2;" + "r0 = 0;" + "exit" + : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("32-bit subtraction overflow, all outcomes underflow") +__success __log_level(2) +__msg("5: (1c) w3 -= w1 {{.*}} R3_w=scalar(smin=umin=umin32=1,smax=umax=umax32=0x80000000,var_off=(0x0; 0xffffffff))") +__retval(0) +__naked void sub32_full_overflow(void) +{ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "w1 = w0;" + "w2 = 0x80000000;" + "w1 |= w2;" + "w3 = 0;" + "w3 -= w1;" + "r0 = 0;" + "exit" + : + : __imm(bpf_get_prandom_u32) + : 
__clobber_all); +} + +SEC("socket") +__description("32-bit subtraction, partial overflow, result in unbounded u32 bounds") +__success __log_level(2) +__msg("3: (1c) w3 -= w2 {{.*}} R3_w=scalar(smin=0,smax=umax=0xffffffff,var_off=(0x0; 0xffffffff))") +__retval(0) +__naked void sub32_partial_overflow(void) +{ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "w3 = w0;" + "w2 = 1;" + "w3 -= w2;" + "r0 = 0;" + "exit" + : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("dead branch on jset, does not result in invariants violation error") +__success __log_level(2) +__retval(0) __flag(BPF_F_TEST_REG_INVARIANTS) +__naked void jset_range_analysis(void) +{ + asm volatile (" \ + call %[bpf_get_netns_cookie]; \ + if r0 == 0 goto l0_%=; \ + if r0 & 0xffffffff goto +0; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_netns_cookie) + : __clobber_all); +} + +/* This test covers the bounds deduction on 64bits when the s64 and u64 ranges + * overlap on the negative side. At instruction 7, the ranges look as follows: + * + * 0 umin=0xfffffcf1 umax=0xff..ff6e U64_MAX + * | [xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx] | + * |----------------------------|------------------------------| + * |xxxxxxxxxx] [xxxxxxxxxxxx| + * 0 smax=0xeffffeee smin=-655 -1 + * + * We should therefore deduce the following new bounds: + * + * 0 u64=[0xff..ffd71;0xff..ff6e] U64_MAX + * | [xxx] | + * |----------------------------|------------------------------| + * | [xxx] | + * 0 s64=[-655;-146] -1 + * + * Without the deduction cross sign boundary, we end up with an invariant + * violation error. + */ +SEC("socket") +__description("bounds deduction cross sign boundary, negative overlap") +__success __log_level(2) __flag(BPF_F_TEST_REG_INVARIANTS) +__msg("7: (1f) r0 -= r6 {{.*}} R0=scalar(smin=smin32=-655,smax=smax32=-146,umin=0xfffffffffffffd71,umax=0xffffffffffffff6e,umin32=0xfffffd71,umax32=0xffffff6e,var_off=(0xfffffffffffffc00; 0x3ff))") +__retval(0) +__naked void bounds_deduct_negative_overlap(void) +{ + asm volatile(" \ + call %[bpf_get_prandom_u32]; \ + w3 = w0; \ + w6 = (s8)w0; \ + r0 = (s8)r0; \ + if w6 >= 0xf0000000 goto l0_%=; \ + r0 += r6; \ + r6 += 400; \ + r0 -= r6; \ + if r3 < r0 goto l0_%=; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +/* This test covers the bounds deduction on 64bits when the s64 and u64 ranges + * overlap on the positive side. At instruction 3, the ranges look as follows: + * + * 0 umin=0 umax=0xffffffffffffff00 U64_MAX + * [xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx] | + * |----------------------------|------------------------------| + * |xxxxxxxx] [xxxxxxxx| + * 0 smax=127 smin=-128 -1 + * + * We should therefore deduce the following new bounds: + * + * 0 u64=[0;127] U64_MAX + * [xxxxxxxx] | + * |----------------------------|------------------------------| + * [xxxxxxxx] | + * 0 s64=[0;127] -1 + * + * Without the deduction cross sign boundary, the program is rejected due to + * the frame pointer write. 
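+ * Concretely: the negative half of the s64 range, [-128; -1], corresponds to u64 [0xffffffffffffff80; U64_MAX], which lies entirely above umax = 0xffffffffffffff00, so only [0; 127] can remain on both the signed and unsigned views.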
+ */ +SEC("socket") +__description("bounds deduction cross sign boundary, positive overlap") +__success __log_level(2) __flag(BPF_F_TEST_REG_INVARIANTS) +__msg("3: (2d) if r0 > r1 {{.*}} R0_w=scalar(smin=smin32=0,smax=umax=smax32=umax32=127,var_off=(0x0; 0x7f))") +__retval(0) +__naked void bounds_deduct_positive_overlap(void) +{ + asm volatile(" \ + call %[bpf_get_prandom_u32]; \ + r0 = (s8)r0; \ + r1 = 0xffffffffffffff00; \ + if r0 > r1 goto l0_%=; \ + if r0 < 128 goto l0_%=; \ + r10 = 0; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +/* This test is the same as above, but the s64 and u64 ranges overlap in two + * places. At instruction 3, the ranges look as follows: + * + * 0 umin=0 umax=0xffffffffffffff80 U64_MAX + * [xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx] | + * |----------------------------|------------------------------| + * |xxxxxxxx] [xxxxxxxx| + * 0 smax=127 smin=-128 -1 + * + * 0xffffffffffffff80 = (u64)-128. We therefore can't deduce anything new and + * the program should fail due to the frame pointer write. + */ +SEC("socket") +__description("bounds deduction cross sign boundary, two overlaps") +__failure __flag(BPF_F_TEST_REG_INVARIANTS) +__msg("3: (2d) if r0 > r1 {{.*}} R0_w=scalar(smin=smin32=-128,smax=smax32=127,umax=0xffffffffffffff80)") +__msg("frame pointer is read only") +__naked void bounds_deduct_two_overlaps(void) +{ + asm volatile(" \ + call %[bpf_get_prandom_u32]; \ + r0 = (s8)r0; \ + r1 = 0xffffffffffffff80; \ + if r0 > r1 goto l0_%=; \ + if r0 < 128 goto l0_%=; \ + r10 = 0; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c b/tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c index c506afbdd936..260a6df264e3 100644 --- a/tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c +++ b/tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c @@ -159,13 +159,16 @@ __failure_unpriv __naked void deducing_bounds_from_const_10(void) { asm volatile (" \ + r6 = r1; \ r0 = 0; \ if r0 s<= 0 goto l0_%=; \ -l0_%=: /* Marks reg as unknown. */ \ - r0 = -r0; \ - r0 -= r1; \ +l0_%=: /* Marks r0 as unknown. 
*/ \ + call %[bpf_get_prandom_u32]; \ + r0 -= r6; \ exit; \ -" ::: __clobber_all); +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); } char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c b/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c index 5094c288cfd7..c258b0722e04 100644 --- a/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c +++ b/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c @@ -12,7 +12,7 @@ SEC("raw_tp") __arch_x86_64 __log_level(4) __msg("stack depth 8") __xlated("4: r5 = 5") -__xlated("5: w0 = ") +__xlated("5: r0 = ") __xlated("6: r0 = &(void __percpu *)(r0)") __xlated("7: r0 = *(u32 *)(r0 +0)") __xlated("8: exit") @@ -620,23 +620,61 @@ __naked void helper_call_does_not_prevent_bpf_fastcall(void) SEC("raw_tp") __arch_x86_64 +__log_level(4) __msg("stack depth 24") +/* may_goto counter at -24 */ +__xlated("0: *(u64 *)(r10 -24) =") +/* may_goto timestamp at -16 */ +__xlated("1: *(u64 *)(r10 -16) =") +__xlated("2: r1 = 1") +__xlated("...") +__xlated("4: r0 = &(void __percpu *)(r0)") +__xlated("...") +/* may_goto expansion starts */ +__xlated("6: r11 = *(u64 *)(r10 -24)") +__xlated("7: if r11 == 0x0 goto pc+6") +__xlated("8: r11 -= 1") +__xlated("9: if r11 != 0x0 goto pc+2") +__xlated("10: r11 = -24") +__xlated("11: call unknown") +__xlated("12: *(u64 *)(r10 -24) = r11") +/* may_goto expansion ends */ +__xlated("13: *(u64 *)(r10 -8) = r1") +__xlated("14: exit") +__success +__naked void may_goto_interaction_x86_64(void) +{ + asm volatile ( + "r1 = 1;" + "*(u64 *)(r10 - 16) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 16);" + ".8byte %[may_goto];" + /* just touch some stack at -8 */ + "*(u64 *)(r10 - 8) = r1;" + "exit;" + : + : __imm(bpf_get_smp_processor_id), + __imm_insn(may_goto, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, +1 /* offset */, 0)) + : __clobber_all); +} + +SEC("raw_tp") +__arch_arm64 __log_level(4) __msg("stack depth 16") /* may_goto counter at -16 */ __xlated("0: *(u64 *)(r10 -16) =") __xlated("1: r1 = 1") -__xlated("...") -__xlated("3: r0 = &(void __percpu *)(r0)") -__xlated("...") +__xlated("2: call bpf_get_smp_processor_id") /* may_goto expansion starts */ -__xlated("5: r11 = *(u64 *)(r10 -16)") -__xlated("6: if r11 == 0x0 goto pc+3") -__xlated("7: r11 -= 1") -__xlated("8: *(u64 *)(r10 -16) = r11") +__xlated("3: r11 = *(u64 *)(r10 -16)") +__xlated("4: if r11 == 0x0 goto pc+3") +__xlated("5: r11 -= 1") +__xlated("6: *(u64 *)(r10 -16) = r11") /* may_goto expansion ends */ -__xlated("9: *(u64 *)(r10 -8) = r1") -__xlated("10: exit") +__xlated("7: *(u64 *)(r10 -8) = r1") +__xlated("8: exit") __success -__naked void may_goto_interaction(void) +__naked void may_goto_interaction_arm64(void) { asm volatile ( "r1 = 1;" @@ -666,7 +704,7 @@ SEC("raw_tp") __arch_x86_64 __log_level(4) __msg("stack depth 32+0") __xlated("2: r1 = 1") -__xlated("3: w0 =") +__xlated("3: r0 =") __xlated("4: r0 = &(void __percpu *)(r0)") __xlated("5: r0 = *(u32 *)(r0 +0)") /* bpf_loop params setup */ @@ -715,7 +753,7 @@ __arch_x86_64 __log_level(4) __msg("stack depth 40+0") /* call bpf_get_smp_processor_id */ __xlated("2: r1 = 42") -__xlated("3: w0 =") +__xlated("3: r0 =") __xlated("4: r0 = &(void __percpu *)(r0)") __xlated("5: r0 = *(u32 *)(r0 +0)") /* call bpf_get_prandom_u32 */ diff --git a/tools/testing/selftests/bpf/progs/verifier_bpf_trap.c b/tools/testing/selftests/bpf/progs/verifier_bpf_trap.c new file mode 100644 index 000000000000..35e2cdc00a01 --- /dev/null +++ 
b/tools/testing/selftests/bpf/progs/verifier_bpf_trap.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */ +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#if __clang_major__ >= 21 && 0 +SEC("socket") +__description("__builtin_trap with simple c code") +__failure __msg("unexpected __bpf_trap() due to uninitialized variable?") +void bpf_builtin_trap_with_simple_c(void) +{ + __builtin_trap(); +} +#endif + +SEC("socket") +__description("__bpf_trap with simple c code") +__failure __msg("unexpected __bpf_trap() due to uninitialized variable?") +void bpf_trap_with_simple_c(void) +{ + __bpf_trap(); +} + +SEC("socket") +__description("__bpf_trap as the second-from-last insn") +__failure __msg("unexpected __bpf_trap() due to uninitialized variable?") +__naked void bpf_trap_at_func_end(void) +{ + asm volatile ( + "r0 = 0;" + "call %[__bpf_trap];" + "exit;" + : + : __imm(__bpf_trap) + : __clobber_all); +} + +SEC("socket") +__description("dead code __bpf_trap in the middle of code") +__success +__naked void dead_bpf_trap_in_middle(void) +{ + asm volatile ( + "r0 = 0;" + "if r0 == 0 goto +1;" + "call %[__bpf_trap];" + "r0 = 2;" + "exit;" + : + : __imm(__bpf_trap) + : __clobber_all); +} + +SEC("socket") +__description("reachable __bpf_trap in the middle of code") +__failure __msg("unexpected __bpf_trap() due to uninitialized variable?") +__naked void live_bpf_trap_in_middle(void) +{ + asm volatile ( + "r0 = 0;" + "if r0 == 1 goto +1;" + "call %[__bpf_trap];" + "r0 = 2;" + "exit;" + : + : __imm(__bpf_trap) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_btf_ctx_access.c b/tools/testing/selftests/bpf/progs/verifier_btf_ctx_access.c index a570e48b917a..03942cec07e5 100644 --- a/tools/testing/selftests/bpf/progs/verifier_btf_ctx_access.c +++ b/tools/testing/selftests/bpf/progs/verifier_btf_ctx_access.c @@ -11,7 +11,7 @@ __success __retval(0) __naked void btf_ctx_access_accept(void) { asm volatile (" \ - r2 = *(u32*)(r1 + 8); /* load 2nd argument value (int pointer) */\ + r2 = *(u64 *)(r1 + 8); /* load 2nd argument value (int pointer) */\ r0 = 0; \ exit; \ " ::: __clobber_all); @@ -23,7 +23,55 @@ __success __retval(0) __naked void ctx_access_u32_pointer_accept(void) { asm volatile (" \ - r2 = *(u32*)(r1 + 0); /* load 1nd argument value (u32 pointer) */\ + r2 = *(u64 *)(r1 + 0); /* load 1st argument value (u32 pointer) */\ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("fentry/bpf_fentry_test9") +__description("btf_ctx_access u32 pointer reject u32") +__failure __msg("size 4 must be 8") +__naked void ctx_access_u32_pointer_reject_32(void) +{ + asm volatile (" \ + r2 = *(u32 *)(r1 + 0); /* load 1st argument with narrow load */\ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("fentry/bpf_fentry_test9") +__description("btf_ctx_access u32 pointer reject u16") +__failure __msg("size 2 must be 8") +__naked void ctx_access_u32_pointer_reject_16(void) +{ + asm volatile (" \ + r2 = *(u16 *)(r1 + 0); /* load 1st argument with narrow load */\ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("fentry/bpf_fentry_test9") +__description("btf_ctx_access u32 pointer reject u8") +__failure __msg("size 1 must be 8") +__naked void ctx_access_u32_pointer_reject_8(void) +{ + asm volatile (" \ + r2 = *(u8 *)(r1 + 0); /* load 1st argument with narrow load */\ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("fentry/bpf_fentry_test10")
+__description("btf_ctx_access const void pointer accept") +__success __retval(0) +__naked void ctx_access_const_void_pointer_accept(void) +{ + asm volatile (" \ + r2 = *(u64 *)(r1 + 0); /* load 1st argument value (const void pointer) */\ r0 = 0; \ exit; \ " ::: __clobber_all); diff --git a/tools/testing/selftests/bpf/progs/verifier_const_or.c b/tools/testing/selftests/bpf/progs/verifier_const_or.c index ba8922b2eebd..68c568c3c3a0 100644 --- a/tools/testing/selftests/bpf/progs/verifier_const_or.c +++ b/tools/testing/selftests/bpf/progs/verifier_const_or.c @@ -25,7 +25,7 @@ __naked void constant_should_keep_constant_type(void) SEC("tracepoint") __description("constant register |= constant should not bypass stack boundary checks") -__failure __msg("invalid indirect access to stack R1 off=-48 size=58") +__failure __msg("invalid write to stack R1 off=-48 size=58") __naked void not_bypass_stack_boundary_checks_1(void) { asm volatile (" \ @@ -62,7 +62,7 @@ __naked void register_should_keep_constant_type(void) SEC("tracepoint") __description("constant register |= constant register should not bypass stack boundary checks") -__failure __msg("invalid indirect access to stack R1 off=-48 size=58") +__failure __msg("invalid write to stack R1 off=-48 size=58") __naked void not_bypass_stack_boundary_checks_2(void) { asm volatile (" \ diff --git a/tools/testing/selftests/bpf/progs/verifier_ctx.c b/tools/testing/selftests/bpf/progs/verifier_ctx.c index a83809a1dbbf..424463094760 100644 --- a/tools/testing/selftests/bpf/progs/verifier_ctx.c +++ b/tools/testing/selftests/bpf/progs/verifier_ctx.c @@ -1,10 +1,12 @@ // SPDX-License-Identifier: GPL-2.0 /* Converted from tools/testing/selftests/bpf/verifier/ctx.c */ -#include <linux/bpf.h> +#include "vmlinux.h" #include <bpf/bpf_helpers.h> #include "bpf_misc.h" +#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER)) + SEC("tc") __description("context stores via BPF_ATOMIC") __failure __msg("BPF_ATOMIC stores into R1 ctx is not allowed") @@ -218,4 +220,48 @@ __naked void null_check_8_null_bind(void) : __clobber_all); } +#define narrow_load(type, ctx, field) \ + SEC(type) \ + __description("narrow load on field " #field " of " #ctx) \ + __failure __msg("invalid bpf_context access") \ + __naked void invalid_narrow_load##ctx##field(void) \ + { \ + asm volatile (" \ + r1 = *(u32 *)(r1 + %[off]); \ + r0 = 0; \ + exit;" \ + : \ + : __imm_const(off, offsetof(struct ctx, field) + 4) \ + : __clobber_all); \ + } + +narrow_load("cgroup/getsockopt", bpf_sockopt, sk); +narrow_load("cgroup/getsockopt", bpf_sockopt, optval); +narrow_load("cgroup/getsockopt", bpf_sockopt, optval_end); +narrow_load("tc", __sk_buff, sk); +narrow_load("cgroup/bind4", bpf_sock_addr, sk); +narrow_load("sockops", bpf_sock_ops, sk); +narrow_load("sockops", bpf_sock_ops, skb_data); +narrow_load("sockops", bpf_sock_ops, skb_data_end); +narrow_load("sockops", bpf_sock_ops, skb_hwtstamp); + +#define unaligned_access(type, ctx, field) \ + SEC(type) \ + __description("unaligned access on field " #field " of " #ctx) \ + __failure __msg("invalid bpf_context access") \ + __naked void unaligned_ctx_access_##ctx##field(void) \ + { \ + asm volatile (" \ + r1 = *(u%[size] *)(r1 + %[off]); \ + r0 = 0; \ + exit;" \ + : \ + : __imm_const(size, sizeof_field(struct ctx, field) * 8), \ + __imm_const(off, offsetof(struct ctx, field) + 1) \ + : __clobber_all); \ + } + +unaligned_access("flow_dissector", __sk_buff, data); +unaligned_access("netfilter", bpf_nf_ctx, skb); + char _license[] SEC("license") = "GPL"; 
diff --git a/tools/testing/selftests/bpf/progs/verifier_d_path.c b/tools/testing/selftests/bpf/progs/verifier_d_path.c index ec79cbcfde91..87e51a215558 100644 --- a/tools/testing/selftests/bpf/progs/verifier_d_path.c +++ b/tools/testing/selftests/bpf/progs/verifier_d_path.c @@ -11,7 +11,7 @@ __success __retval(0) __naked void d_path_accept(void) { asm volatile (" \ - r1 = *(u32*)(r1 + 0); \ + r1 = *(u64 *)(r1 + 0); \ r2 = r10; \ r2 += -8; \ r6 = 0; \ @@ -31,7 +31,7 @@ __failure __msg("helper call is not allowed in probe") __naked void d_path_reject(void) { asm volatile (" \ - r1 = *(u32*)(r1 + 0); \ + r1 = *(u64 *)(r1 + 0); \ r2 = r10; \ r2 += -8; \ r6 = 0; \ diff --git a/tools/testing/selftests/bpf/progs/verifier_div_overflow.c b/tools/testing/selftests/bpf/progs/verifier_div_overflow.c index 458984da804c..34e0c012ee76 100644 --- a/tools/testing/selftests/bpf/progs/verifier_div_overflow.c +++ b/tools/testing/selftests/bpf/progs/verifier_div_overflow.c @@ -77,7 +77,7 @@ l0_%=: exit; \ SEC("tc") __description("MOD32 overflow, check 1") -__success __retval(INT_MIN) +__success __retval(_INT_MIN) __naked void mod32_overflow_check_1(void) { asm volatile (" \ @@ -92,7 +92,7 @@ __naked void mod32_overflow_check_1(void) SEC("tc") __description("MOD32 overflow, check 2") -__success __retval(INT_MIN) +__success __retval(_INT_MIN) __naked void mod32_overflow_check_2(void) { asm volatile (" \ diff --git a/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c b/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c index 4ab0ef18d7eb..181da86ba5f0 100644 --- a/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c +++ b/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c @@ -179,4 +179,132 @@ int BPF_PROG(trusted_acq_rel, struct task_struct *task, u64 clone_flags) return subprog_trusted_acq_rel(task); } +__weak int subprog_untrusted_bad_tags(struct task_struct *task __arg_untrusted __arg_nullable) +{ + return task->pid; +} + +SEC("tp_btf/sys_enter") +__failure +__msg("arg#0 untrusted cannot be combined with any other tags") +int untrusted_bad_tags(void *ctx) +{ + return subprog_untrusted_bad_tags(0); +} + +struct local_type_wont_be_accepted {}; + +__weak int subprog_untrusted_bad_type(struct local_type_wont_be_accepted *p __arg_untrusted) +{ + return 0; +} + +SEC("tp_btf/sys_enter") +__failure +__msg("arg#0 reference type('STRUCT local_type_wont_be_accepted') has no matches") +int untrusted_bad_type(void *ctx) +{ + return subprog_untrusted_bad_type(bpf_rdonly_cast(0, 0)); +} + +__weak int subprog_untrusted(const volatile struct task_struct *restrict task __arg_untrusted) +{ + return task->pid; +} + +SEC("tp_btf/sys_enter") +__success +__log_level(2) +__msg("r1 = {{.*}}; {{.*}}R1_w=trusted_ptr_task_struct()") +__msg("Func#1 ('subprog_untrusted') is global and assumed valid.") +__msg("Validating subprog_untrusted() func#1...") +__msg(": R1=untrusted_ptr_task_struct") +int trusted_to_untrusted(void *ctx) +{ + return subprog_untrusted(bpf_get_current_task_btf()); +} + +char mem[16]; +u32 off; + +SEC("tp_btf/sys_enter") +__success +int anything_to_untrusted(void *ctx) +{ + /* untrusted to untrusted */ + subprog_untrusted(bpf_core_cast(0, struct task_struct)); + /* wrong type to untrusted */ + subprog_untrusted((void *)bpf_core_cast(0, struct bpf_verifier_env)); + /* map value to untrusted */ + subprog_untrusted((void *)mem); + /* scalar to untrusted */ + subprog_untrusted(0); + /* variable offset to untrusted (map) */ + subprog_untrusted((void *)mem + off); + /* variable offset 
to untrusted (trusted) */ + subprog_untrusted((void *)bpf_get_current_task_btf() + off); + return 0; +} + +__weak int subprog_untrusted2(struct task_struct *task __arg_untrusted) +{ + return subprog_trusted_task_nullable(task); +} + +SEC("tp_btf/sys_enter") +__failure +__msg("R1 type=untrusted_ptr_ expected=ptr_, trusted_ptr_, rcu_ptr_") +__msg("Caller passes invalid args into func#{{.*}} ('subprog_trusted_task_nullable')") +int untrusted_to_trusted(void *ctx) +{ + return subprog_untrusted2(bpf_get_current_task_btf()); +} + +__weak int subprog_void_untrusted(void *p __arg_untrusted) +{ + return *(int *)p; +} + +__weak int subprog_char_untrusted(char *p __arg_untrusted) +{ + return *(int *)p; +} + +__weak int subprog_enum_untrusted(enum bpf_attach_type *p __arg_untrusted) +{ + return *(int *)p; +} + +SEC("tp_btf/sys_enter") +__success +__log_level(2) +__msg("r1 = {{.*}}; {{.*}}R1_w=trusted_ptr_task_struct()") +__msg("Func#1 ('subprog_void_untrusted') is global and assumed valid.") +__msg("Validating subprog_void_untrusted() func#1...") +__msg(": R1=rdonly_untrusted_mem(sz=0)") +int trusted_to_untrusted_mem(void *ctx) +{ + return subprog_void_untrusted(bpf_get_current_task_btf()); +} + +SEC("tp_btf/sys_enter") +__success +int anything_to_untrusted_mem(void *ctx) +{ + /* untrusted to untrusted mem */ + subprog_void_untrusted(bpf_core_cast(0, struct task_struct)); + /* map value to untrusted mem */ + subprog_void_untrusted(mem); + /* scalar to untrusted mem */ + subprog_void_untrusted(0); + /* variable offset to untrusted mem (map) */ + subprog_void_untrusted((void *)mem + off); + /* variable offset to untrusted mem (trusted) */ + subprog_void_untrusted(bpf_get_current_task_btf() + off); + /* variable offset to untrusted char/enum (map) */ + subprog_char_untrusted(mem + off); + subprog_enum_untrusted((void *)mem + off); + return 0; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_gotol.c b/tools/testing/selftests/bpf/progs/verifier_gotol.c index 05a329ee45ee..d5d8f24df394 100644 --- a/tools/testing/selftests/bpf/progs/verifier_gotol.c +++ b/tools/testing/selftests/bpf/progs/verifier_gotol.c @@ -4,11 +4,7 @@ #include <bpf/bpf_helpers.h> #include "bpf_misc.h" -#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \ - (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \ - defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \ - defined(__TARGET_ARCH_loongarch)) && \ - __clang_major__ >= 18 +#ifdef CAN_USE_GOTOL SEC("socket") __description("gotol, small_imm") diff --git a/tools/testing/selftests/bpf/progs/verifier_helper_access_var_len.c b/tools/testing/selftests/bpf/progs/verifier_helper_access_var_len.c index 50c6b22606f6..f2c54e4d89eb 100644 --- a/tools/testing/selftests/bpf/progs/verifier_helper_access_var_len.c +++ b/tools/testing/selftests/bpf/progs/verifier_helper_access_var_len.c @@ -67,7 +67,7 @@ SEC("socket") __description("helper access to variable memory: stack, bitwise AND, zero included") /* in privileged mode reads from uninitialized stack locations are permitted */ __success __failure_unpriv -__msg_unpriv("invalid indirect read from stack R2 off -64+0 size 64") +__msg_unpriv("invalid read from stack R2 off -64+0 size 64") __retval(0) __naked void stack_bitwise_and_zero_included(void) { @@ -100,7 +100,7 @@ __naked void stack_bitwise_and_zero_included(void) SEC("tracepoint") __description("helper access to variable memory: stack, bitwise AND + JMP, wrong max") -__failure __msg("invalid indirect access to 
stack R1 off=-64 size=65") +__failure __msg("invalid write to stack R1 off=-64 size=65") __naked void bitwise_and_jmp_wrong_max(void) { asm volatile (" \ @@ -187,7 +187,7 @@ l0_%=: r0 = 0; \ SEC("tracepoint") __description("helper access to variable memory: stack, JMP, bounds + offset") -__failure __msg("invalid indirect access to stack R1 off=-64 size=65") +__failure __msg("invalid write to stack R1 off=-64 size=65") __naked void memory_stack_jmp_bounds_offset(void) { asm volatile (" \ @@ -211,7 +211,7 @@ l0_%=: r0 = 0; \ SEC("tracepoint") __description("helper access to variable memory: stack, JMP, wrong max") -__failure __msg("invalid indirect access to stack R1 off=-64 size=65") +__failure __msg("invalid write to stack R1 off=-64 size=65") __naked void memory_stack_jmp_wrong_max(void) { asm volatile (" \ @@ -260,7 +260,7 @@ SEC("socket") __description("helper access to variable memory: stack, JMP, no min check") /* in privileged mode reads from uninitialized stack locations are permitted */ __success __failure_unpriv -__msg_unpriv("invalid indirect read from stack R2 off -64+0 size 64") +__msg_unpriv("invalid read from stack R2 off -64+0 size 64") __retval(0) __naked void stack_jmp_no_min_check(void) { @@ -750,7 +750,7 @@ SEC("socket") __description("helper access to variable memory: 8 bytes leak") /* in privileged mode reads from uninitialized stack locations are permitted */ __success __failure_unpriv -__msg_unpriv("invalid indirect read from stack R2 off -64+32 size 64") +__msg_unpriv("invalid read from stack R2 off -64+32 size 64") __retval(0) __naked void variable_memory_8_bytes_leak(void) { diff --git a/tools/testing/selftests/bpf/progs/verifier_int_ptr.c b/tools/testing/selftests/bpf/progs/verifier_int_ptr.c index 5f2efb895edb..59e34d558654 100644 --- a/tools/testing/selftests/bpf/progs/verifier_int_ptr.c +++ b/tools/testing/selftests/bpf/progs/verifier_int_ptr.c @@ -96,7 +96,7 @@ __naked void arg_ptr_to_long_misaligned(void) SEC("cgroup/sysctl") __description("arg pointer to long size < sizeof(long)") -__failure __msg("invalid indirect access to stack R4 off=-4 size=8") +__failure __msg("invalid write to stack R4 off=-4 size=8") __naked void to_long_size_sizeof_long(void) { asm volatile (" \ diff --git a/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c b/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c index e54bb5385bc1..75dd922e4e9f 100644 --- a/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c +++ b/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c @@ -407,11 +407,7 @@ l0_%=: call %[bpf_jiffies64]; \ : __clobber_all); } -#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \ - (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \ - defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \ - defined(__TARGET_ARCH_loongarch)) && \ - __clang_major__ >= 18 +#ifdef CAN_USE_GOTOL SEC("socket") __success __retval(0) __naked void gotol_and_may_goto(void) diff --git a/tools/testing/selftests/bpf/progs/verifier_load_acquire.c b/tools/testing/selftests/bpf/progs/verifier_load_acquire.c new file mode 100644 index 000000000000..74f4f19c10b8 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_load_acquire.c @@ -0,0 +1,234 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Google LLC. 
*/ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "../../../include/linux/filter.h" +#include "bpf_misc.h" + +#ifdef CAN_USE_LOAD_ACQ_STORE_REL + +SEC("socket") +__description("load-acquire, 8-bit") +__success __success_unpriv __retval(0) +__naked void load_acquire_8(void) +{ + asm volatile ( + "r0 = 0;" + "w1 = 0xfe;" + "*(u8 *)(r10 - 1) = w1;" + ".8byte %[load_acquire_insn];" // w2 = load_acquire((u8 *)(r10 - 1)); + "if r2 == r1 goto 1f;" + "r0 = 1;" +"1:" + "exit;" + : + : __imm_insn(load_acquire_insn, + BPF_ATOMIC_OP(BPF_B, BPF_LOAD_ACQ, BPF_REG_2, BPF_REG_10, -1)) + : __clobber_all); +} + +SEC("socket") +__description("load-acquire, 16-bit") +__success __success_unpriv __retval(0) +__naked void load_acquire_16(void) +{ + asm volatile ( + "r0 = 0;" + "w1 = 0xfedc;" + "*(u16 *)(r10 - 2) = w1;" + ".8byte %[load_acquire_insn];" // w2 = load_acquire((u16 *)(r10 - 2)); + "if r2 == r1 goto 1f;" + "r0 = 1;" +"1:" + "exit;" + : + : __imm_insn(load_acquire_insn, + BPF_ATOMIC_OP(BPF_H, BPF_LOAD_ACQ, BPF_REG_2, BPF_REG_10, -2)) + : __clobber_all); +} + +SEC("socket") +__description("load-acquire, 32-bit") +__success __success_unpriv __retval(0) +__naked void load_acquire_32(void) +{ + asm volatile ( + "r0 = 0;" + "w1 = 0xfedcba09;" + "*(u32 *)(r10 - 4) = w1;" + ".8byte %[load_acquire_insn];" // w2 = load_acquire((u32 *)(r10 - 4)); + "if r2 == r1 goto 1f;" + "r0 = 1;" +"1:" + "exit;" + : + : __imm_insn(load_acquire_insn, + BPF_ATOMIC_OP(BPF_W, BPF_LOAD_ACQ, BPF_REG_2, BPF_REG_10, -4)) + : __clobber_all); +} + +SEC("socket") +__description("load-acquire, 64-bit") +__success __success_unpriv __retval(0) +__naked void load_acquire_64(void) +{ + asm volatile ( + "r0 = 0;" + "r1 = 0xfedcba0987654321 ll;" + "*(u64 *)(r10 - 8) = r1;" + ".8byte %[load_acquire_insn];" // r2 = load_acquire((u64 *)(r10 - 8)); + "if r2 == r1 goto 1f;" + "r0 = 1;" +"1:" + "exit;" + : + : __imm_insn(load_acquire_insn, + BPF_ATOMIC_OP(BPF_DW, BPF_LOAD_ACQ, BPF_REG_2, BPF_REG_10, -8)) + : __clobber_all); +} + +SEC("socket") +__description("load-acquire with uninitialized src_reg") +__failure __failure_unpriv __msg("R2 !read_ok") +__naked void load_acquire_with_uninitialized_src_reg(void) +{ + asm volatile ( + ".8byte %[load_acquire_insn];" // r0 = load_acquire((u64 *)(r2 + 0)); + "exit;" + : + : __imm_insn(load_acquire_insn, + BPF_ATOMIC_OP(BPF_DW, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_2, 0)) + : __clobber_all); +} + +SEC("socket") +__description("load-acquire with non-pointer src_reg") +__failure __failure_unpriv __msg("R1 invalid mem access 'scalar'") +__naked void load_acquire_with_non_pointer_src_reg(void) +{ + asm volatile ( + "r1 = 0;" + ".8byte %[load_acquire_insn];" // r0 = load_acquire((u64 *)(r1 + 0)); + "exit;" + : + : __imm_insn(load_acquire_insn, + BPF_ATOMIC_OP(BPF_DW, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_1, 0)) + : __clobber_all); +} + +SEC("socket") +__description("misaligned load-acquire") +__failure __failure_unpriv __msg("misaligned stack access off") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void load_acquire_misaligned(void) +{ + asm volatile ( + "r1 = 0;" + "*(u64 *)(r10 - 8) = r1;" + ".8byte %[load_acquire_insn];" // w0 = load_acquire((u32 *)(r10 - 5)); + "exit;" + : + : __imm_insn(load_acquire_insn, + BPF_ATOMIC_OP(BPF_W, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_10, -5)) + : __clobber_all); +} + +SEC("socket") +__description("load-acquire from ctx pointer") +__failure __failure_unpriv __msg("BPF_ATOMIC loads from R1 ctx is not allowed") +__naked void load_acquire_from_ctx_pointer(void) +{ + asm volatile ( + 
".8byte %[load_acquire_insn];" // w0 = load_acquire((u8 *)(r1 + 0)); + "exit;" + : + : __imm_insn(load_acquire_insn, + BPF_ATOMIC_OP(BPF_B, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_1, 0)) + : __clobber_all); +} + +SEC("xdp") +__description("load-acquire from pkt pointer") +__failure __msg("BPF_ATOMIC loads from R2 pkt is not allowed") +__naked void load_acquire_from_pkt_pointer(void) +{ + asm volatile ( + "r2 = *(u32 *)(r1 + %[xdp_md_data]);" + "r3 = *(u32 *)(r1 + %[xdp_md_data_end]);" + "r1 = r2;" + "r1 += 8;" + "if r1 >= r3 goto l0_%=;" + ".8byte %[load_acquire_insn];" // w0 = load_acquire((u8 *)(r2 + 0)); +"l0_%=: r0 = 0;" + "exit;" + : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)), + __imm_insn(load_acquire_insn, + BPF_ATOMIC_OP(BPF_B, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_2, 0)) + : __clobber_all); +} + +SEC("flow_dissector") +__description("load-acquire from flow_keys pointer") +__failure __msg("BPF_ATOMIC loads from R2 flow_keys is not allowed") +__naked void load_acquire_from_flow_keys_pointer(void) +{ + asm volatile ( + "r2 = *(u64 *)(r1 + %[__sk_buff_flow_keys]);" + ".8byte %[load_acquire_insn];" // w0 = load_acquire((u8 *)(r2 + 0)); + "exit;" + : + : __imm_const(__sk_buff_flow_keys, + offsetof(struct __sk_buff, flow_keys)), + __imm_insn(load_acquire_insn, + BPF_ATOMIC_OP(BPF_B, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_2, 0)) + : __clobber_all); +} + +SEC("sk_reuseport") +__description("load-acquire from sock pointer") +__failure __msg("BPF_ATOMIC loads from R2 sock is not allowed") +__naked void load_acquire_from_sock_pointer(void) +{ + asm volatile ( + "r2 = *(u64 *)(r1 + %[sk_reuseport_md_sk]);" + // w0 = load_acquire((u8 *)(r2 + offsetof(struct bpf_sock, family))); + ".8byte %[load_acquire_insn];" + "exit;" + : + : __imm_const(sk_reuseport_md_sk, offsetof(struct sk_reuseport_md, sk)), + __imm_insn(load_acquire_insn, + BPF_ATOMIC_OP(BPF_B, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_2, + offsetof(struct bpf_sock, family))) + : __clobber_all); +} + +SEC("socket") +__description("load-acquire with invalid register R15") +__failure __failure_unpriv __msg("R15 is invalid") +__naked void load_acquire_with_invalid_reg(void) +{ + asm volatile ( + ".8byte %[load_acquire_insn];" // r0 = load_acquire((u64 *)(r15 + 0)); + "exit;" + : + : __imm_insn(load_acquire_insn, + BPF_ATOMIC_OP(BPF_DW, BPF_LOAD_ACQ, BPF_REG_0, 15 /* invalid reg */, 0)) + : __clobber_all); +} + +#else /* CAN_USE_LOAD_ACQ_STORE_REL */ + +SEC("socket") +__description("Clang version < 18, ENABLE_ATOMICS_TESTS not defined, and/or JIT doesn't support load-acquire, use a dummy test") +__success +int dummy_test(void) +{ + return 0; +} + +#endif /* CAN_USE_LOAD_ACQ_STORE_REL */ + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_map_in_map.c b/tools/testing/selftests/bpf/progs/verifier_map_in_map.c index 4eaab1468eb7..16b761e510f0 100644 --- a/tools/testing/selftests/bpf/progs/verifier_map_in_map.c +++ b/tools/testing/selftests/bpf/progs/verifier_map_in_map.c @@ -47,7 +47,7 @@ l0_%=: r0 = 0; \ SEC("xdp") __description("map in map state pruning") -__success __msg("processed 26 insns") +__success __msg("processed 15 insns") __log_level(2) __retval(0) __flag(BPF_F_TEST_STATE_FREQ) __naked void map_in_map_state_pruning(void) { @@ -139,4 +139,122 @@ __naked void on_the_inner_map_pointer(void) : __clobber_all); } +SEC("socket") +__description("map_ptr is never null") +__success +__naked void map_ptr_is_never_null(void) +{ + asm volatile 
(" \ + r0 = 0; \ + r1 = %[map_in_map] ll; \ + if r1 != 0 goto l0_%=; \ + r10 = 42; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_in_map) + : __clobber_all); +} + +SEC("socket") +__description("map_ptr is never null inner") +__success +__naked void map_ptr_is_never_null_inner(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_in_map] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + if r0 != 0 goto l0_%=; \ + r10 = 42; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_in_map) + : __clobber_all); +} + +SEC("socket") +__description("map_ptr is never null inner spill fill") +__success +__naked void map_ptr_is_never_null_inner_spill_fill(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_in_map] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: *(u64 *)(r10 -16) = r0; \ + r1 = *(u64 *)(r10 -16); \ + if r1 == 0 goto l1_%=; \ + exit; \ +l1_%=: r10 = 42; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_in_map) + : __clobber_all); +} + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); + __array(values, struct { + __uint(type, BPF_MAP_TYPE_RINGBUF); + __uint(max_entries, 64 * 1024); + }); +} rb_in_map SEC(".maps"); + +struct rb_ctx { + void *rb; + struct bpf_dynptr dptr; +}; + +static __always_inline struct rb_ctx __rb_event_reserve(__u32 sz) +{ + struct rb_ctx rb_ctx = {}; + void *rb; + __u32 cpu = bpf_get_smp_processor_id(); + __u32 rb_slot = cpu & 1; + + rb = bpf_map_lookup_elem(&rb_in_map, &rb_slot); + if (!rb) + return rb_ctx; + + rb_ctx.rb = rb; + bpf_ringbuf_reserve_dynptr(rb, sz, 0, &rb_ctx.dptr); + + return rb_ctx; +} + +static __noinline void __rb_event_submit(struct rb_ctx *ctx) +{ + if (!ctx->rb) + return; + + /* If the verifier (incorrectly) concludes that ctx->rb can be + * NULL at this point, we'll get "BPF_EXIT instruction in main + * prog would lead to reference leak" error + */ + bpf_ringbuf_submit_dynptr(&ctx->dptr, 0); +} + +SEC("socket") +int map_ptr_is_never_null_rb(void *ctx) +{ + struct rb_ctx event_ctx = __rb_event_reserve(256); + __rb_event_submit(&event_ctx); + return 0; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_may_goto_1.c b/tools/testing/selftests/bpf/progs/verifier_may_goto_1.c new file mode 100644 index 000000000000..3966d827f288 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_may_goto_1.c @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. 
*/ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "../../../include/linux/filter.h" +#include "bpf_misc.h" + +SEC("raw_tp") +__description("may_goto 0") +__arch_x86_64 +__xlated("0: r0 = 1") +__xlated("1: exit") +__success +__naked void may_goto_simple(void) +{ + asm volatile ( + ".8byte %[may_goto];" + "r0 = 1;" + ".8byte %[may_goto];" + "exit;" + : + : __imm_insn(may_goto, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 0 /* offset */, 0)) + : __clobber_all); +} + +SEC("raw_tp") +__description("batch 2 of may_goto 0") +__arch_x86_64 +__xlated("0: r0 = 1") +__xlated("1: exit") +__success +__naked void may_goto_batch_0(void) +{ + asm volatile ( + ".8byte %[may_goto1];" + ".8byte %[may_goto1];" + "r0 = 1;" + ".8byte %[may_goto1];" + ".8byte %[may_goto1];" + "exit;" + : + : __imm_insn(may_goto1, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 0 /* offset */, 0)) + : __clobber_all); +} + +SEC("raw_tp") +__description("may_goto batch with offsets 2/1/0") +__arch_x86_64 +__xlated("0: r0 = 1") +__xlated("1: exit") +__success +__naked void may_goto_batch_1(void) +{ + asm volatile ( + ".8byte %[may_goto1];" + ".8byte %[may_goto2];" + ".8byte %[may_goto3];" + "r0 = 1;" + ".8byte %[may_goto1];" + ".8byte %[may_goto2];" + ".8byte %[may_goto3];" + "exit;" + : + : __imm_insn(may_goto1, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 2 /* offset */, 0)), + __imm_insn(may_goto2, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 1 /* offset */, 0)), + __imm_insn(may_goto3, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 0 /* offset */, 0)) + : __clobber_all); +} + +SEC("raw_tp") +__description("may_goto batch with offsets 2/0 - x86_64") +__arch_x86_64 +__xlated("0: *(u64 *)(r10 -16) = 65535") +__xlated("1: *(u64 *)(r10 -8) = 0") +__xlated("2: r11 = *(u64 *)(r10 -16)") +__xlated("3: if r11 == 0x0 goto pc+6") +__xlated("4: r11 -= 1") +__xlated("5: if r11 != 0x0 goto pc+2") +__xlated("6: r11 = -16") +__xlated("7: call unknown") +__xlated("8: *(u64 *)(r10 -16) = r11") +__xlated("9: r0 = 1") +__xlated("10: r0 = 2") +__xlated("11: exit") +__success +__naked void may_goto_batch_2_x86_64(void) +{ + asm volatile ( + ".8byte %[may_goto1];" + ".8byte %[may_goto3];" + "r0 = 1;" + "r0 = 2;" + "exit;" + : + : __imm_insn(may_goto1, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 2 /* offset */, 0)), + __imm_insn(may_goto3, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 0 /* offset */, 0)) + : __clobber_all); +} + +SEC("raw_tp") +__description("may_goto batch with offsets 2/0 - arm64") +__arch_arm64 +__xlated("0: *(u64 *)(r10 -8) = 8388608") +__xlated("1: r11 = *(u64 *)(r10 -8)") +__xlated("2: if r11 == 0x0 goto pc+3") +__xlated("3: r11 -= 1") +__xlated("4: *(u64 *)(r10 -8) = r11") +__xlated("5: r0 = 1") +__xlated("6: r0 = 2") +__xlated("7: exit") +__success +__naked void may_goto_batch_2_arm64(void) +{ + asm volatile ( + ".8byte %[may_goto1];" + ".8byte %[may_goto3];" + "r0 = 1;" + "r0 = 2;" + "exit;" + : + : __imm_insn(may_goto1, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 2 /* offset */, 0)), + __imm_insn(may_goto3, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 0 /* offset */, 0)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_may_goto_2.c b/tools/testing/selftests/bpf/progs/verifier_may_goto_2.c new file mode 100644 index 000000000000..b891faf50660 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_may_goto_2.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. 
*/ + +#include "bpf_misc.h" +#include "bpf_experimental.h" + +int gvar; + +SEC("raw_tp") +__description("C code with may_goto 0") +__success +int may_goto_c_code(void) +{ + int i, tmp[3]; + + for (i = 0; i < 3 && can_loop; i++) + tmp[i] = 0; + + for (i = 0; i < 3 && can_loop; i++) + tmp[i] = gvar - i; + + for (i = 0; i < 3 && can_loop; i++) + gvar += tmp[i]; + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_movsx.c b/tools/testing/selftests/bpf/progs/verifier_movsx.c index 994bbc346d25..a4d8814eb5ed 100644 --- a/tools/testing/selftests/bpf/progs/verifier_movsx.c +++ b/tools/testing/selftests/bpf/progs/verifier_movsx.c @@ -245,7 +245,13 @@ l0_%=: \ SEC("socket") __description("MOV32SX, S8, var_off not u32_max, positive after s8 extension") __success __retval(0) -__failure_unpriv __msg_unpriv("frame pointer is read only") +__success_unpriv +#ifdef SPEC_V1 +__xlated_unpriv("w0 = 0") +__xlated_unpriv("exit") +__xlated_unpriv("nospec") /* inserted to prevent `frame pointer is read only` */ +__xlated_unpriv("goto pc-1") +#endif __naked void mov64sx_s32_varoff_2(void) { asm volatile (" \ @@ -267,7 +273,13 @@ l0_%=: \ SEC("socket") __description("MOV32SX, S8, var_off not u32_max, negative after s8 extension") __success __retval(0) -__failure_unpriv __msg_unpriv("frame pointer is read only") +__success_unpriv +#ifdef SPEC_V1 +__xlated_unpriv("w0 = 0") +__xlated_unpriv("exit") +__xlated_unpriv("nospec") /* inserted to prevent `frame pointer is read only` */ +__xlated_unpriv("goto pc-1") +#endif __naked void mov64sx_s32_varoff_3(void) { asm volatile (" \ diff --git a/tools/testing/selftests/bpf/progs/verifier_mtu.c b/tools/testing/selftests/bpf/progs/verifier_mtu.c index 4ccf1ebc42d1..256956ea1ac5 100644 --- a/tools/testing/selftests/bpf/progs/verifier_mtu.c +++ b/tools/testing/selftests/bpf/progs/verifier_mtu.c @@ -8,7 +8,7 @@ SEC("tc/ingress") __description("uninit/mtu: write rejected") __success __caps_unpriv(CAP_BPF|CAP_NET_ADMIN) -__failure_unpriv __msg_unpriv("invalid indirect read from stack") +__failure_unpriv __msg_unpriv("invalid read from stack") int tc_uninit_mtu(struct __sk_buff *ctx) { __u32 mtu; diff --git a/tools/testing/selftests/bpf/progs/verifier_precision.c b/tools/testing/selftests/bpf/progs/verifier_precision.c index 6b564d4c0986..73fee2aec698 100644 --- a/tools/testing/selftests/bpf/progs/verifier_precision.c +++ b/tools/testing/selftests/bpf/progs/verifier_precision.c @@ -2,6 +2,7 @@ /* Copyright (C) 2023 SUSE LLC */ #include <linux/bpf.h> #include <bpf/bpf_helpers.h> +#include "../../../include/linux/filter.h" #include "bpf_misc.h" SEC("?raw_tp") @@ -90,6 +91,53 @@ __naked int bpf_end_bswap(void) ::: __clobber_all); } +#ifdef CAN_USE_LOAD_ACQ_STORE_REL + +SEC("?raw_tp") +__success __log_level(2) +__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r3 = r10") +__msg("mark_precise: frame0: regs=r2 stack= before 2: (db) r2 = load_acquire((u64 *)(r10 -8))") +__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1") +__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8") +__naked int bpf_load_acquire(void) +{ + asm volatile ( + "r1 = 8;" + "*(u64 *)(r10 - 8) = r1;" + ".8byte %[load_acquire_insn];" /* r2 = load_acquire((u64 *)(r10 - 8)); */ + "r3 = r10;" + "r3 += r2;" /* mark_precise */ + "r0 = 0;" + "exit;" + : + : __imm_insn(load_acquire_insn, + BPF_ATOMIC_OP(BPF_DW, BPF_LOAD_ACQ, BPF_REG_2, BPF_REG_10, -8)) + : __clobber_all); +} + +SEC("?raw_tp") +__success 
__log_level(2) +__msg("mark_precise: frame0: regs=r1 stack= before 3: (bf) r2 = r10") +__msg("mark_precise: frame0: regs=r1 stack= before 2: (79) r1 = *(u64 *)(r10 -8)") +__msg("mark_precise: frame0: regs= stack=-8 before 1: (db) store_release((u64 *)(r10 -8), r1)") +__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8") +__naked int bpf_store_release(void) +{ + asm volatile ( + "r1 = 8;" + ".8byte %[store_release_insn];" /* store_release((u64 *)(r10 - 8), r1); */ + "r1 = *(u64 *)(r10 - 8);" + "r2 = r10;" + "r2 += r1;" /* mark_precise */ + "r0 = 0;" + "exit;" + : + : __imm_insn(store_release_insn, + BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, BPF_REG_10, BPF_REG_1, -8)) + : __clobber_all); +} + +#endif /* CAN_USE_LOAD_ACQ_STORE_REL */ #endif /* v4 instruction */ SEC("?raw_tp") @@ -130,4 +178,127 @@ __naked int state_loop_first_last_equal(void) ); } +__used __naked static void __bpf_cond_op_r10(void) +{ + asm volatile ( + "r2 = 2314885393468386424 ll;" + "goto +0;" + "if r2 <= r10 goto +3;" + "if r1 >= -1835016 goto +0;" + "if r2 <= 8 goto +0;" + "if r3 <= 0 goto +0;" + "exit;" + ::: __clobber_all); +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("8: (bd) if r2 <= r10 goto pc+3") +__msg("9: (35) if r1 >= 0xffe3fff8 goto pc+0") +__msg("10: (b5) if r2 <= 0x8 goto pc+0") +__msg("mark_precise: frame1: last_idx 10 first_idx 0 subseq_idx -1") +__msg("mark_precise: frame1: regs=r2 stack= before 9: (35) if r1 >= 0xffe3fff8 goto pc+0") +__msg("mark_precise: frame1: regs=r2 stack= before 8: (bd) if r2 <= r10 goto pc+3") +__msg("mark_precise: frame1: regs=r2 stack= before 7: (05) goto pc+0") +__naked void bpf_cond_op_r10(void) +{ + asm volatile ( + "r3 = 0 ll;" + "call __bpf_cond_op_r10;" + "r0 = 0;" + "exit;" + ::: __clobber_all); +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("3: (bf) r3 = r10") +__msg("4: (bd) if r3 <= r2 goto pc+1") +__msg("5: (b5) if r2 <= 0x8 goto pc+2") +__msg("mark_precise: frame0: last_idx 5 first_idx 0 subseq_idx -1") +__msg("mark_precise: frame0: regs=r2 stack= before 4: (bd) if r3 <= r2 goto pc+1") +__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r3 = r10") +__naked void bpf_cond_op_not_r10(void) +{ + asm volatile ( + "r0 = 0;" + "r2 = 2314885393468386424 ll;" + "r3 = r10;" + "if r3 <= r2 goto +1;" + "if r2 <= 8 goto +2;" + "r0 = 2 ll;" + "exit;" + ::: __clobber_all); +} + +SEC("lsm.s/socket_connect") +__success __log_level(2) +__msg("0: (b7) r0 = 1 ; R0_w=1") +__msg("1: (84) w0 = -w0 ; R0_w=0xffffffff") +__msg("mark_precise: frame0: last_idx 2 first_idx 0 subseq_idx -1") +__msg("mark_precise: frame0: regs=r0 stack= before 1: (84) w0 = -w0") +__msg("mark_precise: frame0: regs=r0 stack= before 0: (b7) r0 = 1") +__naked int bpf_neg_2(void) +{ + /* + * lsm.s/socket_connect requires a return value within [-4095, 0]. + * Returning -1 is allowed + */ + asm volatile ( + "r0 = 1;" + "w0 = -w0;" + "exit;" + ::: __clobber_all); +} + +SEC("lsm.s/socket_connect") +__failure __msg("At program exit the register R0 has") +__naked int bpf_neg_3(void) +{ + /* + * lsm.s/socket_connect requires a return value within [-4095, 0]. + * Returning -10000 is not allowed. 
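+	 * (w0 = -w0 turns 10000 into -10000, which falls below -4095, i.e.
+	 * outside the [-MAX_ERRNO, 0] window checked at program exit.)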
+ */ + asm volatile ( + "r0 = 10000;" + "w0 = -w0;" + "exit;" + ::: __clobber_all); +} + +SEC("lsm.s/socket_connect") +__success __log_level(2) +__msg("0: (b7) r0 = 1 ; R0_w=1") +__msg("1: (87) r0 = -r0 ; R0_w=-1") +__msg("mark_precise: frame0: last_idx 2 first_idx 0 subseq_idx -1") +__msg("mark_precise: frame0: regs=r0 stack= before 1: (87) r0 = -r0") +__msg("mark_precise: frame0: regs=r0 stack= before 0: (b7) r0 = 1") +__naked int bpf_neg_4(void) +{ + /* + * lsm.s/socket_connect requires a return value within [-4095, 0]. + * Returning -1 is allowed + */ + asm volatile ( + "r0 = 1;" + "r0 = -r0;" + "exit;" + ::: __clobber_all); +} + +SEC("lsm.s/socket_connect") +__failure __msg("At program exit the register R0 has") +__naked int bpf_neg_5(void) +{ + /* + * lsm.s/socket_connect requires a return value within [-4095, 0]. + * Returning -10000 is not allowed. + */ + asm volatile ( + "r0 = 10000;" + "r0 = -r0;" + "exit;" + ::: __clobber_all); +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_private_stack.c b/tools/testing/selftests/bpf/progs/verifier_private_stack.c index b1fbdf119553..1ecd34ebde19 100644 --- a/tools/testing/selftests/bpf/progs/verifier_private_stack.c +++ b/tools/testing/selftests/bpf/progs/verifier_private_stack.c @@ -8,7 +8,7 @@ /* From include/linux/filter.h */ #define MAX_BPF_STACK 512 -#if defined(__TARGET_ARCH_x86) +#if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64) struct elem { struct bpf_timer t; @@ -27,9 +27,21 @@ __description("Private stack, single prog") __success __arch_x86_64 __jited(" movabsq $0x{{.*}}, %r9") -__jited(" addq %gs:0x{{.*}}, %r9") +__jited(" addq %gs:{{.*}}, %r9") __jited(" movl $0x2a, %edi") __jited(" movq %rdi, -0x100(%r9)") +__arch_arm64 +__jited(" stp x25, x27, [sp, {{.*}}]!") +__jited(" mov x27, {{.*}}") +__jited(" movk x27, {{.*}}, lsl #16") +__jited(" movk x27, {{.*}}") +__jited(" mrs x10, TPIDR_EL{{[0-1]}}") +__jited(" add x27, x27, x10") +__jited(" add x25, x27, {{.*}}") +__jited(" mov x0, #0x2a") +__jited(" str x0, [x27]") +__jited("...") +__jited(" ldp x25, x27, [sp], {{.*}}") __naked void private_stack_single_prog(void) { asm volatile (" \ @@ -45,6 +57,9 @@ __description("No private stack") __success __arch_x86_64 __jited(" subq $0x8, %rsp") +__arch_arm64 +__jited(" mov x25, sp") +__jited(" sub sp, sp, #0x10") __naked void no_private_stack_nested(void) { asm volatile (" \ @@ -74,13 +89,26 @@ __success __arch_x86_64 /* private stack fp for the main prog */ __jited(" movabsq $0x{{.*}}, %r9") -__jited(" addq %gs:0x{{.*}}, %r9") +__jited(" addq %gs:{{.*}}, %r9") __jited(" movl $0x2a, %edi") __jited(" movq %rdi, -0x200(%r9)") __jited(" pushq %r9") __jited(" callq 0x{{.*}}") __jited(" popq %r9") __jited(" xorl %eax, %eax") +__arch_arm64 +__jited(" stp x25, x27, [sp, {{.*}}]!") +__jited(" mov x27, {{.*}}") +__jited(" movk x27, {{.*}}, lsl #16") +__jited(" movk x27, {{.*}}") +__jited(" mrs x10, TPIDR_EL{{[0-1]}}") +__jited(" add x27, x27, x10") +__jited(" add x25, x27, {{.*}}") +__jited(" mov x0, #0x2a") +__jited(" str x0, [x27]") +__jited(" bl {{.*}}") +__jited("...") +__jited(" ldp x25, x27, [sp], {{.*}}") __naked void private_stack_nested_1(void) { asm volatile (" \ @@ -122,7 +150,7 @@ __jited(" pushq %rbp") __jited(" movq %rsp, %rbp") __jited(" endbr64") __jited(" movabsq $0x{{.*}}, %r9") -__jited(" addq %gs:0x{{.*}}, %r9") +__jited(" addq %gs:{{.*}}, %r9") __jited(" pushq %r9") __jited(" callq") __jited(" popq %r9") @@ -131,6 +159,24 @@ __jited(" movq %rdi, -0x200(%r9)") 
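/* In these expected-JIT patterns, %r9 carries the per-CPU private stack
 * base (a movabsq immediate plus a %gs-relative add) and is saved and
 * restored around calls, because the callee sets up its own copy.
 */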
__jited(" pushq %r9") __jited(" callq") __jited(" popq %r9") +__arch_arm64 +__jited("func #1") +__jited("...") +__jited(" stp x25, x27, [sp, {{.*}}]!") +__jited(" mov x27, {{.*}}") +__jited(" movk x27, {{.*}}, lsl #16") +__jited(" movk x27, {{.*}}") +__jited(" mrs x10, TPIDR_EL{{[0-1]}}") +__jited(" add x27, x27, x10") +__jited(" add x25, x27, {{.*}}") +__jited(" bl 0x{{.*}}") +__jited(" add x7, x0, #0x0") +__jited(" mov x0, #0x2a") +__jited(" str x0, [x27]") +__jited(" bl 0x{{.*}}") +__jited(" add x7, x0, #0x0") +__jited(" mov x7, #0x0") +__jited(" ldp x25, x27, [sp], {{.*}}") __naked void private_stack_callback(void) { asm volatile (" \ @@ -154,6 +200,28 @@ __arch_x86_64 __jited(" pushq %r9") __jited(" callq") __jited(" popq %r9") +__arch_arm64 +__jited(" stp x29, x30, [sp, #-0x10]!") +__jited(" mov x29, sp") +__jited(" stp xzr, x26, [sp, #-0x10]!") +__jited(" mov x26, sp") +__jited(" stp x19, x20, [sp, #-0x10]!") +__jited(" stp x21, x22, [sp, #-0x10]!") +__jited(" stp x23, x24, [sp, #-0x10]!") +__jited(" stp x25, x26, [sp, #-0x10]!") +__jited(" stp x27, x28, [sp, #-0x10]!") +__jited(" mov x27, {{.*}}") +__jited(" movk x27, {{.*}}, lsl #16") +__jited(" movk x27, {{.*}}") +__jited(" mrs x10, TPIDR_EL{{[0-1]}}") +__jited(" add x27, x27, x10") +__jited(" add x25, x27, {{.*}}") +__jited(" mov x0, #0x2a") +__jited(" str x0, [x27]") +__jited(" mov x0, #0x0") +__jited(" bl 0x{{.*}}") +__jited(" add x7, x0, #0x0") +__jited(" ldp x27, x28, [sp], #0x10") int private_stack_exception_main_prog(void) { asm volatile (" \ @@ -179,6 +247,19 @@ __jited(" movq %rdi, -0x200(%r9)") __jited(" pushq %r9") __jited(" callq") __jited(" popq %r9") +__arch_arm64 +__jited(" stp x27, x28, [sp, #-0x10]!") +__jited(" mov x27, {{.*}}") +__jited(" movk x27, {{.*}}, lsl #16") +__jited(" movk x27, {{.*}}") +__jited(" mrs x10, TPIDR_EL{{[0-1]}}") +__jited(" add x27, x27, x10") +__jited(" add x25, x27, {{.*}}") +__jited(" mov x0, #0x2a") +__jited(" str x0, [x27]") +__jited(" bl 0x{{.*}}") +__jited(" add x7, x0, #0x0") +__jited(" ldp x27, x28, [sp], #0x10") int private_stack_exception_sub_prog(void) { asm volatile (" \ @@ -220,6 +301,10 @@ __description("Private stack, async callback, not nested") __success __retval(0) __arch_x86_64 __jited(" movabsq $0x{{.*}}, %r9") +__arch_arm64 +__jited(" mrs x10, TPIDR_EL{{[0-1]}}") +__jited(" add x27, x27, x10") +__jited(" add x25, x27, {{.*}}") int private_stack_async_callback_1(void) { struct bpf_timer *arr_timer; @@ -241,6 +326,8 @@ __description("Private stack, async callback, potential nesting") __success __retval(0) __arch_x86_64 __jited(" subq $0x100, %rsp") +__arch_arm64 +__jited(" sub sp, sp, #0x100") int private_stack_async_callback_2(void) { struct bpf_timer *arr_timer; diff --git a/tools/testing/selftests/bpf/progs/verifier_raw_stack.c b/tools/testing/selftests/bpf/progs/verifier_raw_stack.c index 7cc83acac727..c689665e07b9 100644 --- a/tools/testing/selftests/bpf/progs/verifier_raw_stack.c +++ b/tools/testing/selftests/bpf/progs/verifier_raw_stack.c @@ -236,7 +236,7 @@ __naked void load_bytes_spilled_regs_data(void) SEC("tc") __description("raw_stack: skb_load_bytes, invalid access 1") -__failure __msg("invalid indirect access to stack R3 off=-513 size=8") +__failure __msg("invalid write to stack R3 off=-513 size=8") __naked void load_bytes_invalid_access_1(void) { asm volatile (" \ @@ -255,7 +255,7 @@ __naked void load_bytes_invalid_access_1(void) SEC("tc") __description("raw_stack: skb_load_bytes, invalid access 2") -__failure __msg("invalid indirect access to stack R3 
off=-1 size=8") +__failure __msg("invalid write to stack R3 off=-1 size=8") __naked void load_bytes_invalid_access_2(void) { asm volatile (" \ diff --git a/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c b/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c index 683a882b3e6d..910365201f68 100644 --- a/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c +++ b/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c @@ -27,7 +27,7 @@ struct bpf_key {} __attribute__((preserve_access_index)); extern void bpf_key_put(struct bpf_key *key) __ksym; extern struct bpf_key *bpf_lookup_system_key(__u64 id) __ksym; -extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym; +extern struct bpf_key *bpf_lookup_user_key(__s32 serial, __u64 flags) __ksym; /* BTF FUNC records are not generated for kfuncs referenced * from inline assembly. These records are necessary for diff --git a/tools/testing/selftests/bpf/progs/verifier_sock.c b/tools/testing/selftests/bpf/progs/verifier_sock.c index d3e70e38e442..0d5e56dffabb 100644 --- a/tools/testing/selftests/bpf/progs/verifier_sock.c +++ b/tools/testing/selftests/bpf/progs/verifier_sock.c @@ -50,6 +50,13 @@ struct { __uint(map_flags, BPF_F_NO_PREALLOC); } sk_storage_map SEC(".maps"); +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 1); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(__u32)); +} jmp_table SEC(".maps"); + SEC("cgroup/skb") __description("skb->sk: no NULL check") __failure __msg("invalid mem access 'sock_common_or_null'") @@ -1037,4 +1044,53 @@ __naked void sock_create_read_src_port(void) : __clobber_all); } +__noinline +long skb_pull_data2(struct __sk_buff *sk, __u32 len) +{ + return bpf_skb_pull_data(sk, len); +} + +__noinline +long skb_pull_data1(struct __sk_buff *sk, __u32 len) +{ + return skb_pull_data2(sk, len); +} + +/* global function calls bpf_skb_pull_data(), which invalidates packet + * pointers established before global function call. + */ +SEC("tc") +__failure __msg("invalid mem access") +int invalidate_pkt_pointers_from_global_func(struct __sk_buff *sk) +{ + int *p = (void *)(long)sk->data; + + if ((void *)(p + 1) > (void *)(long)sk->data_end) + return TCX_DROP; + skb_pull_data1(sk, 0); + *p = 42; /* this is unsafe */ + return TCX_PASS; +} + +__noinline +int tail_call(struct __sk_buff *sk) +{ + bpf_tail_call_static(sk, &jmp_table, 0); + return 0; +} + +/* Tail calls invalidate packet pointers. 
*/ +SEC("tc") +__failure __msg("invalid mem access") +int invalidate_pkt_pointers_by_tail_call(struct __sk_buff *sk) +{ + int *p = (void *)(long)sk->data; + + if ((void *)(p + 1) > (void *)(long)sk->data_end) + return TCX_DROP; + tail_call(sk); + *p = 42; /* this is unsafe */ + return TCX_PASS; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_spin_lock.c b/tools/testing/selftests/bpf/progs/verifier_spin_lock.c index 3f679de73229..d9d7b05cf6d2 100644 --- a/tools/testing/selftests/bpf/progs/verifier_spin_lock.c +++ b/tools/testing/selftests/bpf/progs/verifier_spin_lock.c @@ -187,7 +187,7 @@ l0_%=: r6 = r0; \ SEC("cgroup/skb") __description("spin_lock: test6 missing unlock") -__failure __msg("BPF_EXIT instruction cannot be used inside bpf_spin_lock-ed region") +__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_spin_lock-ed region") __failure_unpriv __msg_unpriv("") __naked void spin_lock_test6_missing_unlock(void) { @@ -530,4 +530,30 @@ l1_%=: exit; \ : __clobber_all); } +SEC("tc") +__description("spin_lock: loop within a locked region") +__success __failure_unpriv __msg_unpriv("") +__retval(0) +int bpf_loop_inside_locked_region(void) +{ + const int zero = 0; + struct val *val; + int i, j = 0; + + val = bpf_map_lookup_elem(&map_spin_lock, &zero); + if (!val) + return -1; + + bpf_spin_lock(&val->l); + bpf_for(i, 0, 10) { + j++; + /* Silence "unused variable" warnings. */ + if (j == 10) + break; + } + bpf_spin_unlock(&val->l); + + return 0; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_stack_ptr.c b/tools/testing/selftests/bpf/progs/verifier_stack_ptr.c index 417c61cd4b19..24aabc6083fd 100644 --- a/tools/testing/selftests/bpf/progs/verifier_stack_ptr.c +++ b/tools/testing/selftests/bpf/progs/verifier_stack_ptr.c @@ -481,4 +481,56 @@ l1_%=: r0 = 42; \ : __clobber_all); } +SEC("socket") +__description("PTR_TO_STACK stack size > 512") +__failure __msg("invalid write to stack R1 off=-520 size=8") +__naked void stack_check_size_gt_512(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -520; \ + r0 = 42; \ + *(u64*)(r1 + 0) = r0; \ + exit; \ +" ::: __clobber_all); +} + +#ifdef __BPF_FEATURE_MAY_GOTO +SEC("socket") +__description("PTR_TO_STACK stack size 512 with may_goto with jit") +__load_if_JITed() +__success __retval(42) +__naked void stack_check_size_512_with_may_goto_jit(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -512; \ + r0 = 42; \ + *(u32*)(r1 + 0) = r0; \ + may_goto l0_%=; \ + r2 = 100; \ + l0_%=: \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK stack size 512 with may_goto without jit") +__load_if_no_JITed() +__failure __msg("stack size 520(extra 8) is too large") +__naked void stack_check_size_512_with_may_goto(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -512; \ + r0 = 42; \ + *(u32*)(r1 + 0) = r0; \ + may_goto l0_%=; \ + r2 = 100; \ + l0_%=: \ + exit; \ +" ::: __clobber_all); +} +#endif + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_store_release.c b/tools/testing/selftests/bpf/progs/verifier_store_release.c new file mode 100644 index 000000000000..72f1eb006074 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_store_release.c @@ -0,0 +1,301 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Google LLC. 
*/ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "../../../include/linux/filter.h" +#include "bpf_misc.h" + +#ifdef CAN_USE_LOAD_ACQ_STORE_REL + +SEC("socket") +__description("store-release, 8-bit") +__success __success_unpriv __retval(0) +__naked void store_release_8(void) +{ + asm volatile ( + "r0 = 0;" + "w1 = 0x12;" + ".8byte %[store_release_insn];" // store_release((u8 *)(r10 - 1), w1); + "w2 = *(u8 *)(r10 - 1);" + "if r2 == r1 goto 1f;" + "r0 = 1;" +"1:" + "exit;" + : + : __imm_insn(store_release_insn, + BPF_ATOMIC_OP(BPF_B, BPF_STORE_REL, BPF_REG_10, BPF_REG_1, -1)) + : __clobber_all); +} + +SEC("socket") +__description("store-release, 16-bit") +__success __success_unpriv __retval(0) +__naked void store_release_16(void) +{ + asm volatile ( + "r0 = 0;" + "w1 = 0x1234;" + ".8byte %[store_release_insn];" // store_release((u16 *)(r10 - 2), w1); + "w2 = *(u16 *)(r10 - 2);" + "if r2 == r1 goto 1f;" + "r0 = 1;" +"1:" + "exit;" + : + : __imm_insn(store_release_insn, + BPF_ATOMIC_OP(BPF_H, BPF_STORE_REL, BPF_REG_10, BPF_REG_1, -2)) + : __clobber_all); +} + +SEC("socket") +__description("store-release, 32-bit") +__success __success_unpriv __retval(0) +__naked void store_release_32(void) +{ + asm volatile ( + "r0 = 0;" + "w1 = 0x12345678;" + ".8byte %[store_release_insn];" // store_release((u32 *)(r10 - 4), w1); + "w2 = *(u32 *)(r10 - 4);" + "if r2 == r1 goto 1f;" + "r0 = 1;" +"1:" + "exit;" + : + : __imm_insn(store_release_insn, + BPF_ATOMIC_OP(BPF_W, BPF_STORE_REL, BPF_REG_10, BPF_REG_1, -4)) + : __clobber_all); +} + +SEC("socket") +__description("store-release, 64-bit") +__success __success_unpriv __retval(0) +__naked void store_release_64(void) +{ + asm volatile ( + "r0 = 0;" + "r1 = 0x1234567890abcdef ll;" + ".8byte %[store_release_insn];" // store_release((u64 *)(r10 - 8), r1); + "r2 = *(u64 *)(r10 - 8);" + "if r2 == r1 goto 1f;" + "r0 = 1;" +"1:" + "exit;" + : + : __imm_insn(store_release_insn, + BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, BPF_REG_10, BPF_REG_1, -8)) + : __clobber_all); +} + +SEC("socket") +__description("store-release with uninitialized src_reg") +__failure __failure_unpriv __msg("R2 !read_ok") +__naked void store_release_with_uninitialized_src_reg(void) +{ + asm volatile ( + ".8byte %[store_release_insn];" // store_release((u64 *)(r10 - 8), r2); + "exit;" + : + : __imm_insn(store_release_insn, + BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, BPF_REG_10, BPF_REG_2, -8)) + : __clobber_all); +} + +SEC("socket") +__description("store-release with uninitialized dst_reg") +__failure __failure_unpriv __msg("R2 !read_ok") +__naked void store_release_with_uninitialized_dst_reg(void) +{ + asm volatile ( + "r1 = 0;" + ".8byte %[store_release_insn];" // store_release((u64 *)(r2 - 8), r1); + "exit;" + : + : __imm_insn(store_release_insn, + BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, BPF_REG_2, BPF_REG_1, -8)) + : __clobber_all); +} + +SEC("socket") +__description("store-release with non-pointer dst_reg") +__failure __failure_unpriv __msg("R1 invalid mem access 'scalar'") +__naked void store_release_with_non_pointer_dst_reg(void) +{ + asm volatile ( + "r1 = 0;" + ".8byte %[store_release_insn];" // store_release((u64 *)(r1 + 0), r1); + "exit;" + : + : __imm_insn(store_release_insn, + BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, BPF_REG_1, BPF_REG_1, 0)) + : __clobber_all); +} + +SEC("socket") +__description("misaligned store-release") +__failure __failure_unpriv __msg("misaligned stack access off") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void store_release_misaligned(void) +{ + asm volatile ( + 
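+	/* A BPF_W store at r10-5 is not 4-byte aligned; note that the test
+	 * expects the "misaligned stack access" error even though
+	 * BPF_F_ANY_ALIGNMENT is set.
+	 */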
"w0 = 0;" + ".8byte %[store_release_insn];" // store_release((u32 *)(r10 - 5), w0); + "exit;" + : + : __imm_insn(store_release_insn, + BPF_ATOMIC_OP(BPF_W, BPF_STORE_REL, BPF_REG_10, BPF_REG_0, -5)) + : __clobber_all); +} + +SEC("socket") +__description("store-release to ctx pointer") +__failure __failure_unpriv __msg("BPF_ATOMIC stores into R1 ctx is not allowed") +__naked void store_release_to_ctx_pointer(void) +{ + asm volatile ( + "w0 = 0;" + // store_release((u8 *)(r1 + offsetof(struct __sk_buff, cb[0])), w0); + ".8byte %[store_release_insn];" + "exit;" + : + : __imm_insn(store_release_insn, + BPF_ATOMIC_OP(BPF_B, BPF_STORE_REL, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0]))) + : __clobber_all); +} + +SEC("xdp") +__description("store-release to pkt pointer") +__failure __msg("BPF_ATOMIC stores into R2 pkt is not allowed") +__naked void store_release_to_pkt_pointer(void) +{ + asm volatile ( + "w0 = 0;" + "r2 = *(u32 *)(r1 + %[xdp_md_data]);" + "r3 = *(u32 *)(r1 + %[xdp_md_data_end]);" + "r1 = r2;" + "r1 += 8;" + "if r1 >= r3 goto l0_%=;" + ".8byte %[store_release_insn];" // store_release((u8 *)(r2 + 0), w0); +"l0_%=: r0 = 0;" + "exit;" + : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)), + __imm_insn(store_release_insn, + BPF_ATOMIC_OP(BPF_B, BPF_STORE_REL, BPF_REG_2, BPF_REG_0, 0)) + : __clobber_all); +} + +SEC("flow_dissector") +__description("store-release to flow_keys pointer") +__failure __msg("BPF_ATOMIC stores into R2 flow_keys is not allowed") +__naked void store_release_to_flow_keys_pointer(void) +{ + asm volatile ( + "w0 = 0;" + "r2 = *(u64 *)(r1 + %[__sk_buff_flow_keys]);" + ".8byte %[store_release_insn];" // store_release((u8 *)(r2 + 0), w0); + "exit;" + : + : __imm_const(__sk_buff_flow_keys, + offsetof(struct __sk_buff, flow_keys)), + __imm_insn(store_release_insn, + BPF_ATOMIC_OP(BPF_B, BPF_STORE_REL, BPF_REG_2, BPF_REG_0, 0)) + : __clobber_all); +} + +SEC("sk_reuseport") +__description("store-release to sock pointer") +__failure __msg("R2 cannot write into sock") +__naked void store_release_to_sock_pointer(void) +{ + asm volatile ( + "w0 = 0;" + "r2 = *(u64 *)(r1 + %[sk_reuseport_md_sk]);" + ".8byte %[store_release_insn];" // store_release((u8 *)(r2 + 0), w0); + "exit;" + : + : __imm_const(sk_reuseport_md_sk, offsetof(struct sk_reuseport_md, sk)), + __imm_insn(store_release_insn, + BPF_ATOMIC_OP(BPF_B, BPF_STORE_REL, BPF_REG_2, BPF_REG_0, 0)) + : __clobber_all); +} + +SEC("socket") +__description("store-release, leak pointer to stack") +__success __success_unpriv __retval(0) +__naked void store_release_leak_pointer_to_stack(void) +{ + asm volatile ( + ".8byte %[store_release_insn];" // store_release((u64 *)(r10 - 8), r1); + "r0 = 0;" + "exit;" + : + : __imm_insn(store_release_insn, + BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, BPF_REG_10, BPF_REG_1, -8)) + : __clobber_all); +} + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, long long); +} map_hash_8b SEC(".maps"); + +SEC("socket") +__description("store-release, leak pointer to map") +__success __retval(0) +__failure_unpriv __msg_unpriv("R6 leaks addr into map") +__naked void store_release_leak_pointer_to_map(void) +{ + asm volatile ( + "r6 = r1;" + "r1 = %[map_hash_8b] ll;" + "r2 = 0;" + "*(u64 *)(r10 - 8) = r2;" + "r2 = r10;" + "r2 += -8;" + "call %[bpf_map_lookup_elem];" + "if r0 == 0 goto l0_%=;" + ".8byte %[store_release_insn];" // store_release((u64 *)(r0 + 0), r6); 
+"l0_%=:" + "r0 = 0;" + "exit;" + : + : __imm_addr(map_hash_8b), + __imm(bpf_map_lookup_elem), + __imm_insn(store_release_insn, + BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, BPF_REG_0, BPF_REG_6, 0)) + : __clobber_all); +} + +SEC("socket") +__description("store-release with invalid register R15") +__failure __failure_unpriv __msg("R15 is invalid") +__naked void store_release_with_invalid_reg(void) +{ + asm volatile ( + ".8byte %[store_release_insn];" // store_release((u64 *)(r15 + 0), r1); + "exit;" + : + : __imm_insn(store_release_insn, + BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, 15 /* invalid reg */, BPF_REG_1, 0)) + : __clobber_all); +} + +#else /* CAN_USE_LOAD_ACQ_STORE_REL */ + +SEC("socket") +__description("Clang version < 18, ENABLE_ATOMICS_TESTS not defined, and/or JIT doesn't support store-release, use a dummy test") +__success +int dummy_test(void) +{ + return 0; +} + +#endif /* CAN_USE_LOAD_ACQ_STORE_REL */ + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_tailcall.c b/tools/testing/selftests/bpf/progs/verifier_tailcall.c new file mode 100644 index 000000000000..b4acce60fb9b --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_tailcall.c @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u32); +} map_array SEC(".maps"); + +SEC("socket") +__description("invalid map type for tail call") +__failure __msg("expected prog array map for tail call") +__failure_unpriv +__naked void invalid_map_for_tail_call(void) +{ + asm volatile (" \ + r2 = %[map_array] ll; \ + r3 = 0; \ + call %[bpf_tail_call]; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_array) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_unpriv.c b/tools/testing/selftests/bpf/progs/verifier_unpriv.c index 7ea535bfbacd..28b4f7035ceb 100644 --- a/tools/testing/selftests/bpf/progs/verifier_unpriv.c +++ b/tools/testing/selftests/bpf/progs/verifier_unpriv.c @@ -199,7 +199,7 @@ __naked void pass_pointer_to_helper_function(void) SEC("socket") __description("unpriv: indirectly pass pointer on stack to helper function") __success __failure_unpriv -__msg_unpriv("invalid indirect read from stack R2 off -8+0 size 8") +__msg_unpriv("invalid read from stack R2 off -8+0 size 8") __retval(0) __naked void on_stack_to_helper_function(void) { @@ -572,8 +572,14 @@ l0_%=: exit; \ SEC("socket") __description("alu32: mov u32 const") -__success __failure_unpriv __msg_unpriv("R7 invalid mem access 'scalar'") +__success __success_unpriv __retval(0) +#ifdef SPEC_V1 +__xlated_unpriv("if r0 == 0x0 goto pc+2") +__xlated_unpriv("nospec") /* inserted to prevent `R7 invalid mem access 'scalar'` */ +__xlated_unpriv("goto pc-1") /* sanitized dead code */ +__xlated_unpriv("exit") +#endif __naked void alu32_mov_u32_const(void) { asm volatile (" \ @@ -619,12 +625,11 @@ __naked void pass_pointer_to_tail_call(void) SEC("socket") __description("unpriv: cmp map pointer with zero") -__success __failure_unpriv __msg_unpriv("R1 pointer comparison") +__success __success_unpriv __retval(0) __naked void cmp_map_pointer_with_zero(void) { asm volatile (" \ - r1 = 0; \ r1 = %[map_hash_8b] ll; \ if r1 == 0 goto l0_%=; \ l0_%=: r0 = 0; \ @@ -635,6 +640,22 @@ l0_%=: r0 = 0; \ } SEC("socket") +__description("unpriv: cmp map pointer with const") +__success 
__failure_unpriv __msg_unpriv("R1 pointer comparison prohibited") +__retval(0) +__naked void cmp_map_pointer_with_const(void) +{ + asm volatile (" \ + r1 = %[map_hash_8b] ll; \ + if r1 == 0x0000beef goto l0_%=; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") __description("unpriv: write into frame pointer") __failure __msg("frame pointer is read only") __failure_unpriv @@ -723,4 +744,210 @@ l0_%=: r0 = 0; \ " ::: __clobber_all); } +SEC("socket") +__description("unpriv: Spectre v1 path-based type confusion of scalar as stack-ptr") +__success __success_unpriv __retval(0) +#ifdef SPEC_V1 +__xlated_unpriv("if r0 != 0x1 goto pc+2") +/* This nospec prevents the exploit because it forces the mispredicted (not + * taken) `if r0 != 0x0 goto l0_%=` to resolve before using r6 as a pointer. + * This causes the CPU to realize that `r6 = r9` should have never executed. It + * ensures that r6 always contains a readable stack slot ptr when the insn after + * the nospec executes. + */ +__xlated_unpriv("nospec") +__xlated_unpriv("r9 = *(u8 *)(r6 +0)") +#endif +__naked void unpriv_spec_v1_type_confusion(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + /* r0: pointer to a map array entry */ \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + /* r1, r2: prepared call args */ \ + r6 = r10; \ + r6 += -8; \ + /* r6: pointer to readable stack slot */ \ + r9 = 0xffffc900; \ + r9 <<= 32; \ + /* r9: scalar controlled by attacker */ \ + r0 = *(u64 *)(r0 + 0); /* cache miss */ \ + if r0 != 0x0 goto l0_%=; \ + r6 = r9; \ +l0_%=: if r0 != 0x1 goto l1_%=; \ + r9 = *(u8 *)(r6 + 0); \ +l1_%=: /* leak r9 */ \ + r9 &= 1; \ + r9 <<= 9; \ + *(u64*)(r10 - 8) = r9; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + /* leak secret into is_cached(map[0|512]): */ \ + r0 = *(u64 *)(r0 + 0); \ +l2_%=: \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("unpriv: ldimm64 before Spectre v4 barrier") +__success __success_unpriv +__retval(0) +#ifdef SPEC_V4 +__xlated_unpriv("r1 = 0x2020200005642020") /* should not matter */ +__xlated_unpriv("*(u64 *)(r10 -8) = r1") +__xlated_unpriv("nospec") +#endif +__naked void unpriv_ldimm64_spectre_v4(void) +{ + asm volatile (" \ + r1 = 0x2020200005642020 ll; \ + *(u64 *)(r10 -8) = r1; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: Spectre v1 and v4 barrier") +__success __success_unpriv +__retval(0) +#ifdef SPEC_V1 +#ifdef SPEC_V4 +/* starts with r0 == r8 == r9 == 0 */ +__xlated_unpriv("if r8 != 0x0 goto pc+1") +__xlated_unpriv("goto pc+2") +__xlated_unpriv("if r9 == 0x0 goto pc+4") +__xlated_unpriv("r2 = r0") +/* Following nospec required to prevent following dangerous `*(u64 *)(NOT_FP -64) + * = r1` iff `if r9 == 0 goto pc+4` was mispredicted because of Spectre v1. The + * test therefore ensures the Spectre-v4--induced nospec does not prevent the + * Spectre-v1--induced speculative path from being fully analyzed. 
+ */ +__xlated_unpriv("nospec") /* Spectre v1 */ +__xlated_unpriv("*(u64 *)(r2 -64) = r1") /* could be used to leak r2 */ +__xlated_unpriv("nospec") /* Spectre v4 */ +#endif +#endif +__naked void unpriv_spectre_v1_and_v4(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r8 = r0; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r9 = r0; \ + r0 = r10; \ + r1 = 0; \ + r2 = r10; \ + if r8 != 0 goto l0_%=; \ + if r9 != 0 goto l0_%=; \ + r0 = 0; \ +l0_%=: if r8 != 0 goto l1_%=; \ + goto l2_%=; \ +l1_%=: if r9 == 0 goto l3_%=; \ + r2 = r0; \ +l2_%=: *(u64 *)(r2 -64) = r1; \ +l3_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("unpriv: Spectre v1 and v4 barrier (simple)") +__success __success_unpriv +__retval(0) +#ifdef SPEC_V1 +#ifdef SPEC_V4 +__xlated_unpriv("if r8 != 0x0 goto pc+1") +__xlated_unpriv("goto pc+2") +__xlated_unpriv("goto pc-1") /* if r9 == 0 goto l3_%= */ +__xlated_unpriv("goto pc-1") /* r2 = r0 */ +__xlated_unpriv("nospec") +__xlated_unpriv("*(u64 *)(r2 -64) = r1") +__xlated_unpriv("nospec") +#endif +#endif +__naked void unpriv_spectre_v1_and_v4_simple(void) +{ + asm volatile (" \ + r8 = 0; \ + r9 = 0; \ + r0 = r10; \ + r1 = 0; \ + r2 = r10; \ + if r8 != 0 goto l0_%=; \ + if r9 != 0 goto l0_%=; \ + r0 = 0; \ +l0_%=: if r8 != 0 goto l1_%=; \ + goto l2_%=; \ +l1_%=: if r9 == 0 goto l3_%=; \ + r2 = r0; \ +l2_%=: *(u64 *)(r2 -64) = r1; \ +l3_%=: r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: ldimm64 before Spectre v1 and v4 barrier (simple)") +__success __success_unpriv +__retval(0) +#ifdef SPEC_V1 +#ifdef SPEC_V4 +__xlated_unpriv("if r8 != 0x0 goto pc+1") +__xlated_unpriv("goto pc+4") +__xlated_unpriv("goto pc-1") /* if r9 == 0 goto l3_%= */ +__xlated_unpriv("goto pc-1") /* r2 = r0 */ +__xlated_unpriv("goto pc-1") /* r1 = 0x2020200005642020 ll */ +__xlated_unpriv("goto pc-1") /* second part of ldimm64 */ +__xlated_unpriv("nospec") +__xlated_unpriv("*(u64 *)(r2 -64) = r1") +__xlated_unpriv("nospec") +#endif +#endif +__naked void unpriv_ldimm64_spectre_v1_and_v4_simple(void) +{ + asm volatile (" \ + r8 = 0; \ + r9 = 0; \ + r0 = r10; \ + r1 = 0; \ + r2 = r10; \ + if r8 != 0 goto l0_%=; \ + if r9 != 0 goto l0_%=; \ + r0 = 0; \ +l0_%=: if r8 != 0 goto l1_%=; \ + goto l2_%=; \ +l1_%=: if r9 == 0 goto l3_%=; \ + r2 = r0; \ + r1 = 0x2020200005642020 ll; \ +l2_%=: *(u64 *)(r2 -64) = r1; \ +l3_%=: r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c b/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c index 5ba6e53571c8..af7938ce56cb 100644 --- a/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c +++ b/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c @@ -231,6 +231,10 @@ __retval(1) __naked void ptr_unknown_vs_unknown_lt(void) { asm volatile (" \ + r8 = r1; \ + call %[bpf_get_prandom_u32]; \ + r9 = r0; \ + r1 = r8; \ r0 = *(u32*)(r1 + %[__sk_buff_len]); \ r1 = 0; \ *(u64*)(r10 - 8) = r1; \ @@ -245,11 +249,11 @@ l1_%=: call %[bpf_map_lookup_elem]; \ r4 = *(u8*)(r0 + 0); \ if r4 == 1 goto l3_%=; \ r1 = 6; \ - r1 = -r1; \ + r1 = r9; \ r1 &= 0x3; \ goto l4_%=; \ l3_%=: r1 = 6; \ - r1 = -r1; \ + r1 = r9; \ r1 &= 0x7; \ l4_%=: r1 += r0; \ r0 = *(u8*)(r1 + 0); \ @@ -259,7 +263,8 
@@ l2_%=: r0 = 1; \ : __imm(bpf_map_lookup_elem), __imm_addr(map_array_48b), __imm_addr(map_hash_16b), - __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)), + __imm(bpf_get_prandom_u32) : __clobber_all); } @@ -271,6 +276,10 @@ __retval(1) __naked void ptr_unknown_vs_unknown_gt(void) { asm volatile (" \ + r8 = r1; \ + call %[bpf_get_prandom_u32]; \ + r9 = r0; \ + r1 = r8; \ r0 = *(u32*)(r1 + %[__sk_buff_len]); \ r1 = 0; \ *(u64*)(r10 - 8) = r1; \ @@ -285,11 +294,11 @@ l1_%=: call %[bpf_map_lookup_elem]; \ r4 = *(u8*)(r0 + 0); \ if r4 == 1 goto l3_%=; \ r1 = 6; \ - r1 = -r1; \ + r1 = r9; \ r1 &= 0x7; \ goto l4_%=; \ l3_%=: r1 = 6; \ - r1 = -r1; \ + r1 = r9; \ r1 &= 0x3; \ l4_%=: r1 += r0; \ r0 = *(u8*)(r1 + 0); \ @@ -299,7 +308,8 @@ l2_%=: r0 = 1; \ : __imm(bpf_map_lookup_elem), __imm_addr(map_array_48b), __imm_addr(map_hash_16b), - __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)), + __imm(bpf_get_prandom_u32) : __clobber_all); } @@ -398,7 +408,8 @@ l2_%=: r0 = 1; \ SEC("socket") __description("map access: mixing value pointer and scalar, 1") -__success __failure_unpriv __msg_unpriv("R2 pointer comparison prohibited") +__success __failure_unpriv +__msg_unpriv("R2 tried to add from different maps, paths or scalars, pointer arithmetic with it prohibited for !root") __retval(0) __naked void value_pointer_and_scalar_1(void) { @@ -433,6 +444,7 @@ l2_%=: /* common instruction */ \ l3_%=: /* branch B */ \ r0 = 0x13371337; \ /* verifier follows fall-through */ \ + /* unpriv: nospec (inserted to prevent `R2 pointer comparison prohibited`) */\ if r2 != 0x100000 goto l4_%=; \ r0 = 0; \ exit; \ @@ -450,7 +462,8 @@ l4_%=: /* fake-dead code; targeted from branch A to \ SEC("socket") __description("map access: mixing value pointer and scalar, 2") -__success __failure_unpriv __msg_unpriv("R0 invalid mem access 'scalar'") +__success __failure_unpriv +__msg_unpriv("R2 tried to add from different maps, paths or scalars, pointer arithmetic with it prohibited for !root") __retval(0) __naked void value_pointer_and_scalar_2(void) { @@ -492,6 +505,7 @@ l4_%=: /* fake-dead code; targeted from branch A to \ * prevent dead code sanitization, rejected \ * via branch B however \ */ \ + /* unpriv: nospec (inserted to prevent `R0 invalid mem access 'scalar'`) */\ r0 = *(u8*)(r0 + 0); \ r0 = 0; \ exit; \ @@ -1296,9 +1310,13 @@ l0_%=: r0 = 1; \ SEC("socket") __description("map access: value_ptr -= unknown scalar, 2") -__success __failure_unpriv -__msg_unpriv("R0 pointer arithmetic of map value goes out of range") +__success __success_unpriv __retval(1) +#ifdef SPEC_V1 +__xlated_unpriv("r1 &= 7") +__xlated_unpriv("nospec") /* inserted to prevent `R0 pointer arithmetic of map value goes out of range` */ +__xlated_unpriv("r0 -= r1") +#endif __naked void value_ptr_unknown_scalar_2_2(void) { asm volatile (" \ diff --git a/tools/testing/selftests/bpf/progs/verifier_var_off.c b/tools/testing/selftests/bpf/progs/verifier_var_off.c index c810f4f6f479..1d36d01b746e 100644 --- a/tools/testing/selftests/bpf/progs/verifier_var_off.c +++ b/tools/testing/selftests/bpf/progs/verifier_var_off.c @@ -203,7 +203,7 @@ __naked void stack_write_clobbers_spilled_regs(void) SEC("sockops") __description("indirect variable-offset stack access, unbounded") -__failure __msg("invalid unbounded variable-offset indirect access to stack R4") +__failure __msg("invalid unbounded variable-offset write to stack R4") 
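/* The expected messages in this file change because the verifier now emits
 * distinct "read from" / "write to" diagnostics for variable-offset stack
 * accesses instead of a single generic "indirect access" string.
 */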
__naked void variable_offset_stack_access_unbounded(void) { asm volatile (" \ @@ -236,7 +236,7 @@ l0_%=: r0 = 0; \ SEC("lwt_in") __description("indirect variable-offset stack access, max out of bound") -__failure __msg("invalid variable-offset indirect access to stack R2") +__failure __msg("invalid variable-offset read from stack R2") __naked void access_max_out_of_bound(void) { asm volatile (" \ @@ -269,7 +269,7 @@ __naked void access_max_out_of_bound(void) */ SEC("socket") __description("indirect variable-offset stack access, zero-sized, max out of bound") -__failure __msg("invalid variable-offset indirect access to stack R1") +__failure __msg("invalid variable-offset write to stack R1") __naked void zero_sized_access_max_out_of_bound(void) { asm volatile (" \ @@ -294,7 +294,7 @@ __naked void zero_sized_access_max_out_of_bound(void) SEC("lwt_in") __description("indirect variable-offset stack access, min out of bound") -__failure __msg("invalid variable-offset indirect access to stack R2") +__failure __msg("invalid variable-offset read from stack R2") __naked void access_min_out_of_bound(void) { asm volatile (" \ diff --git a/tools/testing/selftests/bpf/progs/verifier_vfs_accept.c b/tools/testing/selftests/bpf/progs/verifier_vfs_accept.c index a7c0a553aa50..3e2d76ee8050 100644 --- a/tools/testing/selftests/bpf/progs/verifier_vfs_accept.c +++ b/tools/testing/selftests/bpf/progs/verifier_vfs_accept.c @@ -2,6 +2,7 @@ /* Copyright (c) 2024 Google LLC. */ #include <vmlinux.h> +#include <errno.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> @@ -82,4 +83,21 @@ int BPF_PROG(path_d_path_from_file_argument, struct file *file) return 0; } +SEC("lsm.s/inode_rename") +__success +int BPF_PROG(inode_rename, struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry, + unsigned int flags) +{ + struct inode *inode = new_dentry->d_inode; + ino_t ino; + + if (!inode) + return 0; + ino = inode->i_ino; + if (ino == 0) + return -EACCES; + return 0; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c b/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c index d6d3f4fcb24c..4b392c6c8fc4 100644 --- a/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c +++ b/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c @@ -2,6 +2,7 @@ /* Copyright (c) 2024 Google LLC. 
*/ #include <vmlinux.h> +#include <errno.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> #include <linux/limits.h> @@ -158,4 +159,18 @@ int BPF_PROG(path_d_path_kfunc_non_lsm, struct path *path, struct file *f) return 0; } +SEC("lsm.s/inode_rename") +__failure __msg("invalid mem access 'trusted_ptr_or_null_'") +int BPF_PROG(inode_rename, struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry, + unsigned int flags) +{ + struct inode *inode = new_dentry->d_inode; + ino_t ino; + + ino = inode->i_ino; + if (ino == 0) + return -EACCES; + return 0; +} char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/wq.c b/tools/testing/selftests/bpf/progs/wq.c index f8d3ae0c29ae..2f1ba08c293e 100644 --- a/tools/testing/selftests/bpf/progs/wq.c +++ b/tools/testing/selftests/bpf/progs/wq.c @@ -5,7 +5,7 @@ #include "bpf_experimental.h" #include <bpf/bpf_helpers.h> #include "bpf_misc.h" -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/wq_failures.c b/tools/testing/selftests/bpf/progs/wq_failures.c index 25b51a72fe0f..4240211a1900 100644 --- a/tools/testing/selftests/bpf/progs/wq_failures.c +++ b/tools/testing/selftests/bpf/progs/wq_failures.c @@ -5,7 +5,7 @@ #include "bpf_experimental.h" #include <bpf/bpf_helpers.h> #include "bpf_misc.h" -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/xdp_metadata.c b/tools/testing/selftests/bpf/progs/xdp_metadata.c index 31ca229bb3c0..09bb8a038d52 100644 --- a/tools/testing/selftests/bpf/progs/xdp_metadata.c +++ b/tools/testing/selftests/bpf/progs/xdp_metadata.c @@ -19,6 +19,13 @@ struct { __type(value, __u32); } prog_arr SEC(".maps"); +struct { + __uint(type, BPF_MAP_TYPE_DEVMAP); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(struct bpf_devmap_val)); + __uint(max_entries, 1); +} dev_map SEC(".maps"); + extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, __u64 *timestamp) __ksym; extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash, @@ -95,4 +102,10 @@ int rx(struct xdp_md *ctx) return bpf_redirect_map(&xsk, ctx->rx_queue_index, XDP_PASS); } +SEC("xdp") +int redirect(struct xdp_md *ctx) +{ + return bpf_redirect_map(&dev_map, ctx->rx_queue_index, XDP_PASS); +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/xdp_redirect_map.c b/tools/testing/selftests/bpf/progs/xdp_redirect_map.c index 682dda8dabbc..50c8958f94e5 100644 --- a/tools/testing/selftests/bpf/progs/xdp_redirect_map.c +++ b/tools/testing/selftests/bpf/progs/xdp_redirect_map.c @@ -1,7 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 +#include <linux/if_ether.h> + #include <linux/bpf.h> #include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> struct { __uint(type, BPF_MAP_TYPE_DEVMAP); @@ -28,4 +31,89 @@ int xdp_redirect_map_2(struct xdp_md *xdp) return bpf_redirect_map(&tx_port, 2, 0); } +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 3); + __type(key, __u32); + __type(value, __u64); +} rxcnt SEC(".maps"); + +static int xdp_count(struct xdp_md *xdp, __u32 key) +{ + void *data_end = (void *)(long)xdp->data_end; + void *data = (void *)(long)xdp->data; + struct ethhdr *eth = data; + __u64 *count; + + if (data + sizeof(*eth) > data_end) + return XDP_DROP; + + if 
(bpf_htons(eth->h_proto) == ETH_P_IP) { + /* We only count IPv4 packets */ + count = bpf_map_lookup_elem(&rxcnt, &key); + if (count) + *count += 1; + } + + return XDP_PASS; +} + +SEC("xdp") +int xdp_count_0(struct xdp_md *xdp) +{ + return xdp_count(xdp, 0); +} + +SEC("xdp") +int xdp_count_1(struct xdp_md *xdp) +{ + return xdp_count(xdp, 1); +} + +SEC("xdp") +int xdp_count_2(struct xdp_md *xdp) +{ + return xdp_count(xdp, 2); +} + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 2); + __type(key, __u32); + __type(value, __be64); +} rx_mac SEC(".maps"); + +static int store_mac(struct xdp_md *xdp, __u32 id) +{ + void *data_end = (void *)(long)xdp->data_end; + void *data = (void *)(long)xdp->data; + struct ethhdr *eth = data; + __u32 key = id; + __be64 mac = 0; + + if (data + sizeof(*eth) > data_end) + return XDP_DROP; + + /* Only store IPv4 MAC to avoid being polluted by IPv6 packets */ + if (eth->h_proto == bpf_htons(ETH_P_IP)) { + __builtin_memcpy(&mac, eth->h_source, ETH_ALEN); + bpf_map_update_elem(&rx_mac, &key, &mac, 0); + bpf_printk("%s - %x", __func__, mac); + } + + return XDP_PASS; +} + +SEC("xdp") +int store_mac_1(struct xdp_md *xdp) +{ + return store_mac(xdp, 0); +} + +SEC("xdp") +int store_mac_2(struct xdp_md *xdp) +{ + return store_mac(xdp, 1); +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/xdp_redirect_multi_kern.c b/tools/testing/selftests/bpf/progs/xdp_redirect_multi_kern.c index 97b26a30b59a..bc2945ed8a80 100644 --- a/tools/testing/selftests/bpf/progs/xdp_redirect_multi_kern.c +++ b/tools/testing/selftests/bpf/progs/xdp_redirect_multi_kern.c @@ -34,6 +34,14 @@ struct { __uint(max_entries, 128); } mac_map SEC(".maps"); +/* Map to store redirect flags for each protocol. */ +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, __u16); + __type(value, __u64); + __uint(max_entries, 16); +} redirect_flags SEC(".maps"); + SEC("xdp") int xdp_redirect_map_multi_prog(struct xdp_md *ctx) { @@ -41,25 +49,34 @@ int xdp_redirect_map_multi_prog(struct xdp_md *ctx) void *data = (void *)(long)ctx->data; int if_index = ctx->ingress_ifindex; struct ethhdr *eth = data; + __u64 *flags_from_map; __u16 h_proto; __u64 nh_off; + __u64 flags; nh_off = sizeof(*eth); if (data + nh_off > data_end) return XDP_DROP; - h_proto = eth->h_proto; - - /* Using IPv4 for (BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS) testing */ - if (h_proto == bpf_htons(ETH_P_IP)) - return bpf_redirect_map(&map_all, 0, - BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS); - /* Using IPv6 for none flag testing */ - else if (h_proto == bpf_htons(ETH_P_IPV6)) - return bpf_redirect_map(&map_all, if_index, 0); - /* All others for BPF_F_BROADCAST testing */ - else - return bpf_redirect_map(&map_all, 0, BPF_F_BROADCAST); + h_proto = bpf_htons(eth->h_proto); + + flags_from_map = bpf_map_lookup_elem(&redirect_flags, &h_proto); + + /* Default flags for IPv4: (BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS) */ + if (h_proto == ETH_P_IP) { + flags = flags_from_map ? *flags_from_map : BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS; + return bpf_redirect_map(&map_all, 0, flags); + } + /* Default flags for IPv6: 0 */ + if (h_proto == ETH_P_IPV6) { + flags = flags_from_map ? *flags_from_map : 0; + return bpf_redirect_map(&map_all, if_index, flags); + } + /* Default flags for all other protocols: BPF_F_BROADCAST */ + else { + flags = flags_from_map ? 
*flags_from_map : BPF_F_BROADCAST; + return bpf_redirect_map(&map_all, 0, flags); + } } /* The following 2 progs are for 2nd devmap prog testing */ diff --git a/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c b/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c index ccde6a4c6319..683306db8594 100644 --- a/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c +++ b/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c @@ -4,6 +4,8 @@ #include <linux/bpf.h> #include <bpf/bpf_helpers.h> #include <linux/if_ether.h> +#include <linux/ip.h> +#include <linux/errno.h> #include "xsk_xdp_common.h" struct { @@ -14,6 +16,7 @@ struct { } xsk SEC(".maps"); static unsigned int idx; +int adjust_value = 0; int count = 0; SEC("xdp.frags") int xsk_def_prog(struct xdp_md *xdp) @@ -70,4 +73,51 @@ SEC("xdp") int xsk_xdp_shared_umem(struct xdp_md *xdp) return bpf_redirect_map(&xsk, idx, XDP_DROP); } +SEC("xdp.frags") int xsk_xdp_adjust_tail(struct xdp_md *xdp) +{ + __u32 buff_len, curr_buff_len; + int ret; + + buff_len = bpf_xdp_get_buff_len(xdp); + if (buff_len == 0) + return XDP_DROP; + + ret = bpf_xdp_adjust_tail(xdp, adjust_value); + if (ret < 0) { + /* Handle unsupported cases */ + if (ret == -EOPNOTSUPP) { + /* Set adjust_value to -EOPNOTSUPP to indicate to userspace that this case + * is unsupported + */ + adjust_value = -EOPNOTSUPP; + return bpf_redirect_map(&xsk, 0, XDP_DROP); + } + + return XDP_DROP; + } + + curr_buff_len = bpf_xdp_get_buff_len(xdp); + if (curr_buff_len != buff_len + adjust_value) + return XDP_DROP; + + if (curr_buff_len > buff_len) { + __u32 *pkt_data = (void *)(long)xdp->data; + __u32 len, words_to_end, seq_num; + + len = curr_buff_len - PKT_HDR_ALIGN; + words_to_end = len / sizeof(*pkt_data) - 1; + seq_num = words_to_end; + + /* Convert sequence number to network byte order. Store this in the last 4 bytes of + * the packet. Use 'adjust_value' to determine the position at the end of the + * packet for storing the sequence number. + */ + seq_num = __constant_htonl(words_to_end); + bpf_xdp_store_bytes(xdp, curr_buff_len - sizeof(seq_num), &seq_num, + sizeof(seq_num)); + } + + return bpf_redirect_map(&xsk, 0, XDP_DROP); +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/sdt.h b/tools/testing/selftests/bpf/sdt.h index ca0162b4dc57..1fcfa5160231 100644 --- a/tools/testing/selftests/bpf/sdt.h +++ b/tools/testing/selftests/bpf/sdt.h @@ -102,6 +102,8 @@ # define STAP_SDT_ARG_CONSTRAINT nZr # elif defined __arm__ # define STAP_SDT_ARG_CONSTRAINT g +# elif defined __loongarch__ +# define STAP_SDT_ARG_CONSTRAINT nmr # else # define STAP_SDT_ARG_CONSTRAINT nor # endif diff --git a/tools/testing/selftests/bpf/test_bpftool_map.sh b/tools/testing/selftests/bpf/test_bpftool_map.sh new file mode 100755 index 000000000000..515b1df0501e --- /dev/null +++ b/tools/testing/selftests/bpf/test_bpftool_map.sh @@ -0,0 +1,398 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 + +TESTNAME="bpftool_map" +BPF_FILE="security_bpf_map.bpf.o" +BPF_ITER_FILE="bpf_iter_map_elem.bpf.o" +PROTECTED_MAP_NAME="prot_map" +NOT_PROTECTED_MAP_NAME="not_prot_map" +BPF_FS_TMP_PARENT="/tmp" +BPF_FS_PARENT=$(awk '$3 == "bpf" {print $2; exit}' /proc/mounts) +BPF_FS_PARENT=${BPF_FS_PARENT:-$BPF_FS_TMP_PARENT} +# bpftool will mount bpf file system under BPF_DIR if it is not mounted +# under BPF_FS_PARENT. 
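+# Illustrative only: if no bpffs instance is found, the effect is roughly
+#   mount -t bpf bpf "$BPF_DIR"
+# with the mount performed by bpftool itself on first pinning.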
+BPF_DIR="$BPF_FS_PARENT/test_$TESTNAME" +SCRIPT_DIR=$(dirname $(realpath "$0")) +BPF_FILE_PATH="$SCRIPT_DIR/$BPF_FILE" +BPF_ITER_FILE_PATH="$SCRIPT_DIR/$BPF_ITER_FILE" +BPFTOOL_PATH="bpftool" +# Assume the script is located under tools/testing/selftests/bpf/ +KDIR_ROOT_DIR=$(realpath "$SCRIPT_DIR"/../../../../) + +_cleanup() +{ + set +eu + + # If BPF_DIR is a mount point this will not remove the mount point itself. + [ -d "$BPF_DIR" ] && rm -rf "$BPF_DIR" 2> /dev/null + + # Unmount if BPF filesystem was temporarily created. + if [ "$BPF_FS_PARENT" = "$BPF_FS_TMP_PARENT" ]; then + # A loop and recursive unmount are required as bpftool might + # create multiple mounts. For example, a bind mount of the directory + # to itself. The bind mount is created to change mount propagation + # flags on an actual mount point. + max_attempts=3 + attempt=0 + while mountpoint -q "$BPF_DIR" && [ $attempt -lt $max_attempts ]; do + umount -R "$BPF_DIR" 2>/dev/null + attempt=$((attempt+1)) + done + + # The directory still exists. Remove it now. + [ -d "$BPF_DIR" ] && rm -rf "$BPF_DIR" 2>/dev/null + fi +} + +cleanup_skip() +{ + echo "selftests: $TESTNAME [SKIP]" + _cleanup + + exit $ksft_skip +} + +cleanup() +{ + if [ "$?" = 0 ]; then + echo "selftests: $TESTNAME [PASS]" + else + echo "selftests: $TESTNAME [FAILED]" + fi + _cleanup +} + +check_root_privileges() { + if [ $(id -u) -ne 0 ]; then + echo "Need root privileges" + exit $ksft_skip + fi +} + +# Function to verify bpftool path. +# Parameters: +# $1: bpftool path +verify_bpftool_path() { + local bpftool_path="$1" + if ! "$bpftool_path" version > /dev/null 2>&1; then + echo "Could not run test without bpftool" + exit $ksft_skip + fi +} + +# Function to verify BTF support. +# The test requires BTF support for fmod_ret programs. +verify_btf_support() { + if [ ! -f /sys/kernel/btf/vmlinux ]; then + echo "Could not run test without BTF support" + exit $ksft_skip + fi +} + +# Function to initialize map entries with keys [0..2] and values set to 0. +# Parameters: +# $1: Map name +# $2: bpftool path +initialize_map_entries() { + local map_name="$1" + local bpftool_path="$2" + + for key in 0 1 2; do + "$bpftool_path" map update name "$map_name" key $key 0 0 0 value 0 0 0 $key + done +} + +# Test read access to the map. +# Parameters: +# $1: Name command (name/pinned) +# $2: Map name +# $3: bpftool path +# $4: key +access_for_read() { + local name_cmd="$1" + local map_name="$2" + local bpftool_path="$3" + local key="$4" + + # Test read access to the map. + if ! "$bpftool_path" map lookup "$name_cmd" "$map_name" key $key 1>/dev/null; then + echo " Read access to $key in $map_name failed" + exit 1 + fi + + # Test read access to map's BTF data. + if ! "$bpftool_path" btf dump map "$name_cmd" "$map_name" 1>/dev/null; then + echo " Read access to $map_name for BTF data failed" + exit 1 + fi +} + +# Test write access to the map. 
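+# The expected outcome is not fixed: $5 tells the helper whether the update
+# should succeed (unprotected map) or fail (protected map).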
+# Parameters: +# $1: Name command (name/pinned) +# $2: Map name +# $3: bpftool path +# $4: key +# $5: Whether write should succeed (true/false) +access_for_write() { + local name_cmd="$1" + local map_name="$2" + local bpftool_path="$3" + local key="$4" + local write_should_succeed="$5" + local value="1 1 1 1" + + if "$bpftool_path" map update "$name_cmd" "$map_name" key $key value \ + $value 2>/dev/null; then + if [ "$write_should_succeed" = "false" ]; then + echo " Write access to $key in $map_name succeeded but should have failed" + exit 1 + fi + else + if [ "$write_should_succeed" = "true" ]; then + echo " Write access to $key in $map_name failed but should have succeeded" + exit 1 + fi + fi +} + +# Test entry deletion for the map. +# Parameters: +# $1: Name command (name/pinned) +# $2: Map name +# $3: bpftool path +# $4: key +# $5: Whether write should succeed (true/false) +access_for_deletion() { + local name_cmd="$1" + local map_name="$2" + local bpftool_path="$3" + local key="$4" + local write_should_succeed="$5" + local value="1 1 1 1" + + # Test deletion by key for the map. + # Before deleting, check the key exists. + if ! "$bpftool_path" map lookup "$name_cmd" "$map_name" key $key 1>/dev/null; then + echo " Key $key does not exist in $map_name" + exit 1 + fi + + # Delete by key. + if "$bpftool_path" map delete "$name_cmd" "$map_name" key $key 2>/dev/null; then + if [ "$write_should_succeed" = "false" ]; then + echo " Deletion for $key in $map_name succeeded but should have failed" + exit 1 + fi + else + if [ "$write_should_succeed" = "true" ]; then + echo " Deletion for $key in $map_name failed but should have succeeded" + exit 1 + fi + fi + + # After deleting, check the entry existence according to the expected status. + if "$bpftool_path" map lookup "$name_cmd" "$map_name" key $key 1>/dev/null; then + if [ "$write_should_succeed" = "true" ]; then + echo " Key $key for $map_name was not deleted but should have been deleted" + exit 1 + fi + else + if [ "$write_should_succeed" = "false" ]; then + echo "Key $key for $map_name was deleted but should have not been deleted" + exit 1 + fi + fi + + # Test creation of map's deleted entry, if deletion was successful. + # Otherwise, the entry exists. + if "$bpftool_path" map update "$name_cmd" "$map_name" key $key value \ + $value 2>/dev/null; then + if [ "$write_should_succeed" = "false" ]; then + echo " Write access to $key in $map_name succeeded after deletion attempt but should have failed" + exit 1 + fi + else + if [ "$write_should_succeed" = "true" ]; then + echo " Write access to $key in $map_name failed after deletion attempt but should have succeeded" + exit 1 + fi + fi +} + +# Test map elements iterator. +# Parameters: +# $1: Name command (name/pinned) +# $2: Map name +# $3: bpftool path +# $4: BPF_DIR +# $5: bpf iterator object file path +iterate_map_elem() { + local name_cmd="$1" + local map_name="$2" + local bpftool_path="$3" + local bpf_dir="$4" + local bpf_file="$5" + local pin_path="$bpf_dir/map_iterator" + + "$bpftool_path" iter pin "$bpf_file" "$pin_path" map "$name_cmd" "$map_name" + if [ ! 
-f "$pin_path" ]; then + echo " Failed to pin iterator to $pin_path" + exit 1 + fi + + cat "$pin_path" 1>/dev/null + rm "$pin_path" 2>/dev/null +} + +# Function to test map access with configurable write expectations +# Parameters: +# $1: Name command (name/pinned) +# $2: Map name +# $3: bpftool path +# $4: key for rw +# $5: key to delete +# $6: Whether write should succeed (true/false) +# $7: BPF_DIR +# $8: bpf iterator object file path +access_map() { + local name_cmd="$1" + local map_name="$2" + local bpftool_path="$3" + local key_for_rw="$4" + local key_to_del="$5" + local write_should_succeed="$6" + local bpf_dir="$7" + local bpf_iter_file_path="$8" + + access_for_read "$name_cmd" "$map_name" "$bpftool_path" "$key_for_rw" + access_for_write "$name_cmd" "$map_name" "$bpftool_path" "$key_for_rw" \ + "$write_should_succeed" + access_for_deletion "$name_cmd" "$map_name" "$bpftool_path" "$key_to_del" \ + "$write_should_succeed" + iterate_map_elem "$name_cmd" "$map_name" "$bpftool_path" "$bpf_dir" \ + "$bpf_iter_file_path" +} + +# Function to test map access with configurable write expectations +# Parameters: +# $1: Map name +# $2: bpftool path +# $3: BPF_DIR +# $4: Whether write should succeed (true/false) +# $5: bpf iterator object file path +test_map_access() { + local map_name="$1" + local bpftool_path="$2" + local bpf_dir="$3" + local pin_path="$bpf_dir/${map_name}_pinned" + local write_should_succeed="$4" + local bpf_iter_file_path="$5" + + # Test access to the map by name. + access_map "name" "$map_name" "$bpftool_path" "0 0 0 0" "1 0 0 0" \ + "$write_should_succeed" "$bpf_dir" "$bpf_iter_file_path" + + # Pin the map to the BPF filesystem + "$bpftool_path" map pin name "$map_name" "$pin_path" + if [ ! -e "$pin_path" ]; then + echo " Failed to pin $map_name" + exit 1 + fi + + # Test access to the pinned map. + access_map "pinned" "$pin_path" "$bpftool_path" "0 0 0 0" "2 0 0 0" \ + "$write_should_succeed" "$bpf_dir" "$bpf_iter_file_path" +} + +# Function to test map creation and map-of-maps +# Parameters: +# $1: bpftool path +# $2: BPF_DIR +test_map_creation_and_map_of_maps() { + local bpftool_path="$1" + local bpf_dir="$2" + local outer_map_name="outer_map_tt" + local inner_map_name="inner_map_tt" + + "$bpftool_path" map create "$bpf_dir/$inner_map_name" type array key 4 \ + value 4 entries 4 name "$inner_map_name" + if [ ! -f "$bpf_dir/$inner_map_name" ]; then + echo " Failed to create inner map file at $bpf_dir/$outer_map_name" + return 1 + fi + + "$bpftool_path" map create "$bpf_dir/$outer_map_name" type hash_of_maps \ + key 4 value 4 entries 2 name "$outer_map_name" inner_map name "$inner_map_name" + if [ ! -f "$bpf_dir/$outer_map_name" ]; then + echo " Failed to create outer map file at $bpf_dir/$outer_map_name" + return 1 + fi + + # Add entries to the outer map by name and by pinned path. + "$bpftool_path" map update pinned "$bpf_dir/$outer_map_name" key 0 0 0 0 \ + value pinned "$bpf_dir/$inner_map_name" + "$bpftool_path" map update name "$outer_map_name" key 1 0 0 0 value \ + name "$inner_map_name" + + # The outer map should be full by now. + # The following map update command is expected to fail. 
+ if "$bpftool_path" map update name "$outer_map_name" key 2 0 0 0 value name \ + "$inner_map_name" 2>/dev/null; then + echo " Update for $outer_map_name succeeded but should have failed" + exit 1 + fi +} + +# Function to test map access with the btf list command +# Parameters: +# $1: bpftool path +test_map_access_with_btf_list() { + local bpftool_path="$1" + + # The btf list command iterates over maps for + # loaded BPF programs. + if ! "$bpftool_path" btf list 1>/dev/null; then + echo " Failed to access btf data" + exit 1 + fi +} + +set -eu + +trap cleanup_skip EXIT + +check_root_privileges + +verify_bpftool_path "$BPFTOOL_PATH" + +verify_btf_support + +trap cleanup EXIT + +# Load and attach the BPF programs to control maps access. +"$BPFTOOL_PATH" prog loadall "$BPF_FILE_PATH" "$BPF_DIR" autoattach + +initialize_map_entries "$PROTECTED_MAP_NAME" "$BPFTOOL_PATH" +initialize_map_entries "$NOT_PROTECTED_MAP_NAME" "$BPFTOOL_PATH" + +# Activate the map protection mechanism. Protection status is controlled +# by a value stored in the prot_status_map at index 0. +"$BPFTOOL_PATH" map update name prot_status_map key 0 0 0 0 value 1 0 0 0 + +# Test protected map (write should fail). +test_map_access "$PROTECTED_MAP_NAME" "$BPFTOOL_PATH" "$BPF_DIR" "false" \ + "$BPF_ITER_FILE_PATH" + +# Test not protected map (write should succeed). +test_map_access "$NOT_PROTECTED_MAP_NAME" "$BPFTOOL_PATH" "$BPF_DIR" "true" \ + "$BPF_ITER_FILE_PATH" + +test_map_creation_and_map_of_maps "$BPFTOOL_PATH" "$BPF_DIR" + +test_map_access_with_btf_list "$BPFTOOL_PATH" + +exit 0 diff --git a/tools/testing/selftests/bpf/test_bpftool_synctypes.py b/tools/testing/selftests/bpf/test_bpftool_synctypes.py index 0ed67b6b31dd..238121fda5b6 100755 --- a/tools/testing/selftests/bpf/test_bpftool_synctypes.py +++ b/tools/testing/selftests/bpf/test_bpftool_synctypes.py @@ -66,7 +66,7 @@ class ArrayParser(BlockParser): def __init__(self, reader, array_name): self.array_name = array_name - self.start_marker = re.compile(f'(static )?const bool {self.array_name}\[.*\] = {{\n') + self.start_marker = re.compile(fr'(static )?const bool {self.array_name}\[.*\] = {{\n') super().__init__(reader) def search_block(self): @@ -80,7 +80,7 @@ class ArrayParser(BlockParser): Parse a block and return data as a dictionary. Items to extract must be on separate lines in the file. 
""" - pattern = re.compile('\[(BPF_\w*)\]\s*= (true|false),?$') + pattern = re.compile(r'\[(BPF_\w*)\]\s*= (true|false),?$') entries = set() while True: line = self.reader.readline() @@ -178,7 +178,7 @@ class FileExtractor(object): @enum_name: name of the enum to parse """ start_marker = re.compile(f'enum {enum_name} {{\n') - pattern = re.compile('^\s*(BPF_\w+),?(\s+/\*.*\*/)?$') + pattern = re.compile(r'^\s*(BPF_\w+),?(\s+/\*.*\*/)?$') end_marker = re.compile('^};') parser = BlockParser(self.reader) parser.search_block(start_marker) @@ -226,8 +226,8 @@ class FileExtractor(object): @block_name: name of the blog to parse, 'TYPE' in the example """ - start_marker = re.compile(f'\*{block_name}\* := {{') - pattern = re.compile('\*\*([\w/-]+)\*\*') + start_marker = re.compile(fr'\*{block_name}\* := {{') + pattern = re.compile(r'\*\*([\w/-]+)\*\*') end_marker = re.compile('}\n') return self.__get_description_list(start_marker, pattern, end_marker) @@ -245,8 +245,8 @@ class FileExtractor(object): @block_name: name of the blog to parse, 'TYPE' in the example """ - start_marker = re.compile(f'"\s*{block_name} := {{') - pattern = re.compile('([\w/]+) [|}]') + start_marker = re.compile(fr'"\s*{block_name} := {{') + pattern = re.compile(r'([\w/]+) [|}]') end_marker = re.compile('}') return self.__get_description_list(start_marker, pattern, end_marker) @@ -264,8 +264,8 @@ class FileExtractor(object): @macro: macro starting the block, 'HELP_SPEC_OPTIONS' in the example """ - start_marker = re.compile(f'"\s*{macro}\s*" [|}}]') - pattern = re.compile('([\w-]+) ?(?:\||}[ }\]])') + start_marker = re.compile(fr'"\s*{macro}\s*" [|}}]') + pattern = re.compile(r'([\w-]+) ?(?:\||}[ }\]])') end_marker = re.compile('}\\\\n') return self.__get_description_list(start_marker, pattern, end_marker) @@ -283,8 +283,8 @@ class FileExtractor(object): @block_name: name of the blog to parse, 'TYPE' in the example """ - start_marker = re.compile(f'local {block_name}=\'') - pattern = re.compile('(?:.*=\')?([\w/]+)') + start_marker = re.compile(fr'local {block_name}=\'') + pattern = re.compile(r'(?:.*=\')?([\w/]+)') end_marker = re.compile('\'$') return self.__get_description_list(start_marker, pattern, end_marker) @@ -316,7 +316,7 @@ class MainHeaderFileExtractor(SourceFileExtractor): {'-p', '-d', '--pretty', '--debug', '--json', '-j'} """ start_marker = re.compile(f'"OPTIONS :=') - pattern = re.compile('([\w-]+) ?(?:\||}[ }\]"])') + pattern = re.compile(r'([\w-]+) ?(?:\||}[ }\]"])') end_marker = re.compile('#define') parser = InlineListParser(self.reader) @@ -338,8 +338,8 @@ class ManSubstitutionsExtractor(SourceFileExtractor): {'-p', '-d', '--pretty', '--debug', '--json', '-j'} """ - start_marker = re.compile('\|COMMON_OPTIONS\| replace:: {') - pattern = re.compile('\*\*([\w/-]+)\*\*') + start_marker = re.compile(r'\|COMMON_OPTIONS\| replace:: {') + pattern = re.compile(r'\*\*([\w/-]+)\*\*') end_marker = re.compile('}$') parser = InlineListParser(self.reader) diff --git a/tools/testing/selftests/bpf/test_btf.h b/tools/testing/selftests/bpf/test_btf.h index fb4f4714eeb4..e65889ab4adf 100644 --- a/tools/testing/selftests/bpf/test_btf.h +++ b/tools/testing/selftests/bpf/test_btf.h @@ -72,9 +72,15 @@ #define BTF_TYPE_FLOAT_ENC(name, sz) \ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FLOAT, 0, 0), sz) +#define BTF_DECL_ATTR_ENC(value, type, component_idx) \ + BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 1, 0), type), (component_idx) + #define BTF_DECL_TAG_ENC(value, type, component_idx) \ BTF_TYPE_ENC(value, 
BTF_INFO_ENC(BTF_KIND_DECL_TAG, 0, 0), type), (component_idx) +#define BTF_TYPE_ATTR_ENC(value, type) \ + BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_TYPE_TAG, 1, 0), type) + #define BTF_TYPE_TAG_ENC(value, type) \ BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_TYPE_TAG, 0, 0), type) diff --git a/tools/testing/selftests/bpf/test_flow_dissector.c b/tools/testing/selftests/bpf/test_flow_dissector.c deleted file mode 100644 index 571cc076dd7d..000000000000 --- a/tools/testing/selftests/bpf/test_flow_dissector.c +++ /dev/null @@ -1,780 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Inject packets with all sorts of encapsulation into the kernel. - * - * IPv4/IPv6 outer layer 3 - * GRE/GUE/BARE outer layer 4, where bare is IPIP/SIT/IPv4-in-IPv6/.. - * IPv4/IPv6 inner layer 3 - */ - -#define _GNU_SOURCE - -#include <stddef.h> -#include <arpa/inet.h> -#include <asm/byteorder.h> -#include <error.h> -#include <errno.h> -#include <linux/if_packet.h> -#include <linux/if_ether.h> -#include <linux/ipv6.h> -#include <netinet/ip.h> -#include <netinet/in.h> -#include <netinet/udp.h> -#include <poll.h> -#include <stdbool.h> -#include <stdlib.h> -#include <stdio.h> -#include <string.h> -#include <sys/ioctl.h> -#include <sys/socket.h> -#include <sys/stat.h> -#include <sys/time.h> -#include <sys/types.h> -#include <unistd.h> - -#define CFG_PORT_INNER 8000 - -/* Add some protocol definitions that do not exist in userspace */ - -struct grehdr { - uint16_t unused; - uint16_t protocol; -} __attribute__((packed)); - -struct guehdr { - union { - struct { -#if defined(__LITTLE_ENDIAN_BITFIELD) - __u8 hlen:5, - control:1, - version:2; -#elif defined (__BIG_ENDIAN_BITFIELD) - __u8 version:2, - control:1, - hlen:5; -#else -#error "Please fix <asm/byteorder.h>" -#endif - __u8 proto_ctype; - __be16 flags; - }; - __be32 word; - }; -}; - -static uint8_t cfg_dsfield_inner; -static uint8_t cfg_dsfield_outer; -static uint8_t cfg_encap_proto; -static bool cfg_expect_failure = false; -static int cfg_l3_extra = AF_UNSPEC; /* optional SIT prefix */ -static int cfg_l3_inner = AF_UNSPEC; -static int cfg_l3_outer = AF_UNSPEC; -static int cfg_num_pkt = 10; -static int cfg_num_secs = 0; -static char cfg_payload_char = 'a'; -static int cfg_payload_len = 100; -static int cfg_port_gue = 6080; -static bool cfg_only_rx; -static bool cfg_only_tx; -static int cfg_src_port = 9; - -static char buf[ETH_DATA_LEN]; - -#define INIT_ADDR4(name, addr4, port) \ - static struct sockaddr_in name = { \ - .sin_family = AF_INET, \ - .sin_port = __constant_htons(port), \ - .sin_addr.s_addr = __constant_htonl(addr4), \ - }; - -#define INIT_ADDR6(name, addr6, port) \ - static struct sockaddr_in6 name = { \ - .sin6_family = AF_INET6, \ - .sin6_port = __constant_htons(port), \ - .sin6_addr = addr6, \ - }; - -INIT_ADDR4(in_daddr4, INADDR_LOOPBACK, CFG_PORT_INNER) -INIT_ADDR4(in_saddr4, INADDR_LOOPBACK + 2, 0) -INIT_ADDR4(out_daddr4, INADDR_LOOPBACK, 0) -INIT_ADDR4(out_saddr4, INADDR_LOOPBACK + 1, 0) -INIT_ADDR4(extra_daddr4, INADDR_LOOPBACK, 0) -INIT_ADDR4(extra_saddr4, INADDR_LOOPBACK + 1, 0) - -INIT_ADDR6(in_daddr6, IN6ADDR_LOOPBACK_INIT, CFG_PORT_INNER) -INIT_ADDR6(in_saddr6, IN6ADDR_LOOPBACK_INIT, 0) -INIT_ADDR6(out_daddr6, IN6ADDR_LOOPBACK_INIT, 0) -INIT_ADDR6(out_saddr6, IN6ADDR_LOOPBACK_INIT, 0) -INIT_ADDR6(extra_daddr6, IN6ADDR_LOOPBACK_INIT, 0) -INIT_ADDR6(extra_saddr6, IN6ADDR_LOOPBACK_INIT, 0) - -static unsigned long util_gettime(void) -{ - struct timeval tv; - - gettimeofday(&tv, NULL); - return (tv.tv_sec * 1000) + (tv.tv_usec / 1000); -} - -static 
void util_printaddr(const char *msg, struct sockaddr *addr) -{ - unsigned long off = 0; - char nbuf[INET6_ADDRSTRLEN]; - - switch (addr->sa_family) { - case PF_INET: - off = __builtin_offsetof(struct sockaddr_in, sin_addr); - break; - case PF_INET6: - off = __builtin_offsetof(struct sockaddr_in6, sin6_addr); - break; - default: - error(1, 0, "printaddr: unsupported family %u\n", - addr->sa_family); - } - - if (!inet_ntop(addr->sa_family, ((void *) addr) + off, nbuf, - sizeof(nbuf))) - error(1, errno, "inet_ntop"); - - fprintf(stderr, "%s: %s\n", msg, nbuf); -} - -static unsigned long add_csum_hword(const uint16_t *start, int num_u16) -{ - unsigned long sum = 0; - int i; - - for (i = 0; i < num_u16; i++) - sum += start[i]; - - return sum; -} - -static uint16_t build_ip_csum(const uint16_t *start, int num_u16, - unsigned long sum) -{ - sum += add_csum_hword(start, num_u16); - - while (sum >> 16) - sum = (sum & 0xffff) + (sum >> 16); - - return ~sum; -} - -static void build_ipv4_header(void *header, uint8_t proto, - uint32_t src, uint32_t dst, - int payload_len, uint8_t tos) -{ - struct iphdr *iph = header; - - iph->ihl = 5; - iph->version = 4; - iph->tos = tos; - iph->ttl = 8; - iph->tot_len = htons(sizeof(*iph) + payload_len); - iph->id = htons(1337); - iph->protocol = proto; - iph->saddr = src; - iph->daddr = dst; - iph->check = build_ip_csum((void *) iph, iph->ihl << 1, 0); -} - -static void ipv6_set_dsfield(struct ipv6hdr *ip6h, uint8_t dsfield) -{ - uint16_t val, *ptr = (uint16_t *)ip6h; - - val = ntohs(*ptr); - val &= 0xF00F; - val |= ((uint16_t) dsfield) << 4; - *ptr = htons(val); -} - -static void build_ipv6_header(void *header, uint8_t proto, - struct sockaddr_in6 *src, - struct sockaddr_in6 *dst, - int payload_len, uint8_t dsfield) -{ - struct ipv6hdr *ip6h = header; - - ip6h->version = 6; - ip6h->payload_len = htons(payload_len); - ip6h->nexthdr = proto; - ip6h->hop_limit = 8; - ipv6_set_dsfield(ip6h, dsfield); - - memcpy(&ip6h->saddr, &src->sin6_addr, sizeof(ip6h->saddr)); - memcpy(&ip6h->daddr, &dst->sin6_addr, sizeof(ip6h->daddr)); -} - -static uint16_t build_udp_v4_csum(const struct iphdr *iph, - const struct udphdr *udph, - int num_words) -{ - unsigned long pseudo_sum; - int num_u16 = sizeof(iph->saddr); /* halfwords: twice byte len */ - - pseudo_sum = add_csum_hword((void *) &iph->saddr, num_u16); - pseudo_sum += htons(IPPROTO_UDP); - pseudo_sum += udph->len; - return build_ip_csum((void *) udph, num_words, pseudo_sum); -} - -static uint16_t build_udp_v6_csum(const struct ipv6hdr *ip6h, - const struct udphdr *udph, - int num_words) -{ - unsigned long pseudo_sum; - int num_u16 = sizeof(ip6h->saddr); /* halfwords: twice byte len */ - - pseudo_sum = add_csum_hword((void *) &ip6h->saddr, num_u16); - pseudo_sum += htons(ip6h->nexthdr); - pseudo_sum += ip6h->payload_len; - return build_ip_csum((void *) udph, num_words, pseudo_sum); -} - -static void build_udp_header(void *header, int payload_len, - uint16_t dport, int family) -{ - struct udphdr *udph = header; - int len = sizeof(*udph) + payload_len; - - udph->source = htons(cfg_src_port); - udph->dest = htons(dport); - udph->len = htons(len); - udph->check = 0; - if (family == AF_INET) - udph->check = build_udp_v4_csum(header - sizeof(struct iphdr), - udph, len >> 1); - else - udph->check = build_udp_v6_csum(header - sizeof(struct ipv6hdr), - udph, len >> 1); -} - -static void build_gue_header(void *header, uint8_t proto) -{ - struct guehdr *gueh = header; - - gueh->proto_ctype = proto; -} - -static void build_gre_header(void 
*header, uint16_t proto) -{ - struct grehdr *greh = header; - - greh->protocol = htons(proto); -} - -static int l3_length(int family) -{ - if (family == AF_INET) - return sizeof(struct iphdr); - else - return sizeof(struct ipv6hdr); -} - -static int build_packet(void) -{ - int ol3_len = 0, ol4_len = 0, il3_len = 0, il4_len = 0; - int el3_len = 0; - - if (cfg_l3_extra) - el3_len = l3_length(cfg_l3_extra); - - /* calculate header offsets */ - if (cfg_encap_proto) { - ol3_len = l3_length(cfg_l3_outer); - - if (cfg_encap_proto == IPPROTO_GRE) - ol4_len = sizeof(struct grehdr); - else if (cfg_encap_proto == IPPROTO_UDP) - ol4_len = sizeof(struct udphdr) + sizeof(struct guehdr); - } - - il3_len = l3_length(cfg_l3_inner); - il4_len = sizeof(struct udphdr); - - if (el3_len + ol3_len + ol4_len + il3_len + il4_len + cfg_payload_len >= - sizeof(buf)) - error(1, 0, "packet too large\n"); - - /* - * Fill packet from inside out, to calculate correct checksums. - * But create ip before udp headers, as udp uses ip for pseudo-sum. - */ - memset(buf + el3_len + ol3_len + ol4_len + il3_len + il4_len, - cfg_payload_char, cfg_payload_len); - - /* add zero byte for udp csum padding */ - buf[el3_len + ol3_len + ol4_len + il3_len + il4_len + cfg_payload_len] = 0; - - switch (cfg_l3_inner) { - case PF_INET: - build_ipv4_header(buf + el3_len + ol3_len + ol4_len, - IPPROTO_UDP, - in_saddr4.sin_addr.s_addr, - in_daddr4.sin_addr.s_addr, - il4_len + cfg_payload_len, - cfg_dsfield_inner); - break; - case PF_INET6: - build_ipv6_header(buf + el3_len + ol3_len + ol4_len, - IPPROTO_UDP, - &in_saddr6, &in_daddr6, - il4_len + cfg_payload_len, - cfg_dsfield_inner); - break; - } - - build_udp_header(buf + el3_len + ol3_len + ol4_len + il3_len, - cfg_payload_len, CFG_PORT_INNER, cfg_l3_inner); - - if (!cfg_encap_proto) - return il3_len + il4_len + cfg_payload_len; - - switch (cfg_l3_outer) { - case PF_INET: - build_ipv4_header(buf + el3_len, cfg_encap_proto, - out_saddr4.sin_addr.s_addr, - out_daddr4.sin_addr.s_addr, - ol4_len + il3_len + il4_len + cfg_payload_len, - cfg_dsfield_outer); - break; - case PF_INET6: - build_ipv6_header(buf + el3_len, cfg_encap_proto, - &out_saddr6, &out_daddr6, - ol4_len + il3_len + il4_len + cfg_payload_len, - cfg_dsfield_outer); - break; - } - - switch (cfg_encap_proto) { - case IPPROTO_UDP: - build_gue_header(buf + el3_len + ol3_len + ol4_len - - sizeof(struct guehdr), - cfg_l3_inner == PF_INET ? IPPROTO_IPIP - : IPPROTO_IPV6); - build_udp_header(buf + el3_len + ol3_len, - sizeof(struct guehdr) + il3_len + il4_len + - cfg_payload_len, - cfg_port_gue, cfg_l3_outer); - break; - case IPPROTO_GRE: - build_gre_header(buf + el3_len + ol3_len, - cfg_l3_inner == PF_INET ? ETH_P_IP - : ETH_P_IPV6); - break; - } - - switch (cfg_l3_extra) { - case PF_INET: - build_ipv4_header(buf, - cfg_l3_outer == PF_INET ? IPPROTO_IPIP - : IPPROTO_IPV6, - extra_saddr4.sin_addr.s_addr, - extra_daddr4.sin_addr.s_addr, - ol3_len + ol4_len + il3_len + il4_len + - cfg_payload_len, 0); - break; - case PF_INET6: - build_ipv6_header(buf, - cfg_l3_outer == PF_INET ? 
IPPROTO_IPIP - : IPPROTO_IPV6, - &extra_saddr6, &extra_daddr6, - ol3_len + ol4_len + il3_len + il4_len + - cfg_payload_len, 0); - break; - } - - return el3_len + ol3_len + ol4_len + il3_len + il4_len + - cfg_payload_len; -} - -/* sender transmits encapsulated over RAW or unencap'd over UDP */ -static int setup_tx(void) -{ - int family, fd, ret; - - if (cfg_l3_extra) - family = cfg_l3_extra; - else if (cfg_l3_outer) - family = cfg_l3_outer; - else - family = cfg_l3_inner; - - fd = socket(family, SOCK_RAW, IPPROTO_RAW); - if (fd == -1) - error(1, errno, "socket tx"); - - if (cfg_l3_extra) { - if (cfg_l3_extra == PF_INET) - ret = connect(fd, (void *) &extra_daddr4, - sizeof(extra_daddr4)); - else - ret = connect(fd, (void *) &extra_daddr6, - sizeof(extra_daddr6)); - if (ret) - error(1, errno, "connect tx"); - } else if (cfg_l3_outer) { - /* connect to destination if not encapsulated */ - if (cfg_l3_outer == PF_INET) - ret = connect(fd, (void *) &out_daddr4, - sizeof(out_daddr4)); - else - ret = connect(fd, (void *) &out_daddr6, - sizeof(out_daddr6)); - if (ret) - error(1, errno, "connect tx"); - } else { - /* otherwise using loopback */ - if (cfg_l3_inner == PF_INET) - ret = connect(fd, (void *) &in_daddr4, - sizeof(in_daddr4)); - else - ret = connect(fd, (void *) &in_daddr6, - sizeof(in_daddr6)); - if (ret) - error(1, errno, "connect tx"); - } - - return fd; -} - -/* receiver reads unencapsulated UDP */ -static int setup_rx(void) -{ - int fd, ret; - - fd = socket(cfg_l3_inner, SOCK_DGRAM, 0); - if (fd == -1) - error(1, errno, "socket rx"); - - if (cfg_l3_inner == PF_INET) - ret = bind(fd, (void *) &in_daddr4, sizeof(in_daddr4)); - else - ret = bind(fd, (void *) &in_daddr6, sizeof(in_daddr6)); - if (ret) - error(1, errno, "bind rx"); - - return fd; -} - -static int do_tx(int fd, const char *pkt, int len) -{ - int ret; - - ret = write(fd, pkt, len); - if (ret == -1) - error(1, errno, "send"); - if (ret != len) - error(1, errno, "send: len (%d < %d)\n", ret, len); - - return 1; -} - -static int do_poll(int fd, short events, int timeout) -{ - struct pollfd pfd; - int ret; - - pfd.fd = fd; - pfd.events = events; - - ret = poll(&pfd, 1, timeout); - if (ret == -1) - error(1, errno, "poll"); - if (ret && !(pfd.revents & POLLIN)) - error(1, errno, "poll: unexpected event 0x%x\n", pfd.revents); - - return ret; -} - -static int do_rx(int fd) -{ - char rbuf; - int ret, num = 0; - - while (1) { - ret = recv(fd, &rbuf, 1, MSG_DONTWAIT); - if (ret == -1 && errno == EAGAIN) - break; - if (ret == -1) - error(1, errno, "recv"); - if (rbuf != cfg_payload_char) - error(1, 0, "recv: payload mismatch"); - num++; - } - - return num; -} - -static int do_main(void) -{ - unsigned long tstop, treport, tcur; - int fdt = -1, fdr = -1, len, tx = 0, rx = 0; - - if (!cfg_only_tx) - fdr = setup_rx(); - if (!cfg_only_rx) - fdt = setup_tx(); - - len = build_packet(); - - tcur = util_gettime(); - treport = tcur + 1000; - tstop = tcur + (cfg_num_secs * 1000); - - while (1) { - if (!cfg_only_rx) - tx += do_tx(fdt, buf, len); - - if (!cfg_only_tx) - rx += do_rx(fdr); - - if (cfg_num_secs) { - tcur = util_gettime(); - if (tcur >= tstop) - break; - if (tcur >= treport) { - fprintf(stderr, "pkts: tx=%u rx=%u\n", tx, rx); - tx = 0; - rx = 0; - treport = tcur + 1000; - } - } else { - if (tx == cfg_num_pkt) - break; - } - } - - /* read straggler packets, if any */ - if (rx < tx) { - tstop = util_gettime() + 100; - while (rx < tx) { - tcur = util_gettime(); - if (tcur >= tstop) - break; - - do_poll(fdr, POLLIN, tstop - tcur); - rx += 
do_rx(fdr); - } - } - - fprintf(stderr, "pkts: tx=%u rx=%u\n", tx, rx); - - if (fdr != -1 && close(fdr)) - error(1, errno, "close rx"); - if (fdt != -1 && close(fdt)) - error(1, errno, "close tx"); - - /* - * success (== 0) only if received all packets - * unless failure is expected, in which case none must arrive. - */ - if (cfg_expect_failure) - return rx != 0; - else - return rx != tx; -} - - -static void __attribute__((noreturn)) usage(const char *filepath) -{ - fprintf(stderr, "Usage: %s [-e gre|gue|bare|none] [-i 4|6] [-l len] " - "[-O 4|6] [-o 4|6] [-n num] [-t secs] [-R] [-T] " - "[-s <osrc> [-d <odst>] [-S <isrc>] [-D <idst>] " - "[-x <otos>] [-X <itos>] [-f <isport>] [-F]\n", - filepath); - exit(1); -} - -static void parse_addr(int family, void *addr, const char *optarg) -{ - int ret; - - ret = inet_pton(family, optarg, addr); - if (ret == -1) - error(1, errno, "inet_pton"); - if (ret == 0) - error(1, 0, "inet_pton: bad string"); -} - -static void parse_addr4(struct sockaddr_in *addr, const char *optarg) -{ - parse_addr(AF_INET, &addr->sin_addr, optarg); -} - -static void parse_addr6(struct sockaddr_in6 *addr, const char *optarg) -{ - parse_addr(AF_INET6, &addr->sin6_addr, optarg); -} - -static int parse_protocol_family(const char *filepath, const char *optarg) -{ - if (!strcmp(optarg, "4")) - return PF_INET; - if (!strcmp(optarg, "6")) - return PF_INET6; - - usage(filepath); -} - -static void parse_opts(int argc, char **argv) -{ - int c; - - while ((c = getopt(argc, argv, "d:D:e:f:Fhi:l:n:o:O:Rs:S:t:Tx:X:")) != -1) { - switch (c) { - case 'd': - if (cfg_l3_outer == AF_UNSPEC) - error(1, 0, "-d must be preceded by -o"); - if (cfg_l3_outer == AF_INET) - parse_addr4(&out_daddr4, optarg); - else - parse_addr6(&out_daddr6, optarg); - break; - case 'D': - if (cfg_l3_inner == AF_UNSPEC) - error(1, 0, "-D must be preceded by -i"); - if (cfg_l3_inner == AF_INET) - parse_addr4(&in_daddr4, optarg); - else - parse_addr6(&in_daddr6, optarg); - break; - case 'e': - if (!strcmp(optarg, "gre")) - cfg_encap_proto = IPPROTO_GRE; - else if (!strcmp(optarg, "gue")) - cfg_encap_proto = IPPROTO_UDP; - else if (!strcmp(optarg, "bare")) - cfg_encap_proto = IPPROTO_IPIP; - else if (!strcmp(optarg, "none")) - cfg_encap_proto = IPPROTO_IP; /* == 0 */ - else - usage(argv[0]); - break; - case 'f': - cfg_src_port = strtol(optarg, NULL, 0); - break; - case 'F': - cfg_expect_failure = true; - break; - case 'h': - usage(argv[0]); - break; - case 'i': - if (!strcmp(optarg, "4")) - cfg_l3_inner = PF_INET; - else if (!strcmp(optarg, "6")) - cfg_l3_inner = PF_INET6; - else - usage(argv[0]); - break; - case 'l': - cfg_payload_len = strtol(optarg, NULL, 0); - break; - case 'n': - cfg_num_pkt = strtol(optarg, NULL, 0); - break; - case 'o': - cfg_l3_outer = parse_protocol_family(argv[0], optarg); - break; - case 'O': - cfg_l3_extra = parse_protocol_family(argv[0], optarg); - break; - case 'R': - cfg_only_rx = true; - break; - case 's': - if (cfg_l3_outer == AF_INET) - parse_addr4(&out_saddr4, optarg); - else - parse_addr6(&out_saddr6, optarg); - break; - case 'S': - if (cfg_l3_inner == AF_INET) - parse_addr4(&in_saddr4, optarg); - else - parse_addr6(&in_saddr6, optarg); - break; - case 't': - cfg_num_secs = strtol(optarg, NULL, 0); - break; - case 'T': - cfg_only_tx = true; - break; - case 'x': - cfg_dsfield_outer = strtol(optarg, NULL, 0); - break; - case 'X': - cfg_dsfield_inner = strtol(optarg, NULL, 0); - break; - } - } - - if (cfg_only_rx && cfg_only_tx) - error(1, 0, "options: cannot combine rx-only and 
tx-only"); - - if (cfg_encap_proto && cfg_l3_outer == AF_UNSPEC) - error(1, 0, "options: must specify outer with encap"); - else if ((!cfg_encap_proto) && cfg_l3_outer != AF_UNSPEC) - error(1, 0, "options: cannot combine no-encap and outer"); - else if ((!cfg_encap_proto) && cfg_l3_extra != AF_UNSPEC) - error(1, 0, "options: cannot combine no-encap and extra"); - - if (cfg_l3_inner == AF_UNSPEC) - cfg_l3_inner = AF_INET6; - if (cfg_l3_inner == AF_INET6 && cfg_encap_proto == IPPROTO_IPIP) - cfg_encap_proto = IPPROTO_IPV6; - - /* RFC 6040 4.2: - * on decap, if outer encountered congestion (CE == 0x3), - * but inner cannot encode ECN (NoECT == 0x0), then drop packet. - */ - if (((cfg_dsfield_outer & 0x3) == 0x3) && - ((cfg_dsfield_inner & 0x3) == 0x0)) - cfg_expect_failure = true; -} - -static void print_opts(void) -{ - if (cfg_l3_inner == PF_INET6) { - util_printaddr("inner.dest6", (void *) &in_daddr6); - util_printaddr("inner.source6", (void *) &in_saddr6); - } else { - util_printaddr("inner.dest4", (void *) &in_daddr4); - util_printaddr("inner.source4", (void *) &in_saddr4); - } - - if (!cfg_l3_outer) - return; - - fprintf(stderr, "encap proto: %u\n", cfg_encap_proto); - - if (cfg_l3_outer == PF_INET6) { - util_printaddr("outer.dest6", (void *) &out_daddr6); - util_printaddr("outer.source6", (void *) &out_saddr6); - } else { - util_printaddr("outer.dest4", (void *) &out_daddr4); - util_printaddr("outer.source4", (void *) &out_saddr4); - } - - if (!cfg_l3_extra) - return; - - if (cfg_l3_outer == PF_INET6) { - util_printaddr("extra.dest6", (void *) &extra_daddr6); - util_printaddr("extra.source6", (void *) &extra_saddr6); - } else { - util_printaddr("extra.dest4", (void *) &extra_daddr4); - util_printaddr("extra.source4", (void *) &extra_saddr4); - } - -} - -int main(int argc, char **argv) -{ - parse_opts(argc, argv); - print_opts(); - return do_main(); -} diff --git a/tools/testing/selftests/bpf/test_flow_dissector.sh b/tools/testing/selftests/bpf/test_flow_dissector.sh deleted file mode 100755 index 4b298863797a..000000000000 --- a/tools/testing/selftests/bpf/test_flow_dissector.sh +++ /dev/null @@ -1,178 +0,0 @@ -#!/bin/bash -# SPDX-License-Identifier: GPL-2.0 -# -# Load BPF flow dissector and verify it correctly dissects traffic - -BPF_FILE="bpf_flow.bpf.o" -export TESTNAME=test_flow_dissector -unmount=0 - -# Kselftest framework requirement - SKIP code is 4. -ksft_skip=4 - -msg="skip all tests:" -if [ $UID != 0 ]; then - echo $msg please run this as root >&2 - exit $ksft_skip -fi - -# This test needs to be run in a network namespace with in_netns.sh. Check if -# this is the case and run it with in_netns.sh if it is being run in the root -# namespace. -if [[ -z $(ip netns identify $$) ]]; then - err=0 - if bpftool="$(which bpftool)"; then - echo "Testing global flow dissector..." - - $bpftool prog loadall $BPF_FILE /sys/fs/bpf/flow \ - type flow_dissector - - if ! unshare --net $bpftool prog attach pinned \ - /sys/fs/bpf/flow/_dissect flow_dissector; then - echo "Unexpected unsuccessful attach in namespace" >&2 - err=1 - fi - - $bpftool prog attach pinned /sys/fs/bpf/flow/_dissect \ - flow_dissector - - if unshare --net $bpftool prog attach pinned \ - /sys/fs/bpf/flow/_dissect flow_dissector; then - echo "Unexpected successful attach in namespace" >&2 - err=1 - fi - - if ! 
$bpftool prog detach pinned \ - /sys/fs/bpf/flow/_dissect flow_dissector; then - echo "Failed to detach flow dissector" >&2 - err=1 - fi - - rm -rf /sys/fs/bpf/flow - else - echo "Skipping root flow dissector test, bpftool not found" >&2 - fi - - # Run the rest of the tests in a net namespace. - ../net/in_netns.sh "$0" "$@" - err=$(( $err + $? )) - - if (( $err == 0 )); then - echo "selftests: $TESTNAME [PASS]"; - else - echo "selftests: $TESTNAME [FAILED]"; - fi - - exit $err -fi - -# Determine selftest success via shell exit code -exit_handler() -{ - set +e - - # Cleanup - tc filter del dev lo ingress pref 1337 2> /dev/null - tc qdisc del dev lo ingress 2> /dev/null - ./flow_dissector_load -d 2> /dev/null - if [ $unmount -ne 0 ]; then - umount bpffs 2> /dev/null - fi -} - -# Exit script immediately (well catched by trap handler) if any -# program/thing exits with a non-zero status. -set -e - -# (Use 'trap -l' to list meaning of numbers) -trap exit_handler 0 2 3 6 9 - -# Mount BPF file system -if /bin/mount | grep /sys/fs/bpf > /dev/null; then - echo "bpffs already mounted" -else - echo "bpffs not mounted. Mounting..." - unmount=1 - /bin/mount bpffs /sys/fs/bpf -t bpf -fi - -# Attach BPF program -./flow_dissector_load -p $BPF_FILE -s _dissect - -# Setup -tc qdisc add dev lo ingress -echo 0 > /proc/sys/net/ipv4/conf/default/rp_filter -echo 0 > /proc/sys/net/ipv4/conf/all/rp_filter -echo 0 > /proc/sys/net/ipv4/conf/lo/rp_filter - -echo "Testing IPv4..." -# Drops all IP/UDP packets coming from port 9 -tc filter add dev lo parent ffff: protocol ip pref 1337 flower ip_proto \ - udp src_port 9 action drop - -# Send 10 IPv4/UDP packets from port 8. Filter should not drop any. -./test_flow_dissector -i 4 -f 8 -# Send 10 IPv4/UDP packets from port 9. Filter should drop all. -./test_flow_dissector -i 4 -f 9 -F -# Send 10 IPv4/UDP packets from port 10. Filter should not drop any. -./test_flow_dissector -i 4 -f 10 - -echo "Testing IPv4 from 127.0.0.127 (fallback to generic dissector)..." -# Send 10 IPv4/UDP packets from port 8. Filter should not drop any. -./test_flow_dissector -i 4 -S 127.0.0.127 -f 8 -# Send 10 IPv4/UDP packets from port 9. Filter should drop all. -./test_flow_dissector -i 4 -S 127.0.0.127 -f 9 -F -# Send 10 IPv4/UDP packets from port 10. Filter should not drop any. -./test_flow_dissector -i 4 -S 127.0.0.127 -f 10 - -echo "Testing IPIP..." -# Send 10 IPv4/IPv4/UDP packets from port 8. Filter should not drop any. -./with_addr.sh ./with_tunnels.sh ./test_flow_dissector -o 4 -e bare -i 4 \ - -D 192.168.0.1 -S 1.1.1.1 -f 8 -# Send 10 IPv4/IPv4/UDP packets from port 9. Filter should drop all. -./with_addr.sh ./with_tunnels.sh ./test_flow_dissector -o 4 -e bare -i 4 \ - -D 192.168.0.1 -S 1.1.1.1 -f 9 -F -# Send 10 IPv4/IPv4/UDP packets from port 10. Filter should not drop any. -./with_addr.sh ./with_tunnels.sh ./test_flow_dissector -o 4 -e bare -i 4 \ - -D 192.168.0.1 -S 1.1.1.1 -f 10 - -echo "Testing IPv4 + GRE..." -# Send 10 IPv4/GRE/IPv4/UDP packets from port 8. Filter should not drop any. -./with_addr.sh ./with_tunnels.sh ./test_flow_dissector -o 4 -e gre -i 4 \ - -D 192.168.0.1 -S 1.1.1.1 -f 8 -# Send 10 IPv4/GRE/IPv4/UDP packets from port 9. Filter should drop all. -./with_addr.sh ./with_tunnels.sh ./test_flow_dissector -o 4 -e gre -i 4 \ - -D 192.168.0.1 -S 1.1.1.1 -f 9 -F -# Send 10 IPv4/GRE/IPv4/UDP packets from port 10. Filter should not drop any. 
-./with_addr.sh ./with_tunnels.sh ./test_flow_dissector -o 4 -e gre -i 4 \ - -D 192.168.0.1 -S 1.1.1.1 -f 10 - -tc filter del dev lo ingress pref 1337 - -echo "Testing port range..." -# Drops all IP/UDP packets coming from port 8-10 -tc filter add dev lo parent ffff: protocol ip pref 1337 flower ip_proto \ - udp src_port 8-10 action drop - -# Send 10 IPv4/UDP packets from port 7. Filter should not drop any. -./test_flow_dissector -i 4 -f 7 -# Send 10 IPv4/UDP packets from port 9. Filter should drop all. -./test_flow_dissector -i 4 -f 9 -F -# Send 10 IPv4/UDP packets from port 11. Filter should not drop any. -./test_flow_dissector -i 4 -f 11 - -tc filter del dev lo ingress pref 1337 - -echo "Testing IPv6..." -# Drops all IPv6/UDP packets coming from port 9 -tc filter add dev lo parent ffff: protocol ipv6 pref 1337 flower ip_proto \ - udp src_port 9 action drop - -# Send 10 IPv6/UDP packets from port 8. Filter should not drop any. -./test_flow_dissector -i 6 -f 8 -# Send 10 IPv6/UDP packets from port 9. Filter should drop all. -./test_flow_dissector -i 6 -f 9 -F -# Send 10 IPv6/UDP packets from port 10. Filter should not drop any. -./test_flow_dissector -i 6 -f 10 - -exit 0 diff --git a/tools/testing/selftests/bpf/bpf_testmod/.gitignore b/tools/testing/selftests/bpf/test_kmods/.gitignore index ded513777281..ded513777281 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/.gitignore +++ b/tools/testing/selftests/bpf/test_kmods/.gitignore diff --git a/tools/testing/selftests/bpf/test_kmods/Makefile b/tools/testing/selftests/bpf/test_kmods/Makefile new file mode 100644 index 000000000000..d4e50c4509c9 --- /dev/null +++ b/tools/testing/selftests/bpf/test_kmods/Makefile @@ -0,0 +1,21 @@ +TEST_KMOD_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST))))) +KDIR ?= $(abspath $(TEST_KMOD_DIR)/../../../../..) 
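+# The modules are built through the regular kbuild external-module flow:
+# the "all" target below effectively invokes
+# "make -C $(KDIR) M=$(TEST_KMOD_DIR) modules".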
+ +ifeq ($(V),1) +Q = +else +Q = @ +endif + +MODULES = bpf_testmod.ko bpf_test_no_cfi.ko bpf_test_modorder_x.ko \ + bpf_test_modorder_y.ko + +$(foreach m,$(MODULES),$(eval obj-m += $(m:.ko=.o))) + +CFLAGS_bpf_testmod.o = -I$(src) + +all: + $(Q)$(MAKE) -C $(KDIR) M=$(TEST_KMOD_DIR) modules + +clean: + $(Q)$(MAKE) -C $(KDIR) M=$(TEST_KMOD_DIR) clean diff --git a/tools/testing/selftests/bpf/bpf_test_modorder_x/bpf_test_modorder_x.c b/tools/testing/selftests/bpf/test_kmods/bpf_test_modorder_x.c index 0cc747fa912f..0cc747fa912f 100644 --- a/tools/testing/selftests/bpf/bpf_test_modorder_x/bpf_test_modorder_x.c +++ b/tools/testing/selftests/bpf/test_kmods/bpf_test_modorder_x.c diff --git a/tools/testing/selftests/bpf/bpf_test_modorder_y/bpf_test_modorder_y.c b/tools/testing/selftests/bpf/test_kmods/bpf_test_modorder_y.c index c627ee085d13..c627ee085d13 100644 --- a/tools/testing/selftests/bpf/bpf_test_modorder_y/bpf_test_modorder_y.c +++ b/tools/testing/selftests/bpf/test_kmods/bpf_test_modorder_y.c diff --git a/tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c b/tools/testing/selftests/bpf/test_kmods/bpf_test_no_cfi.c index 948eb3962732..948eb3962732 100644 --- a/tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c +++ b/tools/testing/selftests/bpf/test_kmods/bpf_test_no_cfi.c diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h b/tools/testing/selftests/bpf/test_kmods/bpf_testmod-events.h index aeef86b3da74..aeef86b3da74 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h +++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod-events.h diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c index cc9dde507aba..e9e918cdf31f 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c +++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c @@ -134,6 +134,10 @@ bpf_testmod_test_arg_ptr_to_struct(struct bpf_testmod_struct_arg_1 *a) { return bpf_testmod_test_struct_arg_result; } +__weak noinline void bpf_testmod_looooooooooooooooooooooooooooooong_name(void) +{ +} + __bpf_kfunc void bpf_testmod_test_mod_kfunc(int i) { @@ -381,7 +385,7 @@ int bpf_testmod_fentry_ok; noinline ssize_t bpf_testmod_test_read(struct file *file, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len) { struct bpf_testmod_test_read_ctx ctx = { @@ -413,7 +417,7 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj, (void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2); - (void)trace_bpf_testmod_test_raw_tp_null(NULL); + (void)trace_bpf_testmod_test_raw_tp_null_tp(NULL); bpf_testmod_test_struct_ops3(); @@ -431,14 +435,14 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj, if (bpf_testmod_loop_test(101) > 100) trace_bpf_testmod_test_read(current, &ctx); - trace_bpf_testmod_test_nullable_bare(NULL); + trace_bpf_testmod_test_nullable_bare_tp(NULL); /* Magic number to enable writable tp */ if (len == 64) { struct bpf_testmod_test_writable_ctx writable = { .val = 1024, }; - trace_bpf_testmod_test_writable_bare(&writable); + trace_bpf_testmod_test_writable_bare_tp(&writable); if (writable.early_ret) return snprintf(buf, len, "%d\n", writable.val); } @@ -461,7 +465,7 @@ ALLOW_ERROR_INJECTION(bpf_testmod_test_read, ERRNO); noinline ssize_t bpf_testmod_test_write(struct file *file, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t off, 
size_t len) { struct bpf_testmod_test_write_ctx ctx = { @@ -470,7 +474,7 @@ bpf_testmod_test_write(struct file *file, struct kobject *kobj, .len = len, }; - trace_bpf_testmod_test_write_bare(current, &ctx); + trace_bpf_testmod_test_write_bare_tp(current, &ctx); return -EIO; /* always fail */ } @@ -563,7 +567,7 @@ static void testmod_unregister_uprobe(void) static ssize_t bpf_testmod_uprobe_write(struct file *file, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len) { unsigned long offset = 0; @@ -1130,6 +1134,7 @@ static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = { }; static const struct bpf_verifier_ops bpf_testmod_verifier_ops = { + .get_func_proto = bpf_base_func_proto, .is_valid_access = bpf_testmod_ops_is_valid_access, }; @@ -1176,10 +1181,25 @@ static int bpf_testmod_ops__test_maybe_null(int dummy, return 0; } +static int bpf_testmod_ops__test_refcounted(int dummy, + struct task_struct *task__ref) +{ + return 0; +} + +static struct task_struct * +bpf_testmod_ops__test_return_ref_kptr(int dummy, struct task_struct *task__ref, + struct cgroup *cgrp) +{ + return NULL; +} + static struct bpf_testmod_ops __bpf_testmod_ops = { .test_1 = bpf_testmod_test_1, .test_2 = bpf_testmod_test_2, .test_maybe_null = bpf_testmod_ops__test_maybe_null, + .test_refcounted = bpf_testmod_ops__test_refcounted, + .test_return_ref_kptr = bpf_testmod_ops__test_return_ref_kptr, }; struct bpf_struct_ops bpf_bpf_testmod_ops = { @@ -1293,6 +1313,85 @@ static int bpf_test_mod_st_ops__test_pro_epilogue(struct st_ops_args *args) return 0; } +static int bpf_cgroup_from_id_id; +static int bpf_cgroup_release_id; + +static int st_ops_gen_prologue_with_kfunc(struct bpf_insn *insn_buf, bool direct_write, + const struct bpf_prog *prog) +{ + struct bpf_insn *insn = insn_buf; + + /* r8 = r1; // r8 will be "u64 *ctx". + * r1 = 0; + * r0 = bpf_cgroup_from_id(r1); + * if r0 != 0 goto pc+5; + * r6 = r8[0]; // r6 will be "struct st_ops *args". 
+ * r7 = r6->a; + * r7 += 1000; + * r6->a = r7; + * goto pc+2; + * r1 = r0; + * bpf_cgroup_release(r1); + * r1 = r8; + */ + *insn++ = BPF_MOV64_REG(BPF_REG_8, BPF_REG_1); + *insn++ = BPF_MOV64_IMM(BPF_REG_1, 0); + *insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_from_id_id); + *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 5); + *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_8, 0); + *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_6, offsetof(struct st_ops_args, a)); + *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1000); + *insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, offsetof(struct st_ops_args, a)); + *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 2); + *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0); + *insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_release_id); + *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_8); + *insn++ = prog->insnsi[0]; + + return insn - insn_buf; +} + +static int st_ops_gen_epilogue_with_kfunc(struct bpf_insn *insn_buf, const struct bpf_prog *prog, + s16 ctx_stack_off) +{ + struct bpf_insn *insn = insn_buf; + + /* r1 = 0; + * r6 = 0; + * r0 = bpf_cgroup_from_id(r1); + * if r0 != 0 goto pc+6; + * r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx" + * r1 = r1[0]; // r1 will be "struct st_ops *args" + * r6 = r1->a; + * r6 += 10000; + * r1->a = r6; + * goto pc+2 + * r1 = r0; + * bpf_cgroup_release(r1); + * r0 = r6; + * r0 *= 2; + * BPF_EXIT; + */ + *insn++ = BPF_MOV64_IMM(BPF_REG_1, 0); + *insn++ = BPF_MOV64_IMM(BPF_REG_6, 0); + *insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_from_id_id); + *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 6); + *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off); + *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0); + *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct st_ops_args, a)); + *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 10000); + *insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, offsetof(struct st_ops_args, a)); + *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 2); + *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0); + *insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_release_id); + *insn++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_6); + *insn++ = BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, 2); + *insn++ = BPF_EXIT_INSN(); + + return insn - insn_buf; +} + +#define KFUNC_PRO_EPI_PREFIX "test_kfunc_" static int st_ops_gen_prologue(struct bpf_insn *insn_buf, bool direct_write, const struct bpf_prog *prog) { @@ -1302,6 +1401,9 @@ static int st_ops_gen_prologue(struct bpf_insn *insn_buf, bool direct_write, strcmp(prog->aux->attach_func_name, "test_pro_epilogue")) return 0; + if (!strncmp(prog->aux->name, KFUNC_PRO_EPI_PREFIX, strlen(KFUNC_PRO_EPI_PREFIX))) + return st_ops_gen_prologue_with_kfunc(insn_buf, direct_write, prog); + /* r6 = r1[0]; // r6 will be "struct st_ops *args". r1 is "u64 *ctx". 
* r7 = r6->a; * r7 += 1000; @@ -1325,6 +1427,9 @@ static int st_ops_gen_epilogue(struct bpf_insn *insn_buf, const struct bpf_prog strcmp(prog->aux->attach_func_name, "test_pro_epilogue")) return 0; + if (!strncmp(prog->aux->name, KFUNC_PRO_EPI_PREFIX, strlen(KFUNC_PRO_EPI_PREFIX))) + return st_ops_gen_epilogue_with_kfunc(insn_buf, prog, ctx_stack_off); + /* r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx" * r1 = r1[0]; // r1 will be "struct st_ops *args" * r6 = r1->a; @@ -1395,6 +1500,13 @@ static void st_ops_unreg(void *kdata, struct bpf_link *link) static int st_ops_init(struct btf *btf) { + struct btf *kfunc_btf; + + bpf_cgroup_from_id_id = bpf_find_btf_id("bpf_cgroup_from_id", BTF_KIND_FUNC, &kfunc_btf); + bpf_cgroup_release_id = bpf_find_btf_id("bpf_cgroup_release", BTF_KIND_FUNC, &kfunc_btf); + if (bpf_cgroup_from_id_id < 0 || bpf_cgroup_release_id < 0) + return -EINVAL; + return 0; } diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.h index 356803d1c10e..c9fab51f16e2 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h +++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.h @@ -6,6 +6,7 @@ #include <linux/types.h> struct task_struct; +struct cgroup; struct bpf_testmod_test_read_ctx { char *buf; @@ -36,6 +37,11 @@ struct bpf_testmod_ops { /* Used to test nullable arguments. */ int (*test_maybe_null)(int dummy, struct task_struct *task); int (*unsupported_ops)(void); + /* Used to test ref_acquired arguments. */ + int (*test_refcounted)(int dummy, struct task_struct *task); + /* Used to test returning referenced kptr. */ + struct task_struct *(*test_return_ref_kptr)(int dummy, struct task_struct *task, + struct cgroup *cgrp); /* The following fields are used to test shadow copies. 
*/ char onebyte; diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h b/tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h index b58817938deb..b58817938deb 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h +++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h diff --git a/tools/testing/selftests/bpf/test_loader.c b/tools/testing/selftests/bpf/test_loader.c index 53b06647cf57..78423cf89e01 100644 --- a/tools/testing/selftests/bpf/test_loader.c +++ b/tools/testing/selftests/bpf/test_loader.c @@ -37,9 +37,10 @@ #define TEST_TAG_JITED_PFX "comment:test_jited=" #define TEST_TAG_JITED_PFX_UNPRIV "comment:test_jited_unpriv=" #define TEST_TAG_CAPS_UNPRIV "comment:test_caps_unpriv=" +#define TEST_TAG_LOAD_MODE_PFX "comment:load_mode=" /* Warning: duplicated in bpf_misc.h */ -#define POINTER_VALUE 0xcafe4all +#define POINTER_VALUE 0xbadcafe #define TEST_DATA_LEN 64 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS @@ -55,6 +56,11 @@ enum mode { UNPRIV = 2 }; +enum load_mode { + JITED = 1 << 0, + NO_JITED = 1 << 1, +}; + struct expect_msg { const char *substr; /* substring match */ regex_t regex; @@ -87,6 +93,7 @@ struct test_spec { int prog_flags; int mode_mask; int arch_mask; + int load_mask; bool auxiliary; bool valid; }; @@ -311,20 +318,14 @@ static int parse_caps(const char *str, __u64 *val, const char *name) static int parse_retval(const char *str, int *val, const char *name) { - struct { - char *name; - int val; - } named_values[] = { - { "INT_MIN" , INT_MIN }, - { "POINTER_VALUE", POINTER_VALUE }, - { "TEST_DATA_LEN", TEST_DATA_LEN }, - }; - int i; - - for (i = 0; i < ARRAY_SIZE(named_values); ++i) { - if (strcmp(str, named_values[i].name) != 0) - continue; - *val = named_values[i].val; + /* + * INT_MIN is defined as (-INT_MAX -1), i.e. it doesn't expand to a + * single int and cannot be parsed with strtol, so we handle it + * separately here. In addition, it expands to different expressions in + * different compilers so we use a prefixed _INT_MIN instead. 
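+ * Tests annotate the expected value as, e.g., __retval(_INT_MIN), and
+ * the loader resolves the name here.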
+ */ + if (strcmp(str, "_INT_MIN") == 0) { + *val = INT_MIN; return 0; } @@ -406,6 +407,7 @@ static int parse_test_spec(struct test_loader *tester, bool collect_jit = false; int func_id, i, err = 0; u32 arch_mask = 0; + u32 load_mask = 0; struct btf *btf; enum arch arch; @@ -580,10 +582,22 @@ static int parse_test_spec(struct test_loader *tester, if (err) goto cleanup; spec->mode_mask |= UNPRIV; + } else if (str_has_pfx(s, TEST_TAG_LOAD_MODE_PFX)) { + val = s + sizeof(TEST_TAG_LOAD_MODE_PFX) - 1; + if (strcmp(val, "jited") == 0) { + load_mask = JITED; + } else if (strcmp(val, "no_jited") == 0) { + load_mask = NO_JITED; + } else { + PRINT_FAIL("bad load spec: '%s'", val); + err = -EINVAL; + goto cleanup; + } } } spec->arch_mask = arch_mask ?: -1; + spec->load_mask = load_mask ?: (JITED | NO_JITED); if (spec->mode_mask == 0) spec->mode_mask = PRIV; @@ -773,7 +787,7 @@ static int drop_capabilities(struct cap_state *caps) err = cap_disable_effective(caps_to_drop, &caps->old_caps); if (err) { - PRINT_FAIL("failed to drop capabilities: %i, %s\n", err, strerror(err)); + PRINT_FAIL("failed to drop capabilities: %i, %s\n", err, strerror(-err)); return err; } @@ -790,7 +804,7 @@ static int restore_capabilities(struct cap_state *caps) err = cap_enable_effective(caps->old_caps, NULL); if (err) - PRINT_FAIL("failed to restore capabilities: %i, %s\n", err, strerror(err)); + PRINT_FAIL("failed to restore capabilities: %i, %s\n", err, strerror(-err)); caps->initialized = false; return err; } @@ -928,6 +942,7 @@ void run_subtest(struct test_loader *tester, bool unpriv) { struct test_subspec *subspec = unpriv ? &spec->unpriv : &spec->priv; + int current_runtime = is_jit_enabled() ? JITED : NO_JITED; struct bpf_program *tprog = NULL, *tprog_iter; struct bpf_link *link, *links[32] = {}; struct test_spec *spec_iter; @@ -946,6 +961,11 @@ void run_subtest(struct test_loader *tester, return; } + if ((current_runtime & spec->load_mask) == 0) { + test__skip(); + return; + } + if (unpriv) { if (!can_execute_unpriv(tester, spec)) { test__skip(); @@ -959,7 +979,7 @@ void run_subtest(struct test_loader *tester, if (subspec->caps) { err = cap_enable_effective(subspec->caps, NULL); if (err) { - PRINT_FAIL("failed to set capabilities: %i, %s\n", err, strerror(err)); + PRINT_FAIL("failed to set capabilities: %i, %s\n", err, strerror(-err)); goto subtest_cleanup; } } @@ -1016,6 +1036,14 @@ void run_subtest(struct test_loader *tester, emit_verifier_log(tester->log_buf, false /*force*/); validate_msgs(tester->log_buf, &subspec->expect_msgs, emit_verifier_log); + /* Restore capabilities because the kernel will silently ignore requests + * for program info (such as xlated program text) if we are not + * bpf-capable. Also, for some reason test_verifier executes programs + * with all capabilities restored. Do the same here. + */ + if (restore_capabilities(&caps)) + goto tobj_cleanup; + if (subspec->expect_xlated.cnt) { err = get_xlated_program_text(bpf_program__fd(tprog), tester->log_buf, tester->log_buf_sz); @@ -1041,12 +1069,6 @@ void run_subtest(struct test_loader *tester, } if (should_do_test_run(spec, subspec)) { - /* For some reason test_verifier executes programs - * with all capabilities restored. Do the same here. - */ - if (restore_capabilities(&caps)) - goto tobj_cleanup; - /* Do bpf_map__attach_struct_ops() for each struct_ops map. * This should trigger bpf_struct_ops->reg callback on kernel side. 
*/ @@ -1075,9 +1097,9 @@ void run_subtest(struct test_loader *tester, } } - do_prog_test_run(bpf_program__fd(tprog), &retval, - bpf_program__type(tprog) == BPF_PROG_TYPE_SYSCALL ? true : false); - if (retval != subspec->retval && subspec->retval != POINTER_VALUE) { + err = do_prog_test_run(bpf_program__fd(tprog), &retval, + bpf_program__type(tprog) == BPF_PROG_TYPE_SYSCALL ? true : false); + if (!err && retval != subspec->retval && subspec->retval != POINTER_VALUE) { PRINT_FAIL("Unexpected retval: %d != %d\n", retval, subspec->retval); goto tobj_cleanup; } diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c index fda7589c5023..0921939532c6 100644 --- a/tools/testing/selftests/bpf/test_lru_map.c +++ b/tools/testing/selftests/bpf/test_lru_map.c @@ -138,6 +138,18 @@ static int sched_next_online(int pid, int *next_to_try) return ret; } +/* Derive target_free from map_size, same as bpf_common_lru_populate */ +static unsigned int __tgt_size(unsigned int map_size) +{ + return (map_size / nr_cpus) / 2; +} + +/* Inverse of how bpf_common_lru_populate derives target_free from map_size. */ +static unsigned int __map_size(unsigned int tgt_free) +{ + return tgt_free * nr_cpus * 2; +} + /* Size of the LRU map is 2 * Add key=1 (+1 key) * Add key=2 (+1 key) @@ -231,11 +243,11 @@ static void test_lru_sanity0(int map_type, int map_flags) printf("Pass\n"); } -/* Size of the LRU map is 1.5*tgt_free - * Insert 1 to tgt_free (+tgt_free keys) - * Lookup 1 to tgt_free/2 - * Insert 1+tgt_free to 2*tgt_free (+tgt_free keys) - * => 1+tgt_free/2 to LOCALFREE_TARGET will be removed by LRU +/* Verify that unreferenced elements are recycled before referenced ones. + * Insert elements. + * Reference a subset of these. + * Insert more, enough to trigger recycling. + * Verify that unreferenced are recycled. 
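+ * (Illustration with assumed numbers: for nr_cpus = 2 and tgt_free = 128,
+ * the map below is sized __map_size(128) + 128 / 2 = 512 + 64 = 576.)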
*/ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free) { @@ -257,7 +269,7 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free) batch_size = tgt_free / 2; assert(batch_size * 2 == tgt_free); - map_size = tgt_free + batch_size; + map_size = __map_size(tgt_free) + batch_size; lru_map_fd = create_map(map_type, map_flags, map_size); assert(lru_map_fd != -1); @@ -266,13 +278,13 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free) value[0] = 1234; - /* Insert 1 to tgt_free (+tgt_free keys) */ - end_key = 1 + tgt_free; + /* Insert map_size - batch_size keys */ + end_key = 1 + __map_size(tgt_free); for (key = 1; key < end_key; key++) assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); - /* Lookup 1 to tgt_free/2 */ + /* Lookup 1 to batch_size */ end_key = 1 + batch_size; for (key = 1; key < end_key; key++) { assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value)); @@ -280,12 +292,13 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free) BPF_NOEXIST)); } - /* Insert 1+tgt_free to 2*tgt_free - * => 1+tgt_free/2 to LOCALFREE_TARGET will be + /* Insert another map_size - batch_size keys + * Map will contain 1 to batch_size plus these latest, i.e., + * => previous 1+batch_size to map_size - batch_size will have been * removed by LRU */ - key = 1 + tgt_free; - end_key = key + tgt_free; + key = 1 + __map_size(tgt_free); + end_key = key + __map_size(tgt_free); for (; key < end_key; key++) { assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); @@ -301,17 +314,8 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free) printf("Pass\n"); } -/* Size of the LRU map 1.5 * tgt_free - * Insert 1 to tgt_free (+tgt_free keys) - * Update 1 to tgt_free/2 - * => The original 1 to tgt_free/2 will be removed due to - * the LRU shrink process - * Re-insert 1 to tgt_free/2 again and do a lookup immeidately - * Insert 1+tgt_free to tgt_free*3/2 - * Insert 1+tgt_free*3/2 to tgt_free*5/2 - * => Key 1+tgt_free to tgt_free*3/2 - * will be removed from LRU because it has never - * been lookup and ref bit is not set +/* Verify that insertions exceeding map size will recycle the oldest. + * Verify that unreferenced elements are recycled before referenced. */ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free) { @@ -334,7 +338,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free) batch_size = tgt_free / 2; assert(batch_size * 2 == tgt_free); - map_size = tgt_free + batch_size; + map_size = __map_size(tgt_free) + batch_size; lru_map_fd = create_map(map_type, map_flags, map_size); assert(lru_map_fd != -1); @@ -343,8 +347,8 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free) value[0] = 1234; - /* Insert 1 to tgt_free (+tgt_free keys) */ - end_key = 1 + tgt_free; + /* Insert map_size - batch_size keys */ + end_key = 1 + __map_size(tgt_free); for (key = 1; key < end_key; key++) assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); @@ -357,8 +361,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free) * shrink the inactive list to get tgt_free * number of free nodes. * - * Hence, the oldest key 1 to tgt_free/2 - * are removed from the LRU list. + * Hence, the oldest key is removed from the LRU list. 
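+ * (The check below uses key 1: the first key inserted, never referenced
+ * since, and therefore the oldest.)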
*/ key = 1; if (map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { @@ -370,8 +373,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free) BPF_EXIST)); } - /* Re-insert 1 to tgt_free/2 again and do a lookup - * immeidately. + /* Re-insert 1 to batch_size again and do a lookup immediately. */ end_key = 1 + batch_size; value[0] = 4321; @@ -387,17 +389,18 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free) value[0] = 1234; - /* Insert 1+tgt_free to tgt_free*3/2 */ - end_key = 1 + tgt_free + batch_size; - for (key = 1 + tgt_free; key < end_key; key++) + /* Insert batch_size new elements */ + key = 1 + __map_size(tgt_free); + end_key = key + batch_size; + for (; key < end_key; key++) /* These newly added but not referenced keys will be * gone during the next LRU shrink. */ assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); - /* Insert 1+tgt_free*3/2 to tgt_free*5/2 */ - end_key = key + tgt_free; + /* Insert map_size - batch_size elements */ + end_key += __map_size(tgt_free); for (; key < end_key; key++) { assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); @@ -413,12 +416,12 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free) printf("Pass\n"); } -/* Size of the LRU map is 2*tgt_free - * It is to test the active/inactive list rotation - * Insert 1 to 2*tgt_free (+2*tgt_free keys) - * Lookup key 1 to tgt_free*3/2 - * Add 1+2*tgt_free to tgt_free*5/2 (+tgt_free/2 keys) - * => key 1+tgt_free*3/2 to 2*tgt_free are removed from LRU +/* Test the active/inactive list rotation + * + * Fill the whole map, deplete the free list. + * Reference all except the last lru->target_free elements. + * Insert lru->target_free new elements. This triggers one shrink. + * Verify that the non-referenced elements are replaced. 
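+ * (batch_size below is derived with __tgt_size(), mirroring how
+ * bpf_common_lru_populate computes target_free, instead of the old
+ * tgt_free / 2.)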
*/ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free) { @@ -437,8 +440,7 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free) assert(sched_next_online(0, &next_cpu) != -1); - batch_size = tgt_free / 2; - assert(batch_size * 2 == tgt_free); + batch_size = __tgt_size(tgt_free); map_size = tgt_free * 2; lru_map_fd = create_map(map_type, map_flags, map_size); @@ -449,23 +451,21 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free) value[0] = 1234; - /* Insert 1 to 2*tgt_free (+2*tgt_free keys) */ - end_key = 1 + (2 * tgt_free); + /* Fill the map */ + end_key = 1 + map_size; for (key = 1; key < end_key; key++) assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); - /* Lookup key 1 to tgt_free*3/2 */ - end_key = tgt_free + batch_size; + /* Reference all but the last batch_size */ + end_key = 1 + map_size - batch_size; for (key = 1; key < end_key; key++) { assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value)); assert(!bpf_map_update_elem(expected_map_fd, &key, value, BPF_NOEXIST)); } - /* Add 1+2*tgt_free to tgt_free*5/2 - * (+tgt_free/2 keys) - */ + /* Insert new batch_size: replaces the non-referenced elements */ key = 2 * tgt_free + 1; end_key = key + batch_size; for (; key < end_key; key++) { @@ -500,7 +500,8 @@ static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free) lru_map_fd = create_map(map_type, map_flags, 3 * tgt_free * nr_cpus); else - lru_map_fd = create_map(map_type, map_flags, 3 * tgt_free); + lru_map_fd = create_map(map_type, map_flags, + 3 * __map_size(tgt_free)); assert(lru_map_fd != -1); expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, diff --git a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh deleted file mode 100755 index 1e565f47aca9..000000000000 --- a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh +++ /dev/null @@ -1,476 +0,0 @@ -#!/bin/bash -# SPDX-License-Identifier: GPL-2.0 -# -# Setup/topology: -# -# NS1 NS2 NS3 -# veth1 <---> veth2 veth3 <---> veth4 (the top route) -# veth5 <---> veth6 veth7 <---> veth8 (the bottom route) -# -# each vethN gets IPv[4|6]_N address -# -# IPv*_SRC = IPv*_1 -# IPv*_DST = IPv*_4 -# -# all tests test pings from IPv*_SRC to IPv*_DST -# -# by default, routes are configured to allow packets to go -# IP*_1 <=> IP*_2 <=> IP*_3 <=> IP*_4 (the top route) -# -# a GRE device is installed in NS3 with IPv*_GRE, and -# NS1/NS2 are configured to route packets to IPv*_GRE via IP*_8 -# (the bottom route) -# -# Tests: -# -# 1. routes NS2->IPv*_DST are brought down, so the only way a ping -# from IP*_SRC to IP*_DST can work is via IPv*_GRE -# -# 2a. in an egress test, a bpf LWT_XMIT program is installed on veth1 -# that encaps the packets with an IP/GRE header to route to IPv*_GRE -# -# ping: SRC->[encap at veth1:egress]->GRE:decap->DST -# ping replies go DST->SRC directly -# -# 2b. 
in an ingress test, a bpf LWT_IN program is installed on veth2 -# that encaps the packets with an IP/GRE header to route to IPv*_GRE -# -# ping: SRC->[encap at veth2:ingress]->GRE:decap->DST -# ping replies go DST->SRC directly - -BPF_FILE="test_lwt_ip_encap.bpf.o" -if [[ $EUID -ne 0 ]]; then - echo "This script must be run as root" - echo "FAIL" - exit 1 -fi - -readonly NS1="ns1-$(mktemp -u XXXXXX)" -readonly NS2="ns2-$(mktemp -u XXXXXX)" -readonly NS3="ns3-$(mktemp -u XXXXXX)" - -readonly IPv4_1="172.16.1.100" -readonly IPv4_2="172.16.2.100" -readonly IPv4_3="172.16.3.100" -readonly IPv4_4="172.16.4.100" -readonly IPv4_5="172.16.5.100" -readonly IPv4_6="172.16.6.100" -readonly IPv4_7="172.16.7.100" -readonly IPv4_8="172.16.8.100" -readonly IPv4_GRE="172.16.16.100" - -readonly IPv4_SRC=$IPv4_1 -readonly IPv4_DST=$IPv4_4 - -readonly IPv6_1="fb01::1" -readonly IPv6_2="fb02::1" -readonly IPv6_3="fb03::1" -readonly IPv6_4="fb04::1" -readonly IPv6_5="fb05::1" -readonly IPv6_6="fb06::1" -readonly IPv6_7="fb07::1" -readonly IPv6_8="fb08::1" -readonly IPv6_GRE="fb10::1" - -readonly IPv6_SRC=$IPv6_1 -readonly IPv6_DST=$IPv6_4 - -TEST_STATUS=0 -TESTS_SUCCEEDED=0 -TESTS_FAILED=0 - -TMPFILE="" - -process_test_results() -{ - if [[ "${TEST_STATUS}" -eq 0 ]] ; then - echo "PASS" - TESTS_SUCCEEDED=$((TESTS_SUCCEEDED+1)) - else - echo "FAIL" - TESTS_FAILED=$((TESTS_FAILED+1)) - fi -} - -print_test_summary_and_exit() -{ - echo "passed tests: ${TESTS_SUCCEEDED}" - echo "failed tests: ${TESTS_FAILED}" - if [ "${TESTS_FAILED}" -eq "0" ] ; then - exit 0 - else - exit 1 - fi -} - -setup() -{ - set -e # exit on error - TEST_STATUS=0 - - # create devices and namespaces - ip netns add "${NS1}" - ip netns add "${NS2}" - ip netns add "${NS3}" - - # rp_filter gets confused by what these tests are doing, so disable it - ip netns exec ${NS1} sysctl -wq net.ipv4.conf.all.rp_filter=0 - ip netns exec ${NS2} sysctl -wq net.ipv4.conf.all.rp_filter=0 - ip netns exec ${NS3} sysctl -wq net.ipv4.conf.all.rp_filter=0 - ip netns exec ${NS1} sysctl -wq net.ipv4.conf.default.rp_filter=0 - ip netns exec ${NS2} sysctl -wq net.ipv4.conf.default.rp_filter=0 - ip netns exec ${NS3} sysctl -wq net.ipv4.conf.default.rp_filter=0 - - # disable IPv6 DAD because it sometimes takes too long and fails tests - ip netns exec ${NS1} sysctl -wq net.ipv6.conf.all.accept_dad=0 - ip netns exec ${NS2} sysctl -wq net.ipv6.conf.all.accept_dad=0 - ip netns exec ${NS3} sysctl -wq net.ipv6.conf.all.accept_dad=0 - ip netns exec ${NS1} sysctl -wq net.ipv6.conf.default.accept_dad=0 - ip netns exec ${NS2} sysctl -wq net.ipv6.conf.default.accept_dad=0 - ip netns exec ${NS3} sysctl -wq net.ipv6.conf.default.accept_dad=0 - - ip link add veth1 type veth peer name veth2 - ip link add veth3 type veth peer name veth4 - ip link add veth5 type veth peer name veth6 - ip link add veth7 type veth peer name veth8 - - ip netns exec ${NS2} sysctl -wq net.ipv4.ip_forward=1 - ip netns exec ${NS2} sysctl -wq net.ipv6.conf.all.forwarding=1 - - ip link set veth1 netns ${NS1} - ip link set veth2 netns ${NS2} - ip link set veth3 netns ${NS2} - ip link set veth4 netns ${NS3} - ip link set veth5 netns ${NS1} - ip link set veth6 netns ${NS2} - ip link set veth7 netns ${NS2} - ip link set veth8 netns ${NS3} - - if [ ! 
-z "${VRF}" ] ; then - ip -netns ${NS1} link add red type vrf table 1001 - ip -netns ${NS1} link set red up - ip -netns ${NS1} route add table 1001 unreachable default metric 8192 - ip -netns ${NS1} -6 route add table 1001 unreachable default metric 8192 - ip -netns ${NS1} link set veth1 vrf red - ip -netns ${NS1} link set veth5 vrf red - - ip -netns ${NS2} link add red type vrf table 1001 - ip -netns ${NS2} link set red up - ip -netns ${NS2} route add table 1001 unreachable default metric 8192 - ip -netns ${NS2} -6 route add table 1001 unreachable default metric 8192 - ip -netns ${NS2} link set veth2 vrf red - ip -netns ${NS2} link set veth3 vrf red - ip -netns ${NS2} link set veth6 vrf red - ip -netns ${NS2} link set veth7 vrf red - fi - - # configure addesses: the top route (1-2-3-4) - ip -netns ${NS1} addr add ${IPv4_1}/24 dev veth1 - ip -netns ${NS2} addr add ${IPv4_2}/24 dev veth2 - ip -netns ${NS2} addr add ${IPv4_3}/24 dev veth3 - ip -netns ${NS3} addr add ${IPv4_4}/24 dev veth4 - ip -netns ${NS1} -6 addr add ${IPv6_1}/128 nodad dev veth1 - ip -netns ${NS2} -6 addr add ${IPv6_2}/128 nodad dev veth2 - ip -netns ${NS2} -6 addr add ${IPv6_3}/128 nodad dev veth3 - ip -netns ${NS3} -6 addr add ${IPv6_4}/128 nodad dev veth4 - - # configure addresses: the bottom route (5-6-7-8) - ip -netns ${NS1} addr add ${IPv4_5}/24 dev veth5 - ip -netns ${NS2} addr add ${IPv4_6}/24 dev veth6 - ip -netns ${NS2} addr add ${IPv4_7}/24 dev veth7 - ip -netns ${NS3} addr add ${IPv4_8}/24 dev veth8 - ip -netns ${NS1} -6 addr add ${IPv6_5}/128 nodad dev veth5 - ip -netns ${NS2} -6 addr add ${IPv6_6}/128 nodad dev veth6 - ip -netns ${NS2} -6 addr add ${IPv6_7}/128 nodad dev veth7 - ip -netns ${NS3} -6 addr add ${IPv6_8}/128 nodad dev veth8 - - ip -netns ${NS1} link set dev veth1 up - ip -netns ${NS2} link set dev veth2 up - ip -netns ${NS2} link set dev veth3 up - ip -netns ${NS3} link set dev veth4 up - ip -netns ${NS1} link set dev veth5 up - ip -netns ${NS2} link set dev veth6 up - ip -netns ${NS2} link set dev veth7 up - ip -netns ${NS3} link set dev veth8 up - - # configure routes: IP*_SRC -> veth1/IP*_2 (= top route) default; - # the bottom route to specific bottom addresses - - # NS1 - # top route - ip -netns ${NS1} route add ${IPv4_2}/32 dev veth1 ${VRF} - ip -netns ${NS1} route add default dev veth1 via ${IPv4_2} ${VRF} # go top by default - ip -netns ${NS1} -6 route add ${IPv6_2}/128 dev veth1 ${VRF} - ip -netns ${NS1} -6 route add default dev veth1 via ${IPv6_2} ${VRF} # go top by default - # bottom route - ip -netns ${NS1} route add ${IPv4_6}/32 dev veth5 ${VRF} - ip -netns ${NS1} route add ${IPv4_7}/32 dev veth5 via ${IPv4_6} ${VRF} - ip -netns ${NS1} route add ${IPv4_8}/32 dev veth5 via ${IPv4_6} ${VRF} - ip -netns ${NS1} -6 route add ${IPv6_6}/128 dev veth5 ${VRF} - ip -netns ${NS1} -6 route add ${IPv6_7}/128 dev veth5 via ${IPv6_6} ${VRF} - ip -netns ${NS1} -6 route add ${IPv6_8}/128 dev veth5 via ${IPv6_6} ${VRF} - - # NS2 - # top route - ip -netns ${NS2} route add ${IPv4_1}/32 dev veth2 ${VRF} - ip -netns ${NS2} route add ${IPv4_4}/32 dev veth3 ${VRF} - ip -netns ${NS2} -6 route add ${IPv6_1}/128 dev veth2 ${VRF} - ip -netns ${NS2} -6 route add ${IPv6_4}/128 dev veth3 ${VRF} - # bottom route - ip -netns ${NS2} route add ${IPv4_5}/32 dev veth6 ${VRF} - ip -netns ${NS2} route add ${IPv4_8}/32 dev veth7 ${VRF} - ip -netns ${NS2} -6 route add ${IPv6_5}/128 dev veth6 ${VRF} - ip -netns ${NS2} -6 route add ${IPv6_8}/128 dev veth7 ${VRF} - - # NS3 - # top route - ip -netns ${NS3} route add 
${IPv4_3}/32 dev veth4 - ip -netns ${NS3} route add ${IPv4_1}/32 dev veth4 via ${IPv4_3} - ip -netns ${NS3} route add ${IPv4_2}/32 dev veth4 via ${IPv4_3} - ip -netns ${NS3} -6 route add ${IPv6_3}/128 dev veth4 - ip -netns ${NS3} -6 route add ${IPv6_1}/128 dev veth4 via ${IPv6_3} - ip -netns ${NS3} -6 route add ${IPv6_2}/128 dev veth4 via ${IPv6_3} - # bottom route - ip -netns ${NS3} route add ${IPv4_7}/32 dev veth8 - ip -netns ${NS3} route add ${IPv4_5}/32 dev veth8 via ${IPv4_7} - ip -netns ${NS3} route add ${IPv4_6}/32 dev veth8 via ${IPv4_7} - ip -netns ${NS3} -6 route add ${IPv6_7}/128 dev veth8 - ip -netns ${NS3} -6 route add ${IPv6_5}/128 dev veth8 via ${IPv6_7} - ip -netns ${NS3} -6 route add ${IPv6_6}/128 dev veth8 via ${IPv6_7} - - # configure IPv4 GRE device in NS3, and a route to it via the "bottom" route - ip -netns ${NS3} tunnel add gre_dev mode gre remote ${IPv4_1} local ${IPv4_GRE} ttl 255 - ip -netns ${NS3} link set gre_dev up - ip -netns ${NS3} addr add ${IPv4_GRE} dev gre_dev - ip -netns ${NS1} route add ${IPv4_GRE}/32 dev veth5 via ${IPv4_6} ${VRF} - ip -netns ${NS2} route add ${IPv4_GRE}/32 dev veth7 via ${IPv4_8} ${VRF} - - - # configure IPv6 GRE device in NS3, and a route to it via the "bottom" route - ip -netns ${NS3} -6 tunnel add name gre6_dev mode ip6gre remote ${IPv6_1} local ${IPv6_GRE} ttl 255 - ip -netns ${NS3} link set gre6_dev up - ip -netns ${NS3} -6 addr add ${IPv6_GRE} nodad dev gre6_dev - ip -netns ${NS1} -6 route add ${IPv6_GRE}/128 dev veth5 via ${IPv6_6} ${VRF} - ip -netns ${NS2} -6 route add ${IPv6_GRE}/128 dev veth7 via ${IPv6_8} ${VRF} - - TMPFILE=$(mktemp /tmp/test_lwt_ip_encap.XXXXXX) - - sleep 1 # reduce flakiness - set +e -} - -cleanup() -{ - if [ -f ${TMPFILE} ] ; then - rm ${TMPFILE} - fi - - ip netns del ${NS1} 2> /dev/null - ip netns del ${NS2} 2> /dev/null - ip netns del ${NS3} 2> /dev/null -} - -trap cleanup EXIT - -remove_routes_to_gredev() -{ - ip -netns ${NS1} route del ${IPv4_GRE} dev veth5 ${VRF} - ip -netns ${NS2} route del ${IPv4_GRE} dev veth7 ${VRF} - ip -netns ${NS1} -6 route del ${IPv6_GRE}/128 dev veth5 ${VRF} - ip -netns ${NS2} -6 route del ${IPv6_GRE}/128 dev veth7 ${VRF} -} - -add_unreachable_routes_to_gredev() -{ - ip -netns ${NS1} route add unreachable ${IPv4_GRE}/32 ${VRF} - ip -netns ${NS2} route add unreachable ${IPv4_GRE}/32 ${VRF} - ip -netns ${NS1} -6 route add unreachable ${IPv6_GRE}/128 ${VRF} - ip -netns ${NS2} -6 route add unreachable ${IPv6_GRE}/128 ${VRF} -} - -test_ping() -{ - local readonly PROTO=$1 - local readonly EXPECTED=$2 - local RET=0 - - if [ "${PROTO}" == "IPv4" ] ; then - ip netns exec ${NS1} ping -c 1 -W 1 -I veth1 ${IPv4_DST} 2>&1 > /dev/null - RET=$? - elif [ "${PROTO}" == "IPv6" ] ; then - ip netns exec ${NS1} ping6 -c 1 -W 1 -I veth1 ${IPv6_DST} 2>&1 > /dev/null - RET=$? 
- else - echo " test_ping: unknown PROTO: ${PROTO}" - TEST_STATUS=1 - fi - - if [ "0" != "${RET}" ]; then - RET=1 - fi - - if [ "${EXPECTED}" != "${RET}" ] ; then - echo " test_ping failed: expected: ${EXPECTED}; got ${RET}" - TEST_STATUS=1 - fi -} - -test_gso() -{ - local readonly PROTO=$1 - local readonly PKT_SZ=5000 - local IP_DST="" - : > ${TMPFILE} # trim the capture file - - # check that nc is present - command -v nc >/dev/null 2>&1 || \ - { echo >&2 "nc is not available: skipping TSO tests"; return; } - - # listen on port 9000, capture TCP into $TMPFILE - if [ "${PROTO}" == "IPv4" ] ; then - IP_DST=${IPv4_DST} - ip netns exec ${NS3} bash -c \ - "nc -4 -l -p 9000 > ${TMPFILE} &" - elif [ "${PROTO}" == "IPv6" ] ; then - IP_DST=${IPv6_DST} - ip netns exec ${NS3} bash -c \ - "nc -6 -l -p 9000 > ${TMPFILE} &" - RET=$? - else - echo " test_gso: unknown PROTO: ${PROTO}" - TEST_STATUS=1 - fi - sleep 1 # let nc start listening - - # send a packet larger than MTU - ip netns exec ${NS1} bash -c \ - "dd if=/dev/zero bs=$PKT_SZ count=1 > /dev/tcp/${IP_DST}/9000 2>/dev/null" - sleep 2 # let the packet get delivered - - # verify we received all expected bytes - SZ=$(stat -c %s ${TMPFILE}) - if [ "$SZ" != "$PKT_SZ" ] ; then - echo " test_gso failed: ${PROTO}" - TEST_STATUS=1 - fi -} - -test_egress() -{ - local readonly ENCAP=$1 - echo "starting egress ${ENCAP} encap test ${VRF}" - setup - - # by default, pings work - test_ping IPv4 0 - test_ping IPv6 0 - - # remove NS2->DST routes, ping fails - ip -netns ${NS2} route del ${IPv4_DST}/32 dev veth3 ${VRF} - ip -netns ${NS2} -6 route del ${IPv6_DST}/128 dev veth3 ${VRF} - test_ping IPv4 1 - test_ping IPv6 1 - - # install replacement routes (LWT/eBPF), pings succeed - if [ "${ENCAP}" == "IPv4" ] ; then - ip -netns ${NS1} route add ${IPv4_DST} encap bpf xmit obj \ - ${BPF_FILE} sec encap_gre dev veth1 ${VRF} - ip -netns ${NS1} -6 route add ${IPv6_DST} encap bpf xmit obj \ - ${BPF_FILE} sec encap_gre dev veth1 ${VRF} - elif [ "${ENCAP}" == "IPv6" ] ; then - ip -netns ${NS1} route add ${IPv4_DST} encap bpf xmit obj \ - ${BPF_FILE} sec encap_gre6 dev veth1 ${VRF} - ip -netns ${NS1} -6 route add ${IPv6_DST} encap bpf xmit obj \ - ${BPF_FILE} sec encap_gre6 dev veth1 ${VRF} - else - echo " unknown encap ${ENCAP}" - TEST_STATUS=1 - fi - test_ping IPv4 0 - test_ping IPv6 0 - - # skip GSO tests with VRF: VRF routing needs properly assigned - # source IP/device, which is easy to do with ping and hard with dd/nc. 
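The encap_gre/encap_gre6 sections installed on the routes above come from test_lwt_ip_encap.bpf.o; the general shape of such an LWT xmit program, sketched here with a placeholder outer header rather than the real test source, is to prepend an outer IP/GRE header with bpf_lwt_push_encap() and ask the stack to re-route the result:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("lwt_xmit")
int encap_sketch(struct __sk_buff *skb)
{
	/* a real program builds a valid outer IPv4 (20 B) + GRE (4 B)
	 * header here; left zeroed in this sketch
	 */
	char hdr[24] = {};

	/* BPF_LWT_ENCAP_IP prepends hdr and re-routes the packet on
	 * the new outer addresses
	 */
	if (bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_IP, hdr, sizeof(hdr)))
		return BPF_DROP;
	return BPF_LWT_REROUTE;
}

char _license[] SEC("license") = "GPL";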
- if [ -z "${VRF}" ] ; then - test_gso IPv4 - test_gso IPv6 - fi - - # a negative test: remove routes to GRE devices: ping fails - remove_routes_to_gredev - test_ping IPv4 1 - test_ping IPv6 1 - - # another negative test - add_unreachable_routes_to_gredev - test_ping IPv4 1 - test_ping IPv6 1 - - cleanup - process_test_results -} - -test_ingress() -{ - local readonly ENCAP=$1 - echo "starting ingress ${ENCAP} encap test ${VRF}" - setup - - # need to wait a bit for IPv6 to autoconf, otherwise - # ping6 sometimes fails with "unable to bind to address" - - # by default, pings work - test_ping IPv4 0 - test_ping IPv6 0 - - # remove NS2->DST routes, pings fail - ip -netns ${NS2} route del ${IPv4_DST}/32 dev veth3 ${VRF} - ip -netns ${NS2} -6 route del ${IPv6_DST}/128 dev veth3 ${VRF} - test_ping IPv4 1 - test_ping IPv6 1 - - # install replacement routes (LWT/eBPF), pings succeed - if [ "${ENCAP}" == "IPv4" ] ; then - ip -netns ${NS2} route add ${IPv4_DST} encap bpf in obj \ - ${BPF_FILE} sec encap_gre dev veth2 ${VRF} - ip -netns ${NS2} -6 route add ${IPv6_DST} encap bpf in obj \ - ${BPF_FILE} sec encap_gre dev veth2 ${VRF} - elif [ "${ENCAP}" == "IPv6" ] ; then - ip -netns ${NS2} route add ${IPv4_DST} encap bpf in obj \ - ${BPF_FILE} sec encap_gre6 dev veth2 ${VRF} - ip -netns ${NS2} -6 route add ${IPv6_DST} encap bpf in obj \ - ${BPF_FILE} sec encap_gre6 dev veth2 ${VRF} - else - echo "FAIL: unknown encap ${ENCAP}" - TEST_STATUS=1 - fi - test_ping IPv4 0 - test_ping IPv6 0 - - # a negative test: remove routes to GRE devices: ping fails - remove_routes_to_gredev - test_ping IPv4 1 - test_ping IPv6 1 - - # another negative test - add_unreachable_routes_to_gredev - test_ping IPv4 1 - test_ping IPv6 1 - - cleanup - process_test_results -} - -VRF="" -test_egress IPv4 -test_egress IPv6 -test_ingress IPv4 -test_ingress IPv6 - -VRF="vrf red" -test_egress IPv4 -test_egress IPv6 -test_ingress IPv4 -test_ingress IPv6 - -print_test_summary_and_exit diff --git a/tools/testing/selftests/bpf/test_lwt_seg6local.sh b/tools/testing/selftests/bpf/test_lwt_seg6local.sh deleted file mode 100755 index 0efea2292d6a..000000000000 --- a/tools/testing/selftests/bpf/test_lwt_seg6local.sh +++ /dev/null @@ -1,156 +0,0 @@ -#!/bin/bash -# Connects 6 network namespaces through veths. -# Each NS may have different IPv6 global scope addresses : -# NS1 ---- NS2 ---- NS3 ---- NS4 ---- NS5 ---- NS6 -# fb00::1 fd00::1 fd00::2 fd00::3 fb00::6 -# fc42::1 fd00::4 -# -# All IPv6 packets going to fb00::/16 through NS2 will be encapsulated in a -# IPv6 header with a Segment Routing Header, with segments : -# fd00::1 -> fd00::2 -> fd00::3 -> fd00::4 -# -# 3 fd00::/16 IPv6 addresses are binded to seg6local End.BPF actions : -# - fd00::1 : add a TLV, change the flags and apply a End.X action to fc42::1 -# - fd00::2 : remove the TLV, change the flags, add a tag -# - fd00::3 : apply an End.T action to fd00::4, through routing table 117 -# -# fd00::4 is a simple Segment Routing node decapsulating the inner IPv6 packet. -# Each End.BPF action will validate the operations applied on the SRH by the -# previous BPF program in the chain, otherwise the packet is dropped. -# -# An UDP datagram is sent from fb00::1 to fb00::6. The test succeeds if this -# datagram can be read on NS6 when binding to fb00::6. - -# Kselftest framework requirement - SKIP code is 4. 
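Each fd00::/16 hop above is a seg6local End.BPF action: the attached program sees the packet with its SRH and may rewrite it via the lwt_seg6_* helpers, dropping the packet when the previous hop's edits do not validate. A bare-bones skeleton (hypothetical; only the section name and return convention are meaningful here):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("lwt_seg6local")
int end_bpf_sketch(struct __sk_buff *skb)
{
	/* real End.BPF programs inspect/edit the SRH here, e.g. via
	 * bpf_lwt_seg6_adjust_srh() or bpf_lwt_seg6_action(), and
	 * return BPF_DROP when validation fails
	 */
	return BPF_OK;
}

char _license[] SEC("license") = "GPL";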
-ksft_skip=4 -BPF_FILE="test_lwt_seg6local.bpf.o" -readonly NS1="ns1-$(mktemp -u XXXXXX)" -readonly NS2="ns2-$(mktemp -u XXXXXX)" -readonly NS3="ns3-$(mktemp -u XXXXXX)" -readonly NS4="ns4-$(mktemp -u XXXXXX)" -readonly NS5="ns5-$(mktemp -u XXXXXX)" -readonly NS6="ns6-$(mktemp -u XXXXXX)" - -msg="skip all tests:" -if [ $UID != 0 ]; then - echo $msg please run this as root >&2 - exit $ksft_skip -fi - -TMP_FILE="/tmp/selftest_lwt_seg6local.txt" - -cleanup() -{ - if [ "$?" = "0" ]; then - echo "selftests: test_lwt_seg6local [PASS]"; - else - echo "selftests: test_lwt_seg6local [FAILED]"; - fi - - set +e - ip netns del ${NS1} 2> /dev/null - ip netns del ${NS2} 2> /dev/null - ip netns del ${NS3} 2> /dev/null - ip netns del ${NS4} 2> /dev/null - ip netns del ${NS5} 2> /dev/null - ip netns del ${NS6} 2> /dev/null - rm -f $TMP_FILE -} - -set -e - -ip netns add ${NS1} -ip netns add ${NS2} -ip netns add ${NS3} -ip netns add ${NS4} -ip netns add ${NS5} -ip netns add ${NS6} - -trap cleanup 0 2 3 6 9 - -ip link add veth1 type veth peer name veth2 -ip link add veth3 type veth peer name veth4 -ip link add veth5 type veth peer name veth6 -ip link add veth7 type veth peer name veth8 -ip link add veth9 type veth peer name veth10 - -ip link set veth1 netns ${NS1} -ip link set veth2 netns ${NS2} -ip link set veth3 netns ${NS2} -ip link set veth4 netns ${NS3} -ip link set veth5 netns ${NS3} -ip link set veth6 netns ${NS4} -ip link set veth7 netns ${NS4} -ip link set veth8 netns ${NS5} -ip link set veth9 netns ${NS5} -ip link set veth10 netns ${NS6} - -ip netns exec ${NS1} ip link set dev veth1 up -ip netns exec ${NS2} ip link set dev veth2 up -ip netns exec ${NS2} ip link set dev veth3 up -ip netns exec ${NS3} ip link set dev veth4 up -ip netns exec ${NS3} ip link set dev veth5 up -ip netns exec ${NS4} ip link set dev veth6 up -ip netns exec ${NS4} ip link set dev veth7 up -ip netns exec ${NS5} ip link set dev veth8 up -ip netns exec ${NS5} ip link set dev veth9 up -ip netns exec ${NS6} ip link set dev veth10 up -ip netns exec ${NS6} ip link set dev lo up - -# All link scope addresses and routes required between veths -ip netns exec ${NS1} ip -6 addr add fb00::12/16 dev veth1 scope link -ip netns exec ${NS1} ip -6 route add fb00::21 dev veth1 scope link -ip netns exec ${NS2} ip -6 addr add fb00::21/16 dev veth2 scope link -ip netns exec ${NS2} ip -6 addr add fb00::34/16 dev veth3 scope link -ip netns exec ${NS2} ip -6 route add fb00::43 dev veth3 scope link -ip netns exec ${NS3} ip -6 route add fb00::65 dev veth5 scope link -ip netns exec ${NS3} ip -6 addr add fb00::43/16 dev veth4 scope link -ip netns exec ${NS3} ip -6 addr add fb00::56/16 dev veth5 scope link -ip netns exec ${NS4} ip -6 addr add fb00::65/16 dev veth6 scope link -ip netns exec ${NS4} ip -6 addr add fb00::78/16 dev veth7 scope link -ip netns exec ${NS4} ip -6 route add fb00::87 dev veth7 scope link -ip netns exec ${NS5} ip -6 addr add fb00::87/16 dev veth8 scope link -ip netns exec ${NS5} ip -6 addr add fb00::910/16 dev veth9 scope link -ip netns exec ${NS5} ip -6 route add fb00::109 dev veth9 scope link -ip netns exec ${NS5} ip -6 route add fb00::109 table 117 dev veth9 scope link -ip netns exec ${NS6} ip -6 addr add fb00::109/16 dev veth10 scope link - -ip netns exec ${NS1} ip -6 addr add fb00::1/16 dev lo -ip netns exec ${NS1} ip -6 route add fb00::6 dev veth1 via fb00::21 - -ip netns exec ${NS2} ip -6 route add fb00::6 encap bpf in obj ${BPF_FILE} sec encap_srh dev veth2 -ip netns exec ${NS2} ip -6 route add fd00::1 dev veth3 via fb00::43 
scope link - -ip netns exec ${NS3} ip -6 route add fc42::1 dev veth5 via fb00::65 -ip netns exec ${NS3} ip -6 route add fd00::1 encap seg6local action End.BPF endpoint obj ${BPF_FILE} sec add_egr_x dev veth4 - -ip netns exec ${NS4} ip -6 route add fd00::2 encap seg6local action End.BPF endpoint obj ${BPF_FILE} sec pop_egr dev veth6 -ip netns exec ${NS4} ip -6 addr add fc42::1 dev lo -ip netns exec ${NS4} ip -6 route add fd00::3 dev veth7 via fb00::87 - -ip netns exec ${NS5} ip -6 route add fd00::4 table 117 dev veth9 via fb00::109 -ip netns exec ${NS5} ip -6 route add fd00::3 encap seg6local action End.BPF endpoint obj ${BPF_FILE} sec inspect_t dev veth8 - -ip netns exec ${NS6} ip -6 addr add fb00::6/16 dev lo -ip netns exec ${NS6} ip -6 addr add fd00::4/16 dev lo - -ip netns exec ${NS1} sysctl net.ipv6.conf.all.forwarding=1 > /dev/null -ip netns exec ${NS2} sysctl net.ipv6.conf.all.forwarding=1 > /dev/null -ip netns exec ${NS3} sysctl net.ipv6.conf.all.forwarding=1 > /dev/null -ip netns exec ${NS4} sysctl net.ipv6.conf.all.forwarding=1 > /dev/null -ip netns exec ${NS5} sysctl net.ipv6.conf.all.forwarding=1 > /dev/null - -ip netns exec ${NS6} sysctl net.ipv6.conf.all.seg6_enabled=1 > /dev/null -ip netns exec ${NS6} sysctl net.ipv6.conf.lo.seg6_enabled=1 > /dev/null -ip netns exec ${NS6} sysctl net.ipv6.conf.veth10.seg6_enabled=1 > /dev/null - -ip netns exec ${NS6} nc -l -6 -u -d 7330 > $TMP_FILE & -ip netns exec ${NS1} bash -c "echo 'foobar' | nc -w0 -6 -u -p 2121 -s fb00::1 fb00::6 7330" -sleep 5 # wait enough time to ensure the UDP datagram arrived to the last segment -kill -TERM $! - -if [[ $(< $TMP_FILE) != "foobar" ]]; then - exit 1 -fi - -exit 0 diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index 8b40e9496af1..3fae9ce46ca9 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -535,7 +535,7 @@ static void test_devmap_hash(unsigned int task, void *data) static void test_queuemap(unsigned int task, void *data) { const int MAP_SIZE = 32; - __u32 vals[MAP_SIZE + MAP_SIZE/2], val; + __u32 vals[MAP_SIZE + MAP_SIZE/2], val = 0; int fd, i; /* Fill test values to be used */ @@ -591,7 +591,7 @@ static void test_queuemap(unsigned int task, void *data) static void test_stackmap(unsigned int task, void *data) { const int MAP_SIZE = 32; - __u32 vals[MAP_SIZE + MAP_SIZE/2], val; + __u32 vals[MAP_SIZE + MAP_SIZE/2], val = 0; int fd, i; /* Fill test values to be used */ @@ -1396,9 +1396,10 @@ static void test_map_stress(void) #define MAX_DELAY_US 50000 #define MIN_DELAY_RANGE_US 5000 -static bool retry_for_again_or_busy(int err) +static bool can_retry(int err) { - return (err == EAGAIN || err == EBUSY); + return (err == EAGAIN || err == EBUSY || + (err == ENOMEM && map_opts.map_flags == BPF_F_NO_PREALLOC)); } int map_update_retriable(int map_fd, const void *key, const void *value, int flags, int attempts, @@ -1451,12 +1452,12 @@ static void test_update_delete(unsigned int fn, void *data) if (do_update) { err = map_update_retriable(fd, &key, &value, BPF_NOEXIST, MAP_RETRIES, - retry_for_again_or_busy); + can_retry); if (err) printf("error %d %d\n", err, errno); assert(err == 0); err = map_update_retriable(fd, &key, &value, BPF_EXIST, MAP_RETRIES, - retry_for_again_or_busy); + can_retry); if (err) printf("error %d %d\n", err, errno); assert(err == 0); diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index 6088d8222d59..309d9d4a8ace 100644 --- 
a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -88,7 +88,9 @@ static void stdio_hijack(char **log_buf, size_t *log_cnt) #endif } -static void stdio_restore_cleanup(void) +static pthread_mutex_t stdout_lock = PTHREAD_MUTEX_INITIALIZER; + +static void stdio_restore(void) { #ifdef __GLIBC__ if (verbose() && env.worker_id == -1) { @@ -98,34 +100,33 @@ static void stdio_restore_cleanup(void) fflush(stdout); + pthread_mutex_lock(&stdout_lock); + if (env.subtest_state) { - fclose(env.subtest_state->stdout_saved); + if (env.subtest_state->stdout_saved) + fclose(env.subtest_state->stdout_saved); env.subtest_state->stdout_saved = NULL; stdout = env.test_state->stdout_saved; stderr = env.test_state->stdout_saved; } else { - fclose(env.test_state->stdout_saved); + if (env.test_state->stdout_saved) + fclose(env.test_state->stdout_saved); env.test_state->stdout_saved = NULL; + stdout = env.stdout_saved; + stderr = env.stderr_saved; } + + pthread_mutex_unlock(&stdout_lock); #endif } -static void stdio_restore(void) +static int traffic_monitor_print_fn(const char *format, va_list args) { -#ifdef __GLIBC__ - if (verbose() && env.worker_id == -1) { - /* nothing to do, output to stdout by default */ - return; - } - - if (stdout == env.stdout_saved) - return; - - stdio_restore_cleanup(); + pthread_mutex_lock(&stdout_lock); + vfprintf(stdout, format, args); + pthread_mutex_unlock(&stdout_lock); - stdout = env.stdout_saved; - stderr = env.stderr_saved; -#endif + return 0; } /* Adapted from perf/util/string.c */ @@ -474,8 +475,6 @@ static void dump_test_log(const struct prog_test_def *test, print_test_result(test, test_state); } -static void stdio_restore(void); - /* A bunch of tests set custom affinity per-thread and/or per-process. Reset * it after each test/sub-test. 
*/ @@ -490,13 +489,11 @@ static void reset_affinity(void) err = sched_setaffinity(0, sizeof(cpuset), &cpuset); if (err < 0) { - stdio_restore(); fprintf(stderr, "Failed to reset process affinity: %d!\n", err); exit(EXIT_ERR_SETUP_INFRA); } err = pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset); if (err < 0) { - stdio_restore(); fprintf(stderr, "Failed to reset thread affinity: %d!\n", err); exit(EXIT_ERR_SETUP_INFRA); } @@ -514,7 +511,6 @@ static void save_netns(void) static void restore_netns(void) { if (setns(env.saved_netns_fd, CLONE_NEWNET) == -1) { - stdio_restore(); perror("setns(CLONE_NEWNS)"); exit(EXIT_ERR_SETUP_INFRA); } @@ -541,7 +537,8 @@ void test__end_subtest(void) test_result(subtest_state->error_cnt, subtest_state->skipped)); - stdio_restore_cleanup(); + stdio_restore(); + env.subtest_state = NULL; } @@ -1270,8 +1267,10 @@ void crash_handler(int signum) sz = backtrace(bt, ARRAY_SIZE(bt)); - if (env.stdout_saved) - stdio_restore(); + fflush(stdout); + stdout = env.stdout_saved; + stderr = env.stderr_saved; + if (env.test) { env.test_state->error_cnt++; dump_test_log(env.test, env.test_state, true, false, NULL); @@ -1282,6 +1281,21 @@ void crash_handler(int signum) backtrace_symbols_fd(bt, sz, STDERR_FILENO); } +void hexdump(const char *prefix, const void *buf, size_t len) +{ + for (int i = 0; i < len; i++) { + if (!(i % 16)) { + if (i) + fprintf(stdout, "\n"); + fprintf(stdout, "%s", prefix); + } + if (i && !(i % 8) && (i % 16)) + fprintf(stdout, "\t"); + fprintf(stdout, "%02X ", ((uint8_t *)(buf))[i]); + } + fprintf(stdout, "\n"); +} + static void sigint_handler(int signum) { int i; @@ -1350,10 +1364,19 @@ static int recv_message(int sock, struct msg *msg) return ret; } +static bool ns_is_needed(const char *test_name) +{ + if (strlen(test_name) < 3) + return false; + + return !strncmp(test_name, "ns_", 3); +} + static void run_one_test(int test_num) { struct prog_test_def *test = &prog_test_defs[test_num]; struct test_state *state = &test_states[test_num]; + struct netns_obj *ns = NULL; env.test = test; env.test_state = state; @@ -1361,10 +1384,13 @@ static void run_one_test(int test_num) stdio_hijack(&state->log_buf, &state->log_cnt); watchdog_start(); + if (ns_is_needed(test->test_name)) + ns = netns_new(test->test_name, true); if (test->run_test) test->run_test(); else if (test->run_serial_test) test->run_serial_test(); + netns_free(ns); watchdog_stop(); /* ensure last sub-test is finalized properly */ @@ -1373,6 +1399,8 @@ static void run_one_test(int test_num) state->tested = true; + stdio_restore(); + if (verbose() && env.worker_id == -1) print_test_result(test, state); @@ -1381,7 +1409,6 @@ static void run_one_test(int test_num) if (test->need_cgroup_cleanup) cleanup_cgroup_environment(); - stdio_restore(); free(stop_libbpf_log_capture()); dump_test_log(test, state, false, false, NULL); @@ -1916,6 +1943,9 @@ int main(int argc, char **argv) sigaction(SIGSEGV, &sigact, NULL); + env.stdout_saved = stdout; + env.stderr_saved = stderr; + env.secs_till_notify = 10; env.secs_till_kill = 120; err = argp_parse(&argp, argc, argv, 0, NULL, &env); @@ -1932,6 +1962,8 @@ int main(int argc, char **argv) libbpf_set_strict_mode(LIBBPF_STRICT_ALL); libbpf_set_print(libbpf_print_fn); + traffic_monitor_set_print(traffic_monitor_print_fn); + srand(time(NULL)); env.jit_enabled = is_jit_enabled(); @@ -1942,9 +1974,6 @@ int main(int argc, char **argv) return -1; } - env.stdout_saved = stdout; - env.stderr_saved = stderr; - env.has_testmod = true; if (!env.list_test_names) { 
/* ensure previous instance of the module is unloaded */ diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h index 74de33ae37e5..df2222a1806f 100644 --- a/tools/testing/selftests/bpf/test_progs.h +++ b/tools/testing/selftests/bpf/test_progs.h @@ -185,6 +185,7 @@ void test__end_subtest(void); void test__skip(void); void test__fail(void); int test__join_cgroup(const char *path); +void hexdump(const char *prefix, const void *buf, size_t len); #define PRINT_FAIL(format...) \ ({ \ @@ -344,6 +345,20 @@ int test__join_cgroup(const char *path); ___ok; \ }) +#define ASSERT_MEMEQ(actual, expected, len, name) ({ \ + static int duration = 0; \ + const void *__act = actual; \ + const void *__exp = expected; \ + int __len = len; \ + bool ___ok = memcmp(__act, __exp, __len) == 0; \ + CHECK(!___ok, (name), "unexpected memory mismatch\n"); \ + fprintf(stdout, "actual:\n"); \ + hexdump("\t", __act, __len); \ + fprintf(stdout, "expected:\n"); \ + hexdump("\t", __exp, __len); \ + ___ok; \ +}) + #define ASSERT_OK(res, name) ({ \ static int duration = 0; \ long long ___res = (res); \ @@ -412,6 +427,14 @@ int test__join_cgroup(const char *path); goto goto_label; \ }) +#define SYS_FAIL(goto_label, fmt, ...) \ + ({ \ + char cmd[1024]; \ + snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \ + if (!ASSERT_NEQ(0, system(cmd), cmd)) \ + goto goto_label; \ + }) + #define ALL_TO_DEV_NULL " >/dev/null 2>&1" #define SYS_NOFAIL(fmt, ...) \ @@ -437,6 +460,34 @@ static inline void *u64_to_ptr(__u64 ptr) return (void *) (unsigned long) ptr; } +static inline __u32 id_from_prog_fd(int fd) +{ + struct bpf_prog_info prog_info = {}; + __u32 prog_info_len = sizeof(prog_info); + int err; + + err = bpf_obj_get_info_by_fd(fd, &prog_info, &prog_info_len); + if (!ASSERT_OK(err, "id_from_prog_fd")) + return 0; + + ASSERT_NEQ(prog_info.id, 0, "prog_info.id"); + return prog_info.id; +} + +static inline __u32 id_from_link_fd(int fd) +{ + struct bpf_link_info link_info = {}; + __u32 link_info_len = sizeof(link_info); + int err; + + err = bpf_link_get_info_by_fd(fd, &link_info, &link_info_len); + if (!ASSERT_OK(err, "id_from_link_fd")) + return 0; + + ASSERT_NEQ(link_info.id, 0, "link_info.id"); + return link_info.id; +} + int bpf_find_map(const char *test, struct bpf_object *obj, const char *name); int compare_map_keys(int map1_fd, int map2_fd); int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len); diff --git a/tools/testing/selftests/bpf/test_tc_tunnel.sh b/tools/testing/selftests/bpf/test_tc_tunnel.sh index 7989ec608454..cb55a908bb0d 100755 --- a/tools/testing/selftests/bpf/test_tc_tunnel.sh +++ b/tools/testing/selftests/bpf/test_tc_tunnel.sh @@ -305,6 +305,7 @@ else client_connect verify_data server_listen + wait_for_port ${port} ${netcat_opt} fi # serverside, use BPF for decap diff --git a/tools/testing/selftests/bpf/test_tunnel.sh b/tools/testing/selftests/bpf/test_tunnel.sh deleted file mode 100755 index d9661b9988ba..000000000000 --- a/tools/testing/selftests/bpf/test_tunnel.sh +++ /dev/null @@ -1,645 +0,0 @@ -#!/bin/bash -# SPDX-License-Identifier: GPL-2.0 - -# End-to-end eBPF tunnel test suite -# The script tests BPF network tunnel implementation. 
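As a usage note for the test_progs.h additions above (a hypothetical subtest fragment; prog_fd is assumed to be the fd of an already-loaded program):

/* ASSERT_MEMEQ fails the test on mismatch and hexdumps both buffers;
 * id_from_prog_fd() returns the kernel-assigned id, or 0 on error
 */
static void check_payload(int prog_fd, const void *got)
{
	static const char want[4] = { 0x01, 0x02, 0x03, 0x04 };

	ASSERT_MEMEQ(got, want, sizeof(want), "payload");
	ASSERT_GT(id_from_prog_fd(prog_fd), 0, "prog id");
}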
-# -# Topology: -# --------- -# root namespace | at_ns0 namespace -# | -# ----------- | ----------- -# | tnl dev | | | tnl dev | (overlay network) -# ----------- | ----------- -# metadata-mode | native-mode -# with bpf | -# | -# ---------- | ---------- -# | veth1 | --------- | veth0 | (underlay network) -# ---------- peer ---------- -# -# -# Device Configuration -# -------------------- -# Root namespace with metadata-mode tunnel + BPF -# Device names and addresses: -# veth1 IP: 172.16.1.200, IPv6: 00::22 (underlay) -# tunnel dev <type>11, ex: gre11, IPv4: 10.1.1.200, IPv6: 1::22 (overlay) -# -# Namespace at_ns0 with native tunnel -# Device names and addresses: -# veth0 IPv4: 172.16.1.100, IPv6: 00::11 (underlay) -# tunnel dev <type>00, ex: gre00, IPv4: 10.1.1.100, IPv6: 1::11 (overlay) -# -# -# End-to-end ping packet flow -# --------------------------- -# Most of the tests start by namespace creation, device configuration, -# then ping the underlay and overlay network. When doing 'ping 10.1.1.100' -# from root namespace, the following operations happen: -# 1) Route lookup shows 10.1.1.100/24 belongs to tnl dev, fwd to tnl dev. -# 2) Tnl device's egress BPF program is triggered and set the tunnel metadata, -# with remote_ip=172.16.1.100 and others. -# 3) Outer tunnel header is prepended and route the packet to veth1's egress -# 4) veth0's ingress queue receive the tunneled packet at namespace at_ns0 -# 5) Tunnel protocol handler, ex: vxlan_rcv, decap the packet -# 6) Forward the packet to the overlay tnl dev - -BPF_FILE="test_tunnel_kern.bpf.o" -BPF_PIN_TUNNEL_DIR="/sys/fs/bpf/tc/tunnel" -PING_ARG="-c 3 -w 10 -q" -ret=0 -GREEN='\033[0;92m' -RED='\033[0;31m' -NC='\033[0m' # No Color - -config_device() -{ - ip netns add at_ns0 - ip link add veth0 type veth peer name veth1 - ip link set veth0 netns at_ns0 - ip netns exec at_ns0 ip addr add 172.16.1.100/24 dev veth0 - ip netns exec at_ns0 ip link set dev veth0 up - ip link set dev veth1 up mtu 1500 - ip addr add dev veth1 172.16.1.200/24 -} - -add_gre_tunnel() -{ - tun_key= - if [ -n "$1" ]; then - tun_key="key $1" - fi - - # at_ns0 namespace - ip netns exec at_ns0 \ - ip link add dev $DEV_NS type $TYPE seq $tun_key \ - local 172.16.1.100 remote 172.16.1.200 - ip netns exec at_ns0 ip link set dev $DEV_NS up - ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24 - - # root namespace - ip link add dev $DEV type $TYPE $tun_key external - ip link set dev $DEV up - ip addr add dev $DEV 10.1.1.200/24 -} - -add_ip6gretap_tunnel() -{ - - # assign ipv6 address - ip netns exec at_ns0 ip addr add ::11/96 dev veth0 - ip netns exec at_ns0 ip link set dev veth0 up - ip addr add dev veth1 ::22/96 - ip link set dev veth1 up - - # at_ns0 namespace - ip netns exec at_ns0 \ - ip link add dev $DEV_NS type $TYPE seq flowlabel 0xbcdef key 2 \ - local ::11 remote ::22 - - ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24 - ip netns exec at_ns0 ip addr add dev $DEV_NS fc80::100/96 - ip netns exec at_ns0 ip link set dev $DEV_NS up - - # root namespace - ip link add dev $DEV type $TYPE external - ip addr add dev $DEV 10.1.1.200/24 - ip addr add dev $DEV fc80::200/24 - ip link set dev $DEV up -} - -add_erspan_tunnel() -{ - # at_ns0 namespace - if [ "$1" == "v1" ]; then - ip netns exec at_ns0 \ - ip link add dev $DEV_NS type $TYPE seq key 2 \ - local 172.16.1.100 remote 172.16.1.200 \ - erspan_ver 1 erspan 123 - else - ip netns exec at_ns0 \ - ip link add dev $DEV_NS type $TYPE seq key 2 \ - local 172.16.1.100 remote 172.16.1.200 \ - erspan_ver 2 erspan_dir 
egress erspan_hwid 3 - fi - ip netns exec at_ns0 ip link set dev $DEV_NS up - ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24 - - # root namespace - ip link add dev $DEV type $TYPE external - ip link set dev $DEV up - ip addr add dev $DEV 10.1.1.200/24 -} - -add_ip6erspan_tunnel() -{ - - # assign ipv6 address - ip netns exec at_ns0 ip addr add ::11/96 dev veth0 - ip netns exec at_ns0 ip link set dev veth0 up - ip addr add dev veth1 ::22/96 - ip link set dev veth1 up - - # at_ns0 namespace - if [ "$1" == "v1" ]; then - ip netns exec at_ns0 \ - ip link add dev $DEV_NS type $TYPE seq key 2 \ - local ::11 remote ::22 \ - erspan_ver 1 erspan 123 - else - ip netns exec at_ns0 \ - ip link add dev $DEV_NS type $TYPE seq key 2 \ - local ::11 remote ::22 \ - erspan_ver 2 erspan_dir egress erspan_hwid 7 - fi - ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24 - ip netns exec at_ns0 ip link set dev $DEV_NS up - - # root namespace - ip link add dev $DEV type $TYPE external - ip addr add dev $DEV 10.1.1.200/24 - ip link set dev $DEV up -} - -add_geneve_tunnel() -{ - # at_ns0 namespace - ip netns exec at_ns0 \ - ip link add dev $DEV_NS type $TYPE \ - id 2 dstport 6081 remote 172.16.1.200 - ip netns exec at_ns0 ip link set dev $DEV_NS up - ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24 - - # root namespace - ip link add dev $DEV type $TYPE dstport 6081 external - ip link set dev $DEV up - ip addr add dev $DEV 10.1.1.200/24 -} - -add_ip6geneve_tunnel() -{ - ip netns exec at_ns0 ip addr add ::11/96 dev veth0 - ip netns exec at_ns0 ip link set dev veth0 up - ip addr add dev veth1 ::22/96 - ip link set dev veth1 up - - # at_ns0 namespace - ip netns exec at_ns0 \ - ip link add dev $DEV_NS type $TYPE id 22 \ - remote ::22 # geneve has no local option - ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24 - ip netns exec at_ns0 ip link set dev $DEV_NS up - - # root namespace - ip link add dev $DEV type $TYPE external - ip addr add dev $DEV 10.1.1.200/24 - ip link set dev $DEV up -} - -add_ipip_tunnel() -{ - # at_ns0 namespace - ip netns exec at_ns0 \ - ip link add dev $DEV_NS type $TYPE \ - local 172.16.1.100 remote 172.16.1.200 - ip netns exec at_ns0 ip link set dev $DEV_NS up - ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24 - - # root namespace - ip link add dev $DEV type $TYPE external - ip link set dev $DEV up - ip addr add dev $DEV 10.1.1.200/24 -} - -add_ip6tnl_tunnel() -{ - ip netns exec at_ns0 ip addr add ::11/96 dev veth0 - ip netns exec at_ns0 ip link set dev veth0 up - ip addr add dev veth1 ::22/96 - ip link set dev veth1 up - - # at_ns0 namespace - ip netns exec at_ns0 \ - ip link add dev $DEV_NS type $TYPE \ - local ::11 remote ::22 - ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24 - ip netns exec at_ns0 ip addr add dev $DEV_NS 1::11/96 - ip netns exec at_ns0 ip link set dev $DEV_NS up - - # root namespace - ip link add dev $DEV type $TYPE external - ip addr add dev $DEV 10.1.1.200/24 - ip addr add dev $DEV 1::22/96 - ip link set dev $DEV up -} - -test_gre() -{ - TYPE=gretap - DEV_NS=gretap00 - DEV=gretap11 - ret=0 - - check $TYPE - config_device - add_gre_tunnel 2 - attach_bpf $DEV gre_set_tunnel gre_get_tunnel - ping $PING_ARG 10.1.1.100 - check_err $? - ip netns exec at_ns0 ping $PING_ARG 10.1.1.200 - check_err $? 
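Step 2 of the packet flow described above, where the egress program fills in the tunnel metadata, centres on bpf_skb_set_tunnel_key(). A pared-down sketch (remote address hard-coded to the topology above, error handling trimmed; this is not the shipped test_tunnel_kern.c):

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int gre_set_tunnel_sketch(struct __sk_buff *skb)
{
	struct bpf_tunnel_key key;

	__builtin_memset(&key, 0, sizeof(key));
	key.remote_ipv4 = 0xac100164;	/* 172.16.1.100 */
	key.tunnel_id = 2;
	key.tunnel_ttl = 64;

	if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
				   BPF_F_ZERO_CSUM_TX))
		return TC_ACT_SHOT;
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";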
- cleanup - - if [ $ret -ne 0 ]; then - echo -e ${RED}"FAIL: $TYPE"${NC} - return 1 - fi - echo -e ${GREEN}"PASS: $TYPE"${NC} -} - -test_gre_no_tunnel_key() -{ - TYPE=gre - DEV_NS=gre00 - DEV=gre11 - ret=0 - - check $TYPE - config_device - add_gre_tunnel - attach_bpf $DEV gre_set_tunnel_no_key gre_get_tunnel - ping $PING_ARG 10.1.1.100 - check_err $? - ip netns exec at_ns0 ping $PING_ARG 10.1.1.200 - check_err $? - cleanup - - if [ $ret -ne 0 ]; then - echo -e ${RED}"FAIL: $TYPE"${NC} - return 1 - fi - echo -e ${GREEN}"PASS: $TYPE"${NC} -} - -test_ip6gre() -{ - TYPE=ip6gre - DEV_NS=ip6gre00 - DEV=ip6gre11 - ret=0 - - check $TYPE - config_device - # reuse the ip6gretap function - add_ip6gretap_tunnel - attach_bpf $DEV ip6gretap_set_tunnel ip6gretap_get_tunnel - # underlay - ping6 $PING_ARG ::11 - # overlay: ipv4 over ipv6 - ip netns exec at_ns0 ping $PING_ARG 10.1.1.200 - ping $PING_ARG 10.1.1.100 - check_err $? - # overlay: ipv6 over ipv6 - ip netns exec at_ns0 ping6 $PING_ARG fc80::200 - check_err $? - cleanup - - if [ $ret -ne 0 ]; then - echo -e ${RED}"FAIL: $TYPE"${NC} - return 1 - fi - echo -e ${GREEN}"PASS: $TYPE"${NC} -} - -test_ip6gretap() -{ - TYPE=ip6gretap - DEV_NS=ip6gretap00 - DEV=ip6gretap11 - ret=0 - - check $TYPE - config_device - add_ip6gretap_tunnel - attach_bpf $DEV ip6gretap_set_tunnel ip6gretap_get_tunnel - # underlay - ping6 $PING_ARG ::11 - # overlay: ipv4 over ipv6 - ip netns exec at_ns0 ping $PING_ARG 10.1.1.200 - ping $PING_ARG 10.1.1.100 - check_err $? - # overlay: ipv6 over ipv6 - ip netns exec at_ns0 ping6 $PING_ARG fc80::200 - check_err $? - cleanup - - if [ $ret -ne 0 ]; then - echo -e ${RED}"FAIL: $TYPE"${NC} - return 1 - fi - echo -e ${GREEN}"PASS: $TYPE"${NC} -} - -test_erspan() -{ - TYPE=erspan - DEV_NS=erspan00 - DEV=erspan11 - ret=0 - - check $TYPE - config_device - add_erspan_tunnel $1 - attach_bpf $DEV erspan_set_tunnel erspan_get_tunnel - ping $PING_ARG 10.1.1.100 - check_err $? - ip netns exec at_ns0 ping $PING_ARG 10.1.1.200 - check_err $? - cleanup - - if [ $ret -ne 0 ]; then - echo -e ${RED}"FAIL: $TYPE"${NC} - return 1 - fi - echo -e ${GREEN}"PASS: $TYPE"${NC} -} - -test_ip6erspan() -{ - TYPE=ip6erspan - DEV_NS=ip6erspan00 - DEV=ip6erspan11 - ret=0 - - check $TYPE - config_device - add_ip6erspan_tunnel $1 - attach_bpf $DEV ip4ip6erspan_set_tunnel ip4ip6erspan_get_tunnel - ping6 $PING_ARG ::11 - ip netns exec at_ns0 ping $PING_ARG 10.1.1.200 - check_err $? - cleanup - - if [ $ret -ne 0 ]; then - echo -e ${RED}"FAIL: $TYPE"${NC} - return 1 - fi - echo -e ${GREEN}"PASS: $TYPE"${NC} -} - -test_geneve() -{ - TYPE=geneve - DEV_NS=geneve00 - DEV=geneve11 - ret=0 - - check $TYPE - config_device - add_geneve_tunnel - attach_bpf $DEV geneve_set_tunnel geneve_get_tunnel - ping $PING_ARG 10.1.1.100 - check_err $? - ip netns exec at_ns0 ping $PING_ARG 10.1.1.200 - check_err $? - cleanup - - if [ $ret -ne 0 ]; then - echo -e ${RED}"FAIL: $TYPE"${NC} - return 1 - fi - echo -e ${GREEN}"PASS: $TYPE"${NC} -} - -test_ip6geneve() -{ - TYPE=geneve - DEV_NS=ip6geneve00 - DEV=ip6geneve11 - ret=0 - - check $TYPE - config_device - add_ip6geneve_tunnel - attach_bpf $DEV ip6geneve_set_tunnel ip6geneve_get_tunnel - ping $PING_ARG 10.1.1.100 - check_err $? - ip netns exec at_ns0 ping $PING_ARG 10.1.1.200 - check_err $? 
- cleanup - - if [ $ret -ne 0 ]; then - echo -e ${RED}"FAIL: ip6$TYPE"${NC} - return 1 - fi - echo -e ${GREEN}"PASS: ip6$TYPE"${NC} -} - -test_ipip() -{ - TYPE=ipip - DEV_NS=ipip00 - DEV=ipip11 - ret=0 - - check $TYPE - config_device - add_ipip_tunnel - ip link set dev veth1 mtu 1500 - attach_bpf $DEV ipip_set_tunnel ipip_get_tunnel - ping $PING_ARG 10.1.1.100 - check_err $? - ip netns exec at_ns0 ping $PING_ARG 10.1.1.200 - check_err $? - cleanup - - if [ $ret -ne 0 ]; then - echo -e ${RED}"FAIL: $TYPE"${NC} - return 1 - fi - echo -e ${GREEN}"PASS: $TYPE"${NC} -} - -test_ipip6() -{ - TYPE=ip6tnl - DEV_NS=ipip6tnl00 - DEV=ipip6tnl11 - ret=0 - - check $TYPE - config_device - add_ip6tnl_tunnel - ip link set dev veth1 mtu 1500 - attach_bpf $DEV ipip6_set_tunnel ipip6_get_tunnel - # underlay - ping6 $PING_ARG ::11 - # ip4 over ip6 - ping $PING_ARG 10.1.1.100 - check_err $? - ip netns exec at_ns0 ping $PING_ARG 10.1.1.200 - check_err $? - cleanup - - if [ $ret -ne 0 ]; then - echo -e ${RED}"FAIL: $TYPE"${NC} - return 1 - fi - echo -e ${GREEN}"PASS: $TYPE"${NC} -} - -test_ip6ip6() -{ - TYPE=ip6tnl - DEV_NS=ip6ip6tnl00 - DEV=ip6ip6tnl11 - ret=0 - - check $TYPE - config_device - add_ip6tnl_tunnel - ip link set dev veth1 mtu 1500 - attach_bpf $DEV ip6ip6_set_tunnel ip6ip6_get_tunnel - # underlay - ping6 $PING_ARG ::11 - # ip6 over ip6 - ping6 $PING_ARG 1::11 - check_err $? - ip netns exec at_ns0 ping6 $PING_ARG 1::22 - check_err $? - cleanup - - if [ $ret -ne 0 ]; then - echo -e ${RED}"FAIL: ip6$TYPE"${NC} - return 1 - fi - echo -e ${GREEN}"PASS: ip6$TYPE"${NC} -} - -attach_bpf() -{ - DEV=$1 - SET=$2 - GET=$3 - mkdir -p ${BPF_PIN_TUNNEL_DIR} - bpftool prog loadall ${BPF_FILE} ${BPF_PIN_TUNNEL_DIR}/ - tc qdisc add dev $DEV clsact - tc filter add dev $DEV egress bpf da object-pinned ${BPF_PIN_TUNNEL_DIR}/$SET - tc filter add dev $DEV ingress bpf da object-pinned ${BPF_PIN_TUNNEL_DIR}/$GET -} - -cleanup() -{ - rm -rf ${BPF_PIN_TUNNEL_DIR} - - ip netns delete at_ns0 2> /dev/null - ip link del veth1 2> /dev/null - ip link del ipip11 2> /dev/null - ip link del ipip6tnl11 2> /dev/null - ip link del ip6ip6tnl11 2> /dev/null - ip link del gretap11 2> /dev/null - ip link del gre11 2> /dev/null - ip link del ip6gre11 2> /dev/null - ip link del ip6gretap11 2> /dev/null - ip link del geneve11 2> /dev/null - ip link del ip6geneve11 2> /dev/null - ip link del erspan11 2> /dev/null - ip link del ip6erspan11 2> /dev/null -} - -cleanup_exit() -{ - echo "CATCH SIGKILL or SIGINT, cleanup and exit" - cleanup - exit 0 -} - -check() -{ - ip link help 2>&1 | grep -q "\s$1\s" - if [ $? -ne 0 ];then - echo "SKIP $1: iproute2 not support" - cleanup - return 1 - fi -} - -enable_debug() -{ - echo 'file ip_gre.c +p' > /sys/kernel/debug/dynamic_debug/control - echo 'file ip6_gre.c +p' > /sys/kernel/debug/dynamic_debug/control - echo 'file geneve.c +p' > /sys/kernel/debug/dynamic_debug/control - echo 'file ipip.c +p' > /sys/kernel/debug/dynamic_debug/control -} - -check_err() -{ - if [ $ret -eq 0 ]; then - ret=$1 - fi -} - -bpf_tunnel_test() -{ - local errors=0 - - echo "Testing GRE tunnel..." - test_gre - errors=$(( $errors + $? )) - - echo "Testing GRE tunnel (without tunnel keys)..." - test_gre_no_tunnel_key - errors=$(( $errors + $? )) - - echo "Testing IP6GRE tunnel..." - test_ip6gre - errors=$(( $errors + $? )) - - echo "Testing IP6GRETAP tunnel..." - test_ip6gretap - errors=$(( $errors + $? )) - - echo "Testing ERSPAN tunnel..." - test_erspan v2 - errors=$(( $errors + $? )) - - echo "Testing IP6ERSPAN tunnel..." 
- test_ip6erspan v2 - errors=$(( $errors + $? )) - - echo "Testing GENEVE tunnel..." - test_geneve - errors=$(( $errors + $? )) - - echo "Testing IP6GENEVE tunnel..." - test_ip6geneve - errors=$(( $errors + $? )) - - echo "Testing IPIP tunnel..." - test_ipip - errors=$(( $errors + $? )) - - echo "Testing IPIP6 tunnel..." - test_ipip6 - errors=$(( $errors + $? )) - - echo "Testing IP6IP6 tunnel..." - test_ip6ip6 - errors=$(( $errors + $? )) - - return $errors -} - -trap cleanup 0 3 6 -trap cleanup_exit 2 9 - -cleanup -bpf_tunnel_test - -if [ $? -ne 0 ]; then - echo -e "$(basename $0): ${RED}FAIL${NC}" - exit 1 -fi -echo -e "$(basename $0): ${GREEN}PASS${NC}" -exit 0 diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 447b68509d76..27db34ecf3f5 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -734,7 +734,7 @@ static __u32 btf_raw_types[] = { BTF_MEMBER_ENC(71, 13, 128), /* struct prog_test_member __kptr *ptr; */ }; -static char bpf_vlog[UINT_MAX >> 8]; +static char bpf_vlog[UINT_MAX >> 5]; static int load_btf_spec(__u32 *types, int types_len, const char *strings, int strings_len) @@ -1559,10 +1559,10 @@ static void do_test_single(struct bpf_test *test, bool unpriv, test->errstr_unpriv : test->errstr; opts.expected_attach_type = test->expected_attach_type; - if (verbose) - opts.log_level = verif_log_level | 4; /* force stats */ - else if (expected_ret == VERBOSE_ACCEPT) + if (expected_ret == VERBOSE_ACCEPT) opts.log_level = 2; + else if (verbose) + opts.log_level = verif_log_level | 4; /* force stats */ else opts.log_level = DEFAULT_LIBBPF_LOG_LEVEL; opts.prog_flags = pflags; diff --git a/tools/testing/selftests/bpf/test_xdp_meta.sh b/tools/testing/selftests/bpf/test_xdp_meta.sh deleted file mode 100755 index 2740322c1878..000000000000 --- a/tools/testing/selftests/bpf/test_xdp_meta.sh +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/sh - -BPF_FILE="test_xdp_meta.bpf.o" -# Kselftest framework requirement - SKIP code is 4. -readonly KSFT_SKIP=4 -readonly NS1="ns1-$(mktemp -u XXXXXX)" -readonly NS2="ns2-$(mktemp -u XXXXXX)" - -cleanup() -{ - if [ "$?" = "0" ]; then - echo "selftests: test_xdp_meta [PASS]"; - else - echo "selftests: test_xdp_meta [FAILED]"; - fi - - set +e - ip link del veth1 2> /dev/null - ip netns del ${NS1} 2> /dev/null - ip netns del ${NS2} 2> /dev/null -} - -ip link set dev lo xdp off 2>/dev/null > /dev/null -if [ $? 
-ne 0 ];then - echo "selftests: [SKIP] Could not run test without the ip xdp support" - exit $KSFT_SKIP -fi -set -e - -ip netns add ${NS1} -ip netns add ${NS2} - -trap cleanup 0 2 3 6 9 - -ip link add veth1 type veth peer name veth2 - -ip link set veth1 netns ${NS1} -ip link set veth2 netns ${NS2} - -ip netns exec ${NS1} ip addr add 10.1.1.11/24 dev veth1 -ip netns exec ${NS2} ip addr add 10.1.1.22/24 dev veth2 - -ip netns exec ${NS1} tc qdisc add dev veth1 clsact -ip netns exec ${NS2} tc qdisc add dev veth2 clsact - -ip netns exec ${NS1} tc filter add dev veth1 ingress bpf da obj ${BPF_FILE} sec t -ip netns exec ${NS2} tc filter add dev veth2 ingress bpf da obj ${BPF_FILE} sec t - -ip netns exec ${NS1} ip link set dev veth1 xdp obj ${BPF_FILE} sec x -ip netns exec ${NS2} ip link set dev veth2 xdp obj ${BPF_FILE} sec x - -ip netns exec ${NS1} ip link set dev veth1 up -ip netns exec ${NS2} ip link set dev veth2 up - -ip netns exec ${NS1} ping -c 1 10.1.1.22 -ip netns exec ${NS2} ping -c 1 10.1.1.11 - -exit 0 diff --git a/tools/testing/selftests/bpf/test_xdp_redirect.sh b/tools/testing/selftests/bpf/test_xdp_redirect.sh deleted file mode 100755 index 0746a4fde9d3..000000000000 --- a/tools/testing/selftests/bpf/test_xdp_redirect.sh +++ /dev/null @@ -1,79 +0,0 @@ -#!/bin/bash -# Create 2 namespaces with two veth peers, and -# forward packets in-between using generic XDP -# -# NS1(veth11) NS2(veth22) -# | | -# | | -# (veth1, ------ (veth2, -# id:111) id:222) -# | xdp forwarding | -# ------------------ - -readonly NS1="ns1-$(mktemp -u XXXXXX)" -readonly NS2="ns2-$(mktemp -u XXXXXX)" -ret=0 - -setup() -{ - - local xdpmode=$1 - - ip netns add ${NS1} - ip netns add ${NS2} - - ip link add veth1 index 111 type veth peer name veth11 netns ${NS1} - ip link add veth2 index 222 type veth peer name veth22 netns ${NS2} - - ip link set veth1 up - ip link set veth2 up - ip -n ${NS1} link set dev veth11 up - ip -n ${NS2} link set dev veth22 up - - ip -n ${NS1} addr add 10.1.1.11/24 dev veth11 - ip -n ${NS2} addr add 10.1.1.22/24 dev veth22 -} - -cleanup() -{ - ip link del veth1 2> /dev/null - ip link del veth2 2> /dev/null - ip netns del ${NS1} 2> /dev/null - ip netns del ${NS2} 2> /dev/null -} - -test_xdp_redirect() -{ - local xdpmode=$1 - - setup - - ip link set dev veth1 $xdpmode off &> /dev/null - if [ $? 
-ne 0 ];then - echo "selftests: test_xdp_redirect $xdpmode [SKIP]" - return 0 - fi - - ip -n ${NS1} link set veth11 $xdpmode obj xdp_dummy.bpf.o sec xdp &> /dev/null - ip -n ${NS2} link set veth22 $xdpmode obj xdp_dummy.bpf.o sec xdp &> /dev/null - ip link set dev veth1 $xdpmode obj test_xdp_redirect.bpf.o sec redirect_to_222 &> /dev/null - ip link set dev veth2 $xdpmode obj test_xdp_redirect.bpf.o sec redirect_to_111 &> /dev/null - - if ip netns exec ${NS1} ping -c 1 10.1.1.22 &> /dev/null && - ip netns exec ${NS2} ping -c 1 10.1.1.11 &> /dev/null; then - echo "selftests: test_xdp_redirect $xdpmode [PASS]"; - else - ret=1 - echo "selftests: test_xdp_redirect $xdpmode [FAILED]"; - fi - - cleanup -} - -set -e -trap cleanup 2 3 6 9 - -test_xdp_redirect xdpgeneric -test_xdp_redirect xdpdrv - -exit $ret diff --git a/tools/testing/selftests/bpf/test_xdp_redirect_multi.sh b/tools/testing/selftests/bpf/test_xdp_redirect_multi.sh deleted file mode 100755 index 4c3c3fdd2d73..000000000000 --- a/tools/testing/selftests/bpf/test_xdp_redirect_multi.sh +++ /dev/null @@ -1,214 +0,0 @@ -#!/bin/bash -# SPDX-License-Identifier: GPL-2.0 -# -# Test topology: -# - - - - - - - - - - - - - - - - - - - -# | veth1 veth2 veth3 | ns0 -# - -| - - - - - - | - - - - - - | - - -# --------- --------- --------- -# | veth0 | | veth0 | | veth0 | -# --------- --------- --------- -# ns1 ns2 ns3 -# -# Test modules: -# XDP modes: generic, native, native + egress_prog -# -# Test cases: -# ARP: Testing BPF_F_BROADCAST, the ingress interface also should receive -# the redirects. -# ns1 -> gw: ns1, ns2, ns3, should receive the arp request -# IPv4: Testing BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS, the ingress -# interface should not receive the redirects. -# ns1 -> gw: ns1 should not receive, ns2, ns3 should receive redirects. -# IPv6: Testing none flag, all the pkts should be redirected back -# ping test: ns1 -> ns2 (block), echo requests will be redirect back -# egress_prog: -# all src mac should be egress interface's mac - -# netns numbers -NUM=3 -IFACES="" -DRV_MODE="xdpgeneric xdpdrv xdpegress" -PASS=0 -FAIL=0 -LOG_DIR=$(mktemp -d) -declare -a NS -NS[0]="ns0-$(mktemp -u XXXXXX)" -NS[1]="ns1-$(mktemp -u XXXXXX)" -NS[2]="ns2-$(mktemp -u XXXXXX)" -NS[3]="ns3-$(mktemp -u XXXXXX)" - -test_pass() -{ - echo "Pass: $@" - PASS=$((PASS + 1)) -} - -test_fail() -{ - echo "fail: $@" - FAIL=$((FAIL + 1)) -} - -clean_up() -{ - for i in $(seq 0 $NUM); do - ip netns del ${NS[$i]} 2> /dev/null - done -} - -# Kselftest framework requirement - SKIP code is 4. -check_env() -{ - ip link set dev lo xdpgeneric off &>/dev/null - if [ $? -ne 0 ];then - echo "selftests: [SKIP] Could not run test without the ip xdpgeneric support" - exit 4 - fi - - which tcpdump &>/dev/null - if [ $? 
-ne 0 ];then - echo "selftests: [SKIP] Could not run test without tcpdump" - exit 4 - fi -} - -setup_ns() -{ - local mode=$1 - IFACES="" - - if [ "$mode" = "xdpegress" ]; then - mode="xdpdrv" - fi - - ip netns add ${NS[0]} - for i in $(seq $NUM); do - ip netns add ${NS[$i]} - ip -n ${NS[$i]} link add veth0 type veth peer name veth$i netns ${NS[0]} - ip -n ${NS[$i]} link set veth0 up - ip -n ${NS[0]} link set veth$i up - - ip -n ${NS[$i]} addr add 192.0.2.$i/24 dev veth0 - ip -n ${NS[$i]} addr add 2001:db8::$i/64 dev veth0 - # Add a neigh entry for IPv4 ping test - ip -n ${NS[$i]} neigh add 192.0.2.253 lladdr 00:00:00:00:00:01 dev veth0 - ip -n ${NS[$i]} link set veth0 $mode obj \ - xdp_dummy.bpf.o sec xdp &> /dev/null || \ - { test_fail "Unable to load dummy xdp" && exit 1; } - IFACES="$IFACES veth$i" - veth_mac[$i]=$(ip -n ${NS[0]} link show veth$i | awk '/link\/ether/ {print $2}') - done -} - -do_egress_tests() -{ - local mode=$1 - - # mac test - ip netns exec ${NS[2]} tcpdump -e -i veth0 -nn -l -e &> ${LOG_DIR}/mac_ns1-2_${mode}.log & - ip netns exec ${NS[3]} tcpdump -e -i veth0 -nn -l -e &> ${LOG_DIR}/mac_ns1-3_${mode}.log & - sleep 0.5 - ip netns exec ${NS[1]} ping 192.0.2.254 -i 0.1 -c 4 &> /dev/null - sleep 0.5 - pkill tcpdump - - # mac check - grep -q "${veth_mac[2]} > ff:ff:ff:ff:ff:ff" ${LOG_DIR}/mac_ns1-2_${mode}.log && \ - test_pass "$mode mac ns1-2" || test_fail "$mode mac ns1-2" - grep -q "${veth_mac[3]} > ff:ff:ff:ff:ff:ff" ${LOG_DIR}/mac_ns1-3_${mode}.log && \ - test_pass "$mode mac ns1-3" || test_fail "$mode mac ns1-3" -} - -do_ping_tests() -{ - local mode=$1 - - # ping6 test: echo request should be redirect back to itself, not others - ip netns exec ${NS[1]} ip neigh add 2001:db8::2 dev veth0 lladdr 00:00:00:00:00:02 - - ip netns exec ${NS[1]} tcpdump -i veth0 -nn -l -e &> ${LOG_DIR}/ns1-1_${mode}.log & - ip netns exec ${NS[2]} tcpdump -i veth0 -nn -l -e &> ${LOG_DIR}/ns1-2_${mode}.log & - ip netns exec ${NS[3]} tcpdump -i veth0 -nn -l -e &> ${LOG_DIR}/ns1-3_${mode}.log & - sleep 0.5 - # ARP test - ip netns exec ${NS[1]} arping -q -c 2 -I veth0 192.0.2.254 - # IPv4 test - ip netns exec ${NS[1]} ping 192.0.2.253 -i 0.1 -c 4 &> /dev/null - # IPv6 test - ip netns exec ${NS[1]} ping6 2001:db8::2 -i 0.1 -c 2 &> /dev/null - sleep 0.5 - pkill tcpdump - - # All netns should receive the redirect arp requests - [ $(grep -cF "who-has 192.0.2.254" ${LOG_DIR}/ns1-1_${mode}.log) -eq 4 ] && \ - test_pass "$mode arp(F_BROADCAST) ns1-1" || \ - test_fail "$mode arp(F_BROADCAST) ns1-1" - [ $(grep -cF "who-has 192.0.2.254" ${LOG_DIR}/ns1-2_${mode}.log) -eq 2 ] && \ - test_pass "$mode arp(F_BROADCAST) ns1-2" || \ - test_fail "$mode arp(F_BROADCAST) ns1-2" - [ $(grep -cF "who-has 192.0.2.254" ${LOG_DIR}/ns1-3_${mode}.log) -eq 2 ] && \ - test_pass "$mode arp(F_BROADCAST) ns1-3" || \ - test_fail "$mode arp(F_BROADCAST) ns1-3" - - # ns1 should not receive the redirect echo request, others should - [ $(grep -c "ICMP echo request" ${LOG_DIR}/ns1-1_${mode}.log) -eq 4 ] && \ - test_pass "$mode IPv4 (F_BROADCAST|F_EXCLUDE_INGRESS) ns1-1" || \ - test_fail "$mode IPv4 (F_BROADCAST|F_EXCLUDE_INGRESS) ns1-1" - [ $(grep -c "ICMP echo request" ${LOG_DIR}/ns1-2_${mode}.log) -eq 4 ] && \ - test_pass "$mode IPv4 (F_BROADCAST|F_EXCLUDE_INGRESS) ns1-2" || \ - test_fail "$mode IPv4 (F_BROADCAST|F_EXCLUDE_INGRESS) ns1-2" - [ $(grep -c "ICMP echo request" ${LOG_DIR}/ns1-3_${mode}.log) -eq 4 ] && \ - test_pass "$mode IPv4 (F_BROADCAST|F_EXCLUDE_INGRESS) ns1-3" || \ - test_fail "$mode IPv4 
(F_BROADCAST|F_EXCLUDE_INGRESS) ns1-3" - - # ns1 should receive the echo request, ns2 should not - [ $(grep -c "ICMP6, echo request" ${LOG_DIR}/ns1-1_${mode}.log) -eq 4 ] && \ - test_pass "$mode IPv6 (no flags) ns1-1" || \ - test_fail "$mode IPv6 (no flags) ns1-1" - [ $(grep -c "ICMP6, echo request" ${LOG_DIR}/ns1-2_${mode}.log) -eq 0 ] && \ - test_pass "$mode IPv6 (no flags) ns1-2" || \ - test_fail "$mode IPv6 (no flags) ns1-2" -} - -do_tests() -{ - local mode=$1 - local drv_p - - case ${mode} in - xdpdrv) drv_p="-N";; - xdpegress) drv_p="-X";; - xdpgeneric) drv_p="-S";; - esac - - ip netns exec ${NS[0]} ./xdp_redirect_multi $drv_p $IFACES &> ${LOG_DIR}/xdp_redirect_${mode}.log & - xdp_pid=$! - sleep 1 - if ! ps -p $xdp_pid > /dev/null; then - test_fail "$mode xdp_redirect_multi start failed" - return 1 - fi - - if [ "$mode" = "xdpegress" ]; then - do_egress_tests $mode - else - do_ping_tests $mode - fi - - kill $xdp_pid -} - -check_env - -trap clean_up EXIT - -for mode in ${DRV_MODE}; do - setup_ns $mode - do_tests $mode - clean_up -done -rm -rf ${LOG_DIR} - -echo "Summary: PASS $PASS, FAIL $FAIL" -[ $FAIL -eq 0 ] && exit 0 || exit 1 diff --git a/tools/testing/selftests/bpf/test_xdp_vlan.sh b/tools/testing/selftests/bpf/test_xdp_vlan.sh deleted file mode 100755 index fbcaa9f0120b..000000000000 --- a/tools/testing/selftests/bpf/test_xdp_vlan.sh +++ /dev/null @@ -1,233 +0,0 @@ -#!/bin/bash -# SPDX-License-Identifier: GPL-2.0 -# Author: Jesper Dangaard Brouer <hawk@kernel.org> - -# Kselftest framework requirement - SKIP code is 4. -readonly KSFT_SKIP=4 -readonly NS1="ns1-$(mktemp -u XXXXXX)" -readonly NS2="ns2-$(mktemp -u XXXXXX)" - -# Allow wrapper scripts to name test -if [ -z "$TESTNAME" ]; then - TESTNAME=xdp_vlan -fi - -# Default XDP mode -XDP_MODE=xdpgeneric - -usage() { - echo "Testing XDP + TC eBPF VLAN manipulations: $TESTNAME" - echo "" - echo "Usage: $0 [-vfh]" - echo " -v | --verbose : Verbose" - echo " --flush : Flush before starting (e.g. after --interactive)" - echo " --interactive : Keep netns setup running after test-run" - echo " --mode=XXX : Choose XDP mode (xdp | xdpgeneric | xdpdrv)" - echo "" -} - -valid_xdp_mode() -{ - local mode=$1 - - case "$mode" in - xdpgeneric | xdpdrv | xdp) - return 0 - ;; - *) - return 1 - esac -} - -cleanup() -{ - local status=$? - - if [ "$status" = "0" ]; then - echo "selftests: $TESTNAME [PASS]"; - else - echo "selftests: $TESTNAME [FAILED]"; - fi - - if [ -n "$INTERACTIVE" ]; then - echo "Namespace setup still active explore with:" - echo " ip netns exec ${NS1} bash" - echo " ip netns exec ${NS2} bash" - exit $status - fi - - set +e - ip link del veth1 2> /dev/null - ip netns del ${NS1} 2> /dev/null - ip netns del ${NS2} 2> /dev/null -} - -# Using external program "getopt" to get --long-options -OPTIONS=$(getopt -o hvfi: \ - --long verbose,flush,help,interactive,debug,mode: -- "$@") -if (( $? != 0 )); then - usage - echo "selftests: $TESTNAME [FAILED] Error calling getopt, unknown option?" 
-	exit 2
-fi
-eval set -- "$OPTIONS"
-
-##  --- Parse command line arguments / parameters ---
-while true; do
-	case "$1" in
-	    -v | --verbose)
-		export VERBOSE=yes
-		shift
-		;;
-	    -i | --interactive | --debug )
-		INTERACTIVE=yes
-		shift
-		;;
-	    -f | --flush )
-		cleanup
-		shift
-		;;
-	    --mode )
-		shift
-		XDP_MODE=$1
-		shift
-		;;
-	    -- )
-		shift
-		break
-		;;
-	    -h | --help )
-		usage;
-		echo "selftests: $TESTNAME [SKIP] usage help info requested"
-		exit $KSFT_SKIP
-		;;
-	    * )
-		shift
-		break
-		;;
-	esac
-done
-
-if [ "$EUID" -ne 0 ]; then
-	echo "selftests: $TESTNAME [FAILED] need root privileges"
-	exit 1
-fi
-
-valid_xdp_mode $XDP_MODE
-if [ $? -ne 0 ]; then
-	echo "selftests: $TESTNAME [FAILED] unknown XDP mode ($XDP_MODE)"
-	exit 1
-fi
-
-ip link set dev lo xdpgeneric off 2>/dev/null > /dev/null
-if [ $? -ne 0 ]; then
-	echo "selftests: $TESTNAME [SKIP] need ip xdp support"
-	exit $KSFT_SKIP
-fi
-
-# Interactive mode likely requires us to clean up the netns
-if [ -n "$INTERACTIVE" ]; then
-	ip link del veth1 2> /dev/null
-	ip netns del ${NS1} 2> /dev/null
-	ip netns del ${NS2} 2> /dev/null
-fi
-
-# Exit on failure
-set -e
-
-# Some shell-tools dependencies
-which ip > /dev/null
-which tc > /dev/null
-which ethtool > /dev/null
-
-# Make the rest of the shell verbose, showing comments as doc/info
-if [ -n "$VERBOSE" ]; then
-    set -v
-fi
-
-# Create two namespaces
-ip netns add ${NS1}
-ip netns add ${NS2}
-
-# Run cleanup if failing or on kill
-trap cleanup 0 2 3 6 9
-
-# Create veth pair
-ip link add veth1 type veth peer name veth2
-
-# Move veth1 and veth2 into the respective namespaces
-ip link set veth1 netns ${NS1}
-ip link set veth2 netns ${NS2}
-
-# NOTICE: XDP requires the VLAN header inside the packet payload
-#  - Thus, disable the VLAN offloading driver features
-#  - For veth, remember the TX side VLAN-offload as well
-#
-# Disable rx-vlan-offload (mostly needed on ns1)
-ip netns exec ${NS1} ethtool -K veth1 rxvlan off
-ip netns exec ${NS2} ethtool -K veth2 rxvlan off
-#
-# Disable tx-vlan-offload (mostly needed on ns2)
-ip netns exec ${NS2} ethtool -K veth2 txvlan off
-ip netns exec ${NS1} ethtool -K veth1 txvlan off
-
-export IPADDR1=100.64.41.1
-export IPADDR2=100.64.41.2
-
-# In ns1/veth1 add an IP-addr on the plain net_device
-ip netns exec ${NS1} ip addr add ${IPADDR1}/24 dev veth1
-ip netns exec ${NS1} ip link set veth1 up
-
-# In ns2/veth2 create a VLAN device
-export VLAN=4011
-export DEVNS2=veth2
-ip netns exec ${NS2} ip link add link $DEVNS2 name $DEVNS2.$VLAN type vlan id $VLAN
-ip netns exec ${NS2} ip addr add ${IPADDR2}/24 dev $DEVNS2.$VLAN
-ip netns exec ${NS2} ip link set $DEVNS2 up
-ip netns exec ${NS2} ip link set $DEVNS2.$VLAN up
-
-# Bring up lo in the netns (to avoid confusing people using --interactive)
-ip netns exec ${NS1} ip link set lo up
-ip netns exec ${NS2} ip link set lo up
-
-# At this point, the hosts cannot reach each other,
-# because ns2 is using VLAN tags on the packets.
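-# (veth1 in ns1 has no VLAN configured, so tagged frames arriving from
-#  ns2 are dropped before ICMP ever sees them.)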
-
-ip netns exec ${NS2} sh -c 'ping -W 1 -c 1 100.64.41.1 || echo "Success: First ping must fail"'
-
-
-# Now we can use the test_xdp_vlan.c program to pop/push these VLAN tags
-# ----------------------------------------------------------------------
-# In ns1: on ingress, use XDP to remove VLAN tags
-export DEVNS1=veth1
-export BPF_FILE=test_xdp_vlan.bpf.o
-
-# First test: Remove VLAN by setting VLAN ID 0, using "xdp_vlan_change"
-export XDP_PROG=xdp_vlan_change
-ip netns exec ${NS1} ip link set $DEVNS1 $XDP_MODE object $BPF_FILE section $XDP_PROG
-
-# In ns1: on egress, use TC to add back VLAN tag 4011
-#  (del cmd)
-#  tc qdisc del dev $DEVNS1 clsact 2> /dev/null
-#
-ip netns exec ${NS1} tc qdisc add dev $DEVNS1 clsact
-ip netns exec ${NS1} tc filter add dev $DEVNS1 egress \
-  prio 1 handle 1 bpf da obj $BPF_FILE sec tc_vlan_push
-
-# Now the namespaces can reach each other, test with ping:
-ip netns exec ${NS2} ping -i 0.2 -W 2 -c 2 $IPADDR1
-ip netns exec ${NS1} ping -i 0.2 -W 2 -c 2 $IPADDR2
-
-# Second test: Replace the XDP prog with one that fully removes the VLAN header
-#
-# Catch a kernel bug in generic-XDP that didn't allow us to remove a
-# VLAN header, because skb->protocol still contained the VLAN
-# ETH_P_8021Q indication, and this caused our changes to be overwritten.
-#
-export XDP_PROG=xdp_vlan_remove_outer2
-ip netns exec ${NS1} ip link set $DEVNS1 $XDP_MODE off
-ip netns exec ${NS1} ip link set $DEVNS1 $XDP_MODE object $BPF_FILE section $XDP_PROG
-
-# Now the namespaces should still be able to reach each other, test with ping:
-ip netns exec ${NS2} ping -i 0.2 -W 2 -c 2 $IPADDR1
-ip netns exec ${NS1} ping -i 0.2 -W 2 -c 2 $IPADDR2
diff --git a/tools/testing/selftests/bpf/test_xdp_vlan_mode_generic.sh b/tools/testing/selftests/bpf/test_xdp_vlan_mode_generic.sh
deleted file mode 100755
index c515326d6d59..000000000000
--- a/tools/testing/selftests/bpf/test_xdp_vlan_mode_generic.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-
-# Exit on failure
-set -e
-
-# Wrapper script to test generic-XDP
-export TESTNAME=xdp_vlan_mode_generic
-./test_xdp_vlan.sh --mode=xdpgeneric
diff --git a/tools/testing/selftests/bpf/test_xdp_vlan_mode_native.sh b/tools/testing/selftests/bpf/test_xdp_vlan_mode_native.sh
deleted file mode 100755
index 5cf7ce1f16c1..000000000000
--- a/tools/testing/selftests/bpf/test_xdp_vlan_mode_native.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-
-# Exit on failure
-set -e
-
-# Wrapper script to test native-XDP
-export TESTNAME=xdp_vlan_mode_native
-./test_xdp_vlan.sh --mode=xdpdrv
diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
index 2d742fdac6b9..81943c6254e6 100644
--- a/tools/testing/selftests/bpf/trace_helpers.c
+++ b/tools/testing/selftests/bpf/trace_helpers.c
@@ -293,6 +293,10 @@ static int procmap_query(int fd, const void *addr, __u32 query_flags, size_t *st
 	return 0;
 }
 #else
+# ifndef PROCMAP_QUERY_VMA_EXECUTABLE
+# define PROCMAP_QUERY_VMA_EXECUTABLE 0x04
+# endif
+
 static int procmap_query(int fd, const void *addr, __u32 query_flags, size_t *start, size_t *offset, int *flags)
 {
 	return -EOPNOTSUPP;
diff --git a/tools/testing/selftests/bpf/unpriv_helpers.c b/tools/testing/selftests/bpf/unpriv_helpers.c
index 220f6a963813..f997d7ec8fd0 100644
--- a/tools/testing/selftests/bpf/unpriv_helpers.c
+++ b/tools/testing/selftests/bpf/unpriv_helpers.c
@@ -1,15 +1,76 @@
 // SPDX-License-Identifier: GPL-2.0-only
+#include <errno.h>
+#include <limits.h>
 #include <stdbool.h>
 #include <stdlib.h>
 #include <stdio.h>
 #include <string.h>
+#include <sys/utsname.h>
 #include <unistd.h>
 #include <fcntl.h>
+#include <zlib.h>
 
 #include "unpriv_helpers.h"
 
-static bool get_mitigations_off(void)
+static gzFile open_config(void)
+{
+	struct utsname uts;
+	char buf[PATH_MAX];
+	gzFile config;
+
+	if (uname(&uts)) {
+		perror("uname");
+		goto config_gz;
+	}
+
+	snprintf(buf, sizeof(buf), "/boot/config-%s", uts.release);
+	config = gzopen(buf, "rb");
+	if (config)
+		return config;
+	fprintf(stderr, "gzopen %s: %s\n", buf, strerror(errno));
+
+config_gz:
+	config = gzopen("/proc/config.gz", "rb");
+	if (!config)
+		perror("gzopen /proc/config.gz");
+	return config;
+}
+
+static int config_contains(const char *pat)
+{
+	const char *msg;
+	char buf[1024];
+	gzFile config;
+	int n, err;
+
+	config = open_config();
+	if (!config)
+		return -1;
+
+	for (;;) {
+		if (!gzgets(config, buf, sizeof(buf))) {
+			msg = gzerror(config, &err);
+			if (err == Z_ERRNO)
+				perror("gzgets /proc/config.gz");
+			else if (err != Z_OK)
+				fprintf(stderr, "gzgets /proc/config.gz: %s\n", msg);
+			gzclose(config);
+			/* a clean EOF simply means the pattern was not found */
+			return err == Z_OK ? 0 : -1;
+		}
+		n = strlen(buf);
+		if (buf[n - 1] == '\n')
+			buf[n - 1] = 0;
+		if (strcmp(buf, pat) == 0) {
+			gzclose(config);
+			return 1;
+		}
+	}
+}
+
+static bool cmdline_contains(const char *pat)
 {
 	char cmdline[4096], *c;
 	int fd, ret = false;
@@ -27,7 +88,7 @@ static bool get_mitigations_off(void)
 	cmdline[sizeof(cmdline) - 1] = '\0';
 
 	for (c = strtok(cmdline, " \n"); c; c = strtok(NULL, " \n")) {
-		if (strncmp(c, "mitigations=off", strlen(c)))
+		if (strncmp(c, pat, strlen(c)))
 			continue;
 		ret = true;
 		break;
@@ -37,8 +98,21 @@ out:
 	return ret;
 }
 
+static int get_mitigations_off(void)
+{
+	int enabled_in_config;
+
+	if (cmdline_contains("mitigations=off"))
+		return 1;
+	enabled_in_config = config_contains("CONFIG_CPU_MITIGATIONS=y");
+	if (enabled_in_config < 0)
+		return -1;
+	return !enabled_in_config;
+}
+
 bool get_unpriv_disabled(void)
 {
+	int mitigations_off;
 	bool disabled;
 	char buf[2];
 	FILE *fd;
@@ -52,5 +126,19 @@ bool get_unpriv_disabled(void)
 		disabled = true;
 	}
 
-	return disabled ? true : get_mitigations_off();
+	if (disabled)
+		return true;
+
+	/*
+	 * Some unpriv tests rely on Spectre mitigations being on.
+	 * If mitigations are off, or their status cannot be determined,
+	 * assume that unpriv tests are disabled.
+	 */
+	mitigations_off = get_mitigations_off();
+	if (mitigations_off < 0) {
+		fprintf(stderr,
+			"Can't determine if mitigations are enabled, disabling unpriv tests.\n");
+		return true;
+	}
+	return mitigations_off;
 }
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
index 7afc2619ab14..f3492efc8834 100644
--- a/tools/testing/selftests/bpf/verifier/calls.c
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -2252,7 +2252,7 @@
 	BPF_EXIT_INSN(),
 	},
 	.fixup_map_hash_48b = { 7 },
-	.errstr_unpriv = "invalid indirect read from stack R2 off -8+0 size 8",
+	.errstr_unpriv = "invalid read from stack R2 off -8+0 size 8",
 	.result_unpriv = REJECT,
 	/* in privileged mode reads from uninitialized stack locations are permitted */
 	.result = ACCEPT,
@@ -2409,3 +2409,27 @@
 	.errstr_unpriv = "",
 	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
 },
+{
+	"calls: several args with ref_obj_id",
+	.insns = {
+	/* Reserve at least sizeof(struct iphdr) bytes in the ring buffer.
+	 * With a smaller size, the verifier would reject the call to
+	 * bpf_tcp_raw_gen_syncookie_ipv4 before we can reach the
+	 * ref_obj_id error.
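+	 * sizeof(struct iphdr) is 20, hence the immediate loaded into R2
+	 * below; R1 and R2 are then both pointed at the same ringbuf
+	 * reservation, which is how two arguments end up sharing one
+	 * ref_obj_id.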
+ */ + BPF_MOV64_IMM(BPF_REG_2, 20), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve), + /* if r0 == 0 goto <exit> */ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tcp_raw_gen_syncookie_ipv4), + BPF_EXIT_INSN(), + }, + .fixup_map_ringbuf = { 2 }, + .result = REJECT, + .errstr = "more than one arg with ref_obj_id", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, diff --git a/tools/testing/selftests/bpf/verifier/dead_code.c b/tools/testing/selftests/bpf/verifier/dead_code.c index ee454327e5c6..77207b498c6f 100644 --- a/tools/testing/selftests/bpf/verifier/dead_code.c +++ b/tools/testing/selftests/bpf/verifier/dead_code.c @@ -2,14 +2,13 @@ "dead code: start", .insns = { BPF_JMP_IMM(BPF_JA, 0, 0, 2), + /* unpriv: nospec (inserted to prevent "R9 !read_ok") */ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), BPF_JMP_IMM(BPF_JA, 0, 0, 2), BPF_MOV64_IMM(BPF_REG_0, 7), BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, -4), BPF_EXIT_INSN(), }, - .errstr_unpriv = "R9 !read_ok", - .result_unpriv = REJECT, .result = ACCEPT, .retval = 7, }, diff --git a/tools/testing/selftests/bpf/verifier/jmp32.c b/tools/testing/selftests/bpf/verifier/jmp32.c index 43776f6f92f4..91d83e9cb148 100644 --- a/tools/testing/selftests/bpf/verifier/jmp32.c +++ b/tools/testing/selftests/bpf/verifier/jmp32.c @@ -84,11 +84,10 @@ BPF_JMP32_IMM(BPF_JSET, BPF_REG_7, 0x10, 1), BPF_EXIT_INSN(), BPF_JMP32_IMM(BPF_JGE, BPF_REG_7, 0x10, 1), + /* unpriv: nospec (inserted to prevent "R9 !read_ok") */ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), BPF_EXIT_INSN(), }, - .errstr_unpriv = "R9 !read_ok", - .result_unpriv = REJECT, .result = ACCEPT, }, { @@ -149,11 +148,10 @@ BPF_JMP32_IMM(BPF_JEQ, BPF_REG_7, 0x10, 1), BPF_EXIT_INSN(), BPF_JMP32_IMM(BPF_JSGE, BPF_REG_7, 0xf, 1), + /* unpriv: nospec (inserted to prevent "R9 !read_ok") */ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), BPF_EXIT_INSN(), }, - .errstr_unpriv = "R9 !read_ok", - .result_unpriv = REJECT, .result = ACCEPT, }, { @@ -214,11 +212,10 @@ BPF_JMP32_IMM(BPF_JNE, BPF_REG_7, 0x10, 1), BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x10, 1), BPF_EXIT_INSN(), + /* unpriv: nospec (inserted to prevent "R9 !read_ok") */ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), BPF_EXIT_INSN(), }, - .errstr_unpriv = "R9 !read_ok", - .result_unpriv = REJECT, .result = ACCEPT, }, { @@ -283,11 +280,10 @@ BPF_JMP32_REG(BPF_JGE, BPF_REG_7, BPF_REG_8, 1), BPF_EXIT_INSN(), BPF_JMP32_IMM(BPF_JGE, BPF_REG_7, 0x7ffffff0, 1), + /* unpriv: nospec (inserted to prevent "R0 invalid mem access 'scalar'") */ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr_unpriv = "R0 invalid mem access 'scalar'", - .result_unpriv = REJECT, .result = ACCEPT, .retval = 2, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, @@ -354,11 +350,10 @@ BPF_JMP32_REG(BPF_JGT, BPF_REG_7, BPF_REG_8, 1), BPF_EXIT_INSN(), BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 0x7ffffff0, 1), + /* unpriv: nospec (inserted to prevent "R0 invalid mem access 'scalar'") */ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr_unpriv = "R0 invalid mem access 'scalar'", - .result_unpriv = REJECT, .result = ACCEPT, .retval = 2, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, @@ -425,11 +420,10 @@ BPF_JMP32_REG(BPF_JLE, BPF_REG_7, BPF_REG_8, 1), BPF_EXIT_INSN(), BPF_JMP32_IMM(BPF_JLE, BPF_REG_7, 0x7ffffff0, 1), + /* unpriv: nospec (inserted to prevent "R0 invalid mem access 
'scalar'") */ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr_unpriv = "R0 invalid mem access 'scalar'", - .result_unpriv = REJECT, .result = ACCEPT, .retval = 2, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, @@ -496,11 +490,10 @@ BPF_JMP32_REG(BPF_JLT, BPF_REG_7, BPF_REG_8, 1), BPF_EXIT_INSN(), BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0x7ffffff0, 1), + /* unpriv: nospec (inserted to prevent "R0 invalid mem access 'scalar'") */ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr_unpriv = "R0 invalid mem access 'scalar'", - .result_unpriv = REJECT, .result = ACCEPT, .retval = 2, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, @@ -567,11 +560,10 @@ BPF_JMP32_REG(BPF_JSGE, BPF_REG_7, BPF_REG_8, 1), BPF_EXIT_INSN(), BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0x7ffffff0, 1), + /* unpriv: nospec (inserted to prevent "R0 invalid mem access 'scalar'") */ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr_unpriv = "R0 invalid mem access 'scalar'", - .result_unpriv = REJECT, .result = ACCEPT, .retval = 2, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, @@ -638,11 +630,10 @@ BPF_JMP32_REG(BPF_JSGT, BPF_REG_7, BPF_REG_8, 1), BPF_EXIT_INSN(), BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, -2, 1), + /* unpriv: nospec (inserted to prevent "R0 invalid mem access 'scalar'") */ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr_unpriv = "R0 invalid mem access 'scalar'", - .result_unpriv = REJECT, .result = ACCEPT, .retval = 2, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, @@ -709,11 +700,10 @@ BPF_JMP32_REG(BPF_JSLE, BPF_REG_7, BPF_REG_8, 1), BPF_EXIT_INSN(), BPF_JMP_IMM(BPF_JSLE, BPF_REG_7, 0x7ffffff0, 1), + /* unpriv: nospec (inserted to prevent "R0 invalid mem access 'scalar'") */ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr_unpriv = "R0 invalid mem access 'scalar'", - .result_unpriv = REJECT, .result = ACCEPT, .retval = 2, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, @@ -780,11 +770,10 @@ BPF_JMP32_REG(BPF_JSLT, BPF_REG_7, BPF_REG_8, 1), BPF_EXIT_INSN(), BPF_JMP32_IMM(BPF_JSLT, BPF_REG_7, -1, 1), + /* unpriv: nospec (inserted to prevent "R0 invalid mem access 'scalar'") */ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr_unpriv = "R0 invalid mem access 'scalar'", - .result_unpriv = REJECT, .result = ACCEPT, .retval = 2, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, diff --git a/tools/testing/selftests/bpf/verifier/jset.c b/tools/testing/selftests/bpf/verifier/jset.c index 11fc68da735e..e901eefd774a 100644 --- a/tools/testing/selftests/bpf/verifier/jset.c +++ b/tools/testing/selftests/bpf/verifier/jset.c @@ -78,12 +78,11 @@ .insns = { BPF_MOV64_IMM(BPF_REG_0, 1), BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1), + /* unpriv: nospec (inserted to prevent "R9 !read_ok") */ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), BPF_EXIT_INSN(), }, .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, - .errstr_unpriv = "R9 !read_ok", - .result_unpriv = REJECT, .retval = 1, .result = ACCEPT, }, @@ -136,13 +135,12 @@ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), BPF_ALU64_IMM(BPF_OR, BPF_REG_0, 2), BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 3, 1), + /* unpriv: nospec (inserted to prevent "R9 !read_ok") */ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, - .errstr_unpriv = "R9 !read_ok", - .result_unpriv = REJECT, .result = ACCEPT, }, { @@ -154,16 +152,16 @@ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xff), BPF_JMP_IMM(BPF_JSET, 
BPF_REG_1, 0xf0, 3), BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 0x10, 1), + /* unpriv: nospec (inserted to prevent "R9 !read_ok") */ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), BPF_EXIT_INSN(), BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0x10, 1), BPF_EXIT_INSN(), BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0x10, 1), + /* unpriv: nospec (inserted to prevent "R9 !read_ok") */ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), BPF_EXIT_INSN(), }, .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, - .errstr_unpriv = "R9 !read_ok", - .result_unpriv = REJECT, .result = ACCEPT, }, diff --git a/tools/testing/selftests/bpf/verifier/map_kptr.c b/tools/testing/selftests/bpf/verifier/map_kptr.c index f420c0312aa0..4b39f8472f9b 100644 --- a/tools/testing/selftests/bpf/verifier/map_kptr.c +++ b/tools/testing/selftests/bpf/verifier/map_kptr.c @@ -373,7 +373,7 @@ .prog_type = BPF_PROG_TYPE_SCHED_CLS, .fixup_map_kptr = { 1 }, .result = REJECT, - .errstr = "Unreleased reference id=5 alloc_insn=20", + .errstr = "Unreleased reference id=4 alloc_insn=20", .fixup_kfunc_btf_id = { { "bpf_kfunc_call_test_acquire", 15 }, } diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c index e12ef953fba8..d532dd82a3a8 100644 --- a/tools/testing/selftests/bpf/veristat.c +++ b/tools/testing/selftests/bpf/veristat.c @@ -3,6 +3,7 @@ #define _GNU_SOURCE #include <argp.h> #include <libgen.h> +#include <ctype.h> #include <string.h> #include <stdlib.h> #include <sched.h> @@ -21,11 +22,21 @@ #include <gelf.h> #include <float.h> #include <math.h> +#include <limits.h> +#include <assert.h> #ifndef ARRAY_SIZE #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) #endif +#ifndef max +#define max(a, b) ((a) > (b) ? (a) : (b)) +#endif + +#ifndef min +#define min(a, b) ((a) < (b) ? (a) : (b)) +#endif + enum stat_id { VERDICT, DURATION, @@ -34,6 +45,12 @@ enum stat_id { PEAK_STATES, MAX_STATES_PER_INSN, MARK_READ_MAX_LEN, + SIZE, + JITED_SIZE, + STACK, + PROG_TYPE, + ATTACH_TYPE, + MEMORY_PEAK, FILE_NAME, PROG_NAME, @@ -140,6 +157,30 @@ struct filter { bool abs; }; +struct rvalue { + enum { INTEGRAL, ENUMERATOR } type; + union { + long long ivalue; + char *svalue; + }; +}; + +struct field_access { + enum { FIELD_NAME, ARRAY_INDEX } type; + union { + char *name; + struct rvalue index; + }; +}; + +struct var_preset { + struct field_access *atoms; + int atom_count; + char *full_name; + struct rvalue value; + bool applied; +}; + static struct env { char **filenames; int filename_cnt; @@ -181,6 +222,11 @@ static struct env { int progs_processed; int progs_skipped; int top_src_lines; + struct var_preset *presets; + int npresets; + char orig_cgroup[PATH_MAX]; + char stat_cgroup[PATH_MAX]; + int memory_peak_fd; } env; static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va_list args) @@ -192,6 +238,22 @@ static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va return vfprintf(stderr, format, args); } +#define log_errno(fmt, ...) log_errno_aux(__FILE__, __LINE__, fmt, ##__VA_ARGS__) + +__attribute__((format(printf, 3, 4))) +static int log_errno_aux(const char *file, int line, const char *fmt, ...) 
+{ + int err = -errno; + va_list ap; + + va_start(ap, fmt); + fprintf(stderr, "%s:%d: ", file, line); + vfprintf(stderr, fmt, ap); + fprintf(stderr, " failed with error '%s'.\n", strerror(errno)); + va_end(ap); + return err; +} + #ifndef VERISTAT_VERSION #define VERISTAT_VERSION "<kernel>" #endif @@ -203,7 +265,8 @@ const char argp_program_doc[] = "\n" "USAGE: veristat <obj-file> [<obj-file>...]\n" " OR: veristat -C <baseline.csv> <comparison.csv>\n" -" OR: veristat -R <results.csv>\n"; +" OR: veristat -R <results.csv>\n" +" OR: veristat -vl2 <to_analyze.bpf.o>\n"; enum { OPT_LOG_FIXED = 1000, @@ -215,7 +278,7 @@ static const struct argp_option opts[] = { { "version", 'V', NULL, 0, "Print version" }, { "verbose", 'v', NULL, 0, "Verbose mode" }, { "debug", 'd', NULL, 0, "Debug mode (turns on libbpf debug logging)" }, - { "log-level", 'l', "LEVEL", 0, "Verifier log level (default 0 for normal mode, 1 for verbose mode)" }, + { "log-level", 'l', "LEVEL", 0, "Verifier log level (default 0 for normal mode, 1 for verbose mode, 2 for full verification log)" }, { "log-fixed", OPT_LOG_FIXED, NULL, 0, "Disable verifier log rotation" }, { "log-size", OPT_LOG_SIZE, "BYTES", 0, "Customize verifier log size (default to 16MB)" }, { "top-n", 'n', "N", 0, "Emit only up to first N results." }, @@ -231,16 +294,20 @@ static const struct argp_option opts[] = { { "test-reg-invariants", 'r', NULL, 0, "Force BPF verifier failure on register invariant violation (BPF_F_TEST_REG_INVARIANTS program flag)" }, { "top-src-lines", 'S', "N", 0, "Emit N most frequent source code lines" }, + { "set-global-vars", 'G', "GLOBAL", 0, "Set global variables provided in the expression, for example \"var1 = 1\"" }, {}, }; static int parse_stats(const char *stats_str, struct stat_specs *specs); static int append_filter(struct filter **filters, int *cnt, const char *str); static int append_filter_file(const char *path); +static int append_var_preset(struct var_preset **presets, int *cnt, const char *expr); +static int append_var_preset_file(const char *filename); +static int append_file(const char *path); +static int append_file_from_file(const char *path); static error_t parse_arg(int key, char *arg, struct argp_state *state) { - void *tmp; int err; switch (key) { @@ -312,6 +379,7 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) fprintf(stderr, "invalid top N specifier: %s\n", arg); argp_usage(state); } + break; case 'C': env.comparison_mode = true; break; @@ -338,15 +406,26 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) argp_usage(state); } break; + case 'G': { + if (arg[0] == '@') + err = append_var_preset_file(arg + 1); + else + err = append_var_preset(&env.presets, &env.npresets, arg); + if (err) { + fprintf(stderr, "Failed to parse global variable presets: %s\n", arg); + return err; + } + break; + } case ARGP_KEY_ARG: - tmp = realloc(env.filenames, (env.filename_cnt + 1) * sizeof(*env.filenames)); - if (!tmp) - return -ENOMEM; - env.filenames = tmp; - env.filenames[env.filename_cnt] = strdup(arg); - if (!env.filenames[env.filename_cnt]) - return -ENOMEM; - env.filename_cnt++; + if (arg[0] == '@') + err = append_file_from_file(arg + 1); + else + err = append_file(arg); + if (err) { + fprintf(stderr, "Failed to collect BPF object files: %d\n", err); + return err; + } break; default: return ARGP_ERR_UNKNOWN; @@ -617,7 +696,7 @@ static int append_filter_file(const char *path) f = fopen(path, "r"); if (!f) { err = -errno; - fprintf(stderr, "Failed to open filters in '%s': %d\n", 
path, err); + fprintf(stderr, "Failed to open filters in '%s': %s\n", path, strerror(-err)); return err; } @@ -640,19 +719,64 @@ cleanup: } static const struct stat_specs default_output_spec = { - .spec_cnt = 7, + .spec_cnt = 8, .ids = { FILE_NAME, PROG_NAME, VERDICT, DURATION, - TOTAL_INSNS, TOTAL_STATES, PEAK_STATES, + TOTAL_INSNS, TOTAL_STATES, SIZE, JITED_SIZE }, }; +static int append_file(const char *path) +{ + void *tmp; + + tmp = realloc(env.filenames, (env.filename_cnt + 1) * sizeof(*env.filenames)); + if (!tmp) + return -ENOMEM; + env.filenames = tmp; + env.filenames[env.filename_cnt] = strdup(path); + if (!env.filenames[env.filename_cnt]) + return -ENOMEM; + env.filename_cnt++; + return 0; +} + +static int append_file_from_file(const char *path) +{ + char buf[1024]; + int err = 0; + FILE *f; + + f = fopen(path, "r"); + if (!f) { + err = -errno; + fprintf(stderr, "Failed to open object files list in '%s': %s\n", + path, strerror(errno)); + return err; + } + + while (fscanf(f, " %1023[^\n]\n", buf) == 1) { + /* lines starting with # are comments, skip them */ + if (buf[0] == '\0' || buf[0] == '#') + continue; + err = append_file(buf); + if (err) + goto cleanup; + } + +cleanup: + fclose(f); + return err; +} + static const struct stat_specs default_csv_output_spec = { - .spec_cnt = 9, + .spec_cnt = 15, .ids = { FILE_NAME, PROG_NAME, VERDICT, DURATION, TOTAL_INSNS, TOTAL_STATES, PEAK_STATES, MAX_STATES_PER_INSN, MARK_READ_MAX_LEN, + SIZE, JITED_SIZE, PROG_TYPE, ATTACH_TYPE, + STACK, MEMORY_PEAK, }, }; @@ -688,6 +812,12 @@ static struct stat_def { [PEAK_STATES] = { "Peak states", {"peak_states"}, }, [MAX_STATES_PER_INSN] = { "Max states per insn", {"max_states_per_insn"}, }, [MARK_READ_MAX_LEN] = { "Max mark read length", {"max_mark_read_len", "mark_read"}, }, + [SIZE] = { "Program size", {"prog_size"}, }, + [JITED_SIZE] = { "Jited size", {"prog_size_jited"}, }, + [STACK] = {"Stack depth", {"stack_depth", "stack"}, }, + [PROG_TYPE] = { "Program type", {"prog_type"}, }, + [ATTACH_TYPE] = { "Attach type", {"attach_type", }, }, + [MEMORY_PEAK] = { "Peak memory (MiB)", {"mem_peak", }, }, }; static bool parse_stat_id_var(const char *name, size_t len, int *id, @@ -761,6 +891,18 @@ static bool is_desc_sym(char c) return c == 'v' || c == 'V' || c == '.' || c == '!' || c == '_'; } +static char *rtrim(char *str) +{ + int i; + + for (i = strlen(str) - 1; i > 0; --i) { + if (!isspace(str[i])) + break; + str[i] = '\0'; + } + return str; +} + static int parse_stat(const char *stat_name, struct stat_specs *specs) { int id; @@ -835,7 +977,8 @@ static char verif_log_buf[64 * 1024]; static int parse_verif_log(char * const buf, size_t buf_sz, struct verif_stats *s) { const char *cur; - int pos, lines; + int pos, lines, sub_stack, cnt = 0; + char *state = NULL, *token, stack[512]; buf[buf_sz - 1] = '\0'; @@ -853,15 +996,22 @@ static int parse_verif_log(char * const buf, size_t buf_sz, struct verif_stats * if (1 == sscanf(cur, "verification time %ld usec\n", &s->stats[DURATION])) continue; - if (6 == sscanf(cur, "processed %ld insns (limit %*d) max_states_per_insn %ld total_states %ld peak_states %ld mark_read %ld", + if (5 == sscanf(cur, "processed %ld insns (limit %*d) max_states_per_insn %ld total_states %ld peak_states %ld mark_read %ld", &s->stats[TOTAL_INSNS], &s->stats[MAX_STATES_PER_INSN], &s->stats[TOTAL_STATES], &s->stats[PEAK_STATES], &s->stats[MARK_READ_MAX_LEN])) continue; - } + if (1 == sscanf(cur, "stack depth %511s", stack)) + continue; + } + while ((token = strtok_r(cnt++ ? 
NULL : stack, "+", &state))) { + if (sscanf(token, "%d", &sub_stack) == 0) + break; + s->stats[STACK] += sub_stack; + } return 0; } @@ -884,7 +1034,7 @@ static int line_cnt_cmp(const void *a, const void *b) const struct line_cnt *b_cnt = (const struct line_cnt *)b; if (a_cnt->cnt != b_cnt->cnt) - return a_cnt->cnt < b_cnt->cnt ? -1 : 1; + return a_cnt->cnt > b_cnt->cnt ? -1 : 1; return strcmp(a_cnt->line, b_cnt->line); } @@ -1032,6 +1182,41 @@ static int guess_prog_type_by_ctx_name(const char *ctx_name, return -ESRCH; } +/* Make sure only target program is referenced from struct_ops map, + * otherwise libbpf would automatically set autocreate for all + * referenced programs. + * See libbpf.c:bpf_object_adjust_struct_ops_autoload. + */ +static void mask_unrelated_struct_ops_progs(struct bpf_object *obj, + struct bpf_map *map, + struct bpf_program *prog) +{ + struct btf *btf = bpf_object__btf(obj); + const struct btf_type *t, *mt; + struct btf_member *m; + int i, moff; + size_t data_sz, ptr_sz = sizeof(void *); + void *data; + + t = btf__type_by_id(btf, bpf_map__btf_value_type_id(map)); + if (!btf_is_struct(t)) + return; + + data = bpf_map__initial_value(map, &data_sz); + for (i = 0; i < btf_vlen(t); i++) { + m = &btf_members(t)[i]; + mt = btf__type_by_id(btf, m->type); + if (!btf_is_ptr(mt)) + continue; + moff = m->offset / 8; + if (moff + ptr_sz > data_sz) + continue; + if (memcmp(data + moff, &prog, ptr_sz) == 0) + continue; + memset(data + moff, 0, ptr_sz); + } +} + static void fixup_obj(struct bpf_object *obj, struct bpf_program *prog, const char *filename) { struct bpf_map *map; @@ -1046,6 +1231,10 @@ static void fixup_obj(struct bpf_object *obj, struct bpf_program *prog, const ch case BPF_MAP_TYPE_TASK_STORAGE: case BPF_MAP_TYPE_INODE_STORAGE: case BPF_MAP_TYPE_CGROUP_STORAGE: + case BPF_MAP_TYPE_CGRP_STORAGE: + break; + case BPF_MAP_TYPE_STRUCT_OPS: + mask_unrelated_struct_ops_progs(obj, map, prog); break; default: if (bpf_map__max_entries(map) == 0) @@ -1095,13 +1284,13 @@ static void fixup_obj(struct bpf_object *obj, struct bpf_program *prog, const ch bpf_program__set_expected_attach_type(prog, attach_type); if (!env.quiet) { - printf("Using guessed program type '%s' for %s/%s...\n", + fprintf(stderr, "Using guessed program type '%s' for %s/%s...\n", libbpf_bpf_prog_type_str(prog_type), filename, prog_name); } } else { if (!env.quiet) { - printf("Failed to guess program type for freplace program with context type name '%s' for %s/%s. Consider using canonical type names to help veristat...\n", + fprintf(stderr, "Failed to guess program type for freplace program with context type name '%s' for %s/%s. Consider using canonical type names to help veristat...\n", ctx_name, filename, prog_name); } } @@ -1139,15 +1328,245 @@ static int max_verifier_log_size(void) return log_size; } +static bool output_stat_enabled(int id) +{ + int i; + + for (i = 0; i < env.output_spec.spec_cnt; i++) + if (env.output_spec.ids[i] == id) + return true; + return false; +} + +__attribute__((format(printf, 2, 3))) +static int write_one_line(const char *file, const char *fmt, ...) +{ + int err, saved_errno; + va_list ap; + FILE *f; + + f = fopen(file, "w"); + if (!f) + return -1; + + va_start(ap, fmt); + errno = 0; + err = vfprintf(f, fmt, ap); + saved_errno = errno; + va_end(ap); + fclose(f); + errno = saved_errno; + return err < 0 ? -1 : 0; +} + +__attribute__((format(scanf, 3, 4))) +static int scanf_one_line(const char *file, int fields_expected, const char *fmt, ...) 
+{ + int res = 0, saved_errno = 0; + char *line = NULL; + size_t line_len; + va_list ap; + FILE *f; + + f = fopen(file, "r"); + if (!f) + return -1; + + va_start(ap, fmt); + while (getline(&line, &line_len, f) > 0) { + res = vsscanf(line, fmt, ap); + if (res == fields_expected) + goto out; + } + if (ferror(f)) { + saved_errno = errno; + res = -1; + } + +out: + va_end(ap); + free(line); + fclose(f); + errno = saved_errno; + return res; +} + +static void destroy_stat_cgroup(void) +{ + char buf[PATH_MAX]; + int err; + + close(env.memory_peak_fd); + + if (env.orig_cgroup[0]) { + snprintf(buf, sizeof(buf), "%s/cgroup.procs", env.orig_cgroup); + err = write_one_line(buf, "%d\n", getpid()); + if (err < 0) + log_errno("moving self to original cgroup %s\n", env.orig_cgroup); + } + + if (env.stat_cgroup[0]) { + err = rmdir(env.stat_cgroup); + if (err < 0) + log_errno("deletion of cgroup %s", env.stat_cgroup); + } + + env.memory_peak_fd = -1; + env.orig_cgroup[0] = 0; + env.stat_cgroup[0] = 0; +} + +/* + * Creates a cgroup at /sys/fs/cgroup/veristat-accounting-<pid>, + * moves current process to this cgroup. + */ +static void create_stat_cgroup(void) +{ + char cgroup_fs_mount[4096]; + char buf[4096]; + int err; + + env.memory_peak_fd = -1; + + if (!output_stat_enabled(MEMORY_PEAK)) + return; + + err = scanf_one_line("/proc/self/mounts", 2, "%*s %4095s cgroup2 %s", + cgroup_fs_mount, buf); + if (err != 2) { + if (err < 0) + log_errno("reading /proc/self/mounts"); + else if (!env.quiet) + fprintf(stderr, "Can't find cgroupfs v2 mount point.\n"); + goto err_out; + } + + /* cgroup-v2.rst promises the line "0::<group>" for cgroups v2 */ + err = scanf_one_line("/proc/self/cgroup", 1, "0::%4095s", buf); + if (err != 1) { + if (err < 0) + log_errno("reading /proc/self/cgroup"); + else if (!env.quiet) + fprintf(stderr, "Can't infer veristat process cgroup."); + goto err_out; + } + + snprintf(env.orig_cgroup, sizeof(env.orig_cgroup), "%s/%s", cgroup_fs_mount, buf); + + snprintf(buf, sizeof(buf), "%s/veristat-accounting-%d", cgroup_fs_mount, getpid()); + err = mkdir(buf, 0777); + if (err < 0) { + log_errno("creation of cgroup %s", buf); + goto err_out; + } + strcpy(env.stat_cgroup, buf); + + snprintf(buf, sizeof(buf), "%s/cgroup.procs", env.stat_cgroup); + err = write_one_line(buf, "%d\n", getpid()); + if (err < 0) { + log_errno("entering cgroup %s", buf); + goto err_out; + } + + snprintf(buf, sizeof(buf), "%s/memory.peak", env.stat_cgroup); + env.memory_peak_fd = open(buf, O_RDWR | O_APPEND); + if (env.memory_peak_fd < 0) { + log_errno("opening %s", buf); + goto err_out; + } + + return; + +err_out: + if (!env.quiet) + fprintf(stderr, "Memory usage metric unavailable.\n"); + destroy_stat_cgroup(); +} + +/* Current value of /sys/fs/cgroup/veristat-accounting-<pid>/memory.peak */ +static long cgroup_memory_peak(void) +{ + long err, memory_peak; + char buf[32]; + + if (env.memory_peak_fd < 0) + return -1; + + err = pread(env.memory_peak_fd, buf, sizeof(buf) - 1, 0); + if (err <= 0) { + log_errno("pread(%s/memory.peak)", env.stat_cgroup); + return -1; + } + + buf[err] = 0; + errno = 0; + memory_peak = strtoll(buf, NULL, 10); + if (errno) { + log_errno("%s/memory.peak:strtoll(%s)", env.stat_cgroup, buf); + return -1; + } + + return memory_peak; +} + +static int reset_stat_cgroup(void) +{ + char buf[] = "r\n"; + int err; + + if (env.memory_peak_fd < 0) + return -1; + + err = pwrite(env.memory_peak_fd, buf, sizeof(buf), 0); + if (err <= 0) { + log_errno("pwrite(%s/memory.peak)", env.stat_cgroup); + return -1; + } + 
return 0; +} + +static int parse_rvalue(const char *val, struct rvalue *rvalue) +{ + long long value; + char *val_end; + + if (val[0] == '-' || isdigit(val[0])) { + /* must be a number */ + errno = 0; + value = strtoll(val, &val_end, 0); + if (errno == ERANGE) { + errno = 0; + value = strtoull(val, &val_end, 0); + } + if (errno || *val_end != '\0') { + fprintf(stderr, "Failed to parse value '%s'\n", val); + return -EINVAL; + } + rvalue->ivalue = value; + rvalue->type = INTEGRAL; + } else { + /* if not a number, consider it enum value */ + rvalue->svalue = strdup(val); + if (!rvalue->svalue) + return -ENOMEM; + rvalue->type = ENUMERATOR; + } + return 0; +} + static int process_prog(const char *filename, struct bpf_object *obj, struct bpf_program *prog) { const char *base_filename = basename(strdupa(filename)); const char *prog_name = bpf_program__name(prog); + long mem_peak_a, mem_peak_b, mem_peak = -1; char *buf; int buf_sz, log_level; struct verif_stats *stats; - int err = 0; + struct bpf_prog_info info; + __u32 info_len = sizeof(info); + int err = 0, cgroup_err; void *tmp; + int fd; if (!should_process_file_prog(base_filename, bpf_program__name(prog))) { env.progs_skipped++; @@ -1190,12 +1609,30 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf if (env.force_reg_invariants) bpf_program__set_flags(prog, bpf_program__flags(prog) | BPF_F_TEST_REG_INVARIANTS); - err = bpf_object__load(obj); + err = bpf_object__prepare(obj); + if (!err) { + cgroup_err = reset_stat_cgroup(); + mem_peak_a = cgroup_memory_peak(); + err = bpf_object__load(obj); + mem_peak_b = cgroup_memory_peak(); + if (!cgroup_err && mem_peak_a >= 0 && mem_peak_b >= 0) + mem_peak = mem_peak_b - mem_peak_a; + } env.progs_processed++; stats->file_name = strdup(base_filename); stats->prog_name = strdup(bpf_program__name(prog)); stats->stats[VERDICT] = err == 0; /* 1 - success, 0 - failure */ + stats->stats[SIZE] = bpf_program__insn_cnt(prog); + stats->stats[PROG_TYPE] = bpf_program__type(prog); + stats->stats[ATTACH_TYPE] = bpf_program__expected_attach_type(prog); + stats->stats[MEMORY_PEAK] = mem_peak < 0 ? -1 : mem_peak / (1024 * 1024); + + memset(&info, 0, info_len); + fd = bpf_program__fd(prog); + if (fd > 0 && bpf_prog_get_info_by_fd(fd, &info, &info_len) == 0) + stats->stats[JITED_SIZE] = info.jited_prog_len; + parse_verif_log(buf, buf_sz, stats); if (env.verbose) { @@ -1210,7 +1647,483 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf free(buf); return 0; -}; +} + +static int append_preset_atom(struct var_preset *preset, char *value, bool is_index) +{ + struct field_access *tmp; + int i = preset->atom_count; + int err; + + tmp = reallocarray(preset->atoms, i + 1, sizeof(*preset->atoms)); + if (!tmp) + return -ENOMEM; + + preset->atoms = tmp; + preset->atom_count++; + + if (is_index) { + preset->atoms[i].type = ARRAY_INDEX; + err = parse_rvalue(value, &preset->atoms[i].index); + if (err) + return err; + } else { + preset->atoms[i].type = FIELD_NAME; + preset->atoms[i].name = strdup(value); + if (!preset->atoms[i].name) + return -ENOMEM; + } + return 0; +} + +static int parse_var_atoms(const char *full_var, struct var_preset *preset) +{ + char expr[256], var[256], *name, *saveptr; + int n, len, off, err; + + snprintf(expr, sizeof(expr), "%s", full_var); + preset->atom_count = 0; + while ((name = strtok_r(preset->atom_count ? 
NULL : expr, ".", &saveptr))) { + len = strlen(name); + /* parse variable name */ + if (sscanf(name, "%[a-zA-Z0-9_] %n", var, &off) != 1) { + fprintf(stderr, "Can't parse %s\n", name); + return -EINVAL; + } + err = append_preset_atom(preset, var, false); + if (err) + return err; + + /* parse optional array indexes */ + while (off < len) { + if (sscanf(name + off, " [ %[a-zA-Z0-9_] ] %n", var, &n) != 1) { + fprintf(stderr, "Can't parse %s as index\n", name + off); + return -EINVAL; + } + err = append_preset_atom(preset, var, true); + if (err) + return err; + off += n; + } + } + return 0; +} + +static int append_var_preset(struct var_preset **presets, int *cnt, const char *expr) +{ + void *tmp; + struct var_preset *cur; + char var[256], val[256]; + int n, err; + + tmp = realloc(*presets, (*cnt + 1) * sizeof(**presets)); + if (!tmp) + return -ENOMEM; + *presets = tmp; + cur = &(*presets)[*cnt]; + memset(cur, 0, sizeof(*cur)); + (*cnt)++; + + if (sscanf(expr, " %[][a-zA-Z0-9_. ] = %s %n", var, val, &n) != 2 || n != strlen(expr)) { + fprintf(stderr, "Failed to parse expression '%s'\n", expr); + return -EINVAL; + } + /* Remove trailing spaces from var, as scanf may add those */ + rtrim(var); + + err = parse_rvalue(val, &cur->value); + if (err) + return err; + + cur->full_name = strdup(var); + if (!cur->full_name) + return -ENOMEM; + + err = parse_var_atoms(var, cur); + if (err) + return err; + + return 0; +} + +static int append_var_preset_file(const char *filename) +{ + char buf[1024]; + FILE *f; + int err = 0; + + f = fopen(filename, "rt"); + if (!f) { + err = -errno; + fprintf(stderr, "Failed to open presets in '%s': %s\n", filename, strerror(-err)); + return -EINVAL; + } + + while (fscanf(f, " %1023[^\n]\n", buf) == 1) { + if (buf[0] == '\0' || buf[0] == '#') + continue; + + err = append_var_preset(&env.presets, &env.npresets, buf); + if (err) + goto cleanup; + } + +cleanup: + fclose(f); + return err; +} + +static bool is_signed_type(const struct btf_type *t) +{ + if (btf_is_int(t)) + return btf_int_encoding(t) & BTF_INT_SIGNED; + if (btf_is_any_enum(t)) + return btf_kflag(t); + return true; +} + +static int enum_value_from_name(const struct btf *btf, const struct btf_type *t, + const char *evalue, long long *retval) +{ + if (btf_is_enum(t)) { + struct btf_enum *e = btf_enum(t); + int i, n = btf_vlen(t); + + for (i = 0; i < n; ++i, ++e) { + const char *cur_name = btf__name_by_offset(btf, e->name_off); + + if (strcmp(cur_name, evalue) == 0) { + *retval = e->val; + return 0; + } + } + } else if (btf_is_enum64(t)) { + struct btf_enum64 *e = btf_enum64(t); + int i, n = btf_vlen(t); + + for (i = 0; i < n; ++i, ++e) { + const char *cur_name = btf__name_by_offset(btf, e->name_off); + __u64 value = btf_enum64_value(e); + + if (strcmp(cur_name, evalue) == 0) { + *retval = value; + return 0; + } + } + } + return -EINVAL; +} + +static bool is_preset_supported(const struct btf_type *t) +{ + return btf_is_int(t) || btf_is_enum(t) || btf_is_enum64(t); +} + +static int find_enum_value(const struct btf *btf, const char *name, long long *value) +{ + const struct btf_type *t; + int cnt, i; + long long lvalue; + + cnt = btf__type_cnt(btf); + for (i = 1; i != cnt; ++i) { + t = btf__type_by_id(btf, i); + + if (!btf_is_any_enum(t)) + continue; + + if (enum_value_from_name(btf, t, name, &lvalue) == 0) { + *value = lvalue; + return 0; + } + } + return -ESRCH; +} + +static int resolve_rvalue(struct btf *btf, const struct rvalue *rvalue, long long *result) +{ + int err = 0; + + switch (rvalue->type) { + case 
INTEGRAL: + *result = rvalue->ivalue; + return 0; + case ENUMERATOR: + err = find_enum_value(btf, rvalue->svalue, result); + if (err) { + fprintf(stderr, "Can't resolve enum value %s\n", rvalue->svalue); + return err; + } + return 0; + default: + fprintf(stderr, "Unknown rvalue type\n"); + return -EOPNOTSUPP; + } + return 0; +} + +static int adjust_var_secinfo_array(struct btf *btf, int tid, struct field_access *atom, + const char *array_name, struct btf_var_secinfo *sinfo) +{ + const struct btf_type *t; + struct btf_array *barr; + long long idx; + int err; + + tid = btf__resolve_type(btf, tid); + t = btf__type_by_id(btf, tid); + if (!btf_is_array(t)) { + fprintf(stderr, "Array index is not expected for %s\n", + array_name); + return -EINVAL; + } + barr = btf_array(t); + err = resolve_rvalue(btf, &atom->index, &idx); + if (err) + return err; + if (idx < 0 || idx >= barr->nelems) { + fprintf(stderr, "Array index %lld is out of bounds [0, %u): %s\n", + idx, barr->nelems, array_name); + return -EINVAL; + } + sinfo->size = btf__resolve_size(btf, barr->type); + sinfo->offset += sinfo->size * idx; + sinfo->type = btf__resolve_type(btf, barr->type); + return 0; +} + +static int adjust_var_secinfo_member(const struct btf *btf, + const struct btf_type *parent_type, + __u32 parent_offset, + const char *member_name, + struct btf_var_secinfo *sinfo) +{ + int i; + + if (!btf_is_composite(parent_type)) { + fprintf(stderr, "Can't resolve field %s for non-composite type\n", member_name); + return -EINVAL; + } + + for (i = 0; i < btf_vlen(parent_type); ++i) { + const struct btf_member *member; + const struct btf_type *member_type; + int tid, off; + + member = btf_members(parent_type) + i; + tid = btf__resolve_type(btf, member->type); + if (tid < 0) + return -EINVAL; + + member_type = btf__type_by_id(btf, tid); + off = parent_offset + member->offset; + if (member->name_off) { + const char *name = btf__name_by_offset(btf, member->name_off); + + if (strcmp(member_name, name) == 0) { + if (btf_member_bitfield_size(parent_type, i) != 0) { + fprintf(stderr, "Bitfield presets are not supported %s\n", + name); + return -EINVAL; + } + sinfo->offset += off / 8; + sinfo->type = tid; + sinfo->size = member_type->size; + return 0; + } + } else if (btf_is_composite(member_type)) { + int err; + + err = adjust_var_secinfo_member(btf, member_type, off, + member_name, sinfo); + if (!err) + return 0; + } + } + + return -ESRCH; +} + +static int adjust_var_secinfo(struct btf *btf, const struct btf_type *t, + struct btf_var_secinfo *sinfo, struct var_preset *preset) +{ + const struct btf_type *base_type; + const char *prev_name; + int err, i; + int tid; + + assert(preset->atom_count > 0); + assert(preset->atoms[0].type == FIELD_NAME); + + tid = btf__resolve_type(btf, t->type); + base_type = btf__type_by_id(btf, tid); + prev_name = preset->atoms[0].name; + + for (i = 1; i < preset->atom_count; ++i) { + struct field_access *atom = preset->atoms + i; + + switch (atom->type) { + case ARRAY_INDEX: + err = adjust_var_secinfo_array(btf, tid, atom, prev_name, sinfo); + break; + case FIELD_NAME: + err = adjust_var_secinfo_member(btf, base_type, 0, atom->name, sinfo); + if (err == -ESRCH) + fprintf(stderr, "Can't find '%s'\n", atom->name); + prev_name = atom->name; + break; + default: + fprintf(stderr, "Unknown field_access type\n"); + return -EOPNOTSUPP; + } + if (err) + return err; + base_type = btf__type_by_id(btf, sinfo->type); + tid = sinfo->type; + } + + return 0; +} + +static int set_global_var(struct bpf_object *obj, struct btf 
*btf, + struct bpf_map *map, struct btf_var_secinfo *sinfo, + struct var_preset *preset) +{ + const struct btf_type *base_type; + void *ptr; + long long value = preset->value.ivalue; + size_t size; + + base_type = btf__type_by_id(btf, btf__resolve_type(btf, sinfo->type)); + if (!base_type) { + fprintf(stderr, "Failed to resolve type %d\n", sinfo->type); + return -EINVAL; + } + if (!is_preset_supported(base_type)) { + fprintf(stderr, "Can't set %s. Only ints and enums are supported\n", + preset->full_name); + return -EINVAL; + } + + if (preset->value.type == ENUMERATOR) { + if (btf_is_any_enum(base_type)) { + if (enum_value_from_name(btf, base_type, preset->value.svalue, &value)) { + fprintf(stderr, + "Failed to find integer value for enum element %s\n", + preset->value.svalue); + return -EINVAL; + } + } else { + fprintf(stderr, "Value %s is not supported for type %s\n", + preset->value.svalue, + btf__name_by_offset(btf, base_type->name_off)); + return -EINVAL; + } + } + + /* Check if value fits into the target variable size */ + if (sinfo->size < sizeof(value)) { + bool is_signed = is_signed_type(base_type); + __u32 unsigned_bits = sinfo->size * 8 - (is_signed ? 1 : 0); + long long max_val = 1ll << unsigned_bits; + + if (value >= max_val || value < -max_val) { + fprintf(stderr, + "Variable %s value %lld is out of range [%lld; %lld]\n", + btf__name_by_offset(btf, base_type->name_off), value, + is_signed ? -max_val : 0, max_val - 1); + return -EINVAL; + } + } + + ptr = bpf_map__initial_value(map, &size); + if (!ptr || sinfo->offset + sinfo->size > size) + return -EINVAL; + + if (__BYTE_ORDER == __LITTLE_ENDIAN) { + memcpy(ptr + sinfo->offset, &value, sinfo->size); + } else { /* __BYTE_ORDER == __BIG_ENDIAN */ + __u8 src_offset = sizeof(value) - sinfo->size; + + memcpy(ptr + sinfo->offset, (void *)&value + src_offset, sinfo->size); + } + return 0; +} + +static int set_global_vars(struct bpf_object *obj, struct var_preset *presets, int npresets) +{ + struct btf_var_secinfo *sinfo; + const char *sec_name; + const struct btf_type *t; + struct bpf_map *map; + struct btf *btf; + int i, j, k, n, cnt, err = 0; + + if (npresets == 0) + return 0; + + btf = bpf_object__btf(obj); + if (!btf) + return -EINVAL; + + cnt = btf__type_cnt(btf); + for (i = 1; i != cnt; ++i) { + t = btf__type_by_id(btf, i); + + if (!btf_is_datasec(t)) + continue; + + sinfo = btf_var_secinfos(t); + sec_name = btf__name_by_offset(btf, t->name_off); + map = bpf_object__find_map_by_name(obj, sec_name); + if (!map) + continue; + + n = btf_vlen(t); + for (j = 0; j < n; ++j, ++sinfo) { + const struct btf_type *var_type = btf__type_by_id(btf, sinfo->type); + const char *var_name; + + if (!btf_is_var(var_type)) + continue; + + var_name = btf__name_by_offset(btf, var_type->name_off); + + for (k = 0; k < npresets; ++k) { + struct btf_var_secinfo tmp_sinfo; + + if (strcmp(var_name, presets[k].atoms[0].name) != 0) + continue; + + if (presets[k].applied) { + fprintf(stderr, "Variable %s is set more than once", + var_name); + return -EINVAL; + } + tmp_sinfo = *sinfo; + err = adjust_var_secinfo(btf, var_type, + &tmp_sinfo, presets + k); + if (err) + return err; + + err = set_global_var(obj, btf, map, &tmp_sinfo, presets + k); + if (err) + return err; + + presets[k].applied = true; + } + } + } + for (i = 0; i < npresets; ++i) { + if (!presets[i].applied) { + fprintf(stderr, "Global variable preset %s has not been applied\n", + presets[i].full_name); + err = -EINVAL; + } + presets[i].applied = false; + } + return err; +} static int 
process_obj(const char *filename) { @@ -1261,6 +2174,11 @@ static int process_obj(const char *filename) if (prog_cnt == 1) { prog = bpf_object__next_program(obj, NULL); bpf_program__set_autoload(prog, true); + err = set_global_vars(obj, env.presets, env.npresets); + if (err) { + fprintf(stderr, "Failed to set global variables %d\n", err); + goto cleanup; + } process_prog(filename, obj, prog); goto cleanup; } @@ -1275,6 +2193,12 @@ static int process_obj(const char *filename) goto cleanup; } + err = set_global_vars(tobj, env.presets, env.npresets); + if (err) { + fprintf(stderr, "Failed to set global variables %d\n", err); + goto cleanup; + } + lprog = NULL; bpf_object__for_each_program(tprog, tobj) { const char *tprog_name = bpf_program__name(tprog); @@ -1309,12 +2233,18 @@ static int cmp_stat(const struct verif_stats *s1, const struct verif_stats *s2, case PROG_NAME: cmp = strcmp(s1->prog_name, s2->prog_name); break; + case ATTACH_TYPE: + case PROG_TYPE: + case SIZE: + case JITED_SIZE: + case STACK: case VERDICT: case DURATION: case TOTAL_INSNS: case TOTAL_STATES: case PEAK_STATES: case MAX_STATES_PER_INSN: + case MEMORY_PEAK: case MARK_READ_MAX_LEN: { long v1 = s1->stats[id]; long v2 = s2->stats[id]; @@ -1523,12 +2453,28 @@ static void prepare_value(const struct verif_stats *s, enum stat_id id, else *str = s->stats[VERDICT] ? "success" : "failure"; break; + case ATTACH_TYPE: + if (!s) + *str = "N/A"; + else + *str = libbpf_bpf_attach_type_str(s->stats[ATTACH_TYPE]) ?: "N/A"; + break; + case PROG_TYPE: + if (!s) + *str = "N/A"; + else + *str = libbpf_bpf_prog_type_str(s->stats[PROG_TYPE]) ?: "N/A"; + break; case DURATION: case TOTAL_INSNS: case TOTAL_STATES: case PEAK_STATES: case MAX_STATES_PER_INSN: case MARK_READ_MAX_LEN: + case STACK: + case SIZE: + case JITED_SIZE: + case MEMORY_PEAK: *val = s ? 
s->stats[id] : 0; break; default: @@ -1612,7 +2558,11 @@ static int parse_stat_value(const char *str, enum stat_id id, struct verif_stats case TOTAL_STATES: case PEAK_STATES: case MAX_STATES_PER_INSN: - case MARK_READ_MAX_LEN: { + case MARK_READ_MAX_LEN: + case SIZE: + case JITED_SIZE: + case MEMORY_PEAK: + case STACK: { long val; int err, n; @@ -1625,6 +2575,42 @@ static int parse_stat_value(const char *str, enum stat_id id, struct verif_stats st->stats[id] = val; break; } + case PROG_TYPE: { + enum bpf_prog_type prog_type = 0; + const char *type; + + while ((type = libbpf_bpf_prog_type_str(prog_type))) { + if (strcmp(type, str) == 0) { + st->stats[id] = prog_type; + break; + } + prog_type++; + } + + if (!type) { + fprintf(stderr, "Unrecognized prog type %s\n", str); + return -EINVAL; + } + break; + } + case ATTACH_TYPE: { + enum bpf_attach_type attach_type = 0; + const char *type; + + while ((type = libbpf_bpf_attach_type_str(attach_type))) { + if (strcmp(type, str) == 0) { + st->stats[id] = attach_type; + break; + } + attach_type++; + } + + if (!type) { + fprintf(stderr, "Unrecognized attach type %s\n", str); + return -EINVAL; + } + break; + } default: fprintf(stderr, "Unrecognized stat #%d\n", id); return -EINVAL; @@ -2213,7 +3199,7 @@ static void output_prog_stats(void) static int handle_verif_mode(void) { - int i, err; + int i, err = 0; if (env.filename_cnt == 0) { fprintf(stderr, "Please provide path to BPF object file!\n\n"); @@ -2221,11 +3207,12 @@ static int handle_verif_mode(void) return -EINVAL; } + create_stat_cgroup(); for (i = 0; i < env.filename_cnt; i++) { err = process_obj(env.filenames[i]); if (err) { fprintf(stderr, "Failed to process '%s': %d\n", env.filenames[i], err); - return err; + goto out; } } @@ -2233,7 +3220,9 @@ static int handle_verif_mode(void) output_prog_stats(); - return 0; +out: + destroy_stat_cgroup(); + return err; } static int handle_replay_mode(void) @@ -2263,7 +3252,7 @@ static int handle_replay_mode(void) int main(int argc, char **argv) { - int err = 0, i; + int err = 0, i, j; if (argp_parse(&argp, argc, argv, 0, NULL, NULL)) return 1; @@ -2321,5 +3310,21 @@ int main(int argc, char **argv) free(env.deny_filters[i].prog_glob); } free(env.deny_filters); + for (i = 0; i < env.npresets; ++i) { + free(env.presets[i].full_name); + for (j = 0; j < env.presets[i].atom_count; ++j) { + switch (env.presets[i].atoms[j].type) { + case FIELD_NAME: + free(env.presets[i].atoms[j].name); + break; + case ARRAY_INDEX: + if (env.presets[i].atoms[j].index.type == ENUMERATOR) + free(env.presets[i].atoms[j].index.svalue); + break; + } + } + free(env.presets[i].atoms); + } + free(env.presets); return -err; } diff --git a/tools/testing/selftests/bpf/vmtest.sh b/tools/testing/selftests/bpf/vmtest.sh index 79505d294c44..2f869daf8a06 100755 --- a/tools/testing/selftests/bpf/vmtest.sh +++ b/tools/testing/selftests/bpf/vmtest.sh @@ -43,6 +43,15 @@ riscv64) BZIMAGE="arch/riscv/boot/Image" ARCH="riscv" ;; +ppc64el) + QEMU_BINARY=qemu-system-ppc64 + QEMU_CONSOLE="hvc0" + # KVM could not be tested for powerpc, therefore not enabled for now. 
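+	# pseries machines boot the uncompressed vmlinux ELF directly,
+	# which is why BZIMAGE below points at vmlinux instead of a zImage.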
+ HOST_FLAGS=(-machine pseries -cpu POWER9) + CROSS_FLAGS=(-machine pseries -cpu POWER9) + BZIMAGE="vmlinux" + ARCH="powerpc" + ;; *) echo "Unsupported architecture" exit 1 diff --git a/tools/testing/selftests/bpf/with_addr.sh b/tools/testing/selftests/bpf/with_addr.sh deleted file mode 100755 index ffcd3953f94c..000000000000 --- a/tools/testing/selftests/bpf/with_addr.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/bash -# SPDX-License-Identifier: GPL-2.0 -# -# add private ipv4 and ipv6 addresses to loopback - -readonly V6_INNER='100::a/128' -readonly V4_INNER='192.168.0.1/32' - -if getopts ":s" opt; then - readonly SIT_DEV_NAME='sixtofourtest0' - readonly V6_SIT='2::/64' - readonly V4_SIT='172.17.0.1/32' - shift -fi - -fail() { - echo "error: $*" 1>&2 - exit 1 -} - -setup() { - ip -6 addr add "${V6_INNER}" dev lo || fail 'failed to setup v6 address' - ip -4 addr add "${V4_INNER}" dev lo || fail 'failed to setup v4 address' - - if [[ -n "${V6_SIT}" ]]; then - ip link add "${SIT_DEV_NAME}" type sit remote any local any \ - || fail 'failed to add sit' - ip link set dev "${SIT_DEV_NAME}" up \ - || fail 'failed to bring sit device up' - ip -6 addr add "${V6_SIT}" dev "${SIT_DEV_NAME}" \ - || fail 'failed to setup v6 SIT address' - ip -4 addr add "${V4_SIT}" dev "${SIT_DEV_NAME}" \ - || fail 'failed to setup v4 SIT address' - fi - - sleep 2 # avoid race causing bind to fail -} - -cleanup() { - if [[ -n "${V6_SIT}" ]]; then - ip -4 addr del "${V4_SIT}" dev "${SIT_DEV_NAME}" - ip -6 addr del "${V6_SIT}" dev "${SIT_DEV_NAME}" - ip link del "${SIT_DEV_NAME}" - fi - - ip -4 addr del "${V4_INNER}" dev lo - ip -6 addr del "${V6_INNER}" dev lo -} - -trap cleanup EXIT - -setup -"$@" -exit "$?" diff --git a/tools/testing/selftests/bpf/with_tunnels.sh b/tools/testing/selftests/bpf/with_tunnels.sh deleted file mode 100755 index e24949ed3a20..000000000000 --- a/tools/testing/selftests/bpf/with_tunnels.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash -# SPDX-License-Identifier: GPL-2.0 -# -# setup tunnels for flow dissection test - -readonly SUFFIX="test_$(mktemp -u XXXX)" -CONFIG="remote 127.0.0.2 local 127.0.0.1 dev lo" - -setup() { - ip link add "ipip_${SUFFIX}" type ipip ${CONFIG} - ip link add "gre_${SUFFIX}" type gre ${CONFIG} - ip link add "sit_${SUFFIX}" type sit ${CONFIG} - - echo "tunnels before test:" - ip tunnel show - - ip link set "ipip_${SUFFIX}" up - ip link set "gre_${SUFFIX}" up - ip link set "sit_${SUFFIX}" up -} - - -cleanup() { - ip tunnel del "ipip_${SUFFIX}" - ip tunnel del "gre_${SUFFIX}" - ip tunnel del "sit_${SUFFIX}" - - echo "tunnels after test:" - ip tunnel show -} - -trap cleanup EXIT - -setup -"$@" -exit "$?" 
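A note before the xdp_hw_metadata.c hunks: the new launch-time support is
driven entirely through AF_XDP TX metadata. Below is a minimal sketch of the
per-descriptor sequence, using names from the hunks that follow; it assumes a
umem opened with XDP_UMEM_TX_METADATA_LEN and tx_metadata_len set as in
open_xsk(), and "delta" stands in for launch_time_delta_to_hw_rx_timestamp:

	/* TX metadata sits immediately before the packet data in the umem chunk */
	struct xsk_tx_metadata *meta = data - sizeof(struct xsk_tx_metadata);

	meta->flags |= XDP_TXMD_FLAGS_LAUNCH_TIME;
	/* absolute time, on the same clock as the HW RX timestamp (CLOCK_TAI here) */
	meta->request.launch_time = last_hw_rx_timestamp + delta;
	tx_desc->options |= XDP_TX_METADATA;

The NIC holds the frame until launch_time, which is why the TX completion
poll in verify_metadata() extends its deadline by the same delta.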
diff --git a/tools/testing/selftests/bpf/xdp_hw_metadata.c b/tools/testing/selftests/bpf/xdp_hw_metadata.c index 6f9956eed797..3d8de0d4c96a 100644 --- a/tools/testing/selftests/bpf/xdp_hw_metadata.c +++ b/tools/testing/selftests/bpf/xdp_hw_metadata.c @@ -13,6 +13,7 @@ * - UDP 9091 packets trigger TX reply * - TX HW timestamp is requested and reported back upon completion * - TX checksum is requested + * - TX launch time HW offload is requested for transmission */ #include <test_progs.h> @@ -27,7 +28,7 @@ #include <linux/errqueue.h> #include <linux/if_link.h> #include <linux/net_tstamp.h> -#include <linux/udp.h> +#include <netinet/udp.h> #include <linux/sockios.h> #include <linux/if_xdp.h> #include <sys/mman.h> @@ -37,6 +38,15 @@ #include <time.h> #include <unistd.h> #include <libgen.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/ioctl.h> +#include <linux/pkt_sched.h> +#include <linux/pkt_cls.h> +#include <linux/ethtool.h> +#include <sys/socket.h> +#include <arpa/inet.h> #include "xdp_metadata.h" @@ -64,6 +74,18 @@ int rxq; bool skip_tx; __u64 last_hw_rx_timestamp; __u64 last_xdp_rx_timestamp; +__u64 last_launch_time; +__u64 launch_time_delta_to_hw_rx_timestamp; +int launch_time_queue; + +#define run_command(cmd, ...) \ +({ \ + char command[1024]; \ + memset(command, 0, sizeof(command)); \ + snprintf(command, sizeof(command), cmd, ##__VA_ARGS__); \ + fprintf(stderr, "Running: %s\n", command); \ + system(command); \ +}) void test__fail(void) { /* for network_helpers.c */ } @@ -79,7 +101,7 @@ static int open_xsk(int ifindex, struct xsk *xsk, __u32 queue_id) .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS, .comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS, .frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE, - .flags = XSK_UMEM__DEFAULT_FLAGS, + .flags = XDP_UMEM_TX_METADATA_LEN, .tx_metadata_len = sizeof(struct xsk_tx_metadata), }; __u32 idx = 0; @@ -298,6 +320,12 @@ static bool complete_tx(struct xsk *xsk, clockid_t clock_id) if (meta->completion.tx_timestamp) { __u64 ref_tstamp = gettime(clock_id); + if (launch_time_delta_to_hw_rx_timestamp) { + print_tstamp_delta("HW Launch-time", + "HW TX-complete-time", + last_launch_time, + meta->completion.tx_timestamp); + } print_tstamp_delta("HW TX-complete-time", "User TX-complete-time", meta->completion.tx_timestamp, ref_tstamp); print_tstamp_delta("XDP RX-time", "User TX-complete-time", @@ -395,6 +423,17 @@ static void ping_pong(struct xsk *xsk, void *rx_packet, clockid_t clock_id) xsk, ntohs(udph->check), ntohs(want_csum), meta->request.csum_start, meta->request.csum_offset); + /* Set the value of launch time */ + if (launch_time_delta_to_hw_rx_timestamp) { + meta->flags |= XDP_TXMD_FLAGS_LAUNCH_TIME; + meta->request.launch_time = last_hw_rx_timestamp + + launch_time_delta_to_hw_rx_timestamp; + last_launch_time = meta->request.launch_time; + print_tstamp_delta("HW RX-time", "HW Launch-time", + last_hw_rx_timestamp, + meta->request.launch_time); + } + memcpy(data, rx_packet, len); /* don't share umem chunk for simplicity */ tx_desc->options |= XDP_TX_METADATA; tx_desc->len = len; @@ -407,6 +446,7 @@ static int verify_metadata(struct xsk *rx_xsk, int rxq, int server_fd, clockid_t const struct xdp_desc *rx_desc; struct pollfd fds[rxq + 1]; __u64 comp_addr; + __u64 deadline; __u64 addr; __u32 idx = 0; int ret; @@ -477,9 +517,15 @@ peek: if (ret) printf("kick_tx ret=%d\n", ret); - for (int j = 0; j < 500; j++) { + /* wait 1 second + cover launch time */ + deadline = gettime(clock_id) + + NANOSEC_PER_SEC + + 
+				   launch_time_delta_to_hw_rx_timestamp;
+			while (true) {
 				if (complete_tx(xsk, clock_id))
 					break;
+				if (gettime(clock_id) >= deadline)
+					break;
 				usleep(10);
 			}
 		}
@@ -551,6 +597,7 @@ static void hwtstamp_enable(const char *ifname)
 {
 	struct hwtstamp_config cfg = {
 		.rx_filter = HWTSTAMP_FILTER_ALL,
+		.tx_type = HWTSTAMP_TX_ON,
 	};
 
 	hwtstamp_ioctl(SIOCGHWTSTAMP, ifname, &saved_hwtstamp_cfg);
@@ -607,6 +654,10 @@ static void print_usage(void)
 		"  -h    Display this help and exit\n\n"
 		"  -m    Enable multi-buffer XDP for larger MTU\n"
 		"  -r    Don't generate AF_XDP reply (rx metadata only)\n"
+		"  -l    Delta of launch time relative to HW RX-time in ns\n"
+		"        default: 0 ns (launch time request is disabled)\n"
+		"  -L    TX queue to be enabled with launch time offload\n"
+		"        default: 0 (TX queue 0)\n"
 		"Generate test packets on the other machine with:\n"
 		"  echo -n xdp | nc -u -q1 <dst_ip> 9091\n";
 
@@ -617,7 +668,7 @@ static void read_args(int argc, char *argv[])
 {
 	int opt;
 
-	while ((opt = getopt(argc, argv, "chmr")) != -1) {
+	while ((opt = getopt(argc, argv, "chmrl:L:")) != -1) {
 		switch (opt) {
 		case 'c':
 			bind_flags &= ~XDP_USE_NEED_WAKEUP;
@@ -633,6 +684,12 @@ static void read_args(int argc, char *argv[])
 		case 'r':
 			skip_tx = true;
 			break;
+		case 'l':
+			launch_time_delta_to_hw_rx_timestamp = atoll(optarg);
+			break;
+		case 'L':
+			launch_time_queue = atoll(optarg);
+			break;
 		case '?':
 			if (isprint(optopt))
 				fprintf(stderr, "Unknown option: -%c\n", optopt);
@@ -656,23 +713,122 @@ static void read_args(int argc, char *argv[])
 		error(-1, errno, "Invalid interface name");
 }
 
+void clean_existing_configurations(void)
+{
+	/* Check for and delete the root qdisc if it exists */
+	if (run_command("sudo tc qdisc show dev %s | grep -q 'qdisc mqprio 8001:'", ifname) == 0)
+		run_command("sudo tc qdisc del dev %s root", ifname);
+
+	/* Check for and delete the ingress qdisc if it exists */
+	if (run_command("sudo tc qdisc show dev %s | grep -q 'qdisc ingress ffff:'", ifname) == 0)
+		run_command("sudo tc qdisc del dev %s ingress", ifname);
+
+	/* Check for and delete ethtool filters if any exist */
+	if (run_command("sudo ethtool -n %s | grep -q 'Filter:'", ifname) == 0) {
+		run_command("sudo ethtool -n %s | grep 'Filter:' | awk '{print $2}' | xargs -n1 sudo ethtool -N %s delete >&2",
+			    ifname, ifname);
+	}
+}
+
+#define MAX_TC 16
+
 int main(int argc, char *argv[])
 {
 	clockid_t clock_id = CLOCK_TAI;
+	struct bpf_program *prog;
 	int server_fd = -1;
+	size_t map_len = 0;
+	size_t que_len = 0;
+	char *buf = NULL;
+	char *map = NULL;
+	char *que = NULL;
+	char *tmp = NULL;
+	int tc = 0;
 	int ret;
 	int i;
-	struct bpf_program *prog;
-
 	read_args(argc, argv);
 	rxq = rxq_num(ifname);
-
 	printf("rxq: %d\n", rxq);
 
+	if (launch_time_queue >= rxq || launch_time_queue < 0)
+		error(1, 0, "Invalid launch_time_queue.");
+
+	clean_existing_configurations();
+	sleep(1);
+
+	/* Enable tx and rx hardware timestamping */
 	hwtstamp_enable(ifname);
 
+	/* Prepare priority to traffic class map for tc-mqprio */
+	for (i = 0; i < MAX_TC; i++) {
+		if (i < rxq)
+			tc = i;
+
+		if (asprintf(&buf, "%d ", tc) == -1) {
+			printf("Failed to malloc buf for tc map.\n");
+			goto free_mem;
+		}
+
+		map_len += strlen(buf);
+		tmp = realloc(map, map_len + 1);
+		if (!tmp) {
+			printf("Failed to realloc tc map.\n");
+			goto free_mem;
+		}
+		if (!map)
+			tmp[0] = '\0';	/* realloc(NULL, ...) returns uninitialized memory */
+		map = tmp;
+		strcat(map, buf);
+		free(buf);
+		buf = NULL;
+	}
+
+	/* Prepare traffic class to hardware queue map for tc-mqprio */
+	for (i = 0; i <= tc; i++) {
+		if (asprintf(&buf, "1@%d ", i) == -1) {
+			printf("Failed to malloc buf for tc queues.\n");
+			goto free_mem;
+		}
+
+		que_len += strlen(buf);
+		tmp = realloc(que, que_len + 1);
+		if (!tmp) {
+			printf("Failed to realloc tc queues.\n");
+			goto free_mem;
+		}
+		if (!que)
+			tmp[0] = '\0';	/* realloc(NULL, ...) returns uninitialized memory */
+		que = tmp;
+		strcat(que, buf);
+		free(buf);
+		buf = NULL;
+	}
+
+	/* Add mqprio qdisc */
+	run_command("sudo tc qdisc add dev %s handle 8001: parent root mqprio num_tc %d map %squeues %shw 0",
+		    ifname, tc + 1, map, que);
+
+	/* To test launch time, send UDP packet with VLAN priority 1 to port 9091 */
+	if (launch_time_delta_to_hw_rx_timestamp) {
+		/* Enable launch time hardware offload on launch_time_queue */
+		run_command("sudo tc qdisc replace dev %s parent 8001:%d etf offload clockid CLOCK_TAI delta 500000",
+			    ifname, launch_time_queue + 1);
+		sleep(1);
+
+		/* Route incoming packet with VLAN priority 1 into launch_time_queue */
+		if (run_command("sudo ethtool -N %s flow-type ether vlan 0x2000 vlan-mask 0x1FFF action %d",
+				ifname, launch_time_queue)) {
+			run_command("sudo tc qdisc add dev %s ingress", ifname);
+			run_command("sudo tc filter add dev %s parent ffff: protocol 802.1Q flower vlan_prio 1 hw_tc %d",
+				    ifname, launch_time_queue);
		}
+
+		/* Enable VLAN tag stripping offload */
+		run_command("sudo ethtool -K %s rxvlan on", ifname);
+	}
+
 	rx_xsk = malloc(sizeof(struct xsk) * rxq);
 	if (!rx_xsk)
 		error(1, ENOMEM, "malloc");
@@ -732,4 +888,11 @@ int main(int argc, char *argv[])
 	cleanup();
 	if (ret)
 		error(1, -ret, "verify_metadata");
+
+	clean_existing_configurations();
+
+free_mem:
+	free(buf);
+	free(map);
+	free(que);
 }
diff --git a/tools/testing/selftests/bpf/xdp_redirect_multi.c b/tools/testing/selftests/bpf/xdp_redirect_multi.c
deleted file mode 100644
index c1fc44c87c30..000000000000
--- a/tools/testing/selftests/bpf/xdp_redirect_multi.c
+++ /dev/null
@@ -1,226 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/bpf.h>
-#include <linux/if_link.h>
-#include <assert.h>
-#include <errno.h>
-#include <signal.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <net/if.h>
-#include <unistd.h>
-#include <libgen.h>
-#include <sys/ioctl.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <netinet/in.h>
-
-#include "bpf_util.h"
-#include <bpf/bpf.h>
-#include <bpf/libbpf.h>
-
-#define MAX_IFACE_NUM 32
-#define MAX_INDEX_NUM 1024
-
-static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
-static int ifaces[MAX_IFACE_NUM] = {};
-
-static void int_exit(int sig)
-{
-	__u32 prog_id = 0;
-	int i;
-
-	for (i = 0; ifaces[i] > 0; i++) {
-		if (bpf_xdp_query_id(ifaces[i], xdp_flags, &prog_id)) {
-			printf("bpf_xdp_query_id failed\n");
-			exit(1);
-		}
-		if (prog_id)
-			bpf_xdp_detach(ifaces[i], xdp_flags, NULL);
-	}
-
-	exit(0);
-}
-
-static int get_mac_addr(unsigned int ifindex, void *mac_addr)
-{
-	char ifname[IF_NAMESIZE];
-	struct ifreq ifr;
-	int fd, ret = -1;
-
-	fd = socket(AF_INET, SOCK_DGRAM, 0);
-	if (fd < 0)
-		return ret;
-
-	if (!if_indextoname(ifindex, ifname))
-		goto err_out;
-
-	strcpy(ifr.ifr_name, ifname);
-
-	if (ioctl(fd, SIOCGIFHWADDR, &ifr) != 0)
-		goto err_out;
-
-	memcpy(mac_addr, ifr.ifr_hwaddr.sa_data, 6 * sizeof(char));
-	ret = 0;
-
-err_out:
-	close(fd);
-	return ret;
-}
-
-static void usage(const char *prog)
-{
-	fprintf(stderr,
-		"usage: %s [OPTS] <IFNAME|IFINDEX> <IFNAME|IFINDEX> ...\n"
-		"OPTS:\n"
-		"    -S    use skb-mode\n"
-		"    -N    enforce native mode\n"
-		"    -F    force loading prog\n"
-		"    -X    load xdp program on egress\n",
-		prog);
-}
-
-int main(int argc, char **argv)
-{
-	int prog_fd, group_all, mac_map;
-	struct bpf_program *ingress_prog, *egress_prog;
-	int i, err, ret, opt, egress_prog_fd = 0;
-	struct bpf_devmap_val devmap_val;
-	bool attach_egress_prog = false;
-	unsigned char mac_addr[6];
-	char ifname[IF_NAMESIZE];
-	struct bpf_object *obj;
-	unsigned int ifindex;
-	char filename[256];
-
-	while ((opt = getopt(argc, argv, "SNFX")) != -1) {
-		switch (opt) {
-		case 'S':
-			xdp_flags |= XDP_FLAGS_SKB_MODE;
-			break;
-		case 'N':
-			/* default, set below */
-			break;
-		case 'F':
-			xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
-			break;
-		case 'X':
-			attach_egress_prog = true;
-			break;
-		default:
-			usage(basename(argv[0]));
-			return 1;
-		}
-	}
-
-	if (!(xdp_flags & XDP_FLAGS_SKB_MODE)) {
-		xdp_flags |= XDP_FLAGS_DRV_MODE;
-	} else if (attach_egress_prog) {
-		printf("Load xdp program on egress with SKB mode not supported yet\n");
-		goto err_out;
-	}
-
-	if (optind == argc) {
-		printf("usage: %s <IFNAME|IFINDEX> <IFNAME|IFINDEX> ...\n", argv[0]);
-		goto err_out;
-	}
-
-	printf("Get interfaces:");
-	for (i = 0; i < MAX_IFACE_NUM && argv[optind + i]; i++) {
-		ifaces[i] = if_nametoindex(argv[optind + i]);
-		if (!ifaces[i])
-			ifaces[i] = strtoul(argv[optind + i], NULL, 0);
-		if (!if_indextoname(ifaces[i], ifname)) {
-			perror("Invalid interface name or i");
-			goto err_out;
-		}
-		if (ifaces[i] > MAX_INDEX_NUM) {
-			printf(" interface index too large\n");
-			goto err_out;
-		}
-		printf(" %d", ifaces[i]);
-	}
-	printf("\n");
-
-	snprintf(filename, sizeof(filename), "%s_kern.bpf.o", argv[0]);
-	obj = bpf_object__open_file(filename, NULL);
-	err = libbpf_get_error(obj);
-	if (err)
-		goto err_out;
-	err = bpf_object__load(obj);
-	if (err)
-		goto err_out;
-	prog_fd = bpf_program__fd(bpf_object__next_program(obj, NULL));
-
-	if (attach_egress_prog)
-		group_all = bpf_object__find_map_fd_by_name(obj, "map_egress");
-	else
-		group_all = bpf_object__find_map_fd_by_name(obj, "map_all");
-	mac_map = bpf_object__find_map_fd_by_name(obj, "mac_map");
-
-	if (group_all < 0 || mac_map < 0) {
-		printf("bpf_object__find_map_fd_by_name failed\n");
-		goto err_out;
-	}
-
-	if (attach_egress_prog) {
-		/* Find ingress/egress prog for 2nd xdp prog */
-		ingress_prog = bpf_object__find_program_by_name(obj, "xdp_redirect_map_all_prog");
-		egress_prog = bpf_object__find_program_by_name(obj, "xdp_devmap_prog");
-		if (!ingress_prog || !egress_prog) {
-			printf("finding ingress/egress_prog in obj file failed\n");
-			goto err_out;
-		}
-		prog_fd = bpf_program__fd(ingress_prog);
-		egress_prog_fd = bpf_program__fd(egress_prog);
-		if (prog_fd < 0 || egress_prog_fd < 0) {
-			printf("find egress_prog fd failed\n");
-			goto err_out;
-		}
-	}
-
-	signal(SIGINT, int_exit);
-	signal(SIGTERM, int_exit);
-
-	/* Init forward multicast groups and exclude group */
-	for (i = 0; ifaces[i] > 0; i++) {
-		ifindex = ifaces[i];
-
-		if (attach_egress_prog) {
-			ret = get_mac_addr(ifindex, mac_addr);
-			if (ret < 0) {
-				printf("get interface %d mac failed\n", ifindex);
-				goto err_out;
-			}
-			ret = bpf_map_update_elem(mac_map, &ifindex, mac_addr, 0);
-			if (ret) {
-				perror("bpf_update_elem mac_map failed\n");
-				goto err_out;
-			}
-		}
-
-		/* Add all the interfaces to group all */
-		devmap_val.ifindex = ifindex;
-		devmap_val.bpf_prog.fd = egress_prog_fd;
-		ret = bpf_map_update_elem(group_all, &ifindex, &devmap_val, 0);
-		if (ret) {
-			perror("bpf_map_update_elem");
-			goto err_out;
-		}
-
-		/* bind prog_fd to each interface */
-		ret = bpf_xdp_attach(ifindex, prog_fd, xdp_flags, NULL);
-		if (ret) {
-			printf("Set xdp fd failed on %d\n", ifindex);
-			goto err_out;
-		}
-	}
-
-	/* sleep some time for testing */
-	sleep(999);
-
-	return 0;
-
-err_out:
-	return 1;
-}
diff --git a/tools/testing/selftests/bpf/xsk_xdp_common.h b/tools/testing/selftests/bpf/xsk_xdp_common.h
index 5a6f36f07383..45810ff552da 100644
--- a/tools/testing/selftests/bpf/xsk_xdp_common.h
+++ b/tools/testing/selftests/bpf/xsk_xdp_common.h
@@ -4,6 +4,7 @@
 #define XSK_XDP_COMMON_H_
 
 #define MAX_SOCKETS 2
+#define PKT_HDR_ALIGN (sizeof(struct ethhdr) + 2) /* Just to align the data in the packet */
 
 struct xdp_info {
 	__u64 count;
diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
index 11f047b8af75..a29de0713f19 100644
--- a/tools/testing/selftests/bpf/xskxceiver.c
+++ b/tools/testing/selftests/bpf/xskxceiver.c
@@ -109,6 +109,8 @@
 
 #include <network_helpers.h>
 
+#define MAX_TX_BUDGET_DEFAULT 32
+
 static bool opt_verbose;
 static bool opt_print_tests;
 static enum test_mode opt_mode = TEST_MODE_ALL;
@@ -524,6 +526,8 @@ static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
 	test->nb_sockets = 1;
 	test->fail = false;
 	test->set_ring = false;
+	test->adjust_tail = false;
+	test->adjust_tail_support = false;
 	test->mtu = MAX_ETH_PKT_SIZE;
 	test->xdp_prog_rx = ifobj_rx->xdp_progs->progs.xsk_def_prog;
 	test->xskmap_rx = ifobj_rx->xdp_progs->maps.xsk;
@@ -757,14 +761,15 @@ static struct pkt_stream *pkt_stream_clone(struct pkt_stream *pkt_stream)
 	return pkt_stream_generate(pkt_stream->nb_pkts, pkt_stream->pkts[0].len);
 }
 
-static void pkt_stream_replace(struct test_spec *test, u32 nb_pkts, u32 pkt_len)
+static void pkt_stream_replace_ifobject(struct ifobject *ifobj, u32 nb_pkts, u32 pkt_len)
 {
-	struct pkt_stream *pkt_stream;
+	ifobj->xsk->pkt_stream = pkt_stream_generate(nb_pkts, pkt_len);
+}
 
-	pkt_stream = pkt_stream_generate(nb_pkts, pkt_len);
-	test->ifobj_tx->xsk->pkt_stream = pkt_stream;
-	pkt_stream = pkt_stream_generate(nb_pkts, pkt_len);
-	test->ifobj_rx->xsk->pkt_stream = pkt_stream;
+static void pkt_stream_replace(struct test_spec *test, u32 nb_pkts, u32 pkt_len)
+{
+	pkt_stream_replace_ifobject(test->ifobj_tx, nb_pkts, pkt_len);
+	pkt_stream_replace_ifobject(test->ifobj_rx, nb_pkts, pkt_len);
 }
 
 static void __pkt_stream_replace_half(struct ifobject *ifobj, u32 pkt_len,
@@ -991,6 +996,31 @@ static bool is_metadata_correct(struct pkt *pkt, void *buffer, u64 addr)
 	return true;
 }
 
+static bool is_adjust_tail_supported(struct xsk_xdp_progs *skel_rx)
+{
+	struct bpf_map *data_map;
+	int adjust_value = 0;
+	int key = 0;
+	int ret;
+
+	data_map = bpf_object__find_map_by_name(skel_rx->obj, "xsk_xdp_.bss");
+	if (!data_map || !bpf_map__is_internal(data_map)) {
+		ksft_print_msg("Error: could not find bss section of XDP program\n");
+		exit_with_error(errno);
+	}
+
+	ret = bpf_map_lookup_elem(bpf_map__fd(data_map), &key, &adjust_value);
+	if (ret) {
+		ksft_print_msg("Error: bpf_map_lookup_elem failed with error %d\n", ret);
+		exit_with_error(errno);
+	}
+
+	/* The XDP program sets the 'adjust_value' variable to -EOPNOTSUPP if the adjust_tail
+	 * helper is not supported. Skip the adjust_tail test case in this scenario.
+	 */
+	return adjust_value != -EOPNOTSUPP;
+}
+
 static bool is_frag_valid(struct xsk_umem_info *umem, u64 addr, u32 len, u32 expected_pkt_nb,
 			  u32 bytes_processed)
 {
@@ -1063,11 +1093,45 @@ static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len)
 	return true;
 }
 
+static u32 load_value(u32 *counter)
+{
+	return __atomic_load_n(counter, __ATOMIC_ACQUIRE);
+}
+
+static bool kick_tx_with_check(struct xsk_socket_info *xsk, int *ret)
+{
+	u32 max_budget = MAX_TX_BUDGET_DEFAULT;
+	u32 cons, ready_to_send;
+	int delta;
+
+	cons = load_value(xsk->tx.consumer);
+	ready_to_send = load_value(xsk->tx.producer) - cons;
+	*ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
+
+	delta = load_value(xsk->tx.consumer) - cons;
+	/* By default, one send should consume exactly @max_budget descriptors when
+	 * the budget limit in the __xsk_generic_xmit() while loop is hit. Make sure
+	 * the number of descriptors queued for sending exceeds @max_budget; otherwise
+	 * tx.consumer is updated early in xskq_cons_peek_desc(), which hides the
+	 * behaviour this check is trying to verify.
+	 */
+	if (ready_to_send > max_budget && delta != max_budget)
+		return false;
+
+	return true;
+}
+
 static int kick_tx(struct xsk_socket_info *xsk)
 {
 	int ret;
 
-	ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
+	if (xsk->check_consumer) {
+		if (!kick_tx_with_check(xsk, &ret))
+			return TEST_FAILURE;
+	} else {
+		ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
+	}
 	if (ret >= 0)
 		return TEST_PASS;
 	if (errno == ENOBUFS || errno == EAGAIN || errno == EBUSY || errno == ENETDOWN) {
@@ -1767,8 +1831,13 @@ static void *worker_testapp_validate_rx(void *arg)
 
 	if (!err && ifobject->validation_func)
 		err = ifobject->validation_func(ifobject);
-	if (err)
-		report_failure(test);
+
+	if (err) {
+		if (test->adjust_tail && !is_adjust_tail_supported(ifobject->xdp_progs))
+			test->adjust_tail_support = false;
+		else
+			report_failure(test);
+	}
 
 	pthread_exit(NULL);
 }
@@ -2515,6 +2584,88 @@ static int testapp_hw_sw_max_ring_size(struct test_spec *test)
 	return testapp_validate_traffic(test);
 }
 
+static int testapp_xdp_adjust_tail(struct test_spec *test, int adjust_value)
+{
+	struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
+	struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
+
+	test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_adjust_tail,
+			       skel_tx->progs.xsk_xdp_adjust_tail,
+			       skel_rx->maps.xsk, skel_tx->maps.xsk);
+
+	skel_rx->bss->adjust_value = adjust_value;
+
+	return testapp_validate_traffic(test);
+}
+
+static int testapp_adjust_tail(struct test_spec *test, u32 value, u32 pkt_len)
+{
+	int ret;
+
+	test->adjust_tail_support = true;
+	test->adjust_tail = true;
+	test->total_steps = 1;
+
+	pkt_stream_replace_ifobject(test->ifobj_tx, DEFAULT_BATCH_SIZE, pkt_len);
+	pkt_stream_replace_ifobject(test->ifobj_rx, DEFAULT_BATCH_SIZE, pkt_len + value);
+
+	ret = testapp_xdp_adjust_tail(test, value);
+	if (ret)
+		return ret;
+
+	if (!test->adjust_tail_support) {
+		ksft_test_result_skip("%s %sResize pkt with bpf_xdp_adjust_tail() not supported\n",
+				      mode_string(test), busy_poll_string(test));
+		return TEST_SKIP;
+	}
+
+	return 0;
+}
+
+static int testapp_adjust_tail_shrink(struct test_spec *test)
+{
+	/* Shrink by 4 bytes for testing purpose */
+	return testapp_adjust_tail(test, -4, MIN_PKT_SIZE * 2);
+}
+
+static int testapp_adjust_tail_shrink_mb(struct test_spec *test)
+{
+	test->mtu = MAX_ETH_JUMBO_SIZE;
+	/* Shrink by the frag size */
+	return testapp_adjust_tail(test, -XSK_UMEM__MAX_FRAME_SIZE, XSK_UMEM__LARGE_FRAME_SIZE * 2);
+}
+
+static int testapp_adjust_tail_grow(struct test_spec *test)
+{
+	/* Grow by 4 bytes for testing purpose */
+	return testapp_adjust_tail(test, 4, MIN_PKT_SIZE * 2);
+}
+
+static int testapp_adjust_tail_grow_mb(struct test_spec *test)
+{
+	test->mtu = MAX_ETH_JUMBO_SIZE;
+	/* Grow by (frag_size - last_frag_size) - 1 to stay inside the last fragment */
+	return testapp_adjust_tail(test, (XSK_UMEM__MAX_FRAME_SIZE / 2) - 1,
+				   XSK_UMEM__LARGE_FRAME_SIZE * 2);
+}
+
+static int testapp_tx_queue_consumer(struct test_spec *test)
+{
+	int nr_packets;
+
+	if (test->mode == TEST_MODE_ZC) {
+		ksft_test_result_skip("Cannot run TX_QUEUE_CONSUMER test for ZC mode\n");
+		return TEST_SKIP;
+	}
+
+	nr_packets = MAX_TX_BUDGET_DEFAULT + 1;
+	pkt_stream_replace(test, nr_packets, MIN_PKT_SIZE);
+	test->ifobj_tx->xsk->batch_size = nr_packets;
+	test->ifobj_tx->xsk->check_consumer = true;
+
+	return testapp_validate_traffic(test);
+}
+
 static void run_pkt_test(struct test_spec *test)
 {
 	int ret;
@@ -2621,6 +2772,11 @@ static const struct test_spec tests[] = {
 	{.name = "TOO_MANY_FRAGS", .test_func = testapp_too_many_frags},
 	{.name = "HW_SW_MIN_RING_SIZE", .test_func = testapp_hw_sw_min_ring_size},
 	{.name = "HW_SW_MAX_RING_SIZE", .test_func = testapp_hw_sw_max_ring_size},
+	{.name = "XDP_ADJUST_TAIL_SHRINK", .test_func = testapp_adjust_tail_shrink},
+	{.name = "XDP_ADJUST_TAIL_SHRINK_MULTI_BUFF", .test_func = testapp_adjust_tail_shrink_mb},
+	{.name = "XDP_ADJUST_TAIL_GROW", .test_func = testapp_adjust_tail_grow},
+	{.name = "XDP_ADJUST_TAIL_GROW_MULTI_BUFF", .test_func = testapp_adjust_tail_grow_mb},
+	{.name = "TX_QUEUE_CONSUMER", .test_func = testapp_tx_queue_consumer},
 };
 
 static void print_tests(void)
diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h
index e46e823f6a1a..4df3a5d329ac 100644
--- a/tools/testing/selftests/bpf/xskxceiver.h
+++ b/tools/testing/selftests/bpf/xskxceiver.h
@@ -95,6 +95,7 @@ struct xsk_socket_info {
 	u32 batch_size;
 	u8 dst_mac[ETH_ALEN];
 	u8 src_mac[ETH_ALEN];
+	bool check_consumer;
 };
 
 struct pkt {
@@ -173,6 +174,8 @@ struct test_spec {
 	u16 nb_sockets;
 	bool fail;
 	bool set_ring;
+	bool adjust_tail;
+	bool adjust_tail_support;
 	enum test_mode mode;
 	char name[MAX_TEST_NAME_SIZE];
 };
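For reference, the launch-time plumbing that xdp_hw_metadata.c drives through run_command() above boils down to the tc/ethtool sequence below. This is a sketch, not part of the patch: IFACE and QUEUE are placeholder arguments and the two-queue mqprio map is illustrative (the test derives the real priority map from rxq_num() and its -L option), while the 8001: handle, the 500000 ns ETF delta, and the VLAN-priority-1 match (0x2000 is PCP 1 in the upper VLAN TCI bits, masked by 0x1FFF) mirror the values hard-coded in the test.

#!/bin/bash
# Sketch of the launch-time qdisc/filter setup performed by xdp_hw_metadata.
IFACE="${1:?usage: $0 <iface> [queue]}"
QUEUE="${2:-0}"

# mqprio root qdisc: one traffic class per hardware queue (illustrative
# two-queue map; the test builds the map/queues strings at run time).
tc qdisc add dev "$IFACE" handle 8001: parent root mqprio \
	num_tc 2 map 0 1 queues 1@0 1@1 hw 0

# ETF offload on the launch-time queue: frames are held back until their
# per-packet transmit time, minus the 500 us scheduling delta.
tc qdisc replace dev "$IFACE" parent 8001:$((QUEUE + 1)) etf offload \
	clockid CLOCK_TAI delta 500000

# Steer VLAN-priority-1 traffic into that queue; fall back to a flower
# filter when the NIC cannot match it via ethtool.
if ! ethtool -N "$IFACE" flow-type ether vlan 0x2000 vlan-mask 0x1FFF action "$QUEUE"; then
	tc qdisc add dev "$IFACE" ingress
	tc filter add dev "$IFACE" parent ffff: protocol 802.1Q flower \
		vlan_prio 1 hw_tc "$QUEUE"
fi

# Strip VLAN tags before frames reach the XDP program
ethtool -K "$IFACE" rxvlan on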