Diffstat (limited to 'tools/testing/selftests/bpf'): 172 files changed, 4607 insertions(+), 1665 deletions(-)
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore index 1ff0a9f49c01..ec464859c6b6 100644 --- a/tools/testing/selftests/bpf/.gitignore +++ b/tools/testing/selftests/bpf/.gitignore @@ -21,23 +21,20 @@ test_lirc_mode2_user get_cgroup_id_user test_skb_cgroup_id_user test_socket_cookie -test_cgroup_attach test_cgroup_storage -test_select_reuseport test_flow_dissector flow_dissector_load test_netcnt -test_section_names test_tcpnotify_user test_libbpf test_tcp_check_syncookie_user test_sysctl -libbpf.pc -libbpf.so.* test_hashmap test_btf_dump xdping test_cpp +*.skel.h /no_alu32 /bpf_gcc -bpf_helper_defs.h +/tools + diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index e2fd6f8d579c..257a1aaaa37d 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -3,10 +3,12 @@ include ../../../../scripts/Kbuild.include include ../../../scripts/Makefile.arch CURDIR := $(abspath .) -LIBDIR := $(abspath ../../../lib) +TOOLSDIR := $(abspath ../../..) +LIBDIR := $(TOOLSDIR)/lib BPFDIR := $(LIBDIR)/bpf -TOOLSDIR := $(abspath ../../../include) -APIDIR := $(TOOLSDIR)/uapi +TOOLSINCDIR := $(TOOLSDIR)/include +BPFTOOLDIR := $(TOOLSDIR)/bpf/bpftool +APIDIR := $(TOOLSINCDIR)/uapi GENDIR := $(abspath ../../../../include/generated) GENHDR := $(GENDIR)/autoconf.h @@ -18,19 +20,19 @@ CLANG ?= clang LLC ?= llc LLVM_OBJCOPY ?= llvm-objcopy BPF_GCC ?= $(shell command -v bpf-gcc;) -CFLAGS += -g -Wall -O2 $(GENFLAGS) -I$(APIDIR) -I$(LIBDIR) -I$(BPFDIR) \ - -I$(GENDIR) -I$(TOOLSDIR) -I$(CURDIR) \ +CFLAGS += -g -Wall -O2 $(GENFLAGS) -I$(CURDIR) -I$(APIDIR) \ + -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) -I$(TOOLSINCDIR) \ -Dbpf_prog_load=bpf_prog_test_load \ -Dbpf_load_program=bpf_test_load_program -LDLIBS += -lcap -lelf -lrt -lpthread +LDLIBS += -lcap -lelf -lz -lrt -lpthread # Order correspond to 'make run_tests' order TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \ test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \ test_sock test_btf test_sockmap get_cgroup_id_user test_socket_cookie \ - test_cgroup_storage test_select_reuseport \ + test_cgroup_storage \ test_netcnt test_tcpnotify_user test_sock_fields test_sysctl test_hashmap \ - test_cgroup_attach test_progs-no_alu32 + test_progs-no_alu32 # Also test bpf-gcc, if present ifneq ($(BPF_GCC),) @@ -71,12 +73,39 @@ TEST_PROGS_EXTENDED := with_addr.sh \ # Compile but not part of 'make run_tests' TEST_GEN_PROGS_EXTENDED = test_sock_addr test_skb_cgroup_id_user \ flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user \ - test_lirc_mode2_user xdping test_cpp + test_lirc_mode2_user xdping test_cpp runqslower TEST_CUSTOM_PROGS = urandom_read +# Emit succinct information message describing current building step +# $1 - generic step name (e.g., CC, LINK, etc); +# $2 - optional "flavor" specifier; if provided, will be emitted as [flavor]; +# $3 - target (assumed to be file); only file name will be emitted; +# $4 - optional extra arg, emitted as-is, if provided. 
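A minimal usage sketch of the quiet-build helper documented above (not part of the patch; the example.h target and gen.sh script are hypothetical, and the rule assumes the msg, Q and OUTPUT variables defined by this Makefile):

# Hypothetical rule illustrating the convention: $(call msg,...) prints a
# one-line "  GEN      example.h" status, and $(Q) hides the actual command
# unless the build is invoked with V=1.
$(OUTPUT)/example.h: example.c
	$(call msg,GEN,,$@)
	$(Q)./gen.sh $< > $@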
+ifeq ($(V),1) +Q = +msg = +else +Q = @ +msg = @printf ' %-8s%s %s%s\n' "$(1)" "$(if $(2), [$(2)])" "$(notdir $(3))" "$(if $(4), $(4))"; +MAKEFLAGS += --no-print-directory +submake_extras := feature_display=0 +endif + +# override lib.mk's default rules +OVERRIDE_TARGETS := 1 +override define CLEAN + $(call msg,CLEAN) + $(RM) -r $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(EXTRA_CLEAN) +endef + include ../lib.mk +SCRATCH_DIR := $(OUTPUT)/tools +BUILD_DIR := $(SCRATCH_DIR)/build +INCLUDE_DIR := $(SCRATCH_DIR)/include +BPFOBJ := $(BUILD_DIR)/libbpf/libbpf.a + # Define simple and short `make test_progs`, `make test_sysctl`, etc targets # to build individual tests. # NOTE: Semicolon at the end is critical to override lib.mk's default static @@ -87,13 +116,26 @@ $(notdir $(TEST_GEN_PROGS) \ $(TEST_GEN_PROGS_EXTENDED) \ $(TEST_CUSTOM_PROGS)): %: $(OUTPUT)/% ; +$(OUTPUT)/%:%.c + $(call msg,BINARY,,$@) + $(LINK.c) $^ $(LDLIBS) -o $@ + $(OUTPUT)/urandom_read: urandom_read.c - $(CC) -o $@ $< -Wl,--build-id + $(call msg,BINARY,,$@) + $(CC) $(LDFLAGS) -o $@ $< $(LDLIBS) -Wl,--build-id -$(OUTPUT)/test_stub.o: test_stub.c +$(OUTPUT)/test_stub.o: test_stub.c $(BPFOBJ) + $(call msg,CC,,$@) $(CC) -c $(CFLAGS) -o $@ $< -BPFOBJ := $(OUTPUT)/libbpf.a +VMLINUX_BTF_PATHS := $(abspath ../../../../vmlinux) \ + /sys/kernel/btf/vmlinux \ + /boot/vmlinux-$(shell uname -r) +VMLINUX_BTF:= $(firstword $(wildcard $(VMLINUX_BTF_PATHS))) +$(OUTPUT)/runqslower: $(BPFOBJ) + $(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/runqslower \ + OUTPUT=$(SCRATCH_DIR)/ VMLINUX_BTF=$(VMLINUX_BTF) \ + BPFOBJ=$(BPFOBJ) BPF_INCLUDE=$(INCLUDE_DIR) $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): $(OUTPUT)/test_stub.o $(BPFOBJ) @@ -110,19 +152,24 @@ $(OUTPUT)/test_cgroup_storage: cgroup_helpers.c $(OUTPUT)/test_netcnt: cgroup_helpers.c $(OUTPUT)/test_sock_fields: cgroup_helpers.c $(OUTPUT)/test_sysctl: cgroup_helpers.c -$(OUTPUT)/test_cgroup_attach: cgroup_helpers.c - -.PHONY: force -# force a rebuild of BPFOBJ when its dependencies are updated -force: +DEFAULT_BPFTOOL := $(SCRATCH_DIR)/sbin/bpftool +BPFTOOL ?= $(DEFAULT_BPFTOOL) +$(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \ + $(BPFOBJ) | $(BUILD_DIR)/bpftool + $(Q)$(MAKE) $(submake_extras) -C $(BPFTOOLDIR) \ + OUTPUT=$(BUILD_DIR)/bpftool/ \ + prefix= DESTDIR=$(SCRATCH_DIR)/ install -$(BPFOBJ): force - $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/ +$(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \ + ../../../include/uapi/linux/bpf.h \ + | $(INCLUDE_DIR) $(BUILD_DIR)/libbpf + $(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) OUTPUT=$(BUILD_DIR)/libbpf/ \ + DESTDIR=$(SCRATCH_DIR) prefix= all install_headers -BPF_HELPERS := $(OUTPUT)/bpf_helper_defs.h $(wildcard $(BPFDIR)/bpf_*.h) -$(OUTPUT)/bpf_helper_defs.h: - $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/ $(OUTPUT)/bpf_helper_defs.h +$(BUILD_DIR)/libbpf $(BUILD_DIR)/bpftool $(INCLUDE_DIR): + $(call msg,MKDIR,,$@) + mkdir -p $@ # Get Clang's default includes on this system, as opposed to those seen by # '-target bpf'. This fixes "missing" files on some architectures/distros, @@ -142,8 +189,8 @@ MENDIAN=$(if $(IS_LITTLE_ENDIAN),-mlittle-endian,-mbig-endian) CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG)) BPF_CFLAGS = -g -D__TARGET_ARCH_$(SRCARCH) $(MENDIAN) \ - -I. 
-I./include/uapi -I$(APIDIR) \ - -I$(BPFDIR) -I$(abspath $(OUTPUT)/../usr/include) + -I$(INCLUDE_DIR) -I$(CURDIR) -I$(CURDIR)/include/uapi \ + -I$(APIDIR) -I$(abspath $(OUTPUT)/../usr/include) CLANG_CFLAGS = $(CLANG_SYS_INCLUDES) \ -Wno-compare-distinct-pointer-types @@ -159,27 +206,33 @@ $(OUTPUT)/flow_dissector_load.o: flow_dissector_load.h # $3 - CFLAGS # $4 - LDFLAGS define CLANG_BPF_BUILD_RULE + $(call msg,CLNG-LLC,$(TRUNNER_BINARY),$2) ($(CLANG) $3 -O2 -target bpf -emit-llvm \ -c $1 -o - || echo "BPF obj compilation failed") | \ $(LLC) -mattr=dwarfris -march=bpf -mcpu=probe $4 -filetype=obj -o $2 endef # Similar to CLANG_BPF_BUILD_RULE, but with disabled alu32 define CLANG_NOALU32_BPF_BUILD_RULE + $(call msg,CLNG-LLC,$(TRUNNER_BINARY),$2) ($(CLANG) $3 -O2 -target bpf -emit-llvm \ -c $1 -o - || echo "BPF obj compilation failed") | \ $(LLC) -march=bpf -mcpu=v2 $4 -filetype=obj -o $2 endef # Similar to CLANG_BPF_BUILD_RULE, but using native Clang and bpf LLC define CLANG_NATIVE_BPF_BUILD_RULE + $(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2) ($(CLANG) $3 -O2 -emit-llvm \ -c $1 -o - || echo "BPF obj compilation failed") | \ $(LLC) -march=bpf -mcpu=probe $4 -filetype=obj -o $2 endef # Build BPF object using GCC define GCC_BPF_BUILD_RULE + $(call msg,GCC-BPF,$(TRUNNER_BINARY),$2) $(BPF_GCC) $3 $4 -O2 -c $1 -o $2 endef +SKEL_BLACKLIST := btf__% test_pinning_invalid.c + # Set up extra TRUNNER_XXX "temporary" variables in the environment (relies on # $eval()) and pass control to DEFINE_TEST_RUNNER_RULES. # Parameters: @@ -195,8 +248,11 @@ TRUNNER_EXTRA_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.o, \ $$(filter %.c,$(TRUNNER_EXTRA_SOURCES))) TRUNNER_EXTRA_HDRS := $$(filter %.h,$(TRUNNER_EXTRA_SOURCES)) TRUNNER_TESTS_HDR := $(TRUNNER_TESTS_DIR)/tests.h -TRUNNER_BPF_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.o, \ - $$(notdir $$(wildcard $(TRUNNER_BPF_PROGS_DIR)/*.c))) +TRUNNER_BPF_SRCS := $$(notdir $$(wildcard $(TRUNNER_BPF_PROGS_DIR)/*.c)) +TRUNNER_BPF_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.o, $$(TRUNNER_BPF_SRCS)) +TRUNNER_BPF_SKELS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.skel.h, \ + $$(filter-out $(SKEL_BLACKLIST), \ + $$(TRUNNER_BPF_SRCS))) # Evaluate rules now with extra TRUNNER_XXX variables above already defined $$(eval $$(call DEFINE_TEST_RUNNER_RULES,$1,$2)) @@ -212,6 +268,7 @@ define DEFINE_TEST_RUNNER_RULES ifeq ($($(TRUNNER_OUTPUT)-dir),) $(TRUNNER_OUTPUT)-dir := y $(TRUNNER_OUTPUT): + $$(call msg,MKDIR,,$$@) mkdir -p $$@ endif @@ -222,16 +279,23 @@ $(TRUNNER_BPF_PROGS_DIR)$(if $2,-)$2-bpfobjs := y $(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.o: \ $(TRUNNER_BPF_PROGS_DIR)/%.c \ $(TRUNNER_BPF_PROGS_DIR)/*.h \ - $$(BPF_HELPERS) | $(TRUNNER_OUTPUT) + $$(BPFOBJ) | $(TRUNNER_OUTPUT) $$(call $(TRUNNER_BPF_BUILD_RULE),$$<,$$@, \ $(TRUNNER_BPF_CFLAGS), \ $(TRUNNER_BPF_LDFLAGS)) + +$(TRUNNER_BPF_SKELS): $(TRUNNER_OUTPUT)/%.skel.h: \ + $(TRUNNER_OUTPUT)/%.o \ + | $(BPFTOOL) $(TRUNNER_OUTPUT) + $$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@) + $$(BPFTOOL) gen skeleton $$< > $$@ endif # ensure we set up tests.h header generation rule just once ifeq ($($(TRUNNER_TESTS_DIR)-tests-hdr),) $(TRUNNER_TESTS_DIR)-tests-hdr := y $(TRUNNER_TESTS_HDR): $(TRUNNER_TESTS_DIR)/*.c + $$(call msg,TEST-HDR,$(TRUNNER_BINARY),$$@) $$(shell ( cd $(TRUNNER_TESTS_DIR); \ echo '/* Generated header, do not edit */'; \ ls *.c 2> /dev/null | \ @@ -245,7 +309,9 @@ $(TRUNNER_TEST_OBJS): $(TRUNNER_OUTPUT)/%.test.o: \ $(TRUNNER_TESTS_DIR)/%.c \ $(TRUNNER_EXTRA_HDRS) \ $(TRUNNER_BPF_OBJS) \ + $(TRUNNER_BPF_SKELS) \ $$(BPFOBJ) | 
$(TRUNNER_OUTPUT) + $$(call msg,TEST-OBJ,$(TRUNNER_BINARY),$$@) cd $$(@D) && $$(CC) $$(CFLAGS) -c $(CURDIR)/$$< $$(LDLIBS) -o $$(@F) $(TRUNNER_EXTRA_OBJS): $(TRUNNER_OUTPUT)/%.o: \ @@ -253,17 +319,20 @@ $(TRUNNER_EXTRA_OBJS): $(TRUNNER_OUTPUT)/%.o: \ $(TRUNNER_EXTRA_HDRS) \ $(TRUNNER_TESTS_HDR) \ $$(BPFOBJ) | $(TRUNNER_OUTPUT) + $$(call msg,EXT-OBJ,$(TRUNNER_BINARY),$$@) $$(CC) $$(CFLAGS) -c $$< $$(LDLIBS) -o $$@ +# only copy extra resources if in flavored build $(TRUNNER_BINARY)-extras: $(TRUNNER_EXTRA_FILES) | $(TRUNNER_OUTPUT) ifneq ($2,) - # only copy extra resources if in flavored build + $$(call msg,EXT-COPY,$(TRUNNER_BINARY),$(TRUNNER_EXTRA_FILES)) cp -a $$^ $(TRUNNER_OUTPUT)/ endif $(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \ $(TRUNNER_EXTRA_OBJS) $$(BPFOBJ) \ | $(TRUNNER_BINARY)-extras + $$(call msg,BINARY,,$$@) $$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) -o $$@ endef @@ -276,7 +345,7 @@ TRUNNER_EXTRA_SOURCES := test_progs.c cgroup_helpers.c trace_helpers.c \ TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read \ $(wildcard progs/btf_dump_test_case_*.c) TRUNNER_BPF_BUILD_RULE := CLANG_BPF_BUILD_RULE -TRUNNER_BPF_CFLAGS := -I. -I$(OUTPUT) $(BPF_CFLAGS) $(CLANG_CFLAGS) +TRUNNER_BPF_CFLAGS := $(BPF_CFLAGS) $(CLANG_CFLAGS) TRUNNER_BPF_LDFLAGS := -mattr=+alu32 $(eval $(call DEFINE_TEST_RUNNER,test_progs)) @@ -315,12 +384,15 @@ verifier/tests.h: verifier/*.c echo '#endif' \ ) > verifier/tests.h) $(OUTPUT)/test_verifier: test_verifier.c verifier/tests.h $(BPFOBJ) | $(OUTPUT) + $(call msg,BINARY,,$@) $(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@ # Make sure we are able to include and link libbpf against c++. -$(OUTPUT)/test_cpp: test_cpp.cpp $(BPFOBJ) +$(OUTPUT)/test_cpp: test_cpp.cpp $(OUTPUT)/test_core_extern.skel.h $(BPFOBJ) + $(call msg,CXX,,$@) $(CXX) $(CFLAGS) $^ $(LDLIBS) -o $@ -EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) \ +EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR) \ prog_tests/tests.h map_tests/tests.h verifier/tests.h \ - feature $(OUTPUT)/*.o $(OUTPUT)/no_alu32 $(OUTPUT)/bpf_gcc + feature \ + $(addprefix $(OUTPUT)/,*.o *.skel.h no_alu32 bpf_gcc) diff --git a/tools/testing/selftests/bpf/bpf_tcp_helpers.h b/tools/testing/selftests/bpf/bpf_tcp_helpers.h new file mode 100644 index 000000000000..8f21965ffc6c --- /dev/null +++ b/tools/testing/selftests/bpf/bpf_tcp_helpers.h @@ -0,0 +1,235 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __BPF_TCP_HELPERS_H +#define __BPF_TCP_HELPERS_H + +#include <stdbool.h> +#include <linux/types.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> +#include "bpf_trace_helpers.h" + +#define BPF_STRUCT_OPS(name, args...) 
\ +SEC("struct_ops/"#name) \ +BPF_PROG(name, args) + +#define tcp_jiffies32 ((__u32)bpf_jiffies64()) + +struct sock_common { + unsigned char skc_state; +} __attribute__((preserve_access_index)); + +enum sk_pacing { + SK_PACING_NONE = 0, + SK_PACING_NEEDED = 1, + SK_PACING_FQ = 2, +}; + +struct sock { + struct sock_common __sk_common; + unsigned long sk_pacing_rate; + __u32 sk_pacing_status; /* see enum sk_pacing */ +} __attribute__((preserve_access_index)); + +struct inet_sock { + struct sock sk; +} __attribute__((preserve_access_index)); + +struct inet_connection_sock { + struct inet_sock icsk_inet; + __u8 icsk_ca_state:6, + icsk_ca_setsockopt:1, + icsk_ca_dst_locked:1; + struct { + __u8 pending; + } icsk_ack; + __u64 icsk_ca_priv[104 / sizeof(__u64)]; +} __attribute__((preserve_access_index)); + +struct tcp_sock { + struct inet_connection_sock inet_conn; + + __u32 rcv_nxt; + __u32 snd_nxt; + __u32 snd_una; + __u8 ecn_flags; + __u32 delivered; + __u32 delivered_ce; + __u32 snd_cwnd; + __u32 snd_cwnd_cnt; + __u32 snd_cwnd_clamp; + __u32 snd_ssthresh; + __u8 syn_data:1, /* SYN includes data */ + syn_fastopen:1, /* SYN includes Fast Open option */ + syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */ + syn_fastopen_ch:1, /* Active TFO re-enabling probe */ + syn_data_acked:1,/* data in SYN is acked by SYN-ACK */ + save_syn:1, /* Save headers of SYN packet */ + is_cwnd_limited:1,/* forward progress limited by snd_cwnd? */ + syn_smc:1; /* SYN includes SMC */ + __u32 max_packets_out; + __u32 lsndtime; + __u32 prior_cwnd; + __u64 tcp_mstamp; /* most recent packet received/sent */ +} __attribute__((preserve_access_index)); + +static __always_inline struct inet_connection_sock *inet_csk(const struct sock *sk) +{ + return (struct inet_connection_sock *)sk; +} + +static __always_inline void *inet_csk_ca(const struct sock *sk) +{ + return (void *)inet_csk(sk)->icsk_ca_priv; +} + +static __always_inline struct tcp_sock *tcp_sk(const struct sock *sk) +{ + return (struct tcp_sock *)sk; +} + +static __always_inline bool before(__u32 seq1, __u32 seq2) +{ + return (__s32)(seq1-seq2) < 0; +} +#define after(seq2, seq1) before(seq1, seq2) + +#define TCP_ECN_OK 1 +#define TCP_ECN_QUEUE_CWR 2 +#define TCP_ECN_DEMAND_CWR 4 +#define TCP_ECN_SEEN 8 + +enum inet_csk_ack_state_t { + ICSK_ACK_SCHED = 1, + ICSK_ACK_TIMER = 2, + ICSK_ACK_PUSHED = 4, + ICSK_ACK_PUSHED2 = 8, + ICSK_ACK_NOW = 16 /* Send the next ACK immediately (once) */ +}; + +enum tcp_ca_event { + CA_EVENT_TX_START = 0, + CA_EVENT_CWND_RESTART = 1, + CA_EVENT_COMPLETE_CWR = 2, + CA_EVENT_LOSS = 3, + CA_EVENT_ECN_NO_CE = 4, + CA_EVENT_ECN_IS_CE = 5, +}; + +enum tcp_ca_state { + TCP_CA_Open = 0, + TCP_CA_Disorder = 1, + TCP_CA_CWR = 2, + TCP_CA_Recovery = 3, + TCP_CA_Loss = 4 +}; + +struct ack_sample { + __u32 pkts_acked; + __s32 rtt_us; + __u32 in_flight; +} __attribute__((preserve_access_index)); + +struct rate_sample { + __u64 prior_mstamp; /* starting timestamp for interval */ + __u32 prior_delivered; /* tp->delivered at "prior_mstamp" */ + __s32 delivered; /* number of packets delivered over interval */ + long interval_us; /* time for tp->delivered to incr "delivered" */ + __u32 snd_interval_us; /* snd interval for delivered packets */ + __u32 rcv_interval_us; /* rcv interval for delivered packets */ + long rtt_us; /* RTT of last (S)ACKed packet (or -1) */ + int losses; /* number of packets marked lost upon ACK */ + __u32 acked_sacked; /* number of packets newly (S)ACKed upon ACK */ + __u32 prior_in_flight; /* in flight before this ACK */ + 
bool is_app_limited; /* is sample from packet with bubble in pipe? */ + bool is_retrans; /* is sample from retransmission? */ + bool is_ack_delayed; /* is this (likely) a delayed ACK? */ +} __attribute__((preserve_access_index)); + +#define TCP_CA_NAME_MAX 16 +#define TCP_CONG_NEEDS_ECN 0x2 + +struct tcp_congestion_ops { + char name[TCP_CA_NAME_MAX]; + __u32 flags; + + /* initialize private data (optional) */ + void (*init)(struct sock *sk); + /* cleanup private data (optional) */ + void (*release)(struct sock *sk); + + /* return slow start threshold (required) */ + __u32 (*ssthresh)(struct sock *sk); + /* do new cwnd calculation (required) */ + void (*cong_avoid)(struct sock *sk, __u32 ack, __u32 acked); + /* call before changing ca_state (optional) */ + void (*set_state)(struct sock *sk, __u8 new_state); + /* call when cwnd event occurs (optional) */ + void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev); + /* call when ack arrives (optional) */ + void (*in_ack_event)(struct sock *sk, __u32 flags); + /* new value of cwnd after loss (required) */ + __u32 (*undo_cwnd)(struct sock *sk); + /* hook for packet ack accounting (optional) */ + void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample); + /* override sysctl_tcp_min_tso_segs */ + __u32 (*min_tso_segs)(struct sock *sk); + /* returns the multiplier used in tcp_sndbuf_expand (optional) */ + __u32 (*sndbuf_expand)(struct sock *sk); + /* call when packets are delivered to update cwnd and pacing rate, + * after all the ca_state processing. (optional) + */ + void (*cong_control)(struct sock *sk, const struct rate_sample *rs); +}; + +#define min(a, b) ((a) < (b) ? (a) : (b)) +#define max(a, b) ((a) > (b) ? (a) : (b)) +#define min_not_zero(x, y) ({ \ + typeof(x) __x = (x); \ + typeof(y) __y = (y); \ + __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); }) + +static __always_inline __u32 tcp_slow_start(struct tcp_sock *tp, __u32 acked) +{ + __u32 cwnd = min(tp->snd_cwnd + acked, tp->snd_ssthresh); + + acked -= cwnd - tp->snd_cwnd; + tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp); + + return acked; +} + +static __always_inline bool tcp_in_slow_start(const struct tcp_sock *tp) +{ + return tp->snd_cwnd < tp->snd_ssthresh; +} + +static __always_inline bool tcp_is_cwnd_limited(const struct sock *sk) +{ + const struct tcp_sock *tp = tcp_sk(sk); + + /* If in slow start, ensure cwnd grows to twice what was ACKed. */ + if (tcp_in_slow_start(tp)) + return tp->snd_cwnd < 2 * tp->max_packets_out; + + return !!BPF_CORE_READ_BITFIELD(tp, is_cwnd_limited); +} + +static __always_inline void tcp_cong_avoid_ai(struct tcp_sock *tp, __u32 w, __u32 acked) +{ + /* If credits accumulated at a higher w, apply them gently now. */ + if (tp->snd_cwnd_cnt >= w) { + tp->snd_cwnd_cnt = 0; + tp->snd_cwnd++; + } + + tp->snd_cwnd_cnt += acked; + if (tp->snd_cwnd_cnt >= w) { + __u32 delta = tp->snd_cwnd_cnt / w; + + tp->snd_cwnd_cnt -= delta * w; + tp->snd_cwnd += delta; + } + tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_cwnd_clamp); +} + +#endif diff --git a/tools/testing/selftests/bpf/bpf_trace_helpers.h b/tools/testing/selftests/bpf/bpf_trace_helpers.h index c76a214a53b0..c6f1354d93fb 100644 --- a/tools/testing/selftests/bpf/bpf_trace_helpers.h +++ b/tools/testing/selftests/bpf/bpf_trace_helpers.h @@ -1,58 +1,120 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ #ifndef __BPF_TRACE_HELPERS_H #define __BPF_TRACE_HELPERS_H -#include "bpf_helpers.h" - -#define __BPF_MAP_0(i, m, v, ...) 
v -#define __BPF_MAP_1(i, m, v, t, a, ...) m(t, a, ctx[i]) -#define __BPF_MAP_2(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_1(i+1, m, v, __VA_ARGS__) -#define __BPF_MAP_3(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_2(i+1, m, v, __VA_ARGS__) -#define __BPF_MAP_4(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_3(i+1, m, v, __VA_ARGS__) -#define __BPF_MAP_5(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_4(i+1, m, v, __VA_ARGS__) -#define __BPF_MAP_6(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_5(i+1, m, v, __VA_ARGS__) -#define __BPF_MAP_7(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_6(i+1, m, v, __VA_ARGS__) -#define __BPF_MAP_8(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_7(i+1, m, v, __VA_ARGS__) -#define __BPF_MAP_9(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_8(i+1, m, v, __VA_ARGS__) -#define __BPF_MAP_10(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_9(i+1, m, v, __VA_ARGS__) -#define __BPF_MAP_11(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_10(i+1, m, v, __VA_ARGS__) -#define __BPF_MAP_12(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_11(i+1, m, v, __VA_ARGS__) -#define __BPF_MAP(n, ...) __BPF_MAP_##n(0, __VA_ARGS__) - -/* BPF sizeof(void *) is always 8, so no need to cast to long first - * for ptr to avoid compiler warning. +#include <bpf/bpf_helpers.h> + +#define ___bpf_concat(a, b) a ## b +#define ___bpf_apply(fn, n) ___bpf_concat(fn, n) +#define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N +#define ___bpf_narg(...) \ + ___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) +#define ___bpf_empty(...) \ + ___bpf_nth(_, ##__VA_ARGS__, N, N, N, N, N, N, N, N, N, N, 0) + +#define ___bpf_ctx_cast0() ctx +#define ___bpf_ctx_cast1(x) ___bpf_ctx_cast0(), (void *)ctx[0] +#define ___bpf_ctx_cast2(x, args...) ___bpf_ctx_cast1(args), (void *)ctx[1] +#define ___bpf_ctx_cast3(x, args...) ___bpf_ctx_cast2(args), (void *)ctx[2] +#define ___bpf_ctx_cast4(x, args...) ___bpf_ctx_cast3(args), (void *)ctx[3] +#define ___bpf_ctx_cast5(x, args...) ___bpf_ctx_cast4(args), (void *)ctx[4] +#define ___bpf_ctx_cast6(x, args...) ___bpf_ctx_cast5(args), (void *)ctx[5] +#define ___bpf_ctx_cast7(x, args...) ___bpf_ctx_cast6(args), (void *)ctx[6] +#define ___bpf_ctx_cast8(x, args...) ___bpf_ctx_cast7(args), (void *)ctx[7] +#define ___bpf_ctx_cast9(x, args...) ___bpf_ctx_cast8(args), (void *)ctx[8] +#define ___bpf_ctx_cast10(x, args...) ___bpf_ctx_cast9(args), (void *)ctx[9] +#define ___bpf_ctx_cast11(x, args...) ___bpf_ctx_cast10(args), (void *)ctx[10] +#define ___bpf_ctx_cast12(x, args...) ___bpf_ctx_cast11(args), (void *)ctx[11] +#define ___bpf_ctx_cast(args...) \ + ___bpf_apply(___bpf_ctx_cast, ___bpf_narg(args))(args) + +/* + * BPF_PROG is a convenience wrapper for generic tp_btf/fentry/fexit and + * similar kinds of BPF programs, that accept input arguments as a single + * pointer to untyped u64 array, where each u64 can actually be a typed + * pointer or integer of different size. Instead of requring user to write + * manual casts and work with array elements by index, BPF_PROG macro + * allows user to declare a list of named and typed input arguments in the + * same syntax as for normal C function. All the casting is hidden and + * performed transparently, while user code can just assume working with + * function arguments of specified type and name. + * + * Original raw context argument is preserved as well as 'ctx' argument. 
+ * This is useful when using BPF helpers that expect original context + * as one of the parameters (e.g., for bpf_perf_event_output()). */ -#define __BPF_CAST(t, a, ctx) (t) ctx -#define __BPF_V void -#define __BPF_N - -#define __BPF_DECL_ARGS(t, a, ctx) t a - -#define BPF_TRACE_x(x, sec_name, fname, ret_type, ...) \ -static __always_inline ret_type \ -____##fname(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \ - \ -SEC(sec_name) \ -ret_type fname(__u64 *ctx) \ -{ \ - return ____##fname(__BPF_MAP(x, __BPF_CAST, __BPF_N, __VA_ARGS__));\ -} \ - \ -static __always_inline \ -ret_type ____##fname(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)) - -#define BPF_TRACE_0(sec, fname, ...) BPF_TRACE_x(0, sec, fname, int, __VA_ARGS__) -#define BPF_TRACE_1(sec, fname, ...) BPF_TRACE_x(1, sec, fname, int, __VA_ARGS__) -#define BPF_TRACE_2(sec, fname, ...) BPF_TRACE_x(2, sec, fname, int, __VA_ARGS__) -#define BPF_TRACE_3(sec, fname, ...) BPF_TRACE_x(3, sec, fname, int, __VA_ARGS__) -#define BPF_TRACE_4(sec, fname, ...) BPF_TRACE_x(4, sec, fname, int, __VA_ARGS__) -#define BPF_TRACE_5(sec, fname, ...) BPF_TRACE_x(5, sec, fname, int, __VA_ARGS__) -#define BPF_TRACE_6(sec, fname, ...) BPF_TRACE_x(6, sec, fname, int, __VA_ARGS__) -#define BPF_TRACE_7(sec, fname, ...) BPF_TRACE_x(7, sec, fname, int, __VA_ARGS__) -#define BPF_TRACE_8(sec, fname, ...) BPF_TRACE_x(8, sec, fname, int, __VA_ARGS__) -#define BPF_TRACE_9(sec, fname, ...) BPF_TRACE_x(9, sec, fname, int, __VA_ARGS__) -#define BPF_TRACE_10(sec, fname, ...) BPF_TRACE_x(10, sec, fname, int, __VA_ARGS__) -#define BPF_TRACE_11(sec, fname, ...) BPF_TRACE_x(11, sec, fname, int, __VA_ARGS__) -#define BPF_TRACE_12(sec, fname, ...) BPF_TRACE_x(12, sec, fname, int, __VA_ARGS__) +#define BPF_PROG(name, args...) \ +name(unsigned long long *ctx); \ +static __always_inline typeof(name(0)) \ +____##name(unsigned long long *ctx, ##args); \ +typeof(name(0)) name(unsigned long long *ctx) \ +{ \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ + return ____##name(___bpf_ctx_cast(args)); \ + _Pragma("GCC diagnostic pop") \ +} \ +static __always_inline typeof(name(0)) \ +____##name(unsigned long long *ctx, ##args) + +struct pt_regs; + +#define ___bpf_kprobe_args0() ctx +#define ___bpf_kprobe_args1(x) \ + ___bpf_kprobe_args0(), (void *)PT_REGS_PARM1(ctx) +#define ___bpf_kprobe_args2(x, args...) \ + ___bpf_kprobe_args1(args), (void *)PT_REGS_PARM2(ctx) +#define ___bpf_kprobe_args3(x, args...) \ + ___bpf_kprobe_args2(args), (void *)PT_REGS_PARM3(ctx) +#define ___bpf_kprobe_args4(x, args...) \ + ___bpf_kprobe_args3(args), (void *)PT_REGS_PARM4(ctx) +#define ___bpf_kprobe_args5(x, args...) \ + ___bpf_kprobe_args4(args), (void *)PT_REGS_PARM5(ctx) +#define ___bpf_kprobe_args(args...) \ + ___bpf_apply(___bpf_kprobe_args, ___bpf_narg(args))(args) +/* + * BPF_KPROBE serves the same purpose for kprobes as BPF_PROG for + * tp_btf/fentry/fexit BPF programs. It hides the underlying platform-specific + * low-level way of getting kprobe input arguments from struct pt_regs, and + * provides a familiar typed and named function arguments syntax and + * semantics of accessing kprobe input paremeters. + * + * Original struct pt_regs* context is preserved as 'ctx' argument. This might + * be necessary when using BPF helpers like bpf_perf_event_output(). + */ +#define BPF_KPROBE(name, args...) 
\ +name(struct pt_regs *ctx); \ +static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args);\ +typeof(name(0)) name(struct pt_regs *ctx) \ +{ \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ + return ____##name(___bpf_kprobe_args(args)); \ + _Pragma("GCC diagnostic pop") \ +} \ +static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args) + +#define ___bpf_kretprobe_args0() ctx +#define ___bpf_kretprobe_argsN(x, args...) \ + ___bpf_kprobe_args(args), (void *)PT_REGS_RET(ctx) +#define ___bpf_kretprobe_args(args...) \ + ___bpf_apply(___bpf_kretprobe_args, ___bpf_empty(args))(args) + +/* + * BPF_KRETPROBE is similar to BPF_KPROBE, except, in addition to listing all + * input kprobe arguments, one last extra argument has to be specified, which + * captures kprobe return value. + */ +#define BPF_KRETPROBE(name, args...) \ +name(struct pt_regs *ctx); \ +static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args);\ +typeof(name(0)) name(struct pt_regs *ctx) \ +{ \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ + return ____##name(___bpf_kretprobe_args(args)); \ + _Pragma("GCC diagnostic pop") \ +} \ +static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args) #endif diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h index ec219f84e041..a3352a64c067 100644 --- a/tools/testing/selftests/bpf/bpf_util.h +++ b/tools/testing/selftests/bpf/bpf_util.h @@ -6,7 +6,7 @@ #include <stdlib.h> #include <string.h> #include <errno.h> -#include <libbpf.h> /* libbpf_num_possible_cpus */ +#include <bpf/libbpf.h> /* libbpf_num_possible_cpus */ static inline unsigned int bpf_num_possible_cpus(void) { diff --git a/tools/testing/selftests/bpf/map_tests/array_map_batch_ops.c b/tools/testing/selftests/bpf/map_tests/array_map_batch_ops.c new file mode 100644 index 000000000000..f0a64d8ac59a --- /dev/null +++ b/tools/testing/selftests/bpf/map_tests/array_map_batch_ops.c @@ -0,0 +1,129 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <stdio.h> +#include <errno.h> +#include <string.h> + +#include <bpf/bpf.h> +#include <bpf/libbpf.h> + +#include <test_maps.h> + +static void map_batch_update(int map_fd, __u32 max_entries, int *keys, + int *values) +{ + int i, err; + DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts, + .elem_flags = 0, + .flags = 0, + ); + + for (i = 0; i < max_entries; i++) { + keys[i] = i; + values[i] = i + 1; + } + + err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &opts); + CHECK(err, "bpf_map_update_batch()", "error:%s\n", strerror(errno)); +} + +static void map_batch_verify(int *visited, __u32 max_entries, + int *keys, int *values) +{ + int i; + + memset(visited, 0, max_entries * sizeof(*visited)); + for (i = 0; i < max_entries; i++) { + CHECK(keys[i] + 1 != values[i], "key/value checking", + "error: i %d key %d value %d\n", i, keys[i], values[i]); + visited[i] = 1; + } + for (i = 0; i < max_entries; i++) { + CHECK(visited[i] != 1, "visited checking", + "error: keys array at index %d missing\n", i); + } +} + +void test_array_map_batch_ops(void) +{ + struct bpf_create_map_attr xattr = { + .name = "array_map", + .map_type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(int), + .value_size = sizeof(int), + }; + int map_fd, *keys, *values, *visited; + __u32 count, total, total_success; + const __u32 max_entries = 10; + bool nospace_err; + __u64 batch = 0; + int err, step; + 
DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts, + .elem_flags = 0, + .flags = 0, + ); + + xattr.max_entries = max_entries; + map_fd = bpf_create_map_xattr(&xattr); + CHECK(map_fd == -1, + "bpf_create_map_xattr()", "error:%s\n", strerror(errno)); + + keys = malloc(max_entries * sizeof(int)); + values = malloc(max_entries * sizeof(int)); + visited = malloc(max_entries * sizeof(int)); + CHECK(!keys || !values || !visited, "malloc()", "error:%s\n", + strerror(errno)); + + /* populate elements to the map */ + map_batch_update(map_fd, max_entries, keys, values); + + /* test 1: lookup in a loop with various steps. */ + total_success = 0; + for (step = 1; step < max_entries; step++) { + map_batch_update(map_fd, max_entries, keys, values); + map_batch_verify(visited, max_entries, keys, values); + memset(keys, 0, max_entries * sizeof(*keys)); + memset(values, 0, max_entries * sizeof(*values)); + batch = 0; + total = 0; + /* iteratively lookup/delete elements with 'step' + * elements each. + */ + count = step; + nospace_err = false; + while (true) { + err = bpf_map_lookup_batch(map_fd, + total ? &batch : NULL, &batch, + keys + total, + values + total, + &count, &opts); + + CHECK((err && errno != ENOENT), "lookup with steps", + "error: %s\n", strerror(errno)); + + total += count; + if (err) + break; + + } + + if (nospace_err == true) + continue; + + CHECK(total != max_entries, "lookup with steps", + "total = %u, max_entries = %u\n", total, max_entries); + + map_batch_verify(visited, max_entries, keys, values); + + total_success++; + } + + CHECK(total_success == 0, "check total_success", + "unexpected failure\n"); + + printf("%s:PASS\n", __func__); + + free(keys); + free(values); + free(visited); +} diff --git a/tools/testing/selftests/bpf/map_tests/htab_map_batch_ops.c b/tools/testing/selftests/bpf/map_tests/htab_map_batch_ops.c new file mode 100644 index 000000000000..976bf415fbdd --- /dev/null +++ b/tools/testing/selftests/bpf/map_tests/htab_map_batch_ops.c @@ -0,0 +1,283 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ +#include <stdio.h> +#include <errno.h> +#include <string.h> + +#include <bpf/bpf.h> +#include <bpf/libbpf.h> + +#include <bpf_util.h> +#include <test_maps.h> + +static void map_batch_update(int map_fd, __u32 max_entries, int *keys, + void *values, bool is_pcpu) +{ + typedef BPF_DECLARE_PERCPU(int, value); + value *v = NULL; + int i, j, err; + DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts, + .elem_flags = 0, + .flags = 0, + ); + + if (is_pcpu) + v = (value *)values; + + for (i = 0; i < max_entries; i++) { + keys[i] = i + 1; + if (is_pcpu) + for (j = 0; j < bpf_num_possible_cpus(); j++) + bpf_percpu(v[i], j) = i + 2 + j; + else + ((int *)values)[i] = i + 2; + } + + err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &opts); + CHECK(err, "bpf_map_update_batch()", "error:%s\n", strerror(errno)); +} + +static void map_batch_verify(int *visited, __u32 max_entries, + int *keys, void *values, bool is_pcpu) +{ + typedef BPF_DECLARE_PERCPU(int, value); + value *v = NULL; + int i, j; + + if (is_pcpu) + v = (value *)values; + + memset(visited, 0, max_entries * sizeof(*visited)); + for (i = 0; i < max_entries; i++) { + + if (is_pcpu) { + for (j = 0; j < bpf_num_possible_cpus(); j++) { + CHECK(keys[i] + 1 + j != bpf_percpu(v[i], j), + "key/value checking", + "error: i %d j %d key %d value %d\n", + i, j, keys[i], bpf_percpu(v[i], j)); + } + } else { + CHECK(keys[i] + 1 != ((int *)values)[i], + "key/value checking", + "error: i %d key %d value %d\n", i, keys[i], + 
((int *)values)[i]); + } + + visited[i] = 1; + + } + for (i = 0; i < max_entries; i++) { + CHECK(visited[i] != 1, "visited checking", + "error: keys array at index %d missing\n", i); + } +} + +void __test_map_lookup_and_delete_batch(bool is_pcpu) +{ + __u32 batch, count, total, total_success; + typedef BPF_DECLARE_PERCPU(int, value); + int map_fd, *keys, *visited, key; + const __u32 max_entries = 10; + value pcpu_values[max_entries]; + int err, step, value_size; + bool nospace_err; + void *values; + struct bpf_create_map_attr xattr = { + .name = "hash_map", + .map_type = is_pcpu ? BPF_MAP_TYPE_PERCPU_HASH : + BPF_MAP_TYPE_HASH, + .key_size = sizeof(int), + .value_size = sizeof(int), + }; + DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts, + .elem_flags = 0, + .flags = 0, + ); + + xattr.max_entries = max_entries; + map_fd = bpf_create_map_xattr(&xattr); + CHECK(map_fd == -1, + "bpf_create_map_xattr()", "error:%s\n", strerror(errno)); + + value_size = is_pcpu ? sizeof(value) : sizeof(int); + keys = malloc(max_entries * sizeof(int)); + if (is_pcpu) + values = pcpu_values; + else + values = malloc(max_entries * sizeof(int)); + visited = malloc(max_entries * sizeof(int)); + CHECK(!keys || !values || !visited, "malloc()", + "error:%s\n", strerror(errno)); + + /* test 1: lookup/delete an empty hash table, -ENOENT */ + count = max_entries; + err = bpf_map_lookup_and_delete_batch(map_fd, NULL, &batch, keys, + values, &count, &opts); + CHECK((err && errno != ENOENT), "empty map", + "error: %s\n", strerror(errno)); + + /* populate elements to the map */ + map_batch_update(map_fd, max_entries, keys, values, is_pcpu); + + /* test 2: lookup/delete with count = 0, success */ + count = 0; + err = bpf_map_lookup_and_delete_batch(map_fd, NULL, &batch, keys, + values, &count, &opts); + CHECK(err, "count = 0", "error: %s\n", strerror(errno)); + + /* test 3: lookup/delete with count = max_entries, success */ + memset(keys, 0, max_entries * sizeof(*keys)); + memset(values, 0, max_entries * value_size); + count = max_entries; + err = bpf_map_lookup_and_delete_batch(map_fd, NULL, &batch, keys, + values, &count, &opts); + CHECK((err && errno != ENOENT), "count = max_entries", + "error: %s\n", strerror(errno)); + CHECK(count != max_entries, "count = max_entries", + "count = %u, max_entries = %u\n", count, max_entries); + map_batch_verify(visited, max_entries, keys, values, is_pcpu); + + /* bpf_map_get_next_key() should return -ENOENT for an empty map. */ + err = bpf_map_get_next_key(map_fd, NULL, &key); + CHECK(!err, "bpf_map_get_next_key()", "error: %s\n", strerror(errno)); + + /* test 4: lookup/delete in a loop with various steps. */ + total_success = 0; + for (step = 1; step < max_entries; step++) { + map_batch_update(map_fd, max_entries, keys, values, is_pcpu); + memset(keys, 0, max_entries * sizeof(*keys)); + memset(values, 0, max_entries * value_size); + total = 0; + /* iteratively lookup/delete elements with 'step' + * elements each + */ + count = step; + nospace_err = false; + while (true) { + err = bpf_map_lookup_batch(map_fd, + total ? &batch : NULL, + &batch, keys + total, + values + + total * value_size, + &count, &opts); + /* It is possible that we are failing due to buffer size + * not big enough. In such cases, let us just exit and + * go with large steps. Not that a buffer size with + * max_entries should always work. 
+ */ + if (err && errno == ENOSPC) { + nospace_err = true; + break; + } + + CHECK((err && errno != ENOENT), "lookup with steps", + "error: %s\n", strerror(errno)); + + total += count; + if (err) + break; + + } + if (nospace_err == true) + continue; + + CHECK(total != max_entries, "lookup with steps", + "total = %u, max_entries = %u\n", total, max_entries); + map_batch_verify(visited, max_entries, keys, values, is_pcpu); + + total = 0; + count = step; + while (total < max_entries) { + if (max_entries - total < step) + count = max_entries - total; + err = bpf_map_delete_batch(map_fd, + keys + total, + &count, &opts); + CHECK((err && errno != ENOENT), "delete batch", + "error: %s\n", strerror(errno)); + total += count; + if (err) + break; + } + CHECK(total != max_entries, "delete with steps", + "total = %u, max_entries = %u\n", total, max_entries); + + /* check map is empty, errono == ENOENT */ + err = bpf_map_get_next_key(map_fd, NULL, &key); + CHECK(!err || errno != ENOENT, "bpf_map_get_next_key()", + "error: %s\n", strerror(errno)); + + /* iteratively lookup/delete elements with 'step' + * elements each + */ + map_batch_update(map_fd, max_entries, keys, values, is_pcpu); + memset(keys, 0, max_entries * sizeof(*keys)); + memset(values, 0, max_entries * value_size); + total = 0; + count = step; + nospace_err = false; + while (true) { + err = bpf_map_lookup_and_delete_batch(map_fd, + total ? &batch : NULL, + &batch, keys + total, + values + + total * value_size, + &count, &opts); + /* It is possible that we are failing due to buffer size + * not big enough. In such cases, let us just exit and + * go with large steps. Not that a buffer size with + * max_entries should always work. + */ + if (err && errno == ENOSPC) { + nospace_err = true; + break; + } + + CHECK((err && errno != ENOENT), "lookup with steps", + "error: %s\n", strerror(errno)); + + total += count; + if (err) + break; + } + + if (nospace_err == true) + continue; + + CHECK(total != max_entries, "lookup/delete with steps", + "total = %u, max_entries = %u\n", total, max_entries); + + map_batch_verify(visited, max_entries, keys, values, is_pcpu); + err = bpf_map_get_next_key(map_fd, NULL, &key); + CHECK(!err, "bpf_map_get_next_key()", "error: %s\n", + strerror(errno)); + + total_success++; + } + + CHECK(total_success == 0, "check total_success", + "unexpected failure\n"); + free(keys); + free(visited); + if (!is_pcpu) + free(values); +} + +void htab_map_batch_ops(void) +{ + __test_map_lookup_and_delete_batch(false); + printf("test_%s:PASS\n", __func__); +} + +void htab_percpu_map_batch_ops(void) +{ + __test_map_lookup_and_delete_batch(true); + printf("test_%s:PASS\n", __func__); +} + +void test_htab_map_batch_ops(void) +{ + htab_map_batch_ops(); + htab_percpu_map_batch_ops(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c index a83111a32d4a..a0ee87c8e1ea 100644 --- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c +++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c @@ -1,26 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> - -#define EMBED_FILE(NAME, PATH) \ -asm ( \ -" .pushsection \".rodata\", \"a\", @progbits \n" \ -" .global "#NAME"_data \n" \ -#NAME"_data: \n" \ -" .incbin \"" PATH "\" \n" \ -#NAME"_data_end: \n" \ -" .global "#NAME"_size \n" \ -" .type "#NAME"_size, @object \n" \ -" .size "#NAME"_size, 4 \n" \ -" .align 4, \n" \ -#NAME"_size: \n" \ -" .int "#NAME"_data_end - "#NAME"_data \n" \ -" .popsection \n" \ -); \ 
-extern char NAME##_data[]; \ -extern int NAME##_size; +#include "test_attach_probe.skel.h" ssize_t get_base_addr() { - size_t start; + size_t start, offset; char buf[256]; FILE *f; @@ -28,10 +11,11 @@ ssize_t get_base_addr() { if (!f) return -errno; - while (fscanf(f, "%zx-%*x %s %*s\n", &start, buf) == 2) { + while (fscanf(f, "%zx-%*x %s %zx %*[^\n]\n", + &start, buf, &offset) == 3) { if (strcmp(buf, "r-xp") == 0) { fclose(f); - return start; + return start - offset; } } @@ -39,30 +23,12 @@ ssize_t get_base_addr() { return -EINVAL; } -EMBED_FILE(probe, "test_attach_probe.o"); - void test_attach_probe(void) { - const char *kprobe_name = "kprobe/sys_nanosleep"; - const char *kretprobe_name = "kretprobe/sys_nanosleep"; - const char *uprobe_name = "uprobe/trigger_func"; - const char *uretprobe_name = "uretprobe/trigger_func"; - const int kprobe_idx = 0, kretprobe_idx = 1; - const int uprobe_idx = 2, uretprobe_idx = 3; - const char *obj_name = "attach_probe"; - DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts, - .object_name = obj_name, - .relaxed_maps = true, - ); - struct bpf_program *kprobe_prog, *kretprobe_prog; - struct bpf_program *uprobe_prog, *uretprobe_prog; - struct bpf_object *obj; - int err, duration = 0, res; - struct bpf_link *kprobe_link = NULL; - struct bpf_link *kretprobe_link = NULL; - struct bpf_link *uprobe_link = NULL; - struct bpf_link *uretprobe_link = NULL; - int results_map_fd; + int duration = 0; + struct bpf_link *kprobe_link, *kretprobe_link; + struct bpf_link *uprobe_link, *uretprobe_link; + struct test_attach_probe* skel; size_t uprobe_offset; ssize_t base_addr; @@ -72,123 +38,68 @@ void test_attach_probe(void) return; uprobe_offset = (size_t)&get_base_addr - base_addr; - /* open object */ - obj = bpf_object__open_mem(probe_data, probe_size, &open_opts); - if (CHECK(IS_ERR(obj), "obj_open_mem", "err %ld\n", PTR_ERR(obj))) + skel = test_attach_probe__open_and_load(); + if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) return; - - if (CHECK(strcmp(bpf_object__name(obj), obj_name), "obj_name", - "wrong obj name '%s', expected '%s'\n", - bpf_object__name(obj), obj_name)) + if (CHECK(!skel->bss, "check_bss", ".bss wasn't mmap()-ed\n")) goto cleanup; - kprobe_prog = bpf_object__find_program_by_title(obj, kprobe_name); - if (CHECK(!kprobe_prog, "find_probe", - "prog '%s' not found\n", kprobe_name)) - goto cleanup; - kretprobe_prog = bpf_object__find_program_by_title(obj, kretprobe_name); - if (CHECK(!kretprobe_prog, "find_probe", - "prog '%s' not found\n", kretprobe_name)) - goto cleanup; - uprobe_prog = bpf_object__find_program_by_title(obj, uprobe_name); - if (CHECK(!uprobe_prog, "find_probe", - "prog '%s' not found\n", uprobe_name)) - goto cleanup; - uretprobe_prog = bpf_object__find_program_by_title(obj, uretprobe_name); - if (CHECK(!uretprobe_prog, "find_probe", - "prog '%s' not found\n", uretprobe_name)) - goto cleanup; - - /* create maps && load programs */ - err = bpf_object__load(obj); - if (CHECK(err, "obj_load", "err %d\n", err)) - goto cleanup; - - /* load maps */ - results_map_fd = bpf_find_map(__func__, obj, "results_map"); - if (CHECK(results_map_fd < 0, "find_results_map", - "err %d\n", results_map_fd)) - goto cleanup; - - kprobe_link = bpf_program__attach_kprobe(kprobe_prog, + kprobe_link = bpf_program__attach_kprobe(skel->progs.handle_kprobe, false /* retprobe */, SYS_NANOSLEEP_KPROBE_NAME); if (CHECK(IS_ERR(kprobe_link), "attach_kprobe", - "err %ld\n", PTR_ERR(kprobe_link))) { - kprobe_link = NULL; + "err %ld\n", PTR_ERR(kprobe_link))) 
goto cleanup; - } - kretprobe_link = bpf_program__attach_kprobe(kretprobe_prog, + skel->links.handle_kprobe = kprobe_link; + + kretprobe_link = bpf_program__attach_kprobe(skel->progs.handle_kretprobe, true /* retprobe */, SYS_NANOSLEEP_KPROBE_NAME); if (CHECK(IS_ERR(kretprobe_link), "attach_kretprobe", - "err %ld\n", PTR_ERR(kretprobe_link))) { - kretprobe_link = NULL; + "err %ld\n", PTR_ERR(kretprobe_link))) goto cleanup; - } - uprobe_link = bpf_program__attach_uprobe(uprobe_prog, + skel->links.handle_kretprobe = kretprobe_link; + + uprobe_link = bpf_program__attach_uprobe(skel->progs.handle_uprobe, false /* retprobe */, 0 /* self pid */, "/proc/self/exe", uprobe_offset); if (CHECK(IS_ERR(uprobe_link), "attach_uprobe", - "err %ld\n", PTR_ERR(uprobe_link))) { - uprobe_link = NULL; + "err %ld\n", PTR_ERR(uprobe_link))) goto cleanup; - } - uretprobe_link = bpf_program__attach_uprobe(uretprobe_prog, + skel->links.handle_uprobe = uprobe_link; + + uretprobe_link = bpf_program__attach_uprobe(skel->progs.handle_uretprobe, true /* retprobe */, -1 /* any pid */, "/proc/self/exe", uprobe_offset); if (CHECK(IS_ERR(uretprobe_link), "attach_uretprobe", - "err %ld\n", PTR_ERR(uretprobe_link))) { - uretprobe_link = NULL; + "err %ld\n", PTR_ERR(uretprobe_link))) goto cleanup; - } + skel->links.handle_uretprobe = uretprobe_link; /* trigger & validate kprobe && kretprobe */ usleep(1); - err = bpf_map_lookup_elem(results_map_fd, &kprobe_idx, &res); - if (CHECK(err, "get_kprobe_res", - "failed to get kprobe res: %d\n", err)) + if (CHECK(skel->bss->kprobe_res != 1, "check_kprobe_res", + "wrong kprobe res: %d\n", skel->bss->kprobe_res)) goto cleanup; - if (CHECK(res != kprobe_idx + 1, "check_kprobe_res", - "wrong kprobe res: %d\n", res)) - goto cleanup; - - err = bpf_map_lookup_elem(results_map_fd, &kretprobe_idx, &res); - if (CHECK(err, "get_kretprobe_res", - "failed to get kretprobe res: %d\n", err)) - goto cleanup; - if (CHECK(res != kretprobe_idx + 1, "check_kretprobe_res", - "wrong kretprobe res: %d\n", res)) + if (CHECK(skel->bss->kretprobe_res != 2, "check_kretprobe_res", + "wrong kretprobe res: %d\n", skel->bss->kretprobe_res)) goto cleanup; /* trigger & validate uprobe & uretprobe */ get_base_addr(); - err = bpf_map_lookup_elem(results_map_fd, &uprobe_idx, &res); - if (CHECK(err, "get_uprobe_res", - "failed to get uprobe res: %d\n", err)) - goto cleanup; - if (CHECK(res != uprobe_idx + 1, "check_uprobe_res", - "wrong uprobe res: %d\n", res)) - goto cleanup; - - err = bpf_map_lookup_elem(results_map_fd, &uretprobe_idx, &res); - if (CHECK(err, "get_uretprobe_res", - "failed to get uretprobe res: %d\n", err)) + if (CHECK(skel->bss->uprobe_res != 3, "check_uprobe_res", + "wrong uprobe res: %d\n", skel->bss->uprobe_res)) goto cleanup; - if (CHECK(res != uretprobe_idx + 1, "check_uretprobe_res", - "wrong uretprobe res: %d\n", res)) + if (CHECK(skel->bss->uretprobe_res != 4, "check_uretprobe_res", + "wrong uretprobe res: %d\n", skel->bss->uretprobe_res)) goto cleanup; cleanup: - bpf_link__destroy(kprobe_link); - bpf_link__destroy(kretprobe_link); - bpf_link__destroy(uprobe_link); - bpf_link__destroy(uretprobe_link); - bpf_object__close(obj); + test_attach_probe__destroy(skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c new file mode 100644 index 000000000000..8482bbc67eec --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c @@ -0,0 +1,212 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ + 
+#include <linux/err.h> +#include <test_progs.h> +#include "bpf_dctcp.skel.h" +#include "bpf_cubic.skel.h" + +#define min(a, b) ((a) < (b) ? (a) : (b)) + +static const unsigned int total_bytes = 10 * 1024 * 1024; +static const struct timeval timeo_sec = { .tv_sec = 10 }; +static const size_t timeo_optlen = sizeof(timeo_sec); +static int stop, duration; + +static int settimeo(int fd) +{ + int err; + + err = setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &timeo_sec, + timeo_optlen); + if (CHECK(err == -1, "setsockopt(fd, SO_RCVTIMEO)", "errno:%d\n", + errno)) + return -1; + + err = setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &timeo_sec, + timeo_optlen); + if (CHECK(err == -1, "setsockopt(fd, SO_SNDTIMEO)", "errno:%d\n", + errno)) + return -1; + + return 0; +} + +static int settcpca(int fd, const char *tcp_ca) +{ + int err; + + err = setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, tcp_ca, strlen(tcp_ca)); + if (CHECK(err == -1, "setsockopt(fd, TCP_CONGESTION)", "errno:%d\n", + errno)) + return -1; + + return 0; +} + +static void *server(void *arg) +{ + int lfd = (int)(long)arg, err = 0, fd; + ssize_t nr_sent = 0, bytes = 0; + char batch[1500]; + + fd = accept(lfd, NULL, NULL); + while (fd == -1) { + if (errno == EINTR) + continue; + err = -errno; + goto done; + } + + if (settimeo(fd)) { + err = -errno; + goto done; + } + + while (bytes < total_bytes && !READ_ONCE(stop)) { + nr_sent = send(fd, &batch, + min(total_bytes - bytes, sizeof(batch)), 0); + if (nr_sent == -1 && errno == EINTR) + continue; + if (nr_sent == -1) { + err = -errno; + break; + } + bytes += nr_sent; + } + + CHECK(bytes != total_bytes, "send", "%zd != %u nr_sent:%zd errno:%d\n", + bytes, total_bytes, nr_sent, errno); + +done: + if (fd != -1) + close(fd); + if (err) { + WRITE_ONCE(stop, 1); + return ERR_PTR(err); + } + return NULL; +} + +static void do_test(const char *tcp_ca) +{ + struct sockaddr_in6 sa6 = {}; + ssize_t nr_recv = 0, bytes = 0; + int lfd = -1, fd = -1; + pthread_t srv_thread; + socklen_t addrlen = sizeof(sa6); + void *thread_ret; + char batch[1500]; + int err; + + WRITE_ONCE(stop, 0); + + lfd = socket(AF_INET6, SOCK_STREAM, 0); + if (CHECK(lfd == -1, "socket", "errno:%d\n", errno)) + return; + fd = socket(AF_INET6, SOCK_STREAM, 0); + if (CHECK(fd == -1, "socket", "errno:%d\n", errno)) { + close(lfd); + return; + } + + if (settcpca(lfd, tcp_ca) || settcpca(fd, tcp_ca) || + settimeo(lfd) || settimeo(fd)) + goto done; + + /* bind, listen and start server thread to accept */ + sa6.sin6_family = AF_INET6; + sa6.sin6_addr = in6addr_loopback; + err = bind(lfd, (struct sockaddr *)&sa6, addrlen); + if (CHECK(err == -1, "bind", "errno:%d\n", errno)) + goto done; + err = getsockname(lfd, (struct sockaddr *)&sa6, &addrlen); + if (CHECK(err == -1, "getsockname", "errno:%d\n", errno)) + goto done; + err = listen(lfd, 1); + if (CHECK(err == -1, "listen", "errno:%d\n", errno)) + goto done; + err = pthread_create(&srv_thread, NULL, server, (void *)(long)lfd); + if (CHECK(err != 0, "pthread_create", "err:%d\n", err)) + goto done; + + /* connect to server */ + err = connect(fd, (struct sockaddr *)&sa6, addrlen); + if (CHECK(err == -1, "connect", "errno:%d\n", errno)) + goto wait_thread; + + /* recv total_bytes */ + while (bytes < total_bytes && !READ_ONCE(stop)) { + nr_recv = recv(fd, &batch, + min(total_bytes - bytes, sizeof(batch)), 0); + if (nr_recv == -1 && errno == EINTR) + continue; + if (nr_recv == -1) + break; + bytes += nr_recv; + } + + CHECK(bytes != total_bytes, "recv", "%zd != %u nr_recv:%zd errno:%d\n", + bytes, total_bytes, 
nr_recv, errno); + +wait_thread: + WRITE_ONCE(stop, 1); + pthread_join(srv_thread, &thread_ret); + CHECK(IS_ERR(thread_ret), "pthread_join", "thread_ret:%ld", + PTR_ERR(thread_ret)); +done: + close(lfd); + close(fd); +} + +static void test_cubic(void) +{ + struct bpf_cubic *cubic_skel; + struct bpf_link *link; + + cubic_skel = bpf_cubic__open_and_load(); + if (CHECK(!cubic_skel, "bpf_cubic__open_and_load", "failed\n")) + return; + + link = bpf_map__attach_struct_ops(cubic_skel->maps.cubic); + if (CHECK(IS_ERR(link), "bpf_map__attach_struct_ops", "err:%ld\n", + PTR_ERR(link))) { + bpf_cubic__destroy(cubic_skel); + return; + } + + do_test("bpf_cubic"); + + bpf_link__destroy(link); + bpf_cubic__destroy(cubic_skel); +} + +static void test_dctcp(void) +{ + struct bpf_dctcp *dctcp_skel; + struct bpf_link *link; + + dctcp_skel = bpf_dctcp__open_and_load(); + if (CHECK(!dctcp_skel, "bpf_dctcp__open_and_load", "failed\n")) + return; + + link = bpf_map__attach_struct_ops(dctcp_skel->maps.dctcp); + if (CHECK(IS_ERR(link), "bpf_map__attach_struct_ops", "err:%ld\n", + PTR_ERR(link))) { + bpf_dctcp__destroy(dctcp_skel); + return; + } + + do_test("bpf_dctcp"); + + bpf_link__destroy(link); + bpf_dctcp__destroy(dctcp_skel); +} + +void test_bpf_tcp_ca(void) +{ + if (test__start_subtest("dctcp")) + test_dctcp(); + if (test__start_subtest("cubic")) + test_cubic(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c index 9486c13af6b2..e9f2f12ba06b 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c @@ -48,6 +48,8 @@ void test_bpf_verif_scale(void) { "test_verif_scale2.o", BPF_PROG_TYPE_SCHED_CLS }, { "test_verif_scale3.o", BPF_PROG_TYPE_SCHED_CLS }, + { "pyperf_global.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, + /* full unroll by llvm */ { "pyperf50.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, { "pyperf100.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c new file mode 100644 index 000000000000..5b13f2c6c402 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> + +#include "cgroup_helpers.h" + +#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null" + +char bpf_log_buf[BPF_LOG_BUF_SIZE]; + +static int prog_load(void) +{ + struct bpf_insn prog[] = { + BPF_MOV64_IMM(BPF_REG_0, 1), /* r0 = 1 */ + BPF_EXIT_INSN(), + }; + size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn); + + return bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB, + prog, insns_cnt, "GPL", 0, + bpf_log_buf, BPF_LOG_BUF_SIZE); +} + +void test_cgroup_attach_autodetach(void) +{ + __u32 duration = 0, prog_cnt = 4, attach_flags; + int allow_prog[2] = {-1}; + __u32 prog_ids[2] = {0}; + void *ptr = NULL; + int cg = 0, i; + int attempts; + + for (i = 0; i < ARRAY_SIZE(allow_prog); i++) { + allow_prog[i] = prog_load(); + if (CHECK(allow_prog[i] < 0, "prog_load", + "verifier output:\n%s\n-------\n", bpf_log_buf)) + goto err; + } + + if (CHECK_FAIL(setup_cgroup_environment())) + goto err; + + /* create a cgroup, attach two programs and remember their ids */ + cg = create_and_get_cgroup("/cg_autodetach"); + if (CHECK_FAIL(cg < 0)) + goto err; + + if (CHECK_FAIL(join_cgroup("/cg_autodetach"))) + goto err; + + for (i = 0; i < ARRAY_SIZE(allow_prog); i++) + if 
(CHECK(bpf_prog_attach(allow_prog[i], cg, + BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_MULTI), + "prog_attach", "prog[%d], errno=%d\n", i, errno)) + goto err; + + /* make sure that programs are attached and run some traffic */ + if (CHECK(bpf_prog_query(cg, BPF_CGROUP_INET_EGRESS, 0, &attach_flags, + prog_ids, &prog_cnt), + "prog_query", "errno=%d\n", errno)) + goto err; + if (CHECK_FAIL(system(PING_CMD))) + goto err; + + /* allocate some memory (4Mb) to pin the original cgroup */ + ptr = malloc(4 * (1 << 20)); + if (CHECK_FAIL(!ptr)) + goto err; + + /* close programs and cgroup fd */ + for (i = 0; i < ARRAY_SIZE(allow_prog); i++) { + close(allow_prog[i]); + allow_prog[i] = -1; + } + + close(cg); + cg = 0; + + /* leave the cgroup and remove it. don't detach programs */ + cleanup_cgroup_environment(); + + /* wait for the asynchronous auto-detachment. + * wait for no more than 5 sec and give up. + */ + for (i = 0; i < ARRAY_SIZE(prog_ids); i++) { + for (attempts = 5; attempts >= 0; attempts--) { + int fd = bpf_prog_get_fd_by_id(prog_ids[i]); + + if (fd < 0) + break; + + /* don't leave the fd open */ + close(fd); + + if (CHECK_FAIL(!attempts)) + goto err; + + sleep(1); + } + } + +err: + for (i = 0; i < ARRAY_SIZE(allow_prog); i++) + if (allow_prog[i] >= 0) + close(allow_prog[i]); + if (cg) + close(cg); + free(ptr); + cleanup_cgroup_environment(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c new file mode 100644 index 000000000000..2ff21dbce179 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c @@ -0,0 +1,285 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> + +#include "cgroup_helpers.h" + +#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null" + +char bpf_log_buf[BPF_LOG_BUF_SIZE]; + +static int map_fd = -1; + +static int prog_load_cnt(int verdict, int val) +{ + int cgroup_storage_fd, percpu_cgroup_storage_fd; + + if (map_fd < 0) + map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0); + if (map_fd < 0) { + printf("failed to create map '%s'\n", strerror(errno)); + return -1; + } + + cgroup_storage_fd = bpf_create_map(BPF_MAP_TYPE_CGROUP_STORAGE, + sizeof(struct bpf_cgroup_storage_key), 8, 0, 0); + if (cgroup_storage_fd < 0) { + printf("failed to create map '%s'\n", strerror(errno)); + return -1; + } + + percpu_cgroup_storage_fd = bpf_create_map( + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, + sizeof(struct bpf_cgroup_storage_key), 8, 0, 0); + if (percpu_cgroup_storage_fd < 0) { + printf("failed to create map '%s'\n", strerror(errno)); + return -1; + } + + struct bpf_insn prog[] = { + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */ + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */ + BPF_LD_MAP_FD(BPF_REG_1, map_fd), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_MOV64_IMM(BPF_REG_1, val), /* r1 = 1 */ + BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */ + + BPF_LD_MAP_FD(BPF_REG_1, cgroup_storage_fd), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), + BPF_MOV64_IMM(BPF_REG_1, val), + BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_0, BPF_REG_1, 0, 0), + + BPF_LD_MAP_FD(BPF_REG_1, percpu_cgroup_storage_fd), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 
BPF_FUNC_get_local_storage), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1), + BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0), + + BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */ + BPF_EXIT_INSN(), + }; + size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn); + int ret; + + ret = bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB, + prog, insns_cnt, "GPL", 0, + bpf_log_buf, BPF_LOG_BUF_SIZE); + + close(cgroup_storage_fd); + return ret; +} + +void test_cgroup_attach_multi(void) +{ + __u32 prog_ids[4], prog_cnt = 0, attach_flags, saved_prog_id; + int cg1 = 0, cg2 = 0, cg3 = 0, cg4 = 0, cg5 = 0, key = 0; + DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, attach_opts); + int allow_prog[7] = {-1}; + unsigned long long value; + __u32 duration = 0; + int i = 0; + + for (i = 0; i < ARRAY_SIZE(allow_prog); i++) { + allow_prog[i] = prog_load_cnt(1, 1 << i); + if (CHECK(allow_prog[i] < 0, "prog_load", + "verifier output:\n%s\n-------\n", bpf_log_buf)) + goto err; + } + + if (CHECK_FAIL(setup_cgroup_environment())) + goto err; + + cg1 = create_and_get_cgroup("/cg1"); + if (CHECK_FAIL(cg1 < 0)) + goto err; + cg2 = create_and_get_cgroup("/cg1/cg2"); + if (CHECK_FAIL(cg2 < 0)) + goto err; + cg3 = create_and_get_cgroup("/cg1/cg2/cg3"); + if (CHECK_FAIL(cg3 < 0)) + goto err; + cg4 = create_and_get_cgroup("/cg1/cg2/cg3/cg4"); + if (CHECK_FAIL(cg4 < 0)) + goto err; + cg5 = create_and_get_cgroup("/cg1/cg2/cg3/cg4/cg5"); + if (CHECK_FAIL(cg5 < 0)) + goto err; + + if (CHECK_FAIL(join_cgroup("/cg1/cg2/cg3/cg4/cg5"))) + goto err; + + if (CHECK(bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_MULTI), + "prog0_attach_to_cg1_multi", "errno=%d\n", errno)) + goto err; + + if (CHECK(!bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_MULTI), + "fail_same_prog_attach_to_cg1", "unexpected success\n")) + goto err; + + if (CHECK(bpf_prog_attach(allow_prog[1], cg1, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_MULTI), + "prog1_attach_to_cg1_multi", "errno=%d\n", errno)) + goto err; + + if (CHECK(bpf_prog_attach(allow_prog[2], cg2, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_OVERRIDE), + "prog2_attach_to_cg2_override", "errno=%d\n", errno)) + goto err; + + if (CHECK(bpf_prog_attach(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_MULTI), + "prog3_attach_to_cg3_multi", "errno=%d\n", errno)) + goto err; + + if (CHECK(bpf_prog_attach(allow_prog[4], cg4, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_OVERRIDE), + "prog4_attach_to_cg4_override", "errno=%d\n", errno)) + goto err; + + if (CHECK(bpf_prog_attach(allow_prog[5], cg5, BPF_CGROUP_INET_EGRESS, 0), + "prog5_attach_to_cg5_none", "errno=%d\n", errno)) + goto err; + + CHECK_FAIL(system(PING_CMD)); + CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value)); + CHECK_FAIL(value != 1 + 2 + 8 + 32); + + /* query the number of effective progs in cg5 */ + CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, + BPF_F_QUERY_EFFECTIVE, NULL, NULL, &prog_cnt)); + CHECK_FAIL(prog_cnt != 4); + /* retrieve prog_ids of effective progs in cg5 */ + CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, + BPF_F_QUERY_EFFECTIVE, &attach_flags, + prog_ids, &prog_cnt)); + CHECK_FAIL(prog_cnt != 4); + CHECK_FAIL(attach_flags != 0); + saved_prog_id = prog_ids[0]; + /* check enospc handling */ + prog_ids[0] = 0; + prog_cnt = 2; + CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, + BPF_F_QUERY_EFFECTIVE, &attach_flags, + prog_ids, &prog_cnt) != -1); + CHECK_FAIL(errno != ENOSPC); + CHECK_FAIL(prog_cnt != 4); + /* check that prog_ids 
are returned even when buffer is too small */ + CHECK_FAIL(prog_ids[0] != saved_prog_id); + /* retrieve prog_id of single attached prog in cg5 */ + prog_ids[0] = 0; + CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, 0, NULL, + prog_ids, &prog_cnt)); + CHECK_FAIL(prog_cnt != 1); + CHECK_FAIL(prog_ids[0] != saved_prog_id); + + /* detach bottom program and ping again */ + if (CHECK(bpf_prog_detach2(-1, cg5, BPF_CGROUP_INET_EGRESS), + "prog_detach_from_cg5", "errno=%d\n", errno)) + goto err; + + value = 0; + CHECK_FAIL(bpf_map_update_elem(map_fd, &key, &value, 0)); + CHECK_FAIL(system(PING_CMD)); + CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value)); + CHECK_FAIL(value != 1 + 2 + 8 + 16); + + /* test replace */ + + attach_opts.flags = BPF_F_ALLOW_OVERRIDE | BPF_F_REPLACE; + attach_opts.replace_prog_fd = allow_prog[0]; + if (CHECK(!bpf_prog_attach_xattr(allow_prog[6], cg1, + BPF_CGROUP_INET_EGRESS, &attach_opts), + "fail_prog_replace_override", "unexpected success\n")) + goto err; + CHECK_FAIL(errno != EINVAL); + + attach_opts.flags = BPF_F_REPLACE; + if (CHECK(!bpf_prog_attach_xattr(allow_prog[6], cg1, + BPF_CGROUP_INET_EGRESS, &attach_opts), + "fail_prog_replace_no_multi", "unexpected success\n")) + goto err; + CHECK_FAIL(errno != EINVAL); + + attach_opts.flags = BPF_F_ALLOW_MULTI | BPF_F_REPLACE; + attach_opts.replace_prog_fd = -1; + if (CHECK(!bpf_prog_attach_xattr(allow_prog[6], cg1, + BPF_CGROUP_INET_EGRESS, &attach_opts), + "fail_prog_replace_bad_fd", "unexpected success\n")) + goto err; + CHECK_FAIL(errno != EBADF); + + /* replacing a program that is not attached to cgroup should fail */ + attach_opts.replace_prog_fd = allow_prog[3]; + if (CHECK(!bpf_prog_attach_xattr(allow_prog[6], cg1, + BPF_CGROUP_INET_EGRESS, &attach_opts), + "fail_prog_replace_no_ent", "unexpected success\n")) + goto err; + CHECK_FAIL(errno != ENOENT); + + /* replace 1st from the top program */ + attach_opts.replace_prog_fd = allow_prog[0]; + if (CHECK(bpf_prog_attach_xattr(allow_prog[6], cg1, + BPF_CGROUP_INET_EGRESS, &attach_opts), + "prog_replace", "errno=%d\n", errno)) + goto err; + + value = 0; + CHECK_FAIL(bpf_map_update_elem(map_fd, &key, &value, 0)); + CHECK_FAIL(system(PING_CMD)); + CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value)); + CHECK_FAIL(value != 64 + 2 + 8 + 16); + + /* detach 3rd from bottom program and ping again */ + if (CHECK(!bpf_prog_detach2(0, cg3, BPF_CGROUP_INET_EGRESS), + "fail_prog_detach_from_cg3", "unexpected success\n")) + goto err; + + if (CHECK(bpf_prog_detach2(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS), + "prog3_detach_from_cg3", "errno=%d\n", errno)) + goto err; + + value = 0; + CHECK_FAIL(bpf_map_update_elem(map_fd, &key, &value, 0)); + CHECK_FAIL(system(PING_CMD)); + CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value)); + CHECK_FAIL(value != 64 + 2 + 16); + + /* detach 2nd from bottom program and ping again */ + if (CHECK(bpf_prog_detach2(-1, cg4, BPF_CGROUP_INET_EGRESS), + "prog_detach_from_cg4", "errno=%d\n", errno)) + goto err; + + value = 0; + CHECK_FAIL(bpf_map_update_elem(map_fd, &key, &value, 0)); + CHECK_FAIL(system(PING_CMD)); + CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value)); + CHECK_FAIL(value != 64 + 2 + 4); + + prog_cnt = 4; + CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, + BPF_F_QUERY_EFFECTIVE, &attach_flags, + prog_ids, &prog_cnt)); + CHECK_FAIL(prog_cnt != 3); + CHECK_FAIL(attach_flags != 0); + CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, 0, NULL, + prog_ids, &prog_cnt)); + CHECK_FAIL(prog_cnt != 0); + +err: + for (i = 0; 
i < ARRAY_SIZE(allow_prog); i++) + if (allow_prog[i] >= 0) + close(allow_prog[i]); + close(cg1); + close(cg2); + close(cg3); + close(cg4); + close(cg5); + cleanup_cgroup_environment(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c new file mode 100644 index 000000000000..9d8cb48b99de --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> + +#include "cgroup_helpers.h" + +#define FOO "/foo" +#define BAR "/foo/bar/" +#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null" + +char bpf_log_buf[BPF_LOG_BUF_SIZE]; + +static int prog_load(int verdict) +{ + struct bpf_insn prog[] = { + BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */ + BPF_EXIT_INSN(), + }; + size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn); + + return bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB, + prog, insns_cnt, "GPL", 0, + bpf_log_buf, BPF_LOG_BUF_SIZE); +} + +void test_cgroup_attach_override(void) +{ + int drop_prog = -1, allow_prog = -1, foo = -1, bar = -1; + __u32 duration = 0; + + allow_prog = prog_load(1); + if (CHECK(allow_prog < 0, "prog_load_allow", + "verifier output:\n%s\n-------\n", bpf_log_buf)) + goto err; + + drop_prog = prog_load(0); + if (CHECK(drop_prog < 0, "prog_load_drop", + "verifier output:\n%s\n-------\n", bpf_log_buf)) + goto err; + + foo = test__join_cgroup(FOO); + if (CHECK(foo < 0, "cgroup_join_foo", "cgroup setup failed\n")) + goto err; + + if (CHECK(bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_OVERRIDE), + "prog_attach_drop_foo_override", + "attach prog to %s failed, errno=%d\n", FOO, errno)) + goto err; + + if (CHECK(!system(PING_CMD), "ping_fail", + "ping unexpectedly succeeded\n")) + goto err; + + bar = test__join_cgroup(BAR); + if (CHECK(bar < 0, "cgroup_join_bar", "cgroup setup failed\n")) + goto err; + + if (CHECK(!system(PING_CMD), "ping_fail", + "ping unexpectedly succeeded\n")) + goto err; + + if (CHECK(bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_OVERRIDE), + "prog_attach_allow_bar_override", + "attach prog to %s failed, errno=%d\n", BAR, errno)) + goto err; + + if (CHECK(system(PING_CMD), "ping_ok", "ping failed\n")) + goto err; + + if (CHECK(bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS), + "prog_detach_bar", + "detach prog from %s failed, errno=%d\n", BAR, errno)) + goto err; + + if (CHECK(!system(PING_CMD), "ping_fail", + "ping unexpectedly succeeded\n")) + goto err; + + if (CHECK(bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_OVERRIDE), + "prog_attach_allow_bar_override", + "attach prog to %s failed, errno=%d\n", BAR, errno)) + goto err; + + if (CHECK(bpf_prog_detach(foo, BPF_CGROUP_INET_EGRESS), + "prog_detach_foo", + "detach prog from %s failed, errno=%d\n", FOO, errno)) + goto err; + + if (CHECK(system(PING_CMD), "ping_ok", "ping failed\n")) + goto err; + + if (CHECK(bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_OVERRIDE), + "prog_attach_allow_bar_override", + "attach prog to %s failed, errno=%d\n", BAR, errno)) + goto err; + + if (CHECK(!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0), + "fail_prog_attach_allow_bar_none", + "attach prog to %s unexpectedly succeeded\n", BAR)) + goto err; + + if (CHECK(bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS), + "prog_detach_bar", + "detach prog from %s failed, errno=%d\n", BAR, errno)) + goto err; + + if 
(CHECK(!bpf_prog_detach(foo, BPF_CGROUP_INET_EGRESS), + "fail_prog_detach_foo", + "double detach from %s unexpectedly succeeded\n", FOO)) + goto err; + + if (CHECK(bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, 0), + "prog_attach_allow_foo_none", + "attach prog to %s failed, errno=%d\n", FOO, errno)) + goto err; + + if (CHECK(!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0), + "fail_prog_attach_allow_bar_none", + "attach prog to %s unexpectedly succeeded\n", BAR)) + goto err; + + if (CHECK(!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_OVERRIDE), + "fail_prog_attach_allow_bar_override", + "attach prog to %s unexpectedly succeeded\n", BAR)) + goto err; + + if (CHECK(!bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_OVERRIDE), + "fail_prog_attach_allow_foo_override", + "attach prog to %s unexpectedly succeeded\n", FOO)) + goto err; + + if (CHECK(bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, 0), + "prog_attach_drop_foo_none", + "attach prog to %s failed, errno=%d\n", FOO, errno)) + goto err; + +err: + close(foo); + close(bar); + close(allow_prog); + close(drop_prog); +} diff --git a/tools/testing/selftests/bpf/prog_tests/core_extern.c b/tools/testing/selftests/bpf/prog_tests/core_extern.c new file mode 100644 index 000000000000..b093787e9448 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/core_extern.c @@ -0,0 +1,169 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ + +#include <test_progs.h> +#include <sys/mman.h> +#include <sys/utsname.h> +#include <linux/version.h> +#include "test_core_extern.skel.h" + +static uint32_t get_kernel_version(void) +{ + uint32_t major, minor, patch; + struct utsname info; + + uname(&info); + if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3) + return 0; + return KERNEL_VERSION(major, minor, patch); +} + +#define CFG "CONFIG_BPF_SYSCALL=n\n" + +static struct test_case { + const char *name; + const char *cfg; + bool fails; + struct test_core_extern__data data; +} test_cases[] = { + { .name = "default search path", .data = { .bpf_syscall = true } }, + { + .name = "custom values", + .cfg = "CONFIG_BPF_SYSCALL=n\n" + "CONFIG_TRISTATE=m\n" + "CONFIG_BOOL=y\n" + "CONFIG_CHAR=100\n" + "CONFIG_USHORT=30000\n" + "CONFIG_INT=123456\n" + "CONFIG_ULONG=0xDEADBEEFC0DE\n" + "CONFIG_STR=\"abracad\"\n" + "CONFIG_MISSING=0", + .data = { + .bpf_syscall = false, + .tristate_val = TRI_MODULE, + .bool_val = true, + .char_val = 100, + .ushort_val = 30000, + .int_val = 123456, + .ulong_val = 0xDEADBEEFC0DE, + .str_val = "abracad", + }, + }, + /* TRISTATE */ + { .name = "tristate (y)", .cfg = CFG"CONFIG_TRISTATE=y\n", + .data = { .tristate_val = TRI_YES } }, + { .name = "tristate (n)", .cfg = CFG"CONFIG_TRISTATE=n\n", + .data = { .tristate_val = TRI_NO } }, + { .name = "tristate (m)", .cfg = CFG"CONFIG_TRISTATE=m\n", + .data = { .tristate_val = TRI_MODULE } }, + { .name = "tristate (int)", .fails = 1, .cfg = CFG"CONFIG_TRISTATE=1" }, + { .name = "tristate (bad)", .fails = 1, .cfg = CFG"CONFIG_TRISTATE=M" }, + /* BOOL */ + { .name = "bool (y)", .cfg = CFG"CONFIG_BOOL=y\n", + .data = { .bool_val = true } }, + { .name = "bool (n)", .cfg = CFG"CONFIG_BOOL=n\n", + .data = { .bool_val = false } }, + { .name = "bool (tristate)", .fails = 1, .cfg = CFG"CONFIG_BOOL=m" }, + { .name = "bool (int)", .fails = 1, .cfg = CFG"CONFIG_BOOL=1" }, + /* CHAR */ + { .name = "char (tristate)", .cfg = CFG"CONFIG_CHAR=m\n", + .data = { .char_val = 'm' } }, + { .name = "char (bad)", 
.fails = 1, .cfg = CFG"CONFIG_CHAR=q\n" }, + { .name = "char (empty)", .fails = 1, .cfg = CFG"CONFIG_CHAR=\n" }, + { .name = "char (str)", .fails = 1, .cfg = CFG"CONFIG_CHAR=\"y\"\n" }, + /* STRING */ + { .name = "str (empty)", .cfg = CFG"CONFIG_STR=\"\"\n", + .data = { .str_val = "\0\0\0\0\0\0\0" } }, + { .name = "str (padded)", .cfg = CFG"CONFIG_STR=\"abra\"\n", + .data = { .str_val = "abra\0\0\0" } }, + { .name = "str (too long)", .cfg = CFG"CONFIG_STR=\"abracada\"\n", + .data = { .str_val = "abracad" } }, + { .name = "str (no value)", .fails = 1, .cfg = CFG"CONFIG_STR=\n" }, + { .name = "str (bad value)", .fails = 1, .cfg = CFG"CONFIG_STR=bla\n" }, + /* INTEGERS */ + { + .name = "integer forms", + .cfg = CFG + "CONFIG_CHAR=0xA\n" + "CONFIG_USHORT=0462\n" + "CONFIG_INT=-100\n" + "CONFIG_ULONG=+1000000000000", + .data = { + .char_val = 0xA, + .ushort_val = 0462, + .int_val = -100, + .ulong_val = 1000000000000, + }, + }, + { .name = "int (bad)", .fails = 1, .cfg = CFG"CONFIG_INT=abc" }, + { .name = "int (str)", .fails = 1, .cfg = CFG"CONFIG_INT=\"abc\"" }, + { .name = "int (empty)", .fails = 1, .cfg = CFG"CONFIG_INT=" }, + { .name = "int (mixed)", .fails = 1, .cfg = CFG"CONFIG_INT=123abc" }, + { .name = "int (max)", .cfg = CFG"CONFIG_INT=2147483647", + .data = { .int_val = 2147483647 } }, + { .name = "int (min)", .cfg = CFG"CONFIG_INT=-2147483648", + .data = { .int_val = -2147483648 } }, + { .name = "int (max+1)", .fails = 1, .cfg = CFG"CONFIG_INT=2147483648" }, + { .name = "int (min-1)", .fails = 1, .cfg = CFG"CONFIG_INT=-2147483649" }, + { .name = "ushort (max)", .cfg = CFG"CONFIG_USHORT=65535", + .data = { .ushort_val = 65535 } }, + { .name = "ushort (min)", .cfg = CFG"CONFIG_USHORT=0", + .data = { .ushort_val = 0 } }, + { .name = "ushort (max+1)", .fails = 1, .cfg = CFG"CONFIG_USHORT=65536" }, + { .name = "ushort (min-1)", .fails = 1, .cfg = CFG"CONFIG_USHORT=-1" }, + { .name = "u64 (max)", .cfg = CFG"CONFIG_ULONG=0xffffffffffffffff", + .data = { .ulong_val = 0xffffffffffffffff } }, + { .name = "u64 (min)", .cfg = CFG"CONFIG_ULONG=0", + .data = { .ulong_val = 0 } }, + { .name = "u64 (max+1)", .fails = 1, .cfg = CFG"CONFIG_ULONG=0x10000000000000000" }, +}; + +void test_core_extern(void) +{ + const uint32_t kern_ver = get_kernel_version(); + int err, duration = 0, i, j; + struct test_core_extern *skel = NULL; + uint64_t *got, *exp; + int n = sizeof(*skel->data) / sizeof(uint64_t); + + for (i = 0; i < ARRAY_SIZE(test_cases); i++) { + struct test_case *t = &test_cases[i]; + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, + .kconfig = t->cfg, + ); + + if (!test__start_subtest(t->name)) + continue; + + skel = test_core_extern__open_opts(&opts); + if (CHECK(!skel, "skel_open", "skeleton open failed\n")) + goto cleanup; + err = test_core_extern__load(skel); + if (t->fails) { + CHECK(!err, "skel_load", + "shouldn't succeed open/load of skeleton\n"); + goto cleanup; + } else if (CHECK(err, "skel_load", + "failed to open/load skeleton\n")) { + goto cleanup; + } + err = test_core_extern__attach(skel); + if (CHECK(err, "attach_raw_tp", "failed attach: %d\n", err)) + goto cleanup; + + usleep(1); + + t->data.kern_ver = kern_ver; + t->data.missing_val = 0xDEADC0DE; + got = (uint64_t *)skel->data; + exp = (uint64_t *)&t->data; + for (j = 0; j < n; j++) { + CHECK(got[j] != exp[j], "check_res", + "result #%d: expected %lx, but got %lx\n", + j, exp[j], got[j]); + } +cleanup: + test_core_extern__destroy(skel); + skel = NULL; + } +} diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c 
b/tools/testing/selftests/bpf/prog_tests/core_reloc.c index 05fe85281ff7..31e177adbdf1 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c +++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c @@ -74,6 +74,7 @@ .b123 = 2, \ .c1c = 3, \ .d00d = 4, \ + .f10c = 0, \ }, \ .output_len = sizeof(struct core_reloc_arrays_output) \ } @@ -308,12 +309,15 @@ static struct core_reloc_test_case test_cases[] = { ARRAYS_CASE(arrays), ARRAYS_CASE(arrays___diff_arr_dim), ARRAYS_CASE(arrays___diff_arr_val_sz), + ARRAYS_CASE(arrays___equiv_zero_sz_arr), + ARRAYS_CASE(arrays___fixed_arr), ARRAYS_ERR_CASE(arrays___err_too_small), ARRAYS_ERR_CASE(arrays___err_too_shallow), ARRAYS_ERR_CASE(arrays___err_non_array), ARRAYS_ERR_CASE(arrays___err_wrong_val_type1), ARRAYS_ERR_CASE(arrays___err_wrong_val_type2), + ARRAYS_ERR_CASE(arrays___err_bad_zero_sz_arr), /* enum/ptr/int handling scenarios */ PRIMITIVES_CASE(primitives), diff --git a/tools/testing/selftests/bpf/prog_tests/cpu_mask.c b/tools/testing/selftests/bpf/prog_tests/cpu_mask.c new file mode 100644 index 000000000000..f7c7e25232be --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cpu_mask.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <bpf/btf.h> +#include "bpf/libbpf_internal.h" + +static int duration = 0; + +static void validate_mask(int case_nr, const char *exp, bool *mask, int n) +{ + int i; + + for (i = 0; exp[i]; i++) { + if (exp[i] == '1') { + if (CHECK(i + 1 > n, "mask_short", + "case #%d: mask too short, got n=%d, need at least %d\n", + case_nr, n, i + 1)) + return; + CHECK(!mask[i], "cpu_not_set", + "case #%d: mask differs, expected cpu#%d SET\n", + case_nr, i); + } else { + CHECK(i < n && mask[i], "cpu_set", + "case #%d: mask differs, expected cpu#%d UNSET\n", + case_nr, i); + } + } + CHECK(i < n, "mask_long", + "case #%d: mask too long, got n=%d, expected at most %d\n", + case_nr, n, i); +} + +static struct { + const char *cpu_mask; + const char *expect; + bool fails; +} test_cases[] = { + { "0\n", "1", false }, + { "0,2\n", "101", false }, + { "0-2\n", "111", false }, + { "0-2,3-4\n", "11111", false }, + { "0", "1", false }, + { "0-2", "111", false }, + { "0,2", "101", false }, + { "0,1-3", "1111", false }, + { "0,1,2,3", "1111", false }, + { "0,2-3,5", "101101", false }, + { "3-3", "0001", false }, + { "2-4,6,9-10", "00111010011", false }, + /* failure cases */ + { "", "", true }, + { "0-", "", true }, + { "0 ", "", true }, + { "0_1", "", true }, + { "1-0", "", true }, + { "-1", "", true }, +}; + +void test_cpu_mask() +{ + int i, err, n; + bool *mask; + + for (i = 0; i < ARRAY_SIZE(test_cases); i++) { + mask = NULL; + err = parse_cpu_mask_str(test_cases[i].cpu_mask, &mask, &n); + if (test_cases[i].fails) { + CHECK(!err, "should_fail", + "case #%d: parsing should fail!\n", i + 1); + } else { + if (CHECK(err, "parse_err", + "case #%d: cpu mask parsing failed: %d\n", + i + 1, err)) + continue; + validate_mask(i + 1, test_cases[i].expect, mask, n); + } + free(mask); + } +} diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c index 40bcff2cc274..235ac4f67f5b 100644 --- a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c +++ b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c @@ -1,90 +1,55 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019 Facebook */ #include <test_progs.h> +#include "test_pkt_access.skel.h" +#include "fentry_test.skel.h" +#include "fexit_test.skel.h" void 
test_fentry_fexit(void) { - struct bpf_prog_load_attr attr_fentry = { - .file = "./fentry_test.o", - }; - struct bpf_prog_load_attr attr_fexit = { - .file = "./fexit_test.o", - }; - - struct bpf_object *obj_fentry = NULL, *obj_fexit = NULL, *pkt_obj; - struct bpf_map *data_map_fentry, *data_map_fexit; - char fentry_name[] = "fentry/bpf_fentry_testX"; - char fexit_name[] = "fexit/bpf_fentry_testX"; - int err, pkt_fd, kfree_skb_fd, i; - struct bpf_link *link[12] = {}; - struct bpf_program *prog[12]; - __u32 duration, retval; - const int zero = 0; - u64 result[12]; - - err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS, - &pkt_obj, &pkt_fd); - if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno)) + struct test_pkt_access *pkt_skel = NULL; + struct fentry_test *fentry_skel = NULL; + struct fexit_test *fexit_skel = NULL; + __u64 *fentry_res, *fexit_res; + __u32 duration = 0, retval; + int err, pkt_fd, i; + + pkt_skel = test_pkt_access__open_and_load(); + if (CHECK(!pkt_skel, "pkt_skel_load", "pkt_access skeleton failed\n")) return; - err = bpf_prog_load_xattr(&attr_fentry, &obj_fentry, &kfree_skb_fd); - if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno)) + fentry_skel = fentry_test__open_and_load(); + if (CHECK(!fentry_skel, "fentry_skel_load", "fentry skeleton failed\n")) goto close_prog; - err = bpf_prog_load_xattr(&attr_fexit, &obj_fexit, &kfree_skb_fd); - if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno)) + fexit_skel = fexit_test__open_and_load(); + if (CHECK(!fexit_skel, "fexit_skel_load", "fexit skeleton failed\n")) goto close_prog; - for (i = 0; i < 6; i++) { - fentry_name[sizeof(fentry_name) - 2] = '1' + i; - prog[i] = bpf_object__find_program_by_title(obj_fentry, fentry_name); - if (CHECK(!prog[i], "find_prog", "prog %s not found\n", fentry_name)) - goto close_prog; - link[i] = bpf_program__attach_trace(prog[i]); - if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n")) - goto close_prog; - } - data_map_fentry = bpf_object__find_map_by_name(obj_fentry, "fentry_t.bss"); - if (CHECK(!data_map_fentry, "find_data_map", "data map not found\n")) + err = fentry_test__attach(fentry_skel); + if (CHECK(err, "fentry_attach", "fentry attach failed: %d\n", err)) goto close_prog; - - for (i = 6; i < 12; i++) { - fexit_name[sizeof(fexit_name) - 2] = '1' + i - 6; - prog[i] = bpf_object__find_program_by_title(obj_fexit, fexit_name); - if (CHECK(!prog[i], "find_prog", "prog %s not found\n", fexit_name)) - goto close_prog; - link[i] = bpf_program__attach_trace(prog[i]); - if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n")) - goto close_prog; - } - data_map_fexit = bpf_object__find_map_by_name(obj_fexit, "fexit_te.bss"); - if (CHECK(!data_map_fexit, "find_data_map", "data map not found\n")) + err = fexit_test__attach(fexit_skel); + if (CHECK(err, "fexit_attach", "fexit attach failed: %d\n", err)) goto close_prog; + pkt_fd = bpf_program__fd(pkt_skel->progs.test_pkt_access); err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6), NULL, NULL, &retval, &duration); CHECK(err || retval, "ipv6", "err %d errno %d retval %d duration %d\n", err, errno, retval, duration); - err = bpf_map_lookup_elem(bpf_map__fd(data_map_fentry), &zero, &result); - if (CHECK(err, "get_result", - "failed to get output data: %d\n", err)) - goto close_prog; - - err = bpf_map_lookup_elem(bpf_map__fd(data_map_fexit), &zero, result + 6); - if (CHECK(err, "get_result", - "failed to get output data: %d\n", err)) - goto close_prog; - - for (i = 0; i < 
12; i++) - if (CHECK(result[i] != 1, "result", "bpf_fentry_test%d failed err %ld\n", - i % 6 + 1, result[i])) - goto close_prog; + fentry_res = (__u64 *)fentry_skel->bss; + fexit_res = (__u64 *)fexit_skel->bss; + printf("%lld\n", fentry_skel->bss->test1_result); + for (i = 0; i < 6; i++) { + CHECK(fentry_res[i] != 1, "result", + "fentry_test%d failed err %lld\n", i + 1, fentry_res[i]); + CHECK(fexit_res[i] != 1, "result", + "fexit_test%d failed err %lld\n", i + 1, fexit_res[i]); + } close_prog: - for (i = 0; i < 12; i++) - if (!IS_ERR_OR_NULL(link[i])) - bpf_link__destroy(link[i]); - bpf_object__close(obj_fentry); - bpf_object__close(obj_fexit); - bpf_object__close(pkt_obj); + test_pkt_access__destroy(pkt_skel); + fentry_test__destroy(fentry_skel); + fexit_test__destroy(fexit_skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_test.c b/tools/testing/selftests/bpf/prog_tests/fentry_test.c index 9fb103193878..5cc06021f27d 100644 --- a/tools/testing/selftests/bpf/prog_tests/fentry_test.c +++ b/tools/testing/selftests/bpf/prog_tests/fentry_test.c @@ -1,64 +1,43 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019 Facebook */ #include <test_progs.h> +#include "test_pkt_access.skel.h" +#include "fentry_test.skel.h" void test_fentry_test(void) { - struct bpf_prog_load_attr attr = { - .file = "./fentry_test.o", - }; - - char prog_name[] = "fentry/bpf_fentry_testX"; - struct bpf_object *obj = NULL, *pkt_obj; - int err, pkt_fd, kfree_skb_fd, i; - struct bpf_link *link[6] = {}; - struct bpf_program *prog[6]; - __u32 duration, retval; - struct bpf_map *data_map; - const int zero = 0; - u64 result[6]; - - err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS, - &pkt_obj, &pkt_fd); - if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno)) + struct test_pkt_access *pkt_skel = NULL; + struct fentry_test *fentry_skel = NULL; + int err, pkt_fd, i; + __u32 duration = 0, retval; + __u64 *result; + + pkt_skel = test_pkt_access__open_and_load(); + if (CHECK(!pkt_skel, "pkt_skel_load", "pkt_access skeleton failed\n")) return; - err = bpf_prog_load_xattr(&attr, &obj, &kfree_skb_fd); - if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno)) - goto close_prog; + fentry_skel = fentry_test__open_and_load(); + if (CHECK(!fentry_skel, "fentry_skel_load", "fentry skeleton failed\n")) + goto cleanup; - for (i = 0; i < 6; i++) { - prog_name[sizeof(prog_name) - 2] = '1' + i; - prog[i] = bpf_object__find_program_by_title(obj, prog_name); - if (CHECK(!prog[i], "find_prog", "prog %s not found\n", prog_name)) - goto close_prog; - link[i] = bpf_program__attach_trace(prog[i]); - if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n")) - goto close_prog; - } - data_map = bpf_object__find_map_by_name(obj, "fentry_t.bss"); - if (CHECK(!data_map, "find_data_map", "data map not found\n")) - goto close_prog; + err = fentry_test__attach(fentry_skel); + if (CHECK(err, "fentry_attach", "fentry attach failed: %d\n", err)) + goto cleanup; + pkt_fd = bpf_program__fd(pkt_skel->progs.test_pkt_access); err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6), NULL, NULL, &retval, &duration); CHECK(err || retval, "ipv6", "err %d errno %d retval %d duration %d\n", err, errno, retval, duration); - err = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, &result); - if (CHECK(err, "get_result", - "failed to get output data: %d\n", err)) - goto close_prog; - - for (i = 0; i < 6; i++) - if (CHECK(result[i] != 1, "result", "bpf_fentry_test%d failed err %ld\n", - i + 1, 
result[i])) - goto close_prog; + result = (__u64 *)fentry_skel->bss; + for (i = 0; i < 6; i++) { + if (CHECK(result[i] != 1, "result", + "fentry_test%d failed err %lld\n", i + 1, result[i])) + goto cleanup; + } -close_prog: - for (i = 0; i < 6; i++) - if (!IS_ERR_OR_NULL(link[i])) - bpf_link__destroy(link[i]); - bpf_object__close(obj); - bpf_object__close(pkt_obj); +cleanup: + fentry_test__destroy(fentry_skel); + test_pkt_access__destroy(pkt_skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c index b426bf2f97e4..cde463af7071 100644 --- a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c +++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c @@ -11,7 +11,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file, int err, pkt_fd, i; struct bpf_link **link = NULL; struct bpf_program **prog = NULL; - __u32 duration, retval; + __u32 duration = 0, retval; struct bpf_map *data_map; const int zero = 0; u64 *result = NULL; @@ -26,7 +26,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file, link = calloc(sizeof(struct bpf_link *), prog_cnt); prog = calloc(sizeof(struct bpf_program *), prog_cnt); - result = malloc(prog_cnt * sizeof(u64)); + result = malloc((prog_cnt + 32 /* spare */) * sizeof(u64)); if (CHECK(!link || !prog || !result, "alloc_memory", "failed to alloc memory")) goto close_prog; @@ -98,6 +98,24 @@ static void test_target_yes_callees(void) "fexit/test_pkt_access", "fexit/test_pkt_access_subprog1", "fexit/test_pkt_access_subprog2", + "fexit/test_pkt_access_subprog3", + }; + test_fexit_bpf2bpf_common("./fexit_bpf2bpf.o", + "./test_pkt_access.o", + ARRAY_SIZE(prog_name), + prog_name); +} + +static void test_func_replace(void) +{ + const char *prog_name[] = { + "fexit/test_pkt_access", + "fexit/test_pkt_access_subprog1", + "fexit/test_pkt_access_subprog2", + "fexit/test_pkt_access_subprog3", + "freplace/get_skb_len", + "freplace/get_skb_ifindex", + "freplace/get_constant", }; test_fexit_bpf2bpf_common("./fexit_bpf2bpf.o", "./test_pkt_access.o", @@ -109,4 +127,5 @@ void test_fexit_bpf2bpf(void) { test_target_no_callees(); test_target_yes_callees(); + test_func_replace(); } diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_test.c b/tools/testing/selftests/bpf/prog_tests/fexit_test.c index f99013222c74..d2c3655dd7a3 100644 --- a/tools/testing/selftests/bpf/prog_tests/fexit_test.c +++ b/tools/testing/selftests/bpf/prog_tests/fexit_test.c @@ -13,7 +13,7 @@ void test_fexit_test(void) int err, pkt_fd, kfree_skb_fd, i; struct bpf_link *link[6] = {}; struct bpf_program *prog[6]; - __u32 duration, retval; + __u32 duration = 0, retval; struct bpf_map *data_map; const int zero = 0; u64 result[6]; diff --git a/tools/testing/selftests/bpf/prog_tests/mmap.c b/tools/testing/selftests/bpf/prog_tests/mmap.c index 051a6d48762c..16a814eb4d64 100644 --- a/tools/testing/selftests/bpf/prog_tests/mmap.c +++ b/tools/testing/selftests/bpf/prog_tests/mmap.c @@ -1,16 +1,12 @@ // SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> #include <sys/mman.h> +#include "test_mmap.skel.h" struct map_data { __u64 val[512 * 4]; }; -struct bss_data { - __u64 in_val; - __u64 out_val; -}; - static size_t roundup_page(size_t sz) { long page_size = sysconf(_SC_PAGE_SIZE); @@ -19,41 +15,25 @@ static size_t roundup_page(size_t sz) void test_mmap(void) { - const char *file = "test_mmap.o"; - const char *probe_name = "raw_tracepoint/sys_enter"; - const char *tp_name = "sys_enter"; - const size_t bss_sz = 
roundup_page(sizeof(struct bss_data)); + const size_t bss_sz = roundup_page(sizeof(struct test_mmap__bss)); const size_t map_sz = roundup_page(sizeof(struct map_data)); const int zero = 0, one = 1, two = 2, far = 1500; const long page_size = sysconf(_SC_PAGE_SIZE); int err, duration = 0, i, data_map_fd; - struct bpf_program *prog; - struct bpf_object *obj; - struct bpf_link *link = NULL; struct bpf_map *data_map, *bss_map; void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp1, *tmp2; - volatile struct bss_data *bss_data; - volatile struct map_data *map_data; + struct test_mmap__bss *bss_data; + struct map_data *map_data; + struct test_mmap *skel; __u64 val = 0; - obj = bpf_object__open_file("test_mmap.o", NULL); - if (CHECK(IS_ERR(obj), "obj_open", "failed to open '%s': %ld\n", - file, PTR_ERR(obj))) + + skel = test_mmap__open_and_load(); + if (CHECK(!skel, "skel_open_and_load", "skeleton open/load failed\n")) return; - prog = bpf_object__find_program_by_title(obj, probe_name); - if (CHECK(!prog, "find_probe", "prog '%s' not found\n", probe_name)) - goto cleanup; - err = bpf_object__load(obj); - if (CHECK(err, "obj_load", "failed to load prog '%s': %d\n", - probe_name, err)) - goto cleanup; - bss_map = bpf_object__find_map_by_name(obj, "test_mma.bss"); - if (CHECK(!bss_map, "find_bss_map", ".bss map not found\n")) - goto cleanup; - data_map = bpf_object__find_map_by_name(obj, "data_map"); - if (CHECK(!data_map, "find_data_map", "data_map map not found\n")) - goto cleanup; + bss_map = skel->maps.bss; + data_map = skel->maps.data_map; data_map_fd = bpf_map__fd(data_map); bss_mmaped = mmap(NULL, bss_sz, PROT_READ | PROT_WRITE, MAP_SHARED, @@ -77,13 +57,15 @@ void test_mmap(void) CHECK_FAIL(bss_data->in_val); CHECK_FAIL(bss_data->out_val); + CHECK_FAIL(skel->bss->in_val); + CHECK_FAIL(skel->bss->out_val); CHECK_FAIL(map_data->val[0]); CHECK_FAIL(map_data->val[1]); CHECK_FAIL(map_data->val[2]); CHECK_FAIL(map_data->val[far]); - link = bpf_program__attach_raw_tracepoint(prog, tp_name); - if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n", PTR_ERR(link))) + err = test_mmap__attach(skel); + if (CHECK(err, "attach_raw_tp", "err %d\n", err)) goto cleanup; bss_data->in_val = 123; @@ -94,6 +76,8 @@ void test_mmap(void) CHECK_FAIL(bss_data->in_val != 123); CHECK_FAIL(bss_data->out_val != 123); + CHECK_FAIL(skel->bss->in_val != 123); + CHECK_FAIL(skel->bss->out_val != 123); CHECK_FAIL(map_data->val[0] != 111); CHECK_FAIL(map_data->val[1] != 222); CHECK_FAIL(map_data->val[2] != 123); @@ -160,6 +144,8 @@ void test_mmap(void) usleep(1); CHECK_FAIL(bss_data->in_val != 321); CHECK_FAIL(bss_data->out_val != 321); + CHECK_FAIL(skel->bss->in_val != 321); + CHECK_FAIL(skel->bss->out_val != 321); CHECK_FAIL(map_data->val[0] != 111); CHECK_FAIL(map_data->val[1] != 222); CHECK_FAIL(map_data->val[2] != 321); @@ -203,6 +189,8 @@ void test_mmap(void) map_data = tmp2; CHECK_FAIL(bss_data->in_val != 321); CHECK_FAIL(bss_data->out_val != 321); + CHECK_FAIL(skel->bss->in_val != 321); + CHECK_FAIL(skel->bss->out_val != 321); CHECK_FAIL(map_data->val[0] != 111); CHECK_FAIL(map_data->val[1] != 222); CHECK_FAIL(map_data->val[2] != 321); @@ -214,7 +202,5 @@ cleanup: CHECK_FAIL(munmap(bss_mmaped, bss_sz)); if (map_mmaped) CHECK_FAIL(munmap(map_mmaped, map_sz)); - if (!IS_ERR_OR_NULL(link)) - bpf_link__destroy(link); - bpf_object__close(obj); + test_mmap__destroy(skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c index 3003fddc0613..1450ea2dd4cc 100644 
--- a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c +++ b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c @@ -4,6 +4,7 @@ #include <sched.h> #include <sys/socket.h> #include <test_progs.h> +#include "bpf/libbpf_internal.h" static void on_sample(void *ctx, int cpu, void *data, __u32 size) { @@ -19,7 +20,7 @@ static void on_sample(void *ctx, int cpu, void *data, __u32 size) void test_perf_buffer(void) { - int err, prog_fd, nr_cpus, i, duration = 0; + int err, prog_fd, on_len, nr_on_cpus = 0, nr_cpus, i, duration = 0; const char *prog_name = "kprobe/sys_nanosleep"; const char *file = "./test_perf_buffer.o"; struct perf_buffer_opts pb_opts = {}; @@ -29,15 +30,27 @@ void test_perf_buffer(void) struct bpf_object *obj; struct perf_buffer *pb; struct bpf_link *link; + bool *online; nr_cpus = libbpf_num_possible_cpus(); if (CHECK(nr_cpus < 0, "nr_cpus", "err %d\n", nr_cpus)) return; + err = parse_cpu_mask_file("/sys/devices/system/cpu/online", + &online, &on_len); + if (CHECK(err, "nr_on_cpus", "err %d\n", err)) + return; + + for (i = 0; i < on_len; i++) + if (online[i]) + nr_on_cpus++; + /* load program */ err = bpf_prog_load(file, BPF_PROG_TYPE_KPROBE, &obj, &prog_fd); - if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno)) - return; + if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno)) { + obj = NULL; + goto out_close; + } prog = bpf_object__find_program_by_title(obj, prog_name); if (CHECK(!prog, "find_probe", "prog '%s' not found\n", prog_name)) @@ -64,6 +77,11 @@ void test_perf_buffer(void) /* trigger kprobe on every CPU */ CPU_ZERO(&cpu_seen); for (i = 0; i < nr_cpus; i++) { + if (i >= on_len || !online[i]) { + printf("skipping offline CPU #%d\n", i); + continue; + } + CPU_ZERO(&cpu_set); CPU_SET(i, &cpu_set); @@ -81,8 +99,8 @@ void test_perf_buffer(void) if (CHECK(err < 0, "perf_buffer__poll", "err %d\n", err)) goto out_free_pb; - if (CHECK(CPU_COUNT(&cpu_seen) != nr_cpus, "seen_cpu_cnt", - "expect %d, seen %d\n", nr_cpus, CPU_COUNT(&cpu_seen))) + if (CHECK(CPU_COUNT(&cpu_seen) != nr_on_cpus, "seen_cpu_cnt", + "expect %d, seen %d\n", nr_on_cpus, CPU_COUNT(&cpu_seen))) goto out_free_pb; out_free_pb: @@ -91,4 +109,5 @@ out_detach: bpf_link__destroy(link); out_close: bpf_object__close(obj); + free(online); } diff --git a/tools/testing/selftests/bpf/prog_tests/probe_user.c b/tools/testing/selftests/bpf/prog_tests/probe_user.c index 8a3187dec048..7aecfd9e87d1 100644 --- a/tools/testing/selftests/bpf/prog_tests/probe_user.c +++ b/tools/testing/selftests/bpf/prog_tests/probe_user.c @@ -3,8 +3,7 @@ void test_probe_user(void) { -#define kprobe_name "__sys_connect" - const char *prog_name = "kprobe/" kprobe_name; + const char *prog_name = "kprobe/__sys_connect"; const char *obj_file = "./test_probe_user.o"; DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, ); int err, results_map_fd, sock_fd, duration = 0; @@ -33,8 +32,7 @@ void test_probe_user(void) "err %d\n", results_map_fd)) goto cleanup; - kprobe_link = bpf_program__attach_kprobe(kprobe_prog, false, - kprobe_name); + kprobe_link = bpf_program__attach(kprobe_prog); if (CHECK(IS_ERR(kprobe_link), "attach_kprobe", "err %ld\n", PTR_ERR(kprobe_link))) { kprobe_link = NULL; diff --git a/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c b/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c index d90acc13d1ec..563e12120e77 100644 --- a/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c +++ b/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c @@ -16,14 +16,11 @@ struct rdonly_map_subtest { void test_rdonly_maps(void) { - 
const char *prog_name_skip_loop = "raw_tracepoint/sys_enter:skip_loop"; - const char *prog_name_part_loop = "raw_tracepoint/sys_enter:part_loop"; - const char *prog_name_full_loop = "raw_tracepoint/sys_enter:full_loop"; const char *file = "test_rdonly_maps.o"; struct rdonly_map_subtest subtests[] = { - { "skip loop", prog_name_skip_loop, 0, 0 }, - { "part loop", prog_name_part_loop, 3, 2 + 3 + 4 }, - { "full loop", prog_name_full_loop, 4, 2 + 3 + 4 + 5 }, + { "skip loop", "skip_loop", 0, 0 }, + { "part loop", "part_loop", 3, 2 + 3 + 4 }, + { "full loop", "full_loop", 4, 2 + 3 + 4 + 5 }, }; int i, err, zero = 0, duration = 0; struct bpf_link *link = NULL; @@ -50,7 +47,7 @@ void test_rdonly_maps(void) if (!test__start_subtest(t->subtest_name)) continue; - prog = bpf_object__find_program_by_title(obj, t->prog_name); + prog = bpf_object__find_program_by_name(obj, t->prog_name); if (CHECK(!prog, "find_prog", "prog '%s' not found\n", t->prog_name)) goto cleanup; diff --git a/tools/testing/selftests/bpf/test_select_reuseport.c b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c index 7566c13eb51a..098bcae5f827 100644 --- a/tools/testing/selftests/bpf/test_select_reuseport.c +++ b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c @@ -20,8 +20,11 @@ #include <bpf/libbpf.h> #include "bpf_rlimit.h" #include "bpf_util.h" + +#include "test_progs.h" #include "test_select_reuseport_common.h" +#define MAX_TEST_NAME 80 #define MIN_TCPHDR_LEN 20 #define UDPHDR_LEN 8 @@ -30,13 +33,13 @@ #define REUSEPORT_ARRAY_SIZE 32 static int result_map, tmp_index_ovr_map, linum_map, data_check_map; -static enum result expected_results[NR_RESULTS]; +static __u32 expected_results[NR_RESULTS]; static int sk_fds[REUSEPORT_ARRAY_SIZE]; -static int reuseport_array, outer_map; +static int reuseport_array = -1, outer_map = -1; static int select_by_skb_data_prog; -static int saved_tcp_syncookie; +static int saved_tcp_syncookie = -1; static struct bpf_object *obj; -static int saved_tcp_fo; +static int saved_tcp_fo = -1; static __u32 index_zero; static int epfd; @@ -46,16 +49,21 @@ static union sa46 { sa_family_t family; } srv_sa; -#define CHECK(condition, tag, format...) ({ \ - int __ret = !!(condition); \ - if (__ret) { \ - printf("%s(%d):FAIL:%s ", __func__, __LINE__, tag); \ - printf(format); \ - exit(-1); \ +#define RET_IF(condition, tag, format...) ({ \ + if (CHECK_FAIL(condition)) { \ + printf(tag " " format); \ + return; \ + } \ +}) + +#define RET_ERR(condition, tag, format...) 
({ \ + if (CHECK_FAIL(condition)) { \ + printf(tag " " format); \ + return -1; \ } \ }) -static void create_maps(void) +static int create_maps(void) { struct bpf_create_map_attr attr = {}; @@ -67,8 +75,8 @@ static void create_maps(void) attr.max_entries = REUSEPORT_ARRAY_SIZE; reuseport_array = bpf_create_map_xattr(&attr); - CHECK(reuseport_array == -1, "creating reuseport_array", - "reuseport_array:%d errno:%d\n", reuseport_array, errno); + RET_ERR(reuseport_array == -1, "creating reuseport_array", + "reuseport_array:%d errno:%d\n", reuseport_array, errno); /* Creating outer_map */ attr.name = "outer_map"; @@ -78,63 +86,61 @@ static void create_maps(void) attr.max_entries = 1; attr.inner_map_fd = reuseport_array; outer_map = bpf_create_map_xattr(&attr); - CHECK(outer_map == -1, "creating outer_map", - "outer_map:%d errno:%d\n", outer_map, errno); + RET_ERR(outer_map == -1, "creating outer_map", + "outer_map:%d errno:%d\n", outer_map, errno); + + return 0; } -static void prepare_bpf_obj(void) +static int prepare_bpf_obj(void) { struct bpf_program *prog; struct bpf_map *map; int err; - struct bpf_object_open_attr attr = { - .file = "test_select_reuseport_kern.o", - .prog_type = BPF_PROG_TYPE_SK_REUSEPORT, - }; - - obj = bpf_object__open_xattr(&attr); - CHECK(IS_ERR_OR_NULL(obj), "open test_select_reuseport_kern.o", - "obj:%p PTR_ERR(obj):%ld\n", obj, PTR_ERR(obj)); - prog = bpf_program__next(NULL, obj); - CHECK(!prog, "get first bpf_program", "!prog\n"); - bpf_program__set_type(prog, attr.prog_type); + obj = bpf_object__open("test_select_reuseport_kern.o"); + RET_ERR(IS_ERR_OR_NULL(obj), "open test_select_reuseport_kern.o", + "obj:%p PTR_ERR(obj):%ld\n", obj, PTR_ERR(obj)); map = bpf_object__find_map_by_name(obj, "outer_map"); - CHECK(!map, "find outer_map", "!map\n"); + RET_ERR(!map, "find outer_map", "!map\n"); err = bpf_map__reuse_fd(map, outer_map); - CHECK(err, "reuse outer_map", "err:%d\n", err); + RET_ERR(err, "reuse outer_map", "err:%d\n", err); err = bpf_object__load(obj); - CHECK(err, "load bpf_object", "err:%d\n", err); + RET_ERR(err, "load bpf_object", "err:%d\n", err); + prog = bpf_program__next(NULL, obj); + RET_ERR(!prog, "get first bpf_program", "!prog\n"); select_by_skb_data_prog = bpf_program__fd(prog); - CHECK(select_by_skb_data_prog == -1, "get prog fd", - "select_by_skb_data_prog:%d\n", select_by_skb_data_prog); + RET_ERR(select_by_skb_data_prog == -1, "get prog fd", + "select_by_skb_data_prog:%d\n", select_by_skb_data_prog); map = bpf_object__find_map_by_name(obj, "result_map"); - CHECK(!map, "find result_map", "!map\n"); + RET_ERR(!map, "find result_map", "!map\n"); result_map = bpf_map__fd(map); - CHECK(result_map == -1, "get result_map fd", - "result_map:%d\n", result_map); + RET_ERR(result_map == -1, "get result_map fd", + "result_map:%d\n", result_map); map = bpf_object__find_map_by_name(obj, "tmp_index_ovr_map"); - CHECK(!map, "find tmp_index_ovr_map", "!map\n"); + RET_ERR(!map, "find tmp_index_ovr_map\n", "!map"); tmp_index_ovr_map = bpf_map__fd(map); - CHECK(tmp_index_ovr_map == -1, "get tmp_index_ovr_map fd", - "tmp_index_ovr_map:%d\n", tmp_index_ovr_map); + RET_ERR(tmp_index_ovr_map == -1, "get tmp_index_ovr_map fd", + "tmp_index_ovr_map:%d\n", tmp_index_ovr_map); map = bpf_object__find_map_by_name(obj, "linum_map"); - CHECK(!map, "find linum_map", "!map\n"); + RET_ERR(!map, "find linum_map", "!map\n"); linum_map = bpf_map__fd(map); - CHECK(linum_map == -1, "get linum_map fd", - "linum_map:%d\n", linum_map); + RET_ERR(linum_map == -1, "get linum_map fd", + 
"linum_map:%d\n", linum_map); map = bpf_object__find_map_by_name(obj, "data_check_map"); - CHECK(!map, "find data_check_map", "!map\n"); + RET_ERR(!map, "find data_check_map", "!map\n"); data_check_map = bpf_map__fd(map); - CHECK(data_check_map == -1, "get data_check_map fd", - "data_check_map:%d\n", data_check_map); + RET_ERR(data_check_map == -1, "get data_check_map fd", + "data_check_map:%d\n", data_check_map); + + return 0; } static void sa46_init_loopback(union sa46 *sa, sa_family_t family) @@ -163,65 +169,73 @@ static int read_int_sysctl(const char *sysctl) int fd, ret; fd = open(sysctl, 0); - CHECK(fd == -1, "open(sysctl)", "sysctl:%s fd:%d errno:%d\n", - sysctl, fd, errno); + RET_ERR(fd == -1, "open(sysctl)", + "sysctl:%s fd:%d errno:%d\n", sysctl, fd, errno); ret = read(fd, buf, sizeof(buf)); - CHECK(ret <= 0, "read(sysctl)", "sysctl:%s ret:%d errno:%d\n", - sysctl, ret, errno); - close(fd); + RET_ERR(ret <= 0, "read(sysctl)", + "sysctl:%s ret:%d errno:%d\n", sysctl, ret, errno); + close(fd); return atoi(buf); } -static void write_int_sysctl(const char *sysctl, int v) +static int write_int_sysctl(const char *sysctl, int v) { int fd, ret, size; char buf[16]; fd = open(sysctl, O_RDWR); - CHECK(fd == -1, "open(sysctl)", "sysctl:%s fd:%d errno:%d\n", - sysctl, fd, errno); + RET_ERR(fd == -1, "open(sysctl)", + "sysctl:%s fd:%d errno:%d\n", sysctl, fd, errno); size = snprintf(buf, sizeof(buf), "%d", v); ret = write(fd, buf, size); - CHECK(ret != size, "write(sysctl)", - "sysctl:%s ret:%d size:%d errno:%d\n", sysctl, ret, size, errno); + RET_ERR(ret != size, "write(sysctl)", + "sysctl:%s ret:%d size:%d errno:%d\n", + sysctl, ret, size, errno); + close(fd); + return 0; } static void restore_sysctls(void) { - write_int_sysctl(TCP_FO_SYSCTL, saved_tcp_fo); - write_int_sysctl(TCP_SYNCOOKIE_SYSCTL, saved_tcp_syncookie); + if (saved_tcp_fo != -1) + write_int_sysctl(TCP_FO_SYSCTL, saved_tcp_fo); + if (saved_tcp_syncookie != -1) + write_int_sysctl(TCP_SYNCOOKIE_SYSCTL, saved_tcp_syncookie); } -static void enable_fastopen(void) +static int enable_fastopen(void) { int fo; fo = read_int_sysctl(TCP_FO_SYSCTL); - write_int_sysctl(TCP_FO_SYSCTL, fo | 7); + if (fo < 0) + return -1; + + return write_int_sysctl(TCP_FO_SYSCTL, fo | 7); } -static void enable_syncookie(void) +static int enable_syncookie(void) { - write_int_sysctl(TCP_SYNCOOKIE_SYSCTL, 2); + return write_int_sysctl(TCP_SYNCOOKIE_SYSCTL, 2); } -static void disable_syncookie(void) +static int disable_syncookie(void) { - write_int_sysctl(TCP_SYNCOOKIE_SYSCTL, 0); + return write_int_sysctl(TCP_SYNCOOKIE_SYSCTL, 0); } -static __u32 get_linum(void) +static long get_linum(void) { __u32 linum; int err; err = bpf_map_lookup_elem(linum_map, &index_zero, &linum); - CHECK(err == -1, "lookup_elem(linum_map)", "err:%d errno:%d\n", - err, errno); + RET_ERR(err == -1, "lookup_elem(linum_map)", "err:%d errno:%d\n", + err, errno); return linum; } @@ -237,12 +251,12 @@ static void check_data(int type, sa_family_t family, const struct cmd *cmd, addrlen = sizeof(cli_sa); err = getsockname(cli_fd, (struct sockaddr *)&cli_sa, &addrlen); - CHECK(err == -1, "getsockname(cli_fd)", "err:%d errno:%d\n", - err, errno); + RET_IF(err == -1, "getsockname(cli_fd)", "err:%d errno:%d\n", + err, errno); err = bpf_map_lookup_elem(data_check_map, &index_zero, &result); - CHECK(err == -1, "lookup_elem(data_check_map)", "err:%d errno:%d\n", - err, errno); + RET_IF(err == -1, "lookup_elem(data_check_map)", "err:%d errno:%d\n", + err, errno); if (type == SOCK_STREAM) { expected.len 
= MIN_TCPHDR_LEN; @@ -284,22 +298,42 @@ static void check_data(int type, sa_family_t family, const struct cmd *cmd, printf("expected: (0x%x, %u, %u)\n", expected.eth_protocol, expected.ip_protocol, expected.bind_inany); - CHECK(1, "data_check result != expected", - "bpf_prog_linum:%u\n", get_linum()); + RET_IF(1, "data_check result != expected", + "bpf_prog_linum:%ld\n", get_linum()); } - CHECK(!result.hash, "data_check result.hash empty", - "result.hash:%u", result.hash); + RET_IF(!result.hash, "data_check result.hash empty", + "result.hash:%u", result.hash); expected.len += cmd ? sizeof(*cmd) : 0; if (type == SOCK_STREAM) - CHECK(expected.len > result.len, "expected.len > result.len", - "expected.len:%u result.len:%u bpf_prog_linum:%u\n", - expected.len, result.len, get_linum()); + RET_IF(expected.len > result.len, "expected.len > result.len", + "expected.len:%u result.len:%u bpf_prog_linum:%ld\n", + expected.len, result.len, get_linum()); else - CHECK(expected.len != result.len, "expected.len != result.len", - "expected.len:%u result.len:%u bpf_prog_linum:%u\n", - expected.len, result.len, get_linum()); + RET_IF(expected.len != result.len, "expected.len != result.len", + "expected.len:%u result.len:%u bpf_prog_linum:%ld\n", + expected.len, result.len, get_linum()); +} + +static const char *result_to_str(enum result res) +{ + switch (res) { + case DROP_ERR_INNER_MAP: + return "DROP_ERR_INNER_MAP"; + case DROP_ERR_SKB_DATA: + return "DROP_ERR_SKB_DATA"; + case DROP_ERR_SK_SELECT_REUSEPORT: + return "DROP_ERR_SK_SELECT_REUSEPORT"; + case DROP_MISC: + return "DROP_MISC"; + case PASS: + return "PASS"; + case PASS_ERR_SK_SELECT_REUSEPORT: + return "PASS_ERR_SK_SELECT_REUSEPORT"; + default: + return "UNKNOWN"; + } } static void check_results(void) @@ -310,8 +344,8 @@ static void check_results(void) for (i = 0; i < NR_RESULTS; i++) { err = bpf_map_lookup_elem(result_map, &i, &results[i]); - CHECK(err == -1, "lookup_elem(result_map)", - "i:%u err:%d errno:%d\n", i, err, errno); + RET_IF(err == -1, "lookup_elem(result_map)", + "i:%u err:%d errno:%d\n", i, err, errno); } for (i = 0; i < NR_RESULTS; i++) { @@ -337,10 +371,10 @@ static void check_results(void) printf(", %u", expected_results[i]); printf("]\n"); - CHECK(expected_results[broken] != results[broken], - "unexpected result", - "expected_results[%u] != results[%u] bpf_prog_linum:%u\n", - broken, broken, get_linum()); + printf("mismatch on %s (bpf_prog_linum:%ld)\n", result_to_str(broken), + get_linum()); + + CHECK_FAIL(true); } static int send_data(int type, sa_family_t family, void *data, size_t len, @@ -350,17 +384,17 @@ static int send_data(int type, sa_family_t family, void *data, size_t len, int fd, err; fd = socket(family, type, 0); - CHECK(fd == -1, "socket()", "fd:%d errno:%d\n", fd, errno); + RET_ERR(fd == -1, "socket()", "fd:%d errno:%d\n", fd, errno); sa46_init_loopback(&cli_sa, family); err = bind(fd, (struct sockaddr *)&cli_sa, sizeof(cli_sa)); - CHECK(fd == -1, "bind(cli_sa)", "err:%d errno:%d\n", err, errno); + RET_ERR(fd == -1, "bind(cli_sa)", "err:%d errno:%d\n", err, errno); err = sendto(fd, data, len, MSG_FASTOPEN, (struct sockaddr *)&srv_sa, sizeof(srv_sa)); - CHECK(err != len && expected >= PASS, - "sendto()", "family:%u err:%d errno:%d expected:%d\n", - family, err, errno, expected); + RET_ERR(err != len && expected >= PASS, + "sendto()", "family:%u err:%d errno:%d expected:%d\n", + family, err, errno, expected); return fd; } @@ -375,47 +409,49 @@ static void do_test(int type, sa_family_t family, struct cmd *cmd, 
cli_fd = send_data(type, family, cmd, cmd ? sizeof(*cmd) : 0, expected); + if (cli_fd < 0) + return; nev = epoll_wait(epfd, &ev, 1, expected >= PASS ? 5 : 0); - CHECK((nev <= 0 && expected >= PASS) || - (nev > 0 && expected < PASS), - "nev <> expected", - "nev:%d expected:%d type:%d family:%d data:(%d, %d)\n", - nev, expected, type, family, - cmd ? cmd->reuseport_index : -1, - cmd ? cmd->pass_on_failure : -1); + RET_IF((nev <= 0 && expected >= PASS) || + (nev > 0 && expected < PASS), + "nev <> expected", + "nev:%d expected:%d type:%d family:%d data:(%d, %d)\n", + nev, expected, type, family, + cmd ? cmd->reuseport_index : -1, + cmd ? cmd->pass_on_failure : -1); check_results(); check_data(type, family, cmd, cli_fd); if (expected < PASS) return; - CHECK(expected != PASS_ERR_SK_SELECT_REUSEPORT && - cmd->reuseport_index != ev.data.u32, - "check cmd->reuseport_index", - "cmd:(%u, %u) ev.data.u32:%u\n", - cmd->pass_on_failure, cmd->reuseport_index, ev.data.u32); + RET_IF(expected != PASS_ERR_SK_SELECT_REUSEPORT && + cmd->reuseport_index != ev.data.u32, + "check cmd->reuseport_index", + "cmd:(%u, %u) ev.data.u32:%u\n", + cmd->pass_on_failure, cmd->reuseport_index, ev.data.u32); srv_fd = sk_fds[ev.data.u32]; if (type == SOCK_STREAM) { int new_fd = accept(srv_fd, NULL, 0); - CHECK(new_fd == -1, "accept(srv_fd)", - "ev.data.u32:%u new_fd:%d errno:%d\n", - ev.data.u32, new_fd, errno); + RET_IF(new_fd == -1, "accept(srv_fd)", + "ev.data.u32:%u new_fd:%d errno:%d\n", + ev.data.u32, new_fd, errno); nread = recv(new_fd, &rcv_cmd, sizeof(rcv_cmd), MSG_DONTWAIT); - CHECK(nread != sizeof(rcv_cmd), - "recv(new_fd)", - "ev.data.u32:%u nread:%zd sizeof(rcv_cmd):%zu errno:%d\n", - ev.data.u32, nread, sizeof(rcv_cmd), errno); + RET_IF(nread != sizeof(rcv_cmd), + "recv(new_fd)", + "ev.data.u32:%u nread:%zd sizeof(rcv_cmd):%zu errno:%d\n", + ev.data.u32, nread, sizeof(rcv_cmd), errno); close(new_fd); } else { nread = recv(srv_fd, &rcv_cmd, sizeof(rcv_cmd), MSG_DONTWAIT); - CHECK(nread != sizeof(rcv_cmd), - "recv(sk_fds)", - "ev.data.u32:%u nread:%zd sizeof(rcv_cmd):%zu errno:%d\n", - ev.data.u32, nread, sizeof(rcv_cmd), errno); + RET_IF(nread != sizeof(rcv_cmd), + "recv(sk_fds)", + "ev.data.u32:%u nread:%zd sizeof(rcv_cmd):%zu errno:%d\n", + ev.data.u32, nread, sizeof(rcv_cmd), errno); } close(cli_fd); @@ -428,18 +464,14 @@ static void test_err_inner_map(int type, sa_family_t family) .pass_on_failure = 0, }; - printf("%s: ", __func__); expected_results[DROP_ERR_INNER_MAP]++; do_test(type, family, &cmd, DROP_ERR_INNER_MAP); - printf("OK\n"); } static void test_err_skb_data(int type, sa_family_t family) { - printf("%s: ", __func__); expected_results[DROP_ERR_SKB_DATA]++; do_test(type, family, NULL, DROP_ERR_SKB_DATA); - printf("OK\n"); } static void test_err_sk_select_port(int type, sa_family_t family) @@ -449,10 +481,8 @@ static void test_err_sk_select_port(int type, sa_family_t family) .pass_on_failure = 0, }; - printf("%s: ", __func__); expected_results[DROP_ERR_SK_SELECT_REUSEPORT]++; do_test(type, family, &cmd, DROP_ERR_SK_SELECT_REUSEPORT); - printf("OK\n"); } static void test_pass(int type, sa_family_t family) @@ -460,14 +490,12 @@ static void test_pass(int type, sa_family_t family) struct cmd cmd; int i; - printf("%s: ", __func__); cmd.pass_on_failure = 0; for (i = 0; i < REUSEPORT_ARRAY_SIZE; i++) { expected_results[PASS]++; cmd.reuseport_index = i; do_test(type, family, &cmd, PASS); } - printf("OK\n"); } static void test_syncookie(int type, sa_family_t family) @@ -481,7 +509,6 @@ static void 
test_syncookie(int type, sa_family_t family) if (type != SOCK_STREAM) return; - printf("%s: ", __func__); /* * +1 for TCP-SYN and * +1 for the TCP-ACK (ack the syncookie) @@ -497,17 +524,16 @@ static void test_syncookie(int type, sa_family_t family) */ err = bpf_map_update_elem(tmp_index_ovr_map, &index_zero, &tmp_index, BPF_ANY); - CHECK(err == -1, "update_elem(tmp_index_ovr_map, 0, 1)", - "err:%d errno:%d\n", err, errno); + RET_IF(err == -1, "update_elem(tmp_index_ovr_map, 0, 1)", + "err:%d errno:%d\n", err, errno); do_test(type, family, &cmd, PASS); err = bpf_map_lookup_elem(tmp_index_ovr_map, &index_zero, &tmp_index); - CHECK(err == -1 || tmp_index != -1, - "lookup_elem(tmp_index_ovr_map)", - "err:%d errno:%d tmp_index:%d\n", - err, errno, tmp_index); + RET_IF(err == -1 || tmp_index != -1, + "lookup_elem(tmp_index_ovr_map)", + "err:%d errno:%d tmp_index:%d\n", + err, errno, tmp_index); disable_syncookie(); - printf("OK\n"); } static void test_pass_on_err(int type, sa_family_t family) @@ -517,10 +543,8 @@ static void test_pass_on_err(int type, sa_family_t family) .pass_on_failure = 1, }; - printf("%s: ", __func__); expected_results[PASS_ERR_SK_SELECT_REUSEPORT] += 1; do_test(type, family, &cmd, PASS_ERR_SK_SELECT_REUSEPORT); - printf("OK\n"); } static void test_detach_bpf(int type, sa_family_t family) @@ -532,46 +556,47 @@ static void test_detach_bpf(int type, sa_family_t family) struct cmd cmd = {}; int optvalue = 0; - printf("%s: ", __func__); err = setsockopt(sk_fds[0], SOL_SOCKET, SO_DETACH_REUSEPORT_BPF, &optvalue, sizeof(optvalue)); - CHECK(err == -1, "setsockopt(SO_DETACH_REUSEPORT_BPF)", - "err:%d errno:%d\n", err, errno); + RET_IF(err == -1, "setsockopt(SO_DETACH_REUSEPORT_BPF)", + "err:%d errno:%d\n", err, errno); err = setsockopt(sk_fds[1], SOL_SOCKET, SO_DETACH_REUSEPORT_BPF, &optvalue, sizeof(optvalue)); - CHECK(err == 0 || errno != ENOENT, "setsockopt(SO_DETACH_REUSEPORT_BPF)", - "err:%d errno:%d\n", err, errno); + RET_IF(err == 0 || errno != ENOENT, + "setsockopt(SO_DETACH_REUSEPORT_BPF)", + "err:%d errno:%d\n", err, errno); for (i = 0; i < NR_RESULTS; i++) { err = bpf_map_lookup_elem(result_map, &i, &tmp); - CHECK(err == -1, "lookup_elem(result_map)", - "i:%u err:%d errno:%d\n", i, err, errno); + RET_IF(err == -1, "lookup_elem(result_map)", + "i:%u err:%d errno:%d\n", i, err, errno); nr_run_before += tmp; } cli_fd = send_data(type, family, &cmd, sizeof(cmd), PASS); + if (cli_fd < 0) + return; nev = epoll_wait(epfd, &ev, 1, 5); - CHECK(nev <= 0, "nev <= 0", - "nev:%d expected:1 type:%d family:%d data:(0, 0)\n", - nev, type, family); + RET_IF(nev <= 0, "nev <= 0", + "nev:%d expected:1 type:%d family:%d data:(0, 0)\n", + nev, type, family); for (i = 0; i < NR_RESULTS; i++) { err = bpf_map_lookup_elem(result_map, &i, &tmp); - CHECK(err == -1, "lookup_elem(result_map)", - "i:%u err:%d errno:%d\n", i, err, errno); + RET_IF(err == -1, "lookup_elem(result_map)", + "i:%u err:%d errno:%d\n", i, err, errno); nr_run_after += tmp; } - CHECK(nr_run_before != nr_run_after, - "nr_run_before != nr_run_after", - "nr_run_before:%u nr_run_after:%u\n", - nr_run_before, nr_run_after); + RET_IF(nr_run_before != nr_run_after, + "nr_run_before != nr_run_after", + "nr_run_before:%u nr_run_after:%u\n", + nr_run_before, nr_run_after); - printf("OK\n"); close(cli_fd); #else - printf("%s: SKIP\n", __func__); + test__skip(); #endif } @@ -594,149 +619,220 @@ static void prepare_sk_fds(int type, sa_family_t family, bool inany) */ for (i = first; i >= 0; i--) { sk_fds[i] = socket(family, type, 0); - 
CHECK(sk_fds[i] == -1, "socket()", "sk_fds[%d]:%d errno:%d\n", - i, sk_fds[i], errno); + RET_IF(sk_fds[i] == -1, "socket()", "sk_fds[%d]:%d errno:%d\n", + i, sk_fds[i], errno); err = setsockopt(sk_fds[i], SOL_SOCKET, SO_REUSEPORT, &optval, sizeof(optval)); - CHECK(err == -1, "setsockopt(SO_REUSEPORT)", - "sk_fds[%d] err:%d errno:%d\n", - i, err, errno); + RET_IF(err == -1, "setsockopt(SO_REUSEPORT)", + "sk_fds[%d] err:%d errno:%d\n", + i, err, errno); if (i == first) { err = setsockopt(sk_fds[i], SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF, &select_by_skb_data_prog, sizeof(select_by_skb_data_prog)); - CHECK(err == -1, "setsockopt(SO_ATTACH_REUEPORT_EBPF)", - "err:%d errno:%d\n", err, errno); + RET_IF(err == -1, "setsockopt(SO_ATTACH_REUEPORT_EBPF)", + "err:%d errno:%d\n", err, errno); } err = bind(sk_fds[i], (struct sockaddr *)&srv_sa, addrlen); - CHECK(err == -1, "bind()", "sk_fds[%d] err:%d errno:%d\n", - i, err, errno); + RET_IF(err == -1, "bind()", "sk_fds[%d] err:%d errno:%d\n", + i, err, errno); if (type == SOCK_STREAM) { err = listen(sk_fds[i], 10); - CHECK(err == -1, "listen()", - "sk_fds[%d] err:%d errno:%d\n", - i, err, errno); + RET_IF(err == -1, "listen()", + "sk_fds[%d] err:%d errno:%d\n", + i, err, errno); } err = bpf_map_update_elem(reuseport_array, &i, &sk_fds[i], BPF_NOEXIST); - CHECK(err == -1, "update_elem(reuseport_array)", - "sk_fds[%d] err:%d errno:%d\n", i, err, errno); + RET_IF(err == -1, "update_elem(reuseport_array)", + "sk_fds[%d] err:%d errno:%d\n", i, err, errno); if (i == first) { socklen_t addrlen = sizeof(srv_sa); err = getsockname(sk_fds[i], (struct sockaddr *)&srv_sa, &addrlen); - CHECK(err == -1, "getsockname()", - "sk_fds[%d] err:%d errno:%d\n", i, err, errno); + RET_IF(err == -1, "getsockname()", + "sk_fds[%d] err:%d errno:%d\n", i, err, errno); } } epfd = epoll_create(1); - CHECK(epfd == -1, "epoll_create(1)", - "epfd:%d errno:%d\n", epfd, errno); + RET_IF(epfd == -1, "epoll_create(1)", + "epfd:%d errno:%d\n", epfd, errno); ev.events = EPOLLIN; for (i = 0; i < REUSEPORT_ARRAY_SIZE; i++) { ev.data.u32 = i; err = epoll_ctl(epfd, EPOLL_CTL_ADD, sk_fds[i], &ev); - CHECK(err, "epoll_ctl(EPOLL_CTL_ADD)", "sk_fds[%d]\n", i); + RET_IF(err, "epoll_ctl(EPOLL_CTL_ADD)", "sk_fds[%d]\n", i); } } -static void setup_per_test(int type, unsigned short family, bool inany) +static void setup_per_test(int type, sa_family_t family, bool inany, + bool no_inner_map) { int ovr = -1, err; prepare_sk_fds(type, family, inany); err = bpf_map_update_elem(tmp_index_ovr_map, &index_zero, &ovr, BPF_ANY); - CHECK(err == -1, "update_elem(tmp_index_ovr_map, 0, -1)", - "err:%d errno:%d\n", err, errno); + RET_IF(err == -1, "update_elem(tmp_index_ovr_map, 0, -1)", + "err:%d errno:%d\n", err, errno); + + /* Install reuseport_array to outer_map? 
*/ + if (no_inner_map) + return; + + err = bpf_map_update_elem(outer_map, &index_zero, &reuseport_array, + BPF_ANY); + RET_IF(err == -1, "update_elem(outer_map, 0, reuseport_array)", + "err:%d errno:%d\n", err, errno); } -static void cleanup_per_test(void) +static void cleanup_per_test(bool no_inner_map) { - int i, err; + int i, err, zero = 0; + + memset(expected_results, 0, sizeof(expected_results)); + + for (i = 0; i < NR_RESULTS; i++) { + err = bpf_map_update_elem(result_map, &i, &zero, BPF_ANY); + RET_IF(err, "reset elem in result_map", + "i:%u err:%d errno:%d\n", i, err, errno); + } + + err = bpf_map_update_elem(linum_map, &zero, &zero, BPF_ANY); + RET_IF(err, "reset line number in linum_map", "err:%d errno:%d\n", + err, errno); for (i = 0; i < REUSEPORT_ARRAY_SIZE; i++) close(sk_fds[i]); close(epfd); + /* Delete reuseport_array from outer_map? */ + if (no_inner_map) + return; + err = bpf_map_delete_elem(outer_map, &index_zero); - CHECK(err == -1, "delete_elem(outer_map)", - "err:%d errno:%d\n", err, errno); + RET_IF(err == -1, "delete_elem(outer_map)", + "err:%d errno:%d\n", err, errno); } static void cleanup(void) { - close(outer_map); - close(reuseport_array); - bpf_object__close(obj); + if (outer_map != -1) + close(outer_map); + if (reuseport_array != -1) + close(reuseport_array); + if (obj) + bpf_object__close(obj); } -static void test_all(void) +static const char *family_str(sa_family_t family) { - /* Extra SOCK_STREAM to test bind_inany==true */ - const int types[] = { SOCK_STREAM, SOCK_DGRAM, SOCK_STREAM }; - const char * const type_strings[] = { "TCP", "UDP", "TCP" }; - const char * const family_strings[] = { "IPv6", "IPv4" }; - const unsigned short families[] = { AF_INET6, AF_INET }; - const bool bind_inany[] = { false, false, true }; - int t, f, err; - - for (f = 0; f < ARRAY_SIZE(families); f++) { - unsigned short family = families[f]; - - for (t = 0; t < ARRAY_SIZE(types); t++) { - bool inany = bind_inany[t]; - int type = types[t]; - - printf("######## %s/%s %s ########\n", - family_strings[f], type_strings[t], - inany ? " INANY " : "LOOPBACK"); - - setup_per_test(type, family, inany); - - test_err_inner_map(type, family); - - /* Install reuseport_array to the outer_map */ - err = bpf_map_update_elem(outer_map, &index_zero, - &reuseport_array, BPF_ANY); - CHECK(err == -1, "update_elem(outer_map)", - "err:%d errno:%d\n", err, errno); - - test_err_skb_data(type, family); - test_err_sk_select_port(type, family); - test_pass(type, family); - test_syncookie(type, family); - test_pass_on_err(type, family); - /* Must be the last test */ - test_detach_bpf(type, family); - - cleanup_per_test(); - printf("\n"); - } + switch (family) { + case AF_INET: + return "IPv4"; + case AF_INET6: + return "IPv6"; + default: + return "unknown"; + } +} + +static const char *sotype_str(int sotype) +{ + switch (sotype) { + case SOCK_STREAM: + return "TCP"; + case SOCK_DGRAM: + return "UDP"; + default: + return "unknown"; } } -int main(int argc, const char **argv) +#define TEST_INIT(fn, ...) 
{ fn, #fn, __VA_ARGS__ } + +static void test_config(int sotype, sa_family_t family, bool inany) { - create_maps(); - prepare_bpf_obj(); + const struct test { + void (*fn)(int sotype, sa_family_t family); + const char *name; + bool no_inner_map; + } tests[] = { + TEST_INIT(test_err_inner_map, true /* no_inner_map */), + TEST_INIT(test_err_skb_data), + TEST_INIT(test_err_sk_select_port), + TEST_INIT(test_pass), + TEST_INIT(test_syncookie), + TEST_INIT(test_pass_on_err), + TEST_INIT(test_detach_bpf), + }; + char s[MAX_TEST_NAME]; + const struct test *t; + + for (t = tests; t < tests + ARRAY_SIZE(tests); t++) { + snprintf(s, sizeof(s), "%s/%s %s %s", + family_str(family), sotype_str(sotype), + inany ? "INANY" : "LOOPBACK", t->name); + + if (!test__start_subtest(s)) + continue; + + setup_per_test(sotype, family, inany, t->no_inner_map); + t->fn(sotype, family); + cleanup_per_test(t->no_inner_map); + } +} + +#define BIND_INANY true + +static void test_all(void) +{ + const struct config { + int sotype; + sa_family_t family; + bool inany; + } configs[] = { + { SOCK_STREAM, AF_INET }, + { SOCK_STREAM, AF_INET, BIND_INANY }, + { SOCK_STREAM, AF_INET6 }, + { SOCK_STREAM, AF_INET6, BIND_INANY }, + { SOCK_DGRAM, AF_INET }, + { SOCK_DGRAM, AF_INET6 }, + }; + const struct config *c; + + for (c = configs; c < configs + ARRAY_SIZE(configs); c++) + test_config(c->sotype, c->family, c->inany); +} + +void test_select_reuseport(void) +{ + if (create_maps()) + goto out; + if (prepare_bpf_obj()) + goto out; + saved_tcp_fo = read_int_sysctl(TCP_FO_SYSCTL); saved_tcp_syncookie = read_int_sysctl(TCP_SYNCOOKIE_SYSCTL); - enable_fastopen(); - disable_syncookie(); - atexit(restore_sysctls); + if (saved_tcp_syncookie < 0 || saved_tcp_syncookie < 0) + goto out; - test_all(); + if (enable_fastopen()) + goto out; + if (disable_syncookie()) + goto out; + test_all(); +out: cleanup(); - return 0; + restore_sysctls(); } diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c index b607112c64e7..504abb7bfb95 100644 --- a/tools/testing/selftests/bpf/prog_tests/send_signal.c +++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> +#include "test_send_signal_kern.skel.h" static volatile int sigusr1_received = 0; @@ -9,17 +10,15 @@ static void sigusr1_handler(int signum) } static void test_send_signal_common(struct perf_event_attr *attr, - int prog_type, + bool signal_thread, const char *test_name) { - int err = -1, pmu_fd, prog_fd, info_map_fd, status_map_fd; - const char *file = "./test_send_signal_kern.o"; - struct bpf_object *obj = NULL; + struct test_send_signal_kern *skel; int pipe_c2p[2], pipe_p2c[2]; - __u32 key = 0, duration = 0; + int err = -1, pmu_fd = -1; + __u32 duration = 0; char buf[256]; pid_t pid; - __u64 val; if (CHECK(pipe(pipe_c2p), test_name, "pipe pipe_c2p error: %s\n", strerror(errno))) @@ -73,45 +72,39 @@ static void test_send_signal_common(struct perf_event_attr *attr, close(pipe_c2p[1]); /* close write */ close(pipe_p2c[0]); /* close read */ - err = bpf_prog_load(file, prog_type, &obj, &prog_fd); - if (CHECK(err < 0, test_name, "bpf_prog_load error: %s\n", - strerror(errno))) - goto prog_load_failure; - - pmu_fd = syscall(__NR_perf_event_open, attr, pid, -1, - -1 /* group id */, 0 /* flags */); - if (CHECK(pmu_fd < 0, test_name, "perf_event_open error: %s\n", - strerror(errno))) { - err = -1; - goto close_prog; - } + skel = test_send_signal_kern__open_and_load(); 
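The skeleton conversion above replaces the manual bpf_prog_load()/bpf_object__find_map_fd_by_name() sequence with the libbpf-generated test_send_signal_kern skeleton, so the test's global variables (pid, sig, signal_thread) are written directly through skel->bss. A minimal sketch of that lifecycle, based only on the skeleton calls used in this hunk (the wrapper function name and the getpid()/SIGUSR1 values are illustrative, not part of the patch):

#include <signal.h>
#include <stdbool.h>
#include <unistd.h>
#include "test_send_signal_kern.skel.h"

static int send_signal_skel_sketch(void)
{
	struct test_send_signal_kern *skel;
	int err;

	/* open and load the embedded BPF object in one step */
	skel = test_send_signal_kern__open_and_load();
	if (!skel)
		return -1;

	/* auto-attach every program that carries attach metadata */
	err = test_send_signal_kern__attach(skel);
	if (err)
		goto out;

	/* globals are memory-mapped; no bpf_map_update_elem() needed */
	skel->bss->pid = getpid();
	skel->bss->sig = SIGUSR1;
	skel->bss->signal_thread = false;

out:
	test_send_signal_kern__destroy(skel);
	return err;
}
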
+ if (CHECK(!skel, "skel_open_and_load", "skeleton open_and_load failed\n")) + goto skel_open_load_failure; - err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0); - if (CHECK(err < 0, test_name, "ioctl perf_event_ioc_enable error: %s\n", - strerror(errno))) - goto disable_pmu; - - err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd); - if (CHECK(err < 0, test_name, "ioctl perf_event_ioc_set_bpf error: %s\n", - strerror(errno))) - goto disable_pmu; - - err = -1; - info_map_fd = bpf_object__find_map_fd_by_name(obj, "info_map"); - if (CHECK(info_map_fd < 0, test_name, "find map %s error\n", "info_map")) - goto disable_pmu; + if (!attr) { + err = test_send_signal_kern__attach(skel); + if (CHECK(err, "skel_attach", "skeleton attach failed\n")) { + err = -1; + goto destroy_skel; + } + } else { + pmu_fd = syscall(__NR_perf_event_open, attr, pid, -1, + -1 /* group id */, 0 /* flags */); + if (CHECK(pmu_fd < 0, test_name, "perf_event_open error: %s\n", + strerror(errno))) { + err = -1; + goto destroy_skel; + } - status_map_fd = bpf_object__find_map_fd_by_name(obj, "status_map"); - if (CHECK(status_map_fd < 0, test_name, "find map %s error\n", "status_map")) - goto disable_pmu; + skel->links.send_signal_perf = + bpf_program__attach_perf_event(skel->progs.send_signal_perf, pmu_fd); + if (CHECK(IS_ERR(skel->links.send_signal_perf), "attach_perf_event", + "err %ld\n", PTR_ERR(skel->links.send_signal_perf))) + goto disable_pmu; + } /* wait until child signal handler installed */ read(pipe_c2p[0], buf, 1); /* trigger the bpf send_signal */ - key = 0; - val = (((__u64)(SIGUSR1)) << 32) | pid; - bpf_map_update_elem(info_map_fd, &key, &val, 0); + skel->bss->pid = pid; + skel->bss->sig = SIGUSR1; + skel->bss->signal_thread = signal_thread; /* notify child that bpf program can send_signal now */ write(pipe_p2c[1], buf, 1); @@ -132,46 +125,20 @@ static void test_send_signal_common(struct perf_event_attr *attr, disable_pmu: close(pmu_fd); -close_prog: - bpf_object__close(obj); -prog_load_failure: +destroy_skel: + test_send_signal_kern__destroy(skel); +skel_open_load_failure: close(pipe_c2p[0]); close(pipe_p2c[1]); wait(NULL); } -static void test_send_signal_tracepoint(void) +static void test_send_signal_tracepoint(bool signal_thread) { - const char *id_path = "/sys/kernel/debug/tracing/events/syscalls/sys_enter_nanosleep/id"; - struct perf_event_attr attr = { - .type = PERF_TYPE_TRACEPOINT, - .sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN, - .sample_period = 1, - .wakeup_events = 1, - }; - __u32 duration = 0; - int bytes, efd; - char buf[256]; - - efd = open(id_path, O_RDONLY, 0); - if (CHECK(efd < 0, "tracepoint", - "open syscalls/sys_enter_nanosleep/id failure: %s\n", - strerror(errno))) - return; - - bytes = read(efd, buf, sizeof(buf)); - close(efd); - if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "tracepoint", - "read syscalls/sys_enter_nanosleep/id failure: %s\n", - strerror(errno))) - return; - - attr.config = strtol(buf, NULL, 0); - - test_send_signal_common(&attr, BPF_PROG_TYPE_TRACEPOINT, "tracepoint"); + test_send_signal_common(NULL, signal_thread, "tracepoint"); } -static void test_send_signal_perf(void) +static void test_send_signal_perf(bool signal_thread) { struct perf_event_attr attr = { .sample_period = 1, @@ -179,15 +146,13 @@ static void test_send_signal_perf(void) .config = PERF_COUNT_SW_CPU_CLOCK, }; - test_send_signal_common(&attr, BPF_PROG_TYPE_PERF_EVENT, - "perf_sw_event"); + test_send_signal_common(&attr, signal_thread, "perf_sw_event"); } -static void test_send_signal_nmi(void) 
+static void test_send_signal_nmi(bool signal_thread) { struct perf_event_attr attr = { - .sample_freq = 50, - .freq = 1, + .sample_period = 1, .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES, }; @@ -210,16 +175,21 @@ static void test_send_signal_nmi(void) close(pmu_fd); } - test_send_signal_common(&attr, BPF_PROG_TYPE_PERF_EVENT, - "perf_hw_event"); + test_send_signal_common(&attr, signal_thread, "perf_hw_event"); } void test_send_signal(void) { if (test__start_subtest("send_signal_tracepoint")) - test_send_signal_tracepoint(); + test_send_signal_tracepoint(false); if (test__start_subtest("send_signal_perf")) - test_send_signal_perf(); + test_send_signal_perf(false); if (test__start_subtest("send_signal_nmi")) - test_send_signal_nmi(); + test_send_signal_nmi(false); + if (test__start_subtest("send_signal_tracepoint_thread")) + test_send_signal_tracepoint(true); + if (test__start_subtest("send_signal_perf_thread")) + test_send_signal_perf(true); + if (test__start_subtest("send_signal_nmi_thread")) + test_send_signal_nmi(true); } diff --git a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c index a2eb8db8dafb..c6d6b685a946 100644 --- a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c +++ b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c @@ -11,6 +11,9 @@ void test_skb_ctx(void) .cb[4] = 5, .priority = 6, .tstamp = 7, + .wire_len = 100, + .gso_segs = 8, + .mark = 9, }; struct bpf_prog_test_run_attr tattr = { .data_in = &pkt_v4, @@ -91,4 +94,8 @@ void test_skb_ctx(void) "ctx_out_tstamp", "skb->tstamp == %lld, expected %d\n", skb.tstamp, 8); + CHECK_ATTR(skb.mark != 10, + "ctx_out_mark", + "skb->mark == %u, expected %d\n", + skb.mark, 10); } diff --git a/tools/testing/selftests/bpf/prog_tests/skeleton.c b/tools/testing/selftests/bpf/prog_tests/skeleton.c new file mode 100644 index 000000000000..9264a2736018 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/skeleton.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ + +#include <test_progs.h> + +struct s { + int a; + long long b; +} __attribute__((packed)); + +#include "test_skeleton.skel.h" + +void test_skeleton(void) +{ + int duration = 0, err; + struct test_skeleton* skel; + struct test_skeleton__bss *bss; + struct test_skeleton__kconfig *kcfg; + + skel = test_skeleton__open(); + if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) + return; + + if (CHECK(skel->kconfig, "skel_kconfig", "kconfig is mmaped()!\n")) + goto cleanup; + + err = test_skeleton__load(skel); + if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err)) + goto cleanup; + + bss = skel->bss; + bss->in1 = 1; + bss->in2 = 2; + bss->in3 = 3; + bss->in4 = 4; + bss->in5.a = 5; + bss->in5.b = 6; + kcfg = skel->kconfig; + + err = test_skeleton__attach(skel); + if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) + goto cleanup; + + /* trigger tracepoint */ + usleep(1); + + CHECK(bss->out1 != 1, "res1", "got %d != exp %d\n", bss->out1, 1); + CHECK(bss->out2 != 2, "res2", "got %lld != exp %d\n", bss->out2, 2); + CHECK(bss->out3 != 3, "res3", "got %d != exp %d\n", (int)bss->out3, 3); + CHECK(bss->out4 != 4, "res4", "got %lld != exp %d\n", bss->out4, 4); + CHECK(bss->handler_out5.a != 5, "res5", "got %d != exp %d\n", + bss->handler_out5.a, 5); + CHECK(bss->handler_out5.b != 6, "res6", "got %lld != exp %d\n", + bss->handler_out5.b, 6); + + CHECK(bss->bpf_syscall != kcfg->CONFIG_BPF_SYSCALL, "ext1", + "got %d != exp %d\n", bss->bpf_syscall, 
kcfg->CONFIG_BPF_SYSCALL); + CHECK(bss->kern_ver != kcfg->LINUX_KERNEL_VERSION, "ext2", + "got %d != exp %d\n", bss->kern_ver, kcfg->LINUX_KERNEL_VERSION); + +cleanup: + test_skeleton__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c new file mode 100644 index 000000000000..07f5b462c2ef --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Cloudflare + +#include "test_progs.h" + +static int connected_socket_v4(void) +{ + struct sockaddr_in addr = { + .sin_family = AF_INET, + .sin_port = htons(80), + .sin_addr = { inet_addr("127.0.0.1") }, + }; + socklen_t len = sizeof(addr); + int s, repair, err; + + s = socket(AF_INET, SOCK_STREAM, 0); + if (CHECK_FAIL(s == -1)) + goto error; + + repair = TCP_REPAIR_ON; + err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair)); + if (CHECK_FAIL(err)) + goto error; + + err = connect(s, (struct sockaddr *)&addr, len); + if (CHECK_FAIL(err)) + goto error; + + repair = TCP_REPAIR_OFF_NO_WP; + err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair)); + if (CHECK_FAIL(err)) + goto error; + + return s; +error: + perror(__func__); + close(s); + return -1; +} + +/* Create a map, populate it with one socket, and free the map. */ +static void test_sockmap_create_update_free(enum bpf_map_type map_type) +{ + const int zero = 0; + int s, map, err; + + s = connected_socket_v4(); + if (CHECK_FAIL(s == -1)) + return; + + map = bpf_create_map(map_type, sizeof(int), sizeof(int), 1, 0); + if (CHECK_FAIL(map == -1)) { + perror("bpf_create_map"); + goto out; + } + + err = bpf_map_update_elem(map, &zero, &s, BPF_NOEXIST); + if (CHECK_FAIL(err)) { + perror("bpf_map_update"); + goto out; + } + +out: + close(map); + close(s); +} + +void test_sockmap_basic(void) +{ + if (test__start_subtest("sockmap create_update_free")) + test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKMAP); + if (test__start_subtest("sockhash create_update_free")) + test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKHASH); +} diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c index d841dced971f..e8399ae50e77 100644 --- a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c +++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c @@ -1,16 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> +#include "test_stacktrace_build_id.skel.h" void test_stacktrace_build_id(void) { + int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd; - const char *prog_name = "tracepoint/random/urandom_read"; - const char *file = "./test_stacktrace_build_id.o"; - int err, prog_fd, stack_trace_len; + struct test_stacktrace_build_id *skel; + int err, stack_trace_len; __u32 key, previous_key, val, duration = 0; - struct bpf_program *prog; - struct bpf_object *obj; - struct bpf_link *link = NULL; char buf[256]; int i, j; struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; @@ -18,43 +16,24 @@ void test_stacktrace_build_id(void) int retry = 1; retry: - err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); - if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) + skel = test_stacktrace_build_id__open_and_load(); + if (CHECK(!skel, "skel_open_and_load", "skeleton open/load failed\n")) return; - prog = bpf_object__find_program_by_title(obj, prog_name); - if (CHECK(!prog, "find_prog", 
"prog '%s' not found\n", prog_name)) - goto close_prog; - - link = bpf_program__attach_tracepoint(prog, "random", "urandom_read"); - if (CHECK(IS_ERR(link), "attach_tp", "err %ld\n", PTR_ERR(link))) - goto close_prog; + err = test_stacktrace_build_id__attach(skel); + if (CHECK(err, "attach_tp", "err %d\n", err)) + goto cleanup; /* find map fds */ - control_map_fd = bpf_find_map(__func__, obj, "control_map"); - if (CHECK(control_map_fd < 0, "bpf_find_map control_map", - "err %d errno %d\n", err, errno)) - goto disable_pmu; - - stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap"); - if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap", - "err %d errno %d\n", err, errno)) - goto disable_pmu; - - stackmap_fd = bpf_find_map(__func__, obj, "stackmap"); - if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n", - err, errno)) - goto disable_pmu; - - stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap"); - if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap", - "err %d errno %d\n", err, errno)) - goto disable_pmu; + control_map_fd = bpf_map__fd(skel->maps.control_map); + stackid_hmap_fd = bpf_map__fd(skel->maps.stackid_hmap); + stackmap_fd = bpf_map__fd(skel->maps.stackmap); + stack_amap_fd = bpf_map__fd(skel->maps.stack_amap); if (CHECK_FAIL(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null"))) - goto disable_pmu; + goto cleanup; if (CHECK_FAIL(system("./urandom_read"))) - goto disable_pmu; + goto cleanup; /* disable stack trace collection */ key = 0; val = 1; @@ -66,23 +45,23 @@ retry: err = compare_map_keys(stackid_hmap_fd, stackmap_fd); if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap", "err %d errno %d\n", err, errno)) - goto disable_pmu; + goto cleanup; err = compare_map_keys(stackmap_fd, stackid_hmap_fd); if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap", "err %d errno %d\n", err, errno)) - goto disable_pmu; + goto cleanup; err = extract_build_id(buf, 256); if (CHECK(err, "get build_id with readelf", "err %d errno %d\n", err, errno)) - goto disable_pmu; + goto cleanup; err = bpf_map_get_next_key(stackmap_fd, NULL, &key); if (CHECK(err, "get_next_key from stackmap", "err %d, errno %d\n", err, errno)) - goto disable_pmu; + goto cleanup; do { char build_id[64]; @@ -90,7 +69,7 @@ retry: err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs); if (CHECK(err, "lookup_elem from stackmap", "err %d, errno %d\n", err, errno)) - goto disable_pmu; + goto cleanup; for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i) if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID && id_offs[i].offset != 0) { @@ -108,8 +87,7 @@ retry: * try it one more time. */ if (build_id_matches < 1 && retry--) { - bpf_link__destroy(link); - bpf_object__close(obj); + test_stacktrace_build_id__destroy(skel); printf("%s:WARN:Didn't find expected build ID from the map, retrying\n", __func__); goto retry; @@ -117,17 +95,14 @@ retry: if (CHECK(build_id_matches < 1, "build id match", "Didn't find expected build ID from the map\n")) - goto disable_pmu; + goto cleanup; - stack_trace_len = PERF_MAX_STACK_DEPTH - * sizeof(struct bpf_stack_build_id); + stack_trace_len = PERF_MAX_STACK_DEPTH * + sizeof(struct bpf_stack_build_id); err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len); CHECK(err, "compare_stack_ips stackmap vs. 
stack_amap", "err %d errno %d\n", err, errno); -disable_pmu: - bpf_link__destroy(link); - -close_prog: - bpf_object__close(obj); +cleanup: + test_stacktrace_build_id__destroy(skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c index f62aa0eb959b..f002e3090d92 100644 --- a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c +++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> +#include "test_stacktrace_build_id.skel.h" static __u64 read_perf_max_sample_freq(void) { @@ -16,19 +17,15 @@ static __u64 read_perf_max_sample_freq(void) void test_stacktrace_build_id_nmi(void) { - int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd; - const char *prog_name = "tracepoint/random/urandom_read"; - const char *file = "./test_stacktrace_build_id.o"; - int err, pmu_fd, prog_fd; + int control_map_fd, stackid_hmap_fd, stackmap_fd; + struct test_stacktrace_build_id *skel; + int err, pmu_fd; struct perf_event_attr attr = { .freq = 1, .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES, }; __u32 key, previous_key, val, duration = 0; - struct bpf_program *prog; - struct bpf_object *obj; - struct bpf_link *link; char buf[256]; int i, j; struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; @@ -38,54 +35,46 @@ void test_stacktrace_build_id_nmi(void) attr.sample_freq = read_perf_max_sample_freq(); retry: - err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd); - if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) + skel = test_stacktrace_build_id__open(); + if (CHECK(!skel, "skel_open", "skeleton open failed\n")) return; - prog = bpf_object__find_program_by_title(obj, prog_name); - if (CHECK(!prog, "find_prog", "prog '%s' not found\n", prog_name)) - goto close_prog; + /* override program type */ + bpf_program__set_perf_event(skel->progs.oncpu); + + err = test_stacktrace_build_id__load(skel); + if (CHECK(err, "skel_load", "skeleton load failed: %d\n", err)) + goto cleanup; pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu 0 */, -1 /* group id */, 0 /* flags */); - if (CHECK(pmu_fd < 0, "perf_event_open", - "err %d errno %d. 
Does the test host support PERF_COUNT_HW_CPU_CYCLES?\n", + if (pmu_fd < 0 && errno == ENOENT) { + printf("%s:SKIP:no PERF_COUNT_HW_CPU_CYCLES\n", __func__); + test__skip(); + goto cleanup; + } + if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd, errno)) - goto close_prog; + goto cleanup; - link = bpf_program__attach_perf_event(prog, pmu_fd); - if (CHECK(IS_ERR(link), "attach_perf_event", - "err %ld\n", PTR_ERR(link))) { + skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu, + pmu_fd); + if (CHECK(IS_ERR(skel->links.oncpu), "attach_perf_event", + "err %ld\n", PTR_ERR(skel->links.oncpu))) { close(pmu_fd); - goto close_prog; + goto cleanup; } /* find map fds */ - control_map_fd = bpf_find_map(__func__, obj, "control_map"); - if (CHECK(control_map_fd < 0, "bpf_find_map control_map", - "err %d errno %d\n", err, errno)) - goto disable_pmu; - - stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap"); - if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap", - "err %d errno %d\n", err, errno)) - goto disable_pmu; - - stackmap_fd = bpf_find_map(__func__, obj, "stackmap"); - if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n", - err, errno)) - goto disable_pmu; - - stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap"); - if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap", - "err %d errno %d\n", err, errno)) - goto disable_pmu; + control_map_fd = bpf_map__fd(skel->maps.control_map); + stackid_hmap_fd = bpf_map__fd(skel->maps.stackid_hmap); + stackmap_fd = bpf_map__fd(skel->maps.stackmap); if (CHECK_FAIL(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null"))) - goto disable_pmu; + goto cleanup; if (CHECK_FAIL(system("taskset 0x1 ./urandom_read 100000"))) - goto disable_pmu; + goto cleanup; /* disable stack trace collection */ key = 0; val = 1; @@ -97,23 +86,23 @@ retry: err = compare_map_keys(stackid_hmap_fd, stackmap_fd); if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap", "err %d errno %d\n", err, errno)) - goto disable_pmu; + goto cleanup; err = compare_map_keys(stackmap_fd, stackid_hmap_fd); if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap", "err %d errno %d\n", err, errno)) - goto disable_pmu; + goto cleanup; err = extract_build_id(buf, 256); if (CHECK(err, "get build_id with readelf", "err %d errno %d\n", err, errno)) - goto disable_pmu; + goto cleanup; err = bpf_map_get_next_key(stackmap_fd, NULL, &key); if (CHECK(err, "get_next_key from stackmap", "err %d, errno %d\n", err, errno)) - goto disable_pmu; + goto cleanup; do { char build_id[64]; @@ -121,7 +110,7 @@ retry: err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs); if (CHECK(err, "lookup_elem from stackmap", "err %d, errno %d\n", err, errno)) - goto disable_pmu; + goto cleanup; for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i) if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID && id_offs[i].offset != 0) { @@ -139,8 +128,7 @@ retry: * try it one more time. */ if (build_id_matches < 1 && retry--) { - bpf_link__destroy(link); - bpf_object__close(obj); + test_stacktrace_build_id__destroy(skel); printf("%s:WARN:Didn't find expected build ID from the map, retrying\n", __func__); goto retry; @@ -148,7 +136,7 @@ retry: if (CHECK(build_id_matches < 1, "build id match", "Didn't find expected build ID from the map\n")) - goto disable_pmu; + goto cleanup; /* * We intentionally skip compare_stack_ips(). 
This is because we @@ -157,8 +145,6 @@ retry: * BPF_STACK_BUILD_ID_IP; */ -disable_pmu: - bpf_link__destroy(link); -close_prog: - bpf_object__close(obj); +cleanup: + test_stacktrace_build_id__destroy(skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c b/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c new file mode 100644 index 000000000000..25b068591e9a --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include <test_progs.h> + +const char *err_str; +bool found; + +static int libbpf_debug_print(enum libbpf_print_level level, + const char *format, va_list args) +{ + char *log_buf; + + if (level != LIBBPF_WARN || + strcmp(format, "libbpf: \n%s\n")) { + vprintf(format, args); + return 0; + } + + log_buf = va_arg(args, char *); + if (!log_buf) + goto out; + if (strstr(log_buf, err_str) == 0) + found = true; +out: + printf(format, log_buf); + return 0; +} + +extern int extra_prog_load_log_flags; + +static int check_load(const char *file) +{ + struct bpf_prog_load_attr attr; + struct bpf_object *obj = NULL; + int err, prog_fd; + + memset(&attr, 0, sizeof(struct bpf_prog_load_attr)); + attr.file = file; + attr.prog_type = BPF_PROG_TYPE_UNSPEC; + attr.log_level = extra_prog_load_log_flags; + attr.prog_flags = BPF_F_TEST_RND_HI32; + found = false; + err = bpf_prog_load_xattr(&attr, &obj, &prog_fd); + bpf_object__close(obj); + return err; +} + +struct test_def { + const char *file; + const char *err_str; +}; + +void test_test_global_funcs(void) +{ + struct test_def tests[] = { + { "test_global_func1.o", "combined stack size of 4 calls is 544" }, + { "test_global_func2.o" }, + { "test_global_func3.o" , "the call stack of 8 frames" }, + { "test_global_func4.o" }, + { "test_global_func5.o" , "expected pointer to ctx, but got PTR" }, + { "test_global_func6.o" , "modified ctx ptr R2" }, + { "test_global_func7.o" , "foo() doesn't return scalar" }, + }; + libbpf_print_fn_t old_print_fn = NULL; + int err, i, duration = 0; + + old_print_fn = libbpf_set_print(libbpf_debug_print); + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + const struct test_def *test = &tests[i]; + + if (!test__start_subtest(test->file)) + continue; + + err_str = test->err_str; + err = check_load(test->file); + CHECK_FAIL(!!err ^ !!err_str); + if (err_str) + CHECK(found, "", "expected string '%s'", err_str); + } + libbpf_set_print(old_print_fn); +} diff --git a/tools/testing/selftests/bpf/prog_tests/test_overhead.c b/tools/testing/selftests/bpf/prog_tests/test_overhead.c index c32aa28bd93f..465b371a561d 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_overhead.c +++ b/tools/testing/selftests/bpf/prog_tests/test_overhead.c @@ -2,6 +2,7 @@ /* Copyright (c) 2019 Facebook */ #define _GNU_SOURCE #include <sched.h> +#include <sys/prctl.h> #include <test_progs.h> #define MAX_CNT 100000 @@ -17,7 +18,7 @@ static __u64 time_get_ns(void) static int test_task_rename(const char *prog) { int i, fd, duration = 0, err; - char buf[] = "test\n"; + char buf[] = "test_overhead"; __u64 start_time; fd = open("/proc/self/comm", O_WRONLY|O_TRUNC); @@ -66,6 +67,10 @@ void test_test_overhead(void) struct bpf_object *obj; struct bpf_link *link; int err, duration = 0; + char comm[16] = {}; + + if (CHECK_FAIL(prctl(PR_GET_NAME, comm, 0L, 0L, 0L))) + return; obj = bpf_object__open_file("./test_overhead.o", NULL); if (CHECK(IS_ERR(obj), "obj_open_file", "err %ld\n", PTR_ERR(obj))) @@ -138,5 +143,6 @@ void 
test_test_overhead(void) test_run("fexit"); bpf_link__destroy(link); cleanup: + prctl(PR_SET_NAME, comm, 0L, 0L, 0L); bpf_object__close(obj); } diff --git a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c new file mode 100644 index 000000000000..1f6ccdaed1ac --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: GPL-2.0-only +#define _GNU_SOURCE +#include <sched.h> +#include <sys/prctl.h> +#include <test_progs.h> + +#define MAX_TRAMP_PROGS 40 + +struct inst { + struct bpf_object *obj; + struct bpf_link *link_fentry; + struct bpf_link *link_fexit; +}; + +static int test_task_rename(void) +{ + int fd, duration = 0, err; + char buf[] = "test_overhead"; + + fd = open("/proc/self/comm", O_WRONLY|O_TRUNC); + if (CHECK(fd < 0, "open /proc", "err %d", errno)) + return -1; + err = write(fd, buf, sizeof(buf)); + if (err < 0) { + CHECK(err < 0, "task rename", "err %d", errno); + close(fd); + return -1; + } + close(fd); + return 0; +} + +static struct bpf_link *load(struct bpf_object *obj, const char *name) +{ + struct bpf_program *prog; + int duration = 0; + + prog = bpf_object__find_program_by_title(obj, name); + if (CHECK(!prog, "find_probe", "prog '%s' not found\n", name)) + return ERR_PTR(-EINVAL); + return bpf_program__attach_trace(prog); +} + +void test_trampoline_count(void) +{ + const char *fentry_name = "fentry/__set_task_comm"; + const char *fexit_name = "fexit/__set_task_comm"; + const char *object = "test_trampoline_count.o"; + struct inst inst[MAX_TRAMP_PROGS] = {}; + int err, i = 0, duration = 0; + struct bpf_object *obj; + struct bpf_link *link; + char comm[16] = {}; + + /* attach 'allowed' 40 trampoline programs */ + for (i = 0; i < MAX_TRAMP_PROGS; i++) { + obj = bpf_object__open_file(object, NULL); + if (CHECK(IS_ERR(obj), "obj_open_file", "err %ld\n", PTR_ERR(obj))) + goto cleanup; + + err = bpf_object__load(obj); + if (CHECK(err, "obj_load", "err %d\n", err)) + goto cleanup; + inst[i].obj = obj; + + if (rand() % 2) { + link = load(obj, fentry_name); + if (CHECK(IS_ERR(link), "attach prog", "err %ld\n", PTR_ERR(link))) + goto cleanup; + inst[i].link_fentry = link; + } else { + link = load(obj, fexit_name); + if (CHECK(IS_ERR(link), "attach prog", "err %ld\n", PTR_ERR(link))) + goto cleanup; + inst[i].link_fexit = link; + } + } + + /* and try 1 extra.. 
*/ + obj = bpf_object__open_file(object, NULL); + if (CHECK(IS_ERR(obj), "obj_open_file", "err %ld\n", PTR_ERR(obj))) + goto cleanup; + + err = bpf_object__load(obj); + if (CHECK(err, "obj_load", "err %d\n", err)) + goto cleanup_extra; + + /* ..that needs to fail */ + link = load(obj, fentry_name); + if (CHECK(!IS_ERR(link), "cannot attach over the limit", "err %ld\n", PTR_ERR(link))) { + bpf_link__destroy(link); + goto cleanup_extra; + } + + /* with E2BIG error */ + CHECK(PTR_ERR(link) != -E2BIG, "proper error check", "err %ld\n", PTR_ERR(link)); + + /* and finaly execute the probe */ + if (CHECK_FAIL(prctl(PR_GET_NAME, comm, 0L, 0L, 0L))) + goto cleanup_extra; + CHECK_FAIL(test_task_rename()); + CHECK_FAIL(prctl(PR_SET_NAME, comm, 0L, 0L, 0L)); + +cleanup_extra: + bpf_object__close(obj); +cleanup: + while (--i) { + bpf_link__destroy(inst[i].link_fentry); + bpf_link__destroy(inst[i].link_fexit); + bpf_object__close(inst[i].obj); + } +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c new file mode 100644 index 000000000000..6b56bdc73ebc --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <net/if.h> +#include "test_xdp.skel.h" +#include "test_xdp_bpf2bpf.skel.h" + +void test_xdp_bpf2bpf(void) +{ + __u32 duration = 0, retval, size; + char buf[128]; + int err, pkt_fd, map_fd; + struct iphdr *iph = (void *)buf + sizeof(struct ethhdr); + struct iptnl_info value4 = {.family = AF_INET}; + struct test_xdp *pkt_skel = NULL; + struct test_xdp_bpf2bpf *ftrace_skel = NULL; + struct vip key4 = {.protocol = 6, .family = AF_INET}; + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts); + + /* Load XDP program to introspect */ + pkt_skel = test_xdp__open_and_load(); + if (CHECK(!pkt_skel, "pkt_skel_load", "test_xdp skeleton failed\n")) + return; + + pkt_fd = bpf_program__fd(pkt_skel->progs._xdp_tx_iptunnel); + + map_fd = bpf_map__fd(pkt_skel->maps.vip2tnl); + bpf_map_update_elem(map_fd, &key4, &value4, 0); + + /* Load trace program */ + opts.attach_prog_fd = pkt_fd, + ftrace_skel = test_xdp_bpf2bpf__open_opts(&opts); + if (CHECK(!ftrace_skel, "__open", "ftrace skeleton failed\n")) + goto out; + + err = test_xdp_bpf2bpf__load(ftrace_skel); + if (CHECK(err, "__load", "ftrace skeleton failed\n")) + goto out; + + err = test_xdp_bpf2bpf__attach(ftrace_skel); + if (CHECK(err, "ftrace_attach", "ftrace attach failed: %d\n", err)) + goto out; + + /* Run test program */ + err = bpf_prog_test_run(pkt_fd, 1, &pkt_v4, sizeof(pkt_v4), + buf, &size, &retval, &duration); + + if (CHECK(err || retval != XDP_TX || size != 74 || + iph->protocol != IPPROTO_IPIP, "ipv4", + "err %d errno %d retval %d size %d\n", + err, errno, retval, size)) + goto out; + + /* Verify test results */ + if (CHECK(ftrace_skel->bss->test_result_fentry != if_nametoindex("lo"), + "result", "fentry failed err %llu\n", + ftrace_skel->bss->test_result_fentry)) + goto out; + + CHECK(ftrace_skel->bss->test_result_fexit != XDP_TX, "result", + "fexit failed err %llu\n", ftrace_skel->bss->test_result_fexit); + +out: + test_xdp__destroy(pkt_skel); + test_xdp_bpf2bpf__destroy(ftrace_skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_perf.c b/tools/testing/selftests/bpf/prog_tests/xdp_perf.c new file mode 100644 index 000000000000..7185bee16fe4 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp_perf.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 
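Both the xdp_bpf2bpf test above and the xdp_perf test that follows exercise their XDP programs purely through bpf_prog_test_run(), i.e. without binding to a network device (the real tests feed pkt_v4 and a repeat count). A minimal sketch of that call for a single run, with a hypothetical helper name and a dummy zeroed frame:

#include <stdio.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

/* Feed one in-memory frame to an already-loaded XDP program. */
static int run_xdp_once(int prog_fd)
{
	char in[128] = {}, out[128];
	__u32 size = sizeof(out), retval = 0, duration = 0;
	int err;

	err = bpf_prog_test_run(prog_fd, 1 /* repeat */, in, sizeof(in),
				out, &size, &retval, &duration);
	if (err)
		return err;

	/* retval is the XDP verdict, duration the average ns per run */
	printf("retval=%u size=%u duration=%uns\n", retval, size, duration);
	return retval == XDP_PASS ? 0 : -1;
}
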
+#include <test_progs.h> + +void test_xdp_perf(void) +{ + const char *file = "./xdp_dummy.o"; + __u32 duration, retval, size; + struct bpf_object *obj; + char in[128], out[128]; + int err, prog_fd; + + err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); + if (CHECK_FAIL(err)) + return; + + err = bpf_prog_test_run(prog_fd, 1000000, &in[0], 128, + out, &size, &retval, &duration); + + CHECK(err || retval != XDP_PASS || size != 128, + "xdp-perf", + "err %d errno %d retval %d size %d\n", + err, errno, retval, size); + + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/progs/bpf_cubic.c b/tools/testing/selftests/bpf/progs/bpf_cubic.c new file mode 100644 index 000000000000..7897c8f4d363 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_cubic.c @@ -0,0 +1,544 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* WARNING: This implemenation is not necessarily the same + * as the tcp_cubic.c. The purpose is mainly for testing + * the kernel BPF logic. + * + * Highlights: + * 1. CONFIG_HZ .kconfig map is used. + * 2. In bictcp_update(), calculation is changed to use usec + * resolution (i.e. USEC_PER_JIFFY) instead of using jiffies. + * Thus, usecs_to_jiffies() is not used in the bpf_cubic.c. + * 3. In bitctcp_update() [under tcp_friendliness], the original + * "while (ca->ack_cnt > delta)" loop is changed to the equivalent + * "ca->ack_cnt / delta" operation. + */ + +#include <linux/bpf.h> +#include "bpf_tcp_helpers.h" + +char _license[] SEC("license") = "GPL"; + +#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi) + +#define BICTCP_BETA_SCALE 1024 /* Scale factor beta calculation + * max_cwnd = snd_cwnd * beta + */ +#define BICTCP_HZ 10 /* BIC HZ 2^10 = 1024 */ + +/* Two methods of hybrid slow start */ +#define HYSTART_ACK_TRAIN 0x1 +#define HYSTART_DELAY 0x2 + +/* Number of delay samples for detecting the increase of delay */ +#define HYSTART_MIN_SAMPLES 8 +#define HYSTART_DELAY_MIN (4000U) /* 4ms */ +#define HYSTART_DELAY_MAX (16000U) /* 16 ms */ +#define HYSTART_DELAY_THRESH(x) clamp(x, HYSTART_DELAY_MIN, HYSTART_DELAY_MAX) + +static int fast_convergence = 1; +static const int beta = 717; /* = 717/1024 (BICTCP_BETA_SCALE) */ +static int initial_ssthresh; +static const int bic_scale = 41; +static int tcp_friendliness = 1; + +static int hystart = 1; +static int hystart_detect = HYSTART_ACK_TRAIN | HYSTART_DELAY; +static int hystart_low_window = 16; +static int hystart_ack_delta_us = 2000; + +static const __u32 cube_rtt_scale = (bic_scale * 10); /* 1024*c/rtt */ +static const __u32 beta_scale = 8*(BICTCP_BETA_SCALE+beta) / 3 + / (BICTCP_BETA_SCALE - beta); +/* calculate the "K" for (wmax-cwnd) = c/rtt * K^3 + * so K = cubic_root( (wmax-cwnd)*rtt/c ) + * the unit of K is bictcp_HZ=2^10, not HZ + * + * c = bic_scale >> 10 + * rtt = 100ms + * + * the following code has been designed and tested for + * cwnd < 1 million packets + * RTT < 100 seconds + * HZ < 1,000,00 (corresponding to 10 nano-second) + */ + +/* 1/c * 2^2*bictcp_HZ * srtt, 2^40 */ +static const __u64 cube_factor = (__u64)(1ull << (10+3*BICTCP_HZ)) + / (bic_scale * 10); + +/* BIC TCP Parameters */ +struct bictcp { + __u32 cnt; /* increase cwnd by 1 after ACKs */ + __u32 last_max_cwnd; /* last maximum snd_cwnd */ + __u32 last_cwnd; /* the last snd_cwnd */ + __u32 last_time; /* time when updated last_cwnd */ + __u32 bic_origin_point;/* origin point of bic function */ + __u32 bic_K; /* time to origin point + from the beginning of the current epoch */ + __u32 delay_min; /* min delay (usec) */ + 
__u32 epoch_start; /* beginning of an epoch */ + __u32 ack_cnt; /* number of acks */ + __u32 tcp_cwnd; /* estimated tcp cwnd */ + __u16 unused; + __u8 sample_cnt; /* number of samples to decide curr_rtt */ + __u8 found; /* the exit point is found? */ + __u32 round_start; /* beginning of each round */ + __u32 end_seq; /* end_seq of the round */ + __u32 last_ack; /* last time when the ACK spacing is close */ + __u32 curr_rtt; /* the minimum rtt of current round */ +}; + +static inline void bictcp_reset(struct bictcp *ca) +{ + ca->cnt = 0; + ca->last_max_cwnd = 0; + ca->last_cwnd = 0; + ca->last_time = 0; + ca->bic_origin_point = 0; + ca->bic_K = 0; + ca->delay_min = 0; + ca->epoch_start = 0; + ca->ack_cnt = 0; + ca->tcp_cwnd = 0; + ca->found = 0; +} + +extern unsigned long CONFIG_HZ __kconfig; +#define HZ CONFIG_HZ +#define USEC_PER_MSEC 1000UL +#define USEC_PER_SEC 1000000UL +#define USEC_PER_JIFFY (USEC_PER_SEC / HZ) + +static __always_inline __u64 div64_u64(__u64 dividend, __u64 divisor) +{ + return dividend / divisor; +} + +#define div64_ul div64_u64 + +#define BITS_PER_U64 (sizeof(__u64) * 8) +static __always_inline int fls64(__u64 x) +{ + int num = BITS_PER_U64 - 1; + + if (x == 0) + return 0; + + if (!(x & (~0ull << (BITS_PER_U64-32)))) { + num -= 32; + x <<= 32; + } + if (!(x & (~0ull << (BITS_PER_U64-16)))) { + num -= 16; + x <<= 16; + } + if (!(x & (~0ull << (BITS_PER_U64-8)))) { + num -= 8; + x <<= 8; + } + if (!(x & (~0ull << (BITS_PER_U64-4)))) { + num -= 4; + x <<= 4; + } + if (!(x & (~0ull << (BITS_PER_U64-2)))) { + num -= 2; + x <<= 2; + } + if (!(x & (~0ull << (BITS_PER_U64-1)))) + num -= 1; + + return num + 1; +} + +static __always_inline __u32 bictcp_clock_us(const struct sock *sk) +{ + return tcp_sk(sk)->tcp_mstamp; +} + +static __always_inline void bictcp_hystart_reset(struct sock *sk) +{ + struct tcp_sock *tp = tcp_sk(sk); + struct bictcp *ca = inet_csk_ca(sk); + + ca->round_start = ca->last_ack = bictcp_clock_us(sk); + ca->end_seq = tp->snd_nxt; + ca->curr_rtt = ~0U; + ca->sample_cnt = 0; +} + +/* "struct_ops/" prefix is not a requirement + * It will be recognized as BPF_PROG_TYPE_STRUCT_OPS + * as long as it is used in one of the func ptr + * under SEC(".struct_ops"). + */ +SEC("struct_ops/bictcp_init") +void BPF_PROG(bictcp_init, struct sock *sk) +{ + struct bictcp *ca = inet_csk_ca(sk); + + bictcp_reset(ca); + + if (hystart) + bictcp_hystart_reset(sk); + + if (!hystart && initial_ssthresh) + tcp_sk(sk)->snd_ssthresh = initial_ssthresh; +} + +/* No prefix in SEC will also work. + * The remaining tcp-cubic functions have an easier way. + */ +SEC("no-sec-prefix-bictcp_cwnd_event") +void BPF_PROG(bictcp_cwnd_event, struct sock *sk, enum tcp_ca_event event) +{ + if (event == CA_EVENT_TX_START) { + struct bictcp *ca = inet_csk_ca(sk); + __u32 now = tcp_jiffies32; + __s32 delta; + + delta = now - tcp_sk(sk)->lsndtime; + + /* We were application limited (idle) for a while. + * Shift epoch_start to keep cwnd growth to cubic curve. + */ + if (ca->epoch_start && delta > 0) { + ca->epoch_start += delta; + if (after(ca->epoch_start, now)) + ca->epoch_start = now; + } + return; + } +} + +/* + * cbrt(x) MSB values for x MSB values in [0..63]. 
+ * Precomputed then refined by hand - Willy Tarreau + * + * For x in [0..63], + * v = cbrt(x << 18) - 1 + * cbrt(x) = (v[x] + 10) >> 6 + */ +static const __u8 v[] = { + /* 0x00 */ 0, 54, 54, 54, 118, 118, 118, 118, + /* 0x08 */ 123, 129, 134, 138, 143, 147, 151, 156, + /* 0x10 */ 157, 161, 164, 168, 170, 173, 176, 179, + /* 0x18 */ 181, 185, 187, 190, 192, 194, 197, 199, + /* 0x20 */ 200, 202, 204, 206, 209, 211, 213, 215, + /* 0x28 */ 217, 219, 221, 222, 224, 225, 227, 229, + /* 0x30 */ 231, 232, 234, 236, 237, 239, 240, 242, + /* 0x38 */ 244, 245, 246, 248, 250, 251, 252, 254, +}; + +/* calculate the cubic root of x using a table lookup followed by one + * Newton-Raphson iteration. + * Avg err ~= 0.195% + */ +static __always_inline __u32 cubic_root(__u64 a) +{ + __u32 x, b, shift; + + if (a < 64) { + /* a in [0..63] */ + return ((__u32)v[(__u32)a] + 35) >> 6; + } + + b = fls64(a); + b = ((b * 84) >> 8) - 1; + shift = (a >> (b * 3)); + + /* it is needed for verifier's bound check on v */ + if (shift >= 64) + return 0; + + x = ((__u32)(((__u32)v[shift] + 10) << b)) >> 6; + + /* + * Newton-Raphson iteration + * 2 + * x = ( 2 * x + a / x ) / 3 + * k+1 k k + */ + x = (2 * x + (__u32)div64_u64(a, (__u64)x * (__u64)(x - 1))); + x = ((x * 341) >> 10); + return x; +} + +/* + * Compute congestion window to use. + */ +static __always_inline void bictcp_update(struct bictcp *ca, __u32 cwnd, + __u32 acked) +{ + __u32 delta, bic_target, max_cnt; + __u64 offs, t; + + ca->ack_cnt += acked; /* count the number of ACKed packets */ + + if (ca->last_cwnd == cwnd && + (__s32)(tcp_jiffies32 - ca->last_time) <= HZ / 32) + return; + + /* The CUBIC function can update ca->cnt at most once per jiffy. + * On all cwnd reduction events, ca->epoch_start is set to 0, + * which will force a recalculation of ca->cnt. + */ + if (ca->epoch_start && tcp_jiffies32 == ca->last_time) + goto tcp_friendliness; + + ca->last_cwnd = cwnd; + ca->last_time = tcp_jiffies32; + + if (ca->epoch_start == 0) { + ca->epoch_start = tcp_jiffies32; /* record beginning */ + ca->ack_cnt = acked; /* start counting */ + ca->tcp_cwnd = cwnd; /* syn with cubic */ + + if (ca->last_max_cwnd <= cwnd) { + ca->bic_K = 0; + ca->bic_origin_point = cwnd; + } else { + /* Compute new K based on + * (wmax-cwnd) * (srtt>>3 / HZ) / c * 2^(3*bictcp_HZ) + */ + ca->bic_K = cubic_root(cube_factor + * (ca->last_max_cwnd - cwnd)); + ca->bic_origin_point = ca->last_max_cwnd; + } + } + + /* cubic function - calc*/ + /* calculate c * time^3 / rtt, + * while considering overflow in calculation of time^3 + * (so time^3 is done by using 64 bit) + * and without the support of division of 64bit numbers + * (so all divisions are done by using 32 bit) + * also NOTE the unit of those veriables + * time = (t - K) / 2^bictcp_HZ + * c = bic_scale >> 10 + * rtt = (srtt >> 3) / HZ + * !!! The following code does not have overflow problems, + * if the cwnd < 1 million packets !!! 
+ */ + + t = (__s32)(tcp_jiffies32 - ca->epoch_start) * USEC_PER_JIFFY; + t += ca->delay_min; + /* change the unit from usec to bictcp_HZ */ + t <<= BICTCP_HZ; + t /= USEC_PER_SEC; + + if (t < ca->bic_K) /* t - K */ + offs = ca->bic_K - t; + else + offs = t - ca->bic_K; + + /* c/rtt * (t-K)^3 */ + delta = (cube_rtt_scale * offs * offs * offs) >> (10+3*BICTCP_HZ); + if (t < ca->bic_K) /* below origin*/ + bic_target = ca->bic_origin_point - delta; + else /* above origin*/ + bic_target = ca->bic_origin_point + delta; + + /* cubic function - calc bictcp_cnt*/ + if (bic_target > cwnd) { + ca->cnt = cwnd / (bic_target - cwnd); + } else { + ca->cnt = 100 * cwnd; /* very small increment*/ + } + + /* + * The initial growth of cubic function may be too conservative + * when the available bandwidth is still unknown. + */ + if (ca->last_max_cwnd == 0 && ca->cnt > 20) + ca->cnt = 20; /* increase cwnd 5% per RTT */ + +tcp_friendliness: + /* TCP Friendly */ + if (tcp_friendliness) { + __u32 scale = beta_scale; + __u32 n; + + /* update tcp cwnd */ + delta = (cwnd * scale) >> 3; + if (ca->ack_cnt > delta && delta) { + n = ca->ack_cnt / delta; + ca->ack_cnt -= n * delta; + ca->tcp_cwnd += n; + } + + if (ca->tcp_cwnd > cwnd) { /* if bic is slower than tcp */ + delta = ca->tcp_cwnd - cwnd; + max_cnt = cwnd / delta; + if (ca->cnt > max_cnt) + ca->cnt = max_cnt; + } + } + + /* The maximum rate of cwnd increase CUBIC allows is 1 packet per + * 2 packets ACKed, meaning cwnd grows at 1.5x per RTT. + */ + ca->cnt = max(ca->cnt, 2U); +} + +/* Or simply use the BPF_STRUCT_OPS to avoid the SEC boiler plate. */ +void BPF_STRUCT_OPS(bictcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked) +{ + struct tcp_sock *tp = tcp_sk(sk); + struct bictcp *ca = inet_csk_ca(sk); + + if (!tcp_is_cwnd_limited(sk)) + return; + + if (tcp_in_slow_start(tp)) { + if (hystart && after(ack, ca->end_seq)) + bictcp_hystart_reset(sk); + acked = tcp_slow_start(tp, acked); + if (!acked) + return; + } + bictcp_update(ca, tp->snd_cwnd, acked); + tcp_cong_avoid_ai(tp, ca->cnt, acked); +} + +__u32 BPF_STRUCT_OPS(bictcp_recalc_ssthresh, struct sock *sk) +{ + const struct tcp_sock *tp = tcp_sk(sk); + struct bictcp *ca = inet_csk_ca(sk); + + ca->epoch_start = 0; /* end of epoch */ + + /* Wmax and fast convergence */ + if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence) + ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta)) + / (2 * BICTCP_BETA_SCALE); + else + ca->last_max_cwnd = tp->snd_cwnd; + + return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U); +} + +void BPF_STRUCT_OPS(bictcp_state, struct sock *sk, __u8 new_state) +{ + if (new_state == TCP_CA_Loss) { + bictcp_reset(inet_csk_ca(sk)); + bictcp_hystart_reset(sk); + } +} + +#define GSO_MAX_SIZE 65536 + +/* Account for TSO/GRO delays. + * Otherwise short RTT flows could get too small ssthresh, since during + * slow start we begin with small TSO packets and ca->delay_min would + * not account for long aggregation delay when TSO packets get bigger. + * Ideally even with a very small RTT we would like to have at least one + * TSO packet being sent and received by GRO, and another one in qdisc layer. + * We apply another 100% factor because @rate is doubled at this point. + * We cap the cushion to 1ms. 
+ */ +static __always_inline __u32 hystart_ack_delay(struct sock *sk) +{ + unsigned long rate; + + rate = sk->sk_pacing_rate; + if (!rate) + return 0; + return min((__u64)USEC_PER_MSEC, + div64_ul((__u64)GSO_MAX_SIZE * 4 * USEC_PER_SEC, rate)); +} + +static __always_inline void hystart_update(struct sock *sk, __u32 delay) +{ + struct tcp_sock *tp = tcp_sk(sk); + struct bictcp *ca = inet_csk_ca(sk); + __u32 threshold; + + if (hystart_detect & HYSTART_ACK_TRAIN) { + __u32 now = bictcp_clock_us(sk); + + /* first detection parameter - ack-train detection */ + if ((__s32)(now - ca->last_ack) <= hystart_ack_delta_us) { + ca->last_ack = now; + + threshold = ca->delay_min + hystart_ack_delay(sk); + + /* Hystart ack train triggers if we get ack past + * ca->delay_min/2. + * Pacing might have delayed packets up to RTT/2 + * during slow start. + */ + if (sk->sk_pacing_status == SK_PACING_NONE) + threshold >>= 1; + + if ((__s32)(now - ca->round_start) > threshold) { + ca->found = 1; + tp->snd_ssthresh = tp->snd_cwnd; + } + } + } + + if (hystart_detect & HYSTART_DELAY) { + /* obtain the minimum delay of more than sampling packets */ + if (ca->sample_cnt < HYSTART_MIN_SAMPLES) { + if (ca->curr_rtt > delay) + ca->curr_rtt = delay; + + ca->sample_cnt++; + } else { + if (ca->curr_rtt > ca->delay_min + + HYSTART_DELAY_THRESH(ca->delay_min >> 3)) { + ca->found = 1; + tp->snd_ssthresh = tp->snd_cwnd; + } + } + } +} + +void BPF_STRUCT_OPS(bictcp_acked, struct sock *sk, + const struct ack_sample *sample) +{ + const struct tcp_sock *tp = tcp_sk(sk); + struct bictcp *ca = inet_csk_ca(sk); + __u32 delay; + + /* Some calls are for duplicates without timetamps */ + if (sample->rtt_us < 0) + return; + + /* Discard delay samples right after fast recovery */ + if (ca->epoch_start && (__s32)(tcp_jiffies32 - ca->epoch_start) < HZ) + return; + + delay = sample->rtt_us; + if (delay == 0) + delay = 1; + + /* first time call or link delay decreases */ + if (ca->delay_min == 0 || ca->delay_min > delay) + ca->delay_min = delay; + + /* hystart triggers when cwnd is larger than some threshold */ + if (!ca->found && tcp_in_slow_start(tp) && hystart && + tp->snd_cwnd >= hystart_low_window) + hystart_update(sk, delay); +} + +__u32 BPF_STRUCT_OPS(tcp_reno_undo_cwnd, struct sock *sk) +{ + const struct tcp_sock *tp = tcp_sk(sk); + + return max(tp->snd_cwnd, tp->prior_cwnd); +} + +SEC(".struct_ops") +struct tcp_congestion_ops cubic = { + .init = (void *)bictcp_init, + .ssthresh = (void *)bictcp_recalc_ssthresh, + .cong_avoid = (void *)bictcp_cong_avoid, + .set_state = (void *)bictcp_state, + .undo_cwnd = (void *)tcp_reno_undo_cwnd, + .cwnd_event = (void *)bictcp_cwnd_event, + .pkts_acked = (void *)bictcp_acked, + .name = "bpf_cubic", +}; diff --git a/tools/testing/selftests/bpf/progs/bpf_dctcp.c b/tools/testing/selftests/bpf/progs/bpf_dctcp.c new file mode 100644 index 000000000000..b631fb5032d2 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_dctcp.c @@ -0,0 +1,216 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ + +/* WARNING: This implemenation is not necessarily the same + * as the tcp_dctcp.c. The purpose is mainly for testing + * the kernel BPF logic. 
+ */ + +#include <linux/bpf.h> +#include <linux/types.h> +#include <bpf/bpf_helpers.h> +#include "bpf_trace_helpers.h" +#include "bpf_tcp_helpers.h" + +char _license[] SEC("license") = "GPL"; + +#define DCTCP_MAX_ALPHA 1024U + +struct dctcp { + __u32 old_delivered; + __u32 old_delivered_ce; + __u32 prior_rcv_nxt; + __u32 dctcp_alpha; + __u32 next_seq; + __u32 ce_state; + __u32 loss_cwnd; +}; + +static unsigned int dctcp_shift_g = 4; /* g = 1/2^4 */ +static unsigned int dctcp_alpha_on_init = DCTCP_MAX_ALPHA; + +static __always_inline void dctcp_reset(const struct tcp_sock *tp, + struct dctcp *ca) +{ + ca->next_seq = tp->snd_nxt; + + ca->old_delivered = tp->delivered; + ca->old_delivered_ce = tp->delivered_ce; +} + +SEC("struct_ops/dctcp_init") +void BPF_PROG(dctcp_init, struct sock *sk) +{ + const struct tcp_sock *tp = tcp_sk(sk); + struct dctcp *ca = inet_csk_ca(sk); + + ca->prior_rcv_nxt = tp->rcv_nxt; + ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA); + ca->loss_cwnd = 0; + ca->ce_state = 0; + + dctcp_reset(tp, ca); +} + +SEC("struct_ops/dctcp_ssthresh") +__u32 BPF_PROG(dctcp_ssthresh, struct sock *sk) +{ + struct dctcp *ca = inet_csk_ca(sk); + struct tcp_sock *tp = tcp_sk(sk); + + ca->loss_cwnd = tp->snd_cwnd; + return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U); +} + +SEC("struct_ops/dctcp_update_alpha") +void BPF_PROG(dctcp_update_alpha, struct sock *sk, __u32 flags) +{ + const struct tcp_sock *tp = tcp_sk(sk); + struct dctcp *ca = inet_csk_ca(sk); + + /* Expired RTT */ + if (!before(tp->snd_una, ca->next_seq)) { + __u32 delivered_ce = tp->delivered_ce - ca->old_delivered_ce; + __u32 alpha = ca->dctcp_alpha; + + /* alpha = (1 - g) * alpha + g * F */ + + alpha -= min_not_zero(alpha, alpha >> dctcp_shift_g); + if (delivered_ce) { + __u32 delivered = tp->delivered - ca->old_delivered; + + /* If dctcp_shift_g == 1, a 32bit value would overflow + * after 8 M packets. + */ + delivered_ce <<= (10 - dctcp_shift_g); + delivered_ce /= max(1U, delivered); + + alpha = min(alpha + delivered_ce, DCTCP_MAX_ALPHA); + } + ca->dctcp_alpha = alpha; + dctcp_reset(tp, ca); + } +} + +static __always_inline void dctcp_react_to_loss(struct sock *sk) +{ + struct dctcp *ca = inet_csk_ca(sk); + struct tcp_sock *tp = tcp_sk(sk); + + ca->loss_cwnd = tp->snd_cwnd; + tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U); +} + +SEC("struct_ops/dctcp_state") +void BPF_PROG(dctcp_state, struct sock *sk, __u8 new_state) +{ + if (new_state == TCP_CA_Recovery && + new_state != BPF_CORE_READ_BITFIELD(inet_csk(sk), icsk_ca_state)) + dctcp_react_to_loss(sk); + /* We handle RTO in dctcp_cwnd_event to ensure that we perform only + * one loss-adjustment per RTT. + */ +} + +static __always_inline void dctcp_ece_ack_cwr(struct sock *sk, __u32 ce_state) +{ + struct tcp_sock *tp = tcp_sk(sk); + + if (ce_state == 1) + tp->ecn_flags |= TCP_ECN_DEMAND_CWR; + else + tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; +} + +/* Minimal DCTP CE state machine: + * + * S: 0 <- last pkt was non-CE + * 1 <- last pkt was CE + */ +static __always_inline +void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt, + __u32 *prior_rcv_nxt, __u32 *ce_state) +{ + __u32 new_ce_state = (evt == CA_EVENT_ECN_IS_CE) ? 1 : 0; + + if (*ce_state != new_ce_state) { + /* CE state has changed, force an immediate ACK to + * reflect the new CE state. If an ACK was delayed, + * send that first to reflect the prior CE state. 
+ */ + if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) { + dctcp_ece_ack_cwr(sk, *ce_state); + bpf_tcp_send_ack(sk, *prior_rcv_nxt); + } + inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW; + } + *prior_rcv_nxt = tcp_sk(sk)->rcv_nxt; + *ce_state = new_ce_state; + dctcp_ece_ack_cwr(sk, new_ce_state); +} + +SEC("struct_ops/dctcp_cwnd_event") +void BPF_PROG(dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev) +{ + struct dctcp *ca = inet_csk_ca(sk); + + switch (ev) { + case CA_EVENT_ECN_IS_CE: + case CA_EVENT_ECN_NO_CE: + dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state); + break; + case CA_EVENT_LOSS: + dctcp_react_to_loss(sk); + break; + default: + /* Don't care for the rest. */ + break; + } +} + +SEC("struct_ops/dctcp_cwnd_undo") +__u32 BPF_PROG(dctcp_cwnd_undo, struct sock *sk) +{ + const struct dctcp *ca = inet_csk_ca(sk); + + return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd); +} + +SEC("struct_ops/tcp_reno_cong_avoid") +void BPF_PROG(tcp_reno_cong_avoid, struct sock *sk, __u32 ack, __u32 acked) +{ + struct tcp_sock *tp = tcp_sk(sk); + + if (!tcp_is_cwnd_limited(sk)) + return; + + /* In "safe" area, increase. */ + if (tcp_in_slow_start(tp)) { + acked = tcp_slow_start(tp, acked); + if (!acked) + return; + } + /* In dangerous area, increase slowly. */ + tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked); +} + +SEC(".struct_ops") +struct tcp_congestion_ops dctcp_nouse = { + .init = (void *)dctcp_init, + .set_state = (void *)dctcp_state, + .flags = TCP_CONG_NEEDS_ECN, + .name = "bpf_dctcp_nouse", +}; + +SEC(".struct_ops") +struct tcp_congestion_ops dctcp = { + .init = (void *)dctcp_init, + .in_ack_event = (void *)dctcp_update_alpha, + .cwnd_event = (void *)dctcp_cwnd_event, + .ssthresh = (void *)dctcp_ssthresh, + .cong_avoid = (void *)tcp_reno_cong_avoid, + .undo_cwnd = (void *)dctcp_cwnd_undo, + .set_state = (void *)dctcp_state, + .flags = TCP_CONG_NEEDS_ECN, + .name = "bpf_dctcp", +}; diff --git a/tools/testing/selftests/bpf/progs/bpf_flow.c b/tools/testing/selftests/bpf/progs/bpf_flow.c index 040a44206f29..9941f0ba471e 100644 --- a/tools/testing/selftests/bpf/progs/bpf_flow.c +++ b/tools/testing/selftests/bpf/progs/bpf_flow.c @@ -16,8 +16,8 @@ #include <sys/socket.h> #include <linux/if_tunnel.h> #include <linux/mpls.h> -#include "bpf_helpers.h" -#include "bpf_endian.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> int _version SEC("version") = 1; #define PROG(F) SEC(#F) int bpf_func_##F diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___equiv_zero_sz_arr.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___equiv_zero_sz_arr.c new file mode 100644 index 000000000000..65eac371b061 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___equiv_zero_sz_arr.c @@ -0,0 +1,3 @@ +#include "core_reloc_types.h" + +void f(struct core_reloc_arrays___equiv_zero_sz_arr x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_bad_zero_sz_arr.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_bad_zero_sz_arr.c new file mode 100644 index 000000000000..ecda2b545ac2 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_bad_zero_sz_arr.c @@ -0,0 +1,3 @@ +#include "core_reloc_types.h" + +void f(struct core_reloc_arrays___err_bad_zero_sz_arr x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___fixed_arr.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___fixed_arr.c new file mode 100644 index 000000000000..fe1d01232c22 --- /dev/null 
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___fixed_arr.c @@ -0,0 +1,3 @@ +#include "core_reloc_types.h" + +void f(struct core_reloc_arrays___fixed_arr x) {} diff --git a/tools/testing/selftests/bpf/progs/connect4_prog.c b/tools/testing/selftests/bpf/progs/connect4_prog.c index 1fd244d35ba9..75085119c5bb 100644 --- a/tools/testing/selftests/bpf/progs/connect4_prog.c +++ b/tools/testing/selftests/bpf/progs/connect4_prog.c @@ -9,8 +9,8 @@ #include <linux/in6.h> #include <sys/socket.h> -#include "bpf_helpers.h" -#include "bpf_endian.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> #define SRC_REWRITE_IP4 0x7f000004U #define DST_REWRITE_IP4 0x7f000001U diff --git a/tools/testing/selftests/bpf/progs/connect6_prog.c b/tools/testing/selftests/bpf/progs/connect6_prog.c index 26397ab7b3c7..506d0f81a375 100644 --- a/tools/testing/selftests/bpf/progs/connect6_prog.c +++ b/tools/testing/selftests/bpf/progs/connect6_prog.c @@ -9,8 +9,8 @@ #include <linux/in6.h> #include <sys/socket.h> -#include "bpf_helpers.h" -#include "bpf_endian.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> #define SRC_REWRITE_IP6_0 0 #define SRC_REWRITE_IP6_1 0 diff --git a/tools/testing/selftests/bpf/progs/core_reloc_types.h b/tools/testing/selftests/bpf/progs/core_reloc_types.h index 9311489e14b2..6d598cfbdb3e 100644 --- a/tools/testing/selftests/bpf/progs/core_reloc_types.h +++ b/tools/testing/selftests/bpf/progs/core_reloc_types.h @@ -327,6 +327,7 @@ struct core_reloc_arrays_output { char b123; int c1c; int d00d; + int f10c; }; struct core_reloc_arrays_substruct { @@ -339,6 +340,7 @@ struct core_reloc_arrays { char b[2][3][4]; struct core_reloc_arrays_substruct c[3]; struct core_reloc_arrays_substruct d[1][2]; + struct core_reloc_arrays_substruct f[][2]; }; /* bigger array dimensions */ @@ -347,6 +349,7 @@ struct core_reloc_arrays___diff_arr_dim { char b[3][4][5]; struct core_reloc_arrays_substruct c[4]; struct core_reloc_arrays_substruct d[2][3]; + struct core_reloc_arrays_substruct f[1][3]; }; /* different size of array's value (struct) */ @@ -363,6 +366,29 @@ struct core_reloc_arrays___diff_arr_val_sz { int d; int __padding2; } d[1][2]; + struct { + int __padding1; + int c; + int __padding2; + } f[][2]; +}; + +struct core_reloc_arrays___equiv_zero_sz_arr { + int a[5]; + char b[2][3][4]; + struct core_reloc_arrays_substruct c[3]; + struct core_reloc_arrays_substruct d[1][2]; + /* equivalent to flexible array */ + struct core_reloc_arrays_substruct f[0][2]; +}; + +struct core_reloc_arrays___fixed_arr { + int a[5]; + char b[2][3][4]; + struct core_reloc_arrays_substruct c[3]; + struct core_reloc_arrays_substruct d[1][2]; + /* not a flexible array anymore, but within access bounds */ + struct core_reloc_arrays_substruct f[1][2]; }; struct core_reloc_arrays___err_too_small { @@ -370,6 +396,7 @@ struct core_reloc_arrays___err_too_small { char b[2][3][4]; struct core_reloc_arrays_substruct c[3]; struct core_reloc_arrays_substruct d[1][2]; + struct core_reloc_arrays_substruct f[][2]; }; struct core_reloc_arrays___err_too_shallow { @@ -377,6 +404,7 @@ struct core_reloc_arrays___err_too_shallow { char b[2][3]; /* this one lacks one dimension */ struct core_reloc_arrays_substruct c[3]; struct core_reloc_arrays_substruct d[1][2]; + struct core_reloc_arrays_substruct f[][2]; }; struct core_reloc_arrays___err_non_array { @@ -384,6 +412,7 @@ struct core_reloc_arrays___err_non_array { char b[2][3][4]; struct core_reloc_arrays_substruct c[3]; struct core_reloc_arrays_substruct d[1][2]; + struct 
core_reloc_arrays_substruct f[][2]; }; struct core_reloc_arrays___err_wrong_val_type { @@ -391,6 +420,16 @@ struct core_reloc_arrays___err_wrong_val_type { char b[2][3][4]; int c[3]; /* value is not a struct */ struct core_reloc_arrays_substruct d[1][2]; + struct core_reloc_arrays_substruct f[][2]; +}; + +struct core_reloc_arrays___err_bad_zero_sz_arr { + /* zero-sized array, but not at the end */ + struct core_reloc_arrays_substruct f[0][2]; + int a[5]; + char b[2][3][4]; + struct core_reloc_arrays_substruct c[3]; + struct core_reloc_arrays_substruct d[1][2]; }; /* diff --git a/tools/testing/selftests/bpf/progs/dev_cgroup.c b/tools/testing/selftests/bpf/progs/dev_cgroup.c index ce41a3475f27..8924e06bdef0 100644 --- a/tools/testing/selftests/bpf/progs/dev_cgroup.c +++ b/tools/testing/selftests/bpf/progs/dev_cgroup.c @@ -7,7 +7,7 @@ #include <linux/bpf.h> #include <linux/version.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> SEC("cgroup/dev") int bpf_prog1(struct bpf_cgroup_dev_ctx *ctx) diff --git a/tools/testing/selftests/bpf/progs/fentry_test.c b/tools/testing/selftests/bpf/progs/fentry_test.c index 615f7c6bca77..38d3a82144ca 100644 --- a/tools/testing/selftests/bpf/progs/fentry_test.c +++ b/tools/testing/selftests/bpf/progs/fentry_test.c @@ -1,43 +1,46 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019 Facebook */ #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> #include "bpf_trace_helpers.h" char _license[] SEC("license") = "GPL"; __u64 test1_result = 0; -BPF_TRACE_1("fentry/bpf_fentry_test1", test1, int, a) +SEC("fentry/bpf_fentry_test1") +int BPF_PROG(test1, int a) { test1_result = a == 1; return 0; } __u64 test2_result = 0; -BPF_TRACE_2("fentry/bpf_fentry_test2", test2, int, a, __u64, b) +SEC("fentry/bpf_fentry_test2") +int BPF_PROG(test2, int a, __u64 b) { test2_result = a == 2 && b == 3; return 0; } __u64 test3_result = 0; -BPF_TRACE_3("fentry/bpf_fentry_test3", test3, char, a, int, b, __u64, c) +SEC("fentry/bpf_fentry_test3") +int BPF_PROG(test3, char a, int b, __u64 c) { test3_result = a == 4 && b == 5 && c == 6; return 0; } __u64 test4_result = 0; -BPF_TRACE_4("fentry/bpf_fentry_test4", test4, - void *, a, char, b, int, c, __u64, d) +SEC("fentry/bpf_fentry_test4") +int BPF_PROG(test4, void *a, char b, int c, __u64 d) { test4_result = a == (void *)7 && b == 8 && c == 9 && d == 10; return 0; } __u64 test5_result = 0; -BPF_TRACE_5("fentry/bpf_fentry_test5", test5, - __u64, a, void *, b, short, c, int, d, __u64, e) +SEC("fentry/bpf_fentry_test5") +int BPF_PROG(test5, __u64 a, void *b, short c, int d, __u64 e) { test5_result = a == 11 && b == (void *)12 && c == 13 && d == 14 && e == 15; @@ -45,8 +48,8 @@ BPF_TRACE_5("fentry/bpf_fentry_test5", test5, } __u64 test6_result = 0; -BPF_TRACE_6("fentry/bpf_fentry_test6", test6, - __u64, a, void *, b, short, c, int, d, void *, e, __u64, f) +SEC("fentry/bpf_fentry_test6") +int BPF_PROG(test6, __u64 a, void *b, short c, int d, void * e, __u64 f) { test6_result = a == 16 && b == (void *)17 && c == 18 && d == 19 && e == (void *)20 && f == 21; diff --git a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c index 2d211ee98a1c..c329fccf9842 100644 --- a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c +++ b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c @@ -1,7 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019 Facebook */ +#include <linux/stddef.h> +#include <linux/ipv6.h> #include <linux/bpf.h> -#include "bpf_helpers.h" 
+#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> #include "bpf_trace_helpers.h" struct sk_buff { @@ -9,8 +12,8 @@ struct sk_buff { }; __u64 test_result = 0; -BPF_TRACE_2("fexit/test_pkt_access", test_main, - struct sk_buff *, skb, int, ret) +SEC("fexit/test_pkt_access") +int BPF_PROG(test_main, struct sk_buff *skb, int ret) { int len; @@ -24,8 +27,8 @@ BPF_TRACE_2("fexit/test_pkt_access", test_main, } __u64 test_result_subprog1 = 0; -BPF_TRACE_2("fexit/test_pkt_access_subprog1", test_subprog1, - struct sk_buff *, skb, int, ret) +SEC("fexit/test_pkt_access_subprog1") +int BPF_PROG(test_subprog1, struct sk_buff *skb, int ret) { int len; @@ -79,4 +82,73 @@ int test_subprog2(struct args_subprog2 *ctx) test_result_subprog2 = 1; return 0; } + +__u64 test_result_subprog3 = 0; +SEC("fexit/test_pkt_access_subprog3") +int BPF_PROG(test_subprog3, int val, struct sk_buff *skb, int ret) +{ + int len; + + __builtin_preserve_access_index(({ + len = skb->len; + })); + if (len != 74 || ret != 74 * val || val != 3) + return 0; + test_result_subprog3 = 1; + return 0; +} + +__u64 test_get_skb_len = 0; +SEC("freplace/get_skb_len") +int new_get_skb_len(struct __sk_buff *skb) +{ + int len = skb->len; + + if (len != 74) + return 0; + test_get_skb_len = 1; + return 74; /* original get_skb_len() returns skb->len */ +} + +__u64 test_get_skb_ifindex = 0; +SEC("freplace/get_skb_ifindex") +int new_get_skb_ifindex(int val, struct __sk_buff *skb, int var) +{ + void *data_end = (void *)(long)skb->data_end; + void *data = (void *)(long)skb->data; + struct ipv6hdr ip6, *ip6p; + int ifindex = skb->ifindex; + __u32 eth_proto; + __u32 nh_off; + + /* check that BPF extension can read packet via direct packet access */ + if (data + 14 + sizeof(ip6) > data_end) + return 0; + ip6p = data + 14; + + if (ip6p->nexthdr != 6 || ip6p->payload_len != __bpf_constant_htons(123)) + return 0; + + /* check that legacy packet access helper works too */ + if (bpf_skb_load_bytes(skb, 14, &ip6, sizeof(ip6)) < 0) + return 0; + ip6p = &ip6; + if (ip6p->nexthdr != 6 || ip6p->payload_len != __bpf_constant_htons(123)) + return 0; + + if (ifindex != 1 || val != 3 || var != 1) + return 0; + test_get_skb_ifindex = 1; + return 3; /* original get_skb_ifindex() returns val * ifindex * var */ +} + +volatile __u64 test_get_constant = 0; +SEC("freplace/get_constant") +int new_get_constant(long val) +{ + if (val != 123) + return 0; + test_get_constant = 1; + return test_get_constant; /* original get_constant() returns val - 122 */ +} char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf_simple.c b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf_simple.c index ebc0ab7f0f5c..92f3fa47cf40 100644 --- a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf_simple.c +++ b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf_simple.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019 Facebook */ #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> #include "bpf_trace_helpers.h" struct sk_buff { @@ -9,8 +9,9 @@ struct sk_buff { }; __u64 test_result = 0; -BPF_TRACE_2("fexit/test_pkt_md_access", test_main2, - struct sk_buff *, skb, int, ret) + +SEC("fexit/test_pkt_md_access") +int BPF_PROG(test_main2, struct sk_buff *skb, int ret) { int len; diff --git a/tools/testing/selftests/bpf/progs/fexit_test.c b/tools/testing/selftests/bpf/progs/fexit_test.c index 86db0d60fb6e..348109b9ea07 100644 --- a/tools/testing/selftests/bpf/progs/fexit_test.c +++ 
b/tools/testing/selftests/bpf/progs/fexit_test.c @@ -1,45 +1,47 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019 Facebook */ #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> #include "bpf_trace_helpers.h" char _license[] SEC("license") = "GPL"; __u64 test1_result = 0; -BPF_TRACE_2("fexit/bpf_fentry_test1", test1, int, a, int, ret) +SEC("fexit/bpf_fentry_test1") +int BPF_PROG(test1, int a, int ret) { test1_result = a == 1 && ret == 2; return 0; } __u64 test2_result = 0; -BPF_TRACE_3("fexit/bpf_fentry_test2", test2, int, a, __u64, b, int, ret) +SEC("fexit/bpf_fentry_test2") +int BPF_PROG(test2, int a, __u64 b, int ret) { test2_result = a == 2 && b == 3 && ret == 5; return 0; } __u64 test3_result = 0; -BPF_TRACE_4("fexit/bpf_fentry_test3", test3, char, a, int, b, __u64, c, int, ret) +SEC("fexit/bpf_fentry_test3") +int BPF_PROG(test3, char a, int b, __u64 c, int ret) { test3_result = a == 4 && b == 5 && c == 6 && ret == 15; return 0; } __u64 test4_result = 0; -BPF_TRACE_5("fexit/bpf_fentry_test4", test4, - void *, a, char, b, int, c, __u64, d, int, ret) +SEC("fexit/bpf_fentry_test4") +int BPF_PROG(test4, void *a, char b, int c, __u64 d, int ret) { - test4_result = a == (void *)7 && b == 8 && c == 9 && d == 10 && ret == 34; return 0; } __u64 test5_result = 0; -BPF_TRACE_6("fexit/bpf_fentry_test5", test5, - __u64, a, void *, b, short, c, int, d, __u64, e, int, ret) +SEC("fexit/bpf_fentry_test5") +int BPF_PROG(test5, __u64 a, void *b, short c, int d, __u64 e, int ret) { test5_result = a == 11 && b == (void *)12 && c == 13 && d == 14 && e == 15 && ret == 65; @@ -47,9 +49,8 @@ BPF_TRACE_6("fexit/bpf_fentry_test5", test5, } __u64 test6_result = 0; -BPF_TRACE_7("fexit/bpf_fentry_test6", test6, - __u64, a, void *, b, short, c, int, d, void *, e, __u64, f, - int, ret) +SEC("fexit/bpf_fentry_test6") +int BPF_PROG(test6, __u64 a, void *b, short c, int d, void *e, __u64 f, int ret) { test6_result = a == 16 && b == (void *)17 && c == 18 && d == 19 && e == (void *)20 && f == 21 && ret == 111; diff --git a/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c b/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c index 16c54ade6888..6b42db2fe391 100644 --- a/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c +++ b/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c @@ -2,7 +2,7 @@ // Copyright (c) 2018 Facebook #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> struct { __uint(type, BPF_MAP_TYPE_ARRAY); diff --git a/tools/testing/selftests/bpf/progs/kfree_skb.c b/tools/testing/selftests/bpf/progs/kfree_skb.c index 974d6f3bb319..8f48a909f079 100644 --- a/tools/testing/selftests/bpf/progs/kfree_skb.c +++ b/tools/testing/selftests/bpf/progs/kfree_skb.c @@ -2,8 +2,8 @@ // Copyright (c) 2019 Facebook #include <linux/bpf.h> #include <stdbool.h> -#include "bpf_helpers.h" -#include "bpf_endian.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> #include "bpf_trace_helpers.h" char _license[] SEC("license") = "GPL"; @@ -57,8 +57,8 @@ struct meta { /* TRACE_EVENT(kfree_skb, * TP_PROTO(struct sk_buff *skb, void *location), */ -BPF_TRACE_2("tp_btf/kfree_skb", trace_kfree_skb, - struct sk_buff *, skb, void *, location) +SEC("tp_btf/kfree_skb") +int BPF_PROG(trace_kfree_skb, struct sk_buff *skb, void *location) { struct net_device *dev; struct callback_head *ptr; @@ -114,9 +114,9 @@ static volatile struct { bool fexit_test_ok; } result; -BPF_TRACE_3("fentry/eth_type_trans", fentry_eth_type_trans, - struct sk_buff *, skb, struct 
net_device *, dev, - unsigned short, protocol) +SEC("fentry/eth_type_trans") +int BPF_PROG(fentry_eth_type_trans, struct sk_buff *skb, struct net_device *dev, + unsigned short protocol) { int len, ifindex; @@ -132,9 +132,9 @@ BPF_TRACE_3("fentry/eth_type_trans", fentry_eth_type_trans, return 0; } -BPF_TRACE_3("fexit/eth_type_trans", fexit_eth_type_trans, - struct sk_buff *, skb, struct net_device *, dev, - unsigned short, protocol) +SEC("fexit/eth_type_trans") +int BPF_PROG(fexit_eth_type_trans, struct sk_buff *skb, struct net_device *dev, + unsigned short protocol) { int len, ifindex; diff --git a/tools/testing/selftests/bpf/progs/loop1.c b/tools/testing/selftests/bpf/progs/loop1.c index 40ac722a9da5..50e66772c046 100644 --- a/tools/testing/selftests/bpf/progs/loop1.c +++ b/tools/testing/selftests/bpf/progs/loop1.c @@ -6,8 +6,8 @@ #include <stddef.h> #include <stdbool.h> #include <linux/bpf.h> -#include "bpf_helpers.h" -#include "bpf_tracing.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/loop2.c b/tools/testing/selftests/bpf/progs/loop2.c index bb80f29aa7f7..947bb7e988c2 100644 --- a/tools/testing/selftests/bpf/progs/loop2.c +++ b/tools/testing/selftests/bpf/progs/loop2.c @@ -6,8 +6,8 @@ #include <stddef.h> #include <stdbool.h> #include <linux/bpf.h> -#include "bpf_helpers.h" -#include "bpf_tracing.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/loop3.c b/tools/testing/selftests/bpf/progs/loop3.c index 2b9165a7afe1..76e93b31c14b 100644 --- a/tools/testing/selftests/bpf/progs/loop3.c +++ b/tools/testing/selftests/bpf/progs/loop3.c @@ -6,8 +6,8 @@ #include <stddef.h> #include <stdbool.h> #include <linux/bpf.h> -#include "bpf_helpers.h" -#include "bpf_tracing.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/loop4.c b/tools/testing/selftests/bpf/progs/loop4.c index 650859022771..b35337926d66 100644 --- a/tools/testing/selftests/bpf/progs/loop4.c +++ b/tools/testing/selftests/bpf/progs/loop4.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2019 Facebook #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/loop5.c b/tools/testing/selftests/bpf/progs/loop5.c index 28d1d668f07c..913791923fa3 100644 --- a/tools/testing/selftests/bpf/progs/loop5.c +++ b/tools/testing/selftests/bpf/progs/loop5.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2019 Facebook #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> #define barrier() __asm__ __volatile__("": : :"memory") char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/netcnt_prog.c b/tools/testing/selftests/bpf/progs/netcnt_prog.c index 38a997852cad..d071adf178bd 100644 --- a/tools/testing/selftests/bpf/progs/netcnt_prog.c +++ b/tools/testing/selftests/bpf/progs/netcnt_prog.c @@ -2,7 +2,7 @@ #include <linux/bpf.h> #include <linux/version.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> #include "netcnt_common.h" #define MAX_BPS (3 * 1024 * 1024) diff --git a/tools/testing/selftests/bpf/progs/pyperf.h b/tools/testing/selftests/bpf/progs/pyperf.h index 71d383cc9b85..cc615b82b56e 100644 --- 
a/tools/testing/selftests/bpf/progs/pyperf.h +++ b/tools/testing/selftests/bpf/progs/pyperf.h @@ -6,7 +6,7 @@ #include <stddef.h> #include <stdbool.h> #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> #define FUNCTION_NAME_LEN 64 #define FILE_NAME_LEN 128 @@ -154,7 +154,12 @@ struct { __uint(value_size, sizeof(long long) * 127); } stackmap SEC(".maps"); -static __always_inline int __on_event(struct pt_regs *ctx) +#ifdef GLOBAL_FUNC +__attribute__((noinline)) +#else +static __always_inline +#endif +int __on_event(struct bpf_raw_tracepoint_args *ctx) { uint64_t pid_tgid = bpf_get_current_pid_tgid(); pid_t pid = (pid_t)(pid_tgid >> 32); @@ -254,7 +259,7 @@ static __always_inline int __on_event(struct pt_regs *ctx) } SEC("raw_tracepoint/kfree_skb") -int on_event(struct pt_regs* ctx) +int on_event(struct bpf_raw_tracepoint_args* ctx) { int i, ret = 0; ret |= __on_event(ctx); diff --git a/tools/testing/selftests/bpf/progs/pyperf_global.c b/tools/testing/selftests/bpf/progs/pyperf_global.c new file mode 100644 index 000000000000..079e78a7562b --- /dev/null +++ b/tools/testing/selftests/bpf/progs/pyperf_global.c @@ -0,0 +1,5 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#define STACK_MAX_LEN 50 +#define GLOBAL_FUNC +#include "pyperf.h" diff --git a/tools/testing/selftests/bpf/progs/sample_map_ret0.c b/tools/testing/selftests/bpf/progs/sample_map_ret0.c index 0756303676ac..1612a32007b6 100644 --- a/tools/testing/selftests/bpf/progs/sample_map_ret0.c +++ b/tools/testing/selftests/bpf/progs/sample_map_ret0.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */ #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> struct bpf_map_def SEC("maps") htab = { .type = BPF_MAP_TYPE_HASH, diff --git a/tools/testing/selftests/bpf/progs/sendmsg4_prog.c b/tools/testing/selftests/bpf/progs/sendmsg4_prog.c index a91536b1c47e..092d9da536f3 100644 --- a/tools/testing/selftests/bpf/progs/sendmsg4_prog.c +++ b/tools/testing/selftests/bpf/progs/sendmsg4_prog.c @@ -5,8 +5,8 @@ #include <linux/bpf.h> #include <sys/socket.h> -#include "bpf_helpers.h" -#include "bpf_endian.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> #define SRC1_IP4 0xAC100001U /* 172.16.0.1 */ #define SRC2_IP4 0x00000000U diff --git a/tools/testing/selftests/bpf/progs/sendmsg6_prog.c b/tools/testing/selftests/bpf/progs/sendmsg6_prog.c index a68062820410..255a432bc163 100644 --- a/tools/testing/selftests/bpf/progs/sendmsg6_prog.c +++ b/tools/testing/selftests/bpf/progs/sendmsg6_prog.c @@ -5,8 +5,8 @@ #include <linux/bpf.h> #include <sys/socket.h> -#include "bpf_helpers.h" -#include "bpf_endian.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> #define SRC_REWRITE_IP6_0 0 #define SRC_REWRITE_IP6_1 0 diff --git a/tools/testing/selftests/bpf/progs/socket_cookie_prog.c b/tools/testing/selftests/bpf/progs/socket_cookie_prog.c index e4440fdd94cb..0cb5656a22b0 100644 --- a/tools/testing/selftests/bpf/progs/socket_cookie_prog.c +++ b/tools/testing/selftests/bpf/progs/socket_cookie_prog.c @@ -4,8 +4,8 @@ #include <linux/bpf.h> #include <sys/socket.h> -#include "bpf_helpers.h" -#include "bpf_endian.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> struct socket_cookie { __u64 cookie_key; diff --git a/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c b/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c index 9390e0244259..a5c6d5903b22 100644 --- a/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c +++ 
b/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c @@ -1,6 +1,6 @@ #include <linux/bpf.h> -#include "bpf_helpers.h" -#include "bpf_endian.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> int _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c b/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c index e80484d98a1a..fdb4bf4408fa 100644 --- a/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c +++ b/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c @@ -1,7 +1,7 @@ #include <linux/bpf.h> -#include "bpf_helpers.h" -#include "bpf_endian.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> int _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c b/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c index 433e23918a62..4797dc985064 100644 --- a/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c +++ b/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c @@ -1,6 +1,6 @@ #include <linux/bpf.h> -#include "bpf_helpers.h" -#include "bpf_endian.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> int _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/sockopt_inherit.c b/tools/testing/selftests/bpf/progs/sockopt_inherit.c index dede0fcd6102..c6d428a8d785 100644 --- a/tools/testing/selftests/bpf/progs/sockopt_inherit.c +++ b/tools/testing/selftests/bpf/progs/sockopt_inherit.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> char _license[] SEC("license") = "GPL"; __u32 _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/sockopt_multi.c b/tools/testing/selftests/bpf/progs/sockopt_multi.c index 4afd2595c08e..9d8c212dde9f 100644 --- a/tools/testing/selftests/bpf/progs/sockopt_multi.c +++ b/tools/testing/selftests/bpf/progs/sockopt_multi.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <netinet/in.h> #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> char _license[] SEC("license") = "GPL"; __u32 _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/sockopt_sk.c b/tools/testing/selftests/bpf/progs/sockopt_sk.c index 1bafbb944e37..d5a5eeb5fb52 100644 --- a/tools/testing/selftests/bpf/progs/sockopt_sk.c +++ b/tools/testing/selftests/bpf/progs/sockopt_sk.c @@ -3,7 +3,7 @@ #include <netinet/in.h> #include <netinet/tcp.h> #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> char _license[] SEC("license") = "GPL"; __u32 _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/strobemeta.h b/tools/testing/selftests/bpf/progs/strobemeta.h index 4bf16e0a1b0e..ad61b722a9de 100644 --- a/tools/testing/selftests/bpf/progs/strobemeta.h +++ b/tools/testing/selftests/bpf/progs/strobemeta.h @@ -8,7 +8,7 @@ #include <linux/ptrace.h> #include <linux/sched.h> #include <linux/types.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> typedef uint32_t pid_t; struct task_struct {}; diff --git a/tools/testing/selftests/bpf/progs/tailcall1.c b/tools/testing/selftests/bpf/progs/tailcall1.c index 63531e1a9fa4..1f407e65ae52 100644 --- a/tools/testing/selftests/bpf/progs/tailcall1.c +++ b/tools/testing/selftests/bpf/progs/tailcall1.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> struct { __uint(type, BPF_MAP_TYPE_PROG_ARRAY); diff --git 
a/tools/testing/selftests/bpf/progs/tailcall2.c b/tools/testing/selftests/bpf/progs/tailcall2.c index 21c85c477210..a093e739cf0e 100644 --- a/tools/testing/selftests/bpf/progs/tailcall2.c +++ b/tools/testing/selftests/bpf/progs/tailcall2.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> struct { __uint(type, BPF_MAP_TYPE_PROG_ARRAY); diff --git a/tools/testing/selftests/bpf/progs/tailcall3.c b/tools/testing/selftests/bpf/progs/tailcall3.c index 1ecae198b8c1..cabda877cf0a 100644 --- a/tools/testing/selftests/bpf/progs/tailcall3.c +++ b/tools/testing/selftests/bpf/progs/tailcall3.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> struct { __uint(type, BPF_MAP_TYPE_PROG_ARRAY); diff --git a/tools/testing/selftests/bpf/progs/tailcall4.c b/tools/testing/selftests/bpf/progs/tailcall4.c index 499388758119..f82075b47d7d 100644 --- a/tools/testing/selftests/bpf/progs/tailcall4.c +++ b/tools/testing/selftests/bpf/progs/tailcall4.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> struct { __uint(type, BPF_MAP_TYPE_PROG_ARRAY); diff --git a/tools/testing/selftests/bpf/progs/tailcall5.c b/tools/testing/selftests/bpf/progs/tailcall5.c index 49c64eb53f19..ce5450744fd4 100644 --- a/tools/testing/selftests/bpf/progs/tailcall5.c +++ b/tools/testing/selftests/bpf/progs/tailcall5.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> struct { __uint(type, BPF_MAP_TYPE_PROG_ARRAY); diff --git a/tools/testing/selftests/bpf/progs/tcp_rtt.c b/tools/testing/selftests/bpf/progs/tcp_rtt.c index 2cf813a06b83..0cb3204ddb18 100644 --- a/tools/testing/selftests/bpf/progs/tcp_rtt.c +++ b/tools/testing/selftests/bpf/progs/tcp_rtt.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> char _license[] SEC("license") = "GPL"; __u32 _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/test_adjust_tail.c b/tools/testing/selftests/bpf/progs/test_adjust_tail.c index 4cd5e860c903..b7fc85769bdc 100644 --- a/tools/testing/selftests/bpf/progs/test_adjust_tail.c +++ b/tools/testing/selftests/bpf/progs/test_adjust_tail.c @@ -7,7 +7,7 @@ */ #include <linux/bpf.h> #include <linux/if_ether.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> int _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/test_attach_probe.c b/tools/testing/selftests/bpf/progs/test_attach_probe.c index 534621e38906..dd8fae6660ab 100644 --- a/tools/testing/selftests/bpf/progs/test_attach_probe.c +++ b/tools/testing/selftests/bpf/progs/test_attach_probe.c @@ -3,48 +3,38 @@ #include <linux/ptrace.h> #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> -struct { - __uint(type, BPF_MAP_TYPE_ARRAY); - __uint(max_entries, 4); - __type(key, int); - __type(value, int); -} results_map SEC(".maps"); +int kprobe_res = 0; +int kretprobe_res = 0; +int uprobe_res = 0; +int uretprobe_res = 0; SEC("kprobe/sys_nanosleep") -int handle_sys_nanosleep_entry(struct pt_regs *ctx) +int handle_kprobe(struct pt_regs *ctx) { - const int key = 0, value = 1; - - bpf_map_update_elem(&results_map, &key, &value, 0); + kprobe_res = 1; return 0; } SEC("kretprobe/sys_nanosleep") -int handle_sys_getpid_return(struct pt_regs 
*ctx) +int handle_kretprobe(struct pt_regs *ctx) { - const int key = 1, value = 2; - - bpf_map_update_elem(&results_map, &key, &value, 0); + kretprobe_res = 2; return 0; } SEC("uprobe/trigger_func") -int handle_uprobe_entry(struct pt_regs *ctx) +int handle_uprobe(struct pt_regs *ctx) { - const int key = 2, value = 3; - - bpf_map_update_elem(&results_map, &key, &value, 0); + uprobe_res = 3; return 0; } SEC("uretprobe/trigger_func") -int handle_uprobe_return(struct pt_regs *ctx) +int handle_uretprobe(struct pt_regs *ctx) { - const int key = 3, value = 4; - - bpf_map_update_elem(&results_map, &key, &value, 0); + uretprobe_res = 4; return 0; } diff --git a/tools/testing/selftests/bpf/progs/test_btf_haskv.c b/tools/testing/selftests/bpf/progs/test_btf_haskv.c index 62ad7e22105e..88b0566da13d 100644 --- a/tools/testing/selftests/bpf/progs/test_btf_haskv.c +++ b/tools/testing/selftests/bpf/progs/test_btf_haskv.c @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2018 Facebook */ #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> #include "bpf_legacy.h" int _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/test_btf_newkv.c b/tools/testing/selftests/bpf/progs/test_btf_newkv.c index fb8d91a1dbe0..a924e53c8e9d 100644 --- a/tools/testing/selftests/bpf/progs/test_btf_newkv.c +++ b/tools/testing/selftests/bpf/progs/test_btf_newkv.c @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2018 Facebook */ #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> #include "bpf_legacy.h" int _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/test_btf_nokv.c b/tools/testing/selftests/bpf/progs/test_btf_nokv.c index 3f4422044759..983aedd1c072 100644 --- a/tools/testing/selftests/bpf/progs/test_btf_nokv.c +++ b/tools/testing/selftests/bpf/progs/test_btf_nokv.c @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2018 Facebook */ #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> int _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/test_core_extern.c b/tools/testing/selftests/bpf/progs/test_core_extern.c new file mode 100644 index 000000000000..3ac3603ad53d --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_core_extern.c @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ + +#include <stdint.h> +#include <stdbool.h> +#include <linux/ptrace.h> +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> + +/* non-existing BPF helper, to test dead code elimination */ +static int (*bpf_missing_helper)(const void *arg1, int arg2) = (void *) 999; + +extern int LINUX_KERNEL_VERSION __kconfig; +extern bool CONFIG_BPF_SYSCALL __kconfig; /* strong */ +extern enum libbpf_tristate CONFIG_TRISTATE __kconfig __weak; +extern bool CONFIG_BOOL __kconfig __weak; +extern char CONFIG_CHAR __kconfig __weak; +extern uint16_t CONFIG_USHORT __kconfig __weak; +extern int CONFIG_INT __kconfig __weak; +extern uint64_t CONFIG_ULONG __kconfig __weak; +extern const char CONFIG_STR[8] __kconfig __weak; +extern uint64_t CONFIG_MISSING __kconfig __weak; + +uint64_t kern_ver = -1; +uint64_t bpf_syscall = -1; +uint64_t tristate_val = -1; +uint64_t bool_val = -1; +uint64_t char_val = -1; +uint64_t ushort_val = -1; +uint64_t int_val = -1; +uint64_t ulong_val = -1; +char str_val[8] = {-1, -1, -1, -1, -1, -1, -1, -1}; +uint64_t missing_val = -1; + +SEC("raw_tp/sys_enter") +int handle_sys_enter(struct 
pt_regs *ctx) +{ + int i; + + kern_ver = LINUX_KERNEL_VERSION; + bpf_syscall = CONFIG_BPF_SYSCALL; + tristate_val = CONFIG_TRISTATE; + bool_val = CONFIG_BOOL; + char_val = CONFIG_CHAR; + ushort_val = CONFIG_USHORT; + int_val = CONFIG_INT; + ulong_val = CONFIG_ULONG; + + for (i = 0; i < sizeof(CONFIG_STR); i++) { + str_val[i] = CONFIG_STR[i]; + } + + if (CONFIG_MISSING) + /* invalid, but dead code - never executed */ + missing_val = bpf_missing_helper(ctx, 123); + else + missing_val = 0xDEADC0DE; + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c b/tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c index 89951b684282..51b3f79df523 100644 --- a/tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c +++ b/tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c @@ -3,8 +3,8 @@ #include <linux/bpf.h> #include <stdint.h> -#include "bpf_helpers.h" -#include "bpf_core_read.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> char _license[] SEC("license") = "GPL"; @@ -18,6 +18,7 @@ struct core_reloc_arrays_output { char b123; int c1c; int d00d; + int f01c; }; struct core_reloc_arrays_substruct { @@ -30,6 +31,7 @@ struct core_reloc_arrays { char b[2][3][4]; struct core_reloc_arrays_substruct c[3]; struct core_reloc_arrays_substruct d[1][2]; + struct core_reloc_arrays_substruct f[][2]; }; #define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src) @@ -40,18 +42,16 @@ int test_core_arrays(void *ctx) struct core_reloc_arrays *in = (void *)&data.in; struct core_reloc_arrays_output *out = (void *)&data.out; - /* in->a[2] */ if (CORE_READ(&out->a2, &in->a[2])) return 1; - /* in->b[1][2][3] */ if (CORE_READ(&out->b123, &in->b[1][2][3])) return 1; - /* in->c[1].c */ if (CORE_READ(&out->c1c, &in->c[1].c)) return 1; - /* in->d[0][0].d */ if (CORE_READ(&out->d00d, &in->d[0][0].d)) return 1; + if (CORE_READ(&out->f01c, &in->f[0][1].c)) + return 1; return 0; } diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_direct.c b/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_direct.c index edc0f7c9e56d..56aec20212b5 100644 --- a/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_direct.c +++ b/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_direct.c @@ -3,8 +3,8 @@ #include <linux/bpf.h> #include <stdint.h> -#include "bpf_helpers.h" -#include "bpf_core_read.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c b/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c index 6c20e433558b..ab1e647aeb31 100644 --- a/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c +++ b/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c @@ -3,8 +3,8 @@ #include <linux/bpf.h> #include <stdint.h> -#include "bpf_helpers.h" -#include "bpf_core_read.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_existence.c b/tools/testing/selftests/bpf/progs/test_core_reloc_existence.c index 1b7f0ae49cfb..7e45e2bdf6cd 100644 --- a/tools/testing/selftests/bpf/progs/test_core_reloc_existence.c +++ b/tools/testing/selftests/bpf/progs/test_core_reloc_existence.c @@ -3,8 +3,8 @@ #include <linux/bpf.h> #include <stdint.h> -#include "bpf_helpers.h" -#include "bpf_core_read.h" +#include 
<bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_flavors.c b/tools/testing/selftests/bpf/progs/test_core_reloc_flavors.c index b5dbeef540fd..525acc2f841b 100644 --- a/tools/testing/selftests/bpf/progs/test_core_reloc_flavors.c +++ b/tools/testing/selftests/bpf/progs/test_core_reloc_flavors.c @@ -3,8 +3,8 @@ #include <linux/bpf.h> #include <stdint.h> -#include "bpf_helpers.h" -#include "bpf_core_read.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_ints.c b/tools/testing/selftests/bpf/progs/test_core_reloc_ints.c index c78ab6d28a14..6b5290739806 100644 --- a/tools/testing/selftests/bpf/progs/test_core_reloc_ints.c +++ b/tools/testing/selftests/bpf/progs/test_core_reloc_ints.c @@ -3,8 +3,8 @@ #include <linux/bpf.h> #include <stdint.h> -#include "bpf_helpers.h" -#include "bpf_core_read.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c b/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c index 270de441b60a..aba928fd60d3 100644 --- a/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c +++ b/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c @@ -3,8 +3,8 @@ #include <linux/bpf.h> #include <stdint.h> -#include "bpf_helpers.h" -#include "bpf_core_read.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_misc.c b/tools/testing/selftests/bpf/progs/test_core_reloc_misc.c index 292a5c4ee76a..d5756dbdef82 100644 --- a/tools/testing/selftests/bpf/progs/test_core_reloc_misc.c +++ b/tools/testing/selftests/bpf/progs/test_core_reloc_misc.c @@ -3,8 +3,8 @@ #include <linux/bpf.h> #include <stdint.h> -#include "bpf_helpers.h" -#include "bpf_core_read.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_mods.c b/tools/testing/selftests/bpf/progs/test_core_reloc_mods.c index 0b28bfacc8fd..8b533db4a7a5 100644 --- a/tools/testing/selftests/bpf/progs/test_core_reloc_mods.c +++ b/tools/testing/selftests/bpf/progs/test_core_reloc_mods.c @@ -3,8 +3,8 @@ #include <linux/bpf.h> #include <stdint.h> -#include "bpf_helpers.h" -#include "bpf_core_read.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_nesting.c b/tools/testing/selftests/bpf/progs/test_core_reloc_nesting.c index 39279bf0c9db..2b4b6d49c677 100644 --- a/tools/testing/selftests/bpf/progs/test_core_reloc_nesting.c +++ b/tools/testing/selftests/bpf/progs/test_core_reloc_nesting.c @@ -3,8 +3,8 @@ #include <linux/bpf.h> #include <stdint.h> -#include "bpf_helpers.h" -#include "bpf_core_read.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_primitives.c b/tools/testing/selftests/bpf/progs/test_core_reloc_primitives.c index ea57973cdd19..2a8975678aa6 100644 --- a/tools/testing/selftests/bpf/progs/test_core_reloc_primitives.c +++ b/tools/testing/selftests/bpf/progs/test_core_reloc_primitives.c @@ -3,8 +3,8 @@ #include <linux/bpf.h> #include 
<stdint.h> -#include "bpf_helpers.h" -#include "bpf_core_read.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_ptr_as_arr.c b/tools/testing/selftests/bpf/progs/test_core_reloc_ptr_as_arr.c index d1eb59d4ea64..ca61a5183b88 100644 --- a/tools/testing/selftests/bpf/progs/test_core_reloc_ptr_as_arr.c +++ b/tools/testing/selftests/bpf/progs/test_core_reloc_ptr_as_arr.c @@ -3,8 +3,8 @@ #include <linux/bpf.h> #include <stdint.h> -#include "bpf_helpers.h" -#include "bpf_core_read.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_size.c b/tools/testing/selftests/bpf/progs/test_core_reloc_size.c index 9e091124d3bd..d7fb6cfc7891 100644 --- a/tools/testing/selftests/bpf/progs/test_core_reloc_size.c +++ b/tools/testing/selftests/bpf/progs/test_core_reloc_size.c @@ -3,8 +3,8 @@ #include <linux/bpf.h> #include <stdint.h> -#include "bpf_helpers.h" -#include "bpf_core_read.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c index 6a4a8f57f174..29817a703984 100644 --- a/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c +++ b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> /* Permit pretty deep stack traces */ #define MAX_STACK_RAWTP 100 diff --git a/tools/testing/selftests/bpf/progs/test_global_data.c b/tools/testing/selftests/bpf/progs/test_global_data.c index 32a6073acb99..dd7a4d3dbc0d 100644 --- a/tools/testing/selftests/bpf/progs/test_global_data.c +++ b/tools/testing/selftests/bpf/progs/test_global_data.c @@ -5,7 +5,7 @@ #include <linux/pkt_cls.h> #include <string.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> struct { __uint(type, BPF_MAP_TYPE_ARRAY); diff --git a/tools/testing/selftests/bpf/progs/test_global_func1.c b/tools/testing/selftests/bpf/progs/test_global_func1.c new file mode 100644 index 000000000000..880260f6d536 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_global_func1.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2020 Facebook */ +#include <stddef.h> +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> + +#ifndef MAX_STACK +#define MAX_STACK (512 - 3 * 32 + 8) +#endif + +static __attribute__ ((noinline)) +int f0(int var, struct __sk_buff *skb) +{ + return skb->len; +} + +__attribute__ ((noinline)) +int f1(struct __sk_buff *skb) +{ + volatile char buf[MAX_STACK] = {}; + + return f0(0, skb) + skb->len; +} + +int f3(int, struct __sk_buff *skb, int); + +__attribute__ ((noinline)) +int f2(int val, struct __sk_buff *skb) +{ + return f1(skb) + f3(val, skb, 1); +} + +__attribute__ ((noinline)) +int f3(int val, struct __sk_buff *skb, int var) +{ + volatile char buf[MAX_STACK] = {}; + + return skb->ifindex * val * var; +} + +SEC("classifier/test") +int test_cls(struct __sk_buff *skb) +{ + return f0(1, skb) + f1(skb) + f2(2, skb) + f3(3, skb, 4); +} diff --git a/tools/testing/selftests/bpf/progs/test_global_func2.c b/tools/testing/selftests/bpf/progs/test_global_func2.c new file mode 100644 index 000000000000..2c18d82923a2 --- /dev/null +++ 
b/tools/testing/selftests/bpf/progs/test_global_func2.c @@ -0,0 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2020 Facebook */ +#define MAX_STACK (512 - 3 * 32) +#include "test_global_func1.c" diff --git a/tools/testing/selftests/bpf/progs/test_global_func3.c b/tools/testing/selftests/bpf/progs/test_global_func3.c new file mode 100644 index 000000000000..86f0ecb304fc --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_global_func3.c @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2020 Facebook */ +#include <stddef.h> +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> + +__attribute__ ((noinline)) +int f1(struct __sk_buff *skb) +{ + return skb->len; +} + +__attribute__ ((noinline)) +int f2(int val, struct __sk_buff *skb) +{ + return f1(skb) + val; +} + +__attribute__ ((noinline)) +int f3(int val, struct __sk_buff *skb, int var) +{ + return f2(var, skb) + val; +} + +__attribute__ ((noinline)) +int f4(struct __sk_buff *skb) +{ + return f3(1, skb, 2); +} + +__attribute__ ((noinline)) +int f5(struct __sk_buff *skb) +{ + return f4(skb); +} + +__attribute__ ((noinline)) +int f6(struct __sk_buff *skb) +{ + return f5(skb); +} + +__attribute__ ((noinline)) +int f7(struct __sk_buff *skb) +{ + return f6(skb); +} + +#ifndef NO_FN8 +__attribute__ ((noinline)) +int f8(struct __sk_buff *skb) +{ + return f7(skb); +} +#endif + +SEC("classifier/test") +int test_cls(struct __sk_buff *skb) +{ +#ifndef NO_FN8 + return f8(skb); +#else + return f7(skb); +#endif +} diff --git a/tools/testing/selftests/bpf/progs/test_global_func4.c b/tools/testing/selftests/bpf/progs/test_global_func4.c new file mode 100644 index 000000000000..610f75edf276 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_global_func4.c @@ -0,0 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2020 Facebook */ +#define NO_FN8 +#include "test_global_func3.c" diff --git a/tools/testing/selftests/bpf/progs/test_global_func5.c b/tools/testing/selftests/bpf/progs/test_global_func5.c new file mode 100644 index 000000000000..260c25b827ef --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_global_func5.c @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2020 Facebook */ +#include <stddef.h> +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> + +__attribute__ ((noinline)) +int f1(struct __sk_buff *skb) +{ + return skb->len; +} + +int f3(int, struct __sk_buff *skb); + +__attribute__ ((noinline)) +int f2(int val, struct __sk_buff *skb) +{ + return f1(skb) + f3(val, (void *)&val); /* type mismatch */ +} + +__attribute__ ((noinline)) +int f3(int val, struct __sk_buff *skb) +{ + return skb->ifindex * val; +} + +SEC("classifier/test") +int test_cls(struct __sk_buff *skb) +{ + return f1(skb) + f2(2, skb) + f3(3, skb); +} diff --git a/tools/testing/selftests/bpf/progs/test_global_func6.c b/tools/testing/selftests/bpf/progs/test_global_func6.c new file mode 100644 index 000000000000..69e19c64e10b --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_global_func6.c @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2020 Facebook */ +#include <stddef.h> +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> + +__attribute__ ((noinline)) +int f1(struct __sk_buff *skb) +{ + return skb->len; +} + +int f3(int, struct __sk_buff *skb); + +__attribute__ ((noinline)) +int f2(int val, struct __sk_buff *skb) +{ + return f1(skb) + f3(val, skb + 1); /* type mismatch */ +} + +__attribute__ ((noinline)) +int f3(int val, struct 
__sk_buff *skb) +{ + return skb->ifindex * val; +} + +SEC("classifier/test") +int test_cls(struct __sk_buff *skb) +{ + return f1(skb) + f2(2, skb) + f3(3, skb); +} diff --git a/tools/testing/selftests/bpf/progs/test_global_func7.c b/tools/testing/selftests/bpf/progs/test_global_func7.c new file mode 100644 index 000000000000..309b3f6136bd --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_global_func7.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2020 Facebook */ +#include <stddef.h> +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> + +__attribute__ ((noinline)) +void foo(struct __sk_buff *skb) +{ + skb->tc_index = 0; +} + +SEC("classifier/test") +int test_cls(struct __sk_buff *skb) +{ + foo(skb); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/test_l4lb.c b/tools/testing/selftests/bpf/progs/test_l4lb.c index 1d652ee8e73d..33493911d87a 100644 --- a/tools/testing/selftests/bpf/progs/test_l4lb.c +++ b/tools/testing/selftests/bpf/progs/test_l4lb.c @@ -17,9 +17,9 @@ #include <linux/icmpv6.h> #include <linux/tcp.h> #include <linux/udp.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> #include "test_iptunnel_common.h" -#include "bpf_endian.h" +#include <bpf/bpf_endian.h> int _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c b/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c index 2e4efe70b1e5..28351936a438 100644 --- a/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c +++ b/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c @@ -13,9 +13,9 @@ #include <linux/icmpv6.h> #include <linux/tcp.h> #include <linux/udp.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> #include "test_iptunnel_common.h" -#include "bpf_endian.h" +#include <bpf/bpf_endian.h> int _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/test_lirc_mode2_kern.c b/tools/testing/selftests/bpf/progs/test_lirc_mode2_kern.c index 4147130cc3b7..7a6620671a83 100644 --- a/tools/testing/selftests/bpf/progs/test_lirc_mode2_kern.c +++ b/tools/testing/selftests/bpf/progs/test_lirc_mode2_kern.c @@ -5,7 +5,7 @@ #include <linux/bpf.h> #include <linux/lirc.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> SEC("lirc_mode2") int bpf_decoder(unsigned int *sample) diff --git a/tools/testing/selftests/bpf/progs/test_lwt_ip_encap.c b/tools/testing/selftests/bpf/progs/test_lwt_ip_encap.c index c957d6dfe6d7..d6cb986e7533 100644 --- a/tools/testing/selftests/bpf/progs/test_lwt_ip_encap.c +++ b/tools/testing/selftests/bpf/progs/test_lwt_ip_encap.c @@ -4,8 +4,8 @@ #include <linux/bpf.h> #include <linux/ip.h> #include <linux/ipv6.h> -#include "bpf_helpers.h" -#include "bpf_endian.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> struct grehdr { __be16 flags; diff --git a/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c index 41a3ebcd593d..48ff2b2ad5e7 100644 --- a/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c +++ b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c @@ -3,8 +3,8 @@ #include <errno.h> #include <linux/seg6_local.h> #include <linux/bpf.h> -#include "bpf_helpers.h" -#include "bpf_endian.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> /* Packet parsing state machine helpers. 
*/ #define cursor_advance(_cursor, _len) \ diff --git a/tools/testing/selftests/bpf/progs/test_map_in_map.c b/tools/testing/selftests/bpf/progs/test_map_in_map.c index 113226115365..1cfeb940cf9f 100644 --- a/tools/testing/selftests/bpf/progs/test_map_in_map.c +++ b/tools/testing/selftests/bpf/progs/test_map_in_map.c @@ -3,7 +3,7 @@ #include <stddef.h> #include <linux/bpf.h> #include <linux/types.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> struct { __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); diff --git a/tools/testing/selftests/bpf/progs/test_map_lock.c b/tools/testing/selftests/bpf/progs/test_map_lock.c index bb7ce35f691b..b5c07ae7b68f 100644 --- a/tools/testing/selftests/bpf/progs/test_map_lock.c +++ b/tools/testing/selftests/bpf/progs/test_map_lock.c @@ -2,7 +2,7 @@ // Copyright (c) 2019 Facebook #include <linux/bpf.h> #include <linux/version.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> #define VAR_NUM 16 diff --git a/tools/testing/selftests/bpf/progs/test_mmap.c b/tools/testing/selftests/bpf/progs/test_mmap.c index e808791b7047..6239596cd14e 100644 --- a/tools/testing/selftests/bpf/progs/test_mmap.c +++ b/tools/testing/selftests/bpf/progs/test_mmap.c @@ -3,7 +3,7 @@ #include <linux/bpf.h> #include <stdint.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_obj_id.c b/tools/testing/selftests/bpf/progs/test_obj_id.c index 3d30c02bdae9..98b9de2fafd0 100644 --- a/tools/testing/selftests/bpf/progs/test_obj_id.c +++ b/tools/testing/selftests/bpf/progs/test_obj_id.c @@ -4,7 +4,7 @@ #include <stddef.h> #include <linux/bpf.h> #include <linux/pkt_cls.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> /* It is a dumb bpf program such that it must have no * issue to be loaded since testing the verifier is diff --git a/tools/testing/selftests/bpf/progs/test_overhead.c b/tools/testing/selftests/bpf/progs/test_overhead.c index 96c0124a04ba..bfe9fbcb9684 100644 --- a/tools/testing/selftests/bpf/progs/test_overhead.c +++ b/tools/testing/selftests/bpf/progs/test_overhead.c @@ -1,39 +1,45 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019 Facebook */ +#include <stdbool.h> +#include <stddef.h> #include <linux/bpf.h> -#include "bpf_helpers.h" -#include "bpf_tracing.h" +#include <linux/ptrace.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> #include "bpf_trace_helpers.h" +struct task_struct; + SEC("kprobe/__set_task_comm") -int prog1(struct pt_regs *ctx) +int BPF_KPROBE(prog1, struct task_struct *tsk, const char *buf, bool exec) { - return 0; + return !tsk; } SEC("kretprobe/__set_task_comm") -int prog2(struct pt_regs *ctx) +int BPF_KRETPROBE(prog2, + struct task_struct *tsk, const char *buf, bool exec, + int ret) { - return 0; + return !PT_REGS_PARM1(ctx) && ret; } SEC("raw_tp/task_rename") int prog3(struct bpf_raw_tracepoint_args *ctx) { - return 0; + return !ctx->args[0]; } -struct task_struct; -BPF_TRACE_3("fentry/__set_task_comm", prog4, - struct task_struct *, tsk, const char *, buf, __u8, exec) +SEC("fentry/__set_task_comm") +int BPF_PROG(prog4, struct task_struct *tsk, const char *buf, bool exec) { - return 0; + return !tsk; } -BPF_TRACE_3("fexit/__set_task_comm", prog5, - struct task_struct *, tsk, const char *, buf, __u8, exec) +SEC("fexit/__set_task_comm") +int BPF_PROG(prog5, struct task_struct *tsk, const char *buf, bool exec) { - return 0; + return !tsk; } char _license[] SEC("license") = "GPL"; diff --git 
a/tools/testing/selftests/bpf/progs/test_perf_buffer.c b/tools/testing/selftests/bpf/progs/test_perf_buffer.c index 07c09ca5546a..ebfcc9f50c35 100644 --- a/tools/testing/selftests/bpf/progs/test_perf_buffer.c +++ b/tools/testing/selftests/bpf/progs/test_perf_buffer.c @@ -3,7 +3,8 @@ #include <linux/ptrace.h> #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> +#include "bpf_trace_helpers.h" struct { __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); @@ -12,7 +13,7 @@ struct { } perf_buf_map SEC(".maps"); SEC("kprobe/sys_nanosleep") -int handle_sys_nanosleep_entry(struct pt_regs *ctx) +int BPF_KPROBE(handle_sys_nanosleep_entry) { int cpu = bpf_get_smp_processor_id(); diff --git a/tools/testing/selftests/bpf/progs/test_pinning.c b/tools/testing/selftests/bpf/progs/test_pinning.c index f20e7e00373f..4ef2630292b2 100644 --- a/tools/testing/selftests/bpf/progs/test_pinning.c +++ b/tools/testing/selftests/bpf/progs/test_pinning.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> int _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/test_pinning_invalid.c b/tools/testing/selftests/bpf/progs/test_pinning_invalid.c index 51b38abe7ba1..5412e0c732c7 100644 --- a/tools/testing/selftests/bpf/progs/test_pinning_invalid.c +++ b/tools/testing/selftests/bpf/progs/test_pinning_invalid.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> int _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/test_pkt_access.c b/tools/testing/selftests/bpf/progs/test_pkt_access.c index 3a7b4b607ed3..e72eba4a93d2 100644 --- a/tools/testing/selftests/bpf/progs/test_pkt_access.c +++ b/tools/testing/selftests/bpf/progs/test_pkt_access.c @@ -11,8 +11,8 @@ #include <linux/in.h> #include <linux/tcp.h> #include <linux/pkt_cls.h> -#include "bpf_helpers.h" -#include "bpf_endian.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> #define barrier() __asm__ __volatile__("": : :"memory") int _version SEC("version") = 1; @@ -47,6 +47,38 @@ int test_pkt_access_subprog2(int val, volatile struct __sk_buff *skb) return skb->len * val; } +#define MAX_STACK (512 - 2 * 32) + +__attribute__ ((noinline)) +int get_skb_len(struct __sk_buff *skb) +{ + volatile char buf[MAX_STACK] = {}; + + return skb->len; +} + +__attribute__ ((noinline)) +int get_constant(long val) +{ + return val - 122; +} + +int get_skb_ifindex(int, struct __sk_buff *skb, int); + +__attribute__ ((noinline)) +int test_pkt_access_subprog3(int val, struct __sk_buff *skb) +{ + return get_skb_len(skb) * get_skb_ifindex(val, skb, get_constant(123)); +} + +__attribute__ ((noinline)) +int get_skb_ifindex(int val, struct __sk_buff *skb, int var) +{ + volatile char buf[MAX_STACK] = {}; + + return skb->ifindex * val * var; +} + SEC("classifier/test_pkt_access") int test_pkt_access(struct __sk_buff *skb) { @@ -82,6 +114,8 @@ int test_pkt_access(struct __sk_buff *skb) return TC_ACT_SHOT; if (test_pkt_access_subprog2(2, skb) != skb->len * 2) return TC_ACT_SHOT; + if (test_pkt_access_subprog3(3, skb) != skb->len * 3 * skb->ifindex) + return TC_ACT_SHOT; if (tcp) { if (((void *)(tcp) + 20) > data_end || proto != 6) return TC_ACT_SHOT; diff --git a/tools/testing/selftests/bpf/progs/test_pkt_md_access.c b/tools/testing/selftests/bpf/progs/test_pkt_md_access.c index 1db2623021ad..610c74ea9f64 100644 --- a/tools/testing/selftests/bpf/progs/test_pkt_md_access.c +++ 
b/tools/testing/selftests/bpf/progs/test_pkt_md_access.c @@ -5,7 +5,7 @@ #include <string.h> #include <linux/bpf.h> #include <linux/pkt_cls.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> int _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/test_probe_user.c b/tools/testing/selftests/bpf/progs/test_probe_user.c index 1871e2ece0c4..d556b1572cc6 100644 --- a/tools/testing/selftests/bpf/progs/test_probe_user.c +++ b/tools/testing/selftests/bpf/progs/test_probe_user.c @@ -5,13 +5,14 @@ #include <netinet/in.h> -#include "bpf_helpers.h" -#include "bpf_tracing.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include "bpf_trace_helpers.h" static struct sockaddr_in old; SEC("kprobe/__sys_connect") -int handle_sys_connect(struct pt_regs *ctx) +int BPF_KPROBE(handle_sys_connect) { void *ptr = (void *)PT_REGS_PARM2(ctx); struct sockaddr_in new; diff --git a/tools/testing/selftests/bpf/progs/test_queue_stack_map.h b/tools/testing/selftests/bpf/progs/test_queue_stack_map.h index 0e014d3b2b36..4dd9806ad73b 100644 --- a/tools/testing/selftests/bpf/progs/test_queue_stack_map.h +++ b/tools/testing/selftests/bpf/progs/test_queue_stack_map.h @@ -6,7 +6,7 @@ #include <linux/if_ether.h> #include <linux/ip.h> #include <linux/pkt_cls.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> int _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/test_rdonly_maps.c b/tools/testing/selftests/bpf/progs/test_rdonly_maps.c index 52d94e8b214d..ecbeea2df259 100644 --- a/tools/testing/selftests/bpf/progs/test_rdonly_maps.c +++ b/tools/testing/selftests/bpf/progs/test_rdonly_maps.c @@ -3,7 +3,7 @@ #include <linux/ptrace.h> #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> static volatile const struct { unsigned a[4]; diff --git a/tools/testing/selftests/bpf/progs/test_seg6_loop.c b/tools/testing/selftests/bpf/progs/test_seg6_loop.c index 69880c1e7700..a7278f064368 100644 --- a/tools/testing/selftests/bpf/progs/test_seg6_loop.c +++ b/tools/testing/selftests/bpf/progs/test_seg6_loop.c @@ -3,8 +3,8 @@ #include <errno.h> #include <linux/seg6_local.h> #include <linux/bpf.h> -#include "bpf_helpers.h" -#include "bpf_endian.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> /* Packet parsing state machine helpers. */ #define cursor_advance(_cursor, _len) \ diff --git a/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c index ea7d84f01235..26e77dcc7e91 100644 --- a/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c +++ b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c @@ -11,8 +11,8 @@ #include <linux/types.h> #include <linux/if_ether.h> -#include "bpf_endian.h" -#include "bpf_helpers.h" +#include <bpf/bpf_endian.h> +#include <bpf/bpf_helpers.h> #include "test_select_reuseport_common.h" int _version SEC("version") = 1; @@ -62,7 +62,7 @@ struct { goto done; \ }) -SEC("select_by_skb_data") +SEC("sk_reuseport") int _select_by_skb_data(struct sk_reuseport_md *reuse_md) { __u32 linum, index = 0, flags = 0, index_zero = 0; @@ -113,6 +113,12 @@ int _select_by_skb_data(struct sk_reuseport_md *reuse_md) data_check.skb_ports[0] = th->source; data_check.skb_ports[1] = th->dest; + if (th->fin) + /* The connection is being torn down at the end of a + * test. It can't contain a cmd, so return early. 
+ */ + return SK_PASS; + if ((th->doff << 2) + sizeof(*cmd) > data_check.len) GOTO_DONE(DROP_ERR_SKB_DATA); if (bpf_skb_load_bytes(reuse_md, th->doff << 2, &cmd_copy, diff --git a/tools/testing/selftests/bpf/progs/test_send_signal_kern.c b/tools/testing/selftests/bpf/progs/test_send_signal_kern.c index 0e6be01157e6..1acc91e87bfc 100644 --- a/tools/testing/selftests/bpf/progs/test_send_signal_kern.c +++ b/tools/testing/selftests/bpf/progs/test_send_signal_kern.c @@ -2,46 +2,39 @@ // Copyright (c) 2019 Facebook #include <linux/bpf.h> #include <linux/version.h> -#include "bpf_helpers.h" - -struct { - __uint(type, BPF_MAP_TYPE_ARRAY); - __uint(max_entries, 1); - __type(key, __u32); - __type(value, __u64); -} info_map SEC(".maps"); - -struct { - __uint(type, BPF_MAP_TYPE_ARRAY); - __uint(max_entries, 1); - __type(key, __u32); - __type(value, __u64); -} status_map SEC(".maps"); - -SEC("send_signal_demo") -int bpf_send_signal_test(void *ctx) +#include <bpf/bpf_helpers.h> + +__u32 sig = 0, pid = 0, status = 0, signal_thread = 0; + +static __always_inline int bpf_send_signal_test(void *ctx) { - __u64 *info_val, *status_val; - __u32 key = 0, pid, sig; int ret; - status_val = bpf_map_lookup_elem(&status_map, &key); - if (!status_val || *status_val != 0) - return 0; - - info_val = bpf_map_lookup_elem(&info_map, &key); - if (!info_val || *info_val == 0) + if (status != 0 || sig == 0 || pid == 0) return 0; - sig = *info_val >> 32; - pid = *info_val & 0xffffFFFF; - if ((bpf_get_current_pid_tgid() >> 32) == pid) { - ret = bpf_send_signal(sig); + if (signal_thread) + ret = bpf_send_signal_thread(sig); + else + ret = bpf_send_signal(sig); if (ret == 0) - *status_val = 1; + status = 1; } return 0; } + +SEC("tracepoint/syscalls/sys_enter_nanosleep") +int send_signal_tp(void *ctx) +{ + return bpf_send_signal_test(ctx); +} + +SEC("perf_event") +int send_signal_perf(void *ctx) +{ + return bpf_send_signal_test(ctx); +} + char __license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c index cb49ccb707d1..d2b38fa6a5b0 100644 --- a/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c +++ b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c @@ -12,8 +12,8 @@ #include <linux/pkt_cls.h> #include <linux/tcp.h> #include <sys/socket.h> -#include "bpf_helpers.h" -#include "bpf_endian.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> int _version SEC("version") = 1; char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c b/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c index 68cf9829f5a7..552f2090665c 100644 --- a/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c +++ b/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c @@ -6,7 +6,7 @@ #include <string.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> #define NUM_CGROUP_LEVELS 4 diff --git a/tools/testing/selftests/bpf/progs/test_skb_ctx.c b/tools/testing/selftests/bpf/progs/test_skb_ctx.c index 2a9f4c736ebc..202de3938494 100644 --- a/tools/testing/selftests/bpf/progs/test_skb_ctx.c +++ b/tools/testing/selftests/bpf/progs/test_skb_ctx.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> int _version SEC("version") = 1; char _license[] SEC("license") = "GPL"; @@ -17,6 +17,12 @@ int process(struct __sk_buff *skb) } skb->priority++; skb->tstamp++; + skb->mark++; + + if 
(skb->wire_len != 100) + return 1; + if (skb->gso_segs != 8) + return 1; return 0; } diff --git a/tools/testing/selftests/bpf/progs/test_skeleton.c b/tools/testing/selftests/bpf/progs/test_skeleton.c new file mode 100644 index 000000000000..de03a90f78ca --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_skeleton.c @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ + +#include <stdbool.h> +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> + +struct s { + int a; + long long b; +} __attribute__((packed)); + +int in1 = 0; +long long in2 = 0; +char in3 = '\0'; +long long in4 __attribute__((aligned(64))) = 0; +struct s in5 = {}; + +long long out2 = 0; +char out3 = 0; +long long out4 = 0; +int out1 = 0; + +extern bool CONFIG_BPF_SYSCALL __kconfig; +extern int LINUX_KERNEL_VERSION __kconfig; +bool bpf_syscall = 0; +int kern_ver = 0; + +SEC("raw_tp/sys_enter") +int handler(const void *ctx) +{ + static volatile struct s out5; + + out1 = in1; + out2 = in2; + out3 = in3; + out4 = in4; + out5 = in5; + + bpf_syscall = CONFIG_BPF_SYSCALL; + kern_ver = LINUX_KERNEL_VERSION; + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c b/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c index a47b003623ef..9bcaa37f476a 100644 --- a/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c +++ b/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c @@ -5,8 +5,8 @@ #include <netinet/in.h> #include <stdbool.h> -#include "bpf_helpers.h" -#include "bpf_endian.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> enum bpf_addr_array_idx { ADDR_SRV_IDX, diff --git a/tools/testing/selftests/bpf/progs/test_spin_lock.c b/tools/testing/selftests/bpf/progs/test_spin_lock.c index a43b999c8da2..0d31a3b3505f 100644 --- a/tools/testing/selftests/bpf/progs/test_spin_lock.c +++ b/tools/testing/selftests/bpf/progs/test_spin_lock.c @@ -2,7 +2,7 @@ // Copyright (c) 2019 Facebook #include <linux/bpf.h> #include <linux/version.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> struct hmap_elem { volatile int cnt; diff --git a/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c b/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c index f5638e26865d..0cf0134631b4 100644 --- a/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c +++ b/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c @@ -2,7 +2,7 @@ // Copyright (c) 2018 Facebook #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> #ifndef PERF_MAX_STACK_DEPTH #define PERF_MAX_STACK_DEPTH 127 diff --git a/tools/testing/selftests/bpf/progs/test_stacktrace_map.c b/tools/testing/selftests/bpf/progs/test_stacktrace_map.c index 3b7e1dca8829..00ed48672620 100644 --- a/tools/testing/selftests/bpf/progs/test_stacktrace_map.c +++ b/tools/testing/selftests/bpf/progs/test_stacktrace_map.c @@ -2,7 +2,7 @@ // Copyright (c) 2018 Facebook #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> #ifndef PERF_MAX_STACK_DEPTH #define PERF_MAX_STACK_DEPTH 127 diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c index d22e438198cf..458b0d69133e 100644 --- a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c +++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c @@ -7,7 +7,7 @@ #include <linux/stddef.h> #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> #ifndef 
ARRAY_SIZE #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c index cb201cbe11e7..b2e6f9b0894d 100644 --- a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c +++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c @@ -7,7 +7,7 @@ #include <linux/stddef.h> #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> #ifndef ARRAY_SIZE #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c index 5cbbff416998..2d0b0b82a78a 100644 --- a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c +++ b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c @@ -7,7 +7,7 @@ #include <linux/stddef.h> #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> /* Max supported length of a string with unsigned long in base 10 (pow2 - 1). */ #define MAX_ULONG_STR_LEN 0xF diff --git a/tools/testing/selftests/bpf/progs/test_tc_edt.c b/tools/testing/selftests/bpf/progs/test_tc_edt.c index 0961415ba477..bf28814bfde5 100644 --- a/tools/testing/selftests/bpf/progs/test_tc_edt.c +++ b/tools/testing/selftests/bpf/progs/test_tc_edt.c @@ -7,8 +7,8 @@ #include <linux/ip.h> #include <linux/pkt_cls.h> #include <linux/tcp.h> -#include "bpf_helpers.h" -#include "bpf_endian.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> /* the maximum delay we are willing to add (drop packets beyond that) */ #define TIME_HORIZON_NS (2000 * 1000 * 1000) diff --git a/tools/testing/selftests/bpf/progs/test_tc_tunnel.c b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c index 74370e7e286d..37bce7a7c394 100644 --- a/tools/testing/selftests/bpf/progs/test_tc_tunnel.c +++ b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c @@ -17,8 +17,8 @@ #include <linux/pkt_cls.h> #include <linux/types.h> -#include "bpf_endian.h" -#include "bpf_helpers.h" +#include <bpf/bpf_endian.h> +#include <bpf/bpf_helpers.h> static const int cfg_port = 8000; diff --git a/tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c b/tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c index d8803dfa8d32..47cbe2eeae43 100644 --- a/tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c +++ b/tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c @@ -13,8 +13,8 @@ #include <sys/socket.h> #include <linux/tcp.h> -#include "bpf_helpers.h" -#include "bpf_endian.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> struct bpf_map_def SEC("maps") results = { .type = BPF_MAP_TYPE_ARRAY, diff --git a/tools/testing/selftests/bpf/progs/test_tcp_estats.c b/tools/testing/selftests/bpf/progs/test_tcp_estats.c index 87b7d934ce73..adc83a54c352 100644 --- a/tools/testing/selftests/bpf/progs/test_tcp_estats.c +++ b/tools/testing/selftests/bpf/progs/test_tcp_estats.c @@ -36,7 +36,7 @@ #include <linux/ipv6.h> #include <linux/version.h> #include <sys/socket.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> #define _(P) ({typeof(P) val = 0; bpf_probe_read_kernel(&val, sizeof(val), &P); val;}) #define TCP_ESTATS_MAGIC 0xBAADBEEF diff --git a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c index 7fa4595d2b66..1f1966e86e9f 100644 --- a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c +++ b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c @@ -10,8 +10,8 @@ #include <linux/types.h> 
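(Aside, not part of the patch: the include change repeated across these program sources is that bpf_helpers.h and bpf_endian.h are now pulled in as installed libbpf headers rather than local copies. A minimal sketch of a BPF program written against the installed headers follows; the program body and section name here are hypothetical, not taken from this diff.)

/* Hypothetical illustration only; not part of this diff. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>	/* SEC(), map definition macros, helper declarations */
#include <bpf/bpf_endian.h>	/* bpf_htons(), bpf_ntohs(), bpf_htonl(), ... */

SEC("xdp")
int sample_prog(struct xdp_md *ctx)
{
	/* bpf_htons() comes from <bpf/bpf_endian.h>; 0x0800 is ETH_P_IP */
	__u16 eth_p_ip = bpf_htons(0x0800);

	return eth_p_ip ? XDP_PASS : XDP_DROP;
}

char _license[] SEC("license") = "GPL";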
#include <linux/socket.h> #include <linux/tcp.h> -#include "bpf_helpers.h" -#include "bpf_endian.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> #include "test_tcpbpf.h" struct { diff --git a/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c b/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c index 08346e7765d5..ac63410bb541 100644 --- a/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c +++ b/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c @@ -10,8 +10,8 @@ #include <linux/types.h> #include <linux/socket.h> #include <linux/tcp.h> -#include "bpf_helpers.h" -#include "bpf_endian.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> #include "test_tcpnotify.h" struct { diff --git a/tools/testing/selftests/bpf/progs/test_tracepoint.c b/tools/testing/selftests/bpf/progs/test_tracepoint.c index 04bf084517e0..4b825ee122cf 100644 --- a/tools/testing/selftests/bpf/progs/test_tracepoint.c +++ b/tools/testing/selftests/bpf/progs/test_tracepoint.c @@ -2,7 +2,7 @@ // Copyright (c) 2017 Facebook #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> /* taken from /sys/kernel/debug/tracing/events/sched/sched_switch/format */ struct sched_switch_args { diff --git a/tools/testing/selftests/bpf/progs/test_trampoline_count.c b/tools/testing/selftests/bpf/progs/test_trampoline_count.c new file mode 100644 index 000000000000..e51e6e3a81c2 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_trampoline_count.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <stdbool.h> +#include <stddef.h> +#include <linux/bpf.h> +#include "bpf_trace_helpers.h" + +struct task_struct; + +SEC("fentry/__set_task_comm") +int BPF_PROG(prog1, struct task_struct *tsk, const char *buf, bool exec) +{ + return 0; +} + +SEC("fexit/__set_task_comm") +int BPF_PROG(prog2, struct task_struct *tsk, const char *buf, bool exec) +{ + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c index 504df69c83df..f48dbfe24ddc 100644 --- a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c +++ b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c @@ -19,8 +19,8 @@ #include <linux/socket.h> #include <linux/pkt_cls.h> #include <linux/erspan.h> -#include "bpf_helpers.h" -#include "bpf_endian.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> #define ERROR(ret) do {\ char fmt[] = "ERROR line:%d ret:%d\n";\ diff --git a/tools/testing/selftests/bpf/progs/test_verif_scale1.c b/tools/testing/selftests/bpf/progs/test_verif_scale1.c index f3236ce35f31..d38153dab3dd 100644 --- a/tools/testing/selftests/bpf/progs/test_verif_scale1.c +++ b/tools/testing/selftests/bpf/progs/test_verif_scale1.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2019 Facebook #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> #define ATTR __attribute__((noinline)) #include "test_jhash.h" diff --git a/tools/testing/selftests/bpf/progs/test_verif_scale2.c b/tools/testing/selftests/bpf/progs/test_verif_scale2.c index 9897150ed516..f024154c7be7 100644 --- a/tools/testing/selftests/bpf/progs/test_verif_scale2.c +++ b/tools/testing/selftests/bpf/progs/test_verif_scale2.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2019 Facebook #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> #define ATTR __always_inline #include "test_jhash.h" diff --git 
a/tools/testing/selftests/bpf/progs/test_verif_scale3.c b/tools/testing/selftests/bpf/progs/test_verif_scale3.c index 1848da04ea41..9beb5bf80373 100644 --- a/tools/testing/selftests/bpf/progs/test_verif_scale3.c +++ b/tools/testing/selftests/bpf/progs/test_verif_scale3.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2019 Facebook #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> #define ATTR __attribute__((noinline)) #include "test_jhash.h" diff --git a/tools/testing/selftests/bpf/progs/test_xdp.c b/tools/testing/selftests/bpf/progs/test_xdp.c index 0941c655b07b..31f9bce37491 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp.c +++ b/tools/testing/selftests/bpf/progs/test_xdp.c @@ -16,8 +16,8 @@ #include <linux/tcp.h> #include <linux/pkt_cls.h> #include <sys/socket.h> -#include "bpf_helpers.h" -#include "bpf_endian.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> #include "test_iptunnel_common.h" int _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c b/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c new file mode 100644 index 000000000000..cb8a04ab7a78 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_trace_helpers.h" + +struct net_device { + /* Structure does not need to contain all entries, + * as "preserve_access_index" will use BTF to fix this... + */ + int ifindex; +} __attribute__((preserve_access_index)); + +struct xdp_rxq_info { + /* Structure does not need to contain all entries, + * as "preserve_access_index" will use BTF to fix this... + */ + struct net_device *dev; + __u32 queue_index; +} __attribute__((preserve_access_index)); + +struct xdp_buff { + void *data; + void *data_end; + void *data_meta; + void *data_hard_start; + unsigned long handle; + struct xdp_rxq_info *rxq; +} __attribute__((preserve_access_index)); + +__u64 test_result_fentry = 0; +SEC("fentry/_xdp_tx_iptunnel") +int BPF_PROG(trace_on_entry, struct xdp_buff *xdp) +{ + test_result_fentry = xdp->rxq->dev->ifindex; + return 0; +} + +__u64 test_result_fexit = 0; +SEC("fexit/_xdp_tx_iptunnel") +int BPF_PROG(trace_on_exit, struct xdp_buff *xdp, int ret) +{ + test_result_fexit = ret; + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/test_xdp_loop.c b/tools/testing/selftests/bpf/progs/test_xdp_loop.c index 97175f73c3fe..fcabcda30ba3 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_loop.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_loop.c @@ -12,8 +12,8 @@ #include <linux/tcp.h> #include <linux/pkt_cls.h> #include <sys/socket.h> -#include "bpf_helpers.h" -#include "bpf_endian.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> #include "test_iptunnel_common.h" int _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/test_xdp_meta.c b/tools/testing/selftests/bpf/progs/test_xdp_meta.c index 8d0182650653..a7c4a7d49fe6 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_meta.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_meta.c @@ -2,7 +2,7 @@ #include <linux/if_ether.h> #include <linux/pkt_cls.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> #define __round_mask(x, y) ((__typeof__(x))((y) - 1)) #define round_up(x, y) ((((x) - 1) | __round_mask(x, y)) + 1) diff --git a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c index 
e88d7b9d65ab..8beecec166d9 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c @@ -13,8 +13,8 @@ #include <linux/icmpv6.h> #include <linux/tcp.h> #include <linux/udp.h> -#include "bpf_helpers.h" -#include "bpf_endian.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> static __u32 rol32(__u32 word, unsigned int shift) { @@ -86,7 +86,7 @@ u32 jhash(const void *key, u32 length, u32 initval) return c; } -static __attribute__ ((noinline)) +__attribute__ ((noinline)) u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval) { a += initval; @@ -96,7 +96,7 @@ u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval) return c; } -static __attribute__ ((noinline)) +__attribute__ ((noinline)) u32 jhash_2words(u32 a, u32 b, u32 initval) { return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2)); diff --git a/tools/testing/selftests/bpf/progs/test_xdp_redirect.c b/tools/testing/selftests/bpf/progs/test_xdp_redirect.c index ef9e704be140..a5337cd9400b 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_redirect.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_redirect.c @@ -10,7 +10,7 @@ * General Public License for more details. */ #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> int _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/test_xdp_vlan.c b/tools/testing/selftests/bpf/progs/test_xdp_vlan.c index 365a7d2d9f5c..134768f6b788 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_vlan.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_vlan.c @@ -22,8 +22,8 @@ #include <linux/in.h> #include <linux/pkt_cls.h> -#include "bpf_helpers.h" -#include "bpf_endian.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> /* linux/if_vlan.h have not exposed this as UAPI, thus mirror some here * diff --git a/tools/testing/selftests/bpf/progs/xdp_dummy.c b/tools/testing/selftests/bpf/progs/xdp_dummy.c index 43b0ef1001ed..ea25e8881992 100644 --- a/tools/testing/selftests/bpf/progs/xdp_dummy.c +++ b/tools/testing/selftests/bpf/progs/xdp_dummy.c @@ -2,7 +2,7 @@ #define KBUILD_MODNAME "xdp_dummy" #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> SEC("xdp_dummy") int xdp_dummy_prog(struct xdp_md *ctx) diff --git a/tools/testing/selftests/bpf/progs/xdp_redirect_map.c b/tools/testing/selftests/bpf/progs/xdp_redirect_map.c index 1c5f298d7196..d037262c8937 100644 --- a/tools/testing/selftests/bpf/progs/xdp_redirect_map.c +++ b/tools/testing/selftests/bpf/progs/xdp_redirect_map.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> struct { __uint(type, BPF_MAP_TYPE_DEVMAP); diff --git a/tools/testing/selftests/bpf/progs/xdp_tx.c b/tools/testing/selftests/bpf/progs/xdp_tx.c index 57912e7c94b0..94e6c2b281cb 100644 --- a/tools/testing/selftests/bpf/progs/xdp_tx.c +++ b/tools/testing/selftests/bpf/progs/xdp_tx.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/bpf.h> -#include "bpf_helpers.h" +#include <bpf/bpf_helpers.h> SEC("tx") int xdp_tx(struct xdp_md *xdp) diff --git a/tools/testing/selftests/bpf/progs/xdping_kern.c b/tools/testing/selftests/bpf/progs/xdping_kern.c index 112a2857f4e2..6b9ca40bd1f4 100644 --- a/tools/testing/selftests/bpf/progs/xdping_kern.c +++ b/tools/testing/selftests/bpf/progs/xdping_kern.c @@ -12,8 +12,8 @@ #include <linux/if_vlan.h> #include <linux/ip.h> -#include "bpf_helpers.h" -#include "bpf_endian.h" +#include 
<bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> #include "xdping.h" diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c index 3d617e806054..93040ca83e60 100644 --- a/tools/testing/selftests/bpf/test_btf.c +++ b/tools/testing/selftests/bpf/test_btf.c @@ -4148,10 +4148,6 @@ static int do_test_file(unsigned int test_num) if (CHECK(IS_ERR(obj), "obj: %ld", PTR_ERR(obj))) return PTR_ERR(obj); - err = bpf_object__btf_fd(obj); - if (CHECK(err == -1, "bpf_object__btf_fd: -1")) - goto done; - prog = bpf_program__next(NULL, obj); if (CHECK(!prog, "Cannot find bpf_prog")) { err = -1; diff --git a/tools/testing/selftests/bpf/test_cgroup_attach.c b/tools/testing/selftests/bpf/test_cgroup_attach.c deleted file mode 100644 index 7671909ee1cb..000000000000 --- a/tools/testing/selftests/bpf/test_cgroup_attach.c +++ /dev/null @@ -1,571 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -/* eBPF example program: - * - * - Creates arraymap in kernel with 4 bytes keys and 8 byte values - * - * - Loads eBPF program - * - * The eBPF program accesses the map passed in to store two pieces of - * information. The number of invocations of the program, which maps - * to the number of packets received, is stored to key 0. Key 1 is - * incremented on each iteration by the number of bytes stored in - * the skb. The program also stores the number of received bytes - * in the cgroup storage. - * - * - Attaches the new program to a cgroup using BPF_PROG_ATTACH - * - * - Every second, reads map[0] and map[1] to see how many bytes and - * packets were seen on any socket of tasks in the given cgroup. - */ - -#define _GNU_SOURCE - -#include <stdio.h> -#include <stdlib.h> -#include <assert.h> -#include <sys/resource.h> -#include <sys/time.h> -#include <unistd.h> -#include <linux/filter.h> - -#include <linux/bpf.h> -#include <bpf/bpf.h> - -#include "bpf_util.h" -#include "bpf_rlimit.h" -#include "cgroup_helpers.h" - -#define FOO "/foo" -#define BAR "/foo/bar/" -#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null" - -char bpf_log_buf[BPF_LOG_BUF_SIZE]; - -#ifdef DEBUG -#define debug(args...) printf(args) -#else -#define debug(args...) -#endif - -static int prog_load(int verdict) -{ - int ret; - struct bpf_insn prog[] = { - BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */ - BPF_EXIT_INSN(), - }; - size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn); - - ret = bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB, - prog, insns_cnt, "GPL", 0, - bpf_log_buf, BPF_LOG_BUF_SIZE); - - if (ret < 0) { - log_err("Loading program"); - printf("Output from verifier:\n%s\n-------\n", bpf_log_buf); - return 0; - } - return ret; -} - -static int test_foo_bar(void) -{ - int drop_prog, allow_prog, foo = 0, bar = 0, rc = 0; - - allow_prog = prog_load(1); - if (!allow_prog) - goto err; - - drop_prog = prog_load(0); - if (!drop_prog) - goto err; - - if (setup_cgroup_environment()) - goto err; - - /* Create cgroup /foo, get fd, and join it */ - foo = create_and_get_cgroup(FOO); - if (foo < 0) - goto err; - - if (join_cgroup(FOO)) - goto err; - - if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, - BPF_F_ALLOW_OVERRIDE)) { - log_err("Attaching prog to /foo"); - goto err; - } - - debug("Attached DROP prog. This ping in cgroup /foo should fail...\n"); - assert(system(PING_CMD) != 0); - - /* Create cgroup /foo/bar, get fd, and join it */ - bar = create_and_get_cgroup(BAR); - if (bar < 0) - goto err; - - if (join_cgroup(BAR)) - goto err; - - debug("Attached DROP prog. 
This ping in cgroup /foo/bar should fail...\n"); - assert(system(PING_CMD) != 0); - - if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, - BPF_F_ALLOW_OVERRIDE)) { - log_err("Attaching prog to /foo/bar"); - goto err; - } - - debug("Attached PASS prog. This ping in cgroup /foo/bar should pass...\n"); - assert(system(PING_CMD) == 0); - - if (bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS)) { - log_err("Detaching program from /foo/bar"); - goto err; - } - - debug("Detached PASS from /foo/bar while DROP is attached to /foo.\n" - "This ping in cgroup /foo/bar should fail...\n"); - assert(system(PING_CMD) != 0); - - if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, - BPF_F_ALLOW_OVERRIDE)) { - log_err("Attaching prog to /foo/bar"); - goto err; - } - - if (bpf_prog_detach(foo, BPF_CGROUP_INET_EGRESS)) { - log_err("Detaching program from /foo"); - goto err; - } - - debug("Attached PASS from /foo/bar and detached DROP from /foo.\n" - "This ping in cgroup /foo/bar should pass...\n"); - assert(system(PING_CMD) == 0); - - if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, - BPF_F_ALLOW_OVERRIDE)) { - log_err("Attaching prog to /foo/bar"); - goto err; - } - - if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0)) { - errno = 0; - log_err("Unexpected success attaching prog to /foo/bar"); - goto err; - } - - if (bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS)) { - log_err("Detaching program from /foo/bar"); - goto err; - } - - if (!bpf_prog_detach(foo, BPF_CGROUP_INET_EGRESS)) { - errno = 0; - log_err("Unexpected success in double detach from /foo"); - goto err; - } - - if (bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, 0)) { - log_err("Attaching non-overridable prog to /foo"); - goto err; - } - - if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0)) { - errno = 0; - log_err("Unexpected success attaching non-overridable prog to /foo/bar"); - goto err; - } - - if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, - BPF_F_ALLOW_OVERRIDE)) { - errno = 0; - log_err("Unexpected success attaching overridable prog to /foo/bar"); - goto err; - } - - if (!bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, - BPF_F_ALLOW_OVERRIDE)) { - errno = 0; - log_err("Unexpected success attaching overridable prog to /foo"); - goto err; - } - - if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, 0)) { - log_err("Attaching different non-overridable prog to /foo"); - goto err; - } - - goto out; - -err: - rc = 1; - -out: - close(foo); - close(bar); - cleanup_cgroup_environment(); - if (!rc) - printf("#override:PASS\n"); - else - printf("#override:FAIL\n"); - return rc; -} - -static int map_fd = -1; - -static int prog_load_cnt(int verdict, int val) -{ - int cgroup_storage_fd, percpu_cgroup_storage_fd; - - if (map_fd < 0) - map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0); - if (map_fd < 0) { - printf("failed to create map '%s'\n", strerror(errno)); - return -1; - } - - cgroup_storage_fd = bpf_create_map(BPF_MAP_TYPE_CGROUP_STORAGE, - sizeof(struct bpf_cgroup_storage_key), 8, 0, 0); - if (cgroup_storage_fd < 0) { - printf("failed to create map '%s'\n", strerror(errno)); - return -1; - } - - percpu_cgroup_storage_fd = bpf_create_map( - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, - sizeof(struct bpf_cgroup_storage_key), 8, 0, 0); - if (percpu_cgroup_storage_fd < 0) { - printf("failed to create map '%s'\n", strerror(errno)); - return -1; - } - - struct bpf_insn prog[] = { - BPF_MOV32_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* 
*(u32 *)(fp - 4) = r0 */ - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */ - BPF_LD_MAP_FD(BPF_REG_1, map_fd), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_1, val), /* r1 = 1 */ - BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */ - - BPF_LD_MAP_FD(BPF_REG_1, cgroup_storage_fd), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), - BPF_MOV64_IMM(BPF_REG_1, val), - BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_0, BPF_REG_1, 0, 0), - - BPF_LD_MAP_FD(BPF_REG_1, percpu_cgroup_storage_fd), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1), - BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0), - - BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */ - BPF_EXIT_INSN(), - }; - size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn); - int ret; - - ret = bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB, - prog, insns_cnt, "GPL", 0, - bpf_log_buf, BPF_LOG_BUF_SIZE); - - if (ret < 0) { - log_err("Loading program"); - printf("Output from verifier:\n%s\n-------\n", bpf_log_buf); - return 0; - } - close(cgroup_storage_fd); - return ret; -} - - -static int test_multiprog(void) -{ - __u32 prog_ids[4], prog_cnt = 0, attach_flags, saved_prog_id; - int cg1 = 0, cg2 = 0, cg3 = 0, cg4 = 0, cg5 = 0, key = 0; - int drop_prog, allow_prog[6] = {}, rc = 0; - unsigned long long value; - int i = 0; - - for (i = 0; i < 6; i++) { - allow_prog[i] = prog_load_cnt(1, 1 << i); - if (!allow_prog[i]) - goto err; - } - drop_prog = prog_load_cnt(0, 1); - if (!drop_prog) - goto err; - - if (setup_cgroup_environment()) - goto err; - - cg1 = create_and_get_cgroup("/cg1"); - if (cg1 < 0) - goto err; - cg2 = create_and_get_cgroup("/cg1/cg2"); - if (cg2 < 0) - goto err; - cg3 = create_and_get_cgroup("/cg1/cg2/cg3"); - if (cg3 < 0) - goto err; - cg4 = create_and_get_cgroup("/cg1/cg2/cg3/cg4"); - if (cg4 < 0) - goto err; - cg5 = create_and_get_cgroup("/cg1/cg2/cg3/cg4/cg5"); - if (cg5 < 0) - goto err; - - if (join_cgroup("/cg1/cg2/cg3/cg4/cg5")) - goto err; - - if (bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS, - BPF_F_ALLOW_MULTI)) { - log_err("Attaching prog to cg1"); - goto err; - } - if (!bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS, - BPF_F_ALLOW_MULTI)) { - log_err("Unexpected success attaching the same prog to cg1"); - goto err; - } - if (bpf_prog_attach(allow_prog[1], cg1, BPF_CGROUP_INET_EGRESS, - BPF_F_ALLOW_MULTI)) { - log_err("Attaching prog2 to cg1"); - goto err; - } - if (bpf_prog_attach(allow_prog[2], cg2, BPF_CGROUP_INET_EGRESS, - BPF_F_ALLOW_OVERRIDE)) { - log_err("Attaching prog to cg2"); - goto err; - } - if (bpf_prog_attach(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS, - BPF_F_ALLOW_MULTI)) { - log_err("Attaching prog to cg3"); - goto err; - } - if (bpf_prog_attach(allow_prog[4], cg4, BPF_CGROUP_INET_EGRESS, - BPF_F_ALLOW_OVERRIDE)) { - log_err("Attaching prog to cg4"); - goto err; - } - if (bpf_prog_attach(allow_prog[5], cg5, BPF_CGROUP_INET_EGRESS, 0)) { - log_err("Attaching prog to cg5"); - goto err; - } - assert(system(PING_CMD) == 0); - assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0); - assert(value == 1 + 2 + 8 + 32); - - /* query the number of effective progs in cg5 */ - assert(bpf_prog_query(cg5, 
BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE, - NULL, NULL, &prog_cnt) == 0); - assert(prog_cnt == 4); - /* retrieve prog_ids of effective progs in cg5 */ - assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE, - &attach_flags, prog_ids, &prog_cnt) == 0); - assert(prog_cnt == 4); - assert(attach_flags == 0); - saved_prog_id = prog_ids[0]; - /* check enospc handling */ - prog_ids[0] = 0; - prog_cnt = 2; - assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE, - &attach_flags, prog_ids, &prog_cnt) == -1 && - errno == ENOSPC); - assert(prog_cnt == 4); - /* check that prog_ids are returned even when buffer is too small */ - assert(prog_ids[0] == saved_prog_id); - /* retrieve prog_id of single attached prog in cg5 */ - prog_ids[0] = 0; - assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, 0, - NULL, prog_ids, &prog_cnt) == 0); - assert(prog_cnt == 1); - assert(prog_ids[0] == saved_prog_id); - - /* detach bottom program and ping again */ - if (bpf_prog_detach2(-1, cg5, BPF_CGROUP_INET_EGRESS)) { - log_err("Detaching prog from cg5"); - goto err; - } - value = 0; - assert(bpf_map_update_elem(map_fd, &key, &value, 0) == 0); - assert(system(PING_CMD) == 0); - assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0); - assert(value == 1 + 2 + 8 + 16); - - /* detach 3rd from bottom program and ping again */ - errno = 0; - if (!bpf_prog_detach2(0, cg3, BPF_CGROUP_INET_EGRESS)) { - log_err("Unexpected success on detach from cg3"); - goto err; - } - if (bpf_prog_detach2(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS)) { - log_err("Detaching from cg3"); - goto err; - } - value = 0; - assert(bpf_map_update_elem(map_fd, &key, &value, 0) == 0); - assert(system(PING_CMD) == 0); - assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0); - assert(value == 1 + 2 + 16); - - /* detach 2nd from bottom program and ping again */ - if (bpf_prog_detach2(-1, cg4, BPF_CGROUP_INET_EGRESS)) { - log_err("Detaching prog from cg4"); - goto err; - } - value = 0; - assert(bpf_map_update_elem(map_fd, &key, &value, 0) == 0); - assert(system(PING_CMD) == 0); - assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0); - assert(value == 1 + 2 + 4); - - prog_cnt = 4; - assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE, - &attach_flags, prog_ids, &prog_cnt) == 0); - assert(prog_cnt == 3); - assert(attach_flags == 0); - assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, 0, - NULL, prog_ids, &prog_cnt) == 0); - assert(prog_cnt == 0); - goto out; -err: - rc = 1; - -out: - for (i = 0; i < 6; i++) - if (allow_prog[i] > 0) - close(allow_prog[i]); - close(cg1); - close(cg2); - close(cg3); - close(cg4); - close(cg5); - cleanup_cgroup_environment(); - if (!rc) - printf("#multi:PASS\n"); - else - printf("#multi:FAIL\n"); - return rc; -} - -static int test_autodetach(void) -{ - __u32 prog_cnt = 4, attach_flags; - int allow_prog[2] = {0}; - __u32 prog_ids[2] = {0}; - int cg = 0, i, rc = -1; - void *ptr = NULL; - int attempts; - - for (i = 0; i < ARRAY_SIZE(allow_prog); i++) { - allow_prog[i] = prog_load_cnt(1, 1 << i); - if (!allow_prog[i]) - goto err; - } - - if (setup_cgroup_environment()) - goto err; - - /* create a cgroup, attach two programs and remember their ids */ - cg = create_and_get_cgroup("/cg_autodetach"); - if (cg < 0) - goto err; - - if (join_cgroup("/cg_autodetach")) - goto err; - - for (i = 0; i < ARRAY_SIZE(allow_prog); i++) { - if (bpf_prog_attach(allow_prog[i], cg, BPF_CGROUP_INET_EGRESS, - BPF_F_ALLOW_MULTI)) { - log_err("Attaching prog[%d] to cg:egress", i); - goto err; - 
} - } - - /* make sure that programs are attached and run some traffic */ - assert(bpf_prog_query(cg, BPF_CGROUP_INET_EGRESS, 0, &attach_flags, - prog_ids, &prog_cnt) == 0); - assert(system(PING_CMD) == 0); - - /* allocate some memory (4Mb) to pin the original cgroup */ - ptr = malloc(4 * (1 << 20)); - if (!ptr) - goto err; - - /* close programs and cgroup fd */ - for (i = 0; i < ARRAY_SIZE(allow_prog); i++) { - close(allow_prog[i]); - allow_prog[i] = 0; - } - - close(cg); - cg = 0; - - /* leave the cgroup and remove it. don't detach programs */ - cleanup_cgroup_environment(); - - /* wait for the asynchronous auto-detachment. - * wait for no more than 5 sec and give up. - */ - for (i = 0; i < ARRAY_SIZE(prog_ids); i++) { - for (attempts = 5; attempts >= 0; attempts--) { - int fd = bpf_prog_get_fd_by_id(prog_ids[i]); - - if (fd < 0) - break; - - /* don't leave the fd open */ - close(fd); - - if (!attempts) - goto err; - - sleep(1); - } - } - - rc = 0; -err: - for (i = 0; i < ARRAY_SIZE(allow_prog); i++) - if (allow_prog[i] > 0) - close(allow_prog[i]); - if (cg) - close(cg); - free(ptr); - cleanup_cgroup_environment(); - if (!rc) - printf("#autodetach:PASS\n"); - else - printf("#autodetach:FAIL\n"); - return rc; -} - -int main(void) -{ - int (*tests[])(void) = { - test_foo_bar, - test_multiprog, - test_autodetach, - }; - int errors = 0; - int i; - - for (i = 0; i < ARRAY_SIZE(tests); i++) - if (tests[i]()) - errors++; - - if (errors) - printf("test_cgroup_attach:FAIL\n"); - else - printf("test_cgroup_attach:PASS\n"); - - return errors ? EXIT_FAILURE : EXIT_SUCCESS; -} diff --git a/tools/testing/selftests/bpf/test_cpp.cpp b/tools/testing/selftests/bpf/test_cpp.cpp index f0eb2727b766..a8d2e9a87fbf 100644 --- a/tools/testing/selftests/bpf/test_cpp.cpp +++ b/tools/testing/selftests/bpf/test_cpp.cpp @@ -1,12 +1,16 @@ /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ -#include "libbpf.h" -#include "bpf.h" -#include "btf.h" +#include <iostream> +#include <bpf/libbpf.h> +#include <bpf/bpf.h> +#include <bpf/btf.h> +#include "test_core_extern.skel.h" /* do nothing, just make sure we can link successfully */ int main(int argc, char *argv[]) { + struct test_core_extern *skel; + /* libbpf.h */ libbpf_set_print(NULL); @@ -16,5 +20,11 @@ int main(int argc, char *argv[]) /* btf.h */ btf__new(NULL, 0); + /* BPF skeleton */ + skel = test_core_extern__open_and_load(); + test_core_extern__destroy(skel); + + std::cout << "DONE!" << std::endl; + return 0; } diff --git a/tools/testing/selftests/bpf/test_flow_dissector.sh b/tools/testing/selftests/bpf/test_flow_dissector.sh index a8485ae103d1..174b72a64a4c 100755 --- a/tools/testing/selftests/bpf/test_flow_dissector.sh +++ b/tools/testing/selftests/bpf/test_flow_dissector.sh @@ -139,6 +139,20 @@ echo "Testing IPv4 + GRE..." tc filter del dev lo ingress pref 1337 +echo "Testing port range..." +# Drops all IP/UDP packets coming from port 8-10 +tc filter add dev lo parent ffff: protocol ip pref 1337 flower ip_proto \ + udp src_port 8-10 action drop + +# Send 10 IPv4/UDP packets from port 7. Filter should not drop any. +./test_flow_dissector -i 4 -f 7 +# Send 10 IPv4/UDP packets from port 9. Filter should drop all. +./test_flow_dissector -i 4 -f 9 -F +# Send 10 IPv4/UDP packets from port 11. Filter should not drop any. +./test_flow_dissector -i 4 -f 11 + +tc filter del dev lo ingress pref 1337 + echo "Testing IPv6..." 
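(Aside on the test_cpp.cpp change above, not part of the patch: it now links against a generated BPF skeleton and calls its open-and-load/destroy entry points. A rough sketch of that usage pattern from plain C is below; it assumes test_core_extern.skel.h was generated with `bpftool gen skeleton`, and the error handling is illustrative only.)

/* Sketch of typical skeleton usage; mirrors the calls added to test_cpp.cpp. */
#include <stdio.h>
#include "test_core_extern.skel.h"

int use_skeleton(void)
{
	struct test_core_extern *skel;

	skel = test_core_extern__open_and_load();
	if (!skel) {
		fprintf(stderr, "failed to open and load BPF skeleton\n");
		return -1;
	}

	/* ... attach programs and poke at skel->bss / skel->data here ... */

	test_core_extern__destroy(skel);
	return 0;
}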
# Drops all IPv6/UDP packets coming from port 9 tc filter add dev lo parent ffff: protocol ipv6 pref 1337 flower ip_proto \ diff --git a/tools/testing/selftests/bpf/test_hashmap.c b/tools/testing/selftests/bpf/test_hashmap.c index b64094c981e3..c490e012c23f 100644 --- a/tools/testing/selftests/bpf/test_hashmap.c +++ b/tools/testing/selftests/bpf/test_hashmap.c @@ -8,7 +8,7 @@ #include <stdio.h> #include <errno.h> #include <linux/err.h> -#include "hashmap.h" +#include "bpf/hashmap.h" #define CHECK(condition, format...) ({ \ int __ret = !!(condition); \ diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index 7fa7d08a8104..bab1e6f1d8f1 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -8,7 +8,7 @@ #include <string.h> /* defined in test_progs.h */ -struct test_env env; +struct test_env env = {}; struct prog_test_def { const char *test_name; @@ -29,10 +29,19 @@ struct prog_test_def { static bool should_run(struct test_selector *sel, int num, const char *name) { - if (sel->name && sel->name[0] && !strstr(name, sel->name)) - return false; + int i; + + for (i = 0; i < sel->blacklist.cnt; i++) { + if (strstr(name, sel->blacklist.strs[i])) + return false; + } - if (!sel->num_set) + for (i = 0; i < sel->whitelist.cnt; i++) { + if (strstr(name, sel->whitelist.strs[i])) + return true; + } + + if (!sel->whitelist.cnt && !sel->num_set) return true; return num < sel->num_set_len && sel->num_set[num]; @@ -334,6 +343,7 @@ const char argp_program_doc[] = "BPF selftests test runner"; enum ARG_KEYS { ARG_TEST_NUM = 'n', ARG_TEST_NAME = 't', + ARG_TEST_NAME_BLACKLIST = 'b', ARG_VERIFIER_STATS = 's', ARG_VERBOSE = 'v', }; @@ -341,8 +351,10 @@ enum ARG_KEYS { static const struct argp_option opts[] = { { "num", ARG_TEST_NUM, "NUM", 0, "Run test number NUM only " }, - { "name", ARG_TEST_NAME, "NAME", 0, - "Run tests with names containing NAME" }, + { "name", ARG_TEST_NAME, "NAMES", 0, + "Run tests with names containing any string from NAMES list" }, + { "name-blacklist", ARG_TEST_NAME_BLACKLIST, "NAMES", 0, + "Don't run tests with names containing any string from NAMES list" }, { "verifier-stats", ARG_VERIFIER_STATS, NULL, 0, "Output verifier statistics", }, { "verbose", ARG_VERBOSE, "LEVEL", OPTION_ARG_OPTIONAL, @@ -359,6 +371,41 @@ static int libbpf_print_fn(enum libbpf_print_level level, return 0; } +static int parse_str_list(const char *s, struct str_set *set) +{ + char *input, *state = NULL, *next, **tmp, **strs = NULL; + int cnt = 0; + + input = strdup(s); + if (!input) + return -ENOMEM; + + set->cnt = 0; + set->strs = NULL; + + while ((next = strtok_r(state ? 
NULL : input, ",", &state))) { + tmp = realloc(strs, sizeof(*strs) * (cnt + 1)); + if (!tmp) + goto err; + strs = tmp; + + strs[cnt] = strdup(next); + if (!strs[cnt]) + goto err; + + cnt++; + } + + set->cnt = cnt; + set->strs = (const char **)strs; + free(input); + return 0; +err: + free(strs); + free(input); + return -ENOMEM; +} + int parse_num_list(const char *s, struct test_selector *sel) { int i, set_len = 0, num, start = 0, end = -1; @@ -449,12 +496,24 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) if (subtest_str) { *subtest_str = '\0'; - env->subtest_selector.name = strdup(subtest_str + 1); - if (!env->subtest_selector.name) + if (parse_str_list(subtest_str + 1, + &env->subtest_selector.whitelist)) + return -ENOMEM; + } + if (parse_str_list(arg, &env->test_selector.whitelist)) + return -ENOMEM; + break; + } + case ARG_TEST_NAME_BLACKLIST: { + char *subtest_str = strchr(arg, '/'); + + if (subtest_str) { + *subtest_str = '\0'; + if (parse_str_list(subtest_str + 1, + &env->subtest_selector.blacklist)) return -ENOMEM; } - env->test_selector.name = strdup(arg); - if (!env->test_selector.name) + if (parse_str_list(arg, &env->test_selector.blacklist)) return -ENOMEM; break; } @@ -617,7 +676,11 @@ int main(int argc, char **argv) printf("Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n", env.succ_cnt, env.sub_succ_cnt, env.skip_cnt, env.fail_cnt); + free(env.test_selector.blacklist.strs); + free(env.test_selector.whitelist.strs); free(env.test_selector.num_set); + free(env.subtest_selector.blacklist.strs); + free(env.subtest_selector.whitelist.strs); free(env.subtest_selector.num_set); return env.fail_cnt ? EXIT_FAILURE : EXIT_SUCCESS; diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h index 8477df835979..bcfa9ef23fda 100644 --- a/tools/testing/selftests/bpf/test_progs.h +++ b/tools/testing/selftests/bpf/test_progs.h @@ -35,7 +35,7 @@ typedef __u16 __sum16; #include "test_iptunnel_common.h" #include "bpf_util.h" -#include "bpf_endian.h" +#include <bpf/bpf_endian.h> #include "trace_helpers.h" #include "flow_dissector_load.h" @@ -46,8 +46,14 @@ enum verbosity { VERBOSE_SUPER, }; +struct str_set { + const char **strs; + int cnt; +}; + struct test_selector { - const char *name; + struct str_set whitelist; + struct str_set blacklist; bool *num_set; int num_set_len; }; @@ -100,6 +106,7 @@ extern struct ipv6_packet pkt_v6; #define _CHECK(condition, tag, duration, format...) 
({ \ int __ret = !!(condition); \ + int __save_errno = errno; \ if (__ret) { \ test__fail(); \ printf("%s:FAIL:%s ", __func__, tag); \ @@ -108,15 +115,18 @@ extern struct ipv6_packet pkt_v6; printf("%s:PASS:%s %d nsec\n", \ __func__, tag, duration); \ } \ + errno = __save_errno; \ __ret; \ }) #define CHECK_FAIL(condition) ({ \ int __ret = !!(condition); \ + int __save_errno = errno; \ if (__ret) { \ test__fail(); \ printf("%s:FAIL:%d\n", __func__, __LINE__); \ } \ + errno = __save_errno; \ __ret; \ }) diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c index 0e6652733462..52bf14955797 100644 --- a/tools/testing/selftests/bpf/test_sock.c +++ b/tools/testing/selftests/bpf/test_sock.c @@ -13,7 +13,7 @@ #include <bpf/bpf.h> #include "cgroup_helpers.h" -#include "bpf_endian.h" +#include <bpf/bpf_endian.h> #include "bpf_rlimit.h" #include "bpf_util.h" diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c index 4a851513c842..779e11da979c 100644 --- a/tools/testing/selftests/bpf/test_sockmap.c +++ b/tools/testing/selftests/bpf/test_sockmap.c @@ -331,7 +331,7 @@ static int msg_loop_sendpage(int fd, int iov_length, int cnt, FILE *file; int i, fp; - file = fopen(".sendpage_tst.tmp", "w+"); + file = tmpfile(); if (!file) { perror("create file for sendpage"); return 1; @@ -340,13 +340,8 @@ static int msg_loop_sendpage(int fd, int iov_length, int cnt, fwrite(&k, sizeof(char), 1, file); fflush(file); fseek(file, 0, SEEK_SET); - fclose(file); - fp = open(".sendpage_tst.tmp", O_RDONLY); - if (fp < 0) { - perror("reopen file for sendpage"); - return 1; - } + fp = fileno(file); clock_gettime(CLOCK_MONOTONIC, &s->start); for (i = 0; i < cnt; i++) { @@ -354,11 +349,11 @@ static int msg_loop_sendpage(int fd, int iov_length, int cnt, if (!drop && sent < 0) { perror("send loop error"); - close(fp); + fclose(file); return sent; } else if (drop && sent >= 0) { printf("sendpage loop error expected: %i\n", sent); - close(fp); + fclose(file); return -EIO; } @@ -366,7 +361,7 @@ static int msg_loop_sendpage(int fd, int iov_length, int cnt, s->bytes_sent += sent; } clock_gettime(CLOCK_MONOTONIC, &s->end); - close(fp); + fclose(file); return 0; } diff --git a/tools/testing/selftests/bpf/test_sockmap_kern.h b/tools/testing/selftests/bpf/test_sockmap_kern.h index d008b41b7d8d..9b4d3a68a91a 100644 --- a/tools/testing/selftests/bpf/test_sockmap_kern.h +++ b/tools/testing/selftests/bpf/test_sockmap_kern.h @@ -12,8 +12,8 @@ #include <linux/tcp.h> #include <linux/pkt_cls.h> #include <sys/socket.h> -#include "bpf_helpers.h" -#include "bpf_endian.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> /* Sockmap sample program connects a client and a backend together * using cgroups. 
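(Aside on the test_sockmap.c change above, not part of the patch: msg_loop_sendpage() now uses tmpfile() instead of a named .sendpage_tst.tmp file, so the scratch file is cleaned up automatically and the extra open()/close() pair goes away, while fileno() still yields a raw descriptor for the sendpage/sendfile path. A standalone sketch of that pattern, with a made-up payload, follows.)

/* Sketch of the tmpfile()/fileno() pattern; payload and usage are illustrative. */
#include <stdio.h>
#include <string.h>

int write_scratch_file(void)
{
	const char payload[] = "0123456789";
	FILE *file;
	int fd;

	file = tmpfile();	/* anonymous temp file, removed on fclose() or exit */
	if (!file)
		return -1;

	fwrite(payload, sizeof(char), strlen(payload), file);
	fflush(file);
	fseek(file, 0, SEEK_SET);

	fd = fileno(file);	/* raw fd usable with sendfile()-style APIs */
	/* ... use fd ... */

	fclose(file);		/* closes fd and releases the temporary file */
	return 0;
}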
diff --git a/tools/testing/selftests/bpf/test_sysctl.c b/tools/testing/selftests/bpf/test_sysctl.c index 40bd93a6e7ae..d196e2a4a6e0 100644 --- a/tools/testing/selftests/bpf/test_sysctl.c +++ b/tools/testing/selftests/bpf/test_sysctl.c @@ -13,7 +13,7 @@ #include <bpf/bpf.h> #include <bpf/libbpf.h> -#include "bpf_endian.h" +#include <bpf/bpf_endian.h> #include "bpf_rlimit.h" #include "bpf_util.h" #include "cgroup_helpers.h" diff --git a/tools/testing/selftests/bpf/trace_helpers.h b/tools/testing/selftests/bpf/trace_helpers.h index aa4dcfe18050..0383c9b8adc1 100644 --- a/tools/testing/selftests/bpf/trace_helpers.h +++ b/tools/testing/selftests/bpf/trace_helpers.h @@ -2,7 +2,7 @@ #ifndef __TRACE_HELPER_H #define __TRACE_HELPER_H -#include <libbpf.h> +#include <bpf/libbpf.h> struct ksym { long addr;
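(Aside, not part of the patch: several kprobe programs above are converted from open-coded struct pt_regs *ctx handlers to the BPF_KPROBE() macro provided by the selftests' bpf_trace_helpers.h, with PT_REGS_PARMn() coming from <bpf/bpf_tracing.h>. A rough before/after sketch is shown here; the argument list for __sys_connect is an assumption for illustration, not something this diff spells out.)

/* Hypothetical illustration of the BPF_KPROBE() conversion above. */
#include <linux/ptrace.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_trace_helpers.h"

/* Old style: raw pt_regs context, arguments fetched by hand:
 *
 *	SEC("kprobe/__sys_connect")
 *	int handle_sys_connect(struct pt_regs *ctx)
 *	{
 *		void *uaddr = (void *)PT_REGS_PARM2(ctx);
 *		...
 *	}
 *
 * New style: BPF_KPROBE() hides the pt_regs plumbing behind typed arguments.
 */
SEC("kprobe/__sys_connect")
int BPF_KPROBE(handle_sys_connect, int fd, void *uservaddr, int addrlen)
{
	bpf_printk("connect fd=%d addrlen=%d\n", fd, addrlen);
	return 0;
}

char _license[] SEC("license") = "GPL";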