Diffstat (limited to 'tools')
-rw-r--r--  tools/bpf/bpftool/.gitignore | 2
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool-prog.rst | 19
-rw-r--r--  tools/bpf/bpftool/Makefile | 36
-rw-r--r--  tools/bpf/bpftool/bash-completion/bpftool | 76
-rw-r--r--  tools/bpf/bpftool/btf.c | 5
-rw-r--r--  tools/bpf/bpftool/common.c | 40
-rw-r--r--  tools/bpf/bpftool/main.c | 7
-rw-r--r--  tools/bpf/bpftool/main.h | 7
-rw-r--r--  tools/bpf/bpftool/map.c | 2
-rw-r--r--  tools/bpf/bpftool/prog.c | 454
-rw-r--r--  tools/bpf/bpftool/skeleton/profiler.bpf.c | 119
-rw-r--r--  tools/bpf/bpftool/skeleton/profiler.h | 46
-rw-r--r--  tools/bpf/runqslower/runqslower.bpf.c | 4
-rw-r--r--  tools/build/feature/Makefile | 9
-rw-r--r--  tools/build/feature/test-clang-bpf-global-var.c | 4
-rw-r--r--  tools/include/uapi/linux/bpf.h | 223
-rw-r--r--  tools/include/uapi/linux/types.h (renamed from tools/testing/selftests/bpf/include/uapi/linux/types.h) | 0
-rw-r--r--  tools/lib/bpf/bpf_tracing.h | 223
-rw-r--r--  tools/lib/bpf/btf_dump.c | 10
-rw-r--r--  tools/lib/bpf/libbpf.c | 156
-rw-r--r--  tools/lib/bpf/libbpf.h | 5
-rw-r--r--  tools/lib/bpf/libbpf.map | 5
-rw-r--r--  tools/scripts/Makefile.include | 1
-rw-r--r--  tools/testing/selftests/bpf/.gitignore | 1
-rw-r--r--  tools/testing/selftests/bpf/Makefile | 26
-rw-r--r--  tools/testing/selftests/bpf/bpf_tcp_helpers.h | 2
-rw-r--r--  tools/testing/selftests/bpf/bpf_trace_helpers.h | 120
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/fentry_fexit.c | 12
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/fentry_test.c | 14
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/fexit_test.c | 69
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/link_pinning.c | 105
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/modify_return.c | 65
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c | 88
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/select_reuseport.c | 6
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/skb_ctx.c | 1
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/sockmap_listen.c | 259
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/tcp_rtt.c | 32
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/vmlinux.c | 43
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c | 53
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_dctcp.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/fentry_test.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/fexit_bpf2bpf_simple.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/fexit_test.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/kfree_skb.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/modify_return.c | 49
-rw-r--r--  tools/testing/selftests/bpf/progs/test_attach_probe.c | 3
-rw-r--r--  tools/testing/selftests/bpf/progs/test_link_pinning.c | 25
-rw-r--r--  tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c | 37
-rw-r--r--  tools/testing/selftests/bpf/progs/test_overhead.c | 7
-rw-r--r--  tools/testing/selftests/bpf/progs/test_perf_branches.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/test_perf_buffer.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/test_probe_user.c | 1
-rw-r--r--  tools/testing/selftests/bpf/progs/test_skb_ctx.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/test_trampoline_count.c | 3
-rw-r--r--  tools/testing/selftests/bpf/progs/test_vmlinux.c | 84
-rw-r--r--  tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c | 26
-rw-r--r--  tools/testing/selftests/bpf/test_current_pid_tgid_new_ns.c | 159
-rw-r--r--  tools/testing/selftests/bpf/test_progs.c | 28
-rw-r--r--  tools/testing/selftests/bpf/test_progs.h | 8
-rw-r--r--  tools/testing/selftests/bpf/verifier/ctx_skb.c | 47
65 files changed, 2380 insertions(+), 472 deletions(-)
diff --git a/tools/bpf/bpftool/.gitignore b/tools/bpf/bpftool/.gitignore
index b13926432b84..8d6e8901ed2b 100644
--- a/tools/bpf/bpftool/.gitignore
+++ b/tools/bpf/bpftool/.gitignore
@@ -1,7 +1,9 @@
*.d
+/_bpftool
/bpftool
bpftool*.8
bpf-helpers.*
FEATURE-DUMP.bpftool
feature
libbpf
+profiler.skel.h
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
index 46862e85fed2..9f19404f470e 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -30,6 +30,7 @@ PROG COMMANDS
| **bpftool** **prog detach** *PROG* *ATTACH_TYPE* [*MAP*]
| **bpftool** **prog tracelog**
| **bpftool** **prog run** *PROG* **data_in** *FILE* [**data_out** *FILE* [**data_size_out** *L*]] [**ctx_in** *FILE* [**ctx_out** *FILE* [**ctx_size_out** *M*]]] [**repeat** *N*]
+| **bpftool** **prog profile** *PROG* [**duration** *DURATION*] *METRICs*
| **bpftool** **prog help**
|
| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* }
@@ -48,6 +49,9 @@ PROG COMMANDS
| *ATTACH_TYPE* := {
| **msg_verdict** | **stream_verdict** | **stream_parser** | **flow_dissector**
| }
+| *METRIC* := {
+| **cycles** | **instructions** | **l1d_loads** | **llc_misses**
+| }
DESCRIPTION
@@ -189,6 +193,12 @@ DESCRIPTION
not all of them can take the **ctx_in**/**ctx_out**
arguments. bpftool does not perform checks on program types.
+ **bpftool prog profile** *PROG* [**duration** *DURATION*] *METRICs*
+ Profile *METRICs* for bpf program *PROG* for *DURATION*
+ seconds or until the user hits Ctrl-C. *DURATION* is optional.
+ If *DURATION* is not specified, the profiling will run for up to
+ UINT_MAX seconds.
+
**bpftool prog help**
Print short help message.
@@ -311,6 +321,15 @@ EXAMPLES
**# rm /sys/fs/bpf/xdp1**
+|
+| **# bpftool prog profile id 337 duration 10 cycles instructions llc_misses**
+
+::
+ 51397 run_cnt
+ 40176203 cycles (83.05%)
+ 42518139 instructions # 1.06 insns per cycle (83.39%)
+ 123 llc_misses # 2.89 LLC misses per million insns (83.15%)
+
SEE ALSO
========
**bpf**\ (2),
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index c4e810335810..f584d1fdfc64 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -59,10 +59,12 @@ LIBS = $(LIBBPF) -lelf -lz
INSTALL ?= install
RM ?= rm -f
+CLANG ?= clang
FEATURE_USER = .bpftool
-FEATURE_TESTS = libbfd disassembler-four-args reallocarray zlib
-FEATURE_DISPLAY = libbfd disassembler-four-args zlib
+FEATURE_TESTS = libbfd disassembler-four-args reallocarray zlib \
+ clang-bpf-global-var
+FEATURE_DISPLAY = libbfd disassembler-four-args zlib clang-bpf-global-var
check_feat := 1
NON_CHECK_FEAT_TARGETS := clean uninstall doc doc-clean doc-install doc-uninstall
@@ -110,14 +112,39 @@ SRCS += $(BFD_SRCS)
endif
OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o
+_OBJS = $(filter-out $(OUTPUT)prog.o,$(OBJS)) $(OUTPUT)_prog.o
+
+ifeq ($(feature-clang-bpf-global-var),1)
+ __OBJS = $(OBJS)
+else
+ __OBJS = $(_OBJS)
+endif
+
+$(OUTPUT)_prog.o: prog.c
+ $(QUIET_CC)$(COMPILE.c) -MMD -DBPFTOOL_WITHOUT_SKELETONS -o $@ $<
+
+$(OUTPUT)_bpftool: $(_OBJS) $(LIBBPF)
+ $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(_OBJS) $(LIBS)
+
+skeleton/profiler.bpf.o: skeleton/profiler.bpf.c $(LIBBPF)
+ $(QUIET_CLANG)$(CLANG) \
+ -I$(srctree)/tools/include/uapi/ \
+ -I$(LIBBPF_PATH) -I$(srctree)/tools/lib \
+ -g -O2 -target bpf -c $< -o $@
+
+profiler.skel.h: $(OUTPUT)_bpftool skeleton/profiler.bpf.o
+ $(QUIET_GEN)$(OUTPUT)./_bpftool gen skeleton skeleton/profiler.bpf.o > $@
+
+$(OUTPUT)prog.o: prog.c profiler.skel.h
+ $(QUIET_CC)$(COMPILE.c) -MMD -o $@ $<
$(OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
$(QUIET_CC)$(COMPILE.c) -MMD -o $@ $<
$(OUTPUT)feature.o: | zdep
-$(OUTPUT)bpftool: $(OBJS) $(LIBBPF)
- $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(OBJS) $(LIBS)
+$(OUTPUT)bpftool: $(__OBJS) $(LIBBPF)
+ $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(__OBJS) $(LIBS)
$(OUTPUT)%.o: %.c
$(QUIET_CC)$(COMPILE.c) -MMD -o $@ $<
@@ -125,6 +152,7 @@ $(OUTPUT)%.o: %.c
clean: $(LIBBPF)-clean
$(call QUIET_CLEAN, bpftool)
$(Q)$(RM) -- $(OUTPUT)bpftool $(OUTPUT)*.o $(OUTPUT)*.d
+ $(Q)$(RM) -- $(OUTPUT)_bpftool profiler.skel.h skeleton/profiler.bpf.o
$(Q)$(RM) -r -- $(OUTPUT)libbpf/
$(call QUIET_CLEAN, core-gen)
$(Q)$(RM) -- $(OUTPUT)FEATURE-DUMP.bpftool
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index f2838a658339..9b0534f558f1 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -337,6 +337,7 @@ _bpftool()
local PROG_TYPE='id pinned tag name'
local MAP_TYPE='id pinned name'
+ local METRIC_TYPE='cycles instructions l1d_loads llc_misses'
case $command in
show|list)
[[ $prev != "$command" ]] && return 0
@@ -388,7 +389,7 @@ _bpftool()
_bpftool_get_prog_ids
;;
name)
- _bpftool_get_map_names
+ _bpftool_get_prog_names
;;
pinned)
_filedir
@@ -498,9 +499,51 @@ _bpftool()
tracelog)
return 0
;;
+ profile)
+ case $cword in
+ 3)
+ COMPREPLY=( $( compgen -W "$PROG_TYPE" -- "$cur" ) )
+ return 0
+ ;;
+ 4)
+ case $prev in
+ id)
+ _bpftool_get_prog_ids
+ ;;
+ name)
+ _bpftool_get_prog_names
+ ;;
+ pinned)
+ _filedir
+ ;;
+ esac
+ return 0
+ ;;
+ 5)
+ COMPREPLY=( $( compgen -W "$METRIC_TYPE duration" -- "$cur" ) )
+ return 0
+ ;;
+ 6)
+ case $prev in
+ duration)
+ return 0
+ ;;
+ *)
+ COMPREPLY=( $( compgen -W "$METRIC_TYPE" -- "$cur" ) )
+ return 0
+ ;;
+ esac
+ return 0
+ ;;
+ *)
+ COMPREPLY=( $( compgen -W "$METRIC_TYPE" -- "$cur" ) )
+ return 0
+ ;;
+ esac
+ ;;
run)
- if [[ ${#words[@]} -lt 5 ]]; then
- _filedir
+ if [[ ${#words[@]} -eq 4 ]]; then
+ COMPREPLY=( $( compgen -W "$PROG_TYPE" -- "$cur" ) )
return 0
fi
case $prev in
@@ -508,6 +551,10 @@ _bpftool()
_bpftool_get_prog_ids
return 0
;;
+ name)
+ _bpftool_get_prog_names
+ return 0
+ ;;
data_in|data_out|ctx_in|ctx_out)
_filedir
return 0
@@ -525,7 +572,7 @@ _bpftool()
*)
[[ $prev == $object ]] && \
COMPREPLY=( $( compgen -W 'dump help pin attach detach \
- load loadall show list tracelog run' -- "$cur" ) )
+ load loadall show list tracelog run profile' -- "$cur" ) )
;;
esac
;;
@@ -713,11 +760,17 @@ _bpftool()
esac
;;
pin)
- if [[ $prev == "$command" ]]; then
- COMPREPLY=( $( compgen -W "$PROG_TYPE" -- "$cur" ) )
- else
- _filedir
- fi
+ case $prev in
+ $command)
+ COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) )
+ ;;
+ id)
+ _bpftool_get_map_ids
+ ;;
+ name)
+ _bpftool_get_map_names
+ ;;
+ esac
return 0
;;
event_pipe)
@@ -844,7 +897,7 @@ _bpftool()
case $command in
skeleton)
_filedir
- ;;
+ ;;
*)
[[ $prev == $object ]] && \
COMPREPLY=( $( compgen -W 'skeleton help' -- "$cur" ) )
@@ -944,6 +997,9 @@ _bpftool()
id)
_bpftool_get_prog_ids
;;
+ name)
+ _bpftool_get_prog_names
+ ;;
pinned)
_filedir
;;
diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c
index b3745ed711ba..bcaf55b59498 100644
--- a/tools/bpf/bpftool/btf.c
+++ b/tools/bpf/bpftool/btf.c
@@ -389,6 +389,9 @@ static int dump_btf_c(const struct btf *btf,
if (IS_ERR(d))
return PTR_ERR(d);
+ printf("#ifndef __VMLINUX_H__\n");
+ printf("#define __VMLINUX_H__\n");
+ printf("\n");
printf("#ifndef BPF_NO_PRESERVE_ACCESS_INDEX\n");
printf("#pragma clang attribute push (__attribute__((preserve_access_index)), apply_to = record)\n");
printf("#endif\n\n");
@@ -412,6 +415,8 @@ static int dump_btf_c(const struct btf *btf,
printf("#ifndef BPF_NO_PRESERVE_ACCESS_INDEX\n");
printf("#pragma clang attribute pop\n");
printf("#endif\n");
+ printf("\n");
+ printf("#endif /* __VMLINUX_H__ */\n");
done:
btf_dump__free(d);
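
Editor's note: with this change, the C header emitted by "bpftool btf dump ... format c" is wrapped in an include guard, so it can be detected elsewhere (bpf_tracing.h below checks __VMLINUX_H__). A sketch of the resulting output shape, assembled from the printf calls above, with the dumped type definitions elided:

	#ifndef __VMLINUX_H__
	#define __VMLINUX_H__

	#ifndef BPF_NO_PRESERVE_ACCESS_INDEX
	#pragma clang attribute push (__attribute__((preserve_access_index)), apply_to = record)
	#endif

	/* ... BTF-derived type definitions ... */

	#ifndef BPF_NO_PRESERVE_ACCESS_INDEX
	#pragma clang attribute pop
	#endif

	#endif /* __VMLINUX_H__ */
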
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index b75b8ec5469c..f2223dbdfb0a 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -211,39 +211,14 @@ int do_pin_fd(int fd, const char *name)
return err;
}
-int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
+int do_pin_any(int argc, char **argv, int (*get_fd)(int *, char ***))
{
- unsigned int id;
- char *endptr;
int err;
int fd;
- if (argc < 3) {
- p_err("too few arguments, id ID and FILE path is required");
- return -1;
- } else if (argc > 3) {
- p_err("too many arguments");
- return -1;
- }
-
- if (!is_prefix(*argv, "id")) {
- p_err("expected 'id' got %s", *argv);
- return -1;
- }
- NEXT_ARG();
-
- id = strtoul(*argv, &endptr, 0);
- if (*endptr) {
- p_err("can't parse %s as ID", *argv);
- return -1;
- }
- NEXT_ARG();
-
- fd = get_fd_by_id(id);
- if (fd < 0) {
- p_err("can't open object by id (%u): %s", id, strerror(errno));
- return -1;
- }
+ fd = get_fd(&argc, &argv);
+ if (fd < 0)
+ return fd;
err = do_pin_fd(fd, *argv);
@@ -597,3 +572,10 @@ int parse_u32_arg(int *argc, char ***argv, __u32 *val, const char *what)
return 0;
}
+
+int __printf(2, 0)
+print_all_levels(__maybe_unused enum libbpf_print_level level,
+ const char *format, va_list args)
+{
+ return vfprintf(stderr, format, args);
+}
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index 6d41bbfc6459..06449e846e4b 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -79,13 +79,6 @@ static int do_version(int argc, char **argv)
return 0;
}
-static int __printf(2, 0)
-print_all_levels(__maybe_unused enum libbpf_print_level level,
- const char *format, va_list args)
-{
- return vfprintf(stderr, format, args);
-}
-
int cmd_select(const struct cmd *cmds, int argc, char **argv,
int (*help)(int argc, char **argv))
{
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index 724ef9d941d3..5f6dccd43622 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -14,6 +14,8 @@
#include <linux/hashtable.h>
#include <tools/libc_compat.h>
+#include <bpf/libbpf.h>
+
#include "json_writer.h"
#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr))
@@ -146,7 +148,7 @@ char *get_fdinfo(int fd, const char *key);
int open_obj_pinned(char *path, bool quiet);
int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type);
int mount_bpffs_for_pin(const char *name);
-int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32));
+int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(int *, char ***));
int do_pin_fd(int fd, const char *name);
int do_prog(int argc, char **arg);
@@ -229,4 +231,7 @@ struct tcmsg;
int do_xdp_dump(struct ifinfomsg *ifinfo, struct nlattr **tb);
int do_filter_dump(struct tcmsg *ifinfo, struct nlattr **tb, const char *kind,
const char *devname, int ifindex);
+
+int print_all_levels(__maybe_unused enum libbpf_print_level level,
+ const char *format, va_list args);
#endif
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index e6c85680b34d..693a632f6813 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -1384,7 +1384,7 @@ static int do_pin(int argc, char **argv)
{
int err;
- err = do_pin_any(argc, argv, bpf_map_get_fd_by_id);
+ err = do_pin_any(argc, argv, map_parse_fd);
if (!err && json_output)
jsonw_null(json_wtr);
return err;
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 1996e67a2f00..f6a5974a7b0a 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -4,6 +4,7 @@
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
+#include <signal.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
@@ -11,10 +12,13 @@
#include <time.h>
#include <unistd.h>
#include <net/if.h>
+#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
+#include <sys/syscall.h>
#include <linux/err.h>
+#include <linux/perf_event.h>
#include <linux/sizes.h>
#include <bpf/bpf.h>
@@ -809,7 +813,7 @@ static int do_pin(int argc, char **argv)
{
int err;
- err = do_pin_any(argc, argv, bpf_prog_get_fd_by_id);
+ err = do_pin_any(argc, argv, prog_parse_fd);
if (!err && json_output)
jsonw_null(json_wtr);
return err;
@@ -1243,6 +1247,25 @@ free_data_in:
return err;
}
+static int
+get_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
+ enum bpf_attach_type *expected_attach_type)
+{
+ libbpf_print_fn_t print_backup;
+ int ret;
+
+ ret = libbpf_prog_type_by_name(name, prog_type, expected_attach_type);
+ if (!ret)
+ return ret;
+
+ /* libbpf_prog_type_by_name() failed, let's re-run with debug level */
+ print_backup = libbpf_set_print(print_all_levels);
+ ret = libbpf_prog_type_by_name(name, prog_type, expected_attach_type);
+ libbpf_set_print(print_backup);
+
+ return ret;
+}
+
static int load_with_options(int argc, char **argv, bool first_prog_only)
{
enum bpf_prog_type common_prog_type = BPF_PROG_TYPE_UNSPEC;
@@ -1292,8 +1315,8 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
strcat(type, *argv);
strcat(type, "/");
- err = libbpf_prog_type_by_name(type, &common_prog_type,
- &expected_attach_type);
+ err = get_prog_type_by_name(type, &common_prog_type,
+ &expected_attach_type);
free(type);
if (err < 0)
goto err_free_reuse_maps;
@@ -1392,8 +1415,8 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
if (prog_type == BPF_PROG_TYPE_UNSPEC) {
const char *sec_name = bpf_program__title(pos, false);
- err = libbpf_prog_type_by_name(sec_name, &prog_type,
- &expected_attach_type);
+ err = get_prog_type_by_name(sec_name, &prog_type,
+ &expected_attach_type);
if (err < 0)
goto err_close_obj;
}
@@ -1537,6 +1560,422 @@ static int do_loadall(int argc, char **argv)
return load_with_options(argc, argv, false);
}
+#ifdef BPFTOOL_WITHOUT_SKELETONS
+
+static int do_profile(int argc, char **argv)
+{
+ p_err("bpftool prog profile command is not supported. Please build bpftool with clang >= 10.0.0");
+ return 0;
+}
+
+#else /* BPFTOOL_WITHOUT_SKELETONS */
+
+#include "profiler.skel.h"
+
+struct profile_metric {
+ const char *name;
+ struct bpf_perf_event_value val;
+ struct perf_event_attr attr;
+ bool selected;
+
+ /* calculate ratios like instructions per cycle */
+ const int ratio_metric; /* 0 for N/A, 1 for index 0 (cycles) */
+ const char *ratio_desc;
+ const float ratio_mul;
+} metrics[] = {
+ {
+ .name = "cycles",
+ .attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ .exclude_user = 1,
+ },
+ },
+ {
+ .name = "instructions",
+ .attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_INSTRUCTIONS,
+ .exclude_user = 1,
+ },
+ .ratio_metric = 1,
+ .ratio_desc = "insns per cycle",
+ .ratio_mul = 1.0,
+ },
+ {
+ .name = "l1d_loads",
+ .attr = {
+ .type = PERF_TYPE_HW_CACHE,
+ .config =
+ PERF_COUNT_HW_CACHE_L1D |
+ (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+ (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
+ .exclude_user = 1,
+ },
+ },
+ {
+ .name = "llc_misses",
+ .attr = {
+ .type = PERF_TYPE_HW_CACHE,
+ .config =
+ PERF_COUNT_HW_CACHE_LL |
+ (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+ (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
+ .exclude_user = 1
+ },
+ .ratio_metric = 2,
+ .ratio_desc = "LLC misses per million insns",
+ .ratio_mul = 1e6,
+ },
+};
+
+static __u64 profile_total_count;
+
+#define MAX_NUM_PROFILE_METRICS 4
+
+static int profile_parse_metrics(int argc, char **argv)
+{
+ unsigned int metric_cnt;
+ int selected_cnt = 0;
+ unsigned int i;
+
+ metric_cnt = sizeof(metrics) / sizeof(struct profile_metric);
+
+ while (argc > 0) {
+ for (i = 0; i < metric_cnt; i++) {
+ if (is_prefix(argv[0], metrics[i].name)) {
+ if (!metrics[i].selected)
+ selected_cnt++;
+ metrics[i].selected = true;
+ break;
+ }
+ }
+ if (i == metric_cnt) {
+ p_err("unknown metric %s", argv[0]);
+ return -1;
+ }
+ NEXT_ARG();
+ }
+ if (selected_cnt > MAX_NUM_PROFILE_METRICS) {
+ p_err("too many (%d) metrics, please specify no more than %d metrics at at time",
+ selected_cnt, MAX_NUM_PROFILE_METRICS);
+ return -1;
+ }
+ return selected_cnt;
+}
+
+static void profile_read_values(struct profiler_bpf *obj)
+{
+ __u32 m, cpu, num_cpu = obj->rodata->num_cpu;
+ int reading_map_fd, count_map_fd;
+ __u64 counts[num_cpu];
+ __u32 key = 0;
+ int err;
+
+ reading_map_fd = bpf_map__fd(obj->maps.accum_readings);
+ count_map_fd = bpf_map__fd(obj->maps.counts);
+ if (reading_map_fd < 0 || count_map_fd < 0) {
+ p_err("failed to get fd for map");
+ return;
+ }
+
+ err = bpf_map_lookup_elem(count_map_fd, &key, counts);
+ if (err) {
+ p_err("failed to read count_map: %s", strerror(errno));
+ return;
+ }
+
+ profile_total_count = 0;
+ for (cpu = 0; cpu < num_cpu; cpu++)
+ profile_total_count += counts[cpu];
+
+ for (m = 0; m < ARRAY_SIZE(metrics); m++) {
+ struct bpf_perf_event_value values[num_cpu];
+
+ if (!metrics[m].selected)
+ continue;
+
+ err = bpf_map_lookup_elem(reading_map_fd, &key, values);
+ if (err) {
+ p_err("failed to read reading_map: %s",
+ strerror(errno));
+ return;
+ }
+ for (cpu = 0; cpu < num_cpu; cpu++) {
+ metrics[m].val.counter += values[cpu].counter;
+ metrics[m].val.enabled += values[cpu].enabled;
+ metrics[m].val.running += values[cpu].running;
+ }
+ key++;
+ }
+}
+
+static void profile_print_readings_json(void)
+{
+ __u32 m;
+
+ jsonw_start_array(json_wtr);
+ for (m = 0; m < ARRAY_SIZE(metrics); m++) {
+ if (!metrics[m].selected)
+ continue;
+ jsonw_start_object(json_wtr);
+ jsonw_string_field(json_wtr, "metric", metrics[m].name);
+ jsonw_lluint_field(json_wtr, "run_cnt", profile_total_count);
+ jsonw_lluint_field(json_wtr, "value", metrics[m].val.counter);
+ jsonw_lluint_field(json_wtr, "enabled", metrics[m].val.enabled);
+ jsonw_lluint_field(json_wtr, "running", metrics[m].val.running);
+
+ jsonw_end_object(json_wtr);
+ }
+ jsonw_end_array(json_wtr);
+}
+
+static void profile_print_readings_plain(void)
+{
+ __u32 m;
+
+ printf("\n%18llu %-20s\n", profile_total_count, "run_cnt");
+ for (m = 0; m < ARRAY_SIZE(metrics); m++) {
+ struct bpf_perf_event_value *val = &metrics[m].val;
+ int r;
+
+ if (!metrics[m].selected)
+ continue;
+ printf("%18llu %-20s", val->counter, metrics[m].name);
+
+ r = metrics[m].ratio_metric - 1;
+ if (r >= 0 && metrics[r].selected &&
+ metrics[r].val.counter > 0) {
+ printf("# %8.2f %-30s",
+ val->counter * metrics[m].ratio_mul /
+ metrics[r].val.counter,
+ metrics[m].ratio_desc);
+ } else {
+ printf("%-41s", "");
+ }
+
+ if (val->enabled > val->running)
+ printf("(%4.2f%%)",
+ val->running * 100.0 / val->enabled);
+ printf("\n");
+ }
+}
+
+static void profile_print_readings(void)
+{
+ if (json_output)
+ profile_print_readings_json();
+ else
+ profile_print_readings_plain();
+}
+
+static char *profile_target_name(int tgt_fd)
+{
+ struct bpf_prog_info_linear *info_linear;
+ struct bpf_func_info *func_info;
+ const struct btf_type *t;
+ char *name = NULL;
+ struct btf *btf;
+
+ info_linear = bpf_program__get_prog_info_linear(
+ tgt_fd, 1UL << BPF_PROG_INFO_FUNC_INFO);
+ if (IS_ERR_OR_NULL(info_linear)) {
+ p_err("failed to get info_linear for prog FD %d", tgt_fd);
+ return NULL;
+ }
+
+ if (info_linear->info.btf_id == 0 ||
+ btf__get_from_id(info_linear->info.btf_id, &btf)) {
+ p_err("prog FD %d doesn't have valid btf", tgt_fd);
+ goto out;
+ }
+
+ func_info = (struct bpf_func_info *)(info_linear->info.func_info);
+ t = btf__type_by_id(btf, func_info[0].type_id);
+ if (!t) {
+ p_err("btf %d doesn't have type %d",
+ info_linear->info.btf_id, func_info[0].type_id);
+ goto out;
+ }
+ name = strdup(btf__name_by_offset(btf, t->name_off));
+out:
+ free(info_linear);
+ return name;
+}
+
+static struct profiler_bpf *profile_obj;
+static int profile_tgt_fd = -1;
+static char *profile_tgt_name;
+static int *profile_perf_events;
+static int profile_perf_event_cnt;
+
+static void profile_close_perf_events(struct profiler_bpf *obj)
+{
+ int i;
+
+ for (i = profile_perf_event_cnt - 1; i >= 0; i--)
+ close(profile_perf_events[i]);
+
+ free(profile_perf_events);
+ profile_perf_event_cnt = 0;
+}
+
+static int profile_open_perf_events(struct profiler_bpf *obj)
+{
+ unsigned int cpu, m;
+ int map_fd, pmu_fd;
+
+ profile_perf_events = calloc(
+ sizeof(int), obj->rodata->num_cpu * obj->rodata->num_metric);
+ if (!profile_perf_events) {
+ p_err("failed to allocate memory for perf_event array: %s",
+ strerror(errno));
+ return -1;
+ }
+ map_fd = bpf_map__fd(obj->maps.events);
+ if (map_fd < 0) {
+ p_err("failed to get fd for events map");
+ return -1;
+ }
+
+ for (m = 0; m < ARRAY_SIZE(metrics); m++) {
+ if (!metrics[m].selected)
+ continue;
+ for (cpu = 0; cpu < obj->rodata->num_cpu; cpu++) {
+ pmu_fd = syscall(__NR_perf_event_open, &metrics[m].attr,
+ -1/*pid*/, cpu, -1/*group_fd*/, 0);
+ if (pmu_fd < 0 ||
+ bpf_map_update_elem(map_fd, &profile_perf_event_cnt,
+ &pmu_fd, BPF_ANY) ||
+ ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) {
+ p_err("failed to create event %s on cpu %d",
+ metrics[m].name, cpu);
+ return -1;
+ }
+ profile_perf_events[profile_perf_event_cnt++] = pmu_fd;
+ }
+ }
+ return 0;
+}
+
+static void profile_print_and_cleanup(void)
+{
+ profile_close_perf_events(profile_obj);
+ profile_read_values(profile_obj);
+ profile_print_readings();
+ profiler_bpf__destroy(profile_obj);
+
+ close(profile_tgt_fd);
+ free(profile_tgt_name);
+}
+
+static void int_exit(int signo)
+{
+ profile_print_and_cleanup();
+ exit(0);
+}
+
+static int do_profile(int argc, char **argv)
+{
+ int num_metric, num_cpu, err = -1;
+ struct bpf_program *prog;
+ unsigned long duration;
+ char *endptr;
+
+ /* we need at least two args for the prog and one metric */
+ if (!REQ_ARGS(3))
+ return -EINVAL;
+
+ /* parse target fd */
+ profile_tgt_fd = prog_parse_fd(&argc, &argv);
+ if (profile_tgt_fd < 0) {
+ p_err("failed to parse fd");
+ return -1;
+ }
+
+ /* parse profiling optional duration */
+ if (argc > 2 && is_prefix(argv[0], "duration")) {
+ NEXT_ARG();
+ duration = strtoul(*argv, &endptr, 0);
+ if (*endptr)
+ usage();
+ NEXT_ARG();
+ } else {
+ duration = UINT_MAX;
+ }
+
+ num_metric = profile_parse_metrics(argc, argv);
+ if (num_metric <= 0)
+ goto out;
+
+ num_cpu = libbpf_num_possible_cpus();
+ if (num_cpu <= 0) {
+ p_err("failed to identify number of CPUs");
+ goto out;
+ }
+
+ profile_obj = profiler_bpf__open();
+ if (!profile_obj) {
+ p_err("failed to open and/or load BPF object");
+ goto out;
+ }
+
+ profile_obj->rodata->num_cpu = num_cpu;
+ profile_obj->rodata->num_metric = num_metric;
+
+ /* adjust map sizes */
+ bpf_map__resize(profile_obj->maps.events, num_metric * num_cpu);
+ bpf_map__resize(profile_obj->maps.fentry_readings, num_metric);
+ bpf_map__resize(profile_obj->maps.accum_readings, num_metric);
+ bpf_map__resize(profile_obj->maps.counts, 1);
+
+ /* change target name */
+ profile_tgt_name = profile_target_name(profile_tgt_fd);
+ if (!profile_tgt_name)
+ goto out;
+
+ bpf_object__for_each_program(prog, profile_obj->obj) {
+ err = bpf_program__set_attach_target(prog, profile_tgt_fd,
+ profile_tgt_name);
+ if (err) {
+ p_err("failed to set attach target\n");
+ goto out;
+ }
+ }
+
+ set_max_rlimit();
+ err = profiler_bpf__load(profile_obj);
+ if (err) {
+ p_err("failed to load profile_obj");
+ goto out;
+ }
+
+ err = profile_open_perf_events(profile_obj);
+ if (err)
+ goto out;
+
+ err = profiler_bpf__attach(profile_obj);
+ if (err) {
+ p_err("failed to attach profile_obj");
+ goto out;
+ }
+ signal(SIGINT, int_exit);
+
+ sleep(duration);
+ profile_print_and_cleanup();
+ return 0;
+
+out:
+ profile_close_perf_events(profile_obj);
+ if (profile_obj)
+ profiler_bpf__destroy(profile_obj);
+ close(profile_tgt_fd);
+ free(profile_tgt_name);
+ return err;
+}
+
+#endif /* BPFTOOL_WITHOUT_SKELETONS */
+
static int do_help(int argc, char **argv)
{
if (json_output) {
@@ -1560,6 +1999,7 @@ static int do_help(int argc, char **argv)
" [data_out FILE [data_size_out L]] \\\n"
" [ctx_in FILE [ctx_out FILE [ctx_size_out M]]] \\\n"
" [repeat N]\n"
+ " %s %s profile PROG [duration DURATION] METRICs\n"
" %s %s tracelog\n"
" %s %s help\n"
"\n"
@@ -1577,12 +2017,13 @@ static int do_help(int argc, char **argv)
" struct_ops | fentry | fexit | freplace }\n"
" ATTACH_TYPE := { msg_verdict | stream_verdict | stream_parser |\n"
" flow_dissector }\n"
+ " METRIC := { cycles | instructions | l1d_loads | llc_misses }\n"
" " HELP_SPEC_OPTIONS "\n"
"",
bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
- bin_name, argv[-2]);
+ bin_name, argv[-2], bin_name, argv[-2]);
return 0;
}
@@ -1599,6 +2040,7 @@ static const struct cmd cmds[] = {
{ "detach", do_detach },
{ "tracelog", do_tracelog },
{ "run", do_run },
+ { "profile", do_profile },
{ 0 }
};
diff --git a/tools/bpf/bpftool/skeleton/profiler.bpf.c b/tools/bpf/bpftool/skeleton/profiler.bpf.c
new file mode 100644
index 000000000000..20034c12f7c5
--- /dev/null
+++ b/tools/bpf/bpftool/skeleton/profiler.bpf.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Facebook
+#include "profiler.h"
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+/* map of perf event fds, num_cpu * num_metric entries */
+struct {
+ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, sizeof(int));
+} events SEC(".maps");
+
+/* readings at fentry */
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, sizeof(struct bpf_perf_event_value));
+} fentry_readings SEC(".maps");
+
+/* accumulated readings */
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, sizeof(struct bpf_perf_event_value));
+} accum_readings SEC(".maps");
+
+/* sample counts, one per cpu */
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, sizeof(u64));
+} counts SEC(".maps");
+
+const volatile __u32 num_cpu = 1;
+const volatile __u32 num_metric = 1;
+#define MAX_NUM_MATRICS 4
+
+SEC("fentry/XXX")
+int BPF_PROG(fentry_XXX)
+{
+ struct bpf_perf_event_value *ptrs[MAX_NUM_MATRICS];
+ u32 key = bpf_get_smp_processor_id();
+ u32 i;
+
+ /* look up before reading, to reduce error */
+ for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
+ u32 flag = i;
+
+ ptrs[i] = bpf_map_lookup_elem(&fentry_readings, &flag);
+ if (!ptrs[i])
+ return 0;
+ }
+
+ for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
+ struct bpf_perf_event_value reading;
+ int err;
+
+ err = bpf_perf_event_read_value(&events, key, &reading,
+ sizeof(reading));
+ if (err)
+ return 0;
+ *(ptrs[i]) = reading;
+ key += num_cpu;
+ }
+
+ return 0;
+}
+
+static inline void
+fexit_update_maps(u32 id, struct bpf_perf_event_value *after)
+{
+ struct bpf_perf_event_value *before, diff;
+
+ before = bpf_map_lookup_elem(&fentry_readings, &id);
+ /* only account samples with a valid fentry_reading */
+ if (before && before->counter) {
+ struct bpf_perf_event_value *accum;
+
+ diff.counter = after->counter - before->counter;
+ diff.enabled = after->enabled - before->enabled;
+ diff.running = after->running - before->running;
+
+ accum = bpf_map_lookup_elem(&accum_readings, &id);
+ if (accum) {
+ accum->counter += diff.counter;
+ accum->enabled += diff.enabled;
+ accum->running += diff.running;
+ }
+ }
+}
+
+SEC("fexit/XXX")
+int BPF_PROG(fexit_XXX)
+{
+ struct bpf_perf_event_value readings[MAX_NUM_MATRICS];
+ u32 cpu = bpf_get_smp_processor_id();
+ u32 i, one = 1, zero = 0;
+ int err;
+ u64 *count;
+
+ /* read all events before updating the maps, to reduce error */
+ for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
+ err = bpf_perf_event_read_value(&events, cpu + i * num_cpu,
+ readings + i, sizeof(*readings));
+ if (err)
+ return 0;
+ }
+ count = bpf_map_lookup_elem(&counts, &zero);
+ if (count) {
+ *count += 1;
+ for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++)
+ fexit_update_maps(i, &readings[i]);
+ }
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/bpf/bpftool/skeleton/profiler.h b/tools/bpf/bpftool/skeleton/profiler.h
new file mode 100644
index 000000000000..1f767e9510f7
--- /dev/null
+++ b/tools/bpf/bpftool/skeleton/profiler.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+#ifndef __PROFILER_H
+#define __PROFILER_H
+
+/* useful typedefs from vmlinux.h */
+
+typedef signed char __s8;
+typedef unsigned char __u8;
+typedef short int __s16;
+typedef short unsigned int __u16;
+typedef int __s32;
+typedef unsigned int __u32;
+typedef long long int __s64;
+typedef long long unsigned int __u64;
+
+typedef __s8 s8;
+typedef __u8 u8;
+typedef __s16 s16;
+typedef __u16 u16;
+typedef __s32 s32;
+typedef __u32 u32;
+typedef __s64 s64;
+typedef __u64 u64;
+
+enum {
+ false = 0,
+ true = 1,
+};
+
+#ifdef __CHECKER__
+#define __bitwise__ __attribute__((bitwise))
+#else
+#define __bitwise__
+#endif
+
+typedef __u16 __bitwise__ __le16;
+typedef __u16 __bitwise__ __be16;
+typedef __u32 __bitwise__ __le32;
+typedef __u32 __bitwise__ __be32;
+typedef __u64 __bitwise__ __le64;
+typedef __u64 __bitwise__ __be64;
+
+typedef __u16 __bitwise__ __sum16;
+typedef __u32 __bitwise__ __wsum;
+
+#endif /* __PROFILER_H */
diff --git a/tools/bpf/runqslower/runqslower.bpf.c b/tools/bpf/runqslower/runqslower.bpf.c
index 48a39f72fadf..1f18a409f044 100644
--- a/tools/bpf/runqslower/runqslower.bpf.c
+++ b/tools/bpf/runqslower/runqslower.bpf.c
@@ -5,9 +5,7 @@
#include "runqslower.h"
#define TASK_RUNNING 0
-
-#define BPF_F_INDEX_MASK 0xffffffffULL
-#define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK
+#define BPF_F_CURRENT_CPU 0xffffffffULL
const volatile __u64 min_us = 0;
const volatile pid_t targ_pid = 0;
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index 7ac0d8088565..ab8e89a7009c 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -67,7 +67,8 @@ FILES= \
test-llvm.bin \
test-llvm-version.bin \
test-libaio.bin \
- test-libzstd.bin
+ test-libzstd.bin \
+ test-clang-bpf-global-var.bin
FILES := $(addprefix $(OUTPUT),$(FILES))
@@ -75,6 +76,7 @@ CC ?= $(CROSS_COMPILE)gcc
CXX ?= $(CROSS_COMPILE)g++
PKG_CONFIG ?= $(CROSS_COMPILE)pkg-config
LLVM_CONFIG ?= llvm-config
+CLANG ?= clang
all: $(FILES)
@@ -321,6 +323,11 @@ $(OUTPUT)test-libaio.bin:
$(OUTPUT)test-libzstd.bin:
$(BUILD) -lzstd
+$(OUTPUT)test-clang-bpf-global-var.bin:
+ $(CLANG) -S -g -target bpf -o - $(patsubst %.bin,%.c,$(@F)) | \
+ grep BTF_KIND_VAR
+
+
###############################
clean:
diff --git a/tools/build/feature/test-clang-bpf-global-var.c b/tools/build/feature/test-clang-bpf-global-var.c
new file mode 100644
index 000000000000..221f1481d52e
--- /dev/null
+++ b/tools/build/feature/test-clang-bpf-global-var.c
@@ -0,0 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Facebook
+
+volatile int global_value_for_test = 1;
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 906e9f2752db..5d01c5c7e598 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -73,7 +73,7 @@ struct bpf_insn {
/* Key of a BPF_MAP_TYPE_LPM_TRIE entry */
struct bpf_lpm_trie_key {
__u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */
- __u8 data[0]; /* Arbitrary size */
+ __u8 data[]; /* Arbitrary size */
};
struct bpf_cgroup_storage_key {
@@ -210,6 +210,7 @@ enum bpf_attach_type {
BPF_TRACE_RAW_TP,
BPF_TRACE_FENTRY,
BPF_TRACE_FEXIT,
+ BPF_MODIFY_RETURN,
__MAX_BPF_ATTACH_TYPE
};
@@ -325,44 +326,46 @@ enum bpf_attach_type {
#define BPF_PSEUDO_CALL 1
/* flags for BPF_MAP_UPDATE_ELEM command */
-#define BPF_ANY 0 /* create new element or update existing */
-#define BPF_NOEXIST 1 /* create new element if it didn't exist */
-#define BPF_EXIST 2 /* update existing element */
-#define BPF_F_LOCK 4 /* spin_lock-ed map_lookup/map_update */
+enum {
+ BPF_ANY = 0, /* create new element or update existing */
+ BPF_NOEXIST = 1, /* create new element if it didn't exist */
+ BPF_EXIST = 2, /* update existing element */
+ BPF_F_LOCK = 4, /* spin_lock-ed map_lookup/map_update */
+};
/* flags for BPF_MAP_CREATE command */
-#define BPF_F_NO_PREALLOC (1U << 0)
+enum {
+ BPF_F_NO_PREALLOC = (1U << 0),
/* Instead of having one common LRU list in the
* BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
* which can scale and perform better.
* Note, the LRU nodes (including free nodes) cannot be moved
* across different LRU lists.
*/
-#define BPF_F_NO_COMMON_LRU (1U << 1)
+ BPF_F_NO_COMMON_LRU = (1U << 1),
/* Specify numa node during map creation */
-#define BPF_F_NUMA_NODE (1U << 2)
-
-#define BPF_OBJ_NAME_LEN 16U
+ BPF_F_NUMA_NODE = (1U << 2),
/* Flags for accessing BPF object from syscall side. */
-#define BPF_F_RDONLY (1U << 3)
-#define BPF_F_WRONLY (1U << 4)
+ BPF_F_RDONLY = (1U << 3),
+ BPF_F_WRONLY = (1U << 4),
/* Flag for stack_map, store build_id+offset instead of pointer */
-#define BPF_F_STACK_BUILD_ID (1U << 5)
+ BPF_F_STACK_BUILD_ID = (1U << 5),
/* Zero-initialize hash function seed. This should only be used for testing. */
-#define BPF_F_ZERO_SEED (1U << 6)
+ BPF_F_ZERO_SEED = (1U << 6),
/* Flags for accessing BPF object from program side. */
-#define BPF_F_RDONLY_PROG (1U << 7)
-#define BPF_F_WRONLY_PROG (1U << 8)
+ BPF_F_RDONLY_PROG = (1U << 7),
+ BPF_F_WRONLY_PROG = (1U << 8),
/* Clone map from listener for newly accepted socket */
-#define BPF_F_CLONE (1U << 9)
+ BPF_F_CLONE = (1U << 9),
/* Enable memory-mapping BPF map */
-#define BPF_F_MMAPABLE (1U << 10)
+ BPF_F_MMAPABLE = (1U << 10),
+};
/* Flags for BPF_PROG_QUERY. */
@@ -391,6 +394,8 @@ struct bpf_stack_build_id {
};
};
+#define BPF_OBJ_NAME_LEN 16U
+
union bpf_attr {
struct { /* anonymous struct used by BPF_MAP_CREATE command */
__u32 map_type; /* one of enum bpf_map_type */
@@ -2909,6 +2914,42 @@ union bpf_attr {
* of sizeof(struct perf_branch_entry).
*
* **-ENOENT** if architecture does not support branch records.
+ *
+ * int bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size)
+ * Description
+ * Returns 0 on success; the values of *pid* and *tgid*, as seen from the
+ * current *namespace*, are returned in *nsdata*.
+ *
+ * On failure, the returned value is one of the following:
+ *
+ * **-EINVAL** if the supplied *dev* and *ino* don't match the dev_t and
+ * inode number of the current task's nsfs, or if the conversion of *dev*
+ * to dev_t lost high bits.
+ *
+ * **-ENOENT** if the pid namespace does not exist for the current task.
+ *
+ * int bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
+ * Description
+ * Write raw *data* blob into a special BPF perf event held by
+ * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
+ * event must have the following attributes: **PERF_SAMPLE_RAW**
+ * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
+ * **PERF_COUNT_SW_BPF_OUTPUT** as **config**.
+ *
+ * The *flags* are used to indicate the index in *map* for which
+ * the value must be put, masked with **BPF_F_INDEX_MASK**.
+ * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
+ * to indicate that the index of the current CPU core should be
+ * used.
+ *
+ * The value to write, of *size*, is passed through eBPF stack and
+ * pointed by *data*.
+ *
+ * *ctx* is a pointer to in-kernel struct xdp_buff.
+ *
+ * This helper is similar to **bpf_perf_event_output**\ () but
+ * restricted to raw_tracepoint bpf programs.
+ * Return
+ * 0 on success, or a negative error in case of failure.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -3030,7 +3071,9 @@ union bpf_attr {
FN(tcp_send_ack), \
FN(send_signal_thread), \
FN(jiffies64), \
- FN(read_branch_records),
+ FN(read_branch_records), \
+ FN(get_ns_current_pid_tgid), \
+ FN(xdp_output),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -3045,72 +3088,100 @@ enum bpf_func_id {
/* All flags used by eBPF helper functions, placed here. */
/* BPF_FUNC_skb_store_bytes flags. */
-#define BPF_F_RECOMPUTE_CSUM (1ULL << 0)
-#define BPF_F_INVALIDATE_HASH (1ULL << 1)
+enum {
+ BPF_F_RECOMPUTE_CSUM = (1ULL << 0),
+ BPF_F_INVALIDATE_HASH = (1ULL << 1),
+};
/* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
* First 4 bits are for passing the header field size.
*/
-#define BPF_F_HDR_FIELD_MASK 0xfULL
+enum {
+ BPF_F_HDR_FIELD_MASK = 0xfULL,
+};
/* BPF_FUNC_l4_csum_replace flags. */
-#define BPF_F_PSEUDO_HDR (1ULL << 4)
-#define BPF_F_MARK_MANGLED_0 (1ULL << 5)
-#define BPF_F_MARK_ENFORCE (1ULL << 6)
+enum {
+ BPF_F_PSEUDO_HDR = (1ULL << 4),
+ BPF_F_MARK_MANGLED_0 = (1ULL << 5),
+ BPF_F_MARK_ENFORCE = (1ULL << 6),
+};
/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
-#define BPF_F_INGRESS (1ULL << 0)
+enum {
+ BPF_F_INGRESS = (1ULL << 0),
+};
/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
-#define BPF_F_TUNINFO_IPV6 (1ULL << 0)
+enum {
+ BPF_F_TUNINFO_IPV6 = (1ULL << 0),
+};
/* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */
-#define BPF_F_SKIP_FIELD_MASK 0xffULL
-#define BPF_F_USER_STACK (1ULL << 8)
+enum {
+ BPF_F_SKIP_FIELD_MASK = 0xffULL,
+ BPF_F_USER_STACK = (1ULL << 8),
/* flags used by BPF_FUNC_get_stackid only. */
-#define BPF_F_FAST_STACK_CMP (1ULL << 9)
-#define BPF_F_REUSE_STACKID (1ULL << 10)
+ BPF_F_FAST_STACK_CMP = (1ULL << 9),
+ BPF_F_REUSE_STACKID = (1ULL << 10),
/* flags used by BPF_FUNC_get_stack only. */
-#define BPF_F_USER_BUILD_ID (1ULL << 11)
+ BPF_F_USER_BUILD_ID = (1ULL << 11),
+};
/* BPF_FUNC_skb_set_tunnel_key flags. */
-#define BPF_F_ZERO_CSUM_TX (1ULL << 1)
-#define BPF_F_DONT_FRAGMENT (1ULL << 2)
-#define BPF_F_SEQ_NUMBER (1ULL << 3)
+enum {
+ BPF_F_ZERO_CSUM_TX = (1ULL << 1),
+ BPF_F_DONT_FRAGMENT = (1ULL << 2),
+ BPF_F_SEQ_NUMBER = (1ULL << 3),
+};
/* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
* BPF_FUNC_perf_event_read_value flags.
*/
-#define BPF_F_INDEX_MASK 0xffffffffULL
-#define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK
+enum {
+ BPF_F_INDEX_MASK = 0xffffffffULL,
+ BPF_F_CURRENT_CPU = BPF_F_INDEX_MASK,
/* BPF_FUNC_perf_event_output for sk_buff input context. */
-#define BPF_F_CTXLEN_MASK (0xfffffULL << 32)
+ BPF_F_CTXLEN_MASK = (0xfffffULL << 32),
+};
/* Current network namespace */
-#define BPF_F_CURRENT_NETNS (-1L)
+enum {
+ BPF_F_CURRENT_NETNS = (-1L),
+};
/* BPF_FUNC_skb_adjust_room flags. */
-#define BPF_F_ADJ_ROOM_FIXED_GSO (1ULL << 0)
+enum {
+ BPF_F_ADJ_ROOM_FIXED_GSO = (1ULL << 0),
+ BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = (1ULL << 1),
+ BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = (1ULL << 2),
+ BPF_F_ADJ_ROOM_ENCAP_L4_GRE = (1ULL << 3),
+ BPF_F_ADJ_ROOM_ENCAP_L4_UDP = (1ULL << 4),
+};
-#define BPF_ADJ_ROOM_ENCAP_L2_MASK 0xff
-#define BPF_ADJ_ROOM_ENCAP_L2_SHIFT 56
+enum {
+ BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff,
+ BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56,
+};
-#define BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 (1ULL << 1)
-#define BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 (1ULL << 2)
-#define BPF_F_ADJ_ROOM_ENCAP_L4_GRE (1ULL << 3)
-#define BPF_F_ADJ_ROOM_ENCAP_L4_UDP (1ULL << 4)
#define BPF_F_ADJ_ROOM_ENCAP_L2(len) (((__u64)len & \
BPF_ADJ_ROOM_ENCAP_L2_MASK) \
<< BPF_ADJ_ROOM_ENCAP_L2_SHIFT)
/* BPF_FUNC_sysctl_get_name flags. */
-#define BPF_F_SYSCTL_BASE_NAME (1ULL << 0)
+enum {
+ BPF_F_SYSCTL_BASE_NAME = (1ULL << 0),
+};
/* BPF_FUNC_sk_storage_get flags */
-#define BPF_SK_STORAGE_GET_F_CREATE (1ULL << 0)
+enum {
+ BPF_SK_STORAGE_GET_F_CREATE = (1ULL << 0),
+};
/* BPF_FUNC_read_branch_records flags. */
-#define BPF_F_GET_BRANCH_RECORDS_SIZE (1ULL << 0)
+enum {
+ BPF_F_GET_BRANCH_RECORDS_SIZE = (1ULL << 0),
+};
/* Mode for BPF_FUNC_skb_adjust_room helper. */
enum bpf_adj_room_mode {
@@ -3176,6 +3247,7 @@ struct __sk_buff {
__u32 wire_len;
__u32 gso_segs;
__bpf_md_ptr(struct bpf_sock *, sk);
+ __u32 gso_size;
};
struct bpf_tunnel_key {
@@ -3528,13 +3600,14 @@ struct bpf_sock_ops {
};
/* Definitions for bpf_sock_ops_cb_flags */
-#define BPF_SOCK_OPS_RTO_CB_FLAG (1<<0)
-#define BPF_SOCK_OPS_RETRANS_CB_FLAG (1<<1)
-#define BPF_SOCK_OPS_STATE_CB_FLAG (1<<2)
-#define BPF_SOCK_OPS_RTT_CB_FLAG (1<<3)
-#define BPF_SOCK_OPS_ALL_CB_FLAGS 0xF /* Mask of all currently
- * supported cb flags
- */
+enum {
+ BPF_SOCK_OPS_RTO_CB_FLAG = (1<<0),
+ BPF_SOCK_OPS_RETRANS_CB_FLAG = (1<<1),
+ BPF_SOCK_OPS_STATE_CB_FLAG = (1<<2),
+ BPF_SOCK_OPS_RTT_CB_FLAG = (1<<3),
+/* Mask of all currently supported cb flags */
+ BPF_SOCK_OPS_ALL_CB_FLAGS = 0xF,
+};
/* List of known BPF sock_ops operators.
* New entries can only be added at the end
@@ -3613,8 +3686,10 @@ enum {
BPF_TCP_MAX_STATES /* Leave at the end! */
};
-#define TCP_BPF_IW 1001 /* Set TCP initial congestion window */
-#define TCP_BPF_SNDCWND_CLAMP 1002 /* Set sndcwnd_clamp */
+enum {
+ TCP_BPF_IW = 1001, /* Set TCP initial congestion window */
+ TCP_BPF_SNDCWND_CLAMP = 1002, /* Set sndcwnd_clamp */
+};
struct bpf_perf_event_value {
__u64 counter;
@@ -3622,12 +3697,16 @@ struct bpf_perf_event_value {
__u64 running;
};
-#define BPF_DEVCG_ACC_MKNOD (1ULL << 0)
-#define BPF_DEVCG_ACC_READ (1ULL << 1)
-#define BPF_DEVCG_ACC_WRITE (1ULL << 2)
+enum {
+ BPF_DEVCG_ACC_MKNOD = (1ULL << 0),
+ BPF_DEVCG_ACC_READ = (1ULL << 1),
+ BPF_DEVCG_ACC_WRITE = (1ULL << 2),
+};
-#define BPF_DEVCG_DEV_BLOCK (1ULL << 0)
-#define BPF_DEVCG_DEV_CHAR (1ULL << 1)
+enum {
+ BPF_DEVCG_DEV_BLOCK = (1ULL << 0),
+ BPF_DEVCG_DEV_CHAR = (1ULL << 1),
+};
struct bpf_cgroup_dev_ctx {
/* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
@@ -3643,8 +3722,10 @@ struct bpf_raw_tracepoint_args {
/* DIRECT: Skip the FIB rules and go to FIB table associated with device
* OUTPUT: Do lookup from egress perspective; default is ingress
*/
-#define BPF_FIB_LOOKUP_DIRECT (1U << 0)
-#define BPF_FIB_LOOKUP_OUTPUT (1U << 1)
+enum {
+ BPF_FIB_LOOKUP_DIRECT = (1U << 0),
+ BPF_FIB_LOOKUP_OUTPUT = (1U << 1),
+};
enum {
BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */
@@ -3716,9 +3797,11 @@ enum bpf_task_fd_type {
BPF_FD_TYPE_URETPROBE, /* filename + offset */
};
-#define BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG (1U << 0)
-#define BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL (1U << 1)
-#define BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP (1U << 2)
+enum {
+ BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = (1U << 0),
+ BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = (1U << 1),
+ BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = (1U << 2),
+};
struct bpf_flow_keys {
__u16 nhoff;
@@ -3784,4 +3867,8 @@ struct bpf_sockopt {
__s32 retval;
};
+struct bpf_pidns_info {
+ __u32 pid;
+ __u32 tgid;
+};
#endif /* _UAPI__LINUX_BPF_H__ */
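
Editor's note: to illustrate the new **bpf_get_ns_current_pid_tgid**\ () helper and the **struct bpf_pidns_info** added above, here is a minimal, hypothetical BPF program sketch. It is not part of this patch: the section name, the global variables, and the convention that user space fills *dev*/*ino* (e.g. from a stat() of /proc/self/ns/pid before loading) are assumptions made for the example.

	// SPDX-License-Identifier: GPL-2.0
	/* Hypothetical sketch, not part of this patch. The loader is assumed to
	 * set `dev` and `ino` to the device and inode of its pid namespace
	 * before loading, and to read ns_pid/ns_tgid from global data afterwards.
	 */
	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	const volatile __u64 dev = 0;	/* pidns device, set by the loader */
	const volatile __u64 ino = 0;	/* pidns inode, set by the loader */

	__u32 ns_pid;
	__u32 ns_tgid;

	SEC("tracepoint/syscalls/sys_enter_nanosleep")
	int handle_enter(void *ctx)
	{
		struct bpf_pidns_info nsdata = {};

		/* returns 0 on success, -EINVAL or -ENOENT on failure */
		if (bpf_get_ns_current_pid_tgid(dev, ino, &nsdata, sizeof(nsdata)))
			return 0;

		ns_pid = nsdata.pid;
		ns_tgid = nsdata.tgid;
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";
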
diff --git a/tools/testing/selftests/bpf/include/uapi/linux/types.h b/tools/include/uapi/linux/types.h
index 91fa51a9c31d..91fa51a9c31d 100644
--- a/tools/testing/selftests/bpf/include/uapi/linux/types.h
+++ b/tools/include/uapi/linux/types.h
diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
index b0dafe8b4ebc..b0c9ae5c73b5 100644
--- a/tools/lib/bpf/bpf_tracing.h
+++ b/tools/lib/bpf/bpf_tracing.h
@@ -49,7 +49,8 @@
#if defined(bpf_target_x86)
-#ifdef __KERNEL__
+#if defined(__KERNEL__) || defined(__VMLINUX_H__)
+
#define PT_REGS_PARM1(x) ((x)->di)
#define PT_REGS_PARM2(x) ((x)->si)
#define PT_REGS_PARM3(x) ((x)->dx)
@@ -60,7 +61,20 @@
#define PT_REGS_RC(x) ((x)->ax)
#define PT_REGS_SP(x) ((x)->sp)
#define PT_REGS_IP(x) ((x)->ip)
+
+#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), di)
+#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), si)
+#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), dx)
+#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), cx)
+#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), r8)
+#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), sp)
+#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), bp)
+#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), ax)
+#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), sp)
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), ip)
+
#else
+
#ifdef __i386__
/* i386 kernel is built with -mregparm=3 */
#define PT_REGS_PARM1(x) ((x)->eax)
@@ -73,7 +87,20 @@
#define PT_REGS_RC(x) ((x)->eax)
#define PT_REGS_SP(x) ((x)->esp)
#define PT_REGS_IP(x) ((x)->eip)
+
+#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), eax)
+#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), edx)
+#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), ecx)
+#define PT_REGS_PARM4_CORE(x) 0
+#define PT_REGS_PARM5_CORE(x) 0
+#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), esp)
+#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), ebp)
+#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), eax)
+#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), esp)
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), eip)
+
#else
+
#define PT_REGS_PARM1(x) ((x)->rdi)
#define PT_REGS_PARM2(x) ((x)->rsi)
#define PT_REGS_PARM3(x) ((x)->rdx)
@@ -84,6 +111,18 @@
#define PT_REGS_RC(x) ((x)->rax)
#define PT_REGS_SP(x) ((x)->rsp)
#define PT_REGS_IP(x) ((x)->rip)
+
+#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), rdi)
+#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), rsi)
+#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), rdx)
+#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), rcx)
+#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), r8)
+#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), rsp)
+#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), rbp)
+#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), rax)
+#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), rsp)
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), rip)
+
#endif
#endif
@@ -104,6 +143,17 @@ struct pt_regs;
#define PT_REGS_SP(x) (((PT_REGS_S390 *)(x))->gprs[15])
#define PT_REGS_IP(x) (((PT_REGS_S390 *)(x))->psw.addr)
+#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[2])
+#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[3])
+#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[4])
+#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[5])
+#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[6])
+#define PT_REGS_RET_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[14])
+#define PT_REGS_FP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[11])
+#define PT_REGS_RC_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[2])
+#define PT_REGS_SP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[15])
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), psw.addr)
+
#elif defined(bpf_target_arm)
#define PT_REGS_PARM1(x) ((x)->uregs[0])
@@ -117,6 +167,17 @@ struct pt_regs;
#define PT_REGS_SP(x) ((x)->uregs[13])
#define PT_REGS_IP(x) ((x)->uregs[12])
+#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), uregs[0])
+#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), uregs[1])
+#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), uregs[2])
+#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), uregs[3])
+#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), uregs[4])
+#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), uregs[14])
+#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), uregs[11])
+#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), uregs[0])
+#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), uregs[13])
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), uregs[12])
+
#elif defined(bpf_target_arm64)
/* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */
@@ -134,6 +195,17 @@ struct pt_regs;
#define PT_REGS_SP(x) (((PT_REGS_ARM64 *)(x))->sp)
#define PT_REGS_IP(x) (((PT_REGS_ARM64 *)(x))->pc)
+#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[0])
+#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[1])
+#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[2])
+#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[3])
+#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[4])
+#define PT_REGS_RET_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[30])
+#define PT_REGS_FP_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[29])
+#define PT_REGS_RC_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[0])
+#define PT_REGS_SP_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), sp)
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), pc)
+
#elif defined(bpf_target_mips)
#define PT_REGS_PARM1(x) ((x)->regs[4])
@@ -147,6 +219,17 @@ struct pt_regs;
#define PT_REGS_SP(x) ((x)->regs[29])
#define PT_REGS_IP(x) ((x)->cp0_epc)
+#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), regs[4])
+#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), regs[5])
+#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), regs[6])
+#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), regs[7])
+#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), regs[8])
+#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), regs[31])
+#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), regs[30])
+#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), regs[1])
+#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), regs[29])
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), cp0_epc)
+
#elif defined(bpf_target_powerpc)
#define PT_REGS_PARM1(x) ((x)->gpr[3])
@@ -158,6 +241,15 @@ struct pt_regs;
#define PT_REGS_SP(x) ((x)->sp)
#define PT_REGS_IP(x) ((x)->nip)
+#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), gpr[3])
+#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), gpr[4])
+#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), gpr[5])
+#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), gpr[6])
+#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), gpr[7])
+#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), gpr[3])
+#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), sp)
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), nip)
+
#elif defined(bpf_target_sparc)
#define PT_REGS_PARM1(x) ((x)->u_regs[UREG_I0])
@@ -169,11 +261,22 @@ struct pt_regs;
#define PT_REGS_RC(x) ((x)->u_regs[UREG_I0])
#define PT_REGS_SP(x) ((x)->u_regs[UREG_FP])
+#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I0])
+#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I1])
+#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I2])
+#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I3])
+#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I4])
+#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I7])
+#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I0])
+#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), u_regs[UREG_FP])
+
/* Should this also be a bpf_target check for the sparc case? */
#if defined(__arch64__)
#define PT_REGS_IP(x) ((x)->tpc)
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), tpc)
#else
#define PT_REGS_IP(x) ((x)->pc)
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), pc)
#endif
#endif
@@ -192,4 +295,122 @@ struct pt_regs;
(void *)(PT_REGS_FP(ctx) + sizeof(ip))); })
#endif
+#define ___bpf_concat(a, b) a ## b
+#define ___bpf_apply(fn, n) ___bpf_concat(fn, n)
+#define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N
+#define ___bpf_narg(...) \
+ ___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+#define ___bpf_empty(...) \
+ ___bpf_nth(_, ##__VA_ARGS__, N, N, N, N, N, N, N, N, N, N, 0)
+
+#define ___bpf_ctx_cast0() ctx
+#define ___bpf_ctx_cast1(x) ___bpf_ctx_cast0(), (void *)ctx[0]
+#define ___bpf_ctx_cast2(x, args...) ___bpf_ctx_cast1(args), (void *)ctx[1]
+#define ___bpf_ctx_cast3(x, args...) ___bpf_ctx_cast2(args), (void *)ctx[2]
+#define ___bpf_ctx_cast4(x, args...) ___bpf_ctx_cast3(args), (void *)ctx[3]
+#define ___bpf_ctx_cast5(x, args...) ___bpf_ctx_cast4(args), (void *)ctx[4]
+#define ___bpf_ctx_cast6(x, args...) ___bpf_ctx_cast5(args), (void *)ctx[5]
+#define ___bpf_ctx_cast7(x, args...) ___bpf_ctx_cast6(args), (void *)ctx[6]
+#define ___bpf_ctx_cast8(x, args...) ___bpf_ctx_cast7(args), (void *)ctx[7]
+#define ___bpf_ctx_cast9(x, args...) ___bpf_ctx_cast8(args), (void *)ctx[8]
+#define ___bpf_ctx_cast10(x, args...) ___bpf_ctx_cast9(args), (void *)ctx[9]
+#define ___bpf_ctx_cast11(x, args...) ___bpf_ctx_cast10(args), (void *)ctx[10]
+#define ___bpf_ctx_cast12(x, args...) ___bpf_ctx_cast11(args), (void *)ctx[11]
+#define ___bpf_ctx_cast(args...) \
+ ___bpf_apply(___bpf_ctx_cast, ___bpf_narg(args))(args)
+
+/*
+ * BPF_PROG is a convenience wrapper for generic tp_btf/fentry/fexit and
+ * similar kinds of BPF programs that accept input arguments as a single
+ * pointer to an untyped u64 array, where each u64 can actually be a typed
+ * pointer or an integer of a different size. Instead of requiring the user
+ * to write manual casts and work with array elements by index, the BPF_PROG
+ * macro allows the user to declare a list of named and typed input arguments
+ * in the same syntax as for a normal C function. All the casting is hidden
+ * and performed transparently, while user code can just assume it is working
+ * with function arguments of the specified type and name.
+ *
+ * Original raw context argument is preserved as well as 'ctx' argument.
+ * This is useful when using BPF helpers that expect original context
+ * as one of the parameters (e.g., for bpf_perf_event_output()).
+ */
+#define BPF_PROG(name, args...) \
+name(unsigned long long *ctx); \
+static __attribute__((always_inline)) typeof(name(0)) \
+____##name(unsigned long long *ctx, ##args); \
+typeof(name(0)) name(unsigned long long *ctx) \
+{ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ return ____##name(___bpf_ctx_cast(args)); \
+ _Pragma("GCC diagnostic pop") \
+} \
+static __attribute__((always_inline)) typeof(name(0)) \
+____##name(unsigned long long *ctx, ##args)
+
+struct pt_regs;
+
+#define ___bpf_kprobe_args0() ctx
+#define ___bpf_kprobe_args1(x) \
+ ___bpf_kprobe_args0(), (void *)PT_REGS_PARM1(ctx)
+#define ___bpf_kprobe_args2(x, args...) \
+ ___bpf_kprobe_args1(args), (void *)PT_REGS_PARM2(ctx)
+#define ___bpf_kprobe_args3(x, args...) \
+ ___bpf_kprobe_args2(args), (void *)PT_REGS_PARM3(ctx)
+#define ___bpf_kprobe_args4(x, args...) \
+ ___bpf_kprobe_args3(args), (void *)PT_REGS_PARM4(ctx)
+#define ___bpf_kprobe_args5(x, args...) \
+ ___bpf_kprobe_args4(args), (void *)PT_REGS_PARM5(ctx)
+#define ___bpf_kprobe_args(args...) \
+ ___bpf_apply(___bpf_kprobe_args, ___bpf_narg(args))(args)
+
+/*
+ * BPF_KPROBE serves the same purpose for kprobes as BPF_PROG for
+ * tp_btf/fentry/fexit BPF programs. It hides the underlying platform-specific
+ * low-level way of getting kprobe input arguments from struct pt_regs, and
+ * provides familiar typed and named function argument syntax and semantics
+ * for accessing kprobe input parameters.
+ *
+ * Original struct pt_regs* context is preserved as 'ctx' argument. This might
+ * be necessary when using BPF helpers like bpf_perf_event_output().
+ */
+#define BPF_KPROBE(name, args...) \
+name(struct pt_regs *ctx); \
+static __attribute__((always_inline)) typeof(name(0)) \
+____##name(struct pt_regs *ctx, ##args); \
+typeof(name(0)) name(struct pt_regs *ctx) \
+{ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ return ____##name(___bpf_kprobe_args(args)); \
+ _Pragma("GCC diagnostic pop") \
+} \
+static __attribute__((always_inline)) typeof(name(0)) \
+____##name(struct pt_regs *ctx, ##args)
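For example (sketch only), a kprobe on do_unlinkat() declares its two parameters directly and the macro fetches them via PT_REGS_PARM1()/PT_REGS_PARM2() behind the scenes; a forward declaration of struct filename (or vmlinux.h) is assumed:

	struct filename;

	SEC("kprobe/do_unlinkat")
	int BPF_KPROBE(handle_unlinkat, int dfd, struct filename *name)
	{
		/* 'ctx' (struct pt_regs *) remains available here as well */
		return 0;
	}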
+
+#define ___bpf_kretprobe_args0() ctx
+#define ___bpf_kretprobe_args1(x) \
+ ___bpf_kretprobe_args0(), (void *)PT_REGS_RET(ctx)
+#define ___bpf_kretprobe_args(args...) \
+ ___bpf_apply(___bpf_kretprobe_args, ___bpf_narg(args))(args)
+
+/*
+ * BPF_KRETPROBE is similar to BPF_KPROBE, except that it only provides an
+ * optional return value (in addition to `struct pt_regs *ctx`), but no input
+ * arguments, because they will already have been clobbered by the time the
+ * probed function returns.
+ */
+#define BPF_KRETPROBE(name, args...) \
+name(struct pt_regs *ctx); \
+static __attribute__((always_inline)) typeof(name(0)) \
+____##name(struct pt_regs *ctx, ##args); \
+typeof(name(0)) name(struct pt_regs *ctx) \
+{ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ return ____##name(___bpf_kretprobe_args(args)); \
+ _Pragma("GCC diagnostic pop") \
+} \
+static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
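A matching kretprobe (sketch only) then declares at most the return value:

	SEC("kretprobe/do_unlinkat")
	int BPF_KRETPROBE(handle_unlinkat_exit, long ret)
	{
		/* input arguments are gone by now; only 'ret' and 'ctx' remain */
		return 0;
	}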
+
#endif
diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
index bd09ed1710f1..0c28ee82834b 100644
--- a/tools/lib/bpf/btf_dump.c
+++ b/tools/lib/bpf/btf_dump.c
@@ -916,13 +916,13 @@ static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id,
/* enumerators share namespace with typedef idents */
dup_cnt = btf_dump_name_dups(d, d->ident_names, name);
if (dup_cnt > 1) {
- btf_dump_printf(d, "\n%s%s___%zu = %d,",
+ btf_dump_printf(d, "\n%s%s___%zu = %u,",
pfx(lvl + 1), name, dup_cnt,
- (__s32)v->val);
+ (__u32)v->val);
} else {
- btf_dump_printf(d, "\n%s%s = %d,",
+ btf_dump_printf(d, "\n%s%s = %u,",
pfx(lvl + 1), name,
- (__s32)v->val);
+ (__u32)v->val);
}
}
btf_dump_printf(d, "\n%s}", pfx(lvl));
@@ -1030,7 +1030,7 @@ int btf_dump__emit_type_decl(struct btf_dump *d, __u32 id,
if (!OPTS_VALID(opts, btf_dump_emit_type_decl_opts))
return -EINVAL;
- fname = OPTS_GET(opts, field_name, NULL);
+ fname = OPTS_GET(opts, field_name, "");
lvl = OPTS_GET(opts, indent_level, 0);
btf_dump_emit_type_decl(d, id, fname, lvl);
return 0;
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 996162801f7a..085e41f9b68e 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -2284,9 +2284,16 @@ static void bpf_object__sanitize_btf_ext(struct bpf_object *obj)
}
}
-static bool bpf_object__is_btf_mandatory(const struct bpf_object *obj)
+static bool libbpf_needs_btf(const struct bpf_object *obj)
{
- return obj->efile.st_ops_shndx >= 0 || obj->nr_extern > 0;
+ return obj->efile.btf_maps_shndx >= 0 ||
+ obj->efile.st_ops_shndx >= 0 ||
+ obj->nr_extern > 0;
+}
+
+static bool kernel_needs_btf(const struct bpf_object *obj)
+{
+ return obj->efile.st_ops_shndx >= 0;
}
static int bpf_object__init_btf(struct bpf_object *obj,
@@ -2322,7 +2329,7 @@ static int bpf_object__init_btf(struct bpf_object *obj,
}
}
out:
- if (err && bpf_object__is_btf_mandatory(obj)) {
+ if (err && libbpf_needs_btf(obj)) {
pr_warn("BTF is required, but is missing or corrupted.\n");
return err;
}
@@ -2346,7 +2353,7 @@ static int bpf_object__finalize_btf(struct bpf_object *obj)
btf_ext__free(obj->btf_ext);
obj->btf_ext = NULL;
- if (bpf_object__is_btf_mandatory(obj)) {
+ if (libbpf_needs_btf(obj)) {
pr_warn("BTF is required, but is missing or corrupted.\n");
return -ENOENT;
}
@@ -2410,7 +2417,7 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
obj->btf_ext = NULL;
}
- if (bpf_object__is_btf_mandatory(obj))
+ if (kernel_needs_btf(obj))
return err;
}
return 0;
@@ -3866,6 +3873,10 @@ static struct ids_vec *bpf_core_find_cands(const struct btf *local_btf,
if (str_is_empty(targ_name))
continue;
+ t = skip_mods_and_typedefs(targ_btf, i, NULL);
+ if (!btf_is_composite(t) && !btf_is_array(t))
+ continue;
+
targ_essent_len = bpf_core_essential_name_len(targ_name);
if (targ_essent_len != local_essent_len)
continue;
@@ -6288,6 +6299,10 @@ static const struct bpf_sec_def section_defs[] = {
.expected_attach_type = BPF_TRACE_FENTRY,
.is_attach_btf = true,
.attach_fn = attach_trace),
+ SEC_DEF("fmod_ret/", TRACING,
+ .expected_attach_type = BPF_MODIFY_RETURN,
+ .is_attach_btf = true,
+ .attach_fn = attach_trace),
SEC_DEF("fexit/", TRACING,
.expected_attach_type = BPF_TRACE_FEXIT,
.is_attach_btf = true,
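The new "fmod_ret/" section prefix maps a program to BPF_MODIFY_RETURN and attaches it like the other BTF-based tracing types. A minimal sketch of such a program, modeled on the modify_return selftest added elsewhere in this series (bpf_modify_return_test() is a kernel test function):

	SEC("fmod_ret/bpf_modify_return_test")
	int BPF_PROG(fmod_ret_test, int a, int *b, int ret)
	{
		/* a non-zero return value here is injected as the traced
		 * function's return value
		 */
		return 0;
	}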
@@ -6931,6 +6946,8 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
struct bpf_link {
int (*detach)(struct bpf_link *link);
int (*destroy)(struct bpf_link *link);
+ char *pin_path; /* NULL, if not pinned */
+ int fd; /* hook FD, -1 if not applicable */
bool disconnected;
};
@@ -6960,26 +6977,109 @@ int bpf_link__destroy(struct bpf_link *link)
err = link->detach(link);
if (link->destroy)
link->destroy(link);
+ if (link->pin_path)
+ free(link->pin_path);
free(link);
return err;
}
-struct bpf_link_fd {
- struct bpf_link link; /* has to be at the top of struct */
- int fd; /* hook FD */
-};
+int bpf_link__fd(const struct bpf_link *link)
+{
+ return link->fd;
+}
+
+const char *bpf_link__pin_path(const struct bpf_link *link)
+{
+ return link->pin_path;
+}
+
+static int bpf_link__detach_fd(struct bpf_link *link)
+{
+ return close(link->fd);
+}
+
+struct bpf_link *bpf_link__open(const char *path)
+{
+ struct bpf_link *link;
+ int fd;
+
+ fd = bpf_obj_get(path);
+ if (fd < 0) {
+ fd = -errno;
+ pr_warn("failed to open link at %s: %d\n", path, fd);
+ return ERR_PTR(fd);
+ }
+
+ link = calloc(1, sizeof(*link));
+ if (!link) {
+ close(fd);
+ return ERR_PTR(-ENOMEM);
+ }
+ link->detach = &bpf_link__detach_fd;
+ link->fd = fd;
+
+ link->pin_path = strdup(path);
+ if (!link->pin_path) {
+ bpf_link__destroy(link);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return link;
+}
+
+int bpf_link__pin(struct bpf_link *link, const char *path)
+{
+ int err;
+
+ if (link->pin_path)
+ return -EBUSY;
+ err = make_parent_dir(path);
+ if (err)
+ return err;
+ err = check_path(path);
+ if (err)
+ return err;
+
+ link->pin_path = strdup(path);
+ if (!link->pin_path)
+ return -ENOMEM;
+
+ if (bpf_obj_pin(link->fd, link->pin_path)) {
+ err = -errno;
+ zfree(&link->pin_path);
+ return err;
+ }
+
+ pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path);
+ return 0;
+}
+
+int bpf_link__unpin(struct bpf_link *link)
+{
+ int err;
+
+ if (!link->pin_path)
+ return -EINVAL;
+
+ err = unlink(link->pin_path);
+ if (err != 0)
+ return -errno;
+
+ pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path);
+ zfree(&link->pin_path);
+ return 0;
+}
static int bpf_link__detach_perf_event(struct bpf_link *link)
{
- struct bpf_link_fd *l = (void *)link;
int err;
- err = ioctl(l->fd, PERF_EVENT_IOC_DISABLE, 0);
+ err = ioctl(link->fd, PERF_EVENT_IOC_DISABLE, 0);
if (err)
err = -errno;
- close(l->fd);
+ close(link->fd);
return err;
}
@@ -6987,7 +7087,7 @@ struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
int pfd)
{
char errmsg[STRERR_BUFSIZE];
- struct bpf_link_fd *link;
+ struct bpf_link *link;
int prog_fd, err;
if (pfd < 0) {
@@ -7005,7 +7105,7 @@ struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
link = calloc(1, sizeof(*link));
if (!link)
return ERR_PTR(-ENOMEM);
- link->link.detach = &bpf_link__detach_perf_event;
+ link->detach = &bpf_link__detach_perf_event;
link->fd = pfd;
if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
@@ -7024,7 +7124,7 @@ struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
return ERR_PTR(err);
}
- return (struct bpf_link *)link;
+ return link;
}
/*
@@ -7312,18 +7412,11 @@ out:
return link;
}
-static int bpf_link__detach_fd(struct bpf_link *link)
-{
- struct bpf_link_fd *l = (void *)link;
-
- return close(l->fd);
-}
-
struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
const char *tp_name)
{
char errmsg[STRERR_BUFSIZE];
- struct bpf_link_fd *link;
+ struct bpf_link *link;
int prog_fd, pfd;
prog_fd = bpf_program__fd(prog);
@@ -7336,7 +7429,7 @@ struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
link = calloc(1, sizeof(*link));
if (!link)
return ERR_PTR(-ENOMEM);
- link->link.detach = &bpf_link__detach_fd;
+ link->detach = &bpf_link__detach_fd;
pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
if (pfd < 0) {
@@ -7348,7 +7441,7 @@ struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
return ERR_PTR(pfd);
}
link->fd = pfd;
- return (struct bpf_link *)link;
+ return link;
}
static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
@@ -7362,7 +7455,7 @@ static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog)
{
char errmsg[STRERR_BUFSIZE];
- struct bpf_link_fd *link;
+ struct bpf_link *link;
int prog_fd, pfd;
prog_fd = bpf_program__fd(prog);
@@ -7375,7 +7468,7 @@ struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog)
link = calloc(1, sizeof(*link));
if (!link)
return ERR_PTR(-ENOMEM);
- link->link.detach = &bpf_link__detach_fd;
+ link->detach = &bpf_link__detach_fd;
pfd = bpf_raw_tracepoint_open(NULL, prog_fd);
if (pfd < 0) {
@@ -7409,10 +7502,9 @@ struct bpf_link *bpf_program__attach(struct bpf_program *prog)
static int bpf_link__detach_struct_ops(struct bpf_link *link)
{
- struct bpf_link_fd *l = (void *)link;
__u32 zero = 0;
- if (bpf_map_delete_elem(l->fd, &zero))
+ if (bpf_map_delete_elem(link->fd, &zero))
return -errno;
return 0;
@@ -7421,7 +7513,7 @@ static int bpf_link__detach_struct_ops(struct bpf_link *link)
struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map)
{
struct bpf_struct_ops *st_ops;
- struct bpf_link_fd *link;
+ struct bpf_link *link;
__u32 i, zero = 0;
int err;
@@ -7453,10 +7545,10 @@ struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map)
return ERR_PTR(err);
}
- link->link.detach = bpf_link__detach_struct_ops;
+ link->detach = bpf_link__detach_struct_ops;
link->fd = map->fd;
- return (struct bpf_link *)link;
+ return link;
}
enum bpf_perf_event_ret
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 02fc58a21a7f..d38d7a629417 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -219,6 +219,11 @@ LIBBPF_API void bpf_program__unload(struct bpf_program *prog);
struct bpf_link;
+LIBBPF_API struct bpf_link *bpf_link__open(const char *path);
+LIBBPF_API int bpf_link__fd(const struct bpf_link *link);
+LIBBPF_API const char *bpf_link__pin_path(const struct bpf_link *link);
+LIBBPF_API int bpf_link__pin(struct bpf_link *link, const char *path);
+LIBBPF_API int bpf_link__unpin(struct bpf_link *link);
LIBBPF_API void bpf_link__disconnect(struct bpf_link *link);
LIBBPF_API int bpf_link__destroy(struct bpf_link *link);
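Taken together, the new link API lets an attachment outlive the process that created it. A usage sketch (not from the patch; the BPFFS path is hypothetical and error handling is abbreviated):

	#include <bpf/libbpf.h>

	/* 'prog' is assumed to be an already-loaded struct bpf_program */
	static int pin_example(struct bpf_program *prog)
	{
		struct bpf_link *link = bpf_program__attach(prog);

		if (libbpf_get_error(link))
			return -1;
		if (bpf_link__pin(link, "/sys/fs/bpf/my_link"))
			return -1;
		bpf_link__destroy(link);  /* program stays attached: the pin holds it */

		link = bpf_link__open("/sys/fs/bpf/my_link"); /* e.g. from another process */
		if (libbpf_get_error(link))
			return -1;
		bpf_link__unpin(link);            /* remove the BPFFS entry */
		return bpf_link__destroy(link);   /* last reference gone: detach */
	}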
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 7b014c8cdece..5129283c0284 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -238,5 +238,10 @@ LIBBPF_0.0.7 {
LIBBPF_0.0.8 {
global:
+ bpf_link__fd;
+ bpf_link__open;
+ bpf_link__pin;
+ bpf_link__pin_path;
+ bpf_link__unpin;
bpf_program__set_attach_target;
} LIBBPF_0.0.7;
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index ded7a950dc40..59f31f01cb93 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -106,6 +106,7 @@ ifneq ($(silent),1)
ifneq ($(V),1)
QUIET_CC = @echo ' CC '$@;
QUIET_CC_FPIC = @echo ' CC FPIC '$@;
+ QUIET_CLANG = @echo ' CLANG '$@;
QUIET_AR = @echo ' AR '$@;
QUIET_LINK = @echo ' LINK '$@;
QUIET_MKDIR = @echo ' MKDIR '$@;
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index ec464859c6b6..2198cd876675 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -31,6 +31,7 @@ test_tcp_check_syncookie_user
test_sysctl
test_hashmap
test_btf_dump
+test_current_pid_tgid_new_ns
xdping
test_cpp
*.skel.h
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 2d7f5df33f04..7729892e0b04 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -20,8 +20,9 @@ CLANG ?= clang
LLC ?= llc
LLVM_OBJCOPY ?= llvm-objcopy
BPF_GCC ?= $(shell command -v bpf-gcc;)
-CFLAGS += -g -rdynamic -Wall -O2 $(GENFLAGS) -I$(CURDIR) -I$(APIDIR) \
+CFLAGS += -g -rdynamic -Wall -O2 $(GENFLAGS) -I$(CURDIR) \
-I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) -I$(TOOLSINCDIR) \
+ -I$(APIDIR) \
-Dbpf_prog_load=bpf_prog_test_load \
-Dbpf_load_program=bpf_test_load_program
LDLIBS += -lcap -lelf -lz -lrt -lpthread
@@ -32,7 +33,8 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test
test_sock test_btf test_sockmap get_cgroup_id_user test_socket_cookie \
test_cgroup_storage \
test_netcnt test_tcpnotify_user test_sock_fields test_sysctl test_hashmap \
- test_progs-no_alu32
+ test_progs-no_alu32 \
+ test_current_pid_tgid_new_ns
# Also test bpf-gcc, if present
ifneq ($(BPF_GCC),)
@@ -129,10 +131,13 @@ $(OUTPUT)/test_stub.o: test_stub.c $(BPFOBJ)
$(call msg,CC,,$@)
$(CC) -c $(CFLAGS) -o $@ $<
-VMLINUX_BTF_PATHS := $(abspath ../../../../vmlinux) \
- /sys/kernel/btf/vmlinux \
- /boot/vmlinux-$(shell uname -r)
-VMLINUX_BTF:= $(firstword $(wildcard $(VMLINUX_BTF_PATHS)))
+VMLINUX_BTF_PATHS := $(if $(O),$(O)/vmlinux) \
+ $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \
+ ../../../../vmlinux \
+ /sys/kernel/btf/vmlinux \
+ /boot/vmlinux-$(shell uname -r)
+VMLINUX_BTF := $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
+
$(OUTPUT)/runqslower: $(BPFOBJ)
$(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/runqslower \
OUTPUT=$(SCRATCH_DIR)/ VMLINUX_BTF=$(VMLINUX_BTF) \
@@ -172,6 +177,10 @@ $(BUILD_DIR)/libbpf $(BUILD_DIR)/bpftool $(INCLUDE_DIR):
$(call msg,MKDIR,,$@)
mkdir -p $@
+$(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) | $(BPFTOOL) $(INCLUDE_DIR)
+ $(call msg,GEN,,$@)
+ $(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@
+
# Get Clang's default includes on this system, as opposed to those seen by
# '-target bpf'. This fixes "missing" files on some architectures/distros,
# such as asm/byteorder.h, asm/socket.h, asm/sockios.h, sys/cdefs.h etc.
@@ -190,8 +199,8 @@ MENDIAN=$(if $(IS_LITTLE_ENDIAN),-mlittle-endian,-mbig-endian)
CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG))
BPF_CFLAGS = -g -D__TARGET_ARCH_$(SRCARCH) $(MENDIAN) \
- -I$(INCLUDE_DIR) -I$(CURDIR) -I$(CURDIR)/include/uapi \
- -I$(APIDIR) -I$(abspath $(OUTPUT)/../usr/include)
+ -I$(INCLUDE_DIR) -I$(CURDIR) -I$(APIDIR) \
+ -I$(abspath $(OUTPUT)/../usr/include)
CLANG_CFLAGS = $(CLANG_SYS_INCLUDES) \
-Wno-compare-distinct-pointer-types
@@ -280,6 +289,7 @@ $(TRUNNER_BPF_PROGS_DIR)$(if $2,-)$2-bpfobjs := y
$(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.o: \
$(TRUNNER_BPF_PROGS_DIR)/%.c \
$(TRUNNER_BPF_PROGS_DIR)/*.h \
+ $$(INCLUDE_DIR)/vmlinux.h \
$$(BPFOBJ) | $(TRUNNER_OUTPUT)
$$(call $(TRUNNER_BPF_BUILD_RULE),$$<,$$@, \
$(TRUNNER_BPF_CFLAGS), \
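With $(INCLUDE_DIR)/vmlinux.h now a prerequisite of every test BPF object, a selftest program can pull all kernel types from the single generated header instead of individual uapi/kernel headers. A sketch in the spirit of the new test_vmlinux.c (not a verbatim copy):

	#include "vmlinux.h"	/* generated via: bpftool btf dump file ... format c */
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	char LICENSE[] SEC("license") = "GPL";

	SEC("tp_btf/sys_enter")
	int BPF_PROG(handle_sys_enter, struct pt_regs *regs, long id)
	{
		/* struct pt_regs comes from vmlinux.h, no <linux/ptrace.h> needed */
		return 0;
	}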
diff --git a/tools/testing/selftests/bpf/bpf_tcp_helpers.h b/tools/testing/selftests/bpf/bpf_tcp_helpers.h
index 8f21965ffc6c..5bf2fe9b1efa 100644
--- a/tools/testing/selftests/bpf/bpf_tcp_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_tcp_helpers.h
@@ -6,7 +6,7 @@
#include <linux/types.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
-#include "bpf_trace_helpers.h"
+#include <bpf/bpf_tracing.h>
#define BPF_STRUCT_OPS(name, args...) \
SEC("struct_ops/"#name) \
diff --git a/tools/testing/selftests/bpf/bpf_trace_helpers.h b/tools/testing/selftests/bpf/bpf_trace_helpers.h
deleted file mode 100644
index c6f1354d93fb..000000000000
--- a/tools/testing/selftests/bpf/bpf_trace_helpers.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
-#ifndef __BPF_TRACE_HELPERS_H
-#define __BPF_TRACE_HELPERS_H
-
-#include <bpf/bpf_helpers.h>
-
-#define ___bpf_concat(a, b) a ## b
-#define ___bpf_apply(fn, n) ___bpf_concat(fn, n)
-#define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N
-#define ___bpf_narg(...) \
- ___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
-#define ___bpf_empty(...) \
- ___bpf_nth(_, ##__VA_ARGS__, N, N, N, N, N, N, N, N, N, N, 0)
-
-#define ___bpf_ctx_cast0() ctx
-#define ___bpf_ctx_cast1(x) ___bpf_ctx_cast0(), (void *)ctx[0]
-#define ___bpf_ctx_cast2(x, args...) ___bpf_ctx_cast1(args), (void *)ctx[1]
-#define ___bpf_ctx_cast3(x, args...) ___bpf_ctx_cast2(args), (void *)ctx[2]
-#define ___bpf_ctx_cast4(x, args...) ___bpf_ctx_cast3(args), (void *)ctx[3]
-#define ___bpf_ctx_cast5(x, args...) ___bpf_ctx_cast4(args), (void *)ctx[4]
-#define ___bpf_ctx_cast6(x, args...) ___bpf_ctx_cast5(args), (void *)ctx[5]
-#define ___bpf_ctx_cast7(x, args...) ___bpf_ctx_cast6(args), (void *)ctx[6]
-#define ___bpf_ctx_cast8(x, args...) ___bpf_ctx_cast7(args), (void *)ctx[7]
-#define ___bpf_ctx_cast9(x, args...) ___bpf_ctx_cast8(args), (void *)ctx[8]
-#define ___bpf_ctx_cast10(x, args...) ___bpf_ctx_cast9(args), (void *)ctx[9]
-#define ___bpf_ctx_cast11(x, args...) ___bpf_ctx_cast10(args), (void *)ctx[10]
-#define ___bpf_ctx_cast12(x, args...) ___bpf_ctx_cast11(args), (void *)ctx[11]
-#define ___bpf_ctx_cast(args...) \
- ___bpf_apply(___bpf_ctx_cast, ___bpf_narg(args))(args)
-
-/*
- * BPF_PROG is a convenience wrapper for generic tp_btf/fentry/fexit and
- * similar kinds of BPF programs, that accept input arguments as a single
- * pointer to untyped u64 array, where each u64 can actually be a typed
- * pointer or integer of different size. Instead of requring user to write
- * manual casts and work with array elements by index, BPF_PROG macro
- * allows user to declare a list of named and typed input arguments in the
- * same syntax as for normal C function. All the casting is hidden and
- * performed transparently, while user code can just assume working with
- * function arguments of specified type and name.
- *
- * Original raw context argument is preserved as well as 'ctx' argument.
- * This is useful when using BPF helpers that expect original context
- * as one of the parameters (e.g., for bpf_perf_event_output()).
- */
-#define BPF_PROG(name, args...) \
-name(unsigned long long *ctx); \
-static __always_inline typeof(name(0)) \
-____##name(unsigned long long *ctx, ##args); \
-typeof(name(0)) name(unsigned long long *ctx) \
-{ \
- _Pragma("GCC diagnostic push") \
- _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
- return ____##name(___bpf_ctx_cast(args)); \
- _Pragma("GCC diagnostic pop") \
-} \
-static __always_inline typeof(name(0)) \
-____##name(unsigned long long *ctx, ##args)
-
-struct pt_regs;
-
-#define ___bpf_kprobe_args0() ctx
-#define ___bpf_kprobe_args1(x) \
- ___bpf_kprobe_args0(), (void *)PT_REGS_PARM1(ctx)
-#define ___bpf_kprobe_args2(x, args...) \
- ___bpf_kprobe_args1(args), (void *)PT_REGS_PARM2(ctx)
-#define ___bpf_kprobe_args3(x, args...) \
- ___bpf_kprobe_args2(args), (void *)PT_REGS_PARM3(ctx)
-#define ___bpf_kprobe_args4(x, args...) \
- ___bpf_kprobe_args3(args), (void *)PT_REGS_PARM4(ctx)
-#define ___bpf_kprobe_args5(x, args...) \
- ___bpf_kprobe_args4(args), (void *)PT_REGS_PARM5(ctx)
-#define ___bpf_kprobe_args(args...) \
- ___bpf_apply(___bpf_kprobe_args, ___bpf_narg(args))(args)
-
-/*
- * BPF_KPROBE serves the same purpose for kprobes as BPF_PROG for
- * tp_btf/fentry/fexit BPF programs. It hides the underlying platform-specific
- * low-level way of getting kprobe input arguments from struct pt_regs, and
- * provides a familiar typed and named function arguments syntax and
- * semantics of accessing kprobe input paremeters.
- *
- * Original struct pt_regs* context is preserved as 'ctx' argument. This might
- * be necessary when using BPF helpers like bpf_perf_event_output().
- */
-#define BPF_KPROBE(name, args...) \
-name(struct pt_regs *ctx); \
-static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args);\
-typeof(name(0)) name(struct pt_regs *ctx) \
-{ \
- _Pragma("GCC diagnostic push") \
- _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
- return ____##name(___bpf_kprobe_args(args)); \
- _Pragma("GCC diagnostic pop") \
-} \
-static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
-
-#define ___bpf_kretprobe_args0() ctx
-#define ___bpf_kretprobe_argsN(x, args...) \
- ___bpf_kprobe_args(args), (void *)PT_REGS_RET(ctx)
-#define ___bpf_kretprobe_args(args...) \
- ___bpf_apply(___bpf_kretprobe_args, ___bpf_empty(args))(args)
-
-/*
- * BPF_KRETPROBE is similar to BPF_KPROBE, except, in addition to listing all
- * input kprobe arguments, one last extra argument has to be specified, which
- * captures kprobe return value.
- */
-#define BPF_KRETPROBE(name, args...) \
-name(struct pt_regs *ctx); \
-static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args);\
-typeof(name(0)) name(struct pt_regs *ctx) \
-{ \
- _Pragma("GCC diagnostic push") \
- _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
- return ____##name(___bpf_kretprobe_args(args)); \
- _Pragma("GCC diagnostic pop") \
-} \
-static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
-#endif
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c
index 5b13f2c6c402..70e94e783070 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c
@@ -6,7 +6,7 @@
#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null"
-char bpf_log_buf[BPF_LOG_BUF_SIZE];
+static char bpf_log_buf[BPF_LOG_BUF_SIZE];
static int prog_load(void)
{
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c
index 2ff21dbce179..139f8e82c7c6 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c
@@ -6,7 +6,7 @@
#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null"
-char bpf_log_buf[BPF_LOG_BUF_SIZE];
+static char bpf_log_buf[BPF_LOG_BUF_SIZE];
static int map_fd = -1;
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c
index 9d8cb48b99de..9e96f8d87fea 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c
@@ -8,7 +8,7 @@
#define BAR "/foo/bar/"
#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null"
-char bpf_log_buf[BPF_LOG_BUF_SIZE];
+static char bpf_log_buf[BPF_LOG_BUF_SIZE];
static int prog_load(int verdict)
{
diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
index 235ac4f67f5b..83493bd5745c 100644
--- a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
+++ b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
@@ -1,22 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
-#include "test_pkt_access.skel.h"
#include "fentry_test.skel.h"
#include "fexit_test.skel.h"
void test_fentry_fexit(void)
{
- struct test_pkt_access *pkt_skel = NULL;
struct fentry_test *fentry_skel = NULL;
struct fexit_test *fexit_skel = NULL;
__u64 *fentry_res, *fexit_res;
__u32 duration = 0, retval;
- int err, pkt_fd, i;
+ int err, prog_fd, i;
- pkt_skel = test_pkt_access__open_and_load();
- if (CHECK(!pkt_skel, "pkt_skel_load", "pkt_access skeleton failed\n"))
- return;
fentry_skel = fentry_test__open_and_load();
if (CHECK(!fentry_skel, "fentry_skel_load", "fentry skeleton failed\n"))
goto close_prog;
@@ -31,8 +26,8 @@ void test_fentry_fexit(void)
if (CHECK(err, "fexit_attach", "fexit attach failed: %d\n", err))
goto close_prog;
- pkt_fd = bpf_program__fd(pkt_skel->progs.test_pkt_access);
- err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6),
+ prog_fd = bpf_program__fd(fexit_skel->progs.test1);
+ err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
CHECK(err || retval, "ipv6",
"err %d errno %d retval %d duration %d\n",
@@ -49,7 +44,6 @@ void test_fentry_fexit(void)
}
close_prog:
- test_pkt_access__destroy(pkt_skel);
fentry_test__destroy(fentry_skel);
fexit_test__destroy(fexit_skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_test.c b/tools/testing/selftests/bpf/prog_tests/fentry_test.c
index 5cc06021f27d..04ebbf1cb390 100644
--- a/tools/testing/selftests/bpf/prog_tests/fentry_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/fentry_test.c
@@ -1,20 +1,15 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
-#include "test_pkt_access.skel.h"
#include "fentry_test.skel.h"
void test_fentry_test(void)
{
- struct test_pkt_access *pkt_skel = NULL;
struct fentry_test *fentry_skel = NULL;
- int err, pkt_fd, i;
+ int err, prog_fd, i;
__u32 duration = 0, retval;
__u64 *result;
- pkt_skel = test_pkt_access__open_and_load();
- if (CHECK(!pkt_skel, "pkt_skel_load", "pkt_access skeleton failed\n"))
- return;
fentry_skel = fentry_test__open_and_load();
if (CHECK(!fentry_skel, "fentry_skel_load", "fentry skeleton failed\n"))
goto cleanup;
@@ -23,10 +18,10 @@ void test_fentry_test(void)
if (CHECK(err, "fentry_attach", "fentry attach failed: %d\n", err))
goto cleanup;
- pkt_fd = bpf_program__fd(pkt_skel->progs.test_pkt_access);
- err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6),
+ prog_fd = bpf_program__fd(fentry_skel->progs.test1);
+ err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
- CHECK(err || retval, "ipv6",
+ CHECK(err || retval, "test_run",
"err %d errno %d retval %d duration %d\n",
err, errno, retval, duration);
@@ -39,5 +34,4 @@ void test_fentry_test(void)
cleanup:
fentry_test__destroy(fentry_skel);
- test_pkt_access__destroy(pkt_skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_test.c b/tools/testing/selftests/bpf/prog_tests/fexit_test.c
index d2c3655dd7a3..78d7a2765c27 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_test.c
@@ -1,64 +1,37 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
+#include "fexit_test.skel.h"
void test_fexit_test(void)
{
- struct bpf_prog_load_attr attr = {
- .file = "./fexit_test.o",
- };
-
- char prog_name[] = "fexit/bpf_fentry_testX";
- struct bpf_object *obj = NULL, *pkt_obj;
- int err, pkt_fd, kfree_skb_fd, i;
- struct bpf_link *link[6] = {};
- struct bpf_program *prog[6];
+ struct fexit_test *fexit_skel = NULL;
+ int err, prog_fd, i;
__u32 duration = 0, retval;
- struct bpf_map *data_map;
- const int zero = 0;
- u64 result[6];
+ __u64 *result;
- err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS,
- &pkt_obj, &pkt_fd);
- if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno))
- return;
- err = bpf_prog_load_xattr(&attr, &obj, &kfree_skb_fd);
- if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno))
- goto close_prog;
+ fexit_skel = fexit_test__open_and_load();
+ if (CHECK(!fexit_skel, "fexit_skel_load", "fexit skeleton failed\n"))
+ goto cleanup;
- for (i = 0; i < 6; i++) {
- prog_name[sizeof(prog_name) - 2] = '1' + i;
- prog[i] = bpf_object__find_program_by_title(obj, prog_name);
- if (CHECK(!prog[i], "find_prog", "prog %s not found\n", prog_name))
- goto close_prog;
- link[i] = bpf_program__attach_trace(prog[i]);
- if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n"))
- goto close_prog;
- }
- data_map = bpf_object__find_map_by_name(obj, "fexit_te.bss");
- if (CHECK(!data_map, "find_data_map", "data map not found\n"))
- goto close_prog;
+ err = fexit_test__attach(fexit_skel);
+ if (CHECK(err, "fexit_attach", "fexit attach failed: %d\n", err))
+ goto cleanup;
- err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6),
+ prog_fd = bpf_program__fd(fexit_skel->progs.test1);
+ err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
- CHECK(err || retval, "ipv6",
+ CHECK(err || retval, "test_run",
"err %d errno %d retval %d duration %d\n",
err, errno, retval, duration);
- err = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, &result);
- if (CHECK(err, "get_result",
- "failed to get output data: %d\n", err))
- goto close_prog;
-
- for (i = 0; i < 6; i++)
- if (CHECK(result[i] != 1, "result", "bpf_fentry_test%d failed err %ld\n",
- i + 1, result[i]))
- goto close_prog;
+ result = (__u64 *)fexit_skel->bss;
+ for (i = 0; i < 6; i++) {
+ if (CHECK(result[i] != 1, "result",
+ "fexit_test%d failed err %lld\n", i + 1, result[i]))
+ goto cleanup;
+ }
-close_prog:
- for (i = 0; i < 6; i++)
- if (!IS_ERR_OR_NULL(link[i]))
- bpf_link__destroy(link[i]);
- bpf_object__close(obj);
- bpf_object__close(pkt_obj);
+cleanup:
+ fexit_test__destroy(fexit_skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/link_pinning.c b/tools/testing/selftests/bpf/prog_tests/link_pinning.c
new file mode 100644
index 000000000000..a743288cf384
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/link_pinning.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+
+#include <test_progs.h>
+#include <sys/stat.h>
+
+#include "test_link_pinning.skel.h"
+
+static int duration = 0;
+
+void test_link_pinning_subtest(struct bpf_program *prog,
+ struct test_link_pinning__bss *bss)
+{
+ const char *link_pin_path = "/sys/fs/bpf/pinned_link_test";
+ struct stat statbuf = {};
+ struct bpf_link *link;
+ int err, i;
+
+ link = bpf_program__attach(prog);
+ if (CHECK(IS_ERR(link), "link_attach", "err: %ld\n", PTR_ERR(link)))
+ goto cleanup;
+
+ bss->in = 1;
+ usleep(1);
+ CHECK(bss->out != 1, "res_check1", "exp %d, got %d\n", 1, bss->out);
+
+ /* pin link */
+ err = bpf_link__pin(link, link_pin_path);
+ if (CHECK(err, "link_pin", "err: %d\n", err))
+ goto cleanup;
+
+ CHECK(strcmp(link_pin_path, bpf_link__pin_path(link)), "pin_path1",
+ "exp %s, got %s\n", link_pin_path, bpf_link__pin_path(link));
+
+ /* check that link was pinned */
+ err = stat(link_pin_path, &statbuf);
+ if (CHECK(err, "stat_link", "err %d errno %d\n", err, errno))
+ goto cleanup;
+
+ bss->in = 2;
+ usleep(1);
+ CHECK(bss->out != 2, "res_check2", "exp %d, got %d\n", 2, bss->out);
+
+ /* destroy link, pinned link should keep program attached */
+ bpf_link__destroy(link);
+ link = NULL;
+
+ bss->in = 3;
+ usleep(1);
+ CHECK(bss->out != 3, "res_check3", "exp %d, got %d\n", 3, bss->out);
+
+ /* re-open link from BPFFS */
+ link = bpf_link__open(link_pin_path);
+ if (CHECK(IS_ERR(link), "link_open", "err: %ld\n", PTR_ERR(link)))
+ goto cleanup;
+
+ CHECK(strcmp(link_pin_path, bpf_link__pin_path(link)), "pin_path2",
+ "exp %s, got %s\n", link_pin_path, bpf_link__pin_path(link));
+
+ /* unpin link from BPFFS, program still attached */
+ err = bpf_link__unpin(link);
+ if (CHECK(err, "link_unpin", "err: %d\n", err))
+ goto cleanup;
+
+ /* still active, as we have FD open now */
+ bss->in = 4;
+ usleep(1);
+ CHECK(bss->out != 4, "res_check4", "exp %d, got %d\n", 4, bss->out);
+
+ bpf_link__destroy(link);
+ link = NULL;
+
+ /* Validate it's finally detached.
+ * Actual detachment might get delayed a bit, so there is no reliable
+ * way to validate it immediately here, let's count up for long enough
+ * and see if eventually output stops being updated
+ */
+ for (i = 5; i < 10000; i++) {
+ bss->in = i;
+ usleep(1);
+ if (bss->out == i - 1)
+ break;
+ }
+ CHECK(i == 10000, "link_attached", "got to iteration #%d\n", i);
+
+cleanup:
+ if (!IS_ERR(link))
+ bpf_link__destroy(link);
+}
+
+void test_link_pinning(void)
+{
+ struct test_link_pinning* skel;
+
+ skel = test_link_pinning__open_and_load();
+ if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
+ return;
+
+ if (test__start_subtest("pin_raw_tp"))
+ test_link_pinning_subtest(skel->progs.raw_tp_prog, skel->bss);
+ if (test__start_subtest("pin_tp_btf"))
+ test_link_pinning_subtest(skel->progs.tp_btf_prog, skel->bss);
+
+ test_link_pinning__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/modify_return.c b/tools/testing/selftests/bpf/prog_tests/modify_return.c
new file mode 100644
index 000000000000..97fec70c600b
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/modify_return.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2020 Google LLC.
+ */
+
+#include <test_progs.h>
+#include "modify_return.skel.h"
+
+#define LOWER(x) ((x) & 0xffff)
+#define UPPER(x) ((x) >> 16)
+
+
+static void run_test(__u32 input_retval, __u16 want_side_effect, __s16 want_ret)
+{
+ struct modify_return *skel = NULL;
+ int err, prog_fd;
+ __u32 duration = 0, retval;
+ __u16 side_effect;
+ __s16 ret;
+
+ skel = modify_return__open_and_load();
+ if (CHECK(!skel, "skel_load", "modify_return skeleton failed\n"))
+ goto cleanup;
+
+ err = modify_return__attach(skel);
+ if (CHECK(err, "modify_return", "attach failed: %d\n", err))
+ goto cleanup;
+
+ skel->bss->input_retval = input_retval;
+ prog_fd = bpf_program__fd(skel->progs.fmod_ret_test);
+ err = bpf_prog_test_run(prog_fd, 1, NULL, 0, NULL, 0,
+ &retval, &duration);
+
+ CHECK(err, "test_run", "err %d errno %d\n", err, errno);
+
+ side_effect = UPPER(retval);
+ ret = LOWER(retval);
+
+ CHECK(ret != want_ret, "test_run",
+ "unexpected ret: %d, expected: %d\n", ret, want_ret);
+ CHECK(side_effect != want_side_effect, "modify_return",
+ "unexpected side_effect: %d\n", side_effect);
+
+ CHECK(skel->bss->fentry_result != 1, "modify_return",
+ "fentry failed\n");
+ CHECK(skel->bss->fexit_result != 1, "modify_return",
+ "fexit failed\n");
+ CHECK(skel->bss->fmod_ret_result != 1, "modify_return",
+ "fmod_ret failed\n");
+
+cleanup:
+ modify_return__destroy(skel);
+}
+
+void test_modify_return(void)
+{
+ run_test(0 /* input_retval */,
+ 1 /* want_side_effect */,
+ 4 /* want_ret */);
+ run_test(-EINVAL /* input_retval */,
+ 0 /* want_side_effect */,
+ -EINVAL /* want_ret */);
+}
+
diff --git a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c
new file mode 100644
index 000000000000..542240e16564
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Carlos Neira cneirabustos@gmail.com */
+#include <test_progs.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+
+struct bss {
+ __u64 dev;
+ __u64 ino;
+ __u64 pid_tgid;
+ __u64 user_pid_tgid;
+};
+
+void test_ns_current_pid_tgid(void)
+{
+ const char *probe_name = "raw_tracepoint/sys_enter";
+ const char *file = "test_ns_current_pid_tgid.o";
+ int err, key = 0, duration = 0;
+ struct bpf_link *link = NULL;
+ struct bpf_program *prog;
+ struct bpf_map *bss_map;
+ struct bpf_object *obj;
+ struct bss bss;
+ struct stat st;
+ __u64 id;
+
+ obj = bpf_object__open_file(file, NULL);
+ if (CHECK(IS_ERR(obj), "obj_open", "err %ld\n", PTR_ERR(obj)))
+ return;
+
+ err = bpf_object__load(obj);
+ if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno))
+ goto cleanup;
+
+ bss_map = bpf_object__find_map_by_name(obj, "test_ns_.bss");
+ if (CHECK(!bss_map, "find_bss_map", "failed\n"))
+ goto cleanup;
+
+ prog = bpf_object__find_program_by_title(obj, probe_name);
+ if (CHECK(!prog, "find_prog", "prog '%s' not found\n",
+ probe_name))
+ goto cleanup;
+
+ memset(&bss, 0, sizeof(bss));
+ pid_t tid = syscall(SYS_gettid);
+ pid_t pid = getpid();
+
+ id = (__u64) tid << 32 | pid;
+ bss.user_pid_tgid = id;
+
+ if (CHECK_FAIL(stat("/proc/self/ns/pid", &st))) {
+ perror("Failed to stat /proc/self/ns/pid");
+ goto cleanup;
+ }
+
+ bss.dev = st.st_dev;
+ bss.ino = st.st_ino;
+
+ err = bpf_map_update_elem(bpf_map__fd(bss_map), &key, &bss, 0);
+ if (CHECK(err, "setting_bss", "failed to set bss : %d\n", err))
+ goto cleanup;
+
+ link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
+ if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n",
+ PTR_ERR(link))) {
+ link = NULL;
+ goto cleanup;
+ }
+
+ /* trigger some syscalls */
+ usleep(1);
+
+ err = bpf_map_lookup_elem(bpf_map__fd(bss_map), &key, &bss);
+ if (CHECK(err, "set_bss", "failed to get bss : %d\n", err))
+ goto cleanup;
+
+ if (CHECK(id != bss.pid_tgid, "Compare user pid/tgid vs. bpf pid/tgid",
+ "User pid/tgid %llu BPF pid/tgid %llu\n", id, bss.pid_tgid))
+ goto cleanup;
+cleanup:
+	if (link) {
+ bpf_link__destroy(link);
+ link = NULL;
+ }
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/select_reuseport.c b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c
index a1dd13b34d4b..821b4146b7b6 100644
--- a/tools/testing/selftests/bpf/prog_tests/select_reuseport.c
+++ b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c
@@ -805,12 +805,6 @@ static void test_config(int sotype, sa_family_t family, bool inany)
char s[MAX_TEST_NAME];
const struct test *t;
- /* SOCKMAP/SOCKHASH don't support UDP yet */
- if (sotype == SOCK_DGRAM &&
- (inner_map_type == BPF_MAP_TYPE_SOCKMAP ||
- inner_map_type == BPF_MAP_TYPE_SOCKHASH))
- return;
-
for (t = tests; t < tests + ARRAY_SIZE(tests); t++) {
if (t->need_sotype && t->need_sotype != sotype)
continue; /* test not compatible with socket type */
diff --git a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c
index c6d6b685a946..4538bd08203f 100644
--- a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c
+++ b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c
@@ -14,6 +14,7 @@ void test_skb_ctx(void)
.wire_len = 100,
.gso_segs = 8,
.mark = 9,
+ .gso_size = 10,
};
struct bpf_prog_test_run_attr tattr = {
.data_in = &pkt_v4,
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
index b1b2acea0638..d7d65a700799 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
@@ -16,6 +16,7 @@
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
+#include <sys/select.h>
#include <unistd.h>
#include <bpf/bpf.h>
@@ -25,6 +26,7 @@
#include "test_progs.h"
#include "test_sockmap_listen.skel.h"
+#define IO_TIMEOUT_SEC 30
#define MAX_STRERR_LEN 256
#define MAX_TEST_NAME 80
@@ -44,9 +46,10 @@
/* Wrappers that fail the test on error and report it. */
-#define xaccept(fd, addr, len) \
+#define xaccept_nonblock(fd, addr, len) \
({ \
- int __ret = accept((fd), (addr), (len)); \
+ int __ret = \
+ accept_timeout((fd), (addr), (len), IO_TIMEOUT_SEC); \
if (__ret == -1) \
FAIL_ERRNO("accept"); \
__ret; \
@@ -108,6 +111,23 @@
__ret; \
})
+#define xsend(fd, buf, len, flags) \
+ ({ \
+ ssize_t __ret = send((fd), (buf), (len), (flags)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("send"); \
+ __ret; \
+ })
+
+#define xrecv_nonblock(fd, buf, len, flags) \
+ ({ \
+ ssize_t __ret = recv_timeout((fd), (buf), (len), (flags), \
+ IO_TIMEOUT_SEC); \
+ if (__ret == -1) \
+ FAIL_ERRNO("recv"); \
+ __ret; \
+ })
+
#define xsocket(family, sotype, flags) \
({ \
int __ret = socket(family, sotype, flags); \
@@ -175,6 +195,40 @@
__ret; \
})
+static int poll_read(int fd, unsigned int timeout_sec)
+{
+ struct timeval timeout = { .tv_sec = timeout_sec };
+ fd_set rfds;
+ int r;
+
+ FD_ZERO(&rfds);
+ FD_SET(fd, &rfds);
+
+ r = select(fd + 1, &rfds, NULL, NULL, &timeout);
+ if (r == 0)
+ errno = ETIME;
+
+ return r == 1 ? 0 : -1;
+}
+
+static int accept_timeout(int fd, struct sockaddr *addr, socklen_t *len,
+ unsigned int timeout_sec)
+{
+ if (poll_read(fd, timeout_sec))
+ return -1;
+
+ return accept(fd, addr, len);
+}
+
+static int recv_timeout(int fd, void *buf, size_t len, int flags,
+ unsigned int timeout_sec)
+{
+ if (poll_read(fd, timeout_sec))
+ return -1;
+
+ return recv(fd, buf, len, flags);
+}
+
static void init_addr_loopback4(struct sockaddr_storage *ss, socklen_t *len)
{
struct sockaddr_in *addr4 = memset(ss, 0, sizeof(*ss));
@@ -230,7 +284,7 @@ static int enable_reuseport(int s, int progfd)
return 0;
}
-static int listen_loopback_reuseport(int family, int sotype, int progfd)
+static int socket_loopback_reuseport(int family, int sotype, int progfd)
{
struct sockaddr_storage addr;
socklen_t len;
@@ -249,6 +303,9 @@ static int listen_loopback_reuseport(int family, int sotype, int progfd)
if (err)
goto close;
+ if (sotype & SOCK_DGRAM)
+ return s;
+
err = xlisten(s, SOMAXCONN);
if (err)
goto close;
@@ -259,9 +316,9 @@ close:
return -1;
}
-static int listen_loopback(int family, int sotype)
+static int socket_loopback(int family, int sotype)
{
- return listen_loopback_reuseport(family, sotype, -1);
+ return socket_loopback_reuseport(family, sotype, -1);
}
static void test_insert_invalid(int family, int sotype, int mapfd)
@@ -327,13 +384,13 @@ close:
xclose(s);
}
-static void test_insert_listening(int family, int sotype, int mapfd)
+static void test_insert(int family, int sotype, int mapfd)
{
u64 value;
u32 key;
int s;
- s = listen_loopback(family, sotype);
+ s = socket_loopback(family, sotype);
if (s < 0)
return;
@@ -349,7 +406,7 @@ static void test_delete_after_insert(int family, int sotype, int mapfd)
u32 key;
int s;
- s = listen_loopback(family, sotype);
+ s = socket_loopback(family, sotype);
if (s < 0)
return;
@@ -366,7 +423,7 @@ static void test_delete_after_close(int family, int sotype, int mapfd)
u64 value;
u32 key;
- s = listen_loopback(family, sotype);
+ s = socket_loopback(family, sotype);
if (s < 0)
return;
@@ -390,7 +447,7 @@ static void test_lookup_after_insert(int family, int sotype, int mapfd)
u32 key;
int s;
- s = listen_loopback(family, sotype);
+ s = socket_loopback(family, sotype);
if (s < 0)
return;
@@ -417,7 +474,7 @@ static void test_lookup_after_delete(int family, int sotype, int mapfd)
u64 value;
u32 key;
- s = listen_loopback(family, sotype);
+ s = socket_loopback(family, sotype);
if (s < 0)
return;
@@ -439,7 +496,7 @@ static void test_lookup_32_bit_value(int family, int sotype, int mapfd)
u32 key, value32;
int err, s;
- s = listen_loopback(family, sotype);
+ s = socket_loopback(family, sotype);
if (s < 0)
return;
@@ -464,17 +521,17 @@ close:
xclose(s);
}
-static void test_update_listening(int family, int sotype, int mapfd)
+static void test_update_existing(int family, int sotype, int mapfd)
{
int s1, s2;
u64 value;
u32 key;
- s1 = listen_loopback(family, sotype);
+ s1 = socket_loopback(family, sotype);
if (s1 < 0)
return;
- s2 = listen_loopback(family, sotype);
+ s2 = socket_loopback(family, sotype);
if (s2 < 0)
goto close_s1;
@@ -500,7 +557,7 @@ static void test_destroy_orphan_child(int family, int sotype, int mapfd)
u64 value;
u32 key;
- s = listen_loopback(family, sotype);
+ s = socket_loopback(family, sotype);
if (s < 0)
return;
@@ -534,7 +591,7 @@ static void test_clone_after_delete(int family, int sotype, int mapfd)
u64 value;
u32 key;
- s = listen_loopback(family, sotype);
+ s = socket_loopback(family, sotype);
if (s < 0)
return;
@@ -570,7 +627,7 @@ static void test_accept_after_delete(int family, int sotype, int mapfd)
socklen_t len;
u64 value;
- s = listen_loopback(family, sotype);
+ s = socket_loopback(family, sotype | SOCK_NONBLOCK);
if (s == -1)
return;
@@ -598,7 +655,7 @@ static void test_accept_after_delete(int family, int sotype, int mapfd)
if (err)
goto close_cli;
- p = xaccept(s, NULL, NULL);
+ p = xaccept_nonblock(s, NULL, NULL);
if (p == -1)
goto close_cli;
@@ -624,7 +681,7 @@ static void test_accept_before_delete(int family, int sotype, int mapfd)
socklen_t len;
u64 value;
- s = listen_loopback(family, sotype);
+ s = socket_loopback(family, sotype | SOCK_NONBLOCK);
if (s == -1)
return;
@@ -647,7 +704,7 @@ static void test_accept_before_delete(int family, int sotype, int mapfd)
if (err)
goto close_cli;
- p = xaccept(s, NULL, NULL);
+ p = xaccept_nonblock(s, NULL, NULL);
if (p == -1)
goto close_cli;
@@ -711,7 +768,7 @@ static void *connect_accept_thread(void *arg)
break;
}
- p = xaccept(s, NULL, NULL);
+ p = xaccept_nonblock(s, NULL, NULL);
if (p < 0) {
xclose(c);
break;
@@ -735,7 +792,7 @@ static void test_syn_recv_insert_delete(int family, int sotype, int mapfd)
int err, s;
u64 value;
- s = listen_loopback(family, sotype | SOCK_NONBLOCK);
+ s = socket_loopback(family, sotype | SOCK_NONBLOCK);
if (s < 0)
return;
@@ -877,7 +934,7 @@ static void redir_to_connected(int family, int sotype, int sock_mapfd,
zero_verdict_count(verd_mapfd);
- s = listen_loopback(family, sotype | SOCK_NONBLOCK);
+ s = socket_loopback(family, sotype | SOCK_NONBLOCK);
if (s < 0)
return;
@@ -893,7 +950,7 @@ static void redir_to_connected(int family, int sotype, int sock_mapfd,
if (err)
goto close_cli0;
- p0 = xaccept(s, NULL, NULL);
+ p0 = xaccept_nonblock(s, NULL, NULL);
if (p0 < 0)
goto close_cli0;
@@ -904,7 +961,7 @@ static void redir_to_connected(int family, int sotype, int sock_mapfd,
if (err)
goto close_cli1;
- p1 = xaccept(s, NULL, NULL);
+ p1 = xaccept_nonblock(s, NULL, NULL);
if (p1 < 0)
goto close_cli1;
@@ -1009,7 +1066,7 @@ static void redir_to_listening(int family, int sotype, int sock_mapfd,
zero_verdict_count(verd_mapfd);
- s = listen_loopback(family, sotype | SOCK_NONBLOCK);
+ s = socket_loopback(family, sotype | SOCK_NONBLOCK);
if (s < 0)
return;
@@ -1025,7 +1082,7 @@ static void redir_to_listening(int family, int sotype, int sock_mapfd,
if (err)
goto close_cli;
- p = xaccept(s, NULL, NULL);
+ p = xaccept_nonblock(s, NULL, NULL);
if (p < 0)
goto close_cli;
@@ -1113,14 +1170,15 @@ static void test_reuseport_select_listening(int family, int sotype,
{
struct sockaddr_storage addr;
unsigned int pass;
- int s, c, p, err;
+ int s, c, err;
socklen_t len;
u64 value;
u32 key;
zero_verdict_count(verd_map);
- s = listen_loopback_reuseport(family, sotype, reuseport_prog);
+ s = socket_loopback_reuseport(family, sotype | SOCK_NONBLOCK,
+ reuseport_prog);
if (s < 0)
return;
@@ -1142,19 +1200,33 @@ static void test_reuseport_select_listening(int family, int sotype,
if (err)
goto close_cli;
- p = xaccept(s, NULL, NULL);
- if (p < 0)
- goto close_cli;
+ if (sotype == SOCK_STREAM) {
+ int p;
+
+ p = xaccept_nonblock(s, NULL, NULL);
+ if (p < 0)
+ goto close_cli;
+ xclose(p);
+ } else {
+ char b = 'a';
+ ssize_t n;
+
+ n = xsend(c, &b, sizeof(b), 0);
+ if (n == -1)
+ goto close_cli;
+
+ n = xrecv_nonblock(s, &b, sizeof(b), 0);
+ if (n == -1)
+ goto close_cli;
+ }
key = SK_PASS;
err = xbpf_map_lookup_elem(verd_map, &key, &pass);
if (err)
- goto close_peer;
+ goto close_cli;
if (pass != 1)
FAIL("want pass count 1, have %d", pass);
-close_peer:
- xclose(p);
close_cli:
xclose(c);
close_srv:
@@ -1174,7 +1246,7 @@ static void test_reuseport_select_connected(int family, int sotype,
zero_verdict_count(verd_map);
- s = listen_loopback_reuseport(family, sotype, reuseport_prog);
+ s = socket_loopback_reuseport(family, sotype, reuseport_prog);
if (s < 0)
return;
@@ -1198,9 +1270,24 @@ static void test_reuseport_select_connected(int family, int sotype,
if (err)
goto close_cli0;
- p0 = xaccept(s, NULL, NULL);
- if (err)
- goto close_cli0;
+ if (sotype == SOCK_STREAM) {
+ p0 = xaccept_nonblock(s, NULL, NULL);
+ if (p0 < 0)
+ goto close_cli0;
+ } else {
+ p0 = xsocket(family, sotype, 0);
+ if (p0 < 0)
+ goto close_cli0;
+
+ len = sizeof(addr);
+ err = xgetsockname(c0, sockaddr(&addr), &len);
+ if (err)
+ goto close_cli0;
+
+ err = xconnect(p0, sockaddr(&addr), len);
+ if (err)
+ goto close_cli0;
+ }
/* Update sock_map[0] to redirect to a connected socket */
key = 0;
@@ -1213,8 +1300,24 @@ static void test_reuseport_select_connected(int family, int sotype,
if (c1 < 0)
goto close_peer0;
+ len = sizeof(addr);
+ err = xgetsockname(s, sockaddr(&addr), &len);
+ if (err)
+ goto close_srv;
+
errno = 0;
err = connect(c1, sockaddr(&addr), len);
+ if (sotype == SOCK_DGRAM) {
+ char b = 'a';
+ ssize_t n;
+
+ n = xsend(c1, &b, sizeof(b), 0);
+ if (n == -1)
+ goto close_cli1;
+
+ n = recv_timeout(c1, &b, sizeof(b), 0, IO_TIMEOUT_SEC);
+ err = n == -1;
+ }
if (!err || errno != ECONNREFUSED)
FAIL_ERRNO("connect: expected ECONNREFUSED");
@@ -1249,11 +1352,11 @@ static void test_reuseport_mixed_groups(int family, int sotype, int sock_map,
zero_verdict_count(verd_map);
/* Create two listeners, each in its own reuseport group */
- s1 = listen_loopback_reuseport(family, sotype, reuseport_prog);
+ s1 = socket_loopback_reuseport(family, sotype, reuseport_prog);
if (s1 < 0)
return;
- s2 = listen_loopback_reuseport(family, sotype, reuseport_prog);
+ s2 = socket_loopback_reuseport(family, sotype, reuseport_prog);
if (s2 < 0)
goto close_srv1;
@@ -1278,7 +1381,18 @@ static void test_reuseport_mixed_groups(int family, int sotype, int sock_map,
goto close_srv2;
err = connect(c, sockaddr(&addr), len);
- if (err && errno != ECONNREFUSED) {
+ if (sotype == SOCK_DGRAM) {
+ char b = 'a';
+ ssize_t n;
+
+ n = xsend(c, &b, sizeof(b), 0);
+ if (n == -1)
+ goto close_cli;
+
+ n = recv_timeout(c, &b, sizeof(b), 0, IO_TIMEOUT_SEC);
+ err = n == -1;
+ }
+ if (!err || errno != ECONNREFUSED) {
FAIL_ERRNO("connect: expected ECONNREFUSED");
goto close_cli;
}
@@ -1299,9 +1413,9 @@ close_srv1:
xclose(s1);
}
-#define TEST(fn) \
+#define TEST(fn, ...) \
{ \
- fn, #fn \
+ fn, #fn, __VA_ARGS__ \
}
static void test_ops_cleanup(const struct bpf_map *map)
@@ -1350,18 +1464,31 @@ static const char *map_type_str(const struct bpf_map *map)
}
}
+static const char *sotype_str(int sotype)
+{
+ switch (sotype) {
+ case SOCK_DGRAM:
+ return "UDP";
+ case SOCK_STREAM:
+ return "TCP";
+ default:
+ return "unknown";
+ }
+}
+
static void test_ops(struct test_sockmap_listen *skel, struct bpf_map *map,
int family, int sotype)
{
const struct op_test {
void (*fn)(int family, int sotype, int mapfd);
const char *name;
+ int sotype;
} tests[] = {
/* insert */
TEST(test_insert_invalid),
TEST(test_insert_opened),
- TEST(test_insert_bound),
- TEST(test_insert_listening),
+ TEST(test_insert_bound, SOCK_STREAM),
+ TEST(test_insert),
/* delete */
TEST(test_delete_after_insert),
TEST(test_delete_after_close),
@@ -1370,28 +1497,32 @@ static void test_ops(struct test_sockmap_listen *skel, struct bpf_map *map,
TEST(test_lookup_after_delete),
TEST(test_lookup_32_bit_value),
/* update */
- TEST(test_update_listening),
+ TEST(test_update_existing),
/* races with insert/delete */
- TEST(test_destroy_orphan_child),
- TEST(test_syn_recv_insert_delete),
- TEST(test_race_insert_listen),
+ TEST(test_destroy_orphan_child, SOCK_STREAM),
+ TEST(test_syn_recv_insert_delete, SOCK_STREAM),
+ TEST(test_race_insert_listen, SOCK_STREAM),
/* child clone */
- TEST(test_clone_after_delete),
- TEST(test_accept_after_delete),
- TEST(test_accept_before_delete),
+ TEST(test_clone_after_delete, SOCK_STREAM),
+ TEST(test_accept_after_delete, SOCK_STREAM),
+ TEST(test_accept_before_delete, SOCK_STREAM),
};
- const char *family_name, *map_name;
+ const char *family_name, *map_name, *sotype_name;
const struct op_test *t;
char s[MAX_TEST_NAME];
int map_fd;
family_name = family_str(family);
map_name = map_type_str(map);
+ sotype_name = sotype_str(sotype);
map_fd = bpf_map__fd(map);
for (t = tests; t < tests + ARRAY_SIZE(tests); t++) {
- snprintf(s, sizeof(s), "%s %s %s", map_name, family_name,
- t->name);
+ snprintf(s, sizeof(s), "%s %s %s %s", map_name, family_name,
+ sotype_name, t->name);
+
+ if (t->sotype != 0 && t->sotype != sotype)
+ continue;
if (!test__start_subtest(s))
continue;
@@ -1424,6 +1555,7 @@ static void test_redir(struct test_sockmap_listen *skel, struct bpf_map *map,
for (t = tests; t < tests + ARRAY_SIZE(tests); t++) {
snprintf(s, sizeof(s), "%s %s %s", map_name, family_name,
t->name);
+
if (!test__start_subtest(s))
continue;
@@ -1438,26 +1570,31 @@ static void test_reuseport(struct test_sockmap_listen *skel,
void (*fn)(int family, int sotype, int socket_map,
int verdict_map, int reuseport_prog);
const char *name;
+ int sotype;
} tests[] = {
TEST(test_reuseport_select_listening),
TEST(test_reuseport_select_connected),
TEST(test_reuseport_mixed_groups),
};
int socket_map, verdict_map, reuseport_prog;
- const char *family_name, *map_name;
+ const char *family_name, *map_name, *sotype_name;
const struct reuseport_test *t;
char s[MAX_TEST_NAME];
family_name = family_str(family);
map_name = map_type_str(map);
+ sotype_name = sotype_str(sotype);
socket_map = bpf_map__fd(map);
verdict_map = bpf_map__fd(skel->maps.verdict_map);
reuseport_prog = bpf_program__fd(skel->progs.prog_reuseport);
for (t = tests; t < tests + ARRAY_SIZE(tests); t++) {
- snprintf(s, sizeof(s), "%s %s %s", map_name, family_name,
- t->name);
+ snprintf(s, sizeof(s), "%s %s %s %s", map_name, family_name,
+ sotype_name, t->name);
+
+ if (t->sotype != 0 && t->sotype != sotype)
+ continue;
if (!test__start_subtest(s))
continue;
@@ -1470,8 +1607,10 @@ static void run_tests(struct test_sockmap_listen *skel, struct bpf_map *map,
int family)
{
test_ops(skel, map, family, SOCK_STREAM);
+ test_ops(skel, map, family, SOCK_DGRAM);
test_redir(skel, map, family, SOCK_STREAM);
test_reuseport(skel, map, family, SOCK_STREAM);
+ test_reuseport(skel, map, family, SOCK_DGRAM);
}
void test_sockmap_listen(void)
diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c
index f4cd60d6fba2..e08f6bb17700 100644
--- a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c
+++ b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c
@@ -188,7 +188,7 @@ static int start_server(void)
};
int fd;
- fd = socket(AF_INET, SOCK_STREAM, 0);
+ fd = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);
if (fd < 0) {
log_err("Failed to create server socket");
return -1;
@@ -205,6 +205,7 @@ static int start_server(void)
static pthread_mutex_t server_started_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t server_started = PTHREAD_COND_INITIALIZER;
+static volatile bool server_done = false;
static void *server_thread(void *arg)
{
@@ -222,23 +223,24 @@ static void *server_thread(void *arg)
if (CHECK_FAIL(err < 0)) {
perror("Failed to listed on socket");
- return NULL;
+ return ERR_PTR(err);
}
- client_fd = accept(fd, (struct sockaddr *)&addr, &len);
+ while (!server_done) {
+ client_fd = accept(fd, (struct sockaddr *)&addr, &len);
+ if (client_fd == -1 && errno == EAGAIN) {
+ usleep(50);
+ continue;
+ }
+ break;
+ }
if (CHECK_FAIL(client_fd < 0)) {
perror("Failed to accept client");
- return NULL;
+ return ERR_PTR(err);
}
- /* Wait for the next connection (that never arrives)
- * to keep this thread alive to prevent calling
- * close() on client_fd.
- */
- if (CHECK_FAIL(accept(fd, (struct sockaddr *)&addr, &len) >= 0)) {
- perror("Unexpected success in second accept");
- return NULL;
- }
+ while (!server_done)
+ usleep(50);
close(client_fd);
@@ -249,6 +251,7 @@ void test_tcp_rtt(void)
{
int server_fd, cgroup_fd;
pthread_t tid;
+ void *server_res;
cgroup_fd = test__join_cgroup("/tcp_rtt");
if (CHECK_FAIL(cgroup_fd < 0))
@@ -267,6 +270,11 @@ void test_tcp_rtt(void)
pthread_mutex_unlock(&server_started_mtx);
CHECK_FAIL(run_test(cgroup_fd, server_fd));
+
+ server_done = true;
+ pthread_join(tid, &server_res);
+ CHECK_FAIL(IS_ERR(server_res));
+
close_server_fd:
close(server_fd);
close_cgroup_fd:
diff --git a/tools/testing/selftests/bpf/prog_tests/vmlinux.c b/tools/testing/selftests/bpf/prog_tests/vmlinux.c
new file mode 100644
index 000000000000..04939eda1325
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/vmlinux.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+
+#include <test_progs.h>
+#include <time.h>
+#include "test_vmlinux.skel.h"
+
+#define MY_TV_NSEC 1337
+
+static void nsleep()
+{
+ struct timespec ts = { .tv_nsec = MY_TV_NSEC };
+
+ (void)nanosleep(&ts, NULL);
+}
+
+void test_vmlinux(void)
+{
+ int duration = 0, err;
+ struct test_vmlinux *skel;
+ struct test_vmlinux__bss *bss;
+
+ skel = test_vmlinux__open_and_load();
+ if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
+ return;
+ bss = skel->bss;
+
+ err = test_vmlinux__attach(skel);
+ if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
+ goto cleanup;
+
+ /* trigger everything */
+ nsleep();
+
+ CHECK(!bss->tp_called, "tp", "not called\n");
+ CHECK(!bss->raw_tp_called, "raw_tp", "not called\n");
+ CHECK(!bss->tp_btf_called, "tp_btf", "not called\n");
+ CHECK(!bss->kprobe_called, "kprobe", "not called\n");
+ CHECK(!bss->fentry_called, "fentry", "not called\n");
+
+cleanup:
+ test_vmlinux__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c
index 4ba011031d4c..a0f688c37023 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c
@@ -4,17 +4,51 @@
#include "test_xdp.skel.h"
#include "test_xdp_bpf2bpf.skel.h"
+struct meta {
+ int ifindex;
+ int pkt_len;
+};
+
+static void on_sample(void *ctx, int cpu, void *data, __u32 size)
+{
+ int duration = 0;
+ struct meta *meta = (struct meta *)data;
+ struct ipv4_packet *trace_pkt_v4 = data + sizeof(*meta);
+
+ if (CHECK(size < sizeof(pkt_v4) + sizeof(*meta),
+ "check_size", "size %u < %zu\n",
+ size, sizeof(pkt_v4) + sizeof(*meta)))
+ return;
+
+ if (CHECK(meta->ifindex != if_nametoindex("lo"), "check_meta_ifindex",
+ "meta->ifindex = %d\n", meta->ifindex))
+ return;
+
+ if (CHECK(meta->pkt_len != sizeof(pkt_v4), "check_meta_pkt_len",
+ "meta->pkt_len = %zd\n", sizeof(pkt_v4)))
+ return;
+
+ if (CHECK(memcmp(trace_pkt_v4, &pkt_v4, sizeof(pkt_v4)),
+ "check_packet_content", "content not the same\n"))
+ return;
+
+ *(bool *)ctx = true;
+}
+
void test_xdp_bpf2bpf(void)
{
__u32 duration = 0, retval, size;
char buf[128];
int err, pkt_fd, map_fd;
+ bool passed = false;
struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
struct iptnl_info value4 = {.family = AF_INET};
struct test_xdp *pkt_skel = NULL;
struct test_xdp_bpf2bpf *ftrace_skel = NULL;
struct vip key4 = {.protocol = 6, .family = AF_INET};
struct bpf_program *prog;
+ struct perf_buffer *pb = NULL;
+ struct perf_buffer_opts pb_opts = {};
/* Load XDP program to introspect */
pkt_skel = test_xdp__open_and_load();
@@ -50,6 +84,14 @@ void test_xdp_bpf2bpf(void)
if (CHECK(err, "ftrace_attach", "ftrace attach failed: %d\n", err))
goto out;
+ /* Set up perf buffer */
+ pb_opts.sample_cb = on_sample;
+ pb_opts.ctx = &passed;
+ pb = perf_buffer__new(bpf_map__fd(ftrace_skel->maps.perf_buf_map),
+ 1, &pb_opts);
+ if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb)))
+ goto out;
+
/* Run test program */
err = bpf_prog_test_run(pkt_fd, 1, &pkt_v4, sizeof(pkt_v4),
buf, &size, &retval, &duration);
@@ -60,6 +102,15 @@ void test_xdp_bpf2bpf(void)
err, errno, retval, size))
goto out;
+ /* Make sure bpf_xdp_output() was triggered and it sent the expected
+ * data to the perf ring buffer.
+ */
+ err = perf_buffer__poll(pb, 100);
+ if (CHECK(err < 0, "perf_buffer__poll", "err %d\n", err))
+ goto out;
+
+ CHECK_FAIL(!passed);
+
/* Verify test results */
if (CHECK(ftrace_skel->bss->test_result_fentry != if_nametoindex("lo"),
"result", "fentry failed err %llu\n",
@@ -70,6 +121,8 @@ void test_xdp_bpf2bpf(void)
"fexit failed err %llu\n", ftrace_skel->bss->test_result_fexit);
out:
+ if (pb)
+ perf_buffer__free(pb);
test_xdp__destroy(pkt_skel);
test_xdp_bpf2bpf__destroy(ftrace_skel);
}
diff --git a/tools/testing/selftests/bpf/progs/bpf_dctcp.c b/tools/testing/selftests/bpf/progs/bpf_dctcp.c
index b631fb5032d2..127ea762a062 100644
--- a/tools/testing/selftests/bpf/progs/bpf_dctcp.c
+++ b/tools/testing/selftests/bpf/progs/bpf_dctcp.c
@@ -9,7 +9,7 @@
#include <linux/bpf.h>
#include <linux/types.h>
#include <bpf/bpf_helpers.h>
-#include "bpf_trace_helpers.h"
+#include <bpf/bpf_tracing.h>
#include "bpf_tcp_helpers.h"
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
index d4a02fe44a12..31975c96e2c9 100644
--- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
+++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
@@ -13,7 +13,7 @@ enum e1 {
enum e2 {
C = 100,
- D = -100,
+ D = 4294967295,
E = 0,
};
diff --git a/tools/testing/selftests/bpf/progs/fentry_test.c b/tools/testing/selftests/bpf/progs/fentry_test.c
index 38d3a82144ca..9365b686f84b 100644
--- a/tools/testing/selftests/bpf/progs/fentry_test.c
+++ b/tools/testing/selftests/bpf/progs/fentry_test.c
@@ -2,7 +2,7 @@
/* Copyright (c) 2019 Facebook */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-#include "bpf_trace_helpers.h"
+#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c
index c329fccf9842..98e1efe14549 100644
--- a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c
@@ -5,7 +5,7 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
-#include "bpf_trace_helpers.h"
+#include <bpf/bpf_tracing.h>
struct sk_buff {
unsigned int len;
diff --git a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf_simple.c b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf_simple.c
index 92f3fa47cf40..85c0b516d6ee 100644
--- a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf_simple.c
+++ b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf_simple.c
@@ -2,7 +2,7 @@
/* Copyright (c) 2019 Facebook */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-#include "bpf_trace_helpers.h"
+#include <bpf/bpf_tracing.h>
struct sk_buff {
unsigned int len;
diff --git a/tools/testing/selftests/bpf/progs/fexit_test.c b/tools/testing/selftests/bpf/progs/fexit_test.c
index 348109b9ea07..bd1e17d8024c 100644
--- a/tools/testing/selftests/bpf/progs/fexit_test.c
+++ b/tools/testing/selftests/bpf/progs/fexit_test.c
@@ -2,7 +2,7 @@
/* Copyright (c) 2019 Facebook */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-#include "bpf_trace_helpers.h"
+#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/kfree_skb.c b/tools/testing/selftests/bpf/progs/kfree_skb.c
index 8f48a909f079..a46a264ce24e 100644
--- a/tools/testing/selftests/bpf/progs/kfree_skb.c
+++ b/tools/testing/selftests/bpf/progs/kfree_skb.c
@@ -4,7 +4,7 @@
#include <stdbool.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
-#include "bpf_trace_helpers.h"
+#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
struct {
diff --git a/tools/testing/selftests/bpf/progs/modify_return.c b/tools/testing/selftests/bpf/progs/modify_return.c
new file mode 100644
index 000000000000..8b7466a15c6b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/modify_return.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2020 Google LLC.
+ */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+static int sequence = 0;
+__s32 input_retval = 0;
+
+__u64 fentry_result = 0;
+SEC("fentry/bpf_modify_return_test")
+int BPF_PROG(fentry_test, int a, __u64 b)
+{
+ sequence++;
+ fentry_result = (sequence == 1);
+ return 0;
+}
+
+__u64 fmod_ret_result = 0;
+SEC("fmod_ret/bpf_modify_return_test")
+int BPF_PROG(fmod_ret_test, int a, int *b, int ret)
+{
+ sequence++;
+ /* This is the first fmod_ret program, the ret passed should be 0 */
+ fmod_ret_result = (sequence == 2 && ret == 0);
+ return input_retval;
+}
+
+__u64 fexit_result = 0;
+SEC("fexit/bpf_modify_return_test")
+int BPF_PROG(fexit_test, int a, __u64 b, int ret)
+{
+ sequence++;
+ /* If input_retval is non-zero a successful modification should have
+ * occurred.
+ */
+ if (input_retval)
+ fexit_result = (sequence == 3 && ret == input_retval);
+ else
+ fexit_result = (sequence == 3 && ret == 4);
+
+ return 0;
+}
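The fmod_ret program above is what makes this test interesting: a non-zero return from an fmod_ret program attached to bpf_modify_return_test() prevents the traced function from running and becomes the value its caller sees, which the fexit program then observes as ret (4 when the function runs unmodified, per the checks above). A rough sketch of how a userspace driver can exercise both cases; the skeleton names follow the usual bpftool gen skeleton conventions for modify_return.o, and the trigger is left as a comment since the authoritative version is prog_tests/modify_return.c from this series:

	#include "modify_return.skel.h"

	static void run_once(int input_retval)
	{
		struct modify_return *skel;

		skel = modify_return__open_and_load();
		if (!skel)
			return;

		/* 0 lets bpf_modify_return_test() run normally (fexit sees 4);
		 * non-zero short-circuits it and fexit sees input_retval.
		 */
		skel->bss->input_retval = input_retval;

		if (modify_return__attach(skel))
			goto cleanup;

		/* Trigger bpf_modify_return_test() here, e.g. via
		 * BPF_PROG_TEST_RUN, then check skel->bss->fentry_result,
		 * fmod_ret_result and fexit_result.
		 */
	cleanup:
		modify_return__destroy(skel);
	}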
diff --git a/tools/testing/selftests/bpf/progs/test_attach_probe.c b/tools/testing/selftests/bpf/progs/test_attach_probe.c
index dd8fae6660ab..8056a4c6d918 100644
--- a/tools/testing/selftests/bpf/progs/test_attach_probe.c
+++ b/tools/testing/selftests/bpf/progs/test_attach_probe.c
@@ -4,6 +4,7 @@
#include <linux/ptrace.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
int kprobe_res = 0;
int kretprobe_res = 0;
@@ -18,7 +19,7 @@ int handle_kprobe(struct pt_regs *ctx)
}
SEC("kretprobe/sys_nanosleep")
-int handle_kretprobe(struct pt_regs *ctx)
+int BPF_KRETPROBE(handle_kretprobe)
{
kretprobe_res = 2;
return 0;
diff --git a/tools/testing/selftests/bpf/progs/test_link_pinning.c b/tools/testing/selftests/bpf/progs/test_link_pinning.c
new file mode 100644
index 000000000000..bbf2a5264dc0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_link_pinning.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+
+#include <stdbool.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+int in = 0;
+int out = 0;
+
+SEC("raw_tp/sys_enter")
+int raw_tp_prog(const void *ctx)
+{
+ out = in;
+ return 0;
+}
+
+SEC("tp_btf/sys_enter")
+int tp_btf_prog(const void *ctx)
+{
+ out = in;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c b/tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c
new file mode 100644
index 000000000000..1dca70a6de2f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Carlos Neira cneirabustos@gmail.com */
+
+#include <linux/bpf.h>
+#include <stdint.h>
+#include <bpf/bpf_helpers.h>
+
+static volatile struct {
+ __u64 dev;
+ __u64 ino;
+ __u64 pid_tgid;
+ __u64 user_pid_tgid;
+} res;
+
+SEC("raw_tracepoint/sys_enter")
+int trace(void *ctx)
+{
+ __u64 ns_pid_tgid, expected_pid;
+ struct bpf_pidns_info nsdata;
+ __u32 key = 0;
+
+ if (bpf_get_ns_current_pid_tgid(res.dev, res.ino, &nsdata,
+ sizeof(struct bpf_pidns_info)))
+ return 0;
+
+ ns_pid_tgid = (__u64)nsdata.tgid << 32 | nsdata.pid;
+ expected_pid = res.user_pid_tgid;
+
+ if (expected_pid != ns_pid_tgid)
+ return 0;
+
+ res.pid_tgid = ns_pid_tgid;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_overhead.c b/tools/testing/selftests/bpf/progs/test_overhead.c
index bfe9fbcb9684..56a50b25cd33 100644
--- a/tools/testing/selftests/bpf/progs/test_overhead.c
+++ b/tools/testing/selftests/bpf/progs/test_overhead.c
@@ -6,7 +6,6 @@
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-#include "bpf_trace_helpers.h"
struct task_struct;
@@ -17,11 +16,9 @@ int BPF_KPROBE(prog1, struct task_struct *tsk, const char *buf, bool exec)
}
SEC("kretprobe/__set_task_comm")
-int BPF_KRETPROBE(prog2,
- struct task_struct *tsk, const char *buf, bool exec,
- int ret)
+int BPF_KRETPROBE(prog2, int ret)
{
- return !PT_REGS_PARM1(ctx) && ret;
+ return ret;
}
SEC("raw_tp/task_rename")
diff --git a/tools/testing/selftests/bpf/progs/test_perf_branches.c b/tools/testing/selftests/bpf/progs/test_perf_branches.c
index 0f7e27d97567..a1ccc831c882 100644
--- a/tools/testing/selftests/bpf/progs/test_perf_branches.c
+++ b/tools/testing/selftests/bpf/progs/test_perf_branches.c
@@ -5,7 +5,7 @@
#include <linux/ptrace.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-#include "bpf_trace_helpers.h"
+#include <bpf/bpf_tracing.h>
int valid = 0;
int required_size_out = 0;
diff --git a/tools/testing/selftests/bpf/progs/test_perf_buffer.c b/tools/testing/selftests/bpf/progs/test_perf_buffer.c
index ebfcc9f50c35..ad59c4c9aba8 100644
--- a/tools/testing/selftests/bpf/progs/test_perf_buffer.c
+++ b/tools/testing/selftests/bpf/progs/test_perf_buffer.c
@@ -4,7 +4,7 @@
#include <linux/ptrace.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-#include "bpf_trace_helpers.h"
+#include <bpf/bpf_tracing.h>
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
diff --git a/tools/testing/selftests/bpf/progs/test_probe_user.c b/tools/testing/selftests/bpf/progs/test_probe_user.c
index d556b1572cc6..89b3532ccc75 100644
--- a/tools/testing/selftests/bpf/progs/test_probe_user.c
+++ b/tools/testing/selftests/bpf/progs/test_probe_user.c
@@ -7,7 +7,6 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-#include "bpf_trace_helpers.h"
static struct sockaddr_in old;
diff --git a/tools/testing/selftests/bpf/progs/test_skb_ctx.c b/tools/testing/selftests/bpf/progs/test_skb_ctx.c
index 202de3938494..b02ea589ce7e 100644
--- a/tools/testing/selftests/bpf/progs/test_skb_ctx.c
+++ b/tools/testing/selftests/bpf/progs/test_skb_ctx.c
@@ -23,6 +23,8 @@ int process(struct __sk_buff *skb)
return 1;
if (skb->gso_segs != 8)
return 1;
+ if (skb->gso_size != 10)
+ return 1;
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_trampoline_count.c b/tools/testing/selftests/bpf/progs/test_trampoline_count.c
index e51e6e3a81c2..f030e469d05b 100644
--- a/tools/testing/selftests/bpf/progs/test_trampoline_count.c
+++ b/tools/testing/selftests/bpf/progs/test_trampoline_count.c
@@ -2,7 +2,8 @@
#include <stdbool.h>
#include <stddef.h>
#include <linux/bpf.h>
-#include "bpf_trace_helpers.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
struct task_struct;
diff --git a/tools/testing/selftests/bpf/progs/test_vmlinux.c b/tools/testing/selftests/bpf/progs/test_vmlinux.c
new file mode 100644
index 000000000000..5611b564d3b1
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_vmlinux.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+
+#include "vmlinux.h"
+#include <asm/unistd.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+#define MY_TV_NSEC 1337
+
+bool tp_called = false;
+bool raw_tp_called = false;
+bool tp_btf_called = false;
+bool kprobe_called = false;
+bool fentry_called = false;
+
+SEC("tp/syscalls/sys_enter_nanosleep")
+int handle__tp(struct trace_event_raw_sys_enter *args)
+{
+ struct __kernel_timespec *ts;
+
+ if (args->id != __NR_nanosleep)
+ return 0;
+
+ ts = (void *)args->args[0];
+ if (BPF_CORE_READ(ts, tv_nsec) != MY_TV_NSEC)
+ return 0;
+
+ tp_called = true;
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+int BPF_PROG(handle__raw_tp, struct pt_regs *regs, long id)
+{
+ struct __kernel_timespec *ts;
+
+ if (id != __NR_nanosleep)
+ return 0;
+
+ ts = (void *)PT_REGS_PARM1_CORE(regs);
+ if (BPF_CORE_READ(ts, tv_nsec) != MY_TV_NSEC)
+ return 0;
+
+ raw_tp_called = true;
+ return 0;
+}
+
+SEC("tp_btf/sys_enter")
+int BPF_PROG(handle__tp_btf, struct pt_regs *regs, long id)
+{
+ struct __kernel_timespec *ts;
+
+ if (id != __NR_nanosleep)
+ return 0;
+
+ ts = (void *)PT_REGS_PARM1_CORE(regs);
+ if (BPF_CORE_READ(ts, tv_nsec) != MY_TV_NSEC)
+ return 0;
+
+ tp_btf_called = true;
+ return 0;
+}
+
+SEC("kprobe/hrtimer_nanosleep")
+int BPF_KPROBE(handle__kprobe,
+ ktime_t rqtp, enum hrtimer_mode mode, clockid_t clockid)
+{
+ if (rqtp == MY_TV_NSEC)
+ kprobe_called = true;
+ return 0;
+}
+
+SEC("fentry/hrtimer_nanosleep")
+int BPF_PROG(handle__fentry,
+ ktime_t rqtp, enum hrtimer_mode mode, clockid_t clockid)
+{
+ if (rqtp == MY_TV_NSEC)
+ fentry_called = true;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
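All of the handlers above take their types from the generated vmlinux.h rather than from hand-copied struct definitions, so accesses such as args->args[0], struct __kernel_timespec and enum hrtimer_mode are resolved against kernel BTF and relocated by CO-RE at load time. The header itself comes from the running kernel's BTF; the selftests Makefile change in this series adds a rule roughly equivalent to:

	bpftool btf dump file /sys/kernel/btf/vmlinux format c > vmlinux.h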
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c b/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c
index b840fc9e3ed5..a038e827f850 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
+#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
-#include "bpf_trace_helpers.h"
+
+char _license[] SEC("license") = "GPL";
struct net_device {
/* Structure does not need to contain all entries,
@@ -27,10 +29,32 @@ struct xdp_buff {
struct xdp_rxq_info *rxq;
} __attribute__((preserve_access_index));
+struct meta {
+ int ifindex;
+ int pkt_len;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(int));
+} perf_buf_map SEC(".maps");
+
__u64 test_result_fentry = 0;
SEC("fentry/FUNC")
int BPF_PROG(trace_on_entry, struct xdp_buff *xdp)
{
+ struct meta meta;
+ void *data_end = (void *)(long)xdp->data_end;
+ void *data = (void *)(long)xdp->data;
+
+ meta.ifindex = xdp->rxq->dev->ifindex;
+ meta.pkt_len = data_end - data;
+ bpf_xdp_output(xdp, &perf_buf_map,
+ ((__u64) meta.pkt_len << 32) |
+ BPF_F_CURRENT_CPU,
+ &meta, sizeof(meta));
+
test_result_fentry = xdp->rxq->dev->ifindex;
return 0;
}
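The flags argument to bpf_xdp_output() follows the bpf_perf_event_output() convention: the lower 32 bits select the CPU (BPF_F_CURRENT_CPU here) and the upper 32 bits give the number of packet bytes the helper copies out after the user-supplied metadata, which is why on_sample() above receives struct meta immediately followed by the packet. Written out more explicitly (illustrative only, equivalent to the call above):

	__u64 flags = BPF_F_CURRENT_CPU;

	/* upper 32 bits: number of ctx (packet) bytes appended after &meta */
	flags |= (__u64)meta.pkt_len << 32;
	bpf_xdp_output(xdp, &perf_buf_map, flags, &meta, sizeof(meta));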
diff --git a/tools/testing/selftests/bpf/test_current_pid_tgid_new_ns.c b/tools/testing/selftests/bpf/test_current_pid_tgid_new_ns.c
new file mode 100644
index 000000000000..ed253f252cd0
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_current_pid_tgid_new_ns.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Carlos Neira cneirabustos@gmail.com */
+#define _GNU_SOURCE
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <sched.h>
+#include <sys/wait.h>
+#include <sys/mount.h>
+#include "test_progs.h"
+
+#define CHECK_NEWNS(condition, tag, format...) ({ \
+ int __ret = !!(condition); \
+ if (__ret) { \
+ printf("%s:FAIL:%s ", __func__, tag); \
+ printf(format); \
+ } else { \
+ printf("%s:PASS:%s\n", __func__, tag); \
+ } \
+ __ret; \
+})
+
+struct bss {
+ __u64 dev;
+ __u64 ino;
+ __u64 pid_tgid;
+ __u64 user_pid_tgid;
+};
+
+int main(int argc, char **argv)
+{
+ pid_t pid;
+ int exit_code = 1;
+ struct stat st;
+
+ printf("Testing bpf_get_ns_current_pid_tgid helper in new ns\n");
+
+ if (stat("/proc/self/ns/pid", &st)) {
+ perror("stat failed on /proc/self/ns/pid ns\n");
+ printf("%s:FAILED\n", argv[0]);
+ return exit_code;
+ }
+
+ if (CHECK_NEWNS(unshare(CLONE_NEWPID | CLONE_NEWNS),
+ "unshare CLONE_NEWPID | CLONE_NEWNS", "error errno=%d\n", errno))
+ return exit_code;
+
+ pid = fork();
+ if (pid == -1) {
+ perror("Fork() failed\n");
+ printf("%s:FAILED\n", argv[0]);
+ return exit_code;
+ }
+
+ if (pid > 0) {
+ int status;
+
+ usleep(5);
+ waitpid(pid, &status, 0);
+ return 0;
+ } else {
+
+ pid = fork();
+ if (pid == -1) {
+ perror("Fork() failed\n");
+ printf("%s:FAILED\n", argv[0]);
+ return exit_code;
+ }
+
+ if (pid > 0) {
+ int status;
+ waitpid(pid, &status, 0);
+ return 0;
+ } else {
+ if (CHECK_NEWNS(mount("none", "/proc", NULL, MS_PRIVATE|MS_REC, NULL),
+ "Unmounting proc", "Cannot umount proc! errno=%d\n", errno))
+ return exit_code;
+
+ if (CHECK_NEWNS(mount("proc", "/proc", "proc", MS_NOSUID|MS_NOEXEC|MS_NODEV, NULL),
+ "Mounting proc", "Cannot mount proc! errno=%d\n", errno))
+ return exit_code;
+
+ const char *probe_name = "raw_tracepoint/sys_enter";
+ const char *file = "test_ns_current_pid_tgid.o";
+ struct bpf_link *link = NULL;
+ struct bpf_program *prog;
+ struct bpf_map *bss_map;
+ struct bpf_object *obj;
+ int exit_code = 1;
+ int err, key = 0;
+ struct bss bss;
+ struct stat st;
+ __u64 id;
+
+ obj = bpf_object__open_file(file, NULL);
+ if (CHECK_NEWNS(IS_ERR(obj), "obj_open", "err %ld\n", PTR_ERR(obj)))
+ return exit_code;
+
+ err = bpf_object__load(obj);
+ if (CHECK_NEWNS(err, "obj_load", "err %d errno %d\n", err, errno))
+ goto cleanup;
+
+ bss_map = bpf_object__find_map_by_name(obj, "test_ns_.bss");
+ if (CHECK_NEWNS(!bss_map, "find_bss_map", "failed\n"))
+ goto cleanup;
+
+ prog = bpf_object__find_program_by_title(obj, probe_name);
+ if (CHECK_NEWNS(!prog, "find_prog", "prog '%s' not found\n",
+ probe_name))
+ goto cleanup;
+
+ memset(&bss, 0, sizeof(bss));
+ pid_t tid = syscall(SYS_gettid);
+ pid_t pid = getpid();
+
+ id = (__u64) tid << 32 | pid;
+ bss.user_pid_tgid = id;
+
+ if (CHECK_NEWNS(stat("/proc/self/ns/pid", &st),
+ "stat new ns", "Failed to stat /proc/self/ns/pid errno=%d\n", errno))
+ goto cleanup;
+
+ bss.dev = st.st_dev;
+ bss.ino = st.st_ino;
+
+ err = bpf_map_update_elem(bpf_map__fd(bss_map), &key, &bss, 0);
+ if (CHECK_NEWNS(err, "setting_bss", "failed to set bss : %d\n", err))
+ goto cleanup;
+
+ link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
+ if (CHECK_NEWNS(IS_ERR(link), "attach_raw_tp", "err %ld\n",
+ PTR_ERR(link))) {
+ link = NULL;
+ goto cleanup;
+ }
+
+ /* trigger some syscalls */
+ usleep(1);
+
+ err = bpf_map_lookup_elem(bpf_map__fd(bss_map), &key, &bss);
+ if (CHECK_NEWNS(err, "set_bss", "failed to get bss : %d\n", err))
+ goto cleanup;
+
+ if (CHECK_NEWNS(id != bss.pid_tgid, "Compare user pid/tgid vs. bpf pid/tgid",
+ "User pid/tgid %llu BPF pid/tgid %llu\n", id, bss.pid_tgid))
+ goto cleanup;
+
+ exit_code = 0;
+ printf("%s:PASS\n", argv[0]);
+cleanup:
+ if (link) {
+ bpf_link__destroy(link);
+ link = NULL;
+ }
+ bpf_object__close(obj);
+ }
+ }
+}
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index a969c77e9456..f85a06512541 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -29,6 +29,24 @@ struct prog_test_def {
int old_error_cnt;
};
+/* Override C runtime library's usleep() implementation to ensure nanosleep()
+ * is always called. Usleep is frequently used in selftests as a way to
+ * trigger kprobes and tracepoints.
+ */
+int usleep(useconds_t usec)
+{
+ struct timespec ts;
+
+ if (usec > 999999) {
+ ts.tv_sec = usec / 1000000;
+ ts.tv_nsec = (usec % 1000000) * 1000;
+ } else {
+ ts.tv_sec = 0;
+ ts.tv_nsec = usec * 1000;
+ }
+ return nanosleep(&ts, NULL);
+}
+
static bool should_run(struct test_selector *sel, int num, const char *name)
{
int i;
@@ -198,7 +216,7 @@ int bpf_find_map(const char *test, struct bpf_object *obj, const char *name)
map = bpf_object__find_map_by_name(obj, name);
if (!map) {
- printf("%s:FAIL:map '%s' not found\n", test, name);
+ fprintf(stdout, "%s:FAIL:map '%s' not found\n", test, name);
test__fail();
return -1;
}
@@ -369,7 +387,7 @@ static int libbpf_print_fn(enum libbpf_print_level level,
{
if (env.verbosity < VERBOSE_VERY && level == LIBBPF_DEBUG)
return 0;
- vprintf(format, args);
+ vfprintf(stdout, format, args);
return 0;
}
@@ -615,7 +633,7 @@ int cd_flavor_subdir(const char *exec_name)
if (!flavor)
return 0;
flavor++;
- printf("Switching to flavor '%s' subdirectory...\n", flavor);
+ fprintf(stdout, "Switching to flavor '%s' subdirectory...\n", flavor);
return chdir(flavor);
}
@@ -698,8 +716,8 @@ int main(int argc, char **argv)
cleanup_cgroup_environment();
}
stdio_restore();
- printf("Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
- env.succ_cnt, env.sub_succ_cnt, env.skip_cnt, env.fail_cnt);
+ fprintf(stdout, "Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
+ env.succ_cnt, env.sub_succ_cnt, env.skip_cnt, env.fail_cnt);
free(env.test_selector.blacklist.strs);
free(env.test_selector.whitelist.strs);
diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
index bcfa9ef23fda..fd85fa61dbf7 100644
--- a/tools/testing/selftests/bpf/test_progs.h
+++ b/tools/testing/selftests/bpf/test_progs.h
@@ -109,10 +109,10 @@ extern struct ipv6_packet pkt_v6;
int __save_errno = errno; \
if (__ret) { \
test__fail(); \
- printf("%s:FAIL:%s ", __func__, tag); \
- printf(format); \
+ fprintf(stdout, "%s:FAIL:%s ", __func__, tag); \
+ fprintf(stdout, ##format); \
} else { \
- printf("%s:PASS:%s %d nsec\n", \
+ fprintf(stdout, "%s:PASS:%s %d nsec\n", \
__func__, tag, duration); \
} \
errno = __save_errno; \
@@ -124,7 +124,7 @@ extern struct ipv6_packet pkt_v6;
int __save_errno = errno; \
if (__ret) { \
test__fail(); \
- printf("%s:FAIL:%d\n", __func__, __LINE__); \
+ fprintf(stdout, "%s:FAIL:%d\n", __func__, __LINE__); \
} \
errno = __save_errno; \
__ret; \
diff --git a/tools/testing/selftests/bpf/verifier/ctx_skb.c b/tools/testing/selftests/bpf/verifier/ctx_skb.c
index d438193804b2..2e16b8e268f2 100644
--- a/tools/testing/selftests/bpf/verifier/ctx_skb.c
+++ b/tools/testing/selftests/bpf/verifier/ctx_skb.c
@@ -1011,6 +1011,53 @@
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
+ "read gso_size from CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, gso_size)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "read gso_size from CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, gso_size)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "write gso_size from CGROUP_SKB",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, gso_size)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ .errstr = "invalid bpf_context access off=176 size=4",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "read gso_size from CLS",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, gso_size)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
"check wire_len is not readable by sockets",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,