author    | Jakub Kicinski <kuba@kernel.org> | 2022-07-23 02:55:43 +0300
committer | Jakub Kicinski <kuba@kernel.org> | 2022-07-23 02:55:44 +0300
commit    | b3fce974d4239bd46ae81bba07b59f255eb979d7 (patch)
tree      | 9856a94f13b97f74e831664899b1ab9d1e0c7b69 /samples
parent    | 3c47fb2f4c4df33881fa540e35e21415a6ecfbb5 (diff)
parent    | ea2babac63d40e59926dc5de4550dac94cc3c6d2 (diff)
download  | linux-b3fce974d4239bd46ae81bba07b59f255eb979d7.tar.xz
Merge https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:
====================
bpf-next 2022-07-22
We've added 73 non-merge commits during the last 12 day(s) which contain
a total of 88 files changed, 3458 insertions(+), 860 deletions(-).
The main changes are:
1) Implement BPF trampoline for arm64 JIT, from Xu Kuohai.
2) Add ksyscall/kretsyscall section support to libbpf to simplify tracing kernel
syscalls through the kprobe mechanism, from Andrii Nakryiko (a usage sketch
follows after the commit message).
3) Allow for livepatch (KLP) and BPF trampolines to attach to the same kernel
function, from Song Liu & Jiri Olsa.
4) Add new kfunc infrastructure for netfilter's CT e.g. to insert and change
entries, from Kumar Kartikeya Dwivedi & Lorenzo Bianconi.
5) Add a ksym BPF iterator to allow for more flexible and efficient interactions
with kernel symbols, from Alan Maguire (see the sketch after the commit message).
6) Bug fixes in libbpf e.g. for uprobe binary path resolution, from Dan Carpenter.
7) Fix BPF subprog function names in stack traces, from Alexei Starovoitov.
8) libbpf support for writing custom perf event readers, from Jon Doron.
9) Switch to use SPDX tag for BPF helper man page, from Alejandro Colomar.
10) Fix xsk send-only sockets when in busy poll mode, from Maciej Fijalkowski.
11) Reparent BPF maps and their charging on memcg offlining, from Roman Gushchin.
12) Multiple follow-up fixes around BPF lsm cgroup infra, from Stanislav Fomichev.
13) Use bootstrap version of bpftool where possible to speed up builds, from Pu Lehui.
14) Cleanup BPF verifier's check_func_arg() handling, from Joanne Koong.
15) Make non-prealloced BPF map allocations low priority to play better with
memcg limits, from Yafang Shao.
16) Fix BPF test runner to reject zero-length data for skbs, from Zhengchao Shao.
17) Various smaller cleanups and improvements all over the place.
* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (73 commits)
bpf: Simplify bpf_prog_pack_[size|mask]
bpf: Support bpf_trampoline on functions with IPMODIFY (e.g. livepatch)
bpf, x64: Allow to use caller address from stack
ftrace: Allow IPMODIFY and DIRECT ops on the same function
ftrace: Add modify_ftrace_direct_multi_nolock
bpf/selftests: Fix couldn't retrieve pinned program in xdp veth test
bpf: Fix build error in case of !CONFIG_DEBUG_INFO_BTF
selftests/bpf: Fix test_verifier failed test in unprivileged mode
selftests/bpf: Add negative tests for new nf_conntrack kfuncs
selftests/bpf: Add tests for new nf_conntrack kfuncs
selftests/bpf: Add verifier tests for trusted kfunc args
net: netfilter: Add kfuncs to set and change CT status
net: netfilter: Add kfuncs to set and change CT timeout
net: netfilter: Add kfuncs to allocate and insert CT
net: netfilter: Deduplicate code in bpf_{xdp,skb}_ct_lookup
bpf: Add documentation for kfuncs
bpf: Add support for forcing kfunc args to be trusted
bpf: Switch to new kfunc flags infrastructure
tools/resolve_btfids: Add support for 8-byte BTF sets
bpf: Introduce 8-byte BTF set
...
====================
Link: https://lore.kernel.org/r/20220722221218.29943-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
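
To give a flavour of the new ksyscall/kretsyscall sections from point 2 above, a minimal BPF-side sketch might look as follows. The traced syscall and program names are made up for illustration and are not part of this merge:

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical example: trace enter/exit of unlinkat() via the new
 * SEC("ksyscall/...") / SEC("kretsyscall/...") libbpf section names.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

SEC("ksyscall/unlinkat")
int BPF_KPROBE(handle_unlinkat_enter)
{
	bpf_printk("unlinkat() entered");
	return 0;
}

SEC("kretsyscall/unlinkat")
int BPF_KRETPROBE(handle_unlinkat_exit, long ret)
{
	bpf_printk("unlinkat() returned %ld", ret);
	return 0;
}

libbpf resolves the architecture-specific syscall entry symbol behind these sections, so the program no longer has to hard-code __x64_sys_*-style kprobe targets by hand.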
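
Point 5's ksym iterator is driven from a program in the "iter/ksym" section. The sketch below is modelled loosely on the selftests added in this series; treat the context field accesses as illustrative rather than authoritative:

// SPDX-License-Identifier: GPL-2.0
/* Rough sketch of an iter/ksym program that prints one line per kernel symbol. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

SEC("iter/ksym")
int dump_ksym(struct bpf_iter__ksym *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	struct kallsym_iter *iter = ctx->ksym;

	if (!iter)
		return 0;

	/* Emit "address name", similar to /proc/kallsyms. */
	BPF_SEQ_PRINTF(seq, "0x%lx %s\n", iter->value, iter->name);
	return 0;
}

On the user-space side the iterator is consumed like any other BPF iterator, e.g. by attaching an iter link and reading from the FD returned by bpf_iter_create().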
Diffstat (limited to 'samples')
-rw-r--r-- | samples/bpf/Makefile                | 10
-rw-r--r-- | samples/bpf/fds_example.c           |  3
-rw-r--r-- | samples/bpf/sock_example.c          |  3
-rw-r--r-- | samples/bpf/test_cgrp2_attach.c     |  3
-rw-r--r-- | samples/bpf/test_lru_dist.c         |  2
-rw-r--r-- | samples/bpf/test_map_in_map_user.c  |  4
-rw-r--r-- | samples/bpf/tracex5_user.c          |  3
-rw-r--r-- | samples/bpf/xdp_redirect_map.bpf.c  |  6
-rw-r--r-- | samples/bpf/xdp_redirect_map_user.c |  9
9 files changed, 29 insertions, 14 deletions
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 5002a5b9a7da..727da3c5879b 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -282,12 +282,10 @@ $(LIBBPF): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(LIBBPF_OU
 
 BPFTOOLDIR := $(TOOLS_PATH)/bpf/bpftool
 BPFTOOL_OUTPUT := $(abspath $(BPF_SAMPLES_PATH))/bpftool
-BPFTOOL := $(BPFTOOL_OUTPUT)/bpftool
-$(BPFTOOL): $(LIBBPF) $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) | $(BPFTOOL_OUTPUT)
-	$(MAKE) -C $(BPFTOOLDIR) srctree=$(BPF_SAMPLES_PATH)/../../ \
-		OUTPUT=$(BPFTOOL_OUTPUT)/ \
-		LIBBPF_OUTPUT=$(LIBBPF_OUTPUT)/ \
-		LIBBPF_DESTDIR=$(LIBBPF_DESTDIR)/
+BPFTOOL := $(BPFTOOL_OUTPUT)/bootstrap/bpftool
+$(BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) | $(BPFTOOL_OUTPUT)
+	$(MAKE) -C $(BPFTOOLDIR) srctree=$(BPF_SAMPLES_PATH)/../../ \
+		OUTPUT=$(BPFTOOL_OUTPUT)/ bootstrap
 
 $(LIBBPF_OUTPUT) $(BPFTOOL_OUTPUT):
 	$(call msg,MKDIR,$@)
diff --git a/samples/bpf/fds_example.c b/samples/bpf/fds_example.c
index 16dbf49e0f19..88a26f3ce201 100644
--- a/samples/bpf/fds_example.c
+++ b/samples/bpf/fds_example.c
@@ -17,6 +17,7 @@
 #include <bpf/libbpf.h>
 #include "bpf_insn.h"
 #include "sock_example.h"
+#include "bpf_util.h"
 
 #define BPF_F_PIN	(1 << 0)
 #define BPF_F_GET	(1 << 1)
@@ -52,7 +53,7 @@ static int bpf_prog_create(const char *object)
 		BPF_MOV64_IMM(BPF_REG_0, 1),
 		BPF_EXIT_INSN(),
 	};
-	size_t insns_cnt = sizeof(insns) / sizeof(struct bpf_insn);
+	size_t insns_cnt = ARRAY_SIZE(insns);
 	struct bpf_object *obj;
 	int err;
 
diff --git a/samples/bpf/sock_example.c b/samples/bpf/sock_example.c
index a88f69504c08..5b66f2401b96 100644
--- a/samples/bpf/sock_example.c
+++ b/samples/bpf/sock_example.c
@@ -29,6 +29,7 @@
 #include <bpf/bpf.h>
 #include "bpf_insn.h"
 #include "sock_example.h"
+#include "bpf_util.h"
 
 char bpf_log_buf[BPF_LOG_BUF_SIZE];
 
@@ -58,7 +59,7 @@ static int test_sock(void)
 		BPF_MOV64_IMM(BPF_REG_0, 0), /* r0 = 0 */
 		BPF_EXIT_INSN(),
 	};
-	size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
+	size_t insns_cnt = ARRAY_SIZE(prog);
 	LIBBPF_OPTS(bpf_prog_load_opts, opts,
 		.log_buf = bpf_log_buf,
 		.log_size = BPF_LOG_BUF_SIZE,
diff --git a/samples/bpf/test_cgrp2_attach.c b/samples/bpf/test_cgrp2_attach.c
index 6d90874b09c3..68ce69457afe 100644
--- a/samples/bpf/test_cgrp2_attach.c
+++ b/samples/bpf/test_cgrp2_attach.c
@@ -31,6 +31,7 @@
 #include <bpf/bpf.h>
 
 #include "bpf_insn.h"
+#include "bpf_util.h"
 
 enum {
 	MAP_KEY_PACKETS,
@@ -70,7 +71,7 @@ static int prog_load(int map_fd, int verdict)
 		BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
 		BPF_EXIT_INSN(),
 	};
-	size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
+	size_t insns_cnt = ARRAY_SIZE(prog);
 	LIBBPF_OPTS(bpf_prog_load_opts, opts,
 		.log_buf = bpf_log_buf,
 		.log_size = BPF_LOG_BUF_SIZE,
diff --git a/samples/bpf/test_lru_dist.c b/samples/bpf/test_lru_dist.c
index be98ccb4952f..5efb91763d65 100644
--- a/samples/bpf/test_lru_dist.c
+++ b/samples/bpf/test_lru_dist.c
@@ -523,7 +523,7 @@ int main(int argc, char **argv)
 		return -1;
 	}
 
-	for (f = 0; f < sizeof(map_flags) / sizeof(*map_flags); f++) {
+	for (f = 0; f < ARRAY_SIZE(map_flags); f++) {
 		test_lru_loss0(BPF_MAP_TYPE_LRU_HASH, map_flags[f]);
 		test_lru_loss1(BPF_MAP_TYPE_LRU_HASH, map_flags[f]);
 		test_parallel_lru_loss(BPF_MAP_TYPE_LRU_HASH, map_flags[f],
diff --git a/samples/bpf/test_map_in_map_user.c b/samples/bpf/test_map_in_map_user.c
index e8b4cc184ac9..652ec720533d 100644
--- a/samples/bpf/test_map_in_map_user.c
+++ b/samples/bpf/test_map_in_map_user.c
@@ -12,6 +12,8 @@
 #include <bpf/bpf.h>
 #include <bpf/libbpf.h>
 
+#include "bpf_util.h"
+
 static int map_fd[7];
 
 #define PORT_A		(map_fd[0])
@@ -28,7 +30,7 @@ static const char * const test_names[] = {
 	"Hash of Hash",
 };
 
-#define NR_TESTS (sizeof(test_names) / sizeof(*test_names))
+#define NR_TESTS ARRAY_SIZE(test_names)
 
 static void check_map_id(int inner_map_fd, int map_in_map_fd, uint32_t key)
 {
diff --git a/samples/bpf/tracex5_user.c b/samples/bpf/tracex5_user.c
index e910dc265c31..9d7d79f0d47d 100644
--- a/samples/bpf/tracex5_user.c
+++ b/samples/bpf/tracex5_user.c
@@ -8,6 +8,7 @@
 #include <bpf/bpf.h>
 #include <bpf/libbpf.h>
 #include "trace_helpers.h"
+#include "bpf_util.h"
 
 #ifdef __mips__
 #define MAX_ENTRIES 6000 /* MIPS n64 syscalls start at 5000 */
@@ -24,7 +25,7 @@ static void install_accept_all_seccomp(void)
 		BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW),
 	};
 	struct sock_fprog prog = {
-		.len = (unsigned short)(sizeof(filter)/sizeof(filter[0])),
+		.len = (unsigned short)ARRAY_SIZE(filter),
 		.filter = filter,
 	};
 	if (prctl(PR_SET_SECCOMP, 2, &prog))
diff --git a/samples/bpf/xdp_redirect_map.bpf.c b/samples/bpf/xdp_redirect_map.bpf.c
index 415bac1758e3..8557c278df77 100644
--- a/samples/bpf/xdp_redirect_map.bpf.c
+++ b/samples/bpf/xdp_redirect_map.bpf.c
@@ -33,7 +33,7 @@ struct {
 } tx_port_native SEC(".maps");
 
 /* store egress interface mac address */
-const volatile char tx_mac_addr[ETH_ALEN];
+const volatile __u8 tx_mac_addr[ETH_ALEN];
 
 static __always_inline int xdp_redirect_map(struct xdp_md *ctx, void *redirect_map)
 {
@@ -73,6 +73,7 @@ int xdp_redirect_map_egress(struct xdp_md *ctx)
 {
 	void *data_end = (void *)(long)ctx->data_end;
 	void *data = (void *)(long)ctx->data;
+	u8 *mac_addr = (u8 *) tx_mac_addr;
 	struct ethhdr *eth = data;
 	u64 nh_off;
 
@@ -80,7 +81,8 @@ int xdp_redirect_map_egress(struct xdp_md *ctx)
 	if (data + nh_off > data_end)
 		return XDP_DROP;
 
-	__builtin_memcpy(eth->h_source, (const char *)tx_mac_addr, ETH_ALEN);
+	barrier_var(mac_addr); /* prevent optimizing out memcpy */
+	__builtin_memcpy(eth->h_source, mac_addr, ETH_ALEN);
 
 	return XDP_PASS;
 }
diff --git a/samples/bpf/xdp_redirect_map_user.c b/samples/bpf/xdp_redirect_map_user.c
index b6e4fc849577..c889a1394dc1 100644
--- a/samples/bpf/xdp_redirect_map_user.c
+++ b/samples/bpf/xdp_redirect_map_user.c
@@ -40,6 +40,8 @@ static const struct option long_options[] = {
 	{}
 };
 
+static int verbose = 0;
+
 int main(int argc, char **argv)
 {
 	struct bpf_devmap_val devmap_val = {};
@@ -79,6 +81,7 @@ int main(int argc, char **argv)
 			break;
 		case 'v':
 			sample_switch_mode();
+			verbose = 1;
 			break;
 		case 's':
 			mask |= SAMPLE_REDIRECT_MAP_CNT;
@@ -134,6 +137,12 @@ int main(int argc, char **argv)
 			ret = EXIT_FAIL;
 			goto end_destroy;
 		}
+		if (verbose)
+			printf("Egress ifindex:%d using src MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+			       ifindex_out,
+			       skel->rodata->tx_mac_addr[0], skel->rodata->tx_mac_addr[1],
+			       skel->rodata->tx_mac_addr[2], skel->rodata->tx_mac_addr[3],
+			       skel->rodata->tx_mac_addr[4], skel->rodata->tx_mac_addr[5]);
 	}
 
 	skel->rodata->from_match[0] = ifindex_in;
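
As the diffstat shows, most of the samples churn above is a mechanical conversion from open-coded sizeof() divisions to ARRAY_SIZE(), pulled in via "bpf_util.h". For reference, that helper is essentially the classic element-count macro, shown here as a sketch rather than copied verbatim from the tree:

/* Number of elements in a statically sized array; replaces the
 * sizeof(arr) / sizeof(arr[0]) pattern removed in the hunks above.
 */
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(arr)	(sizeof(arr) / sizeof((arr)[0]))
#endif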