From 20c09d92faeefb8536f705d3a4629e0dc314c8a1 Mon Sep 17 00:00:00 2001
From: Alexei Starovoitov
Date: Thu, 2 Mar 2023 20:14:43 -0800
Subject: bpf: Introduce kptr_rcu.

The lifetime of certain kernel structures, like 'struct cgroup', is
protected by RCU. Hence it's safe to dereference them directly from
__kptr tagged pointers in bpf maps. The resulting pointer is MEM_RCU
and can be passed to kfuncs that expect KF_RCU. Dereference of other
kptrs returns PTR_UNTRUSTED.

For example:
struct map_value {
   struct cgroup __kptr *cgrp;
};

SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(test_cgrp_get_ancestors, struct cgroup *cgrp_arg, const char *path)
{
  struct cgroup *cg, *cg2;

  cg = bpf_cgroup_acquire(cgrp_arg); // cg is PTR_TRUSTED and ref_obj_id > 0
  bpf_kptr_xchg(&v->cgrp, cg);

  cg2 = v->cgrp; // This is the new feature introduced by this patch.
  // cg2 is PTR_MAYBE_NULL | MEM_RCU.
  // When cg2 != NULL, it's a valid cgroup, but its percpu_ref could be zero

  if (cg2)
    bpf_cgroup_ancestor(cg2, level); // safe to do.
}

Signed-off-by: Alexei Starovoitov
Signed-off-by: Daniel Borkmann
Acked-by: Tejun Heo
Acked-by: David Vernet
Link: https://lore.kernel.org/bpf/20230303041446.3630-4-alexei.starovoitov@gmail.com
---
 net/bpf/test_run.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'net/bpf/test_run.c')

diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 6f3d654b3339..6a8b33a103a4 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -737,6 +737,7 @@ __bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
 
 __bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
 {
+	/* p != NULL, but p->cnt could be 0 */
 }
 
 __bpf_kfunc void bpf_kfunc_call_test_destructive(void)
@@ -784,7 +785,7 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3)
 BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
 BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
 BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
-BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU)
 BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
 BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
 BTF_SET8_END(test_sk_check_kfunc_ids)
--
cgit v1.2.3


From aa3d65de4b9004d799f97700751a86d3ebd7d5f9 Mon Sep 17 00:00:00 2001
From: Viktor Malik
Date: Fri, 10 Mar 2023 08:41:00 +0100
Subject: bpf/selftests: Test fentry attachment to shadowed functions

Adds a new test that tries to attach a program to fentry of two
functions of the same name, one located in vmlinux and the other in
bpf_testmod.

To avoid conflicts with existing tests, a new function
"bpf_fentry_shadow_test" was created both in vmlinux and in bpf_testmod.

The previous commit fixed a bug which caused this test to fail: the
verifier would always use the vmlinux function's address as the target
trampoline address, and hence try to create two trampolines for a
single address, which is forbidden.

The test (similarly to other fentry/fexit tests) is not working on
arm64 at the moment.
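For reference, the userspace side of disambiguating the two copies comes
down to pairing the target's BTF ID with the BTF object FD when loading
the tracing program. A minimal sketch, not part of the patch (the helper
name is invented; the real flow is in module_fentry_shadow.c below):

#include <bpf/bpf.h>

/* Hypothetical helper mirroring the loop body of the test below. An
 * obj_fd of 0 targets vmlinux BTF; a module's BTF FD targets that module.
 */
static int load_fentry_prog(const struct bpf_insn *insns, size_t insn_cnt,
			    int btf_obj_fd, __s32 btf_id)
{
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.expected_attach_type = BPF_TRACE_FENTRY,
		.attach_btf_obj_fd = btf_obj_fd, /* which BTF object */
		.attach_btf_id = btf_id,         /* FUNC within that object */
	);

	return bpf_prog_load(BPF_PROG_TYPE_TRACING, NULL, "GPL",
			     insns, insn_cnt, &opts);
}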
Signed-off-by: Viktor Malik
Acked-by: Jiri Olsa
Link: https://lore.kernel.org/r/5fe2f364190b6f79b085066ed7c5989c5bc475fa.1678432753.git.vmalik@redhat.com
Signed-off-by: Alexei Starovoitov
---
 net/bpf/test_run.c                                 |   5 +
 tools/testing/selftests/bpf/DENYLIST.aarch64       |   1 +
 .../selftests/bpf/bpf_testmod/bpf_testmod.c        |   6 +
 .../bpf/prog_tests/module_fentry_shadow.c          | 128 +++++++++++++++++++++
 4 files changed, 140 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c

(limited to 'net/bpf/test_run.c')

diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 6a8b33a103a4..71226f68270d 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -560,6 +560,11 @@ long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d)
 	return (long)a + (long)b + (long)c + d;
 }
 
+int noinline bpf_fentry_shadow_test(int a)
+{
+	return a + 1;
+}
+
 struct prog_test_member1 {
 	int a;
 };
diff --git a/tools/testing/selftests/bpf/DENYLIST.aarch64 b/tools/testing/selftests/bpf/DENYLIST.aarch64
index 99cc33c51eaa..0a6837f97c32 100644
--- a/tools/testing/selftests/bpf/DENYLIST.aarch64
+++ b/tools/testing/selftests/bpf/DENYLIST.aarch64
@@ -44,6 +44,7 @@ lookup_key             # test_lookup_key__attach unexp
 lru_bug                # lru_bug__attach unexpected error: -524 (errno 524)
 modify_return          # modify_return__attach failed unexpected error: -524 (errno 524)
 module_attach          # skel_attach skeleton attach failed: -524
+module_fentry_shadow   # bpf_link_create unexpected bpf_link_create: actual -524 < expected 0
 mptcp/base             # run_test mptcp unexpected error: -524 (errno 524)
 netcnt                 # packets unexpected packets: actual 10001 != expected 10000
 rcu_read_lock          # failed to attach: ERROR: strerror_r(-524)=22
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index 5e6e85c8d77d..7999476b9446 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -268,6 +268,12 @@ static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = {
 	.set = &bpf_testmod_check_kfunc_ids,
 };
 
+noinline int bpf_fentry_shadow_test(int a)
+{
+	return a + 2;
+}
+EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test);
+
 extern int bpf_fentry_test1(int a);
 
 static int bpf_testmod_init(void)
diff --git a/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c b/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c
new file mode 100644
index 000000000000..c7636e18b1eb
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Red Hat */
+#include <test_progs.h>
+#include <bpf/btf.h>
+#include "bpf/libbpf_internal.h"
+#include "cgroup_helpers.h"
+
+static const char *module_name = "bpf_testmod";
+static const char *symbol_name = "bpf_fentry_shadow_test";
+
+static int get_bpf_testmod_btf_fd(void)
+{
+	struct bpf_btf_info info;
+	char name[64];
+	__u32 id = 0, len;
+	int err, fd;
+
+	while (true) {
+		err = bpf_btf_get_next_id(id, &id);
+		if (err) {
+			log_err("failed to iterate BTF objects");
+			return err;
+		}
+
+		fd = bpf_btf_get_fd_by_id(id);
+		if (fd < 0) {
+			if (errno == ENOENT)
+				continue; /* expected race: BTF was unloaded */
+			err = -errno;
+			log_err("failed to get FD for BTF object #%d", id);
+			return err;
+		}
+
+		len = sizeof(info);
+		memset(&info, 0, sizeof(info));
+		info.name = ptr_to_u64(name);
+		info.name_len = sizeof(name);
+
+		err = bpf_obj_get_info_by_fd(fd, &info, &len);
+		if (err) {
+			err = -errno;
+			log_err("failed to get info for BTF object #%d", id);
+			close(fd);
+			return err;
+		}
+
+		if (strcmp(name, module_name) == 0)
+			return fd;
+
+		close(fd);
+	}
+	return -ENOENT;
+}
+
+void test_module_fentry_shadow(void)
+{
+	struct btf *vmlinux_btf = NULL, *mod_btf = NULL;
+	int err, i;
+	int btf_fd[2] = {};
+	int prog_fd[2] = {};
+	int link_fd[2] = {};
+	__s32 btf_id[2] = {};
+
+	LIBBPF_OPTS(bpf_prog_load_opts, load_opts,
+		.expected_attach_type = BPF_TRACE_FENTRY,
+	);
+
+	const struct bpf_insn trace_program[] = {
+		BPF_MOV64_IMM(BPF_REG_0, 0),
+		BPF_EXIT_INSN(),
+	};
+
+	vmlinux_btf = btf__load_vmlinux_btf();
+	if (!ASSERT_OK_PTR(vmlinux_btf, "load_vmlinux_btf"))
+		return;
+
+	btf_fd[1] = get_bpf_testmod_btf_fd();
+	if (!ASSERT_GE(btf_fd[1], 0, "get_bpf_testmod_btf_fd"))
+		goto out;
+
+	mod_btf = btf_get_from_fd(btf_fd[1], vmlinux_btf);
+	if (!ASSERT_OK_PTR(mod_btf, "btf_get_from_fd"))
+		goto out;
+
+	btf_id[0] = btf__find_by_name_kind(vmlinux_btf, symbol_name, BTF_KIND_FUNC);
+	if (!ASSERT_GT(btf_id[0], 0, "btf_find_by_name"))
+		goto out;
+
+	btf_id[1] = btf__find_by_name_kind(mod_btf, symbol_name, BTF_KIND_FUNC);
+	if (!ASSERT_GT(btf_id[1], 0, "btf_find_by_name"))
+		goto out;
+
+	for (i = 0; i < 2; i++) {
+		load_opts.attach_btf_id = btf_id[i];
+		load_opts.attach_btf_obj_fd = btf_fd[i];
+		prog_fd[i] = bpf_prog_load(BPF_PROG_TYPE_TRACING, NULL, "GPL",
+					   trace_program,
+					   sizeof(trace_program) / sizeof(struct bpf_insn),
+					   &load_opts);
+		if (!ASSERT_GE(prog_fd[i], 0, "bpf_prog_load"))
+			goto out;
+
+		/* If the verifier incorrectly resolves addresses of the
+		 * shadowed functions and uses the same address for both the
+		 * vmlinux and the bpf_testmod functions, this will fail on
+		 * attempting to create two trampolines for the same address,
+		 * which is forbidden.
+		 */
+		link_fd[i] = bpf_link_create(prog_fd[i], 0, BPF_TRACE_FENTRY, NULL);
+		if (!ASSERT_GE(link_fd[i], 0, "bpf_link_create"))
+			goto out;
+	}
+
+	err = bpf_prog_test_run_opts(prog_fd[0], NULL);
+	ASSERT_OK(err, "running test");
+
+out:
+	btf__free(vmlinux_btf);
+	btf__free(mod_btf);
+	for (i = 0; i < 2; i++) {
+		if (btf_fd[i])
+			close(btf_fd[i]);
+		if (prog_fd[i] > 0)
+			close(prog_fd[i]);
+		if (link_fd[i] > 0)
+			close(link_fd[i]);
+	}
+}
--
cgit v1.2.3


From e5995bc7e2ba1a0d444f806016d2e4ea91c032d0 Mon Sep 17 00:00:00 2001
From: Alexander Lobakin
Date: Thu, 16 Mar 2023 18:50:50 +0100
Subject: bpf, test_run: fix crashes due to XDP frame overwriting/corruption
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

syzbot and Ilya faced splats when %XDP_PASS happens for bpf_test_run
after skb PP recycling was enabled for {__,}xdp_build_skb_from_frame():

BUG: kernel NULL pointer dereference, address: 0000000000000d28
RIP: 0010:memset_erms+0xd/0x20 arch/x86/lib/memset_64.S:66
[...]
Call Trace:
 __finalize_skb_around net/core/skbuff.c:321 [inline]
 __build_skb_around+0x232/0x3a0 net/core/skbuff.c:379
 build_skb_around+0x32/0x290 net/core/skbuff.c:444
 __xdp_build_skb_from_frame+0x121/0x760 net/core/xdp.c:622
 xdp_recv_frames net/bpf/test_run.c:248 [inline]
 xdp_test_run_batch net/bpf/test_run.c:334 [inline]
 bpf_test_run_xdp_live+0x1289/0x1930 net/bpf/test_run.c:362
 bpf_prog_test_run_xdp+0xa05/0x14e0 net/bpf/test_run.c:1418
[...]

This happens because xdp_scrub_frame() is called on the frame, which
nullifies xdpf->data, and the bpf_test_run code doesn't reinitialize
the frame when the XDP program doesn't adjust head or tail.
Previously, %XDP_PASS meant the page would be released from the pool
and returned to the MM layer, but now it returns to the pool with the
nullified xdpf->data, which then never gets reinitialized.

So, in addition to checking whether the head and/or tail have been
adjusted, also check for potential XDP frame corruption. xdpf->data is
always affected, and xdpf->flags is the field closest to the metadata /
frame start. Checking these two should be enough for non-extreme cases.

Fixes: 9c94bbf9a87b ("xdp: recycle Page Pool backed skbs built from XDP frames")
Reported-by: syzbot+e1d1b65f7c32f2a86a9f@syzkaller.appspotmail.com
Link: https://lore.kernel.org/bpf/000000000000f1985705f6ef2243@google.com
Reported-by: Ilya Leoshkevich
Link: https://lore.kernel.org/bpf/e07dd94022ad5731705891b9487cc9ed66328b94.camel@linux.ibm.com
Signed-off-by: Alexander Lobakin
Acked-by: Toke Høiland-Jørgensen
Tested-by: Ilya Leoshkevich
Link: https://lore.kernel.org/r/20230316175051.922550-2-aleksander.lobakin@intel.com
Signed-off-by: Alexei Starovoitov
---
 net/bpf/test_run.c | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

(limited to 'net/bpf/test_run.c')

diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 71226f68270d..8d6b31209bd6 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -208,6 +208,16 @@ static void xdp_test_run_teardown(struct xdp_test_data *xdp)
 	kfree(xdp->skbs);
 }
 
+static bool frame_was_changed(const struct xdp_page_head *head)
+{
+	/* xdp_scrub_frame() zeroes the data pointer, flags is the last field,
+	 * i.e. has the highest chances to be overwritten. If those two are
+	 * untouched, it's most likely safe to skip the context reset.
+	 */
+	return head->frm.data != head->orig_ctx.data ||
+	       head->frm.flags != head->orig_ctx.flags;
+}
+
 static bool ctx_was_changed(struct xdp_page_head *head)
 {
 	return head->orig_ctx.data != head->ctx.data ||
@@ -217,7 +227,7 @@ static bool ctx_was_changed(struct xdp_page_head *head)
 
 static void reset_ctx(struct xdp_page_head *head)
 {
-	if (likely(!ctx_was_changed(head)))
+	if (likely(!frame_was_changed(head) && !ctx_was_changed(head)))
 		return;
 
 	head->ctx.data = head->orig_ctx.data;
--
cgit v1.2.3


From fb2211a57c110b4ced3cb7f8570bd7246acf2d04 Mon Sep 17 00:00:00 2001
From: David Vernet
Date: Sat, 25 Mar 2023 16:31:45 -0500
Subject: bpf: Remove now-unnecessary NULL checks for KF_RELEASE kfuncs

Now that kfunc destructors are no longer invoked when the kptr in a map
is NULL, we no longer require NULL checks in many of our KF_RELEASE
kfuncs. This patch removes those NULL checks.
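As a hedged illustration of the pattern being removed (example_obj is an
invented type, not a kernel struct; the real instances follow in the diff),
a KF_RELEASE kfunc now reduces to the bare refcount drop:

struct example_obj {
	refcount_t refcnt;
};

__bpf_kfunc void example_obj_release(struct example_obj *obj)
{
	/* No "if (!obj) return;" guard needed: the verifier only passes
	 * non-NULL referenced pointers to KF_RELEASE kfuncs, and map
	 * teardown no longer invokes the destructor for a NULL kptr.
	 */
	refcount_dec(&obj->refcnt);
}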
Signed-off-by: David Vernet
Link: https://lore.kernel.org/r/20230325213144.486885-3-void@manifault.com
Signed-off-by: Alexei Starovoitov
---
 drivers/hid/bpf/hid_bpf_dispatch.c | 3 ---
 kernel/bpf/cpumask.c               | 3 ---
 kernel/bpf/helpers.c               | 6 ------
 net/bpf/test_run.c                 | 3 ---
 net/netfilter/nf_conntrack_bpf.c   | 2 --
 5 files changed, 17 deletions(-)

(limited to 'net/bpf/test_run.c')

diff --git a/drivers/hid/bpf/hid_bpf_dispatch.c b/drivers/hid/bpf/hid_bpf_dispatch.c
index 8a034a555d4c..d9ef45fcaeab 100644
--- a/drivers/hid/bpf/hid_bpf_dispatch.c
+++ b/drivers/hid/bpf/hid_bpf_dispatch.c
@@ -342,9 +342,6 @@ hid_bpf_release_context(struct hid_bpf_ctx *ctx)
 {
 	struct hid_bpf_ctx_kern *ctx_kern;
 
-	if (!ctx)
-		return;
-
 	ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
 
 	kfree(ctx_kern);
diff --git a/kernel/bpf/cpumask.c b/kernel/bpf/cpumask.c
index db9da2194c1a..e991af7dc13c 100644
--- a/kernel/bpf/cpumask.c
+++ b/kernel/bpf/cpumask.c
@@ -102,9 +102,6 @@ static void cpumask_free_cb(struct rcu_head *head)
  */
 __bpf_kfunc void bpf_cpumask_release(struct bpf_cpumask *cpumask)
 {
-	if (!cpumask)
-		return;
-
 	if (refcount_dec_and_test(&cpumask->usage))
 		call_rcu(&cpumask->rcu, cpumask_free_cb);
 }
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index f753676ef652..8980f6859443 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -2089,9 +2089,6 @@ __bpf_kfunc struct task_struct *bpf_task_kptr_get(struct task_struct **pp)
  */
 __bpf_kfunc void bpf_task_release(struct task_struct *p)
 {
-	if (!p)
-		return;
-
 	put_task_struct(p);
 }
 
@@ -2148,9 +2145,6 @@ __bpf_kfunc struct cgroup *bpf_cgroup_kptr_get(struct cgroup **cgrpp)
  */
 __bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp)
 {
-	if (!cgrp)
-		return;
-
 	cgroup_put(cgrp);
 }
 
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 8d6b31209bd6..27587f1c5f36 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -615,9 +615,6 @@ bpf_kfunc_call_memb_acquire(void)
 
 __bpf_kfunc void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
 {
-	if (!p)
-		return;
-
 	refcount_dec(&p->cnt);
 }
 
diff --git a/net/netfilter/nf_conntrack_bpf.c b/net/netfilter/nf_conntrack_bpf.c
index cd99e6dc1f35..002e9d24a1e9 100644
--- a/net/netfilter/nf_conntrack_bpf.c
+++ b/net/netfilter/nf_conntrack_bpf.c
@@ -401,8 +401,6 @@ __bpf_kfunc struct nf_conn *bpf_ct_insert_entry(struct nf_conn___init *nfct_i)
  */
 __bpf_kfunc void bpf_ct_release(struct nf_conn *nfct)
 {
-	if (!nfct)
-		return;
 	nf_ct_put(nfct);
 }
 
--
cgit v1.2.3


From 6c831c4684124a544f73f7c9b83bc7b2eb0b23d3 Mon Sep 17 00:00:00 2001
From: David Vernet
Date: Sat, 25 Mar 2023 16:31:46 -0500
Subject: bpf: Treat KF_RELEASE kfuncs as KF_TRUSTED_ARGS

KF_RELEASE kfuncs are not currently treated as having KF_TRUSTED_ARGS,
even though they have a superset of the requirements of KF_TRUSTED_ARGS.
Like KF_TRUSTED_ARGS, KF_RELEASE kfuncs require a 0-offset argument, and
don't allow NULL-able arguments. Unlike KF_TRUSTED_ARGS, which requires
_either_ an argument with ref_obj_id > 0 _or_
(ref->type & BPF_REG_TRUSTED_MODIFIERS) (with no unsafe modifiers
allowed), KF_RELEASE only allows ref_obj_id > 0.

Because KF_RELEASE today doesn't automatically imply KF_TRUSTED_ARGS,
some of these requirements are enforced in different ways that can make
the behavior of the verifier feel unpredictable. For example, a
KF_RELEASE kfunc with a NULL-able argument will currently fail in the
verifier with a message like "arg#0 is ptr_or_null_ expected ptr_ or
socket" rather than "Possibly NULL pointer passed to trusted arg0". Our
intention is the same, but the semantics are different due to
implementation details that kfunc authors and BPF program writers should
not need to care about.

Let's make the behavior of the verifier more consistent and intuitive by
having KF_RELEASE kfuncs imply the presence of KF_TRUSTED_ARGS. Our
eventual goal is to have all kfuncs assume KF_TRUSTED_ARGS by default
anyway, so this takes us a step in that direction.

Note that it does not make sense to assume KF_TRUSTED_ARGS for all
KF_ACQUIRE kfuncs. KF_ACQUIRE kfuncs can have looser semantics than
KF_RELEASE, with e.g. KF_RCU | KF_RET_NULL. We may want to have
KF_ACQUIRE imply KF_TRUSTED_ARGS _unless_ KF_RCU is specified, but that
can be left to another patch set, and there are no such subtleties to
address for KF_RELEASE.

Signed-off-by: David Vernet
Link: https://lore.kernel.org/r/20230325213144.486885-4-void@manifault.com
Signed-off-by: Alexei Starovoitov
---
 Documentation/bpf/kfuncs.rst                           |  7 ++++---
 kernel/bpf/cpumask.c                                   |  2 +-
 kernel/bpf/verifier.c                                  |  2 +-
 net/bpf/test_run.c                                     |  6 ++++++
 tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c |  4 ++--
 tools/testing/selftests/bpf/progs/task_kfunc_failure.c |  6 +++---
 tools/testing/selftests/bpf/verifier/calls.c           | 10 +++++++---
 tools/testing/selftests/bpf/verifier/ref_tracking.c    |  6 +++---
 8 files changed, 27 insertions(+), 16 deletions(-)

(limited to 'net/bpf/test_run.c')

diff --git a/Documentation/bpf/kfuncs.rst b/Documentation/bpf/kfuncs.rst
index 69eccf6f98ef..bf1b85941452 100644
--- a/Documentation/bpf/kfuncs.rst
+++ b/Documentation/bpf/kfuncs.rst
@@ -179,9 +179,10 @@ both are orthogonal to each other.
 ---------------------
 
 The KF_RELEASE flag is used to indicate that the kfunc releases the pointer
-passed in to it. There can be only one referenced pointer that can be passed in.
-All copies of the pointer being released are invalidated as a result of invoking
-kfunc with this flag.
+passed in to it. There can be only one referenced pointer that can be passed
+in. All copies of the pointer being released are invalidated as a result of
+invoking kfunc with this flag. KF_RELEASE kfuncs automatically receive the
+protection afforded by the KF_TRUSTED_ARGS flag described below.
 
 2.4.4 KF_KPTR_GET flag
 ----------------------
diff --git a/kernel/bpf/cpumask.c b/kernel/bpf/cpumask.c
index e991af7dc13c..7efdf5d770ca 100644
--- a/kernel/bpf/cpumask.c
+++ b/kernel/bpf/cpumask.c
@@ -402,7 +402,7 @@ __diag_pop();
 
 BTF_SET8_START(cpumask_kfunc_btf_ids)
 BTF_ID_FLAGS(func, bpf_cpumask_create, KF_ACQUIRE | KF_RET_NULL)
-BTF_ID_FLAGS(func, bpf_cpumask_release, KF_RELEASE | KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_cpumask_release, KF_RELEASE)
 BTF_ID_FLAGS(func, bpf_cpumask_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
 BTF_ID_FLAGS(func, bpf_cpumask_first, KF_RCU)
 BTF_ID_FLAGS(func, bpf_cpumask_first_zero, KF_RCU)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 64f06f6e16bf..20eb2015842f 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -9307,7 +9307,7 @@ static bool is_kfunc_release(struct bpf_kfunc_call_arg_meta *meta)
 
 static bool is_kfunc_trusted_args(struct bpf_kfunc_call_arg_meta *meta)
 {
-	return meta->kfunc_flags & KF_TRUSTED_ARGS;
+	return (meta->kfunc_flags & KF_TRUSTED_ARGS) || is_kfunc_release(meta);
 }
 
 static bool is_kfunc_sleepable(struct bpf_kfunc_call_arg_meta *meta)
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 27587f1c5f36..f1652f5fbd2e 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -606,6 +606,11 @@ bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
 	return &prog_test_struct;
 }
 
+__bpf_kfunc void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p)
+{
+	WARN_ON_ONCE(1);
+}
+
 __bpf_kfunc struct prog_test_member *
 bpf_kfunc_call_memb_acquire(void)
 {
@@ -800,6 +805,7 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
 BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU)
 BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
 BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset)
 BTF_SET8_END(test_sk_check_kfunc_ids)
 
 static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c b/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c
index 807fb0ac41e9..48b2034cadb3 100644
--- a/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c
+++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c
@@ -206,7 +206,7 @@ int BPF_PROG(cgrp_kfunc_get_unreleased, struct cgroup *cgrp, const char *path)
 }
 
 SEC("tp_btf/cgroup_mkdir")
-__failure __msg("expects refcounted")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
 int BPF_PROG(cgrp_kfunc_release_untrusted, struct cgroup *cgrp, const char *path)
 {
 	struct __cgrps_kfunc_map_value *v;
@@ -234,7 +234,7 @@ int BPF_PROG(cgrp_kfunc_release_fp, struct cgroup *cgrp, const char *path)
 }
 
 SEC("tp_btf/cgroup_mkdir")
-__failure __msg("arg#0 is ptr_or_null_ expected ptr_ or socket")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
 int BPF_PROG(cgrp_kfunc_release_null, struct cgroup *cgrp, const char *path)
 {
 	struct __cgrps_kfunc_map_value local, *v;
diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c
index 27994d6b2914..2c374a7ffece 100644
--- a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c
+++ b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c
@@ -206,7 +206,7 @@ int BPF_PROG(task_kfunc_get_unreleased, struct task_struct *task, u64 clone_flag
 }
 
 SEC("tp_btf/task_newtask")
-__failure __msg("arg#0 is untrusted_ptr_or_null_ expected ptr_ or socket")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
 int BPF_PROG(task_kfunc_release_untrusted, struct task_struct *task, u64 clone_flags)
 {
 	struct __tasks_kfunc_map_value *v;
@@ -234,7 +234,7 @@ int BPF_PROG(task_kfunc_release_fp, struct task_struct *task, u64 clone_flags)
 }
 
 SEC("tp_btf/task_newtask")
-__failure __msg("arg#0 is ptr_or_null_ expected ptr_ or socket")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
 int BPF_PROG(task_kfunc_release_null, struct task_struct *task, u64 clone_flags)
 {
 	struct __tasks_kfunc_map_value local, *v;
@@ -277,7 +277,7 @@ int BPF_PROG(task_kfunc_release_unacquired, struct task_struct *task, u64 clone_
 }
 
 SEC("tp_btf/task_newtask")
-__failure __msg("arg#0 is ptr_or_null_ expected ptr_ or socket")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
 int BPF_PROG(task_kfunc_from_pid_no_null_check, struct task_struct *task, u64 clone_flags)
 {
 	struct task_struct *acquired;
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
index 5702fc9761ef..1bdf2b43e49e 100644
--- a/tools/testing/selftests/bpf/verifier/calls.c
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -109,7 +109,7 @@
 	},
 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 	.result = REJECT,
-	.errstr = "arg#0 is ptr_or_null_ expected ptr_ or socket",
+	.errstr = "Possibly NULL pointer passed to trusted arg0",
 	.fixup_kfunc_btf_id = {
 		{ "bpf_kfunc_call_test_acquire", 3 },
 		{ "bpf_kfunc_call_test_release", 5 },
@@ -165,19 +165,23 @@
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
 	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
 	BPF_EXIT_INSN(),
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
-	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 16),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -4),
 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
 	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
 	},
 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 	.fixup_kfunc_btf_id = {
 		{ "bpf_kfunc_call_test_acquire", 3 },
-		{ "bpf_kfunc_call_test_release", 9 },
+		{ "bpf_kfunc_call_test_offset", 9 },
+		{ "bpf_kfunc_call_test_release", 12 },
 	},
 	.result_unpriv = REJECT,
 	.result = REJECT,
diff --git a/tools/testing/selftests/bpf/verifier/ref_tracking.c b/tools/testing/selftests/bpf/verifier/ref_tracking.c
index 9540164712b7..5a2e154dd1e0 100644
--- a/tools/testing/selftests/bpf/verifier/ref_tracking.c
+++ b/tools/testing/selftests/bpf/verifier/ref_tracking.c
@@ -142,7 +142,7 @@
 	.kfunc = "bpf",
 	.expected_attach_type = BPF_LSM_MAC,
 	.flags = BPF_F_SLEEPABLE,
-	.errstr = "arg#0 is ptr_or_null_ expected ptr_ or socket",
+	.errstr = "Possibly NULL pointer passed to trusted arg0",
 	.fixup_kfunc_btf_id = {
 		{ "bpf_lookup_user_key", 2 },
 		{ "bpf_key_put", 4 },
@@ -163,7 +163,7 @@
 	.kfunc = "bpf",
 	.expected_attach_type = BPF_LSM_MAC,
 	.flags = BPF_F_SLEEPABLE,
-	.errstr = "arg#0 is ptr_or_null_ expected ptr_ or socket",
+	.errstr = "Possibly NULL pointer passed to trusted arg0",
 	.fixup_kfunc_btf_id = {
 		{ "bpf_lookup_system_key", 1 },
 		{ "bpf_key_put", 3 },
@@ -182,7 +182,7 @@
 	.kfunc = "bpf",
 	.expected_attach_type = BPF_LSM_MAC,
 	.flags = BPF_F_SLEEPABLE,
-	.errstr = "arg#0 pointer type STRUCT bpf_key must point to scalar, or struct with scalar",
+	.errstr = "Possibly NULL pointer passed to trusted arg0",
 	.fixup_kfunc_btf_id = {
 		{ "bpf_key_put", 1 },
 	},
--
cgit v1.2.3


From 75dcef8d3609d0b1d3497d6ed4809096513e0b83 Mon Sep 17 00:00:00 2001
From: Feng Zhou
Date: Mon, 10 Apr 2023 16:59:08 +0800
Subject: selftests/bpf: Add test to access u32 ptr argument in tracing program

Adding a verifier test for accessing a u32 pointer argument in tracing
programs. The test program loads the 1st argument of the
bpf_fentry_test9 function, which is a u32 pointer, and checks that the
verifier allows it.

Co-developed-by: Chengming Zhou
Signed-off-by: Chengming Zhou
Signed-off-by: Feng Zhou
Signed-off-by: Daniel Borkmann
Acked-by: Jiri Olsa
Link: https://lore.kernel.org/bpf/20230410085908.98493-3-zhoufeng.zf@bytedance.com
---
 net/bpf/test_run.c                                    |  8 +++++++-
 tools/testing/selftests/bpf/verifier/btf_ctx_access.c | 13 +++++++++++++
 2 files changed, 20 insertions(+), 1 deletion(-)

(limited to 'net/bpf/test_run.c')

diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index f1652f5fbd2e..68bdfc041a7b 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -541,6 +541,11 @@ int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
 	return (long)arg->a;
 }
 
+__bpf_kfunc u32 bpf_fentry_test9(u32 *a)
+{
+	return *a;
+}
+
 __bpf_kfunc int bpf_modify_return_test(int a, int *b)
 {
 	*b += 1;
@@ -855,7 +860,8 @@ int bpf_prog_test_run_tracing(struct bpf_prog *prog,
 		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
 		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
 		    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
-		    bpf_fentry_test8(&arg) != 0)
+		    bpf_fentry_test8(&arg) != 0 ||
+		    bpf_fentry_test9(&retval) != 0)
 			goto out;
 		break;
 	case BPF_MODIFY_RETURN:
diff --git a/tools/testing/selftests/bpf/verifier/btf_ctx_access.c b/tools/testing/selftests/bpf/verifier/btf_ctx_access.c
index 6340db6b46dc..0484d3de040d 100644
--- a/tools/testing/selftests/bpf/verifier/btf_ctx_access.c
+++ b/tools/testing/selftests/bpf/verifier/btf_ctx_access.c
@@ -10,3 +10,16 @@
 	.expected_attach_type = BPF_TRACE_FENTRY,
 	.kfunc = "bpf_modify_return_test",
 },
+
+{
+	"btf_ctx_access u32 pointer accept",
+	.insns = {
+	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), /* load 1st argument value (u32 pointer) */
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.prog_type = BPF_PROG_TYPE_TRACING,
+	.expected_attach_type = BPF_TRACE_FENTRY,
+	.kfunc = "bpf_fentry_test9",
+},
--
cgit v1.2.3


From 09b501d905217a38f03c0f07d5a66e0b5c8c1644 Mon Sep 17 00:00:00 2001
From: David Vernet
Date: Sun, 16 Apr 2023 03:49:26 -0500
Subject: bpf: Remove bpf_kfunc_call_test_kptr_get() test kfunc

We've managed to improve the UX for kptrs significantly over the last
9 months. All of the prior main use cases (struct bpf_cpumask *,
struct task_struct *, and struct cgroup *) have been updated to be
synchronized mainly using RCU. In other words, their KF_ACQUIRE kfunc
calls are all KF_RCU, and the pointers themselves are MEM_RCU and can
be accessed in an RCU read region in BPF.

In a follow-on change, we'll be removing the KF_KPTR_GET kfunc flag.
This patch prepares for that by removing the
bpf_kfunc_call_test_kptr_get() kfunc, and all associated selftests.
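For context, the RCU-based pattern that made KF_KPTR_GET redundant looks
roughly like the following BPF-side sketch (the map layout is illustrative
and the exact kfunc flags varied across this period):

struct map_value {
	struct task_struct __kptr *task;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct map_value);
} tasks SEC(".maps");

struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
void bpf_task_release(struct task_struct *p) __ksym;

SEC("tp_btf/task_newtask")
int BPF_PROG(rcu_kptr_deref, struct task_struct *task, u64 clone_flags)
{
	struct map_value *v;
	struct task_struct *t;
	int key = 0;

	v = bpf_map_lookup_elem(&tasks, &key);
	if (!v)
		return 0;

	/* Direct load of the __kptr field yields MEM_RCU | PTR_MAYBE_NULL;
	 * no kptr_get()-style helper is needed inside the RCU section.
	 */
	t = v->task;
	if (t) {
		t = bpf_task_acquire(t); /* KF_RCU acquire; may return NULL */
		if (t)
			bpf_task_release(t);
	}
	return 0;
}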
Signed-off-by: David Vernet
Link: https://lore.kernel.org/r/20230416084928.326135-2-void@manifault.com
Signed-off-by: Alexei Starovoitov
---
 net/bpf/test_run.c                                |  12 ----
 tools/testing/selftests/bpf/progs/map_kptr.c      |  40 ++---------
 tools/testing/selftests/bpf/progs/map_kptr_fail.c |  78 ---------------------
 tools/testing/selftests/bpf/verifier/map_kptr.c   |  27 --------
 4 files changed, 5 insertions(+), 152 deletions(-)

(limited to 'net/bpf/test_run.c')

diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 0b9bd9b39990..f170e8a17974 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -679,17 +679,6 @@ __bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p)
 {
 }
 
-__bpf_kfunc struct prog_test_ref_kfunc *
-bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **pp, int a, int b)
-{
-	struct prog_test_ref_kfunc *p = READ_ONCE(*pp);
-
-	if (!p)
-		return NULL;
-	refcount_inc(&p->cnt);
-	return p;
-}
-
 struct prog_test_pass1 {
 	int x0;
 	struct {
@@ -804,7 +793,6 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE)
-BTF_ID_FLAGS(func, bpf_kfunc_call_test_kptr_get, KF_ACQUIRE | KF_RET_NULL | KF_KPTR_GET)
 BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
 BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
 BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2)
diff --git a/tools/testing/selftests/bpf/progs/map_kptr.c b/tools/testing/selftests/bpf/progs/map_kptr.c
index dae5dab1bbf7..d7150041e5d1 100644
--- a/tools/testing/selftests/bpf/progs/map_kptr.c
+++ b/tools/testing/selftests/bpf/progs/map_kptr.c
@@ -115,8 +115,6 @@ DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_malloc_map, hash_of_hash_mallo
 DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, lru_hash_map, hash_of_lru_hash_maps);
 
 extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym;
-extern struct prog_test_ref_kfunc *
-bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **p, int a, int b) __ksym;
 extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;
 void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p) __ksym;
 
@@ -187,25 +185,10 @@ static void test_kptr_ref(struct map_value *v)
 	bpf_kfunc_call_test_release(p);
 }
 
-static void test_kptr_get(struct map_value *v)
-{
-	struct prog_test_ref_kfunc *p;
-
-	p = bpf_kfunc_call_test_kptr_get(&v->ref_ptr, 0, 0);
-	if (!p)
-		return;
-	if (p->a + p->b > 100) {
-		bpf_kfunc_call_test_release(p);
-		return;
-	}
-	bpf_kfunc_call_test_release(p);
-}
-
 static void test_kptr(struct map_value *v)
{
 	test_kptr_unref(v);
 	test_kptr_ref(v);
-	test_kptr_get(v);
 }
 
 SEC("tc")
@@ -338,38 +321,25 @@ int test_map_kptr_ref_pre(struct map_value *v)
 	if (p_st->cnt.refs.counter != ref)
 		return 4;
 
-	p = bpf_kfunc_call_test_kptr_get(&v->ref_ptr, 0, 0);
-	if (!p)
-		return 5;
-	ref++;
-	if (p_st->cnt.refs.counter != ref) {
-		ret = 6;
-		goto end;
-	}
-	bpf_kfunc_call_test_release(p);
-	ref--;
-	if (p_st->cnt.refs.counter != ref)
-		return 7;
-
 	p = bpf_kptr_xchg(&v->ref_ptr, NULL);
 	if (!p)
-		return 8;
+		return 5;
 	bpf_kfunc_call_test_release(p);
 	ref--;
 	if (p_st->cnt.refs.counter != ref)
-		return 9;
+		return 6;
 
 	p = bpf_kfunc_call_test_acquire(&arg);
 	if (!p)
-		return 10;
+		return 7;
 	ref++;
 
 	p = bpf_kptr_xchg(&v->ref_ptr, p);
 	if (p) {
-		ret = 11;
+		ret = 8;
 		goto end;
 	}
 	if (p_st->cnt.refs.counter != ref)
-		return 12;
+		return 9;
 	/* Leave in map */
 
 	return 0;
diff --git a/tools/testing/selftests/bpf/progs/map_kptr_fail.c b/tools/testing/selftests/bpf/progs/map_kptr_fail.c
index 15bf3127dba3..da8c724f839b 100644
--- a/tools/testing/selftests/bpf/progs/map_kptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/map_kptr_fail.c
@@ -21,8 +21,6 @@ struct array_map {
 
 extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym;
 extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;
-extern struct prog_test_ref_kfunc *
-bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **p, int a, int b) __ksym;
 
 SEC("?tc")
 __failure __msg("kptr access size must be BPF_DW")
@@ -220,67 +218,6 @@ int reject_kptr_xchg_on_unref(struct __sk_buff *ctx)
 	return 0;
 }
 
-SEC("?tc")
-__failure __msg("arg#0 expected pointer to map value")
-int reject_kptr_get_no_map_val(struct __sk_buff *ctx)
-{
-	bpf_kfunc_call_test_kptr_get((void *)&ctx, 0, 0);
-	return 0;
-}
-
-SEC("?tc")
-__failure __msg("arg#0 expected pointer to map value")
-int reject_kptr_get_no_null_map_val(struct __sk_buff *ctx)
-{
-	bpf_kfunc_call_test_kptr_get(bpf_map_lookup_elem(&array_map, &(int){0}), 0, 0);
-	return 0;
-}
-
-SEC("?tc")
-__failure __msg("arg#0 no referenced kptr at map value offset=0")
-int reject_kptr_get_no_kptr(struct __sk_buff *ctx)
-{
-	struct map_value *v;
-	int key = 0;
-
-	v = bpf_map_lookup_elem(&array_map, &key);
-	if (!v)
-		return 0;
-
-	bpf_kfunc_call_test_kptr_get((void *)v, 0, 0);
-	return 0;
-}
-
-SEC("?tc")
-__failure __msg("arg#0 no referenced kptr at map value offset=8")
-int reject_kptr_get_on_unref(struct __sk_buff *ctx)
-{
-	struct map_value *v;
-	int key = 0;
-
-	v = bpf_map_lookup_elem(&array_map, &key);
-	if (!v)
-		return 0;
-
-	bpf_kfunc_call_test_kptr_get(&v->unref_ptr, 0, 0);
-	return 0;
-}
-
-SEC("?tc")
-__failure __msg("kernel function bpf_kfunc_call_test_kptr_get args#0")
-int reject_kptr_get_bad_type_match(struct __sk_buff *ctx)
-{
-	struct map_value *v;
-	int key = 0;
-
-	v = bpf_map_lookup_elem(&array_map, &key);
-	if (!v)
-		return 0;
-
-	bpf_kfunc_call_test_kptr_get((void *)&v->ref_memb_ptr, 0, 0);
-	return 0;
-}
-
 SEC("?tc")
 __failure __msg("R1 type=rcu_ptr_or_null_ expected=percpu_ptr_")
 int mark_ref_as_untrusted_or_null(struct __sk_buff *ctx)
@@ -428,21 +365,6 @@ int kptr_xchg_ref_state(struct __sk_buff *ctx)
 	return 0;
 }
 
-SEC("?tc")
-__failure __msg("Unreleased reference id=3 alloc_insn=")
-int kptr_get_ref_state(struct __sk_buff *ctx)
-{
-	struct map_value *v;
-	int key = 0;
-
-	v = bpf_map_lookup_elem(&array_map, &key);
-	if (!v)
-		return 0;
-
-	bpf_kfunc_call_test_kptr_get(&v->ref_ptr, 0, 0);
-	return 0;
-}
-
 SEC("?tc")
 __failure __msg("Possibly NULL pointer passed to helper arg2")
 int kptr_xchg_possibly_null(struct __sk_buff *ctx)
diff --git a/tools/testing/selftests/bpf/verifier/map_kptr.c b/tools/testing/selftests/bpf/verifier/map_kptr.c
index d775ccb01989..a0cfc06d75bc 100644
--- a/tools/testing/selftests/bpf/verifier/map_kptr.c
+++ b/tools/testing/selftests/bpf/verifier/map_kptr.c
@@ -288,33 +288,6 @@
 	.result = REJECT,
 	.errstr = "off=0 kptr isn't referenced kptr",
 },
-{
-	"map_kptr: unref: bpf_kfunc_call_test_kptr_get rejected",
-	.insns = {
-	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
-	BPF_LD_MAP_FD(BPF_REG_6, 0),
-	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
-	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
-	BPF_MOV64_IMM(BPF_REG_0, 0),
-	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
-	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
-	BPF_EXIT_INSN(),
-	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
-	BPF_MOV64_IMM(BPF_REG_2, 0),
-	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
-	BPF_MOV64_IMM(BPF_REG_0, 0),
-	BPF_EXIT_INSN(),
-	},
-	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
-	.fixup_map_kptr = { 1 },
-	.result = REJECT,
-	.errstr = "arg#0 no referenced kptr at map value offset=0",
-	.fixup_kfunc_btf_id = {
-		{ "bpf_kfunc_call_test_kptr_get", 13 },
-	}
-},
 /* Tests for referenced PTR_TO_BTF_ID */
 {
 	"map_kptr: ref: loaded pointer marked as untrusted",
--
cgit v1.2.3


From 2b99ef22e0d237e08bfc437e7d051f78f352aeb2 Mon Sep 17 00:00:00 2001
From: Florian Westphal
Date: Fri, 21 Apr 2023 19:02:59 +0200
Subject: bpf: add test_run support for netfilter program type

Add glue code so a bpf program can be run using a userspace-provided
netfilter state and packet/skb.

The default is to use the ipv4:output hook point, but this can be
overridden by userspace. The userspace-provided netfilter state is
restricted: only the hook and the protocol family can be overridden,
and only to ipv4/ipv6.

Signed-off-by: Florian Westphal
Link: https://lore.kernel.org/r/20230421170300.24115-7-fw@strlen.de
Signed-off-by: Alexei Starovoitov
---
 include/linux/bpf.h         |   3 +
 net/bpf/test_run.c          | 158 ++++++++++++++++++++++++++++++++++++++++++++
 net/netfilter/nf_bpf_link.c |   1 +
 3 files changed, 162 insertions(+)

(limited to 'net/bpf/test_run.c')

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 18b592fde896..e53ceee1df37 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2264,6 +2264,9 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
 int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
 				const union bpf_attr *kattr,
 				union bpf_attr __user *uattr);
+int bpf_prog_test_run_nf(struct bpf_prog *prog,
+			 const union bpf_attr *kattr,
+			 union bpf_attr __user *uattr);
 bool btf_ctx_access(int off, int size, enum bpf_access_type type,
 		    const struct bpf_prog *prog,
 		    struct bpf_insn_access_aux *info);
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index f170e8a17974..e79e3a415ca9 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -19,7 +19,9 @@
 #include <linux/error-injection.h>
 #include <linux/smp.h>
 #include <linux/sock_diag.h>
+#include <linux/netfilter.h>
 #include <net/xdp.h>
+#include <net/netfilter/nf_bpf_link.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/bpf_test_run.h>
@@ -1691,6 +1693,162 @@ out:
 	return err;
 }
 
+static int verify_and_copy_hook_state(struct nf_hook_state *state,
+				      const struct nf_hook_state *user,
+				      struct net_device *dev)
+{
+	if (user->in || user->out)
+		return -EINVAL;
+
+	if (user->net || user->sk || user->okfn)
+		return -EINVAL;
+
+	switch (user->pf) {
+	case NFPROTO_IPV4:
+	case NFPROTO_IPV6:
+		switch (state->hook) {
+		case NF_INET_PRE_ROUTING:
+			state->in = dev;
+			break;
+		case NF_INET_LOCAL_IN:
+			state->in = dev;
+			break;
+		case NF_INET_FORWARD:
+			state->in = dev;
+			state->out = dev;
+			break;
+		case NF_INET_LOCAL_OUT:
+			state->out = dev;
+			break;
+		case NF_INET_POST_ROUTING:
+			state->out = dev;
+			break;
+		}
+
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	state->pf = user->pf;
+	state->hook = user->hook;
+
+	return 0;
+}
+
+static __be16 nfproto_eth(int nfproto)
+{
+	switch (nfproto) {
+	case NFPROTO_IPV4:
+		return htons(ETH_P_IP);
+	case NFPROTO_IPV6:
+		break;
+	}
+
+	return htons(ETH_P_IPV6);
+}
+
+int bpf_prog_test_run_nf(struct bpf_prog *prog,
+			 const union bpf_attr *kattr,
+			 union bpf_attr __user *uattr)
+{
+	struct net *net = current->nsproxy->net_ns;
+	struct net_device *dev = net->loopback_dev;
+	struct nf_hook_state *user_ctx, hook_state = {
+		.pf = NFPROTO_IPV4,
+		.hook = NF_INET_LOCAL_OUT,
+	};
+	u32 size = kattr->test.data_size_in;
+	u32 repeat = kattr->test.repeat;
+	struct bpf_nf_ctx ctx = {
+		.state = &hook_state,
+	};
+	struct sk_buff *skb = NULL;
+	u32 retval, duration;
+	void *data;
+	int ret;
+
+	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
+		return -EINVAL;
+
+	if (size < sizeof(struct iphdr))
+		return -EINVAL;
+
+	data = bpf_test_init(kattr, kattr->test.data_size_in, size,
+			     NET_SKB_PAD + NET_IP_ALIGN,
+			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+	if (IS_ERR(data))
+		return PTR_ERR(data);
+
+	if (!repeat)
+		repeat = 1;
+
+	user_ctx = bpf_ctx_init(kattr, sizeof(struct nf_hook_state));
+	if (IS_ERR(user_ctx)) {
+		kfree(data);
+		return PTR_ERR(user_ctx);
+	}
+
+	if (user_ctx) {
+		ret = verify_and_copy_hook_state(&hook_state, user_ctx, dev);
+		if (ret)
+			goto out;
+	}
+
+	skb = slab_build_skb(data);
+	if (!skb) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	data = NULL; /* data released via kfree_skb */
+
+	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+	__skb_put(skb, size);
+
+	ret = -EINVAL;
+
+	if (hook_state.hook != NF_INET_LOCAL_OUT) {
+		if (size < ETH_HLEN + sizeof(struct iphdr))
+			goto out;
+
+		skb->protocol = eth_type_trans(skb, dev);
+		switch (skb->protocol) {
+		case htons(ETH_P_IP):
+			if (hook_state.pf == NFPROTO_IPV4)
+				break;
+			goto out;
+		case htons(ETH_P_IPV6):
+			if (size < ETH_HLEN + sizeof(struct ipv6hdr))
+				goto out;
+			if (hook_state.pf == NFPROTO_IPV6)
+				break;
+			goto out;
+		default:
+			ret = -EPROTO;
+			goto out;
+		}
+
+		skb_reset_network_header(skb);
+	} else {
+		skb->protocol = nfproto_eth(hook_state.pf);
+	}
+
+	ctx.skb = skb;
+
+	ret = bpf_test_run(prog, &ctx, repeat, &retval, &duration, false);
+	if (ret)
+		goto out;
+
+	ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);
+
+out:
+	kfree(user_ctx);
+	kfree_skb(skb);
+	kfree(data);
+	return ret;
+}
+
 static const struct btf_kfunc_id_set bpf_prog_test_kfunc_set = {
 	.owner = THIS_MODULE,
 	.set = &test_sk_check_kfunc_ids,
diff --git a/net/netfilter/nf_bpf_link.c b/net/netfilter/nf_bpf_link.c
index 49cfc5215386..c36da56d756f 100644
--- a/net/netfilter/nf_bpf_link.c
+++ b/net/netfilter/nf_bpf_link.c
@@ -166,6 +166,7 @@ int bpf_nf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
 }
 
 const struct bpf_prog_ops netfilter_prog_ops = {
+	.test_run = bpf_prog_test_run_nf,
 };
 
 static bool nf_ptr_to_btf_id(struct bpf_insn_access_aux *info, const char *name)
--
cgit v1.2.3
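The new hook can be driven from userspace through the standard test-run
API. A minimal hedged sketch (prog_fd is assumed to be a loaded
BPF_PROG_TYPE_NETFILTER program; no ctx is passed, so the NFPROTO_IPV4 /
NF_INET_LOCAL_OUT defaults above apply and the packet must start with an
IPv4 header):

#include <bpf/bpf.h>

static int run_nf_prog_once(int prog_fd, void *pkt, __u32 pkt_len)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = pkt,
		.data_size_in = pkt_len, /* must be >= sizeof(struct iphdr) */
		.repeat = 1,
	);
	int err;

	err = bpf_prog_test_run_opts(prog_fd, &opts);
	if (err)
		return err;     /* negative errno from the kernel */

	return opts.retval;     /* verdict returned by the program */
}

Overriding the hook point would instead mean passing an nf_hook_state
image via ctx_in/ctx_size_in, with only pf and hook set, as enforced by
verify_and_copy_hook_state() above.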