author		Jakub Kicinski <kuba@kernel.org>	2024-01-27 08:08:21 +0300
committer	Jakub Kicinski <kuba@kernel.org>	2024-01-27 08:08:22 +0300
commit		92046e83c07b064ca65ac4ae7660a540016bdfc1 (patch)
tree		7f8a5dd9522e2b744a48d93ca5a7ec418ea192f5 /tools/testing/selftests/bpf/prog_tests
parent		c09f32a859458002b40ba44fc736329a4c0fe4e5 (diff)
parent		fa7178b0f12e55a4f2d4906df3f25d6d4f88d962 (diff)
Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:
====================
pull-request: bpf-next 2024-01-26
We've added 107 non-merge commits during the last 4 day(s) which contain
a total of 101 files changed, 6009 insertions(+), 1260 deletions(-).
The main changes are:
1) Add BPF token support to delegate a subset of BPF subsystem
functionality from privileged system-wide daemons such as systemd
through special mount options for userns-bound BPF fs to a trusted
and unprivileged application, with changes from Christian's and
Linus' reviews addressed. From Andrii Nakryiko. (A minimal usage
sketch follows this list.)
2) Support registration of struct_ops types from modules, which helps
projects like fuse-bpf that seek to implement a new struct_ops type,
from Kui-Feng Lee.
3) Add support for retrieval of cookies for perf/kprobe multi links,
from Jiri Olsa. (A cookie retrieval sketch follows the commit
message below.)
4) Bigger batch of prep-work for the BPF verifier to eventually support
preserving boundaries and tracking scalars on narrowing fills,
from Maxim Mikityanskiy.
5) Extend the tc BPF flavor to support arbitrary TCP SYN cookies to help
mitigate SYN floods, from Kuniyuki Iwashima. (A tc attach sketch
follows the diffstat below.)
6) Add code generation to inline the bpf_kptr_xchg() helper which
improves performance when stashing/popping the allocated BPF objects,
from Hou Tao.
7) Extend BPF verifier to track aligned ST stores as imprecise spilled
registers, from Yonghong Song.
8) Several fixes to BPF selftests around inline asm constraints and
unsupported VLA code generation, from Jose E. Marchesi.
9) Various updates to the BPF IETF instruction set draft document such
as the introduction of conformance groups for instructions,
from Dave Thaler.
10) Fix BPF verifier to make infinite loop detection in is_state_visited()
exact to catch some too lax spill/fill corner cases,
from Eduard Zingerman.
11) Refactor the BPF verifier pointer ALU check to allow ALU explicitly
instead of implicitly for various register types, from Hao Sun.
12) Fix the flaky tc_redirect_dtime BPF selftest caused by slow
neighbor advertisement at setup time, from Martin KaFai Lau. (A
receive-timestamp sketch follows the diff at the end.)
13) Change BPF selftests to skip callback tests for the case when the
JIT is disabled, from Tiezhu Yang.
14) Add a small extension to libbpf which allows auto-creating
a map-in-map's inner map, from Andrey Grafin.
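As referenced in (1), below is a minimal sketch of the token flow that
prog_tests/token.c (part of this diff) exercises: it assumes a BPF FS
instance already mounted with suitable delegate_* options and reachable
at bpffs_path, creates a token from it with bpf_token_create(), and
passes the token fd to map creation via BPF_F_TOKEN_FD. A sketch under
those assumptions, not a drop-in helper:

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/* bpffs_path is assumed to point at a BPF FS mounted with
 * delegate_cmds/delegate_maps covering BPF_MAP_CREATE and the stack
 * map type, as materialize_bpffs_fd() sets up in the selftest below.
 */
static int create_map_with_token(const char *bpffs_path)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts);
	int bpffs_fd, token_fd, map_fd;

	bpffs_fd = open(bpffs_path, O_RDONLY);
	if (bpffs_fd < 0)
		return -errno;

	token_fd = bpf_token_create(bpffs_fd, NULL); /* BPF_TOKEN_CREATE */
	close(bpffs_fd);
	if (token_fd < 0)
		return token_fd;

	opts.map_flags = BPF_F_TOKEN_FD; /* tells the kernel token_fd is set */
	opts.token_fd = token_fd;
	map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "token_map", 0, 8, 1, &opts);
	close(token_fd);
	return map_fd;
}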
* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (107 commits)
selftests/bpf: Add missing line break in test_verifier
bpf, docs: Clarify definitions of various instructions
bpf: Fix error checks against bpf_get_btf_vmlinux().
bpf: One more maintainer for libbpf and BPF selftests
selftests/bpf: Incorporate LSM policy to token-based tests
selftests/bpf: Add tests for LIBBPF_BPF_TOKEN_PATH envvar
libbpf: Support BPF token path setting through LIBBPF_BPF_TOKEN_PATH envvar
selftests/bpf: Add tests for BPF object load with implicit token
selftests/bpf: Add BPF object loading tests with explicit token passing
libbpf: Wire up BPF token support at BPF object level
libbpf: Wire up token_fd into feature probing logic
libbpf: Move feature detection code into its own file
libbpf: Further decouple feature checking logic from bpf_object
libbpf: Split feature detectors definitions from cached results
selftests/bpf: Utilize string values for delegate_xxx mount options
bpf: Support symbolic BPF FS delegation mount options
bpf: Fail BPF_TOKEN_CREATE if no delegation option was set on BPF FS
bpf,selinux: Allocate bpf_security_struct per BPF token
selftests/bpf: Add BPF token-enabled tests
libbpf: Add BPF token support to bpf_prog_load() API
...
====================
Link: https://lore.kernel.org/r/20240126215710.19855-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
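As referenced in (3), the new link-info cookie fields are exercised in
prog_tests/fill_link_info.c below. A condensed sketch, assuming prog is
a loaded kprobe program handle from a skeleton; the 0xdeadbeef value
mirrors the test's PERF_EVENT_COOKIE:

#include <errno.h>
#include <string.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/* Attach a kprobe with a cookie and read it back through
 * bpf_link_get_info_by_fd(); info.perf_event.kprobe.cookie is one of
 * the fields this series adds.
 */
static int check_kprobe_cookie(struct bpf_program *prog)
{
	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts,
		.attach_mode = PROBE_ATTACH_MODE_LINK,
		.bpf_cookie = 0xdeadbeef,
	);
	struct bpf_link_info info;
	__u32 len = sizeof(info);
	struct bpf_link *link;
	int err;

	link = bpf_program__attach_kprobe_opts(prog, "bpf_fentry_test1", &opts);
	if (!link)
		return -errno;

	memset(&info, 0, sizeof(info));
	err = bpf_link_get_info_by_fd(bpf_link__fd(link), &info, &len);
	if (!err && info.perf_event.kprobe.cookie != 0xdeadbeef)
		err = -EINVAL;

	bpf_link__destroy(link);
	return err;
}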
Diffstat (limited to 'tools/testing/selftests/bpf/prog_tests')
12 files changed, 1524 insertions, 68 deletions
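As referenced in (5), the new tcp_custom_syncookie test below attaches
its SYN cookie program at tc ingress with libbpf's bpf_tc API instead
of shelling out to tc. A trimmed sketch of that setup, assuming prog_fd
is a loaded BPF_PROG_TYPE_SCHED_CLS program:

#include <errno.h>
#include <net/if.h>
#include <bpf/libbpf.h>

/* Create a clsact qdisc on lo and attach prog_fd at ingress,
 * mirroring setup_tc() in the new test.
 */
static int attach_tc_ingress_lo(int prog_fd)
{
	LIBBPF_OPTS(bpf_tc_hook, hook, .attach_point = BPF_TC_INGRESS);
	LIBBPF_OPTS(bpf_tc_opts, tc_attach, .prog_fd = prog_fd);
	int err;

	hook.ifindex = if_nametoindex("lo");
	if (!hook.ifindex)
		return -errno;

	err = bpf_tc_hook_create(&hook);	/* qdisc add dev lo clsact */
	if (err && err != -EEXIST)
		return err;

	return bpf_tc_attach(&hook, &tc_attach); /* filter add dev lo ingress */
}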
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c index e770912fc1d2..4c6ada5b270b 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c @@ -35,7 +35,7 @@ static int check_load(const char *file, enum bpf_prog_type type) } bpf_program__set_type(prog, type); - bpf_program__set_flags(prog, BPF_F_TEST_RND_HI32 | BPF_F_TEST_REG_INVARIANTS); + bpf_program__set_flags(prog, testing_prog_flags()); bpf_program__set_log_level(prog, 4 | extra_prog_load_log_flags); err = bpf_object__load(obj); diff --git a/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c b/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c index 4951aa978f33..3b7c57fe55a5 100644 --- a/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c +++ b/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c @@ -626,50 +626,6 @@ err: return false; } -/* Request BPF program instructions after all rewrites are applied, - * e.g. verifier.c:convert_ctx_access() is done. - */ -static int get_xlated_program(int fd_prog, struct bpf_insn **buf, __u32 *cnt) -{ - struct bpf_prog_info info = {}; - __u32 info_len = sizeof(info); - __u32 xlated_prog_len; - __u32 buf_element_size = sizeof(struct bpf_insn); - - if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) { - perror("bpf_prog_get_info_by_fd failed"); - return -1; - } - - xlated_prog_len = info.xlated_prog_len; - if (xlated_prog_len % buf_element_size) { - printf("Program length %d is not multiple of %d\n", - xlated_prog_len, buf_element_size); - return -1; - } - - *cnt = xlated_prog_len / buf_element_size; - *buf = calloc(*cnt, buf_element_size); - if (!buf) { - perror("can't allocate xlated program buffer"); - return -ENOMEM; - } - - bzero(&info, sizeof(info)); - info.xlated_prog_len = xlated_prog_len; - info.xlated_prog_insns = (__u64)(unsigned long)*buf; - if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) { - perror("second bpf_prog_get_info_by_fd failed"); - goto out_free_buf; - } - - return 0; - -out_free_buf: - free(*buf); - return -1; -} - static void print_insn(void *private_data, const char *fmt, ...) 
{ va_list args; diff --git a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c index d4b1901f7879..f3932941bbaa 100644 --- a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c +++ b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c @@ -19,6 +19,7 @@ static const char *kmulti_syms[] = { }; #define KMULTI_CNT ARRAY_SIZE(kmulti_syms) static __u64 kmulti_addrs[KMULTI_CNT]; +static __u64 kmulti_cookies[] = { 3, 1, 2 }; #define KPROBE_FUNC "bpf_fentry_test1" static __u64 kprobe_addr; @@ -31,6 +32,8 @@ static noinline void uprobe_func(void) asm volatile (""); } +#define PERF_EVENT_COOKIE 0xdeadbeef + static int verify_perf_link_info(int fd, enum bpf_perf_event_type type, long addr, ssize_t offset, ssize_t entry_offset) { @@ -62,6 +65,8 @@ again: ASSERT_EQ(info.perf_event.kprobe.addr, addr + entry_offset, "kprobe_addr"); + ASSERT_EQ(info.perf_event.kprobe.cookie, PERF_EVENT_COOKIE, "kprobe_cookie"); + if (!info.perf_event.kprobe.func_name) { ASSERT_EQ(info.perf_event.kprobe.name_len, 0, "name_len"); info.perf_event.kprobe.func_name = ptr_to_u64(&buf); @@ -81,6 +86,8 @@ again: goto again; } + ASSERT_EQ(info.perf_event.tracepoint.cookie, PERF_EVENT_COOKIE, "tracepoint_cookie"); + err = strncmp(u64_to_ptr(info.perf_event.tracepoint.tp_name), TP_NAME, strlen(TP_NAME)); ASSERT_EQ(err, 0, "cmp_tp_name"); @@ -96,10 +103,17 @@ again: goto again; } + ASSERT_EQ(info.perf_event.uprobe.cookie, PERF_EVENT_COOKIE, "uprobe_cookie"); + err = strncmp(u64_to_ptr(info.perf_event.uprobe.file_name), UPROBE_FILE, strlen(UPROBE_FILE)); ASSERT_EQ(err, 0, "cmp_file_name"); break; + case BPF_PERF_EVENT_EVENT: + ASSERT_EQ(info.perf_event.event.type, PERF_TYPE_SOFTWARE, "event_type"); + ASSERT_EQ(info.perf_event.event.config, PERF_COUNT_SW_PAGE_FAULTS, "event_config"); + ASSERT_EQ(info.perf_event.event.cookie, PERF_EVENT_COOKIE, "event_cookie"); + break; default: err = -1; break; @@ -139,6 +153,7 @@ static void test_kprobe_fill_link_info(struct test_fill_link_info *skel, DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts, .attach_mode = PROBE_ATTACH_MODE_LINK, .retprobe = type == BPF_PERF_EVENT_KRETPROBE, + .bpf_cookie = PERF_EVENT_COOKIE, ); ssize_t entry_offset = 0; struct bpf_link *link; @@ -163,10 +178,13 @@ static void test_kprobe_fill_link_info(struct test_fill_link_info *skel, static void test_tp_fill_link_info(struct test_fill_link_info *skel) { + DECLARE_LIBBPF_OPTS(bpf_tracepoint_opts, opts, + .bpf_cookie = PERF_EVENT_COOKIE, + ); struct bpf_link *link; int link_fd, err; - link = bpf_program__attach_tracepoint(skel->progs.tp_run, TP_CAT, TP_NAME); + link = bpf_program__attach_tracepoint_opts(skel->progs.tp_run, TP_CAT, TP_NAME, &opts); if (!ASSERT_OK_PTR(link, "attach_tp")) return; @@ -176,16 +194,53 @@ static void test_tp_fill_link_info(struct test_fill_link_info *skel) bpf_link__destroy(link); } +static void test_event_fill_link_info(struct test_fill_link_info *skel) +{ + DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, opts, + .bpf_cookie = PERF_EVENT_COOKIE, + ); + struct bpf_link *link; + int link_fd, err, pfd; + struct perf_event_attr attr = { + .type = PERF_TYPE_SOFTWARE, + .config = PERF_COUNT_SW_PAGE_FAULTS, + .freq = 1, + .sample_freq = 1, + .size = sizeof(struct perf_event_attr), + }; + + pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu 0 */, + -1 /* group id */, 0 /* flags */); + if (!ASSERT_GE(pfd, 0, "perf_event_open")) + return; + + link = bpf_program__attach_perf_event_opts(skel->progs.event_run, pfd, &opts); + if 
(!ASSERT_OK_PTR(link, "attach_event")) + goto error; + + link_fd = bpf_link__fd(link); + err = verify_perf_link_info(link_fd, BPF_PERF_EVENT_EVENT, 0, 0, 0); + ASSERT_OK(err, "verify_perf_link_info"); + bpf_link__destroy(link); + +error: + close(pfd); +} + static void test_uprobe_fill_link_info(struct test_fill_link_info *skel, enum bpf_perf_event_type type) { + DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts, + .retprobe = type == BPF_PERF_EVENT_URETPROBE, + .bpf_cookie = PERF_EVENT_COOKIE, + ); struct bpf_link *link; int link_fd, err; - link = bpf_program__attach_uprobe(skel->progs.uprobe_run, - type == BPF_PERF_EVENT_URETPROBE, - 0, /* self pid */ - UPROBE_FILE, uprobe_offset); + link = bpf_program__attach_uprobe_opts(skel->progs.uprobe_run, + 0, /* self pid */ + UPROBE_FILE, uprobe_offset, + &opts); if (!ASSERT_OK_PTR(link, "attach_uprobe")) return; @@ -195,11 +250,11 @@ static void test_uprobe_fill_link_info(struct test_fill_link_info *skel, bpf_link__destroy(link); } -static int verify_kmulti_link_info(int fd, bool retprobe) +static int verify_kmulti_link_info(int fd, bool retprobe, bool has_cookies) { + __u64 addrs[KMULTI_CNT], cookies[KMULTI_CNT]; struct bpf_link_info info; __u32 len = sizeof(info); - __u64 addrs[KMULTI_CNT]; int flags, i, err; memset(&info, 0, sizeof(info)); @@ -221,18 +276,22 @@ again: if (!info.kprobe_multi.addrs) { info.kprobe_multi.addrs = ptr_to_u64(addrs); + info.kprobe_multi.cookies = ptr_to_u64(cookies); goto again; } - for (i = 0; i < KMULTI_CNT; i++) + for (i = 0; i < KMULTI_CNT; i++) { ASSERT_EQ(addrs[i], kmulti_addrs[i], "kmulti_addrs"); + ASSERT_EQ(cookies[i], has_cookies ? kmulti_cookies[i] : 0, + "kmulti_cookies_value"); + } return 0; } static void verify_kmulti_invalid_user_buffer(int fd) { + __u64 addrs[KMULTI_CNT], cookies[KMULTI_CNT]; struct bpf_link_info info; __u32 len = sizeof(info); - __u64 addrs[KMULTI_CNT]; int err, i; memset(&info, 0, sizeof(info)); @@ -266,7 +325,20 @@ static void verify_kmulti_invalid_user_buffer(int fd) info.kprobe_multi.count = KMULTI_CNT; info.kprobe_multi.addrs = 0x1; /* invalid addr */ err = bpf_link_get_info_by_fd(fd, &info, &len); - ASSERT_EQ(err, -EFAULT, "invalid_buff"); + ASSERT_EQ(err, -EFAULT, "invalid_buff_addrs"); + + info.kprobe_multi.count = KMULTI_CNT; + info.kprobe_multi.addrs = ptr_to_u64(addrs); + info.kprobe_multi.cookies = 0x1; /* invalid addr */ + err = bpf_link_get_info_by_fd(fd, &info, &len); + ASSERT_EQ(err, -EFAULT, "invalid_buff_cookies"); + + /* cookies && !count */ + info.kprobe_multi.count = 0; + info.kprobe_multi.addrs = ptr_to_u64(NULL); + info.kprobe_multi.cookies = ptr_to_u64(cookies); + err = bpf_link_get_info_by_fd(fd, &info, &len); + ASSERT_EQ(err, -EINVAL, "invalid_cookies_count"); } static int symbols_cmp_r(const void *a, const void *b) @@ -278,13 +350,15 @@ static int symbols_cmp_r(const void *a, const void *b) } static void test_kprobe_multi_fill_link_info(struct test_fill_link_info *skel, - bool retprobe, bool invalid) + bool retprobe, bool cookies, + bool invalid) { LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); struct bpf_link *link; int link_fd, err; opts.syms = kmulti_syms; + opts.cookies = cookies ? 
kmulti_cookies : NULL; opts.cnt = KMULTI_CNT; opts.retprobe = retprobe; link = bpf_program__attach_kprobe_multi_opts(skel->progs.kmulti_run, NULL, &opts); @@ -293,7 +367,7 @@ static void test_kprobe_multi_fill_link_info(struct test_fill_link_info *skel, link_fd = bpf_link__fd(link); if (!invalid) { - err = verify_kmulti_link_info(link_fd, retprobe); + err = verify_kmulti_link_info(link_fd, retprobe, cookies); ASSERT_OK(err, "verify_kmulti_link_info"); } else { verify_kmulti_invalid_user_buffer(link_fd); @@ -513,6 +587,8 @@ void test_fill_link_info(void) test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KPROBE, true); if (test__start_subtest("tracepoint_link_info")) test_tp_fill_link_info(skel); + if (test__start_subtest("event_link_info")) + test_event_fill_link_info(skel); uprobe_offset = get_uprobe_offset(&uprobe_func); if (test__start_subtest("uprobe_link_info")) @@ -523,12 +599,16 @@ void test_fill_link_info(void) qsort(kmulti_syms, KMULTI_CNT, sizeof(kmulti_syms[0]), symbols_cmp_r); for (i = 0; i < KMULTI_CNT; i++) kmulti_addrs[i] = ksym_get_addr(kmulti_syms[i]); - if (test__start_subtest("kprobe_multi_link_info")) - test_kprobe_multi_fill_link_info(skel, false, false); - if (test__start_subtest("kretprobe_multi_link_info")) - test_kprobe_multi_fill_link_info(skel, true, false); + if (test__start_subtest("kprobe_multi_link_info")) { + test_kprobe_multi_fill_link_info(skel, false, false, false); + test_kprobe_multi_fill_link_info(skel, false, true, false); + } + if (test__start_subtest("kretprobe_multi_link_info")) { + test_kprobe_multi_fill_link_info(skel, true, false, false); + test_kprobe_multi_fill_link_info(skel, true, true, false); + } if (test__start_subtest("kprobe_multi_invalid_ubuff")) - test_kprobe_multi_fill_link_info(skel, true, true); + test_kprobe_multi_fill_link_info(skel, true, true, true); if (test__start_subtest("uprobe_multi_link_info")) test_uprobe_multi_fill_link_info(skel, false, false); diff --git a/tools/testing/selftests/bpf/prog_tests/kptr_xchg_inline.c b/tools/testing/selftests/bpf/prog_tests/kptr_xchg_inline.c new file mode 100644 index 000000000000..15144943e88b --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/kptr_xchg_inline.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2023. 
Huawei Technologies Co., Ltd */ +#include <test_progs.h> + +#include "linux/filter.h" +#include "kptr_xchg_inline.skel.h" + +void test_kptr_xchg_inline(void) +{ + struct kptr_xchg_inline *skel; + struct bpf_insn *insn = NULL; + struct bpf_insn exp; + unsigned int cnt; + int err; + +#if !(defined(__x86_64__) || defined(__aarch64__)) + test__skip(); + return; +#endif + + skel = kptr_xchg_inline__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open_load")) + return; + + err = get_xlated_program(bpf_program__fd(skel->progs.kptr_xchg_inline), &insn, &cnt); + if (!ASSERT_OK(err, "prog insn")) + goto out; + + /* The original instructions are: + * r1 = map[id:xxx][0]+0 + * r2 = 0 + * call bpf_kptr_xchg#yyy + * + * call bpf_kptr_xchg#yyy will be inlined as: + * r0 = r2 + * r0 = atomic64_xchg((u64 *)(r1 +0), r0) + */ + if (!ASSERT_GT(cnt, 5, "insn cnt")) + goto out; + + exp = BPF_MOV64_REG(BPF_REG_0, BPF_REG_2); + if (!ASSERT_OK(memcmp(&insn[3], &exp, sizeof(exp)), "mov")) + goto out; + + exp = BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_1, BPF_REG_0, 0); + if (!ASSERT_OK(memcmp(&insn[4], &exp, sizeof(exp)), "xchg")) + goto out; +out: + free(insn); + kptr_xchg_inline__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/libbpf_probes.c b/tools/testing/selftests/bpf/prog_tests/libbpf_probes.c index 9f766ddd946a..4ed46ed58a7b 100644 --- a/tools/testing/selftests/bpf/prog_tests/libbpf_probes.c +++ b/tools/testing/selftests/bpf/prog_tests/libbpf_probes.c @@ -30,6 +30,8 @@ void test_libbpf_probe_prog_types(void) if (prog_type == BPF_PROG_TYPE_UNSPEC) continue; + if (strcmp(prog_type_name, "__MAX_BPF_PROG_TYPE") == 0) + continue; if (!test__start_subtest(prog_type_name)) continue; @@ -68,6 +70,8 @@ void test_libbpf_probe_map_types(void) if (map_type == BPF_MAP_TYPE_UNSPEC) continue; + if (strcmp(map_type_name, "__MAX_BPF_MAP_TYPE") == 0) + continue; if (!test__start_subtest(map_type_name)) continue; diff --git a/tools/testing/selftests/bpf/prog_tests/libbpf_str.c b/tools/testing/selftests/bpf/prog_tests/libbpf_str.c index eb34d612d6f8..62ea855ec4d0 100644 --- a/tools/testing/selftests/bpf/prog_tests/libbpf_str.c +++ b/tools/testing/selftests/bpf/prog_tests/libbpf_str.c @@ -132,6 +132,9 @@ static void test_libbpf_bpf_map_type_str(void) const char *map_type_str; char buf[256]; + if (map_type == __MAX_BPF_MAP_TYPE) + continue; + map_type_name = btf__str_by_offset(btf, e->name_off); map_type_str = libbpf_bpf_map_type_str(map_type); ASSERT_OK_PTR(map_type_str, map_type_name); @@ -186,6 +189,9 @@ static void test_libbpf_bpf_prog_type_str(void) const char *prog_type_str; char buf[256]; + if (prog_type == __MAX_BPF_PROG_TYPE) + continue; + prog_type_name = btf__str_by_offset(btf, e->name_off); prog_type_str = libbpf_bpf_prog_type_str(prog_type); ASSERT_OK_PTR(prog_type_str, prog_type_name); diff --git a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c index 820d0bcfc474..eb74363f9f70 100644 --- a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c +++ b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c @@ -840,7 +840,7 @@ static int load_range_cmp_prog(struct range x, struct range y, enum op op, .log_level = 2, .log_buf = log_buf, .log_size = log_sz, - .prog_flags = BPF_F_TEST_REG_INVARIANTS, + .prog_flags = testing_prog_flags(), ); /* ; skip exit block below diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c index 518f143c5b0f..dbe06aeaa2b2 100644 --- 
a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c @@ -188,6 +188,7 @@ static int netns_setup_links_and_routes(struct netns_setup_result *result) { struct nstoken *nstoken = NULL; char src_fwd_addr[IFADDR_STR_LEN+1] = {}; + char src_addr[IFADDR_STR_LEN + 1] = {}; int err; if (result->dev_mode == MODE_VETH) { @@ -208,6 +209,9 @@ static int netns_setup_links_and_routes(struct netns_setup_result *result) if (get_ifaddr("src_fwd", src_fwd_addr)) goto fail; + if (get_ifaddr("src", src_addr)) + goto fail; + result->ifindex_src = if_nametoindex("src"); if (!ASSERT_GT(result->ifindex_src, 0, "ifindex_src")) goto fail; @@ -270,6 +274,13 @@ static int netns_setup_links_and_routes(struct netns_setup_result *result) SYS(fail, "ip route add " IP4_DST "/32 dev dst_fwd scope global"); SYS(fail, "ip route add " IP6_DST "/128 dev dst_fwd scope global"); + if (result->dev_mode == MODE_VETH) { + SYS(fail, "ip neigh add " IP4_SRC " dev src_fwd lladdr %s", src_addr); + SYS(fail, "ip neigh add " IP6_SRC " dev src_fwd lladdr %s", src_addr); + SYS(fail, "ip neigh add " IP4_DST " dev dst_fwd lladdr %s", MAC_DST); + SYS(fail, "ip neigh add " IP6_DST " dev dst_fwd lladdr %s", MAC_DST); + } + close_netns(nstoken); /** setup in 'dst' namespace */ @@ -280,6 +291,7 @@ static int netns_setup_links_and_routes(struct netns_setup_result *result) SYS(fail, "ip addr add " IP4_DST "/32 dev dst"); SYS(fail, "ip addr add " IP6_DST "/128 dev dst nodad"); SYS(fail, "ip link set dev dst up"); + SYS(fail, "ip link set dev lo up"); SYS(fail, "ip route add " IP4_SRC "/32 dev dst scope global"); SYS(fail, "ip route add " IP4_NET "/16 dev dst scope global"); @@ -457,7 +469,7 @@ static int set_forwarding(bool enable) return 0; } -static void rcv_tstamp(int fd, const char *expected, size_t s) +static int __rcv_tstamp(int fd, const char *expected, size_t s, __u64 *tstamp) { struct __kernel_timespec pkt_ts = {}; char ctl[CMSG_SPACE(sizeof(pkt_ts))]; @@ -478,7 +490,7 @@ static void rcv_tstamp(int fd, const char *expected, size_t s) ret = recvmsg(fd, &msg, 0); if (!ASSERT_EQ(ret, s, "recvmsg")) - return; + return -1; ASSERT_STRNEQ(data, expected, s, "expected rcv data"); cmsg = CMSG_FIRSTHDR(&msg); @@ -487,6 +499,12 @@ static void rcv_tstamp(int fd, const char *expected, size_t s) memcpy(&pkt_ts, CMSG_DATA(cmsg), sizeof(pkt_ts)); pkt_ns = pkt_ts.tv_sec * NSEC_PER_SEC + pkt_ts.tv_nsec; + if (tstamp) { + /* caller will check the tstamp itself */ + *tstamp = pkt_ns; + return 0; + } + ASSERT_NEQ(pkt_ns, 0, "pkt rcv tstamp"); ret = clock_gettime(CLOCK_REALTIME, &now_ts); @@ -496,6 +514,60 @@ static void rcv_tstamp(int fd, const char *expected, size_t s) if (ASSERT_GE(now_ns, pkt_ns, "check rcv tstamp")) ASSERT_LT(now_ns - pkt_ns, 5 * NSEC_PER_SEC, "check rcv tstamp"); + return 0; +} + +static void rcv_tstamp(int fd, const char *expected, size_t s) +{ + __rcv_tstamp(fd, expected, s, NULL); +} + +static int wait_netstamp_needed_key(void) +{ + int opt = 1, srv_fd = -1, cli_fd = -1, nretries = 0, err, n; + char buf[] = "testing testing"; + struct nstoken *nstoken; + __u64 tstamp = 0; + + nstoken = open_netns(NS_DST); + if (!nstoken) + return -1; + + srv_fd = start_server(AF_INET6, SOCK_DGRAM, "::1", 0, 0); + if (!ASSERT_GE(srv_fd, 0, "start_server")) + goto done; + + err = setsockopt(srv_fd, SOL_SOCKET, SO_TIMESTAMPNS_NEW, + &opt, sizeof(opt)); + if (!ASSERT_OK(err, "setsockopt(SO_TIMESTAMPNS_NEW)")) + goto done; + + cli_fd = connect_to_fd(srv_fd, TIMEOUT_MILLIS); + if (!ASSERT_GE(cli_fd, 
0, "connect_to_fd")) + goto done; + +again: + n = write(cli_fd, buf, sizeof(buf)); + if (!ASSERT_EQ(n, sizeof(buf), "send to server")) + goto done; + err = __rcv_tstamp(srv_fd, buf, sizeof(buf), &tstamp); + if (!ASSERT_OK(err, "__rcv_tstamp")) + goto done; + if (!tstamp && nretries++ < 5) { + sleep(1); + printf("netstamp_needed_key retry#%d\n", nretries); + goto again; + } + +done: + if (!tstamp && srv_fd != -1) { + close(srv_fd); + srv_fd = -1; + } + if (cli_fd != -1) + close(cli_fd); + close_netns(nstoken); + return srv_fd; } static void snd_tstamp(int fd, char *b, size_t s) @@ -832,11 +904,20 @@ static void test_tc_redirect_dtime(struct netns_setup_result *setup_result) { struct test_tc_dtime *skel; struct nstoken *nstoken; - int err; + int hold_tstamp_fd, err; + + /* Hold a sk with the SOCK_TIMESTAMP set to ensure there + * is no delay in the kernel net_enable_timestamp(). + * This ensures the following tests must have + * non zero rcv tstamp in the recvmsg(). + */ + hold_tstamp_fd = wait_netstamp_needed_key(); + if (!ASSERT_GE(hold_tstamp_fd, 0, "wait_netstamp_needed_key")) + return; skel = test_tc_dtime__open(); if (!ASSERT_OK_PTR(skel, "test_tc_dtime__open")) - return; + goto done; skel->rodata->IFINDEX_SRC = setup_result->ifindex_src_fwd; skel->rodata->IFINDEX_DST = setup_result->ifindex_dst_fwd; @@ -881,6 +962,7 @@ static void test_tc_redirect_dtime(struct netns_setup_result *setup_result) done: test_tc_dtime__destroy(skel); + close(hold_tstamp_fd); } static void test_tc_redirect_neigh_fib(struct netns_setup_result *setup_result) diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_custom_syncookie.c b/tools/testing/selftests/bpf/prog_tests/tcp_custom_syncookie.c new file mode 100644 index 000000000000..eaf441dc7e79 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/tcp_custom_syncookie.c @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright Amazon.com Inc. or its affiliates. 
*/ + +#define _GNU_SOURCE +#include <sched.h> +#include <stdlib.h> +#include <net/if.h> + +#include "test_progs.h" +#include "cgroup_helpers.h" +#include "network_helpers.h" +#include "test_tcp_custom_syncookie.skel.h" + +static struct test_tcp_custom_syncookie_case { + int family, type; + char addr[16]; + char name[10]; +} test_cases[] = { + { + .name = "IPv4 TCP", + .family = AF_INET, + .type = SOCK_STREAM, + .addr = "127.0.0.1", + }, + { + .name = "IPv6 TCP", + .family = AF_INET6, + .type = SOCK_STREAM, + .addr = "::1", + }, +}; + +static int setup_netns(void) +{ + if (!ASSERT_OK(unshare(CLONE_NEWNET), "create netns")) + return -1; + + if (!ASSERT_OK(system("ip link set dev lo up"), "ip")) + goto err; + + if (!ASSERT_OK(write_sysctl("/proc/sys/net/ipv4/tcp_ecn", "1"), + "write_sysctl")) + goto err; + + return 0; +err: + return -1; +} + +static int setup_tc(struct test_tcp_custom_syncookie *skel) +{ + LIBBPF_OPTS(bpf_tc_hook, qdisc_lo, .attach_point = BPF_TC_INGRESS); + LIBBPF_OPTS(bpf_tc_opts, tc_attach, + .prog_fd = bpf_program__fd(skel->progs.tcp_custom_syncookie)); + + qdisc_lo.ifindex = if_nametoindex("lo"); + if (!ASSERT_OK(bpf_tc_hook_create(&qdisc_lo), "qdisc add dev lo clsact")) + goto err; + + if (!ASSERT_OK(bpf_tc_attach(&qdisc_lo, &tc_attach), + "filter add dev lo ingress")) + goto err; + + return 0; +err: + return -1; +} + +#define msg "Hello World" +#define msglen 11 + +static void transfer_message(int sender, int receiver) +{ + char buf[msglen]; + int ret; + + ret = send(sender, msg, msglen, 0); + if (!ASSERT_EQ(ret, msglen, "send")) + return; + + memset(buf, 0, sizeof(buf)); + + ret = recv(receiver, buf, msglen, 0); + if (!ASSERT_EQ(ret, msglen, "recv")) + return; + + ret = strncmp(buf, msg, msglen); + if (!ASSERT_EQ(ret, 0, "strncmp")) + return; +} + +static void create_connection(struct test_tcp_custom_syncookie_case *test_case) +{ + int server, client, child; + + server = start_server(test_case->family, test_case->type, test_case->addr, 0, 0); + if (!ASSERT_NEQ(server, -1, "start_server")) + return; + + client = connect_to_fd(server, 0); + if (!ASSERT_NEQ(client, -1, "connect_to_fd")) + goto close_server; + + child = accept(server, NULL, 0); + if (!ASSERT_NEQ(child, -1, "accept")) + goto close_client; + + transfer_message(client, child); + transfer_message(child, client); + + close(child); +close_client: + close(client); +close_server: + close(server); +} + +void test_tcp_custom_syncookie(void) +{ + struct test_tcp_custom_syncookie *skel; + int i; + + if (setup_netns()) + return; + + skel = test_tcp_custom_syncookie__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open_and_load")) + return; + + if (setup_tc(skel)) + goto destroy_skel; + + for (i = 0; i < ARRAY_SIZE(test_cases); i++) { + if (!test__start_subtest(test_cases[i].name)) + continue; + + skel->bss->handled_syn = false; + skel->bss->handled_ack = false; + + create_connection(&test_cases[i]); + + ASSERT_EQ(skel->bss->handled_syn, true, "SYN is not handled at tc."); + ASSERT_EQ(skel->bss->handled_ack, true, "ACK is not handled at tc"); + } + +destroy_skel: + system("tc qdisc del dev lo clsact"); + + test_tcp_custom_syncookie__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c new file mode 100644 index 000000000000..8d833f0c7580 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. 
and affiliates. */ +#include <test_progs.h> +#include <time.h> + +#include "struct_ops_module.skel.h" + +static void check_map_info(struct bpf_map_info *info) +{ + struct bpf_btf_info btf_info; + char btf_name[256]; + u32 btf_info_len = sizeof(btf_info); + int err, fd; + + fd = bpf_btf_get_fd_by_id(info->btf_vmlinux_id); + if (!ASSERT_GE(fd, 0, "get_value_type_btf_obj_fd")) + return; + + memset(&btf_info, 0, sizeof(btf_info)); + btf_info.name = ptr_to_u64(btf_name); + btf_info.name_len = sizeof(btf_name); + err = bpf_btf_get_info_by_fd(fd, &btf_info, &btf_info_len); + if (!ASSERT_OK(err, "get_value_type_btf_obj_info")) + goto cleanup; + + if (!ASSERT_EQ(strcmp(btf_name, "bpf_testmod"), 0, "get_value_type_btf_obj_name")) + goto cleanup; + +cleanup: + close(fd); +} + +static void test_struct_ops_load(void) +{ + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts); + struct struct_ops_module *skel; + struct bpf_map_info info = {}; + struct bpf_link *link; + int err; + u32 len; + + skel = struct_ops_module__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "struct_ops_module_open")) + return; + + err = struct_ops_module__load(skel); + if (!ASSERT_OK(err, "struct_ops_module_load")) + goto cleanup; + + len = sizeof(info); + err = bpf_map_get_info_by_fd(bpf_map__fd(skel->maps.testmod_1), &info, + &len); + if (!ASSERT_OK(err, "bpf_map_get_info_by_fd")) + goto cleanup; + + link = bpf_map__attach_struct_ops(skel->maps.testmod_1); + ASSERT_OK_PTR(link, "attach_test_mod_1"); + + /* test_2() will be called from bpf_dummy_reg() in bpf_testmod.c */ + ASSERT_EQ(skel->bss->test_2_result, 7, "test_2_result"); + + bpf_link__destroy(link); + + check_map_info(&info); + +cleanup: + struct_ops_module__destroy(skel); +} + +void serial_test_struct_ops_module(void) +{ + if (test__start_subtest("test_struct_ops_load")) + test_struct_ops_load(); +} + diff --git a/tools/testing/selftests/bpf/prog_tests/token.c b/tools/testing/selftests/bpf/prog_tests/token.c new file mode 100644 index 000000000000..fc4a175d8d76 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/token.c @@ -0,0 +1,1052 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 
*/ +#define _GNU_SOURCE +#include <test_progs.h> +#include <bpf/btf.h> +#include "cap_helpers.h" +#include <fcntl.h> +#include <sched.h> +#include <signal.h> +#include <unistd.h> +#include <linux/filter.h> +#include <linux/unistd.h> +#include <linux/mount.h> +#include <sys/socket.h> +#include <sys/stat.h> +#include <sys/syscall.h> +#include <sys/un.h> +#include "priv_map.skel.h" +#include "priv_prog.skel.h" +#include "dummy_st_ops_success.skel.h" +#include "token_lsm.skel.h" + +static inline int sys_mount(const char *dev_name, const char *dir_name, + const char *type, unsigned long flags, + const void *data) +{ + return syscall(__NR_mount, dev_name, dir_name, type, flags, data); +} + +static inline int sys_fsopen(const char *fsname, unsigned flags) +{ + return syscall(__NR_fsopen, fsname, flags); +} + +static inline int sys_fspick(int dfd, const char *path, unsigned flags) +{ + return syscall(__NR_fspick, dfd, path, flags); +} + +static inline int sys_fsconfig(int fs_fd, unsigned cmd, const char *key, const void *val, int aux) +{ + return syscall(__NR_fsconfig, fs_fd, cmd, key, val, aux); +} + +static inline int sys_fsmount(int fs_fd, unsigned flags, unsigned ms_flags) +{ + return syscall(__NR_fsmount, fs_fd, flags, ms_flags); +} + +static inline int sys_move_mount(int from_dfd, const char *from_path, + int to_dfd, const char *to_path, + unsigned flags) +{ + return syscall(__NR_move_mount, from_dfd, from_path, to_dfd, to_path, flags); +} + +static int drop_priv_caps(__u64 *old_caps) +{ + return cap_disable_effective((1ULL << CAP_BPF) | + (1ULL << CAP_PERFMON) | + (1ULL << CAP_NET_ADMIN) | + (1ULL << CAP_SYS_ADMIN), old_caps); +} + +static int restore_priv_caps(__u64 old_caps) +{ + return cap_enable_effective(old_caps, NULL); +} + +static int set_delegate_mask(int fs_fd, const char *key, __u64 mask, const char *mask_str) +{ + char buf[32]; + int err; + + if (!mask_str) { + if (mask == ~0ULL) { + mask_str = "any"; + } else { + snprintf(buf, sizeof(buf), "0x%llx", (unsigned long long)mask); + mask_str = buf; + } + } + + err = sys_fsconfig(fs_fd, FSCONFIG_SET_STRING, key, + mask_str, 0); + if (err < 0) + err = -errno; + return err; +} + +#define zclose(fd) do { if (fd >= 0) close(fd); fd = -1; } while (0) + +struct bpffs_opts { + __u64 cmds; + __u64 maps; + __u64 progs; + __u64 attachs; + const char *cmds_str; + const char *maps_str; + const char *progs_str; + const char *attachs_str; +}; + +static int create_bpffs_fd(void) +{ + int fs_fd; + + /* create VFS context */ + fs_fd = sys_fsopen("bpf", 0); + ASSERT_GE(fs_fd, 0, "fs_fd"); + + return fs_fd; +} + +static int materialize_bpffs_fd(int fs_fd, struct bpffs_opts *opts) +{ + int mnt_fd, err; + + /* set up token delegation mount options */ + err = set_delegate_mask(fs_fd, "delegate_cmds", opts->cmds, opts->cmds_str); + if (!ASSERT_OK(err, "fs_cfg_cmds")) + return err; + err = set_delegate_mask(fs_fd, "delegate_maps", opts->maps, opts->maps_str); + if (!ASSERT_OK(err, "fs_cfg_maps")) + return err; + err = set_delegate_mask(fs_fd, "delegate_progs", opts->progs, opts->progs_str); + if (!ASSERT_OK(err, "fs_cfg_progs")) + return err; + err = set_delegate_mask(fs_fd, "delegate_attachs", opts->attachs, opts->attachs_str); + if (!ASSERT_OK(err, "fs_cfg_attachs")) + return err; + + /* instantiate FS object */ + err = sys_fsconfig(fs_fd, FSCONFIG_CMD_CREATE, NULL, NULL, 0); + if (err < 0) + return -errno; + + /* create O_PATH fd for detached mount */ + mnt_fd = sys_fsmount(fs_fd, 0, 0); + if (err < 0) + return -errno; + + return mnt_fd; +} + +/* send FD 
over Unix domain (AF_UNIX) socket */ +static int sendfd(int sockfd, int fd) +{ + struct msghdr msg = {}; + struct cmsghdr *cmsg; + int fds[1] = { fd }, err; + char iobuf[1]; + struct iovec io = { + .iov_base = iobuf, + .iov_len = sizeof(iobuf), + }; + union { + char buf[CMSG_SPACE(sizeof(fds))]; + struct cmsghdr align; + } u; + + msg.msg_iov = &io; + msg.msg_iovlen = 1; + msg.msg_control = u.buf; + msg.msg_controllen = sizeof(u.buf); + cmsg = CMSG_FIRSTHDR(&msg); + cmsg->cmsg_level = SOL_SOCKET; + cmsg->cmsg_type = SCM_RIGHTS; + cmsg->cmsg_len = CMSG_LEN(sizeof(fds)); + memcpy(CMSG_DATA(cmsg), fds, sizeof(fds)); + + err = sendmsg(sockfd, &msg, 0); + if (err < 0) + err = -errno; + if (!ASSERT_EQ(err, 1, "sendmsg")) + return -EINVAL; + + return 0; +} + +/* receive FD over Unix domain (AF_UNIX) socket */ +static int recvfd(int sockfd, int *fd) +{ + struct msghdr msg = {}; + struct cmsghdr *cmsg; + int fds[1], err; + char iobuf[1]; + struct iovec io = { + .iov_base = iobuf, + .iov_len = sizeof(iobuf), + }; + union { + char buf[CMSG_SPACE(sizeof(fds))]; + struct cmsghdr align; + } u; + + msg.msg_iov = &io; + msg.msg_iovlen = 1; + msg.msg_control = u.buf; + msg.msg_controllen = sizeof(u.buf); + + err = recvmsg(sockfd, &msg, 0); + if (err < 0) + err = -errno; + if (!ASSERT_EQ(err, 1, "recvmsg")) + return -EINVAL; + + cmsg = CMSG_FIRSTHDR(&msg); + if (!ASSERT_OK_PTR(cmsg, "cmsg_null") || + !ASSERT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(fds)), "cmsg_len") || + !ASSERT_EQ(cmsg->cmsg_level, SOL_SOCKET, "cmsg_level") || + !ASSERT_EQ(cmsg->cmsg_type, SCM_RIGHTS, "cmsg_type")) + return -EINVAL; + + memcpy(fds, CMSG_DATA(cmsg), sizeof(fds)); + *fd = fds[0]; + + return 0; +} + +static ssize_t write_nointr(int fd, const void *buf, size_t count) +{ + ssize_t ret; + + do { + ret = write(fd, buf, count); + } while (ret < 0 && errno == EINTR); + + return ret; +} + +static int write_file(const char *path, const void *buf, size_t count) +{ + int fd; + ssize_t ret; + + fd = open(path, O_WRONLY | O_CLOEXEC | O_NOCTTY | O_NOFOLLOW); + if (fd < 0) + return -1; + + ret = write_nointr(fd, buf, count); + close(fd); + if (ret < 0 || (size_t)ret != count) + return -1; + + return 0; +} + +static int create_and_enter_userns(void) +{ + uid_t uid; + gid_t gid; + char map[100]; + + uid = getuid(); + gid = getgid(); + + if (unshare(CLONE_NEWUSER)) + return -1; + + if (write_file("/proc/self/setgroups", "deny", sizeof("deny") - 1) && + errno != ENOENT) + return -1; + + snprintf(map, sizeof(map), "0 %d 1", uid); + if (write_file("/proc/self/uid_map", map, strlen(map))) + return -1; + + + snprintf(map, sizeof(map), "0 %d 1", gid); + if (write_file("/proc/self/gid_map", map, strlen(map))) + return -1; + + if (setgid(0)) + return -1; + + if (setuid(0)) + return -1; + + return 0; +} + +typedef int (*child_callback_fn)(int bpffs_fd, struct token_lsm *lsm_skel); + +static void child(int sock_fd, struct bpffs_opts *opts, child_callback_fn callback) +{ + int mnt_fd = -1, fs_fd = -1, err = 0, bpffs_fd = -1, token_fd = -1; + struct token_lsm *lsm_skel = NULL; + + /* load and attach LSM "policy" before we go into unpriv userns */ + lsm_skel = token_lsm__open_and_load(); + if (!ASSERT_OK_PTR(lsm_skel, "lsm_skel_load")) { + err = -EINVAL; + goto cleanup; + } + lsm_skel->bss->my_pid = getpid(); + err = token_lsm__attach(lsm_skel); + if (!ASSERT_OK(err, "lsm_skel_attach")) + goto cleanup; + + /* setup userns with root mappings */ + err = create_and_enter_userns(); + if (!ASSERT_OK(err, "create_and_enter_userns")) + goto cleanup; + + /* setup 
mountns to allow creating BPF FS (fsopen("bpf")) from unpriv process */ + err = unshare(CLONE_NEWNS); + if (!ASSERT_OK(err, "create_mountns")) + goto cleanup; + + err = sys_mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, 0); + if (!ASSERT_OK(err, "remount_root")) + goto cleanup; + + fs_fd = create_bpffs_fd(); + if (!ASSERT_GE(fs_fd, 0, "create_bpffs_fd")) { + err = -EINVAL; + goto cleanup; + } + + /* ensure unprivileged child cannot set delegation options */ + err = set_delegate_mask(fs_fd, "delegate_cmds", 0x1, NULL); + ASSERT_EQ(err, -EPERM, "delegate_cmd_eperm"); + err = set_delegate_mask(fs_fd, "delegate_maps", 0x1, NULL); + ASSERT_EQ(err, -EPERM, "delegate_maps_eperm"); + err = set_delegate_mask(fs_fd, "delegate_progs", 0x1, NULL); + ASSERT_EQ(err, -EPERM, "delegate_progs_eperm"); + err = set_delegate_mask(fs_fd, "delegate_attachs", 0x1, NULL); + ASSERT_EQ(err, -EPERM, "delegate_attachs_eperm"); + + /* pass BPF FS context object to parent */ + err = sendfd(sock_fd, fs_fd); + if (!ASSERT_OK(err, "send_fs_fd")) + goto cleanup; + zclose(fs_fd); + + /* avoid mucking around with mount namespaces and mounting at + * well-known path, just get detach-mounted BPF FS fd back from parent + */ + err = recvfd(sock_fd, &mnt_fd); + if (!ASSERT_OK(err, "recv_mnt_fd")) + goto cleanup; + + /* try to fspick() BPF FS and try to add some delegation options */ + fs_fd = sys_fspick(mnt_fd, "", FSPICK_EMPTY_PATH); + if (!ASSERT_GE(fs_fd, 0, "bpffs_fspick")) { + err = -EINVAL; + goto cleanup; + } + + /* ensure unprivileged child cannot reconfigure to set delegation options */ + err = set_delegate_mask(fs_fd, "delegate_cmds", 0, "any"); + if (!ASSERT_EQ(err, -EPERM, "delegate_cmd_eperm_reconfig")) { + err = -EINVAL; + goto cleanup; + } + err = set_delegate_mask(fs_fd, "delegate_maps", 0, "any"); + if (!ASSERT_EQ(err, -EPERM, "delegate_maps_eperm_reconfig")) { + err = -EINVAL; + goto cleanup; + } + err = set_delegate_mask(fs_fd, "delegate_progs", 0, "any"); + if (!ASSERT_EQ(err, -EPERM, "delegate_progs_eperm_reconfig")) { + err = -EINVAL; + goto cleanup; + } + err = set_delegate_mask(fs_fd, "delegate_attachs", 0, "any"); + if (!ASSERT_EQ(err, -EPERM, "delegate_attachs_eperm_reconfig")) { + err = -EINVAL; + goto cleanup; + } + zclose(fs_fd); + + bpffs_fd = openat(mnt_fd, ".", 0, O_RDWR); + if (!ASSERT_GE(bpffs_fd, 0, "bpffs_open")) { + err = -EINVAL; + goto cleanup; + } + + /* create BPF token FD and pass it to parent for some extra checks */ + token_fd = bpf_token_create(bpffs_fd, NULL); + if (!ASSERT_GT(token_fd, 0, "child_token_create")) { + err = -EINVAL; + goto cleanup; + } + err = sendfd(sock_fd, token_fd); + if (!ASSERT_OK(err, "send_token_fd")) + goto cleanup; + zclose(token_fd); + + /* do custom test logic with customly set up BPF FS instance */ + err = callback(bpffs_fd, lsm_skel); + if (!ASSERT_OK(err, "test_callback")) + goto cleanup; + + err = 0; +cleanup: + zclose(sock_fd); + zclose(mnt_fd); + zclose(fs_fd); + zclose(bpffs_fd); + zclose(token_fd); + + lsm_skel->bss->my_pid = 0; + token_lsm__destroy(lsm_skel); + + exit(-err); +} + +static int wait_for_pid(pid_t pid) +{ + int status, ret; + +again: + ret = waitpid(pid, &status, 0); + if (ret == -1) { + if (errno == EINTR) + goto again; + + return -1; + } + + if (!WIFEXITED(status)) + return -1; + + return WEXITSTATUS(status); +} + +static void parent(int child_pid, struct bpffs_opts *bpffs_opts, int sock_fd) +{ + int fs_fd = -1, mnt_fd = -1, token_fd = -1, err; + + err = recvfd(sock_fd, &fs_fd); + if (!ASSERT_OK(err, "recv_bpffs_fd")) + goto cleanup; + + 
mnt_fd = materialize_bpffs_fd(fs_fd, bpffs_opts); + if (!ASSERT_GE(mnt_fd, 0, "materialize_bpffs_fd")) { + err = -EINVAL; + goto cleanup; + } + zclose(fs_fd); + + /* pass BPF FS context object to parent */ + err = sendfd(sock_fd, mnt_fd); + if (!ASSERT_OK(err, "send_mnt_fd")) + goto cleanup; + zclose(mnt_fd); + + /* receive BPF token FD back from child for some extra tests */ + err = recvfd(sock_fd, &token_fd); + if (!ASSERT_OK(err, "recv_token_fd")) + goto cleanup; + + err = wait_for_pid(child_pid); + ASSERT_OK(err, "waitpid_child"); + +cleanup: + zclose(sock_fd); + zclose(fs_fd); + zclose(mnt_fd); + zclose(token_fd); + + if (child_pid > 0) + (void)kill(child_pid, SIGKILL); +} + +static void subtest_userns(struct bpffs_opts *bpffs_opts, + child_callback_fn child_cb) +{ + int sock_fds[2] = { -1, -1 }; + int child_pid = 0, err; + + err = socketpair(AF_UNIX, SOCK_STREAM, 0, sock_fds); + if (!ASSERT_OK(err, "socketpair")) + goto cleanup; + + child_pid = fork(); + if (!ASSERT_GE(child_pid, 0, "fork")) + goto cleanup; + + if (child_pid == 0) { + zclose(sock_fds[0]); + return child(sock_fds[1], bpffs_opts, child_cb); + + } else { + zclose(sock_fds[1]); + return parent(child_pid, bpffs_opts, sock_fds[0]); + } + +cleanup: + zclose(sock_fds[0]); + zclose(sock_fds[1]); + if (child_pid > 0) + (void)kill(child_pid, SIGKILL); +} + +static int userns_map_create(int mnt_fd, struct token_lsm *lsm_skel) +{ + LIBBPF_OPTS(bpf_map_create_opts, map_opts); + int err, token_fd = -1, map_fd = -1; + __u64 old_caps = 0; + + /* create BPF token from BPF FS mount */ + token_fd = bpf_token_create(mnt_fd, NULL); + if (!ASSERT_GT(token_fd, 0, "token_create")) { + err = -EINVAL; + goto cleanup; + } + + /* while inside non-init userns, we need both a BPF token *and* + * CAP_BPF inside current userns to create privileged map; let's test + * that neither BPF token alone nor namespaced CAP_BPF is sufficient + */ + err = drop_priv_caps(&old_caps); + if (!ASSERT_OK(err, "drop_caps")) + goto cleanup; + + /* no token, no CAP_BPF -> fail */ + map_opts.map_flags = 0; + map_opts.token_fd = 0; + map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "wo_token_wo_bpf", 0, 8, 1, &map_opts); + if (!ASSERT_LT(map_fd, 0, "stack_map_wo_token_wo_cap_bpf_should_fail")) { + err = -EINVAL; + goto cleanup; + } + + /* token without CAP_BPF -> fail */ + map_opts.map_flags = BPF_F_TOKEN_FD; + map_opts.token_fd = token_fd; + map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "w_token_wo_bpf", 0, 8, 1, &map_opts); + if (!ASSERT_LT(map_fd, 0, "stack_map_w_token_wo_cap_bpf_should_fail")) { + err = -EINVAL; + goto cleanup; + } + + /* get back effective local CAP_BPF (and CAP_SYS_ADMIN) */ + err = restore_priv_caps(old_caps); + if (!ASSERT_OK(err, "restore_caps")) + goto cleanup; + + /* CAP_BPF without token -> fail */ + map_opts.map_flags = 0; + map_opts.token_fd = 0; + map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "wo_token_w_bpf", 0, 8, 1, &map_opts); + if (!ASSERT_LT(map_fd, 0, "stack_map_wo_token_w_cap_bpf_should_fail")) { + err = -EINVAL; + goto cleanup; + } + + /* finally, namespaced CAP_BPF + token -> success */ + map_opts.map_flags = BPF_F_TOKEN_FD; + map_opts.token_fd = token_fd; + map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "w_token_w_bpf", 0, 8, 1, &map_opts); + if (!ASSERT_GT(map_fd, 0, "stack_map_w_token_w_cap_bpf")) { + err = -EINVAL; + goto cleanup; + } + +cleanup: + zclose(token_fd); + zclose(map_fd); + return err; +} + +static int userns_btf_load(int mnt_fd, struct token_lsm *lsm_skel) +{ + LIBBPF_OPTS(bpf_btf_load_opts, btf_opts); + int err, token_fd = 
-1, btf_fd = -1; + const void *raw_btf_data; + struct btf *btf = NULL; + __u32 raw_btf_size; + __u64 old_caps = 0; + + /* create BPF token from BPF FS mount */ + token_fd = bpf_token_create(mnt_fd, NULL); + if (!ASSERT_GT(token_fd, 0, "token_create")) { + err = -EINVAL; + goto cleanup; + } + + /* while inside non-init userns, we need both a BPF token *and* + * CAP_BPF inside current userns to create privileged map; let's test + * that neither BPF token alone nor namespaced CAP_BPF is sufficient + */ + err = drop_priv_caps(&old_caps); + if (!ASSERT_OK(err, "drop_caps")) + goto cleanup; + + /* setup a trivial BTF data to load to the kernel */ + btf = btf__new_empty(); + if (!ASSERT_OK_PTR(btf, "empty_btf")) + goto cleanup; + + ASSERT_GT(btf__add_int(btf, "int", 4, 0), 0, "int_type"); + + raw_btf_data = btf__raw_data(btf, &raw_btf_size); + if (!ASSERT_OK_PTR(raw_btf_data, "raw_btf_data")) + goto cleanup; + + /* no token + no CAP_BPF -> failure */ + btf_opts.btf_flags = 0; + btf_opts.token_fd = 0; + btf_fd = bpf_btf_load(raw_btf_data, raw_btf_size, &btf_opts); + if (!ASSERT_LT(btf_fd, 0, "no_token_no_cap_should_fail")) + goto cleanup; + + /* token + no CAP_BPF -> failure */ + btf_opts.btf_flags = BPF_F_TOKEN_FD; + btf_opts.token_fd = token_fd; + btf_fd = bpf_btf_load(raw_btf_data, raw_btf_size, &btf_opts); + if (!ASSERT_LT(btf_fd, 0, "token_no_cap_should_fail")) + goto cleanup; + + /* get back effective local CAP_BPF (and CAP_SYS_ADMIN) */ + err = restore_priv_caps(old_caps); + if (!ASSERT_OK(err, "restore_caps")) + goto cleanup; + + /* token + CAP_BPF -> success */ + btf_opts.btf_flags = BPF_F_TOKEN_FD; + btf_opts.token_fd = token_fd; + btf_fd = bpf_btf_load(raw_btf_data, raw_btf_size, &btf_opts); + if (!ASSERT_GT(btf_fd, 0, "token_and_cap_success")) + goto cleanup; + + err = 0; +cleanup: + btf__free(btf); + zclose(btf_fd); + zclose(token_fd); + return err; +} + +static int userns_prog_load(int mnt_fd, struct token_lsm *lsm_skel) +{ + LIBBPF_OPTS(bpf_prog_load_opts, prog_opts); + int err, token_fd = -1, prog_fd = -1; + struct bpf_insn insns[] = { + /* bpf_jiffies64() requires CAP_BPF */ + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64), + /* bpf_get_current_task() requires CAP_PERFMON */ + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_current_task), + /* r0 = 0; exit; */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + size_t insn_cnt = ARRAY_SIZE(insns); + __u64 old_caps = 0; + + /* create BPF token from BPF FS mount */ + token_fd = bpf_token_create(mnt_fd, NULL); + if (!ASSERT_GT(token_fd, 0, "token_create")) { + err = -EINVAL; + goto cleanup; + } + + /* validate we can successfully load BPF program with token; this + * being XDP program (CAP_NET_ADMIN) using bpf_jiffies64() (CAP_BPF) + * and bpf_get_current_task() (CAP_PERFMON) helpers validates we have + * BPF token wired properly in a bunch of places in the kernel + */ + prog_opts.prog_flags = BPF_F_TOKEN_FD; + prog_opts.token_fd = token_fd; + prog_opts.expected_attach_type = BPF_XDP; + prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "token_prog", "GPL", + insns, insn_cnt, &prog_opts); + if (!ASSERT_GT(prog_fd, 0, "prog_fd")) { + err = -EPERM; + goto cleanup; + } + + /* no token + caps -> failure */ + prog_opts.prog_flags = 0; + prog_opts.token_fd = 0; + prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "token_prog", "GPL", + insns, insn_cnt, &prog_opts); + if (!ASSERT_EQ(prog_fd, -EPERM, "prog_fd_eperm")) { + err = -EPERM; + goto cleanup; + } + + err = drop_priv_caps(&old_caps); + if (!ASSERT_OK(err, "drop_caps")) + 
goto cleanup; + + /* no caps + token -> failure */ + prog_opts.prog_flags = BPF_F_TOKEN_FD; + prog_opts.token_fd = token_fd; + prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "token_prog", "GPL", + insns, insn_cnt, &prog_opts); + if (!ASSERT_EQ(prog_fd, -EPERM, "prog_fd_eperm")) { + err = -EPERM; + goto cleanup; + } + + /* no caps + no token -> definitely a failure */ + prog_opts.prog_flags = 0; + prog_opts.token_fd = 0; + prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "token_prog", "GPL", + insns, insn_cnt, &prog_opts); + if (!ASSERT_EQ(prog_fd, -EPERM, "prog_fd_eperm")) { + err = -EPERM; + goto cleanup; + } + + err = 0; +cleanup: + zclose(prog_fd); + zclose(token_fd); + return err; +} + +static int userns_obj_priv_map(int mnt_fd, struct token_lsm *lsm_skel) +{ + LIBBPF_OPTS(bpf_object_open_opts, opts); + char buf[256]; + struct priv_map *skel; + int err; + + skel = priv_map__open_and_load(); + if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) { + priv_map__destroy(skel); + return -EINVAL; + } + + /* use bpf_token_path to provide BPF FS path */ + snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd); + opts.bpf_token_path = buf; + skel = priv_map__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "obj_token_path_open")) + return -EINVAL; + + err = priv_map__load(skel); + priv_map__destroy(skel); + if (!ASSERT_OK(err, "obj_token_path_load")) + return -EINVAL; + + return 0; +} + +static int userns_obj_priv_prog(int mnt_fd, struct token_lsm *lsm_skel) +{ + LIBBPF_OPTS(bpf_object_open_opts, opts); + char buf[256]; + struct priv_prog *skel; + int err; + + skel = priv_prog__open_and_load(); + if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) { + priv_prog__destroy(skel); + return -EINVAL; + } + + /* use bpf_token_path to provide BPF FS path */ + snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd); + opts.bpf_token_path = buf; + skel = priv_prog__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "obj_token_path_open")) + return -EINVAL; + err = priv_prog__load(skel); + priv_prog__destroy(skel); + if (!ASSERT_OK(err, "obj_token_path_load")) + return -EINVAL; + + /* provide BPF token, but reject bpf_token_capable() with LSM */ + lsm_skel->bss->reject_capable = true; + lsm_skel->bss->reject_cmd = false; + skel = priv_prog__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "obj_token_lsm_reject_cap_open")) + return -EINVAL; + err = priv_prog__load(skel); + priv_prog__destroy(skel); + if (!ASSERT_ERR(err, "obj_token_lsm_reject_cap_load")) + return -EINVAL; + + /* provide BPF token, but reject bpf_token_cmd() with LSM */ + lsm_skel->bss->reject_capable = false; + lsm_skel->bss->reject_cmd = true; + skel = priv_prog__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "obj_token_lsm_reject_cmd_open")) + return -EINVAL; + err = priv_prog__load(skel); + priv_prog__destroy(skel); + if (!ASSERT_ERR(err, "obj_token_lsm_reject_cmd_load")) + return -EINVAL; + + return 0; +} + +/* this test is called with BPF FS that doesn't delegate BPF_BTF_LOAD command, + * which should cause struct_ops application to fail, as BTF won't be uploaded + * into the kernel, even if STRUCT_OPS programs themselves are allowed + */ +static int validate_struct_ops_load(int mnt_fd, bool expect_success) +{ + LIBBPF_OPTS(bpf_object_open_opts, opts); + char buf[256]; + struct dummy_st_ops_success *skel; + int err; + + snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd); + opts.bpf_token_path = buf; + skel = dummy_st_ops_success__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "obj_token_path_open")) + return -EINVAL; + + err = dummy_st_ops_success__load(skel); + 
dummy_st_ops_success__destroy(skel); + if (expect_success) { + if (!ASSERT_OK(err, "obj_token_path_load")) + return -EINVAL; + } else /* expect failure */ { + if (!ASSERT_ERR(err, "obj_token_path_load")) + return -EINVAL; + } + + return 0; +} + +static int userns_obj_priv_btf_fail(int mnt_fd, struct token_lsm *lsm_skel) +{ + return validate_struct_ops_load(mnt_fd, false /* should fail */); +} + +static int userns_obj_priv_btf_success(int mnt_fd, struct token_lsm *lsm_skel) +{ + return validate_struct_ops_load(mnt_fd, true /* should succeed */); +} + +#define TOKEN_ENVVAR "LIBBPF_BPF_TOKEN_PATH" +#define TOKEN_BPFFS_CUSTOM "/bpf-token-fs" + +static int userns_obj_priv_implicit_token(int mnt_fd, struct token_lsm *lsm_skel) +{ + LIBBPF_OPTS(bpf_object_open_opts, opts); + struct dummy_st_ops_success *skel; + int err; + + /* before we mount BPF FS with token delegation, struct_ops skeleton + * should fail to load + */ + skel = dummy_st_ops_success__open_and_load(); + if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) { + dummy_st_ops_success__destroy(skel); + return -EINVAL; + } + + /* mount custom BPF FS over /sys/fs/bpf so that libbpf can create BPF + * token automatically and implicitly + */ + err = sys_move_mount(mnt_fd, "", AT_FDCWD, "/sys/fs/bpf", MOVE_MOUNT_F_EMPTY_PATH); + if (!ASSERT_OK(err, "move_mount_bpffs")) + return -EINVAL; + + /* disable implicit BPF token creation by setting + * LIBBPF_BPF_TOKEN_PATH envvar to empty value, load should fail + */ + err = setenv(TOKEN_ENVVAR, "", 1 /*overwrite*/); + if (!ASSERT_OK(err, "setenv_token_path")) + return -EINVAL; + skel = dummy_st_ops_success__open_and_load(); + if (!ASSERT_ERR_PTR(skel, "obj_token_envvar_disabled_load")) { + unsetenv(TOKEN_ENVVAR); + dummy_st_ops_success__destroy(skel); + return -EINVAL; + } + unsetenv(TOKEN_ENVVAR); + + /* now the same struct_ops skeleton should succeed thanks to libppf + * creating BPF token from /sys/fs/bpf mount point + */ + skel = dummy_st_ops_success__open_and_load(); + if (!ASSERT_OK_PTR(skel, "obj_implicit_token_load")) + return -EINVAL; + + dummy_st_ops_success__destroy(skel); + + /* now disable implicit token through empty bpf_token_path, should fail */ + opts.bpf_token_path = ""; + skel = dummy_st_ops_success__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "obj_empty_token_path_open")) + return -EINVAL; + + err = dummy_st_ops_success__load(skel); + dummy_st_ops_success__destroy(skel); + if (!ASSERT_ERR(err, "obj_empty_token_path_load")) + return -EINVAL; + + return 0; +} + +static int userns_obj_priv_implicit_token_envvar(int mnt_fd, struct token_lsm *lsm_skel) +{ + LIBBPF_OPTS(bpf_object_open_opts, opts); + struct dummy_st_ops_success *skel; + int err; + + /* before we mount BPF FS with token delegation, struct_ops skeleton + * should fail to load + */ + skel = dummy_st_ops_success__open_and_load(); + if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) { + dummy_st_ops_success__destroy(skel); + return -EINVAL; + } + + /* mount custom BPF FS over custom location, so libbpf can't create + * BPF token implicitly, unless pointed to it through + * LIBBPF_BPF_TOKEN_PATH envvar + */ + rmdir(TOKEN_BPFFS_CUSTOM); + if (!ASSERT_OK(mkdir(TOKEN_BPFFS_CUSTOM, 0777), "mkdir_bpffs_custom")) + goto err_out; + err = sys_move_mount(mnt_fd, "", AT_FDCWD, TOKEN_BPFFS_CUSTOM, MOVE_MOUNT_F_EMPTY_PATH); + if (!ASSERT_OK(err, "move_mount_bpffs")) + goto err_out; + + /* even though we have BPF FS with delegation, it's not at default + * /sys/fs/bpf location, so we still fail to load until envvar is set up + */ + skel = 
dummy_st_ops_success__open_and_load(); + if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load2")) { + dummy_st_ops_success__destroy(skel); + goto err_out; + } + + err = setenv(TOKEN_ENVVAR, TOKEN_BPFFS_CUSTOM, 1 /*overwrite*/); + if (!ASSERT_OK(err, "setenv_token_path")) + goto err_out; + + /* now the same struct_ops skeleton should succeed thanks to libppf + * creating BPF token from custom mount point + */ + skel = dummy_st_ops_success__open_and_load(); + if (!ASSERT_OK_PTR(skel, "obj_implicit_token_load")) + goto err_out; + + dummy_st_ops_success__destroy(skel); + + /* now disable implicit token through empty bpf_token_path, envvar + * will be ignored, should fail + */ + opts.bpf_token_path = ""; + skel = dummy_st_ops_success__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "obj_empty_token_path_open")) + goto err_out; + + err = dummy_st_ops_success__load(skel); + dummy_st_ops_success__destroy(skel); + if (!ASSERT_ERR(err, "obj_empty_token_path_load")) + goto err_out; + + rmdir(TOKEN_BPFFS_CUSTOM); + unsetenv(TOKEN_ENVVAR); + return 0; +err_out: + rmdir(TOKEN_BPFFS_CUSTOM); + unsetenv(TOKEN_ENVVAR); + return -EINVAL; +} + +#define bit(n) (1ULL << (n)) + +void test_token(void) +{ + if (test__start_subtest("map_token")) { + struct bpffs_opts opts = { + .cmds_str = "map_create", + .maps_str = "stack", + }; + + subtest_userns(&opts, userns_map_create); + } + if (test__start_subtest("btf_token")) { + struct bpffs_opts opts = { + .cmds = 1ULL << BPF_BTF_LOAD, + }; + + subtest_userns(&opts, userns_btf_load); + } + if (test__start_subtest("prog_token")) { + struct bpffs_opts opts = { + .cmds_str = "PROG_LOAD", + .progs_str = "XDP", + .attachs_str = "xdp", + }; + + subtest_userns(&opts, userns_prog_load); + } + if (test__start_subtest("obj_priv_map")) { + struct bpffs_opts opts = { + .cmds = bit(BPF_MAP_CREATE), + .maps = bit(BPF_MAP_TYPE_QUEUE), + }; + + subtest_userns(&opts, userns_obj_priv_map); + } + if (test__start_subtest("obj_priv_prog")) { + struct bpffs_opts opts = { + .cmds = bit(BPF_PROG_LOAD), + .progs = bit(BPF_PROG_TYPE_KPROBE), + .attachs = ~0ULL, + }; + + subtest_userns(&opts, userns_obj_priv_prog); + } + if (test__start_subtest("obj_priv_btf_fail")) { + struct bpffs_opts opts = { + /* disallow BTF loading */ + .cmds = bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD), + .maps = bit(BPF_MAP_TYPE_STRUCT_OPS), + .progs = bit(BPF_PROG_TYPE_STRUCT_OPS), + .attachs = ~0ULL, + }; + + subtest_userns(&opts, userns_obj_priv_btf_fail); + } + if (test__start_subtest("obj_priv_btf_success")) { + struct bpffs_opts opts = { + /* allow BTF loading */ + .cmds = bit(BPF_BTF_LOAD) | bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD), + .maps = bit(BPF_MAP_TYPE_STRUCT_OPS), + .progs = bit(BPF_PROG_TYPE_STRUCT_OPS), + .attachs = ~0ULL, + }; + + subtest_userns(&opts, userns_obj_priv_btf_success); + } + if (test__start_subtest("obj_priv_implicit_token")) { + struct bpffs_opts opts = { + /* allow BTF loading */ + .cmds = bit(BPF_BTF_LOAD) | bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD), + .maps = bit(BPF_MAP_TYPE_STRUCT_OPS), + .progs = bit(BPF_PROG_TYPE_STRUCT_OPS), + .attachs = ~0ULL, + }; + + subtest_userns(&opts, userns_obj_priv_implicit_token); + } + if (test__start_subtest("obj_priv_implicit_token_envvar")) { + struct bpffs_opts opts = { + /* allow BTF loading */ + .cmds = bit(BPF_BTF_LOAD) | bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD), + .maps = bit(BPF_MAP_TYPE_STRUCT_OPS), + .progs = bit(BPF_PROG_TYPE_STRUCT_OPS), + .attachs = ~0ULL, + }; + + subtest_userns(&opts, userns_obj_priv_implicit_token_envvar); + } +} diff --git 
a/tools/testing/selftests/bpf/prog_tests/xdpwall.c b/tools/testing/selftests/bpf/prog_tests/xdpwall.c index f3927829a55a..4599154c8e9b 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdpwall.c +++ b/tools/testing/selftests/bpf/prog_tests/xdpwall.c @@ -9,7 +9,7 @@ void test_xdpwall(void) struct xdpwall *skel; skel = xdpwall__open_and_load(); - ASSERT_OK_PTR(skel, "Does LLMV have https://reviews.llvm.org/D109073?"); + ASSERT_OK_PTR(skel, "Does LLVM have https://github.com/llvm/llvm-project/commit/ea72b0319d7b0f0c2fcf41d121afa5d031b319d5?"); xdpwall__destroy(skel); }
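As referenced in (12), the tc_redirect_dtime fix hinges on reading the
SO_TIMESTAMPNS_NEW receive timestamp out of recvmsg() control data,
which the reworked __rcv_tstamp() above now hands back to its caller. A
standalone sketch of that cmsg walk, assuming SO_TIMESTAMPNS_NEW was
already enabled on fd:

#include <string.h>
#include <sys/socket.h>
#include <linux/types.h>
#include <linux/time_types.h>

/* Return the packet receive timestamp in nanoseconds, or 0 if the
 * kernel attached none to this datagram.
 */
static __u64 rcv_pkt_ns(int fd, char *data, size_t len)
{
	struct __kernel_timespec pkt_ts = {};
	char ctl[CMSG_SPACE(sizeof(pkt_ts))];
	struct iovec iov = { .iov_base = data, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = ctl,
		.msg_controllen = sizeof(ctl),
	};
	struct cmsghdr *cmsg;

	if (recvmsg(fd, &msg, 0) < 0)
		return 0;

	cmsg = CMSG_FIRSTHDR(&msg);
	if (!cmsg || cmsg->cmsg_level != SOL_SOCKET ||
	    cmsg->cmsg_type != SO_TIMESTAMPNS_NEW)
		return 0;

	memcpy(&pkt_ts, CMSG_DATA(cmsg), sizeof(pkt_ts));
	return pkt_ts.tv_sec * 1000000000ULL + pkt_ts.tv_nsec;
}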