author     Jakub Kicinski <kuba@kernel.org>  2022-11-29 04:14:01 +0300
committer  Jakub Kicinski <kuba@kernel.org>  2022-11-29 06:42:17 +0300
commit     d6dc62fca6b6a1e75430618bf6d84cf135f1b3d8
tree       2e955738844b6091fb36d509e1ef7aaed3a67392 /tools/testing/selftests/bpf/prog_tests
parent     469d258d9e112fb7ed51a3b35c0c85ee106331e8
parent     2b3e8f6f5b939ceeb2e097339bf78ebaaf11dfe9
Daniel Borkmann says:
====================
bpf-next 2022-11-25
We've added 101 non-merge commits during the last 11 days, changing
a total of 109 files with 8827 insertions(+) and 1129 deletions(-).
The main changes are:
1) Support for user-defined BPF objects: the use case is to allocate your
   own objects, build your own object hierarchies, and use these building
   blocks to flexibly construct data structures, for example, linked lists
   in BPF, from Kumar Kartikeya Dwivedi. (A sketch of the building blocks
   follows this message.)
2) Add bpf_rcu_read_{,un}lock() support for sleepable programs,
   from Yonghong Song. (A sketch follows the diffstat below.)
3) Add support for storing struct task_struct objects as kptrs in maps,
   from David Vernet. (A sketch appears at the end of this page.)
4) Batch of BPF map documentation improvements, from Maryam Tahhan
and Donald Hunter.
5) Improve BPF verifier to propagate nullness information for branches
   of register-to-register comparisons, from Eduard Zingerman.
6) Fix cgroup BPF iter infra to hold a reference on the start cgroup,
   from Hou Tao.
7) Fix BPF verifier to not mark fentry/fexit program arguments as trusted,
   since that is not actually the case for them, from Alexei Starovoitov.
8) Improve BPF verifier's realloc handling to better play along with dynamic
runtime analysis tools like KASAN and friends, from Kees Cook.
9) Remove legacy libbpf mode support from bpftool,
from Sahid Orentino Ferdjaoui.
10) Rework zero-len skb redirection checks to avoid potentially breaking
existing BPF test infra users, from Stanislav Fomichev.
11) Two small refactorings which are independent and have been split out
of the XDP queueing RFC series, from Toke Høiland-Jørgensen.
12) Fix a memory leak in LSM cgroup BPF selftest, from Wang Yufen.
13) Documentation on how to run BPF CI without patch submission,
from Daniel Müller.
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
====================
Link: https://lore.kernel.org/r/20221125012450.441-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
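To make change 1) above concrete, here is a minimal, hypothetical sketch of
the new building blocks as exercised by the linked_list selftests in the diff
below. It assumes the selftests' bpf_experimental.h wrappers (bpf_obj_new(),
bpf_obj_drop(), __contains()) and the bpf_list_push_back()/bpf_list_pop_front()
kfuncs; the struct, map, and program names are illustrative, not part of this
commit:

  // SPDX-License-Identifier: GPL-2.0
  /* Hypothetical sketch, not from this commit. */
  #include <vmlinux.h>
  #include <bpf/bpf_helpers.h>
  #include "bpf_experimental.h"

  #ifndef container_of
  #define container_of(ptr, type, member) \
          ((type *)((void *)(ptr) - offsetof(type, member)))
  #endif

  struct elem {
          int value;
          struct bpf_list_node node;
  };

  /* The list head and the spin lock that must guard it live in the same
   * allocation; __contains() ties the head to elem::node via a BTF decl tag.
   */
  struct map_value {
          struct bpf_spin_lock lock;
          struct bpf_list_head head __contains(elem, node);
  };

  struct {
          __uint(type, BPF_MAP_TYPE_ARRAY);
          __type(key, int);
          __type(value, struct map_value);
          __uint(max_entries, 1);
  } array_map SEC(".maps");

  SEC("tc")
  int list_demo(void *ctx)
  {
          struct bpf_list_node *n;
          struct map_value *v;
          struct elem *e;
          int key = 0;

          v = bpf_map_lookup_elem(&array_map, &key);
          if (!v)
                  return 0;

          e = bpf_obj_new(typeof(*e));    /* BPF-side allocation */
          if (!e)
                  return 0;
          e->value = 42;

          bpf_spin_lock(&v->lock);
          bpf_list_push_back(&v->head, &e->node); /* list takes ownership */
          n = bpf_list_pop_front(&v->head);       /* ownership comes back */
          bpf_spin_unlock(&v->lock);

          if (n)
                  bpf_obj_drop(container_of(n, struct elem, node));
          return 0;
  }

  char _license[] SEC("license") = "GPL";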
Diffstat (limited to 'tools/testing/selftests/bpf/prog_tests')
 tools/testing/selftests/bpf/prog_tests/btf.c                |  14
 tools/testing/selftests/bpf/prog_tests/cgroup_iter.c        |  76
 tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c         | 175
 tools/testing/selftests/bpf/prog_tests/dynptr.c             |   2
 tools/testing/selftests/bpf/prog_tests/empty_skb.c          | 146
 tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c |   2
 tools/testing/selftests/bpf/prog_tests/linked_list.c        | 740
 tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c         |  17
 tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c      | 158
 tools/testing/selftests/bpf/prog_tests/spin_lock.c          | 142
 tools/testing/selftests/bpf/prog_tests/spinlock.c           |  45
 tools/testing/selftests/bpf/prog_tests/task_kfunc.c         | 163
 tools/testing/selftests/bpf/prog_tests/type_cast.c          | 114
 tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c    |   2
 tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c       |   2
15 files changed, 1745 insertions(+), 53 deletions(-)
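As a companion to change 2) above and the rcu_read_lock.c selftest in the diff
below, here is a hedged sketch of an explicit RCU read-side section in a
sleepable program. The kfunc declarations mirror the selftest; the attach
point, program name, and the assumption that vmlinux BTF carries the "rcu"
type tag on task_struct::real_parent (the selftest skips when it does not)
are illustrative:

  // SPDX-License-Identifier: GPL-2.0
  /* Hypothetical sketch, not from this commit. */
  #include <vmlinux.h>
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>

  void bpf_rcu_read_lock(void) __ksym;
  void bpf_rcu_read_unlock(void) __ksym;

  /* Sleepable fentry program; the x86-64 syscall symbol is an assumption. */
  SEC("fentry.s/__x64_sys_getpgid")
  int BPF_PROG(rcu_walk_demo)
  {
          struct task_struct *task = bpf_get_current_task_btf();
          struct task_struct *parent;
          int ppid = 0;

          bpf_rcu_read_lock();            /* open the RCU read-side section */
          parent = task->real_parent;     /* rcu-tagged field: valid inside only */
          if (parent)
                  ppid = parent->pid;
          bpf_rcu_read_unlock();          /* 'parent' must not be used past here */

          bpf_printk("parent pid: %d", ppid);
          return 0;
  }

  char _license[] SEC("license") = "GPL";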
diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c index 95a2b80f0d17..de1b5b9eb93a 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf.c +++ b/tools/testing/selftests/bpf/prog_tests/btf.c @@ -3949,6 +3949,20 @@ static struct btf_raw_test raw_tests[] = { .err_str = "Invalid return type", }, { + .descr = "decl_tag test #17, func proto, argument", + .raw_types = { + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 0, 0), 4), (-1), /* [1] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 0), /* [2] */ + BTF_FUNC_PROTO_ENC(0, 1), /* [3] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_VAR_ENC(NAME_TBD, 2, 0), /* [4] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0local\0tag1\0var"), + .btf_load_err = true, + .err_str = "Invalid arg#1", +}, +{ .descr = "type_tag test #1", .raw_types = { BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_iter.c b/tools/testing/selftests/bpf/prog_tests/cgroup_iter.c index c4a2adb38da1..e02feb5fae97 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_iter.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_iter.c @@ -189,6 +189,80 @@ static void test_walk_self_only(struct cgroup_iter *skel) BPF_CGROUP_ITER_SELF_ONLY, "self_only"); } +static void test_walk_dead_self_only(struct cgroup_iter *skel) +{ + DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); + char expected_output[128], buf[128]; + const char *cgrp_name = "/dead"; + union bpf_iter_link_info linfo; + int len, cgrp_fd, iter_fd; + struct bpf_link *link; + size_t left; + char *p; + + cgrp_fd = create_and_get_cgroup(cgrp_name); + if (!ASSERT_GE(cgrp_fd, 0, "create cgrp")) + return; + + /* The cgroup will be dead during read() iteration, so it only has + * epilogue in the output + */ + snprintf(expected_output, sizeof(expected_output), EPILOGUE); + + memset(&linfo, 0, sizeof(linfo)); + linfo.cgroup.cgroup_fd = cgrp_fd; + linfo.cgroup.order = BPF_CGROUP_ITER_SELF_ONLY; + opts.link_info = &linfo; + opts.link_info_len = sizeof(linfo); + + link = bpf_program__attach_iter(skel->progs.cgroup_id_printer, &opts); + if (!ASSERT_OK_PTR(link, "attach_iter")) + goto close_cgrp; + + iter_fd = bpf_iter_create(bpf_link__fd(link)); + if (!ASSERT_GE(iter_fd, 0, "iter_create")) + goto free_link; + + /* Close link fd and cgroup fd */ + bpf_link__destroy(link); + close(cgrp_fd); + + /* Remove cgroup to mark it as dead */ + remove_cgroup(cgrp_name); + + /* Two kern_sync_rcu() and usleep() pairs are used to wait for the + * releases of cgroup css, and the last kern_sync_rcu() and usleep() + * pair is used to wait for the free of cgroup itself. + */ + kern_sync_rcu(); + usleep(8000); + kern_sync_rcu(); + usleep(8000); + kern_sync_rcu(); + usleep(1000); + + memset(buf, 0, sizeof(buf)); + left = ARRAY_SIZE(buf); + p = buf; + while ((len = read(iter_fd, p, left)) > 0) { + p += len; + left -= len; + } + + ASSERT_STREQ(buf, expected_output, "dead cgroup output"); + + /* read() after iter finishes should be ok. 
*/ + if (len == 0) + ASSERT_OK(read(iter_fd, buf, sizeof(buf)), "second_read"); + + close(iter_fd); + return; +free_link: + bpf_link__destroy(link); +close_cgrp: + close(cgrp_fd); +} + void test_cgroup_iter(void) { struct cgroup_iter *skel = NULL; @@ -217,6 +291,8 @@ void test_cgroup_iter(void) test_early_termination(skel); if (test__start_subtest("cgroup_iter__self_only")) test_walk_self_only(skel); + if (test__start_subtest("cgroup_iter__dead_self_only")) + test_walk_dead_self_only(skel); out: cgroup_iter__destroy(skel); cleanup_cgroups(); diff --git a/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c b/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c new file mode 100644 index 000000000000..973f0c5af965 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c @@ -0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ + +#define _GNU_SOURCE +#include <cgroup_helpers.h> +#include <test_progs.h> + +#include "cgrp_kfunc_failure.skel.h" +#include "cgrp_kfunc_success.skel.h" + +static size_t log_buf_sz = 1 << 20; /* 1 MB */ +static char obj_log_buf[1048576]; + +static struct cgrp_kfunc_success *open_load_cgrp_kfunc_skel(void) +{ + struct cgrp_kfunc_success *skel; + int err; + + skel = cgrp_kfunc_success__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return NULL; + + skel->bss->pid = getpid(); + + err = cgrp_kfunc_success__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + return skel; + +cleanup: + cgrp_kfunc_success__destroy(skel); + return NULL; +} + +static int mkdir_rm_test_dir(void) +{ + int fd; + const char *cgrp_path = "cgrp_kfunc"; + + fd = create_and_get_cgroup(cgrp_path); + if (!ASSERT_GT(fd, 0, "mkdir_cgrp_fd")) + return -1; + + close(fd); + remove_cgroup(cgrp_path); + + return 0; +} + +static void run_success_test(const char *prog_name) +{ + struct cgrp_kfunc_success *skel; + struct bpf_program *prog; + struct bpf_link *link = NULL; + + skel = open_load_cgrp_kfunc_skel(); + if (!ASSERT_OK_PTR(skel, "open_load_skel")) + return; + + if (!ASSERT_OK(skel->bss->err, "pre_mkdir_err")) + goto cleanup; + + prog = bpf_object__find_program_by_name(skel->obj, prog_name); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto cleanup; + + link = bpf_program__attach(prog); + if (!ASSERT_OK_PTR(link, "attached_link")) + goto cleanup; + + ASSERT_EQ(skel->bss->invocations, 0, "pre_rmdir_count"); + if (!ASSERT_OK(mkdir_rm_test_dir(), "cgrp_mkdir")) + goto cleanup; + + ASSERT_EQ(skel->bss->invocations, 1, "post_rmdir_count"); + ASSERT_OK(skel->bss->err, "post_rmdir_err"); + +cleanup: + bpf_link__destroy(link); + cgrp_kfunc_success__destroy(skel); +} + +static const char * const success_tests[] = { + "test_cgrp_acquire_release_argument", + "test_cgrp_acquire_leave_in_map", + "test_cgrp_xchg_release", + "test_cgrp_get_release", + "test_cgrp_get_ancestors", +}; + +static struct { + const char *prog_name; + const char *expected_err_msg; +} failure_tests[] = { + {"cgrp_kfunc_acquire_untrusted", "R1 must be referenced or trusted"}, + {"cgrp_kfunc_acquire_fp", "arg#0 pointer type STRUCT cgroup must point"}, + {"cgrp_kfunc_acquire_unsafe_kretprobe", "reg type unsupported for arg#0 function"}, + {"cgrp_kfunc_acquire_trusted_walked", "R1 must be referenced or trusted"}, + {"cgrp_kfunc_acquire_null", "arg#0 pointer type STRUCT cgroup must point"}, + {"cgrp_kfunc_acquire_unreleased", "Unreleased reference"}, + {"cgrp_kfunc_get_non_kptr_param", "arg#0 expected pointer to map value"}, + 
{"cgrp_kfunc_get_non_kptr_acquired", "arg#0 expected pointer to map value"}, + {"cgrp_kfunc_get_null", "arg#0 expected pointer to map value"}, + {"cgrp_kfunc_xchg_unreleased", "Unreleased reference"}, + {"cgrp_kfunc_get_unreleased", "Unreleased reference"}, + {"cgrp_kfunc_release_untrusted", "arg#0 is untrusted_ptr_or_null_ expected ptr_ or socket"}, + {"cgrp_kfunc_release_fp", "arg#0 pointer type STRUCT cgroup must point"}, + {"cgrp_kfunc_release_null", "arg#0 is ptr_or_null_ expected ptr_ or socket"}, + {"cgrp_kfunc_release_unacquired", "release kernel function bpf_cgroup_release expects"}, +}; + +static void verify_fail(const char *prog_name, const char *expected_err_msg) +{ + LIBBPF_OPTS(bpf_object_open_opts, opts); + struct cgrp_kfunc_failure *skel; + int err, i; + + opts.kernel_log_buf = obj_log_buf; + opts.kernel_log_size = log_buf_sz; + opts.kernel_log_level = 1; + + skel = cgrp_kfunc_failure__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "cgrp_kfunc_failure__open_opts")) + goto cleanup; + + for (i = 0; i < ARRAY_SIZE(failure_tests); i++) { + struct bpf_program *prog; + const char *curr_name = failure_tests[i].prog_name; + + prog = bpf_object__find_program_by_name(skel->obj, curr_name); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto cleanup; + + bpf_program__set_autoload(prog, !strcmp(curr_name, prog_name)); + } + + err = cgrp_kfunc_failure__load(skel); + if (!ASSERT_ERR(err, "unexpected load success")) + goto cleanup; + + if (!ASSERT_OK_PTR(strstr(obj_log_buf, expected_err_msg), "expected_err_msg")) { + fprintf(stderr, "Expected err_msg: %s\n", expected_err_msg); + fprintf(stderr, "Verifier output: %s\n", obj_log_buf); + } + +cleanup: + cgrp_kfunc_failure__destroy(skel); +} + +void test_cgrp_kfunc(void) +{ + int i, err; + + err = setup_cgroup_environment(); + if (!ASSERT_OK(err, "cgrp_env_setup")) + goto cleanup; + + for (i = 0; i < ARRAY_SIZE(success_tests); i++) { + if (!test__start_subtest(success_tests[i])) + continue; + + run_success_test(success_tests[i]); + } + + for (i = 0; i < ARRAY_SIZE(failure_tests); i++) { + if (!test__start_subtest(failure_tests[i].prog_name)) + continue; + + verify_fail(failure_tests[i].prog_name, failure_tests[i].expected_err_msg); + } + +cleanup: + cleanup_cgroup_environment(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/dynptr.c b/tools/testing/selftests/bpf/prog_tests/dynptr.c index 8fc4e6c02bfd..b0c06f821cb8 100644 --- a/tools/testing/selftests/bpf/prog_tests/dynptr.c +++ b/tools/testing/selftests/bpf/prog_tests/dynptr.c @@ -17,7 +17,7 @@ static struct { {"ringbuf_missing_release2", "Unreleased reference id=2"}, {"ringbuf_missing_release_callback", "Unreleased reference id"}, {"use_after_invalid", "Expected an initialized dynptr as arg #3"}, - {"ringbuf_invalid_api", "type=mem expected=alloc_mem"}, + {"ringbuf_invalid_api", "type=mem expected=ringbuf_mem"}, {"add_dynptr_to_map1", "invalid indirect read from stack"}, {"add_dynptr_to_map2", "invalid indirect read from stack"}, {"data_slice_out_of_bounds_ringbuf", "value is outside of the allowed memory range"}, diff --git a/tools/testing/selftests/bpf/prog_tests/empty_skb.c b/tools/testing/selftests/bpf/prog_tests/empty_skb.c new file mode 100644 index 000000000000..0613f3bb8b5e --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/empty_skb.c @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> +#include <net/if.h> +#include "empty_skb.skel.h" + +#define SYS(cmd) ({ \ + if (!ASSERT_OK(system(cmd), 
(cmd))) \ + goto out; \ +}) + +void serial_test_empty_skb(void) +{ + LIBBPF_OPTS(bpf_test_run_opts, tattr); + struct empty_skb *bpf_obj = NULL; + struct nstoken *tok = NULL; + struct bpf_program *prog; + char eth_hlen_pp[15]; + char eth_hlen[14]; + int veth_ifindex; + int ipip_ifindex; + int err; + int i; + + struct { + const char *msg; + const void *data_in; + __u32 data_size_in; + int *ifindex; + int err; + int ret; + bool success_on_tc; + } tests[] = { + /* Empty packets are always rejected. */ + + { + /* BPF_PROG_RUN ETH_HLEN size check */ + .msg = "veth empty ingress packet", + .data_in = NULL, + .data_size_in = 0, + .ifindex = &veth_ifindex, + .err = -EINVAL, + }, + { + /* BPF_PROG_RUN ETH_HLEN size check */ + .msg = "ipip empty ingress packet", + .data_in = NULL, + .data_size_in = 0, + .ifindex = &ipip_ifindex, + .err = -EINVAL, + }, + + /* ETH_HLEN-sized packets: + * - can not be redirected at LWT_XMIT + * - can be redirected at TC to non-tunneling dest + */ + + { + /* __bpf_redirect_common */ + .msg = "veth ETH_HLEN packet ingress", + .data_in = eth_hlen, + .data_size_in = sizeof(eth_hlen), + .ifindex = &veth_ifindex, + .ret = -ERANGE, + .success_on_tc = true, + }, + { + /* __bpf_redirect_no_mac + * + * lwt: skb->len=0 <= skb_network_offset=0 + * tc: skb->len=14 <= skb_network_offset=14 + */ + .msg = "ipip ETH_HLEN packet ingress", + .data_in = eth_hlen, + .data_size_in = sizeof(eth_hlen), + .ifindex = &ipip_ifindex, + .ret = -ERANGE, + }, + + /* ETH_HLEN+1-sized packet should be redirected. */ + + { + .msg = "veth ETH_HLEN+1 packet ingress", + .data_in = eth_hlen_pp, + .data_size_in = sizeof(eth_hlen_pp), + .ifindex = &veth_ifindex, + }, + { + .msg = "ipip ETH_HLEN+1 packet ingress", + .data_in = eth_hlen_pp, + .data_size_in = sizeof(eth_hlen_pp), + .ifindex = &ipip_ifindex, + }, + }; + + SYS("ip netns add empty_skb"); + tok = open_netns("empty_skb"); + SYS("ip link add veth0 type veth peer veth1"); + SYS("ip link set dev veth0 up"); + SYS("ip link set dev veth1 up"); + SYS("ip addr add 10.0.0.1/8 dev veth0"); + SYS("ip addr add 10.0.0.2/8 dev veth1"); + veth_ifindex = if_nametoindex("veth0"); + + SYS("ip link add ipip0 type ipip local 10.0.0.1 remote 10.0.0.2"); + SYS("ip link set ipip0 up"); + SYS("ip addr add 192.168.1.1/16 dev ipip0"); + ipip_ifindex = if_nametoindex("ipip0"); + + bpf_obj = empty_skb__open_and_load(); + if (!ASSERT_OK_PTR(bpf_obj, "open skeleton")) + goto out; + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + bpf_object__for_each_program(prog, bpf_obj->obj) { + char buf[128]; + bool at_tc = !strncmp(bpf_program__section_name(prog), "tc", 2); + + tattr.data_in = tests[i].data_in; + tattr.data_size_in = tests[i].data_size_in; + + tattr.data_size_out = 0; + bpf_obj->bss->ifindex = *tests[i].ifindex; + bpf_obj->bss->ret = 0; + err = bpf_prog_test_run_opts(bpf_program__fd(prog), &tattr); + sprintf(buf, "err: %s [%s]", tests[i].msg, bpf_program__name(prog)); + + if (at_tc && tests[i].success_on_tc) + ASSERT_GE(err, 0, buf); + else + ASSERT_EQ(err, tests[i].err, buf); + sprintf(buf, "ret: %s [%s]", tests[i].msg, bpf_program__name(prog)); + if (at_tc && tests[i].success_on_tc) + ASSERT_GE(bpf_obj->bss->ret, 0, buf); + else + ASSERT_EQ(bpf_obj->bss->ret, tests[i].ret, buf); + } + } + +out: + if (bpf_obj) + empty_skb__destroy(bpf_obj); + if (tok) + close_netns(tok); + system("ip netns del empty_skb"); +} diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c b/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c index c210657d4d0a..55d641c1f126 
100644 --- a/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c +++ b/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c @@ -22,7 +22,7 @@ static struct { "arg#0 pointer type STRUCT bpf_dynptr_kern points to unsupported dynamic pointer type", 0}, {"not_valid_dynptr", "arg#0 pointer type STRUCT bpf_dynptr_kern must be valid and initialized", 0}, - {"not_ptr_to_stack", "arg#0 pointer type STRUCT bpf_dynptr_kern not to stack", 0}, + {"not_ptr_to_stack", "arg#0 expected pointer to stack", 0}, {"dynptr_data_null", NULL, -EBADMSG}, }; diff --git a/tools/testing/selftests/bpf/prog_tests/linked_list.c b/tools/testing/selftests/bpf/prog_tests/linked_list.c new file mode 100644 index 000000000000..9a7d4c47af63 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/linked_list.c @@ -0,0 +1,740 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <bpf/btf.h> +#include <test_btf.h> +#include <linux/btf.h> +#include <test_progs.h> +#include <network_helpers.h> + +#include "linked_list.skel.h" +#include "linked_list_fail.skel.h" + +static char log_buf[1024 * 1024]; + +static struct { + const char *prog_name; + const char *err_msg; +} linked_list_fail_tests[] = { +#define TEST(test, off) \ + { #test "_missing_lock_push_front", \ + "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \ + { #test "_missing_lock_push_back", \ + "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \ + { #test "_missing_lock_pop_front", \ + "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \ + { #test "_missing_lock_pop_back", \ + "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, + TEST(kptr, 32) + TEST(global, 16) + TEST(map, 0) + TEST(inner_map, 0) +#undef TEST +#define TEST(test, op) \ + { #test "_kptr_incorrect_lock_" #op, \ + "held lock and object are not in the same allocation\n" \ + "bpf_spin_lock at off=32 must be held for bpf_list_head" }, \ + { #test "_global_incorrect_lock_" #op, \ + "held lock and object are not in the same allocation\n" \ + "bpf_spin_lock at off=16 must be held for bpf_list_head" }, \ + { #test "_map_incorrect_lock_" #op, \ + "held lock and object are not in the same allocation\n" \ + "bpf_spin_lock at off=0 must be held for bpf_list_head" }, \ + { #test "_inner_map_incorrect_lock_" #op, \ + "held lock and object are not in the same allocation\n" \ + "bpf_spin_lock at off=0 must be held for bpf_list_head" }, + TEST(kptr, push_front) + TEST(kptr, push_back) + TEST(kptr, pop_front) + TEST(kptr, pop_back) + TEST(global, push_front) + TEST(global, push_back) + TEST(global, pop_front) + TEST(global, pop_back) + TEST(map, push_front) + TEST(map, push_back) + TEST(map, pop_front) + TEST(map, pop_back) + TEST(inner_map, push_front) + TEST(inner_map, push_back) + TEST(inner_map, pop_front) + TEST(inner_map, pop_back) +#undef TEST + { "map_compat_kprobe", "tracing progs cannot use bpf_list_head yet" }, + { "map_compat_kretprobe", "tracing progs cannot use bpf_list_head yet" }, + { "map_compat_tp", "tracing progs cannot use bpf_list_head yet" }, + { "map_compat_perf", "tracing progs cannot use bpf_list_head yet" }, + { "map_compat_raw_tp", "tracing progs cannot use bpf_list_head yet" }, + { "map_compat_raw_tp_w", "tracing progs cannot use bpf_list_head yet" }, + { "obj_type_id_oor", "local type ID argument must be in range [0, U32_MAX]" }, + { "obj_new_no_composite", "bpf_obj_new type ID argument must be of a struct" }, + { "obj_new_no_struct", "bpf_obj_new type ID argument must be of a struct" }, + { "obj_drop_non_zero_off", "R1 
must have zero offset when passed to release func" }, + { "new_null_ret", "R0 invalid mem access 'ptr_or_null_'" }, + { "obj_new_acq", "Unreleased reference id=" }, + { "use_after_drop", "invalid mem access 'scalar'" }, + { "ptr_walk_scalar", "type=scalar expected=percpu_ptr_" }, + { "direct_read_lock", "direct access to bpf_spin_lock is disallowed" }, + { "direct_write_lock", "direct access to bpf_spin_lock is disallowed" }, + { "direct_read_head", "direct access to bpf_list_head is disallowed" }, + { "direct_write_head", "direct access to bpf_list_head is disallowed" }, + { "direct_read_node", "direct access to bpf_list_node is disallowed" }, + { "direct_write_node", "direct access to bpf_list_node is disallowed" }, + { "write_after_push_front", "only read is supported" }, + { "write_after_push_back", "only read is supported" }, + { "use_after_unlock_push_front", "invalid mem access 'scalar'" }, + { "use_after_unlock_push_back", "invalid mem access 'scalar'" }, + { "double_push_front", "arg#1 expected pointer to allocated object" }, + { "double_push_back", "arg#1 expected pointer to allocated object" }, + { "no_node_value_type", "bpf_list_node not found at offset=0" }, + { "incorrect_value_type", + "operation on bpf_list_head expects arg#1 bpf_list_node at offset=0 in struct foo, " + "but arg is at offset=0 in struct bar" }, + { "incorrect_node_var_off", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" }, + { "incorrect_node_off1", "bpf_list_node not found at offset=1" }, + { "incorrect_node_off2", "arg#1 offset=40, but expected bpf_list_node at offset=0 in struct foo" }, + { "no_head_type", "bpf_list_head not found at offset=0" }, + { "incorrect_head_var_off1", "R1 doesn't have constant offset" }, + { "incorrect_head_var_off2", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" }, + { "incorrect_head_off1", "bpf_list_head not found at offset=17" }, + { "incorrect_head_off2", "bpf_list_head not found at offset=1" }, + { "pop_front_off", + "15: (bf) r1 = r6 ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=40,imm=0) " + "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=40,imm=0) refs=2,4\n" + "16: (85) call bpf_this_cpu_ptr#154\nR1 type=ptr_or_null_ expected=percpu_ptr_" }, + { "pop_back_off", + "15: (bf) r1 = r6 ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=40,imm=0) " + "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=40,imm=0) refs=2,4\n" + "16: (85) call bpf_this_cpu_ptr#154\nR1 type=ptr_or_null_ expected=percpu_ptr_" }, +}; + +static void test_linked_list_fail_prog(const char *prog_name, const char *err_msg) +{ + LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf, + .kernel_log_size = sizeof(log_buf), + .kernel_log_level = 1); + struct linked_list_fail *skel; + struct bpf_program *prog; + int ret; + + skel = linked_list_fail__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "linked_list_fail__open_opts")) + return; + + prog = bpf_object__find_program_by_name(skel->obj, prog_name); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto end; + + bpf_program__set_autoload(prog, true); + + ret = linked_list_fail__load(skel); + if (!ASSERT_ERR(ret, "linked_list_fail__load must fail")) + goto end; + + if (!ASSERT_OK_PTR(strstr(log_buf, err_msg), "expected error message")) { + fprintf(stderr, "Expected: %s\n", err_msg); + fprintf(stderr, "Verifier: %s\n", log_buf); + } + +end: + linked_list_fail__destroy(skel); +} + +static void clear_fields(struct bpf_map *map) +{ + char buf[24]; + int key = 0; + + memset(buf, 0xff, sizeof(buf)); + 
ASSERT_OK(bpf_map__update_elem(map, &key, sizeof(key), buf, sizeof(buf), 0), "check_and_free_fields"); +} + +enum { + TEST_ALL, + PUSH_POP, + PUSH_POP_MULT, + LIST_IN_LIST, +}; + +static void test_linked_list_success(int mode, bool leave_in_map) +{ + LIBBPF_OPTS(bpf_test_run_opts, opts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + struct linked_list *skel; + int ret; + + skel = linked_list__open_and_load(); + if (!ASSERT_OK_PTR(skel, "linked_list__open_and_load")) + return; + + if (mode == LIST_IN_LIST) + goto lil; + if (mode == PUSH_POP_MULT) + goto ppm; + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_push_pop), &opts); + ASSERT_OK(ret, "map_list_push_pop"); + ASSERT_OK(opts.retval, "map_list_push_pop retval"); + if (!leave_in_map) + clear_fields(skel->maps.array_map); + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_push_pop), &opts); + ASSERT_OK(ret, "inner_map_list_push_pop"); + ASSERT_OK(opts.retval, "inner_map_list_push_pop retval"); + if (!leave_in_map) + clear_fields(skel->maps.inner_map); + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_push_pop), &opts); + ASSERT_OK(ret, "global_list_push_pop"); + ASSERT_OK(opts.retval, "global_list_push_pop retval"); + if (!leave_in_map) + clear_fields(skel->maps.bss_A); + + if (mode == PUSH_POP) + goto end; + +ppm: + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_push_pop_multiple), &opts); + ASSERT_OK(ret, "map_list_push_pop_multiple"); + ASSERT_OK(opts.retval, "map_list_push_pop_multiple retval"); + if (!leave_in_map) + clear_fields(skel->maps.array_map); + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_push_pop_multiple), &opts); + ASSERT_OK(ret, "inner_map_list_push_pop_multiple"); + ASSERT_OK(opts.retval, "inner_map_list_push_pop_multiple retval"); + if (!leave_in_map) + clear_fields(skel->maps.inner_map); + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_push_pop_multiple), &opts); + ASSERT_OK(ret, "global_list_push_pop_multiple"); + ASSERT_OK(opts.retval, "global_list_push_pop_multiple retval"); + if (!leave_in_map) + clear_fields(skel->maps.bss_A); + + if (mode == PUSH_POP_MULT) + goto end; + +lil: + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_in_list), &opts); + ASSERT_OK(ret, "map_list_in_list"); + ASSERT_OK(opts.retval, "map_list_in_list retval"); + if (!leave_in_map) + clear_fields(skel->maps.array_map); + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_in_list), &opts); + ASSERT_OK(ret, "inner_map_list_in_list"); + ASSERT_OK(opts.retval, "inner_map_list_in_list retval"); + if (!leave_in_map) + clear_fields(skel->maps.inner_map); + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_in_list), &opts); + ASSERT_OK(ret, "global_list_in_list"); + ASSERT_OK(opts.retval, "global_list_in_list retval"); + if (!leave_in_map) + clear_fields(skel->maps.bss_A); +end: + linked_list__destroy(skel); +} + +#define SPIN_LOCK 2 +#define LIST_HEAD 3 +#define LIST_NODE 4 + +static struct btf *init_btf(void) +{ + int id, lid, hid, nid; + struct btf *btf; + + btf = btf__new_empty(); + if (!ASSERT_OK_PTR(btf, "btf__new_empty")) + return NULL; + id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED); + if (!ASSERT_EQ(id, 1, "btf__add_int")) + goto end; + lid = btf__add_struct(btf, "bpf_spin_lock", 4); + if (!ASSERT_EQ(lid, SPIN_LOCK, "btf__add_struct bpf_spin_lock")) + goto end; + hid = btf__add_struct(btf, "bpf_list_head", 
16); + if (!ASSERT_EQ(hid, LIST_HEAD, "btf__add_struct bpf_list_head")) + goto end; + nid = btf__add_struct(btf, "bpf_list_node", 16); + if (!ASSERT_EQ(nid, LIST_NODE, "btf__add_struct bpf_list_node")) + goto end; + return btf; +end: + btf__free(btf); + return NULL; +} + +static void test_btf(void) +{ + struct btf *btf = NULL; + int id, err; + + while (test__start_subtest("btf: too many locks")) { + btf = init_btf(); + if (!ASSERT_OK_PTR(btf, "init_btf")) + break; + id = btf__add_struct(btf, "foo", 24); + if (!ASSERT_EQ(id, 5, "btf__add_struct foo")) + break; + err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0); + if (!ASSERT_OK(err, "btf__add_struct foo::a")) + break; + err = btf__add_field(btf, "b", SPIN_LOCK, 32, 0); + if (!ASSERT_OK(err, "btf__add_struct foo::a")) + break; + err = btf__add_field(btf, "c", LIST_HEAD, 64, 0); + if (!ASSERT_OK(err, "btf__add_struct foo::a")) + break; + + err = btf__load_into_kernel(btf); + ASSERT_EQ(err, -E2BIG, "check btf"); + btf__free(btf); + break; + } + + while (test__start_subtest("btf: missing lock")) { + btf = init_btf(); + if (!ASSERT_OK_PTR(btf, "init_btf")) + break; + id = btf__add_struct(btf, "foo", 16); + if (!ASSERT_EQ(id, 5, "btf__add_struct foo")) + break; + err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); + if (!ASSERT_OK(err, "btf__add_struct foo::a")) + break; + id = btf__add_decl_tag(btf, "contains:baz:a", 5, 0); + if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:baz:a")) + break; + id = btf__add_struct(btf, "baz", 16); + if (!ASSERT_EQ(id, 7, "btf__add_struct baz")) + break; + err = btf__add_field(btf, "a", LIST_NODE, 0, 0); + if (!ASSERT_OK(err, "btf__add_field baz::a")) + break; + + err = btf__load_into_kernel(btf); + ASSERT_EQ(err, -EINVAL, "check btf"); + btf__free(btf); + break; + } + + while (test__start_subtest("btf: bad offset")) { + btf = init_btf(); + if (!ASSERT_OK_PTR(btf, "init_btf")) + break; + id = btf__add_struct(btf, "foo", 36); + if (!ASSERT_EQ(id, 5, "btf__add_struct foo")) + break; + err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); + if (!ASSERT_OK(err, "btf__add_field foo::a")) + break; + err = btf__add_field(btf, "b", LIST_NODE, 0, 0); + if (!ASSERT_OK(err, "btf__add_field foo::b")) + break; + err = btf__add_field(btf, "c", SPIN_LOCK, 0, 0); + if (!ASSERT_OK(err, "btf__add_field foo::c")) + break; + id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0); + if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b")) + break; + + err = btf__load_into_kernel(btf); + ASSERT_EQ(err, -EEXIST, "check btf"); + btf__free(btf); + break; + } + + while (test__start_subtest("btf: missing contains:")) { + btf = init_btf(); + if (!ASSERT_OK_PTR(btf, "init_btf")) + break; + id = btf__add_struct(btf, "foo", 24); + if (!ASSERT_EQ(id, 5, "btf__add_struct foo")) + break; + err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0); + if (!ASSERT_OK(err, "btf__add_field foo::a")) + break; + err = btf__add_field(btf, "b", LIST_HEAD, 64, 0); + if (!ASSERT_OK(err, "btf__add_field foo::b")) + break; + + err = btf__load_into_kernel(btf); + ASSERT_EQ(err, -EINVAL, "check btf"); + btf__free(btf); + break; + } + + while (test__start_subtest("btf: missing struct")) { + btf = init_btf(); + if (!ASSERT_OK_PTR(btf, "init_btf")) + break; + id = btf__add_struct(btf, "foo", 24); + if (!ASSERT_EQ(id, 5, "btf__add_struct foo")) + break; + err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0); + if (!ASSERT_OK(err, "btf__add_field foo::a")) + break; + err = btf__add_field(btf, "b", LIST_HEAD, 64, 0); + if (!ASSERT_OK(err, "btf__add_field foo::b")) + break; + id = 
btf__add_decl_tag(btf, "contains:bar:bar", 5, 1); + if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:bar")) + break; + + err = btf__load_into_kernel(btf); + ASSERT_EQ(err, -ENOENT, "check btf"); + btf__free(btf); + break; + } + + while (test__start_subtest("btf: missing node")) { + btf = init_btf(); + if (!ASSERT_OK_PTR(btf, "init_btf")) + break; + id = btf__add_struct(btf, "foo", 24); + if (!ASSERT_EQ(id, 5, "btf__add_struct foo")) + break; + err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0); + if (!ASSERT_OK(err, "btf__add_field foo::a")) + break; + err = btf__add_field(btf, "b", LIST_HEAD, 64, 0); + if (!ASSERT_OK(err, "btf__add_field foo::b")) + break; + id = btf__add_decl_tag(btf, "contains:foo:c", 5, 1); + if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:c")) + break; + + err = btf__load_into_kernel(btf); + btf__free(btf); + ASSERT_EQ(err, -ENOENT, "check btf"); + break; + } + + while (test__start_subtest("btf: node incorrect type")) { + btf = init_btf(); + if (!ASSERT_OK_PTR(btf, "init_btf")) + break; + id = btf__add_struct(btf, "foo", 20); + if (!ASSERT_EQ(id, 5, "btf__add_struct foo")) + break; + err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); + if (!ASSERT_OK(err, "btf__add_field foo::a")) + break; + err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0); + if (!ASSERT_OK(err, "btf__add_field foo::b")) + break; + id = btf__add_decl_tag(btf, "contains:bar:a", 5, 0); + if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:a")) + break; + id = btf__add_struct(btf, "bar", 4); + if (!ASSERT_EQ(id, 7, "btf__add_struct bar")) + break; + err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0); + if (!ASSERT_OK(err, "btf__add_field bar::a")) + break; + + err = btf__load_into_kernel(btf); + ASSERT_EQ(err, -EINVAL, "check btf"); + btf__free(btf); + break; + } + + while (test__start_subtest("btf: multiple bpf_list_node with name b")) { + btf = init_btf(); + if (!ASSERT_OK_PTR(btf, "init_btf")) + break; + id = btf__add_struct(btf, "foo", 52); + if (!ASSERT_EQ(id, 5, "btf__add_struct foo")) + break; + err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); + if (!ASSERT_OK(err, "btf__add_field foo::a")) + break; + err = btf__add_field(btf, "b", LIST_NODE, 128, 0); + if (!ASSERT_OK(err, "btf__add_field foo::b")) + break; + err = btf__add_field(btf, "b", LIST_NODE, 256, 0); + if (!ASSERT_OK(err, "btf__add_field foo::c")) + break; + err = btf__add_field(btf, "d", SPIN_LOCK, 384, 0); + if (!ASSERT_OK(err, "btf__add_field foo::d")) + break; + id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0); + if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b")) + break; + + err = btf__load_into_kernel(btf); + ASSERT_EQ(err, -EINVAL, "check btf"); + btf__free(btf); + break; + } + + while (test__start_subtest("btf: owning | owned AA cycle")) { + btf = init_btf(); + if (!ASSERT_OK_PTR(btf, "init_btf")) + break; + id = btf__add_struct(btf, "foo", 36); + if (!ASSERT_EQ(id, 5, "btf__add_struct foo")) + break; + err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); + if (!ASSERT_OK(err, "btf__add_field foo::a")) + break; + err = btf__add_field(btf, "b", LIST_NODE, 128, 0); + if (!ASSERT_OK(err, "btf__add_field foo::b")) + break; + err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0); + if (!ASSERT_OK(err, "btf__add_field foo::c")) + break; + id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0); + if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b")) + break; + + err = btf__load_into_kernel(btf); + ASSERT_EQ(err, -ELOOP, "check btf"); + btf__free(btf); + break; + } + + while (test__start_subtest("btf: owning | owned ABA 
cycle")) { + btf = init_btf(); + if (!ASSERT_OK_PTR(btf, "init_btf")) + break; + id = btf__add_struct(btf, "foo", 36); + if (!ASSERT_EQ(id, 5, "btf__add_struct foo")) + break; + err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); + if (!ASSERT_OK(err, "btf__add_field foo::a")) + break; + err = btf__add_field(btf, "b", LIST_NODE, 128, 0); + if (!ASSERT_OK(err, "btf__add_field foo::b")) + break; + err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0); + if (!ASSERT_OK(err, "btf__add_field foo::c")) + break; + id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0); + if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b")) + break; + id = btf__add_struct(btf, "bar", 36); + if (!ASSERT_EQ(id, 7, "btf__add_struct bar")) + break; + err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); + if (!ASSERT_OK(err, "btf__add_field bar::a")) + break; + err = btf__add_field(btf, "b", LIST_NODE, 128, 0); + if (!ASSERT_OK(err, "btf__add_field bar::b")) + break; + err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0); + if (!ASSERT_OK(err, "btf__add_field bar::c")) + break; + id = btf__add_decl_tag(btf, "contains:foo:b", 7, 0); + if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:foo:b")) + break; + + err = btf__load_into_kernel(btf); + ASSERT_EQ(err, -ELOOP, "check btf"); + btf__free(btf); + break; + } + + while (test__start_subtest("btf: owning -> owned")) { + btf = init_btf(); + if (!ASSERT_OK_PTR(btf, "init_btf")) + break; + id = btf__add_struct(btf, "foo", 20); + if (!ASSERT_EQ(id, 5, "btf__add_struct foo")) + break; + err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); + if (!ASSERT_OK(err, "btf__add_field foo::a")) + break; + err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0); + if (!ASSERT_OK(err, "btf__add_field foo::b")) + break; + id = btf__add_decl_tag(btf, "contains:bar:a", 5, 0); + if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:a")) + break; + id = btf__add_struct(btf, "bar", 16); + if (!ASSERT_EQ(id, 7, "btf__add_struct bar")) + break; + err = btf__add_field(btf, "a", LIST_NODE, 0, 0); + if (!ASSERT_OK(err, "btf__add_field bar::a")) + break; + + err = btf__load_into_kernel(btf); + ASSERT_EQ(err, 0, "check btf"); + btf__free(btf); + break; + } + + while (test__start_subtest("btf: owning -> owning | owned -> owned")) { + btf = init_btf(); + if (!ASSERT_OK_PTR(btf, "init_btf")) + break; + id = btf__add_struct(btf, "foo", 20); + if (!ASSERT_EQ(id, 5, "btf__add_struct foo")) + break; + err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); + if (!ASSERT_OK(err, "btf__add_field foo::a")) + break; + err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0); + if (!ASSERT_OK(err, "btf__add_field foo::b")) + break; + id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0); + if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b")) + break; + id = btf__add_struct(btf, "bar", 36); + if (!ASSERT_EQ(id, 7, "btf__add_struct bar")) + break; + err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); + if (!ASSERT_OK(err, "btf__add_field bar::a")) + break; + err = btf__add_field(btf, "b", LIST_NODE, 128, 0); + if (!ASSERT_OK(err, "btf__add_field bar::b")) + break; + err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0); + if (!ASSERT_OK(err, "btf__add_field bar::c")) + break; + id = btf__add_decl_tag(btf, "contains:baz:a", 7, 0); + if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:a")) + break; + id = btf__add_struct(btf, "baz", 16); + if (!ASSERT_EQ(id, 9, "btf__add_struct baz")) + break; + err = btf__add_field(btf, "a", LIST_NODE, 0, 0); + if (!ASSERT_OK(err, "btf__add_field baz:a")) + break; + + err = btf__load_into_kernel(btf); + ASSERT_EQ(err, 
0, "check btf"); + btf__free(btf); + break; + } + + while (test__start_subtest("btf: owning | owned -> owning | owned -> owned")) { + btf = init_btf(); + if (!ASSERT_OK_PTR(btf, "init_btf")) + break; + id = btf__add_struct(btf, "foo", 36); + if (!ASSERT_EQ(id, 5, "btf__add_struct foo")) + break; + err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); + if (!ASSERT_OK(err, "btf__add_field foo::a")) + break; + err = btf__add_field(btf, "b", LIST_NODE, 128, 0); + if (!ASSERT_OK(err, "btf__add_field foo::b")) + break; + err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0); + if (!ASSERT_OK(err, "btf__add_field foo::c")) + break; + id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0); + if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b")) + break; + id = btf__add_struct(btf, "bar", 36); + if (!ASSERT_EQ(id, 7, "btf__add_struct bar")) + break; + err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); + if (!ASSERT_OK(err, "btf__add_field bar:a")) + break; + err = btf__add_field(btf, "b", LIST_NODE, 128, 0); + if (!ASSERT_OK(err, "btf__add_field bar:b")) + break; + err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0); + if (!ASSERT_OK(err, "btf__add_field bar:c")) + break; + id = btf__add_decl_tag(btf, "contains:baz:a", 7, 0); + if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:a")) + break; + id = btf__add_struct(btf, "baz", 16); + if (!ASSERT_EQ(id, 9, "btf__add_struct baz")) + break; + err = btf__add_field(btf, "a", LIST_NODE, 0, 0); + if (!ASSERT_OK(err, "btf__add_field baz:a")) + break; + + err = btf__load_into_kernel(btf); + ASSERT_EQ(err, -ELOOP, "check btf"); + btf__free(btf); + break; + } + + while (test__start_subtest("btf: owning -> owning | owned -> owning | owned -> owned")) { + btf = init_btf(); + if (!ASSERT_OK_PTR(btf, "init_btf")) + break; + id = btf__add_struct(btf, "foo", 20); + if (!ASSERT_EQ(id, 5, "btf__add_struct foo")) + break; + err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); + if (!ASSERT_OK(err, "btf__add_field foo::a")) + break; + err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0); + if (!ASSERT_OK(err, "btf__add_field foo::b")) + break; + id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0); + if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b")) + break; + id = btf__add_struct(btf, "bar", 36); + if (!ASSERT_EQ(id, 7, "btf__add_struct bar")) + break; + err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); + if (!ASSERT_OK(err, "btf__add_field bar::a")) + break; + err = btf__add_field(btf, "b", LIST_NODE, 128, 0); + if (!ASSERT_OK(err, "btf__add_field bar::b")) + break; + err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0); + if (!ASSERT_OK(err, "btf__add_field bar::c")) + break; + id = btf__add_decl_tag(btf, "contains:baz:b", 7, 0); + if (!ASSERT_EQ(id, 8, "btf__add_decl_tag")) + break; + id = btf__add_struct(btf, "baz", 36); + if (!ASSERT_EQ(id, 9, "btf__add_struct baz")) + break; + err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); + if (!ASSERT_OK(err, "btf__add_field bar::a")) + break; + err = btf__add_field(btf, "b", LIST_NODE, 128, 0); + if (!ASSERT_OK(err, "btf__add_field bar::b")) + break; + err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0); + if (!ASSERT_OK(err, "btf__add_field bar::c")) + break; + id = btf__add_decl_tag(btf, "contains:bam:a", 9, 0); + if (!ASSERT_EQ(id, 10, "btf__add_decl_tag contains:bam:a")) + break; + id = btf__add_struct(btf, "bam", 16); + if (!ASSERT_EQ(id, 11, "btf__add_struct bam")) + break; + err = btf__add_field(btf, "a", LIST_NODE, 0, 0); + if (!ASSERT_OK(err, "btf__add_field bam::a")) + break; + + err = btf__load_into_kernel(btf); + 
ASSERT_EQ(err, -ELOOP, "check btf"); + btf__free(btf); + break; + } +} + +void test_linked_list(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(linked_list_fail_tests); i++) { + if (!test__start_subtest(linked_list_fail_tests[i].prog_name)) + continue; + test_linked_list_fail_prog(linked_list_fail_tests[i].prog_name, + linked_list_fail_tests[i].err_msg); + } + test_btf(); + test_linked_list_success(PUSH_POP, false); + test_linked_list_success(PUSH_POP, true); + test_linked_list_success(PUSH_POP_MULT, false); + test_linked_list_success(PUSH_POP_MULT, true); + test_linked_list_success(LIST_IN_LIST, false); + test_linked_list_success(LIST_IN_LIST, true); + test_linked_list_success(TEST_ALL, false); +} diff --git a/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c b/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c index 1102e4f42d2d..f117bfef68a1 100644 --- a/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c +++ b/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c @@ -173,10 +173,12 @@ static void test_lsm_cgroup_functional(void) ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 4, "total prog count"); ASSERT_EQ(query_prog_cnt(cgroup_fd2, NULL), 1, "total prog count"); - /* AF_UNIX is prohibited. */ - fd = socket(AF_UNIX, SOCK_STREAM, 0); - ASSERT_LT(fd, 0, "socket(AF_UNIX)"); + if (!(skel->kconfig->CONFIG_SECURITY_APPARMOR + || skel->kconfig->CONFIG_SECURITY_SELINUX + || skel->kconfig->CONFIG_SECURITY_SMACK)) + /* AF_UNIX is prohibited. */ + ASSERT_LT(fd, 0, "socket(AF_UNIX)"); close(fd); /* AF_INET6 gets default policy (sk_priority). */ @@ -233,11 +235,18 @@ static void test_lsm_cgroup_functional(void) /* AF_INET6+SOCK_STREAM * AF_PACKET+SOCK_RAW + * AF_UNIX+SOCK_RAW if already have non-bpf lsms installed * listen_fd * client_fd * accepted_fd */ - ASSERT_EQ(skel->bss->called_socket_post_create2, 5, "called_create2"); + if (skel->kconfig->CONFIG_SECURITY_APPARMOR + || skel->kconfig->CONFIG_SECURITY_SELINUX + || skel->kconfig->CONFIG_SECURITY_SMACK) + /* AF_UNIX+SOCK_RAW if already have non-bpf lsms installed */ + ASSERT_EQ(skel->bss->called_socket_post_create2, 6, "called_create2"); + else + ASSERT_EQ(skel->bss->called_socket_post_create2, 5, "called_create2"); /* start_server * bind(ETH_P_ALL) diff --git a/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c b/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c new file mode 100644 index 000000000000..447d8560ecb6 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. 
and affiliates.*/ + +#define _GNU_SOURCE +#include <unistd.h> +#include <sys/syscall.h> +#include <sys/types.h> +#include <test_progs.h> +#include <bpf/btf.h> +#include "rcu_read_lock.skel.h" +#include "cgroup_helpers.h" + +static unsigned long long cgroup_id; + +static void test_success(void) +{ + struct rcu_read_lock *skel; + int err; + + skel = rcu_read_lock__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + skel->bss->target_pid = syscall(SYS_gettid); + + bpf_program__set_autoload(skel->progs.get_cgroup_id, true); + bpf_program__set_autoload(skel->progs.task_succ, true); + bpf_program__set_autoload(skel->progs.no_lock, true); + bpf_program__set_autoload(skel->progs.two_regions, true); + bpf_program__set_autoload(skel->progs.non_sleepable_1, true); + bpf_program__set_autoload(skel->progs.non_sleepable_2, true); + err = rcu_read_lock__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto out; + + err = rcu_read_lock__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) + goto out; + + syscall(SYS_getpgid); + + ASSERT_EQ(skel->bss->task_storage_val, 2, "task_storage_val"); + ASSERT_EQ(skel->bss->cgroup_id, cgroup_id, "cgroup_id"); +out: + rcu_read_lock__destroy(skel); +} + +static void test_rcuptr_acquire(void) +{ + struct rcu_read_lock *skel; + int err; + + skel = rcu_read_lock__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + skel->bss->target_pid = syscall(SYS_gettid); + + bpf_program__set_autoload(skel->progs.task_acquire, true); + err = rcu_read_lock__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto out; + + err = rcu_read_lock__attach(skel); + ASSERT_OK(err, "skel_attach"); +out: + rcu_read_lock__destroy(skel); +} + +static const char * const inproper_region_tests[] = { + "miss_lock", + "miss_unlock", + "non_sleepable_rcu_mismatch", + "inproper_sleepable_helper", + "inproper_sleepable_kfunc", + "nested_rcu_region", +}; + +static void test_inproper_region(void) +{ + struct rcu_read_lock *skel; + struct bpf_program *prog; + int i, err; + + for (i = 0; i < ARRAY_SIZE(inproper_region_tests); i++) { + skel = rcu_read_lock__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + prog = bpf_object__find_program_by_name(skel->obj, inproper_region_tests[i]); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto out; + bpf_program__set_autoload(prog, true); + err = rcu_read_lock__load(skel); + ASSERT_ERR(err, "skel_load"); +out: + rcu_read_lock__destroy(skel); + } +} + +static const char * const rcuptr_misuse_tests[] = { + "task_untrusted_non_rcuptr", + "task_untrusted_rcuptr", + "cross_rcu_region", +}; + +static void test_rcuptr_misuse(void) +{ + struct rcu_read_lock *skel; + struct bpf_program *prog; + int i, err; + + for (i = 0; i < ARRAY_SIZE(rcuptr_misuse_tests); i++) { + skel = rcu_read_lock__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + prog = bpf_object__find_program_by_name(skel->obj, rcuptr_misuse_tests[i]); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto out; + bpf_program__set_autoload(prog, true); + err = rcu_read_lock__load(skel); + ASSERT_ERR(err, "skel_load"); +out: + rcu_read_lock__destroy(skel); + } +} + +void test_rcu_read_lock(void) +{ + struct btf *vmlinux_btf; + int cgroup_fd; + + vmlinux_btf = btf__load_vmlinux_btf(); + if (!ASSERT_OK_PTR(vmlinux_btf, "could not load vmlinux BTF")) + return; + if (btf__find_by_name_kind(vmlinux_btf, "rcu", BTF_KIND_TYPE_TAG) < 0) { + test__skip(); + goto out; + } + + cgroup_fd = test__join_cgroup("/rcu_read_lock"); + if 
(!ASSERT_GE(cgroup_fd, 0, "join_cgroup /rcu_read_lock")) + goto out; + + cgroup_id = get_cgroup_id("/rcu_read_lock"); + if (test__start_subtest("success")) + test_success(); + if (test__start_subtest("rcuptr_acquire")) + test_rcuptr_acquire(); + if (test__start_subtest("negative_tests_inproper_region")) + test_inproper_region(); + if (test__start_subtest("negative_tests_rcuptr_misuse")) + test_rcuptr_misuse(); + close(cgroup_fd); +out: + btf__free(vmlinux_btf); +} diff --git a/tools/testing/selftests/bpf/prog_tests/spin_lock.c b/tools/testing/selftests/bpf/prog_tests/spin_lock.c new file mode 100644 index 000000000000..d9270bd3d920 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/spin_lock.c @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> + +#include "test_spin_lock.skel.h" +#include "test_spin_lock_fail.skel.h" + +static char log_buf[1024 * 1024]; + +static struct { + const char *prog_name; + const char *err_msg; +} spin_lock_fail_tests[] = { + { "lock_id_kptr_preserve", + "5: (bf) r1 = r0 ; R0_w=ptr_foo(id=2,ref_obj_id=2,off=0,imm=0) " + "R1_w=ptr_foo(id=2,ref_obj_id=2,off=0,imm=0) refs=2\n6: (85) call bpf_this_cpu_ptr#154\n" + "R1 type=ptr_ expected=percpu_ptr_" }, + { "lock_id_global_zero", + "; R1_w=map_value(off=0,ks=4,vs=4,imm=0)\n2: (85) call bpf_this_cpu_ptr#154\n" + "R1 type=map_value expected=percpu_ptr_" }, + { "lock_id_mapval_preserve", + "8: (bf) r1 = r0 ; R0_w=map_value(id=1,off=0,ks=4,vs=8,imm=0) " + "R1_w=map_value(id=1,off=0,ks=4,vs=8,imm=0)\n9: (85) call bpf_this_cpu_ptr#154\n" + "R1 type=map_value expected=percpu_ptr_" }, + { "lock_id_innermapval_preserve", + "13: (bf) r1 = r0 ; R0=map_value(id=2,off=0,ks=4,vs=8,imm=0) " + "R1_w=map_value(id=2,off=0,ks=4,vs=8,imm=0)\n14: (85) call bpf_this_cpu_ptr#154\n" + "R1 type=map_value expected=percpu_ptr_" }, + { "lock_id_mismatch_kptr_kptr", "bpf_spin_unlock of different lock" }, + { "lock_id_mismatch_kptr_global", "bpf_spin_unlock of different lock" }, + { "lock_id_mismatch_kptr_mapval", "bpf_spin_unlock of different lock" }, + { "lock_id_mismatch_kptr_innermapval", "bpf_spin_unlock of different lock" }, + { "lock_id_mismatch_global_global", "bpf_spin_unlock of different lock" }, + { "lock_id_mismatch_global_kptr", "bpf_spin_unlock of different lock" }, + { "lock_id_mismatch_global_mapval", "bpf_spin_unlock of different lock" }, + { "lock_id_mismatch_global_innermapval", "bpf_spin_unlock of different lock" }, + { "lock_id_mismatch_mapval_mapval", "bpf_spin_unlock of different lock" }, + { "lock_id_mismatch_mapval_kptr", "bpf_spin_unlock of different lock" }, + { "lock_id_mismatch_mapval_global", "bpf_spin_unlock of different lock" }, + { "lock_id_mismatch_mapval_innermapval", "bpf_spin_unlock of different lock" }, + { "lock_id_mismatch_innermapval_innermapval1", "bpf_spin_unlock of different lock" }, + { "lock_id_mismatch_innermapval_innermapval2", "bpf_spin_unlock of different lock" }, + { "lock_id_mismatch_innermapval_kptr", "bpf_spin_unlock of different lock" }, + { "lock_id_mismatch_innermapval_global", "bpf_spin_unlock of different lock" }, + { "lock_id_mismatch_innermapval_mapval", "bpf_spin_unlock of different lock" }, +}; + +static void test_spin_lock_fail_prog(const char *prog_name, const char *err_msg) +{ + LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf, + .kernel_log_size = sizeof(log_buf), + .kernel_log_level = 1); + struct test_spin_lock_fail *skel; + struct bpf_program *prog; + int ret; + + skel = 
test_spin_lock_fail__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "test_spin_lock_fail__open_opts")) + return; + + prog = bpf_object__find_program_by_name(skel->obj, prog_name); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto end; + + bpf_program__set_autoload(prog, true); + + ret = test_spin_lock_fail__load(skel); + if (!ASSERT_ERR(ret, "test_spin_lock_fail__load must fail")) + goto end; + + /* Skip check if JIT does not support kfuncs */ + if (strstr(log_buf, "JIT does not support calling kernel function")) { + test__skip(); + goto end; + } + + if (!ASSERT_OK_PTR(strstr(log_buf, err_msg), "expected error message")) { + fprintf(stderr, "Expected: %s\n", err_msg); + fprintf(stderr, "Verifier: %s\n", log_buf); + } + +end: + test_spin_lock_fail__destroy(skel); +} + +static void *spin_lock_thread(void *arg) +{ + int err, prog_fd = *(u32 *) arg; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 10000, + ); + + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + ASSERT_OK(topts.retval, "test_run retval"); + pthread_exit(arg); +} + +void test_spin_lock_success(void) +{ + struct test_spin_lock *skel; + pthread_t thread_id[4]; + int prog_fd, i; + void *ret; + + skel = test_spin_lock__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_spin_lock__open_and_load")) + return; + prog_fd = bpf_program__fd(skel->progs.bpf_spin_lock_test); + for (i = 0; i < 4; i++) { + int err; + + err = pthread_create(&thread_id[i], NULL, &spin_lock_thread, &prog_fd); + if (!ASSERT_OK(err, "pthread_create")) + goto end; + } + + for (i = 0; i < 4; i++) { + if (!ASSERT_OK(pthread_join(thread_id[i], &ret), "pthread_join")) + goto end; + if (!ASSERT_EQ(ret, &prog_fd, "ret == prog_fd")) + goto end; + } +end: + test_spin_lock__destroy(skel); +} + +void test_spin_lock(void) +{ + int i; + + test_spin_lock_success(); + + for (i = 0; i < ARRAY_SIZE(spin_lock_fail_tests); i++) { + if (!test__start_subtest(spin_lock_fail_tests[i].prog_name)) + continue; + test_spin_lock_fail_prog(spin_lock_fail_tests[i].prog_name, + spin_lock_fail_tests[i].err_msg); + } +} diff --git a/tools/testing/selftests/bpf/prog_tests/spinlock.c b/tools/testing/selftests/bpf/prog_tests/spinlock.c deleted file mode 100644 index 15eb1372d771..000000000000 --- a/tools/testing/selftests/bpf/prog_tests/spinlock.c +++ /dev/null @@ -1,45 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include <test_progs.h> -#include <network_helpers.h> - -static void *spin_lock_thread(void *arg) -{ - int err, prog_fd = *(u32 *) arg; - LIBBPF_OPTS(bpf_test_run_opts, topts, - .data_in = &pkt_v4, - .data_size_in = sizeof(pkt_v4), - .repeat = 10000, - ); - - err = bpf_prog_test_run_opts(prog_fd, &topts); - ASSERT_OK(err, "test_run"); - ASSERT_OK(topts.retval, "test_run retval"); - pthread_exit(arg); -} - -void test_spinlock(void) -{ - const char *file = "./test_spin_lock.bpf.o"; - pthread_t thread_id[4]; - struct bpf_object *obj = NULL; - int prog_fd; - int err = 0, i; - void *ret; - - err = bpf_prog_test_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd); - if (CHECK_FAIL(err)) { - printf("test_spin_lock:bpf_prog_test_load errno %d\n", errno); - goto close_prog; - } - for (i = 0; i < 4; i++) - if (CHECK_FAIL(pthread_create(&thread_id[i], NULL, - &spin_lock_thread, &prog_fd))) - goto close_prog; - - for (i = 0; i < 4; i++) - if (CHECK_FAIL(pthread_join(thread_id[i], &ret) || - ret != (void *)&prog_fd)) - goto close_prog; -close_prog: - bpf_object__close(obj); -} diff --git 
a/tools/testing/selftests/bpf/prog_tests/task_kfunc.c b/tools/testing/selftests/bpf/prog_tests/task_kfunc.c new file mode 100644 index 000000000000..ffd8ef4303c8 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/task_kfunc.c @@ -0,0 +1,163 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ + +#define _GNU_SOURCE +#include <sys/wait.h> +#include <test_progs.h> +#include <unistd.h> + +#include "task_kfunc_failure.skel.h" +#include "task_kfunc_success.skel.h" + +static size_t log_buf_sz = 1 << 20; /* 1 MB */ +static char obj_log_buf[1048576]; + +static struct task_kfunc_success *open_load_task_kfunc_skel(void) +{ + struct task_kfunc_success *skel; + int err; + + skel = task_kfunc_success__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return NULL; + + skel->bss->pid = getpid(); + + err = task_kfunc_success__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + return skel; + +cleanup: + task_kfunc_success__destroy(skel); + return NULL; +} + +static void run_success_test(const char *prog_name) +{ + struct task_kfunc_success *skel; + int status; + pid_t child_pid; + struct bpf_program *prog; + struct bpf_link *link = NULL; + + skel = open_load_task_kfunc_skel(); + if (!ASSERT_OK_PTR(skel, "open_load_skel")) + return; + + if (!ASSERT_OK(skel->bss->err, "pre_spawn_err")) + goto cleanup; + + prog = bpf_object__find_program_by_name(skel->obj, prog_name); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto cleanup; + + link = bpf_program__attach(prog); + if (!ASSERT_OK_PTR(link, "attached_link")) + goto cleanup; + + child_pid = fork(); + if (!ASSERT_GT(child_pid, -1, "child_pid")) + goto cleanup; + if (child_pid == 0) + _exit(0); + waitpid(child_pid, &status, 0); + + ASSERT_OK(skel->bss->err, "post_wait_err"); + +cleanup: + bpf_link__destroy(link); + task_kfunc_success__destroy(skel); +} + +static const char * const success_tests[] = { + "test_task_acquire_release_argument", + "test_task_acquire_release_current", + "test_task_acquire_leave_in_map", + "test_task_xchg_release", + "test_task_get_release", + "test_task_current_acquire_release", + "test_task_from_pid_arg", + "test_task_from_pid_current", + "test_task_from_pid_invalid", +}; + +static struct { + const char *prog_name; + const char *expected_err_msg; +} failure_tests[] = { + {"task_kfunc_acquire_untrusted", "R1 must be referenced or trusted"}, + {"task_kfunc_acquire_fp", "arg#0 pointer type STRUCT task_struct must point"}, + {"task_kfunc_acquire_unsafe_kretprobe", "reg type unsupported for arg#0 function"}, + {"task_kfunc_acquire_trusted_walked", "R1 must be referenced or trusted"}, + {"task_kfunc_acquire_null", "arg#0 pointer type STRUCT task_struct must point"}, + {"task_kfunc_acquire_unreleased", "Unreleased reference"}, + {"task_kfunc_get_non_kptr_param", "arg#0 expected pointer to map value"}, + {"task_kfunc_get_non_kptr_acquired", "arg#0 expected pointer to map value"}, + {"task_kfunc_get_null", "arg#0 expected pointer to map value"}, + {"task_kfunc_xchg_unreleased", "Unreleased reference"}, + {"task_kfunc_get_unreleased", "Unreleased reference"}, + {"task_kfunc_release_untrusted", "arg#0 is untrusted_ptr_or_null_ expected ptr_ or socket"}, + {"task_kfunc_release_fp", "arg#0 pointer type STRUCT task_struct must point"}, + {"task_kfunc_release_null", "arg#0 is ptr_or_null_ expected ptr_ or socket"}, + {"task_kfunc_release_unacquired", "release kernel function bpf_task_release expects"}, + {"task_kfunc_from_pid_no_null_check", "arg#0 is 
+
+static void verify_fail(const char *prog_name, const char *expected_err_msg)
+{
+	LIBBPF_OPTS(bpf_object_open_opts, opts);
+	struct task_kfunc_failure *skel;
+	int err, i;
+
+	opts.kernel_log_buf = obj_log_buf;
+	opts.kernel_log_size = log_buf_sz;
+	opts.kernel_log_level = 1;
+
+	skel = task_kfunc_failure__open_opts(&opts);
+	if (!ASSERT_OK_PTR(skel, "task_kfunc_failure__open_opts"))
+		goto cleanup;
+
+	for (i = 0; i < ARRAY_SIZE(failure_tests); i++) {
+		struct bpf_program *prog;
+		const char *curr_name = failure_tests[i].prog_name;
+
+		prog = bpf_object__find_program_by_name(skel->obj, curr_name);
+		if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+			goto cleanup;
+
+		bpf_program__set_autoload(prog, !strcmp(curr_name, prog_name));
+	}
+
+	err = task_kfunc_failure__load(skel);
+	if (!ASSERT_ERR(err, "unexpected load success"))
+		goto cleanup;
+
+	if (!ASSERT_OK_PTR(strstr(obj_log_buf, expected_err_msg), "expected_err_msg")) {
+		fprintf(stderr, "Expected err_msg: %s\n", expected_err_msg);
+		fprintf(stderr, "Verifier output: %s\n", obj_log_buf);
+	}
+
+cleanup:
+	task_kfunc_failure__destroy(skel);
+}
+
+void test_task_kfunc(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(success_tests); i++) {
+		if (!test__start_subtest(success_tests[i]))
+			continue;
+
+		run_success_test(success_tests[i]);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(failure_tests); i++) {
+		if (!test__start_subtest(failure_tests[i].prog_name))
+			continue;
+
+		verify_fail(failure_tests[i].prog_name, failure_tests[i].expected_err_msg);
+	}
+}
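run_success_test() in task_kfunc.c needs the traced event to actually fire, so it forks a child that immediately exits and waits on it; the BPF side records any failure into skel->bss->err for the user side to assert on afterwards. The trigger in isolation, as a sketch (the skeleton name task_check and its err flag are assumptions mirroring the file above):

#include <sys/wait.h>
#include <unistd.h>
#include <test_progs.h>
#include "task_check.skel.h" /* hypothetical skeleton with a task-exit prog */

static void trigger_task_exit(struct task_check *skel)
{
	int status;
	pid_t pid = fork();

	if (!ASSERT_GT(pid, -1, "fork"))
		return;
	if (pid == 0)
		_exit(0);          /* child exits, firing the exit tracepoint */
	waitpid(pid, &status, 0); /* parent waits so the BPF side has run */
	ASSERT_OK(skel->bss->err, "bpf-side error flag");
}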
diff --git a/tools/testing/selftests/bpf/prog_tests/type_cast.c b/tools/testing/selftests/bpf/prog_tests/type_cast.c
new file mode 100644
index 000000000000..9317d5fa2635
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/type_cast.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <network_helpers.h>
+#include "type_cast.skel.h"
+
+static void test_xdp(void)
+{
+	struct type_cast *skel;
+	int err, prog_fd;
+	char buf[128];
+
+	LIBBPF_OPTS(bpf_test_run_opts, topts,
+		    .data_in = &pkt_v4,
+		    .data_size_in = sizeof(pkt_v4),
+		    .data_out = buf,
+		    .data_size_out = sizeof(buf),
+		    .repeat = 1,
+	);
+
+	skel = type_cast__open();
+	if (!ASSERT_OK_PTR(skel, "skel_open"))
+		return;
+
+	bpf_program__set_autoload(skel->progs.md_xdp, true);
+	err = type_cast__load(skel);
+	if (!ASSERT_OK(err, "skel_load"))
+		goto out;
+
+	prog_fd = bpf_program__fd(skel->progs.md_xdp);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	ASSERT_OK(err, "test_run");
+	ASSERT_EQ(topts.retval, XDP_PASS, "xdp test_run retval");
+
+	ASSERT_EQ(skel->bss->ifindex, 1, "xdp_md ifindex");
+	ASSERT_EQ(skel->bss->ifindex, skel->bss->ingress_ifindex, "xdp_md ingress_ifindex");
+	ASSERT_STREQ(skel->bss->name, "lo", "xdp_md name");
+	ASSERT_NEQ(skel->bss->inum, 0, "xdp_md inum");
+
+out:
+	type_cast__destroy(skel);
+}
+
+static void test_tc(void)
+{
+	struct type_cast *skel;
+	int err, prog_fd;
+
+	LIBBPF_OPTS(bpf_test_run_opts, topts,
+		    .data_in = &pkt_v4,
+		    .data_size_in = sizeof(pkt_v4),
+		    .repeat = 1,
+	);
+
+	skel = type_cast__open();
+	if (!ASSERT_OK_PTR(skel, "skel_open"))
+		return;
+
+	bpf_program__set_autoload(skel->progs.md_skb, true);
+	err = type_cast__load(skel);
+	if (!ASSERT_OK(err, "skel_load"))
+		goto out;
+
+	prog_fd = bpf_program__fd(skel->progs.md_skb);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	ASSERT_OK(err, "test_run");
+	ASSERT_EQ(topts.retval, 0, "tc test_run retval");
+
+	ASSERT_EQ(skel->bss->meta_len, 0, "skb meta_len");
+	ASSERT_EQ(skel->bss->frag0_len, 0, "skb frag0_len");
+	ASSERT_NEQ(skel->bss->kskb_len, 0, "skb len");
+	ASSERT_NEQ(skel->bss->kskb2_len, 0, "skb2 len");
+	ASSERT_EQ(skel->bss->kskb_len, skel->bss->kskb2_len, "skb len compare");
+
+out:
+	type_cast__destroy(skel);
+}
+
+static const char * const negative_tests[] = {
+	"untrusted_ptr",
+	"kctx_u64",
+};
+
+static void test_negative(void)
+{
+	struct bpf_program *prog;
+	struct type_cast *skel;
+	int i, err;
+
+	for (i = 0; i < ARRAY_SIZE(negative_tests); i++) {
+		skel = type_cast__open();
+		if (!ASSERT_OK_PTR(skel, "skel_open"))
+			return;
+
+		prog = bpf_object__find_program_by_name(skel->obj, negative_tests[i]);
+		if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+			goto out;
+		bpf_program__set_autoload(prog, true);
+		err = type_cast__load(skel);
+		ASSERT_ERR(err, "skel_load");
+out:
+		type_cast__destroy(skel);
+	}
+}
+
+void test_type_cast(void)
+{
+	if (test__start_subtest("xdp"))
+		test_xdp();
+	if (test__start_subtest("tc"))
+		test_tc();
+	if (test__start_subtest("negative"))
+		test_negative();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
index a50971c6cf4a..9ac6f6a268db 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
@@ -85,7 +85,7 @@ static void test_max_pkt_size(int fd)
 }
 
 #define NUM_PKTS 10000
-void test_xdp_do_redirect(void)
+void serial_test_xdp_do_redirect(void)
 {
 	int err, xdp_prog_fd, tc_prog_fd, ifindex_src, ifindex_dst;
 	char data[sizeof(pkt_udp) + sizeof(__u32)];
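type_cast.c above drives its programs through BPF_PROG_RUN rather than live traffic: bpf_prog_test_run_opts() feeds a canned packet (pkt_v4 from network_helpers.h) into the program, reports the program's return code in topts.retval, and can optionally copy the resulting frame out through data_out. A stripped-down sketch of one XDP invocation, assuming the caller already holds a loaded program fd:

#include <test_progs.h>
#include <network_helpers.h> /* provides pkt_v4 */

static void run_xdp_once(int prog_fd)
{
	char buf[128];
	/* Feed a canned IPv4 packet in; capture the (possibly rewritten)
	 * frame in buf and the program's verdict in topts.retval. */
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .data_out = buf,
		    .data_size_out = sizeof(buf),
		    .repeat = 1,
	);

	ASSERT_OK(bpf_prog_test_run_opts(prog_fd, &topts), "test_run");
	ASSERT_EQ(topts.retval, XDP_PASS, "xdp retval");
}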
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c b/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c
index c72083885b6d..13daa3746064 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c
@@ -174,7 +174,7 @@ out:
 	system("ip netns del synproxy");
 }
 
-void test_xdp_synproxy(void)
+void serial_test_xdp_synproxy(void)
 {
 	if (test__start_subtest("xdp"))
 		test_synproxy(true);
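The two renames above opt both XDP tests out of parallel execution: test_progs keys scheduling off the function name, running test_* cases concurrently under -j while serial_test_* cases run one at a time, which matters for tests that reconfigure shared state such as veth pairs or network namespaces. A sketch of the convention (the test name and body are hypothetical placeholders):

#include <test_progs.h>

/* The serial_test_ prefix tells test_progs to run this case alone,
 * never overlapped with other tests under -j. */
void serial_test_netdev_reconfig(void)
{
	ASSERT_OK(0, "placeholder"); /* real body would touch shared netdevs */
}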