Diffstat (limited to 'tools/testing/selftests/bpf/progs')
41 files changed, 3592 insertions, 29 deletions
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c index 6286023fd62b..c5969ca6f26b 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c @@ -19,13 +19,20 @@ struct { __type(value, __u64); } arraymap1 SEC(".maps"); +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 10); + __type(key, __u64); + __type(value, __u32); +} hashmap1 SEC(".maps"); + __u32 key_sum = 0; __u64 val_sum = 0; SEC("iter/bpf_map_elem") int dump_bpf_array_map(struct bpf_iter__bpf_map_elem *ctx) { - __u32 *key = ctx->key; + __u32 *hmap_val, *key = ctx->key; __u64 *val = ctx->value; if (key == (void *)0 || val == (void *)0) @@ -35,6 +42,18 @@ int dump_bpf_array_map(struct bpf_iter__bpf_map_elem *ctx) bpf_seq_write(ctx->meta->seq, val, sizeof(__u64)); key_sum += *key; val_sum += *val; + + /* workaround - It's necessary to do this convoluted (val, key) + * write into hashmap1, instead of simply doing + * bpf_map_update_elem(&hashmap1, val, key, BPF_ANY); + * because key has MEM_RDONLY flag and bpf_map_update elem expects + * types without this flag + */ + bpf_map_update_elem(&hashmap1, val, val, BPF_ANY); + hmap_val = bpf_map_lookup_elem(&hashmap1, val); + if (hmap_val) + *hmap_val = *key; + *val = *key; return 0; } diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c b/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c index 285c008cbf9c..9ba14c37bbcc 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c @@ -7,14 +7,14 @@ char _license[] SEC("license") = "GPL"; unsigned long last_sym_value = 0; -static inline char tolower(char c) +static inline char to_lower(char c) { if (c >= 'A' && c <= 'Z') c += ('a' - 'A'); return c; } -static inline char toupper(char c) +static inline char to_upper(char c) { if (c >= 'a' && c <= 'z') c -= ('a' - 'A'); @@ -54,7 +54,7 @@ int dump_ksym(struct bpf_iter__ksym *ctx) type = iter->type; if (iter->module_name[0]) { - type = iter->exported ? toupper(type) : tolower(type); + type = iter->exported ? 
to_upper(type) : to_lower(type); BPF_SEQ_PRINTF(seq, "0x%llx %c %s [ %s ] ", value, type, iter->name, iter->module_name); } else { diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h index 5bb11fe595a4..4a01ea9113bf 100644 --- a/tools/testing/selftests/bpf/progs/bpf_misc.h +++ b/tools/testing/selftests/bpf/progs/bpf_misc.h @@ -2,6 +2,11 @@ #ifndef __BPF_MISC_H__ #define __BPF_MISC_H__ +#define __msg(msg) __attribute__((btf_decl_tag("comment:test_expect_msg=" msg))) +#define __failure __attribute__((btf_decl_tag("comment:test_expect_failure"))) +#define __success __attribute__((btf_decl_tag("comment:test_expect_success"))) +#define __log_level(lvl) __attribute__((btf_decl_tag("comment:test_log_level="#lvl))) + #if defined(__TARGET_ARCH_x86) #define SYSCALL_WRAPPER 1 #define SYS_PREFIX "__x64_" diff --git a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h index adb087aecc9e..b394817126cf 100644 --- a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h +++ b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h @@ -25,6 +25,9 @@ #define IPV6_TCLASS 67 #define IPV6_AUTOFLOWLABEL 70 +#define TC_ACT_UNSPEC (-1) +#define TC_ACT_SHOT 2 + #define SOL_TCP 6 #define TCP_NODELAY 1 #define TCP_MAXSEG 2 diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c index f2661c8d2d90..7cb522d22a66 100644 --- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c +++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c @@ -102,12 +102,21 @@ struct zone { struct zone_padding __pad__; }; +/* ----- START-EXPECTED-OUTPUT ----- */ +struct padding_wo_named_members { + long: 64; + long: 64; +}; + +/* ------ END-EXPECTED-OUTPUT ------ */ + int f(struct { struct padded_implicitly _1; struct padded_explicitly _2; struct padded_a_lot _3; struct padded_cache_line _4; struct zone _5; + struct padding_wo_named_members _6; } *_) { return 0; diff --git a/tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c b/tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c index 8feddb8289cf..38f78d9345de 100644 --- a/tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c +++ b/tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c @@ -64,3 +64,4 @@ int BPF_PROG(test_percpu_helper, struct cgroup *cgrp, const char *path) return 0; } +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h b/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h new file mode 100644 index 000000000000..7d30855bfe78 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 
*/ + +#ifndef _CGRP_KFUNC_COMMON_H +#define _CGRP_KFUNC_COMMON_H + +#include <errno.h> +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +struct __cgrps_kfunc_map_value { + struct cgroup __kptr_ref * cgrp; +}; + +struct hash_map { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, int); + __type(value, struct __cgrps_kfunc_map_value); + __uint(max_entries, 1); +} __cgrps_kfunc_map SEC(".maps"); + +struct cgroup *bpf_cgroup_acquire(struct cgroup *p) __ksym; +struct cgroup *bpf_cgroup_kptr_get(struct cgroup **pp) __ksym; +void bpf_cgroup_release(struct cgroup *p) __ksym; +struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) __ksym; + +static inline struct __cgrps_kfunc_map_value *cgrps_kfunc_map_value_lookup(struct cgroup *cgrp) +{ + s32 id; + long status; + + status = bpf_probe_read_kernel(&id, sizeof(id), &cgrp->self.id); + if (status) + return NULL; + + return bpf_map_lookup_elem(&__cgrps_kfunc_map, &id); +} + +static inline int cgrps_kfunc_map_insert(struct cgroup *cgrp) +{ + struct __cgrps_kfunc_map_value local, *v; + long status; + struct cgroup *acquired, *old; + s32 id; + + status = bpf_probe_read_kernel(&id, sizeof(id), &cgrp->self.id); + if (status) + return status; + + local.cgrp = NULL; + status = bpf_map_update_elem(&__cgrps_kfunc_map, &id, &local, BPF_NOEXIST); + if (status) + return status; + + v = bpf_map_lookup_elem(&__cgrps_kfunc_map, &id); + if (!v) { + bpf_map_delete_elem(&__cgrps_kfunc_map, &id); + return -ENOENT; + } + + acquired = bpf_cgroup_acquire(cgrp); + old = bpf_kptr_xchg(&v->cgrp, acquired); + if (old) { + bpf_cgroup_release(old); + return -EEXIST; + } + + return 0; +} + +#endif /* _CGRP_KFUNC_COMMON_H */ diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c b/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c new file mode 100644 index 000000000000..a1369b5ebcf8 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c @@ -0,0 +1,260 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ + +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> + +#include "cgrp_kfunc_common.h" + +char _license[] SEC("license") = "GPL"; + +/* Prototype for all of the program trace events below: + * + * TRACE_EVENT(cgroup_mkdir, + * TP_PROTO(struct cgroup *cgrp, const char *path), + * TP_ARGS(cgrp, path) + */ + +static struct __cgrps_kfunc_map_value *insert_lookup_cgrp(struct cgroup *cgrp) +{ + int status; + + status = cgrps_kfunc_map_insert(cgrp); + if (status) + return NULL; + + return cgrps_kfunc_map_value_lookup(cgrp); +} + +SEC("tp_btf/cgroup_mkdir") +int BPF_PROG(cgrp_kfunc_acquire_untrusted, struct cgroup *cgrp, const char *path) +{ + struct cgroup *acquired; + struct __cgrps_kfunc_map_value *v; + + v = insert_lookup_cgrp(cgrp); + if (!v) + return 0; + + /* Can't invoke bpf_cgroup_acquire() on an untrusted pointer. */ + acquired = bpf_cgroup_acquire(v->cgrp); + bpf_cgroup_release(acquired); + + return 0; +} + +SEC("tp_btf/cgroup_mkdir") +int BPF_PROG(cgrp_kfunc_acquire_fp, struct cgroup *cgrp, const char *path) +{ + struct cgroup *acquired, *stack_cgrp = (struct cgroup *)&path; + + /* Can't invoke bpf_cgroup_acquire() on a random frame pointer. 
*/ + acquired = bpf_cgroup_acquire((struct cgroup *)&stack_cgrp); + bpf_cgroup_release(acquired); + + return 0; +} + +SEC("kretprobe/cgroup_destroy_locked") +int BPF_PROG(cgrp_kfunc_acquire_unsafe_kretprobe, struct cgroup *cgrp) +{ + struct cgroup *acquired; + + /* Can't acquire an untrusted struct cgroup * pointer. */ + acquired = bpf_cgroup_acquire(cgrp); + bpf_cgroup_release(acquired); + + return 0; +} + +SEC("tp_btf/cgroup_mkdir") +int BPF_PROG(cgrp_kfunc_acquire_trusted_walked, struct cgroup *cgrp, const char *path) +{ + struct cgroup *acquired; + + /* Can't invoke bpf_cgroup_acquire() on a pointer obtained from walking a trusted cgroup. */ + acquired = bpf_cgroup_acquire(cgrp->old_dom_cgrp); + bpf_cgroup_release(acquired); + + return 0; +} + + +SEC("tp_btf/cgroup_mkdir") +int BPF_PROG(cgrp_kfunc_acquire_null, struct cgroup *cgrp, const char *path) +{ + struct cgroup *acquired; + + /* Can't invoke bpf_cgroup_acquire() on a NULL pointer. */ + acquired = bpf_cgroup_acquire(NULL); + if (!acquired) + return 0; + bpf_cgroup_release(acquired); + + return 0; +} + +SEC("tp_btf/cgroup_mkdir") +int BPF_PROG(cgrp_kfunc_acquire_unreleased, struct cgroup *cgrp, const char *path) +{ + struct cgroup *acquired; + + acquired = bpf_cgroup_acquire(cgrp); + + /* Acquired cgroup is never released. */ + + return 0; +} + +SEC("tp_btf/cgroup_mkdir") +int BPF_PROG(cgrp_kfunc_get_non_kptr_param, struct cgroup *cgrp, const char *path) +{ + struct cgroup *kptr; + + /* Cannot use bpf_cgroup_kptr_get() on a non-kptr, even on a valid cgroup. */ + kptr = bpf_cgroup_kptr_get(&cgrp); + if (!kptr) + return 0; + + bpf_cgroup_release(kptr); + + return 0; +} + +SEC("tp_btf/cgroup_mkdir") +int BPF_PROG(cgrp_kfunc_get_non_kptr_acquired, struct cgroup *cgrp, const char *path) +{ + struct cgroup *kptr, *acquired; + + acquired = bpf_cgroup_acquire(cgrp); + + /* Cannot use bpf_cgroup_kptr_get() on a non-map-value, even if the kptr was acquired. */ + kptr = bpf_cgroup_kptr_get(&acquired); + bpf_cgroup_release(acquired); + if (!kptr) + return 0; + + bpf_cgroup_release(kptr); + + return 0; +} + +SEC("tp_btf/cgroup_mkdir") +int BPF_PROG(cgrp_kfunc_get_null, struct cgroup *cgrp, const char *path) +{ + struct cgroup *kptr; + + /* Cannot use bpf_cgroup_kptr_get() on a NULL pointer. */ + kptr = bpf_cgroup_kptr_get(NULL); + if (!kptr) + return 0; + + bpf_cgroup_release(kptr); + + return 0; +} + +SEC("tp_btf/cgroup_mkdir") +int BPF_PROG(cgrp_kfunc_xchg_unreleased, struct cgroup *cgrp, const char *path) +{ + struct cgroup *kptr; + struct __cgrps_kfunc_map_value *v; + + v = insert_lookup_cgrp(cgrp); + if (!v) + return 0; + + kptr = bpf_kptr_xchg(&v->cgrp, NULL); + if (!kptr) + return 0; + + /* Kptr retrieved from map is never released. */ + + return 0; +} + +SEC("tp_btf/cgroup_mkdir") +int BPF_PROG(cgrp_kfunc_get_unreleased, struct cgroup *cgrp, const char *path) +{ + struct cgroup *kptr; + struct __cgrps_kfunc_map_value *v; + + v = insert_lookup_cgrp(cgrp); + if (!v) + return 0; + + kptr = bpf_cgroup_kptr_get(&v->cgrp); + if (!kptr) + return 0; + + /* Kptr acquired above is never released. */ + + return 0; +} + +SEC("tp_btf/cgroup_mkdir") +int BPF_PROG(cgrp_kfunc_release_untrusted, struct cgroup *cgrp, const char *path) +{ + struct __cgrps_kfunc_map_value *v; + + v = insert_lookup_cgrp(cgrp); + if (!v) + return 0; + + /* Can't invoke bpf_cgroup_release() on an untrusted pointer. 
*/ + bpf_cgroup_release(v->cgrp); + + return 0; +} + +SEC("tp_btf/cgroup_mkdir") +int BPF_PROG(cgrp_kfunc_release_fp, struct cgroup *cgrp, const char *path) +{ + struct cgroup *acquired = (struct cgroup *)&path; + + /* Cannot release random frame pointer. */ + bpf_cgroup_release(acquired); + + return 0; +} + +SEC("tp_btf/cgroup_mkdir") +int BPF_PROG(cgrp_kfunc_release_null, struct cgroup *cgrp, const char *path) +{ + struct __cgrps_kfunc_map_value local, *v; + long status; + struct cgroup *acquired, *old; + s32 id; + + status = bpf_probe_read_kernel(&id, sizeof(id), &cgrp->self.id); + if (status) + return 0; + + local.cgrp = NULL; + status = bpf_map_update_elem(&__cgrps_kfunc_map, &id, &local, BPF_NOEXIST); + if (status) + return status; + + v = bpf_map_lookup_elem(&__cgrps_kfunc_map, &id); + if (!v) + return -ENOENT; + + acquired = bpf_cgroup_acquire(cgrp); + + old = bpf_kptr_xchg(&v->cgrp, acquired); + + /* old cannot be passed to bpf_cgroup_release() without a NULL check. */ + bpf_cgroup_release(old); + + return 0; +} + +SEC("tp_btf/cgroup_mkdir") +int BPF_PROG(cgrp_kfunc_release_unacquired, struct cgroup *cgrp, const char *path) +{ + /* Cannot release trusted cgroup pointer which was not acquired. */ + bpf_cgroup_release(cgrp); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c b/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c new file mode 100644 index 000000000000..0c23ea32df9f --- /dev/null +++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ + +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> + +#include "cgrp_kfunc_common.h" + +char _license[] SEC("license") = "GPL"; + +int err, pid, invocations; + +/* Prototype for all of the program trace events below: + * + * TRACE_EVENT(cgroup_mkdir, + * TP_PROTO(struct cgroup *cgrp, const char *path), + * TP_ARGS(cgrp, path) + */ + +static bool is_test_kfunc_task(void) +{ + int cur_pid = bpf_get_current_pid_tgid() >> 32; + bool same = pid == cur_pid; + + if (same) + __sync_fetch_and_add(&invocations, 1); + + return same; +} + +SEC("tp_btf/cgroup_mkdir") +int BPF_PROG(test_cgrp_acquire_release_argument, struct cgroup *cgrp, const char *path) +{ + struct cgroup *acquired; + + if (!is_test_kfunc_task()) + return 0; + + acquired = bpf_cgroup_acquire(cgrp); + bpf_cgroup_release(acquired); + + return 0; +} + +SEC("tp_btf/cgroup_mkdir") +int BPF_PROG(test_cgrp_acquire_leave_in_map, struct cgroup *cgrp, const char *path) +{ + long status; + + if (!is_test_kfunc_task()) + return 0; + + status = cgrps_kfunc_map_insert(cgrp); + if (status) + err = 1; + + return 0; +} + +SEC("tp_btf/cgroup_mkdir") +int BPF_PROG(test_cgrp_xchg_release, struct cgroup *cgrp, const char *path) +{ + struct cgroup *kptr; + struct __cgrps_kfunc_map_value *v; + long status; + + if (!is_test_kfunc_task()) + return 0; + + status = cgrps_kfunc_map_insert(cgrp); + if (status) { + err = 1; + return 0; + } + + v = cgrps_kfunc_map_value_lookup(cgrp); + if (!v) { + err = 2; + return 0; + } + + kptr = bpf_kptr_xchg(&v->cgrp, NULL); + if (!kptr) { + err = 3; + return 0; + } + + bpf_cgroup_release(kptr); + + return 0; +} + +SEC("tp_btf/cgroup_mkdir") +int BPF_PROG(test_cgrp_get_release, struct cgroup *cgrp, const char *path) +{ + struct cgroup *kptr; + struct __cgrps_kfunc_map_value *v; + long status; + + if (!is_test_kfunc_task()) + return 0; + + status = cgrps_kfunc_map_insert(cgrp); + if 
(status) { + err = 1; + return 0; + } + + v = cgrps_kfunc_map_value_lookup(cgrp); + if (!v) { + err = 2; + return 0; + } + + kptr = bpf_cgroup_kptr_get(&v->cgrp); + if (!kptr) { + err = 3; + return 0; + } + + bpf_cgroup_release(kptr); + + return 0; +} + +SEC("tp_btf/cgroup_mkdir") +int BPF_PROG(test_cgrp_get_ancestors, struct cgroup *cgrp, const char *path) +{ + struct cgroup *self, *ancestor1, *invalid; + + if (!is_test_kfunc_task()) + return 0; + + self = bpf_cgroup_ancestor(cgrp, cgrp->level); + if (!self) { + err = 1; + return 0; + } + + if (self->self.id != cgrp->self.id) { + bpf_cgroup_release(self); + err = 2; + return 0; + } + bpf_cgroup_release(self); + + ancestor1 = bpf_cgroup_ancestor(cgrp, cgrp->level - 1); + if (!ancestor1) { + err = 3; + return 0; + } + bpf_cgroup_release(ancestor1); + + invalid = bpf_cgroup_ancestor(cgrp, 10000); + if (invalid) { + bpf_cgroup_release(invalid); + err = 4; + return 0; + } + + invalid = bpf_cgroup_ancestor(cgrp, -1); + if (invalid) { + bpf_cgroup_release(invalid); + err = 5; + return 0; + } + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/cgrp_ls_attach_cgroup.c b/tools/testing/selftests/bpf/progs/cgrp_ls_attach_cgroup.c new file mode 100644 index 000000000000..6652d18465b2 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/cgrp_ls_attach_cgroup.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include "bpf_tracing_net.h" + +char _license[] SEC("license") = "GPL"; + +struct socket_cookie { + __u64 cookie_key; + __u64 cookie_value; +}; + +struct { + __uint(type, BPF_MAP_TYPE_CGRP_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, struct socket_cookie); +} socket_cookies SEC(".maps"); + +SEC("cgroup/connect6") +int set_cookie(struct bpf_sock_addr *ctx) +{ + struct socket_cookie *p; + struct tcp_sock *tcp_sk; + struct bpf_sock *sk; + + if (ctx->family != AF_INET6 || ctx->user_family != AF_INET6) + return 1; + + sk = ctx->sk; + if (!sk) + return 1; + + tcp_sk = bpf_skc_to_tcp_sock(sk); + if (!tcp_sk) + return 1; + + p = bpf_cgrp_storage_get(&socket_cookies, + tcp_sk->inet_conn.icsk_inet.sk.sk_cgrp_data.cgroup, 0, + BPF_LOCAL_STORAGE_GET_F_CREATE); + if (!p) + return 1; + + p->cookie_value = 0xF; + p->cookie_key = bpf_get_socket_cookie(ctx); + return 1; +} + +SEC("sockops") +int update_cookie_sockops(struct bpf_sock_ops *ctx) +{ + struct socket_cookie *p; + struct tcp_sock *tcp_sk; + struct bpf_sock *sk; + + if (ctx->family != AF_INET6 || ctx->op != BPF_SOCK_OPS_TCP_CONNECT_CB) + return 1; + + sk = ctx->sk; + if (!sk) + return 1; + + tcp_sk = bpf_skc_to_tcp_sock(sk); + if (!tcp_sk) + return 1; + + p = bpf_cgrp_storage_get(&socket_cookies, + tcp_sk->inet_conn.icsk_inet.sk.sk_cgrp_data.cgroup, 0, 0); + if (!p) + return 1; + + if (p->cookie_key != bpf_get_socket_cookie(ctx)) + return 1; + + p->cookie_value |= (ctx->local_port << 8); + return 1; +} + +SEC("fexit/inet_stream_connect") +int BPF_PROG(update_cookie_tracing, struct socket *sock, + struct sockaddr *uaddr, int addr_len, int flags) +{ + struct socket_cookie *p; + struct tcp_sock *tcp_sk; + + if (uaddr->sa_family != AF_INET6) + return 0; + + p = bpf_cgrp_storage_get(&socket_cookies, sock->sk->sk_cgrp_data.cgroup, 0, 0); + if (!p) + return 0; + + if (p->cookie_key != bpf_get_socket_cookie(sock->sk)) + return 0; + + p->cookie_value |= 0xF0; + return 0; +} diff --git 
a/tools/testing/selftests/bpf/progs/cgrp_ls_negative.c b/tools/testing/selftests/bpf/progs/cgrp_ls_negative.c new file mode 100644 index 000000000000..d41f90e2ab64 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/cgrp_ls_negative.c @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +struct { + __uint(type, BPF_MAP_TYPE_CGRP_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, long); +} map_a SEC(".maps"); + +SEC("tp_btf/sys_enter") +int BPF_PROG(on_enter, struct pt_regs *regs, long id) +{ + struct task_struct *task; + + task = bpf_get_current_task_btf(); + (void)bpf_cgrp_storage_get(&map_a, (struct cgroup *)task, 0, + BPF_LOCAL_STORAGE_GET_F_CREATE); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c b/tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c new file mode 100644 index 000000000000..a043d8fefdac --- /dev/null +++ b/tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +struct { + __uint(type, BPF_MAP_TYPE_CGRP_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, long); +} map_a SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_CGRP_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, long); +} map_b SEC(".maps"); + +SEC("fentry/bpf_local_storage_lookup") +int BPF_PROG(on_lookup) +{ + struct task_struct *task = bpf_get_current_task_btf(); + + bpf_cgrp_storage_delete(&map_a, task->cgroups->dfl_cgrp); + bpf_cgrp_storage_delete(&map_b, task->cgroups->dfl_cgrp); + return 0; +} + +SEC("fentry/bpf_local_storage_update") +int BPF_PROG(on_update) +{ + struct task_struct *task = bpf_get_current_task_btf(); + long *ptr; + + ptr = bpf_cgrp_storage_get(&map_a, task->cgroups->dfl_cgrp, 0, + BPF_LOCAL_STORAGE_GET_F_CREATE); + if (ptr) + *ptr += 1; + + ptr = bpf_cgrp_storage_get(&map_b, task->cgroups->dfl_cgrp, 0, + BPF_LOCAL_STORAGE_GET_F_CREATE); + if (ptr) + *ptr += 1; + + return 0; +} + +SEC("tp_btf/sys_enter") +int BPF_PROG(on_enter, struct pt_regs *regs, long id) +{ + struct task_struct *task; + long *ptr; + + task = bpf_get_current_task_btf(); + ptr = bpf_cgrp_storage_get(&map_a, task->cgroups->dfl_cgrp, 0, + BPF_LOCAL_STORAGE_GET_F_CREATE); + if (ptr) + *ptr = 200; + + ptr = bpf_cgrp_storage_get(&map_b, task->cgroups->dfl_cgrp, 0, + BPF_LOCAL_STORAGE_GET_F_CREATE); + if (ptr) + *ptr = 100; + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c b/tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c new file mode 100644 index 000000000000..2d11ed528b6f --- /dev/null +++ b/tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 
*/ + +#include "bpf_iter.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + +struct { + __uint(type, BPF_MAP_TYPE_CGRP_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, long); +} map_a SEC(".maps"); + +__u32 target_pid; +__u64 cgroup_id; + +void bpf_rcu_read_lock(void) __ksym; +void bpf_rcu_read_unlock(void) __ksym; + +SEC("?iter.s/cgroup") +int cgroup_iter(struct bpf_iter__cgroup *ctx) +{ + struct seq_file *seq = ctx->meta->seq; + struct cgroup *cgrp = ctx->cgroup; + long *ptr; + + if (cgrp == NULL) + return 0; + + ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0, + BPF_LOCAL_STORAGE_GET_F_CREATE); + if (ptr) + cgroup_id = cgrp->kn->id; + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +int no_rcu_lock(void *ctx) +{ + struct task_struct *task; + struct cgroup *cgrp; + long *ptr; + + task = bpf_get_current_task_btf(); + if (task->pid != target_pid) + return 0; + + /* ptr_to_btf_id semantics. should work. */ + cgrp = task->cgroups->dfl_cgrp; + ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0, + BPF_LOCAL_STORAGE_GET_F_CREATE); + if (ptr) + cgroup_id = cgrp->kn->id; + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +int yes_rcu_lock(void *ctx) +{ + struct task_struct *task; + struct cgroup *cgrp; + long *ptr; + + task = bpf_get_current_task_btf(); + if (task->pid != target_pid) + return 0; + + bpf_rcu_read_lock(); + cgrp = task->cgroups->dfl_cgrp; + /* cgrp is untrusted and cannot pass to bpf_cgrp_storage_get() helper. */ + ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE); + if (ptr) + cgroup_id = cgrp->kn->id; + bpf_rcu_read_unlock(); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/cgrp_ls_tp_btf.c b/tools/testing/selftests/bpf/progs/cgrp_ls_tp_btf.c new file mode 100644 index 000000000000..9ebb8e2fe541 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/cgrp_ls_tp_btf.c @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 
*/ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +struct { + __uint(type, BPF_MAP_TYPE_CGRP_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, long); +} map_a SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_CGRP_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, long); +} map_b SEC(".maps"); + +#define MAGIC_VALUE 0xabcd1234 + +pid_t target_pid = 0; +int mismatch_cnt = 0; +int enter_cnt = 0; +int exit_cnt = 0; + +SEC("tp_btf/sys_enter") +int BPF_PROG(on_enter, struct pt_regs *regs, long id) +{ + struct task_struct *task; + long *ptr; + int err; + + task = bpf_get_current_task_btf(); + if (task->pid != target_pid) + return 0; + + /* populate value 0 */ + ptr = bpf_cgrp_storage_get(&map_a, task->cgroups->dfl_cgrp, 0, + BPF_LOCAL_STORAGE_GET_F_CREATE); + if (!ptr) + return 0; + + /* delete value 0 */ + err = bpf_cgrp_storage_delete(&map_a, task->cgroups->dfl_cgrp); + if (err) + return 0; + + /* value is not available */ + ptr = bpf_cgrp_storage_get(&map_a, task->cgroups->dfl_cgrp, 0, 0); + if (ptr) + return 0; + + /* re-populate the value */ + ptr = bpf_cgrp_storage_get(&map_a, task->cgroups->dfl_cgrp, 0, + BPF_LOCAL_STORAGE_GET_F_CREATE); + if (!ptr) + return 0; + __sync_fetch_and_add(&enter_cnt, 1); + *ptr = MAGIC_VALUE + enter_cnt; + + return 0; +} + +SEC("tp_btf/sys_exit") +int BPF_PROG(on_exit, struct pt_regs *regs, long id) +{ + struct task_struct *task; + long *ptr; + + task = bpf_get_current_task_btf(); + if (task->pid != target_pid) + return 0; + + ptr = bpf_cgrp_storage_get(&map_a, task->cgroups->dfl_cgrp, 0, + BPF_LOCAL_STORAGE_GET_F_CREATE); + if (!ptr) + return 0; + + __sync_fetch_and_add(&exit_cnt, 1); + if (*ptr != MAGIC_VALUE + exit_cnt) + __sync_fetch_and_add(&mismatch_cnt, 1); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c index b0f08ff024fb..78debc1b3820 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_fail.c +++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c @@ -43,6 +43,7 @@ struct sample { struct { __uint(type, BPF_MAP_TYPE_RINGBUF); + __uint(max_entries, 4096); } ringbuf SEC(".maps"); int err, val; @@ -66,6 +67,7 @@ static int get_map_val_dynptr(struct bpf_dynptr *ptr) * bpf_ringbuf_submit/discard_dynptr call */ SEC("?raw_tp") +__failure __msg("Unreleased reference id=1") int ringbuf_missing_release1(void *ctx) { struct bpf_dynptr ptr; @@ -78,6 +80,7 @@ int ringbuf_missing_release1(void *ctx) } SEC("?raw_tp") +__failure __msg("Unreleased reference id=2") int ringbuf_missing_release2(void *ctx) { struct bpf_dynptr ptr1, ptr2; @@ -113,6 +116,7 @@ static int missing_release_callback_fn(__u32 index, void *data) /* Any dynptr initialized within a callback must have bpf_dynptr_put called */ SEC("?raw_tp") +__failure __msg("Unreleased reference id") int ringbuf_missing_release_callback(void *ctx) { bpf_loop(10, missing_release_callback_fn, NULL, 0); @@ -121,6 +125,7 @@ int ringbuf_missing_release_callback(void *ctx) /* Can't call bpf_ringbuf_submit/discard_dynptr on a non-initialized dynptr */ SEC("?raw_tp") +__failure __msg("arg 1 is an unacquired reference") int ringbuf_release_uninit_dynptr(void *ctx) { struct bpf_dynptr ptr; @@ -133,6 +138,7 @@ int ringbuf_release_uninit_dynptr(void *ctx) /* A dynptr can't be used after it has been invalidated */ SEC("?raw_tp") +__failure __msg("Expected an initialized dynptr as arg #3") int 
use_after_invalid(void *ctx) { struct bpf_dynptr ptr; @@ -152,6 +158,7 @@ int use_after_invalid(void *ctx) /* Can't call non-dynptr ringbuf APIs on a dynptr ringbuf sample */ SEC("?raw_tp") +__failure __msg("type=mem expected=ringbuf_mem") int ringbuf_invalid_api(void *ctx) { struct bpf_dynptr ptr; @@ -174,6 +181,7 @@ done: /* Can't add a dynptr to a map */ SEC("?raw_tp") +__failure __msg("invalid indirect read from stack") int add_dynptr_to_map1(void *ctx) { struct bpf_dynptr ptr; @@ -191,6 +199,7 @@ int add_dynptr_to_map1(void *ctx) /* Can't add a struct with an embedded dynptr to a map */ SEC("?raw_tp") +__failure __msg("invalid indirect read from stack") int add_dynptr_to_map2(void *ctx) { struct test_info x; @@ -208,6 +217,7 @@ int add_dynptr_to_map2(void *ctx) /* A data slice can't be accessed out of bounds */ SEC("?raw_tp") +__failure __msg("value is outside of the allowed memory range") int data_slice_out_of_bounds_ringbuf(void *ctx) { struct bpf_dynptr ptr; @@ -228,6 +238,7 @@ done: } SEC("?raw_tp") +__failure __msg("value is outside of the allowed memory range") int data_slice_out_of_bounds_map_value(void *ctx) { __u32 key = 0, map_val; @@ -248,6 +259,7 @@ int data_slice_out_of_bounds_map_value(void *ctx) /* A data slice can't be used after it has been released */ SEC("?raw_tp") +__failure __msg("invalid mem access 'scalar'") int data_slice_use_after_release1(void *ctx) { struct bpf_dynptr ptr; @@ -279,6 +291,7 @@ done: * ptr2 is at fp - 16). */ SEC("?raw_tp") +__failure __msg("invalid mem access 'scalar'") int data_slice_use_after_release2(void *ctx) { struct bpf_dynptr ptr1, ptr2; @@ -310,6 +323,7 @@ done: /* A data slice must be first checked for NULL */ SEC("?raw_tp") +__failure __msg("invalid mem access 'mem_or_null'") int data_slice_missing_null_check1(void *ctx) { struct bpf_dynptr ptr; @@ -330,6 +344,7 @@ int data_slice_missing_null_check1(void *ctx) /* A data slice can't be dereferenced if it wasn't checked for null */ SEC("?raw_tp") +__failure __msg("invalid mem access 'mem_or_null'") int data_slice_missing_null_check2(void *ctx) { struct bpf_dynptr ptr; @@ -352,6 +367,7 @@ done: * dynptr argument */ SEC("?raw_tp") +__failure __msg("invalid indirect read from stack") int invalid_helper1(void *ctx) { struct bpf_dynptr ptr; @@ -366,6 +382,7 @@ int invalid_helper1(void *ctx) /* A dynptr can't be passed into a helper function at a non-zero offset */ SEC("?raw_tp") +__failure __msg("Expected an initialized dynptr as arg #3") int invalid_helper2(void *ctx) { struct bpf_dynptr ptr; @@ -381,6 +398,7 @@ int invalid_helper2(void *ctx) /* A bpf_dynptr is invalidated if it's been written into */ SEC("?raw_tp") +__failure __msg("Expected an initialized dynptr as arg #1") int invalid_write1(void *ctx) { struct bpf_dynptr ptr; @@ -402,6 +420,7 @@ int invalid_write1(void *ctx) * offset */ SEC("?raw_tp") +__failure __msg("Expected an initialized dynptr as arg #3") int invalid_write2(void *ctx) { struct bpf_dynptr ptr; @@ -425,6 +444,7 @@ int invalid_write2(void *ctx) * non-const offset */ SEC("?raw_tp") +__failure __msg("Expected an initialized dynptr as arg #1") int invalid_write3(void *ctx) { struct bpf_dynptr ptr; @@ -456,6 +476,7 @@ static int invalid_write4_callback(__u32 index, void *data) * be invalidated as a dynptr */ SEC("?raw_tp") +__failure __msg("arg 1 is an unacquired reference") int invalid_write4(void *ctx) { struct bpf_dynptr ptr; @@ -472,7 +493,9 @@ int invalid_write4(void *ctx) /* A globally-defined bpf_dynptr can't be used (it must reside as a stack frame) */ struct 
bpf_dynptr global_dynptr; + SEC("?raw_tp") +__failure __msg("type=map_value expected=fp") int global(void *ctx) { /* this should fail */ @@ -485,6 +508,7 @@ int global(void *ctx) /* A direct read should fail */ SEC("?raw_tp") +__failure __msg("invalid read from stack") int invalid_read1(void *ctx) { struct bpf_dynptr ptr; @@ -501,6 +525,7 @@ int invalid_read1(void *ctx) /* A direct read at an offset should fail */ SEC("?raw_tp") +__failure __msg("cannot pass in dynptr at an offset") int invalid_read2(void *ctx) { struct bpf_dynptr ptr; @@ -516,6 +541,7 @@ int invalid_read2(void *ctx) /* A direct read at an offset into the lower stack slot should fail */ SEC("?raw_tp") +__failure __msg("invalid read from stack") int invalid_read3(void *ctx) { struct bpf_dynptr ptr1, ptr2; @@ -542,6 +568,7 @@ static int invalid_read4_callback(__u32 index, void *data) /* A direct read within a callback function should fail */ SEC("?raw_tp") +__failure __msg("invalid read from stack") int invalid_read4(void *ctx) { struct bpf_dynptr ptr; @@ -557,6 +584,7 @@ int invalid_read4(void *ctx) /* Initializing a dynptr on an offset should fail */ SEC("?raw_tp") +__failure __msg("invalid write to stack") int invalid_offset(void *ctx) { struct bpf_dynptr ptr; @@ -571,6 +599,7 @@ int invalid_offset(void *ctx) /* Can't release a dynptr twice */ SEC("?raw_tp") +__failure __msg("arg 1 is an unacquired reference") int release_twice(void *ctx) { struct bpf_dynptr ptr; @@ -597,6 +626,7 @@ static int release_twice_callback_fn(__u32 index, void *data) * within a calback function, fails */ SEC("?raw_tp") +__failure __msg("arg 1 is an unacquired reference") int release_twice_callback(void *ctx) { struct bpf_dynptr ptr; @@ -612,6 +642,7 @@ int release_twice_callback(void *ctx) /* Reject unsupported local mem types for dynptr_from_mem API */ SEC("?raw_tp") +__failure __msg("Unsupported reg type fp for bpf_dynptr_from_mem data") int dynptr_from_mem_invalid_api(void *ctx) { struct bpf_dynptr ptr; diff --git a/tools/testing/selftests/bpf/progs/dynptr_success.c b/tools/testing/selftests/bpf/progs/dynptr_success.c index a3a6103c8569..35db7c6c1fc7 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_success.c +++ b/tools/testing/selftests/bpf/progs/dynptr_success.c @@ -20,6 +20,7 @@ struct sample { struct { __uint(type, BPF_MAP_TYPE_RINGBUF); + __uint(max_entries, 4096); } ringbuf SEC(".maps"); struct { diff --git a/tools/testing/selftests/bpf/progs/empty_skb.c b/tools/testing/selftests/bpf/progs/empty_skb.c new file mode 100644 index 000000000000..4b0cd6753251 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/empty_skb.c @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> + +char _license[] SEC("license") = "GPL"; + +int ifindex; +int ret; + +SEC("lwt_xmit") +int redirect_ingress(struct __sk_buff *skb) +{ + ret = bpf_clone_redirect(skb, ifindex, BPF_F_INGRESS); + return 0; +} + +SEC("lwt_xmit") +int redirect_egress(struct __sk_buff *skb) +{ + ret = bpf_clone_redirect(skb, ifindex, 0); + return 0; +} + +SEC("tc") +int tc_redirect_ingress(struct __sk_buff *skb) +{ + ret = bpf_clone_redirect(skb, ifindex, BPF_F_INGRESS); + return 0; +} + +SEC("tc") +int tc_redirect_egress(struct __sk_buff *skb) +{ + ret = bpf_clone_redirect(skb, ifindex, 0); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi.c b/tools/testing/selftests/bpf/progs/kprobe_multi.c index 98c3399e15c0..9e1ca8e34913 100644 --- 
a/tools/testing/selftests/bpf/progs/kprobe_multi.c +++ b/tools/testing/selftests/bpf/progs/kprobe_multi.c @@ -110,3 +110,53 @@ int test_kretprobe_manual(struct pt_regs *ctx) kprobe_multi_check(ctx, true); return 0; } + +extern const void bpf_testmod_fentry_test1 __ksym; +extern const void bpf_testmod_fentry_test2 __ksym; +extern const void bpf_testmod_fentry_test3 __ksym; + +__u64 kprobe_testmod_test1_result = 0; +__u64 kprobe_testmod_test2_result = 0; +__u64 kprobe_testmod_test3_result = 0; + +__u64 kretprobe_testmod_test1_result = 0; +__u64 kretprobe_testmod_test2_result = 0; +__u64 kretprobe_testmod_test3_result = 0; + +static void kprobe_multi_testmod_check(void *ctx, bool is_return) +{ + if (bpf_get_current_pid_tgid() >> 32 != pid) + return; + + __u64 addr = bpf_get_func_ip(ctx); + + if (is_return) { + if ((const void *) addr == &bpf_testmod_fentry_test1) + kretprobe_testmod_test1_result = 1; + if ((const void *) addr == &bpf_testmod_fentry_test2) + kretprobe_testmod_test2_result = 1; + if ((const void *) addr == &bpf_testmod_fentry_test3) + kretprobe_testmod_test3_result = 1; + } else { + if ((const void *) addr == &bpf_testmod_fentry_test1) + kprobe_testmod_test1_result = 1; + if ((const void *) addr == &bpf_testmod_fentry_test2) + kprobe_testmod_test2_result = 1; + if ((const void *) addr == &bpf_testmod_fentry_test3) + kprobe_testmod_test3_result = 1; + } +} + +SEC("kprobe.multi") +int test_kprobe_testmod(struct pt_regs *ctx) +{ + kprobe_multi_testmod_check(ctx, false); + return 0; +} + +SEC("kretprobe.multi") +int test_kretprobe_testmod(struct pt_regs *ctx) +{ + kprobe_multi_testmod_check(ctx, true); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/linked_list.c b/tools/testing/selftests/bpf/progs/linked_list.c new file mode 100644 index 000000000000..4ad88da5cda2 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/linked_list.c @@ -0,0 +1,385 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> +#include "bpf_experimental.h" + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#include "linked_list.h" + +static __always_inline +int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool leave_in_map) +{ + struct bpf_list_node *n; + struct foo *f; + + f = bpf_obj_new(typeof(*f)); + if (!f) + return 2; + + bpf_spin_lock(lock); + n = bpf_list_pop_front(head); + bpf_spin_unlock(lock); + if (n) { + bpf_obj_drop(container_of(n, struct foo, node)); + bpf_obj_drop(f); + return 3; + } + + bpf_spin_lock(lock); + n = bpf_list_pop_back(head); + bpf_spin_unlock(lock); + if (n) { + bpf_obj_drop(container_of(n, struct foo, node)); + bpf_obj_drop(f); + return 4; + } + + + bpf_spin_lock(lock); + f->data = 42; + bpf_list_push_front(head, &f->node); + bpf_spin_unlock(lock); + if (leave_in_map) + return 0; + bpf_spin_lock(lock); + n = bpf_list_pop_back(head); + bpf_spin_unlock(lock); + if (!n) + return 5; + f = container_of(n, struct foo, node); + if (f->data != 42) { + bpf_obj_drop(f); + return 6; + } + + bpf_spin_lock(lock); + f->data = 13; + bpf_list_push_front(head, &f->node); + bpf_spin_unlock(lock); + bpf_spin_lock(lock); + n = bpf_list_pop_front(head); + bpf_spin_unlock(lock); + if (!n) + return 7; + f = container_of(n, struct foo, node); + if (f->data != 13) { + bpf_obj_drop(f); + return 8; + } + bpf_obj_drop(f); + + bpf_spin_lock(lock); + n = bpf_list_pop_front(head); + bpf_spin_unlock(lock); + if (n) { + 
bpf_obj_drop(container_of(n, struct foo, node)); + return 9; + } + + bpf_spin_lock(lock); + n = bpf_list_pop_back(head); + bpf_spin_unlock(lock); + if (n) { + bpf_obj_drop(container_of(n, struct foo, node)); + return 10; + } + return 0; +} + + +static __always_inline +int list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool leave_in_map) +{ + struct bpf_list_node *n; + struct foo *f[8], *pf; + int i; + + /* Loop following this check adds nodes 2-at-a-time in order to + * validate multiple release_on_unlock release logic + */ + if (ARRAY_SIZE(f) % 2) + return 10; + + for (i = 0; i < ARRAY_SIZE(f); i += 2) { + f[i] = bpf_obj_new(typeof(**f)); + if (!f[i]) + return 2; + f[i]->data = i; + + f[i + 1] = bpf_obj_new(typeof(**f)); + if (!f[i + 1]) { + bpf_obj_drop(f[i]); + return 9; + } + f[i + 1]->data = i + 1; + + bpf_spin_lock(lock); + bpf_list_push_front(head, &f[i]->node); + bpf_list_push_front(head, &f[i + 1]->node); + bpf_spin_unlock(lock); + } + + for (i = 0; i < ARRAY_SIZE(f); i++) { + bpf_spin_lock(lock); + n = bpf_list_pop_front(head); + bpf_spin_unlock(lock); + if (!n) + return 3; + pf = container_of(n, struct foo, node); + if (pf->data != (ARRAY_SIZE(f) - i - 1)) { + bpf_obj_drop(pf); + return 4; + } + bpf_spin_lock(lock); + bpf_list_push_back(head, &pf->node); + bpf_spin_unlock(lock); + } + + if (leave_in_map) + return 0; + + for (i = 0; i < ARRAY_SIZE(f); i++) { + bpf_spin_lock(lock); + n = bpf_list_pop_back(head); + bpf_spin_unlock(lock); + if (!n) + return 5; + pf = container_of(n, struct foo, node); + if (pf->data != i) { + bpf_obj_drop(pf); + return 6; + } + bpf_obj_drop(pf); + } + bpf_spin_lock(lock); + n = bpf_list_pop_back(head); + bpf_spin_unlock(lock); + if (n) { + bpf_obj_drop(container_of(n, struct foo, node)); + return 7; + } + + bpf_spin_lock(lock); + n = bpf_list_pop_front(head); + bpf_spin_unlock(lock); + if (n) { + bpf_obj_drop(container_of(n, struct foo, node)); + return 8; + } + return 0; +} + +static __always_inline +int list_in_list(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool leave_in_map) +{ + struct bpf_list_node *n; + struct bar *ba[8], *b; + struct foo *f; + int i; + + f = bpf_obj_new(typeof(*f)); + if (!f) + return 2; + for (i = 0; i < ARRAY_SIZE(ba); i++) { + b = bpf_obj_new(typeof(*b)); + if (!b) { + bpf_obj_drop(f); + return 3; + } + b->data = i; + bpf_spin_lock(&f->lock); + bpf_list_push_back(&f->head, &b->node); + bpf_spin_unlock(&f->lock); + } + + bpf_spin_lock(lock); + f->data = 42; + bpf_list_push_front(head, &f->node); + bpf_spin_unlock(lock); + + if (leave_in_map) + return 0; + + bpf_spin_lock(lock); + n = bpf_list_pop_front(head); + bpf_spin_unlock(lock); + if (!n) + return 4; + f = container_of(n, struct foo, node); + if (f->data != 42) { + bpf_obj_drop(f); + return 5; + } + + for (i = 0; i < ARRAY_SIZE(ba); i++) { + bpf_spin_lock(&f->lock); + n = bpf_list_pop_front(&f->head); + bpf_spin_unlock(&f->lock); + if (!n) { + bpf_obj_drop(f); + return 6; + } + b = container_of(n, struct bar, node); + if (b->data != i) { + bpf_obj_drop(f); + bpf_obj_drop(b); + return 7; + } + bpf_obj_drop(b); + } + bpf_spin_lock(&f->lock); + n = bpf_list_pop_front(&f->head); + bpf_spin_unlock(&f->lock); + if (n) { + bpf_obj_drop(f); + bpf_obj_drop(container_of(n, struct bar, node)); + return 8; + } + bpf_obj_drop(f); + return 0; +} + +static __always_inline +int test_list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head) +{ + int ret; + + ret = list_push_pop(lock, head, false); + if (ret) + return ret; + 
return list_push_pop(lock, head, true); +} + +static __always_inline +int test_list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *head) +{ + int ret; + + ret = list_push_pop_multiple(lock ,head, false); + if (ret) + return ret; + return list_push_pop_multiple(lock, head, true); +} + +static __always_inline +int test_list_in_list(struct bpf_spin_lock *lock, struct bpf_list_head *head) +{ + int ret; + + ret = list_in_list(lock, head, false); + if (ret) + return ret; + return list_in_list(lock, head, true); +} + +SEC("tc") +int map_list_push_pop(void *ctx) +{ + struct map_value *v; + + v = bpf_map_lookup_elem(&array_map, &(int){0}); + if (!v) + return 1; + return test_list_push_pop(&v->lock, &v->head); +} + +SEC("tc") +int inner_map_list_push_pop(void *ctx) +{ + struct map_value *v; + void *map; + + map = bpf_map_lookup_elem(&map_of_maps, &(int){0}); + if (!map) + return 1; + v = bpf_map_lookup_elem(map, &(int){0}); + if (!v) + return 1; + return test_list_push_pop(&v->lock, &v->head); +} + +SEC("tc") +int global_list_push_pop(void *ctx) +{ + return test_list_push_pop(&glock, &ghead); +} + +SEC("tc") +int map_list_push_pop_multiple(void *ctx) +{ + struct map_value *v; + int ret; + + v = bpf_map_lookup_elem(&array_map, &(int){0}); + if (!v) + return 1; + return test_list_push_pop_multiple(&v->lock, &v->head); +} + +SEC("tc") +int inner_map_list_push_pop_multiple(void *ctx) +{ + struct map_value *v; + void *map; + int ret; + + map = bpf_map_lookup_elem(&map_of_maps, &(int){0}); + if (!map) + return 1; + v = bpf_map_lookup_elem(map, &(int){0}); + if (!v) + return 1; + return test_list_push_pop_multiple(&v->lock, &v->head); +} + +SEC("tc") +int global_list_push_pop_multiple(void *ctx) +{ + int ret; + + ret = list_push_pop_multiple(&glock, &ghead, false); + if (ret) + return ret; + return list_push_pop_multiple(&glock, &ghead, true); +} + +SEC("tc") +int map_list_in_list(void *ctx) +{ + struct map_value *v; + int ret; + + v = bpf_map_lookup_elem(&array_map, &(int){0}); + if (!v) + return 1; + return test_list_in_list(&v->lock, &v->head); +} + +SEC("tc") +int inner_map_list_in_list(void *ctx) +{ + struct map_value *v; + void *map; + int ret; + + map = bpf_map_lookup_elem(&map_of_maps, &(int){0}); + if (!map) + return 1; + v = bpf_map_lookup_elem(map, &(int){0}); + if (!v) + return 1; + return test_list_in_list(&v->lock, &v->head); +} + +SEC("tc") +int global_list_in_list(void *ctx) +{ + return test_list_in_list(&glock, &ghead); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/linked_list.h b/tools/testing/selftests/bpf/progs/linked_list.h new file mode 100644 index 000000000000..3fb2412552fc --- /dev/null +++ b/tools/testing/selftests/bpf/progs/linked_list.h @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0 +#ifndef LINKED_LIST_H +#define LINKED_LIST_H + +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include "bpf_experimental.h" + +struct bar { + struct bpf_list_node node; + int data; +}; + +struct foo { + struct bpf_list_node node; + struct bpf_list_head head __contains(bar, node); + struct bpf_spin_lock lock; + int data; + struct bpf_list_node node2; +}; + +struct map_value { + struct bpf_spin_lock lock; + int data; + struct bpf_list_head head __contains(foo, node); +}; + +struct array_map { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, int); + __type(value, struct map_value); + __uint(max_entries, 1); +}; + +struct array_map array_map SEC(".maps"); +struct array_map inner_map SEC(".maps"); + +struct { + __uint(type, 
BPF_MAP_TYPE_ARRAY_OF_MAPS); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); + __array(values, struct array_map); +} map_of_maps SEC(".maps") = { + .values = { + [0] = &inner_map, + }, +}; + +#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8))) + +private(A) struct bpf_spin_lock glock; +private(A) struct bpf_list_head ghead __contains(foo, node); +private(B) struct bpf_spin_lock glock2; + +#endif diff --git a/tools/testing/selftests/bpf/progs/linked_list_fail.c b/tools/testing/selftests/bpf/progs/linked_list_fail.c new file mode 100644 index 000000000000..1d9017240e19 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/linked_list_fail.c @@ -0,0 +1,581 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> +#include "bpf_experimental.h" + +#include "linked_list.h" + +#define INIT \ + struct map_value *v, *v2, *iv, *iv2; \ + struct foo *f, *f1, *f2; \ + struct bar *b; \ + void *map; \ + \ + map = bpf_map_lookup_elem(&map_of_maps, &(int){ 0 }); \ + if (!map) \ + return 0; \ + v = bpf_map_lookup_elem(&array_map, &(int){ 0 }); \ + if (!v) \ + return 0; \ + v2 = bpf_map_lookup_elem(&array_map, &(int){ 0 }); \ + if (!v2) \ + return 0; \ + iv = bpf_map_lookup_elem(map, &(int){ 0 }); \ + if (!iv) \ + return 0; \ + iv2 = bpf_map_lookup_elem(map, &(int){ 0 }); \ + if (!iv2) \ + return 0; \ + f = bpf_obj_new(typeof(*f)); \ + if (!f) \ + return 0; \ + f1 = f; \ + f2 = bpf_obj_new(typeof(*f2)); \ + if (!f2) { \ + bpf_obj_drop(f1); \ + return 0; \ + } \ + b = bpf_obj_new(typeof(*b)); \ + if (!b) { \ + bpf_obj_drop(f2); \ + bpf_obj_drop(f1); \ + return 0; \ + } + +#define CHECK(test, op, hexpr) \ + SEC("?tc") \ + int test##_missing_lock_##op(void *ctx) \ + { \ + INIT; \ + void (*p)(void *) = (void *)&bpf_list_##op; \ + p(hexpr); \ + return 0; \ + } + +CHECK(kptr, push_front, &f->head); +CHECK(kptr, push_back, &f->head); +CHECK(kptr, pop_front, &f->head); +CHECK(kptr, pop_back, &f->head); + +CHECK(global, push_front, &ghead); +CHECK(global, push_back, &ghead); +CHECK(global, pop_front, &ghead); +CHECK(global, pop_back, &ghead); + +CHECK(map, push_front, &v->head); +CHECK(map, push_back, &v->head); +CHECK(map, pop_front, &v->head); +CHECK(map, pop_back, &v->head); + +CHECK(inner_map, push_front, &iv->head); +CHECK(inner_map, push_back, &iv->head); +CHECK(inner_map, pop_front, &iv->head); +CHECK(inner_map, pop_back, &iv->head); + +#undef CHECK + +#define CHECK(test, op, lexpr, hexpr) \ + SEC("?tc") \ + int test##_incorrect_lock_##op(void *ctx) \ + { \ + INIT; \ + void (*p)(void *) = (void *)&bpf_list_##op; \ + bpf_spin_lock(lexpr); \ + p(hexpr); \ + return 0; \ + } + +#define CHECK_OP(op) \ + CHECK(kptr_kptr, op, &f1->lock, &f2->head); \ + CHECK(kptr_global, op, &f1->lock, &ghead); \ + CHECK(kptr_map, op, &f1->lock, &v->head); \ + CHECK(kptr_inner_map, op, &f1->lock, &iv->head); \ + \ + CHECK(global_global, op, &glock2, &ghead); \ + CHECK(global_kptr, op, &glock, &f1->head); \ + CHECK(global_map, op, &glock, &v->head); \ + CHECK(global_inner_map, op, &glock, &iv->head); \ + \ + CHECK(map_map, op, &v->lock, &v2->head); \ + CHECK(map_kptr, op, &v->lock, &f2->head); \ + CHECK(map_global, op, &v->lock, &ghead); \ + CHECK(map_inner_map, op, &v->lock, &iv->head); \ + \ + CHECK(inner_map_inner_map, op, &iv->lock, &iv2->head); \ + CHECK(inner_map_kptr, op, &iv->lock, &f2->head); \ + CHECK(inner_map_global, op, &iv->lock, &ghead); \ + CHECK(inner_map_map, op, &iv->lock, 
&v->head); + +CHECK_OP(push_front); +CHECK_OP(push_back); +CHECK_OP(pop_front); +CHECK_OP(pop_back); + +#undef CHECK +#undef CHECK_OP +#undef INIT + +SEC("?kprobe/xyz") +int map_compat_kprobe(void *ctx) +{ + bpf_list_push_front(&ghead, NULL); + return 0; +} + +SEC("?kretprobe/xyz") +int map_compat_kretprobe(void *ctx) +{ + bpf_list_push_front(&ghead, NULL); + return 0; +} + +SEC("?tracepoint/xyz") +int map_compat_tp(void *ctx) +{ + bpf_list_push_front(&ghead, NULL); + return 0; +} + +SEC("?perf_event") +int map_compat_perf(void *ctx) +{ + bpf_list_push_front(&ghead, NULL); + return 0; +} + +SEC("?raw_tp/xyz") +int map_compat_raw_tp(void *ctx) +{ + bpf_list_push_front(&ghead, NULL); + return 0; +} + +SEC("?raw_tp.w/xyz") +int map_compat_raw_tp_w(void *ctx) +{ + bpf_list_push_front(&ghead, NULL); + return 0; +} + +SEC("?tc") +int obj_type_id_oor(void *ctx) +{ + bpf_obj_new_impl(~0UL, NULL); + return 0; +} + +SEC("?tc") +int obj_new_no_composite(void *ctx) +{ + bpf_obj_new_impl(bpf_core_type_id_local(int), (void *)42); + return 0; +} + +SEC("?tc") +int obj_new_no_struct(void *ctx) +{ + + bpf_obj_new(union { int data; unsigned udata; }); + return 0; +} + +SEC("?tc") +int obj_drop_non_zero_off(void *ctx) +{ + void *f; + + f = bpf_obj_new(struct foo); + if (!f) + return 0; + bpf_obj_drop(f+1); + return 0; +} + +SEC("?tc") +int new_null_ret(void *ctx) +{ + return bpf_obj_new(struct foo)->data; +} + +SEC("?tc") +int obj_new_acq(void *ctx) +{ + bpf_obj_new(struct foo); + return 0; +} + +SEC("?tc") +int use_after_drop(void *ctx) +{ + struct foo *f; + + f = bpf_obj_new(typeof(*f)); + if (!f) + return 0; + bpf_obj_drop(f); + return f->data; +} + +SEC("?tc") +int ptr_walk_scalar(void *ctx) +{ + struct test1 { + struct test2 { + struct test2 *next; + } *ptr; + } *p; + + p = bpf_obj_new(typeof(*p)); + if (!p) + return 0; + bpf_this_cpu_ptr(p->ptr); + return 0; +} + +SEC("?tc") +int direct_read_lock(void *ctx) +{ + struct foo *f; + + f = bpf_obj_new(typeof(*f)); + if (!f) + return 0; + return *(int *)&f->lock; +} + +SEC("?tc") +int direct_write_lock(void *ctx) +{ + struct foo *f; + + f = bpf_obj_new(typeof(*f)); + if (!f) + return 0; + *(int *)&f->lock = 0; + return 0; +} + +SEC("?tc") +int direct_read_head(void *ctx) +{ + struct foo *f; + + f = bpf_obj_new(typeof(*f)); + if (!f) + return 0; + return *(int *)&f->head; +} + +SEC("?tc") +int direct_write_head(void *ctx) +{ + struct foo *f; + + f = bpf_obj_new(typeof(*f)); + if (!f) + return 0; + *(int *)&f->head = 0; + return 0; +} + +SEC("?tc") +int direct_read_node(void *ctx) +{ + struct foo *f; + + f = bpf_obj_new(typeof(*f)); + if (!f) + return 0; + return *(int *)&f->node; +} + +SEC("?tc") +int direct_write_node(void *ctx) +{ + struct foo *f; + + f = bpf_obj_new(typeof(*f)); + if (!f) + return 0; + *(int *)&f->node = 0; + return 0; +} + +static __always_inline +int write_after_op(void (*push_op)(void *head, void *node)) +{ + struct foo *f; + + f = bpf_obj_new(typeof(*f)); + if (!f) + return 0; + bpf_spin_lock(&glock); + push_op(&ghead, &f->node); + f->data = 42; + bpf_spin_unlock(&glock); + + return 0; +} + +SEC("?tc") +int write_after_push_front(void *ctx) +{ + return write_after_op((void *)bpf_list_push_front); +} + +SEC("?tc") +int write_after_push_back(void *ctx) +{ + return write_after_op((void *)bpf_list_push_back); +} + +static __always_inline +int use_after_unlock(void (*op)(void *head, void *node)) +{ + struct foo *f; + + f = bpf_obj_new(typeof(*f)); + if (!f) + return 0; + bpf_spin_lock(&glock); + f->data = 42; + op(&ghead, &f->node); + 
bpf_spin_unlock(&glock); + + return f->data; +} + +SEC("?tc") +int use_after_unlock_push_front(void *ctx) +{ + return use_after_unlock((void *)bpf_list_push_front); +} + +SEC("?tc") +int use_after_unlock_push_back(void *ctx) +{ + return use_after_unlock((void *)bpf_list_push_back); +} + +static __always_inline +int list_double_add(void (*op)(void *head, void *node)) +{ + struct foo *f; + + f = bpf_obj_new(typeof(*f)); + if (!f) + return 0; + bpf_spin_lock(&glock); + op(&ghead, &f->node); + op(&ghead, &f->node); + bpf_spin_unlock(&glock); + + return 0; +} + +SEC("?tc") +int double_push_front(void *ctx) +{ + return list_double_add((void *)bpf_list_push_front); +} + +SEC("?tc") +int double_push_back(void *ctx) +{ + return list_double_add((void *)bpf_list_push_back); +} + +SEC("?tc") +int no_node_value_type(void *ctx) +{ + void *p; + + p = bpf_obj_new(struct { int data; }); + if (!p) + return 0; + bpf_spin_lock(&glock); + bpf_list_push_front(&ghead, p); + bpf_spin_unlock(&glock); + + return 0; +} + +SEC("?tc") +int incorrect_value_type(void *ctx) +{ + struct bar *b; + + b = bpf_obj_new(typeof(*b)); + if (!b) + return 0; + bpf_spin_lock(&glock); + bpf_list_push_front(&ghead, &b->node); + bpf_spin_unlock(&glock); + + return 0; +} + +SEC("?tc") +int incorrect_node_var_off(struct __sk_buff *ctx) +{ + struct foo *f; + + f = bpf_obj_new(typeof(*f)); + if (!f) + return 0; + bpf_spin_lock(&glock); + bpf_list_push_front(&ghead, (void *)&f->node + ctx->protocol); + bpf_spin_unlock(&glock); + + return 0; +} + +SEC("?tc") +int incorrect_node_off1(void *ctx) +{ + struct foo *f; + + f = bpf_obj_new(typeof(*f)); + if (!f) + return 0; + bpf_spin_lock(&glock); + bpf_list_push_front(&ghead, (void *)&f->node + 1); + bpf_spin_unlock(&glock); + + return 0; +} + +SEC("?tc") +int incorrect_node_off2(void *ctx) +{ + struct foo *f; + + f = bpf_obj_new(typeof(*f)); + if (!f) + return 0; + bpf_spin_lock(&glock); + bpf_list_push_front(&ghead, &f->node2); + bpf_spin_unlock(&glock); + + return 0; +} + +SEC("?tc") +int no_head_type(void *ctx) +{ + void *p; + + p = bpf_obj_new(typeof(struct { int data; })); + if (!p) + return 0; + bpf_spin_lock(&glock); + bpf_list_push_front(p, NULL); + bpf_spin_lock(&glock); + + return 0; +} + +SEC("?tc") +int incorrect_head_var_off1(struct __sk_buff *ctx) +{ + struct foo *f; + + f = bpf_obj_new(typeof(*f)); + if (!f) + return 0; + bpf_spin_lock(&glock); + bpf_list_push_front((void *)&ghead + ctx->protocol, &f->node); + bpf_spin_unlock(&glock); + + return 0; +} + +SEC("?tc") +int incorrect_head_var_off2(struct __sk_buff *ctx) +{ + struct foo *f; + + f = bpf_obj_new(typeof(*f)); + if (!f) + return 0; + bpf_spin_lock(&glock); + bpf_list_push_front((void *)&f->head + ctx->protocol, &f->node); + bpf_spin_unlock(&glock); + + return 0; +} + +SEC("?tc") +int incorrect_head_off1(void *ctx) +{ + struct foo *f; + struct bar *b; + + f = bpf_obj_new(typeof(*f)); + if (!f) + return 0; + b = bpf_obj_new(typeof(*b)); + if (!b) { + bpf_obj_drop(f); + return 0; + } + + bpf_spin_lock(&f->lock); + bpf_list_push_front((void *)&f->head + 1, &b->node); + bpf_spin_unlock(&f->lock); + + return 0; +} + +SEC("?tc") +int incorrect_head_off2(void *ctx) +{ + struct foo *f; + struct bar *b; + + f = bpf_obj_new(typeof(*f)); + if (!f) + return 0; + + bpf_spin_lock(&glock); + bpf_list_push_front((void *)&ghead + 1, &f->node); + bpf_spin_unlock(&glock); + + return 0; +} + +static __always_inline +int pop_ptr_off(void *(*op)(void *head)) +{ + struct { + struct bpf_list_head head __contains(foo, node2); + struct bpf_spin_lock 
lock; + } *p; + struct bpf_list_node *n; + + p = bpf_obj_new(typeof(*p)); + if (!p) + return 0; + bpf_spin_lock(&p->lock); + n = op(&p->head); + bpf_spin_unlock(&p->lock); + + bpf_this_cpu_ptr(n); + return 0; +} + +SEC("?tc") +int pop_front_off(void *ctx) +{ + return pop_ptr_off((void *)bpf_list_pop_front); +} + +SEC("?tc") +int pop_back_off(void *ctx) +{ + return pop_ptr_off((void *)bpf_list_pop_back); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/lsm_cgroup.c b/tools/testing/selftests/bpf/progs/lsm_cgroup.c index 4f2d60b87b75..02c11d16b692 100644 --- a/tools/testing/selftests/bpf/progs/lsm_cgroup.c +++ b/tools/testing/selftests/bpf/progs/lsm_cgroup.c @@ -7,6 +7,10 @@ char _license[] SEC("license") = "GPL"; +extern bool CONFIG_SECURITY_SELINUX __kconfig __weak; +extern bool CONFIG_SECURITY_SMACK __kconfig __weak; +extern bool CONFIG_SECURITY_APPARMOR __kconfig __weak; + #ifndef AF_PACKET #define AF_PACKET 17 #endif @@ -140,6 +144,10 @@ SEC("lsm_cgroup/sk_alloc_security") int BPF_PROG(socket_alloc, struct sock *sk, int family, gfp_t priority) { called_socket_alloc++; + /* if already have non-bpf lsms installed, EPERM will cause memory leak of non-bpf lsms */ + if (CONFIG_SECURITY_SELINUX || CONFIG_SECURITY_SMACK || CONFIG_SECURITY_APPARMOR) + return 1; + if (family == AF_UNIX) return 0; /* EPERM */ diff --git a/tools/testing/selftests/bpf/progs/map_kptr_fail.c b/tools/testing/selftests/bpf/progs/map_kptr_fail.c index 05e209b1b12a..760e41e1a632 100644 --- a/tools/testing/selftests/bpf/progs/map_kptr_fail.c +++ b/tools/testing/selftests/bpf/progs/map_kptr_fail.c @@ -3,6 +3,7 @@ #include <bpf/bpf_tracing.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_core_read.h> +#include "bpf_misc.h" struct map_value { char buf[8]; @@ -23,6 +24,7 @@ extern struct prog_test_ref_kfunc * bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **p, int a, int b) __ksym; SEC("?tc") +__failure __msg("kptr access size must be BPF_DW") int size_not_bpf_dw(struct __sk_buff *ctx) { struct map_value *v; @@ -37,6 +39,7 @@ int size_not_bpf_dw(struct __sk_buff *ctx) } SEC("?tc") +__failure __msg("kptr access cannot have variable offset") int non_const_var_off(struct __sk_buff *ctx) { struct map_value *v; @@ -55,6 +58,7 @@ int non_const_var_off(struct __sk_buff *ctx) } SEC("?tc") +__failure __msg("R1 doesn't have constant offset. 
kptr has to be") int non_const_var_off_kptr_xchg(struct __sk_buff *ctx) { struct map_value *v; @@ -73,6 +77,7 @@ int non_const_var_off_kptr_xchg(struct __sk_buff *ctx) } SEC("?tc") +__failure __msg("kptr access misaligned expected=8 off=7") int misaligned_access_write(struct __sk_buff *ctx) { struct map_value *v; @@ -88,6 +93,7 @@ int misaligned_access_write(struct __sk_buff *ctx) } SEC("?tc") +__failure __msg("kptr access misaligned expected=8 off=1") int misaligned_access_read(struct __sk_buff *ctx) { struct map_value *v; @@ -101,6 +107,7 @@ int misaligned_access_read(struct __sk_buff *ctx) } SEC("?tc") +__failure __msg("variable untrusted_ptr_ access var_off=(0x0; 0x1e0)") int reject_var_off_store(struct __sk_buff *ctx) { struct prog_test_ref_kfunc *unref_ptr; @@ -124,6 +131,7 @@ int reject_var_off_store(struct __sk_buff *ctx) } SEC("?tc") +__failure __msg("invalid kptr access, R1 type=untrusted_ptr_prog_test_ref_kfunc") int reject_bad_type_match(struct __sk_buff *ctx) { struct prog_test_ref_kfunc *unref_ptr; @@ -144,6 +152,7 @@ int reject_bad_type_match(struct __sk_buff *ctx) } SEC("?tc") +__failure __msg("R1 type=untrusted_ptr_or_null_ expected=percpu_ptr_") int marked_as_untrusted_or_null(struct __sk_buff *ctx) { struct map_value *v; @@ -158,6 +167,7 @@ int marked_as_untrusted_or_null(struct __sk_buff *ctx) } SEC("?tc") +__failure __msg("access beyond struct prog_test_ref_kfunc at off 32 size 4") int correct_btf_id_check_size(struct __sk_buff *ctx) { struct prog_test_ref_kfunc *p; @@ -175,6 +185,7 @@ int correct_btf_id_check_size(struct __sk_buff *ctx) } SEC("?tc") +__failure __msg("R1 type=untrusted_ptr_ expected=percpu_ptr_") int inherit_untrusted_on_walk(struct __sk_buff *ctx) { struct prog_test_ref_kfunc *unref_ptr; @@ -194,6 +205,7 @@ int inherit_untrusted_on_walk(struct __sk_buff *ctx) } SEC("?tc") +__failure __msg("off=8 kptr isn't referenced kptr") int reject_kptr_xchg_on_unref(struct __sk_buff *ctx) { struct map_value *v; @@ -208,6 +220,7 @@ int reject_kptr_xchg_on_unref(struct __sk_buff *ctx) } SEC("?tc") +__failure __msg("arg#0 expected pointer to map value") int reject_kptr_get_no_map_val(struct __sk_buff *ctx) { bpf_kfunc_call_test_kptr_get((void *)&ctx, 0, 0); @@ -215,6 +228,7 @@ int reject_kptr_get_no_map_val(struct __sk_buff *ctx) } SEC("?tc") +__failure __msg("arg#0 expected pointer to map value") int reject_kptr_get_no_null_map_val(struct __sk_buff *ctx) { bpf_kfunc_call_test_kptr_get(bpf_map_lookup_elem(&array_map, &(int){0}), 0, 0); @@ -222,6 +236,7 @@ int reject_kptr_get_no_null_map_val(struct __sk_buff *ctx) } SEC("?tc") +__failure __msg("arg#0 no referenced kptr at map value offset=0") int reject_kptr_get_no_kptr(struct __sk_buff *ctx) { struct map_value *v; @@ -236,6 +251,7 @@ int reject_kptr_get_no_kptr(struct __sk_buff *ctx) } SEC("?tc") +__failure __msg("arg#0 no referenced kptr at map value offset=8") int reject_kptr_get_on_unref(struct __sk_buff *ctx) { struct map_value *v; @@ -250,6 +266,7 @@ int reject_kptr_get_on_unref(struct __sk_buff *ctx) } SEC("?tc") +__failure __msg("kernel function bpf_kfunc_call_test_kptr_get args#0") int reject_kptr_get_bad_type_match(struct __sk_buff *ctx) { struct map_value *v; @@ -264,6 +281,7 @@ int reject_kptr_get_bad_type_match(struct __sk_buff *ctx) } SEC("?tc") +__failure __msg("R1 type=untrusted_ptr_or_null_ expected=percpu_ptr_") int mark_ref_as_untrusted_or_null(struct __sk_buff *ctx) { struct map_value *v; @@ -278,6 +296,7 @@ int mark_ref_as_untrusted_or_null(struct __sk_buff *ctx) } SEC("?tc") +__failure 
__msg("store to referenced kptr disallowed") int reject_untrusted_store_to_ref(struct __sk_buff *ctx) { struct prog_test_ref_kfunc *p; @@ -297,6 +316,7 @@ int reject_untrusted_store_to_ref(struct __sk_buff *ctx) } SEC("?tc") +__failure __msg("R2 type=untrusted_ptr_ expected=ptr_") int reject_untrusted_xchg(struct __sk_buff *ctx) { struct prog_test_ref_kfunc *p; @@ -315,6 +335,8 @@ int reject_untrusted_xchg(struct __sk_buff *ctx) } SEC("?tc") +__failure +__msg("invalid kptr access, R2 type=ptr_prog_test_ref_kfunc expected=ptr_prog_test_member") int reject_bad_type_xchg(struct __sk_buff *ctx) { struct prog_test_ref_kfunc *ref_ptr; @@ -333,6 +355,7 @@ int reject_bad_type_xchg(struct __sk_buff *ctx) } SEC("?tc") +__failure __msg("invalid kptr access, R2 type=ptr_prog_test_ref_kfunc") int reject_member_of_ref_xchg(struct __sk_buff *ctx) { struct prog_test_ref_kfunc *ref_ptr; @@ -351,6 +374,7 @@ int reject_member_of_ref_xchg(struct __sk_buff *ctx) } SEC("?syscall") +__failure __msg("kptr cannot be accessed indirectly by helper") int reject_indirect_helper_access(struct __sk_buff *ctx) { struct map_value *v; @@ -371,6 +395,7 @@ int write_func(int *p) } SEC("?tc") +__failure __msg("kptr cannot be accessed indirectly by helper") int reject_indirect_global_func_access(struct __sk_buff *ctx) { struct map_value *v; @@ -384,6 +409,7 @@ int reject_indirect_global_func_access(struct __sk_buff *ctx) } SEC("?tc") +__failure __msg("Unreleased reference id=5 alloc_insn=") int kptr_xchg_ref_state(struct __sk_buff *ctx) { struct prog_test_ref_kfunc *p; @@ -402,6 +428,7 @@ int kptr_xchg_ref_state(struct __sk_buff *ctx) } SEC("?tc") +__failure __msg("Unreleased reference id=3 alloc_insn=") int kptr_get_ref_state(struct __sk_buff *ctx) { struct map_value *v; diff --git a/tools/testing/selftests/bpf/progs/rcu_read_lock.c b/tools/testing/selftests/bpf/progs/rcu_read_lock.c new file mode 100644 index 000000000000..125f908024d3 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/rcu_read_lock.c @@ -0,0 +1,330 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 
*/ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include "bpf_tracing_net.h" +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + +struct { + __uint(type, BPF_MAP_TYPE_TASK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, long); +} map_a SEC(".maps"); + +__u32 user_data, key_serial, target_pid; +__u64 flags, task_storage_val, cgroup_id; + +struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym; +void bpf_key_put(struct bpf_key *key) __ksym; +void bpf_rcu_read_lock(void) __ksym; +void bpf_rcu_read_unlock(void) __ksym; +struct task_struct *bpf_task_acquire_not_zero(struct task_struct *p) __ksym; +void bpf_task_release(struct task_struct *p) __ksym; + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +int get_cgroup_id(void *ctx) +{ + struct task_struct *task; + struct css_set *cgroups; + + task = bpf_get_current_task_btf(); + if (task->pid != target_pid) + return 0; + + /* simulate bpf_get_current_cgroup_id() helper */ + bpf_rcu_read_lock(); + cgroups = task->cgroups; + if (!cgroups) + goto unlock; + cgroup_id = cgroups->dfl_cgrp->kn->id; +unlock: + bpf_rcu_read_unlock(); + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +int task_succ(void *ctx) +{ + struct task_struct *task, *real_parent; + long init_val = 2; + long *ptr; + + task = bpf_get_current_task_btf(); + if (task->pid != target_pid) + return 0; + + bpf_rcu_read_lock(); + /* region including helper using rcu ptr real_parent */ + real_parent = task->real_parent; + if (!real_parent) + goto out; + ptr = bpf_task_storage_get(&map_a, real_parent, &init_val, + BPF_LOCAL_STORAGE_GET_F_CREATE); + if (!ptr) + goto out; + ptr = bpf_task_storage_get(&map_a, real_parent, 0, 0); + if (!ptr) + goto out; + task_storage_val = *ptr; +out: + bpf_rcu_read_unlock(); + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_nanosleep") +int no_lock(void *ctx) +{ + struct task_struct *task, *real_parent; + + /* no bpf_rcu_read_lock(), old code still works */ + task = bpf_get_current_task_btf(); + real_parent = task->real_parent; + (void)bpf_task_storage_get(&map_a, real_parent, 0, 0); + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_nanosleep") +int two_regions(void *ctx) +{ + struct task_struct *task, *real_parent; + + /* two regions */ + task = bpf_get_current_task_btf(); + bpf_rcu_read_lock(); + bpf_rcu_read_unlock(); + bpf_rcu_read_lock(); + real_parent = task->real_parent; + if (!real_parent) + goto out; + (void)bpf_task_storage_get(&map_a, real_parent, 0, 0); +out: + bpf_rcu_read_unlock(); + return 0; +} + +SEC("?fentry/" SYS_PREFIX "sys_getpgid") +int non_sleepable_1(void *ctx) +{ + struct task_struct *task, *real_parent; + + task = bpf_get_current_task_btf(); + bpf_rcu_read_lock(); + real_parent = task->real_parent; + if (!real_parent) + goto out; + (void)bpf_task_storage_get(&map_a, real_parent, 0, 0); +out: + bpf_rcu_read_unlock(); + return 0; +} + +SEC("?fentry/" SYS_PREFIX "sys_getpgid") +int non_sleepable_2(void *ctx) +{ + struct task_struct *task, *real_parent; + + bpf_rcu_read_lock(); + task = bpf_get_current_task_btf(); + bpf_rcu_read_unlock(); + + bpf_rcu_read_lock(); + real_parent = task->real_parent; + if (!real_parent) + goto out; + (void)bpf_task_storage_get(&map_a, real_parent, 0, 0); +out: + bpf_rcu_read_unlock(); + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_nanosleep") +int task_acquire(void *ctx) +{ + struct task_struct *task, *real_parent, *gparent; + + task = bpf_get_current_task_btf(); + bpf_rcu_read_lock(); + 
real_parent = task->real_parent; + if (!real_parent) + goto out; + + /* rcu_ptr->rcu_field */ + gparent = real_parent->real_parent; + if (!gparent) + goto out; + + /* acquire a reference which can be used outside rcu read lock region */ + gparent = bpf_task_acquire_not_zero(gparent); + if (!gparent) + /* Until we resolve the issues with using task->rcu_users, we + * expect bpf_task_acquire_not_zero() to return a NULL task. + * See the comment at the definition of + * bpf_task_acquire_not_zero() for more details. + */ + goto out; + + (void)bpf_task_storage_get(&map_a, gparent, 0, 0); + bpf_task_release(gparent); +out: + bpf_rcu_read_unlock(); + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +int miss_lock(void *ctx) +{ + struct task_struct *task; + struct css_set *cgroups; + struct cgroup *dfl_cgrp; + + /* missing bpf_rcu_read_lock() */ + task = bpf_get_current_task_btf(); + bpf_rcu_read_lock(); + (void)bpf_task_storage_get(&map_a, task, 0, 0); + bpf_rcu_read_unlock(); + bpf_rcu_read_unlock(); + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +int miss_unlock(void *ctx) +{ + struct task_struct *task; + struct css_set *cgroups; + struct cgroup *dfl_cgrp; + + /* missing bpf_rcu_read_unlock() */ + task = bpf_get_current_task_btf(); + bpf_rcu_read_lock(); + (void)bpf_task_storage_get(&map_a, task, 0, 0); + return 0; +} + +SEC("?fentry/" SYS_PREFIX "sys_getpgid") +int non_sleepable_rcu_mismatch(void *ctx) +{ + struct task_struct *task, *real_parent; + + task = bpf_get_current_task_btf(); + /* non-sleepable: missing bpf_rcu_read_unlock() in one path */ + bpf_rcu_read_lock(); + real_parent = task->real_parent; + if (!real_parent) + goto out; + (void)bpf_task_storage_get(&map_a, real_parent, 0, 0); + if (real_parent) + bpf_rcu_read_unlock(); +out: + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +int inproper_sleepable_helper(void *ctx) +{ + struct task_struct *task, *real_parent; + struct pt_regs *regs; + __u32 value = 0; + void *ptr; + + task = bpf_get_current_task_btf(); + /* sleepable helper in rcu read lock region */ + bpf_rcu_read_lock(); + real_parent = task->real_parent; + if (!real_parent) + goto out; + regs = (struct pt_regs *)bpf_task_pt_regs(real_parent); + if (!regs) + goto out; + + ptr = (void *)PT_REGS_IP(regs); + (void)bpf_copy_from_user_task(&value, sizeof(uint32_t), ptr, task, 0); + user_data = value; + (void)bpf_task_storage_get(&map_a, real_parent, 0, 0); +out: + bpf_rcu_read_unlock(); + return 0; +} + +SEC("?lsm.s/bpf") +int BPF_PROG(inproper_sleepable_kfunc, int cmd, union bpf_attr *attr, unsigned int size) +{ + struct bpf_key *bkey; + + /* sleepable kfunc in rcu read lock region */ + bpf_rcu_read_lock(); + bkey = bpf_lookup_user_key(key_serial, flags); + bpf_rcu_read_unlock(); + if (!bkey) + return -1; + bpf_key_put(bkey); + + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_nanosleep") +int nested_rcu_region(void *ctx) +{ + struct task_struct *task, *real_parent; + + /* nested rcu read lock regions */ + task = bpf_get_current_task_btf(); + bpf_rcu_read_lock(); + bpf_rcu_read_lock(); + real_parent = task->real_parent; + if (!real_parent) + goto out; + (void)bpf_task_storage_get(&map_a, real_parent, 0, 0); +out: + bpf_rcu_read_unlock(); + bpf_rcu_read_unlock(); + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +int task_untrusted_non_rcuptr(void *ctx) +{ + struct task_struct *task, *last_wakee; + + task = bpf_get_current_task_btf(); + bpf_rcu_read_lock(); + /* the pointer last_wakee marked as untrusted */ + last_wakee = 
task->real_parent->last_wakee; + (void)bpf_task_storage_get(&map_a, last_wakee, 0, 0); + bpf_rcu_read_unlock(); + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +int task_untrusted_rcuptr(void *ctx) +{ + struct task_struct *task, *real_parent; + + task = bpf_get_current_task_btf(); + bpf_rcu_read_lock(); + real_parent = task->real_parent; + bpf_rcu_read_unlock(); + /* helper use of rcu ptr outside the rcu read lock region */ + (void)bpf_task_storage_get(&map_a, real_parent, 0, 0); + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_nanosleep") +int cross_rcu_region(void *ctx) +{ + struct task_struct *task, *real_parent; + + /* rcu ptr define/use in different regions */ + task = bpf_get_current_task_btf(); + bpf_rcu_read_lock(); + real_parent = task->real_parent; + bpf_rcu_read_unlock(); + bpf_rcu_read_lock(); + (void)bpf_task_storage_get(&map_a, real_parent, 0, 0); + bpf_rcu_read_unlock(); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_common.h b/tools/testing/selftests/bpf/progs/task_kfunc_common.h new file mode 100644 index 000000000000..c0ffd171743e --- /dev/null +++ b/tools/testing/selftests/bpf/progs/task_kfunc_common.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ + +#ifndef _TASK_KFUNC_COMMON_H +#define _TASK_KFUNC_COMMON_H + +#include <errno.h> +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +struct __tasks_kfunc_map_value { + struct task_struct __kptr_ref * task; +}; + +struct hash_map { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, int); + __type(value, struct __tasks_kfunc_map_value); + __uint(max_entries, 1); +} __tasks_kfunc_map SEC(".maps"); + +struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym; +struct task_struct *bpf_task_kptr_get(struct task_struct **pp) __ksym; +void bpf_task_release(struct task_struct *p) __ksym; +struct task_struct *bpf_task_from_pid(s32 pid) __ksym; + +static inline struct __tasks_kfunc_map_value *tasks_kfunc_map_value_lookup(struct task_struct *p) +{ + s32 pid; + long status; + + status = bpf_probe_read_kernel(&pid, sizeof(pid), &p->pid); + if (status) + return NULL; + + return bpf_map_lookup_elem(&__tasks_kfunc_map, &pid); +} + +static inline int tasks_kfunc_map_insert(struct task_struct *p) +{ + struct __tasks_kfunc_map_value local, *v; + long status; + struct task_struct *acquired, *old; + s32 pid; + + status = bpf_probe_read_kernel(&pid, sizeof(pid), &p->pid); + if (status) + return status; + + local.task = NULL; + status = bpf_map_update_elem(&__tasks_kfunc_map, &pid, &local, BPF_NOEXIST); + if (status) + return status; + + v = bpf_map_lookup_elem(&__tasks_kfunc_map, &pid); + if (!v) { + bpf_map_delete_elem(&__tasks_kfunc_map, &pid); + return -ENOENT; + } + + acquired = bpf_task_acquire(p); + old = bpf_kptr_xchg(&v->task, acquired); + if (old) { + bpf_task_release(old); + return -EEXIST; + } + + return 0; +} + +#endif /* _TASK_KFUNC_COMMON_H */ diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c new file mode 100644 index 000000000000..87fa1db9d9b5 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c @@ -0,0 +1,284 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 
*/ + +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> + +#include "task_kfunc_common.h" + +char _license[] SEC("license") = "GPL"; + +/* Prototype for all of the program trace events below: + * + * TRACE_EVENT(task_newtask, + * TP_PROTO(struct task_struct *p, u64 clone_flags) + */ + +static struct __tasks_kfunc_map_value *insert_lookup_task(struct task_struct *task) +{ + int status; + + status = tasks_kfunc_map_insert(task); + if (status) + return NULL; + + return tasks_kfunc_map_value_lookup(task); +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(task_kfunc_acquire_untrusted, struct task_struct *task, u64 clone_flags) +{ + struct task_struct *acquired; + struct __tasks_kfunc_map_value *v; + + v = insert_lookup_task(task); + if (!v) + return 0; + + /* Can't invoke bpf_task_acquire() on an untrusted pointer. */ + acquired = bpf_task_acquire(v->task); + bpf_task_release(acquired); + + return 0; +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(task_kfunc_acquire_fp, struct task_struct *task, u64 clone_flags) +{ + struct task_struct *acquired, *stack_task = (struct task_struct *)&clone_flags; + + /* Can't invoke bpf_task_acquire() on a random frame pointer. */ + acquired = bpf_task_acquire((struct task_struct *)&stack_task); + bpf_task_release(acquired); + + return 0; +} + +SEC("kretprobe/free_task") +int BPF_PROG(task_kfunc_acquire_unsafe_kretprobe, struct task_struct *task, u64 clone_flags) +{ + struct task_struct *acquired; + + acquired = bpf_task_acquire(task); + /* Can't release a bpf_task_acquire()'d task without a NULL check. */ + bpf_task_release(acquired); + + return 0; +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(task_kfunc_acquire_trusted_walked, struct task_struct *task, u64 clone_flags) +{ + struct task_struct *acquired; + + /* Can't invoke bpf_task_acquire() on a trusted pointer obtained from walking a struct. */ + acquired = bpf_task_acquire(task->last_wakee); + bpf_task_release(acquired); + + return 0; +} + + +SEC("tp_btf/task_newtask") +int BPF_PROG(task_kfunc_acquire_null, struct task_struct *task, u64 clone_flags) +{ + struct task_struct *acquired; + + /* Can't invoke bpf_task_acquire() on a NULL pointer. */ + acquired = bpf_task_acquire(NULL); + if (!acquired) + return 0; + bpf_task_release(acquired); + + return 0; +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(task_kfunc_acquire_unreleased, struct task_struct *task, u64 clone_flags) +{ + struct task_struct *acquired; + + acquired = bpf_task_acquire(task); + + /* Acquired task is never released. */ + + return 0; +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(task_kfunc_get_non_kptr_param, struct task_struct *task, u64 clone_flags) +{ + struct task_struct *kptr; + + /* Cannot use bpf_task_kptr_get() on a non-kptr, even on a valid task. */ + kptr = bpf_task_kptr_get(&task); + if (!kptr) + return 0; + + bpf_task_release(kptr); + + return 0; +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(task_kfunc_get_non_kptr_acquired, struct task_struct *task, u64 clone_flags) +{ + struct task_struct *kptr, *acquired; + + acquired = bpf_task_acquire(task); + + /* Cannot use bpf_task_kptr_get() on a non-kptr, even if it was acquired. */ + kptr = bpf_task_kptr_get(&acquired); + bpf_task_release(acquired); + if (!kptr) + return 0; + + bpf_task_release(kptr); + + return 0; +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(task_kfunc_get_null, struct task_struct *task, u64 clone_flags) +{ + struct task_struct *kptr; + + /* Cannot use bpf_task_kptr_get() on a NULL pointer. 
*/ + kptr = bpf_task_kptr_get(NULL); + if (!kptr) + return 0; + + bpf_task_release(kptr); + + return 0; +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(task_kfunc_xchg_unreleased, struct task_struct *task, u64 clone_flags) +{ + struct task_struct *kptr; + struct __tasks_kfunc_map_value *v; + + v = insert_lookup_task(task); + if (!v) + return 0; + + kptr = bpf_kptr_xchg(&v->task, NULL); + if (!kptr) + return 0; + + /* Kptr retrieved from map is never released. */ + + return 0; +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(task_kfunc_get_unreleased, struct task_struct *task, u64 clone_flags) +{ + struct task_struct *kptr; + struct __tasks_kfunc_map_value *v; + + v = insert_lookup_task(task); + if (!v) + return 0; + + kptr = bpf_task_kptr_get(&v->task); + if (!kptr) + return 0; + + /* Kptr acquired above is never released. */ + + return 0; +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(task_kfunc_release_untrusted, struct task_struct *task, u64 clone_flags) +{ + struct __tasks_kfunc_map_value *v; + + v = insert_lookup_task(task); + if (!v) + return 0; + + /* Can't invoke bpf_task_release() on an untrusted pointer. */ + bpf_task_release(v->task); + + return 0; +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(task_kfunc_release_fp, struct task_struct *task, u64 clone_flags) +{ + struct task_struct *acquired = (struct task_struct *)&clone_flags; + + /* Cannot release random frame pointer. */ + bpf_task_release(acquired); + + return 0; +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(task_kfunc_release_null, struct task_struct *task, u64 clone_flags) +{ + struct __tasks_kfunc_map_value local, *v; + long status; + struct task_struct *acquired, *old; + s32 pid; + + status = bpf_probe_read_kernel(&pid, sizeof(pid), &task->pid); + if (status) + return 0; + + local.task = NULL; + status = bpf_map_update_elem(&__tasks_kfunc_map, &pid, &local, BPF_NOEXIST); + if (status) + return status; + + v = bpf_map_lookup_elem(&__tasks_kfunc_map, &pid); + if (!v) + return -ENOENT; + + acquired = bpf_task_acquire(task); + + old = bpf_kptr_xchg(&v->task, acquired); + + /* old cannot be passed to bpf_task_release() without a NULL check. */ + bpf_task_release(old); + bpf_task_release(old); + + return 0; +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(task_kfunc_release_unacquired, struct task_struct *task, u64 clone_flags) +{ + /* Cannot release trusted task pointer which was not acquired. */ + bpf_task_release(task); + + return 0; +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(task_kfunc_from_pid_no_null_check, struct task_struct *task, u64 clone_flags) +{ + struct task_struct *acquired; + + acquired = bpf_task_from_pid(task->pid); + + /* Releasing bpf_task_from_pid() lookup without a NULL check. */ + bpf_task_release(acquired); + + return 0; +} + +SEC("lsm/task_free") +int BPF_PROG(task_kfunc_from_lsm_task_free, struct task_struct *task) +{ + struct task_struct *acquired; + + /* the argument of lsm task_free hook is untrusted. */ + acquired = bpf_task_acquire(task); + bpf_task_release(acquired); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_success.c b/tools/testing/selftests/bpf/progs/task_kfunc_success.c new file mode 100644 index 000000000000..9f359cfd29e7 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/task_kfunc_success.c @@ -0,0 +1,227 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 
*/ + +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> + +#include "task_kfunc_common.h" + +char _license[] SEC("license") = "GPL"; + +int err, pid; + +/* Prototype for all of the program trace events below: + * + * TRACE_EVENT(task_newtask, + * TP_PROTO(struct task_struct *p, u64 clone_flags) + */ + +static bool is_test_kfunc_task(void) +{ + int cur_pid = bpf_get_current_pid_tgid() >> 32; + + return pid == cur_pid; +} + +static int test_acquire_release(struct task_struct *task) +{ + struct task_struct *acquired; + + acquired = bpf_task_acquire(task); + bpf_task_release(acquired); + + return 0; +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(test_task_acquire_release_argument, struct task_struct *task, u64 clone_flags) +{ + if (!is_test_kfunc_task()) + return 0; + + return test_acquire_release(task); +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(test_task_acquire_release_current, struct task_struct *task, u64 clone_flags) +{ + if (!is_test_kfunc_task()) + return 0; + + return test_acquire_release(bpf_get_current_task_btf()); +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(test_task_acquire_leave_in_map, struct task_struct *task, u64 clone_flags) +{ + long status; + + if (!is_test_kfunc_task()) + return 0; + + status = tasks_kfunc_map_insert(task); + if (status) + err = 1; + + return 0; +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(test_task_xchg_release, struct task_struct *task, u64 clone_flags) +{ + struct task_struct *kptr; + struct __tasks_kfunc_map_value *v; + long status; + + if (!is_test_kfunc_task()) + return 0; + + status = tasks_kfunc_map_insert(task); + if (status) { + err = 1; + return 0; + } + + v = tasks_kfunc_map_value_lookup(task); + if (!v) { + err = 2; + return 0; + } + + kptr = bpf_kptr_xchg(&v->task, NULL); + if (!kptr) { + err = 3; + return 0; + } + + bpf_task_release(kptr); + + return 0; +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(test_task_get_release, struct task_struct *task, u64 clone_flags) +{ + struct task_struct *kptr; + struct __tasks_kfunc_map_value *v; + long status; + + if (!is_test_kfunc_task()) + return 0; + + status = tasks_kfunc_map_insert(task); + if (status) { + err = 1; + return 0; + } + + v = tasks_kfunc_map_value_lookup(task); + if (!v) { + err = 2; + return 0; + } + + kptr = bpf_task_kptr_get(&v->task); + if (kptr) { + /* Until we resolve the issues with using task->rcu_users, we + * expect bpf_task_kptr_get() to return a NULL task. See the + * comment at the definition of bpf_task_acquire_not_zero() for + * more details. 
+ */ + bpf_task_release(kptr); + err = 3; + return 0; + } + + + return 0; +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(test_task_current_acquire_release, struct task_struct *task, u64 clone_flags) +{ + struct task_struct *current, *acquired; + + if (!is_test_kfunc_task()) + return 0; + + current = bpf_get_current_task_btf(); + acquired = bpf_task_acquire(current); + bpf_task_release(acquired); + + return 0; +} + +static void lookup_compare_pid(const struct task_struct *p) +{ + struct task_struct *acquired; + + acquired = bpf_task_from_pid(p->pid); + if (!acquired) { + err = 1; + return; + } + + if (acquired->pid != p->pid) + err = 2; + bpf_task_release(acquired); +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(test_task_from_pid_arg, struct task_struct *task, u64 clone_flags) +{ + struct task_struct *acquired; + + if (!is_test_kfunc_task()) + return 0; + + lookup_compare_pid(task); + return 0; +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(test_task_from_pid_current, struct task_struct *task, u64 clone_flags) +{ + struct task_struct *current, *acquired; + + if (!is_test_kfunc_task()) + return 0; + + lookup_compare_pid(bpf_get_current_task_btf()); + return 0; +} + +static int is_pid_lookup_valid(s32 pid) +{ + struct task_struct *acquired; + + acquired = bpf_task_from_pid(pid); + if (acquired) { + bpf_task_release(acquired); + return 1; + } + + return 0; +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(test_task_from_pid_invalid, struct task_struct *task, u64 clone_flags) +{ + struct task_struct *acquired; + + if (!is_test_kfunc_task()) + return 0; + + if (is_pid_lookup_valid(-1)) { + err = 1; + return 0; + } + + if (is_pid_lookup_valid(0xcafef00d)) { + err = 2; + return 0; + } + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/task_local_storage_exit_creds.c b/tools/testing/selftests/bpf/progs/task_local_storage_exit_creds.c index 81758c0aef99..41d88ed222ff 100644 --- a/tools/testing/selftests/bpf/progs/task_local_storage_exit_creds.c +++ b/tools/testing/selftests/bpf/progs/task_local_storage_exit_creds.c @@ -14,6 +14,7 @@ struct { __type(value, __u64); } task_storage SEC(".maps"); +int run_count = 0; int valid_ptr_count = 0; int null_ptr_count = 0; @@ -28,5 +29,7 @@ int BPF_PROG(trace_exit_creds, struct task_struct *task) __sync_fetch_and_add(&valid_ptr_count, 1); else __sync_fetch_and_add(&null_ptr_count, 1); + + __sync_fetch_and_add(&run_count, 1); return 0; } diff --git a/tools/testing/selftests/bpf/progs/task_ls_recursion.c b/tools/testing/selftests/bpf/progs/task_ls_recursion.c index 564583dca7c8..4542dc683b44 100644 --- a/tools/testing/selftests/bpf/progs/task_ls_recursion.c +++ b/tools/testing/selftests/bpf/progs/task_ls_recursion.c @@ -5,7 +5,13 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> +#ifndef EBUSY +#define EBUSY 16 +#endif + char _license[] SEC("license") = "GPL"; +int nr_del_errs = 0; +int test_pid = 0; struct { __uint(type, BPF_MAP_TYPE_TASK_STORAGE); @@ -26,6 +32,13 @@ int BPF_PROG(on_lookup) { struct task_struct *task = bpf_get_current_task_btf(); + if (!test_pid || task->pid != test_pid) + return 0; + + /* The bpf_task_storage_delete will call + * bpf_local_storage_lookup. The prog->active will + * stop the recursion. 
+ */ bpf_task_storage_delete(&map_a, task); bpf_task_storage_delete(&map_b, task); return 0; @@ -37,11 +50,32 @@ int BPF_PROG(on_update) struct task_struct *task = bpf_get_current_task_btf(); long *ptr; + if (!test_pid || task->pid != test_pid) + return 0; + ptr = bpf_task_storage_get(&map_a, task, 0, BPF_LOCAL_STORAGE_GET_F_CREATE); - if (ptr) + /* ptr will not be NULL when it is called from + * the bpf_task_storage_get(&map_b,...F_CREATE) in + * the BPF_PROG(on_enter) below. It is because + * the value can be found in map_a and the kernel + * does not need to acquire any spin_lock. + */ + if (ptr) { + int err; + *ptr += 1; + err = bpf_task_storage_delete(&map_a, task); + if (err == -EBUSY) + nr_del_errs++; + } + /* This will still fail because map_b is empty and + * this BPF_PROG(on_update) has failed to acquire + * the percpu busy lock => meaning potential + * deadlock is detected and it will fail to create + * new storage. + */ ptr = bpf_task_storage_get(&map_b, task, 0, BPF_LOCAL_STORAGE_GET_F_CREATE); if (ptr) @@ -57,14 +91,17 @@ int BPF_PROG(on_enter, struct pt_regs *regs, long id) long *ptr; task = bpf_get_current_task_btf(); + if (!test_pid || task->pid != test_pid) + return 0; + ptr = bpf_task_storage_get(&map_a, task, 0, BPF_LOCAL_STORAGE_GET_F_CREATE); - if (ptr) + if (ptr && !*ptr) *ptr = 200; ptr = bpf_task_storage_get(&map_b, task, 0, BPF_LOCAL_STORAGE_GET_F_CREATE); - if (ptr) + if (ptr && !*ptr) *ptr = 100; return 0; } diff --git a/tools/testing/selftests/bpf/progs/task_storage_nodeadlock.c b/tools/testing/selftests/bpf/progs/task_storage_nodeadlock.c new file mode 100644 index 000000000000..ea2dbb80f7b3 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/task_storage_nodeadlock.c @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +#ifndef EBUSY +#define EBUSY 16 +#endif + +extern bool CONFIG_PREEMPT __kconfig __weak; +int nr_get_errs = 0; +int nr_del_errs = 0; + +struct { + __uint(type, BPF_MAP_TYPE_TASK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, int); +} task_storage SEC(".maps"); + +SEC("lsm.s/socket_post_create") +int BPF_PROG(socket_post_create, struct socket *sock, int family, int type, + int protocol, int kern) +{ + struct task_struct *task; + int ret, zero = 0; + int *value; + + if (!CONFIG_PREEMPT) + return 0; + + task = bpf_get_current_task_btf(); + value = bpf_task_storage_get(&task_storage, task, &zero, + BPF_LOCAL_STORAGE_GET_F_CREATE); + if (!value) + __sync_fetch_and_add(&nr_get_errs, 1); + + ret = bpf_task_storage_delete(&task_storage, + bpf_get_current_task_btf()); + if (ret == -EBUSY) + __sync_fetch_and_add(&nr_del_errs, 1); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c b/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c index ce39d096bba3..f4a8250329b2 100644 --- a/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c +++ b/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c @@ -33,18 +33,6 @@ int err, pid; char _license[] SEC("license") = "GPL"; SEC("?lsm.s/bpf") -int BPF_PROG(dynptr_type_not_supp, int cmd, union bpf_attr *attr, - unsigned int size) -{ - char write_data[64] = "hello there, world!!"; - struct bpf_dynptr ptr; - - bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(write_data), 0, &ptr); - - return bpf_verify_pkcs7_signature(&ptr, &ptr, NULL); -} - -SEC("?lsm.s/bpf") int BPF_PROG(not_valid_dynptr, int cmd, 
union bpf_attr *attr, unsigned int size) { unsigned long val; diff --git a/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c b/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c new file mode 100644 index 000000000000..f5ac5f3e8919 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH + * + * Author: Roberto Sassu <roberto.sassu@huawei.com> + */ + +#include "vmlinux.h" +#include <errno.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +/* From include/linux/mm.h. */ +#define FMODE_WRITE 0x2 + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u32); +} data_input SEC(".maps"); + +char _license[] SEC("license") = "GPL"; + +SEC("lsm/bpf_map") +int BPF_PROG(check_access, struct bpf_map *map, fmode_t fmode) +{ + if (map != (struct bpf_map *)&data_input) + return 0; + + if (fmode & FMODE_WRITE) + return -EACCES; + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c b/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c index 2c121c5d66a7..d487153a839d 100644 --- a/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c +++ b/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c @@ -27,6 +27,7 @@ unsigned int nr_pure_ack = 0; unsigned int nr_data = 0; unsigned int nr_syn = 0; unsigned int nr_fin = 0; +unsigned int nr_hwtstamp = 0; /* Check the header received from the active side */ static int __check_active_hdr_in(struct bpf_sock_ops *skops, bool check_syn) @@ -146,6 +147,9 @@ static int check_active_hdr_in(struct bpf_sock_ops *skops) if (th->ack && !th->fin && tcp_hdrlen(th) == skops->skb_len) nr_pure_ack++; + if (skops->skb_hwtstamp) + nr_hwtstamp++; + return CG_OK; } diff --git a/tools/testing/selftests/bpf/progs/test_module_attach.c b/tools/testing/selftests/bpf/progs/test_module_attach.c index 08628afedb77..8a1b50f3a002 100644 --- a/tools/testing/selftests/bpf/progs/test_module_attach.c +++ b/tools/testing/selftests/bpf/progs/test_module_attach.c @@ -110,4 +110,10 @@ int BPF_PROG(handle_fmod_ret, return 0; /* don't override the exit code */ } +SEC("kprobe.multi/bpf_testmod_test_read") +int BPF_PROG(kprobe_multi) +{ + return 0; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf_map_key.c b/tools/testing/selftests/bpf/progs/test_ringbuf_map_key.c new file mode 100644 index 000000000000..2760bf60d05a --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_ringbuf_map_key.c @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 
*/ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + +struct sample { + int pid; + int seq; + long value; + char comm[16]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_RINGBUF); +} ringbuf SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1000); + __type(key, struct sample); + __type(value, int); +} hash_map SEC(".maps"); + +/* inputs */ +int pid = 0; + +/* inner state */ +long seq = 0; + +SEC("fentry/" SYS_PREFIX "sys_getpgid") +int test_ringbuf_mem_map_key(void *ctx) +{ + int cur_pid = bpf_get_current_pid_tgid() >> 32; + struct sample *sample, sample_copy; + int *lookup_val; + + if (cur_pid != pid) + return 0; + + sample = bpf_ringbuf_reserve(&ringbuf, sizeof(*sample), 0); + if (!sample) + return 0; + + sample->pid = pid; + bpf_get_current_comm(sample->comm, sizeof(sample->comm)); + sample->seq = ++seq; + sample->value = 42; + + /* test using 'sample' (PTR_TO_MEM | MEM_ALLOC) as map key arg + */ + lookup_val = (int *)bpf_map_lookup_elem(&hash_map, sample); + + /* workaround - memcpy is necessary so that verifier doesn't + * complain with: + * verifier internal error: more than one arg with ref_obj_id R3 + * when trying to do bpf_map_update_elem(&hash_map, sample, &sample->seq, BPF_ANY); + * + * Since bpf_map_lookup_elem above uses 'sample' as key, test using + * sample field as value below + */ + __builtin_memcpy(&sample_copy, sample, sizeof(struct sample)); + bpf_map_update_elem(&hash_map, &sample_copy, &sample->seq, BPF_ANY); + + bpf_ringbuf_submit(sample, 0); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/test_skeleton.c b/tools/testing/selftests/bpf/progs/test_skeleton.c index 1a4e93f6d9df..adece9f91f58 100644 --- a/tools/testing/selftests/bpf/progs/test_skeleton.c +++ b/tools/testing/selftests/bpf/progs/test_skeleton.c @@ -53,6 +53,20 @@ int out_mostly_var; char huge_arr[16 * 1024 * 1024]; +/* non-mmapable custom .data section */ + +struct my_value { int x, y, z; }; + +__hidden int zero_key SEC(".data.non_mmapable"); +static struct my_value zero_value SEC(".data.non_mmapable"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, int); + __type(value, struct my_value); + __uint(max_entries, 1); +} my_map SEC(".maps"); + SEC("raw_tp/sys_enter") int handler(const void *ctx) { @@ -75,6 +89,9 @@ int handler(const void *ctx) huge_arr[sizeof(huge_arr) - 1] = 123; + /* make sure zero_key and zero_value are not optimized out */ + bpf_map_update_elem(&my_map, &zero_key, &zero_value, BPF_ANY); + return 0; } diff --git a/tools/testing/selftests/bpf/progs/test_spin_lock.c b/tools/testing/selftests/bpf/progs/test_spin_lock.c index 7e88309d3229..5bd10409285b 100644 --- a/tools/testing/selftests/bpf/progs/test_spin_lock.c +++ b/tools/testing/selftests/bpf/progs/test_spin_lock.c @@ -45,8 +45,8 @@ struct { #define CREDIT_PER_NS(delta, rate) (((delta) * rate) >> 20) -SEC("tc") -int bpf_sping_lock_test(struct __sk_buff *skb) +SEC("cgroup_skb/ingress") +int bpf_spin_lock_test(struct __sk_buff *skb) { volatile int credit = 0, max_credit = 100, pkt_len = 64; struct hmap_elem zero = {}, *val; diff --git a/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c b/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c new file mode 100644 index 000000000000..86cd183ef6dc --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c @@ -0,0 +1,204 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include 
<bpf/bpf_helpers.h> +#include "bpf_experimental.h" + +struct foo { + struct bpf_spin_lock lock; + int data; +}; + +struct array_map { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, int); + __type(value, struct foo); + __uint(max_entries, 1); +} array_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); + __array(values, struct array_map); +} map_of_maps SEC(".maps") = { + .values = { + [0] = &array_map, + }, +}; + +SEC(".data.A") struct bpf_spin_lock lockA; +SEC(".data.B") struct bpf_spin_lock lockB; + +SEC("?tc") +int lock_id_kptr_preserve(void *ctx) +{ + struct foo *f; + + f = bpf_obj_new(typeof(*f)); + if (!f) + return 0; + bpf_this_cpu_ptr(f); + return 0; +} + +SEC("?tc") +int lock_id_global_zero(void *ctx) +{ + bpf_this_cpu_ptr(&lockA); + return 0; +} + +SEC("?tc") +int lock_id_mapval_preserve(void *ctx) +{ + struct foo *f; + int key = 0; + + f = bpf_map_lookup_elem(&array_map, &key); + if (!f) + return 0; + bpf_this_cpu_ptr(f); + return 0; +} + +SEC("?tc") +int lock_id_innermapval_preserve(void *ctx) +{ + struct foo *f; + int key = 0; + void *map; + + map = bpf_map_lookup_elem(&map_of_maps, &key); + if (!map) + return 0; + f = bpf_map_lookup_elem(map, &key); + if (!f) + return 0; + bpf_this_cpu_ptr(f); + return 0; +} + +#define CHECK(test, A, B) \ + SEC("?tc") \ + int lock_id_mismatch_##test(void *ctx) \ + { \ + struct foo *f1, *f2, *v, *iv; \ + int key = 0; \ + void *map; \ + \ + map = bpf_map_lookup_elem(&map_of_maps, &key); \ + if (!map) \ + return 0; \ + iv = bpf_map_lookup_elem(map, &key); \ + if (!iv) \ + return 0; \ + v = bpf_map_lookup_elem(&array_map, &key); \ + if (!v) \ + return 0; \ + f1 = bpf_obj_new(typeof(*f1)); \ + if (!f1) \ + return 0; \ + f2 = bpf_obj_new(typeof(*f2)); \ + if (!f2) { \ + bpf_obj_drop(f1); \ + return 0; \ + } \ + bpf_spin_lock(A); \ + bpf_spin_unlock(B); \ + return 0; \ + } + +CHECK(kptr_kptr, &f1->lock, &f2->lock); +CHECK(kptr_global, &f1->lock, &lockA); +CHECK(kptr_mapval, &f1->lock, &v->lock); +CHECK(kptr_innermapval, &f1->lock, &iv->lock); + +CHECK(global_global, &lockA, &lockB); +CHECK(global_kptr, &lockA, &f1->lock); +CHECK(global_mapval, &lockA, &v->lock); +CHECK(global_innermapval, &lockA, &iv->lock); + +SEC("?tc") +int lock_id_mismatch_mapval_mapval(void *ctx) +{ + struct foo *f1, *f2; + int key = 0; + + f1 = bpf_map_lookup_elem(&array_map, &key); + if (!f1) + return 0; + f2 = bpf_map_lookup_elem(&array_map, &key); + if (!f2) + return 0; + + bpf_spin_lock(&f1->lock); + f1->data = 42; + bpf_spin_unlock(&f2->lock); + + return 0; +} + +CHECK(mapval_kptr, &v->lock, &f1->lock); +CHECK(mapval_global, &v->lock, &lockB); +CHECK(mapval_innermapval, &v->lock, &iv->lock); + +SEC("?tc") +int lock_id_mismatch_innermapval_innermapval1(void *ctx) +{ + struct foo *f1, *f2; + int key = 0; + void *map; + + map = bpf_map_lookup_elem(&map_of_maps, &key); + if (!map) + return 0; + f1 = bpf_map_lookup_elem(map, &key); + if (!f1) + return 0; + f2 = bpf_map_lookup_elem(map, &key); + if (!f2) + return 0; + + bpf_spin_lock(&f1->lock); + f1->data = 42; + bpf_spin_unlock(&f2->lock); + + return 0; +} + +SEC("?tc") +int lock_id_mismatch_innermapval_innermapval2(void *ctx) +{ + struct foo *f1, *f2; + int key = 0; + void *map; + + map = bpf_map_lookup_elem(&map_of_maps, &key); + if (!map) + return 0; + f1 = bpf_map_lookup_elem(map, &key); + if (!f1) + return 0; + map = bpf_map_lookup_elem(&map_of_maps, &key); + if (!map) + return 0; + f2 = bpf_map_lookup_elem(map, &key); + if (!f2) 
+ return 0; + + bpf_spin_lock(&f1->lock); + f1->data = 42; + bpf_spin_unlock(&f2->lock); + + return 0; +} + +CHECK(innermapval_kptr, &iv->lock, &f1->lock); +CHECK(innermapval_global, &iv->lock, &lockA); +CHECK(innermapval_mapval, &iv->lock, &v->lock); + +#undef CHECK + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/type_cast.c b/tools/testing/selftests/bpf/progs/type_cast.c new file mode 100644 index 000000000000..eb78e6f03129 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/type_cast.c @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_core_read.h> + +struct { + __uint(type, BPF_MAP_TYPE_TASK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, long); +} enter_id SEC(".maps"); + +#define IFNAMSIZ 16 + +int ifindex, ingress_ifindex; +char name[IFNAMSIZ]; +unsigned int inum; +unsigned int meta_len, frag0_len, kskb_len, kskb2_len; + +void *bpf_cast_to_kern_ctx(void *) __ksym; +void *bpf_rdonly_cast(void *, __u32) __ksym; + +SEC("?xdp") +int md_xdp(struct xdp_md *ctx) +{ + struct xdp_buff *kctx = bpf_cast_to_kern_ctx(ctx); + struct net_device *dev; + + dev = kctx->rxq->dev; + ifindex = dev->ifindex; + inum = dev->nd_net.net->ns.inum; + __builtin_memcpy(name, dev->name, IFNAMSIZ); + ingress_ifindex = ctx->ingress_ifindex; + return XDP_PASS; +} + +SEC("?tc") +int md_skb(struct __sk_buff *skb) +{ + struct sk_buff *kskb = bpf_cast_to_kern_ctx(skb); + struct skb_shared_info *shared_info; + struct sk_buff *kskb2; + + kskb_len = kskb->len; + + /* Simulate the following kernel macro: + * #define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB))) + */ + shared_info = bpf_rdonly_cast(kskb->head + kskb->end, + bpf_core_type_id_kernel(struct skb_shared_info)); + meta_len = shared_info->meta_len; + frag0_len = shared_info->frag_list->len; + + /* kskb2 should be equal to kskb */ + kskb2 = bpf_rdonly_cast(kskb, bpf_core_type_id_kernel(struct sk_buff)); + kskb2_len = kskb2->len; + return 0; +} + +SEC("?tp_btf/sys_enter") +int BPF_PROG(untrusted_ptr, struct pt_regs *regs, long id) +{ + struct task_struct *task, *task_dup; + long *ptr; + + task = bpf_get_current_task_btf(); + task_dup = bpf_rdonly_cast(task, bpf_core_type_id_kernel(struct task_struct)); + (void)bpf_task_storage_get(&enter_id, task_dup, 0, 0); + return 0; +} + +SEC("?tracepoint/syscalls/sys_enter_nanosleep") +int kctx_u64(void *ctx) +{ + u64 *kctx = bpf_rdonly_cast(ctx, bpf_core_type_id_kernel(u64)); + + (void)kctx; + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c b/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c index 82aba4529aa9..f3201dc69a60 100644 --- a/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c +++ b/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c @@ -18,6 +18,13 @@ struct { __uint(type, BPF_MAP_TYPE_USER_RINGBUF); } user_ringbuf SEC(".maps"); +struct { + __uint(type, BPF_MAP_TYPE_RINGBUF); + __uint(max_entries, 2); +} ringbuf SEC(".maps"); + +static int map_value; + static long bad_access1(struct bpf_dynptr *dynptr, void *context) { @@ -32,7 +39,7 @@ bad_access1(struct bpf_dynptr *dynptr, void *context) /* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should * not be able to read before the pointer. 
*/ -SEC("?raw_tp/sys_nanosleep") +SEC("?raw_tp/") int user_ringbuf_callback_bad_access1(void *ctx) { bpf_user_ringbuf_drain(&user_ringbuf, bad_access1, NULL, 0); @@ -54,7 +61,7 @@ bad_access2(struct bpf_dynptr *dynptr, void *context) /* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should * not be able to read past the end of the pointer. */ -SEC("?raw_tp/sys_nanosleep") +SEC("?raw_tp/") int user_ringbuf_callback_bad_access2(void *ctx) { bpf_user_ringbuf_drain(&user_ringbuf, bad_access2, NULL, 0); @@ -73,7 +80,7 @@ write_forbidden(struct bpf_dynptr *dynptr, void *context) /* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should * not be able to write to that pointer. */ -SEC("?raw_tp/sys_nanosleep") +SEC("?raw_tp/") int user_ringbuf_callback_write_forbidden(void *ctx) { bpf_user_ringbuf_drain(&user_ringbuf, write_forbidden, NULL, 0); @@ -92,7 +99,7 @@ null_context_write(struct bpf_dynptr *dynptr, void *context) /* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should * not be able to write to that pointer. */ -SEC("?raw_tp/sys_nanosleep") +SEC("?raw_tp/") int user_ringbuf_callback_null_context_write(void *ctx) { bpf_user_ringbuf_drain(&user_ringbuf, null_context_write, NULL, 0); @@ -113,7 +120,7 @@ null_context_read(struct bpf_dynptr *dynptr, void *context) /* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should * not be able to write to that pointer. */ -SEC("?raw_tp/sys_nanosleep") +SEC("?raw_tp/") int user_ringbuf_callback_null_context_read(void *ctx) { bpf_user_ringbuf_drain(&user_ringbuf, null_context_read, NULL, 0); @@ -132,7 +139,7 @@ try_discard_dynptr(struct bpf_dynptr *dynptr, void *context) /* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should * not be able to read past the end of the pointer. */ -SEC("?raw_tp/sys_nanosleep") +SEC("?raw_tp/") int user_ringbuf_callback_discard_dynptr(void *ctx) { bpf_user_ringbuf_drain(&user_ringbuf, try_discard_dynptr, NULL, 0); @@ -151,7 +158,7 @@ try_submit_dynptr(struct bpf_dynptr *dynptr, void *context) /* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should * not be able to read past the end of the pointer. */ -SEC("?raw_tp/sys_nanosleep") +SEC("?raw_tp/") int user_ringbuf_callback_submit_dynptr(void *ctx) { bpf_user_ringbuf_drain(&user_ringbuf, try_submit_dynptr, NULL, 0); @@ -168,10 +175,38 @@ invalid_drain_callback_return(struct bpf_dynptr *dynptr, void *context) /* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should * not be able to write to that pointer. 
*/ -SEC("?raw_tp/sys_nanosleep") +SEC("?raw_tp/") int user_ringbuf_callback_invalid_return(void *ctx) { bpf_user_ringbuf_drain(&user_ringbuf, invalid_drain_callback_return, NULL, 0); return 0; } + +static long +try_reinit_dynptr_mem(struct bpf_dynptr *dynptr, void *context) +{ + bpf_dynptr_from_mem(&map_value, 4, 0, dynptr); + return 0; +} + +static long +try_reinit_dynptr_ringbuf(struct bpf_dynptr *dynptr, void *context) +{ + bpf_ringbuf_reserve_dynptr(&ringbuf, 8, 0, dynptr); + return 0; +} + +SEC("?raw_tp/") +int user_ringbuf_callback_reinit_dynptr_mem(void *ctx) +{ + bpf_user_ringbuf_drain(&user_ringbuf, try_reinit_dynptr_mem, NULL, 0); + return 0; +} + +SEC("?raw_tp/") +int user_ringbuf_callback_reinit_dynptr_ringbuf(void *ctx) +{ + bpf_user_ringbuf_drain(&user_ringbuf, try_reinit_dynptr_ringbuf, NULL, 0); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/xfrm_info.c b/tools/testing/selftests/bpf/progs/xfrm_info.c new file mode 100644 index 000000000000..f6a501fbba2b --- /dev/null +++ b/tools/testing/selftests/bpf/progs/xfrm_info.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "vmlinux.h" +#include "bpf_tracing_net.h" +#include <bpf/bpf_helpers.h> + +struct bpf_xfrm_info___local { + u32 if_id; + int link; +} __attribute__((preserve_access_index)); + +__u32 req_if_id; +__u32 resp_if_id; + +int bpf_skb_set_xfrm_info(struct __sk_buff *skb_ctx, + const struct bpf_xfrm_info___local *from) __ksym; +int bpf_skb_get_xfrm_info(struct __sk_buff *skb_ctx, + struct bpf_xfrm_info___local *to) __ksym; + +SEC("tc") +int set_xfrm_info(struct __sk_buff *skb) +{ + struct bpf_xfrm_info___local info = { .if_id = req_if_id }; + + return bpf_skb_set_xfrm_info(skb, &info) ? TC_ACT_SHOT : TC_ACT_UNSPEC; +} + +SEC("tc") +int get_xfrm_info(struct __sk_buff *skb) +{ + struct bpf_xfrm_info___local info = {}; + + if (bpf_skb_get_xfrm_info(skb, &info) < 0) + return TC_ACT_SHOT; + + resp_if_id = info.if_id; + + return TC_ACT_UNSPEC; +} + +char _license[] SEC("license") = "GPL"; |
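The rcu_read_lock.c programs in this series mostly exercise ways to get the RCU region wrong (missing lock, missing unlock, sleepable helpers inside the region, RCU pointers escaping the region). For contrast, a minimal correct-usage sketch is shown below; it is not part of the patch, and it assumes it is built like the other selftest programs (vmlinux.h, bpf_misc.h for SYS_PREFIX, and the same bpf_rcu_read_lock()/bpf_rcu_read_unlock() kfunc declarations used above).

// SPDX-License-Identifier: GPL-2.0
/* Minimal sketch (not part of the patch): an RCU-protected pointer
 * (task->real_parent) is only dereferenced and passed to helpers while
 * the RCU read-side region is held.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"

char _license[] SEC("license") = "GPL";

struct {
        __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
        __uint(map_flags, BPF_F_NO_PREALLOC);
        __type(key, int);
        __type(value, long);
} map_a SEC(".maps");

void bpf_rcu_read_lock(void) __ksym;
void bpf_rcu_read_unlock(void) __ksym;

SEC("fentry.s/" SYS_PREFIX "sys_getpgid")
int rcu_region_sketch(void *ctx)
{
        struct task_struct *task, *real_parent;

        task = bpf_get_current_task_btf();

        bpf_rcu_read_lock();
        real_parent = task->real_parent;
        if (real_parent)
                /* helper use of the RCU pointer stays inside the region */
                (void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
        bpf_rcu_read_unlock();

        return 0;
}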
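Many programs in this series use a leading '?' in their SEC() name (for example SEC("?tc") or SEC("?raw_tp/")), which libbpf interprets as "do not autoload this program when the object is loaded". Below is a hypothetical userspace sketch of how one such program can be exercised at a time; the object path, program name, and helper function are illustrative only, and this is not the harness the selftests themselves use.

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical loader sketch: enable exactly one '?'-prefixed program and
 * attempt to load it; for the *_fail.c programs a verifier rejection is
 * the expected outcome.
 */
#include <stdbool.h>
#include <bpf/libbpf.h>

int load_one_prog(const char *obj_path, const char *prog_name)
{
        struct bpf_object *obj;
        struct bpf_program *prog;
        int err;

        obj = bpf_object__open_file(obj_path, NULL);
        if (!obj)
                return -1;

        /* Every SEC("?...") program starts with autoload disabled; turn it
         * on only for the program under test.
         */
        prog = bpf_object__find_program_by_name(obj, prog_name);
        if (!prog) {
                bpf_object__close(obj);
                return -1;
        }
        bpf_program__set_autoload(prog, true);

        /* A non-zero return here means the verifier rejected the object. */
        err = bpf_object__load(obj);

        bpf_object__close(obj);
        return err;
}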