Diffstat (limited to 'tools/testing/selftests/bpf/progs')
72 files changed, 4478 insertions(+), 158 deletions(-)
diff --git a/tools/testing/selftests/bpf/progs/bench_sockmap_prog.c b/tools/testing/selftests/bpf/progs/bench_sockmap_prog.c new file mode 100644 index 000000000000..079bf3794b3a --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bench_sockmap_prog.c @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> + +long process_byte = 0; +int verdict_dir = 0; +int dropped = 0; +int pkt_size = 0; +struct { + __uint(type, BPF_MAP_TYPE_SOCKMAP); + __uint(max_entries, 20); + __type(key, int); + __type(value, int); +} sock_map_rx SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_SOCKMAP); + __uint(max_entries, 20); + __type(key, int); + __type(value, int); +} sock_map_tx SEC(".maps"); + +SEC("sk_skb/stream_parser") +int prog_skb_parser(struct __sk_buff *skb) +{ + return pkt_size; +} + +SEC("sk_skb/stream_verdict") +int prog_skb_verdict(struct __sk_buff *skb) +{ + int one = 1; + int ret = bpf_sk_redirect_map(skb, &sock_map_rx, one, verdict_dir); + + if (ret == SK_DROP) + dropped++; + __sync_fetch_and_add(&process_byte, skb->len); + return ret; +} + +SEC("sk_skb/stream_verdict") +int prog_skb_pass(struct __sk_buff *skb) +{ + __sync_fetch_and_add(&process_byte, skb->len); + return SK_PASS; +} + +SEC("sk_msg") +int prog_skmsg_verdict(struct sk_msg_md *msg) +{ + int one = 1; + + __sync_fetch_and_add(&process_byte, msg->size); + return bpf_msg_redirect_map(msg, &sock_map_tx, one, verdict_dir); +} + +SEC("sk_msg") +int prog_skmsg_pass(struct sk_msg_md *msg) +{ + __sync_fetch_and_add(&process_byte, msg->size); + return SK_PASS; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h b/tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h new file mode 100644 index 000000000000..d67466c1ff77 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h @@ -0,0 +1,542 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */ +#ifndef BPF_ARENA_SPIN_LOCK_H +#define BPF_ARENA_SPIN_LOCK_H + +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include "bpf_atomic.h" + +#define arch_mcs_spin_lock_contended_label(l, label) smp_cond_load_acquire_label(l, VAL, label) +#define arch_mcs_spin_unlock_contended(l) smp_store_release((l), 1) + +#if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST) + +#define EBUSY 16 +#define EOPNOTSUPP 95 +#define ETIMEDOUT 110 + +#ifndef __arena +#define __arena __attribute__((address_space(1))) +#endif + +extern unsigned long CONFIG_NR_CPUS __kconfig; + +/* + * Typically, we'd just rely on the definition in vmlinux.h for qspinlock, but + * PowerPC overrides the definition to define lock->val as u32 instead of + * atomic_t, leading to compilation errors. Import a local definition below so + * that we don't depend on the vmlinux.h version. 
+ */ + +struct __qspinlock { + union { + atomic_t val; +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + struct { + u8 locked; + u8 pending; + }; + struct { + u16 locked_pending; + u16 tail; + }; +#else + struct { + u16 tail; + u16 locked_pending; + }; + struct { + u8 reserved[2]; + u8 pending; + u8 locked; + }; +#endif + }; +}; + +#define arena_spinlock_t struct __qspinlock +/* FIXME: Using typedef causes CO-RE relocation error */ +/* typedef struct qspinlock arena_spinlock_t; */ + +struct arena_mcs_spinlock { + struct arena_mcs_spinlock __arena *next; + int locked; + int count; +}; + +struct arena_qnode { + struct arena_mcs_spinlock mcs; +}; + +#define _Q_MAX_NODES 4 +#define _Q_PENDING_LOOPS 1 + +/* + * Bitfields in the atomic value: + * + * 0- 7: locked byte + * 8: pending + * 9-15: not used + * 16-17: tail index + * 18-31: tail cpu (+1) + */ +#define _Q_MAX_CPUS 1024 + +#define _Q_SET_MASK(type) (((1U << _Q_ ## type ## _BITS) - 1)\ + << _Q_ ## type ## _OFFSET) +#define _Q_LOCKED_OFFSET 0 +#define _Q_LOCKED_BITS 8 +#define _Q_LOCKED_MASK _Q_SET_MASK(LOCKED) + +#define _Q_PENDING_OFFSET (_Q_LOCKED_OFFSET + _Q_LOCKED_BITS) +#define _Q_PENDING_BITS 8 +#define _Q_PENDING_MASK _Q_SET_MASK(PENDING) + +#define _Q_TAIL_IDX_OFFSET (_Q_PENDING_OFFSET + _Q_PENDING_BITS) +#define _Q_TAIL_IDX_BITS 2 +#define _Q_TAIL_IDX_MASK _Q_SET_MASK(TAIL_IDX) + +#define _Q_TAIL_CPU_OFFSET (_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS) +#define _Q_TAIL_CPU_BITS (32 - _Q_TAIL_CPU_OFFSET) +#define _Q_TAIL_CPU_MASK _Q_SET_MASK(TAIL_CPU) + +#define _Q_TAIL_OFFSET _Q_TAIL_IDX_OFFSET +#define _Q_TAIL_MASK (_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK) + +#define _Q_LOCKED_VAL (1U << _Q_LOCKED_OFFSET) +#define _Q_PENDING_VAL (1U << _Q_PENDING_OFFSET) + +struct arena_qnode __arena qnodes[_Q_MAX_CPUS][_Q_MAX_NODES]; + +static inline u32 encode_tail(int cpu, int idx) +{ + u32 tail; + + tail = (cpu + 1) << _Q_TAIL_CPU_OFFSET; + tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */ + + return tail; +} + +static inline struct arena_mcs_spinlock __arena *decode_tail(u32 tail) +{ + u32 cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1; + u32 idx = (tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET; + + return &qnodes[cpu][idx].mcs; +} + +static inline +struct arena_mcs_spinlock __arena *grab_mcs_node(struct arena_mcs_spinlock __arena *base, int idx) +{ + return &((struct arena_qnode __arena *)base + idx)->mcs; +} + +#define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK) + +/** + * xchg_tail - Put in the new queue tail code word & retrieve previous one + * @lock : Pointer to queued spinlock structure + * @tail : The new queue tail code word + * Return: The previous queue tail code word + * + * xchg(lock, tail) + * + * p,*,* -> n,*,* ; prev = xchg(lock, node) + */ +static __always_inline u32 xchg_tail(arena_spinlock_t __arena *lock, u32 tail) +{ + u32 old, new; + + old = atomic_read(&lock->val); + do { + new = (old & _Q_LOCKED_PENDING_MASK) | tail; + /* + * We can use relaxed semantics since the caller ensures that + * the MCS node is properly initialized before updating the + * tail. + */ + /* These loops are not expected to stall, but we still need to + * prove to the verifier they will terminate eventually. + */ + cond_break_label(out); + } while (!atomic_try_cmpxchg_relaxed(&lock->val, &old, new)); + + return old; +out: + bpf_printk("RUNTIME ERROR: %s unexpected cond_break exit!!!", __func__); + return old; +} + +/** + * clear_pending - clear the pending bit. 
+ * @lock: Pointer to queued spinlock structure + * + * *,1,* -> *,0,* + */ +static __always_inline void clear_pending(arena_spinlock_t __arena *lock) +{ + WRITE_ONCE(lock->pending, 0); +} + +/** + * clear_pending_set_locked - take ownership and clear the pending bit. + * @lock: Pointer to queued spinlock structure + * + * *,1,0 -> *,0,1 + * + * Lock stealing is not allowed if this function is used. + */ +static __always_inline void clear_pending_set_locked(arena_spinlock_t __arena *lock) +{ + WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL); +} + +/** + * set_locked - Set the lock bit and own the lock + * @lock: Pointer to queued spinlock structure + * + * *,*,0 -> *,0,1 + */ +static __always_inline void set_locked(arena_spinlock_t __arena *lock) +{ + WRITE_ONCE(lock->locked, _Q_LOCKED_VAL); +} + +static __always_inline +u32 arena_fetch_set_pending_acquire(arena_spinlock_t __arena *lock) +{ + u32 old, new; + + old = atomic_read(&lock->val); + do { + new = old | _Q_PENDING_VAL; + /* + * These loops are not expected to stall, but we still need to + * prove to the verifier they will terminate eventually. + */ + cond_break_label(out); + } while (!atomic_try_cmpxchg_acquire(&lock->val, &old, new)); + + return old; +out: + bpf_printk("RUNTIME ERROR: %s unexpected cond_break exit!!!", __func__); + return old; +} + +/** + * arena_spin_trylock - try to acquire the queued spinlock + * @lock : Pointer to queued spinlock structure + * Return: 1 if lock acquired, 0 if failed + */ +static __always_inline int arena_spin_trylock(arena_spinlock_t __arena *lock) +{ + int val = atomic_read(&lock->val); + + if (unlikely(val)) + return 0; + + return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)); +} + +__noinline +int arena_spin_lock_slowpath(arena_spinlock_t __arena __arg_arena *lock, u32 val) +{ + struct arena_mcs_spinlock __arena *prev, *next, *node0, *node; + int ret = -ETIMEDOUT; + u32 old, tail; + int idx; + + /* + * Wait for in-progress pending->locked hand-overs with a bounded + * number of spins so that we guarantee forward progress. + * + * 0,1,0 -> 0,0,1 + */ + if (val == _Q_PENDING_VAL) { + int cnt = _Q_PENDING_LOOPS; + val = atomic_cond_read_relaxed_label(&lock->val, + (VAL != _Q_PENDING_VAL) || !cnt--, + release_err); + } + + /* + * If we observe any contention; queue. + */ + if (val & ~_Q_LOCKED_MASK) + goto queue; + + /* + * trylock || pending + * + * 0,0,* -> 0,1,* -> 0,0,1 pending, trylock + */ + val = arena_fetch_set_pending_acquire(lock); + + /* + * If we observe contention, there is a concurrent locker. + * + * Undo and queue; our setting of PENDING might have made the + * n,0,0 -> 0,0,0 transition fail and it will now be waiting + * on @next to become !NULL. + */ + if (unlikely(val & ~_Q_LOCKED_MASK)) { + + /* Undo PENDING if we set it. */ + if (!(val & _Q_PENDING_MASK)) + clear_pending(lock); + + goto queue; + } + + /* + * We're pending, wait for the owner to go away. + * + * 0,1,1 -> *,1,0 + * + * this wait loop must be a load-acquire such that we match the + * store-release that clears the locked bit and create lock + * sequentiality; this is because not all + * clear_pending_set_locked() implementations imply full + * barriers. + */ + if (val & _Q_LOCKED_MASK) + smp_cond_load_acquire_label(&lock->locked, !VAL, release_err); + + /* + * take ownership and clear the pending bit. + * + * 0,1,0 -> 0,0,1 + */ + clear_pending_set_locked(lock); + return 0; + + /* + * End of pending bit optimistic spinning and beginning of MCS + * queuing. 
+ */ +queue: + node0 = &(qnodes[bpf_get_smp_processor_id()])[0].mcs; + idx = node0->count++; + tail = encode_tail(bpf_get_smp_processor_id(), idx); + + /* + * 4 nodes are allocated based on the assumption that there will not be + * nested NMIs taking spinlocks. That may not be true in some + * architectures even though the chance of needing more than 4 nodes + * will still be extremely unlikely. When that happens, we simply return + * an error. Original qspinlock has a trylock fallback in this case. + */ + if (unlikely(idx >= _Q_MAX_NODES)) { + ret = -EBUSY; + goto release_node_err; + } + + node = grab_mcs_node(node0, idx); + + /* + * Ensure that we increment the head node->count before initialising + * the actual node. If the compiler is kind enough to reorder these + * stores, then an IRQ could overwrite our assignments. + */ + barrier(); + + node->locked = 0; + node->next = NULL; + + /* + * We touched a (possibly) cold cacheline in the per-cpu queue node; + * attempt the trylock once more in the hope someone let go while we + * weren't watching. + */ + if (arena_spin_trylock(lock)) + goto release; + + /* + * Ensure that the initialisation of @node is complete before we + * publish the updated tail via xchg_tail() and potentially link + * @node into the waitqueue via WRITE_ONCE(prev->next, node) below. + */ + smp_wmb(); + + /* + * Publish the updated tail. + * We have already touched the queueing cacheline; don't bother with + * pending stuff. + * + * p,*,* -> n,*,* + */ + old = xchg_tail(lock, tail); + next = NULL; + + /* + * if there was a previous node; link it and wait until reaching the + * head of the waitqueue. + */ + if (old & _Q_TAIL_MASK) { + prev = decode_tail(old); + + /* Link @node into the waitqueue. */ + WRITE_ONCE(prev->next, node); + + arch_mcs_spin_lock_contended_label(&node->locked, release_node_err); + + /* + * While waiting for the MCS lock, the next pointer may have + * been set by another lock waiter. We cannot prefetch here + * due to lack of equivalent instruction in BPF ISA. + */ + next = READ_ONCE(node->next); + } + + /* + * we're at the head of the waitqueue, wait for the owner & pending to + * go away. + * + * *,x,y -> *,0,0 + * + * this wait loop must use a load-acquire such that we match the + * store-release that clears the locked bit and create lock + * sequentiality; this is because the set_locked() function below + * does not imply a full barrier. + */ + val = atomic_cond_read_acquire_label(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK), + release_node_err); + + /* + * claim the lock: + * + * n,0,0 -> 0,0,1 : lock, uncontended + * *,*,0 -> *,*,1 : lock, contended + * + * If the queue head is the only one in the queue (lock value == tail) + * and nobody is pending, clear the tail code and grab the lock. + * Otherwise, we only need to grab the lock. + */ + + /* + * In the PV case we might already have _Q_LOCKED_VAL set, because + * of lock stealing; therefore we must also allow: + * + * n,0,1 -> 0,0,1 + * + * Note: at this point: (val & _Q_PENDING_MASK) == 0, because of the + * above wait condition, therefore any concurrent setting of + * PENDING will make the uncontended transition fail. + */ + if ((val & _Q_TAIL_MASK) == tail) { + if (atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL)) + goto release; /* No contention */ + } + + /* + * Either somebody is queued behind us or _Q_PENDING_VAL got set + * which will then detect the remaining tail and queue behind us + * ensuring we'll see a @next. 
+ */ + set_locked(lock); + + /* + * contended path; wait for next if not observed yet, release. + */ + if (!next) + next = smp_cond_load_relaxed_label(&node->next, (VAL), release_node_err); + + arch_mcs_spin_unlock_contended(&next->locked); + +release:; + /* + * release the node + * + * Doing a normal dec vs this_cpu_dec is fine. An upper context always + * decrements count it incremented before returning, thus we're fine. + * For contexts interrupting us, they either observe our dec or not. + * Just ensure the compiler doesn't reorder this statement, as a + * this_cpu_dec implicitly implied that. + */ + barrier(); + node0->count--; + return 0; +release_node_err: + barrier(); + node0->count--; + goto release_err; +release_err: + return ret; +} + +/** + * arena_spin_lock - acquire a queued spinlock + * @lock: Pointer to queued spinlock structure + * + * On error, returned value will be negative. + * On success, zero is returned. + * + * The return value _must_ be tested against zero for success, + * instead of checking it against negative, for passing the + * BPF verifier. + * + * The user should do: + * if (arena_spin_lock(...) != 0) // failure + * or + * if (arena_spin_lock(...) == 0) // success + * or + * if (arena_spin_lock(...)) // failure + * or + * if (!arena_spin_lock(...)) // success + * instead of: + * if (arena_spin_lock(...) < 0) // failure + * + * The return value can still be inspected later. + */ +static __always_inline int arena_spin_lock(arena_spinlock_t __arena *lock) +{ + int val = 0; + + if (CONFIG_NR_CPUS > 1024) + return -EOPNOTSUPP; + + bpf_preempt_disable(); + if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL))) + return 0; + + val = arena_spin_lock_slowpath(lock, val); + /* FIXME: bpf_assert_range(-MAX_ERRNO, 0) once we have it working for all cases. 
*/ + if (val) + bpf_preempt_enable(); + return val; +} + +/** + * arena_spin_unlock - release a queued spinlock + * @lock : Pointer to queued spinlock structure + */ +static __always_inline void arena_spin_unlock(arena_spinlock_t __arena *lock) +{ + /* + * unlock() needs release semantics: + */ + smp_store_release(&lock->locked, 0); + bpf_preempt_enable(); +} + +#define arena_spin_lock_irqsave(lock, flags) \ + ({ \ + int __ret; \ + bpf_local_irq_save(&(flags)); \ + __ret = arena_spin_lock((lock)); \ + if (__ret) \ + bpf_local_irq_restore(&(flags)); \ + (__ret); \ + }) + +#define arena_spin_unlock_irqrestore(lock, flags) \ + ({ \ + arena_spin_unlock((lock)); \ + bpf_local_irq_restore(&(flags)); \ + }) + +#endif + +#endif /* BPF_ARENA_SPIN_LOCK_H */ diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_map_elem.c b/tools/testing/selftests/bpf/progs/bpf_iter_map_elem.c new file mode 100644 index 000000000000..2f20485e0de3 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_map_elem.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include "vmlinux.h" +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +__u32 value_sum = 0; + +SEC("iter/bpf_map_elem") +int dump_bpf_map_values(struct bpf_iter__bpf_map_elem *ctx) +{ + __u32 value = 0; + + if (ctx->value == (void *)0) + return 0; + + bpf_probe_read_kernel(&value, sizeof(value), ctx->value); + value_sum += value; + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h index 863df7c0fdd0..c1cfd297aabf 100644 --- a/tools/testing/selftests/bpf/progs/bpf_misc.h +++ b/tools/testing/selftests/bpf/progs/bpf_misc.h @@ -83,9 +83,11 @@ * expect return value to match passed parameter: * - a decimal number * - a hexadecimal number, when starts from 0x - * - literal INT_MIN - * - literal POINTER_VALUE (see definition below) - * - literal TEST_DATA_LEN (see definition below) + * - a macro which expands to one of the above + * - literal _INT_MIN (expands to INT_MIN) + * In addition, two special macros are defined below: + * - POINTER_VALUE + * - TEST_DATA_LEN * __retval_unpriv Same, but load program in unprivileged mode. 
* * __description Text to be used instead of a program name for display @@ -125,8 +127,8 @@ #define __success_unpriv __attribute__((btf_decl_tag("comment:test_expect_success_unpriv"))) #define __log_level(lvl) __attribute__((btf_decl_tag("comment:test_log_level="#lvl))) #define __flag(flag) __attribute__((btf_decl_tag("comment:test_prog_flags="#flag))) -#define __retval(val) __attribute__((btf_decl_tag("comment:test_retval="#val))) -#define __retval_unpriv(val) __attribute__((btf_decl_tag("comment:test_retval_unpriv="#val))) +#define __retval(val) __attribute__((btf_decl_tag("comment:test_retval="XSTR(val)))) +#define __retval_unpriv(val) __attribute__((btf_decl_tag("comment:test_retval_unpriv="XSTR(val)))) #define __auxiliary __attribute__((btf_decl_tag("comment:test_auxiliary"))) #define __auxiliary_unpriv __attribute__((btf_decl_tag("comment:test_auxiliary_unpriv"))) #define __btf_path(path) __attribute__((btf_decl_tag("comment:test_btf_path=" path))) @@ -155,7 +157,7 @@ #define __imm_insn(name, expr) [name]"i"(*(long *)&(expr)) /* Magic constants used with __retval() */ -#define POINTER_VALUE 0xcafe4all +#define POINTER_VALUE 0xbadcafe #define TEST_DATA_LEN 64 #ifndef __used @@ -225,9 +227,19 @@ #define CAN_USE_BPF_ST #endif -#if __clang_major__ >= 18 && defined(ENABLE_ATOMICS_TESTS) && \ - (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86)) +#if __clang_major__ >= 18 && defined(ENABLE_ATOMICS_TESTS) && \ + (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \ + (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) || \ + (defined(__TARGET_ARCH_powerpc)) #define CAN_USE_LOAD_ACQ_STORE_REL #endif +#if defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) +#define SPEC_V1 +#endif + +#if defined(__TARGET_ARCH_x86) +#define SPEC_V4 +#endif + #endif diff --git a/tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c b/tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c index 69f81cb555ca..d93f68024cc6 100644 --- a/tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c +++ b/tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c @@ -57,15 +57,15 @@ int BPF_PROG(test_percpu_load, struct cgroup *cgrp, const char *path) SEC("tp_btf/cgroup_mkdir") int BPF_PROG(test_percpu_helper, struct cgroup *cgrp, const char *path) { - struct cgroup_rstat_cpu *rstat; + struct css_rstat_cpu *rstat; __u32 cpu; cpu = bpf_get_smp_processor_id(); - rstat = (struct cgroup_rstat_cpu *)bpf_per_cpu_ptr( + rstat = (struct css_rstat_cpu *)bpf_per_cpu_ptr( cgrp->self.rstat_cpu, cpu); if (rstat) { /* READ_ONCE */ - *(volatile int *)rstat; + *(volatile long *)rstat; } return 0; diff --git a/tools/testing/selftests/bpf/progs/cgroup_mprog.c b/tools/testing/selftests/bpf/progs/cgroup_mprog.c new file mode 100644 index 000000000000..6a0ea02c4de2 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/cgroup_mprog.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. 
*/ +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +SEC("cgroup/getsockopt") +int getsockopt_1(struct bpf_sockopt *ctx) +{ + return 1; +} + +SEC("cgroup/getsockopt") +int getsockopt_2(struct bpf_sockopt *ctx) +{ + return 1; +} + +SEC("cgroup/getsockopt") +int getsockopt_3(struct bpf_sockopt *ctx) +{ + return 1; +} + +SEC("cgroup/getsockopt") +int getsockopt_4(struct bpf_sockopt *ctx) +{ + return 1; +} diff --git a/tools/testing/selftests/bpf/progs/cgroup_read_xattr.c b/tools/testing/selftests/bpf/progs/cgroup_read_xattr.c new file mode 100644 index 000000000000..092db1d0435e --- /dev/null +++ b/tools/testing/selftests/bpf/progs/cgroup_read_xattr.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */ + +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> +#include "bpf_experimental.h" +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + +char value[16]; + +static __always_inline void read_xattr(struct cgroup *cgroup) +{ + struct bpf_dynptr value_ptr; + + bpf_dynptr_from_mem(value, sizeof(value), 0, &value_ptr); + bpf_cgroup_read_xattr(cgroup, "user.bpf_test", + &value_ptr); +} + +SEC("lsm.s/socket_connect") +__success +int BPF_PROG(trusted_cgroup_ptr_sleepable) +{ + u64 cgrp_id = bpf_get_current_cgroup_id(); + struct cgroup *cgrp; + + cgrp = bpf_cgroup_from_id(cgrp_id); + if (!cgrp) + return 0; + + read_xattr(cgrp); + bpf_cgroup_release(cgrp); + return 0; +} + +SEC("lsm/socket_connect") +__success +int BPF_PROG(trusted_cgroup_ptr_non_sleepable) +{ + u64 cgrp_id = bpf_get_current_cgroup_id(); + struct cgroup *cgrp; + + cgrp = bpf_cgroup_from_id(cgrp_id); + if (!cgrp) + return 0; + + read_xattr(cgrp); + bpf_cgroup_release(cgrp); + return 0; +} + +SEC("lsm/socket_connect") +__success +int BPF_PROG(use_css_iter_non_sleepable) +{ + u64 cgrp_id = bpf_get_current_cgroup_id(); + struct cgroup_subsys_state *css; + struct cgroup *cgrp; + + cgrp = bpf_cgroup_from_id(cgrp_id); + if (!cgrp) + return 0; + + bpf_for_each(css, css, &cgrp->self, BPF_CGROUP_ITER_ANCESTORS_UP) + read_xattr(css->cgroup); + + bpf_cgroup_release(cgrp); + return 0; +} + +SEC("lsm.s/socket_connect") +__failure __msg("expected an RCU CS") +int BPF_PROG(use_css_iter_sleepable_missing_rcu_lock) +{ + u64 cgrp_id = bpf_get_current_cgroup_id(); + struct cgroup_subsys_state *css; + struct cgroup *cgrp; + + cgrp = bpf_cgroup_from_id(cgrp_id); + if (!cgrp) + return 0; + + bpf_for_each(css, css, &cgrp->self, BPF_CGROUP_ITER_ANCESTORS_UP) + read_xattr(css->cgroup); + + bpf_cgroup_release(cgrp); + return 0; +} + +SEC("lsm.s/socket_connect") +__success +int BPF_PROG(use_css_iter_sleepable_with_rcu_lock) +{ + u64 cgrp_id = bpf_get_current_cgroup_id(); + struct cgroup_subsys_state *css; + struct cgroup *cgrp; + + bpf_rcu_read_lock(); + cgrp = bpf_cgroup_from_id(cgrp_id); + if (!cgrp) + goto out; + + bpf_for_each(css, css, &cgrp->self, BPF_CGROUP_ITER_ANCESTORS_UP) + read_xattr(css->cgroup); + + bpf_cgroup_release(cgrp); +out: + bpf_rcu_read_unlock(); + return 0; +} + +SEC("lsm/socket_connect") +__success +int BPF_PROG(use_bpf_cgroup_ancestor) +{ + u64 cgrp_id = bpf_get_current_cgroup_id(); + struct cgroup *cgrp, *ancestor; + + cgrp = bpf_cgroup_from_id(cgrp_id); + if (!cgrp) + return 0; + + ancestor = bpf_cgroup_ancestor(cgrp, 1); + if (!ancestor) + goto out; + + read_xattr(cgrp); + bpf_cgroup_release(ancestor); +out: + bpf_cgroup_release(cgrp); + return 0; 
+} + +SEC("cgroup/sendmsg4") +__success +int BPF_PROG(cgroup_skb) +{ + u64 cgrp_id = bpf_get_current_cgroup_id(); + struct cgroup *cgrp, *ancestor; + + cgrp = bpf_cgroup_from_id(cgrp_id); + if (!cgrp) + return 0; + + ancestor = bpf_cgroup_ancestor(cgrp, 1); + if (!ancestor) + goto out; + + read_xattr(cgrp); + bpf_cgroup_release(ancestor); +out: + bpf_cgroup_release(cgrp); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/compute_live_registers.c b/tools/testing/selftests/bpf/progs/compute_live_registers.c index f3d79aecbf93..6884ab99a421 100644 --- a/tools/testing/selftests/bpf/progs/compute_live_registers.c +++ b/tools/testing/selftests/bpf/progs/compute_live_registers.c @@ -240,6 +240,22 @@ __naked void if2(void) ::: __clobber_all); } +/* Verifier misses that r2 is alive if jset is not handled properly */ +SEC("socket") +__log_level(2) +__msg("2: 012....... (45) if r1 & 0x7 goto pc+1") +__naked void if3_jset_bug(void) +{ + asm volatile ( + "r0 = 1;" + "r2 = 2;" + "if r1 & 0x7 goto +1;" + "exit;" + "r0 = r2;" + "exit;" + ::: __clobber_all); +} + SEC("socket") __log_level(2) __msg("0: .......... (b7) r1 = 0") diff --git a/tools/testing/selftests/bpf/progs/dmabuf_iter.c b/tools/testing/selftests/bpf/progs/dmabuf_iter.c new file mode 100644 index 000000000000..13cdb11fdeb2 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/dmabuf_iter.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Google LLC */ +#include <vmlinux.h> +#include <bpf/bpf_core_read.h> +#include <bpf/bpf_helpers.h> + +/* From uapi/linux/dma-buf.h */ +#define DMA_BUF_NAME_LEN 32 + +char _license[] SEC("license") = "GPL"; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(key_size, DMA_BUF_NAME_LEN); + __type(value, bool); + __uint(max_entries, 5); +} testbuf_hash SEC(".maps"); + +/* + * Fields output by this iterator are delimited by newlines. Convert any + * newlines in user-provided printed strings to spaces. + */ +static void sanitize_string(char *src, size_t size) +{ + for (char *c = src; (size_t)(c - src) < size && *c; ++c) + if (*c == '\n') + *c = ' '; +} + +SEC("iter/dmabuf") +int dmabuf_collector(struct bpf_iter__dmabuf *ctx) +{ + const struct dma_buf *dmabuf = ctx->dmabuf; + struct seq_file *seq = ctx->meta->seq; + unsigned long inode = 0; + size_t size; + const char *pname, *exporter; + char name[DMA_BUF_NAME_LEN] = {'\0'}; + + if (!dmabuf) + return 0; + + if (BPF_CORE_READ_INTO(&inode, dmabuf, file, f_inode, i_ino) || + bpf_core_read(&size, sizeof(size), &dmabuf->size) || + bpf_core_read(&pname, sizeof(pname), &dmabuf->name) || + bpf_core_read(&exporter, sizeof(exporter), &dmabuf->exp_name)) + return 1; + + /* Buffers are not required to be named */ + if (pname) { + if (bpf_probe_read_kernel(name, sizeof(name), pname)) + return 1; + + /* Name strings can be provided by userspace */ + sanitize_string(name, sizeof(name)); + } + + BPF_SEQ_PRINTF(seq, "%lu\n%llu\n%s\n%s\n", inode, size, name, exporter); + return 0; +} + +SEC("syscall") +int iter_dmabuf_for_each(const void *ctx) +{ + struct dma_buf *d; + + bpf_for_each(dmabuf, d) { + char name[DMA_BUF_NAME_LEN]; + const char *pname; + bool *found; + long len; + int i; + + if (bpf_core_read(&pname, sizeof(pname), &d->name)) + return 1; + + /* Buffers are not required to be named */ + if (!pname) + continue; + + len = bpf_probe_read_kernel_str(name, sizeof(name), pname); + if (len < 0) + return 1; + + /* + * The entire name buffer is used as a map key. + * Zeroize any uninitialized trailing bytes after the NUL. 
+ */ + bpf_for(i, len, DMA_BUF_NAME_LEN) + name[i] = 0; + + found = bpf_map_lookup_elem(&testbuf_hash, name); + if (found) { + bool t = true; + + bpf_map_update_elem(&testbuf_hash, name, &t, BPF_EXIST); + } + } + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/dynptr_success.c b/tools/testing/selftests/bpf/progs/dynptr_success.c index e1fba28e4a86..8315273cb900 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_success.c +++ b/tools/testing/selftests/bpf/progs/dynptr_success.c @@ -9,6 +9,8 @@ #include "bpf_misc.h" #include "errno.h" +#define PAGE_SIZE_64K 65536 + char _license[] SEC("license") = "GPL"; int pid, err, val; @@ -611,11 +613,12 @@ int test_dynptr_copy_xdp(struct xdp_md *xdp) struct bpf_dynptr ptr_buf, ptr_xdp; char data[] = "qwertyuiopasdfghjkl"; char buf[32] = {'\0'}; - __u32 len = sizeof(data); + __u32 len = sizeof(data), xdp_data_size; int i, chunks = 200; /* ptr_xdp is backed by non-contiguous memory */ bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp); + xdp_data_size = bpf_dynptr_size(&ptr_xdp); bpf_ringbuf_reserve_dynptr(&ringbuf, len * chunks, 0, &ptr_buf); /* Destination dynptr is backed by non-contiguous memory */ @@ -673,10 +676,407 @@ int test_dynptr_copy_xdp(struct xdp_md *xdp) goto out; } - if (bpf_dynptr_copy(&ptr_xdp, 2000, &ptr_xdp, 0, len * chunks) != -E2BIG) + if (bpf_dynptr_copy(&ptr_xdp, xdp_data_size - 3000, &ptr_xdp, 0, len * chunks) != -E2BIG) err = 1; out: bpf_ringbuf_discard_dynptr(&ptr_buf, 0); return XDP_DROP; } + +char memset_zero_data[] = "data to be zeroed"; + +SEC("?tp/syscalls/sys_enter_nanosleep") +int test_dynptr_memset_zero(void *ctx) +{ + __u32 data_sz = sizeof(memset_zero_data); + char zeroes[32] = {'\0'}; + struct bpf_dynptr ptr; + + err = bpf_dynptr_from_mem(memset_zero_data, data_sz, 0, &ptr); + err = err ?: bpf_dynptr_memset(&ptr, 0, data_sz, 0); + err = err ?: bpf_memcmp(zeroes, memset_zero_data, data_sz); + + return 0; +} + +#define DYNPTR_MEMSET_VAL 42 + +char memset_notzero_data[] = "data to be overwritten"; + +SEC("?tp/syscalls/sys_enter_nanosleep") +int test_dynptr_memset_notzero(void *ctx) +{ + u32 data_sz = sizeof(memset_notzero_data); + struct bpf_dynptr ptr; + char expected[32]; + + __builtin_memset(expected, DYNPTR_MEMSET_VAL, data_sz); + + err = bpf_dynptr_from_mem(memset_notzero_data, data_sz, 0, &ptr); + err = err ?: bpf_dynptr_memset(&ptr, 0, data_sz, DYNPTR_MEMSET_VAL); + err = err ?: bpf_memcmp(expected, memset_notzero_data, data_sz); + + return 0; +} + +char memset_zero_offset_data[] = "data to be zeroed partially"; + +SEC("?tp/syscalls/sys_enter_nanosleep") +int test_dynptr_memset_zero_offset(void *ctx) +{ + char expected[] = "data to \0\0\0\0eroed partially"; + __u32 data_sz = sizeof(memset_zero_offset_data); + struct bpf_dynptr ptr; + + err = bpf_dynptr_from_mem(memset_zero_offset_data, data_sz, 0, &ptr); + err = err ?: bpf_dynptr_memset(&ptr, 8, 4, 0); + err = err ?: bpf_memcmp(expected, memset_zero_offset_data, data_sz); + + return 0; +} + +char memset_zero_adjusted_data[] = "data to be zeroed partially"; + +SEC("?tp/syscalls/sys_enter_nanosleep") +int test_dynptr_memset_zero_adjusted(void *ctx) +{ + char expected[] = "data\0\0\0\0be zeroed partially"; + __u32 data_sz = sizeof(memset_zero_adjusted_data); + struct bpf_dynptr ptr; + + err = bpf_dynptr_from_mem(memset_zero_adjusted_data, data_sz, 0, &ptr); + err = err ?: bpf_dynptr_adjust(&ptr, 4, 8); + err = err ?: bpf_dynptr_memset(&ptr, 0, bpf_dynptr_size(&ptr), 0); + err = err ?: bpf_memcmp(expected, memset_zero_adjusted_data, data_sz); + + return 0; +} + 
+char memset_overflow_data[] = "memset overflow data"; + +SEC("?tp/syscalls/sys_enter_nanosleep") +int test_dynptr_memset_overflow(void *ctx) +{ + __u32 data_sz = sizeof(memset_overflow_data); + struct bpf_dynptr ptr; + int ret; + + err = bpf_dynptr_from_mem(memset_overflow_data, data_sz, 0, &ptr); + ret = bpf_dynptr_memset(&ptr, 0, data_sz + 1, 0); + if (ret != -E2BIG) + err = 1; + + return 0; +} + +SEC("?tp/syscalls/sys_enter_nanosleep") +int test_dynptr_memset_overflow_offset(void *ctx) +{ + __u32 data_sz = sizeof(memset_overflow_data); + struct bpf_dynptr ptr; + int ret; + + err = bpf_dynptr_from_mem(memset_overflow_data, data_sz, 0, &ptr); + ret = bpf_dynptr_memset(&ptr, 1, data_sz, 0); + if (ret != -E2BIG) + err = 1; + + return 0; +} + +SEC("?cgroup_skb/egress") +int test_dynptr_memset_readonly(struct __sk_buff *skb) +{ + struct bpf_dynptr ptr; + int ret; + + err = bpf_dynptr_from_skb(skb, 0, &ptr); + + /* cgroup skbs are read only, memset should fail */ + ret = bpf_dynptr_memset(&ptr, 0, bpf_dynptr_size(&ptr), 0); + if (ret != -EINVAL) + err = 1; + + return 0; +} + +#define min_t(type, x, y) ({ \ + type __x = (x); \ + type __y = (y); \ + __x < __y ? __x : __y; }) + +SEC("xdp") +int test_dynptr_memset_xdp_chunks(struct xdp_md *xdp) +{ + u32 data_sz, chunk_sz, offset = 0; + const int max_chunks = 200; + struct bpf_dynptr ptr_xdp; + char expected_buf[32]; + char buf[32]; + int i; + + __builtin_memset(expected_buf, DYNPTR_MEMSET_VAL, sizeof(expected_buf)); + + /* ptr_xdp is backed by non-contiguous memory */ + bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp); + data_sz = bpf_dynptr_size(&ptr_xdp); + + err = bpf_dynptr_memset(&ptr_xdp, 0, data_sz, DYNPTR_MEMSET_VAL); + if (err) { + /* bpf_dynptr_memset() eventually called bpf_xdp_pointer() + * where if data_sz is greater than 0xffff, -EFAULT will be + * returned. For 64K page size, data_sz is greater than + * 64K, so error is expected and let us zero out error and + * return success. + */ + if (data_sz >= PAGE_SIZE_64K) + err = 0; + goto out; + } + + bpf_for(i, 0, max_chunks) { + offset = i * sizeof(buf); + if (offset >= data_sz) + goto out; + chunk_sz = min_t(u32, sizeof(buf), data_sz - offset); + err = bpf_dynptr_read(&buf, chunk_sz, &ptr_xdp, offset, 0); + if (err) + goto out; + err = bpf_memcmp(buf, expected_buf, sizeof(buf)); + if (err) + goto out; + } +out: + return XDP_DROP; +} + +void *user_ptr; +/* Contains the copy of the data pointed by user_ptr. + * Size 384 to make it not fit into a single kernel chunk when copying + * but less than the maximum bpf stack size (512). + */ +char expected_str[384]; +__u32 test_len[7] = {0/* placeholder */, 0, 1, 2, 255, 256, 257}; + +typedef int (*bpf_read_dynptr_fn_t)(struct bpf_dynptr *dptr, u32 off, + u32 size, const void *unsafe_ptr); + +/* Returns the offset just before the end of the maximum sized xdp fragment. + * Any write larger than 32 bytes will be split between 2 fragments. + */ +__u32 xdp_near_frag_end_offset(void) +{ + const __u32 headroom = 256; + const __u32 max_frag_size = __PAGE_SIZE - headroom - sizeof(struct skb_shared_info); + + /* 32 bytes before the approximate end of the fragment */ + return max_frag_size - 32; +} + +/* Use __always_inline on test_dynptr_probe[_str][_xdp]() and callbacks + * of type bpf_read_dynptr_fn_t to prevent compiler from generating + * indirect calls that make program fail to load with "unknown opcode" error. 
+ */ +static __always_inline void test_dynptr_probe(void *ptr, bpf_read_dynptr_fn_t bpf_read_dynptr_fn) +{ + char buf[sizeof(expected_str)]; + struct bpf_dynptr ptr_buf; + int i; + + if (bpf_get_current_pid_tgid() >> 32 != pid) + return; + + err = bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(buf), 0, &ptr_buf); + + bpf_for(i, 0, ARRAY_SIZE(test_len)) { + __u32 len = test_len[i]; + + err = err ?: bpf_read_dynptr_fn(&ptr_buf, 0, test_len[i], ptr); + if (len > sizeof(buf)) + break; + err = err ?: bpf_dynptr_read(&buf, len, &ptr_buf, 0, 0); + + if (err || bpf_memcmp(expected_str, buf, len)) + err = 1; + + /* Reset buffer and dynptr */ + __builtin_memset(buf, 0, sizeof(buf)); + err = err ?: bpf_dynptr_write(&ptr_buf, 0, buf, len, 0); + } + bpf_ringbuf_discard_dynptr(&ptr_buf, 0); +} + +static __always_inline void test_dynptr_probe_str(void *ptr, + bpf_read_dynptr_fn_t bpf_read_dynptr_fn) +{ + char buf[sizeof(expected_str)]; + struct bpf_dynptr ptr_buf; + __u32 cnt, i; + + if (bpf_get_current_pid_tgid() >> 32 != pid) + return; + + bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(buf), 0, &ptr_buf); + + bpf_for(i, 0, ARRAY_SIZE(test_len)) { + __u32 len = test_len[i]; + + cnt = bpf_read_dynptr_fn(&ptr_buf, 0, len, ptr); + if (cnt != len) + err = 1; + + if (len > sizeof(buf)) + continue; + err = err ?: bpf_dynptr_read(&buf, len, &ptr_buf, 0, 0); + if (!len) + continue; + if (err || bpf_memcmp(expected_str, buf, len - 1) || buf[len - 1] != '\0') + err = 1; + } + bpf_ringbuf_discard_dynptr(&ptr_buf, 0); +} + +static __always_inline void test_dynptr_probe_xdp(struct xdp_md *xdp, void *ptr, + bpf_read_dynptr_fn_t bpf_read_dynptr_fn) +{ + struct bpf_dynptr ptr_xdp; + char buf[sizeof(expected_str)]; + __u32 off, i; + + if (bpf_get_current_pid_tgid() >> 32 != pid) + return; + + off = xdp_near_frag_end_offset(); + err = bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp); + + bpf_for(i, 0, ARRAY_SIZE(test_len)) { + __u32 len = test_len[i]; + + err = err ?: bpf_read_dynptr_fn(&ptr_xdp, off, len, ptr); + if (len > sizeof(buf)) + continue; + err = err ?: bpf_dynptr_read(&buf, len, &ptr_xdp, off, 0); + if (err || bpf_memcmp(expected_str, buf, len)) + err = 1; + /* Reset buffer and dynptr */ + __builtin_memset(buf, 0, sizeof(buf)); + err = err ?: bpf_dynptr_write(&ptr_xdp, off, buf, len, 0); + } +} + +static __always_inline void test_dynptr_probe_str_xdp(struct xdp_md *xdp, void *ptr, + bpf_read_dynptr_fn_t bpf_read_dynptr_fn) +{ + struct bpf_dynptr ptr_xdp; + char buf[sizeof(expected_str)]; + __u32 cnt, off, i; + + if (bpf_get_current_pid_tgid() >> 32 != pid) + return; + + off = xdp_near_frag_end_offset(); + err = bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp); + if (err) + return; + + bpf_for(i, 0, ARRAY_SIZE(test_len)) { + __u32 len = test_len[i]; + + cnt = bpf_read_dynptr_fn(&ptr_xdp, off, len, ptr); + if (cnt != len) + err = 1; + + if (len > sizeof(buf)) + continue; + err = err ?: bpf_dynptr_read(&buf, len, &ptr_xdp, off, 0); + + if (!len) + continue; + if (err || bpf_memcmp(expected_str, buf, len - 1) || buf[len - 1] != '\0') + err = 1; + + __builtin_memset(buf, 0, sizeof(buf)); + err = err ?: bpf_dynptr_write(&ptr_xdp, off, buf, len, 0); + } +} + +SEC("xdp") +int test_probe_read_user_dynptr(struct xdp_md *xdp) +{ + test_dynptr_probe(user_ptr, bpf_probe_read_user_dynptr); + if (!err) + test_dynptr_probe_xdp(xdp, user_ptr, bpf_probe_read_user_dynptr); + return XDP_PASS; +} + +SEC("xdp") +int test_probe_read_kernel_dynptr(struct xdp_md *xdp) +{ + test_dynptr_probe(expected_str, bpf_probe_read_kernel_dynptr); + if (!err) + 
test_dynptr_probe_xdp(xdp, expected_str, bpf_probe_read_kernel_dynptr); + return XDP_PASS; +} + +SEC("xdp") +int test_probe_read_user_str_dynptr(struct xdp_md *xdp) +{ + test_dynptr_probe_str(user_ptr, bpf_probe_read_user_str_dynptr); + if (!err) + test_dynptr_probe_str_xdp(xdp, user_ptr, bpf_probe_read_user_str_dynptr); + return XDP_PASS; +} + +SEC("xdp") +int test_probe_read_kernel_str_dynptr(struct xdp_md *xdp) +{ + test_dynptr_probe_str(expected_str, bpf_probe_read_kernel_str_dynptr); + if (!err) + test_dynptr_probe_str_xdp(xdp, expected_str, bpf_probe_read_kernel_str_dynptr); + return XDP_PASS; +} + +SEC("fentry.s/" SYS_PREFIX "sys_nanosleep") +int test_copy_from_user_dynptr(void *ctx) +{ + test_dynptr_probe(user_ptr, bpf_copy_from_user_dynptr); + return 0; +} + +SEC("fentry.s/" SYS_PREFIX "sys_nanosleep") +int test_copy_from_user_str_dynptr(void *ctx) +{ + test_dynptr_probe_str(user_ptr, bpf_copy_from_user_str_dynptr); + return 0; +} + +static int bpf_copy_data_from_user_task(struct bpf_dynptr *dptr, u32 off, + u32 size, const void *unsafe_ptr) +{ + struct task_struct *task = bpf_get_current_task_btf(); + + return bpf_copy_from_user_task_dynptr(dptr, off, size, unsafe_ptr, task); +} + +static int bpf_copy_data_from_user_task_str(struct bpf_dynptr *dptr, u32 off, + u32 size, const void *unsafe_ptr) +{ + struct task_struct *task = bpf_get_current_task_btf(); + + return bpf_copy_from_user_task_str_dynptr(dptr, off, size, unsafe_ptr, task); +} + +SEC("fentry.s/" SYS_PREFIX "sys_nanosleep") +int test_copy_from_user_task_dynptr(void *ctx) +{ + test_dynptr_probe(user_ptr, bpf_copy_data_from_user_task); + return 0; +} + +SEC("fentry.s/" SYS_PREFIX "sys_nanosleep") +int test_copy_from_user_task_str_dynptr(void *ctx) +{ + test_dynptr_probe_str(user_ptr, bpf_copy_data_from_user_task_str); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/fd_htab_lookup.c b/tools/testing/selftests/bpf/progs/fd_htab_lookup.c new file mode 100644 index 000000000000..a4a9e1db626f --- /dev/null +++ b/tools/testing/selftests/bpf/progs/fd_htab_lookup.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2025. 
Huawei Technologies Co., Ltd */ +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +struct inner_map_type { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(key_size, 4); + __uint(value_size, 4); + __uint(max_entries, 1); +} inner_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS); + __uint(max_entries, 64); + __type(key, int); + __type(value, int); + __array(values, struct inner_map_type); +} outer_map SEC(".maps") = { + .values = { + [0] = &inner_map, + }, +}; diff --git a/tools/testing/selftests/bpf/progs/fexit_noreturns.c b/tools/testing/selftests/bpf/progs/fexit_noreturns.c deleted file mode 100644 index 54654539f550..000000000000 --- a/tools/testing/selftests/bpf/progs/fexit_noreturns.c +++ /dev/null @@ -1,15 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -#include <linux/bpf.h> -#include <bpf/bpf_helpers.h> -#include <bpf/bpf_tracing.h> -#include "bpf_misc.h" - -char _license[] SEC("license") = "GPL"; - -SEC("fexit/do_exit") -__failure __msg("Attaching fexit/fmod_ret to __noreturn functions is rejected.") -int BPF_PROG(noreturns) -{ - return 0; -} diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c index 427b72954b87..7dd92a303bf6 100644 --- a/tools/testing/selftests/bpf/progs/iters.c +++ b/tools/testing/selftests/bpf/progs/iters.c @@ -7,8 +7,6 @@ #include "bpf_misc.h" #include "bpf_compiler.h" -#define unlikely(x) __builtin_expect(!!(x), 0) - static volatile int zero = 0; int my_pid; @@ -1651,4 +1649,281 @@ int clean_live_states(const void *ctx) return 0; } +SEC("?raw_tp") +__flag(BPF_F_TEST_STATE_FREQ) +__failure __msg("misaligned stack access off 0+-31+0 size 8") +__naked int absent_mark_in_the_middle_state(void) +{ + /* This is equivalent to C program below. + * + * r8 = bpf_get_prandom_u32(); + * r6 = -32; + * bpf_iter_num_new(&fp[-8], 0, 10); + * if (unlikely(bpf_get_prandom_u32())) + * r6 = -31; + * while (bpf_iter_num_next(&fp[-8])) { + * if (unlikely(bpf_get_prandom_u32())) + * *(fp + r6) = 7; + * } + * bpf_iter_num_destroy(&fp[-8]) + * return 0 + */ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "r8 = r0;" + "r7 = 0;" + "r6 = -32;" + "r0 = 0;" + "*(u64 *)(r10 - 16) = r0;" + "r1 = r10;" + "r1 += -8;" + "r2 = 0;" + "r3 = 10;" + "call %[bpf_iter_num_new];" + "call %[bpf_get_prandom_u32];" + "if r0 == r8 goto change_r6_%=;" + "loop_%=:" + "call noop;" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_next];" + "if r0 == 0 goto loop_end_%=;" + "call %[bpf_get_prandom_u32];" + "if r0 == r8 goto use_r6_%=;" + "goto loop_%=;" + "loop_end_%=:" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_destroy];" + "r0 = 0;" + "exit;" + "use_r6_%=:" + "r0 = r10;" + "r0 += r6;" + "r1 = 7;" + "*(u64 *)(r0 + 0) = r1;" + "goto loop_%=;" + "change_r6_%=:" + "r6 = -31;" + "goto loop_%=;" + : + : __imm(bpf_iter_num_new), + __imm(bpf_iter_num_next), + __imm(bpf_iter_num_destroy), + __imm(bpf_get_prandom_u32) + : __clobber_all + ); +} + +__used __naked +static int noop(void) +{ + asm volatile ( + "r0 = 0;" + "exit;" + ); +} + +SEC("?raw_tp") +__flag(BPF_F_TEST_STATE_FREQ) +__failure __msg("misaligned stack access off 0+-31+0 size 8") +__naked int absent_mark_in_the_middle_state2(void) +{ + /* This is equivalent to C program below. 
+ * + * r8 = bpf_get_prandom_u32(); + * r6 = -32; + * bpf_iter_num_new(&fp[-8], 0, 10); + * if (unlikely(bpf_get_prandom_u32())) { + * r6 = -31; + * jump_into_loop: + * goto +0; + * goto loop; + * } + * if (unlikely(bpf_get_prandom_u32())) + * goto jump_into_loop; + * loop: + * while (bpf_iter_num_next(&fp[-8])) { + * if (unlikely(bpf_get_prandom_u32())) + * *(fp + r6) = 7; + * } + * bpf_iter_num_destroy(&fp[-8]) + * return 0 + */ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "r8 = r0;" + "r7 = 0;" + "r6 = -32;" + "r0 = 0;" + "*(u64 *)(r10 - 16) = r0;" + "r1 = r10;" + "r1 += -8;" + "r2 = 0;" + "r3 = 10;" + "call %[bpf_iter_num_new];" + "call %[bpf_get_prandom_u32];" + "if r0 == r8 goto change_r6_%=;" + "call %[bpf_get_prandom_u32];" + "if r0 == r8 goto jump_into_loop_%=;" + "loop_%=:" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_next];" + "if r0 == 0 goto loop_end_%=;" + "call %[bpf_get_prandom_u32];" + "if r0 == r8 goto use_r6_%=;" + "goto loop_%=;" + "loop_end_%=:" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_destroy];" + "r0 = 0;" + "exit;" + "use_r6_%=:" + "r0 = r10;" + "r0 += r6;" + "r1 = 7;" + "*(u64 *)(r0 + 0) = r1;" + "goto loop_%=;" + "change_r6_%=:" + "r6 = -31;" + "jump_into_loop_%=: " + "goto +0;" + "goto loop_%=;" + : + : __imm(bpf_iter_num_new), + __imm(bpf_iter_num_next), + __imm(bpf_iter_num_destroy), + __imm(bpf_get_prandom_u32) + : __clobber_all + ); +} + +SEC("?raw_tp") +__flag(BPF_F_TEST_STATE_FREQ) +__failure __msg("misaligned stack access off 0+-31+0 size 8") +__naked int absent_mark_in_the_middle_state3(void) +{ + /* + * bpf_iter_num_new(&fp[-8], 0, 10) + * loop1(-32, &fp[-8]) + * loop1_wrapper(&fp[-8]) + * bpf_iter_num_destroy(&fp[-8]) + */ + asm volatile ( + "r1 = r10;" + "r1 += -8;" + "r2 = 0;" + "r3 = 10;" + "call %[bpf_iter_num_new];" + /* call #1 */ + "r1 = -32;" + "r2 = r10;" + "r2 += -8;" + "call loop1;" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_destroy];" + /* call #2 */ + "r1 = r10;" + "r1 += -8;" + "r2 = 0;" + "r3 = 10;" + "call %[bpf_iter_num_new];" + "r1 = r10;" + "r1 += -8;" + "call loop1_wrapper;" + /* return */ + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_destroy];" + "r0 = 0;" + "exit;" + : + : __imm(bpf_iter_num_new), + __imm(bpf_iter_num_destroy), + __imm(bpf_get_prandom_u32) + : __clobber_all + ); +} + +__used __naked +static int loop1(void) +{ + /* + * int loop1(num, iter) { + * r6 = num; + * r7 = iter; + * while (bpf_iter_num_next(r7)) { + * if (unlikely(bpf_get_prandom_u32())) + * *(fp + r6) = 7; + * } + * return 0 + * } + */ + asm volatile ( + "r6 = r1;" + "r7 = r2;" + "call %[bpf_get_prandom_u32];" + "r8 = r0;" + "loop_%=:" + "r1 = r7;" + "call %[bpf_iter_num_next];" + "if r0 == 0 goto loop_end_%=;" + "call %[bpf_get_prandom_u32];" + "if r0 == r8 goto use_r6_%=;" + "goto loop_%=;" + "loop_end_%=:" + "r0 = 0;" + "exit;" + "use_r6_%=:" + "r0 = r10;" + "r0 += r6;" + "r1 = 7;" + "*(u64 *)(r0 + 0) = r1;" + "goto loop_%=;" + : + : __imm(bpf_iter_num_next), + __imm(bpf_get_prandom_u32) + : __clobber_all + ); +} + +__used __naked +static int loop1_wrapper(void) +{ + /* + * int loop1_wrapper(iter) { + * r6 = -32; + * r7 = iter; + * if (unlikely(bpf_get_prandom_u32())) + * r6 = -31; + * loop1(r6, r7); + * return 0; + * } + */ + asm volatile ( + "r6 = -32;" + "r7 = r1;" + "call %[bpf_get_prandom_u32];" + "r8 = r0;" + "call %[bpf_get_prandom_u32];" + "if r0 == r8 goto change_r6_%=;" + "loop_%=:" + "r1 = r6;" + "r2 = r7;" + "call loop1;" + "r0 = 0;" + "exit;" + "change_r6_%=:" + "r6 = -31;" + "goto loop_%=;" + : + : 
__imm(bpf_iter_num_next), + __imm(bpf_get_prandom_u32) + : __clobber_all + ); +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/linked_list_peek.c b/tools/testing/selftests/bpf/progs/linked_list_peek.c new file mode 100644 index 000000000000..264e81bfb287 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/linked_list_peek.c @@ -0,0 +1,113 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */ + +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" +#include "bpf_experimental.h" + +struct node_data { + struct bpf_list_node l; + int key; +}; + +#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8))) +private(A) struct bpf_spin_lock glock; +private(A) struct bpf_list_head ghead __contains(node_data, l); + +#define list_entry(ptr, type, member) container_of(ptr, type, member) +#define NR_NODES 16 + +int zero = 0; + +SEC("syscall") +__retval(0) +long list_peek(void *ctx) +{ + struct bpf_list_node *l_n; + struct node_data *n; + int i, err = 0; + + bpf_spin_lock(&glock); + l_n = bpf_list_front(&ghead); + bpf_spin_unlock(&glock); + if (l_n) + return __LINE__; + + bpf_spin_lock(&glock); + l_n = bpf_list_back(&ghead); + bpf_spin_unlock(&glock); + if (l_n) + return __LINE__; + + for (i = zero; i < NR_NODES && can_loop; i++) { + n = bpf_obj_new(typeof(*n)); + if (!n) + return __LINE__; + n->key = i; + bpf_spin_lock(&glock); + bpf_list_push_back(&ghead, &n->l); + bpf_spin_unlock(&glock); + } + + bpf_spin_lock(&glock); + + l_n = bpf_list_front(&ghead); + if (!l_n) { + err = __LINE__; + goto done; + } + + n = list_entry(l_n, struct node_data, l); + if (n->key != 0) { + err = __LINE__; + goto done; + } + + l_n = bpf_list_back(&ghead); + if (!l_n) { + err = __LINE__; + goto done; + } + + n = list_entry(l_n, struct node_data, l); + if (n->key != NR_NODES - 1) { + err = __LINE__; + goto done; + } + +done: + bpf_spin_unlock(&glock); + return err; +} + +#define TEST_FB(op, dolock) \ +SEC("syscall") \ +__failure __msg(MSG) \ +long test_##op##_spinlock_##dolock(void *ctx) \ +{ \ + struct bpf_list_node *l_n; \ + __u64 jiffies = 0; \ + \ + if (dolock) \ + bpf_spin_lock(&glock); \ + l_n = bpf_list_##op(&ghead); \ + if (l_n) \ + jiffies = bpf_jiffies64(); \ + if (dolock) \ + bpf_spin_unlock(&glock); \ + \ + return !!jiffies; \ +} + +#define MSG "call bpf_list_{{(front|back).+}}; R0{{(_w)?}}=ptr_or_null_node_data(id={{[0-9]+}},non_own_ref" +TEST_FB(front, true) +TEST_FB(back, true) +#undef MSG + +#define MSG "bpf_spin_lock at off=0 must be held for bpf_list_head" +TEST_FB(front, false) +TEST_FB(back, false) +#undef MSG + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/mem_rdonly_untrusted.c b/tools/testing/selftests/bpf/progs/mem_rdonly_untrusted.c new file mode 100644 index 000000000000..4f94c971ae86 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/mem_rdonly_untrusted.c @@ -0,0 +1,229 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <vmlinux.h> +#include <bpf/bpf_core_read.h> +#include "bpf_misc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" + +SEC("tp_btf/sys_enter") +__success +__log_level(2) +__msg("r8 = *(u64 *)(r7 +0) ; R7_w=ptr_nameidata(off={{[0-9]+}}) R8_w=rdonly_untrusted_mem(sz=0)") +__msg("r9 = *(u8 *)(r8 +0) ; R8_w=rdonly_untrusted_mem(sz=0) R9_w=scalar") +int btf_id_to_ptr_mem(void *ctx) +{ + struct task_struct *task; + struct nameidata *idata; + u64 ret, off; + + task = bpf_get_current_task_btf(); + idata = 
task->nameidata; + off = bpf_core_field_offset(struct nameidata, pathname); + /* + * asm block to have reliable match target for __msg, equivalent of: + * ret = task->nameidata->pathname[0]; + */ + asm volatile ( + "r7 = %[idata];" + "r7 += %[off];" + "r8 = *(u64 *)(r7 + 0);" + "r9 = *(u8 *)(r8 + 0);" + "%[ret] = r9;" + : [ret]"=r"(ret) + : [idata]"r"(idata), + [off]"r"(off) + : "r7", "r8", "r9"); + return ret; +} + +SEC("socket") +__success +__retval(0) +int ldx_is_ok_bad_addr(void *ctx) +{ + char *p; + + if (!bpf_core_enum_value_exists(enum bpf_features, BPF_FEAT_RDONLY_CAST_TO_VOID)) + return 42; + + p = bpf_rdonly_cast(0, 0); + return p[0x7fff]; +} + +SEC("socket") +__success +__retval(1) +int ldx_is_ok_good_addr(void *ctx) +{ + int v, *p; + + v = 1; + p = bpf_rdonly_cast(&v, 0); + return *p; +} + +SEC("socket") +__success +int offset_not_tracked(void *ctx) +{ + int *p, i, s; + + p = bpf_rdonly_cast(0, 0); + s = 0; + bpf_for(i, 0, 1000 * 1000 * 1000) { + p++; + s += *p; + } + return s; +} + +SEC("socket") +__failure +__msg("cannot write into rdonly_untrusted_mem") +int stx_not_ok(void *ctx) +{ + int v, *p; + + v = 1; + p = bpf_rdonly_cast(&v, 0); + *p = 1; + return 0; +} + +SEC("socket") +__failure +__msg("cannot write into rdonly_untrusted_mem") +int atomic_not_ok(void *ctx) +{ + int v, *p; + + v = 1; + p = bpf_rdonly_cast(&v, 0); + __sync_fetch_and_add(p, 1); + return 0; +} + +SEC("socket") +__failure +__msg("cannot write into rdonly_untrusted_mem") +int atomic_rmw_not_ok(void *ctx) +{ + long v, *p; + + v = 1; + p = bpf_rdonly_cast(&v, 0); + return __sync_val_compare_and_swap(p, 0, 42); +} + +SEC("socket") +__failure +__msg("invalid access to memory, mem_size=0 off=0 size=4") +__msg("R1 min value is outside of the allowed memory range") +int kfunc_param_not_ok(void *ctx) +{ + int *p; + + p = bpf_rdonly_cast(0, 0); + bpf_kfunc_trusted_num_test(p); + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +__failure +__msg("R1 type=rdonly_untrusted_mem expected=") +int helper_param_not_ok(void *ctx) +{ + char *p; + + p = bpf_rdonly_cast(0, 0); + /* + * Any helper with ARG_CONST_SIZE_OR_ZERO constraint will do, + * the most permissive constraint + */ + bpf_copy_from_user(p, 0, (void *)42); + return 0; +} + +static __noinline u64 *get_some_addr(void) +{ + if (bpf_get_prandom_u32()) + return bpf_rdonly_cast(0, bpf_core_type_id_kernel(struct sock)); + else + return bpf_rdonly_cast(0, 0); +} + +SEC("socket") +__success +__retval(0) +int mixed_mem_type(void *ctx) +{ + u64 *p; + + /* Try to avoid compiler hoisting load to if branches by using __noinline func. 
*/ + p = get_some_addr(); + return *p; +} + +__attribute__((__aligned__(8))) +u8 global[] = { + 0x11, 0x22, 0x33, 0x44, + 0x55, 0x66, 0x77, 0x88, + 0x99 +}; + +__always_inline +static u64 combine(void *p) +{ + u64 acc; + + acc = 0; +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + acc |= (*(u64 *)p >> 56) << 24; + acc |= (*(u32 *)p >> 24) << 16; + acc |= (*(u16 *)p >> 8) << 8; + acc |= *(u8 *)p; +#else + acc |= (*(u64 *)p & 0xff) << 24; + acc |= (*(u32 *)p & 0xff) << 16; + acc |= (*(u16 *)p & 0xff) << 8; + acc |= *(u8 *)p; +#endif + return acc; +} + +SEC("socket") +__retval(0x88442211) +int diff_size_access(void *ctx) +{ + return combine(bpf_rdonly_cast(&global, 0)); +} + +SEC("socket") +__retval(0x99553322) +int misaligned_access(void *ctx) +{ + return combine(bpf_rdonly_cast(&global, 0) + 1); +} + +__weak int return_one(void) +{ + return 1; +} + +SEC("socket") +__success +__retval(1) +int null_check(void *ctx) +{ + int *p; + + p = bpf_rdonly_cast(0, 0); + if (p == 0) + /* make this a function call to avoid compiler + * moving r0 assignment before check. + */ + return return_one(); + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/prepare.c b/tools/testing/selftests/bpf/progs/prepare.c index 1f1dd547e4ee..cfc1f48e0d28 100644 --- a/tools/testing/selftests/bpf/progs/prepare.c +++ b/tools/testing/selftests/bpf/progs/prepare.c @@ -2,7 +2,6 @@ /* Copyright (c) 2025 Meta */ #include <vmlinux.h> #include <bpf/bpf_helpers.h> -//#include <bpf/bpf_tracing.h> char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/raw_tp_null.c b/tools/testing/selftests/bpf/progs/raw_tp_null.c index 5927054b6dd9..efa416f53968 100644 --- a/tools/testing/selftests/bpf/progs/raw_tp_null.c +++ b/tools/testing/selftests/bpf/progs/raw_tp_null.c @@ -10,7 +10,7 @@ char _license[] SEC("license") = "GPL"; int tid; int i; -SEC("tp_btf/bpf_testmod_test_raw_tp_null") +SEC("tp_btf/bpf_testmod_test_raw_tp_null_tp") int BPF_PROG(test_raw_tp_null, struct sk_buff *skb) { struct task_struct *task = bpf_get_current_task_btf(); diff --git a/tools/testing/selftests/bpf/progs/raw_tp_null_fail.c b/tools/testing/selftests/bpf/progs/raw_tp_null_fail.c index 38d669957bf1..0d58114a4955 100644 --- a/tools/testing/selftests/bpf/progs/raw_tp_null_fail.c +++ b/tools/testing/selftests/bpf/progs/raw_tp_null_fail.c @@ -8,7 +8,7 @@ char _license[] SEC("license") = "GPL"; /* Ensure module parameter has PTR_MAYBE_NULL */ -SEC("tp_btf/bpf_testmod_test_raw_tp_null") +SEC("tp_btf/bpf_testmod_test_raw_tp_null_tp") __failure __msg("R1 invalid mem access 'trusted_ptr_or_null_'") int test_raw_tp_null_bpf_testmod_test_raw_tp_null_arg_1(void *ctx) { asm volatile("r1 = *(u64 *)(r1 +0); r1 = *(u64 *)(r1 +0);" ::: __clobber_all); diff --git a/tools/testing/selftests/bpf/progs/rbtree.c b/tools/testing/selftests/bpf/progs/rbtree.c index a3620c15c136..49fe93d7e059 100644 --- a/tools/testing/selftests/bpf/progs/rbtree.c +++ b/tools/testing/selftests/bpf/progs/rbtree.c @@ -61,19 +61,19 @@ static long __add_three(struct bpf_rb_root *root, struct bpf_spin_lock *lock) } m->key = 1; - bpf_spin_lock(&glock); - bpf_rbtree_add(&groot, &n->node, less); - bpf_rbtree_add(&groot, &m->node, less); - bpf_spin_unlock(&glock); + bpf_spin_lock(lock); + bpf_rbtree_add(root, &n->node, less); + bpf_rbtree_add(root, &m->node, less); + bpf_spin_unlock(lock); n = bpf_obj_new(typeof(*n)); if (!n) return 3; n->key = 3; - bpf_spin_lock(&glock); - bpf_rbtree_add(&groot, &n->node, less); - 
bpf_spin_unlock(&glock); + bpf_spin_lock(lock); + bpf_rbtree_add(root, &n->node, less); + bpf_spin_unlock(lock); return 0; } diff --git a/tools/testing/selftests/bpf/progs/rbtree_fail.c b/tools/testing/selftests/bpf/progs/rbtree_fail.c index dbd5eee8e25e..4acb6af2dfe3 100644 --- a/tools/testing/selftests/bpf/progs/rbtree_fail.c +++ b/tools/testing/selftests/bpf/progs/rbtree_fail.c @@ -69,11 +69,11 @@ long rbtree_api_nolock_first(void *ctx) } SEC("?tc") -__failure __msg("rbtree_remove node input must be non-owning ref") +__retval(0) long rbtree_api_remove_unadded_node(void *ctx) { struct node_data *n, *m; - struct bpf_rb_node *res; + struct bpf_rb_node *res_n, *res_m; n = bpf_obj_new(typeof(*n)); if (!n) @@ -88,19 +88,20 @@ long rbtree_api_remove_unadded_node(void *ctx) bpf_spin_lock(&glock); bpf_rbtree_add(&groot, &n->node, less); - /* This remove should pass verifier */ - res = bpf_rbtree_remove(&groot, &n->node); - n = container_of(res, struct node_data, node); + res_n = bpf_rbtree_remove(&groot, &n->node); - /* This remove shouldn't, m isn't in an rbtree */ - res = bpf_rbtree_remove(&groot, &m->node); - m = container_of(res, struct node_data, node); + res_m = bpf_rbtree_remove(&groot, &m->node); bpf_spin_unlock(&glock); - if (n) - bpf_obj_drop(n); - if (m) - bpf_obj_drop(m); + bpf_obj_drop(m); + if (res_n) + bpf_obj_drop(container_of(res_n, struct node_data, node)); + if (res_m) { + bpf_obj_drop(container_of(res_m, struct node_data, node)); + /* m was not added to the rbtree */ + return 2; + } + return 0; } @@ -178,7 +179,7 @@ err_out: } SEC("?tc") -__failure __msg("rbtree_remove node input must be non-owning ref") +__failure __msg("bpf_rbtree_remove can only take non-owning or refcounted bpf_rb_node pointer") long rbtree_api_add_release_unlock_escape(void *ctx) { struct node_data *n; @@ -202,7 +203,7 @@ long rbtree_api_add_release_unlock_escape(void *ctx) } SEC("?tc") -__failure __msg("rbtree_remove node input must be non-owning ref") +__failure __msg("bpf_rbtree_remove can only take non-owning or refcounted bpf_rb_node pointer") long rbtree_api_first_release_unlock_escape(void *ctx) { struct bpf_rb_node *res; diff --git a/tools/testing/selftests/bpf/progs/rbtree_search.c b/tools/testing/selftests/bpf/progs/rbtree_search.c new file mode 100644 index 000000000000..098ef970fac1 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/rbtree_search.c @@ -0,0 +1,206 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */ + +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" +#include "bpf_experimental.h" + +struct node_data { + struct bpf_refcount ref; + struct bpf_rb_node r0; + struct bpf_rb_node r1; + int key0; + int key1; +}; + +#define private(name) SEC(".data." 
#name) __hidden __attribute__((aligned(8))) +private(A) struct bpf_spin_lock glock0; +private(A) struct bpf_rb_root groot0 __contains(node_data, r0); + +private(B) struct bpf_spin_lock glock1; +private(B) struct bpf_rb_root groot1 __contains(node_data, r1); + +#define rb_entry(ptr, type, member) container_of(ptr, type, member) +#define NR_NODES 16 + +int zero = 0; + +static bool less0(struct bpf_rb_node *a, const struct bpf_rb_node *b) +{ + struct node_data *node_a; + struct node_data *node_b; + + node_a = rb_entry(a, struct node_data, r0); + node_b = rb_entry(b, struct node_data, r0); + + return node_a->key0 < node_b->key0; +} + +static bool less1(struct bpf_rb_node *a, const struct bpf_rb_node *b) +{ + struct node_data *node_a; + struct node_data *node_b; + + node_a = rb_entry(a, struct node_data, r1); + node_b = rb_entry(b, struct node_data, r1); + + return node_a->key1 < node_b->key1; +} + +SEC("syscall") +__retval(0) +long rbtree_search(void *ctx) +{ + struct bpf_rb_node *rb_n, *rb_m, *gc_ns[NR_NODES]; + long lookup_key = NR_NODES / 2; + struct node_data *n, *m; + int i, nr_gc = 0; + + for (i = zero; i < NR_NODES && can_loop; i++) { + n = bpf_obj_new(typeof(*n)); + if (!n) + return __LINE__; + + m = bpf_refcount_acquire(n); + + n->key0 = i; + m->key1 = i; + + bpf_spin_lock(&glock0); + bpf_rbtree_add(&groot0, &n->r0, less0); + bpf_spin_unlock(&glock0); + + bpf_spin_lock(&glock1); + bpf_rbtree_add(&groot1, &m->r1, less1); + bpf_spin_unlock(&glock1); + } + + n = NULL; + bpf_spin_lock(&glock0); + rb_n = bpf_rbtree_root(&groot0); + while (can_loop) { + if (!rb_n) { + bpf_spin_unlock(&glock0); + return __LINE__; + } + + n = rb_entry(rb_n, struct node_data, r0); + if (lookup_key == n->key0) + break; + if (nr_gc < NR_NODES) + gc_ns[nr_gc++] = rb_n; + if (lookup_key < n->key0) + rb_n = bpf_rbtree_left(&groot0, rb_n); + else + rb_n = bpf_rbtree_right(&groot0, rb_n); + } + + if (!n || lookup_key != n->key0) { + bpf_spin_unlock(&glock0); + return __LINE__; + } + + for (i = 0; i < nr_gc; i++) { + rb_n = gc_ns[i]; + gc_ns[i] = bpf_rbtree_remove(&groot0, rb_n); + } + + m = bpf_refcount_acquire(n); + bpf_spin_unlock(&glock0); + + for (i = 0; i < nr_gc; i++) { + rb_n = gc_ns[i]; + if (rb_n) { + n = rb_entry(rb_n, struct node_data, r0); + bpf_obj_drop(n); + } + } + + if (!m) + return __LINE__; + + bpf_spin_lock(&glock1); + rb_m = bpf_rbtree_remove(&groot1, &m->r1); + bpf_spin_unlock(&glock1); + bpf_obj_drop(m); + if (!rb_m) + return __LINE__; + bpf_obj_drop(rb_entry(rb_m, struct node_data, r1)); + + return 0; +} + +#define TEST_ROOT(dolock) \ +SEC("syscall") \ +__failure __msg(MSG) \ +long test_root_spinlock_##dolock(void *ctx) \ +{ \ + struct bpf_rb_node *rb_n; \ + __u64 jiffies = 0; \ + \ + if (dolock) \ + bpf_spin_lock(&glock0); \ + rb_n = bpf_rbtree_root(&groot0); \ + if (rb_n) \ + jiffies = bpf_jiffies64(); \ + if (dolock) \ + bpf_spin_unlock(&glock0); \ + \ + return !!jiffies; \ +} + +#define TEST_LR(op, dolock) \ +SEC("syscall") \ +__failure __msg(MSG) \ +long test_##op##_spinlock_##dolock(void *ctx) \ +{ \ + struct bpf_rb_node *rb_n; \ + struct node_data *n; \ + __u64 jiffies = 0; \ + \ + bpf_spin_lock(&glock0); \ + rb_n = bpf_rbtree_root(&groot0); \ + if (!rb_n) { \ + bpf_spin_unlock(&glock0); \ + return 1; \ + } \ + n = rb_entry(rb_n, struct node_data, r0); \ + n = bpf_refcount_acquire(n); \ + bpf_spin_unlock(&glock0); \ + if (!n) \ + return 1; \ + \ + if (dolock) \ + bpf_spin_lock(&glock0); \ + rb_n = bpf_rbtree_##op(&groot0, &n->r0); \ + if (rb_n) \ + jiffies = bpf_jiffies64(); \ + if 
(dolock) \ + bpf_spin_unlock(&glock0); \ + \ + return !!jiffies; \ +} + +/* + * Use a separate MSG macro instead of passing to TEST_XXX(..., MSG) + * to ensure the message itself is not in the bpf prog lineinfo + * which the verifier includes in its log. + * Otherwise, the test_loader will incorrectly match the prog lineinfo + * instead of the log generated by the verifier. + */ +#define MSG "call bpf_rbtree_root{{.+}}; R0{{(_w)?}}=rcu_ptr_or_null_node_data(id={{[0-9]+}},non_own_ref" +TEST_ROOT(true) +#undef MSG +#define MSG "call bpf_rbtree_{{(left|right).+}}; R0{{(_w)?}}=rcu_ptr_or_null_node_data(id={{[0-9]+}},non_own_ref" +TEST_LR(left, true) +TEST_LR(right, true) +#undef MSG + +#define MSG "bpf_spin_lock at off=0 must be held for bpf_rb_root" +TEST_ROOT(false) +TEST_LR(left, false) +TEST_LR(right, false) +#undef MSG + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/rcu_read_lock.c b/tools/testing/selftests/bpf/progs/rcu_read_lock.c index 43637ee2cdcd..3a868a199349 100644 --- a/tools/testing/selftests/bpf/progs/rcu_read_lock.c +++ b/tools/testing/selftests/bpf/progs/rcu_read_lock.c @@ -16,10 +16,11 @@ struct { __type(value, long); } map_a SEC(".maps"); -__u32 user_data, key_serial, target_pid; +__u32 user_data, target_pid; +__s32 key_serial; __u64 flags, task_storage_val, cgroup_id; -struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym; +struct bpf_key *bpf_lookup_user_key(__s32 serial, __u64 flags) __ksym; void bpf_key_put(struct bpf_key *key) __ksym; void bpf_rcu_read_lock(void) __ksym; void bpf_rcu_read_unlock(void) __ksym; diff --git a/tools/testing/selftests/bpf/progs/read_cgroupfs_xattr.c b/tools/testing/selftests/bpf/progs/read_cgroupfs_xattr.c new file mode 100644 index 000000000000..405adbe5e8b0 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/read_cgroupfs_xattr.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. 
*/ + +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> +#include "bpf_experimental.h" + +char _license[] SEC("license") = "GPL"; + +pid_t target_pid = 0; + +char xattr_value[64]; +static const char expected_value_a[] = "bpf_selftest_value_a"; +static const char expected_value_b[] = "bpf_selftest_value_b"; +bool found_value_a; +bool found_value_b; + +SEC("lsm.s/file_open") +int BPF_PROG(test_file_open) +{ + u64 cgrp_id = bpf_get_current_cgroup_id(); + struct cgroup_subsys_state *css, *tmp; + struct bpf_dynptr value_ptr; + struct cgroup *cgrp; + + if ((bpf_get_current_pid_tgid() >> 32) != target_pid) + return 0; + + bpf_rcu_read_lock(); + cgrp = bpf_cgroup_from_id(cgrp_id); + if (!cgrp) { + bpf_rcu_read_unlock(); + return 0; + } + + css = &cgrp->self; + bpf_dynptr_from_mem(xattr_value, sizeof(xattr_value), 0, &value_ptr); + bpf_for_each(css, tmp, css, BPF_CGROUP_ITER_ANCESTORS_UP) { + int ret; + + ret = bpf_cgroup_read_xattr(tmp->cgroup, "user.bpf_test", + &value_ptr); + if (ret < 0) + continue; + + if (ret == sizeof(expected_value_a) && + !bpf_strncmp(xattr_value, sizeof(expected_value_a), expected_value_a)) + found_value_a = true; + if (ret == sizeof(expected_value_b) && + !bpf_strncmp(xattr_value, sizeof(expected_value_b), expected_value_b)) + found_value_b = true; + } + + bpf_rcu_read_unlock(); + bpf_cgroup_release(cgrp); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/security_bpf_map.c b/tools/testing/selftests/bpf/progs/security_bpf_map.c new file mode 100644 index 000000000000..7216b3450e96 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/security_bpf_map.c @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include "vmlinux.h" +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +#define EPERM 1 /* Operation not permitted */ + +/* From include/linux/fs.h. */ +#define FMODE_WRITE 0x2 + +struct map; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, __u32); + __type(value, __u32); + __uint(max_entries, 1); +} prot_status_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, __u32); + __type(value, __u32); + __uint(max_entries, 3); +} prot_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, __u32); + __type(value, __u32); + __uint(max_entries, 3); +} not_prot_map SEC(".maps"); + +SEC("fmod_ret/security_bpf_map") +int BPF_PROG(fmod_bpf_map, struct bpf_map *map, int fmode) +{ + __u32 key = 0; + __u32 *status_ptr = bpf_map_lookup_elem(&prot_status_map, &key); + + if (!status_ptr || !*status_ptr) + return 0; + + if (map == &prot_map) { + /* Allow read-only access */ + if (fmode & FMODE_WRITE) + return -EPERM; + } + + return 0; +} + +/* + * This program keeps references to maps. This is needed to prevent + * optimizing them out. 
+ */ +SEC("fentry/bpf_fentry_test1") +int BPF_PROG(fentry_dummy1, int a) +{ + __u32 key = 0; + __u32 val1 = a; + __u32 val2 = a + 1; + + bpf_map_update_elem(&prot_map, &key, &val1, BPF_ANY); + bpf_map_update_elem(¬_prot_map, &key, &val2, BPF_ANY); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/set_global_vars.c b/tools/testing/selftests/bpf/progs/set_global_vars.c index 9adb5ba4cd4d..ebaef28b2cb3 100644 --- a/tools/testing/selftests/bpf/progs/set_global_vars.c +++ b/tools/testing/selftests/bpf/progs/set_global_vars.c @@ -7,24 +7,74 @@ char _license[] SEC("license") = "GPL"; -enum Enum { EA1 = 0, EA2 = 11 }; +typedef __s32 s32; +typedef s32 i32; +typedef __u8 u8; + +enum Enum { EA1 = 0, EA2 = 11, EA3 = 10 }; enum Enumu64 {EB1 = 0llu, EB2 = 12llu }; enum Enums64 { EC1 = 0ll, EC2 = 13ll }; const volatile __s64 var_s64 = -1; const volatile __u64 var_u64 = 0; -const volatile __s32 var_s32 = -1; +const volatile i32 var_s32 = -1; const volatile __u32 var_u32 = 0; const volatile __s16 var_s16 = -1; const volatile __u16 var_u16 = 0; const volatile __s8 var_s8 = -1; -const volatile __u8 var_u8 = 0; +const volatile u8 var_u8 = 0; const volatile enum Enum var_ea = EA1; const volatile enum Enumu64 var_eb = EB1; const volatile enum Enums64 var_ec = EC1; const volatile bool var_b = false; +const volatile i32 arr[32]; +const volatile enum Enum enum_arr[32]; +const volatile i32 three_d[47][19][17]; +const volatile i32 *ptr_arr[32]; + +struct Struct { + int:16; + __u16 filler; + struct { + const __u16 filler2; + }; + struct Struct2 { + __u16 filler; + volatile struct { + const int:1; + union { + const volatile u8 var_u8[3]; + const volatile __s16 filler3; + const int:1; + s32 mat[7][5]; + } u; + }; + } struct2[2][4]; +}; + +const volatile __u32 stru = 0; /* same prefix as below */ +const volatile struct Struct struct1[3]; +const volatile struct Struct struct11[11][7]; + +struct Struct3 { + struct { + u8 var_u8_l; + }; + struct { + struct { + u8 var_u8_h; + }; + }; +}; -char arr[4] = {0}; +typedef struct Struct3 Struct3_t; + +union Union { + __u16 var_u16; + Struct3_t struct3; +}; + +const volatile union Union union1 = {.var_u16 = -1}; SEC("socket") int test_set_globals(void *ctx) @@ -43,5 +93,14 @@ int test_set_globals(void *ctx) a = var_eb; a = var_ec; a = var_b; + a = struct1[2].struct2[1][2].u.var_u8[2]; + a = union1.var_u16; + a = arr[3]; + a = arr[EA2]; + a = enum_arr[EC2]; + a = three_d[31][7][EA2]; + a = struct1[2].struct2[1][2].u.mat[5][3]; + a = struct11[7][5].struct2[0][1].u.mat[3][0]; + return a; } diff --git a/tools/testing/selftests/bpf/progs/sock_iter_batch.c b/tools/testing/selftests/bpf/progs/sock_iter_batch.c index 8f483337e103..77966ded5467 100644 --- a/tools/testing/selftests/bpf/progs/sock_iter_batch.c +++ b/tools/testing/selftests/bpf/progs/sock_iter_batch.c @@ -23,6 +23,7 @@ static bool ipv4_addr_loopback(__be32 a) } volatile const unsigned int sf; +volatile const unsigned int ss; volatile const __u16 ports[2]; unsigned int bucket[2]; @@ -42,16 +43,18 @@ int iter_tcp_soreuse(struct bpf_iter__tcp *ctx) sock_cookie = bpf_get_socket_cookie(sk); sk = bpf_core_cast(sk, struct sock); if (sk->sk_family != sf || - sk->sk_state != TCP_LISTEN || - sk->sk_family == AF_INET6 ? + (ss && sk->sk_state != ss) || + (sk->sk_family == AF_INET6 ? 
!ipv6_addr_loopback(&sk->sk_v6_rcv_saddr) : - !ipv4_addr_loopback(sk->sk_rcv_saddr)) + !ipv4_addr_loopback(sk->sk_rcv_saddr))) return 0; if (sk->sk_num == ports[0]) idx = 0; else if (sk->sk_num == ports[1]) idx = 1; + else if (!ports[0] && !ports[1]) + idx = 0; else return 0; @@ -67,6 +70,27 @@ int iter_tcp_soreuse(struct bpf_iter__tcp *ctx) return 0; } +volatile const __u64 destroy_cookie; + +SEC("iter/tcp") +int iter_tcp_destroy(struct bpf_iter__tcp *ctx) +{ + struct sock_common *sk_common = (struct sock_common *)ctx->sk_common; + __u64 sock_cookie; + + if (!sk_common) + return 0; + + sock_cookie = bpf_get_socket_cookie(sk_common); + if (sock_cookie != destroy_cookie) + return 0; + + bpf_sock_destroy(sk_common); + bpf_seq_write(ctx->meta->seq, &sock_cookie, sizeof(sock_cookie)); + + return 0; +} + #define udp_sk(ptr) container_of(ptr, struct udp_sock, inet.sk) SEC("iter/udp") @@ -83,15 +107,17 @@ int iter_udp_soreuse(struct bpf_iter__udp *ctx) sock_cookie = bpf_get_socket_cookie(sk); sk = bpf_core_cast(sk, struct sock); if (sk->sk_family != sf || - sk->sk_family == AF_INET6 ? + (sk->sk_family == AF_INET6 ? !ipv6_addr_loopback(&sk->sk_v6_rcv_saddr) : - !ipv4_addr_loopback(sk->sk_rcv_saddr)) + !ipv4_addr_loopback(sk->sk_rcv_saddr))) return 0; if (sk->sk_num == ports[0]) idx = 0; else if (sk->sk_num == ports[1]) idx = 1; + else if (!ports[0] && !ports[1]) + idx = 0; else return 0; diff --git a/tools/testing/selftests/bpf/progs/stream.c b/tools/testing/selftests/bpf/progs/stream.c new file mode 100644 index 000000000000..35790897dc87 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/stream.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */ +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" +#include "bpf_experimental.h" + +struct arr_elem { + struct bpf_res_spin_lock lock; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct arr_elem); +} arrmap SEC(".maps"); + +#define ENOSPC 28 +#define _STR "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + +int size; + +SEC("syscall") +__success __retval(0) +int stream_exhaust(void *ctx) +{ + /* Use global variable for loop convergence. 
*/ + size = 0; + bpf_repeat(BPF_MAX_LOOPS) { + if (bpf_stream_printk(BPF_STDOUT, _STR) == -ENOSPC && size == 99954) + return 0; + size += sizeof(_STR) - 1; + } + return 1; +} + +SEC("syscall") +__success __retval(0) +int stream_cond_break(void *ctx) +{ + while (can_loop) + ; + return 0; +} + +SEC("syscall") +__success __retval(0) +int stream_deadlock(void *ctx) +{ + struct bpf_res_spin_lock *lock, *nlock; + + lock = bpf_map_lookup_elem(&arrmap, &(int){0}); + if (!lock) + return 1; + nlock = bpf_map_lookup_elem(&arrmap, &(int){0}); + if (!nlock) + return 1; + if (bpf_res_spin_lock(lock)) + return 1; + if (bpf_res_spin_lock(nlock)) { + bpf_res_spin_unlock(lock); + return 0; + } + bpf_res_spin_unlock(nlock); + bpf_res_spin_unlock(lock); + return 1; +} + +SEC("syscall") +__success __retval(0) +int stream_syscall(void *ctx) +{ + bpf_stream_printk(BPF_STDOUT, "foo"); + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/stream_fail.c b/tools/testing/selftests/bpf/progs/stream_fail.c new file mode 100644 index 000000000000..b4a0d0cc8ec8 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/stream_fail.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */ +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> +#include "bpf_misc.h" + +SEC("syscall") +__failure __msg("Possibly NULL pointer passed") +int stream_vprintk_null_arg(void *ctx) +{ + bpf_stream_vprintk(BPF_STDOUT, "", NULL, 0, NULL); + return 0; +} + +SEC("syscall") +__failure __msg("R3 type=scalar expected=") +int stream_vprintk_scalar_arg(void *ctx) +{ + bpf_stream_vprintk(BPF_STDOUT, "", (void *)46, 0, NULL); + return 0; +} + +SEC("syscall") +__failure __msg("arg#1 doesn't point to a const string") +int stream_vprintk_string_arg(void *ctx) +{ + bpf_stream_vprintk(BPF_STDOUT, ctx, NULL, 0, NULL); + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/string_kfuncs_failure1.c b/tools/testing/selftests/bpf/progs/string_kfuncs_failure1.c new file mode 100644 index 000000000000..53af438bd998 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/string_kfuncs_failure1.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2025 Red Hat, Inc.*/ +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <linux/limits.h> +#include "bpf_misc.h" +#include "errno.h" + +char *user_ptr = (char *)1; +char *invalid_kern_ptr = (char *)-1; + +/* + * When passing userspace pointers, the error code differs based on arch: + * -ERANGE on arches with non-overlapping address spaces + * -EFAULT on other arches + */ +#if defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_loongarch) || \ + defined(__TARGET_ARCH_powerpc) || defined(__TARGET_ARCH_x86) +#define USER_PTR_ERR -ERANGE +#else +#define USER_PTR_ERR -EFAULT +#endif + +/* + * On s390, __get_kernel_nofault (used in string kfuncs) returns 0 for NULL and + * user_ptr (instead of causing an exception) so the below two groups of tests + * are not applicable. 
+ */ +#ifndef __TARGET_ARCH_s390 + +/* Passing NULL to string kfuncs (treated as a userspace ptr) */ +SEC("syscall") __retval(USER_PTR_ERR) int test_strcmp_null1(void *ctx) { return bpf_strcmp(NULL, "hello"); } +SEC("syscall") __retval(USER_PTR_ERR)int test_strcmp_null2(void *ctx) { return bpf_strcmp("hello", NULL); } +SEC("syscall") __retval(USER_PTR_ERR)int test_strchr_null(void *ctx) { return bpf_strchr(NULL, 'a'); } +SEC("syscall") __retval(USER_PTR_ERR)int test_strchrnul_null(void *ctx) { return bpf_strchrnul(NULL, 'a'); } +SEC("syscall") __retval(USER_PTR_ERR)int test_strnchr_null(void *ctx) { return bpf_strnchr(NULL, 1, 'a'); } +SEC("syscall") __retval(USER_PTR_ERR)int test_strrchr_null(void *ctx) { return bpf_strrchr(NULL, 'a'); } +SEC("syscall") __retval(USER_PTR_ERR)int test_strlen_null(void *ctx) { return bpf_strlen(NULL); } +SEC("syscall") __retval(USER_PTR_ERR)int test_strnlen_null(void *ctx) { return bpf_strnlen(NULL, 1); } +SEC("syscall") __retval(USER_PTR_ERR)int test_strspn_null1(void *ctx) { return bpf_strspn(NULL, "hello"); } +SEC("syscall") __retval(USER_PTR_ERR)int test_strspn_null2(void *ctx) { return bpf_strspn("hello", NULL); } +SEC("syscall") __retval(USER_PTR_ERR)int test_strcspn_null1(void *ctx) { return bpf_strcspn(NULL, "hello"); } +SEC("syscall") __retval(USER_PTR_ERR)int test_strcspn_null2(void *ctx) { return bpf_strcspn("hello", NULL); } +SEC("syscall") __retval(USER_PTR_ERR)int test_strstr_null1(void *ctx) { return bpf_strstr(NULL, "hello"); } +SEC("syscall") __retval(USER_PTR_ERR)int test_strstr_null2(void *ctx) { return bpf_strstr("hello", NULL); } +SEC("syscall") __retval(USER_PTR_ERR)int test_strnstr_null1(void *ctx) { return bpf_strnstr(NULL, "hello", 1); } +SEC("syscall") __retval(USER_PTR_ERR)int test_strnstr_null2(void *ctx) { return bpf_strnstr("hello", NULL, 1); } + +/* Passing userspace ptr to string kfuncs */ +SEC("syscall") __retval(USER_PTR_ERR) int test_strcmp_user_ptr1(void *ctx) { return bpf_strcmp(user_ptr, "hello"); } +SEC("syscall") __retval(USER_PTR_ERR) int test_strcmp_user_ptr2(void *ctx) { return bpf_strcmp("hello", user_ptr); } +SEC("syscall") __retval(USER_PTR_ERR) int test_strchr_user_ptr(void *ctx) { return bpf_strchr(user_ptr, 'a'); } +SEC("syscall") __retval(USER_PTR_ERR) int test_strchrnul_user_ptr(void *ctx) { return bpf_strchrnul(user_ptr, 'a'); } +SEC("syscall") __retval(USER_PTR_ERR) int test_strnchr_user_ptr(void *ctx) { return bpf_strnchr(user_ptr, 1, 'a'); } +SEC("syscall") __retval(USER_PTR_ERR) int test_strrchr_user_ptr(void *ctx) { return bpf_strrchr(user_ptr, 'a'); } +SEC("syscall") __retval(USER_PTR_ERR) int test_strlen_user_ptr(void *ctx) { return bpf_strlen(user_ptr); } +SEC("syscall") __retval(USER_PTR_ERR) int test_strnlen_user_ptr(void *ctx) { return bpf_strnlen(user_ptr, 1); } +SEC("syscall") __retval(USER_PTR_ERR) int test_strspn_user_ptr1(void *ctx) { return bpf_strspn(user_ptr, "hello"); } +SEC("syscall") __retval(USER_PTR_ERR) int test_strspn_user_ptr2(void *ctx) { return bpf_strspn("hello", user_ptr); } +SEC("syscall") __retval(USER_PTR_ERR) int test_strcspn_user_ptr1(void *ctx) { return bpf_strcspn(user_ptr, "hello"); } +SEC("syscall") __retval(USER_PTR_ERR) int test_strcspn_user_ptr2(void *ctx) { return bpf_strcspn("hello", user_ptr); } +SEC("syscall") __retval(USER_PTR_ERR) int test_strstr_user_ptr1(void *ctx) { return bpf_strstr(user_ptr, "hello"); } +SEC("syscall") __retval(USER_PTR_ERR) int test_strstr_user_ptr2(void *ctx) { return bpf_strstr("hello", user_ptr); } +SEC("syscall") 
__retval(USER_PTR_ERR) int test_strnstr_user_ptr1(void *ctx) { return bpf_strnstr(user_ptr, "hello", 1); } +SEC("syscall") __retval(USER_PTR_ERR) int test_strnstr_user_ptr2(void *ctx) { return bpf_strnstr("hello", user_ptr, 1); } + +#endif /* __TARGET_ARCH_s390 */ + +/* Passing invalid kernel ptr to string kfuncs should always return -EFAULT */ +SEC("syscall") __retval(-EFAULT) int test_strcmp_pagefault1(void *ctx) { return bpf_strcmp(invalid_kern_ptr, "hello"); } +SEC("syscall") __retval(-EFAULT) int test_strcmp_pagefault2(void *ctx) { return bpf_strcmp("hello", invalid_kern_ptr); } +SEC("syscall") __retval(-EFAULT) int test_strchr_pagefault(void *ctx) { return bpf_strchr(invalid_kern_ptr, 'a'); } +SEC("syscall") __retval(-EFAULT) int test_strchrnul_pagefault(void *ctx) { return bpf_strchrnul(invalid_kern_ptr, 'a'); } +SEC("syscall") __retval(-EFAULT) int test_strnchr_pagefault(void *ctx) { return bpf_strnchr(invalid_kern_ptr, 1, 'a'); } +SEC("syscall") __retval(-EFAULT) int test_strrchr_pagefault(void *ctx) { return bpf_strrchr(invalid_kern_ptr, 'a'); } +SEC("syscall") __retval(-EFAULT) int test_strlen_pagefault(void *ctx) { return bpf_strlen(invalid_kern_ptr); } +SEC("syscall") __retval(-EFAULT) int test_strnlen_pagefault(void *ctx) { return bpf_strnlen(invalid_kern_ptr, 1); } +SEC("syscall") __retval(-EFAULT) int test_strspn_pagefault1(void *ctx) { return bpf_strspn(invalid_kern_ptr, "hello"); } +SEC("syscall") __retval(-EFAULT) int test_strspn_pagefault2(void *ctx) { return bpf_strspn("hello", invalid_kern_ptr); } +SEC("syscall") __retval(-EFAULT) int test_strcspn_pagefault1(void *ctx) { return bpf_strcspn(invalid_kern_ptr, "hello"); } +SEC("syscall") __retval(-EFAULT) int test_strcspn_pagefault2(void *ctx) { return bpf_strcspn("hello", invalid_kern_ptr); } +SEC("syscall") __retval(-EFAULT) int test_strstr_pagefault1(void *ctx) { return bpf_strstr(invalid_kern_ptr, "hello"); } +SEC("syscall") __retval(-EFAULT) int test_strstr_pagefault2(void *ctx) { return bpf_strstr("hello", invalid_kern_ptr); } +SEC("syscall") __retval(-EFAULT) int test_strnstr_pagefault1(void *ctx) { return bpf_strnstr(invalid_kern_ptr, "hello", 1); } +SEC("syscall") __retval(-EFAULT) int test_strnstr_pagefault2(void *ctx) { return bpf_strnstr("hello", invalid_kern_ptr, 1); } + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/string_kfuncs_failure2.c b/tools/testing/selftests/bpf/progs/string_kfuncs_failure2.c new file mode 100644 index 000000000000..89fb4669b0e9 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/string_kfuncs_failure2.c @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2025 Red Hat, Inc.*/ +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <linux/limits.h> + +char long_str[XATTR_SIZE_MAX + 1]; + +SEC("syscall") int test_strcmp_too_long(void *ctx) { return bpf_strcmp(long_str, long_str); } +SEC("syscall") int test_strchr_too_long(void *ctx) { return bpf_strchr(long_str, 'b'); } +SEC("syscall") int test_strchrnul_too_long(void *ctx) { return bpf_strchrnul(long_str, 'b'); } +SEC("syscall") int test_strnchr_too_long(void *ctx) { return bpf_strnchr(long_str, sizeof(long_str), 'b'); } +SEC("syscall") int test_strrchr_too_long(void *ctx) { return bpf_strrchr(long_str, 'b'); } +SEC("syscall") int test_strlen_too_long(void *ctx) { return bpf_strlen(long_str); } +SEC("syscall") int test_strnlen_too_long(void *ctx) { return bpf_strnlen(long_str, sizeof(long_str)); } +SEC("syscall") int test_strspn_str_too_long(void *ctx) { return 
bpf_strspn(long_str, "a"); } +SEC("syscall") int test_strspn_accept_too_long(void *ctx) { return bpf_strspn("b", long_str); } +SEC("syscall") int test_strcspn_str_too_long(void *ctx) { return bpf_strcspn(long_str, "b"); } +SEC("syscall") int test_strcspn_reject_too_long(void *ctx) { return bpf_strcspn("b", long_str); } +SEC("syscall") int test_strstr_too_long(void *ctx) { return bpf_strstr(long_str, "hello"); } +SEC("syscall") int test_strnstr_too_long(void *ctx) { return bpf_strnstr(long_str, "hello", sizeof(long_str)); } + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/string_kfuncs_success.c b/tools/testing/selftests/bpf/progs/string_kfuncs_success.c new file mode 100644 index 000000000000..46697f381878 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/string_kfuncs_success.c @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2025 Red Hat, Inc.*/ +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" +#include "errno.h" + +char str[] = "hello world"; + +#define __test(retval) SEC("syscall") __success __retval(retval) + +/* Functional tests */ +__test(0) int test_strcmp_eq(void *ctx) { return bpf_strcmp(str, "hello world"); } +__test(1) int test_strcmp_neq(void *ctx) { return bpf_strcmp(str, "hello"); } +__test(1) int test_strchr_found(void *ctx) { return bpf_strchr(str, 'e'); } +__test(11) int test_strchr_null(void *ctx) { return bpf_strchr(str, '\0'); } +__test(-ENOENT) int test_strchr_notfound(void *ctx) { return bpf_strchr(str, 'x'); } +__test(1) int test_strchrnul_found(void *ctx) { return bpf_strchrnul(str, 'e'); } +__test(11) int test_strchrnul_notfound(void *ctx) { return bpf_strchrnul(str, 'x'); } +__test(1) int test_strnchr_found(void *ctx) { return bpf_strnchr(str, 5, 'e'); } +__test(11) int test_strnchr_null(void *ctx) { return bpf_strnchr(str, 12, '\0'); } +__test(-ENOENT) int test_strnchr_notfound(void *ctx) { return bpf_strnchr(str, 5, 'w'); } +__test(9) int test_strrchr_found(void *ctx) { return bpf_strrchr(str, 'l'); } +__test(11) int test_strrchr_null(void *ctx) { return bpf_strrchr(str, '\0'); } +__test(-ENOENT) int test_strrchr_notfound(void *ctx) { return bpf_strrchr(str, 'x'); } +__test(11) int test_strlen(void *ctx) { return bpf_strlen(str); } +__test(11) int test_strnlen(void *ctx) { return bpf_strnlen(str, 12); } +__test(5) int test_strspn(void *ctx) { return bpf_strspn(str, "ehlo"); } +__test(2) int test_strcspn(void *ctx) { return bpf_strcspn(str, "lo"); } +__test(6) int test_strstr_found(void *ctx) { return bpf_strstr(str, "world"); } +__test(-ENOENT) int test_strstr_notfound(void *ctx) { return bpf_strstr(str, "hi"); } +__test(0) int test_strstr_empty(void *ctx) { return bpf_strstr(str, ""); } +__test(0) int test_strnstr_found(void *ctx) { return bpf_strnstr(str, "hello", 6); } +__test(-ENOENT) int test_strnstr_notfound(void *ctx) { return bpf_strnstr(str, "hi", 10); } +__test(0) int test_strnstr_empty(void *ctx) { return bpf_strnstr(str, "", 1); } + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c b/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c index 0e4d2ff63ab8..dbe646013811 100644 --- a/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c +++ b/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c @@ -7,7 +7,7 @@ char _license[] SEC("license") = "GPL"; -#if defined(__TARGET_ARCH_x86) +#if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64) bool skip 
__attribute((__section__(".data"))) = false; #else bool skip = true; diff --git a/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c index 58d5d8dc2235..3d89ad7cbe2a 100644 --- a/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c +++ b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c @@ -7,7 +7,7 @@ char _license[] SEC("license") = "GPL"; -#if defined(__TARGET_ARCH_x86) +#if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64) bool skip __attribute((__section__(".data"))) = false; #else bool skip = true; diff --git a/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c index 31e58389bb8b..b1f6d7e5a8e5 100644 --- a/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c +++ b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c @@ -7,7 +7,7 @@ char _license[] SEC("license") = "GPL"; -#if defined(__TARGET_ARCH_x86) +#if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64) bool skip __attribute((__section__(".data"))) = false; #else bool skip = true; diff --git a/tools/testing/selftests/bpf/progs/test_btf_ext.c b/tools/testing/selftests/bpf/progs/test_btf_ext.c new file mode 100644 index 000000000000..cdf20331db04 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_btf_ext.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2025 Meta Platforms Inc. */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + +__noinline static void f0(void) +{ + __u64 a = 1; + + __sink(a); +} + +SEC("xdp") +__u64 global_func(struct xdp_md *xdp) +{ + f0(); + return XDP_DROP; +} diff --git a/tools/testing/selftests/bpf/progs/test_global_map_resize.c b/tools/testing/selftests/bpf/progs/test_global_map_resize.c index a3f220ba7025..ee65bad0436d 100644 --- a/tools/testing/selftests/bpf/progs/test_global_map_resize.c +++ b/tools/testing/selftests/bpf/progs/test_global_map_resize.c @@ -32,6 +32,16 @@ int my_int_last SEC(".data.array_not_last"); int percpu_arr[1] SEC(".data.percpu_arr"); +/* at least one extern is included, to ensure that a specific + * regression is tested whereby resizing resulted in a use-after-free + * bug after type information is invalidated by the resize operation. + * + * There isn't a particularly good API to test for this specific condition, + * but by having externs for the resizing tests it will cover this path. 
+ */ +extern int LINUX_KERNEL_VERSION __kconfig; +long version_sink; + SEC("tp/syscalls/sys_enter_getpid") int bss_array_sum(void *ctx) { @@ -44,6 +54,9 @@ int bss_array_sum(void *ctx) for (size_t i = 0; i < bss_array_len; ++i) sum += array[i]; + /* see above; ensure this is not optimized out */ + version_sink = LINUX_KERNEL_VERSION; + return 0; } @@ -59,6 +72,9 @@ int data_array_sum(void *ctx) for (size_t i = 0; i < data_array_len; ++i) sum += my_array[i]; + /* see above; ensure this is not optimized out */ + version_sink = LINUX_KERNEL_VERSION; + return 0; } diff --git a/tools/testing/selftests/bpf/progs/test_lookup_key.c b/tools/testing/selftests/bpf/progs/test_lookup_key.c index cdbbb12f1491..1f7e1e59b073 100644 --- a/tools/testing/selftests/bpf/progs/test_lookup_key.c +++ b/tools/testing/selftests/bpf/progs/test_lookup_key.c @@ -14,11 +14,11 @@ char _license[] SEC("license") = "GPL"; __u32 monitored_pid; -__u32 key_serial; +__s32 key_serial; __u32 key_id; __u64 flags; -extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym; +extern struct bpf_key *bpf_lookup_user_key(__s32 serial, __u64 flags) __ksym; extern struct bpf_key *bpf_lookup_system_key(__u64 id) __ksym; extern void bpf_key_put(struct bpf_key *key) __ksym; diff --git a/tools/testing/selftests/bpf/progs/test_module_attach.c b/tools/testing/selftests/bpf/progs/test_module_attach.c index 7f3c233943b3..03d7f89787a1 100644 --- a/tools/testing/selftests/bpf/progs/test_module_attach.c +++ b/tools/testing/selftests/bpf/progs/test_module_attach.c @@ -19,7 +19,7 @@ int BPF_PROG(handle_raw_tp, __u32 raw_tp_bare_write_sz = 0; -SEC("raw_tp/bpf_testmod_test_write_bare") +SEC("raw_tp/bpf_testmod_test_write_bare_tp") int BPF_PROG(handle_raw_tp_bare, struct task_struct *task, struct bpf_testmod_test_write_ctx *write_ctx) { @@ -31,7 +31,7 @@ int raw_tp_writable_bare_in_val = 0; int raw_tp_writable_bare_early_ret = 0; int raw_tp_writable_bare_out_val = 0; -SEC("raw_tp.w/bpf_testmod_test_writable_bare") +SEC("raw_tp.w/bpf_testmod_test_writable_bare_tp") int BPF_PROG(handle_raw_tp_writable_bare, struct bpf_testmod_test_writable_ctx *writable) { diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf_write.c b/tools/testing/selftests/bpf/progs/test_ringbuf_write.c index 350513c0e4c9..f063a0013f85 100644 --- a/tools/testing/selftests/bpf/progs/test_ringbuf_write.c +++ b/tools/testing/selftests/bpf/progs/test_ringbuf_write.c @@ -26,11 +26,11 @@ int test_ringbuf_write(void *ctx) if (cur_pid != pid) return 0; - sample1 = bpf_ringbuf_reserve(&ringbuf, 0x3000, 0); + sample1 = bpf_ringbuf_reserve(&ringbuf, 0x30000, 0); if (!sample1) return 0; /* first one can pass */ - sample2 = bpf_ringbuf_reserve(&ringbuf, 0x3000, 0); + sample2 = bpf_ringbuf_reserve(&ringbuf, 0x30000, 0); if (!sample2) { bpf_ringbuf_discard(sample1, 0); __sync_fetch_and_add(&discarded, 1); diff --git a/tools/testing/selftests/bpf/progs/test_sig_in_xattr.c b/tools/testing/selftests/bpf/progs/test_sig_in_xattr.c index 8ef6b39335b6..34b30e2603f0 100644 --- a/tools/testing/selftests/bpf/progs/test_sig_in_xattr.c +++ b/tools/testing/selftests/bpf/progs/test_sig_in_xattr.c @@ -40,7 +40,7 @@ char digest[MAGIC_SIZE + SIZEOF_STRUCT_FSVERITY_DIGEST + SHA256_DIGEST_SIZE]; __u32 monitored_pid; char sig[MAX_SIG_SIZE]; __u32 sig_size; -__u32 user_keyring_serial; +__s32 user_keyring_serial; SEC("lsm.s/file_open") int BPF_PROG(test_file_open, struct file *f) diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_change_tail.c 
b/tools/testing/selftests/bpf/progs/test_sockmap_change_tail.c index 2796dd8545eb..1c7941a4ad00 100644 --- a/tools/testing/selftests/bpf/progs/test_sockmap_change_tail.c +++ b/tools/testing/selftests/bpf/progs/test_sockmap_change_tail.c @@ -1,8 +1,13 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2024 ByteDance */ -#include <linux/bpf.h> +#include "vmlinux.h" #include <bpf/bpf_helpers.h> +#ifndef PAGE_SIZE +#define PAGE_SIZE __PAGE_SIZE +#endif +#define BPF_SKB_MAX_LEN (PAGE_SIZE << 2) + struct { __uint(type, BPF_MAP_TYPE_SOCKMAP); __uint(max_entries, 1); @@ -31,7 +36,7 @@ int prog_skb_verdict(struct __sk_buff *skb) change_tail_ret = bpf_skb_change_tail(skb, skb->len + 1, 0); return SK_PASS; } else if (data[0] == 'E') { /* Error */ - change_tail_ret = bpf_skb_change_tail(skb, 65535, 0); + change_tail_ret = bpf_skb_change_tail(skb, BPF_SKB_MAX_LEN, 0); return SK_PASS; } return SK_PASS; diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_ktls.c b/tools/testing/selftests/bpf/progs/test_sockmap_ktls.c new file mode 100644 index 000000000000..83df4919c224 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_sockmap_ktls.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> + +int cork_byte; +int push_start; +int push_end; +int apply_bytes; +int pop_start; +int pop_end; + +struct { + __uint(type, BPF_MAP_TYPE_SOCKMAP); + __uint(max_entries, 20); + __type(key, int); + __type(value, int); +} sock_map SEC(".maps"); + +SEC("sk_msg") +int prog_sk_policy(struct sk_msg_md *msg) +{ + if (cork_byte > 0) + bpf_msg_cork_bytes(msg, cork_byte); + if (push_start > 0 && push_end > 0) + bpf_msg_push_data(msg, push_start, push_end, 0); + if (pop_start >= 0 && pop_end > 0) + bpf_msg_pop_data(msg, pop_start, pop_end, 0); + + return SK_PASS; +} + +SEC("sk_msg") +int prog_sk_policy_redir(struct sk_msg_md *msg) +{ + int two = 2; + + bpf_msg_apply_bytes(msg, apply_bytes); + return bpf_msg_redirect_map(msg, &sock_map, two, 0); +} diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_redir.c b/tools/testing/selftests/bpf/progs/test_sockmap_redir.c new file mode 100644 index 000000000000..34d9f4f2f0a2 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_sockmap_redir.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC(".maps") struct { + __uint(type, BPF_MAP_TYPE_SOCKMAP); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u64); +} nop_map, sock_map; + +SEC(".maps") struct { + __uint(type, BPF_MAP_TYPE_SOCKHASH); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u64); +} nop_hash, sock_hash; + +SEC(".maps") struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 2); + __type(key, int); + __type(value, unsigned int); +} verdict_map; + +/* Set by user space */ +int redirect_type; +int redirect_flags; + +#define redirect_map(__data) \ + _Generic((__data), \ + struct __sk_buff * : bpf_sk_redirect_map, \ + struct sk_msg_md * : bpf_msg_redirect_map \ + )((__data), &sock_map, (__u32){0}, redirect_flags) + +#define redirect_hash(__data) \ + _Generic((__data), \ + struct __sk_buff * : bpf_sk_redirect_hash, \ + struct sk_msg_md * : bpf_msg_redirect_hash \ + )((__data), &sock_hash, &(__u32){0}, redirect_flags) + +#define DEFINE_PROG(__type, __param) \ +SEC("sk_" XSTR(__type)) \ +int prog_ ## __type ## _verdict(__param data) \ +{ \ + unsigned int *count; \ + int verdict; \ + \ + if 
(redirect_type == BPF_MAP_TYPE_SOCKMAP) \ + verdict = redirect_map(data); \ + else if (redirect_type == BPF_MAP_TYPE_SOCKHASH) \ + verdict = redirect_hash(data); \ + else \ + verdict = redirect_type - __MAX_BPF_MAP_TYPE; \ + \ + count = bpf_map_lookup_elem(&verdict_map, &verdict); \ + if (count) \ + (*count)++; \ + \ + return verdict; \ +} + +DEFINE_PROG(skb, struct __sk_buff *); +DEFINE_PROG(msg, struct sk_msg_md *); + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_tc_change_tail.c b/tools/testing/selftests/bpf/progs/test_tc_change_tail.c index 28edafe803f0..fcba8299f0bc 100644 --- a/tools/testing/selftests/bpf/progs/test_tc_change_tail.c +++ b/tools/testing/selftests/bpf/progs/test_tc_change_tail.c @@ -1,11 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 -#include <linux/bpf.h> +#include "vmlinux.h" #include <bpf/bpf_helpers.h> -#include <linux/if_ether.h> -#include <linux/in.h> -#include <linux/ip.h> -#include <linux/udp.h> -#include <linux/pkt_cls.h> + +#ifndef PAGE_SIZE +#define PAGE_SIZE __PAGE_SIZE +#endif +#define BPF_SKB_MAX_LEN (PAGE_SIZE << 2) long change_tail_ret = 1; @@ -94,7 +94,7 @@ int change_tail(struct __sk_buff *skb) bpf_skb_change_tail(skb, len, 0); return TCX_PASS; } else if (payload[0] == 'E') { /* Error */ - change_tail_ret = bpf_skb_change_tail(skb, 65535, 0); + change_tail_ret = bpf_skb_change_tail(skb, BPF_SKB_MAX_LEN, 0); return TCX_PASS; } else if (payload[0] == 'Z') { /* Zero */ change_tail_ret = bpf_skb_change_tail(skb, 0, 0); diff --git a/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c index eb5cca1fce16..7d5293de1952 100644 --- a/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c +++ b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c @@ -294,7 +294,9 @@ static int tcp_validate_sysctl(struct tcp_syncookie *ctx) (ctx->ipv6 && ctx->attrs.mss != MSS_LOCAL_IPV6)) goto err; - if (!ctx->attrs.wscale_ok || ctx->attrs.snd_wscale != 7) + if (!ctx->attrs.wscale_ok || + !ctx->attrs.snd_wscale || + ctx->attrs.snd_wscale >= BPF_SYNCOOKIE_WSCALE_MASK) goto err; if (!ctx->attrs.tstamp_ok) diff --git a/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c b/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c index 39ff06f2c834..cf0547a613ff 100644 --- a/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c +++ b/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c @@ -6,14 +6,14 @@ #include "../test_kmods/bpf_testmod.h" #include "bpf_misc.h" -SEC("tp_btf/bpf_testmod_test_nullable_bare") +SEC("tp_btf/bpf_testmod_test_nullable_bare_tp") __failure __msg("R1 invalid mem access 'trusted_ptr_or_null_'") int BPF_PROG(handle_tp_btf_nullable_bare1, struct bpf_testmod_test_read_ctx *nullable_ctx) { return nullable_ctx->len; } -SEC("tp_btf/bpf_testmod_test_nullable_bare") +SEC("tp_btf/bpf_testmod_test_nullable_bare_tp") int BPF_PROG(handle_tp_btf_nullable_bare2, struct bpf_testmod_test_read_ctx *nullable_ctx) { if (nullable_ctx) diff --git a/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c b/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c index e96d09e11115..ff8d755548b9 100644 --- a/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c +++ b/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c @@ -17,7 +17,7 @@ #define MAX_SIG_SIZE 1024 __u32 monitored_pid; -__u32 user_keyring_serial; +__s32 user_keyring_serial; __u64 system_keyring_id; struct data { diff --git 
a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c index dc74d8cf9e3f..5904f45cfbc4 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c @@ -19,7 +19,9 @@ int _xdp_adjust_tail_grow(struct xdp_md *xdp) /* Data length determine test case */ if (data_len == 54) { /* sizeof(pkt_v4) */ - offset = 4096; /* test too large offset */ + offset = 4096; /* test too large offset, 4k page size */ + } else if (data_len == 53) { /* sizeof(pkt_v4) - 1 */ + offset = 65536; /* test too large offset, 64k page size */ } else if (data_len == 74) { /* sizeof(pkt_v6) */ offset = 40; } else if (data_len == 64) { @@ -31,6 +33,10 @@ int _xdp_adjust_tail_grow(struct xdp_md *xdp) offset = 10; } else if (data_len == 9001) { offset = 4096; + } else if (data_len == 90000) { + offset = 10; /* test a small offset, 64k page size */ + } else if (data_len == 90001) { + offset = 65536; /* test too large offset, 64k page size */ } else { return XDP_ABORTED; /* No matching test */ } diff --git a/tools/testing/selftests/bpf/progs/tracing_failure.c b/tools/testing/selftests/bpf/progs/tracing_failure.c index d41665d2ec8c..65e485c4468c 100644 --- a/tools/testing/selftests/bpf/progs/tracing_failure.c +++ b/tools/testing/selftests/bpf/progs/tracing_failure.c @@ -18,3 +18,15 @@ int BPF_PROG(test_spin_unlock, struct bpf_spin_lock *lock) { return 0; } + +SEC("?fentry/__rcu_read_lock") +int BPF_PROG(tracing_deny) +{ + return 0; +} + +SEC("?fexit/do_exit") +int BPF_PROG(fexit_noreturns) +{ + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/verifier_and.c b/tools/testing/selftests/bpf/progs/verifier_and.c index e97e518516b6..2b4fdca162be 100644 --- a/tools/testing/selftests/bpf/progs/verifier_and.c +++ b/tools/testing/selftests/bpf/progs/verifier_and.c @@ -85,8 +85,14 @@ l0_%=: r0 = r0; \ SEC("socket") __description("check known subreg with unknown reg") -__success __failure_unpriv __msg_unpriv("R1 !read_ok") +__success __success_unpriv __retval(0) +#ifdef SPEC_V1 +__xlated_unpriv("if w0 < 0x1 goto pc+2") +__xlated_unpriv("nospec") /* inserted to prevent `R1 !read_ok'` */ +__xlated_unpriv("goto pc-1") /* `r1 = *(u32*)(r1 + 512)`, sanitized dead code */ +__xlated_unpriv("r0 = 0") +#endif __naked void known_subreg_with_unknown_reg(void) { asm volatile (" \ diff --git a/tools/testing/selftests/bpf/progs/verifier_arena.c b/tools/testing/selftests/bpf/progs/verifier_arena.c index 67509c5d3982..7f4827eede3c 100644 --- a/tools/testing/selftests/bpf/progs/verifier_arena.c +++ b/tools/testing/selftests/bpf/progs/verifier_arena.c @@ -3,6 +3,7 @@ #define BPF_NO_KFUNC_PROTOTYPES #include <vmlinux.h> +#include <errno.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> #include "bpf_misc.h" @@ -114,6 +115,111 @@ int basic_alloc3(void *ctx) return 0; } +SEC("syscall") +__success __retval(0) +int basic_reserve1(void *ctx) +{ +#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) + char __arena *page; + int ret; + + page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0); + if (!page) + return 1; + + page += __PAGE_SIZE; + + /* Reserve the second page */ + ret = bpf_arena_reserve_pages(&arena, page, 1); + if (ret) + return 2; + + /* Try to explicitly allocate the reserved page. */ + page = bpf_arena_alloc_pages(&arena, page, 1, NUMA_NO_NODE, 0); + if (page) + return 3; + + /* Try to implicitly allocate the page (since there's only 2 of them). 
*/ + page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0); + if (page) + return 4; +#endif + return 0; +} + +SEC("syscall") +__success __retval(0) +int basic_reserve2(void *ctx) +{ +#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) + char __arena *page; + int ret; + + page = arena_base(&arena); + ret = bpf_arena_reserve_pages(&arena, page, 1); + if (ret) + return 1; + + page = bpf_arena_alloc_pages(&arena, page, 1, NUMA_NO_NODE, 0); + if ((u64)page) + return 2; +#endif + return 0; +} + +/* Reserve the same page twice, should return -EBUSY. */ +SEC("syscall") +__success __retval(0) +int reserve_twice(void *ctx) +{ +#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) + char __arena *page; + int ret; + + page = arena_base(&arena); + + ret = bpf_arena_reserve_pages(&arena, page, 1); + if (ret) + return 1; + + ret = bpf_arena_reserve_pages(&arena, page, 1); + if (ret != -EBUSY) + return 2; +#endif + return 0; +} + +/* Try to reserve past the end of the arena. */ +SEC("syscall") +__success __retval(0) +int reserve_invalid_region(void *ctx) +{ +#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) + char __arena *page; + int ret; + + /* Try a NULL pointer. */ + ret = bpf_arena_reserve_pages(&arena, NULL, 3); + if (ret != -EINVAL) + return 1; + + page = arena_base(&arena); + + ret = bpf_arena_reserve_pages(&arena, page, 3); + if (ret != -EINVAL) + return 2; + + ret = bpf_arena_reserve_pages(&arena, page, 4096); + if (ret != -EINVAL) + return 3; + + ret = bpf_arena_reserve_pages(&arena, page, (1ULL << 32) - 1); + if (ret != -EINVAL) + return 4; +#endif + return 0; +} + SEC("iter.s/bpf_map") __success __log_level(2) int iter_maps1(struct bpf_iter__bpf_map *ctx) diff --git a/tools/testing/selftests/bpf/progs/verifier_arena_large.c b/tools/testing/selftests/bpf/progs/verifier_arena_large.c index f94f30cf1bb8..9dbdf123542d 100644 --- a/tools/testing/selftests/bpf/progs/verifier_arena_large.c +++ b/tools/testing/selftests/bpf/progs/verifier_arena_large.c @@ -67,6 +67,104 @@ int big_alloc1(void *ctx) return 0; } +/* Try to access a reserved page. Behavior should be identical with accessing unallocated pages. */ +SEC("syscall") +__success __retval(0) +int access_reserved(void *ctx) +{ +#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) + volatile char __arena *page; + char __arena *base; + const size_t len = 4; + int ret, i; + + /* Get a separate region of the arena. */ + page = base = arena_base(&arena) + 16384 * PAGE_SIZE; + + ret = bpf_arena_reserve_pages(&arena, base, len); + if (ret) + return 1; + + /* Try to dirty reserved memory. */ + for (i = 0; i < len && can_loop; i++) + *page = 0x5a; + + for (i = 0; i < len && can_loop; i++) { + page = (volatile char __arena *)(base + i * PAGE_SIZE); + + /* + * Error out in case either the write went through, + * or the address has random garbage. + */ + if (*page == 0x5a) + return 2 + 2 * i; + + if (*page) + return 2 + 2 * i + 1; + } +#endif + return 0; +} + +/* Try to allocate a region overlapping with a reservation. */ +SEC("syscall") +__success __retval(0) +int request_partially_reserved(void *ctx) +{ +#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) + volatile char __arena *page; + char __arena *base; + int ret; + + /* Add an arbitrary page offset. 
*/ + page = base = arena_base(&arena) + 4096 * __PAGE_SIZE; + + ret = bpf_arena_reserve_pages(&arena, base + 3 * __PAGE_SIZE, 4); + if (ret) + return 1; + + page = bpf_arena_alloc_pages(&arena, base, 5, NUMA_NO_NODE, 0); + if ((u64)page != 0ULL) + return 2; +#endif + return 0; +} + +SEC("syscall") +__success __retval(0) +int free_reserved(void *ctx) +{ +#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) + char __arena *addr; + char __arena *page; + int ret; + + /* Add an arbitrary page offset. */ + addr = arena_base(&arena) + 32768 * __PAGE_SIZE; + + page = bpf_arena_alloc_pages(&arena, addr, 2, NUMA_NO_NODE, 0); + if (!page) + return 1; + + ret = bpf_arena_reserve_pages(&arena, addr + 2 * __PAGE_SIZE, 2); + if (ret) + return 2; + + /* + * Reserved and allocated pages should be interchangeable for + * bpf_arena_free_pages(). Free a reserved and an allocated + * page with a single call. + */ + bpf_arena_free_pages(&arena, addr + __PAGE_SIZE , 2); + + /* The free call above should have succeeded, so this allocation should too. */ + page = bpf_arena_alloc_pages(&arena, addr + __PAGE_SIZE, 2, NUMA_NO_NODE, 0); + if (!page) + return 3; +#endif + return 0; +} + #if defined(__BPF_FEATURE_ADDR_SPACE_CAST) #define PAGE_CNT 100 __u8 __arena * __arena page[PAGE_CNT]; /* occupies the first page */ diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds.c b/tools/testing/selftests/bpf/progs/verifier_bounds.c index 0eb33bb801b5..87a2c60d86e6 100644 --- a/tools/testing/selftests/bpf/progs/verifier_bounds.c +++ b/tools/testing/selftests/bpf/progs/verifier_bounds.c @@ -2,6 +2,7 @@ /* Converted from tools/testing/selftests/bpf/verifier/bounds.c */ #include <linux/bpf.h> +#include <../../../include/linux/filter.h> #include <bpf/bpf_helpers.h> #include "bpf_misc.h" @@ -620,8 +621,14 @@ l1_%=: exit; \ SEC("socket") __description("bounds check mixed 32bit and 64bit arithmetic. test1") -__success __failure_unpriv __msg_unpriv("R0 invalid mem access 'scalar'") +__success __success_unpriv __retval(0) +#ifdef SPEC_V1 +__xlated_unpriv("goto pc+2") +__xlated_unpriv("nospec") /* inserted to prevent `R0 invalid mem access 'scalar'` */ +__xlated_unpriv("goto pc-1") /* sanitized dead code */ +__xlated_unpriv("exit") +#endif __naked void _32bit_and_64bit_arithmetic_test1(void) { asm volatile (" \ @@ -643,8 +650,14 @@ l1_%=: exit; \ SEC("socket") __description("bounds check mixed 32bit and 64bit arithmetic. 
test2") -__success __failure_unpriv __msg_unpriv("R0 invalid mem access 'scalar'") +__success __success_unpriv __retval(0) +#ifdef SPEC_V1 +__xlated_unpriv("goto pc+2") +__xlated_unpriv("nospec") /* inserted to prevent `R0 invalid mem access 'scalar'` */ +__xlated_unpriv("goto pc-1") /* sanitized dead code */ +__xlated_unpriv("exit") +#endif __naked void _32bit_and_64bit_arithmetic_test2(void) { asm volatile (" \ @@ -691,9 +704,14 @@ l0_%=: r0 = 0; \ SEC("socket") __description("bounds check for reg = 0, reg xor 1") -__success __failure_unpriv -__msg_unpriv("R0 min value is outside of the allowed memory range") +__success __success_unpriv __retval(0) +#ifdef SPEC_V1 +__xlated_unpriv("if r1 != 0x0 goto pc+2") +__xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */ +__xlated_unpriv("goto pc-1") /* sanitized dead code */ +__xlated_unpriv("r0 = 0") +#endif __naked void reg_0_reg_xor_1(void) { asm volatile (" \ @@ -719,9 +737,14 @@ l1_%=: r0 = 0; \ SEC("socket") __description("bounds check for reg32 = 0, reg32 xor 1") -__success __failure_unpriv -__msg_unpriv("R0 min value is outside of the allowed memory range") +__success __success_unpriv __retval(0) +#ifdef SPEC_V1 +__xlated_unpriv("if w1 != 0x0 goto pc+2") +__xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */ +__xlated_unpriv("goto pc-1") /* sanitized dead code */ +__xlated_unpriv("r0 = 0") +#endif __naked void reg32_0_reg32_xor_1(void) { asm volatile (" \ @@ -747,9 +770,14 @@ l1_%=: r0 = 0; \ SEC("socket") __description("bounds check for reg = 2, reg xor 3") -__success __failure_unpriv -__msg_unpriv("R0 min value is outside of the allowed memory range") +__success __success_unpriv __retval(0) +#ifdef SPEC_V1 +__xlated_unpriv("if r1 > 0x0 goto pc+2") +__xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */ +__xlated_unpriv("goto pc-1") /* sanitized dead code */ +__xlated_unpriv("r0 = 0") +#endif __naked void reg_2_reg_xor_3(void) { asm volatile (" \ @@ -829,9 +857,14 @@ l1_%=: r0 = 0; \ SEC("socket") __description("bounds check for reg > 0, reg xor 3") -__success __failure_unpriv -__msg_unpriv("R0 min value is outside of the allowed memory range") +__success __success_unpriv __retval(0) +#ifdef SPEC_V1 +__xlated_unpriv("if r1 >= 0x0 goto pc+2") +__xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */ +__xlated_unpriv("goto pc-1") /* sanitized dead code */ +__xlated_unpriv("r0 = 0") +#endif __naked void reg_0_reg_xor_3(void) { asm volatile (" \ @@ -858,9 +891,14 @@ l1_%=: r0 = 0; \ SEC("socket") __description("bounds check for reg32 > 0, reg32 xor 3") -__success __failure_unpriv -__msg_unpriv("R0 min value is outside of the allowed memory range") +__success __success_unpriv __retval(0) +#ifdef SPEC_V1 +__xlated_unpriv("if w1 >= 0x0 goto pc+2") +__xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */ +__xlated_unpriv("goto pc-1") /* sanitized dead code */ +__xlated_unpriv("r0 = 0") +#endif __naked void reg32_0_reg32_xor_3(void) { asm volatile (" \ @@ -1028,7 +1066,7 @@ l0_%=: r0 = 0; \ SEC("xdp") __description("bound check with JMP_JSLT for crossing 64-bit signed boundary") __success __retval(0) -__flag(!BPF_F_TEST_REG_INVARIANTS) /* known invariants violation */ +__flag(BPF_F_TEST_REG_INVARIANTS) __naked void crossing_64_bit_signed_boundary_2(void) { asm volatile (" \ @@ -1334,4 +1372,300 @@ 
__naked void mult_sign_ovf(void) __imm(bpf_skb_store_bytes) : __clobber_all); } + +SEC("socket") +__description("64-bit addition, all outcomes overflow") +__success __log_level(2) +__msg("5: (0f) r3 += r3 {{.*}} R3_w=scalar(umin=0x4000000000000000,umax=0xfffffffffffffffe)") +__retval(0) +__naked void add64_full_overflow(void) +{ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "r4 = r0;" + "r3 = 0xa000000000000000 ll;" + "r3 |= r4;" + "r3 += r3;" + "r0 = 0;" + "exit" + : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("64-bit addition, partial overflow, result in unbounded reg") +__success __log_level(2) +__msg("4: (0f) r3 += r3 {{.*}} R3_w=scalar()") +__retval(0) +__naked void add64_partial_overflow(void) +{ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "r4 = r0;" + "r3 = 2;" + "r3 |= r4;" + "r3 += r3;" + "r0 = 0;" + "exit" + : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("32-bit addition overflow, all outcomes overflow") +__success __log_level(2) +__msg("4: (0c) w3 += w3 {{.*}} R3_w=scalar(smin=umin=umin32=0x40000000,smax=umax=umax32=0xfffffffe,var_off=(0x0; 0xffffffff))") +__retval(0) +__naked void add32_full_overflow(void) +{ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "w4 = w0;" + "w3 = 0xa0000000;" + "w3 |= w4;" + "w3 += w3;" + "r0 = 0;" + "exit" + : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("32-bit addition, partial overflow, result in unbounded u32 bounds") +__success __log_level(2) +__msg("4: (0c) w3 += w3 {{.*}} R3_w=scalar(smin=0,smax=umax=0xffffffff,var_off=(0x0; 0xffffffff))") +__retval(0) +__naked void add32_partial_overflow(void) +{ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "w4 = w0;" + "w3 = 2;" + "w3 |= w4;" + "w3 += w3;" + "r0 = 0;" + "exit" + : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("64-bit subtraction, all outcomes underflow") +__success __log_level(2) +__msg("6: (1f) r3 -= r1 {{.*}} R3_w=scalar(umin=1,umax=0x8000000000000000)") +__retval(0) +__naked void sub64_full_overflow(void) +{ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "r1 = r0;" + "r2 = 0x8000000000000000 ll;" + "r1 |= r2;" + "r3 = 0;" + "r3 -= r1;" + "r0 = 0;" + "exit" + : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("64-bit subtraction, partial overflow, result in unbounded reg") +__success __log_level(2) +__msg("3: (1f) r3 -= r2 {{.*}} R3_w=scalar()") +__retval(0) +__naked void sub64_partial_overflow(void) +{ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "r3 = r0;" + "r2 = 1;" + "r3 -= r2;" + "r0 = 0;" + "exit" + : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("32-bit subtraction overflow, all outcomes underflow") +__success __log_level(2) +__msg("5: (1c) w3 -= w1 {{.*}} R3_w=scalar(smin=umin=umin32=1,smax=umax=umax32=0x80000000,var_off=(0x0; 0xffffffff))") +__retval(0) +__naked void sub32_full_overflow(void) +{ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "w1 = w0;" + "w2 = 0x80000000;" + "w1 |= w2;" + "w3 = 0;" + "w3 -= w1;" + "r0 = 0;" + "exit" + : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("32-bit subtraction, partial overflow, result in unbounded u32 bounds") +__success __log_level(2) +__msg("3: (1c) w3 -= w2 {{.*}} R3_w=scalar(smin=0,smax=umax=0xffffffff,var_off=(0x0; 0xffffffff))") +__retval(0) +__naked void sub32_partial_overflow(void) +{ + asm volatile ( + "call 
%[bpf_get_prandom_u32];" + "w3 = w0;" + "w2 = 1;" + "w3 -= w2;" + "r0 = 0;" + "exit" + : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("dead branch on jset, does not result in invariants violation error") +__success __log_level(2) +__retval(0) __flag(BPF_F_TEST_REG_INVARIANTS) +__naked void jset_range_analysis(void) +{ + asm volatile (" \ + call %[bpf_get_netns_cookie]; \ + if r0 == 0 goto l0_%=; \ + if r0 & 0xffffffff goto +0; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_netns_cookie) + : __clobber_all); +} + +/* This test covers the bounds deduction on 64bits when the s64 and u64 ranges + * overlap on the negative side. At instruction 7, the ranges look as follows: + * + * 0 umin=0xfffffcf1 umax=0xff..ff6e U64_MAX + * | [xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx] | + * |----------------------------|------------------------------| + * |xxxxxxxxxx] [xxxxxxxxxxxx| + * 0 smax=0xeffffeee smin=-655 -1 + * + * We should therefore deduce the following new bounds: + * + * 0 u64=[0xff..ffd71;0xff..ff6e] U64_MAX + * | [xxx] | + * |----------------------------|------------------------------| + * | [xxx] | + * 0 s64=[-655;-146] -1 + * + * Without the deduction cross sign boundary, we end up with an invariant + * violation error. + */ +SEC("socket") +__description("bounds deduction cross sign boundary, negative overlap") +__success __log_level(2) __flag(BPF_F_TEST_REG_INVARIANTS) +__msg("7: (1f) r0 -= r6 {{.*}} R0=scalar(smin=smin32=-655,smax=smax32=-146,umin=0xfffffffffffffd71,umax=0xffffffffffffff6e,umin32=0xfffffd71,umax32=0xffffff6e,var_off=(0xfffffffffffffc00; 0x3ff))") +__retval(0) +__naked void bounds_deduct_negative_overlap(void) +{ + asm volatile(" \ + call %[bpf_get_prandom_u32]; \ + w3 = w0; \ + w6 = (s8)w0; \ + r0 = (s8)r0; \ + if w6 >= 0xf0000000 goto l0_%=; \ + r0 += r6; \ + r6 += 400; \ + r0 -= r6; \ + if r3 < r0 goto l0_%=; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +/* This test covers the bounds deduction on 64bits when the s64 and u64 ranges + * overlap on the positive side. At instruction 3, the ranges look as follows: + * + * 0 umin=0 umax=0xffffffffffffff00 U64_MAX + * [xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx] | + * |----------------------------|------------------------------| + * |xxxxxxxx] [xxxxxxxx| + * 0 smax=127 smin=-128 -1 + * + * We should therefore deduce the following new bounds: + * + * 0 u64=[0;127] U64_MAX + * [xxxxxxxx] | + * |----------------------------|------------------------------| + * [xxxxxxxx] | + * 0 s64=[0;127] -1 + * + * Without the deduction cross sign boundary, the program is rejected due to + * the frame pointer write. + */ +SEC("socket") +__description("bounds deduction cross sign boundary, positive overlap") +__success __log_level(2) __flag(BPF_F_TEST_REG_INVARIANTS) +__msg("3: (2d) if r0 > r1 {{.*}} R0_w=scalar(smin=smin32=0,smax=umax=smax32=umax32=127,var_off=(0x0; 0x7f))") +__retval(0) +__naked void bounds_deduct_positive_overlap(void) +{ + asm volatile(" \ + call %[bpf_get_prandom_u32]; \ + r0 = (s8)r0; \ + r1 = 0xffffffffffffff00; \ + if r0 > r1 goto l0_%=; \ + if r0 < 128 goto l0_%=; \ + r10 = 0; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +/* This test is the same as above, but the s64 and u64 ranges overlap in two + * places. 
At instruction 3, the ranges look as follows: + * + * 0 umin=0 umax=0xffffffffffffff80 U64_MAX + * [xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx] | + * |----------------------------|------------------------------| + * |xxxxxxxx] [xxxxxxxx| + * 0 smax=127 smin=-128 -1 + * + * 0xffffffffffffff80 = (u64)-128. We therefore can't deduce anything new and + * the program should fail due to the frame pointer write. + */ +SEC("socket") +__description("bounds deduction cross sign boundary, two overlaps") +__failure __flag(BPF_F_TEST_REG_INVARIANTS) +__msg("3: (2d) if r0 > r1 {{.*}} R0_w=scalar(smin=smin32=-128,smax=smax32=127,umax=0xffffffffffffff80)") +__msg("frame pointer is read only") +__naked void bounds_deduct_two_overlaps(void) +{ + asm volatile(" \ + call %[bpf_get_prandom_u32]; \ + r0 = (s8)r0; \ + r1 = 0xffffffffffffff80; \ + if r0 > r1 goto l0_%=; \ + if r0 < 128 goto l0_%=; \ + r10 = 0; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c b/tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c index c506afbdd936..260a6df264e3 100644 --- a/tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c +++ b/tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c @@ -159,13 +159,16 @@ __failure_unpriv __naked void deducing_bounds_from_const_10(void) { asm volatile (" \ + r6 = r1; \ r0 = 0; \ if r0 s<= 0 goto l0_%=; \ -l0_%=: /* Marks reg as unknown. */ \ - r0 = -r0; \ - r0 -= r1; \ +l0_%=: /* Marks r0 as unknown. */ \ + call %[bpf_get_prandom_u32]; \ + r0 -= r6; \ exit; \ -" ::: __clobber_all); +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); } char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_bpf_trap.c b/tools/testing/selftests/bpf/progs/verifier_bpf_trap.c new file mode 100644 index 000000000000..35e2cdc00a01 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_bpf_trap.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. 
*/ +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#if __clang_major__ >= 21 && 0 +SEC("socket") +__description("__builtin_trap with simple c code") +__failure __msg("unexpected __bpf_trap() due to uninitialized variable?") +void bpf_builtin_trap_with_simple_c(void) +{ + __builtin_trap(); +} +#endif + +SEC("socket") +__description("__bpf_trap with simple c code") +__failure __msg("unexpected __bpf_trap() due to uninitialized variable?") +void bpf_trap_with_simple_c(void) +{ + __bpf_trap(); +} + +SEC("socket") +__description("__bpf_trap as the second-from-last insn") +__failure __msg("unexpected __bpf_trap() due to uninitialized variable?") +__naked void bpf_trap_at_func_end(void) +{ + asm volatile ( + "r0 = 0;" + "call %[__bpf_trap];" + "exit;" + : + : __imm(__bpf_trap) + : __clobber_all); +} + +SEC("socket") +__description("dead code __bpf_trap in the middle of code") +__success +__naked void dead_bpf_trap_in_middle(void) +{ + asm volatile ( + "r0 = 0;" + "if r0 == 0 goto +1;" + "call %[__bpf_trap];" + "r0 = 2;" + "exit;" + : + : __imm(__bpf_trap) + : __clobber_all); +} + +SEC("socket") +__description("reachable __bpf_trap in the middle of code") +__failure __msg("unexpected __bpf_trap() due to uninitialized variable?") +__naked void live_bpf_trap_in_middle(void) +{ + asm volatile ( + "r0 = 0;" + "if r0 == 1 goto +1;" + "call %[__bpf_trap];" + "r0 = 2;" + "exit;" + : + : __imm(__bpf_trap) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_btf_ctx_access.c b/tools/testing/selftests/bpf/progs/verifier_btf_ctx_access.c index 28b939572cda..03942cec07e5 100644 --- a/tools/testing/selftests/bpf/progs/verifier_btf_ctx_access.c +++ b/tools/testing/selftests/bpf/progs/verifier_btf_ctx_access.c @@ -65,4 +65,16 @@ __naked void ctx_access_u32_pointer_reject_8(void) " ::: __clobber_all); } +SEC("fentry/bpf_fentry_test10") +__description("btf_ctx_access const void pointer accept") +__success __retval(0) +__naked void ctx_access_const_void_pointer_accept(void) +{ + asm volatile (" \ + r2 = *(u64 *)(r1 + 0); /* load 1st argument value (const void pointer) */\ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_ctx.c b/tools/testing/selftests/bpf/progs/verifier_ctx.c index a83809a1dbbf..424463094760 100644 --- a/tools/testing/selftests/bpf/progs/verifier_ctx.c +++ b/tools/testing/selftests/bpf/progs/verifier_ctx.c @@ -1,10 +1,12 @@ // SPDX-License-Identifier: GPL-2.0 /* Converted from tools/testing/selftests/bpf/verifier/ctx.c */ -#include <linux/bpf.h> +#include "vmlinux.h" #include <bpf/bpf_helpers.h> #include "bpf_misc.h" +#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER)) + SEC("tc") __description("context stores via BPF_ATOMIC") __failure __msg("BPF_ATOMIC stores into R1 ctx is not allowed") @@ -218,4 +220,48 @@ __naked void null_check_8_null_bind(void) : __clobber_all); } +#define narrow_load(type, ctx, field) \ + SEC(type) \ + __description("narrow load on field " #field " of " #ctx) \ + __failure __msg("invalid bpf_context access") \ + __naked void invalid_narrow_load##ctx##field(void) \ + { \ + asm volatile (" \ + r1 = *(u32 *)(r1 + %[off]); \ + r0 = 0; \ + exit;" \ + : \ + : __imm_const(off, offsetof(struct ctx, field) + 4) \ + : __clobber_all); \ + } + +narrow_load("cgroup/getsockopt", bpf_sockopt, sk); +narrow_load("cgroup/getsockopt", bpf_sockopt, optval); 
+narrow_load("cgroup/getsockopt", bpf_sockopt, optval_end); +narrow_load("tc", __sk_buff, sk); +narrow_load("cgroup/bind4", bpf_sock_addr, sk); +narrow_load("sockops", bpf_sock_ops, sk); +narrow_load("sockops", bpf_sock_ops, skb_data); +narrow_load("sockops", bpf_sock_ops, skb_data_end); +narrow_load("sockops", bpf_sock_ops, skb_hwtstamp); + +#define unaligned_access(type, ctx, field) \ + SEC(type) \ + __description("unaligned access on field " #field " of " #ctx) \ + __failure __msg("invalid bpf_context access") \ + __naked void unaligned_ctx_access_##ctx##field(void) \ + { \ + asm volatile (" \ + r1 = *(u%[size] *)(r1 + %[off]); \ + r0 = 0; \ + exit;" \ + : \ + : __imm_const(size, sizeof_field(struct ctx, field) * 8), \ + __imm_const(off, offsetof(struct ctx, field) + 1) \ + : __clobber_all); \ + } + +unaligned_access("flow_dissector", __sk_buff, data); +unaligned_access("netfilter", bpf_nf_ctx, skb); + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_div_overflow.c b/tools/testing/selftests/bpf/progs/verifier_div_overflow.c index 458984da804c..34e0c012ee76 100644 --- a/tools/testing/selftests/bpf/progs/verifier_div_overflow.c +++ b/tools/testing/selftests/bpf/progs/verifier_div_overflow.c @@ -77,7 +77,7 @@ l0_%=: exit; \ SEC("tc") __description("MOD32 overflow, check 1") -__success __retval(INT_MIN) +__success __retval(_INT_MIN) __naked void mod32_overflow_check_1(void) { asm volatile (" \ @@ -92,7 +92,7 @@ __naked void mod32_overflow_check_1(void) SEC("tc") __description("MOD32 overflow, check 2") -__success __retval(INT_MIN) +__success __retval(_INT_MIN) __naked void mod32_overflow_check_2(void) { asm volatile (" \ diff --git a/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c b/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c index 4ab0ef18d7eb..181da86ba5f0 100644 --- a/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c +++ b/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c @@ -179,4 +179,132 @@ int BPF_PROG(trusted_acq_rel, struct task_struct *task, u64 clone_flags) return subprog_trusted_acq_rel(task); } +__weak int subprog_untrusted_bad_tags(struct task_struct *task __arg_untrusted __arg_nullable) +{ + return task->pid; +} + +SEC("tp_btf/sys_enter") +__failure +__msg("arg#0 untrusted cannot be combined with any other tags") +int untrusted_bad_tags(void *ctx) +{ + return subprog_untrusted_bad_tags(0); +} + +struct local_type_wont_be_accepted {}; + +__weak int subprog_untrusted_bad_type(struct local_type_wont_be_accepted *p __arg_untrusted) +{ + return 0; +} + +SEC("tp_btf/sys_enter") +__failure +__msg("arg#0 reference type('STRUCT local_type_wont_be_accepted') has no matches") +int untrusted_bad_type(void *ctx) +{ + return subprog_untrusted_bad_type(bpf_rdonly_cast(0, 0)); +} + +__weak int subprog_untrusted(const volatile struct task_struct *restrict task __arg_untrusted) +{ + return task->pid; +} + +SEC("tp_btf/sys_enter") +__success +__log_level(2) +__msg("r1 = {{.*}}; {{.*}}R1_w=trusted_ptr_task_struct()") +__msg("Func#1 ('subprog_untrusted') is global and assumed valid.") +__msg("Validating subprog_untrusted() func#1...") +__msg(": R1=untrusted_ptr_task_struct") +int trusted_to_untrusted(void *ctx) +{ + return subprog_untrusted(bpf_get_current_task_btf()); +} + +char mem[16]; +u32 off; + +SEC("tp_btf/sys_enter") +__success +int anything_to_untrusted(void *ctx) +{ + /* untrusted to untrusted */ + subprog_untrusted(bpf_core_cast(0, struct task_struct)); + /* wrong type to untrusted */ + 
subprog_untrusted((void *)bpf_core_cast(0, struct bpf_verifier_env)); + /* map value to untrusted */ + subprog_untrusted((void *)mem); + /* scalar to untrusted */ + subprog_untrusted(0); + /* variable offset to untrusted (map) */ + subprog_untrusted((void *)mem + off); + /* variable offset to untrusted (trusted) */ + subprog_untrusted((void *)bpf_get_current_task_btf() + off); + return 0; +} + +__weak int subprog_untrusted2(struct task_struct *task __arg_untrusted) +{ + return subprog_trusted_task_nullable(task); +} + +SEC("tp_btf/sys_enter") +__failure +__msg("R1 type=untrusted_ptr_ expected=ptr_, trusted_ptr_, rcu_ptr_") +__msg("Caller passes invalid args into func#{{.*}} ('subprog_trusted_task_nullable')") +int untrusted_to_trusted(void *ctx) +{ + return subprog_untrusted2(bpf_get_current_task_btf()); +} + +__weak int subprog_void_untrusted(void *p __arg_untrusted) +{ + return *(int *)p; +} + +__weak int subprog_char_untrusted(char *p __arg_untrusted) +{ + return *(int *)p; +} + +__weak int subprog_enum_untrusted(enum bpf_attach_type *p __arg_untrusted) +{ + return *(int *)p; +} + +SEC("tp_btf/sys_enter") +__success +__log_level(2) +__msg("r1 = {{.*}}; {{.*}}R1_w=trusted_ptr_task_struct()") +__msg("Func#1 ('subprog_void_untrusted') is global and assumed valid.") +__msg("Validating subprog_void_untrusted() func#1...") +__msg(": R1=rdonly_untrusted_mem(sz=0)") +int trusted_to_untrusted_mem(void *ctx) +{ + return subprog_void_untrusted(bpf_get_current_task_btf()); +} + +SEC("tp_btf/sys_enter") +__success +int anything_to_untrusted_mem(void *ctx) +{ + /* untrusted to untrusted mem */ + subprog_void_untrusted(bpf_core_cast(0, struct task_struct)); + /* map value to untrusted mem */ + subprog_void_untrusted(mem); + /* scalar to untrusted mem */ + subprog_void_untrusted(0); + /* variable offset to untrusted mem (map) */ + subprog_void_untrusted((void *)mem + off); + /* variable offset to untrusted mem (trusted) */ + subprog_void_untrusted(bpf_get_current_task_btf() + off); + /* variable offset to untrusted char/enum (map) */ + subprog_char_untrusted(mem + off); + subprog_enum_untrusted((void *)mem + off); + return 0; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_load_acquire.c b/tools/testing/selftests/bpf/progs/verifier_load_acquire.c index 77698d5a19e4..74f4f19c10b8 100644 --- a/tools/testing/selftests/bpf/progs/verifier_load_acquire.c +++ b/tools/testing/selftests/bpf/progs/verifier_load_acquire.c @@ -10,65 +10,81 @@ SEC("socket") __description("load-acquire, 8-bit") -__success __success_unpriv __retval(0x12) +__success __success_unpriv __retval(0) __naked void load_acquire_8(void) { asm volatile ( - "w1 = 0x12;" + "r0 = 0;" + "w1 = 0xfe;" "*(u8 *)(r10 - 1) = w1;" - ".8byte %[load_acquire_insn];" // w0 = load_acquire((u8 *)(r10 - 1)); + ".8byte %[load_acquire_insn];" // w2 = load_acquire((u8 *)(r10 - 1)); + "if r2 == r1 goto 1f;" + "r0 = 1;" +"1:" "exit;" : : __imm_insn(load_acquire_insn, - BPF_ATOMIC_OP(BPF_B, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_10, -1)) + BPF_ATOMIC_OP(BPF_B, BPF_LOAD_ACQ, BPF_REG_2, BPF_REG_10, -1)) : __clobber_all); } SEC("socket") __description("load-acquire, 16-bit") -__success __success_unpriv __retval(0x1234) +__success __success_unpriv __retval(0) __naked void load_acquire_16(void) { asm volatile ( - "w1 = 0x1234;" + "r0 = 0;" + "w1 = 0xfedc;" "*(u16 *)(r10 - 2) = w1;" - ".8byte %[load_acquire_insn];" // w0 = load_acquire((u16 *)(r10 - 2)); + ".8byte %[load_acquire_insn];" // w2 = load_acquire((u16 *)(r10 - 2)); + 
"if r2 == r1 goto 1f;" + "r0 = 1;" +"1:" "exit;" : : __imm_insn(load_acquire_insn, - BPF_ATOMIC_OP(BPF_H, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_10, -2)) + BPF_ATOMIC_OP(BPF_H, BPF_LOAD_ACQ, BPF_REG_2, BPF_REG_10, -2)) : __clobber_all); } SEC("socket") __description("load-acquire, 32-bit") -__success __success_unpriv __retval(0x12345678) +__success __success_unpriv __retval(0) __naked void load_acquire_32(void) { asm volatile ( - "w1 = 0x12345678;" + "r0 = 0;" + "w1 = 0xfedcba09;" "*(u32 *)(r10 - 4) = w1;" - ".8byte %[load_acquire_insn];" // w0 = load_acquire((u32 *)(r10 - 4)); + ".8byte %[load_acquire_insn];" // w2 = load_acquire((u32 *)(r10 - 4)); + "if r2 == r1 goto 1f;" + "r0 = 1;" +"1:" "exit;" : : __imm_insn(load_acquire_insn, - BPF_ATOMIC_OP(BPF_W, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_10, -4)) + BPF_ATOMIC_OP(BPF_W, BPF_LOAD_ACQ, BPF_REG_2, BPF_REG_10, -4)) : __clobber_all); } SEC("socket") __description("load-acquire, 64-bit") -__success __success_unpriv __retval(0x1234567890abcdef) +__success __success_unpriv __retval(0) __naked void load_acquire_64(void) { asm volatile ( - "r1 = 0x1234567890abcdef ll;" + "r0 = 0;" + "r1 = 0xfedcba0987654321 ll;" "*(u64 *)(r10 - 8) = r1;" - ".8byte %[load_acquire_insn];" // r0 = load_acquire((u64 *)(r10 - 8)); + ".8byte %[load_acquire_insn];" // r2 = load_acquire((u64 *)(r10 - 8)); + "if r2 == r1 goto 1f;" + "r0 = 1;" +"1:" "exit;" : : __imm_insn(load_acquire_insn, - BPF_ATOMIC_OP(BPF_DW, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_10, -8)) + BPF_ATOMIC_OP(BPF_DW, BPF_LOAD_ACQ, BPF_REG_2, BPF_REG_10, -8)) : __clobber_all); } diff --git a/tools/testing/selftests/bpf/progs/verifier_map_in_map.c b/tools/testing/selftests/bpf/progs/verifier_map_in_map.c index 7d088ba99ea5..16b761e510f0 100644 --- a/tools/testing/selftests/bpf/progs/verifier_map_in_map.c +++ b/tools/testing/selftests/bpf/progs/verifier_map_in_map.c @@ -139,4 +139,122 @@ __naked void on_the_inner_map_pointer(void) : __clobber_all); } +SEC("socket") +__description("map_ptr is never null") +__success +__naked void map_ptr_is_never_null(void) +{ + asm volatile (" \ + r0 = 0; \ + r1 = %[map_in_map] ll; \ + if r1 != 0 goto l0_%=; \ + r10 = 42; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_in_map) + : __clobber_all); +} + +SEC("socket") +__description("map_ptr is never null inner") +__success +__naked void map_ptr_is_never_null_inner(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_in_map] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + if r0 != 0 goto l0_%=; \ + r10 = 42; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_in_map) + : __clobber_all); +} + +SEC("socket") +__description("map_ptr is never null inner spill fill") +__success +__naked void map_ptr_is_never_null_inner_spill_fill(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_in_map] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: *(u64 *)(r10 -16) = r0; \ + r1 = *(u64 *)(r10 -16); \ + if r1 == 0 goto l1_%=; \ + exit; \ +l1_%=: r10 = 42; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_in_map) + : __clobber_all); +} + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); + __array(values, struct { + __uint(type, BPF_MAP_TYPE_RINGBUF); + __uint(max_entries, 64 * 1024); + }); +} rb_in_map SEC(".maps"); + +struct rb_ctx { + void *rb; + struct bpf_dynptr 
dptr; +}; + +static __always_inline struct rb_ctx __rb_event_reserve(__u32 sz) +{ + struct rb_ctx rb_ctx = {}; + void *rb; + __u32 cpu = bpf_get_smp_processor_id(); + __u32 rb_slot = cpu & 1; + + rb = bpf_map_lookup_elem(&rb_in_map, &rb_slot); + if (!rb) + return rb_ctx; + + rb_ctx.rb = rb; + bpf_ringbuf_reserve_dynptr(rb, sz, 0, &rb_ctx.dptr); + + return rb_ctx; +} + +static __noinline void __rb_event_submit(struct rb_ctx *ctx) +{ + if (!ctx->rb) + return; + + /* If the verifier (incorrectly) concludes that ctx->rb can be + * NULL at this point, we'll get "BPF_EXIT instruction in main + * prog would lead to reference leak" error + */ + bpf_ringbuf_submit_dynptr(&ctx->dptr, 0); +} + +SEC("socket") +int map_ptr_is_never_null_rb(void *ctx) +{ + struct rb_ctx event_ctx = __rb_event_reserve(256); + __rb_event_submit(&event_ctx); + return 0; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_movsx.c b/tools/testing/selftests/bpf/progs/verifier_movsx.c index 994bbc346d25..a4d8814eb5ed 100644 --- a/tools/testing/selftests/bpf/progs/verifier_movsx.c +++ b/tools/testing/selftests/bpf/progs/verifier_movsx.c @@ -245,7 +245,13 @@ l0_%=: \ SEC("socket") __description("MOV32SX, S8, var_off not u32_max, positive after s8 extension") __success __retval(0) -__failure_unpriv __msg_unpriv("frame pointer is read only") +__success_unpriv +#ifdef SPEC_V1 +__xlated_unpriv("w0 = 0") +__xlated_unpriv("exit") +__xlated_unpriv("nospec") /* inserted to prevent `frame pointer is read only` */ +__xlated_unpriv("goto pc-1") +#endif __naked void mov64sx_s32_varoff_2(void) { asm volatile (" \ @@ -267,7 +273,13 @@ l0_%=: \ SEC("socket") __description("MOV32SX, S8, var_off not u32_max, negative after s8 extension") __success __retval(0) -__failure_unpriv __msg_unpriv("frame pointer is read only") +__success_unpriv +#ifdef SPEC_V1 +__xlated_unpriv("w0 = 0") +__xlated_unpriv("exit") +__xlated_unpriv("nospec") /* inserted to prevent `frame pointer is read only` */ +__xlated_unpriv("goto pc-1") +#endif __naked void mov64sx_s32_varoff_3(void) { asm volatile (" \ diff --git a/tools/testing/selftests/bpf/progs/verifier_precision.c b/tools/testing/selftests/bpf/progs/verifier_precision.c index 6662d4b39969..73fee2aec698 100644 --- a/tools/testing/selftests/bpf/progs/verifier_precision.c +++ b/tools/testing/selftests/bpf/progs/verifier_precision.c @@ -91,8 +91,7 @@ __naked int bpf_end_bswap(void) ::: __clobber_all); } -#if defined(ENABLE_ATOMICS_TESTS) && \ - (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86)) +#ifdef CAN_USE_LOAD_ACQ_STORE_REL SEC("?raw_tp") __success __log_level(2) @@ -138,7 +137,7 @@ __naked int bpf_store_release(void) : __clobber_all); } -#endif /* load-acquire, store-release */ +#endif /* CAN_USE_LOAD_ACQ_STORE_REL */ #endif /* v4 instruction */ SEC("?raw_tp") @@ -179,4 +178,127 @@ __naked int state_loop_first_last_equal(void) ); } +__used __naked static void __bpf_cond_op_r10(void) +{ + asm volatile ( + "r2 = 2314885393468386424 ll;" + "goto +0;" + "if r2 <= r10 goto +3;" + "if r1 >= -1835016 goto +0;" + "if r2 <= 8 goto +0;" + "if r3 <= 0 goto +0;" + "exit;" + ::: __clobber_all); +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("8: (bd) if r2 <= r10 goto pc+3") +__msg("9: (35) if r1 >= 0xffe3fff8 goto pc+0") +__msg("10: (b5) if r2 <= 0x8 goto pc+0") +__msg("mark_precise: frame1: last_idx 10 first_idx 0 subseq_idx -1") +__msg("mark_precise: frame1: regs=r2 stack= before 9: (35) if r1 >= 0xffe3fff8 goto pc+0") +__msg("mark_precise: frame1: regs=r2 
stack= before 8: (bd) if r2 <= r10 goto pc+3") +__msg("mark_precise: frame1: regs=r2 stack= before 7: (05) goto pc+0") +__naked void bpf_cond_op_r10(void) +{ + asm volatile ( + "r3 = 0 ll;" + "call __bpf_cond_op_r10;" + "r0 = 0;" + "exit;" + ::: __clobber_all); +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("3: (bf) r3 = r10") +__msg("4: (bd) if r3 <= r2 goto pc+1") +__msg("5: (b5) if r2 <= 0x8 goto pc+2") +__msg("mark_precise: frame0: last_idx 5 first_idx 0 subseq_idx -1") +__msg("mark_precise: frame0: regs=r2 stack= before 4: (bd) if r3 <= r2 goto pc+1") +__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r3 = r10") +__naked void bpf_cond_op_not_r10(void) +{ + asm volatile ( + "r0 = 0;" + "r2 = 2314885393468386424 ll;" + "r3 = r10;" + "if r3 <= r2 goto +1;" + "if r2 <= 8 goto +2;" + "r0 = 2 ll;" + "exit;" + ::: __clobber_all); +} + +SEC("lsm.s/socket_connect") +__success __log_level(2) +__msg("0: (b7) r0 = 1 ; R0_w=1") +__msg("1: (84) w0 = -w0 ; R0_w=0xffffffff") +__msg("mark_precise: frame0: last_idx 2 first_idx 0 subseq_idx -1") +__msg("mark_precise: frame0: regs=r0 stack= before 1: (84) w0 = -w0") +__msg("mark_precise: frame0: regs=r0 stack= before 0: (b7) r0 = 1") +__naked int bpf_neg_2(void) +{ + /* + * lsm.s/socket_connect requires a return value within [-4095, 0]. + * Returning -1 is allowed + */ + asm volatile ( + "r0 = 1;" + "w0 = -w0;" + "exit;" + ::: __clobber_all); +} + +SEC("lsm.s/socket_connect") +__failure __msg("At program exit the register R0 has") +__naked int bpf_neg_3(void) +{ + /* + * lsm.s/socket_connect requires a return value within [-4095, 0]. + * Returning -10000 is not allowed. + */ + asm volatile ( + "r0 = 10000;" + "w0 = -w0;" + "exit;" + ::: __clobber_all); +} + +SEC("lsm.s/socket_connect") +__success __log_level(2) +__msg("0: (b7) r0 = 1 ; R0_w=1") +__msg("1: (87) r0 = -r0 ; R0_w=-1") +__msg("mark_precise: frame0: last_idx 2 first_idx 0 subseq_idx -1") +__msg("mark_precise: frame0: regs=r0 stack= before 1: (87) r0 = -r0") +__msg("mark_precise: frame0: regs=r0 stack= before 0: (b7) r0 = 1") +__naked int bpf_neg_4(void) +{ + /* + * lsm.s/socket_connect requires a return value within [-4095, 0]. + * Returning -1 is allowed + */ + asm volatile ( + "r0 = 1;" + "r0 = -r0;" + "exit;" + ::: __clobber_all); +} + +SEC("lsm.s/socket_connect") +__failure __msg("At program exit the register R0 has") +__naked int bpf_neg_5(void) +{ + /* + * lsm.s/socket_connect requires a return value within [-4095, 0]. + * Returning -10000 is not allowed. 
+ */ + asm volatile ( + "r0 = 10000;" + "r0 = -r0;" + "exit;" + ::: __clobber_all); +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_private_stack.c b/tools/testing/selftests/bpf/progs/verifier_private_stack.c index fc91b414364e..1ecd34ebde19 100644 --- a/tools/testing/selftests/bpf/progs/verifier_private_stack.c +++ b/tools/testing/selftests/bpf/progs/verifier_private_stack.c @@ -8,7 +8,7 @@ /* From include/linux/filter.h */ #define MAX_BPF_STACK 512 -#if defined(__TARGET_ARCH_x86) +#if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64) struct elem { struct bpf_timer t; @@ -30,6 +30,18 @@ __jited(" movabsq $0x{{.*}}, %r9") __jited(" addq %gs:{{.*}}, %r9") __jited(" movl $0x2a, %edi") __jited(" movq %rdi, -0x100(%r9)") +__arch_arm64 +__jited(" stp x25, x27, [sp, {{.*}}]!") +__jited(" mov x27, {{.*}}") +__jited(" movk x27, {{.*}}, lsl #16") +__jited(" movk x27, {{.*}}") +__jited(" mrs x10, TPIDR_EL{{[0-1]}}") +__jited(" add x27, x27, x10") +__jited(" add x25, x27, {{.*}}") +__jited(" mov x0, #0x2a") +__jited(" str x0, [x27]") +__jited("...") +__jited(" ldp x25, x27, [sp], {{.*}}") __naked void private_stack_single_prog(void) { asm volatile (" \ @@ -45,6 +57,9 @@ __description("No private stack") __success __arch_x86_64 __jited(" subq $0x8, %rsp") +__arch_arm64 +__jited(" mov x25, sp") +__jited(" sub sp, sp, #0x10") __naked void no_private_stack_nested(void) { asm volatile (" \ @@ -81,6 +96,19 @@ __jited(" pushq %r9") __jited(" callq 0x{{.*}}") __jited(" popq %r9") __jited(" xorl %eax, %eax") +__arch_arm64 +__jited(" stp x25, x27, [sp, {{.*}}]!") +__jited(" mov x27, {{.*}}") +__jited(" movk x27, {{.*}}, lsl #16") +__jited(" movk x27, {{.*}}") +__jited(" mrs x10, TPIDR_EL{{[0-1]}}") +__jited(" add x27, x27, x10") +__jited(" add x25, x27, {{.*}}") +__jited(" mov x0, #0x2a") +__jited(" str x0, [x27]") +__jited(" bl {{.*}}") +__jited("...") +__jited(" ldp x25, x27, [sp], {{.*}}") __naked void private_stack_nested_1(void) { asm volatile (" \ @@ -131,6 +159,24 @@ __jited(" movq %rdi, -0x200(%r9)") __jited(" pushq %r9") __jited(" callq") __jited(" popq %r9") +__arch_arm64 +__jited("func #1") +__jited("...") +__jited(" stp x25, x27, [sp, {{.*}}]!") +__jited(" mov x27, {{.*}}") +__jited(" movk x27, {{.*}}, lsl #16") +__jited(" movk x27, {{.*}}") +__jited(" mrs x10, TPIDR_EL{{[0-1]}}") +__jited(" add x27, x27, x10") +__jited(" add x25, x27, {{.*}}") +__jited(" bl 0x{{.*}}") +__jited(" add x7, x0, #0x0") +__jited(" mov x0, #0x2a") +__jited(" str x0, [x27]") +__jited(" bl 0x{{.*}}") +__jited(" add x7, x0, #0x0") +__jited(" mov x7, #0x0") +__jited(" ldp x25, x27, [sp], {{.*}}") __naked void private_stack_callback(void) { asm volatile (" \ @@ -154,6 +200,28 @@ __arch_x86_64 __jited(" pushq %r9") __jited(" callq") __jited(" popq %r9") +__arch_arm64 +__jited(" stp x29, x30, [sp, #-0x10]!") +__jited(" mov x29, sp") +__jited(" stp xzr, x26, [sp, #-0x10]!") +__jited(" mov x26, sp") +__jited(" stp x19, x20, [sp, #-0x10]!") +__jited(" stp x21, x22, [sp, #-0x10]!") +__jited(" stp x23, x24, [sp, #-0x10]!") +__jited(" stp x25, x26, [sp, #-0x10]!") +__jited(" stp x27, x28, [sp, #-0x10]!") +__jited(" mov x27, {{.*}}") +__jited(" movk x27, {{.*}}, lsl #16") +__jited(" movk x27, {{.*}}") +__jited(" mrs x10, TPIDR_EL{{[0-1]}}") +__jited(" add x27, x27, x10") +__jited(" add x25, x27, {{.*}}") +__jited(" mov x0, #0x2a") +__jited(" str x0, [x27]") +__jited(" mov x0, #0x0") +__jited(" bl 0x{{.*}}") +__jited(" add x7, x0, #0x0") +__jited(" ldp x27, x28, [sp], 
#0x10") int private_stack_exception_main_prog(void) { asm volatile (" \ @@ -179,6 +247,19 @@ __jited(" movq %rdi, -0x200(%r9)") __jited(" pushq %r9") __jited(" callq") __jited(" popq %r9") +__arch_arm64 +__jited(" stp x27, x28, [sp, #-0x10]!") +__jited(" mov x27, {{.*}}") +__jited(" movk x27, {{.*}}, lsl #16") +__jited(" movk x27, {{.*}}") +__jited(" mrs x10, TPIDR_EL{{[0-1]}}") +__jited(" add x27, x27, x10") +__jited(" add x25, x27, {{.*}}") +__jited(" mov x0, #0x2a") +__jited(" str x0, [x27]") +__jited(" bl 0x{{.*}}") +__jited(" add x7, x0, #0x0") +__jited(" ldp x27, x28, [sp], #0x10") int private_stack_exception_sub_prog(void) { asm volatile (" \ @@ -220,6 +301,10 @@ __description("Private stack, async callback, not nested") __success __retval(0) __arch_x86_64 __jited(" movabsq $0x{{.*}}, %r9") +__arch_arm64 +__jited(" mrs x10, TPIDR_EL{{[0-1]}}") +__jited(" add x27, x27, x10") +__jited(" add x25, x27, {{.*}}") int private_stack_async_callback_1(void) { struct bpf_timer *arr_timer; @@ -241,6 +326,8 @@ __description("Private stack, async callback, potential nesting") __success __retval(0) __arch_x86_64 __jited(" subq $0x100, %rsp") +__arch_arm64 +__jited(" sub sp, sp, #0x100") int private_stack_async_callback_2(void) { struct bpf_timer *arr_timer; diff --git a/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c b/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c index 683a882b3e6d..910365201f68 100644 --- a/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c +++ b/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c @@ -27,7 +27,7 @@ struct bpf_key {} __attribute__((preserve_access_index)); extern void bpf_key_put(struct bpf_key *key) __ksym; extern struct bpf_key *bpf_lookup_system_key(__u64 id) __ksym; -extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym; +extern struct bpf_key *bpf_lookup_user_key(__s32 serial, __u64 flags) __ksym; /* BTF FUNC records are not generated for kfuncs referenced * from inline assembly. 
These records are necessary for diff --git a/tools/testing/selftests/bpf/progs/verifier_store_release.c b/tools/testing/selftests/bpf/progs/verifier_store_release.c index c0442d5bb049..72f1eb006074 100644 --- a/tools/testing/selftests/bpf/progs/verifier_store_release.c +++ b/tools/testing/selftests/bpf/progs/verifier_store_release.c @@ -6,18 +6,21 @@ #include "../../../include/linux/filter.h" #include "bpf_misc.h" -#if __clang_major__ >= 18 && defined(ENABLE_ATOMICS_TESTS) && \ - (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86)) +#ifdef CAN_USE_LOAD_ACQ_STORE_REL SEC("socket") __description("store-release, 8-bit") -__success __success_unpriv __retval(0x12) +__success __success_unpriv __retval(0) __naked void store_release_8(void) { asm volatile ( + "r0 = 0;" "w1 = 0x12;" ".8byte %[store_release_insn];" // store_release((u8 *)(r10 - 1), w1); - "w0 = *(u8 *)(r10 - 1);" + "w2 = *(u8 *)(r10 - 1);" + "if r2 == r1 goto 1f;" + "r0 = 1;" +"1:" "exit;" : : __imm_insn(store_release_insn, @@ -27,13 +30,17 @@ __naked void store_release_8(void) SEC("socket") __description("store-release, 16-bit") -__success __success_unpriv __retval(0x1234) +__success __success_unpriv __retval(0) __naked void store_release_16(void) { asm volatile ( + "r0 = 0;" "w1 = 0x1234;" ".8byte %[store_release_insn];" // store_release((u16 *)(r10 - 2), w1); - "w0 = *(u16 *)(r10 - 2);" + "w2 = *(u16 *)(r10 - 2);" + "if r2 == r1 goto 1f;" + "r0 = 1;" +"1:" "exit;" : : __imm_insn(store_release_insn, @@ -43,13 +50,17 @@ __naked void store_release_16(void) SEC("socket") __description("store-release, 32-bit") -__success __success_unpriv __retval(0x12345678) +__success __success_unpriv __retval(0) __naked void store_release_32(void) { asm volatile ( + "r0 = 0;" "w1 = 0x12345678;" ".8byte %[store_release_insn];" // store_release((u32 *)(r10 - 4), w1); - "w0 = *(u32 *)(r10 - 4);" + "w2 = *(u32 *)(r10 - 4);" + "if r2 == r1 goto 1f;" + "r0 = 1;" +"1:" "exit;" : : __imm_insn(store_release_insn, @@ -59,13 +70,17 @@ __naked void store_release_32(void) SEC("socket") __description("store-release, 64-bit") -__success __success_unpriv __retval(0x1234567890abcdef) +__success __success_unpriv __retval(0) __naked void store_release_64(void) { asm volatile ( + "r0 = 0;" "r1 = 0x1234567890abcdef ll;" ".8byte %[store_release_insn];" // store_release((u64 *)(r10 - 8), r1); - "r0 = *(u64 *)(r10 - 8);" + "r2 = *(u64 *)(r10 - 8);" + "if r2 == r1 goto 1f;" + "r0 = 1;" +"1:" "exit;" : : __imm_insn(store_release_insn, @@ -271,7 +286,7 @@ __naked void store_release_with_invalid_reg(void) : __clobber_all); } -#else +#else /* CAN_USE_LOAD_ACQ_STORE_REL */ SEC("socket") __description("Clang version < 18, ENABLE_ATOMICS_TESTS not defined, and/or JIT doesn't support store-release, use a dummy test") @@ -281,6 +296,6 @@ int dummy_test(void) return 0; } -#endif +#endif /* CAN_USE_LOAD_ACQ_STORE_REL */ char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_tailcall.c b/tools/testing/selftests/bpf/progs/verifier_tailcall.c new file mode 100644 index 000000000000..b4acce60fb9b --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_tailcall.c @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u32); +} map_array SEC(".maps"); + +SEC("socket") +__description("invalid map type for tail call") +__failure __msg("expected prog 
array map for tail call") +__failure_unpriv +__naked void invalid_map_for_tail_call(void) +{ + asm volatile (" \ + r2 = %[map_array] ll; \ + r3 = 0; \ + call %[bpf_tail_call]; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_array) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_unpriv.c b/tools/testing/selftests/bpf/progs/verifier_unpriv.c index a4a5e2071604..28b4f7035ceb 100644 --- a/tools/testing/selftests/bpf/progs/verifier_unpriv.c +++ b/tools/testing/selftests/bpf/progs/verifier_unpriv.c @@ -572,8 +572,14 @@ l0_%=: exit; \ SEC("socket") __description("alu32: mov u32 const") -__success __failure_unpriv __msg_unpriv("R7 invalid mem access 'scalar'") +__success __success_unpriv __retval(0) +#ifdef SPEC_V1 +__xlated_unpriv("if r0 == 0x0 goto pc+2") +__xlated_unpriv("nospec") /* inserted to prevent `R7 invalid mem access 'scalar'` */ +__xlated_unpriv("goto pc-1") /* sanitized dead code */ +__xlated_unpriv("exit") +#endif __naked void alu32_mov_u32_const(void) { asm volatile (" \ @@ -619,12 +625,11 @@ __naked void pass_pointer_to_tail_call(void) SEC("socket") __description("unpriv: cmp map pointer with zero") -__success __failure_unpriv __msg_unpriv("R1 pointer comparison") +__success __success_unpriv __retval(0) __naked void cmp_map_pointer_with_zero(void) { asm volatile (" \ - r1 = 0; \ r1 = %[map_hash_8b] ll; \ if r1 == 0 goto l0_%=; \ l0_%=: r0 = 0; \ @@ -635,6 +640,22 @@ l0_%=: r0 = 0; \ } SEC("socket") +__description("unpriv: cmp map pointer with const") +__success __failure_unpriv __msg_unpriv("R1 pointer comparison prohibited") +__retval(0) +__naked void cmp_map_pointer_with_const(void) +{ + asm volatile (" \ + r1 = %[map_hash_8b] ll; \ + if r1 == 0x0000beef goto l0_%=; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") __description("unpriv: write into frame pointer") __failure __msg("frame pointer is read only") __failure_unpriv @@ -723,4 +744,210 @@ l0_%=: r0 = 0; \ " ::: __clobber_all); } +SEC("socket") +__description("unpriv: Spectre v1 path-based type confusion of scalar as stack-ptr") +__success __success_unpriv __retval(0) +#ifdef SPEC_V1 +__xlated_unpriv("if r0 != 0x1 goto pc+2") +/* This nospec prevents the exploit because it forces the mispredicted (not + * taken) `if r0 != 0x0 goto l0_%=` to resolve before using r6 as a pointer. + * This causes the CPU to realize that `r6 = r9` should have never executed. It + * ensures that r6 always contains a readable stack slot ptr when the insn after + * the nospec executes. 
+ */ +__xlated_unpriv("nospec") +__xlated_unpriv("r9 = *(u8 *)(r6 +0)") +#endif +__naked void unpriv_spec_v1_type_confusion(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + /* r0: pointer to a map array entry */ \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + /* r1, r2: prepared call args */ \ + r6 = r10; \ + r6 += -8; \ + /* r6: pointer to readable stack slot */ \ + r9 = 0xffffc900; \ + r9 <<= 32; \ + /* r9: scalar controlled by attacker */ \ + r0 = *(u64 *)(r0 + 0); /* cache miss */ \ + if r0 != 0x0 goto l0_%=; \ + r6 = r9; \ +l0_%=: if r0 != 0x1 goto l1_%=; \ + r9 = *(u8 *)(r6 + 0); \ +l1_%=: /* leak r9 */ \ + r9 &= 1; \ + r9 <<= 9; \ + *(u64*)(r10 - 8) = r9; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + /* leak secret into is_cached(map[0|512]): */ \ + r0 = *(u64 *)(r0 + 0); \ +l2_%=: \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("unpriv: ldimm64 before Spectre v4 barrier") +__success __success_unpriv +__retval(0) +#ifdef SPEC_V4 +__xlated_unpriv("r1 = 0x2020200005642020") /* should not matter */ +__xlated_unpriv("*(u64 *)(r10 -8) = r1") +__xlated_unpriv("nospec") +#endif +__naked void unpriv_ldimm64_spectre_v4(void) +{ + asm volatile (" \ + r1 = 0x2020200005642020 ll; \ + *(u64 *)(r10 -8) = r1; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: Spectre v1 and v4 barrier") +__success __success_unpriv +__retval(0) +#ifdef SPEC_V1 +#ifdef SPEC_V4 +/* starts with r0 == r8 == r9 == 0 */ +__xlated_unpriv("if r8 != 0x0 goto pc+1") +__xlated_unpriv("goto pc+2") +__xlated_unpriv("if r9 == 0x0 goto pc+4") +__xlated_unpriv("r2 = r0") +/* Following nospec required to prevent following dangerous `*(u64 *)(NOT_FP -64) + * = r1` iff `if r9 == 0 goto pc+4` was mispredicted because of Spectre v1. The + * test therefore ensures the Spectre-v4--induced nospec does not prevent the + * Spectre-v1--induced speculative path from being fully analyzed. 
+ */ +__xlated_unpriv("nospec") /* Spectre v1 */ +__xlated_unpriv("*(u64 *)(r2 -64) = r1") /* could be used to leak r2 */ +__xlated_unpriv("nospec") /* Spectre v4 */ +#endif +#endif +__naked void unpriv_spectre_v1_and_v4(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r8 = r0; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r9 = r0; \ + r0 = r10; \ + r1 = 0; \ + r2 = r10; \ + if r8 != 0 goto l0_%=; \ + if r9 != 0 goto l0_%=; \ + r0 = 0; \ +l0_%=: if r8 != 0 goto l1_%=; \ + goto l2_%=; \ +l1_%=: if r9 == 0 goto l3_%=; \ + r2 = r0; \ +l2_%=: *(u64 *)(r2 -64) = r1; \ +l3_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("unpriv: Spectre v1 and v4 barrier (simple)") +__success __success_unpriv +__retval(0) +#ifdef SPEC_V1 +#ifdef SPEC_V4 +__xlated_unpriv("if r8 != 0x0 goto pc+1") +__xlated_unpriv("goto pc+2") +__xlated_unpriv("goto pc-1") /* if r9 == 0 goto l3_%= */ +__xlated_unpriv("goto pc-1") /* r2 = r0 */ +__xlated_unpriv("nospec") +__xlated_unpriv("*(u64 *)(r2 -64) = r1") +__xlated_unpriv("nospec") +#endif +#endif +__naked void unpriv_spectre_v1_and_v4_simple(void) +{ + asm volatile (" \ + r8 = 0; \ + r9 = 0; \ + r0 = r10; \ + r1 = 0; \ + r2 = r10; \ + if r8 != 0 goto l0_%=; \ + if r9 != 0 goto l0_%=; \ + r0 = 0; \ +l0_%=: if r8 != 0 goto l1_%=; \ + goto l2_%=; \ +l1_%=: if r9 == 0 goto l3_%=; \ + r2 = r0; \ +l2_%=: *(u64 *)(r2 -64) = r1; \ +l3_%=: r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: ldimm64 before Spectre v1 and v4 barrier (simple)") +__success __success_unpriv +__retval(0) +#ifdef SPEC_V1 +#ifdef SPEC_V4 +__xlated_unpriv("if r8 != 0x0 goto pc+1") +__xlated_unpriv("goto pc+4") +__xlated_unpriv("goto pc-1") /* if r9 == 0 goto l3_%= */ +__xlated_unpriv("goto pc-1") /* r2 = r0 */ +__xlated_unpriv("goto pc-1") /* r1 = 0x2020200005642020 ll */ +__xlated_unpriv("goto pc-1") /* second part of ldimm64 */ +__xlated_unpriv("nospec") +__xlated_unpriv("*(u64 *)(r2 -64) = r1") +__xlated_unpriv("nospec") +#endif +#endif +__naked void unpriv_ldimm64_spectre_v1_and_v4_simple(void) +{ + asm volatile (" \ + r8 = 0; \ + r9 = 0; \ + r0 = r10; \ + r1 = 0; \ + r2 = r10; \ + if r8 != 0 goto l0_%=; \ + if r9 != 0 goto l0_%=; \ + r0 = 0; \ +l0_%=: if r8 != 0 goto l1_%=; \ + goto l2_%=; \ +l1_%=: if r9 == 0 goto l3_%=; \ + r2 = r0; \ + r1 = 0x2020200005642020 ll; \ +l2_%=: *(u64 *)(r2 -64) = r1; \ +l3_%=: r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c b/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c index 5ba6e53571c8..af7938ce56cb 100644 --- a/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c +++ b/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c @@ -231,6 +231,10 @@ __retval(1) __naked void ptr_unknown_vs_unknown_lt(void) { asm volatile (" \ + r8 = r1; \ + call %[bpf_get_prandom_u32]; \ + r9 = r0; \ + r1 = r8; \ r0 = *(u32*)(r1 + %[__sk_buff_len]); \ r1 = 0; \ *(u64*)(r10 - 8) = r1; \ @@ -245,11 +249,11 @@ l1_%=: call %[bpf_map_lookup_elem]; \ r4 = *(u8*)(r0 + 0); \ if r4 == 1 goto l3_%=; \ r1 = 6; \ - r1 = -r1; \ + r1 = r9; \ r1 &= 0x3; \ goto l4_%=; \ l3_%=: r1 = 6; \ - r1 = -r1; \ + r1 = r9; \ r1 &= 0x7; \ l4_%=: r1 += r0; \ r0 = *(u8*)(r1 + 0); \ @@ -259,7 +263,8 
@@ l2_%=: r0 = 1; \ : __imm(bpf_map_lookup_elem), __imm_addr(map_array_48b), __imm_addr(map_hash_16b), - __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)), + __imm(bpf_get_prandom_u32) : __clobber_all); } @@ -271,6 +276,10 @@ __retval(1) __naked void ptr_unknown_vs_unknown_gt(void) { asm volatile (" \ + r8 = r1; \ + call %[bpf_get_prandom_u32]; \ + r9 = r0; \ + r1 = r8; \ r0 = *(u32*)(r1 + %[__sk_buff_len]); \ r1 = 0; \ *(u64*)(r10 - 8) = r1; \ @@ -285,11 +294,11 @@ l1_%=: call %[bpf_map_lookup_elem]; \ r4 = *(u8*)(r0 + 0); \ if r4 == 1 goto l3_%=; \ r1 = 6; \ - r1 = -r1; \ + r1 = r9; \ r1 &= 0x7; \ goto l4_%=; \ l3_%=: r1 = 6; \ - r1 = -r1; \ + r1 = r9; \ r1 &= 0x3; \ l4_%=: r1 += r0; \ r0 = *(u8*)(r1 + 0); \ @@ -299,7 +308,8 @@ l2_%=: r0 = 1; \ : __imm(bpf_map_lookup_elem), __imm_addr(map_array_48b), __imm_addr(map_hash_16b), - __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)), + __imm(bpf_get_prandom_u32) : __clobber_all); } @@ -398,7 +408,8 @@ l2_%=: r0 = 1; \ SEC("socket") __description("map access: mixing value pointer and scalar, 1") -__success __failure_unpriv __msg_unpriv("R2 pointer comparison prohibited") +__success __failure_unpriv +__msg_unpriv("R2 tried to add from different maps, paths or scalars, pointer arithmetic with it prohibited for !root") __retval(0) __naked void value_pointer_and_scalar_1(void) { @@ -433,6 +444,7 @@ l2_%=: /* common instruction */ \ l3_%=: /* branch B */ \ r0 = 0x13371337; \ /* verifier follows fall-through */ \ + /* unpriv: nospec (inserted to prevent `R2 pointer comparison prohibited`) */\ if r2 != 0x100000 goto l4_%=; \ r0 = 0; \ exit; \ @@ -450,7 +462,8 @@ l4_%=: /* fake-dead code; targeted from branch A to \ SEC("socket") __description("map access: mixing value pointer and scalar, 2") -__success __failure_unpriv __msg_unpriv("R0 invalid mem access 'scalar'") +__success __failure_unpriv +__msg_unpriv("R2 tried to add from different maps, paths or scalars, pointer arithmetic with it prohibited for !root") __retval(0) __naked void value_pointer_and_scalar_2(void) { @@ -492,6 +505,7 @@ l4_%=: /* fake-dead code; targeted from branch A to \ * prevent dead code sanitization, rejected \ * via branch B however \ */ \ + /* unpriv: nospec (inserted to prevent `R0 invalid mem access 'scalar'`) */\ r0 = *(u8*)(r0 + 0); \ r0 = 0; \ exit; \ @@ -1296,9 +1310,13 @@ l0_%=: r0 = 1; \ SEC("socket") __description("map access: value_ptr -= unknown scalar, 2") -__success __failure_unpriv -__msg_unpriv("R0 pointer arithmetic of map value goes out of range") +__success __success_unpriv __retval(1) +#ifdef SPEC_V1 +__xlated_unpriv("r1 &= 7") +__xlated_unpriv("nospec") /* inserted to prevent `R0 pointer arithmetic of map value goes out of range` */ +__xlated_unpriv("r0 -= r1") +#endif __naked void value_ptr_unknown_scalar_2_2(void) { asm volatile (" \ diff --git a/tools/testing/selftests/bpf/progs/verifier_vfs_accept.c b/tools/testing/selftests/bpf/progs/verifier_vfs_accept.c index a7c0a553aa50..3e2d76ee8050 100644 --- a/tools/testing/selftests/bpf/progs/verifier_vfs_accept.c +++ b/tools/testing/selftests/bpf/progs/verifier_vfs_accept.c @@ -2,6 +2,7 @@ /* Copyright (c) 2024 Google LLC. 
*/ #include <vmlinux.h> +#include <errno.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> @@ -82,4 +83,21 @@ int BPF_PROG(path_d_path_from_file_argument, struct file *file) return 0; } +SEC("lsm.s/inode_rename") +__success +int BPF_PROG(inode_rename, struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry, + unsigned int flags) +{ + struct inode *inode = new_dentry->d_inode; + ino_t ino; + + if (!inode) + return 0; + ino = inode->i_ino; + if (ino == 0) + return -EACCES; + return 0; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c b/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c index d6d3f4fcb24c..4b392c6c8fc4 100644 --- a/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c +++ b/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c @@ -2,6 +2,7 @@ /* Copyright (c) 2024 Google LLC. */ #include <vmlinux.h> +#include <errno.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> #include <linux/limits.h> @@ -158,4 +159,18 @@ int BPF_PROG(path_d_path_kfunc_non_lsm, struct path *path, struct file *f) return 0; } +SEC("lsm.s/inode_rename") +__failure __msg("invalid mem access 'trusted_ptr_or_null_'") +int BPF_PROG(inode_rename, struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry, + unsigned int flags) +{ + struct inode *inode = new_dentry->d_inode; + ino_t ino; + + ino = inode->i_ino; + if (ino == 0) + return -EACCES; + return 0; +} char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/xdp_metadata.c b/tools/testing/selftests/bpf/progs/xdp_metadata.c index 31ca229bb3c0..09bb8a038d52 100644 --- a/tools/testing/selftests/bpf/progs/xdp_metadata.c +++ b/tools/testing/selftests/bpf/progs/xdp_metadata.c @@ -19,6 +19,13 @@ struct { __type(value, __u32); } prog_arr SEC(".maps"); +struct { + __uint(type, BPF_MAP_TYPE_DEVMAP); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(struct bpf_devmap_val)); + __uint(max_entries, 1); +} dev_map SEC(".maps"); + extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, __u64 *timestamp) __ksym; extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash, @@ -95,4 +102,10 @@ int rx(struct xdp_md *ctx) return bpf_redirect_map(&xsk, ctx->rx_queue_index, XDP_PASS); } +SEC("xdp") +int redirect(struct xdp_md *ctx) +{ + return bpf_redirect_map(&dev_map, ctx->rx_queue_index, XDP_PASS); +} + char _license[] SEC("license") = "GPL"; |
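
The "all outcomes overflow" additions above rely on the fact that when both endpoints of an unsigned 64-bit addition wrap past 2^64, the wrapped interval is still a sound bound, so the verifier does not have to reset the register to an unknown scalar. The following standalone C sketch is hypothetical and not part of the patch; it only re-derives the constants asserted by the 64-bit case, where after the OR the value is known to lie in [0xa000000000000000, 0xffffffffffffffff] and doubling both endpoints modulo 2^64 yields the umin/umax pair in the expected verifier log line.

/* Hypothetical helper, not part of the selftests: re-derives the bounds
 * expected by the "64-bit addition, all outcomes overflow" test.
 * Because BOTH endpoints of "r3 += r3" wrap past 2^64, the wrapped
 * interval remains a valid bound.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t umin = 0xa000000000000000ULL;
	uint64_t umax = 0xffffffffffffffffULL;

	/* unsigned overflow is well defined: arithmetic is modulo 2^64 */
	printf("new umin = 0x%016llx\n", (unsigned long long)(umin + umin));
	printf("new umax = 0x%016llx\n", (unsigned long long)(umax + umax));
	/* prints 0x4000000000000000 and 0xfffffffffffffffe, matching the
	 * R3_w=scalar(umin=...,umax=...) string the test asserts on
	 */
	return 0;
}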
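
The cross-sign-boundary tests reason in their comments about how a fully negative s64 interval maps onto a contiguous high u64 interval. This second standalone sketch, also hypothetical and outside the patch, reproduces the arithmetic behind the "negative overlap" case: with r6 in [0, 127] and r0 in [-128, 127], the sequence r0 += r6; r6 += 400; r0 -= r6 bounds r0 to [-655, -146] in s64, and reinterpreting those endpoints as u64 gives 0xfffffffffffffd71 and 0xffffffffffffff6e, the values the test expects in the verifier log.

/* Hypothetical helper, not part of the selftests: re-derives the bounds
 * used by "bounds deduction cross sign boundary, negative overlap".
 * A fully negative s64 range corresponds to one contiguous u64 range,
 * so both views can be tightened without crossing the sign boundary.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t smin = (-128 + 0) - (127 + 400);	/* -655 */
	int64_t smax = (127 + 127) - (0 + 400);		/* -146 */

	printf("smin = %lld -> umin = 0x%016llx\n",
	       (long long)smin, (unsigned long long)(uint64_t)smin);
	printf("smax = %lld -> umax = 0x%016llx\n",
	       (long long)smax, (unsigned long long)(uint64_t)smax);
	/* prints 0xfffffffffffffd71 and 0xffffffffffffff6e, the umin/umax
	 * pair asserted in the test's expected R0 scalar bounds
	 */
	return 0;
}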