author	Linus Torvalds <torvalds@linux-foundation.org>	2026-04-15 04:04:04 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2026-04-15 04:04:04 +0300
commit	f5ad4101009e7f5f5984ffea6923d4fcd470932a (patch)
tree	c9c25bb09794d372c3028113d1f89f2a2cbcbca4 /tools
parent	e997ac58ad0b47141c62c79cde8356fe5633287a (diff)
parent	71b500afd2f7336f5b6c6026f2af546fc079be26 (diff)
download	linux-f5ad4101009e7f5f5984ffea6923d4fcd470932a.tar.xz
Merge tag 'bpf-next-7.1' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Pull bpf updates from Alexei Starovoitov:

 - Welcome new BPF maintainers: Kumar Kartikeya Dwivedi and Eduard
   Zingerman, while Martin KaFai Lau reduced his load to Reviewer

 - Lots of fixes everywhere from many first-time contributors. Thank
   you all.

 - Diff stat is dominated by a mechanical split of verifier.c into
   multiple components:
     - backtrack.c: backtracking logic and jump history
     - states.c: state equivalence
     - cfg.c: control flow graph, postorder, strongly connected
       components
     - liveness.c: register and stack liveness
     - fixups.c: post-verification passes: instruction patching, dead
       code removal, bpf_loop inlining, finalize fastcall
   8k lines were moved. verifier.c still stands at 20k lines. Further
   refactoring is planned for the next release.

 - Replace dynamic stack liveness with static stack liveness based on
   data flow analysis. This improved the verification time by 2x for
   some programs and equally reduced memory consumption. New logic is
   in liveness.c and supported by constant folding in const_fold.c
   (Eduard Zingerman, Alexei Starovoitov)

 - Introduce BTF layout to ease addition of new BTF kinds (Alan Maguire)

 - Use kmalloc_nolock() universally in BPF local storage (Amery Hung)

 - Fix several bugs in linked registers delta tracking (Daniel Borkmann)

 - Improve verifier support of arena pointers (Emil Tsalapatis)

 - Improve verifier tracking of register bounds in min/max and tnum
   domains (Harishankar Vishwanathan, Paul Chaignon, Hao Sun)

 - Further extend support for implicit arguments in the verifier (Ihor
   Solodrai)

 - Add support for nop,nop5 instruction combo for USDT probes in libbpf
   (Jiri Olsa)

 - Support merging multiple module BTFs (Josef Bacik)

 - Extend applicability of bpf_kptr_xchg (Kaitao Cheng)

 - Retire rcu_trace_implies_rcu_gp() (Kumar Kartikeya Dwivedi)

 - Support variable offset context access for 'syscall' programs (Kumar
   Kartikeya Dwivedi)

 - Migrate bpf_task_work and dynptr to kmalloc_nolock() (Mykyta
   Yatsenko)

 - Fix UAF in open-coded task_vma iterator (Puranjay Mohan)

* tag 'bpf-next-7.1' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (241 commits)
  selftests/bpf: cover short IPv4/IPv6 inputs with adjust_room
  bpf: reject short IPv4/IPv6 inputs in bpf_prog_test_run_skb
  selftests/bpf: Use memfd_create instead of shm_open in cgroup_iter_memcg
  selftests/bpf: Add test for cgroup storage OOB read
  bpf: Fix OOB in pcpu_init_value
  selftests/bpf: Fix reg_bounds to match new tnum-based refinement
  selftests/bpf: Add tests for non-arena/arena operations
  bpf: Allow instructions with arena source and non-arena dest registers
  bpftool: add missing fsession to the usage and docs of bpftool
  docs/bpf: add missing fsession attach type to docs
  bpf: add missing fsession to the verifier log
  bpf: Move BTF checking logic into check_btf.c
  bpf: Move backtracking logic to backtrack.c
  bpf: Move state equivalence logic to states.c
  bpf: Move check_cfg() into cfg.c
  bpf: Move compute_insn_live_regs() into liveness.c
  bpf: Move fixup/post-processing logic from verifier.c into fixups.c
  bpf: Simplify do_check_insn()
  bpf: Move checks for reserved fields out of the main pass
  bpf: Delete unused variable
  ...
Diffstat (limited to 'tools')
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool-btf.rst  11
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool-prog.rst  2
-rw-r--r--  tools/bpf/bpftool/Makefile  30
-rw-r--r--  tools/bpf/bpftool/bash-completion/bpftool  8
-rw-r--r--  tools/bpf/bpftool/btf.c  121
-rw-r--r--  tools/bpf/bpftool/jit_disasm.c  11
-rw-r--r--  tools/bpf/bpftool/main.c  7
-rw-r--r--  tools/bpf/bpftool/main.h  14
-rw-r--r--  tools/bpf/bpftool/prog.c  2
-rw-r--r--  tools/bpf/resolve_btfids/main.c  1
-rw-r--r--  tools/include/uapi/linux/bpf.h  4
-rw-r--r--  tools/include/uapi/linux/btf.h  12
-rw-r--r--  tools/lib/bpf/btf.c  623
-rw-r--r--  tools/lib/bpf/btf.h  20
-rw-r--r--  tools/lib/bpf/features.c  53
-rw-r--r--  tools/lib/bpf/libbpf.c  240
-rw-r--r--  tools/lib/bpf/libbpf.h  44
-rw-r--r--  tools/lib/bpf/libbpf.map  6
-rw-r--r--  tools/lib/bpf/libbpf_internal.h  9
-rw-r--r--  tools/lib/bpf/libbpf_probes.c  40
-rw-r--r--  tools/lib/bpf/libbpf_version.h  2
-rw-r--r--  tools/lib/bpf/relo_core.c  2
-rw-r--r--  tools/lib/bpf/usdt.c  47
-rw-r--r--  tools/testing/selftests/bpf/.gitignore  2
-rw-r--r--  tools/testing/selftests/bpf/Makefile  17
-rw-r--r--  tools/testing/selftests/bpf/bench.c  4
-rw-r--r--  tools/testing/selftests/bpf/benchs/bench_local_storage_create.c  21
-rw-r--r--  tools/testing/selftests/bpf/benchs/bench_trigger.c  60
-rwxr-xr-x  tools/testing/selftests/bpf/benchs/run_bench_uprobes.sh  2
-rw-r--r--  tools/testing/selftests/bpf/bpf_experimental.h  174
-rw-r--r--  tools/testing/selftests/bpf/bpftool_helpers.c  15
-rw-r--r--  tools/testing/selftests/bpf/cgroup_iter_memcg.h  2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/access_variable_array.c  16
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/align.c  712
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/attach_probe.c  213
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_cookie.c  29
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_gotox.c  123
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_insn_array.c  2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/btf_kind.c  226
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/btf_sanitize.c  97
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/btf_write.c  111
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c  35
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/cgroup_storage.c  47
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/clone_attach_btf_id.c  78
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/connect_force_port.c  26
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/empty_skb.c  40
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/exceptions.c  9
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c  26
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/get_func_args_test.c  25
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c  28
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/htab_reuse.c  169
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/iter_buf_null_fail.c  9
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/kfunc_call.c  2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c  108
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/linked_list.c  4
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/livepatch_trampoline.c  20
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/lsm_bdev.c  221
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/lwt_misc.c  9
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/modify_return.c  8
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/module_attach.c  173
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/perf_link.c  20
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/probe_user.c  29
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/rbtree.c  6
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/reg_bounds.c  53
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/sockmap_basic.c  17
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/spin_lock.c  5
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/summarization.c  2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/task_local_data.h  104
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/task_local_storage.c  16
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/test_bpf_smc.c  6
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/test_global_funcs.c  2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/test_struct_ops_multi_args.c  9
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/test_task_local_data.c  2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/test_tc_tunnel.c  17
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/trampoline_count.c  17
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/usdt.c  92
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/verifier.c  10
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/verifier_log.c  6
-rw-r--r--  tools/testing/selftests/bpf/progs/bench_local_storage_create.c  11
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_gotox.c  31
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_misc.h  68
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_smc.c  28
-rw-r--r--  tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c  18
-rw-r--r--  tools/testing/selftests/bpf/progs/cgroup_storage.c  43
-rw-r--r--  tools/testing/selftests/bpf/progs/clone_attach_btf_id.c  13
-rw-r--r--  tools/testing/selftests/bpf/progs/connect_force_port4.c  10
-rw-r--r--  tools/testing/selftests/bpf/progs/connect_force_port6.c  10
-rw-r--r--  tools/testing/selftests/bpf/progs/dynptr_fail.c  115
-rw-r--r--  tools/testing/selftests/bpf/progs/empty_skb.c  7
-rw-r--r--  tools/testing/selftests/bpf/progs/exceptions.c  14
-rw-r--r--  tools/testing/selftests/bpf/progs/exceptions_assert.c  6
-rw-r--r--  tools/testing/selftests/bpf/progs/exceptions_fail.c  34
-rw-r--r--  tools/testing/selftests/bpf/progs/freplace_int_with_void.c  11
-rw-r--r--  tools/testing/selftests/bpf/progs/freplace_void.c  10
-rw-r--r--  tools/testing/selftests/bpf/progs/get_func_args_fsession_test.c  37
-rw-r--r--  tools/testing/selftests/bpf/progs/get_func_args_test.c  38
-rw-r--r--  tools/testing/selftests/bpf/progs/get_func_ip_fsession_test.c  21
-rw-r--r--  tools/testing/selftests/bpf/progs/get_func_ip_test.c  23
-rw-r--r--  tools/testing/selftests/bpf/progs/htab_reuse.c  16
-rw-r--r--  tools/testing/selftests/bpf/progs/irq.c  4
-rw-r--r--  tools/testing/selftests/bpf/progs/iter_buf_null_fail.c  39
-rw-r--r--  tools/testing/selftests/bpf/progs/iters.c  6
-rw-r--r--  tools/testing/selftests/bpf/progs/kfunc_call_test.c  98
-rw-r--r--  tools/testing/selftests/bpf/progs/kprobe_multi_session.c  10
-rw-r--r--  tools/testing/selftests/bpf/progs/kprobe_multi_sleepable.c  25
-rw-r--r--  tools/testing/selftests/bpf/progs/kprobe_write_ctx.c  19
-rw-r--r--  tools/testing/selftests/bpf/progs/kptr_xchg_inline.c  4
-rw-r--r--  tools/testing/selftests/bpf/progs/lsm_bdev.c  96
-rw-r--r--  tools/testing/selftests/bpf/progs/lwt_misc.c  22
-rw-r--r--  tools/testing/selftests/bpf/progs/map_ptr_kern.c  17
-rw-r--r--  tools/testing/selftests/bpf/progs/mem_rdonly_untrusted.c  2
-rw-r--r--  tools/testing/selftests/bpf/progs/modify_return.c  13
-rw-r--r--  tools/testing/selftests/bpf/progs/percpu_alloc_fail.c  4
-rw-r--r--  tools/testing/selftests/bpf/progs/preempt_lock.c  6
-rw-r--r--  tools/testing/selftests/bpf/progs/rbtree_search_kptr.c  290
-rw-r--r--  tools/testing/selftests/bpf/progs/refcounted_kptr.c  4
-rw-r--r--  tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c  2
-rw-r--r--  tools/testing/selftests/bpf/progs/struct_ops_multi_args.c  35
-rw-r--r--  tools/testing/selftests/bpf/progs/tailcall3.c  5
-rw-r--r--  tools/testing/selftests/bpf/progs/task_local_data.bpf.h  4
-rw-r--r--  tools/testing/selftests/bpf/progs/test_access_variable_array.c  19
-rw-r--r--  tools/testing/selftests/bpf/progs/test_global_func3.c  18
-rw-r--r--  tools/testing/selftests/bpf/progs/test_global_func7.c  2
-rw-r--r--  tools/testing/selftests/bpf/progs/test_global_func_deep_stack.c  95
-rw-r--r--  tools/testing/selftests/bpf/progs/test_module_attach.c  63
-rw-r--r--  tools/testing/selftests/bpf/progs/test_probe_user.c  13
-rw-r--r--  tools/testing/selftests/bpf/progs/test_trampoline_count.c  12
-rw-r--r--  tools/testing/selftests/bpf/progs/test_usdt.c  12
-rw-r--r--  tools/testing/selftests/bpf/progs/trigger_bench.c  10
-rw-r--r--  tools/testing/selftests/bpf/progs/uninit_stack.c  1
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_align.c  581
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_arena.c  130
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_async_cb_context.c  4
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_bounds.c  87
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_ctx.c  569
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_ctx_ptr_param.c  68
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c  65
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_div_mod_bounds.c  18
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_global_subprogs.c  114
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_gotox.c  4
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_helper_packet_access.c  2
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_helper_value_access.c  4
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_int_ptr.c  2
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_jeq_infer_not_null.c  54
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_ld_ind.c  142
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_linked_scalars.c  175
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_live_stack.c  2411
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_liveness_exp.c  139
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_loops1.c  3
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_meta_access.c  2
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_private_stack.c  8
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_scalar_ids.c  46
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_spill_fill.c  96
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_stack_ptr.c  4
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_subprog_precision.c  8
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_subprog_topo.c  226
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_subreg.c  165
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_unpriv.c  6
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c  10
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_xdp_direct_packet_access.c  64
-rw-r--r--  tools/testing/selftests/bpf/test_bpftool.py  174
-rwxr-xr-x  tools/testing/selftests/bpf/test_bpftool.sh  11
-rw-r--r--  tools/testing/selftests/bpf/test_kmods/bpf_testmod.c  81
-rw-r--r--  tools/testing/selftests/bpf/test_kmods/bpf_testmod.h  3
-rw-r--r--  tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h  1
-rw-r--r--  tools/testing/selftests/bpf/test_loader.c  248
-rw-r--r--  tools/testing/selftests/bpf/test_progs.c  45
-rw-r--r--  tools/testing/selftests/bpf/test_progs.h  1
-rw-r--r--  tools/testing/selftests/bpf/testing_helpers.c  9
-rw-r--r--  tools/testing/selftests/bpf/testing_helpers.h  1
-rw-r--r--  tools/testing/selftests/bpf/uprobe_multi.c  19
-rw-r--r--  tools/testing/selftests/bpf/uprobe_multi.ld  4
-rw-r--r--  tools/testing/selftests/bpf/usdt.h  2
-rw-r--r--  tools/testing/selftests/bpf/usdt_1.c  18
-rw-r--r--  tools/testing/selftests/bpf/usdt_2.c  16
-rw-r--r--  tools/testing/selftests/bpf/verifier/calls.c  9
-rw-r--r--  tools/testing/selftests/bpf/verifier/junk_insn.c  6
-rw-r--r--  tools/testing/selftests/bpf/verifier/sleepable.c  2
-rw-r--r--  tools/testing/selftests/bpf/veristat.c  103
179 files changed, 9852 insertions, 2207 deletions
diff --git a/tools/bpf/bpftool/Documentation/bpftool-btf.rst b/tools/bpf/bpftool/Documentation/bpftool-btf.rst
index d47dddc2b4ee..cf75a7fa2d6b 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-btf.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-btf.rst
@@ -27,7 +27,7 @@ BTF COMMANDS
| **bpftool** **btf dump** *BTF_SRC* [**format** *FORMAT*] [**root_id** *ROOT_ID*]
| **bpftool** **btf help**
|
-| *BTF_SRC* := { **id** *BTF_ID* | **prog** *PROG* | **map** *MAP* [{**key** | **value** | **kv** | **all**}] | **file** *FILE* }
+| *BTF_SRC* := { **id** *BTF_ID* | **prog** *PROG* | **map** *MAP* [{**key** | **value** | **kv** | **all**}] | **file** *FILE* [**file** *FILE*]... }
| *FORMAT* := { **raw** | **c** [**unsorted**] }
| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* }
| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* | **name** *PROG_NAME* }
@@ -58,9 +58,12 @@ bpftool btf dump *BTF_SRC* [format *FORMAT*] [root_id *ROOT_ID*]
When **prog** is provided, it's expected that program has associated BTF
object with BTF types.
- When specifying *FILE*, an ELF file is expected, containing .BTF section
- with well-defined BTF binary format data, typically produced by clang or
- pahole.
+ When specifying *FILE*, an ELF file or a raw BTF file (e.g. from
+ ``/sys/kernel/btf/``) is expected. Multiple **file** arguments may be
+ given to merge BTF from several kernel modules into a single output.
+ When sysfs paths are used, vmlinux BTF is loaded automatically as the
+ base; if vmlinux itself appears in the file list it is skipped.
+ A base BTF can also be specified explicitly with **-B**.
**format** option can be used to override default (raw) output format. Raw
(**raw**) or C-syntax (**c**) output formats are supported. With C-style
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
index 35aeeaf5f711..90fa2a48cc26 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -54,7 +54,7 @@ PROG COMMANDS
| **cgroup/sendmsg4** | **cgroup/sendmsg6** | **cgroup/sendmsg_unix** |
| **cgroup/recvmsg4** | **cgroup/recvmsg6** | **cgroup/recvmsg_unix** | **cgroup/sysctl** |
| **cgroup/getsockopt** | **cgroup/setsockopt** | **cgroup/sock_release** |
-| **struct_ops** | **fentry** | **fexit** | **freplace** | **sk_lookup**
+| **struct_ops** | **fentry** | **fexit** | **fsession** | **freplace** | **sk_lookup**
| }
| *ATTACH_TYPE* := {
| **sk_msg_verdict** | **sk_skb_verdict** | **sk_skb_stream_verdict** |
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index 519ea5cb8ab1..0febf60e1b64 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -97,6 +97,15 @@ RM ?= rm -f
FEATURE_USER = .bpftool
+# Skip optional dependencies: LLVM (JIT disasm), libbfd (fallback
+# disasm), libcrypto (program signing).
+SKIP_LLVM ?=
+SKIP_LIBBFD ?=
+SKIP_CRYPTO ?=
+ifneq ($(SKIP_CRYPTO),1)
+ CRYPTO_LIBS := -lcrypto
+endif
+
FEATURE_TESTS := clang-bpf-co-re
FEATURE_TESTS += llvm
FEATURE_TESTS += libcap
@@ -130,8 +139,8 @@ include $(FEATURES_DUMP)
endif
endif
-LIBS = $(LIBBPF) -lelf -lcrypto -lz
-LIBS_BOOTSTRAP = $(LIBBPF_BOOTSTRAP) -lelf -lcrypto -lz
+LIBS = $(LIBBPF) -lelf $(CRYPTO_LIBS) -lz
+LIBS_BOOTSTRAP = $(LIBBPF_BOOTSTRAP) -lelf $(CRYPTO_LIBS) -lz
ifeq ($(feature-libelf-zstd),1)
LIBS += -lzstd
@@ -150,7 +159,12 @@ all: $(OUTPUT)bpftool
SRCS := $(wildcard *.c)
ifeq ($(feature-llvm),1)
- # If LLVM is available, use it for JIT disassembly
+ifneq ($(SKIP_LLVM),1)
+HAS_LLVM := 1
+endif
+endif
+
+ifeq ($(HAS_LLVM),1)
CFLAGS += -DHAVE_LLVM_SUPPORT
LLVM_CONFIG_LIB_COMPONENTS := mcdisassembler all-targets
# llvm-config always adds -D_GNU_SOURCE, however, it may already be in CFLAGS
@@ -165,6 +179,7 @@ ifeq ($(feature-llvm),1)
endif
LDFLAGS += $(shell $(LLVM_CONFIG) --ldflags)
else
+ ifneq ($(SKIP_LIBBFD),1)
# Fall back on libbfd
ifeq ($(feature-libbfd),1)
LIBS += -lbfd -ldl -lopcodes
@@ -186,15 +201,22 @@ else
CFLAGS += -DDISASM_INIT_STYLED
endif
endif
+ endif # SKIP_LIBBFD
endif
ifeq ($(filter -DHAVE_LLVM_SUPPORT -DHAVE_LIBBFD_SUPPORT,$(CFLAGS)),)
# No support for JIT disassembly
SRCS := $(filter-out jit_disasm.c,$(SRCS))
endif
+ifeq ($(SKIP_CRYPTO),1)
+ CFLAGS += -DBPFTOOL_WITHOUT_CRYPTO
+ HOST_CFLAGS += -DBPFTOOL_WITHOUT_CRYPTO
+ SRCS := $(filter-out sign.c,$(SRCS))
+endif
+
BPFTOOL_BOOTSTRAP := $(BOOTSTRAP_OUTPUT)bpftool
-BOOTSTRAP_OBJS = $(addprefix $(BOOTSTRAP_OUTPUT),main.o common.o json_writer.o gen.o btf.o sign.o)
+BOOTSTRAP_OBJS = $(addprefix $(BOOTSTRAP_OUTPUT),main.o common.o json_writer.o gen.o btf.o $(if $(CRYPTO_LIBS),sign.o))
$(BOOTSTRAP_OBJS): $(LIBBPF_BOOTSTRAP)
OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index a28f0cc522e4..75cbcb512eba 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -501,7 +501,7 @@ _bpftool()
cgroup/post_bind4 cgroup/post_bind6 \
cgroup/sysctl cgroup/getsockopt \
cgroup/setsockopt cgroup/sock_release struct_ops \
- fentry fexit freplace sk_lookup'
+ fentry fexit fsession freplace sk_lookup'
COMPREPLY=( $( compgen -W "$BPFTOOL_PROG_LOAD_TYPES" -- "$cur" ) )
return 0
;;
@@ -961,10 +961,14 @@ _bpftool()
*)
# emit extra options
case ${words[3]} in
- id|file)
+ id)
COMPREPLY=( $( compgen -W "root_id" -- "$cur" ) )
_bpftool_once_attr 'format'
;;
+ file)
+ COMPREPLY=( $( compgen -W "root_id file" -- "$cur" ) )
+ _bpftool_once_attr 'format'
+ ;;
map|prog)
if [[ ${words[3]} == "map" ]] && [[ $cword == 6 ]]; then
COMPREPLY+=( $( compgen -W "key value kv all" -- "$cur" ) )
diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c
index 946612029dee..2e899e940034 100644
--- a/tools/bpf/bpftool/btf.c
+++ b/tools/bpf/bpftool/btf.c
@@ -28,6 +28,7 @@
#define FASTCALL_DECL_TAG "bpf_fastcall"
#define MAX_ROOT_IDS 16
+#define MAX_BTF_FILES 64
static const char * const btf_kind_str[NR_BTF_KINDS] = {
[BTF_KIND_UNKN] = "UNKNOWN",
@@ -878,6 +879,45 @@ static bool btf_is_kernel_module(__u32 btf_id)
return btf_info.kernel_btf && strncmp(btf_name, "vmlinux", sizeof(btf_name)) != 0;
}
+static struct btf *merge_btf_files(const char **files, int nr_files,
+ struct btf *vmlinux_base)
+{
+ struct btf *combined, *mod;
+ int ret;
+
+ combined = btf__new_empty_split(vmlinux_base);
+ if (!combined) {
+ p_err("failed to create combined BTF: %s", strerror(errno));
+ return NULL;
+ }
+
+ for (int j = 0; j < nr_files; j++) {
+ mod = btf__parse_split(files[j], vmlinux_base);
+ if (!mod) {
+ p_err("failed to load BTF from %s: %s", files[j], strerror(errno));
+ btf__free(combined);
+ return NULL;
+ }
+
+ ret = btf__add_btf(combined, mod);
+ btf__free(mod);
+ if (ret < 0) {
+ p_err("failed to merge BTF from %s: %s", files[j], strerror(-ret));
+ btf__free(combined);
+ return NULL;
+ }
+ }
+
+ ret = btf__dedup(combined, NULL);
+ if (ret) {
+ p_err("failed to dedup combined BTF: %s", strerror(-ret));
+ btf__free(combined);
+ return NULL;
+ }
+
+ return combined;
+}
+
static int do_dump(int argc, char **argv)
{
bool dump_c = false, sort_dump_c = true;
@@ -958,20 +998,76 @@ static int do_dump(int argc, char **argv)
NEXT_ARG();
} else if (is_prefix(src, "file")) {
const char sysfs_prefix[] = "/sys/kernel/btf/";
+ struct btf *vmlinux_base = base_btf;
+ const char *files[MAX_BTF_FILES];
+ int nr_files = 0;
- if (!base_btf &&
- strncmp(*argv, sysfs_prefix, sizeof(sysfs_prefix) - 1) == 0 &&
- strcmp(*argv, sysfs_vmlinux) != 0)
- base = get_vmlinux_btf_from_sysfs();
-
- btf = btf__parse_split(*argv, base ?: base_btf);
- if (!btf) {
- err = -errno;
- p_err("failed to load BTF from %s: %s",
- *argv, strerror(errno));
- goto done;
+ /* First grab our argument, filtering out the sysfs_vmlinux. */
+ if (strcmp(*argv, sysfs_vmlinux) != 0) {
+ files[nr_files++] = *argv;
+ } else {
+ p_info("skipping %s (will be loaded as base)", *argv);
}
NEXT_ARG();
+
+ while (argc && is_prefix(*argv, "file")) {
+ NEXT_ARG();
+ if (!REQ_ARGS(1)) {
+ err = -EINVAL;
+ goto done;
+ }
+ /* Filter out any sysfs vmlinux entries. */
+ if (strcmp(*argv, sysfs_vmlinux) == 0) {
+ p_info("skipping %s (will be loaded as base)", *argv);
+ NEXT_ARG();
+ continue;
+ }
+ if (nr_files >= MAX_BTF_FILES) {
+ p_err("too many BTF files (max %d)", MAX_BTF_FILES);
+ err = -E2BIG;
+ goto done;
+ }
+ files[nr_files++] = *argv;
+ NEXT_ARG();
+ }
+
+ /* Auto-detect vmlinux base if any file is from sysfs */
+ if (!vmlinux_base) {
+ for (int j = 0; j < nr_files; j++) {
+ if (strncmp(files[j], sysfs_prefix, sizeof(sysfs_prefix) - 1) == 0) {
+ base = get_vmlinux_btf_from_sysfs();
+ vmlinux_base = base;
+ break;
+ }
+ }
+ }
+
+ /* All files were the sysfs_vmlinux, handle it like we used to */
+ if (nr_files == 0) {
+ nr_files = 1;
+ files[0] = sysfs_vmlinux;
+ }
+
+ if (nr_files == 1) {
+ btf = btf__parse_split(files[0], base ?: base_btf);
+ if (!btf) {
+ err = -errno;
+ p_err("failed to load BTF from %s: %s", files[0], strerror(errno));
+ goto done;
+ }
+ } else {
+ if (!vmlinux_base) {
+ p_err("base BTF is required when merging multiple BTF files; use -B/--base-btf or use sysfs paths");
+ err = -EINVAL;
+ goto done;
+ }
+
+ btf = merge_btf_files(files, nr_files, vmlinux_base);
+ if (!btf) {
+ err = -errno;
+ goto done;
+ }
+ }
} else {
err = -1;
p_err("unrecognized BTF source specifier: '%s'", src);
@@ -1445,7 +1541,8 @@ static int do_help(int argc, char **argv)
" %1$s %2$s dump BTF_SRC [format FORMAT] [root_id ROOT_ID]\n"
" %1$s %2$s help\n"
"\n"
- " BTF_SRC := { id BTF_ID | prog PROG | map MAP [{key | value | kv | all}] | file FILE }\n"
+ " BTF_SRC := { id BTF_ID | prog PROG | map MAP [{key | value | kv | all}] |\n"
+ " file FILE [file FILE]... }\n"
" FORMAT := { raw | c [unsorted] }\n"
" " HELP_SPEC_MAP "\n"
" " HELP_SPEC_PROGRAM "\n"
diff --git a/tools/bpf/bpftool/jit_disasm.c b/tools/bpf/bpftool/jit_disasm.c
index 8895b4e1f690..04541155e9cc 100644
--- a/tools/bpf/bpftool/jit_disasm.c
+++ b/tools/bpf/bpftool/jit_disasm.c
@@ -93,7 +93,16 @@ init_context(disasm_ctx_t *ctx, const char *arch,
p_err("Failed to retrieve triple");
return -1;
}
- *ctx = LLVMCreateDisasm(triple, NULL, 0, NULL, symbol_lookup_callback);
+
+ /*
+ * Enable all aarch64 ISA extensions so the disassembler can handle any
+ * instruction the kernel JIT might emit (e.g. ARM64 LSE atomics).
+ */
+ if (!strncmp(triple, "aarch64", 7))
+ *ctx = LLVMCreateDisasmCPUFeatures(triple, "", "+all", NULL, 0, NULL,
+ symbol_lookup_callback);
+ else
+ *ctx = LLVMCreateDisasm(triple, NULL, 0, NULL, symbol_lookup_callback);
LLVMDisposeMessage(triple);
if (!*ctx) {
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index a829a6a49037..c91e1a6e1a1e 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -132,6 +132,11 @@ static int do_version(int argc, char **argv)
#else
const bool has_skeletons = true;
#endif
+#ifdef BPFTOOL_WITHOUT_CRYPTO
+ const bool has_crypto = false;
+#else
+ const bool has_crypto = true;
+#endif
bool bootstrap = false;
int i;
@@ -163,6 +168,7 @@ static int do_version(int argc, char **argv)
jsonw_start_object(json_wtr); /* features */
jsonw_bool_field(json_wtr, "libbfd", has_libbfd);
jsonw_bool_field(json_wtr, "llvm", has_llvm);
+ jsonw_bool_field(json_wtr, "crypto", has_crypto);
jsonw_bool_field(json_wtr, "skeletons", has_skeletons);
jsonw_bool_field(json_wtr, "bootstrap", bootstrap);
jsonw_end_object(json_wtr); /* features */
@@ -181,6 +187,7 @@ static int do_version(int argc, char **argv)
printf("features:");
print_feature("libbfd", has_libbfd, &nb_features);
print_feature("llvm", has_llvm, &nb_features);
+ print_feature("crypto", has_crypto, &nb_features);
print_feature("skeletons", has_skeletons, &nb_features);
print_feature("bootstrap", bootstrap, &nb_features);
printf("\n");
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index 1130299cede0..78b6e0ebb85d 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -293,6 +293,20 @@ struct kernel_config_option {
int read_kernel_config(const struct kernel_config_option *requested_options,
size_t num_options, char **out_values,
const char *define_prefix);
+#ifndef BPFTOOL_WITHOUT_CRYPTO
int bpftool_prog_sign(struct bpf_load_and_run_opts *opts);
__u32 register_session_key(const char *key_der_path);
+#else
+static inline int bpftool_prog_sign(struct bpf_load_and_run_opts *opts)
+{
+ p_err("bpftool was built without signing support");
+ return -ENOTSUP;
+}
+
+static inline __u32 register_session_key(const char *key_der_path)
+{
+ p_err("bpftool was built without signing support");
+ return -1;
+}
+#endif
#endif
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 6daf19809ca4..a9f730d407a9 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -2583,7 +2583,7 @@ static int do_help(int argc, char **argv)
" cgroup/getsockname_unix | cgroup/sendmsg4 | cgroup/sendmsg6 |\n"
" cgroup/sendmsg_unix | cgroup/recvmsg4 | cgroup/recvmsg6 | cgroup/recvmsg_unix |\n"
" cgroup/getsockopt | cgroup/setsockopt | cgroup/sock_release |\n"
- " struct_ops | fentry | fexit | freplace | sk_lookup }\n"
+ " struct_ops | fentry | fexit | fsession | freplace | sk_lookup }\n"
" ATTACH_TYPE := { sk_msg_verdict | sk_skb_verdict | sk_skb_stream_verdict |\n"
" sk_skb_stream_parser | flow_dissector }\n"
" METRIC := { cycles | instructions | l1d_loads | llc_misses | itlb_misses | dtlb_misses }\n"
diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
index 5208f650080f..f8a91fa7584f 100644
--- a/tools/bpf/resolve_btfids/main.c
+++ b/tools/bpf/resolve_btfids/main.c
@@ -1065,6 +1065,7 @@ static bool is_kf_implicit_arg(const struct btf *btf, const struct btf_param *p)
{
static const char *const kf_implicit_arg_types[] = {
"bpf_prog_aux",
+ "btf_struct_meta",
};
const struct btf_type *t;
const char *name;
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 5e38b4887de6..677be9a47347 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -4645,7 +4645,9 @@ union bpf_attr {
* Description
* Discard reserved ring buffer sample, pointed to by *data*.
* If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
- * of new data availability is sent.
+ * of new data availability is sent. Discarded records remain in
+ * the ring buffer until consumed by user space, so a later submit
+ * using adaptive wakeup might not wake up the consumer.
* If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
* of new data availability is sent unconditionally.
* If **0** is specified in *flags*, an adaptive notification
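
The caveat documented above is easiest to see from the producer side.
A minimal hedged sketch of a BPF program (not part of this series) that
discards uninteresting records with BPF_RB_NO_WAKEUP and compensates by
forcing a wakeup on submit, since unconsumed discarded records can make
the adaptive (flags == 0) heuristic skip the notification:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_RINGBUF);
            __uint(max_entries, 4096);
    } rb SEC(".maps");

    SEC("tp/syscalls/sys_enter_getpid")
    int log_event(void *ctx)
    {
            __u64 *e = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);

            if (!e)
                    return 0;
            if (bpf_get_current_uid_gid() >> 32) {
                    /* skip non-root (gid != 0) events; the discarded
                     * record still occupies the buffer until consumed */
                    bpf_ringbuf_discard(e, BPF_RB_NO_WAKEUP);
                    return 0;
            }
            *e = bpf_ktime_get_ns();
            /* force the wakeup that an adaptive submit might skip */
            bpf_ringbuf_submit(e, BPF_RB_FORCE_WAKEUP);
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";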
diff --git a/tools/include/uapi/linux/btf.h b/tools/include/uapi/linux/btf.h
index 266d4ffa6c07..638615ebddc2 100644
--- a/tools/include/uapi/linux/btf.h
+++ b/tools/include/uapi/linux/btf.h
@@ -8,6 +8,16 @@
#define BTF_MAGIC 0xeB9F
#define BTF_VERSION 1
+/*
+ * BTF layout section consists of a struct btf_layout for each known
+ * kind at BTF encoding time.
+ */
+struct btf_layout {
+ __u8 info_sz; /* size of singular element after btf_type */
+ __u8 elem_sz; /* size of each of btf_vlen(t) elements */
+ __u16 flags; /* currently unused */
+};
+
struct btf_header {
__u16 magic;
__u8 version;
@@ -19,6 +29,8 @@ struct btf_header {
__u32 type_len; /* length of type section */
__u32 str_off; /* offset of string section */
__u32 str_len; /* length of string section */
+ __u32 layout_off; /* offset of layout section */
+ __u32 layout_len; /* length of layout section */
};
/* Max # of type identifier */
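
To make the layout contract concrete: a reader that hits a kind it does
not know natively can still skip the record by combining info_sz and
elem_sz from the layout section. A minimal sketch with a hypothetical
helper name, applying the same rule as libbpf's btf_type_size_unknown()
further below:

    #include <linux/btf.h>

    /* layouts/nr_layouts describe the optional layout section of the
     * BTF blob being parsed. */
    static int type_record_size(const struct btf_layout *layouts,
                                __u32 nr_layouts, const struct btf_type *t)
    {
            __u32 kind = BTF_INFO_KIND(t->info);
            __u16 vlen = BTF_INFO_VLEN(t->info);

            if (kind >= nr_layouts)
                    return -1; /* unknown even to the layout section */

            return sizeof(struct btf_type) +
                   layouts[kind].info_sz +            /* singular element */
                   (int)vlen * layouts[kind].elem_sz; /* vlen elements */
    }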
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 83fe79ffcb8f..ceb57b46a878 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -29,6 +29,36 @@
static struct btf_type btf_void;
+/*
+ * Describe how kinds are laid out; some have a singular element following the "struct btf_type",
+ * some have BTF_INFO_VLEN(t->info) elements. Specify sizes for both. Flags are currently unused.
+ * Kind layout can be optionally added to the BTF representation in a dedicated section to
+ * facilitate parsing. New kinds must be added here.
+ */
+static struct btf_layout layouts[NR_BTF_KINDS] = {
+/* singular element size vlen element(s) size flags */
+[BTF_KIND_UNKN] = { 0, 0, 0 },
+[BTF_KIND_INT] = { sizeof(__u32), 0, 0 },
+[BTF_KIND_PTR] = { 0, 0, 0 },
+[BTF_KIND_ARRAY] = { sizeof(struct btf_array), 0, 0 },
+[BTF_KIND_STRUCT] = { 0, sizeof(struct btf_member), 0 },
+[BTF_KIND_UNION] = { 0, sizeof(struct btf_member), 0 },
+[BTF_KIND_ENUM] = { 0, sizeof(struct btf_enum), 0 },
+[BTF_KIND_FWD] = { 0, 0, 0 },
+[BTF_KIND_TYPEDEF] = { 0, 0, 0 },
+[BTF_KIND_VOLATILE] = { 0, 0, 0 },
+[BTF_KIND_CONST] = { 0, 0, 0 },
+[BTF_KIND_RESTRICT] = { 0, 0, 0 },
+[BTF_KIND_FUNC] = { 0, 0, 0 },
+[BTF_KIND_FUNC_PROTO] = { 0, sizeof(struct btf_param), 0 },
+[BTF_KIND_VAR] = { sizeof(struct btf_var), 0, 0 },
+[BTF_KIND_DATASEC] = { 0, sizeof(struct btf_var_secinfo), 0 },
+[BTF_KIND_FLOAT] = { 0, 0, 0 },
+[BTF_KIND_DECL_TAG] = { sizeof(struct btf_decl_tag), 0, 0 },
+[BTF_KIND_TYPE_TAG] = { 0, 0, 0 },
+[BTF_KIND_ENUM64] = { 0, sizeof(struct btf_enum64), 0 },
+};
+
struct btf {
/* raw BTF data in native endianness */
void *raw_data;
@@ -40,42 +70,53 @@ struct btf {
/*
* When BTF is loaded from an ELF or raw memory it is stored
- * in a contiguous memory block. The hdr, type_data, and, strs_data
+ * in a contiguous memory block. The type_data, layout and strs_data
* point inside that memory region to their respective parts of BTF
* representation:
*
- *   +--------------------------------+
- *   |  Header  |  Types  |  Strings  |
- *   +--------------------------------+
- *   ^          ^         ^
- *   |          |         |
- *   hdr        |         |
- *   types_data-+         |
- *   strs_data------------+
+ *   +----------------------------------------------------+
+ *   |  Header  |  Types  |  Optional layout  |  Strings  |
+ *   +----------------------------------------------------+
+ *   ^          ^         ^                   ^
+ *   |          |         |                   |
+ *   raw_data   |         |                   |
+ *   types_data-+         |                   |
+ *   layout---------------+                   |
+ *   strs_data--------------------------------+
+ *
+ * A separate struct btf_header is embedded as btf->hdr,
+ * and header information is copied into it. This allows us
+ * to handle header data for various header formats; the original,
+ * the extended header with layout info, etc.
*
* If BTF data is later modified, e.g., due to types added or
* removed, BTF deduplication performed, etc, this contiguous
- * representation is broken up into three independently allocated
- * memory regions to be able to modify them independently.
+ * representation is broken up into four independent memory
+ * regions.
+ *
* raw_data is nulled out at that point, but can be later allocated
* and cached again if user calls btf__raw_data(), at which point
- * raw_data will contain a contiguous copy of header, types, and
- * strings:
+ * raw_data will contain a contiguous copy of header, types, optional
+ * layout and strings. layout optionally points to a
+ * btf_layout array - this allows us to encode information about
+ * the kinds known at encoding time. If layout is NULL no
+ * layout information is encoded.
*
- *   +----------+  +---------+  +-----------+
- *   |  Header  |  |  Types  |  |  Strings  |
- *   +----------+  +---------+  +-----------+
- *   ^             ^            ^
- *   |             |            |
- *   hdr           |            |
- *   types_data----+            |
- *   strset__data(strs_set)-----+
+ *   +----------+  +---------+  +-----------+  +-----------+
+ *   |  Header  |  |  Types  |  |  Layout   |  |  Strings  |
+ *   +----------+  +---------+  +-----------+  +-----------+
+ *   ^             ^            ^              ^
+ *   |             |            |              |
+ *   hdr           |            |              |
+ *   types_data----+            |              |
+ *   layout---------------------+              |
+ *   strset__data(strs_set)--------------------+
*
- *                 +----------+---------+-----------+
- *                 |  Header  |  Types  |  Strings  |
- *   raw_data----->+----------+---------+-----------+
+ *                 +----------+---------+-------------------+-----------+
+ *                 |  Header  |  Types  |  Optional Layout  |  Strings  |
+ *   raw_data----->+----------+---------+-------------------+-----------+
*/
- struct btf_header *hdr;
+ struct btf_header hdr;
void *types_data;
size_t types_data_cap; /* used size stored in hdr->type_len */
@@ -125,6 +166,17 @@ struct btf {
/* whether raw_data is a (read-only) mmap */
bool raw_data_is_mmap;
+ /* is BTF modifiable? i.e. is it split into separate sections as described above? */
+ bool modifiable;
+ /* does BTF have header information we do not support? If so, disallow
+ * modification.
+ */
+ bool has_hdr_extra;
+ /* Points either at raw kind layout data in parsed BTF (if present), or
+ * at an allocated kind layout array when BTF is modifiable.
+ */
+ void *layout;
+
/* BTF object FD, if loaded into kernel */
int fd;
@@ -216,7 +268,7 @@ static int btf_add_type_idx_entry(struct btf *btf, __u32 type_off)
return 0;
}
-static void btf_bswap_hdr(struct btf_header *h)
+static void btf_bswap_hdr(struct btf_header *h, __u32 hdr_len)
{
h->magic = bswap_16(h->magic);
h->hdr_len = bswap_32(h->hdr_len);
@@ -224,66 +276,115 @@ static void btf_bswap_hdr(struct btf_header *h)
h->type_len = bswap_32(h->type_len);
h->str_off = bswap_32(h->str_off);
h->str_len = bswap_32(h->str_len);
+ /* May be operating on raw data with hdr_len that does not include below fields */
+ if (hdr_len >= sizeof(struct btf_header)) {
+ h->layout_off = bswap_32(h->layout_off);
+ h->layout_len = bswap_32(h->layout_len);
+ }
}
static int btf_parse_hdr(struct btf *btf)
{
- struct btf_header *hdr = btf->hdr;
- __u32 meta_left;
+ struct btf_header *hdr = btf->raw_data;
+ __u32 hdr_len, meta_left;
- if (btf->raw_size < sizeof(struct btf_header)) {
+ if (btf->raw_size < offsetofend(struct btf_header, str_len)) {
pr_debug("BTF header not found\n");
return -EINVAL;
}
+ hdr_len = hdr->hdr_len;
+
if (hdr->magic == bswap_16(BTF_MAGIC)) {
btf->swapped_endian = true;
- if (bswap_32(hdr->hdr_len) != sizeof(struct btf_header)) {
+ hdr_len = bswap_32(hdr->hdr_len);
+ if (hdr_len < offsetofend(struct btf_header, str_len)) {
pr_warn("Can't load BTF with non-native endianness due to unsupported header length %u\n",
- bswap_32(hdr->hdr_len));
+ hdr_len);
return -ENOTSUP;
}
- btf_bswap_hdr(hdr);
} else if (hdr->magic != BTF_MAGIC) {
pr_debug("Invalid BTF magic: %x\n", hdr->magic);
return -EINVAL;
}
- if (btf->raw_size < hdr->hdr_len) {
+ if (btf->raw_size < hdr_len) {
pr_debug("BTF header len %u larger than data size %u\n",
- hdr->hdr_len, btf->raw_size);
+ hdr_len, btf->raw_size);
return -EINVAL;
}
- meta_left = btf->raw_size - hdr->hdr_len;
- if (meta_left < (long long)hdr->str_off + hdr->str_len) {
+ if (btf->swapped_endian)
+ btf_bswap_hdr(hdr, hdr_len);
+
+ memcpy(&btf->hdr, hdr, min((size_t)hdr_len, sizeof(struct btf_header)));
+
+ /* If unknown header data is found, modification is prohibited in
+ * btf_ensure_modifiable().
+ */
+ if (hdr_len > sizeof(struct btf_header)) {
+ __u8 *h = (__u8 *)hdr;
+ __u32 i;
+
+ for (i = sizeof(struct btf_header); i < hdr_len; i++) {
+ if (!h[i])
+ continue;
+ btf->has_hdr_extra = true;
+ pr_debug("Unknown BTF header data at offset %u; modification is disallowed\n",
+ i);
+ break;
+ }
+ }
+
+ meta_left = btf->raw_size - hdr_len;
+ if (meta_left < (long long)btf->hdr.str_off + btf->hdr.str_len) {
pr_debug("Invalid BTF total size: %u\n", btf->raw_size);
return -EINVAL;
}
- if ((long long)hdr->type_off + hdr->type_len > hdr->str_off) {
+ if ((long long)btf->hdr.type_off + btf->hdr.type_len > btf->hdr.str_off) {
pr_debug("Invalid BTF data sections layout: type data at %u + %u, strings data at %u + %u\n",
- hdr->type_off, hdr->type_len, hdr->str_off, hdr->str_len);
+ btf->hdr.type_off, btf->hdr.type_len, btf->hdr.str_off,
+ btf->hdr.str_len);
return -EINVAL;
}
- if (hdr->type_off % 4) {
+ if (btf->hdr.type_off % 4) {
pr_debug("BTF type section is not aligned to 4 bytes\n");
return -EINVAL;
}
+ if (btf->hdr.layout_len == 0)
+ return 0;
+
+ /* optional layout section sits between types and strings */
+ if (btf->hdr.layout_off % 4) {
+ pr_debug("BTF layout section is not aligned to 4 bytes\n");
+ return -EINVAL;
+ }
+ if (btf->hdr.layout_off < (long long)btf->hdr.type_off + btf->hdr.type_len) {
+ pr_debug("Invalid BTF data sections layout: type data at %u + %u, layout data at %u + %u\n",
+ btf->hdr.type_off, btf->hdr.type_len,
+ btf->hdr.layout_off, btf->hdr.layout_len);
+ return -EINVAL;
+ }
+ if ((long long)btf->hdr.layout_off + btf->hdr.layout_len > btf->hdr.str_off ||
+ btf->hdr.layout_off > btf->hdr.str_off) {
+ pr_debug("Invalid BTF data sections layout: layout data at %u + %u, strings data at %u\n",
+ btf->hdr.layout_off, btf->hdr.layout_len, btf->hdr.str_off);
+ return -EINVAL;
+ }
return 0;
}
static int btf_parse_str_sec(struct btf *btf)
{
- const struct btf_header *hdr = btf->hdr;
const char *start = btf->strs_data;
- const char *end = start + btf->hdr->str_len;
+ const char *end = start + btf->hdr.str_len;
- if (btf->base_btf && hdr->str_len == 0)
+ if (btf->base_btf && btf->hdr.str_len == 0)
return 0;
- if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET || end[-1]) {
+ if (!btf->hdr.str_len || btf->hdr.str_len - 1 > BTF_MAX_STR_OFFSET || end[-1]) {
pr_debug("Invalid BTF string section\n");
return -EINVAL;
}
@@ -294,7 +395,63 @@ static int btf_parse_str_sec(struct btf *btf)
return 0;
}
-static int btf_type_size(const struct btf_type *t)
+static int btf_parse_layout_sec(struct btf *btf)
+{
+ if (!btf->hdr.layout_len)
+ return 0;
+
+ if (btf->hdr.layout_len % sizeof(struct btf_layout) != 0) {
+ pr_debug("Invalid BTF kind layout section\n");
+ return -EINVAL;
+ }
+ btf->layout = btf->raw_data + btf->hdr.hdr_len + btf->hdr.layout_off;
+
+ if (btf->swapped_endian) {
+ struct btf_layout *l, *end = btf->layout + btf->hdr.layout_len;
+
+ for (l = btf->layout; l < end; l++)
+ l->flags = bswap_16(l->flags);
+ }
+
+ return 0;
+}
+
+/* for unknown kinds, consult kind layout. */
+static int btf_type_size_unknown(const struct btf *btf, const struct btf_type *t)
+{
+ __u32 l_cnt = btf->hdr.layout_len / sizeof(struct btf_layout);
+ struct btf_layout *l = btf->layout;
+ __u16 vlen = btf_vlen(t);
+ __u32 kind = btf_kind(t);
+
+ /* Fall back to base BTF if needed as they share layout information */
+ if (!l) {
+ struct btf *base_btf = btf->base_btf;
+
+ if (base_btf) {
+ l = base_btf->layout;
+ l_cnt = base_btf->hdr.layout_len / sizeof(struct btf_layout);
+ }
+ }
+ if (!l || kind >= l_cnt) {
+ pr_debug("Unsupported BTF_KIND: %u\n", btf_kind(t));
+ return -EINVAL;
+ }
+ if (l[kind].info_sz % 4) {
+ pr_debug("Unsupported info_sz %u for kind %u\n",
+ l[kind].info_sz, kind);
+ return -EINVAL;
+ }
+ if (l[kind].elem_sz % 4) {
+ pr_debug("Unsupported elem_sz %u for kind %u\n",
+ l[kind].elem_sz, kind);
+ return -EINVAL;
+ }
+
+ return sizeof(struct btf_type) + l[kind].info_sz + vlen * l[kind].elem_sz;
+}
+
+static int btf_type_size(const struct btf *btf, const struct btf_type *t)
{
const int base_size = sizeof(struct btf_type);
__u16 vlen = btf_vlen(t);
@@ -330,8 +487,7 @@ static int btf_type_size(const struct btf_type *t)
case BTF_KIND_DECL_TAG:
return base_size + sizeof(struct btf_decl_tag);
default:
- pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
- return -EINVAL;
+ return btf_type_size_unknown(btf, t);
}
}
@@ -421,16 +577,15 @@ static int btf_bswap_type_rest(struct btf_type *t)
static int btf_parse_type_sec(struct btf *btf)
{
- struct btf_header *hdr = btf->hdr;
void *next_type = btf->types_data;
- void *end_type = next_type + hdr->type_len;
+ void *end_type = next_type + btf->hdr.type_len;
int err, type_size;
while (next_type + sizeof(struct btf_type) <= end_type) {
if (btf->swapped_endian)
btf_bswap_type_base(next_type);
- type_size = btf_type_size(next_type);
+ type_size = btf_type_size(btf, next_type);
if (type_size < 0)
return type_size;
if (next_type + type_size > end_type) {
@@ -591,8 +746,12 @@ static int btf_validate_type(const struct btf *btf, const struct btf_type *t, __
break;
}
default:
- pr_warn("btf: type [%u]: unrecognized kind %u\n", id, kind);
- return -EINVAL;
+ /* Kind may be represented in kind layout information. */
+ if (btf_type_size_unknown(btf, t) < 0) {
+ pr_warn("btf: type [%u]: unrecognized kind %u\n", id, kind);
+ return -EINVAL;
+ }
+ break;
}
return 0;
}
@@ -1012,7 +1171,8 @@ __s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
static bool btf_is_modifiable(const struct btf *btf)
{
- return (void *)btf->hdr != btf->raw_data;
+ /* BTF is modifiable if split into multiple sections */
+ return btf->modifiable;
}
static void btf_free_raw_data(struct btf *btf)
@@ -1036,14 +1196,14 @@ void btf__free(struct btf *btf)
if (btf_is_modifiable(btf)) {
/* if BTF was modified after loading, it will have a split
- * in-memory representation for header, types, and strings
+ * in-memory representation for types, strings and layout
* sections, so we need to free all of them individually. It
* might still have a cached contiguous raw data present,
* which will be unconditionally freed below.
*/
- free(btf->hdr);
free(btf->types_data);
strset__free(btf->strs_set);
+ free(btf->layout);
}
btf_free_raw_data(btf);
free(btf->raw_data_swapped);
@@ -1053,8 +1213,11 @@ void btf__free(struct btf *btf)
free(btf);
}
-static struct btf *btf_new_empty(struct btf *base_btf)
+static struct btf *btf_new_empty(struct btf_new_opts *opts)
{
+ bool add_layout = OPTS_GET(opts, add_layout, false);
+ struct btf *base_btf = OPTS_GET(opts, base_btf, NULL);
+ struct btf_header *hdr;
struct btf *btf;
btf = calloc(1, sizeof(*btf));
@@ -1072,26 +1235,42 @@ static struct btf *btf_new_empty(struct btf *base_btf)
if (base_btf) {
btf->base_btf = base_btf;
btf->start_id = btf__type_cnt(base_btf);
- btf->start_str_off = base_btf->hdr->str_len + base_btf->start_str_off;
+ btf->start_str_off = base_btf->hdr.str_len + base_btf->start_str_off;
btf->swapped_endian = base_btf->swapped_endian;
}
/* +1 for empty string at offset 0 */
btf->raw_size = sizeof(struct btf_header) + (base_btf ? 0 : 1);
+ if (add_layout)
+ btf->raw_size += sizeof(layouts);
btf->raw_data = calloc(1, btf->raw_size);
if (!btf->raw_data) {
free(btf);
return ERR_PTR(-ENOMEM);
}
- btf->hdr = btf->raw_data;
- btf->hdr->hdr_len = sizeof(struct btf_header);
- btf->hdr->magic = BTF_MAGIC;
- btf->hdr->version = BTF_VERSION;
+ hdr = btf->raw_data;
+ hdr->hdr_len = sizeof(struct btf_header);
+ hdr->magic = BTF_MAGIC;
+ hdr->version = BTF_VERSION;
- btf->types_data = btf->raw_data + btf->hdr->hdr_len;
- btf->strs_data = btf->raw_data + btf->hdr->hdr_len;
- btf->hdr->str_len = base_btf ? 0 : 1; /* empty string at offset 0 */
+ btf->types_data = btf->raw_data + hdr->hdr_len;
+ btf->strs_data = btf->raw_data + hdr->hdr_len;
+ hdr->str_len = base_btf ? 0 : 1; /* empty string at offset 0 */
+
+ if (add_layout) {
+ hdr->layout_len = sizeof(layouts);
+ btf->layout = layouts;
+ /*
+ * No need to swap endianness here as btf_get_raw_data()
+ * will do this for us if btf->swapped_endian is true.
+ */
+ memcpy(btf->raw_data + hdr->hdr_len, layouts, sizeof(layouts));
+ btf->strs_data += sizeof(layouts);
+ hdr->str_off += sizeof(layouts);
+ }
+
+ memcpy(&btf->hdr, hdr, sizeof(*hdr));
return btf;
}
@@ -1103,7 +1282,19 @@ struct btf *btf__new_empty(void)
struct btf *btf__new_empty_split(struct btf *base_btf)
{
- return libbpf_ptr(btf_new_empty(base_btf));
+ LIBBPF_OPTS(btf_new_opts, opts);
+
+ opts.base_btf = base_btf;
+
+ return libbpf_ptr(btf_new_empty(&opts));
+}
+
+struct btf *btf__new_empty_opts(struct btf_new_opts *opts)
+{
+ if (!OPTS_VALID(opts, btf_new_opts))
+ return libbpf_err_ptr(-EINVAL);
+
+ return libbpf_ptr(btf_new_empty(opts));
}
static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf, bool is_mmap)
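
Callers opt into the layout section through the new btf_new_opts. A
minimal usage sketch, with field names as introduced by this patch:

    #include <bpf/btf.h>

    /* Create empty BTF carrying the kind layout section, so future
     * readers can skip over kinds they do not know. */
    static struct btf *new_btf_with_layout(struct btf *base)
    {
            LIBBPF_OPTS(btf_new_opts, opts,
                    .base_btf = base,
                    .add_layout = true, /* emit btf_layout records */
            );

            return btf__new_empty_opts(&opts); /* NULL + errno on error */
    }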
@@ -1124,7 +1315,7 @@ static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf, b
if (base_btf) {
btf->base_btf = base_btf;
btf->start_id = btf__type_cnt(base_btf);
- btf->start_str_off = base_btf->hdr->str_len + base_btf->start_str_off;
+ btf->start_str_off = base_btf->hdr.str_len + base_btf->start_str_off;
}
if (is_mmap) {
@@ -1141,15 +1332,15 @@ static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf, b
btf->raw_size = size;
- btf->hdr = btf->raw_data;
err = btf_parse_hdr(btf);
if (err)
goto done;
- btf->strs_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->str_off;
- btf->types_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->type_off;
+ btf->strs_data = btf->raw_data + btf->hdr.hdr_len + btf->hdr.str_off;
+ btf->types_data = btf->raw_data + btf->hdr.hdr_len + btf->hdr.type_off;
err = btf_parse_str_sec(btf);
+ err = err ?: btf_parse_layout_sec(btf);
err = err ?: btf_parse_type_sec(btf);
err = err ?: btf_sanity_check(btf);
if (err)
@@ -1601,7 +1792,7 @@ static const void *btf_strs_data(const struct btf *btf)
static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian)
{
- struct btf_header *hdr = btf->hdr;
+ const struct btf_header *hdr = &btf->hdr;
struct btf_type *t;
void *data, *p;
__u32 data_sz;
@@ -1614,14 +1805,17 @@ static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endi
}
data_sz = hdr->hdr_len + hdr->type_len + hdr->str_len;
+ if (btf->layout)
+ data_sz += hdr->layout_len;
+
data = calloc(1, data_sz);
if (!data)
return NULL;
p = data;
- memcpy(p, hdr, hdr->hdr_len);
+ memcpy(p, hdr, min((__u32)sizeof(struct btf_header), hdr->hdr_len));
if (swap_endian)
- btf_bswap_hdr(p);
+ btf_bswap_hdr(p, hdr->hdr_len);
p += hdr->hdr_len;
memcpy(p, btf->types_data, hdr->type_len);
@@ -1639,8 +1833,18 @@ static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endi
}
p += hdr->type_len;
+ if (btf->layout) {
+ memcpy(p, btf->layout, hdr->layout_len);
+ if (swap_endian) {
+ struct btf_layout *l, *end = p + hdr->layout_len;
+
+ for (l = p; l < end ; l++)
+ l->flags = bswap_16(l->flags);
+ }
+ p += hdr->layout_len;
+ }
+
memcpy(p, btf_strs_data(btf), hdr->str_len);
- p += hdr->str_len;
*size = data_sz;
return data;
@@ -1675,7 +1879,7 @@ const char *btf__str_by_offset(const struct btf *btf, __u32 offset)
{
if (offset < btf->start_str_off)
return btf__str_by_offset(btf->base_btf, offset);
- else if (offset - btf->start_str_off < btf->hdr->str_len)
+ else if (offset - btf->start_str_off < btf->hdr.str_len)
return btf_strs_data(btf) + (offset - btf->start_str_off);
else
return errno = EINVAL, NULL;
@@ -1783,12 +1987,12 @@ static void btf_invalidate_raw_data(struct btf *btf)
}
/* Ensure BTF is ready to be modified (by splitting into a three memory
- * regions for header, types, and strings). Also invalidate cached
+ * regions for types, strings and layout. Also invalidate cached
* raw_data, if any.
*/
static int btf_ensure_modifiable(struct btf *btf)
{
- void *hdr, *types;
+ void *types, *layout = NULL;
struct strset *set = NULL;
int err = -ENOMEM;
@@ -1798,45 +2002,58 @@ static int btf_ensure_modifiable(struct btf *btf)
return 0;
}
- /* split raw data into three memory regions */
- hdr = malloc(btf->hdr->hdr_len);
- types = malloc(btf->hdr->type_len);
- if (!hdr || !types)
+ if (btf->has_hdr_extra) {
+ /* Additional BTF header data was found; not safe to modify. */
+ return -EOPNOTSUPP;
+ }
+
+ /* split raw data into memory regions; btf->hdr is done already. */
+ types = malloc(btf->hdr.type_len);
+ if (!types)
goto err_out;
+ memcpy(types, btf->types_data, btf->hdr.type_len);
- memcpy(hdr, btf->hdr, btf->hdr->hdr_len);
- memcpy(types, btf->types_data, btf->hdr->type_len);
+ if (btf->hdr.layout_len) {
+ layout = malloc(btf->hdr.layout_len);
+ if (!layout)
+ goto err_out;
+ memcpy(layout, btf->raw_data + btf->hdr.hdr_len + btf->hdr.layout_off,
+ btf->hdr.layout_len);
+ }
/* build lookup index for all strings */
- set = strset__new(BTF_MAX_STR_OFFSET, btf->strs_data, btf->hdr->str_len);
+ set = strset__new(BTF_MAX_STR_OFFSET, btf->strs_data, btf->hdr.str_len);
if (IS_ERR(set)) {
err = PTR_ERR(set);
goto err_out;
}
/* only when everything was successful, update internal state */
- btf->hdr = hdr;
btf->types_data = types;
- btf->types_data_cap = btf->hdr->type_len;
+ btf->types_data_cap = btf->hdr.type_len;
btf->strs_data = NULL;
btf->strs_set = set;
+ if (layout)
+ btf->layout = layout;
/* if BTF was created from scratch, all strings are guaranteed to be
* unique and deduplicated
*/
- if (btf->hdr->str_len == 0)
+ if (btf->hdr.str_len == 0)
btf->strs_deduped = true;
- if (!btf->base_btf && btf->hdr->str_len == 1)
+ if (!btf->base_btf && btf->hdr.str_len == 1)
btf->strs_deduped = true;
/* invalidate raw_data representation */
btf_invalidate_raw_data(btf);
+ btf->modifiable = true;
+
return 0;
err_out:
strset__free(set);
- free(hdr);
free(types);
+ free(layout);
return err;
}
@@ -1849,6 +2066,7 @@ err_out:
int btf__find_str(struct btf *btf, const char *s)
{
int off;
+ int err;
if (btf->base_btf) {
off = btf__find_str(btf->base_btf, s);
@@ -1857,8 +2075,9 @@ int btf__find_str(struct btf *btf, const char *s)
}
/* BTF needs to be in a modifiable state to build string lookup index */
- if (btf_ensure_modifiable(btf))
- return libbpf_err(-ENOMEM);
+ err = btf_ensure_modifiable(btf);
+ if (err)
+ return libbpf_err(err);
off = strset__find_str(btf->strs_set, s);
if (off < 0)
@@ -1875,6 +2094,7 @@ int btf__find_str(struct btf *btf, const char *s)
int btf__add_str(struct btf *btf, const char *s)
{
int off;
+ int err;
if (btf->base_btf) {
off = btf__find_str(btf->base_btf, s);
@@ -1882,14 +2102,15 @@ int btf__add_str(struct btf *btf, const char *s)
return off;
}
- if (btf_ensure_modifiable(btf))
- return libbpf_err(-ENOMEM);
+ err = btf_ensure_modifiable(btf);
+ if (err)
+ return libbpf_err(err);
off = strset__add_str(btf->strs_set, s);
if (off < 0)
return libbpf_err(off);
- btf->hdr->str_len = strset__data_size(btf->strs_set);
+ btf->hdr.str_len = strset__data_size(btf->strs_set);
return btf->start_str_off + off;
}
@@ -1897,7 +2118,7 @@ int btf__add_str(struct btf *btf, const char *s)
static void *btf_add_type_mem(struct btf *btf, size_t add_sz)
{
return libbpf_add_mem(&btf->types_data, &btf->types_data_cap, 1,
- btf->hdr->type_len, UINT_MAX, add_sz);
+ btf->hdr.type_len, UINT_MAX, add_sz);
}
static void btf_type_inc_vlen(struct btf_type *t)
@@ -1905,16 +2126,31 @@ static void btf_type_inc_vlen(struct btf_type *t)
t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, btf_kflag(t));
}
+static void btf_hdr_update_type_len(struct btf *btf, int new_len)
+{
+ btf->hdr.type_len = new_len;
+ if (btf->layout) {
+ btf->hdr.layout_off = btf->hdr.type_off + new_len;
+ btf->hdr.str_off = btf->hdr.layout_off + btf->hdr.layout_len;
+ } else {
+ btf->hdr.str_off = btf->hdr.type_off + new_len;
+ }
+}
+
+static void btf_hdr_update_str_len(struct btf *btf, int new_len)
+{
+ btf->hdr.str_len = new_len;
+}
+
static int btf_commit_type(struct btf *btf, int data_sz)
{
int err;
- err = btf_add_type_idx_entry(btf, btf->hdr->type_len);
+ err = btf_add_type_idx_entry(btf, btf->hdr.type_len);
if (err)
return libbpf_err(err);
- btf->hdr->type_len += data_sz;
- btf->hdr->str_off += data_sz;
+ btf_hdr_update_type_len(btf, btf->hdr.type_len + data_sz);
btf->nr_types++;
return btf->start_id + btf->nr_types - 1;
}
@@ -1963,13 +2199,14 @@ static int btf_add_type(struct btf_pipe *p, const struct btf_type *src_type)
__u32 *str_off;
int sz, err;
- sz = btf_type_size(src_type);
+ sz = btf_type_size(p->src, src_type);
if (sz < 0)
return libbpf_err(sz);
/* deconstruct BTF, if necessary, and invalidate raw_data */
- if (btf_ensure_modifiable(p->dst))
- return libbpf_err(-ENOMEM);
+ err = btf_ensure_modifiable(p->dst);
+ if (err)
+ return libbpf_err(err);
t = btf_add_type_mem(p->dst, sz);
if (!t)
@@ -2004,24 +2241,31 @@ int btf__add_btf(struct btf *btf, const struct btf *src_btf)
{
struct btf_pipe p = { .src = src_btf, .dst = btf };
int data_sz, sz, cnt, i, err, old_strs_len;
+ __u32 src_start_id;
__u32 *off;
void *t;
- /* appending split BTF isn't supported yet */
- if (src_btf->base_btf)
- return libbpf_err(-ENOTSUP);
+ /*
+ * When appending split BTF, the destination must share the same base
+ * BTF so that base type ID references remain valid.
+ */
+ if (src_btf->base_btf && src_btf->base_btf != btf->base_btf)
+ return libbpf_err(-EOPNOTSUPP);
+
+ src_start_id = src_btf->base_btf ? btf__type_cnt(src_btf->base_btf) : 1;
/* deconstruct BTF, if necessary, and invalidate raw_data */
- if (btf_ensure_modifiable(btf))
- return libbpf_err(-ENOMEM);
+ err = btf_ensure_modifiable(btf);
+ if (err)
+ return libbpf_err(err);
/* remember original strings section size if we have to roll back
* partial strings section changes
*/
- old_strs_len = btf->hdr->str_len;
+ old_strs_len = btf->hdr.str_len;
- data_sz = src_btf->hdr->type_len;
- cnt = btf__type_cnt(src_btf) - 1;
+ data_sz = src_btf->hdr.type_len;
+ cnt = src_btf->nr_types;
/* pre-allocate enough memory for new types */
t = btf_add_type_mem(btf, data_sz);
@@ -2045,7 +2289,7 @@ int btf__add_btf(struct btf *btf, const struct btf *src_btf)
struct btf_field_iter it;
__u32 *type_id, *str_off;
- sz = btf_type_size(t);
+ sz = btf_type_size(src_btf, t);
if (sz < 0) {
/* unlikely, has to be corrupted src_btf */
err = sz;
@@ -2060,6 +2304,9 @@ int btf__add_btf(struct btf *btf, const struct btf *src_btf)
if (err)
goto err_out;
while ((str_off = btf_field_iter_next(&it))) {
+ /* don't remap strings from shared base BTF */
+ if (*str_off < src_btf->start_str_off)
+ continue;
err = btf_rewrite_str(&p, str_off);
if (err)
goto err_out;
@@ -2074,11 +2321,11 @@ int btf__add_btf(struct btf *btf, const struct btf *src_btf)
if (!*type_id) /* nothing to do for VOID references */
continue;
- /* we haven't updated btf's type count yet, so
- * btf->start_id + btf->nr_types - 1 is the type ID offset we should
- * add to all newly added BTF types
- */
- *type_id += btf->start_id + btf->nr_types - 1;
+ /* don't remap types from shared base BTF */
+ if (*type_id < src_start_id)
+ continue;
+
+ *type_id += btf->start_id + btf->nr_types - src_start_id;
}
/* go to next type data and type offset index entry */
@@ -2094,8 +2341,7 @@ int btf__add_btf(struct btf *btf, const struct btf *src_btf)
* update type count and various internal offsets and sizes to
* "commit" the changes and made them visible to the outside world.
*/
- btf->hdr->type_len += data_sz;
- btf->hdr->str_off += data_sz;
+ btf_hdr_update_type_len(btf, btf->hdr.type_len + data_sz);
btf->nr_types += cnt;
hashmap__free(p.str_off_map);
@@ -2106,13 +2352,14 @@ err_out:
/* zero out preallocated memory as if it was just allocated with
* libbpf_add_mem()
*/
- memset(btf->types_data + btf->hdr->type_len, 0, data_sz);
- memset(btf->strs_data + old_strs_len, 0, btf->hdr->str_len - old_strs_len);
+ memset(btf->types_data + btf->hdr.type_len, 0, data_sz);
+ if (btf->strs_data)
+ memset(btf->strs_data + old_strs_len, 0, btf->hdr.str_len - old_strs_len);
/* and now restore original strings section size; types data size
* wasn't modified, so doesn't need restoring, see big comment above
*/
- btf->hdr->str_len = old_strs_len;
+ btf_hdr_update_str_len(btf, old_strs_len);
hashmap__free(p.str_off_map);
@@ -2132,6 +2379,7 @@ int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding
{
struct btf_type *t;
int sz, name_off;
+ int err;
/* non-empty name */
if (str_is_empty(name))
@@ -2143,8 +2391,9 @@ int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding
return libbpf_err(-EINVAL);
/* deconstruct BTF, if necessary, and invalidate raw_data */
- if (btf_ensure_modifiable(btf))
- return libbpf_err(-ENOMEM);
+ err = btf_ensure_modifiable(btf);
+ if (err)
+ return libbpf_err(err);
sz = sizeof(struct btf_type) + sizeof(int);
t = btf_add_type_mem(btf, sz);
@@ -2180,6 +2429,7 @@ int btf__add_float(struct btf *btf, const char *name, size_t byte_sz)
{
struct btf_type *t;
int sz, name_off;
+ int err;
/* non-empty name */
if (str_is_empty(name))
@@ -2190,8 +2440,9 @@ int btf__add_float(struct btf *btf, const char *name, size_t byte_sz)
byte_sz != 16)
return libbpf_err(-EINVAL);
- if (btf_ensure_modifiable(btf))
- return libbpf_err(-ENOMEM);
+ err = btf_ensure_modifiable(btf);
+ if (err)
+ return libbpf_err(err);
sz = sizeof(struct btf_type);
t = btf_add_type_mem(btf, sz);
@@ -2225,12 +2476,14 @@ static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref
{
struct btf_type *t;
int sz, name_off = 0;
+ int err;
if (validate_type_id(ref_type_id))
return libbpf_err(-EINVAL);
- if (btf_ensure_modifiable(btf))
- return libbpf_err(-ENOMEM);
+ err = btf_ensure_modifiable(btf);
+ if (err)
+ return libbpf_err(err);
sz = sizeof(struct btf_type);
t = btf_add_type_mem(btf, sz);
@@ -2275,13 +2528,15 @@ int btf__add_array(struct btf *btf, int index_type_id, int elem_type_id, __u32 n
{
struct btf_type *t;
struct btf_array *a;
+ int err;
int sz;
if (validate_type_id(index_type_id) || validate_type_id(elem_type_id))
return libbpf_err(-EINVAL);
- if (btf_ensure_modifiable(btf))
- return libbpf_err(-ENOMEM);
+ err = btf_ensure_modifiable(btf);
+ if (err)
+ return libbpf_err(err);
sz = sizeof(struct btf_type) + sizeof(struct btf_array);
t = btf_add_type_mem(btf, sz);
@@ -2305,9 +2560,11 @@ static int btf_add_composite(struct btf *btf, int kind, const char *name, __u32
{
struct btf_type *t;
int sz, name_off = 0;
+ int err;
- if (btf_ensure_modifiable(btf))
- return libbpf_err(-ENOMEM);
+ err = btf_ensure_modifiable(btf);
+ if (err)
+ return libbpf_err(err);
sz = sizeof(struct btf_type);
t = btf_add_type_mem(btf, sz);
@@ -2387,6 +2644,7 @@ int btf__add_field(struct btf *btf, const char *name, int type_id,
struct btf_member *m;
bool is_bitfield;
int sz, name_off = 0;
+ int err;
/* last type should be union/struct */
if (btf->nr_types == 0)
@@ -2407,8 +2665,9 @@ int btf__add_field(struct btf *btf, const char *name, int type_id,
return libbpf_err(-EINVAL);
/* decompose and invalidate raw data */
- if (btf_ensure_modifiable(btf))
- return libbpf_err(-ENOMEM);
+ err = btf_ensure_modifiable(btf);
+ if (err)
+ return libbpf_err(err);
sz = sizeof(struct btf_member);
m = btf_add_type_mem(btf, sz);
@@ -2430,8 +2689,7 @@ int btf__add_field(struct btf *btf, const char *name, int type_id,
/* update parent type's vlen and kflag */
t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, is_bitfield || btf_kflag(t));
- btf->hdr->type_len += sz;
- btf->hdr->str_off += sz;
+ btf_hdr_update_type_len(btf, btf->hdr.type_len + sz);
return 0;
}
@@ -2440,13 +2698,15 @@ static int btf_add_enum_common(struct btf *btf, const char *name, __u32 byte_sz,
{
struct btf_type *t;
int sz, name_off = 0;
+ int err;
/* byte_sz must be power of 2 */
if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 8)
return libbpf_err(-EINVAL);
- if (btf_ensure_modifiable(btf))
- return libbpf_err(-ENOMEM);
+ err = btf_ensure_modifiable(btf);
+ if (err)
+ return libbpf_err(err);
sz = sizeof(struct btf_type);
t = btf_add_type_mem(btf, sz);
@@ -2502,6 +2762,7 @@ int btf__add_enum_value(struct btf *btf, const char *name, __s64 value)
struct btf_type *t;
struct btf_enum *v;
int sz, name_off;
+ int err;
/* last type should be BTF_KIND_ENUM */
if (btf->nr_types == 0)
@@ -2517,8 +2778,9 @@ int btf__add_enum_value(struct btf *btf, const char *name, __s64 value)
return libbpf_err(-E2BIG);
/* decompose and invalidate raw data */
- if (btf_ensure_modifiable(btf))
- return libbpf_err(-ENOMEM);
+ err = btf_ensure_modifiable(btf);
+ if (err)
+ return libbpf_err(err);
sz = sizeof(struct btf_enum);
v = btf_add_type_mem(btf, sz);
@@ -2540,8 +2802,7 @@ int btf__add_enum_value(struct btf *btf, const char *name, __s64 value)
if (value < 0)
t->info = btf_type_info(btf_kind(t), btf_vlen(t), true);
- btf->hdr->type_len += sz;
- btf->hdr->str_off += sz;
+ btf_hdr_update_type_len(btf, btf->hdr.type_len + sz);
return 0;
}
@@ -2579,6 +2840,7 @@ int btf__add_enum64_value(struct btf *btf, const char *name, __u64 value)
struct btf_enum64 *v;
struct btf_type *t;
int sz, name_off;
+ int err;
/* last type should be BTF_KIND_ENUM64 */
if (btf->nr_types == 0)
@@ -2592,8 +2854,9 @@ int btf__add_enum64_value(struct btf *btf, const char *name, __u64 value)
return libbpf_err(-EINVAL);
/* decompose and invalidate raw data */
- if (btf_ensure_modifiable(btf))
- return libbpf_err(-ENOMEM);
+ err = btf_ensure_modifiable(btf);
+ if (err)
+ return libbpf_err(err);
sz = sizeof(struct btf_enum64);
v = btf_add_type_mem(btf, sz);
@@ -2612,8 +2875,7 @@ int btf__add_enum64_value(struct btf *btf, const char *name, __u64 value)
t = btf_last_type(btf);
btf_type_inc_vlen(t);
- btf->hdr->type_len += sz;
- btf->hdr->str_off += sz;
+ btf_hdr_update_type_len(btf, btf->hdr.type_len + sz);
return 0;
}
@@ -2782,13 +3044,15 @@ int btf__add_func(struct btf *btf, const char *name,
int btf__add_func_proto(struct btf *btf, int ret_type_id)
{
struct btf_type *t;
+ int err;
int sz;
if (validate_type_id(ret_type_id))
return libbpf_err(-EINVAL);
- if (btf_ensure_modifiable(btf))
- return libbpf_err(-ENOMEM);
+ err = btf_ensure_modifiable(btf);
+ if (err)
+ return libbpf_err(err);
sz = sizeof(struct btf_type);
t = btf_add_type_mem(btf, sz);
@@ -2818,6 +3082,7 @@ int btf__add_func_param(struct btf *btf, const char *name, int type_id)
struct btf_type *t;
struct btf_param *p;
int sz, name_off = 0;
+ int err;
if (validate_type_id(type_id))
return libbpf_err(-EINVAL);
@@ -2830,8 +3095,9 @@ int btf__add_func_param(struct btf *btf, const char *name, int type_id)
return libbpf_err(-EINVAL);
/* decompose and invalidate raw data */
- if (btf_ensure_modifiable(btf))
- return libbpf_err(-ENOMEM);
+ err = btf_ensure_modifiable(btf);
+ if (err)
+ return libbpf_err(err);
sz = sizeof(struct btf_param);
p = btf_add_type_mem(btf, sz);
@@ -2851,8 +3117,7 @@ int btf__add_func_param(struct btf *btf, const char *name, int type_id)
t = btf_last_type(btf);
btf_type_inc_vlen(t);
- btf->hdr->type_len += sz;
- btf->hdr->str_off += sz;
+ btf_hdr_update_type_len(btf, btf->hdr.type_len + sz);
return 0;
}
@@ -2871,6 +3136,7 @@ int btf__add_var(struct btf *btf, const char *name, int linkage, int type_id)
struct btf_type *t;
struct btf_var *v;
int sz, name_off;
+ int err;
/* non-empty name */
if (str_is_empty(name))
@@ -2882,8 +3148,9 @@ int btf__add_var(struct btf *btf, const char *name, int linkage, int type_id)
return libbpf_err(-EINVAL);
/* deconstruct BTF, if necessary, and invalidate raw_data */
- if (btf_ensure_modifiable(btf))
- return libbpf_err(-ENOMEM);
+ err = btf_ensure_modifiable(btf);
+ if (err)
+ return libbpf_err(err);
sz = sizeof(struct btf_type) + sizeof(struct btf_var);
t = btf_add_type_mem(btf, sz);
@@ -2920,13 +3187,15 @@ int btf__add_datasec(struct btf *btf, const char *name, __u32 byte_sz)
{
struct btf_type *t;
int sz, name_off;
+ int err;
/* non-empty name */
if (str_is_empty(name))
return libbpf_err(-EINVAL);
- if (btf_ensure_modifiable(btf))
- return libbpf_err(-ENOMEM);
+ err = btf_ensure_modifiable(btf);
+ if (err)
+ return libbpf_err(err);
sz = sizeof(struct btf_type);
t = btf_add_type_mem(btf, sz);
@@ -2959,6 +3228,7 @@ int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __
{
struct btf_type *t;
struct btf_var_secinfo *v;
+ int err;
int sz;
/* last type should be BTF_KIND_DATASEC */
@@ -2972,8 +3242,9 @@ int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __
return libbpf_err(-EINVAL);
/* decompose and invalidate raw data */
- if (btf_ensure_modifiable(btf))
- return libbpf_err(-ENOMEM);
+ err = btf_ensure_modifiable(btf);
+ if (err)
+ return libbpf_err(err);
sz = sizeof(struct btf_var_secinfo);
v = btf_add_type_mem(btf, sz);
@@ -2988,8 +3259,7 @@ int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __
t = btf_last_type(btf);
btf_type_inc_vlen(t);
- btf->hdr->type_len += sz;
- btf->hdr->str_off += sz;
+ btf_hdr_update_type_len(btf, btf->hdr.type_len + sz);
return 0;
}
@@ -2998,6 +3268,7 @@ static int btf_add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
{
struct btf_type *t;
int sz, value_off;
+ int err;
if (str_is_empty(value) || component_idx < -1)
return libbpf_err(-EINVAL);
@@ -3005,8 +3276,9 @@ static int btf_add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
if (validate_type_id(ref_type_id))
return libbpf_err(-EINVAL);
- if (btf_ensure_modifiable(btf))
- return libbpf_err(-ENOMEM);
+ err = btf_ensure_modifiable(btf);
+ if (err)
+ return libbpf_err(err);
sz = sizeof(struct btf_type) + sizeof(struct btf_decl_tag);
t = btf_add_type_mem(btf, sz);
@@ -3630,10 +3902,9 @@ int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts)
return libbpf_err(-EINVAL);
}
- if (btf_ensure_modifiable(btf)) {
- err = -ENOMEM;
+ err = btf_ensure_modifiable(btf);
+ if (err)
goto done;
- }
err = btf_dedup_prep(d);
if (err) {
@@ -3953,7 +4224,7 @@ static int btf_dedup_strings(struct btf_dedup *d)
/* replace BTF string data and hash with deduped ones */
strset__free(d->btf->strs_set);
- d->btf->hdr->str_len = strset__data_size(d->strs_set);
+ btf_hdr_update_str_len(d->btf, strset__data_size(d->strs_set));
d->btf->strs_set = d->strs_set;
d->strs_set = NULL;
d->btf->strs_deduped = true;
@@ -5386,7 +5657,7 @@ static int btf_dedup_compact_types(struct btf_dedup *d)
continue;
t = btf__type_by_id(d->btf, id);
- len = btf_type_size(t);
+ len = btf_type_size(d->btf, t);
if (len < 0)
return len;
@@ -5400,14 +5671,17 @@ static int btf_dedup_compact_types(struct btf_dedup *d)
/* shrink struct btf's internal types index and update btf_header */
d->btf->nr_types = next_type_id - d->btf->start_id;
d->btf->type_offs_cap = d->btf->nr_types;
- d->btf->hdr->type_len = p - d->btf->types_data;
+ d->btf->hdr.type_len = p - d->btf->types_data;
new_offs = libbpf_reallocarray(d->btf->type_offs, d->btf->type_offs_cap,
sizeof(*new_offs));
if (d->btf->type_offs_cap && !new_offs)
return -ENOMEM;
d->btf->type_offs = new_offs;
- d->btf->hdr->str_off = d->btf->hdr->type_len;
- d->btf->raw_size = d->btf->hdr->hdr_len + d->btf->hdr->type_len + d->btf->hdr->str_len;
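+	/* raw image section order: header, types, optional layout, strings */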
+ if (d->btf->layout)
+ d->btf->hdr.layout_off = d->btf->hdr.type_off + d->btf->hdr.type_len;
+ d->btf->hdr.str_off = d->btf->hdr.type_off + d->btf->hdr.type_len + d->btf->hdr.layout_len;
+ d->btf->raw_size = d->btf->hdr.hdr_len + d->btf->hdr.type_off + d->btf->hdr.type_len +
+ d->btf->hdr.layout_len + d->btf->hdr.str_len;
return 0;
}
@@ -5865,7 +6139,7 @@ int btf__distill_base(const struct btf *src_btf, struct btf **new_base_btf,
goto done;
}
dist.split_start_id = btf__type_cnt(old_base);
- dist.split_start_str = old_base->hdr->str_len;
+ dist.split_start_str = old_base->hdr.str_len;
/* Pass over src split BTF; generate the list of base BTF type ids it
* references; these will constitute our distilled BTF set to be
@@ -5934,14 +6208,14 @@ done:
const struct btf_header *btf_header(const struct btf *btf)
{
- return btf->hdr;
+ return &btf->hdr;
}
void btf_set_base_btf(struct btf *btf, const struct btf *base_btf)
{
btf->base_btf = (struct btf *)base_btf;
btf->start_id = btf__type_cnt(base_btf);
- btf->start_str_off = base_btf->hdr->str_len + base_btf->start_str_off;
+ btf->start_str_off = base_btf->hdr.str_len + base_btf->start_str_off;
}
int btf__relocate(struct btf *btf, const struct btf *base_btf)
@@ -6008,16 +6282,15 @@ int btf__permute(struct btf *btf, __u32 *id_map, __u32 id_map_cnt,
goto done;
}
- new_types = calloc(btf->hdr->type_len, 1);
+ new_types = calloc(btf->hdr.type_len, 1);
if (!new_types) {
err = -ENOMEM;
goto done;
}
- if (btf_ensure_modifiable(btf)) {
- err = -ENOMEM;
+ err = btf_ensure_modifiable(btf);
+ if (err)
goto done;
- }
for (i = start_offs; i < id_map_cnt; i++) {
id = id_map[i];
@@ -6046,7 +6319,7 @@ int btf__permute(struct btf *btf, __u32 *id_map, __u32 id_map_cnt,
id = order_map[i];
t = btf__type_by_id(btf, id);
- type_size = btf_type_size(t);
+ type_size = btf_type_size(btf, t);
memcpy(nt, t, type_size);
/* fix up referenced IDs for BTF */
@@ -6072,7 +6345,7 @@ int btf__permute(struct btf *btf, __u32 *id_map, __u32 id_map_cnt,
for (nt = new_types, i = 0; i < id_map_cnt - start_offs; i++) {
btf->type_offs[i] = nt - new_types;
- nt += btf_type_size(nt);
+ nt += btf_type_size(btf, nt);
}
free(order_map);
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index b30008c267c0..a1f8deca2603 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -109,6 +109,26 @@ LIBBPF_API struct btf *btf__new_empty(void);
*/
LIBBPF_API struct btf *btf__new_empty_split(struct btf *base_btf);
+struct btf_new_opts {
+ size_t sz;
+ struct btf *base_btf; /* optional base BTF */
+ bool add_layout; /* add BTF layout information */
+ size_t:0;
+};
+#define btf_new_opts__last_field add_layout
+
+/**
+ * @brief **btf__new_empty_opts()** creates an unpopulated BTF object,
+ * optionally split on top of *base_btf*, and with a BTF kind layout
+ * description if *add_layout* is set.
+ * @return new BTF object instance which has to be eventually freed with
+ * **btf__free()**
+ *
+ * On error, NULL is returned and the thread-local `errno` variable is
+ * set to the error code.
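+ *
+ * A minimal usage sketch (error handling omitted):
+ *
+ *	LIBBPF_OPTS(btf_new_opts, opts, .add_layout = true);
+ *	struct btf *btf = btf__new_empty_opts(&opts);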
+ */
+LIBBPF_API struct btf *btf__new_empty_opts(struct btf_new_opts *opts);
+
/**
* @brief **btf__distill_base()** creates new versions of the split BTF
* *src_btf* and its base BTF. The new base BTF will only contain the types
diff --git a/tools/lib/bpf/features.c b/tools/lib/bpf/features.c
index 2fa434f09cce..4f19a0d79b0c 100644
--- a/tools/lib/bpf/features.c
+++ b/tools/lib/bpf/features.c
@@ -568,6 +568,53 @@ static int probe_ldimm64_full_range_off(int token_fd)
return 1;
}
+#ifdef __x86_64__
+
+#ifndef __NR_uprobe
+#define __NR_uprobe 336
+#endif
+
+static int probe_uprobe_syscall(int token_fd)
+{
+ /*
+ * If the kernel supports the uprobe() syscall, calling it from outside
+ * a kernel-generated uprobe trampoline fails with ENXIO.
+ */
+ return syscall(__NR_uprobe) < 0 && errno == ENXIO;
+}
+#else
+static int probe_uprobe_syscall(int token_fd)
+{
+ return 0;
+}
+#endif
+
+static int probe_kern_btf_layout(int token_fd)
+{
+ static const char strs[] = "\0int";
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
+ };
+ struct btf_layout layout[] = {
+ { 0, 0, 0 },
+ { sizeof(__u32), 0, 0 },
+ };
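+	/* sections follow the header in order: types, layout, strings */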
+ struct btf_header hdr = {
+ .magic = BTF_MAGIC,
+ .version = BTF_VERSION,
+ .hdr_len = sizeof(struct btf_header),
+ .type_len = sizeof(types),
+ .str_off = sizeof(types) + sizeof(layout),
+ .str_len = sizeof(strs),
+ .layout_off = sizeof(types),
+ .layout_len = sizeof(layout),
+ };
+
+ return probe_fd(libbpf__load_raw_btf_hdr(&hdr, (char *)types, strs,
+ (char *)layout, token_fd));
+}
+
typedef int (*feature_probe_fn)(int /* token_fd */);
static struct kern_feature_cache feature_cache;
@@ -646,6 +693,12 @@ static struct kern_feature_desc {
[FEAT_LDIMM64_FULL_RANGE_OFF] = {
"full range LDIMM64 support", probe_ldimm64_full_range_off,
},
+ [FEAT_UPROBE_SYSCALL] = {
+ "kernel supports uprobe syscall", probe_uprobe_syscall,
+ },
+ [FEAT_BTF_LAYOUT] = {
+ "kernel supports BTF layout", probe_kern_btf_layout,
+ },
};
bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_id)
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 0be7017800fe..8b0c3246097f 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -3138,12 +3138,14 @@ static bool btf_needs_sanitization(struct bpf_object *obj)
bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
bool has_qmark_datasec = kernel_supports(obj, FEAT_BTF_QMARK_DATASEC);
+ bool has_layout = kernel_supports(obj, FEAT_BTF_LAYOUT);
return !has_func || !has_datasec || !has_func_global || !has_float ||
- !has_decl_tag || !has_type_tag || !has_enum64 || !has_qmark_datasec;
+ !has_decl_tag || !has_type_tag || !has_enum64 || !has_qmark_datasec ||
+ !has_layout;
}
-static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
+struct btf *bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *orig_btf)
{
bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
@@ -3153,9 +3155,64 @@ static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
bool has_qmark_datasec = kernel_supports(obj, FEAT_BTF_QMARK_DATASEC);
+ bool has_layout = kernel_supports(obj, FEAT_BTF_LAYOUT);
int enum64_placeholder_id = 0;
+ const struct btf_header *hdr;
+ struct btf *btf = NULL;
+ const void *raw_data;
struct btf_type *t;
int i, j, vlen;
+ __u32 sz;
+ int err;
+
+ /* clone BTF to sanitize a copy and leave the original intact */
+ raw_data = btf__raw_data(orig_btf, &sz);
+ if (!raw_data)
+ return ERR_PTR(-ENOMEM);
+ /* btf_header() gives us endian-safe header info */
+ hdr = btf_header(orig_btf);
+
+ if (!has_layout && hdr->hdr_len >= sizeof(struct btf_header) &&
+ (hdr->layout_len != 0 || hdr->layout_off != 0)) {
+ const struct btf_header *old_hdr = raw_data;
+ struct btf_header *new_hdr;
+ void *new_raw_data;
+ __u32 new_str_off;
+
+ /*
+	 * We need to rewrite the BTF to drop the layout information and
+	 * move the string section to immediately after the types.
+ */
+ new_raw_data = malloc(sz);
+ if (!new_raw_data)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(new_raw_data, raw_data, sz);
+ new_hdr = new_raw_data;
+ new_hdr->layout_off = 0;
+ new_hdr->layout_len = 0;
+ new_str_off = hdr->type_off + hdr->type_len;
+ /* Handle swapped endian case */
+ if (old_hdr->magic != hdr->magic)
+ new_hdr->str_off = bswap_32(new_str_off);
+ else
+ new_hdr->str_off = new_str_off;
+
+ memmove(new_raw_data + hdr->hdr_len + new_str_off,
+ new_raw_data + hdr->hdr_len + hdr->str_off,
+ hdr->str_len);
+ sz = hdr->hdr_len + hdr->type_off + hdr->type_len + hdr->str_len;
+ btf = btf__new(new_raw_data, sz);
+ free(new_raw_data);
+ } else {
+ btf = btf__new(raw_data, sz);
+ }
+ err = libbpf_get_error(btf);
+ if (err)
+ return ERR_PTR(err);
+
+ /* enforce 8-byte pointers for BPF-targeted BTFs */
+ btf__set_pointer_size(btf, 8);
for (i = 1; i < btf__type_cnt(btf); i++) {
t = (struct btf_type *)btf__type_by_id(btf, i);
@@ -3233,9 +3290,10 @@ static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
if (enum64_placeholder_id == 0) {
enum64_placeholder_id = btf__add_int(btf, "enum64_placeholder", 1, 0);
- if (enum64_placeholder_id < 0)
- return enum64_placeholder_id;
-
+ if (enum64_placeholder_id < 0) {
+ btf__free(btf);
+ return ERR_PTR(enum64_placeholder_id);
+ }
t = (struct btf_type *)btf__type_by_id(btf, i);
}
@@ -3249,7 +3307,7 @@ static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
}
}
- return 0;
+ return btf;
}
static bool libbpf_needs_btf(const struct bpf_object *obj)
@@ -3600,21 +3658,9 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
sanitize = btf_needs_sanitization(obj);
if (sanitize) {
- const void *raw_data;
- __u32 sz;
-
- /* clone BTF to sanitize a copy and leave the original intact */
- raw_data = btf__raw_data(obj->btf, &sz);
- kern_btf = btf__new(raw_data, sz);
- err = libbpf_get_error(kern_btf);
- if (err)
- return err;
-
- /* enforce 8-byte pointers for BPF-targeted BTFs */
- btf__set_pointer_size(obj->btf, 8);
- err = bpf_object__sanitize_btf(obj, kern_btf);
- if (err)
- return err;
+ kern_btf = bpf_object__sanitize_btf(obj, obj->btf);
+ if (IS_ERR(kern_btf))
+ return PTR_ERR(kern_btf);
}
if (obj->gen_loader) {
@@ -5157,12 +5203,20 @@ bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
*/
return true;
- if (obj->token_fd)
+ if (obj->feat_cache)
return feat_supported(obj->feat_cache, feat_id);
return feat_supported(NULL, feat_id);
}
+/* Used in testing to simulate missing features. */
+void bpf_object_set_feat_cache(struct bpf_object *obj, struct kern_feature_cache *cache)
+{
+ if (obj->feat_cache)
+ free(obj->feat_cache);
+ obj->feat_cache = cache;
+}
+
static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
{
struct bpf_map_info map_info;
@@ -9802,6 +9856,111 @@ __u32 bpf_program__line_info_cnt(const struct bpf_program *prog)
return prog->line_info_cnt;
}
+int bpf_program__clone(struct bpf_program *prog, const struct bpf_prog_load_opts *opts)
+{
+ LIBBPF_OPTS(bpf_prog_load_opts, attr);
+ struct bpf_object *obj;
+ const void *info;
+ __u32 info_cnt, info_rec_size;
+ int err, fd, prog_btf_fd;
+
+ if (!prog)
+ return libbpf_err(-EINVAL);
+
+ if (!OPTS_VALID(opts, bpf_prog_load_opts))
+ return libbpf_err(-EINVAL);
+
+ obj = prog->obj;
+ if (obj->state < OBJ_PREPARED)
+ return libbpf_err(-EINVAL);
+
+ /*
+ * Caller-provided opts take priority; fall back to
+ * prog/object defaults when the caller leaves them zero.
+ */
+ attr.attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0) ?: prog->attach_prog_fd;
+ attr.prog_flags = OPTS_GET(opts, prog_flags, 0) ?: prog->prog_flags;
+ attr.prog_ifindex = OPTS_GET(opts, prog_ifindex, 0) ?: prog->prog_ifindex;
+ attr.kern_version = OPTS_GET(opts, kern_version, 0) ?: obj->kern_version;
+ attr.fd_array = OPTS_GET(opts, fd_array, NULL) ?: obj->fd_array;
+ attr.fd_array_cnt = OPTS_GET(opts, fd_array_cnt, 0) ?: obj->fd_array_cnt;
+ attr.token_fd = OPTS_GET(opts, token_fd, 0) ?: obj->token_fd;
+ if (attr.token_fd)
+ attr.prog_flags |= BPF_F_TOKEN_FD;
+
+ prog_btf_fd = OPTS_GET(opts, prog_btf_fd, 0);
+ if (!prog_btf_fd && obj->btf)
+ prog_btf_fd = btf__fd(obj->btf);
+
+ /* BTF func/line info: only pass if kernel supports it */
+ if (kernel_supports(obj, FEAT_BTF_FUNC) && prog_btf_fd > 0) {
+ attr.prog_btf_fd = prog_btf_fd;
+
+ /* func_info/line_info triples: all-or-nothing from caller */
+ info = OPTS_GET(opts, func_info, NULL);
+ info_cnt = OPTS_GET(opts, func_info_cnt, 0);
+ info_rec_size = OPTS_GET(opts, func_info_rec_size, 0);
+ if (!!info != !!info_cnt || !!info != !!info_rec_size) {
+ pr_warn("prog '%s': func_info, func_info_cnt, and func_info_rec_size must all be specified or all omitted\n",
+ prog->name);
+ return libbpf_err(-EINVAL);
+ }
+ attr.func_info = info ?: prog->func_info;
+ attr.func_info_cnt = info ? info_cnt : prog->func_info_cnt;
+ attr.func_info_rec_size = info ? info_rec_size : prog->func_info_rec_size;
+
+ info = OPTS_GET(opts, line_info, NULL);
+ info_cnt = OPTS_GET(opts, line_info_cnt, 0);
+ info_rec_size = OPTS_GET(opts, line_info_rec_size, 0);
+ if (!!info != !!info_cnt || !!info != !!info_rec_size) {
+ pr_warn("prog '%s': line_info, line_info_cnt, and line_info_rec_size must all be specified or all omitted\n",
+ prog->name);
+ return libbpf_err(-EINVAL);
+ }
+ attr.line_info = info ?: prog->line_info;
+ attr.line_info_cnt = info ? info_cnt : prog->line_info_cnt;
+ attr.line_info_rec_size = info ? info_rec_size : prog->line_info_rec_size;
+ }
+
+ /* Logging is caller-controlled; no fallback to prog/obj log settings */
+ attr.log_buf = OPTS_GET(opts, log_buf, NULL);
+ attr.log_size = OPTS_GET(opts, log_size, 0);
+ attr.log_level = OPTS_GET(opts, log_level, 0);
+
+ /*
+	 * The fields below may be mutated by prog_prepare_load_fn: seed them
+	 * from prog/obj defaults here, then re-apply caller-provided opts
+	 * afterwards.
+ */
+ attr.expected_attach_type = prog->expected_attach_type;
+ attr.attach_btf_id = prog->attach_btf_id;
+ attr.attach_btf_obj_fd = prog->attach_btf_obj_fd;
+
+ if (prog->sec_def && prog->sec_def->prog_prepare_load_fn) {
+ err = prog->sec_def->prog_prepare_load_fn(prog, &attr, prog->sec_def->cookie);
+ if (err)
+ return libbpf_err(err);
+ }
+
+ /* Re-apply caller overrides for output fields */
+ if (OPTS_GET(opts, expected_attach_type, 0))
+ attr.expected_attach_type = OPTS_GET(opts, expected_attach_type, 0);
+ if (OPTS_GET(opts, attach_btf_id, 0))
+ attr.attach_btf_id = OPTS_GET(opts, attach_btf_id, 0);
+ if (OPTS_GET(opts, attach_btf_obj_fd, 0))
+ attr.attach_btf_obj_fd = OPTS_GET(opts, attach_btf_obj_fd, 0);
+
+ /*
+ * Unlike bpf_object_load_prog(), we intentionally do not call bpf_prog_bind_map()
+ * for RODATA maps here to avoid mutating the object's state. Callers can bind the
+ * required maps themselves using bpf_prog_bind_map().
+ */
+ fd = bpf_prog_load(prog->type, prog->name, obj->license, prog->insns, prog->insns_cnt,
+ &attr);
+
+ return libbpf_err(fd);
+}
+
#define SEC_DEF(sec_pfx, ptype, atype, flags, ...) { \
.sec = (char *)sec_pfx, \
.prog_type = BPF_PROG_TYPE_##ptype, \
@@ -11692,6 +11851,8 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
default:
return libbpf_err_ptr(-EINVAL);
}
+ if (!func_name && legacy)
+ return libbpf_err_ptr(-EOPNOTSUPP);
if (!legacy) {
pfd = perf_event_open_probe(false /* uprobe */, retprobe,
@@ -11711,21 +11872,21 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
offset, -1 /* pid */);
}
if (pfd < 0) {
- err = -errno;
- pr_warn("prog '%s': failed to create %s '%s+0x%zx' perf event: %s\n",
+ err = pfd;
+ pr_warn("prog '%s': failed to create %s '%s%s0x%zx' perf event: %s\n",
prog->name, retprobe ? "kretprobe" : "kprobe",
- func_name, offset,
- errstr(err));
+ func_name ?: "", func_name ? "+" : "",
+ offset, errstr(err));
goto err_out;
}
link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
err = libbpf_get_error(link);
if (err) {
close(pfd);
- pr_warn("prog '%s': failed to attach to %s '%s+0x%zx': %s\n",
+ pr_warn("prog '%s': failed to attach to %s '%s%s0x%zx': %s\n",
prog->name, retprobe ? "kretprobe" : "kprobe",
- func_name, offset,
- errstr(err));
+ func_name ?: "", func_name ? "+" : "",
+ offset, errstr(err));
goto err_clean_legacy;
}
if (legacy) {
@@ -12041,7 +12202,16 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
if (addrs && syms)
return libbpf_err_ptr(-EINVAL);
- if (pattern) {
+ /*
+	 * Exact function name (no wildcards) without unique_match: bypass
+	 * kallsyms parsing and pass the symbol directly to the kernel via
+	 * the syms[] array. When unique_match is set, fall through to the
+	 * slow path, which detects duplicate symbols.
+ */
+ if (pattern && !strpbrk(pattern, "*?") && !unique_match) {
+ syms = &pattern;
+ cnt = 1;
+ } else if (pattern) {
if (has_available_filter_functions_addrs())
err = libbpf_available_kprobes_parse(&res);
else
@@ -12084,6 +12254,14 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
link_fd = bpf_link_create(prog_fd, 0, attach_type, &lopts);
if (link_fd < 0) {
err = -errno;
+ /*
+	 * Normalize the error code: when an exact name bypasses kallsyms
+	 * parsing, the kernel returns ESRCH from ftrace_lookup_symbols().
+	 * Convert it to ENOENT for API consistency with the pattern-matching
+	 * path, which returns ENOENT from userspace.
+ */
+ if (err == -ESRCH)
+ err = -ENOENT;
pr_warn("prog '%s': failed to attach: %s\n",
prog->name, errstr(err));
goto error;
@@ -12684,7 +12862,7 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
binary_path, func_offset, pid);
}
if (pfd < 0) {
- err = -errno;
+ err = pfd;
pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
prog->name, retprobe ? "uretprobe" : "uprobe",
binary_path, func_offset,
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index dfc37a615578..bba4e8464396 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -557,7 +557,7 @@ struct bpf_kprobe_opts {
size_t sz;
/* custom user-provided value fetchable through bpf_get_attach_cookie() */
__u64 bpf_cookie;
- /* function's offset to install kprobe to */
+ /* function offset, or raw address if func_name == NULL */
size_t offset;
/* kprobe is return probe */
bool retprobe;
@@ -565,11 +565,36 @@ struct bpf_kprobe_opts {
enum probe_attach_mode attach_mode;
size_t :0;
};
+
#define bpf_kprobe_opts__last_field attach_mode
+/**
+ * @brief **bpf_program__attach_kprobe()** attaches a BPF program to a
+ * kernel function entry or return.
+ *
+ * @param prog BPF program to attach
+ * @param retprobe Attach to function return
+ * @param func_name Name of the kernel function to attach to
+ * @return Reference to the newly created BPF link; or NULL is returned on
+ * error, error code is stored in errno
+ */
LIBBPF_API struct bpf_link *
bpf_program__attach_kprobe(const struct bpf_program *prog, bool retprobe,
const char *func_name);
+
+/**
+ * @brief **bpf_program__attach_kprobe_opts()** is just like
+ * bpf_program__attach_kprobe() except with an options struct
+ * for various configurations.
+ *
+ * @param prog BPF program to attach
+ * @param func_name Name of the kernel function to attach to. If NULL,
+ * opts->offset is treated as a raw kernel address. Raw-address attach
+ * is supported with PROBE_ATTACH_MODE_PERF and PROBE_ATTACH_MODE_LINK.
+ * @param opts Options for altering program attachment
+ * @return Reference to the newly created BPF link; or NULL is returned on
+ * error, error code is stored in errno
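+ *
+ * Raw-address attach sketch (the address is purely illustrative):
+ *
+ *	LIBBPF_OPTS(bpf_kprobe_opts, opts,
+ *		.offset = 0xffffffffa0001234,
+ *		.attach_mode = PROBE_ATTACH_MODE_LINK);
+ *	link = bpf_program__attach_kprobe_opts(prog, NULL, &opts);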
+ */
LIBBPF_API struct bpf_link *
bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
const char *func_name,
@@ -2021,6 +2046,23 @@ LIBBPF_API int libbpf_register_prog_handler(const char *sec,
*/
LIBBPF_API int libbpf_unregister_prog_handler(int handler_id);
+/**
+ * @brief **bpf_program__clone()** loads a single BPF program from a prepared
+ * BPF object into the kernel, returning its file descriptor.
+ *
+ * The BPF object must have been previously prepared with
+ * **bpf_object__prepare()**. If @opts is provided, any non-zero field
+ * overrides the defaults derived from the program/object internals.
+ * If @opts is NULL, all load attributes are derived from the program and
+ * object state.
+ *
+ * The returned FD is owned by the caller and must be closed with close().
+ *
+ * @param prog BPF program from a prepared object
+ * @param opts Optional load options; non-zero fields override defaults
+ * @return program FD (>= 0) on success; negative error code on failure
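+ *
+ * A minimal usage sketch (the program name "handler" is illustrative):
+ *
+ *	bpf_object__prepare(obj);
+ *	fd = bpf_program__clone(bpf_object__find_program_by_name(obj, "handler"), NULL);
+ *	if (fd >= 0)
+ *		close(fd);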
+ */
+LIBBPF_API int bpf_program__clone(struct bpf_program *prog, const struct bpf_prog_load_opts *opts);
+
#ifdef __cplusplus
} /* extern "C" */
#endif
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index d18fbcea7578..dfed8d60af05 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -455,3 +455,9 @@ LIBBPF_1.7.0 {
bpf_program__assoc_struct_ops;
btf__permute;
} LIBBPF_1.6.0;
+
+LIBBPF_1.8.0 {
+ global:
+ bpf_program__clone;
+ btf__new_empty_opts;
+} LIBBPF_1.7.0;
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index 974147e8a8aa..3781c45b46d3 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -394,6 +394,10 @@ enum kern_feature_id {
FEAT_BTF_QMARK_DATASEC,
/* Kernel supports LDIMM64 imm offsets past 512 MiB. */
FEAT_LDIMM64_FULL_RANGE_OFF,
+ /* Kernel supports uprobe syscall */
+ FEAT_UPROBE_SYSCALL,
+ /* Kernel supports BTF layout information */
+ FEAT_BTF_LAYOUT,
__FEAT_CNT,
};
@@ -410,6 +414,7 @@ struct kern_feature_cache {
bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_id);
bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id);
+void bpf_object_set_feat_cache(struct bpf_object *obj, struct kern_feature_cache *cache);
int probe_kern_syscall_wrapper(int token_fd);
int probe_memcg_account(int token_fd);
@@ -420,6 +425,10 @@ int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz);
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
const char *str_sec, size_t str_len,
int token_fd);
+int libbpf__load_raw_btf_hdr(const struct btf_header *hdr,
+ const char *raw_types, const char *str_sec,
+ const char *layout_sec, int token_fd);
+struct btf *bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *orig_btf);
int btf_load_into_kernel(struct btf *btf,
char *log_buf, size_t log_sz, __u32 log_level,
int token_fd);
diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
index bccf4bb747e1..b70d9637ecf5 100644
--- a/tools/lib/bpf/libbpf_probes.c
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -218,18 +218,10 @@ int libbpf_probe_bpf_prog_type(enum bpf_prog_type prog_type, const void *opts)
return libbpf_err(ret);
}
-int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
- const char *str_sec, size_t str_len,
- int token_fd)
+int libbpf__load_raw_btf_hdr(const struct btf_header *hdr, const char *raw_types,
+ const char *str_sec, const char *layout_sec,
+ int token_fd)
{
- struct btf_header hdr = {
- .magic = BTF_MAGIC,
- .version = BTF_VERSION,
- .hdr_len = sizeof(struct btf_header),
- .type_len = types_len,
- .str_off = types_len,
- .str_len = str_len,
- };
LIBBPF_OPTS(bpf_btf_load_opts, opts,
.token_fd = token_fd,
.btf_flags = token_fd ? BPF_F_TOKEN_FD : 0,
@@ -237,14 +229,16 @@ int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
int btf_fd, btf_len;
__u8 *raw_btf;
- btf_len = hdr.hdr_len + hdr.type_len + hdr.str_len;
+ btf_len = hdr->hdr_len + hdr->type_off + hdr->type_len + hdr->str_len + hdr->layout_len;
raw_btf = malloc(btf_len);
if (!raw_btf)
return -ENOMEM;
- memcpy(raw_btf, &hdr, sizeof(hdr));
- memcpy(raw_btf + hdr.hdr_len, raw_types, hdr.type_len);
- memcpy(raw_btf + hdr.hdr_len + hdr.type_len, str_sec, hdr.str_len);
+ memcpy(raw_btf, hdr, sizeof(*hdr));
+ memcpy(raw_btf + hdr->hdr_len + hdr->type_off, raw_types, hdr->type_len);
+ memcpy(raw_btf + hdr->hdr_len + hdr->str_off, str_sec, hdr->str_len);
+ if (layout_sec)
+ memcpy(raw_btf + hdr->hdr_len + hdr->layout_off, layout_sec, hdr->layout_len);
btf_fd = bpf_btf_load(raw_btf, btf_len, &opts);
@@ -252,6 +246,22 @@ int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
return btf_fd;
}
+int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
+ const char *str_sec, size_t str_len,
+ int token_fd)
+{
+ struct btf_header hdr = {
+ .magic = BTF_MAGIC,
+ .version = BTF_VERSION,
+ .hdr_len = sizeof(struct btf_header),
+ .type_len = types_len,
+ .str_off = types_len,
+ .str_len = str_len,
+ };
+
+ return libbpf__load_raw_btf_hdr(&hdr, raw_types, str_sec, NULL, token_fd);
+}
+
static int load_local_storage_btf(void)
{
const char strs[] = "\0bpf_spin_lock\0val\0cnt\0l";
diff --git a/tools/lib/bpf/libbpf_version.h b/tools/lib/bpf/libbpf_version.h
index 99331e317dee..c446c0cd8cf9 100644
--- a/tools/lib/bpf/libbpf_version.h
+++ b/tools/lib/bpf/libbpf_version.h
@@ -4,6 +4,6 @@
#define __LIBBPF_VERSION_H
#define LIBBPF_MAJOR_VERSION 1
-#define LIBBPF_MINOR_VERSION 7
+#define LIBBPF_MINOR_VERSION 8
#endif /* __LIBBPF_VERSION_H */
diff --git a/tools/lib/bpf/relo_core.c b/tools/lib/bpf/relo_core.c
index 6eea5edba58a..0ccc8f548cba 100644
--- a/tools/lib/bpf/relo_core.c
+++ b/tools/lib/bpf/relo_core.c
@@ -292,6 +292,8 @@ int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
++spec_str;
if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1)
return -EINVAL;
+ if (access_idx < 0)
+ return -EINVAL;
if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
return -E2BIG;
spec_str += parsed_len;
diff --git a/tools/lib/bpf/usdt.c b/tools/lib/bpf/usdt.c
index d1524f6f54ae..e3710933fd52 100644
--- a/tools/lib/bpf/usdt.c
+++ b/tools/lib/bpf/usdt.c
@@ -262,6 +262,7 @@ struct usdt_manager {
bool has_bpf_cookie;
bool has_sema_refcnt;
bool has_uprobe_multi;
+ bool has_uprobe_syscall;
};
struct usdt_manager *usdt_manager_new(struct bpf_object *obj)
@@ -301,6 +302,13 @@ struct usdt_manager *usdt_manager_new(struct bpf_object *obj)
* usdt probes.
*/
man->has_uprobe_multi = kernel_supports(obj, FEAT_UPROBE_MULTI_LINK);
+
+ /*
+	 * Detect kernel support for the uprobe() syscall; its presence means
+	 * we can take advantage of faster nop5 uprobe handling.
+ * Added in: 56101b69c919 ("uprobes/x86: Add uprobe syscall to speed up uprobe")
+ */
+ man->has_uprobe_syscall = kernel_supports(obj, FEAT_UPROBE_SYSCALL);
return man;
}
@@ -585,13 +593,34 @@ static int parse_usdt_note(GElf_Nhdr *nhdr, const char *data, size_t name_off,
static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note, __u64 usdt_cookie);
-static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *path, pid_t pid,
- const char *usdt_provider, const char *usdt_name, __u64 usdt_cookie,
- struct usdt_target **out_targets, size_t *out_target_cnt)
+#if defined(__x86_64__)
+static bool has_nop_combo(int fd, long off)
+{
+ unsigned char nop_combo[6] = {
+ 0x90, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nop,nop5 */
+ };
+ unsigned char buf[6];
+
+ if (pread(fd, buf, 6, off) != 6)
+ return false;
+ return memcmp(buf, nop_combo, 6) == 0;
+}
+#else
+static bool has_nop_combo(int fd, long off)
+{
+ return false;
+}
+#endif
+
+static int collect_usdt_targets(struct usdt_manager *man, struct elf_fd *elf_fd, const char *path,
+ pid_t pid, const char *usdt_provider, const char *usdt_name,
+ __u64 usdt_cookie, struct usdt_target **out_targets,
+ size_t *out_target_cnt)
{
size_t off, name_off, desc_off, seg_cnt = 0, vma_seg_cnt = 0, target_cnt = 0;
struct elf_seg *segs = NULL, *vma_segs = NULL;
struct usdt_target *targets = NULL, *target;
+ Elf *elf = elf_fd->elf;
long base_addr = 0;
Elf_Scn *notes_scn, *base_scn;
GElf_Shdr base_shdr, notes_shdr;
@@ -784,6 +813,16 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
target = &targets[target_cnt];
memset(target, 0, sizeof(*target));
+ /*
+	 * The kernel supports the uprobe syscall and this USDT site starts
+	 * with the nop,nop5 instruction combo, so we can place the uprobe
+	 * directly on the nop5 (+1 byte) and get the probe optimized.
+ */
+ if (man->has_uprobe_syscall && has_nop_combo(elf_fd->fd, usdt_rel_ip)) {
+ usdt_abs_ip++;
+ usdt_rel_ip++;
+ }
+
target->abs_ip = usdt_abs_ip;
target->rel_ip = usdt_rel_ip;
target->sema_off = usdt_sema_off;
@@ -998,7 +1037,7 @@ struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct
/* discover USDT in given binary, optionally limiting
* activations to a given PID, if pid > 0
*/
- err = collect_usdt_targets(man, elf_fd.elf, path, pid, usdt_provider, usdt_name,
+ err = collect_usdt_targets(man, &elf_fd, path, pid, usdt_provider, usdt_name,
usdt_cookie, &targets, &target_cnt);
if (err <= 0) {
err = (err == 0) ? -ENOENT : err;
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index a3ea98211ea6..bfdc5518ecc8 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -47,3 +47,5 @@ verification_cert.h
*.BTF
*.BTF_ids
*.BTF.base
+usdt_1
+usdt_2
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 52e05b256040..78e60040811e 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -41,6 +41,8 @@ LIBELF_LIBS := $(shell $(PKG_CONFIG) libelf --libs 2>/dev/null || echo -lelf)
SKIP_DOCS ?=
SKIP_LLVM ?=
+SKIP_LIBBFD ?=
+SKIP_CRYPTO ?=
ifeq ($(srctree),)
srctree := $(patsubst %/,%,$(dir $(CURDIR)))
@@ -111,14 +113,12 @@ TEST_PROGS := test_kmod.sh \
test_lirc_mode2.sh \
test_xdping.sh \
test_bpftool_build.sh \
- test_bpftool.sh \
test_doc_build.sh \
test_xsk.sh \
test_xdp_features.sh
TEST_PROGS_EXTENDED := \
- ima_setup.sh verify_sig_setup.sh \
- test_bpftool.py
+ ima_setup.sh verify_sig_setup.sh
TEST_KMODS := bpf_testmod.ko bpf_test_no_cfi.ko bpf_test_modorder_x.ko \
bpf_test_modorder_y.ko bpf_test_rqspinlock.ko
@@ -336,6 +336,9 @@ $(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
OUTPUT=$(HOST_BUILD_DIR)/bpftool/ \
LIBBPF_OUTPUT=$(HOST_BUILD_DIR)/libbpf/ \
LIBBPF_DESTDIR=$(HOST_SCRATCH_DIR)/ \
+ SKIP_LLVM=$(SKIP_LLVM) \
+ SKIP_LIBBFD=$(SKIP_LIBBFD) \
+ SKIP_CRYPTO=$(SKIP_CRYPTO) \
prefix= DESTDIR=$(HOST_SCRATCH_DIR)/ install-bin
ifneq ($(CROSS_COMPILE),)
@@ -348,6 +351,9 @@ $(CROSS_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
OUTPUT=$(BUILD_DIR)/bpftool/ \
LIBBPF_OUTPUT=$(BUILD_DIR)/libbpf/ \
LIBBPF_DESTDIR=$(SCRATCH_DIR)/ \
+ SKIP_LLVM=$(SKIP_LLVM) \
+ SKIP_LIBBFD=$(SKIP_LIBBFD) \
+ SKIP_CRYPTO=$(SKIP_CRYPTO) \
prefix= DESTDIR=$(SCRATCH_DIR)/ install-bin
endif
@@ -756,7 +762,8 @@ TRUNNER_EXTRA_SOURCES := test_progs.c \
$(VERIFY_SIG_HDR) \
flow_dissector_load.h \
ip_check_defrag_frags.h \
- bpftool_helpers.c
+ bpftool_helpers.c \
+ usdt_1.c usdt_2.c
TRUNNER_LIB_SOURCES := find_bit.c
TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read \
$(OUTPUT)/liburandom_read.so \
@@ -880,6 +887,8 @@ $(OUTPUT)/bench: $(OUTPUT)/bench.o \
$(OUTPUT)/bench_bpf_crypto.o \
$(OUTPUT)/bench_sockmap.o \
$(OUTPUT)/bench_lpm_trie_map.o \
+ $(OUTPUT)/usdt_1.o \
+ $(OUTPUT)/usdt_2.o \
#
$(call msg,BINARY,,$@)
$(Q)$(CC) $(CFLAGS) $(LDFLAGS) $(filter %.a %.o,$^) $(LDLIBS) -o $@
diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c
index 8368bd3a0665..029b3e21f438 100644
--- a/tools/testing/selftests/bpf/bench.c
+++ b/tools/testing/selftests/bpf/bench.c
@@ -541,6 +541,8 @@ extern const struct bench bench_trig_uprobe_nop5;
extern const struct bench bench_trig_uretprobe_nop5;
extern const struct bench bench_trig_uprobe_multi_nop5;
extern const struct bench bench_trig_uretprobe_multi_nop5;
+extern const struct bench bench_trig_usdt_nop;
+extern const struct bench bench_trig_usdt_nop5;
#endif
extern const struct bench bench_rb_libbpf;
@@ -617,6 +619,8 @@ static const struct bench *benchs[] = {
&bench_trig_uretprobe_nop5,
&bench_trig_uprobe_multi_nop5,
&bench_trig_uretprobe_multi_nop5,
+ &bench_trig_usdt_nop,
+ &bench_trig_usdt_nop5,
#endif
/* ringbuf/perfbuf benchmarks */
&bench_rb_libbpf,
diff --git a/tools/testing/selftests/bpf/benchs/bench_local_storage_create.c b/tools/testing/selftests/bpf/benchs/bench_local_storage_create.c
index e2ff8ea1cb79..71e38000ee06 100644
--- a/tools/testing/selftests/bpf/benchs/bench_local_storage_create.c
+++ b/tools/testing/selftests/bpf/benchs/bench_local_storage_create.c
@@ -101,11 +101,6 @@ static void setup(void)
}
}
- if (!bpf_program__attach(skel->progs.kmalloc)) {
- fprintf(stderr, "Error attaching bpf program\n");
- exit(1);
- }
-
threads = calloc(env.producer_cnt, sizeof(*threads));
if (!threads) {
@@ -140,7 +135,6 @@ static void setup(void)
static void measure(struct bench_res *res)
{
res->hits = atomic_swap(&skel->bss->create_cnts, 0);
- res->drops = atomic_swap(&skel->bss->kmalloc_cnts, 0);
}
static void *sk_producer(void *input)
@@ -203,28 +197,25 @@ static void *producer(void *input)
static void report_progress(int iter, struct bench_res *res, long delta_ns)
{
- double creates_per_sec, kmallocs_per_create;
+ double creates_per_sec;
creates_per_sec = res->hits / 1000.0 / (delta_ns / 1000000000.0);
- kmallocs_per_create = (double)res->drops / res->hits;
printf("Iter %3d (%7.3lfus): ",
iter, (delta_ns - 1000000000) / 1000.0);
- printf("creates %8.3lfk/s (%7.3lfk/prod), ",
+ printf("creates %8.3lfk/s (%7.3lfk/prod)\n",
creates_per_sec, creates_per_sec / env.producer_cnt);
- printf("%3.2lf kmallocs/create\n", kmallocs_per_create);
}
static void report_final(struct bench_res res[], int res_cnt)
{
double creates_mean = 0.0, creates_stddev = 0.0;
- long total_creates = 0, total_kmallocs = 0;
+ long total_creates = 0;
int i;
for (i = 0; i < res_cnt; i++) {
creates_mean += res[i].hits / 1000.0 / (0.0 + res_cnt);
total_creates += res[i].hits;
- total_kmallocs += res[i].drops;
}
if (res_cnt > 1) {
@@ -234,9 +225,9 @@ static void report_final(struct bench_res res[], int res_cnt)
(res_cnt - 1.0);
creates_stddev = sqrt(creates_stddev);
}
- printf("Summary: creates %8.3lf \u00B1 %5.3lfk/s (%7.3lfk/prod), ",
- creates_mean, creates_stddev, creates_mean / env.producer_cnt);
- printf("%4.2lf kmallocs/create\n", (double)total_kmallocs / total_creates);
+ printf("Summary: creates %8.3lf \u00B1 %5.3lfk/s (%7.3lfk/prod), %ld total\n",
+ creates_mean, creates_stddev, creates_mean / env.producer_cnt,
+ total_creates);
if (create_owner_errs || skel->bss->create_errs)
printf("%s() errors %ld create_errs %ld\n",
storage_type == BPF_MAP_TYPE_SK_STORAGE ?
diff --git a/tools/testing/selftests/bpf/benchs/bench_trigger.c b/tools/testing/selftests/bpf/benchs/bench_trigger.c
index f74b313d6ae4..2f22ec61667b 100644
--- a/tools/testing/selftests/bpf/benchs/bench_trigger.c
+++ b/tools/testing/selftests/bpf/benchs/bench_trigger.c
@@ -407,6 +407,23 @@ static void *uprobe_producer_nop5(void *input)
uprobe_target_nop5();
return NULL;
}
+
+void usdt_1(void);
+void usdt_2(void);
+
+static void *uprobe_producer_usdt_nop(void *input)
+{
+ while (true)
+ usdt_1();
+ return NULL;
+}
+
+static void *uprobe_producer_usdt_nop5(void *input)
+{
+ while (true)
+ usdt_2();
+ return NULL;
+}
#endif
static void usetup(bool use_retprobe, bool use_multi, void *target_addr)
@@ -544,6 +561,47 @@ static void uretprobe_multi_nop5_setup(void)
{
usetup(true, true /* use_multi */, &uprobe_target_nop5);
}
+
+static void usdt_setup(const char *name)
+{
+ struct bpf_link *link;
+ int err;
+
+ setup_libbpf();
+
+ ctx.skel = trigger_bench__open();
+ if (!ctx.skel) {
+ fprintf(stderr, "failed to open skeleton\n");
+ exit(1);
+ }
+
+ bpf_program__set_autoload(ctx.skel->progs.bench_trigger_usdt, true);
+
+ err = trigger_bench__load(ctx.skel);
+ if (err) {
+ fprintf(stderr, "failed to load skeleton\n");
+ exit(1);
+ }
+
+ link = bpf_program__attach_usdt(ctx.skel->progs.bench_trigger_usdt,
+ 0 /*self*/, "/proc/self/exe",
+ "optimized_attach", name, NULL);
+ if (libbpf_get_error(link)) {
+ fprintf(stderr, "failed to attach optimized_attach:%s usdt probe\n", name);
+ exit(1);
+ }
+ ctx.skel->links.bench_trigger_usdt = link;
+}
+
+static void usdt_nop_setup(void)
+{
+ usdt_setup("usdt_1");
+}
+
+static void usdt_nop5_setup(void)
+{
+ usdt_setup("usdt_2");
+}
#endif
const struct bench bench_trig_syscall_count = {
@@ -611,4 +669,6 @@ BENCH_TRIG_USERMODE(uprobe_nop5, nop5, "uprobe-nop5");
BENCH_TRIG_USERMODE(uretprobe_nop5, nop5, "uretprobe-nop5");
BENCH_TRIG_USERMODE(uprobe_multi_nop5, nop5, "uprobe-multi-nop5");
BENCH_TRIG_USERMODE(uretprobe_multi_nop5, nop5, "uretprobe-multi-nop5");
+BENCH_TRIG_USERMODE(usdt_nop, usdt_nop, "usdt-nop");
+BENCH_TRIG_USERMODE(usdt_nop5, usdt_nop5, "usdt-nop5");
#endif
diff --git a/tools/testing/selftests/bpf/benchs/run_bench_uprobes.sh b/tools/testing/selftests/bpf/benchs/run_bench_uprobes.sh
index 03f55405484b..9ec59423b949 100755
--- a/tools/testing/selftests/bpf/benchs/run_bench_uprobes.sh
+++ b/tools/testing/selftests/bpf/benchs/run_bench_uprobes.sh
@@ -2,7 +2,7 @@
set -eufo pipefail
-for i in usermode-count syscall-count {uprobe,uretprobe}-{nop,push,ret,nop5}
+for i in usermode-count syscall-count {uprobe,uretprobe}-{nop,push,ret,nop5} usdt-nop usdt-nop5
do
summary=$(sudo ./bench -w2 -d5 -a trig-$i | tail -n1 | cut -d'(' -f1 | cut -d' ' -f3-)
printf "%-15s: %s\n" $i "$summary"
diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
index 4b7210c318dd..2234bd6bc9d3 100644
--- a/tools/testing/selftests/bpf/bpf_experimental.h
+++ b/tools/testing/selftests/bpf/bpf_experimental.h
@@ -8,156 +8,11 @@
#define __contains(name, node) __attribute__((btf_decl_tag("contains:" #name ":" #node)))
-/* Description
- * Allocates an object of the type represented by 'local_type_id' in
- * program BTF. User may use the bpf_core_type_id_local macro to pass the
- * type ID of a struct in program BTF.
- *
- * The 'local_type_id' parameter must be a known constant.
- * The 'meta' parameter is rewritten by the verifier, no need for BPF
- * program to set it.
- * Returns
- * A pointer to an object of the type corresponding to the passed in
- * 'local_type_id', or NULL on failure.
- */
-extern void *bpf_obj_new_impl(__u64 local_type_id, void *meta) __ksym;
-
-/* Convenience macro to wrap over bpf_obj_new_impl */
-#define bpf_obj_new(type) ((type *)bpf_obj_new_impl(bpf_core_type_id_local(type), NULL))
-
-/* Description
- * Free an allocated object. All fields of the object that require
- * destruction will be destructed before the storage is freed.
- *
- * The 'meta' parameter is rewritten by the verifier, no need for BPF
- * program to set it.
- * Returns
- * Void.
- */
-extern void bpf_obj_drop_impl(void *kptr, void *meta) __ksym;
-
-/* Convenience macro to wrap over bpf_obj_drop_impl */
-#define bpf_obj_drop(kptr) bpf_obj_drop_impl(kptr, NULL)
-
-/* Description
- * Increment the refcount on a refcounted local kptr, turning the
- * non-owning reference input into an owning reference in the process.
- *
- * The 'meta' parameter is rewritten by the verifier, no need for BPF
- * program to set it.
- * Returns
- * An owning reference to the object pointed to by 'kptr'
- */
-extern void *bpf_refcount_acquire_impl(void *kptr, void *meta) __ksym;
-
-/* Convenience macro to wrap over bpf_refcount_acquire_impl */
-#define bpf_refcount_acquire(kptr) bpf_refcount_acquire_impl(kptr, NULL)
-
-/* Description
- * Add a new entry to the beginning of the BPF linked list.
- *
- * The 'meta' and 'off' parameters are rewritten by the verifier, no need
- * for BPF programs to set them
- * Returns
- * 0 if the node was successfully added
- * -EINVAL if the node wasn't added because it's already in a list
- */
-extern int bpf_list_push_front_impl(struct bpf_list_head *head,
- struct bpf_list_node *node,
- void *meta, __u64 off) __ksym;
-
-/* Convenience macro to wrap over bpf_list_push_front_impl */
-#define bpf_list_push_front(head, node) bpf_list_push_front_impl(head, node, NULL, 0)
-
-/* Description
- * Add a new entry to the end of the BPF linked list.
- *
- * The 'meta' and 'off' parameters are rewritten by the verifier, no need
- * for BPF programs to set them
- * Returns
- * 0 if the node was successfully added
- * -EINVAL if the node wasn't added because it's already in a list
- */
-extern int bpf_list_push_back_impl(struct bpf_list_head *head,
- struct bpf_list_node *node,
- void *meta, __u64 off) __ksym;
-
-/* Convenience macro to wrap over bpf_list_push_back_impl */
-#define bpf_list_push_back(head, node) bpf_list_push_back_impl(head, node, NULL, 0)
-
-/* Description
- * Remove the entry at the beginning of the BPF linked list.
- * Returns
- * Pointer to bpf_list_node of deleted entry, or NULL if list is empty.
- */
-extern struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) __ksym;
+/* Convenience macro to wrap over bpf_obj_new */
+#define bpf_obj_new(type) ((type *)bpf_obj_new(bpf_core_type_id_local(type)))
-/* Description
- * Remove the entry at the end of the BPF linked list.
- * Returns
- * Pointer to bpf_list_node of deleted entry, or NULL if list is empty.
- */
-extern struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) __ksym;
-
-/* Description
- * Remove 'node' from rbtree with root 'root'
- * Returns
- * Pointer to the removed node, or NULL if 'root' didn't contain 'node'
- */
-extern struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
- struct bpf_rb_node *node) __ksym;
-
-/* Description
- * Add 'node' to rbtree with root 'root' using comparator 'less'
- *
- * The 'meta' and 'off' parameters are rewritten by the verifier, no need
- * for BPF programs to set them
- * Returns
- * 0 if the node was successfully added
- * -EINVAL if the node wasn't added because it's already in a tree
- */
-extern int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
- bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
- void *meta, __u64 off) __ksym;
-
-/* Convenience macro to wrap over bpf_rbtree_add_impl */
-#define bpf_rbtree_add(head, node, less) bpf_rbtree_add_impl(head, node, less, NULL, 0)
-
-/* Description
- * Return the first (leftmost) node in input tree
- * Returns
- * Pointer to the node, which is _not_ removed from the tree. If the tree
- * contains no nodes, returns NULL.
- */
-extern struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) __ksym;
-
-/* Description
- * Allocates a percpu object of the type represented by 'local_type_id' in
- * program BTF. User may use the bpf_core_type_id_local macro to pass the
- * type ID of a struct in program BTF.
- *
- * The 'local_type_id' parameter must be a known constant.
- * The 'meta' parameter is rewritten by the verifier, no need for BPF
- * program to set it.
- * Returns
- * A pointer to a percpu object of the type corresponding to the passed in
- * 'local_type_id', or NULL on failure.
- */
-extern void *bpf_percpu_obj_new_impl(__u64 local_type_id, void *meta) __ksym;
-
-/* Convenience macro to wrap over bpf_percpu_obj_new_impl */
-#define bpf_percpu_obj_new(type) ((type __percpu_kptr *)bpf_percpu_obj_new_impl(bpf_core_type_id_local(type), NULL))
-
-/* Description
- * Free an allocated percpu object. All fields of the object that require
- * destruction will be destructed before the storage is freed.
- *
- * The 'meta' parameter is rewritten by the verifier, no need for BPF
- * program to set it.
- * Returns
- * Void.
- */
-extern void bpf_percpu_obj_drop_impl(void *kptr, void *meta) __ksym;
+/* Convenience macro to wrap over bpf_percpu_obj_new */
+#define bpf_percpu_obj_new(type) ((type __percpu_kptr *)bpf_percpu_obj_new(bpf_core_type_id_local(type)))
struct bpf_iter_task_vma;
@@ -167,9 +22,6 @@ extern int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it,
extern struct vm_area_struct *bpf_iter_task_vma_next(struct bpf_iter_task_vma *it) __ksym;
extern void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it) __ksym;
-/* Convenience macro to wrap over bpf_obj_drop_impl */
-#define bpf_percpu_obj_drop(kptr) bpf_percpu_obj_drop_impl(kptr, NULL)
-
/* Description
* Throw a BPF exception from the program, immediately terminating its
* execution and unwinding the stack. The supplied 'cookie' parameter
@@ -627,6 +479,10 @@ struct task_struct___preempt_rt {
int softirq_disable_cnt;
} __attribute__((preserve_access_index));
+#ifdef bpf_target_s390
+extern struct lowcore *bpf_get_lowcore(void) __weak __ksym;
+#endif
+
static inline int get_preempt_count(void)
{
#if defined(bpf_target_x86)
@@ -645,6 +501,10 @@ static inline int get_preempt_count(void)
bpf_this_cpu_ptr(&pcpu_hot))->preempt_count;
#elif defined(bpf_target_arm64)
return bpf_get_current_task_btf()->thread_info.preempt.count;
+#elif defined(bpf_target_powerpc)
+ return bpf_get_current_task_btf()->thread_info.preempt_count;
+#elif defined(bpf_target_s390)
+ return bpf_get_lowcore()->preempt_count;
#endif
return 0;
}
@@ -653,6 +513,8 @@ static inline int get_preempt_count(void)
* Report whether it is in interrupt context. Only works on the following archs:
* * x86
* * arm64
+ * * powerpc64
+ * * s390x
*/
static inline int bpf_in_interrupt(void)
{
@@ -672,6 +534,8 @@ static inline int bpf_in_interrupt(void)
* Report whether it is in NMI context. Only works on the following archs:
* * x86
* * arm64
+ * * powerpc64
+ * * s390x
*/
static inline int bpf_in_nmi(void)
{
@@ -682,6 +546,8 @@ static inline int bpf_in_nmi(void)
* Report whether it is in hard IRQ context. Only works on the following archs:
* * x86
* * arm64
+ * * powerpc64
+ * * s390x
*/
static inline int bpf_in_hardirq(void)
{
@@ -692,6 +558,8 @@ static inline int bpf_in_hardirq(void)
* Report whether it is in softirq context. Only works on the following archs:
* * x86
* * arm64
+ * * powerpc64
+ * * s390x
*/
static inline int bpf_in_serving_softirq(void)
{
@@ -710,6 +578,8 @@ static inline int bpf_in_serving_softirq(void)
* Report whether it is in task context. Only works on the following archs:
* * x86
* * arm64
+ * * powerpc64
+ * * s390x
*/
static inline int bpf_in_task(void)
{
diff --git a/tools/testing/selftests/bpf/bpftool_helpers.c b/tools/testing/selftests/bpf/bpftool_helpers.c
index 929fc257f431..0a2a4f0a2794 100644
--- a/tools/testing/selftests/bpf/bpftool_helpers.c
+++ b/tools/testing/selftests/bpf/bpftool_helpers.c
@@ -2,18 +2,18 @@
#include <unistd.h>
#include <string.h>
#include <stdbool.h>
+#include <limits.h>
#include "bpf_util.h"
#include "bpftool_helpers.h"
-#define BPFTOOL_PATH_MAX_LEN 64
-#define BPFTOOL_FULL_CMD_MAX_LEN 512
+#define BPFTOOL_FULL_CMD_MAX_LEN (PATH_MAX * 2)
#define BPFTOOL_DEFAULT_PATH "tools/sbin/bpftool"
static int detect_bpftool_path(char *buffer, size_t size)
{
- char tmp[BPFTOOL_PATH_MAX_LEN];
+ char tmp[PATH_MAX];
const char *env_path;
/* First, check if BPFTOOL environment variable is set */
@@ -29,7 +29,7 @@ static int detect_bpftool_path(char *buffer, size_t size)
/* Check default bpftool location (will work if we are running the
* default flavor of test_progs)
*/
- snprintf(tmp, BPFTOOL_PATH_MAX_LEN, "./%s", BPFTOOL_DEFAULT_PATH);
+ snprintf(tmp, sizeof(tmp), "./%s", BPFTOOL_DEFAULT_PATH);
if (access(tmp, X_OK) == 0) {
strscpy(buffer, tmp, size);
return 0;
@@ -38,7 +38,7 @@ static int detect_bpftool_path(char *buffer, size_t size)
/* Check alternate bpftool location (will work if we are running a
* specific flavor of test_progs, e.g. cpuv4 or no_alu32)
*/
- snprintf(tmp, BPFTOOL_PATH_MAX_LEN, "../%s", BPFTOOL_DEFAULT_PATH);
+ snprintf(tmp, sizeof(tmp), "../%s", BPFTOOL_DEFAULT_PATH);
if (access(tmp, X_OK) == 0) {
strscpy(buffer, tmp, size);
return 0;
@@ -50,7 +50,7 @@ static int detect_bpftool_path(char *buffer, size_t size)
static int run_command(char *args, char *output_buf, size_t output_max_len)
{
- static char bpftool_path[BPFTOOL_PATH_MAX_LEN] = {0};
+ static char bpftool_path[PATH_MAX] = {};
bool suppress_output = !(output_buf && output_max_len);
char command[BPFTOOL_FULL_CMD_MAX_LEN];
FILE *f;
@@ -60,7 +60,7 @@ static int run_command(char *args, char *output_buf, size_t output_max_len)
if (bpftool_path[0] == 0 && detect_bpftool_path(bpftool_path, sizeof(bpftool_path)))
return 1;
- ret = snprintf(command, BPFTOOL_FULL_CMD_MAX_LEN, "%s %s%s",
+ ret = snprintf(command, sizeof(command), "%s %s%s",
bpftool_path, args,
suppress_output ? " > /dev/null 2>&1" : "");
@@ -84,4 +84,3 @@ int get_bpftool_command_output(char *args, char *output_buf, size_t output_max_l
{
return run_command(args, output_buf, output_max_len);
}
-
diff --git a/tools/testing/selftests/bpf/cgroup_iter_memcg.h b/tools/testing/selftests/bpf/cgroup_iter_memcg.h
index 3f59b127943b..ff20ec537164 100644
--- a/tools/testing/selftests/bpf/cgroup_iter_memcg.h
+++ b/tools/testing/selftests/bpf/cgroup_iter_memcg.h
@@ -9,8 +9,6 @@ struct memcg_query {
unsigned long nr_shmem;
unsigned long nr_file_pages;
unsigned long nr_file_mapped;
- /* some memcg_stat_item */
- unsigned long memcg_kmem;
/* some vm_event_item */
unsigned long pgfault;
};
diff --git a/tools/testing/selftests/bpf/prog_tests/access_variable_array.c b/tools/testing/selftests/bpf/prog_tests/access_variable_array.c
deleted file mode 100644
index 08131782437c..000000000000
--- a/tools/testing/selftests/bpf/prog_tests/access_variable_array.c
+++ /dev/null
@@ -1,16 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2022 Bytedance */
-
-#include <test_progs.h>
-#include "test_access_variable_array.skel.h"
-
-void test_access_variable_array(void)
-{
- struct test_access_variable_array *skel;
-
- skel = test_access_variable_array__open_and_load();
- if (!ASSERT_OK_PTR(skel, "test_access_variable_array__open_and_load"))
- return;
-
- test_access_variable_array__destroy(skel);
-}
diff --git a/tools/testing/selftests/bpf/prog_tests/align.c b/tools/testing/selftests/bpf/prog_tests/align.c
deleted file mode 100644
index 24c509ce4e5b..000000000000
--- a/tools/testing/selftests/bpf/prog_tests/align.c
+++ /dev/null
@@ -1,712 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <test_progs.h>
-
-#define MAX_INSNS 512
-#define MAX_MATCHES 24
-
-struct bpf_reg_match {
- unsigned int line;
- const char *reg;
- const char *match;
-};
-
-struct bpf_align_test {
- const char *descr;
- struct bpf_insn insns[MAX_INSNS];
- enum {
- UNDEF,
- ACCEPT,
- REJECT
- } result;
- enum bpf_prog_type prog_type;
- /* Matches must be in order of increasing line */
- struct bpf_reg_match matches[MAX_MATCHES];
-};
-
-static struct bpf_align_test tests[] = {
- /* Four tests of known constants. These aren't staggeringly
- * interesting since we track exact values now.
- */
- {
- .descr = "mov",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, 2),
- BPF_MOV64_IMM(BPF_REG_3, 4),
- BPF_MOV64_IMM(BPF_REG_3, 8),
- BPF_MOV64_IMM(BPF_REG_3, 16),
- BPF_MOV64_IMM(BPF_REG_3, 32),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .matches = {
- {0, "R1", "ctx()"},
- {0, "R10", "fp0"},
- {0, "R3", "2"},
- {1, "R3", "4"},
- {2, "R3", "8"},
- {3, "R3", "16"},
- {4, "R3", "32"},
- },
- },
- {
- .descr = "shift",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, 1),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_3, 4),
- BPF_MOV64_IMM(BPF_REG_4, 32),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .matches = {
- {0, "R1", "ctx()"},
- {0, "R10", "fp0"},
- {0, "R3", "1"},
- {1, "R3", "2"},
- {2, "R3", "4"},
- {3, "R3", "8"},
- {4, "R3", "16"},
- {5, "R3", "1"},
- {6, "R4", "32"},
- {7, "R4", "16"},
- {8, "R4", "8"},
- {9, "R4", "4"},
- {10, "R4", "2"},
- },
- },
- {
- .descr = "addsub",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, 4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 2),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .matches = {
- {0, "R1", "ctx()"},
- {0, "R10", "fp0"},
- {0, "R3", "4"},
- {1, "R3", "8"},
- {2, "R3", "10"},
- {3, "R4", "8"},
- {4, "R4", "12"},
- {5, "R4", "14"},
- },
- },
- {
- .descr = "mul",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, 7),
- BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 1),
- BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 2),
- BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .matches = {
- {0, "R1", "ctx()"},
- {0, "R10", "fp0"},
- {0, "R3", "7"},
- {1, "R3", "7"},
- {2, "R3", "14"},
- {3, "R3", "56"},
- },
- },
-
- /* Tests using unknown values */
-#define PREP_PKT_POINTERS \
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \
- offsetof(struct __sk_buff, data)), \
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \
- offsetof(struct __sk_buff, data_end))
-
-#define LOAD_UNKNOWN(DST_REG) \
- PREP_PKT_POINTERS, \
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), \
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), \
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 1), \
- BPF_EXIT_INSN(), \
- BPF_LDX_MEM(BPF_B, DST_REG, BPF_REG_2, 0)
-
- {
- .descr = "unknown shift",
- .insns = {
- LOAD_UNKNOWN(BPF_REG_3),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
- LOAD_UNKNOWN(BPF_REG_4),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 5),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .matches = {
- {6, "R0", "pkt(off=8,r=8)"},
- {6, "R3", "var_off=(0x0; 0xff)"},
- {7, "R3", "var_off=(0x0; 0x1fe)"},
- {8, "R3", "var_off=(0x0; 0x3fc)"},
- {9, "R3", "var_off=(0x0; 0x7f8)"},
- {10, "R3", "var_off=(0x0; 0xff0)"},
- {12, "R3", "pkt_end()"},
- {17, "R4", "var_off=(0x0; 0xff)"},
- {18, "R4", "var_off=(0x0; 0x1fe0)"},
- {19, "R4", "var_off=(0x0; 0xff0)"},
- {20, "R4", "var_off=(0x0; 0x7f8)"},
- {21, "R4", "var_off=(0x0; 0x3fc)"},
- {22, "R4", "var_off=(0x0; 0x1fe)"},
- },
- },
- {
- .descr = "unknown mul",
- .insns = {
- LOAD_UNKNOWN(BPF_REG_3),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
- BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 1),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
- BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
- BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 4),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
- BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 8),
- BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .matches = {
- {6, "R3", "var_off=(0x0; 0xff)"},
- {7, "R4", "var_off=(0x0; 0xff)"},
- {8, "R4", "var_off=(0x0; 0xff)"},
- {9, "R4", "var_off=(0x0; 0xff)"},
- {10, "R4", "var_off=(0x0; 0x1fe)"},
- {11, "R4", "var_off=(0x0; 0xff)"},
- {12, "R4", "var_off=(0x0; 0x3fc)"},
- {13, "R4", "var_off=(0x0; 0xff)"},
- {14, "R4", "var_off=(0x0; 0x7f8)"},
- {15, "R4", "var_off=(0x0; 0xff0)"},
- },
- },
- {
- .descr = "packet const offset",
- .insns = {
- PREP_PKT_POINTERS,
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
-
- BPF_MOV64_IMM(BPF_REG_0, 0),
-
- /* Skip over ethernet header. */
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
- BPF_EXIT_INSN(),
-
- BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 0),
- BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 2),
- BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 3),
- BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 0),
- BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 2),
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),
-
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .matches = {
- {2, "R5", "pkt(r=0)"},
- {4, "R5", "pkt(off=14,r=0)"},
- {5, "R4", "pkt(off=14,r=0)"},
- {9, "R2", "pkt(r=18)"},
- {10, "R5", "pkt(off=14,r=18)"},
- {10, "R4", "var_off=(0x0; 0xff)"},
- {13, "R4", "var_off=(0x0; 0xffff)"},
- {14, "R4", "var_off=(0x0; 0xffff)"},
- },
- },
- {
- .descr = "packet variable offset",
- .insns = {
- LOAD_UNKNOWN(BPF_REG_6),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
-
- /* First, add a constant to the R5 packet pointer,
- * then a variable with a known alignment.
- */
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),
-
- /* Now, test in the other direction. Adding first
- * the variable offset to R5, then the constant.
- */
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),
-
- /* Test multiple accumulations of unknown values
- * into a packet pointer.
- */
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),
-
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .matches = {
- /* Calculated offset in R6 has unknown value, but known
- * alignment of 4.
- */
- {6, "R2", "pkt(r=8)"},
- {7, "R6", "var_off=(0x0; 0x3fc)"},
- /* Offset is added to packet pointer R5, resulting in
- * known fixed offset, and variable offset from R6.
- */
- {11, "R5", "pkt(id=1,off=14,"},
- /* At the time the word size load is performed from R5,
-		 * its total offset is NET_IP_ALIGN + reg->off (0) +
- * reg->aux_off (14) which is 16. Then the variable
- * offset is considered using reg->aux_off_align which
- * is 4 and meets the load's requirements.
- */
- {15, "R4", "var_off=(0x0; 0x3fc)"},
- {15, "R5", "var_off=(0x0; 0x3fc)"},
- /* Variable offset is added to R5 packet pointer,
- * resulting in auxiliary alignment of 4. To avoid BPF
- * verifier's precision backtracking logging
- * interfering we also have a no-op R4 = R5
- * instruction to validate R5 state. We also check
- * that R4 is what it should be in such case.
- */
- {18, "R4", "var_off=(0x0; 0x3fc)"},
- {18, "R5", "var_off=(0x0; 0x3fc)"},
- /* Constant offset is added to R5, resulting in
- * reg->off of 14.
- */
- {19, "R5", "pkt(id=2,off=14,"},
- /* At the time the word size load is performed from R5,
- * its total fixed offset is NET_IP_ALIGN + reg->off
- * (14) which is 16. Then the variable offset is 4-byte
- * aligned, so the total offset is 4-byte aligned and
- * meets the load's requirements.
- */
- {24, "R4", "var_off=(0x0; 0x3fc)"},
- {24, "R5", "var_off=(0x0; 0x3fc)"},
- /* Constant offset is added to R5 packet pointer,
- * resulting in reg->off value of 14.
- */
- {26, "R5", "pkt(off=14,r=8)"},
- /* Variable offset is added to R5, resulting in a
- * variable offset of (4n). See comment for insn #18
- * for R4 = R5 trick.
- */
- {28, "R4", "var_off=(0x0; 0x3fc)"},
- {28, "R5", "var_off=(0x0; 0x3fc)"},
- /* Constant is added to R5 again, setting reg->off to 18. */
- {29, "R5", "pkt(id=3,off=18,"},
- /* And once more we add a variable; resulting var_off
- * is still (4n), fixed offset is not changed.
- * Also, we create a new reg->id.
- */
- {31, "R4", "var_off=(0x0; 0x7fc)"},
- {31, "R5", "var_off=(0x0; 0x7fc)"},
- /* At the time the word size load is performed from R5,
- * its total fixed offset is NET_IP_ALIGN + reg->off (18)
- * which is 20. Then the variable offset is (4n), so
- * the total offset is 4-byte aligned and meets the
- * load's requirements.
- */
- {35, "R4", "var_off=(0x0; 0x7fc)"},
- {35, "R5", "var_off=(0x0; 0x7fc)"},
- },
- },
- {
- .descr = "packet variable offset 2",
- .insns = {
- /* Create an unknown offset, (4n+2)-aligned */
- LOAD_UNKNOWN(BPF_REG_6),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
- /* Add it to the packet pointer */
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
- /* Check bounds and perform a read */
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
- /* Make a (4n) offset from the value we just read */
- BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xff),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
- /* Add it to the packet pointer */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
- /* Check bounds and perform a read */
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .matches = {
- /* Calculated offset in R6 has unknown value, but known
- * alignment of 4.
- */
- {6, "R2", "pkt(r=8)"},
- {7, "R6", "var_off=(0x0; 0x3fc)"},
- /* Adding 14 makes R6 be (4n+2) */
- {8, "R6", "var_off=(0x2; 0x7fc)"},
- /* Packet pointer has (4n+2) offset */
- {11, "R5", "var_off=(0x2; 0x7fc)"},
- {12, "R4", "var_off=(0x2; 0x7fc)"},
- /* At the time the word size load is performed from R5,
- * its total fixed offset is NET_IP_ALIGN + reg->off (0)
- * which is 2. Then the variable offset is (4n+2), so
- * the total offset is 4-byte aligned and meets the
- * load's requirements.
- */
- {15, "R5", "var_off=(0x2; 0x7fc)"},
- /* Newly read value in R6 was shifted left by 2, so has
- * known alignment of 4.
- */
- {17, "R6", "var_off=(0x0; 0x3fc)"},
- /* Added (4n) to packet pointer's (4n+2) var_off, giving
- * another (4n+2).
- */
- {19, "R5", "var_off=(0x2; 0xffc)"},
- {20, "R4", "var_off=(0x2; 0xffc)"},
- /* At the time the word size load is performed from R5,
- * its total fixed offset is NET_IP_ALIGN + reg->off (0)
- * which is 2. Then the variable offset is (4n+2), so
- * the total offset is 4-byte aligned and meets the
- * load's requirements.
- */
- {23, "R5", "var_off=(0x2; 0xffc)"},
- },
- },
- {
- .descr = "dubious pointer arithmetic",
- .insns = {
- PREP_PKT_POINTERS,
- BPF_MOV64_IMM(BPF_REG_0, 0),
- /* (ptr - ptr) << 2 */
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_2),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_5, 2),
- /* We have a (4n) value. Let's make a packet offset
- * out of it. First add 14, to make it a (4n+2)
- */
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
- /* Then make sure it's nonnegative */
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_5, 0, 1),
- BPF_EXIT_INSN(),
- /* Add it to packet pointer */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
- /* Check bounds and perform a read */
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = REJECT,
- .matches = {
- {3, "R5", "pkt_end()"},
- /* (ptr - ptr) << 2 == unknown, (4n) */
- {5, "R5", "var_off=(0x0; 0xfffffffffffffffc)"},
- /* (4n) + 14 == (4n+2). We blow our bounds, because
- * the add could overflow.
- */
- {6, "R5", "var_off=(0x2; 0xfffffffffffffffc)"},
- /* Checked s>=0 */
- {9, "R5", "var_off=(0x2; 0x7ffffffffffffffc)"},
- /* packet pointer + nonnegative (4n+2) */
- {11, "R6", "var_off=(0x2; 0x7ffffffffffffffc)"},
- {12, "R4", "var_off=(0x2; 0x7ffffffffffffffc)"},
- /* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
- * We checked the bounds, but it might have been able
- * to overflow if the packet pointer started in the
- * upper half of the address space.
- * So we did not get a 'range' on R6, and the access
- * attempt will fail.
- */
- {15, "R6", "var_off=(0x2; 0x7ffffffffffffffc)"},
- }
- },
- {
- .descr = "variable subtraction",
- .insns = {
- /* Create an unknown offset, (4n+2)-aligned */
- LOAD_UNKNOWN(BPF_REG_6),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
- /* Create another unknown, (4n)-aligned, and subtract
- * it from the first one
- */
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_6, BPF_REG_7),
- /* Bounds-check the result */
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_6, 0, 1),
- BPF_EXIT_INSN(),
- /* Add it to the packet pointer */
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
- /* Check bounds and perform a read */
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .matches = {
- /* Calculated offset in R6 has unknown value, but known
- * alignment of 4.
- */
- {6, "R2", "pkt(r=8)"},
- {8, "R6", "var_off=(0x0; 0x3fc)"},
- /* Adding 14 makes R6 be (4n+2) */
- {9, "R6", "var_off=(0x2; 0x7fc)"},
- /* New unknown value in R7 is (4n) */
- {10, "R7", "var_off=(0x0; 0x3fc)"},
- /* Subtracting it from R6 blows our unsigned bounds */
- {11, "R6", "var_off=(0x2; 0xfffffffffffffffc)"},
- /* Checked s>= 0 */
- {14, "R6", "var_off=(0x2; 0x7fc)"},
- /* At the time the word size load is performed from R5,
- * its total fixed offset is NET_IP_ALIGN + reg->off (0)
- * which is 2. Then the variable offset is (4n+2), so
- * the total offset is 4-byte aligned and meets the
- * load's requirements.
- */
- {20, "R5", "var_off=(0x2; 0x7fc)"},
- },
- },
- {
- .descr = "pointer variable subtraction",
- .insns = {
- /* Create an unknown offset, (4n+2)-aligned and bounded
- * to [14,74]
- */
- LOAD_UNKNOWN(BPF_REG_6),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xf),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
- /* Subtract it from the packet pointer */
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_6),
- /* Create another unknown, (4n)-aligned and >= 74.
- * That in fact means >= 76, since 74 % 4 == 2
- */
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 76),
- /* Add it to the packet pointer */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_7),
- /* Check bounds and perform a read */
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .matches = {
- /* Calculated offset in R6 has unknown value, but known
- * alignment of 4.
- */
- {6, "R2", "pkt(r=8)"},
- {9, "R6", "var_off=(0x0; 0x3c)"},
- /* Adding 14 makes R6 be (4n+2) */
- {10, "R6", "var_off=(0x2; 0x7c)"},
- /* Subtracting from packet pointer overflows ubounds */
- {13, "R5", "var_off=(0xffffffffffffff82; 0x7c)"},
- /* New unknown value in R7 is (4n), >= 76 */
- {14, "R7", "var_off=(0x0; 0x7fc)"},
- /* Adding it to packet pointer gives nice bounds again */
- {16, "R5", "var_off=(0x2; 0x7fc)"},
- /* At the time the word size load is performed from R5,
- * its total fixed offset is NET_IP_ALIGN + reg->off (0)
- * which is 2. Then the variable offset is (4n+2), so
- * the total offset is 4-byte aligned and meets the
- * load's requirements.
- */
- {20, "R5", "var_off=(0x2; 0x7fc)"},
- },
- },
-};
-
-static int probe_filter_length(const struct bpf_insn *fp)
-{
- int len;
-
- for (len = MAX_INSNS - 1; len > 0; --len)
- if (fp[len].code != 0 || fp[len].imm != 0)
- break;
- return len + 1;
-}
-
-static char bpf_vlog[32768];
-
-static int do_test_single(struct bpf_align_test *test)
-{
- struct bpf_insn *prog = test->insns;
- int prog_type = test->prog_type;
- char bpf_vlog_copy[32768];
- LIBBPF_OPTS(bpf_prog_load_opts, opts,
- .prog_flags = BPF_F_STRICT_ALIGNMENT,
- .log_buf = bpf_vlog,
- .log_size = sizeof(bpf_vlog),
- .log_level = 2,
- );
- const char *main_pass_start = "0: R1=ctx() R10=fp0";
- const char *line_ptr;
- int cur_line = -1;
- int prog_len, i;
- char *start;
- int fd_prog;
- int ret;
-
- prog_len = probe_filter_length(prog);
- fd_prog = bpf_prog_load(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL",
- prog, prog_len, &opts);
- if (fd_prog < 0 && test->result != REJECT) {
- printf("Failed to load program.\n");
- printf("%s", bpf_vlog);
- ret = 1;
- } else if (fd_prog >= 0 && test->result == REJECT) {
- printf("Unexpected success to load!\n");
- printf("%s", bpf_vlog);
- ret = 1;
- close(fd_prog);
- } else {
- ret = 0;
- /* We make a local copy so that we can strtok() it */
- strncpy(bpf_vlog_copy, bpf_vlog, sizeof(bpf_vlog_copy));
- start = strstr(bpf_vlog_copy, main_pass_start);
- if (!start) {
- ret = 1;
- printf("Can't find initial line '%s'\n", main_pass_start);
- goto out;
- }
- line_ptr = strtok(start, "\n");
- for (i = 0; i < MAX_MATCHES; i++) {
- struct bpf_reg_match m = test->matches[i];
- const char *p;
- int tmp;
-
- if (!m.match)
- break;
- while (line_ptr) {
- cur_line = -1;
- sscanf(line_ptr, "%u: ", &cur_line);
- if (cur_line == -1)
- sscanf(line_ptr, "from %u to %u: ", &tmp, &cur_line);
- if (cur_line == m.line)
- break;
- line_ptr = strtok(NULL, "\n");
- }
- if (!line_ptr) {
- printf("Failed to find line %u for match: %s=%s\n",
- m.line, m.reg, m.match);
- ret = 1;
- printf("%s", bpf_vlog);
- break;
- }
- /* Check the next line as well in case the previous line
- * did not have a corresponding bpf insn. Example:
- * func#0 @0
- * 0: R1=ctx() R10=fp0
- * 0: (b7) r3 = 2 ; R3_w=2
- *
- * Sometimes it's actually two lines below, e.g. when
- * searching for "6: R3_w=scalar(umax=255,var_off=(0x0; 0xff))":
- * from 4 to 6: R0_w=pkt(off=8,r=8) R1=ctx() R2_w=pkt(r=8) R3_w=pkt_end() R10=fp0
- * 6: R0_w=pkt(off=8,r=8) R1=ctx() R2_w=pkt(r=8) R3_w=pkt_end() R10=fp0
- * 6: (71) r3 = *(u8 *)(r2 +0) ; R2_w=pkt(r=8) R3_w=scalar(umax=255,var_off=(0x0; 0xff))
- */
- while (!(p = strstr(line_ptr, m.reg)) || !strstr(p, m.match)) {
- cur_line = -1;
- line_ptr = strtok(NULL, "\n");
- sscanf(line_ptr ?: "", "%u: ", &cur_line);
- if (!line_ptr || cur_line != m.line)
- break;
- }
- if (cur_line != m.line || !line_ptr || !(p = strstr(line_ptr, m.reg)) || !strstr(p, m.match)) {
- printf("Failed to find match %u: %s=%s\n", m.line, m.reg, m.match);
- ret = 1;
- printf("%s", bpf_vlog);
- break;
- }
- }
-out:
- if (fd_prog >= 0)
- close(fd_prog);
- }
- return ret;
-}
-
-void test_align(void)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(tests); i++) {
- struct bpf_align_test *test = &tests[i];
-
- if (!test__start_subtest(test->descr))
- continue;
-
- ASSERT_OK(do_test_single(test), test->descr);
- }
-}
diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
index 9e77e5da7097..e8c1a619e330 100644
--- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c
+++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
@@ -123,6 +123,140 @@ cleanup:
test_attach_probe_manual__destroy(skel);
}
+/* manual attach address-based kprobe/kretprobe tests */
+static void test_attach_kprobe_by_addr(enum probe_attach_mode attach_mode)
+{
+ LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
+ struct test_attach_probe_manual *skel;
+ unsigned long func_addr;
+
+ if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
+ return;
+
+ func_addr = ksym_get_addr(SYS_NANOSLEEP_KPROBE_NAME);
+ if (!ASSERT_NEQ(func_addr, 0UL, "func_addr"))
+ return;
+
+ skel = test_attach_probe_manual__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load"))
+ return;
+
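+	/* With a NULL function name below, 'offset' carries the absolute
+	 * kernel address resolved from kallsyms; this is the convention
+	 * the address-based attach path is expected to follow.
+	 */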
+ kprobe_opts.attach_mode = attach_mode;
+ kprobe_opts.retprobe = false;
+ kprobe_opts.offset = func_addr;
+ skel->links.handle_kprobe =
+ bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
+ NULL, &kprobe_opts);
+ if (!ASSERT_OK_PTR(skel->links.handle_kprobe, "attach_kprobe_by_addr"))
+ goto cleanup;
+
+ kprobe_opts.retprobe = true;
+ skel->links.handle_kretprobe =
+ bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
+ NULL, &kprobe_opts);
+ if (!ASSERT_OK_PTR(skel->links.handle_kretprobe, "attach_kretprobe_by_addr"))
+ goto cleanup;
+
+ /* trigger & validate kprobe && kretprobe */
+ usleep(1);
+
+ ASSERT_EQ(skel->bss->kprobe_res, 1, "check_kprobe_res");
+ ASSERT_EQ(skel->bss->kretprobe_res, 2, "check_kretprobe_res");
+
+cleanup:
+ test_attach_probe_manual__destroy(skel);
+}
+
+/* reject legacy address-based kprobe attach */
+static void test_attach_kprobe_legacy_by_addr_reject(void)
+{
+ LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
+ struct test_attach_probe_manual *skel;
+ unsigned long func_addr;
+
+ if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
+ return;
+
+ func_addr = ksym_get_addr(SYS_NANOSLEEP_KPROBE_NAME);
+ if (!ASSERT_NEQ(func_addr, 0UL, "func_addr"))
+ return;
+
+ skel = test_attach_probe_manual__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load"))
+ return;
+
+ kprobe_opts.attach_mode = PROBE_ATTACH_MODE_LEGACY;
+ kprobe_opts.offset = func_addr;
+ skel->links.handle_kprobe =
+ bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
+ NULL, &kprobe_opts);
+ ASSERT_ERR_PTR(skel->links.handle_kprobe, "attach_kprobe_legacy_by_addr");
+ ASSERT_EQ(libbpf_get_error(skel->links.handle_kprobe),
+ -EOPNOTSUPP, "attach_kprobe_legacy_by_addr_err");
+
+ test_attach_probe_manual__destroy(skel);
+}
+
+/*
+ * bpf_fentry_shadow_test exists in both vmlinux (net/bpf/test_run.c) and
+ * bpf_testmod (bpf_testmod.c). When bpf_testmod is loaded the symbol is
+ * duplicated. Test that kprobe attachment handles this correctly:
+ * - Unqualified name ("bpf_fentry_shadow_test") attaches to vmlinux.
+ * - MOD:SYM name ("bpf_testmod:bpf_fentry_shadow_test") attaches to module.
+ *
+ * Note: bpf_fentry_shadow_test is not invoked via test_run, so we only
+ * verify that attach and detach succeed without triggering the probe.
+ */
+static void test_attach_probe_dup_sym(enum probe_attach_mode attach_mode)
+{
+ DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
+ struct bpf_link *kprobe_link, *kretprobe_link;
+ struct test_attach_probe_manual *skel;
+
+ skel = test_attach_probe_manual__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_dup_sym_open_and_load"))
+ return;
+
+ kprobe_opts.attach_mode = attach_mode;
+
+ /* Unqualified: should attach to vmlinux symbol */
+ kprobe_opts.retprobe = false;
+ kprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
+ "bpf_fentry_shadow_test",
+ &kprobe_opts);
+ if (!ASSERT_OK_PTR(kprobe_link, "attach_kprobe_vmlinux"))
+ goto cleanup;
+ bpf_link__destroy(kprobe_link);
+
+ kprobe_opts.retprobe = true;
+ kretprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
+ "bpf_fentry_shadow_test",
+ &kprobe_opts);
+ if (!ASSERT_OK_PTR(kretprobe_link, "attach_kretprobe_vmlinux"))
+ goto cleanup;
+ bpf_link__destroy(kretprobe_link);
+
+ /* MOD:SYM qualified: should attach to module symbol */
+ kprobe_opts.retprobe = false;
+ kprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
+ "bpf_testmod:bpf_fentry_shadow_test",
+ &kprobe_opts);
+ if (!ASSERT_OK_PTR(kprobe_link, "attach_kprobe_module"))
+ goto cleanup;
+ bpf_link__destroy(kprobe_link);
+
+ kprobe_opts.retprobe = true;
+ kretprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
+ "bpf_testmod:bpf_fentry_shadow_test",
+ &kprobe_opts);
+ if (!ASSERT_OK_PTR(kretprobe_link, "attach_kretprobe_module"))
+ goto cleanup;
+ bpf_link__destroy(kretprobe_link);
+
+cleanup:
+ test_attach_probe_manual__destroy(skel);
+}
+
/* attach uprobe/uretprobe long event name testings */
static void test_attach_uprobe_long_event_name(void)
{
@@ -220,11 +354,73 @@ static void test_attach_kprobe_write_ctx(void)
kprobe_write_ctx__destroy(skel);
}
+
+static void test_freplace_kprobe_write_ctx(void)
+{
+ struct bpf_program *prog_kprobe, *prog_ext, *prog_fentry;
+ struct kprobe_write_ctx *skel_kprobe, *skel_ext = NULL;
+ struct bpf_link *link_kprobe = NULL, *link_ext = NULL;
+ int err, prog_fd;
+ LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ skel_kprobe = kprobe_write_ctx__open();
+ if (!ASSERT_OK_PTR(skel_kprobe, "kprobe_write_ctx__open kprobe"))
+ return;
+
+ prog_kprobe = skel_kprobe->progs.kprobe_dummy;
+ bpf_program__set_autoload(prog_kprobe, true);
+
+ prog_fentry = skel_kprobe->progs.fentry;
+ bpf_program__set_autoload(prog_fentry, true);
+
+ err = kprobe_write_ctx__load(skel_kprobe);
+ if (!ASSERT_OK(err, "kprobe_write_ctx__load kprobe"))
+ goto out;
+
+ skel_ext = kprobe_write_ctx__open();
+ if (!ASSERT_OK_PTR(skel_ext, "kprobe_write_ctx__open ext"))
+ goto out;
+
+ prog_ext = skel_ext->progs.freplace_kprobe;
+ bpf_program__set_autoload(prog_ext, true);
+
+ prog_fd = bpf_program__fd(skel_kprobe->progs.kprobe_write_ctx);
+ bpf_program__set_attach_target(prog_ext, prog_fd, "kprobe_write_ctx");
+
+ err = kprobe_write_ctx__load(skel_ext);
+ if (!ASSERT_OK(err, "kprobe_write_ctx__load ext"))
+ goto out;
+
+ prog_fd = bpf_program__fd(prog_kprobe);
+ link_ext = bpf_program__attach_freplace(prog_ext, prog_fd, "kprobe_dummy");
+ ASSERT_ERR_PTR(link_ext, "bpf_program__attach_freplace link");
+ ASSERT_EQ(libbpf_get_error(link_ext), -EINVAL, "bpf_program__attach_freplace error");
+
+ link_kprobe = bpf_program__attach_kprobe_opts(prog_kprobe, "bpf_fentry_test1",
+ &kprobe_opts);
+ if (!ASSERT_OK_PTR(link_kprobe, "bpf_program__attach_kprobe_opts"))
+ goto out;
+
+ err = bpf_prog_test_run_opts(bpf_program__fd(prog_fentry), &topts);
+ ASSERT_OK(err, "bpf_prog_test_run_opts");
+
+out:
+ bpf_link__destroy(link_ext);
+ bpf_link__destroy(link_kprobe);
+ kprobe_write_ctx__destroy(skel_ext);
+ kprobe_write_ctx__destroy(skel_kprobe);
+}
#else
static void test_attach_kprobe_write_ctx(void)
{
test__skip();
}
+
+static void test_freplace_kprobe_write_ctx(void)
+{
+ test__skip();
+}
#endif
static void test_attach_probe_auto(struct test_attach_probe *skel)
@@ -416,6 +612,21 @@ void test_attach_probe(void)
test_attach_probe_manual(PROBE_ATTACH_MODE_PERF);
if (test__start_subtest("manual-link"))
test_attach_probe_manual(PROBE_ATTACH_MODE_LINK);
+ if (test__start_subtest("kprobe-perf-by-addr"))
+ test_attach_kprobe_by_addr(PROBE_ATTACH_MODE_PERF);
+ if (test__start_subtest("kprobe-link-by-addr"))
+ test_attach_kprobe_by_addr(PROBE_ATTACH_MODE_LINK);
+ if (test__start_subtest("kprobe-legacy-by-addr-reject"))
+ test_attach_kprobe_legacy_by_addr_reject();
+
+ if (test__start_subtest("dup-sym-default"))
+ test_attach_probe_dup_sym(PROBE_ATTACH_MODE_DEFAULT);
+ if (test__start_subtest("dup-sym-legacy"))
+ test_attach_probe_dup_sym(PROBE_ATTACH_MODE_LEGACY);
+ if (test__start_subtest("dup-sym-perf"))
+ test_attach_probe_dup_sym(PROBE_ATTACH_MODE_PERF);
+ if (test__start_subtest("dup-sym-link"))
+ test_attach_probe_dup_sym(PROBE_ATTACH_MODE_LINK);
if (test__start_subtest("auto"))
test_attach_probe_auto(skel);
@@ -434,6 +645,8 @@ void test_attach_probe(void)
test_attach_kprobe_long_event_name();
if (test__start_subtest("kprobe-write-ctx"))
test_attach_kprobe_write_ctx();
+ if (test__start_subtest("freplace-kprobe-write-ctx"))
+ test_freplace_kprobe_write_ctx();
cleanup:
test_attach_probe__destroy(skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
index 75f4dff7d042..35adc3f6d443 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
@@ -6,6 +6,7 @@
#include <sys/syscall.h>
#include <sys/mman.h>
#include <unistd.h>
+#include <linux/compiler.h>
#include <test_progs.h>
#include <network_helpers.h>
#include <bpf/btf.h>
@@ -105,6 +106,11 @@ static void kprobe_multi_link_api_subtest(void)
unsigned long long addrs[8];
__u64 cookies[8];
+ if (!env.has_testmod) {
+ test__skip();
+ return;
+ }
+
if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
goto cleanup;
@@ -192,6 +198,11 @@ static void kprobe_multi_attach_api_subtest(void)
};
__u64 cookies[8];
+ if (!env.has_testmod) {
+ test__skip();
+ return;
+ }
+
skel = kprobe_multi__open_and_load();
if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
goto cleanup;
@@ -421,11 +432,12 @@ cleanup:
bpf_link__destroy(link3);
}
-static void burn_cpu(void)
+static void burn_cpu(long loops)
{
- volatile int j = 0;
+ long j = 0;
cpu_set_t cpu_set;
- int i, err;
+ long i;
+ int err;
/* generate some branches on cpu 0 */
CPU_ZERO(&cpu_set);
@@ -433,9 +445,10 @@ static void burn_cpu(void)
err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
ASSERT_OK(err, "set_thread_affinity");
- /* spin the loop for a while (random high number) */
- for (i = 0; i < 1000000; ++i)
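+	/* barrier() (hence the new linux/compiler.h include) stops the
+	 * compiler from collapsing the loop now that 'j' is not volatile,
+	 * so we still burn real CPU time for 'loops' iterations.
+	 */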
+ for (i = 0; i < loops; ++i) {
++j;
+ barrier();
+ }
}
static void pe_subtest(struct test_bpf_cookie *skel)
@@ -451,7 +464,7 @@ static void pe_subtest(struct test_bpf_cookie *skel)
attr.type = PERF_TYPE_SOFTWARE;
attr.config = PERF_COUNT_SW_CPU_CLOCK;
attr.sample_period = 100000;
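+	/* perf_event_open(2) arguments are (attr, pid, cpu, group_fd, flags);
+	 * pid == 0 with cpu == -1 counts only this process on any CPU,
+	 * instead of every process pinned to CPU 0.
+	 */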
- pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
+ pfd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC);
if (!ASSERT_GE(pfd, 0, "perf_fd"))
goto cleanup;
@@ -460,7 +473,7 @@ static void pe_subtest(struct test_bpf_cookie *skel)
if (!ASSERT_OK_PTR(link, "link1"))
goto cleanup;
- burn_cpu(); /* trigger BPF prog */
+ burn_cpu(100000000L); /* trigger BPF prog */
ASSERT_EQ(skel->bss->pe_res, 0x100000, "pe_res1");
@@ -479,7 +492,7 @@ static void pe_subtest(struct test_bpf_cookie *skel)
if (!ASSERT_OK_PTR(link, "link2"))
goto cleanup;
- burn_cpu(); /* trigger BPF prog */
+ burn_cpu(100000000L); /* trigger BPF prog */
ASSERT_EQ(skel->bss->pe_res, 0x200000, "pe_res2");
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_gotox.c b/tools/testing/selftests/bpf/prog_tests/bpf_gotox.c
index 75b0cf2467ab..73dc63882b7d 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_gotox.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_gotox.c
@@ -317,7 +317,7 @@ static void check_ldimm64_off_load(struct bpf_gotox *skel __always_unused)
static int __check_ldimm64_gotox_prog_load(struct bpf_insn *insns,
__u32 insn_cnt,
- __u32 off1, __u32 off2)
+ int off1, int off2, int off3)
{
const __u32 values[] = {5, 7, 9, 11, 13, 15};
const __u32 max_entries = ARRAY_SIZE(values);
@@ -349,16 +349,46 @@ static int __check_ldimm64_gotox_prog_load(struct bpf_insn *insns,
/* r1 += off2 */
insns[2].imm = off2;
+ /* r1 = *(r1 + off3) */
+ insns[3].off = off3;
+
ret = prog_load(insns, insn_cnt);
close(map_fd);
return ret;
}
-static void reject_offsets(struct bpf_insn *insns, __u32 insn_cnt, __u32 off1, __u32 off2)
+static void
+allow_offsets(struct bpf_insn *insns, __u32 insn_cnt, int off1, int off2, int off3)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ int prog_fd, err;
+ char s[128] = "";
+
+ prog_fd = __check_ldimm64_gotox_prog_load(insns, insn_cnt, off1, off2, off3);
+ snprintf(s, sizeof(s), "__check_ldimm64_gotox_prog_load(%d,%d,%d)", off1, off2, off3);
+ if (!ASSERT_GE(prog_fd, 0, s))
+ return;
+
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_OK(err, "test_run_opts err")) {
+ close(prog_fd);
+ return;
+ }
+
+ if (!ASSERT_EQ(topts.retval, (off1 + off2 + off3) / 8, "test_run_opts retval")) {
+ close(prog_fd);
+ return;
+ }
+
+ close(prog_fd);
+}
+
+static void
+reject_offsets(struct bpf_insn *insns, __u32 insn_cnt, int off1, int off2, int off3)
{
int prog_fd;
- prog_fd = __check_ldimm64_gotox_prog_load(insns, insn_cnt, off1, off2);
+ prog_fd = __check_ldimm64_gotox_prog_load(insns, insn_cnt, off1, off2, off3);
if (!ASSERT_EQ(prog_fd, -EACCES, "__check_ldimm64_gotox_prog_load"))
close(prog_fd);
}
@@ -376,7 +406,7 @@ static void check_ldimm64_off_gotox(struct bpf_gotox *skel __always_unused)
* The program rewrites the offsets in the instructions below:
* r1 = &map + offset1
* r1 += offset2
- * r1 = *r1
+ * r1 = *(r1 + offset3)
* gotox r1
*/
BPF_LD_IMM64_RAW(BPF_REG_1, BPF_PSEUDO_MAP_VALUE, 0),
@@ -403,43 +433,55 @@ static void check_ldimm64_off_gotox(struct bpf_gotox *skel __always_unused)
BPF_MOV64_IMM(BPF_REG_0, 5),
BPF_EXIT_INSN(),
};
- int prog_fd, err;
- __u32 off1, off2;
-
- /* allow all combinations off1 + off2 < 6 */
- for (off1 = 0; off1 < 6; off1++) {
- for (off2 = 0; off1 + off2 < 6; off2++) {
- LIBBPF_OPTS(bpf_test_run_opts, topts);
-
- prog_fd = __check_ldimm64_gotox_prog_load(insns, ARRAY_SIZE(insns),
- off1 * 8, off2 * 8);
- if (!ASSERT_GE(prog_fd, 0, "__check_ldimm64_gotox_prog_load"))
- return;
-
- err = bpf_prog_test_run_opts(prog_fd, &topts);
- if (!ASSERT_OK(err, "test_run_opts err")) {
- close(prog_fd);
- return;
- }
-
- if (!ASSERT_EQ(topts.retval, off1 + off2, "test_run_opts retval")) {
- close(prog_fd);
- return;
- }
-
- close(prog_fd);
- }
- }
+ int off1, off2, off3;
+
+ /* allow all combinations off1 + off2 + off3 < 6 */
+ for (off1 = 0; off1 < 6; off1++)
+ for (off2 = 0; off1 + off2 < 6; off2++)
+ for (off3 = 0; off1 + off2 + off3 < 6; off3++)
+ allow_offsets(insns, ARRAY_SIZE(insns),
+ off1 * 8, off2 * 8, off3 * 8);
+
+ /* allow for some offsets to be negative */
+ allow_offsets(insns, ARRAY_SIZE(insns), 8 * 3, 0, -(8 * 3));
+ allow_offsets(insns, ARRAY_SIZE(insns), 8 * 3, -(8 * 3), 0);
+ allow_offsets(insns, ARRAY_SIZE(insns), 0, 8 * 3, -(8 * 3));
+ allow_offsets(insns, ARRAY_SIZE(insns), 8 * 4, 0, -(8 * 2));
+ allow_offsets(insns, ARRAY_SIZE(insns), 8 * 4, -(8 * 2), 0);
+ allow_offsets(insns, ARRAY_SIZE(insns), 0, 8 * 4, -(8 * 2));
+
+ /* disallow negative sums of offsets */
+ reject_offsets(insns, ARRAY_SIZE(insns), 8 * 3, 0, -(8 * 4));
+ reject_offsets(insns, ARRAY_SIZE(insns), 8 * 3, -(8 * 4), 0);
+ reject_offsets(insns, ARRAY_SIZE(insns), 0, 8 * 3, -(8 * 4));
+
+ /* disallow the off1 to be negative in any case */
+ reject_offsets(insns, ARRAY_SIZE(insns), -8 * 1, 0, 0);
+ reject_offsets(insns, ARRAY_SIZE(insns), -8 * 1, 8 * 1, 0);
+ reject_offsets(insns, ARRAY_SIZE(insns), -8 * 1, 8 * 1, 8 * 1);
+
+ /* reject off1 + off2 + off3 >= 6 */
+ reject_offsets(insns, ARRAY_SIZE(insns), 8 * 3, 8 * 3, 8 * 0);
+ reject_offsets(insns, ARRAY_SIZE(insns), 8 * 7, 8 * 0, 8 * 0);
+ reject_offsets(insns, ARRAY_SIZE(insns), 8 * 0, 8 * 7, 8 * 0);
+ reject_offsets(insns, ARRAY_SIZE(insns), 8 * 3, 8 * 0, 8 * 3);
+ reject_offsets(insns, ARRAY_SIZE(insns), 8 * 0, 8 * 3, 8 * 3);
+
+ /* reject (off1 + off2) % 8 != 0, off3 % 8 != 0 */
+ reject_offsets(insns, ARRAY_SIZE(insns), 3, 3, 0);
+ reject_offsets(insns, ARRAY_SIZE(insns), 7, 0, 0);
+ reject_offsets(insns, ARRAY_SIZE(insns), 0, 7, 0);
+ reject_offsets(insns, ARRAY_SIZE(insns), 0, 0, 7);
+}
- /* reject off1 + off2 >= 6 */
- reject_offsets(insns, ARRAY_SIZE(insns), 8 * 3, 8 * 3);
- reject_offsets(insns, ARRAY_SIZE(insns), 8 * 7, 8 * 0);
- reject_offsets(insns, ARRAY_SIZE(insns), 8 * 0, 8 * 7);
+static void check_ldimm64_off_gotox_llvm(struct bpf_gotox *skel)
+{
+ __u64 in[] = {0, 1, 2, 3, 4};
+ __u64 out[] = {1, 1, 5, 1, 1};
+ int i;
- /* reject (off1 + off2) % 8 != 0 */
- reject_offsets(insns, ARRAY_SIZE(insns), 3, 3);
- reject_offsets(insns, ARRAY_SIZE(insns), 7, 0);
- reject_offsets(insns, ARRAY_SIZE(insns), 0, 7);
+ for (i = 0; i < ARRAY_SIZE(in); i++)
+ check_simple(skel, skel->progs.load_with_nonzero_offset, in[i], out[i]);
}
void test_bpf_gotox(void)
@@ -496,5 +538,8 @@ void test_bpf_gotox(void)
if (test__start_subtest("check-ldimm64-off-gotox"))
__subtest(skel, check_ldimm64_off_gotox);
+ if (test__start_subtest("check-ldimm64-off-gotox-llvm"))
+ __subtest(skel, check_ldimm64_off_gotox_llvm);
+
bpf_gotox__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_insn_array.c b/tools/testing/selftests/bpf/prog_tests/bpf_insn_array.c
index 482d38b9c29e..0222a9a5d076 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_insn_array.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_insn_array.c
@@ -3,7 +3,7 @@
#include <bpf/bpf.h>
#include <test_progs.h>
-#if defined(__x86_64__) || defined(__powerpc__)
+#if defined(__x86_64__) || defined(__powerpc__) || defined(__aarch64__)
static int map_create(__u32 map_type, __u32 max_entries)
{
const char *map_name = "insn_array";
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_kind.c b/tools/testing/selftests/bpf/prog_tests/btf_kind.c
new file mode 100644
index 000000000000..f61afe6a79a5
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/btf_kind.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2026, Oracle and/or its affiliates. */
+
+#include <test_progs.h>
+#include <bpf/btf.h>
+#include <bpf/libbpf.h>
+
+/* Verify kind encoding exists for each kind */
+static void test_btf_kind_encoding(void)
+{
+ LIBBPF_OPTS(btf_new_opts, opts);
+ const struct btf_header *hdr;
+ const void *raw_btf;
+ struct btf *btf;
+ __u32 raw_size;
+
+ opts.add_layout = true;
+ btf = btf__new_empty_opts(&opts);
+ if (!ASSERT_OK_PTR(btf, "btf_new"))
+ return;
+
+ raw_btf = btf__raw_data(btf, &raw_size);
+ if (!ASSERT_OK_PTR(raw_btf, "btf__raw_data"))
+ return;
+
+ hdr = raw_btf;
+
+ ASSERT_EQ(hdr->layout_off % 4, 0, "layout_aligned");
+ ASSERT_EQ(hdr->layout_len, sizeof(struct btf_layout) * NR_BTF_KINDS,
+ "layout_len");
+ ASSERT_EQ(hdr->str_off, hdr->layout_off + hdr->layout_len, "str_after_layout");
+ btf__free(btf);
+
+ opts.add_layout = false;
+ btf = btf__new_empty_opts(&opts);
+ if (!ASSERT_OK_PTR(btf, "btf_new"))
+ return;
+
+ raw_btf = btf__raw_data(btf, &raw_size);
+ if (!ASSERT_OK_PTR(raw_btf, "btf__raw_data"))
+ return;
+
+ hdr = raw_btf;
+
+ ASSERT_EQ(hdr->layout_off, 0, "no_layout_off");
+ ASSERT_EQ(hdr->layout_len, 0, "no_layout_len");
+ ASSERT_EQ(hdr->str_off, hdr->type_off + hdr->type_len, "strs_after_types");
+ btf__free(btf);
+}
+
+static int write_raw_btf(void *raw_btf, size_t raw_size, char *file)
+{
+ int fd = mkstemp(file);
+ ssize_t n;
+
+ if (!ASSERT_OK_FD(fd, "open_raw_btf"))
+ return -1;
+ n = write(fd, raw_btf, raw_size);
+ close(fd);
+ if (!ASSERT_EQ(n, (ssize_t)raw_size, "write_raw_btf"))
+ return -1;
+ return 0;
+}
+
+/*
+ * Fabricate an unrecognized kind at BTF_KIND_MAX + 1, and after adding
+ * a layout record describing this kind to the BTF, ensure that parsing
+ * of BTF containing the unrecognized kind can succeed.
+ */
+void test_btf_kind_decoding(void)
+{
+ char btf_kind_file1[] = "/tmp/test_btf_kind.XXXXXX";
+ char btf_kind_file2[] = "/tmp/test_btf_kind.XXXXXX";
+ char btf_kind_file3[] = "/tmp/test_btf_kind.XXXXXX";
+ struct btf *btf = NULL, *new_btf = NULL;
+ __s32 int_id, unrec_id, id, id2;
+ LIBBPF_OPTS(btf_new_opts, opts);
+ struct btf_layout *l;
+ struct btf_header *hdr;
+ const void *raw_btf;
+ struct btf_type *t;
+ void *new_raw_btf;
+ void *str_data;
+ __u32 raw_size;
+
+ opts.add_layout = true;
+ btf = btf__new_empty_opts(&opts);
+ if (!ASSERT_OK_PTR(btf, "btf_new"))
+ return;
+
+ int_id = btf__add_int(btf, "test_char", 1, BTF_INT_CHAR);
+ if (!ASSERT_GT(int_id, 0, "add_int_id"))
+ return;
+
+ /*
+	 * Create our type with an unrecognized kind by adding a typedef;
+	 * we will overwrite its kind with our unrecognized kind value.
+ */
+ unrec_id = btf__add_typedef(btf, "unrec_kind", int_id);
+ if (!ASSERT_GT(unrec_id, 0, "add_unrec_id"))
+ return;
+
+ /*
+ * Add an id after it that we will look up to verify we can parse
+ * beyond unrecognized kinds.
+ */
+ id = btf__add_typedef(btf, "test_lookup", int_id);
+ if (!ASSERT_GT(id, 0, "add_test_lookup_id"))
+ return;
+ id2 = btf__add_typedef(btf, "test_lookup2", int_id);
+ if (!ASSERT_GT(id2, 0, "add_test_lookup_id2"))
+ return;
+
+ raw_btf = (void *)btf__raw_data(btf, &raw_size);
+ if (!ASSERT_OK_PTR(raw_btf, "btf__raw_data"))
+ return;
+
+ new_raw_btf = calloc(1, raw_size + sizeof(*l));
+ if (!ASSERT_OK_PTR(new_raw_btf, "calloc_raw_btf"))
+ return;
+ memcpy(new_raw_btf, raw_btf, raw_size);
+
+ hdr = new_raw_btf;
+
+ /* Move strings to make space for one new layout description */
+ raw_size += sizeof(*l);
+ str_data = new_raw_btf + hdr->hdr_len + hdr->str_off;
+ memmove(str_data + sizeof(*l), str_data, hdr->str_len);
+ hdr->str_off += sizeof(*l);
+
+ /* Add new layout description */
+ hdr->layout_len += sizeof(*l);
+ l = new_raw_btf + hdr->hdr_len + hdr->layout_off;
+ l[NR_BTF_KINDS].info_sz = 0;
+ l[NR_BTF_KINDS].elem_sz = 0;
+ l[NR_BTF_KINDS].flags = 0;
+
+ /* Now modify typedef added above to be an unrecognized kind. */
+ t = (void *)hdr + hdr->hdr_len + hdr->type_off + sizeof(struct btf_type) +
+ sizeof(__u32);
+ t->info = (NR_BTF_KINDS << 24);
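+	/* btf_type.info packs vlen in bits 0-15 and the kind in bits 24-28,
+	 * so this sets kind to NR_BTF_KINDS (one past the last known kind)
+	 * with vlen 0.
+	 */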
+
+ /* Write BTF to a raw file, ready for parsing. */
+ if (write_raw_btf(new_raw_btf, raw_size, btf_kind_file1))
+ goto out;
+
+ /*
+ * Verify parsing succeeds, and that we can read type info past
+ * the unrecognized kind.
+ */
+ new_btf = btf__parse_raw(btf_kind_file1);
+ if (ASSERT_OK_PTR(new_btf, "btf__parse_raw")) {
+ ASSERT_EQ(btf__find_by_name(new_btf, "unrec_kind"), unrec_id,
+ "unrec_kind_found");
+ ASSERT_EQ(btf__find_by_name_kind(new_btf, "test_lookup",
+ BTF_KIND_TYPEDEF), id,
+ "verify_id_lookup");
+ ASSERT_EQ(btf__find_by_name_kind(new_btf, "test_lookup2",
+ BTF_KIND_TYPEDEF), id2,
+ "verify_id2_lookup");
+ }
+ btf__free(new_btf);
+ new_btf = NULL;
+
+ /*
+ * Next, change info_sz to equal sizeof(struct btf_type); this means the
+ * "test_lookup" kind will be reinterpreted as a singular info element
+ * following the unrecognized kind.
+ */
+ l[NR_BTF_KINDS].info_sz = sizeof(struct btf_type);
+ if (write_raw_btf(new_raw_btf, raw_size, btf_kind_file2))
+ goto out;
+
+ new_btf = btf__parse_raw(btf_kind_file2);
+ if (ASSERT_OK_PTR(new_btf, "btf__parse_raw")) {
+ ASSERT_EQ(btf__find_by_name_kind(new_btf, "test_lookup",
+ BTF_KIND_TYPEDEF), -ENOENT,
+ "verify_id_not_found");
+		/* id of "test_lookup2" will be id2 - 1 as we have removed one type */
+ ASSERT_EQ(btf__find_by_name_kind(new_btf, "test_lookup2",
+ BTF_KIND_TYPEDEF), id2 - 1,
+ "verify_id_lookup2");
+	}
+ btf__free(new_btf);
+ new_btf = NULL;
+
+ /*
+ * Change elem_sz to equal sizeof(struct btf_type) and set vlen
+ * associated with unrecognized type to 1; this allows us to verify
+ * vlen-specified BTF can still be parsed.
+ */
+ l[NR_BTF_KINDS].info_sz = 0;
+ l[NR_BTF_KINDS].elem_sz = sizeof(struct btf_type);
+ t->info |= 1;
+ if (write_raw_btf(new_raw_btf, raw_size, btf_kind_file3))
+ goto out;
+
+ new_btf = btf__parse_raw(btf_kind_file3);
+ if (ASSERT_OK_PTR(new_btf, "btf__parse_raw")) {
+ ASSERT_EQ(btf__find_by_name_kind(new_btf, "test_lookup",
+ BTF_KIND_TYPEDEF), -ENOENT,
+ "verify_id_not_found");
+		/* id of "test_lookup2" will be id2 - 1 as we have removed one type */
+ ASSERT_EQ(btf__find_by_name_kind(new_btf, "test_lookup2",
+ BTF_KIND_TYPEDEF), id2 - 1,
+ "verify_id_lookup2");
+	}
+out:
+ btf__free(new_btf);
+ free(new_raw_btf);
+ unlink(btf_kind_file1);
+ unlink(btf_kind_file2);
+ unlink(btf_kind_file3);
+ btf__free(btf);
+}
+
+void test_btf_kind(void)
+{
+ if (test__start_subtest("btf_kind_encoding"))
+ test_btf_kind_encoding();
+ if (test__start_subtest("btf_kind_decoding"))
+ test_btf_kind_decoding();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_sanitize.c b/tools/testing/selftests/bpf/prog_tests/btf_sanitize.c
new file mode 100644
index 000000000000..652b51efafc2
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/btf_sanitize.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2026, Oracle and/or its affiliates. */
+#include <test_progs.h>
+#include <linux/btf.h>
+#include "bpf/libbpf_internal.h"
+#include "../test_btf.h"
+#include "kfree_skb.skel.h"
+
+#define TYPE_LEN (sizeof(struct btf_type) + sizeof(__u32))
+#define MAX_NR_LAYOUT 2
+#define LAYOUT_LEN (sizeof(struct btf_layout) * MAX_NR_LAYOUT)
+#define STR_LEN sizeof("\0int")
+
+struct layout_btf {
+ struct btf_header hdr;
+ __u32 types[TYPE_LEN/sizeof(__u32)];
+ struct btf_layout layout[MAX_NR_LAYOUT];
+ char strs[STR_LEN];
+};
+
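+/* Hand-built raw BTF blob: a single 'int' type followed by a layout
+ * section with two records; the string section deliberately sits after
+ * the layout section, as the header offsets below describe.
+ */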
+static const struct layout_btf layout_btf = {
+ .hdr = {
+ .magic = BTF_MAGIC,
+ .version = BTF_VERSION,
+ .hdr_len = sizeof(struct btf_header),
+ .type_off = 0,
+ .type_len = TYPE_LEN,
+ .str_off = TYPE_LEN + LAYOUT_LEN,
+ .str_len = STR_LEN,
+ .layout_off = TYPE_LEN,
+ .layout_len = LAYOUT_LEN,
+ },
+ .types = {
+ BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
+ },
+ .layout = {
+ { .info_sz = 0, .elem_sz = 0, .flags = 0 },
+ { .info_sz = sizeof(__u32), .elem_sz = 0, .flags = 0 },
+ },
+ .strs = "\0int",
+};
+
+void test_btf_sanitize_layout(void)
+{
+ struct btf *orig = NULL, *sanitized = NULL;
+ struct kern_feature_cache *cache = NULL;
+ struct kfree_skb *skel = NULL;
+ const struct btf_header *hdr;
+ const void *raw;
+ __u32 raw_sz;
+
+ skel = kfree_skb__open();
+ if (!ASSERT_OK_PTR(skel, "kfree_skb_skel"))
+ return;
+ orig = btf__new(&layout_btf, sizeof(layout_btf));
+ if (!ASSERT_OK_PTR(orig, "btf_new_layout"))
+ goto out;
+ raw = btf__raw_data(orig, &raw_sz);
+ if (!ASSERT_OK_PTR(raw, "btf__raw_data_orig"))
+ goto out;
+ hdr = (struct btf_header *)raw;
+ ASSERT_EQ(hdr->layout_off, TYPE_LEN, "layout_off_nonzero");
+ ASSERT_EQ(hdr->layout_len, LAYOUT_LEN, "layout_len_nonzero");
+
+ cache = calloc(1, sizeof(*cache));
+ if (!ASSERT_OK_PTR(cache, "alloc_feat_cache"))
+ goto out;
+ for (int i = 0; i < __FEAT_CNT; i++)
+ cache->res[i] = FEAT_SUPPORTED;
+ cache->res[FEAT_BTF_LAYOUT] = FEAT_MISSING;
+
+ bpf_object_set_feat_cache(skel->obj, cache);
+
+ if (!ASSERT_FALSE(kernel_supports(skel->obj, FEAT_BTF_LAYOUT), "layout_feature_missing"))
+ goto out;
+ if (!ASSERT_TRUE(kernel_supports(skel->obj, FEAT_BTF_FUNC), "other_feature_allowed"))
+ goto out;
+
+ sanitized = bpf_object__sanitize_btf(skel->obj, orig);
+ if (!ASSERT_OK_PTR(sanitized, "bpf_object__sanitize_btf"))
+ goto out;
+
+ raw = btf__raw_data(sanitized, &raw_sz);
+ if (!ASSERT_OK_PTR(raw, "btf__raw_data_sanitized"))
+ goto out;
+ hdr = (struct btf_header *)raw;
+ ASSERT_EQ(hdr->layout_off, 0, "layout_off_zero");
+ ASSERT_EQ(hdr->layout_len, 0, "layout_len_zero");
+ ASSERT_EQ(hdr->str_off, TYPE_LEN, "strs_after_types");
+ ASSERT_EQ(hdr->str_len, STR_LEN, "strs_len_unchanged");
+ ASSERT_EQ(raw_sz, hdr->hdr_len + hdr->type_len + hdr->str_len, "btf_raw_sz_reduced");
+out:
+ /* This will free the cache we allocated above */
+ kfree_skb__destroy(skel);
+ btf__free(sanitized);
+ btf__free(orig);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_write.c b/tools/testing/selftests/bpf/prog_tests/btf_write.c
index 6e36de1302fc..5c84723cf254 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_write.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_write.c
@@ -497,10 +497,121 @@ cleanup:
btf__free(btf2);
}
+static void test_btf_add_btf_split()
+{
+ struct btf *base = NULL, *split1 = NULL, *split2 = NULL;
+ struct btf *combined = NULL;
+ int id, err;
+
+ /* Create a base BTF with an INT and a PTR to it */
+ base = btf__new_empty();
+ if (!ASSERT_OK_PTR(base, "base"))
+ return;
+
+ id = btf__add_int(base, "int", 4, BTF_INT_SIGNED);
+ ASSERT_EQ(id, 1, "base_int_id");
+ id = btf__add_ptr(base, 1);
+ ASSERT_EQ(id, 2, "base_ptr_id");
+
+ /* base has 2 types, type IDs 1..2 */
+ ASSERT_EQ(btf__type_cnt(base), 3, "base_type_cnt");
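+	/* btf__type_cnt() includes the implicit VOID type at ID 0, hence 3 */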
+
+ /* Create split1 on base: a STRUCT referencing base's int (ID 1) */
+ split1 = btf__new_empty_split(base);
+ if (!ASSERT_OK_PTR(split1, "split1"))
+ goto cleanup;
+
+ id = btf__add_struct(split1, "s1", 4);
+ /* split types start at base_type_cnt = 3 */
+ ASSERT_EQ(id, 3, "split1_struct_id");
+ btf__add_field(split1, "x", 1, 0, 0); /* refers to base int */
+
+ id = btf__add_ptr(split1, 3);
+ ASSERT_EQ(id, 4, "split1_ptr_id"); /* ptr to the struct (split self-ref) */
+
+ /* Add a typedef "int_alias" -> base int in split1, which will be
+ * duplicated in split2 to test that btf__dedup() merges them.
+ */
+ id = btf__add_typedef(split1, "int_alias", 1);
+ ASSERT_EQ(id, 5, "split1_typedef_id");
+
+ /* Create split2 on base: a TYPEDEF referencing base's ptr (ID 2) */
+ split2 = btf__new_empty_split(base);
+ if (!ASSERT_OK_PTR(split2, "split2"))
+ goto cleanup;
+
+ id = btf__add_typedef(split2, "int_ptr", 2); /* refers to base ptr */
+ ASSERT_EQ(id, 3, "split2_typedef_id");
+
+ id = btf__add_struct(split2, "s2", 8);
+ ASSERT_EQ(id, 4, "split2_struct_id");
+ btf__add_field(split2, "p", 3, 0, 0); /* refers to split2's own typedef */
+
+ /* Same "int_alias" typedef as split1 - should be deduped away */
+ id = btf__add_typedef(split2, "int_alias", 1);
+ ASSERT_EQ(id, 5, "split2_dup_typedef_id");
+
+ /* Create combined split BTF on same base and merge both */
+ combined = btf__new_empty_split(base);
+ if (!ASSERT_OK_PTR(combined, "combined"))
+ goto cleanup;
+
+ /* Merge split1: its types (3,4,5) should land at IDs 3,4,5 */
+ id = btf__add_btf(combined, split1);
+ if (!ASSERT_GE(id, 0, "add_split1"))
+ goto cleanup;
+ ASSERT_EQ(id, 3, "split1_first_id");
+
+ /* Merge split2: its types (3,4,5) should be remapped to 6,7,8 */
+ id = btf__add_btf(combined, split2);
+ if (!ASSERT_GE(id, 0, "add_split2"))
+ goto cleanup;
+ ASSERT_EQ(id, 6, "split2_first_id");
+
+ /* Before dedup: base (2) + split1 (3) + split2 (3) = 8 types + void */
+ ASSERT_EQ(btf__type_cnt(combined), 9, "pre_dedup_type_cnt");
+
+ VALIDATE_RAW_BTF(
+ combined,
+ /* base types (IDs 1-2) */
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] PTR '(anon)' type_id=1",
+
+ /* split1 types (IDs 3-5): base refs unchanged */
+ "[3] STRUCT 's1' size=4 vlen=1\n"
+ "\t'x' type_id=1 bits_offset=0", /* refers to base int=1 */
+ "[4] PTR '(anon)' type_id=3", /* refers to split1's struct=3 */
+ "[5] TYPEDEF 'int_alias' type_id=1", /* refers to base int=1 */
+
+ /* split2 types (IDs 6-8): remapped from 3,4,5 to 6,7,8 */
+ "[6] TYPEDEF 'int_ptr' type_id=2", /* base ptr=2, unchanged */
+ "[7] STRUCT 's2' size=8 vlen=1\n"
+ "\t'p' type_id=6 bits_offset=0", /* split2 typedef: 3->6 */
+ "[8] TYPEDEF 'int_alias' type_id=1"); /* dup of [5] */
+
+ /* Dedup to mirror the bpftool merge flow; should remove the
+ * duplicate "int_alias" typedef.
+ */
+ err = btf__dedup(combined, NULL);
+ if (!ASSERT_OK(err, "dedup"))
+ goto cleanup;
+
+ /* After dedup: one int_alias removed, so 7 types + void */
+ ASSERT_EQ(btf__type_cnt(combined), 8, "dedup_type_cnt");
+
+cleanup:
+ btf__free(combined);
+ btf__free(split2);
+ btf__free(split1);
+ btf__free(base);
+}
+
void test_btf_write()
{
if (test__start_subtest("btf_add"))
test_btf_add();
if (test__start_subtest("btf_add_btf"))
test_btf_add_btf();
+ if (test__start_subtest("btf_add_btf_split"))
+ test_btf_add_btf_split();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c b/tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
index a5afd16705f0..b7c18d590b99 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
@@ -107,10 +107,10 @@ static void test_shmem(struct bpf_link *link, struct memcg_query *memcg_query)
/*
* Increase memcg shmem usage by creating and writing
- * to a shmem object.
+ * to a memfd backed by shmem/tmpfs.
*/
- fd = shm_open("/tmp_shmem", O_CREAT | O_RDWR, 0644);
- if (!ASSERT_OK_FD(fd, "shm_open"))
+ fd = memfd_create("tmp_shmem", 0);
+ if (!ASSERT_OK_FD(fd, "memfd_create"))
return;
if (!ASSERT_OK(fallocate(fd, 0, 0, len), "fallocate"))
@@ -123,33 +123,6 @@ static void test_shmem(struct bpf_link *link, struct memcg_query *memcg_query)
cleanup:
close(fd);
- shm_unlink("/tmp_shmem");
-}
-
-#define NR_PIPES 64
-static void test_kmem(struct bpf_link *link, struct memcg_query *memcg_query)
-{
- int fds[NR_PIPES][2], i;
-
- /*
- * Increase kmem value by creating pipes which will allocate some
- * kernel buffers.
- */
- for (i = 0; i < NR_PIPES; i++) {
- if (!ASSERT_OK(pipe(fds[i]), "pipe"))
- goto cleanup;
- }
-
- if (!ASSERT_OK(read_stats(link), "read stats"))
- goto cleanup;
-
- ASSERT_GT(memcg_query->memcg_kmem, 0, "kmem value");
-
-cleanup:
- for (i = i - 1; i >= 0; i--) {
- close(fds[i][0]);
- close(fds[i][1]);
- }
}
static void test_pgfault(struct bpf_link *link, struct memcg_query *memcg_query)
@@ -209,8 +182,6 @@ void test_cgroup_iter_memcg(void)
test_shmem(link, &skel->data_query->memcg_query);
if (test__start_subtest("cgroup_iter_memcg__file"))
test_file(link, &skel->data_query->memcg_query);
- if (test__start_subtest("cgroup_iter_memcg__kmem"))
- test_kmem(link, &skel->data_query->memcg_query);
if (test__start_subtest("cgroup_iter_memcg__pgfault"))
test_pgfault(link, &skel->data_query->memcg_query);
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_storage.c b/tools/testing/selftests/bpf/prog_tests/cgroup_storage.c
index cf395715ced4..8dab655db342 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_storage.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_storage.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
+#include <unistd.h>
+#include <sys/socket.h>
#include <test_progs.h>
#include "cgroup_helpers.h"
#include "network_helpers.h"
@@ -86,6 +88,11 @@ void test_cgroup_storage(void)
err = SYS_NOFAIL(PING_CMD);
ASSERT_OK(err, "sixth ping");
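+	/* the storage map holds a single entry here, so iterating past
+	 * the first key must fail with ENOENT
+	 */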
+ err = bpf_map__get_next_key(skel->maps.cgroup_storage, &key, &key,
+ sizeof(key));
+ ASSERT_ERR(err, "bpf_map__get_next_key should fail");
+ ASSERT_EQ(errno, ENOENT, "no second key");
+
cleanup_progs:
cgroup_storage__destroy(skel);
cleanup_network:
@@ -94,3 +101,43 @@ cleanup_cgroup:
close(cgroup_fd);
cleanup_cgroup_environment();
}
+
+void test_cgroup_storage_oob(void)
+{
+ struct cgroup_storage *skel;
+ int cgroup_fd, sock_fd;
+
+ cgroup_fd = cgroup_setup_and_join(TEST_CGROUP);
+ if (!ASSERT_OK_FD(cgroup_fd, "create cgroup"))
+ return;
+
+ /* Load and attach BPF program */
+ skel = cgroup_storage__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "cgroup_storage__open_and_load"))
+ goto cleanup_cgroup;
+
+ skel->links.trigger_oob = bpf_program__attach_cgroup(skel->progs.trigger_oob,
+ cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.trigger_oob, "attach_cgroup"))
+ goto cleanup_skel;
+
+ /* Create a socket to trigger cgroup/sock_create hook.
+ * This will execute our BPF program and trigger the OOB read
+ * if the bug is present (before the fix).
+ */
+ sock_fd = socket(AF_INET, SOCK_DGRAM, 0);
+ if (!ASSERT_OK_FD(sock_fd, "create socket"))
+ goto cleanup_skel;
+
+ close(sock_fd);
+
+ /* If we reach here without a kernel panic or KASAN report,
+ * the test passes (the fix is working).
+ */
+
+cleanup_skel:
+ cgroup_storage__destroy(skel);
+cleanup_cgroup:
+ close(cgroup_fd);
+ cleanup_cgroup_environment();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/clone_attach_btf_id.c b/tools/testing/selftests/bpf/prog_tests/clone_attach_btf_id.c
new file mode 100644
index 000000000000..1c3e28e74606
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/clone_attach_btf_id.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta */
+#include <test_progs.h>
+#include "clone_attach_btf_id.skel.h"
+
+/*
+ * Test that bpf_program__clone() respects caller-provided attach_btf_id
+ * override via bpf_prog_load_opts.
+ *
+ * The BPF program has SEC("fentry/bpf_fentry_test1"). Clone it twice
+ * from the same prepared object: first with no opts (callback resolves
+ * attach_btf_id from sec_name), then with attach_btf_id overridden to
+ * bpf_fentry_test2. Verify each loaded program's attach_btf_id via
+ * bpf_prog_get_info_by_fd().
+ */
+
+static int get_prog_attach_btf_id(int prog_fd)
+{
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ int err;
+
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
+ if (err)
+ return err;
+ return info.attach_btf_id;
+}
+
+void test_clone_attach_btf_id(void)
+{
+ struct clone_attach_btf_id *skel;
+ int fd1 = -1, fd2 = -1, err;
+ int btf_id_test1, btf_id_test2;
+
+ btf_id_test1 = libbpf_find_vmlinux_btf_id("bpf_fentry_test1", BPF_TRACE_FENTRY);
+ if (!ASSERT_GT(btf_id_test1, 0, "find_btf_id_test1"))
+ return;
+
+ btf_id_test2 = libbpf_find_vmlinux_btf_id("bpf_fentry_test2", BPF_TRACE_FENTRY);
+ if (!ASSERT_GT(btf_id_test2, 0, "find_btf_id_test2"))
+ return;
+
+ skel = clone_attach_btf_id__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ err = bpf_object__prepare(skel->obj);
+ if (!ASSERT_OK(err, "obj_prepare"))
+ goto out;
+
+ /* Clone with no opts — callback resolves BTF from sec_name */
+ fd1 = bpf_program__clone(skel->progs.fentry_handler, NULL);
+ if (!ASSERT_GE(fd1, 0, "clone_default"))
+ goto out;
+ ASSERT_EQ(get_prog_attach_btf_id(fd1), btf_id_test1,
+ "attach_btf_id_default");
+
+ /*
+ * Clone with attach_btf_id override pointing to a different
+ * function. The BPF program never accesses arguments, so the
+ * load succeeds regardless of signature mismatch.
+ */
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .attach_btf_id = btf_id_test2,
+ );
+ fd2 = bpf_program__clone(skel->progs.fentry_handler, &opts);
+ if (!ASSERT_GE(fd2, 0, "clone_override"))
+ goto out;
+ ASSERT_EQ(get_prog_attach_btf_id(fd2), btf_id_test2,
+ "attach_btf_id_override");
+
+out:
+ if (fd1 >= 0)
+ close(fd1);
+ if (fd2 >= 0)
+ close(fd2);
+ clone_attach_btf_id__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/connect_force_port.c b/tools/testing/selftests/bpf/prog_tests/connect_force_port.c
index 24d553109f8d..dfb7f6cf3ee1 100644
--- a/tools/testing/selftests/bpf/prog_tests/connect_force_port.c
+++ b/tools/testing/selftests/bpf/prog_tests/connect_force_port.c
@@ -53,6 +53,9 @@ static int run_test(int cgroup_fd, int server_fd, int family, int type)
__u16 expected_peer_port = 60000;
struct bpf_program *prog;
struct bpf_object *obj;
+ struct bpf_map *map;
+ __u16 *port_ptr;
+ size_t port_size;
const char *obj_file = v4 ? "connect_force_port4.bpf.o" : "connect_force_port6.bpf.o";
int fd, err;
__u32 duration = 0;
@@ -61,6 +64,21 @@ static int run_test(int cgroup_fd, int server_fd, int family, int type)
if (!ASSERT_OK_PTR(obj, "bpf_obj_open"))
return -1;
+ map = bpf_object__find_map_by_name(obj, ".bss");
+ if (!ASSERT_OK_PTR(map, "find bss map")) {
+ err = -EIO;
+ goto close_bpf_object;
+ }
+
+ port_ptr = bpf_map__initial_value(map, &port_size);
+ if (!ASSERT_OK_PTR(port_ptr, "get bss initial value")) {
+ err = -EIO;
+ goto close_bpf_object;
+ }
+
+	/* The server was bound to port 0, so the kernel auto-assigned a free
+	 * port; pass it to the BPF program via .bss before loading.
+	 */
+ *port_ptr = ntohs(get_socket_local_port(server_fd));
+
err = bpf_object__load(obj);
if (!ASSERT_OK(err, "bpf_obj_load")) {
err = -EIO;
@@ -138,25 +156,25 @@ void test_connect_force_port(void)
if (CHECK_FAIL(cgroup_fd < 0))
return;
- server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 60123, 0);
+ server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0);
if (CHECK_FAIL(server_fd < 0))
goto close_cgroup_fd;
CHECK_FAIL(run_test(cgroup_fd, server_fd, AF_INET, SOCK_STREAM));
close(server_fd);
- server_fd = start_server(AF_INET6, SOCK_STREAM, NULL, 60124, 0);
+ server_fd = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
if (CHECK_FAIL(server_fd < 0))
goto close_cgroup_fd;
CHECK_FAIL(run_test(cgroup_fd, server_fd, AF_INET6, SOCK_STREAM));
close(server_fd);
- server_fd = start_server(AF_INET, SOCK_DGRAM, NULL, 60123, 0);
+ server_fd = start_server(AF_INET, SOCK_DGRAM, NULL, 0, 0);
if (CHECK_FAIL(server_fd < 0))
goto close_cgroup_fd;
CHECK_FAIL(run_test(cgroup_fd, server_fd, AF_INET, SOCK_DGRAM));
close(server_fd);
- server_fd = start_server(AF_INET6, SOCK_DGRAM, NULL, 60124, 0);
+ server_fd = start_server(AF_INET6, SOCK_DGRAM, NULL, 0, 0);
if (CHECK_FAIL(server_fd < 0))
goto close_cgroup_fd;
CHECK_FAIL(run_test(cgroup_fd, server_fd, AF_INET6, SOCK_DGRAM));
diff --git a/tools/testing/selftests/bpf/prog_tests/empty_skb.c b/tools/testing/selftests/bpf/prog_tests/empty_skb.c
index 438583e1f2d1..c9fcb70cbafb 100644
--- a/tools/testing/selftests/bpf/prog_tests/empty_skb.c
+++ b/tools/testing/selftests/bpf/prog_tests/empty_skb.c
@@ -10,8 +10,8 @@ void test_empty_skb(void)
struct empty_skb *bpf_obj = NULL;
struct nstoken *tok = NULL;
struct bpf_program *prog;
+ struct ethhdr eth_hlen;
char eth_hlen_pp[15];
- char eth_hlen[14];
int veth_ifindex;
int ipip_ifindex;
int err;
@@ -25,7 +25,9 @@ void test_empty_skb(void)
int err;
int ret;
int lwt_egress_ret; /* expected retval at lwt/egress */
+ __be16 h_proto;
bool success_on_tc;
+ bool adjust_room;
} tests[] = {
/* Empty packets are always rejected. */
@@ -46,6 +48,28 @@ void test_empty_skb(void)
.err = -EINVAL,
},
+ /* ETH_HLEN-sized packets with IPv4/IPv6 EtherType but
+ * no L3 header are rejected.
+ */
+ {
+ .msg = "veth short IPv4 ingress packet",
+ .data_in = &eth_hlen,
+ .data_size_in = sizeof(eth_hlen),
+ .ifindex = &veth_ifindex,
+ .err = -EINVAL,
+ .h_proto = htons(ETH_P_IP),
+ .adjust_room = true,
+ },
+ {
+ .msg = "veth short IPv6 ingress packet",
+ .data_in = &eth_hlen,
+ .data_size_in = sizeof(eth_hlen),
+ .ifindex = &veth_ifindex,
+ .err = -EINVAL,
+ .h_proto = htons(ETH_P_IPV6),
+ .adjust_room = true,
+ },
+
/* ETH_HLEN-sized packets:
* - can not be redirected at LWT_XMIT
* - can be redirected at TC to non-tunneling dest
@@ -54,7 +78,7 @@ void test_empty_skb(void)
{
/* __bpf_redirect_common */
.msg = "veth ETH_HLEN packet ingress",
- .data_in = eth_hlen,
+ .data_in = &eth_hlen,
.data_size_in = sizeof(eth_hlen),
.ifindex = &veth_ifindex,
.ret = -ERANGE,
@@ -68,7 +92,7 @@ void test_empty_skb(void)
* tc: skb->len=14 <= skb_network_offset=14
*/
.msg = "ipip ETH_HLEN packet ingress",
- .data_in = eth_hlen,
+ .data_in = &eth_hlen,
.data_size_in = sizeof(eth_hlen),
.ifindex = &ipip_ifindex,
.ret = -ERANGE,
@@ -108,17 +132,27 @@ void test_empty_skb(void)
SYS(out, "ip addr add 192.168.1.1/16 dev ipip0");
ipip_ifindex = if_nametoindex("ipip0");
+ memset(eth_hlen_pp, 0, sizeof(eth_hlen_pp));
+ memset(&eth_hlen, 0, sizeof(eth_hlen));
+
bpf_obj = empty_skb__open_and_load();
if (!ASSERT_OK_PTR(bpf_obj, "open skeleton"))
goto out;
for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ if (tests[i].data_in == &eth_hlen)
+ eth_hlen.h_proto = tests[i].h_proto;
+
bpf_object__for_each_program(prog, bpf_obj->obj) {
bool at_egress = strstr(bpf_program__name(prog), "egress") != NULL;
bool at_tc = !strncmp(bpf_program__section_name(prog), "tc", 2);
+ bool is_adjust_room = !strcmp(bpf_program__name(prog), "tc_adjust_room");
int expected_ret;
char buf[128];
+ if (tests[i].adjust_room != is_adjust_room)
+ continue;
+
expected_ret = at_egress && !at_tc ? tests[i].lwt_egress_ret : tests[i].ret;
tattr.data_in = tests[i].data_in;
diff --git a/tools/testing/selftests/bpf/prog_tests/exceptions.c b/tools/testing/selftests/bpf/prog_tests/exceptions.c
index 516f4a13013c..e8cbaf2a3e82 100644
--- a/tools/testing/selftests/bpf/prog_tests/exceptions.c
+++ b/tools/testing/selftests/bpf/prog_tests/exceptions.c
@@ -83,6 +83,7 @@ static void test_exceptions_success(void)
RUN_SUCCESS(exception_assert_range_with, 1);
RUN_SUCCESS(exception_bad_assert_range, 0);
RUN_SUCCESS(exception_bad_assert_range_with, 10);
+ RUN_SUCCESS(exception_throw_from_void_global, 11);
#define RUN_EXT(load_ret, attach_err, expr, msg, after_link) \
{ \
@@ -127,7 +128,7 @@ static void test_exceptions_success(void)
bpf_program__fd(skel->progs.exception_ext_mod_cb_runtime),
"exception_cb_mod"), "set_attach_target"))
goto done;
- }), "FENTRY/FEXIT programs cannot attach to exception callback", 0);
+ }), "Tracing programs cannot attach to exception callback", 0);
if (test__start_subtest("throwing fentry -> exception_cb"))
RUN_EXT(-EINVAL, true, ({
@@ -137,7 +138,7 @@ static void test_exceptions_success(void)
bpf_program__fd(skel->progs.exception_ext_mod_cb_runtime),
"exception_cb_mod"), "set_attach_target"))
goto done;
- }), "FENTRY/FEXIT programs cannot attach to exception callback", 0);
+ }), "Tracing programs cannot attach to exception callback", 0);
if (test__start_subtest("non-throwing fexit -> exception_cb"))
RUN_EXT(-EINVAL, true, ({
@@ -147,7 +148,7 @@ static void test_exceptions_success(void)
bpf_program__fd(skel->progs.exception_ext_mod_cb_runtime),
"exception_cb_mod"), "set_attach_target"))
goto done;
- }), "FENTRY/FEXIT programs cannot attach to exception callback", 0);
+ }), "Tracing programs cannot attach to exception callback", 0);
if (test__start_subtest("throwing fexit -> exception_cb"))
RUN_EXT(-EINVAL, true, ({
@@ -157,7 +158,7 @@ static void test_exceptions_success(void)
bpf_program__fd(skel->progs.exception_ext_mod_cb_runtime),
"exception_cb_mod"), "set_attach_target"))
goto done;
- }), "FENTRY/FEXIT programs cannot attach to exception callback", 0);
+ }), "Tracing programs cannot attach to exception callback", 0);
if (test__start_subtest("throwing extension (with custom cb) -> exception_cb"))
RUN_EXT(-EINVAL, true, ({
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
index f29fc789c14b..92c20803ea76 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
@@ -111,7 +111,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
struct bpf_link_info link_info;
struct bpf_program *pos;
const char *pos_sec_name;
- char *tgt_name;
+ const char *tgt_name;
__s32 btf_id;
tgt_name = strstr(prog_name[i], "/");
@@ -347,6 +347,17 @@ static void test_func_sockmap_update(void)
prog_name, false, NULL);
}
+static void test_func_replace_void(void)
+{
+ const char *prog_name[] = {
+ "freplace/foo",
+ };
+ test_fexit_bpf2bpf_common("./freplace_void.bpf.o",
+ "./test_global_func7.bpf.o",
+ ARRAY_SIZE(prog_name),
+ prog_name, false, NULL);
+}
+
static void test_obj_load_failure_common(const char *obj_file,
const char *target_obj_file,
const char *exp_msg)
@@ -432,6 +443,15 @@ static void test_func_replace_global_func(void)
prog_name, false, NULL);
}
+static void test_func_replace_int_with_void(void)
+{
+ /* Make sure we can't freplace with the wrong type */
+ test_obj_load_failure_common("freplace_int_with_void.bpf.o",
+ "./test_global_func2.bpf.o",
+ "Return type UNKNOWN of test_freplace_int_with_void()"
+ " doesn't match type INT of global_func2()");
+}
+
static int find_prog_btf_id(const char *name, __u32 attach_prog_fd)
{
struct bpf_prog_info info = {};
@@ -597,4 +617,8 @@ void serial_test_fexit_bpf2bpf(void)
test_fentry_to_cgroup_bpf();
if (test__start_subtest("func_replace_progmap"))
test_func_replace_progmap();
+ if (test__start_subtest("freplace_int_with_void"))
+ test_func_replace_int_with_void();
+ if (test__start_subtest("freplace_void"))
+ test_func_replace_void();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/get_func_args_test.c b/tools/testing/selftests/bpf/prog_tests/get_func_args_test.c
index 96b27de05524..7bf8adc41e99 100644
--- a/tools/testing/selftests/bpf/prog_tests/get_func_args_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/get_func_args_test.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "get_func_args_test.skel.h"
+#include "get_func_args_fsession_test.skel.h"
void test_get_func_args_test(void)
{
@@ -41,8 +42,30 @@ void test_get_func_args_test(void)
ASSERT_EQ(skel->bss->test4_result, 1, "test4_result");
ASSERT_EQ(skel->bss->test5_result, 1, "test5_result");
ASSERT_EQ(skel->bss->test6_result, 1, "test6_result");
- ASSERT_EQ(skel->bss->test7_result, 1, "test7_result");
cleanup:
get_func_args_test__destroy(skel);
}
+
+void test_get_func_args_fsession_test(void)
+{
+ struct get_func_args_fsession_test *skel = NULL;
+ int err;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ skel = get_func_args_fsession_test__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "get_func_args_fsession_test__open_and_load"))
+ return;
+
+ err = get_func_args_fsession_test__attach(skel);
+ if (!ASSERT_OK(err, "get_func_args_fsession_test__attach"))
+ goto cleanup;
+
+ err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test1), &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, 0, "test_run");
+
+ ASSERT_EQ(skel->bss->test1_result, 1, "test1_result");
+cleanup:
+ get_func_args_fsession_test__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c b/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c
index 7772a0f288d3..357fdedfea93 100644
--- a/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c
@@ -2,6 +2,7 @@
#include <test_progs.h>
#include "get_func_ip_test.skel.h"
#include "get_func_ip_uprobe_test.skel.h"
+#include "get_func_ip_fsession_test.skel.h"
static noinline void uprobe_trigger(void)
{
@@ -46,8 +47,6 @@ static void test_function_entry(void)
ASSERT_EQ(skel->bss->test5_result, 1, "test5_result");
ASSERT_EQ(skel->bss->test7_result, 1, "test7_result");
ASSERT_EQ(skel->bss->test8_result, 1, "test8_result");
- ASSERT_EQ(skel->bss->test9_entry_result, 1, "test9_entry_result");
- ASSERT_EQ(skel->bss->test9_exit_result, 1, "test9_exit_result");
cleanup:
get_func_ip_test__destroy(skel);
@@ -139,3 +138,28 @@ void test_get_func_ip_test(void)
test_function_entry();
test_function_body();
}
+
+void test_get_func_ip_fsession_test(void)
+{
+ struct get_func_ip_fsession_test *skel = NULL;
+ int err;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ skel = get_func_ip_fsession_test__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "get_func_ip_fsession_test__open_and_load"))
+ return;
+
+ err = get_func_ip_fsession_test__attach(skel);
+ if (!ASSERT_OK(err, "get_func_ip_fsession_test__attach"))
+ goto cleanup;
+
+ err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test1), &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, 0, "test_run");
+
+ ASSERT_EQ(skel->bss->test1_entry_result, 1, "test1_entry_result");
+ ASSERT_EQ(skel->bss->test1_exit_result, 1, "test1_exit_result");
+
+cleanup:
+ get_func_ip_fsession_test__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/htab_reuse.c b/tools/testing/selftests/bpf/prog_tests/htab_reuse.c
index a742dd994d60..d7c3df165adc 100644
--- a/tools/testing/selftests/bpf/prog_tests/htab_reuse.c
+++ b/tools/testing/selftests/bpf/prog_tests/htab_reuse.c
@@ -59,7 +59,7 @@ static void *htab_update_fn(void *arg)
return NULL;
}
-void test_htab_reuse(void)
+static void test_htab_reuse_basic(void)
{
unsigned int i, wr_nr = 1, rd_nr = 4;
pthread_t tids[wr_nr + rd_nr];
@@ -99,3 +99,170 @@ reap:
}
htab_reuse__destroy(skel);
}
+
+/*
+ * Write-consistency test for BPF_F_LOCK updates
+ *
+ * The race:
+ * 1. Thread A: update with BPF_F_LOCK | BPF_EXIST
+ * 2. Thread B: delete the element, then re-create it with BPF_ANY
+ */
+
+struct htab_val_large {
+ struct bpf_spin_lock lock;
+ __u32 seq;
+ __u64 data[256];
+};
+
+struct consistency_ctx {
+ int fd;
+ int start_fd;
+ int loop;
+ volatile bool torn_write;
+};
+
+static void wait_for_start(int fd)
+{
+ char buf;
+
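+	/* blocks until the write end of the pipe is closed; the return
+	 * value is deliberately ignored
+	 */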
+ read(fd, &buf, 1);
+}
+
+static void *locked_update_fn(void *arg)
+{
+ struct consistency_ctx *ctx = arg;
+ struct htab_val_large value;
+ unsigned int key = 1;
+ int i;
+
+ memset(&value, 0xAA, sizeof(value));
+ wait_for_start(ctx->start_fd);
+
+ for (i = 0; i < ctx->loop; i++) {
+ value.seq = i;
+ bpf_map_update_elem(ctx->fd, &key, &value,
+ BPF_F_LOCK | BPF_EXIST);
+ }
+
+ return NULL;
+}
+
+/* Delete + update: removes the element then re-creates it with BPF_ANY. */
+static void *delete_update_fn(void *arg)
+{
+ struct consistency_ctx *ctx = arg;
+ struct htab_val_large value;
+ unsigned int key = 1;
+ int i;
+
+ memset(&value, 0xBB, sizeof(value));
+
+ wait_for_start(ctx->start_fd);
+
+ for (i = 0; i < ctx->loop; i++) {
+ value.seq = i;
+ bpf_map_delete_elem(ctx->fd, &key);
+ bpf_map_update_elem(ctx->fd, &key, &value, BPF_ANY | BPF_F_LOCK);
+ }
+
+ return NULL;
+}
+
+static void *locked_lookup_fn(void *arg)
+{
+ struct consistency_ctx *ctx = arg;
+ struct htab_val_large value;
+ unsigned int key = 1;
+ int i, j;
+
+ wait_for_start(ctx->start_fd);
+
+ for (i = 0; i < ctx->loop && !ctx->torn_write; i++) {
+ if (bpf_map_lookup_elem_flags(ctx->fd, &key, &value, BPF_F_LOCK))
+ continue;
+
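+		/* each writer fills data[] with a single byte pattern (0xAA
+		 * or 0xBB), so under BPF_F_LOCK the element must always be
+		 * uniform; any mix of patterns is a torn write
+		 */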
+		for (j = 0; j < ARRAY_SIZE(value.data); j++) {
+ if (value.data[j] != value.data[0]) {
+ ctx->torn_write = true;
+ return NULL;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static void test_htab_reuse_consistency(void)
+{
+ int threads_total = 6, threads = 2;
+ pthread_t tids[threads_total];
+ struct consistency_ctx ctx;
+ struct htab_val_large seed;
+ struct htab_reuse *skel;
+ unsigned int key = 1, i;
+ int pipefd[2];
+ int err;
+
+ skel = htab_reuse__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "htab_reuse__open_and_load"))
+ return;
+
+ if (!ASSERT_OK(pipe(pipefd), "pipe"))
+ goto out;
+
+ ctx.fd = bpf_map__fd(skel->maps.htab_lock_consistency);
+ ctx.start_fd = pipefd[0];
+ ctx.loop = 100000;
+ ctx.torn_write = false;
+
+ /* Seed the element so locked updaters have something to find */
+ memset(&seed, 0xBB, sizeof(seed));
+ err = bpf_map_update_elem(ctx.fd, &key, &seed, BPF_ANY);
+ if (!ASSERT_OK(err, "seed_element"))
+ goto close_pipe;
+
+ memset(tids, 0, sizeof(tids));
+ for (i = 0; i < threads; i++) {
+ err = pthread_create(&tids[i], NULL, locked_update_fn, &ctx);
+ if (!ASSERT_OK(err, "pthread_create"))
+ goto stop;
+ }
+ for (i = 0; i < threads; i++) {
+ err = pthread_create(&tids[threads + i], NULL, delete_update_fn, &ctx);
+ if (!ASSERT_OK(err, "pthread_create"))
+ goto stop;
+ }
+ for (i = 0; i < threads; i++) {
+ err = pthread_create(&tids[threads * 2 + i], NULL, locked_lookup_fn, &ctx);
+ if (!ASSERT_OK(err, "pthread_create"))
+ goto stop;
+ }
+
+ /* Release all threads simultaneously */
+ close(pipefd[1]);
+ pipefd[1] = -1;
+
+stop:
+ for (i = 0; i < threads_total; i++) {
+ if (!tids[i])
+ continue;
+ pthread_join(tids[i], NULL);
+ }
+
+ ASSERT_FALSE(ctx.torn_write, "no torn writes detected");
+
+close_pipe:
+ if (pipefd[1] >= 0)
+ close(pipefd[1]);
+ close(pipefd[0]);
+out:
+ htab_reuse__destroy(skel);
+}
+
+void test_htab_reuse(void)
+{
+ if (test__start_subtest("basic"))
+ test_htab_reuse_basic();
+ if (test__start_subtest("consistency"))
+ test_htab_reuse_consistency();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/iter_buf_null_fail.c b/tools/testing/selftests/bpf/prog_tests/iter_buf_null_fail.c
new file mode 100644
index 000000000000..ea97787b870d
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/iter_buf_null_fail.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include "iter_buf_null_fail.skel.h"
+
+void test_iter_buf_null_fail(void)
+{
+ RUN_TESTS(iter_buf_null_fail);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
index f79c8e53cb3e..62f3fb79f5d1 100644
--- a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
+++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
@@ -74,6 +74,8 @@ static struct kfunc_test_params kfunc_tests[] = {
TC_TEST(kfunc_call_test1, 12),
TC_TEST(kfunc_call_test2, 3),
TC_TEST(kfunc_call_test4, -1234),
+ TC_TEST(kfunc_call_test5, 0),
+ TC_TEST(kfunc_call_test5_asm, 0),
TC_TEST(kfunc_call_test_ref_btf_id, 0),
TC_TEST(kfunc_call_test_get_mem, 42),
SYSCALL_TEST(kfunc_syscall_test, 0),
diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
index f81dcd609ee9..2e0ddef77ba5 100644
--- a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
@@ -10,6 +10,7 @@
#include "kprobe_multi_session_cookie.skel.h"
#include "kprobe_multi_verifier.skel.h"
#include "kprobe_write_ctx.skel.h"
+#include "kprobe_multi_sleepable.skel.h"
#include "bpf/libbpf_internal.h"
#include "bpf/hashmap.h"
@@ -220,7 +221,9 @@ static void test_attach_api_syms(void)
static void test_attach_api_fails(void)
{
LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
struct kprobe_multi *skel = NULL;
+ struct kprobe_multi_sleepable *sl_skel = NULL;
struct bpf_link *link = NULL;
unsigned long long addrs[2];
const char *syms[2] = {
@@ -228,7 +231,7 @@ static void test_attach_api_fails(void)
"bpf_fentry_test2",
};
__u64 cookies[2];
- int saved_error;
+ int saved_error, err;
addrs[0] = ksym_get_addr("bpf_fentry_test1");
addrs[1] = ksym_get_addr("bpf_fentry_test2");
@@ -327,9 +330,63 @@ static void test_attach_api_fails(void)
if (!ASSERT_EQ(saved_error, -E2BIG, "fail_6_error"))
goto cleanup;
+ /* fail_7 - non-existent wildcard pattern (slow path) */
+ LIBBPF_OPTS_RESET(opts);
+
+ link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
+ "__nonexistent_func_xyz_*",
+ &opts);
+ saved_error = -errno;
+ if (!ASSERT_ERR_PTR(link, "fail_7"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(saved_error, -ENOENT, "fail_7_error"))
+ goto cleanup;
+
+ /* fail_8 - non-existent exact name (fast path), same error as wildcard */
+ link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
+ "__nonexistent_func_xyz_123",
+ &opts);
+ saved_error = -errno;
+ if (!ASSERT_ERR_PTR(link, "fail_8"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(saved_error, -ENOENT, "fail_8_error"))
+ goto cleanup;
+
+ /* fail_9 - sleepable kprobe multi should not attach */
+ sl_skel = kprobe_multi_sleepable__open();
+ if (!ASSERT_OK_PTR(sl_skel, "sleep_skel_open"))
+ goto cleanup;
+
+ sl_skel->bss->user_ptr = sl_skel;
+
+ err = bpf_program__set_flags(sl_skel->progs.handle_kprobe_multi_sleepable,
+ BPF_F_SLEEPABLE);
+ if (!ASSERT_OK(err, "sleep_skel_set_flags"))
+ goto cleanup;
+
+ err = kprobe_multi_sleepable__load(sl_skel);
+ if (!ASSERT_OK(err, "sleep_skel_load"))
+ goto cleanup;
+
+ link = bpf_program__attach_kprobe_multi_opts(sl_skel->progs.handle_kprobe_multi_sleepable,
+ "bpf_fentry_test1", NULL);
+ saved_error = -errno;
+
+ if (!ASSERT_ERR_PTR(link, "fail_9"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(saved_error, -EINVAL, "fail_9_error"))
+ goto cleanup;
+
+ err = bpf_prog_test_run_opts(bpf_program__fd(sl_skel->progs.fentry), &topts);
+ ASSERT_OK(err, "bpf_prog_test_run_opts");
+
cleanup:
bpf_link__destroy(link);
kprobe_multi__destroy(skel);
+ kprobe_multi_sleepable__destroy(sl_skel);
}
static void test_session_skel_api(void)
@@ -355,8 +412,13 @@ static void test_session_skel_api(void)
ASSERT_OK(err, "test_run");
ASSERT_EQ(topts.retval, 0, "test_run");
- /* bpf_fentry_test1-4 trigger return probe, result is 2 */
- for (i = 0; i < 4; i++)
+ /*
+ * bpf_fentry_test1 is hit by both the wildcard probe and the exact
+ * name probe (test_kprobe_syms), so entry + return fires twice: 4.
+ * bpf_fentry_test2-4 are hit only by the wildcard probe: 2.
+ */
+ ASSERT_EQ(skel->bss->kprobe_session_result[0], 4, "kprobe_session_result");
+ for (i = 1; i < 4; i++)
ASSERT_EQ(skel->bss->kprobe_session_result[i], 2, "kprobe_session_result");
/* bpf_fentry_test5-8 trigger only entry probe, result is 1 */
@@ -604,6 +666,44 @@ static void test_attach_write_ctx(void)
}
#endif
+/*
+ * Test kprobe_multi handles shadow symbols (vmlinux + module duplicate).
+ * bpf_fentry_shadow_test exists in both vmlinux and bpf_testmod.
+ * kprobe_multi resolves via ftrace_lookup_symbols() which finds the
+ * vmlinux symbol first and stops, so this should always succeed.
+ */
+static void test_attach_probe_dup_sym(void)
+{
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+ const char *syms[1] = { "bpf_fentry_shadow_test" };
+ struct kprobe_multi *skel = NULL;
+ struct bpf_link *link1 = NULL, *link2 = NULL;
+
+ skel = kprobe_multi__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "kprobe_multi__open_and_load"))
+ goto cleanup;
+
+ skel->bss->pid = getpid();
+ opts.syms = syms;
+ opts.cnt = ARRAY_SIZE(syms);
+
+ link1 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
+ NULL, &opts);
+ if (!ASSERT_OK_PTR(link1, "attach_kprobe_multi_dup_sym"))
+ goto cleanup;
+
+ opts.retprobe = true;
+ link2 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kretprobe_manual,
+ NULL, &opts);
+ if (!ASSERT_OK_PTR(link2, "attach_kretprobe_multi_dup_sym"))
+ goto cleanup;
+
+cleanup:
+ bpf_link__destroy(link2);
+ bpf_link__destroy(link1);
+ kprobe_multi__destroy(skel);
+}
+
void serial_test_kprobe_multi_bench_attach(void)
{
if (test__start_subtest("kernel"))
@@ -647,5 +747,7 @@ void test_kprobe_multi_test(void)
test_unique_match();
if (test__start_subtest("attach_write_ctx"))
test_attach_write_ctx();
+ if (test__start_subtest("dup_sym"))
+ test_attach_probe_dup_sym();
RUN_TESTS(kprobe_multi_verifier);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/linked_list.c b/tools/testing/selftests/bpf/prog_tests/linked_list.c
index 14c5a7ef0e87..6f25b5f39a79 100644
--- a/tools/testing/selftests/bpf/prog_tests/linked_list.c
+++ b/tools/testing/selftests/bpf/prog_tests/linked_list.c
@@ -87,12 +87,12 @@ static struct {
{ "incorrect_value_type",
"operation on bpf_list_head expects arg#1 bpf_list_node at offset=48 in struct foo, "
"but arg is at offset=0 in struct bar" },
- { "incorrect_node_var_off", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" },
+ { "incorrect_node_var_off", "variable ptr_ access var_off=(0x0; 0x1ffffffff) disallowed" },
{ "incorrect_node_off1", "bpf_list_node not found at offset=49" },
{ "incorrect_node_off2", "arg#1 offset=0, but expected bpf_list_node at offset=48 in struct foo" },
{ "no_head_type", "bpf_list_head not found at offset=0" },
{ "incorrect_head_var_off1", "R1 doesn't have constant offset" },
- { "incorrect_head_var_off2", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" },
+ { "incorrect_head_var_off2", "variable ptr_ access var_off=(0x0; 0x1ffffffff) disallowed" },
{ "incorrect_head_off1", "bpf_list_head not found at offset=25" },
{ "incorrect_head_off2", "bpf_list_head not found at offset=1" },
{ "pop_front_off", "off 48 doesn't point to 'struct bpf_spin_lock' that is at 40" },
diff --git a/tools/testing/selftests/bpf/prog_tests/livepatch_trampoline.c b/tools/testing/selftests/bpf/prog_tests/livepatch_trampoline.c
index 72aa5376c30e..0a12af924a99 100644
--- a/tools/testing/selftests/bpf/prog_tests/livepatch_trampoline.c
+++ b/tools/testing/selftests/bpf/prog_tests/livepatch_trampoline.c
@@ -5,6 +5,8 @@
#include "testing_helpers.h"
#include "livepatch_trampoline.skel.h"
+#define LIVEPATCH_ENABLED_PATH "/sys/kernel/livepatch/livepatch_sample/enabled"
+
static int load_livepatch(void)
{
char path[4096];
@@ -19,7 +21,8 @@ static int load_livepatch(void)
static void unload_livepatch(void)
{
/* Disable the livepatch before unloading the module */
- system("echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled");
+ if (!access(LIVEPATCH_ENABLED_PATH, F_OK))
+ system("echo 0 > " LIVEPATCH_ENABLED_PATH);
unload_module("livepatch_sample", env_verbosity > VERBOSE_NONE);
}
@@ -81,9 +84,22 @@ out:
void test_livepatch_trampoline(void)
{
int retry_cnt = 0;
+ int err;
+
+ /* Skip if kernel was built without CONFIG_LIVEPATCH */
+ if (access("/sys/kernel/livepatch", F_OK)) {
+ test__skip();
+ return;
+ }
retry:
- if (load_livepatch()) {
+ err = load_livepatch();
+ if (err) {
+ if (err == -ENOENT) {
+ test__skip();
+ return;
+ }
+
if (retry_cnt) {
ASSERT_OK(1, "load_livepatch");
goto out;
diff --git a/tools/testing/selftests/bpf/prog_tests/lsm_bdev.c b/tools/testing/selftests/bpf/prog_tests/lsm_bdev.c
new file mode 100644
index 000000000000..a970798e1173
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/lsm_bdev.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2026 Christian Brauner <brauner@kernel.org> */
+
+/*
+ * Test BPF LSM block device integrity hooks with dm-verity.
+ *
+ * Creates a dm-verity device over loopback, which triggers
+ * security_bdev_setintegrity() during verity_preresume().
+ * Verifies that the BPF program correctly tracks the integrity
+ * metadata in its hashmap.
+ */
+
+#define _GNU_SOURCE
+#include <test_progs.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include "lsm_bdev.skel.h"
+
+/* Must match the definition in progs/lsm_bdev.c. */
+struct verity_info {
+ __u8 has_roothash;
+ __u8 sig_valid;
+ __u32 setintegrity_cnt;
+};
+
+#define DATA_SIZE_MB 8
+#define HASH_SIZE_MB 1
+#define DM_NAME "bpf_test_verity"
+#define DM_DEV_PATH "/dev/mapper/" DM_NAME
+
+/* Run a command and optionally capture the first line of stdout. */
+static int run_cmd(const char *cmd, char *out, size_t out_sz)
+{
+ FILE *fp;
+ int ret;
+
+ fp = popen(cmd, "r");
+ if (!fp)
+ return -1;
+
+ if (out && out_sz > 0) {
+ if (!fgets(out, out_sz, fp))
+ out[0] = '\0';
+ /* strip trailing newline */
+ out[strcspn(out, "\n")] = '\0';
+ }
+
+ ret = pclose(fp);
+ return WIFEXITED(ret) ? WEXITSTATUS(ret) : -1;
+}
+
+static bool has_prerequisites(void)
+{
+ if (getuid() != 0) {
+ printf("SKIP: must be root\n");
+ return false;
+ }
+
+ if (run_cmd("modprobe loop 2>/dev/null", NULL, 0) &&
+ run_cmd("ls /dev/loop-control 2>/dev/null", NULL, 0)) {
+ printf("SKIP: no loop device support\n");
+ return false;
+ }
+
+ if (run_cmd("modprobe dm-verity 2>/dev/null", NULL, 0) &&
+ run_cmd("dmsetup targets 2>/dev/null | grep -q verity", NULL, 0)) {
+ printf("SKIP: dm-verity module not available\n");
+ return false;
+ }
+
+ if (run_cmd("which veritysetup >/dev/null 2>&1", NULL, 0)) {
+ printf("SKIP: veritysetup not found\n");
+ return false;
+ }
+
+ return true;
+}
+
+void test_lsm_bdev(void)
+{
+ char data_img[] = "/tmp/bpf_verity_data_XXXXXX";
+ char hash_img[] = "/tmp/bpf_verity_hash_XXXXXX";
+ char data_loop[64] = {};
+ char hash_loop[64] = {};
+ char roothash[256] = {};
+ char cmd[512];
+ int data_fd = -1, hash_fd = -1;
+ struct lsm_bdev *skel = NULL;
+ struct verity_info val;
+ struct stat st;
+ __u32 dev_key;
+ int err;
+
+ if (!has_prerequisites()) {
+ test__skip();
+ return;
+ }
+
+ /* Clean up any stale device from a previous crashed run. */
+ snprintf(cmd, sizeof(cmd), "dmsetup remove %s 2>/dev/null", DM_NAME);
+ run_cmd(cmd, NULL, 0);
+
+ /* Create temporary image files. */
+ data_fd = mkstemp(data_img);
+ if (!ASSERT_OK_FD(data_fd, "mkstemp data"))
+ return;
+
+ hash_fd = mkstemp(hash_img);
+ if (!ASSERT_OK_FD(hash_fd, "mkstemp hash"))
+ goto cleanup;
+
+ if (!ASSERT_OK(ftruncate(data_fd, DATA_SIZE_MB * 1024 * 1024),
+ "truncate data"))
+ goto cleanup;
+
+ if (!ASSERT_OK(ftruncate(hash_fd, HASH_SIZE_MB * 1024 * 1024),
+ "truncate hash"))
+ goto cleanup;
+
+ close(data_fd);
+ data_fd = -1;
+ close(hash_fd);
+ hash_fd = -1;
+
+ /* Set up loop devices. */
+ snprintf(cmd, sizeof(cmd),
+ "losetup --find --show %s 2>/dev/null", data_img);
+ if (!ASSERT_OK(run_cmd(cmd, data_loop, sizeof(data_loop)),
+ "losetup data"))
+ goto teardown;
+
+ snprintf(cmd, sizeof(cmd),
+ "losetup --find --show %s 2>/dev/null", hash_img);
+ if (!ASSERT_OK(run_cmd(cmd, hash_loop, sizeof(hash_loop)),
+ "losetup hash"))
+ goto teardown;
+
+ /* Format the dm-verity device and capture the root hash. */
+ snprintf(cmd, sizeof(cmd),
+ "veritysetup format %s %s 2>/dev/null | "
+ "grep -i 'root hash' | awk '{print $NF}'",
+ data_loop, hash_loop);
+ if (!ASSERT_OK(run_cmd(cmd, roothash, sizeof(roothash)),
+ "veritysetup format"))
+ goto teardown;
+
+ if (!ASSERT_GT((int)strlen(roothash), 0, "roothash not empty"))
+ goto teardown;
+
+ /* Load and attach BPF program before activating dm-verity. */
+ skel = lsm_bdev__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel open_and_load"))
+ goto teardown;
+
+ err = lsm_bdev__attach(skel);
+ if (!ASSERT_OK(err, "skel attach"))
+ goto teardown;
+
+ /* Activate dm-verity — triggers verity_preresume() hooks. */
+ snprintf(cmd, sizeof(cmd),
+ "veritysetup open %s %s %s %s 2>/dev/null",
+ data_loop, DM_NAME, hash_loop, roothash);
+ if (!ASSERT_OK(run_cmd(cmd, NULL, 0), "veritysetup open"))
+ goto teardown;
+
+ /* Get the dm device's dev_t. */
+ if (!ASSERT_OK(stat(DM_DEV_PATH, &st), "stat dm dev"))
+ goto remove_dm;
+
+ dev_key = (__u32)st.st_rdev;
+
+ /* Look up the device in the BPF map and verify. */
+ err = bpf_map__lookup_elem(skel->maps.verity_devices,
+ &dev_key, sizeof(dev_key),
+ &val, sizeof(val), 0);
+ if (!ASSERT_OK(err, "map lookup"))
+ goto remove_dm;
+
+ ASSERT_EQ(val.has_roothash, 1, "has_roothash");
+ ASSERT_EQ(val.sig_valid, 0, "sig_valid (unsigned)");
+ /*
+ * verity_preresume() always calls security_bdev_setintegrity()
+ * for the roothash. The signature-validity call only happens
+ * when CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is enabled.
+ */
+ ASSERT_GE(val.setintegrity_cnt, 1, "setintegrity_cnt min");
+ ASSERT_LE(val.setintegrity_cnt, 2, "setintegrity_cnt max");
+
+ /* Verify that the alloc hook fired at least once. */
+ ASSERT_GT(skel->bss->alloc_count, 0, "alloc_count");
+
+remove_dm:
+ snprintf(cmd, sizeof(cmd), "dmsetup remove %s 2>/dev/null", DM_NAME);
+ run_cmd(cmd, NULL, 0);
+
+teardown:
+ if (data_loop[0]) {
+ snprintf(cmd, sizeof(cmd), "losetup -d %s 2>/dev/null",
+ data_loop);
+ run_cmd(cmd, NULL, 0);
+ }
+ if (hash_loop[0]) {
+ snprintf(cmd, sizeof(cmd), "losetup -d %s 2>/dev/null",
+ hash_loop);
+ run_cmd(cmd, NULL, 0);
+ }
+
+cleanup:
+ lsm_bdev__destroy(skel);
+ if (data_fd >= 0)
+ close(data_fd);
+ if (hash_fd >= 0)
+ close(hash_fd);
+ unlink(data_img);
+ unlink(hash_img);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_misc.c b/tools/testing/selftests/bpf/prog_tests/lwt_misc.c
new file mode 100644
index 000000000000..6940fca38512
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/lwt_misc.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include "lwt_misc.skel.h"
+
+void test_lwt_misc(void)
+{
+ RUN_TESTS(lwt_misc);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/modify_return.c b/tools/testing/selftests/bpf/prog_tests/modify_return.c
index a70c99c2f8c8..4661d77ebdfc 100644
--- a/tools/testing/selftests/bpf/prog_tests/modify_return.c
+++ b/tools/testing/selftests/bpf/prog_tests/modify_return.c
@@ -5,6 +5,7 @@
*/
#include <test_progs.h>
+#include <unistd.h>
#include "modify_return.skel.h"
#define LOWER(x) ((x) & 0xffff)
@@ -23,11 +24,13 @@ static void run_test(__u32 input_retval, __u16 want_side_effect, __s16 want_ret)
if (!ASSERT_OK_PTR(skel, "skel_load"))
goto cleanup;
+ skel->bss->input_retval = input_retval;
+ skel->bss->test_pid = getpid();
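+	/* filtering on our pid keeps concurrently running tests from
+	 * tripping the fmod_ret counters, which is what allows dropping
+	 * the serial_ prefix from this test
+	 */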
+
err = modify_return__attach(skel);
if (!ASSERT_OK(err, "modify_return__attach failed"))
goto cleanup;
- skel->bss->input_retval = input_retval;
prog_fd = bpf_program__fd(skel->progs.fmod_ret_test);
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run");
@@ -49,8 +52,7 @@ cleanup:
modify_return__destroy(skel);
}
-/* TODO: conflict with get_func_ip_test */
-void serial_test_modify_return(void)
+void test_modify_return(void)
{
run_test(0 /* input_retval */,
2 /* want_side_effect */,
diff --git a/tools/testing/selftests/bpf/prog_tests/module_attach.c b/tools/testing/selftests/bpf/prog_tests/module_attach.c
index 70fa7ae93173..92c336333fcb 100644
--- a/tools/testing/selftests/bpf/prog_tests/module_attach.c
+++ b/tools/testing/selftests/bpf/prog_tests/module_attach.c
@@ -6,7 +6,22 @@
#include "test_module_attach.skel.h"
#include "testing_helpers.h"
-static int duration;
+static const char * const read_tests[] = {
+ "handle_raw_tp",
+ "handle_tp_btf",
+ "handle_fentry",
+ "handle_fentry_explicit",
+ "handle_fmod_ret",
+};
+
+static const char * const detach_tests[] = {
+ "handle_fentry",
+ "handle_fexit",
+ "kprobe_multi",
+};
+
+static const int READ_SZ = 456;
+static const int WRITE_SZ = 457;
static int trigger_module_test_writable(int *val)
{
@@ -33,53 +48,73 @@ static int trigger_module_test_writable(int *val)
return 0;
}
-void test_module_attach(void)
+static void test_module_attach_prog(const char *prog_name, int sz,
+ const char *attach_target, int ret)
{
- const int READ_SZ = 456;
- const int WRITE_SZ = 457;
- struct test_module_attach* skel;
- struct test_module_attach__bss *bss;
- struct bpf_link *link;
+ struct test_module_attach *skel;
+ struct bpf_program *prog;
int err;
- int writable_val = 0;
skel = test_module_attach__open();
- if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
+ if (!ASSERT_OK_PTR(skel, "module_attach open"))
return;
- err = bpf_program__set_attach_target(skel->progs.handle_fentry_manual,
- 0, "bpf_testmod_test_read");
- ASSERT_OK(err, "set_attach_target");
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (!ASSERT_OK_PTR(prog, "module_attach find_program"))
+ goto cleanup;
+ bpf_program__set_autoload(prog, true);
- err = bpf_program__set_attach_target(skel->progs.handle_fentry_explicit_manual,
- 0, "bpf_testmod:bpf_testmod_test_read");
- ASSERT_OK(err, "set_attach_target_explicit");
+ if (attach_target) {
+ err = bpf_program__set_attach_target(prog, 0, attach_target);
+ if (!ASSERT_OK(err, attach_target))
+ goto cleanup;
+ }
err = test_module_attach__load(skel);
- if (CHECK(err, "skel_load", "failed to load skeleton\n"))
+ if (!ASSERT_OK(err, "module_attach load"))
+ goto cleanup;
+
+ err = test_module_attach__attach(skel);
+ if (!ASSERT_OK(err, "module_attach attach"))
+ goto cleanup;
+
+ if (sz) {
+		/* trigger both read and write even though each test uses only one */
+ ASSERT_OK(trigger_module_test_read(sz), "trigger_read");
+ ASSERT_OK(trigger_module_test_write(sz), "trigger_write");
+
+ ASSERT_EQ(skel->bss->sz, sz, prog_name);
+ }
+
+ if (ret)
+ ASSERT_EQ(skel->bss->retval, ret, "ret");
+cleanup:
+ test_module_attach__destroy(skel);
+}
+
+static void test_module_attach_writable(void)
+{
+ struct test_module_attach__bss *bss;
+ struct test_module_attach *skel;
+ int writable_val = 0;
+ int err;
+
+ skel = test_module_attach__open();
+ if (!ASSERT_OK_PTR(skel, "module_attach open"))
return;
+ bpf_program__set_autoload(skel->progs.handle_raw_tp_writable_bare, true);
+
+ err = test_module_attach__load(skel);
+ if (!ASSERT_OK(err, "module_attach load"))
+ goto cleanup;
+
bss = skel->bss;
err = test_module_attach__attach(skel);
- if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
+ if (!ASSERT_OK(err, "module_attach attach"))
goto cleanup;
- /* trigger tracepoint */
- ASSERT_OK(trigger_module_test_read(READ_SZ), "trigger_read");
- ASSERT_OK(trigger_module_test_write(WRITE_SZ), "trigger_write");
-
- ASSERT_EQ(bss->raw_tp_read_sz, READ_SZ, "raw_tp");
- ASSERT_EQ(bss->raw_tp_bare_write_sz, WRITE_SZ, "raw_tp_bare");
- ASSERT_EQ(bss->tp_btf_read_sz, READ_SZ, "tp_btf");
- ASSERT_EQ(bss->fentry_read_sz, READ_SZ, "fentry");
- ASSERT_EQ(bss->fentry_manual_read_sz, READ_SZ, "fentry_manual");
- ASSERT_EQ(bss->fentry_explicit_read_sz, READ_SZ, "fentry_explicit");
- ASSERT_EQ(bss->fentry_explicit_manual_read_sz, READ_SZ, "fentry_explicit_manual");
- ASSERT_EQ(bss->fexit_read_sz, READ_SZ, "fexit");
- ASSERT_EQ(bss->fexit_ret, -EIO, "fexit_tet");
- ASSERT_EQ(bss->fmod_ret_read_sz, READ_SZ, "fmod_ret");
-
bss->raw_tp_writable_bare_early_ret = true;
bss->raw_tp_writable_bare_out_val = 0xf1f2f3f4;
ASSERT_OK(trigger_module_test_writable(&writable_val),
@@ -87,31 +122,73 @@ void test_module_attach(void)
ASSERT_EQ(bss->raw_tp_writable_bare_in_val, 1024, "writable_test_in");
ASSERT_EQ(bss->raw_tp_writable_bare_out_val, writable_val,
"writable_test_out");
+cleanup:
+ test_module_attach__destroy(skel);
+}
- test_module_attach__detach(skel);
-
- /* attach fentry/fexit and make sure it gets module reference */
- link = bpf_program__attach(skel->progs.handle_fentry);
- if (!ASSERT_OK_PTR(link, "attach_fentry"))
- goto cleanup;
+static void test_module_attach_detach(const char *prog_name)
+{
+ struct test_module_attach *skel;
+ struct bpf_program *prog;
+ struct bpf_link *link;
+ int err;
- ASSERT_ERR(unload_bpf_testmod(false), "unload_bpf_testmod");
- bpf_link__destroy(link);
+ skel = test_module_attach__open();
+ if (!ASSERT_OK_PTR(skel, "module_attach open"))
+ return;
- link = bpf_program__attach(skel->progs.handle_fexit);
- if (!ASSERT_OK_PTR(link, "attach_fexit"))
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (!ASSERT_OK_PTR(prog, "module_attach find_program"))
goto cleanup;
+ bpf_program__set_autoload(prog, true);
- ASSERT_ERR(unload_bpf_testmod(false), "unload_bpf_testmod");
- bpf_link__destroy(link);
+ err = test_module_attach__load(skel);
+ if (!ASSERT_OK(err, "module_attach load"))
+ goto cleanup;
- link = bpf_program__attach(skel->progs.kprobe_multi);
- if (!ASSERT_OK_PTR(link, "attach_kprobe_multi"))
+ /* attach and make sure it gets module reference */
+ link = bpf_program__attach(prog);
+ if (!ASSERT_OK_PTR(link, "module_attach attach"))
goto cleanup;
- ASSERT_ERR(unload_bpf_testmod(false), "unload_bpf_testmod");
+ ASSERT_ERR(try_unload_module("bpf_testmod", 1, false), "try_unload_module");
bpf_link__destroy(link);
-
cleanup:
test_module_attach__destroy(skel);
}
+
+void test_module_attach(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(read_tests); i++) {
+ if (!test__start_subtest(read_tests[i]))
+ continue;
+ test_module_attach_prog(read_tests[i], READ_SZ, NULL, 0);
+ }
+ if (test__start_subtest("handle_raw_tp_bare"))
+ test_module_attach_prog("handle_raw_tp_bare", WRITE_SZ, NULL, 0);
+ if (test__start_subtest("handle_raw_tp_writable_bare"))
+ test_module_attach_writable();
+ if (test__start_subtest("handle_fentry_manual")) {
+ test_module_attach_prog("handle_fentry_manual", READ_SZ,
+ "bpf_testmod_test_read", 0);
+ }
+ if (test__start_subtest("handle_fentry_explicit_manual")) {
+ test_module_attach_prog("handle_fentry_explicit_manual",
+ READ_SZ,
+ "bpf_testmod:bpf_testmod_test_read", 0);
+ }
+ if (test__start_subtest("handle_fexit"))
+ test_module_attach_prog("handle_fexit", READ_SZ, NULL, -EIO);
+ if (test__start_subtest("handle_fexit_ret"))
+ test_module_attach_prog("handle_fexit_ret", 0, NULL, 0);
+ for (i = 0; i < ARRAY_SIZE(detach_tests); i++) {
+ char test_name[50];
+
+ snprintf(test_name, sizeof(test_name), "%s_detach", detach_tests[i]);
+ if (!test__start_subtest(test_name))
+ continue;
+ test_module_attach_detach(detach_tests[i]);
+ }
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/perf_link.c b/tools/testing/selftests/bpf/prog_tests/perf_link.c
index d940ff87fa08..9e3a0d217af8 100644
--- a/tools/testing/selftests/bpf/prog_tests/perf_link.c
+++ b/tools/testing/selftests/bpf/prog_tests/perf_link.c
@@ -1,8 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#define _GNU_SOURCE
-#include <pthread.h>
-#include <sched.h>
+#include <linux/compiler.h>
#include <test_progs.h>
#include "testing_helpers.h"
#include "test_perf_link.skel.h"
@@ -12,23 +11,14 @@
static void burn_cpu(void)
{
- volatile int j = 0;
- cpu_set_t cpu_set;
- int i, err;
-
- /* generate some branches on cpu 0 */
- CPU_ZERO(&cpu_set);
- CPU_SET(0, &cpu_set);
- err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
- ASSERT_OK(err, "set_thread_affinity");
+ int i;
/* spin the loop for a while (random high number) */
for (i = 0; i < 1000000; ++i)
- ++j;
+ barrier();
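+	/* barrier() keeps the compiler from eliding the now-empty loop */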
}
-/* TODO: often fails in concurrent mode */
-void serial_test_perf_link(void)
+void test_perf_link(void)
{
struct test_perf_link *skel = NULL;
struct perf_event_attr attr;
@@ -45,7 +35,7 @@ void serial_test_perf_link(void)
attr.config = PERF_COUNT_SW_CPU_CLOCK;
attr.freq = 1;
attr.sample_freq = 1000;
- pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
+ pfd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC);
if (!ASSERT_GE(pfd, 0, "perf_fd"))
goto cleanup;
diff --git a/tools/testing/selftests/bpf/prog_tests/probe_user.c b/tools/testing/selftests/bpf/prog_tests/probe_user.c
index 8721671321de..7a9d13aa2c87 100644
--- a/tools/testing/selftests/bpf/prog_tests/probe_user.c
+++ b/tools/testing/selftests/bpf/prog_tests/probe_user.c
@@ -1,8 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
-/* TODO: corrupts other tests uses connect() */
-void serial_test_probe_user(void)
+void test_probe_user(void)
{
static const char *const prog_names[] = {
"handle_sys_connect",
@@ -20,6 +19,11 @@ void serial_test_probe_user(void)
struct bpf_program *kprobe_progs[prog_count];
struct bpf_object *obj;
static const int zero = 0;
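+	/* mirrors the layout of the BPF object's .bss section; the field
+	 * order must match the globals declared in the BPF program
+	 */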
+ struct test_pro_bss {
+ struct sockaddr_in old;
+ __u32 test_pid;
+ };
+ struct test_pro_bss results = {};
size_t i;
obj = bpf_object__open_file(obj_file, &opts);
@@ -34,6 +38,23 @@ void serial_test_probe_user(void)
goto cleanup;
}
+ {
+ struct bpf_map *bss_map;
+ struct test_pro_bss bss_init = {};
+
+ bss_init.test_pid = getpid();
+ bss_map = bpf_object__find_map_by_name(obj, "test_pro.bss");
+ if (!ASSERT_OK_PTR(bss_map, "find_bss_map"))
+ goto cleanup;
+ if (!ASSERT_EQ(bpf_map__value_size(bss_map), sizeof(bss_init),
+ "bss_size"))
+ goto cleanup;
+ err = bpf_map__set_initial_value(bss_map, &bss_init,
+ sizeof(bss_init));
+ if (!ASSERT_OK(err, "set_bss_init"))
+ goto cleanup;
+ }
+
err = bpf_object__load(obj);
if (CHECK(err, "obj_load", "err %d\n", err))
goto cleanup;
@@ -62,11 +83,13 @@ void serial_test_probe_user(void)
connect(sock_fd, &curr, sizeof(curr));
close(sock_fd);
- err = bpf_map_lookup_elem(results_map_fd, &zero, &tmp);
+ err = bpf_map_lookup_elem(results_map_fd, &zero, &results);
if (CHECK(err, "get_kprobe_res",
"failed to get kprobe res: %d\n", err))
goto cleanup;
+ memcpy(&tmp, &results.old, sizeof(tmp));
+
in = (struct sockaddr_in *)&tmp;
if (CHECK(memcmp(&tmp, &orig, sizeof(orig)), "check_kprobe_res",
"wrong kprobe res from probe read: %s:%u\n",
diff --git a/tools/testing/selftests/bpf/prog_tests/rbtree.c b/tools/testing/selftests/bpf/prog_tests/rbtree.c
index d8f3d7a45fe9..a854fb38e418 100644
--- a/tools/testing/selftests/bpf/prog_tests/rbtree.c
+++ b/tools/testing/selftests/bpf/prog_tests/rbtree.c
@@ -9,6 +9,7 @@
#include "rbtree_btf_fail__wrong_node_type.skel.h"
#include "rbtree_btf_fail__add_wrong_type.skel.h"
#include "rbtree_search.skel.h"
+#include "rbtree_search_kptr.skel.h"
static void test_rbtree_add_nodes(void)
{
@@ -193,3 +194,8 @@ void test_rbtree_search(void)
{
RUN_TESTS(rbtree_search);
}
+
+void test_rbtree_search_kptr(void)
+{
+ RUN_TESTS(rbtree_search_kptr);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
index cb8dd2f63296..71f5240cc5b7 100644
--- a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
+++ b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
@@ -500,6 +500,39 @@ static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t,
(s64)x.a >= S32_MIN && (s64)x.b <= S32_MAX)
return range_intersection(x_t, x, y_cast);
+ if (y_t == U32 && x_t == U64) {
+ u64 xmin_swap, xmax_swap, xmin_lower32, xmax_lower32;
+
+ xmin_lower32 = x.a & 0xffffffff;
+ xmax_lower32 = x.b & 0xffffffff;
+ if (xmin_lower32 < y.a || xmin_lower32 > y.b) {
+			/* The lower 32 bits of umin64 are outside the u32
+ * range. Let's update umin64 to match the u32 range.
+ * We want to *increase* the umin64 to the *minimum*
+ * value that matches the u32 range.
+ */
+ xmin_swap = swap_low32(x.a, y.a);
+			/* We must only ever increase the minimum, so if the
+			 * swapped value is lower than before, bump the upper
+			 * 32 bits by 1.
+ */
+ if (xmin_swap < x.a)
+ xmin_swap += 0x100000000;
+ if (xmin_swap == x.b)
+ return range(x_t, x.b, x.b);
+ } else if (xmax_lower32 < y.a || xmax_lower32 > y.b) {
+ /* Same for the umax64, but we want to *decrease*
+ * umax64 to the *maximum* value that matches the u32
+ * range.
+ */
+ xmax_swap = swap_low32(x.b, y.b);
+ if (xmax_swap > x.b)
+ xmax_swap -= 0x100000000;
+ if (xmax_swap == x.a)
+ return range(x_t, x.a, x.a);
+ }
+ }
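+	/* E.g. x = [5, 0x100000001], y = [1, 3]: umin64's lower 32 bits (5)
+	 * fall outside [1, 3], so xmin_swap = swap_low32(5, 1) = 1; that is
+	 * below x.a, so it is bumped by 2^32 to 0x100000001 == x.b and the
+	 * range collapses to that single value.
+	 */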
+
/* the case when new range knowledge, *y*, is a 32-bit subregister
* range, while previous range knowledge, *x*, is a full register
* 64-bit range, needs special treatment to take into account upper 32
@@ -1217,7 +1250,23 @@ static int parse_range_cmp_log(const char *log_buf, struct case_spec spec,
spec.compare_subregs ? "w0" : "r0",
spec.compare_subregs ? "w" : "r", specs[i].reg_idx);
- q = strstr(p, buf);
+ /*
+	 * In the verifier log, look for lines like:
+	 *   18: (bf) r0 = r6 ; R0=... R6=...
+	 * Other verifier passes may also print
+	 *   18: (bf) r0 = r6
+	 * but never followed by ';', so only matches with a trailing ';' count.
+ */
+ q = p;
+ while ((q = strstr(q, buf)) != NULL) {
+ const char *s = q + strlen(buf);
+
+ while (*s == ' ' || *s == '\t')
+ s++;
+ if (*s == ';')
+ break;
+ q = s;
+ }
if (!q) {
*specs[i].state = (struct reg_state){.valid = false};
continue;
@@ -2129,6 +2178,8 @@ static struct subtest_case crafted_cases[] = {
{U64, S64, {0x7fffffff00000001ULL, 0xffffffff00000000ULL}, {0, 0}},
{U64, S64, {0, 0xffffffffULL}, {1, 1}},
{U64, S64, {0, 0xffffffffULL}, {0x7fffffff, 0x7fffffff}},
+ {U64, S32, {0xfffffffe00000001, 0xffffffff00000000}, {S64_MIN, S64_MIN}},
+ {U64, U32, {0xfffffffe00000000, U64_MAX - 1}, {U64_MAX, U64_MAX}},
{U64, U32, {0, 0x100000000}, {0, 0}},
{U64, U32, {0xfffffffe, 0x300000000}, {0x80000000, 0x80000000}},
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
index dd3c757859f6..d2846579285f 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
@@ -1298,10 +1298,23 @@ static void test_sockmap_multi_channels(int sotype)
avail = wait_for_fionread(p1, expected, IO_TIMEOUT_SEC);
ASSERT_EQ(avail, expected, "ioctl(FIONREAD) full return");
- recvd = recv_timeout(p1, rcv, sizeof(rcv), MSG_DONTWAIT, 1);
- if (!ASSERT_EQ(recvd, sizeof(buf), "recv_timeout(p1)") ||
+ recvd = recv_timeout(p1, rcv, expected, MSG_DONTWAIT, 1);
+ if (!ASSERT_EQ(recvd, expected, "recv_timeout(p1)") ||
!ASSERT_OK(memcmp(buf, rcv, recvd), "data mismatch"))
goto end;
+
+ /* process remaining data for udp if secondary data is available */
+	/* for UDP, each send is a separate datagram; drain any remaining data */
+ if (expected) {
+ avail = wait_for_fionread(p1, expected, IO_TIMEOUT_SEC);
+ ASSERT_EQ(avail, expected, "second ioctl(FIONREAD) full return");
+
+ recvd = recv_timeout(p1, rcv, expected, MSG_DONTWAIT, 1);
+ if (!ASSERT_EQ(recvd, expected, "second recv_timeout(p1)") ||
+ !ASSERT_OK(memcmp(buf + sizeof(buf) - expected, rcv, recvd),
+ "second data mismatch"))
+ goto end;
+ }
end:
if (c0 >= 0)
close(c0);
diff --git a/tools/testing/selftests/bpf/prog_tests/spin_lock.c b/tools/testing/selftests/bpf/prog_tests/spin_lock.c
index 254fbfeab06a..bbe476f4c47d 100644
--- a/tools/testing/selftests/bpf/prog_tests/spin_lock.c
+++ b/tools/testing/selftests/bpf/prog_tests/spin_lock.c
@@ -13,8 +13,9 @@ static struct {
const char *err_msg;
} spin_lock_fail_tests[] = {
{ "lock_id_kptr_preserve",
- "5: (bf) r1 = r0 ; R0=ptr_foo(id=2,ref_obj_id=2) "
- "R1=ptr_foo(id=2,ref_obj_id=2) refs=2\n6: (85) call bpf_this_cpu_ptr#154\n"
+ "[0-9]\\+: (bf) r1 = r0 ; R0=ptr_foo(id=2,ref_obj_id=2)"
+ " R1=ptr_foo(id=2,ref_obj_id=2) refs=2\n"
+ "[0-9]\\+: (85) call bpf_this_cpu_ptr#154\n"
"R1 type=ptr_ expected=percpu_ptr_" },
{ "lock_id_global_zero",
"; R1=map_value(map=.data.A,ks=4,vs=4)\n2: (85) call bpf_this_cpu_ptr#154\n"
diff --git a/tools/testing/selftests/bpf/prog_tests/summarization.c b/tools/testing/selftests/bpf/prog_tests/summarization.c
index 5dd6c120a838..6951786044ca 100644
--- a/tools/testing/selftests/bpf/prog_tests/summarization.c
+++ b/tools/testing/selftests/bpf/prog_tests/summarization.c
@@ -58,7 +58,7 @@ static void test_aux(const char *main_prog_name,
* this particular combination can be enabled.
*/
if (!strcmp("might_sleep", replacement) && err) {
- ASSERT_HAS_SUBSTR(log, "helper call might sleep in a non-sleepable prog", "error log");
+ ASSERT_HAS_SUBSTR(log, "sleepable helper bpf_copy_from_user#", "error log");
ASSERT_EQ(err, -EINVAL, "err");
test__skip();
goto out;
diff --git a/tools/testing/selftests/bpf/prog_tests/task_local_data.h b/tools/testing/selftests/bpf/prog_tests/task_local_data.h
index 8342e2fe5260..1e5c67c78ffb 100644
--- a/tools/testing/selftests/bpf/prog_tests/task_local_data.h
+++ b/tools/testing/selftests/bpf/prog_tests/task_local_data.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __TASK_LOCAL_DATA_H
#define __TASK_LOCAL_DATA_H
@@ -22,14 +22,17 @@
/*
* OPTIONS
*
- * Define the option before including the header
+ * Define the option before including the header. Using different options in
+ * different translation units is strongly discouraged.
*
* TLD_FREE_DATA_ON_THREAD_EXIT - Frees memory on thread exit automatically
*
* Thread-specific memory for storing TLD is allocated lazily on the first call to
* tld_get_data(). The thread that calls it must also call tld_free() on thread exit
* to prevent memory leak. Pthread will be included if the option is defined. A pthread
- * key will be registered with a destructor that calls tld_free().
+ * key will be registered with a destructor that calls tld_free(). The destructor
+ * is registered only when the option is defined and TLD_DEFINE_KEY() or
+ * tld_create_key() is called in the same translation unit.
*
*
* TLD_DYN_DATA_SIZE - The maximum size of memory allocated for TLDs created dynamically
@@ -47,19 +50,16 @@
* TLD_NAME_LEN - The maximum length of the name of a TLD (default: 62)
*
* Setting TLD_NAME_LEN will affect the maximum number of TLDs a process can store,
- * TLD_MAX_DATA_CNT.
+ * TLD_MAX_DATA_CNT. Must be consistent with task_local_data.bpf.h.
*
*
- * TLD_DATA_USE_ALIGNED_ALLOC - Always use aligned_alloc() instead of malloc()
+ * TLD_DONT_ROUND_UP_DATA_SIZE - Don't round up the memory size allocated for data
+ * if the memory allocator has a low-overhead aligned_alloc() implementation.
*
- * When allocating the memory for storing TLDs, we need to make sure there is a memory
- * region of the X bytes within a page. This is due to the limit posed by UPTR: memory
- * pinned to the kernel cannot exceed a page nor can it cross the page boundary. The
- * library normally calls malloc(2*X) given X bytes of total TLDs, and only uses
- * aligned_alloc(PAGE_SIZE, X) when X >= PAGE_SIZE / 2. This is to reduce memory wastage
- * as not all memory allocator can use the exact amount of memory requested to fulfill
- * aligned_alloc(). For example, some may round the size up to the alignment. Enable the
- * option to always use aligned_alloc() if the implementation has low memory overhead.
+ * For some memory allocators, aligned_alloc(alignment, size) does not require size
+ * to be an integral multiple of alignment and can fulfill the request without
+ * using round_up(size, alignment) bytes of memory. Enable this option with such
+ * allocators to reduce memory usage.
*/
#define TLD_PAGE_SIZE getpagesize()
@@ -68,7 +68,7 @@
#define TLD_ROUND_MASK(x, y) ((__typeof__(x))((y) - 1))
#define TLD_ROUND_UP(x, y) ((((x) - 1) | TLD_ROUND_MASK(x, y)) + 1)
-#define TLD_READ_ONCE(x) (*(volatile typeof(x) *)&(x))
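+/* Smallest power of two >= x (x must be > 1); used as the aligned_alloc() alignment */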
+#define TLD_ROUND_UP_POWER_OF_TWO(x) (1UL << (sizeof(x) * 8 - __builtin_clzl(x - 1)))
#ifndef TLD_DYN_DATA_SIZE
#define TLD_DYN_DATA_SIZE 64
@@ -90,7 +90,7 @@ typedef struct {
struct tld_metadata {
char name[TLD_NAME_LEN];
- _Atomic __u16 size;
+ _Atomic __u16 size; /* size of tld_data_u->data */
};
struct tld_meta_u {
@@ -101,7 +101,7 @@ struct tld_meta_u {
struct tld_data_u {
__u64 start; /* offset of tld_data_u->data in a page */
- char data[];
+ char data[] __attribute__((aligned(8)));
};
struct tld_map_value {
@@ -111,15 +111,16 @@ struct tld_map_value {
struct tld_meta_u * _Atomic tld_meta_p __attribute__((weak));
__thread struct tld_data_u *tld_data_p __attribute__((weak));
-__thread void *tld_data_alloc_p __attribute__((weak));
#ifdef TLD_FREE_DATA_ON_THREAD_EXIT
+bool _Atomic tld_pthread_key_init __attribute__((weak));
pthread_key_t tld_pthread_key __attribute__((weak));
static void tld_free(void);
static void __tld_thread_exit_handler(void *unused)
{
+ (void)unused;
tld_free();
}
#endif
@@ -143,20 +144,16 @@ static int __tld_init_meta_p(void)
goto out;
}
-#ifdef TLD_FREE_DATA_ON_THREAD_EXIT
- pthread_key_create(&tld_pthread_key, __tld_thread_exit_handler);
-#endif
out:
return err;
}
static int __tld_init_data_p(int map_fd)
{
- bool use_aligned_alloc = false;
struct tld_map_value map_val;
struct tld_data_u *data;
- void *data_alloc = NULL;
int err, tid_fd = -1;
+ size_t size, size_pot;
tid_fd = syscall(SYS_pidfd_open, sys_gettid(), O_EXCL);
if (tid_fd < 0) {
@@ -164,47 +161,37 @@ static int __tld_init_data_p(int map_fd)
goto out;
}
-#ifdef TLD_DATA_USE_ALIGNED_ALLOC
- use_aligned_alloc = true;
-#endif
-
/*
* tld_meta_p->size = TLD_DYN_DATA_SIZE +
* total size of TLDs defined via TLD_DEFINE_KEY()
*/
- data_alloc = (use_aligned_alloc || tld_meta_p->size * 2 >= TLD_PAGE_SIZE) ?
- aligned_alloc(TLD_PAGE_SIZE, tld_meta_p->size) :
- malloc(tld_meta_p->size * 2);
- if (!data_alloc) {
+ size = tld_meta_p->size + sizeof(struct tld_data_u);
+ size_pot = TLD_ROUND_UP_POWER_OF_TWO(size);
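+ /* Power-of-two size and alignment keep the data within a single page, as UPTR
+ * requires (assuming the total TLD size fits in a page).
+ */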
+#ifdef TLD_DONT_ROUND_UP_DATA_SIZE
+ data = (struct tld_data_u *)aligned_alloc(size_pot, size);
+#else
+ data = (struct tld_data_u *)aligned_alloc(size_pot, size_pot);
+#endif
+ if (!data) {
err = -ENOMEM;
goto out;
}
/*
* Always pass a page-aligned address to UPTR since the size of tld_map_value::data
- * is a page in BTF. If data_alloc spans across two pages, use the page that contains large
- * enough memory.
+ * is a page in BTF.
*/
- if (TLD_PAGE_SIZE - (~TLD_PAGE_MASK & (intptr_t)data_alloc) >= tld_meta_p->size) {
- map_val.data = (void *)(TLD_PAGE_MASK & (intptr_t)data_alloc);
- data = data_alloc;
- data->start = (~TLD_PAGE_MASK & (intptr_t)data_alloc) +
- offsetof(struct tld_data_u, data);
- } else {
- map_val.data = (void *)(TLD_ROUND_UP((intptr_t)data_alloc, TLD_PAGE_SIZE));
- data = (void *)(TLD_ROUND_UP((intptr_t)data_alloc, TLD_PAGE_SIZE));
- data->start = offsetof(struct tld_data_u, data);
- }
- map_val.meta = TLD_READ_ONCE(tld_meta_p);
+ map_val.data = (void *)(TLD_PAGE_MASK & (intptr_t)data);
+ data->start = (~TLD_PAGE_MASK & (intptr_t)data) + sizeof(struct tld_data_u);
+ map_val.meta = tld_meta_p;
err = bpf_map_update_elem(map_fd, &tid_fd, &map_val, 0);
if (err) {
- free(data_alloc);
+ free(data);
goto out;
}
tld_data_p = data;
- tld_data_alloc_p = data_alloc;
#ifdef TLD_FREE_DATA_ON_THREAD_EXIT
pthread_setspecific(tld_pthread_key, (void *)1);
#endif
@@ -217,15 +204,24 @@ out:
static tld_key_t __tld_create_key(const char *name, size_t size, bool dyn_data)
{
int err, i, sz, off = 0;
+ bool uninit = false;
__u16 cnt;
- if (!TLD_READ_ONCE(tld_meta_p)) {
+ if (!tld_meta_p) {
err = __tld_init_meta_p();
if (err)
- return (tld_key_t){err};
+ return (tld_key_t){(__s16)err};
}
- for (i = 0; i < TLD_MAX_DATA_CNT; i++) {
+#ifdef TLD_FREE_DATA_ON_THREAD_EXIT
+ if (atomic_compare_exchange_strong(&tld_pthread_key_init, &uninit, true)) {
+ err = pthread_key_create(&tld_pthread_key, __tld_thread_exit_handler);
+ if (err)
+ return (tld_key_t){(__s16)err};
+ }
+#endif
+
+ for (i = 0; i < (int)TLD_MAX_DATA_CNT; i++) {
retry:
cnt = atomic_load(&tld_meta_p->cnt);
if (i < cnt) {
@@ -290,7 +286,7 @@ retry:
#define TLD_DEFINE_KEY(key, name, size) \
tld_key_t key; \
\
-__attribute__((constructor)) \
+__attribute__((constructor(101))) \
void __tld_define_key_##key(void) \
{ \
key = __tld_create_key(name, size, false); \
@@ -350,7 +346,7 @@ static inline int tld_key_err_or_zero(tld_key_t key)
__attribute__((unused))
static void *tld_get_data(int map_fd, tld_key_t key)
{
- if (!TLD_READ_ONCE(tld_meta_p))
+ if (!tld_meta_p)
return NULL;
/* tld_data_p is allocated on the first invocation of tld_get_data() */
@@ -367,14 +363,14 @@ static void *tld_get_data(int map_fd, tld_key_t key)
*
* Users must call tld_free() on thread exit to prevent memory leak. Alternatively,
* define TLD_FREE_DATA_ON_THREAD_EXIT and a thread exit handler will be registered
- * to free the memory automatically.
+ * to free the memory automatically. Calling tld_free() before thread exit is
+ * undefined behavior and may lead to a null-pointer dereference.
*/
__attribute__((unused))
static void tld_free(void)
{
- if (tld_data_alloc_p) {
- free(tld_data_alloc_p);
- tld_data_alloc_p = NULL;
+ if (tld_data_p) {
+ free(tld_data_p);
tld_data_p = NULL;
}
}
diff --git a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
index 7bee33797c71..1b26c12f255a 100644
--- a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
+++ b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
@@ -25,24 +25,30 @@
static void test_sys_enter_exit(void)
{
struct task_local_storage *skel;
+ pid_t pid = sys_gettid();
int err;
skel = task_local_storage__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
return;
- skel->bss->target_pid = sys_gettid();
-
err = task_local_storage__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto out;
+ /* Set target_pid after attach so that syscalls made during
+ * attach are not counted.
+ */
+ skel->bss->target_pid = pid;
+
sys_gettid();
sys_gettid();
- /* 3x syscalls: 1x attach and 2x gettid */
- ASSERT_EQ(skel->bss->enter_cnt, 3, "enter_cnt");
- ASSERT_EQ(skel->bss->exit_cnt, 3, "exit_cnt");
+ skel->bss->target_pid = 0;
+
+ /* 2x gettid syscalls */
+ ASSERT_EQ(skel->bss->enter_cnt, 2, "enter_cnt");
+ ASSERT_EQ(skel->bss->exit_cnt, 2, "exit_cnt");
ASSERT_EQ(skel->bss->mismatch_cnt, 0, "mismatch_cnt");
out:
task_local_storage__destroy(skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/test_bpf_smc.c b/tools/testing/selftests/bpf/prog_tests/test_bpf_smc.c
index de22734abc4d..40d38280c091 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_bpf_smc.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_bpf_smc.c
@@ -131,8 +131,10 @@ static bool get_smc_nl_family_id(void)
goto fail;
ret = recv(fd, &msg, sizeof(msg), 0);
- if (!ASSERT_FALSE(msg.n.nlmsg_type == NLMSG_ERROR || ret < 0 ||
- !NLMSG_OK(&msg.n, ret), "nl_family response"))
+ if (msg.n.nlmsg_type == NLMSG_ERROR)
+ goto fail;
+ if (!ASSERT_FALSE(ret < 0 || !NLMSG_OK(&msg.n, ret),
+ "nl_family response"))
goto fail;
nl = (struct nlattr *)GENLMSG_DATA(&msg);
diff --git a/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c b/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c
index e905cbaf6b3d..500446808908 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c
@@ -18,6 +18,7 @@
#include "test_global_func15.skel.h"
#include "test_global_func16.skel.h"
#include "test_global_func17.skel.h"
+#include "test_global_func_deep_stack.skel.h"
#include "test_global_func_ctx_args.skel.h"
#include "bpf/libbpf_internal.h"
@@ -155,6 +156,7 @@ void test_test_global_funcs(void)
RUN_TESTS(test_global_func15);
RUN_TESTS(test_global_func16);
RUN_TESTS(test_global_func17);
+ RUN_TESTS(test_global_func_deep_stack);
RUN_TESTS(test_global_func_ctx_args);
if (test__start_subtest("ctx_arg_rewrite"))
diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_multi_args.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_multi_args.c
new file mode 100644
index 000000000000..0f321e889862
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_multi_args.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include "struct_ops_multi_args.skel.h"
+
+void test_struct_ops_multi_args(void)
+{
+ RUN_TESTS(struct_ops_multi_args);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_task_local_data.c b/tools/testing/selftests/bpf/prog_tests/test_task_local_data.c
index 9556ad3d986f..e219ff506b56 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_task_local_data.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_task_local_data.c
@@ -26,7 +26,7 @@ TLD_DEFINE_KEY(value0_key, "value0", sizeof(int));
*/
static void reset_tld(void)
{
- if (TLD_READ_ONCE(tld_meta_p)) {
+ if (tld_meta_p) {
/* Remove TLDs created by tld_create_key() */
tld_meta_p->cnt = 1;
tld_meta_p->size = TLD_DYN_DATA_SIZE;
diff --git a/tools/testing/selftests/bpf/prog_tests/test_tc_tunnel.c b/tools/testing/selftests/bpf/prog_tests/test_tc_tunnel.c
index 7fc4d7dd70ef..1aa7c9463980 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_tc_tunnel.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_tc_tunnel.c
@@ -168,7 +168,7 @@ static int check_server_rx_data(struct subtest_cfg *cfg,
static struct connection *connect_client_to_server(struct subtest_cfg *cfg)
{
- struct network_helper_opts opts = {.timeout_ms = 500};
+ struct network_helper_opts opts = {.timeout_ms = 1000};
int family = cfg->ipproto == 6 ? AF_INET6 : AF_INET;
struct connection *conn = NULL;
int client_fd, server_fd;
@@ -206,18 +206,13 @@ static void disconnect_client_from_server(struct subtest_cfg *cfg,
free(conn);
}
-static int send_and_test_data(struct subtest_cfg *cfg, bool must_succeed)
+static int send_and_test_data(struct subtest_cfg *cfg)
{
struct connection *conn;
int err, res = -1;
conn = connect_client_to_server(cfg);
- if (!must_succeed && !ASSERT_ERR_PTR(conn, "connection that must fail"))
- goto end;
- else if (!must_succeed)
- return 0;
-
- if (!ASSERT_OK_PTR(conn, "connection that must succeed"))
+ if (!ASSERT_OK_PTR(conn, "connect to server"))
return -1;
err = send(conn->client_fd, tx_buffer, DEFAULT_TEST_DATA_SIZE, 0);
@@ -391,7 +386,7 @@ static void run_test(struct subtest_cfg *cfg)
goto fail;
/* Basic communication must work */
- if (!ASSERT_OK(send_and_test_data(cfg, true), "connect without any encap"))
+ if (!ASSERT_OK(send_and_test_data(cfg), "connect without any encap"))
goto fail;
/* Attach encapsulation program to client */
@@ -403,7 +398,7 @@ static void run_test(struct subtest_cfg *cfg)
if (!ASSERT_OK(configure_kernel_decapsulation(cfg),
"configure kernel decapsulation"))
goto fail;
- if (!ASSERT_OK(send_and_test_data(cfg, true),
+ if (!ASSERT_OK(send_and_test_data(cfg),
"connect with encap prog and kern decap"))
goto fail;
}
@@ -411,7 +406,7 @@ static void run_test(struct subtest_cfg *cfg)
/* Replace kernel decapsulation with BPF decapsulation, test must pass */
if (!ASSERT_OK(configure_ebpf_decapsulation(cfg), "configure ebpf decapsulation"))
goto fail;
- ASSERT_OK(send_and_test_data(cfg, true), "connect with encap and decap progs");
+ ASSERT_OK(send_and_test_data(cfg), "connect with encap and decap progs");
fail:
close_netns(nstoken);
diff --git a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
index 6cd7349d4a2b..7321850db75f 100644
--- a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
+++ b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
@@ -30,16 +30,14 @@ static struct bpf_program *load_prog(char *file, char *name, struct inst *inst)
return prog;
}
-/* TODO: use different target function to run in concurrent mode */
-void serial_test_trampoline_count(void)
+void test_trampoline_count(void)
{
char *file = "test_trampoline_count.bpf.o";
char *const progs[] = { "fentry_test", "fmod_ret_test", "fexit_test" };
- int bpf_max_tramp_links, err, i, prog_fd;
+ int bpf_max_tramp_links, i;
struct bpf_program *prog;
struct bpf_link *link;
struct inst *inst;
- LIBBPF_OPTS(bpf_test_run_opts, opts);
bpf_max_tramp_links = get_bpf_max_tramp_links();
if (!ASSERT_GE(bpf_max_tramp_links, 1, "bpf_max_tramp_links"))
@@ -80,16 +78,7 @@ void serial_test_trampoline_count(void)
goto cleanup;
/* and finally execute the probe */
- prog_fd = bpf_program__fd(prog);
- if (!ASSERT_GE(prog_fd, 0, "bpf_program__fd"))
- goto cleanup;
-
- err = bpf_prog_test_run_opts(prog_fd, &opts);
- if (!ASSERT_OK(err, "bpf_prog_test_run_opts"))
- goto cleanup;
-
- ASSERT_EQ(opts.retval & 0xffff, 33, "bpf_modify_return_test.result");
- ASSERT_EQ(opts.retval >> 16, 2, "bpf_modify_return_test.side_effect");
+ ASSERT_OK(trigger_module_test_read(256), "trigger_module_test_read");
cleanup:
for (; i >= 0; i--) {
diff --git a/tools/testing/selftests/bpf/prog_tests/usdt.c b/tools/testing/selftests/bpf/prog_tests/usdt.c
index f4be5269fa90..69759b27794d 100644
--- a/tools/testing/selftests/bpf/prog_tests/usdt.c
+++ b/tools/testing/selftests/bpf/prog_tests/usdt.c
@@ -247,6 +247,96 @@ cleanup:
#undef TRIGGER
}
+#ifdef __x86_64__
+extern void usdt_1(void);
+extern void usdt_2(void);
+
+static unsigned char nop1[1] = { 0x90 };
+static unsigned char nop1_nop5_combo[6] = { 0x90, 0x0f, 0x1f, 0x44, 0x00, 0x00 };
+
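+/* Search the first bytes of fn for the given instruction byte pattern */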
+static void *find_instr(void *fn, unsigned char *instr, size_t cnt)
+{
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ if (!memcmp(instr, fn + i, cnt))
+ return fn + i;
+ }
+ return NULL;
+}
+
+static void subtest_optimized_attach(void)
+{
+ struct test_usdt *skel;
+ __u8 *addr_1, *addr_2;
+
+ /* usdt_1 USDT probe has a single nop instruction */
+ addr_1 = find_instr(usdt_1, nop1_nop5_combo, 6);
+ if (!ASSERT_NULL(addr_1, "usdt_1_find_nop1_nop5_combo"))
+ return;
+
+ addr_1 = find_instr(usdt_1, nop1, 1);
+ if (!ASSERT_OK_PTR(addr_1, "usdt_1_find_nop1"))
+ return;
+
+ /* usdt_2 USDT probe has the nop,nop5 instruction combo */
+ addr_2 = find_instr(usdt_2, nop1_nop5_combo, 6);
+ if (!ASSERT_OK_PTR(addr_2, "usdt_2_find_nop1_nop5_combo"))
+ return;
+
+ skel = test_usdt__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_usdt__open_and_load"))
+ return;
+
+ skel->bss->expected_ip = (unsigned long) addr_1;
+
+ /*
+ * Attach the program on top of usdt_1, which is a single-nop probe,
+ * so the probe won't get optimized.
+ */
+ skel->links.usdt_executed = bpf_program__attach_usdt(skel->progs.usdt_executed,
+ 0 /*self*/, "/proc/self/exe",
+ "optimized_attach", "usdt_1", NULL);
+ if (!ASSERT_OK_PTR(skel->links.usdt_executed, "bpf_program__attach_usdt"))
+ goto cleanup;
+
+ usdt_1();
+ usdt_1();
+
+ /* int3 is on addr_1 address */
+ ASSERT_EQ(*addr_1, 0xcc, "int3");
+ ASSERT_EQ(skel->bss->executed, 2, "executed");
+
+ bpf_link__destroy(skel->links.usdt_executed);
+
+ /* We expect the IP of the nop5 instruction */
+ skel->bss->expected_ip = (unsigned long) addr_2 + 1;
+
+ /*
+ * Attach the program on top of usdt_2, which is a probe defined on top
+ * of the nop1,nop5 combo, so the probe gets optimized via the nop5.
+ */
+ skel->links.usdt_executed = bpf_program__attach_usdt(skel->progs.usdt_executed,
+ 0 /*self*/, "/proc/self/exe",
+ "optimized_attach", "usdt_2", NULL);
+ if (!ASSERT_OK_PTR(skel->links.usdt_executed, "bpf_program__attach_usdt"))
+ goto cleanup;
+
+ usdt_2();
+ usdt_2();
+
+ /* nop stays on addr_2 address */
+ ASSERT_EQ(*addr_2, 0x90, "nop");
+
+ /* call is on addr_2 + 1 address */
+ ASSERT_EQ(*(addr_2 + 1), 0xe8, "call");
+ ASSERT_EQ(skel->bss->executed, 4, "executed");
+
+cleanup:
+ test_usdt__destroy(skel);
+}
+#endif
+
unsigned short test_usdt_100_semaphore SEC(".probes");
unsigned short test_usdt_300_semaphore SEC(".probes");
unsigned short test_usdt_400_semaphore SEC(".probes");
@@ -516,6 +606,8 @@ void test_usdt(void)
#ifdef __x86_64__
if (test__start_subtest("basic_optimized"))
subtest_basic_usdt(true);
+ if (test__start_subtest("optimized_attach"))
+ subtest_optimized_attach();
#endif
if (test__start_subtest("multispec"))
subtest_multispec_usdt();
diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
index 302286a80154..a96b25ebff23 100644
--- a/tools/testing/selftests/bpf/prog_tests/verifier.c
+++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
@@ -3,6 +3,7 @@
#include <test_progs.h>
#include "cap_helpers.h"
+#include "verifier_align.skel.h"
#include "verifier_and.skel.h"
#include "verifier_arena.skel.h"
#include "verifier_arena_large.skel.h"
@@ -53,6 +54,7 @@
#include "verifier_leak_ptr.skel.h"
#include "verifier_linked_scalars.skel.h"
#include "verifier_live_stack.skel.h"
+#include "verifier_liveness_exp.skel.h"
#include "verifier_load_acquire.skel.h"
#include "verifier_loops1.skel.h"
#include "verifier_lwt.skel.h"
@@ -92,6 +94,7 @@
#include "verifier_stack_ptr.skel.h"
#include "verifier_store_release.skel.h"
#include "verifier_subprog_precision.skel.h"
+#include "verifier_subprog_topo.skel.h"
#include "verifier_subreg.skel.h"
#include "verifier_tailcall.skel.h"
#include "verifier_tailcall_jit.skel.h"
@@ -114,6 +117,7 @@
#include "verifier_lsm.skel.h"
#include "verifier_jit_inline.skel.h"
#include "irq.skel.h"
+#include "verifier_ctx_ptr_param.skel.h"
#define MAX_ENTRIES 11
@@ -149,6 +153,7 @@ static void run_tests_aux(const char *skel_name,
#define RUN(skel) run_tests_aux(#skel, skel##__elf_bytes, NULL)
+void test_verifier_align(void) { RUN(verifier_align); }
void test_verifier_and(void) { RUN(verifier_and); }
void test_verifier_arena(void) { RUN(verifier_arena); }
void test_verifier_arena_large(void) { RUN(verifier_arena_large); }
@@ -171,7 +176,7 @@ void test_verifier_cgroup_skb(void) { RUN(verifier_cgroup_skb); }
void test_verifier_cgroup_storage(void) { RUN(verifier_cgroup_storage); }
void test_verifier_const(void) { RUN(verifier_const); }
void test_verifier_const_or(void) { RUN(verifier_const_or); }
-void test_verifier_ctx(void) { RUN(verifier_ctx); }
+void test_verifier_ctx(void) { RUN_TESTS(verifier_ctx); }
void test_verifier_ctx_sk_msg(void) { RUN(verifier_ctx_sk_msg); }
void test_verifier_d_path(void) { RUN(verifier_d_path); }
void test_verifier_default_trusted_ptr(void) { RUN_TESTS(verifier_default_trusted_ptr); }
@@ -198,6 +203,7 @@ void test_verifier_ldsx(void) { RUN(verifier_ldsx); }
void test_verifier_leak_ptr(void) { RUN(verifier_leak_ptr); }
void test_verifier_linked_scalars(void) { RUN(verifier_linked_scalars); }
void test_verifier_live_stack(void) { RUN(verifier_live_stack); }
+void test_verifier_liveness_exp(void) { RUN(verifier_liveness_exp); }
void test_verifier_loops1(void) { RUN(verifier_loops1); }
void test_verifier_lwt(void) { RUN(verifier_lwt); }
void test_verifier_map_in_map(void) { RUN(verifier_map_in_map); }
@@ -235,6 +241,7 @@ void test_verifier_spin_lock(void) { RUN(verifier_spin_lock); }
void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); }
void test_verifier_store_release(void) { RUN(verifier_store_release); }
void test_verifier_subprog_precision(void) { RUN(verifier_subprog_precision); }
+void test_verifier_subprog_topo(void) { RUN(verifier_subprog_topo); }
void test_verifier_subreg(void) { RUN(verifier_subreg); }
void test_verifier_tailcall(void) { RUN(verifier_tailcall); }
void test_verifier_tailcall_jit(void) { RUN(verifier_tailcall_jit); }
@@ -257,6 +264,7 @@ void test_verifier_lsm(void) { RUN(verifier_lsm); }
void test_irq(void) { RUN(irq); }
void test_verifier_mtu(void) { RUN(verifier_mtu); }
void test_verifier_jit_inline(void) { RUN(verifier_jit_inline); }
+void test_verifier_ctx_ptr_param(void) { RUN(verifier_ctx_ptr_param); }
static int init_test_val_map(struct bpf_object *obj, char *map_name)
{
diff --git a/tools/testing/selftests/bpf/prog_tests/verifier_log.c b/tools/testing/selftests/bpf/prog_tests/verifier_log.c
index aaa2854974c0..c01c0114af1b 100644
--- a/tools/testing/selftests/bpf/prog_tests/verifier_log.c
+++ b/tools/testing/selftests/bpf/prog_tests/verifier_log.c
@@ -25,10 +25,10 @@ static bool check_prog_load(int prog_fd, bool expect_err, const char *tag)
static struct {
/* strategically placed before others to avoid accidental modification by kernel */
- char filler[1024];
- char buf[1024];
+ char filler[16384];
+ char buf[16384];
/* strategically placed after buf[] to catch more accidental corruptions */
- char reference[1024];
+ char reference[16384];
} logs;
static const struct bpf_insn *insns;
static size_t insn_cnt;
diff --git a/tools/testing/selftests/bpf/progs/bench_local_storage_create.c b/tools/testing/selftests/bpf/progs/bench_local_storage_create.c
index c8ec0d0368e4..25ca6045fea3 100644
--- a/tools/testing/selftests/bpf/progs/bench_local_storage_create.c
+++ b/tools/testing/selftests/bpf/progs/bench_local_storage_create.c
@@ -8,7 +8,6 @@
long create_errs = 0;
long create_cnts = 0;
-long kmalloc_cnts = 0;
__u32 bench_pid = 0;
struct storage {
@@ -29,16 +28,6 @@ struct {
__type(value, struct storage);
} task_storage_map SEC(".maps");
-SEC("raw_tp/kmalloc")
-int BPF_PROG(kmalloc, unsigned long call_site, const void *ptr,
- size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags,
- int node)
-{
- __sync_fetch_and_add(&kmalloc_cnts, 1);
-
- return 0;
-}
-
SEC("tp_btf/sched_process_fork")
int BPF_PROG(sched_process_fork, struct task_struct *parent, struct task_struct *child)
{
diff --git a/tools/testing/selftests/bpf/progs/bpf_gotox.c b/tools/testing/selftests/bpf/progs/bpf_gotox.c
index 216c71b94c64..99b3c9c9a01c 100644
--- a/tools/testing/selftests/bpf/progs/bpf_gotox.c
+++ b/tools/testing/selftests/bpf/progs/bpf_gotox.c
@@ -421,6 +421,36 @@ int use_nonstatic_global_other_sec(void *ctx)
return __nonstatic_global(in_user);
}
+SEC("syscall")
+int load_with_nonzero_offset(struct simple_ctx *ctx)
+{
+ void *jj[] = { &&l1, &&l2, &&l3 };
+
+ /*
+ * This makes LLVM generate a load from the jj map with an offset:
+ * r1 = 0x0 ll
+ * r1 = *(u64 *)(r1 + 0x10)
+ * gotox r1
+ */
+ if (ctx->x == 2)
+ goto *jj[ctx->x];
+
+ ret_user = 1;
+ return 1;
+
+l1:
+ /* never reached, but leave it here to outsmart LLVM */
+ ret_user = 0;
+ return 0;
+l2:
+ /* never reached, but leave it here to outsmart LLVM */
+ ret_user = 3;
+ return 3;
+l3:
+ ret_user = 5;
+ return 5;
+}
+
#else /* __BPF_FEATURE_GOTOX */
#define SKIP_TEST(TEST_NAME) \
@@ -442,6 +472,7 @@ SKIP_TEST(use_static_global_other_sec);
SKIP_TEST(use_nonstatic_global1);
SKIP_TEST(use_nonstatic_global2);
SKIP_TEST(use_nonstatic_global_other_sec);
+SKIP_TEST(load_with_nonzero_offset);
#endif /* __BPF_FEATURE_GOTOX */
diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h
index c9bfbe1bafc1..dcd78a3a9052 100644
--- a/tools/testing/selftests/bpf/progs/bpf_misc.h
+++ b/tools/testing/selftests/bpf/progs/bpf_misc.h
@@ -103,8 +103,8 @@
* - TEST_DATA_LEN
* __retval_unpriv Same, but load program in unprivileged mode.
*
- * __description Text to be used instead of a program name for display
- * and filtering purposes.
+ * __description Text to be used for display and as an additional filter
+ * alias, while the original program name stays matchable.
*
* __log_level Log level to use for the program, numeric value expected.
*
@@ -130,39 +130,41 @@
* __linear_size Specify the size of the linear area of non-linear skbs, or
* 0 for linear skbs.
*/
-#define __msg(msg) __attribute__((btf_decl_tag("comment:test_expect_msg=" XSTR(__COUNTER__) "=" msg)))
-#define __not_msg(msg) __attribute__((btf_decl_tag("comment:test_expect_not_msg=" XSTR(__COUNTER__) "=" msg)))
-#define __xlated(msg) __attribute__((btf_decl_tag("comment:test_expect_xlated=" XSTR(__COUNTER__) "=" msg)))
-#define __jited(msg) __attribute__((btf_decl_tag("comment:test_jited=" XSTR(__COUNTER__) "=" msg)))
-#define __failure __attribute__((btf_decl_tag("comment:test_expect_failure")))
-#define __success __attribute__((btf_decl_tag("comment:test_expect_success")))
-#define __description(desc) __attribute__((btf_decl_tag("comment:test_description=" desc)))
-#define __msg_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_msg_unpriv=" XSTR(__COUNTER__) "=" msg)))
-#define __not_msg_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_not_msg_unpriv=" XSTR(__COUNTER__) "=" msg)))
-#define __xlated_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_xlated_unpriv=" XSTR(__COUNTER__) "=" msg)))
-#define __jited_unpriv(msg) __attribute__((btf_decl_tag("comment:test_jited=" XSTR(__COUNTER__) "=" msg)))
-#define __failure_unpriv __attribute__((btf_decl_tag("comment:test_expect_failure_unpriv")))
-#define __success_unpriv __attribute__((btf_decl_tag("comment:test_expect_success_unpriv")))
-#define __log_level(lvl) __attribute__((btf_decl_tag("comment:test_log_level="#lvl)))
-#define __flag(flag) __attribute__((btf_decl_tag("comment:test_prog_flags="#flag)))
-#define __retval(val) __attribute__((btf_decl_tag("comment:test_retval="XSTR(val))))
-#define __retval_unpriv(val) __attribute__((btf_decl_tag("comment:test_retval_unpriv="XSTR(val))))
-#define __auxiliary __attribute__((btf_decl_tag("comment:test_auxiliary")))
-#define __auxiliary_unpriv __attribute__((btf_decl_tag("comment:test_auxiliary_unpriv")))
-#define __btf_path(path) __attribute__((btf_decl_tag("comment:test_btf_path=" path)))
-#define __arch(arch) __attribute__((btf_decl_tag("comment:test_arch=" arch)))
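+/* XSTR(__COUNTER__) makes each decl tag string unique, so duplicate annotations
+ * are preserved and keep their source order.
+ */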
+#define __test_tag(tag) __attribute__((btf_decl_tag("comment:" XSTR(__COUNTER__) ":" tag)))
+
+#define __msg(msg) __test_tag("test_expect_msg=" msg)
+#define __not_msg(msg) __test_tag("test_expect_not_msg=" msg)
+#define __xlated(msg) __test_tag("test_expect_xlated=" msg)
+#define __jited(msg) __test_tag("test_jited=" msg)
+#define __failure __test_tag("test_expect_failure")
+#define __success __test_tag("test_expect_success")
+#define __description(desc) __test_tag("test_description=" desc)
+#define __msg_unpriv(msg) __test_tag("test_expect_msg_unpriv=" msg)
+#define __not_msg_unpriv(msg) __test_tag("test_expect_not_msg_unpriv=" msg)
+#define __xlated_unpriv(msg) __test_tag("test_expect_xlated_unpriv=" msg)
+#define __jited_unpriv(msg) __test_tag("test_jited_unpriv=" msg)
+#define __failure_unpriv __test_tag("test_expect_failure_unpriv")
+#define __success_unpriv __test_tag("test_expect_success_unpriv")
+#define __log_level(lvl) __test_tag("test_log_level=" #lvl)
+#define __flag(flag) __test_tag("test_prog_flags=" #flag)
+#define __retval(val) __test_tag("test_retval=" XSTR(val))
+#define __retval_unpriv(val) __test_tag("test_retval_unpriv=" XSTR(val))
+#define __auxiliary __test_tag("test_auxiliary")
+#define __auxiliary_unpriv __test_tag("test_auxiliary_unpriv")
+#define __btf_path(path) __test_tag("test_btf_path=" path)
+#define __arch(arch) __test_tag("test_arch=" arch)
#define __arch_x86_64 __arch("X86_64")
#define __arch_arm64 __arch("ARM64")
#define __arch_riscv64 __arch("RISCV64")
#define __arch_s390x __arch("s390x")
-#define __caps_unpriv(caps) __attribute__((btf_decl_tag("comment:test_caps_unpriv=" EXPAND_QUOTE(caps))))
-#define __load_if_JITed() __attribute__((btf_decl_tag("comment:load_mode=jited")))
-#define __load_if_no_JITed() __attribute__((btf_decl_tag("comment:load_mode=no_jited")))
-#define __stderr(msg) __attribute__((btf_decl_tag("comment:test_expect_stderr=" XSTR(__COUNTER__) "=" msg)))
-#define __stderr_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_stderr_unpriv=" XSTR(__COUNTER__) "=" msg)))
-#define __stdout(msg) __attribute__((btf_decl_tag("comment:test_expect_stdout=" XSTR(__COUNTER__) "=" msg)))
-#define __stdout_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_stdout_unpriv=" XSTR(__COUNTER__) "=" msg)))
-#define __linear_size(sz) __attribute__((btf_decl_tag("comment:test_linear_size=" XSTR(sz))))
+#define __caps_unpriv(caps) __test_tag("test_caps_unpriv=" EXPAND_QUOTE(caps))
+#define __load_if_JITed() __test_tag("load_mode=jited")
+#define __load_if_no_JITed() __test_tag("load_mode=no_jited")
+#define __stderr(msg) __test_tag("test_expect_stderr=" msg)
+#define __stderr_unpriv(msg) __test_tag("test_expect_stderr_unpriv=" msg)
+#define __stdout(msg) __test_tag("test_expect_stdout=" msg)
+#define __stdout_unpriv(msg) __test_tag("test_expect_stdout_unpriv=" msg)
+#define __linear_size(sz) __test_tag("test_linear_size=" XSTR(sz))
/* Define common capabilities tested using __caps_unpriv */
#define CAP_NET_ADMIN 12
@@ -188,6 +190,10 @@
#define POINTER_VALUE 0xbadcafe
#define TEST_DATA_LEN 64
+#ifndef __aligned
+#define __aligned(x) __attribute__((aligned(x)))
+#endif
+
#ifndef __used
#define __used __attribute__((used))
#endif
diff --git a/tools/testing/selftests/bpf/progs/bpf_smc.c b/tools/testing/selftests/bpf/progs/bpf_smc.c
index 70d8b08f5914..6263a45bf006 100644
--- a/tools/testing/selftests/bpf/progs/bpf_smc.c
+++ b/tools/testing/selftests/bpf/progs/bpf_smc.c
@@ -8,6 +8,10 @@
char _license[] SEC("license") = "GPL";
+#ifndef SMC_HS_CTRL_NAME_MAX
+#define SMC_HS_CTRL_NAME_MAX 16
+#endif
+
enum {
BPF_SMC_LISTEN = 10,
};
@@ -18,6 +22,20 @@ struct smc_sock___local {
bool use_fallback;
} __attribute__((preserve_access_index));
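+/* Minimal CO-RE shadows of the kernel types; libbpf strips the ___local suffix
+ * when relocating fields.
+ */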
+struct smc_hs_ctrl___local {
+ char name[SMC_HS_CTRL_NAME_MAX];
+ int (*syn_option)(struct tcp_sock *);
+ int (*synack_option)(const struct tcp_sock *, struct inet_request_sock *);
+} __attribute__((preserve_access_index));
+
+struct netns_smc___local {
+ struct smc_hs_ctrl___local *hs_ctrl;
+} __attribute__((preserve_access_index));
+
+struct net___local {
+ struct netns_smc___local smc;
+} __attribute__((preserve_access_index));
+
int smc_cnt = 0;
int fallback_cnt = 0;
@@ -88,8 +106,14 @@ int BPF_PROG(smc_run, int family, int type, int protocol)
task = bpf_get_current_task_btf();
/* Prevent from affecting other tests */
- if (!task || !task->nsproxy->net_ns->smc.hs_ctrl)
+ if (!task) {
return protocol;
+ } else {
+ struct net___local *net = (struct net___local *)task->nsproxy->net_ns;
+
+ if (!bpf_core_field_exists(struct net___local, smc) || !net->smc.hs_ctrl)
+ return protocol;
+ }
return IPPROTO_SMC;
}
@@ -110,7 +134,7 @@ int BPF_PROG(bpf_smc_set_tcp_option, struct tcp_sock *tp)
}
SEC(".struct_ops")
-struct smc_hs_ctrl linkcheck = {
+struct smc_hs_ctrl___local linkcheck = {
.name = "linkcheck",
.syn_option = (void *)bpf_smc_set_tcp_option,
.synack_option = (void *)bpf_smc_set_tcp_option_cond,
diff --git a/tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c b/tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c
index 59fb70a3cc50..06a385c9d85b 100644
--- a/tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c
+++ b/tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c
@@ -26,12 +26,18 @@ int cgroup_memcg_query(struct bpf_iter__cgroup *ctx)
bpf_mem_cgroup_flush_stats(memcg);
- memcg_query.nr_anon_mapped = bpf_mem_cgroup_page_state(memcg, NR_ANON_MAPPED);
- memcg_query.nr_shmem = bpf_mem_cgroup_page_state(memcg, NR_SHMEM);
- memcg_query.nr_file_pages = bpf_mem_cgroup_page_state(memcg, NR_FILE_PAGES);
- memcg_query.nr_file_mapped = bpf_mem_cgroup_page_state(memcg, NR_FILE_MAPPED);
- memcg_query.memcg_kmem = bpf_mem_cgroup_page_state(memcg, MEMCG_KMEM);
- memcg_query.pgfault = bpf_mem_cgroup_vm_events(memcg, PGFAULT);
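+ /* bpf_core_enum_value() resolves the enum constants against the running
+ * kernel's BTF at load time.
+ */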
+ memcg_query.nr_anon_mapped = bpf_mem_cgroup_page_state(
+ memcg,
+ bpf_core_enum_value(enum node_stat_item, NR_ANON_MAPPED));
+ memcg_query.nr_shmem = bpf_mem_cgroup_page_state(
+ memcg, bpf_core_enum_value(enum node_stat_item, NR_SHMEM));
+ memcg_query.nr_file_pages = bpf_mem_cgroup_page_state(
+ memcg, bpf_core_enum_value(enum node_stat_item, NR_FILE_PAGES));
+ memcg_query.nr_file_mapped = bpf_mem_cgroup_page_state(
+ memcg,
+ bpf_core_enum_value(enum node_stat_item, NR_FILE_MAPPED));
+ memcg_query.pgfault = bpf_mem_cgroup_vm_events(
+ memcg, bpf_core_enum_value(enum vm_event_item, PGFAULT));
bpf_put_mem_cgroup(memcg);
diff --git a/tools/testing/selftests/bpf/progs/cgroup_storage.c b/tools/testing/selftests/bpf/progs/cgroup_storage.c
index db1e4d2d3281..59da1d95e5b9 100644
--- a/tools/testing/selftests/bpf/progs/cgroup_storage.c
+++ b/tools/testing/selftests/bpf/progs/cgroup_storage.c
@@ -21,4 +21,47 @@ int bpf_prog(struct __sk_buff *skb)
return (*counter & 1);
}
+/* Maps for OOB test */
+struct {
+ __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
+ __type(key, struct bpf_cgroup_storage_key);
+ __type(value, __u32); /* 4-byte value - not 8-byte aligned */
+} cgroup_storage_oob SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u32); /* 4-byte value - same as cgroup storage */
+} lru_map SEC(".maps");
+
+SEC("cgroup/sock_create")
+int trigger_oob(struct bpf_sock *sk)
+{
+ __u32 key = 0;
+ __u32 *cgroup_val;
+ __u32 value = 0x12345678;
+
+ /* Get cgroup storage value */
+ cgroup_val = bpf_get_local_storage(&cgroup_storage_oob, 0);
+ if (!cgroup_val)
+ return 0;
+
+ /* Initialize cgroup storage */
+ *cgroup_val = value;
+
+ /* This triggers the OOB read:
+ * bpf_map_update_elem() -> htab_map_update_elem() ->
+ * pcpu_init_value() -> copy_map_value_long() ->
+ * bpf_obj_memcpy(..., long_memcpy=true) ->
+ * bpf_long_memcpy(dst, src, round_up(4, 8))
+ *
+ * The copy size is rounded up to 8 bytes, but cgroup_val
+ * points to a 4-byte buffer, causing a 4-byte OOB read.
+ */
+ bpf_map_update_elem(&lru_map, &key, cgroup_val, BPF_ANY);
+
+ return 1;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/clone_attach_btf_id.c b/tools/testing/selftests/bpf/progs/clone_attach_btf_id.c
new file mode 100644
index 000000000000..0ffa3ec3e1a0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/clone_attach_btf_id.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(fentry_handler, int a)
+{
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/connect_force_port4.c b/tools/testing/selftests/bpf/progs/connect_force_port4.c
index 27a632dd382e..d5be6a559d6a 100644
--- a/tools/testing/selftests/bpf/progs/connect_force_port4.c
+++ b/tools/testing/selftests/bpf/progs/connect_force_port4.c
@@ -14,6 +14,8 @@
char _license[] SEC("license") = "GPL";
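+/* Backend port; the userspace test is expected to set it before connecting */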
+__u16 port = 0;
+
struct svc_addr {
__be32 addr;
__be16 port;
@@ -40,7 +42,7 @@ int connect4(struct bpf_sock_addr *ctx)
if (bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)) != 0)
return 0;
- /* Rewire service 1.2.3.4:60000 to backend 127.0.0.1:60123. */
+ /* Rewire service 1.2.3.4:60000 to backend 127.0.0.1:port. */
if (ctx->user_port == bpf_htons(60000)) {
orig = bpf_sk_storage_get(&service_mapping, ctx->sk, 0,
BPF_SK_STORAGE_GET_F_CREATE);
@@ -51,7 +53,7 @@ int connect4(struct bpf_sock_addr *ctx)
orig->port = ctx->user_port;
ctx->user_ip4 = bpf_htonl(0x7f000001);
- ctx->user_port = bpf_htons(60123);
+ ctx->user_port = bpf_htons(port);
}
return 1;
}
@@ -63,7 +65,7 @@ int getsockname4(struct bpf_sock_addr *ctx)
return 1;
/* Expose local server as 1.2.3.4:60000 to client. */
- if (ctx->user_port == bpf_htons(60123)) {
+ if (ctx->user_port == bpf_htons(port)) {
ctx->user_ip4 = bpf_htonl(0x01020304);
ctx->user_port = bpf_htons(60000);
}
@@ -79,7 +81,7 @@ int getpeername4(struct bpf_sock_addr *ctx)
return 1;
/* Expose service 1.2.3.4:60000 as peer instead of backend. */
- if (ctx->user_port == bpf_htons(60123)) {
+ if (ctx->user_port == bpf_htons(port)) {
orig = bpf_sk_storage_get(&service_mapping, ctx->sk, 0, 0);
if (orig) {
ctx->user_ip4 = orig->addr;
diff --git a/tools/testing/selftests/bpf/progs/connect_force_port6.c b/tools/testing/selftests/bpf/progs/connect_force_port6.c
index 19cad93e612f..a1a671b39083 100644
--- a/tools/testing/selftests/bpf/progs/connect_force_port6.c
+++ b/tools/testing/selftests/bpf/progs/connect_force_port6.c
@@ -13,6 +13,8 @@
char _license[] SEC("license") = "GPL";
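+/* Backend port; the userspace test is expected to set it before connecting */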
+__u16 port = 0;
+
struct svc_addr {
__be32 addr[4];
__be16 port;
@@ -39,7 +41,7 @@ int connect6(struct bpf_sock_addr *ctx)
if (bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)) != 0)
return 0;
- /* Rewire service [fc00::1]:60000 to backend [::1]:60124. */
+ /* Rewire service [fc00::1]:60000 to backend [::1]:port. */
if (ctx->user_port == bpf_htons(60000)) {
orig = bpf_sk_storage_get(&service_mapping, ctx->sk, 0,
BPF_SK_STORAGE_GET_F_CREATE);
@@ -56,7 +58,7 @@ int connect6(struct bpf_sock_addr *ctx)
ctx->user_ip6[1] = 0;
ctx->user_ip6[2] = 0;
ctx->user_ip6[3] = bpf_htonl(1);
- ctx->user_port = bpf_htons(60124);
+ ctx->user_port = bpf_htons(port);
}
return 1;
}
@@ -68,7 +70,7 @@ int getsockname6(struct bpf_sock_addr *ctx)
return 1;
/* Expose local server as [fc00::1]:60000 to client. */
- if (ctx->user_port == bpf_htons(60124)) {
+ if (ctx->user_port == bpf_htons(port)) {
ctx->user_ip6[0] = bpf_htonl(0xfc000000);
ctx->user_ip6[1] = 0;
ctx->user_ip6[2] = 0;
@@ -87,7 +89,7 @@ int getpeername6(struct bpf_sock_addr *ctx)
return 1;
/* Expose service [fc00::1]:60000 as peer instead of backend. */
- if (ctx->user_port == bpf_htons(60124)) {
+ if (ctx->user_port == bpf_htons(port)) {
orig = bpf_sk_storage_get(&service_mapping, ctx->sk, 0, 0);
if (orig) {
ctx->user_ip6[0] = orig->addr[0];
diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c
index 8f2ae9640886..b62773ce5219 100644
--- a/tools/testing/selftests/bpf/progs/dynptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c
@@ -1993,3 +1993,118 @@ int test_dynptr_reg_type(void *ctx)
global_call_bpf_dynptr((const struct bpf_dynptr *)current);
return 0;
}
+
+/* Overwriting a referenced dynptr is allowed if a clone still holds the ref */
+SEC("?raw_tp")
+__success
+int dynptr_overwrite_ref_with_clone(void *ctx)
+{
+ struct bpf_dynptr ptr, clone;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);
+
+ bpf_dynptr_clone(&ptr, &clone);
+
+ /* Overwrite the original - clone still holds the ref */
+ *(volatile __u8 *)&ptr = 0;
+
+ bpf_ringbuf_discard_dynptr(&clone, 0);
+
+ return 0;
+}
+
+/* Overwriting the last referenced dynptr should still be rejected */
+SEC("?raw_tp")
+__failure __msg("cannot overwrite referenced dynptr")
+int dynptr_overwrite_ref_last_clone(void *ctx)
+{
+ struct bpf_dynptr ptr, clone;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);
+
+ bpf_dynptr_clone(&ptr, &clone);
+
+ /* Overwrite the original - clone still holds the ref, OK */
+ *(volatile __u8 *)&ptr = 0;
+
+ /* Overwrite the last holder - this should fail */
+ *(volatile __u8 *)&clone = 0;
+
+ return 0;
+}
+
+/* Overwriting a clone should be allowed if the original still holds the ref */
+SEC("?raw_tp")
+__success
+int dynptr_overwrite_clone_with_original(void *ctx)
+{
+ struct bpf_dynptr ptr, clone;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);
+
+ bpf_dynptr_clone(&ptr, &clone);
+
+ /* Overwrite the clone - original still holds the ref */
+ *(volatile __u8 *)&clone = 0;
+
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+
+ return 0;
+}
+
+/* Data slices from the destroyed dynptr should be invalidated */
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'scalar'")
+int dynptr_overwrite_ref_invalidate_slice(void *ctx)
+{
+ struct bpf_dynptr ptr, clone;
+ int *data;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+ data = bpf_dynptr_data(&ptr, 0, sizeof(val));
+ if (!data)
+ return 0;
+
+ bpf_dynptr_clone(&ptr, &clone);
+
+ /* Overwrite the original - clone holds the ref */
+ *(volatile __u8 *)&ptr = 0;
+
+ /* data was from the original dynptr, should be invalid now */
+ *data = 123;
+
+ return 0;
+}
+
+/*
+ * Data slices from a dynptr clone should remain valid after
+ * overwriting the original dynptr
+ */
+SEC("?raw_tp")
+__success
+int dynptr_overwrite_ref_clone_slice_valid(void *ctx)
+{
+ struct bpf_dynptr ptr, clone;
+ int *data;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+ bpf_dynptr_clone(&ptr, &clone);
+
+ data = bpf_dynptr_data(&clone, 0, sizeof(val));
+ if (!data) {
+ bpf_ringbuf_discard_dynptr(&clone, 0);
+ return 0;
+ }
+
+ /* Overwrite the original - clone holds the ref */
+ *(volatile __u8 *)&ptr = 0;
+
+ /* data is from the clone, should still be valid */
+ *data = 123;
+
+ bpf_ringbuf_discard_dynptr(&clone, 0);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/empty_skb.c b/tools/testing/selftests/bpf/progs/empty_skb.c
index 4b0cd6753251..44326f5cc8bb 100644
--- a/tools/testing/selftests/bpf/progs/empty_skb.c
+++ b/tools/testing/selftests/bpf/progs/empty_skb.c
@@ -35,3 +35,10 @@ int tc_redirect_egress(struct __sk_buff *skb)
ret = bpf_clone_redirect(skb, ifindex, 0);
return 0;
}
+
+SEC("tc")
+int tc_adjust_room(struct __sk_buff *skb)
+{
+ ret = bpf_skb_adjust_room(skb, 4, BPF_ADJ_ROOM_NET, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/exceptions.c b/tools/testing/selftests/bpf/progs/exceptions.c
index f09cd14d8e04..4206f59d7b86 100644
--- a/tools/testing/selftests/bpf/progs/exceptions.c
+++ b/tools/testing/selftests/bpf/progs/exceptions.c
@@ -109,6 +109,20 @@ int exception_tail_call(struct __sk_buff *ctx) {
return ret + 8;
}
+__weak
+void throw_11(void)
+{
+ bpf_throw(11);
+}
+
+SEC("tc")
+int exception_throw_from_void_global(struct __sk_buff *ctx)
+{
+ throw_11();
+
+ return 0;
+}
+
__noinline int exception_ext_global(struct __sk_buff *ctx)
{
volatile int ret = 0;
diff --git a/tools/testing/selftests/bpf/progs/exceptions_assert.c b/tools/testing/selftests/bpf/progs/exceptions_assert.c
index 858af5988a38..e4abf4172fca 100644
--- a/tools/testing/selftests/bpf/progs/exceptions_assert.c
+++ b/tools/testing/selftests/bpf/progs/exceptions_assert.c
@@ -59,7 +59,7 @@ check_assert(s64, >=, ge_neg, INT_MIN);
SEC("?tc")
__log_level(2) __failure
-__msg(": R0=0 R1=ctx() R2=scalar(smin=0xffffffff80000002,smax=smax32=0x7ffffffd,smin32=0x80000002) R10=fp0")
+__msg(": R1=ctx() R2=scalar(smin=0xffffffff80000002,smax=smax32=0x7ffffffd,smin32=0x80000002) R10=fp0")
int check_assert_range_s64(struct __sk_buff *ctx)
{
struct bpf_sock *sk = ctx->sk;
@@ -86,7 +86,7 @@ int check_assert_range_u64(struct __sk_buff *ctx)
SEC("?tc")
__log_level(2) __failure
-__msg(": R0=0 R1=ctx() R2=4096 R10=fp0")
+__msg(": R1=ctx() R2=4096 R10=fp0")
int check_assert_single_range_s64(struct __sk_buff *ctx)
{
struct bpf_sock *sk = ctx->sk;
@@ -114,7 +114,7 @@ int check_assert_single_range_u64(struct __sk_buff *ctx)
SEC("?tc")
__log_level(2) __failure
-__msg(": R1=pkt(off=64,r=64) R2=pkt_end() R6=pkt(r=64) R10=fp0")
+__msg(": R6=pkt(r=64) R10=fp0")
int check_assert_generic(struct __sk_buff *ctx)
{
u8 *data_end = (void *)(long)ctx->data_end;
diff --git a/tools/testing/selftests/bpf/progs/exceptions_fail.c b/tools/testing/selftests/bpf/progs/exceptions_fail.c
index 9ea1353488d7..051e2b6f2694 100644
--- a/tools/testing/selftests/bpf/progs/exceptions_fail.c
+++ b/tools/testing/selftests/bpf/progs/exceptions_fail.c
@@ -34,11 +34,15 @@ struct {
private(A) struct bpf_spin_lock lock;
private(A) struct bpf_rb_root rbtree __contains(foo, node);
-__noinline void *exception_cb_bad_ret_type(u64 cookie)
+__noinline void *exception_cb_bad_ret_type1(u64 cookie)
{
return NULL;
}
+__noinline void exception_cb_bad_ret_type2(u64 cookie)
+{
+}
+
__noinline int exception_cb_bad_arg_0(void)
{
return 0;
@@ -55,8 +59,8 @@ __noinline int exception_cb_ok_arg_small(int a)
}
SEC("?tc")
-__exception_cb(exception_cb_bad_ret_type)
-__failure __msg("Global function exception_cb_bad_ret_type() doesn't return scalar.")
+__exception_cb(exception_cb_bad_ret_type1)
+__failure __msg("Global function exception_cb_bad_ret_type1() return value not void or scalar.")
int reject_exception_cb_type_1(struct __sk_buff *ctx)
{
bpf_throw(0);
@@ -90,6 +94,15 @@ int reject_exception_cb_type_4(struct __sk_buff *ctx)
return 0;
}
+SEC("?tc")
+__exception_cb(exception_cb_bad_ret_type2)
+__failure __msg("exception cb cannot return void")
+int reject_exception_cb_type_5(struct __sk_buff *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
__noinline
static int timer_cb(void *map, int *key, struct bpf_timer *timer)
{
@@ -353,6 +366,21 @@ int reject_exception_throw_cb_diff(struct __sk_buff *ctx)
return 0;
}
+__weak
+void foo(void)
+{
+ bpf_throw(1);
+}
+
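+/* The default exception callback returns the thrown cookie (1), which is out of
+ * range for this attach point.
+ */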
+SEC("?fentry/bpf_check")
+__failure __msg("At program exit the register R1 has smin=1 smax=1 should")
+int reject_out_of_range_global_throw(struct __sk_buff *skb)
+{
+ foo();
+
+ return 0;
+}
+
__noinline static int always_throws(void)
{
bpf_throw(0);
diff --git a/tools/testing/selftests/bpf/progs/freplace_int_with_void.c b/tools/testing/selftests/bpf/progs/freplace_int_with_void.c
new file mode 100644
index 000000000000..cbb8f8ff2581
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/freplace_int_with_void.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <linux/pkt_cls.h>
+#include <bpf/bpf_helpers.h>
+
+SEC("freplace/global_func2")
+void test_freplace_int_with_void(struct __sk_buff *skb)
+{
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/freplace_void.c b/tools/testing/selftests/bpf/progs/freplace_void.c
new file mode 100644
index 000000000000..68b114f477fe
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/freplace_void.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+SEC("freplace/foo")
+void test_freplace_void(struct __sk_buff *skb)
+{
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/get_func_args_fsession_test.c b/tools/testing/selftests/bpf/progs/get_func_args_fsession_test.c
new file mode 100644
index 000000000000..bb597f24b659
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/get_func_args_fsession_test.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <errno.h>
+
+char _license[] SEC("license") = "GPL";
+
+__u64 test1_result = 0;
+
+SEC("fsession/bpf_fentry_test1")
+int BPF_PROG(test1)
+{
+ __u64 cnt = bpf_get_func_arg_cnt(ctx);
+ __u64 a = 0, z = 0, ret = 0;
+ __s64 err;
+
+ test1_result = cnt == 1;
+
+ /* valid arguments */
+ err = bpf_get_func_arg(ctx, 0, &a);
+ test1_result &= err == 0 && ((int) a == 1);
+
+ /* not valid argument */
+ err = bpf_get_func_arg(ctx, 1, &z);
+ test1_result &= err == -EINVAL;
+
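+ /* On entry the saved return value is still 0; on exit bpf_fentry_test1 has returned 2 */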
+ if (bpf_session_is_return(ctx)) {
+ err = bpf_get_func_ret(ctx, &ret);
+ test1_result &= err == 0 && ret == 2;
+ } else {
+ err = bpf_get_func_ret(ctx, &ret);
+ test1_result &= err == 0 && ret == 0;
+ }
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/get_func_args_test.c b/tools/testing/selftests/bpf/progs/get_func_args_test.c
index 075a1180ec26..1bf47f64d096 100644
--- a/tools/testing/selftests/bpf/progs/get_func_args_test.c
+++ b/tools/testing/selftests/bpf/progs/get_func_args_test.c
@@ -165,41 +165,3 @@ int BPF_PROG(tp_test2)
return 0;
}
-
-__u64 test7_result = 0;
-#if defined(bpf_target_x86) || defined(bpf_target_arm64) || defined(bpf_target_riscv)
-SEC("fsession/bpf_fentry_test1")
-int BPF_PROG(test7)
-{
- __u64 cnt = bpf_get_func_arg_cnt(ctx);
- __u64 a = 0, z = 0, ret = 0;
- __s64 err;
-
- test7_result = cnt == 1;
-
- /* valid arguments */
- err = bpf_get_func_arg(ctx, 0, &a);
- test7_result &= err == 0 && ((int) a == 1);
-
- /* not valid argument */
- err = bpf_get_func_arg(ctx, 1, &z);
- test7_result &= err == -EINVAL;
-
- if (bpf_session_is_return(ctx)) {
- err = bpf_get_func_ret(ctx, &ret);
- test7_result &= err == 0 && ret == 2;
- } else {
- err = bpf_get_func_ret(ctx, &ret);
- test7_result &= err == 0 && ret == 0;
- }
-
- return 0;
-}
-#else
-SEC("fentry/bpf_fentry_test1")
-int BPF_PROG(test7)
-{
- test7_result = 1;
- return 0;
-}
-#endif
diff --git a/tools/testing/selftests/bpf/progs/get_func_ip_fsession_test.c b/tools/testing/selftests/bpf/progs/get_func_ip_fsession_test.c
new file mode 100644
index 000000000000..bbeea0d512e3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/get_func_ip_fsession_test.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+__u64 test1_entry_result = 0;
+__u64 test1_exit_result = 0;
+
+SEC("fsession/bpf_fentry_test1")
+int BPF_PROG(test1, int a)
+{
+ __u64 addr = bpf_get_func_ip(ctx);
+
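+ /* An fsession program runs on both entry and exit; bpf_session_is_return() distinguishes the two */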
+ if (bpf_session_is_return(ctx))
+ test1_exit_result = (const void *) addr == &bpf_fentry_test1;
+ else
+ test1_entry_result = (const void *) addr == &bpf_fentry_test1;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/get_func_ip_test.c b/tools/testing/selftests/bpf/progs/get_func_ip_test.c
index 45eaa54d1ac7..2011cacdeb18 100644
--- a/tools/testing/selftests/bpf/progs/get_func_ip_test.c
+++ b/tools/testing/selftests/bpf/progs/get_func_ip_test.c
@@ -103,26 +103,3 @@ int BPF_URETPROBE(test8, int ret)
test8_result = (const void *) addr == (const void *) uprobe_trigger;
return 0;
}
-
-__u64 test9_entry_result = 0;
-__u64 test9_exit_result = 0;
-#if defined(bpf_target_x86) || defined(bpf_target_arm64) || defined(bpf_target_riscv)
-SEC("fsession/bpf_fentry_test1")
-int BPF_PROG(test9, int a)
-{
- __u64 addr = bpf_get_func_ip(ctx);
-
- if (bpf_session_is_return(ctx))
- test9_exit_result = (const void *) addr == &bpf_fentry_test1;
- else
- test9_entry_result = (const void *) addr == &bpf_fentry_test1;
- return 0;
-}
-#else
-SEC("fentry/bpf_fentry_test1")
-int BPF_PROG(test9, int a)
-{
- test9_entry_result = test9_exit_result = 1;
- return 0;
-}
-#endif
diff --git a/tools/testing/selftests/bpf/progs/htab_reuse.c b/tools/testing/selftests/bpf/progs/htab_reuse.c
index 7f7368cb3095..1c7fa7ee45ee 100644
--- a/tools/testing/selftests/bpf/progs/htab_reuse.c
+++ b/tools/testing/selftests/bpf/progs/htab_reuse.c
@@ -17,3 +17,19 @@ struct {
__type(value, struct htab_val);
__uint(map_flags, BPF_F_NO_PREALLOC);
} htab SEC(".maps");
+
+#define HTAB_NDATA 256
+
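+/* Large value with a leading spin lock; used to check lock consistency when
+ * BPF_F_NO_PREALLOC elements are reused.
+ */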
+struct htab_val_large {
+ struct bpf_spin_lock lock;
+ __u32 seq;
+ __u64 data[HTAB_NDATA];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 8);
+ __type(key, unsigned int);
+ __type(value, struct htab_val_large);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+} htab_lock_consistency SEC(".maps");
diff --git a/tools/testing/selftests/bpf/progs/irq.c b/tools/testing/selftests/bpf/progs/irq.c
index 74d912b22de9..e11e82d98904 100644
--- a/tools/testing/selftests/bpf/progs/irq.c
+++ b/tools/testing/selftests/bpf/progs/irq.c
@@ -490,7 +490,7 @@ int irq_non_sleepable_global_subprog(void *ctx)
}
SEC("?syscall")
-__failure __msg("global functions that may sleep are not allowed in non-sleepable context")
+__failure __msg("sleepable global function")
int irq_sleepable_helper_global_subprog(void *ctx)
{
unsigned long flags;
@@ -502,7 +502,7 @@ int irq_sleepable_helper_global_subprog(void *ctx)
}
SEC("?syscall")
-__failure __msg("global functions that may sleep are not allowed in non-sleepable context")
+__failure __msg("sleepable global function")
int irq_sleepable_global_subprog_indirect(void *ctx)
{
unsigned long flags;
diff --git a/tools/testing/selftests/bpf/progs/iter_buf_null_fail.c b/tools/testing/selftests/bpf/progs/iter_buf_null_fail.c
new file mode 100644
index 000000000000..3daad40515e6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/iter_buf_null_fail.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2026 Qi Tang */
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+/* Verify that the verifier rejects direct access to nullable PTR_TO_BUF. */
+SEC("iter/bpf_map_elem")
+__failure __msg("invalid mem access")
+int iter_buf_null_deref(struct bpf_iter__bpf_map_elem *ctx)
+{
+ /*
+ * ctx->key is PTR_TO_BUF | PTR_MAYBE_NULL | MEM_RDONLY.
+ * Direct access without null check must be rejected.
+ */
+ volatile __u32 v = *(__u32 *)ctx->key;
+
+ (void)v;
+ return 0;
+}
+
+/* Verify that access after a null check is still accepted. */
+SEC("iter/bpf_map_elem")
+__success
+int iter_buf_null_check_ok(struct bpf_iter__bpf_map_elem *ctx)
+{
+ __u32 *key = ctx->key;
+
+ if (!key)
+ return 0;
+
+ volatile __u32 v = *key;
+
+ (void)v;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c
index 7f27b517d5d5..86b74e3579d9 100644
--- a/tools/testing/selftests/bpf/progs/iters.c
+++ b/tools/testing/selftests/bpf/progs/iters.c
@@ -1651,7 +1651,7 @@ int clean_live_states(const void *ctx)
SEC("?raw_tp")
__flag(BPF_F_TEST_STATE_FREQ)
-__failure __msg("misaligned stack access off 0+-31+0 size 8")
+__failure __msg("misaligned stack access off -31+0 size 8")
__naked int absent_mark_in_the_middle_state(void)
{
/* This is equivalent to C program below.
@@ -1726,7 +1726,7 @@ static int noop(void)
SEC("?raw_tp")
__flag(BPF_F_TEST_STATE_FREQ)
-__failure __msg("misaligned stack access off 0+-31+0 size 8")
+__failure __msg("misaligned stack access off -31+0 size 8")
__naked int absent_mark_in_the_middle_state2(void)
{
/* This is equivalent to C program below.
@@ -1802,7 +1802,7 @@ __naked int absent_mark_in_the_middle_state2(void)
SEC("?raw_tp")
__flag(BPF_F_TEST_STATE_FREQ)
-__failure __msg("misaligned stack access off 0+-31+0 size 8")
+__failure __msg("misaligned stack access off -31+0 size 8")
__naked int absent_mark_in_the_middle_state3(void)
{
/*
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test.c b/tools/testing/selftests/bpf/progs/kfunc_call_test.c
index 8b86113a0126..5edc51564f71 100644
--- a/tools/testing/selftests/bpf/progs/kfunc_call_test.c
+++ b/tools/testing/selftests/bpf/progs/kfunc_call_test.c
@@ -2,9 +2,107 @@
/* Copyright (c) 2021 Facebook */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
#include "../test_kmods/bpf_testmod_kfunc.h"
SEC("tc")
+int kfunc_call_test5(struct __sk_buff *skb)
+{
+ struct bpf_sock *sk = skb->sk;
+ int ret;
+ u32 val32;
+ u16 val16;
+ u8 val8;
+
+ if (!sk)
+ return -1;
+
+ sk = bpf_sk_fullsock(sk);
+ if (!sk)
+ return -1;
+
+ /*
+ * Test with constant values to verify zero-extension.
+ * ISA-dependent BPF asm:
+ * With ALU32: w1 = 0xFF; w2 = 0xFFFF; w3 = 0xFFFFffff
+ * Without ALU32: r1 = 0xFF; r2 = 0xFFFF; r3 = 0xFFFFffff
+ * Both zero-extend to 64-bit before the kfunc call.
+ */
+ ret = bpf_kfunc_call_test5(0xFF, 0xFFFF, 0xFFFFffffULL);
+ if (ret)
+ return ret;
+
+ val32 = bpf_get_prandom_u32();
+ val16 = val32 & 0xFFFF;
+ val8 = val32 & 0xFF;
+ ret = bpf_kfunc_call_test5(val8, val16, val32);
+ if (ret)
+ return ret;
+
+ /*
+ * Test multiplication with different operand sizes:
+ *
+ * val8 * 0xFF:
+ * - Both operands promote to int (32-bit signed)
+ * - Result: 32-bit multiplication, truncated to u8, then zero-extended
+ *
+ * val16 * 0xFFFF:
+ * - Both operands promote to int (32-bit signed)
+ * - Result: 32-bit multiplication, truncated to u16, then zero-extended
+ *
+ * val32 * 0xFFFFffffULL:
+ * - val32 (u32) promotes to unsigned long long (due to ULL suffix)
+ * - Result: 64-bit unsigned multiplication, truncated to u32, then zero-extended
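+	 *
+	 * Worked example, assuming bpf_get_prandom_u32() returned all ones
+	 * (a hypothetical value, for illustration only):
+	 * - 0xFF * 0xFF = 0xFE01 (32-bit), truncated to u8 -> 0x01
+	 * - 0xFFFF * 0xFFFF = 0xFFFE0001 (32-bit), truncated to u16 -> 0x0001
+	 * - 0xFFFFffff * 0xFFFFffff = 0xFFFFfffe00000001 (64-bit),
+	 *   truncated to u32 -> 0x00000001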
+ */
+ ret = bpf_kfunc_call_test5(val8 * 0xFF, val16 * 0xFFFF, val32 * 0xFFFFffffULL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * Assembly version testing the multiplication edge case explicitly.
+ * This ensures consistent testing across different ISA versions.
+ */
+SEC("tc")
+__naked int kfunc_call_test5_asm(void)
+{
+ asm volatile (
+ /* Get a random u32 value */
+ "call %[bpf_get_prandom_u32];"
+ "r6 = r0;" /* Save val32 in r6 */
+
+ /* Prepare first argument: val8 * 0xFF */
+ "r1 = r6;"
+ "r1 &= 0xFF;" /* val8 = val32 & 0xFF */
+ "r7 = 0xFF;"
+ "r1 *= r7;" /* 64-bit mult: r1 = r1 * r7 */
+
+ /* Prepare second argument: val16 * 0xFFFF */
+ "r2 = r6;"
+ "r2 &= 0xFFFF;" /* val16 = val32 & 0xFFFF */
+ "r7 = 0xFFFF;"
+ "r2 *= r7;" /* 64-bit mult: r2 = r2 * r7 */
+
+ /* Prepare third argument: val32 * 0xFFFFffff */
+ "r3 = r6;" /* val32 */
+ "r7 = 0xFFFFffff;"
+ "r3 *= r7;" /* 64-bit mult: r3 = r3 * r7 */
+
+ /* Call kfunc with multiplication results */
+ "call bpf_kfunc_call_test5;"
+
+ /* Check return value */
+ "if r0 != 0 goto exit_%=;"
+ "r0 = 0;"
+ "exit_%=: exit;"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("tc")
int kfunc_call_test4(struct __sk_buff *skb)
{
struct bpf_sock *sk = skb->sk;
diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi_session.c b/tools/testing/selftests/bpf/progs/kprobe_multi_session.c
index bd8b7fb7061e..d52a65b40bbf 100644
--- a/tools/testing/selftests/bpf/progs/kprobe_multi_session.c
+++ b/tools/testing/selftests/bpf/progs/kprobe_multi_session.c
@@ -76,3 +76,13 @@ int test_kprobe(struct pt_regs *ctx)
{
return session_check(ctx);
}
+
+/*
+ * Exact function name (no wildcards) - exercises the fast syms[] path
+ * in bpf_program__attach_kprobe_multi_opts() which bypasses kallsyms parsing.
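+ *
+ * A minimal userspace sketch of that path (hypothetical, not part of this
+ * test's runner):
+ *
+ *	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
+ *		.syms = (const char *[]){ "bpf_fentry_test1" },
+ *		.cnt = 1);
+ *	link = bpf_program__attach_kprobe_multi_opts(
+ *			skel->progs.test_kprobe_syms, NULL, &opts);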
+ */
+SEC("kprobe.session/bpf_fentry_test1")
+int test_kprobe_syms(struct pt_regs *ctx)
+{
+ return session_check(ctx);
+}
diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi_sleepable.c b/tools/testing/selftests/bpf/progs/kprobe_multi_sleepable.c
new file mode 100644
index 000000000000..932e1d9c72e2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kprobe_multi_sleepable.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+void *user_ptr = 0;
+
+SEC("kprobe.multi")
+int handle_kprobe_multi_sleepable(struct pt_regs *ctx)
+{
+ int a, err;
+
+ err = bpf_copy_from_user(&a, sizeof(a), user_ptr);
+ barrier_var(a);
+ return err;
+}
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(fentry)
+{
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/kprobe_write_ctx.c b/tools/testing/selftests/bpf/progs/kprobe_write_ctx.c
index f77aef0474d3..adbf52afe490 100644
--- a/tools/testing/selftests/bpf/progs/kprobe_write_ctx.c
+++ b/tools/testing/selftests/bpf/progs/kprobe_write_ctx.c
@@ -19,4 +19,23 @@ int kprobe_multi_write_ctx(struct pt_regs *ctx)
ctx->ax = 0;
return 0;
}
+
+SEC("?kprobe")
+int kprobe_dummy(struct pt_regs *regs)
+{
+ return 0;
+}
+
+SEC("?freplace")
+int freplace_kprobe(struct pt_regs *regs)
+{
+ regs->di = 0;
+ return 0;
+}
+
+SEC("?fentry/bpf_fentry_test1")
+int BPF_PROG(fentry)
+{
+ return 0;
+}
#endif
diff --git a/tools/testing/selftests/bpf/progs/kptr_xchg_inline.c b/tools/testing/selftests/bpf/progs/kptr_xchg_inline.c
index 2414ac20b6d5..ca5943166057 100644
--- a/tools/testing/selftests/bpf/progs/kptr_xchg_inline.c
+++ b/tools/testing/selftests/bpf/progs/kptr_xchg_inline.c
@@ -25,14 +25,14 @@ __naked int kptr_xchg_inline(void)
"if r0 == 0 goto 1f;"
"r1 = r0;"
"r2 = 0;"
- "call %[bpf_obj_drop_impl];"
+ "call %[bpf_obj_drop];"
"1:"
"r0 = 0;"
"exit;"
:
: __imm_addr(ptr),
__imm(bpf_kptr_xchg),
- __imm(bpf_obj_drop_impl)
+ __imm(bpf_obj_drop)
: __clobber_all
);
}
diff --git a/tools/testing/selftests/bpf/progs/lsm_bdev.c b/tools/testing/selftests/bpf/progs/lsm_bdev.c
new file mode 100644
index 000000000000..45554e6db605
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/lsm_bdev.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2026 Christian Brauner <brauner@kernel.org> */
+
+/*
+ * BPF LSM block device integrity tracker for dm-verity.
+ *
+ * Tracks block devices in a hashmap keyed by bd_dev. When dm-verity
+ * calls security_bdev_setintegrity() during verity_preresume(), the
+ * setintegrity hook records the roothash and signature-validity data.
+ * The free hook cleans up when the device goes away. The alloc hook
+ * counts allocations for test validation.
+ *
+ * The sleepable hooks exercise bpf_copy_from_user() to verify that
+ * the sleepable classification actually permits sleepable helpers.
+ */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+struct verity_info {
+ __u8 has_roothash; /* LSM_INT_DMVERITY_ROOTHASH seen */
+ __u8 sig_valid; /* LSM_INT_DMVERITY_SIG_VALID value (non-NULL = valid) */
+ __u32 setintegrity_cnt; /* total setintegrity calls for this dev */
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 64);
+ __type(key, __u32); /* dev_t from bdev->bd_dev */
+ __type(value, struct verity_info);
+} verity_devices SEC(".maps");
+
+/* Global counters exposed to userspace via skeleton bss. */
+int alloc_count;
+
+char _license[] SEC("license") = "GPL";
+
+SEC("lsm.s/bdev_setintegrity")
+int BPF_PROG(bdev_setintegrity, struct block_device *bdev,
+ enum lsm_integrity_type type, const void *value, size_t size)
+{
+ struct verity_info zero = {};
+ struct verity_info *info;
+ __u32 dev;
+ char buf;
+
+ /*
+ * Exercise a sleepable helper to confirm the verifier
+ * allows it in this sleepable hook.
+ */
+ (void)bpf_copy_from_user(&buf, sizeof(buf), NULL);
+
+ dev = bdev->bd_dev;
+
+ info = bpf_map_lookup_elem(&verity_devices, &dev);
+ if (!info) {
+ bpf_map_update_elem(&verity_devices, &dev, &zero, BPF_NOEXIST);
+ info = bpf_map_lookup_elem(&verity_devices, &dev);
+ if (!info)
+ return 0;
+ }
+
+ if (type == LSM_INT_DMVERITY_ROOTHASH)
+ info->has_roothash = 1;
+ else if (type == LSM_INT_DMVERITY_SIG_VALID)
+ info->sig_valid = (value != NULL);
+
+ __sync_fetch_and_add(&info->setintegrity_cnt, 1);
+
+ return 0;
+}
+
+SEC("lsm/bdev_free_security")
+void BPF_PROG(bdev_free_security, struct block_device *bdev)
+{
+ __u32 dev = bdev->bd_dev;
+
+ bpf_map_delete_elem(&verity_devices, &dev);
+}
+
+SEC("lsm.s/bdev_alloc_security")
+int BPF_PROG(bdev_alloc_security, struct block_device *bdev)
+{
+ char buf;
+
+ /*
+ * Exercise a sleepable helper to confirm the verifier
+ * allows it in this sleepable hook.
+ */
+ (void)bpf_copy_from_user(&buf, sizeof(buf), NULL);
+
+ __sync_fetch_and_add(&alloc_count, 1);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/lwt_misc.c b/tools/testing/selftests/bpf/progs/lwt_misc.c
new file mode 100644
index 000000000000..b392317088d2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/lwt_misc.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("lwt_xmit")
+__success __retval(0)
+int test_missing_dst(struct __sk_buff *skb)
+{
+ struct iphdr iph;
+
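+	/* Build a minimal IPv4 header and attempt BPF_LWT_ENCAP_IP; the
+	 * helper's return value is ignored - per the test name, what matters
+	 * is exiting cleanly (retval 0) even though the skb has no dst.
+	 */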
+ __builtin_memset(&iph, 0, sizeof(struct iphdr));
+ iph.ihl = 5;
+ iph.version = 4;
+
+ bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_IP, &iph, sizeof(struct iphdr));
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/map_ptr_kern.c b/tools/testing/selftests/bpf/progs/map_ptr_kern.c
index efaf622c28dd..373c8d17ea55 100644
--- a/tools/testing/selftests/bpf/progs/map_ptr_kern.c
+++ b/tools/testing/selftests/bpf/progs/map_ptr_kern.c
@@ -647,8 +647,14 @@ static inline int check_devmap_hash(void)
return 1;
}
+struct bpf_ringbuf {
+ unsigned long consumer_pos;
+ unsigned long producer_pos;
+} __attribute__((preserve_access_index));
+
struct bpf_ringbuf_map {
struct bpf_map map;
+ struct bpf_ringbuf *rb;
} __attribute__((preserve_access_index));
struct {
@@ -659,9 +665,20 @@ static inline int check_ringbuf(void)
{
struct bpf_ringbuf_map *ringbuf = (struct bpf_ringbuf_map *)&m_ringbuf;
struct bpf_map *map = (struct bpf_map *)&m_ringbuf;
+ struct bpf_ringbuf *rb;
+ void *ptr;
VERIFY(check(&ringbuf->map, map, 0, 0, page_size));
+ ptr = bpf_ringbuf_reserve(&m_ringbuf, 128, 0);
+ VERIFY(ptr);
+
+ bpf_ringbuf_discard(ptr, 0);
+ rb = ringbuf->rb;
+ VERIFY(rb);
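+	/* The discard above does not rewind producer_pos: it stays advanced
+	 * by the 8-byte record header plus the 128 reserved bytes.
+	 */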
+ VERIFY(rb->consumer_pos == 0);
+ VERIFY(rb->producer_pos == 128 + BPF_RINGBUF_HDR_SZ);
+
return 1;
}
diff --git a/tools/testing/selftests/bpf/progs/mem_rdonly_untrusted.c b/tools/testing/selftests/bpf/progs/mem_rdonly_untrusted.c
index 3b984b6ae7c0..5b4453747c23 100644
--- a/tools/testing/selftests/bpf/progs/mem_rdonly_untrusted.c
+++ b/tools/testing/selftests/bpf/progs/mem_rdonly_untrusted.c
@@ -8,7 +8,7 @@
SEC("tp_btf/sys_enter")
__success
__log_level(2)
-__msg("r8 = *(u64 *)(r7 +0) ; R7=ptr_nameidata(off={{[0-9]+}}) R8=rdonly_untrusted_mem(sz=0)")
+__msg("r8 = *(u64 *)(r7 +0) ; R7=ptr_nameidata(imm={{[0-9]+}}) R8=rdonly_untrusted_mem(sz=0)")
__msg("r9 = *(u8 *)(r8 +0) ; R8=rdonly_untrusted_mem(sz=0) R9=scalar")
int btf_id_to_ptr_mem(void *ctx)
{
diff --git a/tools/testing/selftests/bpf/progs/modify_return.c b/tools/testing/selftests/bpf/progs/modify_return.c
index 3376d4849f58..68fabd2efe8d 100644
--- a/tools/testing/selftests/bpf/progs/modify_return.c
+++ b/tools/testing/selftests/bpf/progs/modify_return.c
@@ -12,11 +12,14 @@ char _license[] SEC("license") = "GPL";
static int sequence = 0;
__s32 input_retval = 0;
+__u32 test_pid = 0;
__u64 fentry_result = 0;
SEC("fentry/bpf_modify_return_test")
int BPF_PROG(fentry_test, int a, __u64 b)
{
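+	/* Count only calls triggered by the test's own process (the upper
+	 * 32 bits of pid_tgid are the tgid); unrelated callers of
+	 * bpf_modify_return_test would otherwise corrupt sequence.
+	 */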
+ if (bpf_get_current_pid_tgid() >> 32 != test_pid)
+ return 0;
sequence++;
fentry_result = (sequence == 1);
return 0;
@@ -26,6 +29,8 @@ __u64 fmod_ret_result = 0;
SEC("fmod_ret/bpf_modify_return_test")
int BPF_PROG(fmod_ret_test, int a, int *b, int ret)
{
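+	/* When filtered out, pass the incoming ret through unchanged so this
+	 * fmod_ret program does not alter behavior for other processes.
+	 */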
+ if (bpf_get_current_pid_tgid() >> 32 != test_pid)
+ return ret;
sequence++;
/* This is the first fmod_ret program, the ret passed should be 0 */
fmod_ret_result = (sequence == 2 && ret == 0);
@@ -36,6 +41,8 @@ __u64 fexit_result = 0;
SEC("fexit/bpf_modify_return_test")
int BPF_PROG(fexit_test, int a, __u64 b, int ret)
{
+ if (bpf_get_current_pid_tgid() >> 32 != test_pid)
+ return 0;
sequence++;
	/* If the input_retval is non-zero, a successful modification should have
* occurred.
@@ -55,6 +62,8 @@ SEC("fentry/bpf_modify_return_test2")
int BPF_PROG(fentry_test2, int a, int *b, short c, int d, void *e, char f,
int g)
{
+ if (bpf_get_current_pid_tgid() >> 32 != test_pid)
+ return 0;
sequence2++;
fentry_result2 = (sequence2 == 1);
return 0;
@@ -65,6 +74,8 @@ SEC("fmod_ret/bpf_modify_return_test2")
int BPF_PROG(fmod_ret_test2, int a, int *b, short c, int d, void *e, char f,
int g, int ret)
{
+ if (bpf_get_current_pid_tgid() >> 32 != test_pid)
+ return ret;
sequence2++;
/* This is the first fmod_ret program, the ret passed should be 0 */
fmod_ret_result2 = (sequence2 == 2 && ret == 0);
@@ -76,6 +87,8 @@ SEC("fexit/bpf_modify_return_test2")
int BPF_PROG(fexit_test2, int a, int *b, short c, int d, void *e, char f,
int g, int ret)
{
+ if (bpf_get_current_pid_tgid() >> 32 != test_pid)
+ return 0;
sequence2++;
	/* If the input_retval is non-zero, a successful modification should have
* occurred.
diff --git a/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c b/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c
index f2b8eb2ff76f..81813c724fa9 100644
--- a/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c
+++ b/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c
@@ -110,7 +110,7 @@ int BPF_PROG(test_array_map_3)
}
SEC("?fentry.s/bpf_fentry_test1")
-__failure __msg("arg#0 expected for bpf_percpu_obj_drop_impl()")
+__failure __msg("arg#0 expected for bpf_percpu_obj_drop()")
int BPF_PROG(test_array_map_4)
{
struct val_t __percpu_kptr *p;
@@ -124,7 +124,7 @@ int BPF_PROG(test_array_map_4)
}
SEC("?fentry.s/bpf_fentry_test1")
-__failure __msg("arg#0 expected for bpf_obj_drop_impl()")
+__failure __msg("arg#0 expected for bpf_obj_drop()")
int BPF_PROG(test_array_map_5)
{
struct val_t *p;
diff --git a/tools/testing/selftests/bpf/progs/preempt_lock.c b/tools/testing/selftests/bpf/progs/preempt_lock.c
index 7d04254e61f1..6d5fce7e6ffc 100644
--- a/tools/testing/selftests/bpf/progs/preempt_lock.c
+++ b/tools/testing/selftests/bpf/progs/preempt_lock.c
@@ -177,7 +177,7 @@ global_subprog_calling_sleepable_global(int i)
}
SEC("?syscall")
-__failure __msg("global functions that may sleep are not allowed in non-sleepable context")
+__failure __msg("sleepable global function")
int preempt_global_sleepable_helper_subprog(struct __sk_buff *ctx)
{
preempt_disable();
@@ -188,7 +188,7 @@ int preempt_global_sleepable_helper_subprog(struct __sk_buff *ctx)
}
SEC("?syscall")
-__failure __msg("global functions that may sleep are not allowed in non-sleepable context")
+__failure __msg("sleepable global function")
int preempt_global_sleepable_kfunc_subprog(struct __sk_buff *ctx)
{
preempt_disable();
@@ -199,7 +199,7 @@ int preempt_global_sleepable_kfunc_subprog(struct __sk_buff *ctx)
}
SEC("?syscall")
-__failure __msg("global functions that may sleep are not allowed in non-sleepable context")
+__failure __msg("sleepable global function")
int preempt_global_sleepable_subprog_indirect(struct __sk_buff *ctx)
{
preempt_disable();
diff --git a/tools/testing/selftests/bpf/progs/rbtree_search_kptr.c b/tools/testing/selftests/bpf/progs/rbtree_search_kptr.c
new file mode 100644
index 000000000000..610aae45e2dc
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/rbtree_search_kptr.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2026 KylinSoft Corporation. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+#define NR_NODES 16
+
+struct node_data {
+ int data;
+};
+
+struct tree_node {
+ struct bpf_rb_node node;
+ u64 key;
+ struct node_data __kptr * node_data;
+};
+
+struct tree_node_ref {
+ struct bpf_refcount ref;
+ struct bpf_rb_node node;
+ u64 key;
+ struct node_data __kptr * node_data;
+};
+
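+/* Each private(X) pair below lands in its own ".data.X" section, so a tree
+ * root and the spin_lock guarding it share one map value and the verifier
+ * can tie the lock to the rb_root it protects.
+ */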
+#define private(name) SEC(".data." #name) __hidden __aligned(8)
+
+private(A) struct bpf_rb_root root __contains(tree_node, node);
+private(A) struct bpf_spin_lock lock;
+
+private(B) struct bpf_rb_root root_r __contains(tree_node_ref, node);
+private(B) struct bpf_spin_lock lock_r;
+
+static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct tree_node *node_a, *node_b;
+
+ node_a = container_of(a, struct tree_node, node);
+ node_b = container_of(b, struct tree_node, node);
+
+ return node_a->key < node_b->key;
+}
+
+SEC("syscall")
+__retval(0)
+long rbtree_search_kptr(void *ctx)
+{
+ struct tree_node *tnode;
+ struct bpf_rb_node *rb_n;
+ struct node_data __kptr * node_data;
+ int lookup_key = NR_NODES / 2;
+ int lookup_data = NR_NODES / 2;
+ int i, data, ret = 0;
+
+ for (i = 0; i < NR_NODES && can_loop; i++) {
+ tnode = bpf_obj_new(typeof(*tnode));
+ if (!tnode)
+ return __LINE__;
+
+ node_data = bpf_obj_new(typeof(*node_data));
+ if (!node_data) {
+ bpf_obj_drop(tnode);
+ return __LINE__;
+ }
+
+ tnode->key = i;
+ node_data->data = i;
+
+ node_data = bpf_kptr_xchg(&tnode->node_data, node_data);
+ if (node_data)
+ bpf_obj_drop(node_data);
+
+ bpf_spin_lock(&lock);
+ bpf_rbtree_add(&root, &tnode->node, less);
+ bpf_spin_unlock(&lock);
+ }
+
+ bpf_spin_lock(&lock);
+ rb_n = bpf_rbtree_root(&root);
+ while (rb_n && can_loop) {
+ tnode = container_of(rb_n, struct tree_node, node);
+ node_data = bpf_kptr_xchg(&tnode->node_data, NULL);
+ if (!node_data) {
+ ret = __LINE__;
+ goto fail;
+ }
+
+ data = node_data->data;
+ node_data = bpf_kptr_xchg(&tnode->node_data, node_data);
+ if (node_data) {
+ bpf_spin_unlock(&lock);
+ bpf_obj_drop(node_data);
+ return __LINE__;
+ }
+
+ if (lookup_key == tnode->key) {
+ if (data == lookup_data)
+ break;
+
+ ret = __LINE__;
+ goto fail;
+ }
+
+ if (lookup_key < tnode->key)
+ rb_n = bpf_rbtree_left(&root, rb_n);
+ else
+ rb_n = bpf_rbtree_right(&root, rb_n);
+ }
+ bpf_spin_unlock(&lock);
+
+ while (can_loop) {
+ bpf_spin_lock(&lock);
+ rb_n = bpf_rbtree_first(&root);
+ if (!rb_n) {
+ bpf_spin_unlock(&lock);
+ return 0;
+ }
+
+ rb_n = bpf_rbtree_remove(&root, rb_n);
+ if (!rb_n) {
+ ret = __LINE__;
+ goto fail;
+ }
+ bpf_spin_unlock(&lock);
+
+ tnode = container_of(rb_n, struct tree_node, node);
+
+ node_data = bpf_kptr_xchg(&tnode->node_data, NULL);
+ if (node_data)
+ bpf_obj_drop(node_data);
+
+ bpf_obj_drop(tnode);
+ }
+
+ return 0;
+fail:
+ bpf_spin_unlock(&lock);
+ return ret;
+}
+
+static bool less_r(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct tree_node_ref *node_a, *node_b;
+
+ node_a = container_of(a, struct tree_node_ref, node);
+ node_b = container_of(b, struct tree_node_ref, node);
+
+ return node_a->key < node_b->key;
+}
+
+SEC("syscall")
+__retval(0)
+long rbtree_search_kptr_ref(void *ctx)
+{
+ struct tree_node_ref *tnode_r, *tnode_m;
+ struct bpf_rb_node *rb_n;
+ struct node_data __kptr * node_data;
+ int lookup_key = NR_NODES / 2;
+ int lookup_data = NR_NODES / 2;
+ int i, data, ret = 0;
+
+ for (i = 0; i < NR_NODES && can_loop; i++) {
+ tnode_r = bpf_obj_new(typeof(*tnode_r));
+ if (!tnode_r)
+ return __LINE__;
+
+ node_data = bpf_obj_new(typeof(*node_data));
+ if (!node_data) {
+ bpf_obj_drop(tnode_r);
+ return __LINE__;
+ }
+
+ tnode_r->key = i;
+ node_data->data = i;
+
+ node_data = bpf_kptr_xchg(&tnode_r->node_data, node_data);
+ if (node_data)
+ bpf_obj_drop(node_data);
+
+ /* Unused reference */
+ tnode_m = bpf_refcount_acquire(tnode_r);
+ if (!tnode_m)
+ return __LINE__;
+
+ bpf_spin_lock(&lock_r);
+ bpf_rbtree_add(&root_r, &tnode_r->node, less_r);
+ bpf_spin_unlock(&lock_r);
+
+ bpf_obj_drop(tnode_m);
+ }
+
+ bpf_spin_lock(&lock_r);
+ rb_n = bpf_rbtree_root(&root_r);
+ while (rb_n && can_loop) {
+ tnode_r = container_of(rb_n, struct tree_node_ref, node);
+ node_data = bpf_kptr_xchg(&tnode_r->node_data, NULL);
+ if (!node_data) {
+ ret = __LINE__;
+ goto fail;
+ }
+
+ data = node_data->data;
+ node_data = bpf_kptr_xchg(&tnode_r->node_data, node_data);
+ if (node_data) {
+ bpf_spin_unlock(&lock_r);
+ bpf_obj_drop(node_data);
+ return __LINE__;
+ }
+
+ if (lookup_key == tnode_r->key) {
+ if (data == lookup_data)
+ break;
+
+ ret = __LINE__;
+ goto fail;
+ }
+
+ if (lookup_key < tnode_r->key)
+ rb_n = bpf_rbtree_left(&root_r, rb_n);
+ else
+ rb_n = bpf_rbtree_right(&root_r, rb_n);
+ }
+ bpf_spin_unlock(&lock_r);
+
+ while (can_loop) {
+ bpf_spin_lock(&lock_r);
+ rb_n = bpf_rbtree_first(&root_r);
+ if (!rb_n) {
+ bpf_spin_unlock(&lock_r);
+ return 0;
+ }
+
+ rb_n = bpf_rbtree_remove(&root_r, rb_n);
+ if (!rb_n) {
+ ret = __LINE__;
+ goto fail;
+ }
+ bpf_spin_unlock(&lock_r);
+
+ tnode_r = container_of(rb_n, struct tree_node_ref, node);
+
+ node_data = bpf_kptr_xchg(&tnode_r->node_data, NULL);
+ if (node_data)
+ bpf_obj_drop(node_data);
+
+ bpf_obj_drop(tnode_r);
+ }
+
+ return 0;
+fail:
+ bpf_spin_unlock(&lock_r);
+ return ret;
+}
+
+SEC("syscall")
+__failure __msg("R1 type=scalar expected=map_value, ptr_, ptr_")
+long non_own_ref_kptr_xchg_no_lock(void *ctx)
+{
+ struct tree_node *tnode;
+ struct bpf_rb_node *rb_n;
+ struct node_data __kptr * node_data;
+ int data;
+
+ bpf_spin_lock(&lock);
+ rb_n = bpf_rbtree_first(&root);
+ if (!rb_n) {
+ bpf_spin_unlock(&lock);
+ return __LINE__;
+ }
+ bpf_spin_unlock(&lock);
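+	/* Past this unlock, rb_n is no longer a valid non-owning reference:
+	 * the verifier invalidates it, so tnode below degrades to a scalar
+	 * and the bpf_kptr_xchg() must be rejected (see __msg above).
+	 */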
+
+ tnode = container_of(rb_n, struct tree_node, node);
+ node_data = bpf_kptr_xchg(&tnode->node_data, NULL);
+ if (!node_data)
+ return __LINE__;
+
+ data = node_data->data;
+ if (data < 0)
+ return __LINE__;
+
+ node_data = bpf_kptr_xchg(&tnode->node_data, node_data);
+ if (node_data)
+ return __LINE__;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr.c b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
index 1aca85d86aeb..c847398837cc 100644
--- a/tools/testing/selftests/bpf/progs/refcounted_kptr.c
+++ b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
@@ -500,7 +500,7 @@ long rbtree_wrong_owner_remove_fail_a2(void *ctx)
return 0;
}
-SEC("?fentry.s/bpf_testmod_test_read")
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
__success
int BPF_PROG(rbtree_sleepable_rcu,
struct file *file, struct kobject *kobj,
@@ -534,7 +534,7 @@ err_out:
return 0;
}
-SEC("?fentry.s/bpf_testmod_test_read")
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
__success
int BPF_PROG(rbtree_sleepable_rcu_no_explicit_rcu_lock,
struct file *file, struct kobject *kobj,
diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
index 836c8ab7b908..b2808bfcec29 100644
--- a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
@@ -93,7 +93,7 @@ long rbtree_refcounted_node_ref_escapes_owning_input(void *ctx)
return 0;
}
-SEC("?fentry.s/bpf_testmod_test_read")
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
__failure __msg("function calls are not allowed while holding a lock")
int BPF_PROG(rbtree_fail_sleepable_lock_across_rcu,
struct file *file, struct kobject *kobj,
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_multi_args.c b/tools/testing/selftests/bpf/progs/struct_ops_multi_args.c
new file mode 100644
index 000000000000..c62be15757f0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_multi_args.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2026 Varun R Mallya */
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} prog_array SEC(".maps");
+
+SEC("struct_ops/test_refcounted_multi")
+__failure __msg("program with __ref argument cannot tail call")
+int test_refcounted_multi(unsigned long long *ctx)
+{
+ /* ctx[2] is used because the refcounted variable is the third argument */
+ struct task_struct *refcounted_task = (struct task_struct *)ctx[2];
+
+ bpf_task_release(refcounted_task);
+ bpf_tail_call(ctx, &prog_array, 0);
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_ref_acquire = {
+ .test_refcounted_multi = (void *)test_refcounted_multi,
+};
diff --git a/tools/testing/selftests/bpf/progs/tailcall3.c b/tools/testing/selftests/bpf/progs/tailcall3.c
index f60bcd7b8d4b..204f19c30a3e 100644
--- a/tools/testing/selftests/bpf/progs/tailcall3.c
+++ b/tools/testing/selftests/bpf/progs/tailcall3.c
@@ -5,7 +5,7 @@
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
- __uint(max_entries, 1);
+ __uint(max_entries, 2);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
@@ -23,6 +23,9 @@ int classifier_0(struct __sk_buff *skb)
SEC("tc")
int entry(struct __sk_buff *skb)
{
+	/* prog == NULL case: slot 1 exists (max_entries == 2) but holds no program, so this call falls through */
+ bpf_tail_call_static(skb, &jmp_table, 1);
+
bpf_tail_call_static(skb, &jmp_table, 0);
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/task_local_data.bpf.h b/tools/testing/selftests/bpf/progs/task_local_data.bpf.h
index fed53d63a7e5..1f396711f487 100644
--- a/tools/testing/selftests/bpf/progs/task_local_data.bpf.h
+++ b/tools/testing/selftests/bpf/progs/task_local_data.bpf.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __TASK_LOCAL_DATA_BPF_H
#define __TASK_LOCAL_DATA_BPF_H
@@ -87,7 +87,7 @@ struct tld_meta_u {
struct tld_data_u {
__u64 start; /* offset of tld_data_u->data in a page */
- char data[__PAGE_SIZE - sizeof(__u64)];
+ char data[__PAGE_SIZE - sizeof(__u64)] __attribute__((aligned(8)));
};
struct tld_map_value {
diff --git a/tools/testing/selftests/bpf/progs/test_access_variable_array.c b/tools/testing/selftests/bpf/progs/test_access_variable_array.c
deleted file mode 100644
index 326b7d1f496a..000000000000
--- a/tools/testing/selftests/bpf/progs/test_access_variable_array.c
+++ /dev/null
@@ -1,19 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2023 Bytedance */
-
-#include "vmlinux.h"
-#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
-
-unsigned long span = 0;
-
-SEC("fentry/sched_balance_rq")
-int BPF_PROG(fentry_fentry, int this_cpu, struct rq *this_rq,
- struct sched_domain *sd)
-{
- span = sd->span[0];
-
- return 0;
-}
-
-char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_global_func3.c b/tools/testing/selftests/bpf/progs/test_global_func3.c
index 142b682d3c2f..974fd8c19561 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func3.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func3.c
@@ -5,56 +5,56 @@
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
-__attribute__ ((noinline))
+static __attribute__ ((noinline))
int f1(struct __sk_buff *skb)
{
return skb->len;
}
-__attribute__ ((noinline))
+static __attribute__ ((noinline))
int f2(int val, struct __sk_buff *skb)
{
return f1(skb) + val;
}
-__attribute__ ((noinline))
+static __attribute__ ((noinline))
int f3(int val, struct __sk_buff *skb, int var)
{
return f2(var, skb) + val;
}
-__attribute__ ((noinline))
+static __attribute__ ((noinline))
int f4(struct __sk_buff *skb)
{
return f3(1, skb, 2);
}
-__attribute__ ((noinline))
+static __attribute__ ((noinline))
int f5(struct __sk_buff *skb)
{
return f4(skb);
}
-__attribute__ ((noinline))
+static __attribute__ ((noinline))
int f6(struct __sk_buff *skb)
{
return f5(skb);
}
-__attribute__ ((noinline))
+static __attribute__ ((noinline))
int f7(struct __sk_buff *skb)
{
return f6(skb);
}
-__attribute__ ((noinline))
+static __attribute__ ((noinline))
int f8(struct __sk_buff *skb)
{
return f7(skb);
}
SEC("tc")
-__failure __msg("the call stack of 8 frames")
+__failure __msg("the call stack of 9 frames")
int global_func3(struct __sk_buff *skb)
{
return f8(skb);
diff --git a/tools/testing/selftests/bpf/progs/test_global_func7.c b/tools/testing/selftests/bpf/progs/test_global_func7.c
index f182febfde3c..9e59625c1c92 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func7.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func7.c
@@ -12,7 +12,7 @@ void foo(struct __sk_buff *skb)
}
SEC("tc")
-__failure __msg("foo() doesn't return scalar")
+__success
int global_func7(struct __sk_buff *skb)
{
foo(skb);
diff --git a/tools/testing/selftests/bpf/progs/test_global_func_deep_stack.c b/tools/testing/selftests/bpf/progs/test_global_func_deep_stack.c
new file mode 100644
index 000000000000..1b634b543b62
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_global_func_deep_stack.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2026 Meta Platforms, Inc and affiliates. */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+/*
+ * Macro tricks to tersely define long non-recursive call chains. Computation
+ * is added to the functions to prevent tail-recursion optimization from
+ * reducing the stack size to 0.
+ */
+
+#define CAT(a, b) a ## b
+#define XCAT(a, b) CAT(a, b)
+
+#define F_0 \
+__attribute__((noinline)) \
+int f0(unsigned long a) \
+{ \
+ volatile long b = a + 16; \
+ if (a == 0) \
+ return 0; \
+ return b; \
+}
+
+#define FN(n, prev) \
+__attribute__((noinline)) \
+int XCAT(f, n)(unsigned long a) \
+{ \
+ volatile long b = XCAT(f, prev)(a - 1); \
+ if (!b) \
+ return 0; \
+ return b + 1; \
+}
+
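+/*
+ * For reference, FN(1, 0) expands to:
+ *
+ *	__attribute__((noinline))
+ *	int f1(unsigned long a)
+ *	{
+ *		volatile long b = f0(a - 1);
+ *		if (!b)
+ *			return 0;
+ *		return b + 1;
+ *	}
+ */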
+/* Call chain 33 levels deep. */
+#define F_1 F_0 FN(1, 0)
+#define F_2 F_1 FN(2, 1)
+#define F_3 F_2 FN(3, 2)
+#define F_4 F_3 FN(4, 3)
+#define F_5 F_4 FN(5, 4)
+#define F_6 F_5 FN(6, 5)
+#define F_7 F_6 FN(7, 6)
+#define F_8 F_7 FN(8, 7)
+#define F_9 F_8 FN(9, 8)
+#define F_10 F_9 FN(10, 9)
+#define F_11 F_10 FN(11, 10)
+#define F_12 F_11 FN(12, 11)
+#define F_13 F_12 FN(13, 12)
+#define F_14 F_13 FN(14, 13)
+#define F_15 F_14 FN(15, 14)
+#define F_16 F_15 FN(16, 15)
+#define F_17 F_16 FN(17, 16)
+#define F_18 F_17 FN(18, 17)
+#define F_19 F_18 FN(19, 18)
+#define F_20 F_19 FN(20, 19)
+#define F_21 F_20 FN(21, 20)
+#define F_22 F_21 FN(22, 21)
+#define F_23 F_22 FN(23, 22)
+#define F_24 F_23 FN(24, 23)
+#define F_25 F_24 FN(25, 24)
+#define F_26 F_25 FN(26, 25)
+#define F_27 F_26 FN(27, 26)
+#define F_28 F_27 FN(28, 27)
+#define F_29 F_28 FN(29, 28)
+#define F_30 F_29 FN(30, 29)
+#define F_31 F_30 FN(31, 30)
+#define F_32 F_31 FN(32, 31)
+
+#define CAT2(a, b) a ## b
+#define XCAT2(a, b) CAT2(a, b)
+
+#define F(n) XCAT2(F_, n)
+
+F(32)
+
+/* Ensure that even 32 levels deep, the function verifies. */
+SEC("syscall")
+__success
+int global_func_deep_stack_success(struct __sk_buff *skb)
+{
+ return f31(55);
+}
+
+/*
+ * Check we actually honor stack limits (33 * 16 = 528 > 512 = MAX_BPF_STACK).
+ * Each function's stack depth counts as 16 because the verifier rounds the
+ * actual per-function size up via round_up_stack_depth().
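+ * The verifier log reports 34 calls because the entry program's own frame
+ * is included in the chain (entry + f32..f0).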
+ */
+SEC("syscall")
+__failure __msg("combined stack size of 34 calls")
+int global_func_deep_stack_fail(struct __sk_buff *skb)
+{
+ return f32(123);
+}
diff --git a/tools/testing/selftests/bpf/progs/test_module_attach.c b/tools/testing/selftests/bpf/progs/test_module_attach.c
index 03d7f89787a1..5609e388fb58 100644
--- a/tools/testing/selftests/bpf/progs/test_module_attach.c
+++ b/tools/testing/selftests/bpf/progs/test_module_attach.c
@@ -7,23 +7,21 @@
#include <bpf/bpf_core_read.h>
#include "../test_kmods/bpf_testmod.h"
-__u32 raw_tp_read_sz = 0;
+__u32 sz = 0;
-SEC("raw_tp/bpf_testmod_test_read")
+SEC("?raw_tp/bpf_testmod_test_read")
int BPF_PROG(handle_raw_tp,
struct task_struct *task, struct bpf_testmod_test_read_ctx *read_ctx)
{
- raw_tp_read_sz = BPF_CORE_READ(read_ctx, len);
+ sz = BPF_CORE_READ(read_ctx, len);
return 0;
}
-__u32 raw_tp_bare_write_sz = 0;
-
-SEC("raw_tp/bpf_testmod_test_write_bare_tp")
+SEC("?raw_tp/bpf_testmod_test_write_bare_tp")
int BPF_PROG(handle_raw_tp_bare,
struct task_struct *task, struct bpf_testmod_test_write_ctx *write_ctx)
{
- raw_tp_bare_write_sz = BPF_CORE_READ(write_ctx, len);
+ sz = BPF_CORE_READ(write_ctx, len);
return 0;
}
@@ -31,7 +29,7 @@ int raw_tp_writable_bare_in_val = 0;
int raw_tp_writable_bare_early_ret = 0;
int raw_tp_writable_bare_out_val = 0;
-SEC("raw_tp.w/bpf_testmod_test_writable_bare_tp")
+SEC("?raw_tp.w/bpf_testmod_test_writable_bare_tp")
int BPF_PROG(handle_raw_tp_writable_bare,
struct bpf_testmod_test_writable_ctx *writable)
{
@@ -41,76 +39,65 @@ int BPF_PROG(handle_raw_tp_writable_bare,
return 0;
}
-__u32 tp_btf_read_sz = 0;
-
-SEC("tp_btf/bpf_testmod_test_read")
+SEC("?tp_btf/bpf_testmod_test_read")
int BPF_PROG(handle_tp_btf,
struct task_struct *task, struct bpf_testmod_test_read_ctx *read_ctx)
{
- tp_btf_read_sz = read_ctx->len;
+ sz = read_ctx->len;
return 0;
}
-__u32 fentry_read_sz = 0;
-
-SEC("fentry/bpf_testmod_test_read")
+SEC("?fentry/bpf_testmod_test_read")
int BPF_PROG(handle_fentry,
struct file *file, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len)
{
- fentry_read_sz = len;
+ sz = len;
return 0;
}
-__u32 fentry_manual_read_sz = 0;
-
-SEC("fentry")
+SEC("?fentry")
int BPF_PROG(handle_fentry_manual,
struct file *file, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len)
{
- fentry_manual_read_sz = len;
+ sz = len;
return 0;
}
-__u32 fentry_explicit_read_sz = 0;
-
-SEC("fentry/bpf_testmod:bpf_testmod_test_read")
+SEC("?fentry/bpf_testmod:bpf_testmod_test_read")
int BPF_PROG(handle_fentry_explicit,
struct file *file, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len)
{
- fentry_explicit_read_sz = len;
+ sz = len;
return 0;
}
-__u32 fentry_explicit_manual_read_sz = 0;
-
-SEC("fentry")
+SEC("?fentry")
int BPF_PROG(handle_fentry_explicit_manual,
struct file *file, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len)
{
- fentry_explicit_manual_read_sz = len;
+ sz = len;
return 0;
}
-__u32 fexit_read_sz = 0;
-int fexit_ret = 0;
+int retval = 0;
-SEC("fexit/bpf_testmod_test_read")
+SEC("?fexit/bpf_testmod_test_read")
int BPF_PROG(handle_fexit,
struct file *file, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len,
int ret)
{
- fexit_read_sz = len;
- fexit_ret = ret;
+ sz = len;
+ retval = ret;
return 0;
}
-SEC("fexit/bpf_testmod_return_ptr")
+SEC("?fexit/bpf_testmod_return_ptr")
int BPF_PROG(handle_fexit_ret, int arg, struct file *ret)
{
long buf = 0;
@@ -122,18 +109,16 @@ int BPF_PROG(handle_fexit_ret, int arg, struct file *ret)
return 0;
}
-__u32 fmod_ret_read_sz = 0;
-
-SEC("fmod_ret/bpf_testmod_test_read")
+SEC("?fmod_ret/bpf_testmod_test_read")
int BPF_PROG(handle_fmod_ret,
struct file *file, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len)
{
- fmod_ret_read_sz = len;
+ sz = len;
return 0; /* don't override the exit code */
}
-SEC("kprobe.multi/bpf_testmod_test_read")
+SEC("?kprobe.multi/bpf_testmod_test_read")
int BPF_PROG(kprobe_multi)
{
return 0;
diff --git a/tools/testing/selftests/bpf/progs/test_probe_user.c b/tools/testing/selftests/bpf/progs/test_probe_user.c
index a8e501af9604..4bc86c7654b1 100644
--- a/tools/testing/selftests/bpf/progs/test_probe_user.c
+++ b/tools/testing/selftests/bpf/progs/test_probe_user.c
@@ -5,13 +5,22 @@
#include <bpf/bpf_core_read.h>
#include "bpf_misc.h"
-static struct sockaddr_in old;
+struct test_pro_bss {
+ struct sockaddr_in old;
+ __u32 test_pid;
+};
+
+struct test_pro_bss bss;
static int handle_sys_connect_common(struct sockaddr_in *uservaddr)
{
struct sockaddr_in new;
+ __u32 cur = bpf_get_current_pid_tgid() >> 32;
+
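+	/* If userspace set bss.test_pid, ignore connect() calls from any
+	 * other process so they cannot clobber bss.old mid-test.
+	 */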
+ if (bss.test_pid && cur != bss.test_pid)
+ return 0;
- bpf_probe_read_user(&old, sizeof(old), uservaddr);
+ bpf_probe_read_user(&bss.old, sizeof(bss.old), uservaddr);
__builtin_memset(&new, 0xab, sizeof(new));
bpf_probe_write_user(uservaddr, &new, sizeof(new));
diff --git a/tools/testing/selftests/bpf/progs/test_trampoline_count.c b/tools/testing/selftests/bpf/progs/test_trampoline_count.c
index 7765720da7d5..02f52806b1b2 100644
--- a/tools/testing/selftests/bpf/progs/test_trampoline_count.c
+++ b/tools/testing/selftests/bpf/progs/test_trampoline_count.c
@@ -3,20 +3,20 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-SEC("fentry/bpf_modify_return_test")
-int BPF_PROG(fentry_test, int a, int *b)
+SEC("fentry/bpf_testmod_trampoline_count_test")
+int BPF_PROG(fentry_test)
{
return 0;
}
-SEC("fmod_ret/bpf_modify_return_test")
-int BPF_PROG(fmod_ret_test, int a, int *b, int ret)
+SEC("fmod_ret/bpf_testmod_trampoline_count_test")
+int BPF_PROG(fmod_ret_test, int ret)
{
return 0;
}
-SEC("fexit/bpf_modify_return_test")
-int BPF_PROG(fexit_test, int a, int *b, int ret)
+SEC("fexit/bpf_testmod_trampoline_count_test")
+int BPF_PROG(fexit_test, int ret)
{
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_usdt.c b/tools/testing/selftests/bpf/progs/test_usdt.c
index a78c87537b07..f00cb52874e0 100644
--- a/tools/testing/selftests/bpf/progs/test_usdt.c
+++ b/tools/testing/selftests/bpf/progs/test_usdt.c
@@ -138,4 +138,16 @@ int usdt_sib(struct pt_regs *ctx)
return 0;
}
+#ifdef __TARGET_ARCH_x86
+int executed;
+unsigned long expected_ip;
+
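+/* Count the hit only when the probe fired at the instruction pointer
+ * userspace expects; expected_ip is presumably filled in by the test runner
+ * before the USDT is triggered.
+ */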
+SEC("usdt")
+int usdt_executed(struct pt_regs *ctx)
+{
+ if (expected_ip == ctx->ip)
+ executed++;
+ return 0;
+}
+#endif
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/trigger_bench.c b/tools/testing/selftests/bpf/progs/trigger_bench.c
index 4ea0422d1042..3225b4aee8ff 100644
--- a/tools/testing/selftests/bpf/progs/trigger_bench.c
+++ b/tools/testing/selftests/bpf/progs/trigger_bench.c
@@ -1,10 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
-#include <linux/bpf.h>
+#include "vmlinux.h"
#include <asm/unistd.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
+#include "bpf/usdt.bpf.h"
char _license[] SEC("license") = "GPL";
@@ -180,3 +181,10 @@ int bench_trigger_rawtp(void *ctx)
handle(ctx);
return 0;
}
+
+SEC("?usdt")
+int bench_trigger_usdt(void *ctx)
+{
+ inc_counter();
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/uninit_stack.c b/tools/testing/selftests/bpf/progs/uninit_stack.c
index 046a204c8fc6..5db02323c89c 100644
--- a/tools/testing/selftests/bpf/progs/uninit_stack.c
+++ b/tools/testing/selftests/bpf/progs/uninit_stack.c
@@ -76,6 +76,7 @@ __naked int helper_uninit_to_misc(void *ctx)
* thus showing the stack state, matched by __msg(). \
*/ \
call %[dummy]; \
+ r1 = *(u64*)(r10 - 104); \
r0 = 0; \
exit; \
"
diff --git a/tools/testing/selftests/bpf/progs/verifier_align.c b/tools/testing/selftests/bpf/progs/verifier_align.c
new file mode 100644
index 000000000000..3e52686515ca
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_align.c
@@ -0,0 +1,581 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2026 Meta Platforms, Inc. and affiliates. */
+/* Converted from tools/testing/selftests/bpf/prog_tests/align.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+/* Four tests of known constants. These aren't staggeringly
+ * interesting since we track exact values now.
+ */
+
+SEC("tc")
+__success __log_level(2)
+__flag(BPF_F_ANY_ALIGNMENT)
+__msg("0: R1=ctx() R10=fp0")
+__msg("0: {{.*}} R3=2")
+__msg("1: {{.*}} R3=4")
+__msg("2: {{.*}} R3=8")
+__msg("3: {{.*}} R3=16")
+__msg("4: {{.*}} R3=32")
+__naked void mov(void)
+{
+ asm volatile (" \
+ r3 = 2; \
+ r3 = 4; \
+ r3 = 8; \
+ r3 = 16; \
+ r3 = 32; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("tc")
+__success __log_level(2)
+__flag(BPF_F_ANY_ALIGNMENT)
+__msg("0: R1=ctx() R10=fp0")
+__msg("0: {{.*}}R3=1")
+__msg("1: {{.*}}R3=2")
+__msg("2: {{.*}}R3=4")
+__msg("3: {{.*}}R3=8")
+__msg("4: {{.*}}R3=16")
+__msg("5: {{.*}}R3=1")
+__msg("6: {{.*}}R4=32")
+__msg("7: {{.*}}R4=16")
+__msg("8: {{.*}}R4=8")
+__msg("9: {{.*}}R4=4")
+__msg("10: {{.*}}R4=2")
+__naked void shift(void)
+{
+ asm volatile (" \
+ r3 = 1; \
+ r3 <<= 1; \
+ r3 <<= 1; \
+ r3 <<= 1; \
+ r3 <<= 1; \
+ r3 >>= 4; \
+ r4 = 32; \
+ r4 >>= 1; \
+ r4 >>= 1; \
+ r4 >>= 1; \
+ r4 >>= 1; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("tc")
+__success __log_level(2)
+__flag(BPF_F_ANY_ALIGNMENT)
+__msg("0: R1=ctx() R10=fp0")
+__msg("0: {{.*}}R3=4")
+__msg("1: {{.*}}R3=8")
+__msg("2: {{.*}}R3=10")
+__msg("3: {{.*}}R4=8")
+__msg("4: {{.*}}R4=12")
+__msg("5: {{.*}}R4=14")
+__naked void addsub(void)
+{
+ asm volatile (" \
+ r3 = 4; \
+ r3 += 4; \
+ r3 += 2; \
+ r4 = 8; \
+ r4 += 4; \
+ r4 += 2; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("tc")
+__success __log_level(2)
+__flag(BPF_F_ANY_ALIGNMENT)
+__msg("0: R1=ctx() R10=fp0")
+__msg("0: {{.*}}R3=7")
+__msg("1: {{.*}}R3=7")
+__msg("2: {{.*}}R3=14")
+__msg("3: {{.*}}R3=56")
+__naked void mul(void)
+{
+ asm volatile (" \
+ r3 = 7; \
+ r3 *= 1; \
+ r3 *= 2; \
+ r3 *= 4; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+/* Tests using unknown values */
+
+#define PREP_PKT_POINTERS \
+ "r2 = *(u32*)(r1 + %[__sk_buff_data]);" \
+ "r3 = *(u32*)(r1 + %[__sk_buff_data_end]);"
+
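+/* __LOAD_UNKNOWN() checks that at least 8 bytes of packet data are present,
+ * then loads a single byte into DST_REG: an unknown value with known range
+ * 0..255, i.e. var_off=(0x0; 0xff).
+ */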
+#define __LOAD_UNKNOWN(DST_REG, LBL) \
+ "r2 = *(u32*)(r1 + %[__sk_buff_data]);" \
+ "r3 = *(u32*)(r1 + %[__sk_buff_data_end]);" \
+ "r0 = r2;" \
+ "r0 += 8;" \
+ "if r3 >= r0 goto " LBL ";" \
+ "exit;" \
+LBL ":" \
+ DST_REG " = *(u8*)(r2 + 0);"
+
+#define LOAD_UNKNOWN(DST_REG) __LOAD_UNKNOWN(DST_REG, "l99_%=")
+
+SEC("tc")
+__success __log_level(2)
+__flag(BPF_F_ANY_ALIGNMENT)
+__msg("6: {{.*}} R2=pkt(r=8)")
+__msg("6: {{.*}} R3={{[^)]*}}var_off=(0x0; 0xff)")
+__msg("7: {{.*}} R3={{[^)]*}}var_off=(0x0; 0x1fe)")
+__msg("8: {{.*}} R3={{[^)]*}}var_off=(0x0; 0x3fc)")
+__msg("9: {{.*}} R3={{[^)]*}}var_off=(0x0; 0x7f8)")
+__msg("10: {{.*}} R3={{[^)]*}}var_off=(0x0; 0xff0)")
+__msg("12: {{.*}} R3=pkt_end()")
+__msg("17: {{.*}} R4={{[^)]*}}var_off=(0x0; 0xff)")
+__msg("18: {{.*}} R4={{[^)]*}}var_off=(0x0; 0x1fe0)")
+__msg("19: {{.*}} R4={{[^)]*}}var_off=(0x0; 0xff0)")
+__msg("20: {{.*}} R4={{[^)]*}}var_off=(0x0; 0x7f8)")
+__msg("21: {{.*}} R4={{[^)]*}}var_off=(0x0; 0x3fc)")
+__msg("22: {{.*}} R4={{[^)]*}}var_off=(0x0; 0x1fe)")
+__naked void unknown_shift(void)
+{
+ asm volatile (" \
+ " __LOAD_UNKNOWN("r3", "l99_%=") " \
+ r3 <<= 1; \
+ r3 <<= 1; \
+ r3 <<= 1; \
+ r3 <<= 1; \
+ " __LOAD_UNKNOWN("r4", "l98_%=") " \
+ r4 <<= 5; \
+ r4 >>= 1; \
+ r4 >>= 1; \
+ r4 >>= 1; \
+ r4 >>= 1; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__success __log_level(2)
+__flag(BPF_F_ANY_ALIGNMENT)
+__msg("6: {{.*}} R3={{[^)]*}}var_off=(0x0; 0xff)")
+__msg("7: {{.*}} R4={{[^)]*}}var_off=(0x0; 0xff)")
+__msg("8: {{.*}} R4={{[^)]*}}var_off=(0x0; 0xff)")
+__msg("9: {{.*}} R4={{[^)]*}}var_off=(0x0; 0xff)")
+__msg("10: {{.*}} R4={{[^)]*}}var_off=(0x0; 0x1fe)")
+__msg("11: {{.*}} R4={{[^)]*}}var_off=(0x0; 0xff)")
+__msg("12: {{.*}} R4={{[^)]*}}var_off=(0x0; 0x3fc)")
+__msg("13: {{.*}} R4={{[^)]*}}var_off=(0x0; 0xff)")
+__msg("14: {{.*}} R4={{[^)]*}}var_off=(0x0; 0x7f8)")
+__msg("15: {{.*}} R4={{[^)]*}}var_off=(0x0; 0xff0)")
+__naked void unknown_mul(void)
+{
+ asm volatile (" \
+ " LOAD_UNKNOWN("r3") " \
+ r4 = r3; \
+ r4 *= 1; \
+ r4 = r3; \
+ r4 *= 2; \
+ r4 = r3; \
+ r4 *= 4; \
+ r4 = r3; \
+ r4 *= 8; \
+ r4 *= 2; \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__success __log_level(2)
+__msg("2: {{.*}} R5=pkt(r=0)")
+__msg("4: {{.*}} R5=pkt(r=0,imm=14)")
+__msg("5: {{.*}} R4=pkt(r=0,imm=14)")
+__msg("9: {{.*}} R5=pkt(r=18,imm=14)")
+__msg("10: {{.*}} R4={{[^)]*}}var_off=(0x0; 0xff){{.*}} R5=pkt(r=18,imm=14)")
+__msg("13: {{.*}} R4={{[^)]*}}var_off=(0x0; 0xffff)")
+__msg("14: {{.*}} R4={{[^)]*}}var_off=(0x0; 0xffff)")
+__naked void packet_const_offset(void)
+{
+ asm volatile (" \
+ " PREP_PKT_POINTERS " \
+ r5 = r2; \
+ r0 = 0; \
+ /* Skip over ethernet header. */ \
+ r5 += 14; \
+ r4 = r5; \
+ r4 += 4; \
+ if r3 >= r4 goto l0_%=; \
+ exit; \
+l0_%=: r4 = *(u8*)(r5 + 0); \
+ r4 = *(u8*)(r5 + 1); \
+ r4 = *(u8*)(r5 + 2); \
+ r4 = *(u8*)(r5 + 3); \
+ r4 = *(u16*)(r5 + 0); \
+ r4 = *(u16*)(r5 + 2); \
+ r4 = *(u32*)(r5 + 0); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__success __log_level(2)
+__flag(BPF_F_ANY_ALIGNMENT)
+/* Calculated offset in R6 has unknown value, but known
+ * alignment of 4.
+ */
+__msg("6: {{.*}} R2=pkt(r=8)")
+__msg("7: {{.*}} R6={{[^)]*}}var_off=(0x0; 0x3fc)")
+/* Offset is added to packet pointer R5, resulting in
+ * known fixed offset, and variable offset from R6.
+ */
+__msg("11: {{.*}} R5=pkt(id=1,{{[^)]*}},var_off=(0x2; 0x7fc)")
+/* At the time the word size load is performed from R5,
+ * its total offset is NET_IP_ALIGN + reg->off (0) +
+ * reg->aux_off (14) which is 16. Then the variable
+ * offset is considered using reg->aux_off_align which
+ * is 4 and meets the load's requirements.
+ */
+__msg("15: {{.*}} R5={{[^)]*}}var_off=(0x2; 0x7fc)")
+/* Variable offset is added to R5 packet pointer,
+ * resulting in auxiliary alignment of 4. To avoid BPF
+ * verifier's precision backtracking logging
+ * interfering we also have a no-op R4 = R5
+ * instruction to validate R5 state. We also check
+ * that R4 is what it should be in such case.
+ */
+__msg("18: {{.*}} R4={{[^)]*}}var_off=(0x0; 0x3fc){{.*}} R5={{[^)]*}}var_off=(0x0; 0x3fc)")
+/* Constant offset is added to R5, resulting in
+ * reg->off of 14.
+ */
+__msg("19: {{.*}} R5=pkt(id=2,{{[^)]*}}var_off=(0x2; 0x7fc)")
+/* At the time the word size load is performed from R5,
+ * its total fixed offset is NET_IP_ALIGN + reg->off
+ * (14) which is 16. Then the variable offset is 4-byte
+ * aligned, so the total offset is 4-byte aligned and
+ * meets the load's requirements.
+ */
+__msg("24: {{.*}} R5={{[^)]*}}var_off=(0x2; 0x7fc)")
+/* Constant offset is added to R5 packet pointer,
+ * resulting in reg->off value of 14.
+ */
+__msg("26: {{.*}} R5=pkt(r=8,imm=14)")
+/* Variable offset is added to R5, resulting in a
+ * variable offset of (4n). See comment for insn #18
+ * for R4 = R5 trick.
+ */
+__msg("28: {{.*}} R4={{[^)]*}}var_off=(0x2; 0x7fc){{.*}} R5={{[^)]*}}var_off=(0x2; 0x7fc)")
+/* Constant is added to R5 again, setting reg->off to 18. */
+__msg("29: {{.*}} R5=pkt(id=3,{{[^)]*}}var_off=(0x2; 0x7fc)")
+/* And once more we add a variable; the resulting var_off
+ * is still (4n), fixed offset is not changed.
+ * Also, we create a new reg->id.
+ */
+__msg("31: {{.*}} R4={{[^)]*}}var_off=(0x2; 0xffc){{.*}} R5={{[^)]*}}var_off=(0x2; 0xffc)")
+/* At the time the word size load is performed from R5,
+ * its total fixed offset is NET_IP_ALIGN + reg->off (18)
+ * which is 20. Then the variable offset is (4n), so
+ * the total offset is 4-byte aligned and meets the
+ * load's requirements.
+ */
+__msg("35: {{.*}} R5={{[^)]*}}var_off=(0x2; 0xffc)")
+__naked void packet_variable_offset(void)
+{
+ asm volatile (" \
+ " LOAD_UNKNOWN("r6") " \
+ r6 <<= 2; \
+ /* First, add a constant to the R5 packet pointer,\
+ * then a variable with a known alignment. \
+ */ \
+ r5 = r2; \
+ r5 += 14; \
+ r5 += r6; \
+ r4 = r5; \
+ r4 += 4; \
+ if r3 >= r4 goto l0_%=; \
+ exit; \
+l0_%=: r4 = *(u32*)(r5 + 0); \
+ /* Now, test in the other direction. Adding first\
+ * the variable offset to R5, then the constant.\
+ */ \
+ r5 = r2; \
+ r5 += r6; \
+ r4 = r5; \
+ r5 += 14; \
+ r4 = r5; \
+ r4 += 4; \
+ if r3 >= r4 goto l1_%=; \
+ exit; \
+l1_%=: r4 = *(u32*)(r5 + 0); \
+ /* Test multiple accumulations of unknown values\
+ * into a packet pointer. \
+ */ \
+ r5 = r2; \
+ r5 += 14; \
+ r5 += r6; \
+ r4 = r5; \
+ r5 += 4; \
+ r5 += r6; \
+ r4 = r5; \
+ r4 += 4; \
+ if r3 >= r4 goto l2_%=; \
+ exit; \
+l2_%=: r4 = *(u32*)(r5 + 0); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__success __log_level(2)
+__flag(BPF_F_ANY_ALIGNMENT)
+/* Calculated offset in R6 has unknown value, but known
+ * alignment of 4.
+ */
+__msg("6: {{.*}} R2=pkt(r=8)")
+__msg("7: {{.*}} R6={{[^)]*}}var_off=(0x0; 0x3fc)")
+/* Adding 14 makes R6 be (4n+2) */
+__msg("8: {{.*}} R6={{[^)]*}}var_off=(0x2; 0x7fc)")
+/* Packet pointer has (4n+2) offset */
+__msg("11: {{.*}} R5={{[^)]*}}var_off=(0x2; 0x7fc)")
+__msg("12: {{.*}} R4={{[^)]*}}var_off=(0x2; 0x7fc)")
+/* At the time the word size load is performed from R5,
+ * its total fixed offset is NET_IP_ALIGN + reg->off (0)
+ * which is 2. Then the variable offset is (4n+2), so
+ * the total offset is 4-byte aligned and meets the
+ * load's requirements.
+ */
+__msg("15: {{.*}} R5={{[^)]*}}var_off=(0x2; 0x7fc)")
+/* Newly read value in R6 was shifted left by 2, so has
+ * known alignment of 4.
+ */
+__msg("17: {{.*}} R6={{[^)]*}}var_off=(0x0; 0x3fc)")
+/* Added (4n) to packet pointer's (4n+2) var_off, giving
+ * another (4n+2).
+ */
+__msg("19: {{.*}} R5={{[^)]*}}var_off=(0x2; 0xffc)")
+__msg("20: {{.*}} R4={{[^)]*}}var_off=(0x2; 0xffc)")
+/* At the time the word size load is performed from R5,
+ * its total fixed offset is NET_IP_ALIGN + reg->off (0)
+ * which is 2. Then the variable offset is (4n+2), so
+ * the total offset is 4-byte aligned and meets the
+ * load's requirements.
+ */
+__msg("23: {{.*}} R5={{[^)]*}}var_off=(0x2; 0xffc)")
+__naked void packet_variable_offset_2(void)
+{
+ asm volatile (" \
+ /* Create an unknown offset, (4n+2)-aligned */ \
+ " LOAD_UNKNOWN("r6") " \
+ r6 <<= 2; \
+ r6 += 14; \
+ /* Add it to the packet pointer */ \
+ r5 = r2; \
+ r5 += r6; \
+ /* Check bounds and perform a read */ \
+ r4 = r5; \
+ r4 += 4; \
+ if r3 >= r4 goto l0_%=; \
+ exit; \
+l0_%=: r6 = *(u32*)(r5 + 0); \
+ /* Make a (4n) offset from the value we just read */\
+ r6 &= 0xff; \
+ r6 <<= 2; \
+ /* Add it to the packet pointer */ \
+ r5 += r6; \
+ /* Check bounds and perform a read */ \
+ r4 = r5; \
+ r4 += 4; \
+ if r3 >= r4 goto l1_%=; \
+ exit; \
+l1_%=: r6 = *(u32*)(r5 + 0); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__failure __log_level(2)
+__msg("3: {{.*}} R5=pkt_end()")
+/* (ptr - ptr) << 2 == unknown, (4n) */
+__msg("5: {{.*}} R5={{[^)]*}}var_off=(0x0; 0xfffffffffffffffc)")
+/* (4n) + 14 == (4n+2). We blow our bounds, because
+ * the add could overflow.
+ */
+__msg("6: {{.*}} R5={{[^)]*}}var_off=(0x2; 0xfffffffffffffffc)")
+/* Checked s>=0 */
+__msg("9: {{.*}} R5={{[^)]*}}var_off=(0x2; 0x7ffffffffffffffc)")
+/* packet pointer + nonnegative (4n+2) */
+__msg("11: {{.*}} R4={{[^)]*}}var_off=(0x2; 0x7ffffffffffffffc){{.*}} R6={{[^)]*}}var_off=(0x2; 0x7ffffffffffffffc)")
+__msg("12: (07) r4 += 4")
+/* packet smax bound overflow */
+__msg("pkt pointer offset -9223372036854775808 is not allowed")
+__naked void dubious_pointer_arithmetic(void)
+{
+ asm volatile (" \
+ " PREP_PKT_POINTERS " \
+ r0 = 0; \
+ /* (ptr - ptr) << 2 */ \
+ r5 = r3; \
+ r5 -= r2; \
+ r5 <<= 2; \
+ /* We have a (4n) value. Let's make a packet offset\
+ * out of it. First add 14, to make it a (4n+2)\
+ */ \
+ r5 += 14; \
+ /* Then make sure it's nonnegative */ \
+ if r5 s>= 0 goto l0_%=; \
+ exit; \
+l0_%=: /* Add it to packet pointer */ \
+ r6 = r2; \
+ r6 += r5; \
+ /* Check bounds and perform a read */ \
+ r4 = r6; \
+ r4 += 4; \
+ if r3 >= r4 goto l1_%=; \
+ exit; \
+l1_%=: r4 = *(u32*)(r6 + 0); \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__success __log_level(2)
+__flag(BPF_F_ANY_ALIGNMENT)
+/* Calculated offset in R6 has unknown value, but known
+ * alignment of 4.
+ */
+__msg("6: {{.*}} R2=pkt(r=8)")
+__msg("8: {{.*}} R6={{[^)]*}}var_off=(0x0; 0x3fc)")
+/* Adding 14 makes R6 be (4n+2) */
+__msg("9: {{.*}} R6={{[^)]*}}var_off=(0x2; 0x7fc)")
+/* New unknown value in R7 is (4n) */
+__msg("10: {{.*}} R7={{[^)]*}}var_off=(0x0; 0x3fc)")
+/* Subtracting it from R6 blows our unsigned bounds */
+__msg("11: {{.*}} R6={{[^)]*}}var_off=(0x2; 0xfffffffffffffffc)")
+/* Checked s>= 0 */
+__msg("14: {{.*}} R6={{[^)]*}}var_off=(0x2; 0x7fc)")
+/* At the time the word size load is performed from R5,
+ * its total fixed offset is NET_IP_ALIGN + reg->off (0)
+ * which is 2. Then the variable offset is (4n+2), so
+ * the total offset is 4-byte aligned and meets the
+ * load's requirements.
+ */
+__msg("20: {{.*}} R5={{[^)]*}}var_off=(0x2; 0x7fc)")
+__naked void variable_subtraction(void)
+{
+ asm volatile (" \
+ /* Create an unknown offset, (4n+2)-aligned */ \
+ " LOAD_UNKNOWN("r6") " \
+ r7 = r6; \
+ r6 <<= 2; \
+ r6 += 14; \
+ /* Create another unknown, (4n)-aligned, and subtract\
+ * it from the first one \
+ */ \
+ r7 <<= 2; \
+ r6 -= r7; \
+ /* Bounds-check the result */ \
+ if r6 s>= 0 goto l0_%=; \
+ exit; \
+l0_%=: /* Add it to the packet pointer */ \
+ r5 = r2; \
+ r5 += r6; \
+ /* Check bounds and perform a read */ \
+ r4 = r5; \
+ r4 += 4; \
+ if r3 >= r4 goto l1_%=; \
+ exit; \
+l1_%=: r6 = *(u32*)(r5 + 0); \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__success __log_level(2)
+__flag(BPF_F_ANY_ALIGNMENT)
+/* Calculated offset in R6 has unknown value, but known
+ * alignment of 4.
+ */
+__msg("6: {{.*}} R2=pkt(r=8)")
+__msg("9: {{.*}} R6={{[^)]*}}var_off=(0x0; 0x3c)")
+/* Adding 14 makes R6 be (4n+2) */
+__msg("10: {{.*}} R6={{[^)]*}}var_off=(0x2; 0x7c)")
+/* Subtracting from packet pointer overflows ubounds */
+__msg("13: R5={{[^)]*}}var_off=(0xffffffffffffff82; 0x7c)")
+/* New unknown value in R7 is (4n), >= 76 */
+__msg("14: {{.*}} R7={{[^)]*}}var_off=(0x0; 0x7fc)")
+/* Adding it to packet pointer gives nice bounds again */
+__msg("16: {{.*}} R5={{[^)]*}}var_off=(0x2; 0x7fc)")
+/* At the time the word size load is performed from R5,
+ * its total fixed offset is NET_IP_ALIGN + reg->off (0)
+ * which is 2. Then the variable offset is (4n+2), so
+ * the total offset is 4-byte aligned and meets the
+ * load's requirements.
+ */
+__msg("20: {{.*}} R5={{[^)]*}}var_off=(0x2; 0x7fc)")
+__naked void pointer_variable_subtraction(void)
+{
+ asm volatile (" \
+ /* Create an unknown offset, (4n+2)-aligned and bounded\
+ * to [14,74] \
+ */ \
+ " LOAD_UNKNOWN("r6") " \
+ r7 = r6; \
+ r6 &= 0xf; \
+ r6 <<= 2; \
+ r6 += 14; \
+ /* Subtract it from the packet pointer */ \
+ r5 = r2; \
+ r5 -= r6; \
+ /* Create another unknown, (4n)-aligned and >= 74.\
+ * That in fact means >= 76, since 74 mod 4 == 2\
+ */ \
+ r7 <<= 2; \
+ r7 += 76; \
+ /* Add it to the packet pointer */ \
+ r5 += r7; \
+ /* Check bounds and perform a read */ \
+ r4 = r5; \
+ r4 += 4; \
+ if r3 >= r4 goto l0_%=; \
+ exit; \
+l0_%=: r6 = *(u32*)(r5 + 0); \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_arena.c b/tools/testing/selftests/bpf/progs/verifier_arena.c
index c4b8daac4388..62e282f4448a 100644
--- a/tools/testing/selftests/bpf/progs/verifier_arena.c
+++ b/tools/testing/selftests/bpf/progs/verifier_arena.c
@@ -477,4 +477,134 @@ int arena_kfuncs_under_bpf_lock(void *ctx)
return 0;
}
+
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+
+/*
+ * Test that scalar += PTR_TO_ARENA correctly upgrades the
+ * destination register to a PTR_TO_ARENA.
+ */
+SEC("syscall")
+__success __retval(0)
+int scalar_add_arena_ptr(void *ctx)
+{
+ int __arena *scalar, *arena_ptr;
+
+ volatile char __arena *base = arena_base(&arena);
+
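+	/* addr_space_cast(p, 0x0, 0x1) casts p from the arena address space
+	 * (as 1) into the kernel address space (as 0); the verifier then
+	 * tracks the result as PTR_TO_ARENA.
+	 */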
+ asm volatile (
+ "%[arena_ptr] = 8192;"
+ "%[arena_ptr] = addr_space_cast(%[arena_ptr], 0x0, 0x1);"
+ "%[scalar] = 12;"
+ "%[scalar] += %[arena_ptr];"
+ : [scalar] "=r"(scalar),
+ [arena_ptr] "=&r"(arena_ptr)
+ : "r"(base)
+ :
+ );
+ return 0;
+}
+
+/*
+ * Tests that PTR_TO_ARENA + PTR_TO_ARENA is allowed.
+ */
+SEC("syscall")
+__success __retval(0)
+int arena_ptr_add_arena_ptr(void *ctx)
+{
+ int __arena *arena_ptr2, *arena_ptr1;
+
+ /* Needed for the verifier to link the arena to the subprog. */
+ volatile char __arena *base = arena_base(&arena);
+
+ asm volatile (
+ "%[arena_ptr1] = 8192;"
+ "%[arena_ptr1] = addr_space_cast(%[arena_ptr1], 0x0, 0x1);"
+ "%[arena_ptr2] = 4096;"
+ "%[arena_ptr2] = addr_space_cast(%[arena_ptr2], 0x0, 0x1);"
+ "%[arena_ptr2] += %[arena_ptr1];"
+ : [arena_ptr2] "=r"(arena_ptr2),
+ [arena_ptr1] "=&r"(arena_ptr1)
+ : "r"(base)
+ :
+ );
+ return 0;
+}
+
+SEC("syscall")
+__success __retval(0)
+int scalar_xor_arena_ptr(void *ctx)
+{
+ int __arena *scalar, *arena_ptr;
+
+ volatile char __arena *base = arena_base(&arena);
+
+ asm volatile (
+ "%[arena_ptr] = 8192;"
+ "%[arena_ptr] = addr_space_cast(%[arena_ptr], 0x0, 0x1);"
+ "%[scalar] = 12;"
+ "%[scalar] ^= %[arena_ptr];"
+ : [scalar] "=r"(scalar),
+ [arena_ptr] "=&r"(arena_ptr)
+ : "r"(base)
+ :
+ );
+ return 0;
+}
+
+/*
+ * Tests that PTR_TO_ARENA and non-arena pointers can be added.
+ */
+SEC("syscall")
+__success __retval(0)
+int arena_ptr_add_to_non_arena_ptr(void *ctx)
+{
+ register int __arena *arena_ptr asm("r3");
+ register void *dst asm("r4");
+
+ volatile char __arena *base = arena_base(&arena);
+
+ asm volatile (
+ "%[arena_ptr] = 8192;"
+ "%[arena_ptr] = addr_space_cast(%[arena_ptr], 0x0, 0x1);"
+ "%[dst] = %[ctx];"
+ "%[dst] += %[arena_ptr];"
+ : [arena_ptr] "=&r"(arena_ptr),
+ [dst] "=&r"(dst)
+ : [ctx] "r"(ctx), "r"(base)
+ :
+ );
+
+ (void)ctx;
+
+ return 0;
+}
+
+SEC("syscall")
+__success __retval(0)
+int non_arena_ptr_add_to_arena_ptr(void *ctx)
+{
+ register int __arena *arena_ptr asm("r3");
+ register void *src asm("r4");
+
+ volatile char __arena *base = arena_base(&arena);
+
+ asm volatile (
+ "%[arena_ptr] = 8192;"
+ "%[arena_ptr] = addr_space_cast(%[arena_ptr], 0x0, 0x1);"
+ "%[src] = %[ctx];"
+ "%[arena_ptr] += %[src];"
+ : [arena_ptr] "=&r"(arena_ptr),
+ [src] "=&r"(src)
+ : [ctx] "r"(ctx), "r"(base)
+ :
+ );
+
+ (void)ctx;
+
+ return 0;
+}
+
+#endif
+
char _license[] SEC("license") = "GPL";
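The inline-asm forms above exist because, when computing an arena address as base plus offset, the compiler can emit the add with the scalar as the destination register; the verifier must then upgrade that destination to PTR_TO_ARENA rather than reject the program. A C-level sketch of the pattern, assuming the arena map and the arena_base() helper declared earlier in this file:

SEC("syscall")
__success __retval(0)
int arena_offset_in_c(void *ctx)
{
	/* base-plus-offset arithmetic; instruction selection may order the
	 * add as "scalar += arena pointer", exactly what the asm tests
	 * above pin down deterministically
	 */
	volatile char __arena *p = arena_base(&arena) + 8192;

	(void)p;
	return 0;
}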
diff --git a/tools/testing/selftests/bpf/progs/verifier_async_cb_context.c b/tools/testing/selftests/bpf/progs/verifier_async_cb_context.c
index 39aff82549c9..6bf95550a024 100644
--- a/tools/testing/selftests/bpf/progs/verifier_async_cb_context.c
+++ b/tools/testing/selftests/bpf/progs/verifier_async_cb_context.c
@@ -31,7 +31,7 @@ static int timer_cb(void *map, int *key, struct bpf_timer *timer)
}
SEC("fentry/bpf_fentry_test1")
-__failure __msg("helper call might sleep in a non-sleepable prog")
+__failure __msg("sleepable helper bpf_copy_from_user#{{[0-9]+}} in non-sleepable prog")
int timer_non_sleepable_prog(void *ctx)
{
struct timer_elem *val;
@@ -47,7 +47,7 @@ int timer_non_sleepable_prog(void *ctx)
}
SEC("lsm.s/file_open")
-__failure __msg("helper call might sleep in a non-sleepable prog")
+__failure __msg("sleepable helper bpf_copy_from_user#{{[0-9]+}} in non-sleepable prog")
int timer_sleepable_prog(void *ctx)
{
struct timer_elem *val;
diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds.c b/tools/testing/selftests/bpf/progs/verifier_bounds.c
index 79a328276805..c1ae013dee29 100644
--- a/tools/testing/selftests/bpf/progs/verifier_bounds.c
+++ b/tools/testing/selftests/bpf/progs/verifier_bounds.c
@@ -202,7 +202,7 @@ l0_%=: /* exit */ \
SEC("tc")
__description("bounds check based on reg_off + var_off + insn_off. test1")
-__failure __msg("value_size=8 off=1073741825")
+__failure __msg("map_value pointer offset 1073741822 is not allowed")
__naked void var_off_insn_off_test1(void)
{
asm volatile (" \
@@ -1066,7 +1066,6 @@ l0_%=: r0 = 0; \
SEC("xdp")
__description("bound check with JMP_JSLT for crossing 64-bit signed boundary")
__success __retval(0)
-__flag(BPF_F_TEST_REG_INVARIANTS)
__naked void crossing_64_bit_signed_boundary_2(void)
{
asm volatile (" \
@@ -1148,7 +1147,6 @@ l0_%=: r0 = 0; \
SEC("xdp")
__description("bound check with JMP32_JSLT for crossing 32-bit signed boundary")
__success __retval(0)
-__flag(BPF_F_TEST_REG_INVARIANTS)
__naked void crossing_32_bit_signed_boundary_2(void)
{
asm volatile (" \
@@ -1536,7 +1534,7 @@ __naked void sub32_partial_overflow(void)
SEC("socket")
__description("dead branch on jset, does not result in invariants violation error")
__success __log_level(2)
-__retval(0) __flag(BPF_F_TEST_REG_INVARIANTS)
+__retval(0)
__naked void jset_range_analysis(void)
{
asm volatile (" \
@@ -1572,7 +1570,7 @@ l0_%=: r0 = 0; \
*/
SEC("socket")
__description("bounds deduction cross sign boundary, negative overlap")
-__success __log_level(2) __flag(BPF_F_TEST_REG_INVARIANTS)
+__success __log_level(2)
__msg("7: (1f) r0 -= r6 {{.*}} R0=scalar(smin=smin32=-655,smax=smax32=-146,umin=0xfffffffffffffd71,umax=0xffffffffffffff6e,umin32=0xfffffd71,umax32=0xffffff6e,var_off=(0xfffffffffffffc00; 0x3ff))")
__retval(0)
__naked void bounds_deduct_negative_overlap(void)
@@ -1616,7 +1614,7 @@ l0_%=: r0 = 0; \
*/
SEC("socket")
__description("bounds deduction cross sign boundary, positive overlap")
-__success __log_level(2) __flag(BPF_F_TEST_REG_INVARIANTS)
+__success __log_level(2)
__msg("3: (2d) if r0 > r1 {{.*}} R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=127,var_off=(0x0; 0x7f))")
__retval(0)
__naked void bounds_deduct_positive_overlap(void)
@@ -1649,7 +1647,7 @@ l0_%=: r0 = 0; \
*/
SEC("socket")
__description("bounds deduction cross sign boundary, two overlaps")
-__failure __flag(BPF_F_TEST_REG_INVARIANTS)
+__failure
__msg("3: (2d) if r0 > r1 {{.*}} R0=scalar(smin=smin32=-128,smax=smax32=127,umax=0xffffffffffffff80)")
__msg("frame pointer is read only")
__naked void bounds_deduct_two_overlaps(void)
@@ -1713,7 +1711,7 @@ SEC("socket")
__description("conditional jump on same register, branch taken")
__not_msg("20: (b7) r0 = 1 {{.*}} R0=1")
__success __log_level(2)
-__retval(0) __flag(BPF_F_TEST_REG_INVARIANTS)
+__retval(0)
__naked void condition_jump_on_same_register(void *ctx)
{
asm volatile(" \
@@ -1748,7 +1746,7 @@ SEC("socket")
__description("jset on same register, constant value branch taken")
__not_msg("7: (b7) r0 = 1 {{.*}} R0=1")
__success __log_level(2)
-__retval(0) __flag(BPF_F_TEST_REG_INVARIANTS)
+__retval(0)
__naked void jset_on_same_register_1(void *ctx)
{
asm volatile(" \
@@ -1770,7 +1768,7 @@ SEC("socket")
__description("jset on same register, scalar value branch taken")
__not_msg("12: (b7) r0 = 1 {{.*}} R0=1")
__success __log_level(2)
-__retval(0) __flag(BPF_F_TEST_REG_INVARIANTS)
+__retval(0)
__naked void jset_on_same_register_2(void *ctx)
{
asm volatile(" \
@@ -1800,7 +1798,6 @@ __description("jset on same register, scalar value unknown branch 1")
__msg("3: (b7) r0 = 0 {{.*}} R0=0")
__msg("5: (b7) r0 = 1 {{.*}} R0=1")
__success __log_level(2)
-__flag(BPF_F_TEST_REG_INVARIANTS)
__naked void jset_on_same_register_3(void *ctx)
{
asm volatile(" \
@@ -1822,7 +1819,6 @@ __description("jset on same register, scalar value unknown branch 2")
__msg("4: (b7) r0 = 0 {{.*}} R0=0")
__msg("6: (b7) r0 = 1 {{.*}} R0=1")
__success __log_level(2)
-__flag(BPF_F_TEST_REG_INVARIANTS)
__naked void jset_on_same_register_4(void *ctx)
{
asm volatile(" \
@@ -1845,7 +1841,6 @@ __description("jset on same register, scalar value unknown branch 3")
__msg("4: (b7) r0 = 0 {{.*}} R0=0")
__msg("6: (b7) r0 = 1 {{.*}} R0=1")
__success __log_level(2)
-__flag(BPF_F_TEST_REG_INVARIANTS)
__naked void jset_on_same_register_5(void *ctx)
{
asm volatile(" \
@@ -1877,7 +1872,6 @@ SEC("socket")
__description("bounds refinement with single-value tnum on umax")
__msg("3: (15) if r0 == 0xe0 {{.*}} R0=240")
__success __log_level(2)
-__flag(BPF_F_TEST_REG_INVARIANTS)
__naked void bounds_refinement_tnum_umax(void *ctx)
{
asm volatile(" \
@@ -1907,7 +1901,6 @@ SEC("socket")
__description("bounds refinement with single-value tnum on umin")
__msg("3: (15) if r0 == 0xf0 {{.*}} R0=224")
__success __log_level(2)
-__flag(BPF_F_TEST_REG_INVARIANTS)
__naked void bounds_refinement_tnum_umin(void *ctx)
{
asm volatile(" \
@@ -2002,7 +1995,6 @@ __naked void bounds_refinement_multiple_overlaps(void *ctx)
SEC("socket")
__success
-__flag(BPF_F_TEST_REG_INVARIANTS)
__naked void signed_unsigned_intersection32_case1(void *ctx)
{
asm volatile(" \
@@ -2020,7 +2012,6 @@ __naked void signed_unsigned_intersection32_case1(void *ctx)
SEC("socket")
__success
-__flag(BPF_F_TEST_REG_INVARIANTS)
__naked void signed_unsigned_intersection32_case2(void *ctx)
{
asm volatile(" \
@@ -2037,6 +2028,40 @@ __naked void signed_unsigned_intersection32_case2(void *ctx)
: __clobber_all);
}
+/*
+ * After instruction 3, the u64 and s64 ranges look as follows:
+ * 0 umin=2 umax=0xff..ff00..03 U64_MAX
+ * | [xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx] |
+ * |----------------------------|------------------------------|
+ * |xx] [xxxxxxxxxxxxxxxxxxxxxxxxxxxx|
+ * 0 smax=2 smin=0x800..02 -1
+ *
+ * The two ranges can't be refined against each other because they overlap in
+ * two places. Once instruction 4 adds an upper bound to the u64 range, the
+ * refinement can happen. This test validates that the refinement does happen
+ * and is not overwritten by the less precise 32-bit ranges.
+ */
+SEC("socket")
+__description("bounds refinement: 64bits ranges not overwritten by 32bits ranges")
+__msg("3: (65) if r0 s> 0x2 {{.*}} R0=scalar(smin=0x8000000000000002,smax=2,umin=smin32=umin32=2,umax=0xffffffff00000003,smax32=umax32=3")
+__msg("4: (25) if r0 > 0x13 {{.*}} R0=2")
+__success __log_level(2)
+__naked void refinement_32bounds_not_overwriting_64bounds(void *ctx)
+{
+ asm volatile(" \
+ call %[bpf_get_prandom_u32]; \
+ if w0 < 2 goto +5; \
+ if w0 > 3 goto +4; \
+ if r0 s> 2 goto +3; \
+ if r0 > 19 goto +2; \
+ if r0 == 2 goto +1; \
+ r10 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
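The collapse to R0=2 at instruction 4 can be checked by hand. A minimal user-space model of the signed/unsigned intersection, with the bounds taken from the expected log lines above:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* after insn 3: u64 in [2, 0xffffffff00000003],
	 * s64 in [0x8000000000000002 (INT64_MIN + 2), 2]
	 */
	uint64_t umin = 2, umax = 0xffffffff00000003ull;
	int64_t smin = INT64_MIN + 2, smax = 2;

	/* fallthrough of "if r0 > 0x13" adds an unsigned upper bound */
	if (umax > 0x13)
		umax = 0x13;

	/* [2, 0x13] is entirely non-negative as s64, so the signed range
	 * can be intersected with it: smin rises to 2, smax stays 2, and
	 * the register collapses to the constant 2 -- hence "4: ... R0=2"
	 */
	if (smin < (int64_t)umin)
		smin = (int64_t)umin;
	if (smax > (int64_t)umax)
		smax = (int64_t)umax;
	assert(smin == 2 && smax == 2);
	return 0;
}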
SEC("socket")
__description("maybe_fork_scalars: OR with constant rejects OOB")
__failure __msg("invalid access to map value")
@@ -2131,4 +2156,32 @@ l0_%=: r0 = 0; \
: __clobber_all);
}
+/*
+ * The last jump can be detected as always taken because the intersection of
+ * the R5 and R7 32-bit tnums produces a constant that isn't within R7's s32
+ * bounds.
+ */
+SEC("socket")
+__description("dead branch: tnums give impossible constant if equal")
+__success
+__naked void tnums_equal_impossible_constant(void *ctx)
+{
+ asm volatile(" \
+ call %[bpf_get_prandom_u32]; \
+ r5 = r0; \
+ /* Set r5's var_off32 to (0; 0xfffffffc) */ \
+ r5 &= 0xfffffffffffffffc; \
+ r7 = r0; \
+ /* Set r7's var_off32 to (0x0; 0x1) */ \
+ r7 &= 0x1; \
+ /* Now, s32=[-43; -42], var_off32=(0xffffffd4; 0x3) */ \
+ r7 += -43; \
+ /* On fallthrough, var_off32=-44, not in s32 */ \
+ if w5 != w7 goto +1; \
+ r10 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
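The impossible-constant reasoning follows directly from tnum_intersect() in kernel/bpf/tnum.c. A quick user-space check of the constant the equality branch would imply:

#include <assert.h>
#include <stdint.h>

struct tnum { uint64_t value, mask; };

/* as implemented by tnum_intersect() in kernel/bpf/tnum.c */
static struct tnum tnum_intersect(struct tnum a, struct tnum b)
{
	uint64_t v = a.value | b.value;
	uint64_t mu = a.mask & b.mask;

	return (struct tnum){ v & ~mu, mu };
}

int main(void)
{
	struct tnum w5 = { 0x0, 0xfffffffc };	/* low two bits known zero */
	struct tnum w7 = { 0xffffffd4, 0x3 };	/* s32 range [-43, -42] */
	struct tnum eq = tnum_intersect(w5, w7);

	/* mask 0 means constant: -44, outside w7's s32 range [-43, -42],
	 * so the "equal" branch is impossible and the jump is always taken
	 */
	assert(eq.mask == 0 && (int32_t)eq.value == -44);
	return 0;
}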
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_ctx.c b/tools/testing/selftests/bpf/progs/verifier_ctx.c
index 5ebf7d9bcc55..7856dad3d1f3 100644
--- a/tools/testing/selftests/bpf/progs/verifier_ctx.c
+++ b/tools/testing/selftests/bpf/progs/verifier_ctx.c
@@ -4,6 +4,10 @@
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+static const char ctx_strncmp_target[] = "ctx";
+static const char ctx_snprintf_fmt[] = "";
SEC("tc")
__description("context stores via BPF_ATOMIC")
@@ -69,7 +73,6 @@ __naked void ctx_pointer_to_helper_1(void)
SEC("socket")
__description("pass modified ctx pointer to helper, 2")
__failure __msg("negative offset ctx ptr R1 off=-612 disallowed")
-__failure_unpriv __msg_unpriv("negative offset ctx ptr R1 off=-612 disallowed")
__naked void ctx_pointer_to_helper_2(void)
{
asm volatile (" \
@@ -292,4 +295,568 @@ padding_access("cgroup/post_bind4", bpf_sock, dst_port, 2);
__failure __msg("invalid bpf_context access")
padding_access("sk_reuseport", sk_reuseport_md, hash, 4);
+SEC("?syscall")
+__description("syscall: write to ctx with fixed offset")
+__success
+int syscall_ctx_fixed_off_write(void *ctx)
+{
+ char *p = ctx;
+
+ *(__u32 *)p = 0;
+ *(__u32 *)(p + 4) = 0;
+ return 0;
+}
+
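These loads and stores are legal because a syscall program's ctx is simply the buffer user space hands in through BPF_PROG_TEST_RUN. A minimal loader-side sketch using libbpf's test-run API (buffer size and program handle are illustrative):

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

int run_syscall_prog(struct bpf_program *prog)
{
	__u64 ctx_buf[8] = {};	/* becomes the program's ctx */
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.ctx_in = ctx_buf,
		.ctx_size_in = sizeof(ctx_buf),
	);

	/* the verifier caps syscall ctx offsets at U16_MAX, as the
	 * rejection tests further below demonstrate
	 */
	return bpf_prog_test_run_opts(bpf_program__fd(prog), &opts);
}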
+SEC("?syscall")
+__description("syscall: read ctx with fixed offset")
+__success
+int syscall_ctx_fixed_off_read(void *ctx)
+{
+ char *p = ctx;
+ volatile __u32 val;
+
+ val = *(__u32 *)(p + 4);
+ (void)val;
+ return 0;
+}
+
+SEC("?syscall")
+__description("syscall: unaligned read ctx with fixed offset")
+__success
+int syscall_ctx_unaligned_fixed_off_read(void *ctx)
+{
+ char *p = ctx;
+ volatile __u32 val;
+
+ val = *(__u32 *)(p + 2);
+ (void)val;
+ return 0;
+}
+
+SEC("?syscall")
+__description("syscall: unaligned write ctx with fixed offset")
+__success
+int syscall_ctx_unaligned_fixed_off_write(void *ctx)
+{
+ char *p = ctx;
+
+ *(__u32 *)(p + 2) = 0;
+ return 0;
+}
+
+SEC("?syscall")
+__description("syscall: read ctx with variable offset")
+__success
+int syscall_ctx_var_off_read(void *ctx)
+{
+ __u64 off = bpf_get_prandom_u32();
+ char *p = ctx;
+ volatile __u32 val;
+
+ off &= 0xfc;
+ p += off;
+ val = *(__u32 *)p;
+ (void)val;
+ return 0;
+}
+
+SEC("?syscall")
+__description("syscall: write ctx with variable offset")
+__success
+int syscall_ctx_var_off_write(void *ctx)
+{
+ __u64 off = bpf_get_prandom_u32();
+ char *p = ctx;
+
+ off &= 0xfc;
+ p += off;
+ *(__u32 *)p = 0;
+ return 0;
+}
+
+SEC("?syscall")
+__description("syscall: unaligned read ctx with variable offset")
+__success
+int syscall_ctx_unaligned_var_off_read(void *ctx)
+{
+ __u64 off = bpf_get_prandom_u32();
+ char *p = ctx;
+ volatile __u32 val;
+
+ off &= 0xfc;
+ off += 2;
+ p += off;
+ val = *(__u32 *)p;
+ (void)val;
+ return 0;
+}
+
+SEC("?syscall")
+__description("syscall: unaligned write ctx with variable offset")
+__success
+int syscall_ctx_unaligned_var_off_write(void *ctx)
+{
+ __u64 off = bpf_get_prandom_u32();
+ char *p = ctx;
+
+ off &= 0xfc;
+ off += 2;
+ p += off;
+ *(__u32 *)p = 0;
+ return 0;
+}
+
+SEC("?syscall")
+__description("syscall: reject ctx access past U16_MAX with fixed offset")
+__failure __msg("outside of the allowed memory range")
+int syscall_ctx_u16_max_fixed_off(void *ctx)
+{
+ char *p = ctx;
+ volatile __u32 val;
+
+ p += 65535;
+ val = *(__u32 *)p;
+ (void)val;
+ return 0;
+}
+
+SEC("?syscall")
+__description("syscall: reject ctx access past U16_MAX with variable offset")
+__failure __msg("outside of the allowed memory range")
+int syscall_ctx_u16_max_var_off(void *ctx)
+{
+ __u64 off = bpf_get_prandom_u32();
+ char *p = ctx;
+ volatile __u32 val;
+
+ off &= 0xffff;
+ off += 1;
+ p += off;
+ val = *(__u32 *)p;
+ (void)val;
+ return 0;
+}
+
+SEC("?syscall")
+__description("syscall: reject negative variable offset ctx access")
+__failure __msg("min value is negative")
+int syscall_ctx_neg_var_off(void *ctx)
+{
+ __u64 off = bpf_get_prandom_u32();
+ char *p = ctx;
+
+ off &= 4;
+ p -= off;
+ return *(__u32 *)p;
+}
+
+SEC("?syscall")
+__description("syscall: reject unbounded variable offset ctx access")
+__failure __msg("unbounded memory access")
+int syscall_ctx_unbounded_var_off(void *ctx)
+{
+ __u64 off = (__u32)bpf_get_prandom_u32();
+ char *p = ctx;
+
+ off <<= 2;
+ p += off;
+ return *(__u32 *)p;
+}
+
+SEC("?syscall")
+__description("syscall: helper read ctx with fixed offset")
+__success
+int syscall_ctx_helper_fixed_off_read(void *ctx)
+{
+ char *p = ctx;
+
+ p += 4;
+ return bpf_strncmp(p, 4, ctx_strncmp_target);
+}
+
+SEC("?syscall")
+__description("syscall: helper write ctx with fixed offset")
+__success
+int syscall_ctx_helper_fixed_off_write(void *ctx)
+{
+ char *p = ctx;
+
+ p += 4;
+ return bpf_probe_read_kernel(p, 4, 0);
+}
+
+SEC("?syscall")
+__description("syscall: helper unaligned read ctx with fixed offset")
+__success
+int syscall_ctx_helper_unaligned_fixed_off_read(void *ctx)
+{
+ char *p = ctx;
+
+ p += 2;
+ return bpf_strncmp(p, 4, ctx_strncmp_target);
+}
+
+SEC("?syscall")
+__description("syscall: helper unaligned write ctx with fixed offset")
+__success
+int syscall_ctx_helper_unaligned_fixed_off_write(void *ctx)
+{
+ char *p = ctx;
+
+ p += 2;
+ return bpf_probe_read_kernel(p, 4, 0);
+}
+
+SEC("?syscall")
+__description("syscall: helper read ctx with variable offset")
+__success
+int syscall_ctx_helper_var_off_read(void *ctx)
+{
+ __u64 off = bpf_get_prandom_u32();
+ char *p = ctx;
+
+ off &= 0xfc;
+ p += off;
+ return bpf_strncmp(p, 4, ctx_strncmp_target);
+}
+
+SEC("?syscall")
+__description("syscall: helper write ctx with variable offset")
+__success
+int syscall_ctx_helper_var_off_write(void *ctx)
+{
+ __u64 off = bpf_get_prandom_u32();
+ char *p = ctx;
+
+ off &= 0xfc;
+ p += off;
+ return bpf_probe_read_kernel(p, 4, 0);
+}
+
+SEC("?syscall")
+__description("syscall: helper unaligned read ctx with variable offset")
+__success
+int syscall_ctx_helper_unaligned_var_off_read(void *ctx)
+{
+ __u64 off = bpf_get_prandom_u32();
+ char *p = ctx;
+
+ off &= 0xfc;
+ off += 2;
+ p += off;
+ return bpf_strncmp(p, 4, ctx_strncmp_target);
+}
+
+SEC("?syscall")
+__description("syscall: helper unaligned write ctx with variable offset")
+__success
+int syscall_ctx_helper_unaligned_var_off_write(void *ctx)
+{
+ __u64 off = bpf_get_prandom_u32();
+ char *p = ctx;
+
+ off &= 0xfc;
+ off += 2;
+ p += off;
+ return bpf_probe_read_kernel(p, 4, 0);
+}
+
+SEC("?syscall")
+__description("syscall: reject helper read ctx past U16_MAX with fixed offset")
+__failure __msg("outside of the allowed memory range")
+int syscall_ctx_helper_u16_max_fixed_off_read(void *ctx)
+{
+ char *p = ctx;
+
+ p += 65535;
+ return bpf_strncmp(p, 4, ctx_strncmp_target);
+}
+
+SEC("?syscall")
+__description("syscall: reject helper write ctx past U16_MAX with fixed offset")
+__failure __msg("outside of the allowed memory range")
+int syscall_ctx_helper_u16_max_fixed_off_write(void *ctx)
+{
+ char *p = ctx;
+
+ p += 65535;
+ return bpf_probe_read_kernel(p, 4, 0);
+}
+
+SEC("?syscall")
+__description("syscall: reject helper read ctx past U16_MAX with variable offset")
+__failure __msg("outside of the allowed memory range")
+int syscall_ctx_helper_u16_max_var_off_read(void *ctx)
+{
+ __u64 off = bpf_get_prandom_u32();
+ char *p = ctx;
+
+ off &= 0xffff;
+ off += 1;
+ p += off;
+ return bpf_strncmp(p, 4, ctx_strncmp_target);
+}
+
+SEC("?syscall")
+__description("syscall: reject helper write ctx past U16_MAX with variable offset")
+__failure __msg("outside of the allowed memory range")
+int syscall_ctx_helper_u16_max_var_off_write(void *ctx)
+{
+ __u64 off = bpf_get_prandom_u32();
+ char *p = ctx;
+
+ off &= 0xffff;
+ off += 1;
+ p += off;
+ return bpf_probe_read_kernel(p, 4, 0);
+}
+
+SEC("?syscall")
+__description("syscall: helper read zero-sized ctx access")
+__success
+int syscall_ctx_helper_zero_sized_read(void *ctx)
+{
+ return bpf_snprintf(0, 0, ctx_snprintf_fmt, ctx, 0);
+}
+
+SEC("?syscall")
+__description("syscall: helper write zero-sized ctx access")
+__success
+int syscall_ctx_helper_zero_sized_write(void *ctx)
+{
+ return bpf_probe_read_kernel(ctx, 0, 0);
+}
+
+SEC("?syscall")
+__description("syscall: kfunc access ctx with fixed offset")
+__success
+int syscall_ctx_kfunc_fixed_off(void *ctx)
+{
+ char *p = ctx;
+
+ p += 4;
+ bpf_kfunc_call_test_mem_len_pass1(p, 4);
+ return 0;
+}
+
+SEC("?syscall")
+__description("syscall: kfunc access ctx with variable offset")
+__success
+int syscall_ctx_kfunc_var_off(void *ctx)
+{
+ __u64 off = bpf_get_prandom_u32();
+ char *p = ctx;
+
+ off &= 0xfc;
+ p += off;
+ bpf_kfunc_call_test_mem_len_pass1(p, 4);
+ return 0;
+}
+
+SEC("?syscall")
+__description("syscall: kfunc unaligned access ctx with fixed offset")
+__success
+int syscall_ctx_kfunc_unaligned_fixed_off(void *ctx)
+{
+ char *p = ctx;
+
+ p += 2;
+ bpf_kfunc_call_test_mem_len_pass1(p, 4);
+ return 0;
+}
+
+SEC("?syscall")
+__description("syscall: kfunc unaligned access ctx with variable offset")
+__success
+int syscall_ctx_kfunc_unaligned_var_off(void *ctx)
+{
+ __u64 off = bpf_get_prandom_u32();
+ char *p = ctx;
+
+ off &= 0xfc;
+ off += 2;
+ p += off;
+ bpf_kfunc_call_test_mem_len_pass1(p, 4);
+ return 0;
+}
+
+SEC("?syscall")
+__description("syscall: reject kfunc ctx access past U16_MAX with fixed offset")
+__failure __msg("outside of the allowed memory range")
+int syscall_ctx_kfunc_u16_max_fixed_off(void *ctx)
+{
+ char *p = ctx;
+
+ p += 65535;
+ bpf_kfunc_call_test_mem_len_pass1(p, 4);
+ return 0;
+}
+
+SEC("?syscall")
+__description("syscall: reject kfunc ctx access past U16_MAX with variable offset")
+__failure __msg("outside of the allowed memory range")
+int syscall_ctx_kfunc_u16_max_var_off(void *ctx)
+{
+ __u64 off = bpf_get_prandom_u32();
+ char *p = ctx;
+
+ off &= 0xffff;
+ off += 1;
+ p += off;
+ bpf_kfunc_call_test_mem_len_pass1(p, 4);
+ return 0;
+}
+
+SEC("?syscall")
+__description("syscall: kfunc access zero-sized ctx")
+__success
+int syscall_ctx_kfunc_zero_sized(void *ctx)
+{
+ bpf_kfunc_call_test_mem_len_pass1(ctx, 0);
+ return 0;
+}
+
+/*
+ * For non-syscall program types without convert_ctx_access, direct ctx
+ * dereference is still allowed after adding a fixed offset, while variable
+ * and negative direct accesses are rejected.
+ *
+ * Passing ctx as a helper or kfunc memory argument is only permitted for
+ * syscall programs, so the helper and kfunc cases below validate that
+ * non-syscall ctx pointers are rejected for fixed, variable, and zero-sized
+ * accesses.
+ */
+#define no_rewrite_ctx_access(type, name, off, load_t) \
+ SEC("?" type) \
+ __description(type ": read ctx at fixed offset") \
+ __success \
+ int no_rewrite_##name##_fixed(void *ctx) \
+ { \
+ char *p = ctx; \
+ volatile load_t val; \
+ \
+ val = *(load_t *)(p + off); \
+ (void)val; \
+ return 0; \
+ } \
+ SEC("?" type) \
+ __description(type ": reject variable offset ctx access") \
+ __failure __msg("variable ctx access var_off=") \
+ int no_rewrite_##name##_var(void *ctx) \
+ { \
+ __u64 off_var = bpf_get_prandom_u32(); \
+ char *p = ctx; \
+ \
+ off_var &= 4; \
+ p += off_var; \
+ return *(load_t *)p; \
+ } \
+ SEC("?" type) \
+ __description(type ": reject negative offset ctx access") \
+ __failure __msg("invalid bpf_context access") \
+ int no_rewrite_##name##_neg(void *ctx) \
+ { \
+ char *p = ctx; \
+ \
+ p -= 612; \
+ return *(load_t *)p; \
+ } \
+ SEC("?" type) \
+ __description(type ": reject helper read ctx at fixed offset") \
+ __failure __msg("dereference of modified ctx ptr") \
+ int no_rewrite_##name##_helper_read_fixed(void *ctx) \
+ { \
+ char *p = ctx; \
+ \
+ p += off; \
+ return bpf_strncmp(p, 4, ctx_strncmp_target); \
+ } \
+ SEC("?" type) \
+ __description(type ": reject helper write ctx at fixed offset") \
+ __failure __msg("dereference of modified ctx ptr") \
+ int no_rewrite_##name##_helper_write_fixed(void *ctx) \
+ { \
+ char *p = ctx; \
+ \
+ p += off; \
+ return bpf_probe_read_kernel(p, 4, 0); \
+ } \
+ SEC("?" type) \
+ __description(type ": reject helper read ctx with variable offset") \
+ __failure __msg("variable ctx access var_off=") \
+ int no_rewrite_##name##_helper_read_var(void *ctx) \
+ { \
+ __u64 off_var = bpf_get_prandom_u32(); \
+ char *p = ctx; \
+ \
+ off_var &= 4; \
+ p += off_var; \
+ return bpf_strncmp(p, 4, ctx_strncmp_target); \
+ } \
+ SEC("?" type) \
+ __description(type ": reject helper write ctx with variable offset") \
+ __failure __msg("variable ctx access var_off=") \
+ int no_rewrite_##name##_helper_write_var(void *ctx) \
+ { \
+ __u64 off_var = bpf_get_prandom_u32(); \
+ char *p = ctx; \
+ \
+ off_var &= 4; \
+ p += off_var; \
+ return bpf_probe_read_kernel(p, 4, 0); \
+ } \
+ SEC("?" type) \
+ __description(type ": reject helper read zero-sized ctx access") \
+ __failure __msg("R4 type=ctx expected=fp") \
+ int no_rewrite_##name##_helper_read_zero(void *ctx) \
+ { \
+ return bpf_snprintf(0, 0, ctx_snprintf_fmt, ctx, 0); \
+ } \
+ SEC("?" type) \
+ __description(type ": reject helper write zero-sized ctx access") \
+ __failure __msg("R1 type=ctx expected=fp") \
+ int no_rewrite_##name##_helper_write_zero(void *ctx) \
+ { \
+ return bpf_probe_read_kernel(ctx, 0, 0); \
+ } \
+ SEC("?" type) \
+ __description(type ": reject kfunc ctx at fixed offset") \
+ __failure __msg("dereference of modified ctx ptr") \
+ int no_rewrite_##name##_kfunc_fixed(void *ctx) \
+ { \
+ char *p = ctx; \
+ \
+ p += off; \
+ bpf_kfunc_call_test_mem_len_pass1(p, 4); \
+ return 0; \
+ } \
+ SEC("?" type) \
+ __description(type ": reject kfunc ctx with variable offset") \
+ __failure __msg("variable ctx access var_off=") \
+ int no_rewrite_##name##_kfunc_var(void *ctx) \
+ { \
+ __u64 off_var = bpf_get_prandom_u32(); \
+ char *p = ctx; \
+ \
+ off_var &= 4; \
+ p += off_var; \
+ bpf_kfunc_call_test_mem_len_pass1(p, 4); \
+ return 0; \
+ } \
+ SEC("?" type) \
+ __description(type ": reject kfunc zero-sized ctx access") \
+ __failure __msg("R1 type=ctx expected=fp") \
+ int no_rewrite_##name##_kfunc_zero(void *ctx) \
+ { \
+ bpf_kfunc_call_test_mem_len_pass1(ctx, 0); \
+ return 0; \
+ }
+
+no_rewrite_ctx_access("kprobe", kprobe, 8, u64);
+no_rewrite_ctx_access("tracepoint", tp, 8, u64);
+no_rewrite_ctx_access("raw_tp", raw_tp, 8, u64);
+no_rewrite_ctx_access("raw_tracepoint.w", raw_tp_w, 8, u64);
+no_rewrite_ctx_access("fentry/bpf_modify_return_test", fentry, 8, u64);
+no_rewrite_ctx_access("cgroup/dev", cgroup_dev, 4, u32);
+no_rewrite_ctx_access("netfilter", netfilter, offsetof(struct bpf_nf_ctx, skb), u64);
+
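Each no_rewrite_ctx_access() instantiation above expands to twelve programs. For reference, hand-expanding the fixed-offset read case of the kprobe instantiation gives:

SEC("?kprobe")
__description("kprobe: read ctx at fixed offset")
__success
int no_rewrite_kprobe_fixed(void *ctx)
{
	char *p = ctx;
	volatile u64 val;

	val = *(u64 *)(p + 8);
	(void)val;
	return 0;
}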
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_ctx_ptr_param.c b/tools/testing/selftests/bpf/progs/verifier_ctx_ptr_param.c
new file mode 100644
index 000000000000..d5cc8fc01fe6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_ctx_ptr_param.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Verifier tests for single- and multi-level pointer parameter handling
+ * Copyright (c) 2026 CrowdStrike, Inc.
+ */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+SEC("fentry/bpf_fentry_test_ppvoid")
+__description("fentry/void**: void ** inferred as scalar")
+__success __retval(0)
+__log_level(2)
+__msg("R1=ctx() R2=scalar()")
+__naked void fentry_ppvoid_as_scalar(void)
+{
+ asm volatile (" \
+ r2 = *(u64 *)(r1 + 0); \
+ r0 = 0; \
+ exit; \
+ " ::: __clobber_all);
+}
+
+SEC("fentry/bpf_fentry_test_pppvoid")
+__description("fentry/void***: void *** inferred as scalar")
+__success __retval(0)
+__log_level(2)
+__msg("R1=ctx() R2=scalar()")
+__naked void fentry_pppvoid_as_scalar(void)
+{
+ asm volatile (" \
+ r2 = *(u64 *)(r1 + 0); \
+ r0 = 0; \
+ exit; \
+ " ::: __clobber_all);
+}
+
+SEC("fentry/bpf_fentry_test_ppfile")
+__description("fentry/struct file**: struct file ** inferred as scalar")
+__success __retval(0)
+__log_level(2)
+__msg("R1=ctx() R2=scalar()")
+__naked void fentry_ppfile_as_scalar(void)
+{
+ asm volatile (" \
+ r2 = *(u64 *)(r1 + 0); \
+ r0 = 0; \
+ exit; \
+ " ::: __clobber_all);
+}
+
+SEC("fexit/bpf_fexit_test_ret_ppfile")
+__description("fexit/return struct file**: returned struct file ** inferred as scalar")
+__success __retval(0)
+__log_level(2)
+__msg("R1=ctx() R2=scalar()")
+__naked void fexit_ppfile_as_scalar(void)
+{
+ asm volatile (" \
+ r2 = *(u64 *)(r1 + 0); \
+ r0 = 0; \
+ exit; \
+ " ::: __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
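Since the multi-level pointer arguments above are inferred as plain scalars, they cannot be dereferenced directly; a C program has to go through bpf_probe_read_kernel() instead. A sketch against the same test function:

SEC("fentry/bpf_fentry_test_ppvoid")
int BPF_PROG(ppvoid_in_c, void **pp)
{
	void *inner;

	/* pp is scalar() to the verifier, so *pp would be rejected;
	 * a probe read is the supported way to chase it
	 */
	bpf_probe_read_kernel(&inner, sizeof(inner), pp);
	return 0;
}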
diff --git a/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c b/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c
index 911caa8fd1b7..915a9707298b 100644
--- a/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c
+++ b/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c
@@ -412,7 +412,7 @@ l0_%=: r0 = 0; \
SEC("tc")
__description("direct packet access: test17 (pruning, alignment)")
-__failure __msg("misaligned packet access off 2+0+15+-4 size 4")
+__failure __msg("misaligned packet access off 2+15+-4 size 4")
__flag(BPF_F_STRICT_ALIGNMENT)
__naked void packet_access_test17_pruning_alignment(void)
{
@@ -569,7 +569,7 @@ l0_%=: r0 = 0; \
SEC("tc")
__description("direct packet access: test23 (x += pkt_ptr, 4)")
-__failure __msg("invalid access to packet, off=0 size=8, R5(id=3,off=0,r=0)")
+__failure __msg("invalid access to packet, off=31 size=8, R5(id=3,off=31,r=0)")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void test23_x_pkt_ptr_4(void)
{
@@ -859,4 +859,65 @@ l0_%=: r0 = 1; \
: __clobber_all);
}
+SEC("tc")
+__description("direct packet access: pkt_range cleared after sub with known scalar")
+__failure __msg("invalid access to packet")
+__naked void pkt_range_clear_after_sub(void)
+{
+ asm volatile (" \
+ r9 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r8 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r9 += 256; \
+ if r9 >= r8 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: /* r9 has AT_PKT_END (pkt + 256 >= pkt_end) */ \
+ r9 -= 256; \
+ /* \
+ * AT_PKT_END must not survive the arithmetic. \
+ * is_pkt_ptr_branch_taken must validate both \
+ * branches when visiting the next condition. \
+ */ \
+ if r9 < r8 goto l1_%=; \
+ r0 = 0; \
+ exit; \
+l1_%=: r0 = *(u8*)(r9 + 0); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("direct packet access: pkt_range cleared after add with known scalar")
+__failure __msg("invalid access to packet")
+__naked void pkt_range_clear_after_add(void)
+{
+ asm volatile (" \
+ r9 = *(u32*)(r1 + %[__sk_buff_data]); \
+ r8 = *(u32*)(r1 + %[__sk_buff_data_end]); \
+ r9 += 256; \
+ if r9 >= r8 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+l0_%=: /* r9 has AT_PKT_END (pkt + 256 >= pkt_end) */ \
+ r9 += -256; \
+ /* \
+ * Same as sub, but goes through BPF_ADD path. \
+ * AT_PKT_END must not survive the arithmetic. \
+ */ \
+ if r9 < r8 goto l1_%=; \
+ r0 = 0; \
+ exit; \
+l1_%=: r0 = *(u8*)(r9 + 0); \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_div_mod_bounds.c b/tools/testing/selftests/bpf/progs/verifier_div_mod_bounds.c
index 4672af0b3268..e814a054d69a 100644
--- a/tools/testing/selftests/bpf/progs/verifier_div_mod_bounds.c
+++ b/tools/testing/selftests/bpf/progs/verifier_div_mod_bounds.c
@@ -36,7 +36,7 @@ l0_%=: r0 = *(u64 *)(r1 + 0); \
SEC("socket")
__description("UDIV32, zero divisor")
__success __retval(0) __log_level(2)
-__msg("w1 /= w2 {{.*}}; R1=0 R2=0")
+__msg("w1 /= w2 {{.*}}; R1=0")
__naked void udiv32_zero_divisor(void)
{
asm volatile (" \
@@ -81,7 +81,7 @@ l0_%=: r0 = *(u64 *)(r1 + 0); \
SEC("socket")
__description("UDIV64, zero divisor")
__success __retval(0) __log_level(2)
-__msg("r1 /= r2 {{.*}}; R1=0 R2=0")
+__msg("r1 /= r2 {{.*}}; R1=0")
__naked void udiv64_zero_divisor(void)
{
asm volatile (" \
@@ -242,7 +242,7 @@ l1_%=: r0 = *(u64 *)(r1 + 0); \
SEC("socket")
__description("SDIV32, zero divisor")
__success __retval(0) __log_level(2)
-__msg("w1 s/= w2 {{.*}}; R1=0 R2=0")
+__msg("w1 s/= w2 {{.*}}; R1=0")
__naked void sdiv32_zero_divisor(void)
{
asm volatile (" \
@@ -275,6 +275,7 @@ __naked void sdiv32_overflow_1(void)
w2 += 10; \
if w1 s> w2 goto l0_%=; \
w1 s/= -1; \
+ r2 = r1; \
l0_%=: r0 = 0; \
exit; \
" :
@@ -443,7 +444,7 @@ l1_%=: r0 = *(u64 *)(r1 + 0); \
SEC("socket")
__description("SDIV64, zero divisor")
__success __retval(0) __log_level(2)
-__msg("r1 s/= r2 {{.*}}; R1=0 R2=0")
+__msg("r1 s/= r2 {{.*}}; R1=0")
__naked void sdiv64_zero_divisor(void)
{
asm volatile (" \
@@ -476,6 +477,7 @@ __naked void sdiv64_overflow_1(void)
r2 += 10; \
if r1 s> r2 goto l0_%=; \
r1 s/= -1; \
+ r2 = r1; \
l0_%=: r0 = 0; \
exit; \
" :
@@ -553,7 +555,7 @@ l0_%=: r0 = *(u64 *)(r1 + 0); \
SEC("socket")
__description("UMOD32, zero divisor")
__success __retval(0) __log_level(2)
-__msg("w1 %= w2 {{.*}}; R1=scalar(smin=umin=smin32=umin32=1,smax=umax=smax32=umax32=9,var_off=(0x1; 0x8)) R2=0")
+__msg("w1 %= w2 {{.*}}; R1=scalar(smin=umin=smin32=umin32=1,smax=umax=smax32=umax32=9,var_off=(0x1; 0x8))")
__naked void umod32_zero_divisor(void)
{
asm volatile (" \
@@ -624,7 +626,7 @@ l0_%=: r0 = *(u64 *)(r1 + 0); \
SEC("socket")
__description("UMOD64, zero divisor")
__success __retval(0) __log_level(2)
-__msg("r1 %= r2 {{.*}}; R1=scalar(smin=umin=smin32=umin32=1,smax=umax=smax32=umax32=9,var_off=(0x1; 0x8)) R2=0")
+__msg("r1 %= r2 {{.*}}; R1=scalar(smin=umin=smin32=umin32=1,smax=umax=smax32=umax32=9,var_off=(0x1; 0x8))")
__naked void umod64_zero_divisor(void)
{
asm volatile (" \
@@ -833,7 +835,7 @@ l1_%=: r0 = *(u64 *)(r1 + 0); \
SEC("socket")
__description("SMOD32, zero divisor")
__success __retval(0) __log_level(2)
-__msg("w1 s%= w2 {{.*}}; R1=scalar(smin=0,smax=umax=0xffffffff,smin32=-8,smax32=10,var_off=(0x0; 0xffffffff)) R2=0")
+__msg("w1 s%= w2 {{.*}}; R1=scalar(smin=0,smax=umax=0xffffffff,smin32=-8,smax32=10,var_off=(0x0; 0xffffffff))")
__naked void smod32_zero_divisor(void)
{
asm volatile (" \
@@ -1084,7 +1086,7 @@ l1_%=: r0 = *(u64 *)(r1 + 0); \
SEC("socket")
__description("SMOD64, zero divisor")
__success __retval(0) __log_level(2)
-__msg("r1 s%= r2 {{.*}}; R1=scalar(smin=smin32=-8,smax=smax32=10) R2=0")
+__msg("r1 s%= r2 {{.*}}; R1=scalar(smin=smin32=-8,smax=smax32=10)")
__naked void smod64_zero_divisor(void)
{
asm volatile (" \
diff --git a/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c b/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c
index 20904cd2baa2..1e08aff7532e 100644
--- a/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c
+++ b/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c
@@ -134,7 +134,6 @@ __noinline __weak int subprog_user_anon_mem(user_struct_t *t)
SEC("?tracepoint")
__failure __log_level(2)
-__msg("invalid bpf_context access")
__msg("Caller passes invalid args into func#1 ('subprog_user_anon_mem')")
int anon_user_mem_invalid(void *ctx)
{
@@ -358,6 +357,100 @@ int arg_tag_ctx_syscall(void *ctx)
return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx) + tp_whatever(ctx);
}
+__weak int syscall_array_bpf_for(void *ctx __arg_ctx)
+{
+ int *arr = ctx;
+ int i;
+
+ bpf_for(i, 0, 100)
+ arr[i] *= i;
+
+ return 0;
+}
+
+SEC("?syscall")
+__success __log_level(2)
+int arg_tag_ctx_syscall_bpf_for(void *ctx)
+{
+ return syscall_array_bpf_for(ctx);
+}
+
+SEC("syscall")
+__auxiliary
+int syscall_tailcall_target(void *ctx)
+{
+ return syscall_array_bpf_for(ctx);
+}
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __array(values, int (void *));
+} syscall_prog_array SEC(".maps") = {
+ .values = {
+ [0] = (void *)&syscall_tailcall_target,
+ },
+};
+
+SEC("?syscall")
+__success __log_level(2)
+int arg_tag_ctx_syscall_tailcall(void *ctx)
+{
+ bpf_tail_call(ctx, &syscall_prog_array, 0);
+ return 0;
+}
+
+SEC("?syscall")
+__failure __log_level(2)
+__msg("dereference of modified ctx ptr R1 off=8 disallowed")
+int arg_tag_ctx_syscall_tailcall_fixed_off_bad(void *ctx)
+{
+ char *p = ctx;
+
+ p += 8;
+ bpf_tail_call(p, &syscall_prog_array, 0);
+ return 0;
+}
+
+SEC("?syscall")
+__failure __log_level(2)
+__msg("variable ctx access var_off=(0x0; 0x4) disallowed")
+int arg_tag_ctx_syscall_tailcall_var_off_bad(void *ctx)
+{
+ __u64 off = bpf_get_prandom_u32();
+ char *p = ctx;
+
+ off &= 4;
+ p += off;
+ bpf_tail_call(p, &syscall_prog_array, 0);
+ return 0;
+}
+
+SEC("?syscall")
+__failure __log_level(2)
+__msg("dereference of modified ctx ptr R1 off=8 disallowed")
+int arg_tag_ctx_syscall_fixed_off_bad(void *ctx)
+{
+ char *p = ctx;
+
+ p += 8;
+ return subprog_ctx_tag(p);
+}
+
+SEC("?syscall")
+__failure __log_level(2)
+__msg("variable ctx access var_off=(0x0; 0x4) disallowed")
+int arg_tag_ctx_syscall_var_off_bad(void *ctx)
+{
+ __u64 off = bpf_get_prandom_u32();
+ char *p = ctx;
+
+ off &= 4;
+ p += off;
+ return subprog_ctx_tag(p);
+}
+
__weak int subprog_dynptr(struct bpf_dynptr *dptr)
{
long *d, t, buf[1] = {};
@@ -388,4 +481,23 @@ int arg_tag_dynptr(struct xdp_md *ctx)
return subprog_dynptr(&dptr);
}
+__weak
+void foo(void)
+{
+}
+
+SEC("?tc")
+__failure __msg("R0 !read_ok")
+int return_from_void_global(struct __sk_buff *skb)
+{
+ foo();
+
+ asm volatile(
+ "r1 = r0;"
+ :::
+ );
+
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_gotox.c b/tools/testing/selftests/bpf/progs/verifier_gotox.c
index 0f43b56ec2bc..f88aa4cdb279 100644
--- a/tools/testing/selftests/bpf/progs/verifier_gotox.c
+++ b/tools/testing/selftests/bpf/progs/verifier_gotox.c
@@ -131,7 +131,7 @@ DEFINE_INVALID_SIZE_PROG(u16, __failure __msg("Invalid read of 2 bytes from insn
DEFINE_INVALID_SIZE_PROG(u8, __failure __msg("Invalid read of 1 bytes from insn_array"))
SEC("socket")
-__failure __msg("misaligned value access off 0+1+0 size 8")
+__failure __msg("misaligned value access off 1+0 size 8")
__naked void jump_table_misaligned_access(void)
{
asm volatile (" \
@@ -187,7 +187,7 @@ jt0_%=: \
}
SEC("socket")
-__failure __msg("invalid access to map value, value_size=16 off=-24 size=8")
+__failure __msg("R0 min value is negative")
__naked void jump_table_invalid_mem_acceess_neg(void)
{
asm volatile (" \
diff --git a/tools/testing/selftests/bpf/progs/verifier_helper_packet_access.c b/tools/testing/selftests/bpf/progs/verifier_helper_packet_access.c
index 74f5f9cd153d..71cee3f58324 100644
--- a/tools/testing/selftests/bpf/progs/verifier_helper_packet_access.c
+++ b/tools/testing/selftests/bpf/progs/verifier_helper_packet_access.c
@@ -360,7 +360,7 @@ l0_%=: r0 = 0; \
SEC("tc")
__description("helper access to packet: test15, cls helper fail sub")
-__failure __msg("invalid access to packet")
+__failure __msg("R1 min value is negative")
__naked void test15_cls_helper_fail_sub(void)
{
asm volatile (" \
diff --git a/tools/testing/selftests/bpf/progs/verifier_helper_value_access.c b/tools/testing/selftests/bpf/progs/verifier_helper_value_access.c
index 886498b5e6f3..6d2a38597c34 100644
--- a/tools/testing/selftests/bpf/progs/verifier_helper_value_access.c
+++ b/tools/testing/selftests/bpf/progs/verifier_helper_value_access.c
@@ -1100,7 +1100,7 @@ l0_%=: exit; \
SEC("tracepoint")
__description("map helper access to adjusted map (via const imm): out-of-bound 2")
-__failure __msg("invalid access to map value, value_size=16 off=-4 size=8")
+__failure __msg("R2 min value is negative")
__naked void imm_out_of_bound_2(void)
{
asm volatile (" \
@@ -1176,7 +1176,7 @@ l0_%=: exit; \
SEC("tracepoint")
__description("map helper access to adjusted map (via const reg): out-of-bound 2")
-__failure __msg("invalid access to map value, value_size=16 off=-4 size=8")
+__failure __msg("R2 min value is negative")
__naked void reg_out_of_bound_2(void)
{
asm volatile (" \
diff --git a/tools/testing/selftests/bpf/progs/verifier_int_ptr.c b/tools/testing/selftests/bpf/progs/verifier_int_ptr.c
index 59e34d558654..6627f44faf4b 100644
--- a/tools/testing/selftests/bpf/progs/verifier_int_ptr.c
+++ b/tools/testing/selftests/bpf/progs/verifier_int_ptr.c
@@ -65,7 +65,7 @@ __naked void ptr_to_long_half_uninitialized(void)
SEC("cgroup/sysctl")
__description("arg pointer to long misaligned")
-__failure __msg("misaligned stack access off 0+-20+0 size 8")
+__failure __msg("misaligned stack access off -20+0 size 8")
__naked void arg_ptr_to_long_misaligned(void)
{
asm volatile (" \
diff --git a/tools/testing/selftests/bpf/progs/verifier_jeq_infer_not_null.c b/tools/testing/selftests/bpf/progs/verifier_jeq_infer_not_null.c
index bf16b00502f2..3d1e8de4390c 100644
--- a/tools/testing/selftests/bpf/progs/verifier_jeq_infer_not_null.c
+++ b/tools/testing/selftests/bpf/progs/verifier_jeq_infer_not_null.c
@@ -210,4 +210,58 @@ l0_%=: /* return 0; */ \
: __clobber_all);
}
+/* Verify that the pointer is detected as non-null when compared against a
+ * register holding the value 0. JEQ test case.
+ */
+SEC("xdp")
+__success __log_level(2)
+/* to make sure the branch is not falsely predicted*/
+__msg("r0 = *(u32 *)(r0 +0)")
+__msg("from 7 to 9")
+__naked void jeq_reg_reg_null_check(void)
+{
+ asm volatile (" \
+ *(u32*)(r10 - 8) = 0; \
+ r1 = %[map_xskmap] ll; \
+ r2 = r10; \
+ r2 += -8; \
+ call %[bpf_map_lookup_elem]; \
+ r1 = 0; \
+ if r0 == r1 goto 1f; \
+ r0 = *(u32*)(r0 +0); \
+1: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_xskmap)
+ : __clobber_all);
+}
+
+/* Same as above but for JNE.
+ */
+SEC("xdp")
+__success __log_level(2)
+/* To make sure the branch is not falsely predicted. */
+__msg("r0 = *(u32 *)(r0 +0)")
+__msg("from 7 to 9")
+__naked void jne_reg_reg_null_check(void)
+{
+ asm volatile (" \
+ *(u32*)(r10 - 8) = 0; \
+ r1 = %[map_xskmap] ll; \
+ r2 = r10; \
+ r2 += -8; \
+ call %[bpf_map_lookup_elem]; \
+ r1 = 0; \
+ if r0 != r1 goto 1f; \
+ goto 2f; \
+1: r0 = *(u32*)(r0 +0); \
+2: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_xskmap)
+ : __clobber_all);
+}
+
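In C, this pattern corresponds to comparing a lookup result against a variable holding NULL rather than against a NULL literal; compilers can materialize the zero in a register, which is why the asm above pins the instruction sequence down. A sketch, assuming the map_xskmap declared earlier in this file:

SEC("xdp")
int jeq_reg_null_check_c(struct xdp_md *ctx)
{
	void * volatile zero = NULL;	/* NULL in a register, not an imm */
	__u32 key = 0;
	void *val;

	val = bpf_map_lookup_elem(&map_xskmap, &key);
	if (val == zero)		/* JEQ against a reg known to be 0 */
		return XDP_PASS;
	/* val is proven non-NULL on this path */
	return *(volatile __u32 *)val ? XDP_PASS : XDP_DROP;
}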
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_ld_ind.c b/tools/testing/selftests/bpf/progs/verifier_ld_ind.c
index c925ba9a2e74..09e81b99eecb 100644
--- a/tools/testing/selftests/bpf/progs/verifier_ld_ind.c
+++ b/tools/testing/selftests/bpf/progs/verifier_ld_ind.c
@@ -107,4 +107,146 @@ __naked void ind_check_calling_conv_r7(void)
: __clobber_all);
}
+/*
+ * ld_{abs,ind} subprog that always sets r0=1 on the success path.
+ * bpf_gen_ld_abs() emits a hidden exit with r0=0 when the load helper
+ * fails. The verifier must model this failure return so that callers
+ * account for r0=0 as a possible return value.
+ */
+__naked __noinline __used
+static int ldabs_subprog(void)
+{
+ asm volatile (
+ "r6 = r1;"
+ ".8byte %[ld_abs];"
+ "r0 = 1;"
+ "exit;"
+ :
+ : __imm_insn(ld_abs, BPF_LD_ABS(BPF_W, 0))
+ : __clobber_all);
+}
+
+__naked __noinline __used
+static int ldind_subprog(void)
+{
+ asm volatile (
+ "r6 = r1;"
+ "r7 = 0;"
+ ".8byte %[ld_ind];"
+ "r0 = 1;"
+ "exit;"
+ :
+ : __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_7, 0))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("ld_abs: subprog early exit on ld_abs failure")
+__failure __msg("R9 !read_ok")
+__naked void ld_abs_subprog_early_exit(void)
+{
+ asm volatile (
+ "call ldabs_subprog;"
+ "if r0 != 0 goto l_exit_%=;"
+ "r0 = r9;"
+ "l_exit_%=:"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("socket")
+__description("ld_ind: subprog early exit on ld_ind failure")
+__failure __msg("R9 !read_ok")
+__naked void ld_ind_subprog_early_exit(void)
+{
+ asm volatile (
+ "call ldind_subprog;"
+ "if r0 != 0 goto l_exit_%=;"
+ "r0 = r9;"
+ "l_exit_%=:"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("socket")
+__description("ld_abs: subprog with both paths safe")
+__success
+__naked void ld_abs_subprog_both_paths_safe(void)
+{
+ asm volatile (
+ "call ldabs_subprog;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("socket")
+__description("ld_ind: subprog with both paths safe")
+__success
+__naked void ld_ind_subprog_both_paths_safe(void)
+{
+ asm volatile (
+ "call ldind_subprog;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/*
+ * ld_{abs,ind} in subprogs requires a scalar (int) return type in BTF.
+ * A subprog with a void return type must be rejected.
+ */
+__naked __noinline __used
+static void ldabs_void_subprog(void)
+{
+ asm volatile (
+ "r6 = r1;"
+ ".8byte %[ld_abs];"
+ "r0 = 1;"
+ "exit;"
+ :
+ : __imm_insn(ld_abs, BPF_LD_ABS(BPF_W, 0))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("ld_abs: reject void return subprog")
+__failure __msg("LD_ABS is only allowed in functions that return 'int'")
+__naked void ld_abs_void_subprog_reject(void)
+{
+ asm volatile (
+ "call ldabs_void_subprog;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+__naked __noinline __used
+static void ldind_void_subprog(void)
+{
+ asm volatile (
+ "r6 = r1;"
+ "r7 = 0;"
+ ".8byte %[ld_ind];"
+ "r0 = 1;"
+ "exit;"
+ :
+ : __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_7, 0))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("ld_ind: reject void return subprog")
+__failure __msg("LD_ABS is only allowed in functions that return 'int'")
+__naked void ld_ind_void_subprog_reject(void)
+{
+ asm volatile (
+ "call ldind_void_subprog;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
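The hidden failure path exercised above can be modeled in plain C; this user-space sketch mirrors ldabs_subprog(), with the bounds check standing in for the hidden exit emitted by bpf_gen_ld_abs(). A failed load returns 0 from the current function, so callers must treat r0 == 0 as a possible result even though the visible code always sets r0 = 1:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static uint64_t ldabs_subprog_model(const uint8_t *pkt, size_t len, int off)
{
	uint32_t val;

	if (off < 0 || (size_t)off + sizeof(val) > len)
		return 0;	/* hidden exit: r0 = 0 on load failure */
	memcpy(&val, pkt + off, sizeof(val));
	(void)val;
	return 1;		/* visible success path: r0 = 1 */
}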
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_linked_scalars.c b/tools/testing/selftests/bpf/progs/verifier_linked_scalars.c
index f4f8a055af8a..d571fbfc86a3 100644
--- a/tools/testing/selftests/bpf/progs/verifier_linked_scalars.c
+++ b/tools/testing/selftests/bpf/progs/verifier_linked_scalars.c
@@ -535,4 +535,179 @@ int spurious_precision_marks(void *ctx)
return 0;
}
+/*
+ * Test that r += r (self-add, src_reg == dst_reg) clears the scalar ID
+ * so that sync_linked_regs() does not propagate an incorrect delta.
+ */
+SEC("socket")
+__failure
+__msg("div by zero")
+__naked void scalars_self_add_clears_id(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r6 = r0; /* r6 unknown, id A */ \
+ r7 = r6; /* r7 linked to r6, id A */ \
+ call %[bpf_get_prandom_u32]; \
+ r8 = r0; /* r8 unknown, id B */ \
+ r9 = r8; /* r9 linked to r8, id B */ \
+ if r7 != 1 goto l_exit_%=; \
+ /* r7 == 1; sync propagates: r6 = 1 (known, id A) */ \
+ r6 += r6; /* r6 = 2; should clear id */ \
+ if r7 == r9 goto l_exit_%=; \
+ /* Bug: r6 synced to r7(1)+delta(2)=3; Fix: r6 = 2 */ \
+ if r6 == 3 goto l_exit_%=; \
+ r0 /= 0; \
+l_exit_%=: \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+/* Same as above but with alu32 such that w6 += w6 also clears id. */
+SEC("socket")
+__failure
+__msg("div by zero")
+__naked void scalars_self_add_alu32_clears_id(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ w6 = w0; \
+ w7 = w6; \
+ call %[bpf_get_prandom_u32]; \
+ w8 = w0; \
+ w9 = w8; \
+ if w7 != 1 goto l_exit_%=; \
+ w6 += w6; \
+ if w7 == w9 goto l_exit_%=; \
+ if w6 == 3 goto l_exit_%=; \
+ r0 /= 0; \
+l_exit_%=: \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+/*
+ * Test that stale delta from a cleared BPF_ADD_CONST does not leak
+ * through assign_scalar_id_before_mov() into a new id, causing
+ * sync_linked_regs() to compute an incorrect offset.
+ */
+SEC("socket")
+__failure
+__msg("div by zero")
+__naked void scalars_stale_delta_from_cleared_id(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r6 = r0; /* r6 unknown, gets id A */ \
+ r6 += 5; /* id A|ADD_CONST, delta 5 */ \
+ r6 ^= 0; /* id cleared; delta stays 5 */ \
+ r8 = r6; /* new id B, stale delta 5 */ \
+ r8 += 3; /* id B|ADD_CONST, delta 3 */ \
+ r9 = r6; /* id B, stale delta 5 */ \
+ if r9 != 10 goto l_exit_%=; \
+ /* Bug: r8 = 10+(3-5) = 8; Fix: r8 = 10+(3-0) = 13 */ \
+ if r8 == 8 goto l_exit_%=; \
+ r0 /= 0; \
+l_exit_%=: \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+/* Same as above but with alu32. */
+SEC("socket")
+__failure
+__msg("div by zero")
+__naked void scalars_stale_delta_from_cleared_id_alu32(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ w6 = w0; \
+ w6 += 5; \
+ w6 ^= 0; \
+ w8 = w6; \
+ w8 += 3; \
+ w9 = w6; \
+ if w9 != 10 goto l_exit_%=; \
+ if w8 == 8 goto l_exit_%=; \
+ r0 /= 0; \
+l_exit_%=: \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
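The offset rule referenced in the stale-delta comments above is worth spelling out: when a register with id X becomes a known constant, every other register carrying id X is rewritten to known + (its delta - the known register's delta). A user-space check of the buggy and fixed values from those tests (the helper models sync_linked_regs(); it is not the kernel function):

#include <assert.h>

/* model of the sync_linked_regs() rewrite for BPF_ADD_CONST links */
static long sync_linked(long known, long known_delta, long reg_delta)
{
	return known + (reg_delta - known_delta);
}

int main(void)
{
	/* r9 == 10 becomes known; r8 carries delta 3.
	 * Stale delta 5 on r9: r8 = 10 + (3 - 5) = 8 (the bug).
	 * Cleared delta 0 on r9: r8 = 10 + (3 - 0) = 13 (the fix).
	 */
	assert(sync_linked(10, 5, 3) == 8);
	assert(sync_linked(10, 0, 3) == 13);
	return 0;
}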
+/*
+ * Test that regsafe() verifies base_id consistency for BPF_ADD_CONST
+ * linked scalars during state pruning.
+ *
+ * The false branch (explored first) links R3 to R2 via ADD_CONST.
+ * The true branch (runtime path) links R3 to R4 (unrelated base_id).
+ * At the merge point, pruning must fail because the linkage topology
+ * differs.
+ */
+SEC("socket")
+__description("linked scalars: add_const base_id must be consistent for pruning")
+__failure __msg("invalid variable-offset")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void add_const_base_id_pruning(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 16) = r1; \
+ call %[bpf_get_prandom_u32]; \
+ r6 = r0; \
+ r6 &= 1; \
+ if r6 >= 1 goto l_true_%=; \
+ \
+ /* False branch (explored first, old state) */ \
+ call %[bpf_get_prandom_u32]; \
+ r2 = r0; \
+ r2 &= 0xff; /* R2 = scalar(id=A) [0,255] */ \
+ r3 = r2; /* R3 linked to R2 (id=A) */ \
+ r3 += 10; /* R3 id=A|ADD_CONST, delta=10 */\
+ r6 = 0; \
+ goto l_merge_%=; \
+ \
+l_true_%=: \
+ /* True branch (runtime path, cur state) */ \
+ call %[bpf_get_prandom_u32]; \
+ r2 = r0; \
+ r2 &= 0xff; /* R2 = scalar [0,255], id=0 */ \
+ r4 = r0; \
+ r4 &= 0xff; /* R4 = scalar [0,255], id=0 */ \
+ r3 = r4; /* R3 linked to R4 (new id=C) */\
+ r3 += 10; /* R3 id=C|ADD_CONST, delta=10 */\
+ r6 = 0; \
+ \
+l_merge_%=: \
+ /* At merge, old R3 linked to R2, cur R3 linked to R4. */\
+ /* Pruning must fail: base_ids A vs C inconsistent. */ \
+ if r2 >= 6 goto l_exit_%=; \
+ /* sync_linked_regs: R2<6 => R3<16 in old state. */ \
+ /* Without fix: R3 in [10,15] from incorrect pruning. */\
+ /* With fix: R3 in [10,265], not synced from R2. */ \
+ r3 -= 10; /* [0,5] vs [0,255] */ \
+ r9 = r10; \
+ r9 += -16; \
+ r9 += r3; /* fp-16+[0,5] vs fp-16+[0,255] */\
+ *(u8*)(r9 + 0) = r6; /* within 16B vs past fp */ \
+l_exit_%=: \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_live_stack.c b/tools/testing/selftests/bpf/progs/verifier_live_stack.c
index 2de105057bbc..b7a9fa10e84d 100644
--- a/tools/testing/selftests/bpf/progs/verifier_live_stack.c
+++ b/tools/testing/selftests/bpf/progs/verifier_live_stack.c
@@ -3,8 +3,10 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "../../../include/linux/filter.h"
#include "bpf_misc.h"
+char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
@@ -12,14 +14,20 @@ struct {
__type(value, long long);
} map SEC(".maps");
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} array_map_8b SEC(".maps");
+
+const char snprintf_u64_fmt[] = "%llu";
+
SEC("socket")
__log_level(2)
-__msg("(0) frame 0 insn 2 +written -8")
-__msg("(0) frame 0 insn 1 +live -24")
-__msg("(0) frame 0 insn 1 +written -8")
-__msg("(0) frame 0 insn 0 +live -8,-24")
-__msg("(0) frame 0 insn 0 +written -8")
-__msg("(0) live stack update done in 2 iterations")
+__msg("0: (79) r1 = *(u64 *)(r10 -8) ; use: fp0-8")
+__msg("1: (79) r2 = *(u64 *)(r10 -24) ; use: fp0-24")
+__msg("2: (7b) *(u64 *)(r10 -8) = r1 ; def: fp0-8")
__naked void simple_read_simple_write(void)
{
asm volatile (
@@ -33,12 +41,8 @@ __naked void simple_read_simple_write(void)
SEC("socket")
__log_level(2)
-__msg("(0) frame 0 insn 1 +live -8")
-__not_msg("(0) frame 0 insn 1 +written")
-__msg("(0) live stack update done in 2 iterations")
-__msg("(0) frame 0 insn 1 +live -16")
-__msg("(0) frame 0 insn 1 +written -32")
-__msg("(0) live stack update done in 2 iterations")
+__msg("2: (79) r0 = *(u64 *)(r10 -8) ; use: fp0-8")
+__msg("6: (79) r0 = *(u64 *)(r10 -16) ; use: fp0-16")
__naked void read_write_join(void)
{
asm volatile (
@@ -58,13 +62,9 @@ __naked void read_write_join(void)
SEC("socket")
__log_level(2)
-__msg("2: (25) if r0 > 0x2a goto pc+1")
-__msg("7: (95) exit")
-__msg("(0) frame 0 insn 2 +written -16")
-__msg("(0) live stack update done in 2 iterations")
-__msg("7: (95) exit")
-__not_msg("(0) frame 0 insn 2")
-__msg("(0) live stack update done in 1 iterations")
+__msg("stack use/def subprog#0 must_write_not_same_slot (d0,cs0):")
+__msg("6: (7b) *(u64 *)(r2 +0) = r0{{$}}")
+__msg("Live regs before insn:")
__naked void must_write_not_same_slot(void)
{
asm volatile (
@@ -83,10 +83,8 @@ __naked void must_write_not_same_slot(void)
SEC("socket")
__log_level(2)
-__msg("(0) frame 0 insn 0 +written -8,-16")
-__msg("(0) live stack update done in 2 iterations")
-__msg("(0) frame 0 insn 0 +written -8")
-__msg("(0) live stack update done in 2 iterations")
+__msg("0: (7a) *(u64 *)(r10 -8) = 0 ; def: fp0-8")
+__msg("5: (85) call bpf_map_lookup_elem#1 ; use: fp0-8h")
__naked void must_write_not_same_type(void)
{
asm volatile (
@@ -110,10 +108,11 @@ __naked void must_write_not_same_type(void)
SEC("socket")
__log_level(2)
-__msg("(2,4) frame 0 insn 4 +written -8")
-__msg("(2,4) live stack update done in 2 iterations")
-__msg("(0) frame 0 insn 2 +written -8")
-__msg("(0) live stack update done in 2 iterations")
+/* Callee writes fp[0]-8: stack_use at call site has slots 0,1 live */
+__msg("stack use/def subprog#0 caller_stack_write (d0,cs0):")
+__msg("2: (85) call pc+1{{$}}")
+__msg("stack use/def subprog#1 write_first_param (d1,cs2):")
+__msg("4: (7a) *(u64 *)(r1 +0) = 7 ; def: fp0-8")
__naked void caller_stack_write(void)
{
asm volatile (
@@ -135,23 +134,15 @@ static __used __naked void write_first_param(void)
SEC("socket")
__log_level(2)
-/* caller_stack_read() function */
-__msg("2: .12345.... (85) call pc+4")
-__msg("5: .12345.... (85) call pc+1")
-__msg("6: 0......... (95) exit")
-/* read_first_param() function */
-__msg("7: .1........ (79) r0 = *(u64 *)(r1 +0)")
-__msg("8: 0......... (95) exit")
-/* update for callsite at (2) */
-__msg("(2,7) frame 0 insn 7 +live -8")
-__msg("(2,7) live stack update done in 2 iterations")
-__msg("(0) frame 0 insn 2 +live -8")
-__msg("(0) live stack update done in 2 iterations")
-/* update for callsite at (5) */
-__msg("(5,7) frame 0 insn 7 +live -16")
-__msg("(5,7) live stack update done in 2 iterations")
-__msg("(0) frame 0 insn 5 +live -16")
-__msg("(0) live stack update done in 2 iterations")
+__msg("stack use/def subprog#0 caller_stack_read (d0,cs0):")
+__msg("2: (85) call pc+{{.*}} ; use: fp0-8{{$}}")
+__msg("5: (85) call pc+{{.*}} ; use: fp0-16{{$}}")
+__msg("stack use/def subprog#1 read_first_param (d1,cs2):")
+__msg("7: (79) r0 = *(u64 *)(r1 +0) ; use: fp0-8{{$}}")
+__msg("8: (95) exit")
+__msg("stack use/def subprog#1 read_first_param (d1,cs5):")
+__msg("7: (79) r0 = *(u64 *)(r1 +0) ; use: fp0-16{{$}}")
+__msg("8: (95) exit")
__naked void caller_stack_read(void)
{
asm volatile (
@@ -174,20 +165,48 @@ static __used __naked void read_first_param(void)
}
SEC("socket")
+__success
+__naked void arg_track_join_convergence(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "r2 = 2;"
+ "call arg_track_join_convergence_subprog;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+static __used __naked void arg_track_join_convergence_subprog(void)
+{
+ asm volatile (
+ "if r1 == 0 goto 1f;"
+ "r0 = r1;"
+ "goto 2f;"
+"1:"
+ "r0 = r2;"
+"2:"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("socket")
__flag(BPF_F_TEST_STATE_FREQ)
__log_level(2)
-/* read_first_param2() function */
-__msg(" 9: .1........ (79) r0 = *(u64 *)(r1 +0)")
-__msg("10: .......... (b7) r0 = 0")
-__msg("11: 0......... (05) goto pc+0")
-__msg("12: 0......... (95) exit")
+/* fp0-8 consumed at insn 9, dead by insn 11. stack_def at insn 4 kills slots 0,1. */
+__msg("4: (7b) *(u64 *)(r10 -8) = r0 ; def: fp0-8")
+/* stack_use at call site: callee reads fp0-8, slots 0,1 live */
+__msg("7: (85) call pc+{{.*}} ; use: fp0-8")
+/* read_first_param2: no caller stack live inside callee after first read */
+__msg("9: (79) r0 = *(u64 *)(r1 +0) ; use: fp0-8")
+__msg("10: (b7) r0 = 0{{$}}")
+__msg("11: (05) goto pc+0{{$}}")
+__msg("12: (95) exit")
/*
- * The purpose of the test is to check that checkpoint in
- * read_first_param2() stops path traversal. This will only happen if
- * verifier understands that fp[0]-8 at insn (12) is not alive.
+ * The checkpoint at "goto +0" fires because fp0-8 is dead, which
+ * allows state pruning.
*/
__msg("12: safe")
-__msg("processed 20 insns")
__naked void caller_stack_pruning(void)
{
asm volatile (
@@ -342,3 +361,2289 @@ static __used __naked unsigned long write_tail_call(void)
__imm_addr(map_array)
: __clobber_all);
}
+
+/* Test precise subprog stack access analysis.
+ * Caller passes fp-32 (SPI 3) to callee that only accesses arg+0 and arg+8
+ * (SPIs 3 and 2). Slots 0 and 1 should NOT be live at the call site.
+ *
+ * Insn layout:
+ * 0: *(u64*)(r10 - 8) = 0 write SPI 0
+ * 1: *(u64*)(r10 - 16) = 0 write SPI 1
+ * 2: *(u64*)(r10 - 24) = 0 write SPI 2
+ * 3: *(u64*)(r10 - 32) = 0 write SPI 3
+ * 4: r1 = r10
+ * 5: r1 += -32
+ * 6: call precise_read_two passes fp-32 (SPI 3)
+ * 7: r0 = 0
+ * 8: exit
+ *
+ * At insn 6 only SPIs 2,3 should be live (slots 4-7, 0xf0).
+ * SPIs 0,1 are written but never read, hence dead.
+ */
+SEC("socket")
+__log_level(2)
+__msg("6: (85) call pc+{{.*}} ; use: fp0-24 fp0-32{{$}}")
+__naked void subprog_precise_stack_access(void)
+{
+ asm volatile (
+ "*(u64 *)(r10 - 8) = 0;"
+ "*(u64 *)(r10 - 16) = 0;"
+ "*(u64 *)(r10 - 24) = 0;"
+ "*(u64 *)(r10 - 32) = 0;"
+ "r1 = r10;"
+ "r1 += -32;"
+ "call precise_read_two;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/* Callee reads only at arg+0 (SPI 3) and arg+8 (SPI 2) */
+static __used __naked void precise_read_two(void)
+{
+ asm volatile (
+ "r0 = *(u64 *)(r1 + 0);"
+ "r2 = *(u64 *)(r1 + 8);"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
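The SPI numbers used in these comments follow the verifier's stack-slot indexing, spi = (-off - 1) / 8 for 8-byte slots (matching the verifier's get_spi() helper). A quick check of the offsets above:

#include <assert.h>

#define BPF_REG_SIZE 8

/* stack slot index for a frame-pointer-relative offset */
static int get_spi(int off)
{
	return (-off - 1) / BPF_REG_SIZE;
}

int main(void)
{
	assert(get_spi(-8) == 0);	/* fp-8  -> SPI 0 */
	assert(get_spi(-24) == 2);	/* fp-24 -> SPI 2 (arg + 8) */
	assert(get_spi(-32) == 3);	/* fp-32 -> SPI 3 (arg + 0) */
	return 0;
}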
+/* Test that multi-level subprog calls (callee passes arg-derived ptr
+ * to another BPF subprog) are analyzed precisely.
+ *
+ * Caller passes fp-32 (SPI 3). The callee forwards it to inner_callee.
+ * inner_callee only reads at offset 0 from the pointer.
+ * The analysis recurses into forward_to_inner -> inner_callee and
+ * determines only SPI 3 is accessed (slots 6-7, 0xc0), not all of SPIs 0-3.
+ *
+ * Insn layout:
+ * 0: *(u64*)(r10 - 8) = 0 write SPI 0
+ * 1: *(u64*)(r10 - 16) = 0 write SPI 1
+ * 2: *(u64*)(r10 - 24) = 0 write SPI 2
+ * 3: *(u64*)(r10 - 32) = 0 write SPI 3
+ * 4: r1 = r10
+ * 5: r1 += -32
+ * 6: call forward_to_inner passes fp-32 (SPI 3)
+ * 7: r0 = 0
+ * 8: exit
+ */
+SEC("socket")
+__log_level(2)
+__msg("6: (85) call pc+{{.*}} ; use: fp0-32{{$}}")
+__naked void subprog_multilevel_conservative(void)
+{
+ asm volatile (
+ "*(u64 *)(r10 - 8) = 0;"
+ "*(u64 *)(r10 - 16) = 0;"
+ "*(u64 *)(r10 - 24) = 0;"
+ "*(u64 *)(r10 - 32) = 0;"
+ "r1 = r10;"
+ "r1 += -32;"
+ "call forward_to_inner;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/* Forwards arg to another subprog */
+static __used __naked void forward_to_inner(void)
+{
+ asm volatile (
+ "call inner_callee;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+static __used __naked void inner_callee(void)
+{
+ asm volatile (
+ "r0 = *(u64 *)(r1 + 0);"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
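+
+/* Rough shape of the recursion exercised here (a sketch of the assumed
+ * analysis, not verifier code): the pre-pass follows each FP-derived
+ * argument into the callee, and from there into any further callee it
+ * is forwarded to, translating arg-relative accesses (inner_callee's
+ * arg+0) back into caller offsets (fp-32). Only SPI 3 is marked used
+ * rather than all of SPIs 0-3.
+ */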
+
+/* Test multi-frame precision loss: callee consumes caller stack early,
+ * but static liveness keeps it live at pruning points inside callee.
+ *
+ * Caller stores map_ptr or scalar(42) at fp-8, then calls
+ * consume_and_call_inner. The callee reads fp0-8 at entry (consuming
+ * the slot), then calls do_nothing2. After do_nothing2 returns (a
+ * pruning point), fp-8 should be dead -- the read already happened.
+ * But because the call instruction's stack_use includes SPI 0, the
+ * static live_stack_before at insn 7 is 0x1, keeping fp-8 live inside
+ * the callee and preventing state pruning between the two paths.
+ *
+ * Insn layout:
+ * 0: call bpf_get_prandom_u32
+ * 1: if r0 == 42 goto pc+2 -> insn 4
+ * 2: r0 = map ll (ldimm64 part1)
+ * 3: (ldimm64 part2)
+ * 4: *(u64)(r10 - 8) = r0 fp-8 = map_ptr OR scalar(42)
+ * 5: r1 = r10
+ * 6: r1 += -8
+ * 7: call consume_and_call_inner
+ * 8: r0 = 0
+ * 9: exit
+ *
+ * At insn 7, live_stack_before = 0x3 (slots 0-1 live due to stack_use).
+ * At insn 8, live_stack_before = 0x0 (SPI 0 dead, caller doesn't need it).
+ */
+SEC("socket")
+__flag(BPF_F_TEST_STATE_FREQ)
+__log_level(2)
+__success
+__msg(" 7: (85) call pc+{{.*}} ; use: fp0-8")
+__msg(" 8: {{.*}} (b7)")
+__naked void callee_consumed_caller_stack(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == 42 goto 1f;"
+ "r0 = %[map] ll;"
+"1:"
+ "*(u64 *)(r10 - 8) = r0;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call consume_and_call_inner;"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm_addr(map)
+ : __clobber_all);
+}
+
+static __used __naked void consume_and_call_inner(void)
+{
+ asm volatile (
+ "r0 = *(u64 *)(r1 + 0);" /* read fp[0]-8 into caller-saved r0 */
+ "call do_nothing2;" /* inner call clobbers r0 */
+ "r0 = 0;"
+ "goto +0;" /* checkpoint */
+ "r0 = 0;"
+ "goto +0;" /* checkpoint */
+ "r0 = 0;"
+ "goto +0;" /* checkpoint */
+ "r0 = 0;"
+ "goto +0;" /* checkpoint */
+ "exit;"
+ ::: __clobber_all);
+}
+
+static __used __naked void do_nothing2(void)
+{
+ asm volatile (
+ "r0 = 0;"
+ "r0 = 0;"
+ "r0 = 0;"
+ "r0 = 0;"
+ "r0 = 0;"
+ "r0 = 0;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/*
+ * Reproducer for unsound pruning when clean_verifier_state() promotes
+ * live STACK_ZERO bytes to STACK_MISC.
+ *
+ * Program shape:
+ * - Build key at fp-4:
+ * - path A keeps key byte as STACK_ZERO;
+ * - path B writes unknown byte making it STACK_MISC.
+ * - Branches merge at a prune point before map_lookup.
+ * - map_lookup on ARRAY map is value-sensitive to constant zero key:
+ * - path A: const key 0 => PTR_TO_MAP_VALUE (non-NULL);
+ * - path B: non-const key => PTR_TO_MAP_VALUE_OR_NULL.
+ * - Dereference lookup result without null check.
+ *
+ * Note this behavior won't trigger at fp-8, since the verifier
+ * tracks a 32-bit scalar spill to fp-8 differently, as spilled_ptr.
+ *
+ * Correct verifier behavior: reject (path B unsafe).
+ * With blanket STACK_ZERO->STACK_MISC promotion on live slots, the cached
+ * path A state can be generalized and incorrectly prune path B, making
+ * the program load.
+ */
+SEC("socket")
+__flag(BPF_F_TEST_STATE_FREQ)
+__failure __msg("R0 invalid mem access 'map_value_or_null'")
+__naked void stack_zero_to_misc_unsound_array_lookup(void)
+{
+ asm volatile (
+ /* key at fp-4: all bytes STACK_ZERO */
+ "*(u32 *)(r10 - 4) = 0;"
+ "call %[bpf_get_prandom_u32];"
+ /* fall-through (path A) explored first */
+ "if r0 != 0 goto l_nonconst%=;"
+ /* path A: keep key constant zero */
+ "goto l_lookup%=;"
+"l_nonconst%=:"
+ /* path B: key byte turns to STACK_MISC, key no longer const */
+ "*(u8 *)(r10 - 4) = r0;"
+"l_lookup%=:"
+ /* value-sensitive lookup */
+ "r2 = r10;"
+ "r2 += -4;"
+ "r1 = %[array_map_8b] ll;"
+ "call %[bpf_map_lookup_elem];"
+ /* unsafe when lookup result is map_value_or_null */
+ "r0 = *(u64 *)(r0 + 0);"
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(array_map_8b)
+ : __clobber_all);
+}
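+
+/* The value sensitivity relied on above, spelled out (restating the
+ * comment, not new behavior): for an ARRAY map, a key the verifier
+ * knows to be a constant within max_entries always hits, so the
+ * lookup result may be typed PTR_TO_MAP_VALUE; an unknown key may
+ * miss, yielding PTR_TO_MAP_VALUE_OR_NULL, which must be null-checked
+ * before dereferencing.
+ */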
+
+/*
+ * Subprog variant of stack_zero_to_misc_unsound_array_lookup.
+ *
+ * Check unsound pruning when a callee modifies the caller's
+ * stack through a pointer argument.
+ *
+ * Program shape:
+ * main:
+ * *(u32)(fp - 4) = 0 key = 0 (all bytes STACK_ZERO)
+ * r1 = fp - 4
+ * call maybe_clobber_key may overwrite key[0] with scalar
+ * <-- prune point: two states meet here -->
+ * r2 = fp - 4
+ * r1 = array_map_8b
+ * call bpf_map_lookup_elem value-sensitive on const-zero key
+ * r0 = *(u64)(r0 + 0) deref without null check
+ * exit
+ *
+ * maybe_clobber_key(r1):
+ * r6 = r1 save &key
+ * call bpf_get_prandom_u32
+ * if r0 == 0 goto skip path A: key stays STACK_ZERO
+ * *(u8)(r6 + 0) = r0 path B: key[0] becomes STACK_MISC
+ * skip:
+ * r0 = 0
+ * exit
+ *
+ * Path A: const-zero key => array lookup => PTR_TO_MAP_VALUE => deref OK.
+ * Path B: non-const key => array lookup => PTR_TO_MAP_VALUE_OR_NULL => UNSAFE.
+ *
+ * If the cleaner collapses STACK_ZERO -> STACK_MISC for the live key
+ * slot, path A's cached state matches path B, pruning the unsafe path.
+ *
+ * Correct verifier behavior: reject.
+ */
+SEC("socket")
+__flag(BPF_F_TEST_STATE_FREQ)
+__failure __msg("R0 invalid mem access 'map_value_or_null'")
+__naked void subprog_stack_zero_to_misc_unsound(void)
+{
+ asm volatile (
+ /* key at fp-4: all bytes STACK_ZERO */
+ "*(u32 *)(r10 - 4) = 0;"
+ /* subprog may clobber key[0] with a scalar byte */
+ "r1 = r10;"
+ "r1 += -4;"
+ "call maybe_clobber_key;"
+ /* value-sensitive array lookup */
+ "r2 = r10;"
+ "r2 += -4;"
+ "r1 = %[array_map_8b] ll;"
+ "call %[bpf_map_lookup_elem];"
+ /* unsafe when result is map_value_or_null (path B) */
+ "r0 = *(u64 *)(r0 + 0);"
+ "exit;"
+ :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(array_map_8b)
+ : __clobber_all);
+}
+
+static __used __naked void maybe_clobber_key(void)
+{
+ asm volatile (
+ "r6 = r1;"
+ "call %[bpf_get_prandom_u32];"
+ /* path A (r0==0): key stays STACK_ZERO, explored first */
+ "if r0 == 0 goto 1f;"
+ /* path B (r0!=0): overwrite key[0] with scalar */
+ "*(u8 *)(r6 + 0) = r0;"
+ "1:"
+ "r0 = 0;"
+ "exit;"
+ :: __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+/*
+ * Demonstrate that subprog arg spill/reload breaks arg tracking,
+ * inflating caller stack liveness and preventing state pruning.
+ *
+ * modifier2(fp-24) has two paths: one writes a scalar to *(r1+8)
+ * = caller fp-16, the other leaves it as zero. After modifier2
+ * returns, fp-16 is never read again — it is dead.
+ *
+ * spill_reload_reader2(fp-24) only reads caller fp-8 via
+ * *(r1+16), but it spills r1 across a helper call. This
+ * breaks compute_subprog_arg_access(): the reload from callee
+ * stack cannot be connected back to arg1, so arg1 access goes
+ * "all (conservative)". At the call site (r1 = fp-24, slot 5)
+ * apply_callee_stack_access() marks slots 0..5 as stack_use —
+ * pulling fp-16 (slots 2-3) into live_stack_before even though
+ * the reader never touches it.
+ *
+ * Result: at modifier2's return point two states with different
+ * fp-16 values cannot be pruned.
+ *
+ * With correct (or old dynamic) liveness fp-16 is dead at that
+ * point and the states prune → "6: safe" appears in the log.
+ */
+SEC("socket")
+__flag(BPF_F_TEST_STATE_FREQ)
+__log_level(2)
+__success
+__msg("6: safe")
+__naked void spill_reload_inflates_stack_liveness(void)
+{
+ asm volatile (
+ /* struct at fp-24: { ctx; ptr; tail; } */
+ "*(u64 *)(r10 - 24) = r1;" /* fp-24 = ctx */
+ "*(u64 *)(r10 - 16) = r1;" /* fp-16 = ctx (STACK_SPILL ptr) */
+ "*(u64 *)(r10 - 8) = 0;" /* fp-8 = tail */
+ /* modifier2 writes different values to fp-16 on two paths */
+ "r1 = r10;"
+ "r1 += -24;"
+ "call modifier2;"
+ /* insn 6: prune point — two states with different fp-16
+ * path A: fp-16 = STACK_MISC (scalar overwrote pointer)
+ * path B: fp-16 = STACK_SPILL (original ctx pointer)
+ * STACK_MISC does NOT subsume STACK_SPILL(ptr),
+ * so pruning fails unless fp-16 is cleaned (dead).
+ */
+ "r1 = r10;"
+ "r1 += -24;"
+ "call spill_reload_reader2;" /* reads fp-8 via *(r1+16) */
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/* Two paths: one writes a scalar to *(r1+8) = caller fp-16,
+ * the other leaves it unchanged. Both return 0 via separate
+ * exits to prevent pruning inside the subprog at the merge.
+ */
+static __used __naked void modifier2(void)
+{
+ asm volatile (
+ "r6 = r1;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == 0 goto 1f;"
+ "*(u64 *)(r6 + 8) = r0;" /* fp-16 = random */
+ "r0 = 0;"
+ "exit;" /* path A exit */
+ "1:"
+ "r0 = 0;"
+ "exit;" /* path B exit */
+ :: __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+/* Receives r1 = caller fp-24. Only reads *(r1+16) = fp-8.
+ * Spills r1 across a helper call → arg tracking goes conservative →
+ * slots 0..5 all appear used instead of just slot 1 (fp-8).
+ */
+static __used __naked void spill_reload_reader2(void)
+{
+ asm volatile (
+ "*(u64 *)(r10 - 8) = r1;" /* spill arg1 */
+ "call %[bpf_get_prandom_u32];" /* clobbers r1-r5 */
+ "r1 = *(u64 *)(r10 - 8);" /* reload arg1 */
+ "r0 = *(u64 *)(r1 + 16);" /* read caller fp-8 */
+ "r0 = 0;"
+ "exit;"
+ :: __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
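+
+/* What "goes conservative" means here (sketch of the assumed
+ * fallback): once arg1 is reloaded from the callee's own stack, the
+ * pre-pass can no longer tie r1 back to the passed pointer, so it
+ * assumes the callee may read every caller slot reachable from the
+ * argument (fp-24 -> slots 0..5), inflating stack_use at the call.
+ */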
+
+/* BTF FUNC records are not generated for kfuncs referenced
+ * from inline assembly. These records are necessary for
+ * libbpf to link the program. The function below is a hack
+ * to ensure that BTF FUNC records are generated.
+ */
+void __kfunc_btf_root(void)
+{
+ bpf_iter_num_new(0, 0, 0);
+ bpf_iter_num_next(0);
+ bpf_iter_num_destroy(0);
+}
+
+/* Test that open-coded iterator kfunc arguments get precise stack
+ * liveness tracking. struct bpf_iter_num is 8 bytes (1 SPI).
+ *
+ * Insn layout:
+ * 0: *(u64*)(r10 - 8) = 0 write SPI 0 (dead)
+ * 1: *(u64*)(r10 - 16) = 0 write SPI 1 (dead)
+ * 2: r1 = r10
+ * 3: r1 += -24 iter state at fp-24 (SPI 2)
+ * 4: r2 = 0
+ * 5: r3 = 10
+ * 6: call bpf_iter_num_new defines SPI 2 (KF_ITER_NEW) → 0x0
+ * 7-8: r1 = fp-24
+ * 9: call bpf_iter_num_next uses SPI 2 → 0x30
+ * 10: if r0 == 0 goto 2f
+ * 11: goto 1b
+ * 12-13: r1 = fp-24
+ * 14: call bpf_iter_num_destroy uses SPI 2 → 0x30
+ * 15: r0 = 0
+ * 16: exit
+ *
+ * At insn 6, SPI 2 is defined (KF_ITER_NEW initializes, doesn't read),
+ * so it kills liveness from successors. live_stack_before = 0x0.
+ * At insns 9 and 14, SPI 2 is used (iter_next/destroy read the state),
+ * so live_stack_before = 0x30.
+ */
+SEC("socket")
+__success __log_level(2)
+__msg(" 6: (85) call bpf_iter_num_new{{.*}} ; def: fp0-24{{$}}")
+__msg(" 9: (85) call bpf_iter_num_next{{.*}} ; use: fp0-24{{$}}")
+__msg("14: (85) call bpf_iter_num_destroy{{.*}} ; use: fp0-24{{$}}")
+__naked void kfunc_iter_stack_liveness(void)
+{
+ asm volatile (
+ "*(u64 *)(r10 - 8) = 0;" /* SPI 0 - dead */
+ "*(u64 *)(r10 - 16) = 0;" /* SPI 1 - dead */
+ "r1 = r10;"
+ "r1 += -24;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+"1:"
+ "r1 = r10;"
+ "r1 += -24;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto 2f;"
+ "goto 1b;"
+"2:"
+ "r1 = r10;"
+ "r1 += -24;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ :: __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_next),
+ __imm(bpf_iter_num_destroy)
+ : __clobber_all);
+}
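+
+/* The def/use split above follows the iter kfunc contract (sketch):
+ * a KF_ITER_NEW kfunc initializes the 8-byte bpf_iter_num state
+ * without reading it (pure def, kills liveness), while KF_ITER_NEXT
+ * and KF_ITER_DESTROY read it (use). With the state at fp-24 (SPI 2,
+ * slots 4-5), that yields masks 0x0 and 0x30 respectively.
+ */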
+
+/*
+ * Test for soundness bug in static stack liveness analysis.
+ *
+ * The static pre-pass tracks FP-derived register offsets to determine
+ * which stack slots are accessed. When a PTR_TO_STACK is spilled to
+ * the stack and later reloaded, the reload (BPF_LDX) kills FP-derived
+ * tracking, making subsequent accesses through the reloaded pointer
+ * invisible to the static analysis.
+ *
+ * This causes the analysis to incorrectly mark SPI 0 as dead at the
+ * merge point. clean_verifier_state() zeros it in the cached state,
+ * and stacksafe() accepts the new state against STACK_INVALID,
+ * enabling incorrect pruning.
+ *
+ * Path A (verified first): stores PTR_TO_MAP_VALUE in SPI 0
+ * Path B (verified second): stores scalar 42 in SPI 0
+ * After merge: reads SPI 0 through spilled/reloaded PTR_TO_STACK
+ * and dereferences the result as a pointer.
+ *
+ * Correct behavior: reject (path B dereferences a scalar)
+ * Bug behavior: accept (path B is incorrectly pruned)
+ */
+SEC("socket")
+__flag(BPF_F_TEST_STATE_FREQ)
+__failure __msg("R0 invalid mem access 'scalar'")
+__naked void spill_ptr_liveness_type_confusion(void)
+{
+ asm volatile (
+ /* Map lookup to get PTR_TO_MAP_VALUE */
+ "r1 = %[map] ll;"
+ "*(u32 *)(r10 - 32) = 0;"
+ "r2 = r10;"
+ "r2 += -32;"
+ "call %[bpf_map_lookup_elem];"
+ "if r0 == 0 goto l_exit%=;"
+ /* r6 = PTR_TO_MAP_VALUE (callee-saved) */
+ "r6 = r0;"
+ /* Branch: fall-through (path A) verified first */
+ "call %[bpf_get_prandom_u32];"
+ "if r0 != 0 goto l_scalar%=;"
+ /* Path A: store map value ptr at SPI 0 */
+ "*(u64 *)(r10 - 8) = r6;"
+ "goto l_merge%=;"
+"l_scalar%=:"
+ /* Path B: store scalar at SPI 0 */
+ "r1 = 42;"
+ "*(u64 *)(r10 - 8) = r1;"
+"l_merge%=:"
+ /*
+ * Spill PTR_TO_STACK{off=-8} to SPI 1, then reload.
+ * Reload kills FP-derived tracking, hiding the
+ * subsequent SPI 0 access from the static analysis.
+ */
+ "r1 = r10;"
+ "r1 += -8;"
+ "*(u64 *)(r10 - 16) = r1;"
+ "goto +0;" /* checkpoint */
+ "goto +0;" /* checkpoint */
+ "goto +0;" /* checkpoint */
+ "r1 = *(u64 *)(r10 - 16);"
+ /* Read SPI 0 through reloaded pointer */
+ "r0 = *(u64 *)(r1 + 0);"
+ /* Dereference: safe for map value (path A),
+ * unsafe for scalar (path B).
+ */
+ "r0 = *(u64 *)(r0 + 0);"
+ "exit;"
+"l_exit%=:"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_get_prandom_u32),
+ __imm_addr(map)
+ : __clobber_all);
+}
+
+/* === Tests for 4-byte stack slot liveness granularity === */
+
+/* Test that a 4-byte aligned write is stack_def and kills liveness.
+ *
+ * 0: *(u64 *)(r10 - 8) = 0 def slots 0,1 (full SPI 0)
+ * 1: *(u32 *)(r10 - 8) = 0 def slot 1 (4-byte write kills slot 1)
+ * 2: r0 = *(u64 *)(r10 - 8) use slots 0,1
+ * 3: r0 = 0
+ * 4: exit
+ *
+ * At insn 1, the 4-byte write defines slot 1. Slot 0 still flows
+ * backward from insn 2's read: live_stack_before = 0x1.
+ */
+SEC("socket")
+__log_level(2)
+__msg("1: (62) *(u32 *)(r10 -8) = 0 ; def: fp0-8h")
+__naked void four_byte_write_kills_slot(void)
+{
+ asm volatile (
+ "*(u64 *)(r10 - 8) = 0;"
+ "*(u32 *)(r10 - 8) = 0;"
+ "r0 = *(u64 *)(r10 - 8);"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
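+
+/* Working the masks for the test above with the backward equation
+ * live_in = (live_out & ~def) | use:
+ *
+ *   insn 2: use = 0x3 (slots 0,1)
+ *   insn 1: def = 0x2, live_before(1) = (0x3 & ~0x2) | 0x0 = 0x1
+ *   insn 0: def = 0x3, live_before(0) = (0x1 & ~0x3) | 0x0 = 0x0
+ */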
+
+/* Test that a write to the upper half of an SPI is dead when only
+ * the lower half is read. This was impossible at SPI granularity
+ * where any read of the SPI kept the entire SPI live.
+ *
+ * 0: *(u32 *)(r10 - 8) = 0 def slot 1 (DEAD: never read)
+ * 1: *(u32 *)(r10 - 4) = 0 def slot 0
+ * 2: r0 = *(u32 *)(r10 - 4) use slot 0 only
+ * 3: r0 = 0
+ * 4: exit
+ *
+ * At insn 0, nothing is live (0x0). Previously at SPI granularity,
+ * the read at insn 2 would mark the full SPI 0 as live and the
+ * 4-byte writes wouldn't count as def, so insn 0 would have had
+ * SPI 0 live (0x1).
+ */
+SEC("socket")
+__log_level(2)
+__msg("0: (62) *(u32 *)(r10 -8) = 0 ; def: fp0-8h")
+__msg("2: (61) r0 = *(u32 *)(r10 -4) ; use: fp0-4h")
+__naked void dead_half_spi_write(void)
+{
+ asm volatile (
+ "*(u32 *)(r10 - 8) = 0;"
+ "*(u32 *)(r10 - 4) = 0;"
+ "r0 = *(u32 *)(r10 - 4);"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/* Test that a 4-byte read from the upper half of SPI 0 makes only
+ * slot 1 live (0x2), not the full SPI (0x3).
+ *
+ * 0: *(u64 *)(r10 - 8) = 0 def slots 0,1
+ * 1: r0 = *(u32 *)(r10 - 8) use slot 1 only (upper half)
+ * 2: r0 = 0
+ * 3: exit
+ *
+ * At insn 1, live_stack_before = 0x2 (slot 1 only).
+ */
+SEC("socket")
+__log_level(2)
+__msg("1: (61) r0 = *(u32 *)(r10 -8) ; use: fp0-8h")
+__naked void four_byte_read_upper_half(void)
+{
+ asm volatile (
+ "*(u64 *)(r10 - 8) = 0;"
+ "r0 = *(u32 *)(r10 - 8);"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/* Test that a 2-byte write does NOT count as stack_def.
+ * Sub-4-byte writes don't fully cover a 4-byte slot,
+ * so liveness passes through.
+ *
+ * 0: *(u64 *)(r10 - 8) = 0 def slots 0,1
+ * 1: *(u16 *)(r10 - 4) = 0 NOT stack_def (2 < 4 bytes)
+ * 2: r0 = *(u32 *)(r10 - 4) use slot 0
+ * 3: r0 = 0
+ * 4: exit
+ *
+ * At insn 1, slot 0 still live (0x1) because 2-byte write
+ * didn't kill it.
+ */
+SEC("socket")
+__log_level(2)
+__msg("0: (7a) *(u64 *)(r10 -8) = 0 ; def: fp0-8")
+__msg("1: (6a) *(u16 *)(r10 -4) = 0{{$}}")
+__msg("2: (61) r0 = *(u32 *)(r10 -4) ; use: fp0-4h")
+__naked void two_byte_write_no_kill(void)
+{
+ asm volatile (
+ "*(u64 *)(r10 - 8) = 0;"
+ "*(u16 *)(r10 - 4) = 0;"
+ "r0 = *(u32 *)(r10 - 4);"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/* Test that a 1-byte write does NOT count as stack_def.
+ *
+ * 0: *(u64 *)(r10 - 8) = 0 def slots 0,1
+ * 1: *(u8 *)(r10 - 4) = 0 NOT stack_def (1 < 4 bytes)
+ * 2: r0 = *(u32 *)(r10 - 4) use slot 0
+ * 3: r0 = 0
+ * 4: exit
+ *
+ * At insn 1, slot 0 still live (0x1).
+ */
+SEC("socket")
+__log_level(2)
+__msg("0: (7a) *(u64 *)(r10 -8) = 0 ; def: fp0-8")
+__msg("1: (72) *(u8 *)(r10 -4) = 0")
+__msg("2: (61) r0 = *(u32 *)(r10 -4) ; use: fp0-4h")
+__naked void one_byte_write_no_kill(void)
+{
+ asm volatile (
+ "*(u64 *)(r10 - 8) = 0;"
+ "*(u8 *)(r10 - 4) = 0;"
+ "r0 = *(u32 *)(r10 - 4);"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/* Test stack access beyond fp-256 exercising the second bitmask word.
+ * fp-264 is SPI 32, slots 64-65, which are bits 0-1 of live_stack[1].
+ *
+ * 0: *(u64 *)(r10 - 264) = 0 def slots 64,65
+ * 1: r0 = *(u64 *)(r10 - 264) use slots 64,65
+ * 2: r0 = 0
+ * 3: exit
+ *
+ * At insn 1, live_stack high word has bits 0,1 set: 0x3:0x0.
+ */
+SEC("socket")
+__log_level(2)
+__msg("1: (79) r0 = *(u64 *)(r10 -264) ; use: fp0-264")
+__naked void high_stack_second_bitmask_word(void)
+{
+ asm volatile (
+ "*(u64 *)(r10 - 264) = 0;"
+ "r0 = *(u64 *)(r10 - 264);"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
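+
+/* Index math for the second word (sketch): spi(264) = (264 - 1) / 8
+ * = 32, i.e. slots 64 and 65; with 64-bit bitmask words that is word
+ * 64 / 64 = 1, bits 0 and 1, hence the "0x3:0x0" rendering above.
+ */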
+
+/* Test that two separate 4-byte writes to each half of an SPI
+ * together kill liveness for the full SPI.
+ *
+ * 0: *(u32 *)(r10 - 8) = 0 def slot 1 (upper half)
+ * 1: *(u32 *)(r10 - 4) = 0 def slot 0 (lower half)
+ * 2: r0 = *(u64 *)(r10 - 8) use slots 0,1
+ * 3: r0 = 0
+ * 4: exit
+ *
+ * At insn 0: live_stack_before = 0x0 (both slots killed by insns 0,1).
+ * At insn 1: live_stack_before = 0x2 (slot 1 still live, slot 0 killed here).
+ */
+SEC("socket")
+__log_level(2)
+__msg("0: (62) *(u32 *)(r10 -8) = 0 ; def: fp0-8h")
+__msg("1: (62) *(u32 *)(r10 -4) = 0 ; def: fp0-4h")
+__naked void two_four_byte_writes_kill_full_spi(void)
+{
+ asm volatile (
+ "*(u32 *)(r10 - 8) = 0;"
+ "*(u32 *)(r10 - 4) = 0;"
+ "r0 = *(u64 *)(r10 - 8);"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/* Test that 4-byte writes on both branches kill a slot at the
+ * join point. Previously at SPI granularity, a 4-byte write was
+ * not stack_def, so liveness would flow backward through the
+ * branch that only had a 4-byte write.
+ *
+ * 0: call bpf_get_prandom_u32
+ * 1: if r0 != 0 goto 1f
+ * 2: *(u64 *)(r10 - 8) = 0 path A: def slots 0,1
+ * 3: goto 2f
+ * 1:4: *(u32 *)(r10 - 4) = 0 path B: def slot 0
+ * 2:5: r0 = *(u32 *)(r10 - 4) use slot 0
+ * 6: r0 = 0
+ * 7: exit
+ *
+ * Both paths define slot 0 before the read. At insn 1 (branch),
+ * live_stack_before = 0x0 because slot 0 is killed on both paths.
+ */
+SEC("socket")
+__log_level(2)
+__msg("1: (55) if r0 != 0x0 goto pc+2")
+__msg("2: (7a) *(u64 *)(r10 -8) = 0 ; def: fp0-8")
+__msg("3: (05) goto pc+1")
+__msg("4: (62) *(u32 *)(r10 -4) = 0 ; def: fp0-4h")
+__msg("5: (61) r0 = *(u32 *)(r10 -4) ; use: fp0-4h")
+__naked void both_branches_kill_slot(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "if r0 != 0 goto 1f;"
+ "*(u64 *)(r10 - 8) = 0;"
+ "goto 2f;"
+"1:"
+ "*(u32 *)(r10 - 4) = 0;"
+"2:"
+ "r0 = *(u32 *)(r10 - 4);"
+ "r0 = 0;"
+ "exit;"
+ :: __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
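+
+/* Join arithmetic for the branch (illustrative): use(5) = 0x1;
+ * def(2) = 0x3 and def(4) = 0x1 both cover slot 0, so
+ * live_before(2) = live_before(4) = 0x0, and the branch at insn 1
+ * joins 0x0 | 0x0 = 0x0, as asserted above.
+ */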
+
+/* Soundness: cleaning the dead upper half of an SPI must not
+ * affect the live lower half's type information for pruning.
+ *
+ * Both halves of SPI 0 are written separately. Only the lower
+ * half (slot 0) is used as a 4-byte map key. The upper half
+ * (slot 1) is dead and cleaned to STACK_INVALID.
+ *
+ * Path A: key stays 0 (STACK_ZERO) → non-null array lookup
+ * Path B: key byte turns STACK_MISC → may-null array lookup
+ * Deref without null check: safe for A, unsafe for B.
+ *
+ * If half-SPI cleaning incorrectly corrupted the live half's
+ * type info, path A's cached state could generalize and unsoundly
+ * prune path B.
+ *
+ * Expected: reject (path B unsafe).
+ */
+SEC("socket")
+__flag(BPF_F_TEST_STATE_FREQ)
+__failure __msg("R0 invalid mem access 'map_value_or_null'")
+__naked void half_spi_clean_preserves_stack_zero(void)
+{
+ asm volatile (
+ "*(u32 *)(r10 - 4) = 0;" /* slot 0: STACK_ZERO */
+ "*(u32 *)(r10 - 8) = 0;" /* slot 1: STACK_ZERO (dead) */
+ "call %[bpf_get_prandom_u32];"
+ "if r0 != 0 goto l_nonconst%=;"
+ "goto l_lookup%=;"
+"l_nonconst%=:"
+ "*(u8 *)(r10 - 4) = r0;" /* slot 0: STACK_MISC */
+"l_lookup%=:"
+ "r2 = r10;"
+ "r2 += -4;"
+ "r1 = %[array_map_8b] ll;"
+ "call %[bpf_map_lookup_elem];"
+ "r0 = *(u64 *)(r0 + 0);" /* unsafe if null */
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(array_map_8b)
+ : __clobber_all);
+}
+
+/*
+ * Modeled on scx_lavd's pick_idle_cpu_at_cpdom(): a conditional
+ * block with a helper call and temporary stack spill,
+ * spill dead after merge.
+ *
+ * Path A (fall-through): spill r6 to fp-8 across helper call
+ * Path B (branch taken): skip the block entirely
+ * At merge (insn 6): fp-8 is dead (never read after merge)
+ *
+ * Static liveness marks fp-8 dead at merge. clean_verifier_state()
+ * converts path A's STACK_SPILL to STACK_INVALID. Path B has
+ * STACK_INVALID. stacksafe() matches -> path B pruned -> "6: safe".
+ */
+SEC("socket")
+__flag(BPF_F_TEST_STATE_FREQ)
+__success
+__log_level(2)
+__msg("6: safe")
+__naked void dead_spill_at_merge_enables_pruning(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "r6 = 7;"
+ "if r0 != 0 goto l_skip%=;"
+ /* conditional block: spill, call, reload */
+ "*(u64 *)(r10 - 8) = r6;"
+ "call %[bpf_get_prandom_u32];"
+ "r6 = *(u64 *)(r10 - 8);"
+"l_skip%=:"
+ /* fp-8 dead. Path B pruned here -> "6: safe" */
+ "r0 = r6;"
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+/*
+ * FP-offset tracking loses precision on second ADD, killing all liveness.
+ *
+ * fp_off_insn_xfer() handles "FP itself + negative imm" precisely
+ * (e.g. r6 = r10; r6 += -24 -> slot 5). But any subsequent ADD/SUB
+ * on a register that already has non-zero spis falls through to
+ * spis_set_all(), because the code only handles the FP-itself case.
+ *
+ * A write through this imprecise register enters the non-zero-spis
+ * branch of set_indirect_stack_access(), which OR's the all-ones
+ * mask into stack_def. The backward liveness equation
+ *
+ * stack_in = (stack_out & ~stack_def) | stack_use
+ *
+ * sees ~ALL = 0, killing ALL slot liveness at that instruction.
+ *
+ * At the merge pruning point, live_stack_before is empty.
+ * clean_verifier_state() marks fp-8 as STACK_INVALID.
+ * stacksafe() skips STACK_INVALID slots in the cached state, so
+ * pruning succeeds regardless of the current state's fp-8 value.
+ * Path B is pruned and its null deref is never explored.
+ *
+ * Correct behavior: reject (path B dereferences NULL).
+ * Bug behavior: accept (path B pruned away).
+ */
+SEC("socket")
+__flag(BPF_F_TEST_STATE_FREQ)
+__failure __msg("R1 invalid mem access 'scalar'")
+__naked void fp_add_loses_precision_kills_liveness(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "if r0 != 0 goto l_pathB%=;"
+
+ /* Path A (fall-through, explored first): fp-8 = 0 */
+ "r1 = 0;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "goto l_merge%=;"
+
+"l_pathB%=:"
+ /* Path B (explored second): fp-8 = 42 */
+ "r1 = 42;"
+ "*(u64 *)(r10 - 8) = r1;"
+
+"l_merge%=:"
+ /*
+ * Create imprecise FP-derived register.
+ * r6 = r10 - 24 gets precise slot 5.
+ * r6 += 8 hits the else branch (spis non-zero, delta > 0)
+ * and sets spis to ALL. r6 is actually r10-16.
+ */
+ "r6 = r10;"
+ "r6 += -24;"
+ "r6 += 8;"
+
+ /*
+ * Write through imprecise r6. Actually writes to fp-16
+ * (does NOT touch fp-8), but liveness marks ALL slots
+ * as stack_def, killing fp-8's liveness.
+ */
+ "r7 = 0;"
+ "*(u64 *)(r6 + 0) = r7;"
+
+ /* Read fp-8: liveness says dead, but value is needed. */
+ "r2 = *(u64 *)(r10 - 8);"
+ "if r2 == 42 goto l_danger%=;"
+
+ /* r2 != 42 (path A: r2 == 0): safe exit */
+ "r0 = 0;"
+ "exit;"
+
+"l_danger%=:"
+ /* Only reachable from path B (r2 == 42): null deref */
+ "r1 = 0;"
+ "r0 = *(u64 *)(r1 + 0);"
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
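+
+/* A minimal C model of the transfer function described above (a
+ * sketch under the comment's assumptions, not the kernel code):
+ *
+ *   static u64 live_before(u64 live_after, u64 stack_def, u64 stack_use)
+ *   {
+ *           return (live_after & ~stack_def) | stack_use;
+ *   }
+ *
+ * With the imprecise write contributing stack_def == ~0ull and
+ * stack_use == 0, live_before collapses to 0 regardless of
+ * live_after, which is exactly how fp-8's liveness is lost.
+ */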
+
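+/* Same shape as the test above, but FP-offset precision is lost via
+ * a spill and fill of the FP-derived pointer (through fp-160) rather
+ * than a second ALU op; the write through the reloaded r6 again kills
+ * all slot liveness, letting path B be pruned away.
+ */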
+SEC("socket")
+__flag(BPF_F_TEST_STATE_FREQ)
+__failure __msg("R1 invalid mem access 'scalar'")
+__naked void fp_spill_loses_precision_kills_liveness(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "if r0 != 0 goto l_pathB%=;"
+
+ "r1 = 0;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "goto l_merge%=;"
+
+"l_pathB%=:"
+ "r1 = 42;"
+ "*(u64 *)(r10 - 8) = r1;"
+
+"l_merge%=:"
+ "r6 = r10;"
+ "r6 += -64;"
+ "*(u64 *)(r10 - 160) = r6;"
+ "r6 = *(u64 *)(r10 - 160);"
+
+ "r7 = 0;"
+ "*(u64 *)(r6 + 0) = r7;"
+
+ "r2 = *(u64 *)(r10 - 8);"
+ "if r2 == 42 goto l_danger%=;"
+
+ "r0 = *(u64 *)(r10 - 56);"
+ "exit;"
+
+"l_danger%=:"
+ "r1 = 0;"
+ "r0 = *(u64 *)(r1 + 0);"
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+/* === Tests for frame-based AT_FP tracking === */
+
+/*
+ * Test 1: conditional_stx_in_subprog
+ * Subprog conditionally writes caller's slot.
+ * Verify slot stays live (backward pass handles conditional def via CFG).
+ *
+ * Main writes fp-8=42, calls cond_writer(fp-8), reads fp-8.
+ * cond_writer only writes on one path → parent_def only on that path.
+ * The backward parent_live correctly keeps fp-8 live at entry
+ * (conditional write doesn't kill liveness at the join).
+ */
+SEC("socket")
+__log_level(2)
+/* fp-8 live at call (callee conditionally writes → slot not killed) */
+__msg("1: (7b) *(u64 *)(r10 -8) = r1 ; def: fp0-8")
+__msg("4: (85) call pc+2{{$}}")
+__msg("5: (79) r0 = *(u64 *)(r10 -8) ; use: fp0-8")
+__naked void conditional_stx_in_subprog(void)
+{
+ asm volatile (
+ "r1 = 42;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call cond_writer;"
+ "r0 = *(u64 *)(r10 - 8);"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/* Conditionally writes to *(r1+0) */
+static __used __naked void cond_writer(void)
+{
+ asm volatile (
+ "r6 = r1;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == 0 goto 1f;"
+ "*(u64 *)(r6 + 0) = r0;"
+ "1:"
+ "r0 = 0;"
+ "exit;"
+ :: __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
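+/*
+ * Test 2: multiple_callsites_different_offsets
+ * The same callee is reached from two call sites with different
+ * FP-derived argument offsets. Each call site records its own use:
+ * fp-16 at the first call, fp-32 at the second.
+ */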
+SEC("socket")
+__log_level(2)
+__msg("4: (85) call pc+{{.*}} ; use: fp0-16")
+__msg("7: (85) call pc+{{.*}} ; use: fp0-32")
+__naked void multiple_callsites_different_offsets(void)
+{
+ asm volatile (
+ "*(u64 *)(r10 - 16) = 0;"
+ "*(u64 *)(r10 - 32) = 0;"
+ "r1 = r10;"
+ "r1 += -16;"
+ "call read_first_param;"
+ "r1 = r10;"
+ "r1 += -32;"
+ "call read_first_param;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/*
+ * Test 3: nested_fp_passthrough
+ * main→A→B, main's FP forwarded to B. B accesses main's stack.
+ * Verify liveness propagates through.
+ *
+ * Main passes fp-32 to outer_forwarder, which passes it to inner_reader.
+ * inner_reader reads at arg+0 (= main's fp-32).
+ * parent_live propagates transitively: inner→outer→main.
+ */
+SEC("socket")
+__log_level(2)
+/* At call to outer_forwarder: main's fp-32 (slots 6,7) should be live */
+__msg("6: (85) call pc+{{.*}} ; use: fp0-32")
+__naked void nested_fp_passthrough(void)
+{
+ asm volatile (
+ "*(u64 *)(r10 - 8) = 0;"
+ "*(u64 *)(r10 - 16) = 0;"
+ "*(u64 *)(r10 - 24) = 0;"
+ "*(u64 *)(r10 - 32) = 0;"
+ "r1 = r10;"
+ "r1 += -32;"
+ "call outer_forwarder;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/* Forwards arg to inner_reader */
+static __used __naked void outer_forwarder(void)
+{
+ asm volatile (
+ "call inner_reader;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+static __used __naked void inner_reader(void)
+{
+ asm volatile (
+ "r0 = *(u64 *)(r1 + 0);"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/*
+ * Test 4: callee_must_write_before_read
+ * Callee unconditionally writes parent slot before reading.
+ * Verify slot is NOT live at call site (parent_def kills it).
+ */
+SEC("socket")
+__log_level(2)
+/* fp-8 NOT live at call: callee writes before reading (parent_def kills it) */
+__msg("2: .12345.... (85) call pc+")
+__naked void callee_must_write_before_read(void)
+{
+ asm volatile (
+ "r1 = r10;"
+ "r1 += -8;"
+ "call write_then_read;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/* Unconditionally writes *(r1+0), then reads it back */
+static __used __naked void write_then_read(void)
+{
+ asm volatile (
+ "r6 = r1;"
+ "r7 = 99;"
+ "*(u64 *)(r6 + 0) = r7;"
+ "r0 = *(u64 *)(r6 + 0);"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/*
+ * Test 5: return_site_liveness_bleeding
+ * Main calls subprog twice. Slot used after one call but not the other.
+ * Context-insensitive: slot conservatively live at both.
+ *
+ * After first call: read fp-8.
+ * After second call: don't read fp-8.
+ * Since parent_live is per-subprog (not per call-site),
+ * fp-8 is live at both call sites.
+ */
+SEC("socket")
+__log_level(2)
+/* Both calls have fp-8 live due to context-insensitive parent_live */
+__msg("3: (85) call pc+{{.*}} ; use: fp0-8")
+__msg("7: (85) call pc+{{.*}} ; use: fp0-8")
+__naked void return_site_liveness_bleeding(void)
+{
+ asm volatile (
+ "*(u64 *)(r10 - 8) = 0;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call read_first_param;"
+ "r0 = *(u64 *)(r10 - 8);"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call read_first_param;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
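+/* bpf_loop callback conditionally reads *(ctx - 8) = caller fp-16.
+ * A conditional read is still a use, so fp-16 must stay live at the
+ * bpf_loop call site.
+ */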
+SEC("socket")
+__log_level(2)
+__msg("9: (85) call bpf_loop#181 ; use: fp0-16")
+__naked void callback_conditional_read_beyond_ctx(void)
+{
+ asm volatile (
+ "r1 = 42;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "*(u64 *)(r10 - 16) = r1;"
+ "r1 = 2;"
+ "r2 = cb_cond_read ll;"
+ "r3 = r10;"
+ "r3 += -8;"
+ "r4 = 0;"
+ "call %[bpf_loop];"
+ "r0 = 0;"
+ "exit;"
+ :: __imm(bpf_loop)
+ : __clobber_all);
+}
+
+/* Callback conditionally reads *(ctx - 8) = caller fp-16 */
+static __used __naked void cb_cond_read(void)
+{
+ asm volatile (
+ "r6 = r2;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == 0 goto 1f;"
+ "r0 = *(u64 *)(r6 - 8);"
+ "1:"
+ "r0 = 0;"
+ "exit;"
+ :: __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__log_level(2)
+__msg("14: (7b) *(u64 *)(r6 -8) = r7 ; def: fp0-16")
+__msg("15: (79) r0 = *(u64 *)(r6 -8) ; use: fp0-16")
+__naked void callback_write_before_read_kills(void)
+{
+ asm volatile (
+ "r1 = 42;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "*(u64 *)(r10 - 16) = r1;"
+ "r1 = 2;"
+ "r2 = cb_write_read ll;"
+ "r3 = r10;"
+ "r3 += -8;"
+ "r4 = 0;"
+ "call %[bpf_loop];"
+ "r0 = 0;"
+ "exit;"
+ :: __imm(bpf_loop)
+ : __clobber_all);
+}
+
+/* Callback unconditionally writes *(ctx-8), then reads it back.
+ * The write (parent_def) kills liveness before entry.
+ */
+static __used __naked void cb_write_read(void)
+{
+ asm volatile (
+ "r6 = r2;"
+ "r7 = 99;"
+ "*(u64 *)(r6 - 8) = r7;"
+ "r0 = *(u64 *)(r6 - 8);"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/*
+ * bpf_loop callback conditionally writes fp-16 then unconditionally
+ * reads it. The conditional write does NOT kill liveness, so fp-16
+ * stays live at the bpf_loop call site.
+ */
+SEC("socket")
+__log_level(2)
+__msg("9: (85) call bpf_loop#181 ; use: fp0-16")
+__naked void callback_conditional_write_preserves(void)
+{
+ asm volatile (
+ "r1 = 42;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "*(u64 *)(r10 - 16) = r1;"
+ "r1 = 2;"
+ "r2 = cb_cond_write_read ll;"
+ "r3 = r10;"
+ "r3 += -8;"
+ "r4 = 0;"
+ "call %[bpf_loop];"
+ "r0 = 0;"
+ "exit;"
+ :: __imm(bpf_loop)
+ : __clobber_all);
+}
+
+static __used __naked void cb_cond_write_read(void)
+{
+ asm volatile (
+ "r6 = r2;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == 0 goto 1f;"
+ "*(u64 *)(r6 - 8) = r0;"
+ "1:"
+ "r0 = *(u64 *)(r6 - 8);"
+ "r0 = 0;"
+ "exit;"
+ :: __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+/*
+ * Two bpf_loop calls with the same callback but different ctx pointers.
+ *
+ * First call: ctx=fp-8, second call: ctx=fp-24.
+ */
+SEC("socket")
+__log_level(2)
+__msg(" 8: (85) call bpf_loop{{.*}} ; use: fp0-8")
+__msg("15: (85) call bpf_loop{{.*}} ; use: fp0-24")
+__naked void callback_two_calls_different_ctx(void)
+{
+ asm volatile (
+ "*(u64 *)(r10 - 8) = 0;"
+ "*(u64 *)(r10 - 24) = 0;"
+ "r1 = 1;"
+ "r2 = cb_read_ctx ll;"
+ "r3 = r10;"
+ "r3 += -8;"
+ "r4 = 0;"
+ "call %[bpf_loop];"
+ "r1 = 1;"
+ "r2 = cb_read_ctx ll;"
+ "r3 = r10;"
+ "r3 += -24;"
+ "r4 = 0;"
+ "call %[bpf_loop];"
+ "r0 = 0;"
+ "exit;"
+ :: __imm(bpf_loop)
+ : __clobber_all);
+}
+
+/* Callback reads at ctx+0 unconditionally */
+static __used __naked void cb_read_ctx(void)
+{
+ asm volatile (
+ "r0 = *(u64 *)(r2 + 0);"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/*
+ * Reproducer for unsound pruning in refined_caller_live_stack().
+ *
+ * Three-level call chain: main → mid_fwd → grandchild_deref.
+ * Main passes &fp-8 to mid_fwd, which forwards R1 to grandchild_deref.
+ * grandchild_deref reads main's fp-8 through the forwarded pointer
+ * and dereferences the result.
+ *
+ * refined_caller_live_stack() applies a callee_offset++ adjustment
+ * when mid_fwd (frame 1) is mid-call. This drops the transitive
+ * parent_live contribution at mid_fwd's call instruction — the only place
+ * where grandchild_deref's read of main's fp-8 is recorded.
+ * As a result, main's fp-8 is cleaned to STACK_INVALID at the
+ * pruning point inside grandchild_deref, and path B is
+ * incorrectly pruned against path A.
+ *
+ * Path A: main stores PTR_TO_MAP_VALUE at fp-8
+ * Path B: main stores scalar 42 at fp-8
+ *
+ * Correct behavior: reject (path B dereferences scalar)
+ * Bug behavior: accept (path B pruned against cleaned path A)
+ */
+SEC("socket")
+__flag(BPF_F_TEST_STATE_FREQ)
+__failure __msg("R0 invalid mem access 'scalar'")
+__naked void transitive_parent_stack_read_unsound(void)
+{
+ asm volatile (
+ /* Map lookup to get PTR_TO_MAP_VALUE */
+ "r1 = %[map] ll;"
+ "*(u32 *)(r10 - 32) = 0;"
+ "r2 = r10;"
+ "r2 += -32;"
+ "call %[bpf_map_lookup_elem];"
+ "if r0 == 0 goto l_exit%=;"
+ "r6 = r0;"
+ /* Branch: path A (fall-through) explored first */
+ "call %[bpf_get_prandom_u32];"
+ "if r0 != 0 goto l_scalar%=;"
+ /* Path A: fp-8 = PTR_TO_MAP_VALUE */
+ "*(u64 *)(r10 - 8) = r6;"
+ "goto l_merge%=;"
+"l_scalar%=:"
+ /* Path B: fp-8 = scalar 42 */
+ "r1 = 42;"
+ "*(u64 *)(r10 - 8) = r1;"
+"l_merge%=:"
+ /* Pass &fp-8 to mid_fwd → grandchild_deref */
+ "r1 = r10;"
+ "r1 += -8;"
+ "call mid_fwd;"
+ "r0 = 0;"
+ "exit;"
+"l_exit%=:"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_get_prandom_u32),
+ __imm_addr(map)
+ : __clobber_all);
+}
+
+/* Forwards R1 (ptr to main's fp-8) to grandchild_deref */
+static __used __naked void mid_fwd(void)
+{
+ asm volatile (
+ "call grandchild_deref;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/* Reads main's fp-8 through forwarded pointer, dereferences result */
+static __used __naked void grandchild_deref(void)
+{
+ asm volatile (
+ "goto +0;" /* checkpoint */
+ "goto +0;" /* checkpoint */
+ /* read main's fp-8: map_ptr (path A) or scalar (path B) */
+ "r0 = *(u64 *)(r1 + 0);"
+ /* dereference: safe for map_ptr, unsafe for scalar */
+ "r0 = *(u64 *)(r0 + 0);"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
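+/* The two tests below spill FP-derived pointers (r6 = &caller fp-8,
+ * r7 = &own fp-16) to the stack, make an inner call, then reload and
+ * dereference. The expectations check that at_stack tracking
+ * ("fp-8=fp1-16 fp-16=fp0-8") survives the call; the variants differ
+ * only in how many FP-derived argument registers are live at the
+ * inner call (r1 and r2 vs r1 alone).
+ */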
+SEC("socket")
+__log_level(2)
+__success
+__msg("14: (79) r1 = *(u64 *)(r10 -8) // r6=fp0-8 r7=fp1-16 fp-8=fp1-16 fp-16=fp0-8")
+__msg("15: (79) r0 = *(u64 *)(r1 +0) // r1=fp1-16 r6=fp0-8 r7=fp1-16 fp-8=fp1-16 fp-16=fp0-8")
+__msg("stack use/def subprog#1 mid_two_fp_threshold (d1,cs2):")
+__msg("14: (79) r1 = *(u64 *)(r10 -8) ; use: fp1-8")
+__msg("15: (79) r0 = *(u64 *)(r1 +0) ; use: fp1-16")
+__naked void two_fp_clear_stack_threshold(void)
+{
+ asm volatile (
+ "r1 = r10;"
+ "r1 += -8;"
+ "call mid_two_fp_threshold;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+static __used __naked void mid_two_fp_threshold(void)
+{
+ asm volatile (
+ "r6 = r1;"
+ "r7 = r10;"
+ "r7 += -16;"
+ "*(u64 *)(r10 - 8) = r7;"
+ "*(u64 *)(r10 - 16) = r6;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = r6;"
+ "call inner_nop_fptest;"
+ "r1 = *(u64 *)(r10 - 8);"
+ "r0 = *(u64 *)(r1 + 0);"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+static __used __naked void inner_nop_fptest(void)
+{
+ asm volatile (
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("socket")
+__log_level(2)
+__success
+__msg("13: (79) r1 = *(u64 *)(r10 -8) // r6=fp0-8 r7=fp1-16 fp-8=fp1-16 fp-16=fp0-8")
+__msg("14: (79) r0 = *(u64 *)(r1 +0) // r1=fp1-16 r6=fp0-8 r7=fp1-16 fp-8=fp1-16 fp-16=fp0-8")
+__msg("stack use/def subprog#1 mid_one_fp_threshold (d1,cs2):")
+__msg("13: (79) r1 = *(u64 *)(r10 -8) ; use: fp1-8")
+__msg("14: (79) r0 = *(u64 *)(r1 +0) ; use: fp1-16")
+__naked void one_fp_clear_stack_threshold(void)
+{
+ asm volatile (
+ "r1 = r10;"
+ "r1 += -8;"
+ "call mid_one_fp_threshold;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+static __used __naked void mid_one_fp_threshold(void)
+{
+ asm volatile (
+ "r6 = r1;"
+ "r7 = r10;"
+ "r7 += -16;"
+ "*(u64 *)(r10 - 8) = r7;"
+ "*(u64 *)(r10 - 16) = r6;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call inner_nop_fptest;"
+ "r1 = *(u64 *)(r10 - 8);"
+ "r0 = *(u64 *)(r1 + 0);"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/*
+ * Reproducer for unsound pruning when a subprog forwards a parent
+ * stack pointer (AT_PARENT) to a helper with a memory argument.
+ *
+ * set_call_stack_access_at() previously only tracked AT_CURRENT args,
+ * skipping AT_PARENT entirely. This meant helper reads through parent
+ * stack pointers did not set parent_use, letting the slot appear dead
+ * at pruning checkpoints inside the subprog.
+ *
+ * Program shape:
+ * main:
+ * *(u32)(fp-4) = 0 key = STACK_ZERO (const 0)
+ * call bpf_get_prandom_u32
+ * if r0 != 0 goto clobber path A (fall-through) first
+ * goto merge
+ * clobber:
+ * *(u8)(fp-4) = r0 path B: key[0] = STACK_MISC
+ * merge:
+ * r1 = fp - 4
+ * call fwd_parent_key_to_helper
+ * r0 = 0
+ * exit
+ *
+ * fwd_parent_key_to_helper(r1 = &caller_fp-4):
+ * goto +0 checkpoint
+ * r2 = r1 R2 = AT_PARENT ptr to caller fp-4
+ * r1 = array_map_8b ll R1 = array map
+ * call bpf_map_lookup_elem reads key_size(4) from parent fp-4
+ * r0 = *(u64 *)(r0 + 0) deref without null check
+ * r0 = 0
+ * exit
+ *
+ * Path A: STACK_ZERO key = const 0 -> array lookup -> PTR_TO_MAP_VALUE
+ * (non-NULL for in-bounds const key) -> deref OK.
+ * Path B: STACK_MISC key = unknown -> array lookup ->
+ * PTR_TO_MAP_VALUE_OR_NULL -> deref UNSAFE.
+ *
+ * Bug: AT_PARENT R2 arg to bpf_map_lookup_elem skipped -> parent_use
+ * not set -> fp-4 cleaned at checkpoint -> STACK_ZERO collapses
+ * to STACK_INVALID -> path B pruned -> deref never checked.
+ *
+ * Correct verifier behavior: reject (path B deref of map_value_or_null).
+ */
+SEC("socket")
+__flag(BPF_F_TEST_STATE_FREQ)
+__failure __msg("R0 invalid mem access 'map_value_or_null'")
+__naked void helper_parent_stack_read_unsound(void)
+{
+ asm volatile (
+ /* key at fp-4: all bytes STACK_ZERO */
+ "*(u32 *)(r10 - 4) = 0;"
+ "call %[bpf_get_prandom_u32];"
+ /* fall-through (path A) explored first */
+ "if r0 != 0 goto l_clobber%=;"
+ /* path A: key stays constant zero */
+ "goto l_merge%=;"
+"l_clobber%=:"
+ /* path B: key[0] becomes STACK_MISC, key no longer const */
+ "*(u8 *)(r10 - 4) = r0;"
+"l_merge%=:"
+ "r1 = r10;"
+ "r1 += -4;"
+ "call fwd_parent_key_to_helper;"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+/*
+ * Subprog forwards parent stack pointer to bpf_map_lookup_elem as key
+ * on an array map, then dereferences the result without a null check.
+ * R1 = &parent_fp-4 (AT_PARENT in this frame).
+ *
+ * The helper reads key_size(4) bytes from parent stack. The deref of
+ * R0 reads the map value, NOT parent stack, so record_insn_mem_accesses
+ * does not set parent_use for it. The ONLY parent stack access is
+ * through the helper's R2 arg.
+ */
+static __used __naked void fwd_parent_key_to_helper(void)
+{
+ asm volatile (
+ "goto +0;" /* checkpoint */
+ "r2 = r1;" /* R2 = parent ptr (AT_PARENT) */
+ "r1 = %[array_map_8b] ll;" /* R1 = array map */
+ "call %[bpf_map_lookup_elem];" /* reads 4 bytes from parent fp-4 */
+ /* deref without null check: safe for PTR_TO_MAP_VALUE,
+ * unsafe for PTR_TO_MAP_VALUE_OR_NULL
+ */
+ "r0 = *(u64 *)(r0 + 0);"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(array_map_8b)
+ : __clobber_all);
+}
+
+/*
+ * Regression test: later helper args must still be scanned after a
+ * whole-stack fallback on an earlier local arg. The first bpf_snprintf()
+ * arg is a local frame-derived pointer with offset-imprecise tracking
+ * (`fp1 ?`), which
+ * conservatively marks the whole local stack live. The fourth arg still
+ * forwards &parent_fp-8 and must contribute nonlocal_use[0]=0:3.
+ */
+SEC("socket")
+__log_level(2)
+__success
+__msg("call bpf_snprintf{{.*}} ; use: fp1-8..-512 fp0-8")
+__naked void helper_arg_fallback_keeps_scanning(void)
+{
+ asm volatile (
+ "r1 = 42;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call helper_snprintf_parent_after_local_fallback;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+static __used __naked void helper_snprintf_parent_after_local_fallback(void)
+{
+ asm volatile (
+ "r6 = r1;" /* save &parent_fp-8 */
+ "call %[bpf_get_prandom_u32];"
+ "r0 &= 8;"
+ "r1 = r10;"
+ "r1 += -16;"
+ "r1 += r0;" /* local fp, offset-imprecise */
+ "r2 = 8;"
+ "r3 = %[snprintf_u64_fmt] ll;"
+ "r4 = r6;" /* later arg: parent fp-8 */
+ "r5 = 8;"
+ "call %[bpf_snprintf];"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_snprintf),
+ __imm_addr(snprintf_u64_fmt)
+ : __clobber_all);
+}
+
+/*
+ * Test that propagate_callee_ancestor() correctly chains ancestor
+ * liveness across sequential calls within a single frame.
+ *
+ * main → mid_seq_touch → {nop_callee, deref_ancestor}
+ *
+ * mid_seq_touch receives two pointers: R1 = &main_fp-8 (forwarded to
+ * deref_ancestor) and R2 = &main_fp-16 (read directly by mid_seq_touch).
+ * The direct read of fp-16 forces ensure_anc_arrays() to allocate
+ * ancestor_live[0] for mid_seq_touch, so refined_caller_live_stack()
+ * uses the refined path (not the conservative fallback).
+ *
+ * mid_seq_touch calls nop_callee first (no-op, creates a pruning point),
+ * then calls deref_ancestor which reads main's fp-8 and dereferences it.
+ *
+ * propagate_callee_ancestor() propagates deref_ancestor's entry
+ * ancestor_live[0] into mid_seq_touch's anc_use[0] at the call-to-deref
+ * instruction. mid_seq_touch's backward pass flows this backward so
+ * ancestor_live[0] includes fp-8 at the pruning point between the calls.
+ *
+ * Without propagation, mid_seq_touch's ancestor_live[0] only has fp-16
+ * (from the direct read) — fp-8 is missing. refined_caller_live_stack()
+ * Term 1 says fp-8 is dead, the verifier cleans it, and path B
+ * (scalar 42) is incorrectly pruned against path A (MAP_VALUE).
+ *
+ * Path A: main stores PTR_TO_MAP_VALUE at fp-8 → deref succeeds
+ * Path B: main stores scalar 42 at fp-8 → deref must fail
+ *
+ * Correct: reject (path B dereferences scalar)
+ */
+SEC("socket")
+__flag(BPF_F_TEST_STATE_FREQ)
+__failure __msg("R0 invalid mem access 'scalar'")
+__naked void propagate_callee_ancestor_chain(void)
+{
+ asm volatile (
+ /* Map lookup to get PTR_TO_MAP_VALUE */
+ "r1 = %[map] ll;"
+ "*(u32 *)(r10 - 32) = 0;"
+ "r2 = r10;"
+ "r2 += -32;"
+ "call %[bpf_map_lookup_elem];"
+ "if r0 == 0 goto l_exit%=;"
+ "r6 = r0;"
+ /* Branch: path A (fall-through) explored first */
+ "call %[bpf_get_prandom_u32];"
+ "if r0 != 0 goto l_scalar%=;"
+ /* Path A: fp-8 = PTR_TO_MAP_VALUE */
+ "*(u64 *)(r10 - 8) = r6;"
+ "goto l_merge%=;"
+"l_scalar%=:"
+ /* Path B: fp-8 = scalar 42 */
+ "r1 = 42;"
+ "*(u64 *)(r10 - 8) = r1;"
+"l_merge%=:"
+ /* fp-16 = dummy value (mid_seq_touch reads it directly) */
+ "r1 = 99;"
+ "*(u64 *)(r10 - 16) = r1;"
+ /* R1 = &fp-8 (for deref_ancestor), R2 = &fp-16 (for mid_seq_touch) */
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = r10;"
+ "r2 += -16;"
+ "call mid_seq_touch;"
+ "r0 = 0;"
+ "exit;"
+"l_exit%=:"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_get_prandom_u32),
+ __imm_addr(map)
+ : __clobber_all);
+}
+
+/*
+ * R1 = &main_fp-8 (forwarded to deref_ancestor)
+ * R2 = &main_fp-16 (read directly here → allocates ancestor_live[0])
+ *
+ * Reads main's fp-16 to force ancestor_live[0] allocation, then
+ * calls nop_callee (pruning point), then deref_ancestor.
+ */
+static __used __naked void mid_seq_touch(void)
+{
+ asm volatile (
+ "r6 = r1;" /* save &main_fp-8 in callee-saved */
+ "r0 = *(u64 *)(r2 + 0);" /* read main's fp-16: triggers anc_use[0] */
+ "call nop_callee;" /* no-op, creates pruning point after */
+ "r1 = r6;" /* restore ptr to &main_fp-8 */
+ "call deref_ancestor;" /* reads main's fp-8, dereferences */
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+static __used __naked void nop_callee(void)
+{
+ asm volatile (
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/* Reads main's fp-8 through forwarded pointer, dereferences result */
+static __used __naked void deref_ancestor(void)
+{
+ asm volatile (
+ "r0 = *(u64 *)(r1 + 0);" /* read main's fp-8 */
+ "r0 = *(u64 *)(r0 + 0);" /* deref: safe for map_ptr, unsafe for scalar */
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/*
+ * Test: callee loads an fp-derived pointer from caller's stack, then
+ * reads through it to access another caller stack slot.
+ *
+ * main stores PTR_TO_MAP_VALUE at fp-24, stores &fp-24 (an fp-derived
+ * pointer) at fp-8, passes &fp-8 through mid_fwd_spilled_ptr to
+ * load_ptr_deref_grandchild. The leaf loads the pointer from main's
+ * fp-8, then reads main's fp-24 through the loaded pointer.
+ *
+ * fill_from_stack() in arg_track_xfer() only handles local-frame
+ * FP-derived loads (src_is_local_fp check requires frame == depth).
+ * When a callee loads from a parent-frame pointer (frame < depth),
+ * the loaded value gets ARG_NONE instead of being recognized as
+ * fp-derived. Subsequent reads through that loaded pointer are
+ * invisible to liveness — nonlocal_use is never set for fp-24.
+ *
+ * clean_live_states() cleans the current state at every prune point.
+ * Because liveness misses fp-24, refined_caller_live_stack() tells
+ * __clean_func_state() that fp-24 is dead, which destroys the
+ * PTR_TO_MAP_VALUE spill before the grandchild can read it.
+ * The grandchild then reads STACK_INVALID → scalar, and the deref
+ * is rejected with "R0 invalid mem access 'scalar'" — even though
+ * fp-24 is genuinely live and holds a valid map pointer.
+ *
+ * This is a false positive: a valid program incorrectly rejected.
+ */
+SEC("socket")
+__flag(BPF_F_TEST_STATE_FREQ)
+__success
+__naked void spilled_fp_cross_frame_deref(void)
+{
+ asm volatile (
+ /* Map lookup to get PTR_TO_MAP_VALUE */
+ "r1 = %[map] ll;"
+ "*(u32 *)(r10 - 32) = 0;"
+ "r2 = r10;"
+ "r2 += -32;"
+ "call %[bpf_map_lookup_elem];"
+ "if r0 == 0 goto l_exit%=;"
+ /* fp-24 = PTR_TO_MAP_VALUE */
+ "*(u64 *)(r10 - 24) = r0;"
+ /* Store pointer to fp-24 at fp-8 */
+ "r1 = r10;"
+ "r1 += -24;"
+ "*(u64 *)(r10 - 8) = r1;"
+ /* R1 = &fp-8: pointer to the spilled ptr */
+ "r1 = r10;"
+ "r1 += -8;"
+ "call mid_fwd_spilled_ptr;"
+ "r0 = 0;"
+ "exit;"
+"l_exit%=:"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map)
+ : __clobber_all);
+}
+
+/* Forwards R1 (ptr to main's fp-8, which holds &main_fp-24) to leaf */
+static __used __naked void mid_fwd_spilled_ptr(void)
+{
+ asm volatile (
+ "call load_ptr_deref_grandchild;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/*
+ * R1 = &main_fp-8 (where main stored ptr to fp-24)
+ * Loads the ptr from main's fp-8, reads main's fp-24 through it,
+ * then dereferences the result.
+ */
+static __used __naked void load_ptr_deref_grandchild(void)
+{
+ asm volatile (
+ /* Load ptr from main's fp-8 → r2 = &main_fp-24 */
+ "r2 = *(u64 *)(r1 + 0);"
+ /* Read main's fp-24 through loaded ptr */
+ "r0 = *(u64 *)(r2 + 0);"
+ /* Dereference: safe for map_ptr */
+ "r0 = *(u64 *)(r0 + 0);"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/*
+ * Exercise merge_nonlocal_live().
+ *
+ * merge_shared_mid is analyzed twice (once from each wrapper), so the
+ * callsite within merge_shared_mid that calls merge_leaf_read gets its
+ * nonlocal_live info merged twice via merge_nonlocal_live().
+ */
+SEC("socket")
+__log_level(2)
+__success
+__msg("14: (85) call pc+2 r1: fp0-16")
+__msg("17: (79) r0 = *(u64 *)(r1 +0) // r1=fp0-16")
+__msg("14: (85) call pc+2 r1: fp0-8")
+__msg("17: (79) r0 = *(u64 *)(r1 +0) // r1=fp0-8")
+__msg("5: (85) call pc+{{.*}} ; use: fp0-8 fp0-16")
+__naked void test_merge_nonlocal_live(void)
+{
+ asm volatile (
+ "r1 = 0;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "*(u64 *)(r10 - 16) = r1;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call merge_wrapper_a;"
+ "r1 = r10;"
+ "r1 += -16;"
+ "call merge_wrapper_b;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+static __used __naked void merge_wrapper_a(void)
+{
+ asm volatile (
+ "call merge_shared_mid;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+static __used __naked void merge_wrapper_b(void)
+{
+ asm volatile (
+ "call merge_shared_mid;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+static __used __naked void merge_shared_mid(void)
+{
+ asm volatile (
+ "call merge_leaf_read;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+static __used __naked void merge_leaf_read(void)
+{
+ asm volatile (
+ "r0 = *(u64 *)(r1 + 0);"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/* Same bpf_loop instruction calls different callbacks depending on branch. */
+SEC("socket")
+__log_level(2)
+__success
+__msg("call bpf_loop#181 ; use: fp2-8..-512 fp1-8..-512 fp0-8..-512")
+__naked void bpf_loop_two_callbacks(void)
+{
+ asm volatile (
+ "r1 = 0;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "*(u64 *)(r10 - 16) = r1;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call dyn_wrapper_a;"
+ "r1 = r10;"
+ "r1 += -16;"
+ "call dyn_wrapper_b;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+static __used __naked void dyn_wrapper_a(void)
+{
+ asm volatile (
+ "call mid_dynamic_cb;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+static __used __naked void dyn_wrapper_b(void)
+{
+ asm volatile (
+ "call mid_dynamic_cb;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+static __used __naked void mid_dynamic_cb(void)
+{
+ asm volatile (
+ "r6 = r1;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == 0 goto 1f;"
+ "r2 = dyn_cb_a ll;"
+ "goto 2f;"
+ "1:"
+ "r2 = dyn_cb_b ll;"
+ "2:"
+ "r1 = 1;"
+ "r3 = r6;" /* ctx = fp-derived ptr from parent */
+ "r4 = 0;"
+ "call %[bpf_loop];"
+ "r0 = 0;"
+ "exit;"
+ :: __imm(bpf_get_prandom_u32),
+ __imm(bpf_loop)
+ : __clobber_all);
+}
+
+/* Callback A/B: read parent stack through ctx */
+static __used __naked void dyn_cb_a(void)
+{
+ asm volatile (
+ "r0 = *(u64 *)(r2 + 0);"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+static __used __naked void dyn_cb_b(void)
+{
+ asm volatile (
+ "r0 = *(u64 *)(r2 + 0);"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/*
+ * Path A: r0 = map_lookup result (non-FP, ARG_NONE for stack tracking)
+ * Path B: r0 = fp-8 (FP-derived, frame=0, off=-8)
+ * At the join: r0 is not guaranteed to be a frame pointer.
+ */
+SEC("socket")
+__log_level(2)
+__msg("10: (79) r0 = *(u64 *)(r10 -8) // r0=fp0-8|fp0+0")
+__naked void stack_or_non_stack_write(void)
+{
+ asm volatile (
+ /* initial write to fp-8 */
+ "*(u64 *)(r10 - 8) = 0;"
+ /* map lookup to get a non-FP pointer */
+ "r2 = r10;"
+ "r2 += -4;"
+ "r1 = %[map] ll;"
+ "call %[bpf_map_lookup_elem];"
+ /* r0 = map_value (ARG_NONE) */
+ "if r0 != 0 goto 1f;"
+ /* path B: r0 = fp-8 */
+ "r0 = r10;"
+ "r0 += -8;"
+"1:"
+ /* join: the write is not a def for fp[0]-8 */
+ "*(u64 *)(r0 + 0) = 7;"
+ /* read fp-8: should be non-poisoned */
+ "r0 = *(u64 *)(r10 - 8);"
+ "exit;"
+ :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map)
+ : __clobber_all);
+}
+
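+/* forwarding_rw -> write_first_read_second is reached from two call
+ * sites with swapped pointer arguments, so the shared instance of
+ * write_first_read_second cannot treat the write through r1 as a
+ * must-write of one fixed slot: the read through r2 must keep both
+ * fp-8 and fp-16 in use, as the expectations below assert.
+ */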
+SEC("socket")
+__log_level(2)
+__flag(BPF_F_TEST_STATE_FREQ)
+__msg("subprog#2 write_first_read_second:")
+__msg("17: (7a) *(u64 *)(r1 +0) = 42{{$}}")
+__msg("18: (79) r0 = *(u64 *)(r2 +0) // r1=fp0-8 r2=fp0-16{{$}}")
+__msg("stack use/def subprog#2 write_first_read_second (d2,cs15):")
+__msg("17: (7a) *(u64 *)(r1 +0) = 42{{$}}")
+__msg("18: (79) r0 = *(u64 *)(r2 +0) ; use: fp0-8 fp0-16")
+__naked void shared_instance_must_write_overwrite(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "*(u64 *)(r10 - 16) = r1;"
+ /* Call 1: write_first_read_second(&fp[-8], &fp[-16]) */
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = r10;"
+ "r2 += -16;"
+ "call forwarding_rw;"
+ /* Call 2: write_first_read_second(&fp[-16], &fp[-8]) */
+ "r1 = r10;"
+ "r1 += -16;"
+ "r2 = r10;"
+ "r2 += -8;"
+ "call forwarding_rw;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+static __used __naked void forwarding_rw(void)
+{
+ asm volatile (
+ "call write_first_read_second;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+static __used __naked void write_first_read_second(void)
+{
+ asm volatile (
+ "*(u64 *)(r1 + 0) = 42;"
+ "r0 = *(u64 *)(r2 + 0);"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/*
+ * Shared must_write when (callsite, depth) instance is reused.
+ * Main calls fwd_to_stale_wr at two sites. fwd_to_stale_wr calls
+ * stale_wr_leaf at a single internal callsite. Both calls share
+ * stale_wr_leaf's (callsite, depth) instance.
+ *
+ * Call 1: stale_wr_leaf(map_value, fp-8) writes map, reads fp-8.
+ * Call 2: stale_wr_leaf(fp-8, fp-8) writes fp-8, reads fp-8.
+ *
+ * The analysis can't presume that stale_wr_leaf() always writes fp-8;
+ * it must conservatively join the must_write masks computed for both
+ * calls.
+ */
+SEC("socket")
+__success
+__naked void stale_must_write_cross_callsite(void)
+{
+ asm volatile (
+ "*(u64 *)(r10 - 8) = 0;"
+ /* Call 1: map_value write, fp-8 read (processed second in PO) */
+ "*(u32 *)(r10 - 16) = 0;"
+ "r1 = %[map] ll;"
+ "r2 = r10;"
+ "r2 += -16;"
+ "call %[bpf_map_lookup_elem];"
+ "if r0 == 0 goto 1f;"
+ "r1 = r0;"
+ "r2 = r10;"
+ "r2 += -8;"
+ "call fwd_to_stale_wr;"
+ /* Call 2: fp-8 write, fp-8 read (processed first in PO) */
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = r1;"
+ "call fwd_to_stale_wr;"
+"1:"
+ "r0 = 0;"
+ "exit;"
+ :: __imm_addr(map),
+ __imm(bpf_map_lookup_elem)
+ : __clobber_all);
+}
+
+static __used __naked void fwd_to_stale_wr(void)
+{
+ asm volatile (
+ "call stale_wr_leaf;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+static __used __naked void stale_wr_leaf(void)
+{
+ asm volatile (
+ "*(u64 *)(r1 + 0) = 42;"
+ "r0 = *(u64 *)(r2 + 0);"
+ "exit;"
+ ::: __clobber_all);
+}
+
+#ifdef CAN_USE_LOAD_ACQ_STORE_REL
+
+SEC("socket")
+__log_level(2)
+__success
+__msg("*(u64 *)(r0 +0) = 42 ; def: fp0-16")
+__naked void load_acquire_dont_clear_dst(void)
+{
+ asm volatile (
+ "r0 = r10;"
+ "r0 += -16;"
+ "*(u64 *)(r0 + 0) = r0;" /* fp[-16] == &fp[-16] */
+ ".8byte %[load_acquire_insn];" /* load_acquire is a special case for BPF_STX, */
+ "r0 = *(u64 *)(r10 - 16);" /* it shouldn't clear tracking info for */
+ "*(u64 *)(r0 + 0) = 42;" /* dst register, r0 in this case. */
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_insn(load_acquire_insn,
+ BPF_ATOMIC_OP(BPF_DW, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_0, 0))
+ : __clobber_all);
+}
+
+#endif /* CAN_USE_LOAD_ACQ_STORE_REL */
+
+SEC("socket")
+__success
+__naked void imprecise_fill_loses_cross_frame(void)
+{
+ asm volatile (
+ "*(u64 *)(r10 - 8) = 0;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call imprecise_fill_cross_frame;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+static __used __naked void imprecise_fill_cross_frame(void)
+{
+ asm volatile (
+ /* spill &caller_fp-8 to callee's fp-8 */
+ "*(u64 *)(r10 - 8) = r1;"
+ /* imprecise FP pointer in r1 */
+ "r1 = r10;"
+ "r2 = -8;"
+ "r1 += r2;"
+ /* load from imprecise offset. fill_from_stack returns
+ * ARG_IMPRECISE{mask=BIT(1)}, losing frame 0
+ */
+ "r1 = *(u64 *)(r1 + 0);"
+ /* read caller's fp-8 through loaded pointer, should mark fp0-8 live */
+ "r0 = *(u64 *)(r1 + 0);"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/* Test that spill_to_stack with multi-offset dst (sz=8) joins instead
+ * of overwriting. r1 has offsets [-8, -16]. Both slots hold FP-derived
+ * pointers. Writing through r1 should join *val with existing values,
+ * not destroy them.
+ *
+ * fp-8 = &fp-24
+ * fp-16 = &fp-32
+ * r1 = fp-8 or fp-16 (two offsets from branch)
+ * *(u64 *)(r1 + 0) = &fp-24 -- writes to one slot, other untouched
+ * r0 = *(u64 *)(r10 - 16) -- fill from fp-16
+ * r0 = *(u64 *)(r0 + 0) -- deref: should produce use
+ */
+SEC("socket")
+__log_level(2)
+__success
+__msg("20: (79) r0 = *(u64 *)(r10 -16)")
+__msg("21: (79) r0 = *(u64 *)(r0 +0) ; use: fp0-24 fp0-32")
+__naked void spill_join_with_multi_off(void)
+{
+ asm volatile (
+ /* fp-8 = &fp-24, fp-16 = &fp-32 (different pointers) */
+ "*(u64 *)(r10 - 24) = 0;"
+ "*(u64 *)(r10 - 32) = 0;"
+ "r1 = r10;"
+ "r1 += -24;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "r1 = r10;"
+ "r1 += -32;"
+ "*(u64 *)(r10 - 16) = r1;"
+ /* create r1 with two candidate offsets: fp-8 or fp-16 */
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == 0 goto 1f;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "goto 2f;"
+"1:"
+ "r1 = r10;"
+ "r1 += -16;"
+"2:"
+ /* write &fp-24 through multi-offset r1: hits one slot, other untouched */
+ "r2 = r10;"
+ "r2 += -24;"
+ "*(u64 *)(r1 + 0) = r2;"
+ /* read back *fp-8 and *fp-16 */
+ "r0 = *(u64 *)(r10 - 8);"
+ "r0 = *(u64 *)(r0 + 0);"
+ "r0 = *(u64 *)(r10 - 16);"
+ "r0 = *(u64 *)(r0 + 0);"
+ "exit;"
+ :: __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
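+
+/*
+ * Illustrative model with a hypothetical name: a store through a
+ * destination with several candidate slots may land in any one of them,
+ * so each candidate's tracked value set must become the union of its
+ * previous contents and the stored value -- a join, not an overwrite.
+ */
+static __used __u64 join_slot_values(__u64 old_set, __u64 stored_set)
+{
+	return old_set | stored_set; /* may-write keeps both possibilities */
+}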
+
+/* Test that spill_to_stack with imprecise dst (off_cnt == 0, sz=8)
+ * joins instead of overwriting. Use "r2 = -8; r1 += r2" to make
+ * arg tracking lose offset precision while the main verifier keeps
+ * r1 as PTR_TO_STACK with fixed offset. Both slots hold FP-derived
+ * pointers. Writing through r1 should join *val with existing
+ * values, not destroy them.
+ *
+ * fp-8 = &fp-24
+ * fp-16 = &fp-32
+ * r1 = fp-8 (imprecise to arg tracking)
+ * *(u64 *)(r1 + 0) = &fp-24 -- since r1 is imprecise, this adds &fp-24
+ * to the set of possible values for all slots,
+ * hence the values at fp-16 become [fp-24, fp-32]
+ * r0 = *(u64 *)(r10 - 16)
+ * r0 = *(u64 *)(r0 + 0) -- deref: should produce use of fp-24 or fp-32
+ */
+SEC("socket")
+__log_level(2)
+__success
+__msg("15: (79) r0 = *(u64 *)(r0 +0) ; use: fp0-24 fp0-32")
+__naked void spill_join_with_imprecise_off(void)
+{
+ asm volatile (
+ "*(u64 *)(r10 - 24) = 0;"
+ "*(u64 *)(r10 - 32) = 0;"
+ "r1 = r10;"
+ "r1 += -24;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "r1 = r10;"
+ "r1 += -32;"
+ "*(u64 *)(r10 - 16) = r1;"
+ /* r1 = fp-8 but arg tracking sees off_cnt == 0 */
+ "r1 = r10;"
+ "r2 = -8;"
+ "r1 += r2;"
+ /* write through imprecise r1 */
+ "r3 = r10;"
+ "r3 += -24;"
+ "*(u64 *)(r1 + 0) = r3;"
+ /* read back fp-16: at_stack should still track &fp-32 */
+ "r0 = *(u64 *)(r10 - 16);"
+ /* deref: should produce use for fp-32 */
+ "r0 = *(u64 *)(r0 + 0);"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
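+
+/*
+ * Note: with an imprecise destination (off_cnt == 0) the same join rule
+ * applies to every tracked slot, which is why fp-16 above ends up with
+ * the value set [fp-24, fp-32] instead of being overwritten.
+ */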
diff --git a/tools/testing/selftests/bpf/progs/verifier_liveness_exp.c b/tools/testing/selftests/bpf/progs/verifier_liveness_exp.c
new file mode 100644
index 000000000000..b058de623200
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_liveness_exp.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2026 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+/*
+ * Exponential complexity in analyze_subprog() liveness analysis.
+ *
+ * analyze_subprog() recurses into each call site that passes FP-derived
+ * arguments, creating a unique func_instance per (callsite, depth).
+ * There is no memoization for callees reached with equivalent entry args.
+ * Even if memoization were added, it can be defeated by passing a distinct
+ * Even if memoization were added, it could be defeated by passing a distinct
+ * r1=fp-8, r1=fp-16, ... r1=fp-400 produce 50 unique cache keys per level.
+ *
+ * This test chains 8 subprograms (the MAX_CALL_FRAMES limit). Each
+ * intermediate function calls the next one 50 times, each time with a
+ * different FP-relative offset in r1.
+ *
+ * Without complexity limits in analyze_subprog(), the resulting 50^7 ~ 7.8 * 10^11
+ * recursive analyze_subprog() calls will cause a CPU soft lockup or OOM.
+ *
+ * The BPF program itself is ~1200 instructions and perfectly valid.
+ */
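+
+/*
+ * Back-of-the-envelope: level k of the call chain is entered 50^k times,
+ * so the total work is sum(50^k, k=1..7), dominated by the 50^7 leaf
+ * term quoted above.
+ */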
+
+char _license[] SEC("license") = "GPL";
+
+/* Call fn with r1 = r10 - off (a unique FP-derived arg per call site) */
+#define C(fn, off) "r1 = r10;" \
+ "r1 += -" #off ";" \
+ "call " #fn ";"
+
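+/*
+ * For reference, C(exp_sub7, 8) expands to the string literals:
+ *   "r1 = r10;" "r1 += -8;" "call exp_sub7;"
+ */
+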
+/* 50 calls, each with a distinct FP offset: -8, -16, ... -400 */
+#define CALLS_50(fn) \
+ C(fn, 8) C(fn, 16) C(fn, 24) C(fn, 32) C(fn, 40) \
+ C(fn, 48) C(fn, 56) C(fn, 64) C(fn, 72) C(fn, 80) \
+ C(fn, 88) C(fn, 96) C(fn, 104) C(fn, 112) C(fn, 120) \
+ C(fn, 128) C(fn, 136) C(fn, 144) C(fn, 152) C(fn, 160) \
+ C(fn, 168) C(fn, 176) C(fn, 184) C(fn, 192) C(fn, 200) \
+ C(fn, 208) C(fn, 216) C(fn, 224) C(fn, 232) C(fn, 240) \
+ C(fn, 248) C(fn, 256) C(fn, 264) C(fn, 272) C(fn, 280) \
+ C(fn, 288) C(fn, 296) C(fn, 304) C(fn, 312) C(fn, 320) \
+ C(fn, 328) C(fn, 336) C(fn, 344) C(fn, 352) C(fn, 360) \
+ C(fn, 368) C(fn, 376) C(fn, 384) C(fn, 392) C(fn, 400)
+
+/* Leaf: depth 7, no further calls */
+__naked __noinline __used
+static unsigned long exp_sub7(void)
+{
+ asm volatile (
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/* depth 6 -> calls exp_sub7 x50 with distinct offsets */
+__naked __noinline __used
+static unsigned long exp_sub6(void)
+{
+ asm volatile (
+ CALLS_50(exp_sub7)
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/* depth 5 -> calls exp_sub6 x50 */
+__naked __noinline __used
+static unsigned long exp_sub5(void)
+{
+ asm volatile (
+ CALLS_50(exp_sub6)
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/* depth 4 -> calls exp_sub5 x50 */
+__naked __noinline __used
+static unsigned long exp_sub4(void)
+{
+ asm volatile (
+ CALLS_50(exp_sub5)
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/* depth 3 -> calls exp_sub4 x50 */
+__naked __noinline __used
+static unsigned long exp_sub3(void)
+{
+ asm volatile (
+ CALLS_50(exp_sub4)
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/* depth 2 -> calls exp_sub3 x50 */
+__naked __noinline __used
+static unsigned long exp_sub2(void)
+{
+ asm volatile (
+ CALLS_50(exp_sub3)
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/* depth 1 -> calls exp_sub2 x50 */
+__naked __noinline __used
+static unsigned long exp_sub1(void)
+{
+ asm volatile (
+ CALLS_50(exp_sub2)
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/*
+ * Entry: depth 0. Calls exp_sub1 50 times, each with a distinct
+ * FP offset in r1. Every call site produces a unique arg_track,
+ * defeating any memoization keyed on entry args.
+ */
+SEC("?raw_tp")
+__failure __log_level(2)
+__msg("liveness analysis exceeded complexity limit")
+__naked int liveness_exponential_complexity(void)
+{
+ asm volatile (
+ CALLS_50(exp_sub1)
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
diff --git a/tools/testing/selftests/bpf/progs/verifier_loops1.c b/tools/testing/selftests/bpf/progs/verifier_loops1.c
index fbdde80e7b90..d248ce877f14 100644
--- a/tools/testing/selftests/bpf/progs/verifier_loops1.c
+++ b/tools/testing/selftests/bpf/progs/verifier_loops1.c
@@ -138,8 +138,7 @@ l0_%=: exit; \
SEC("tracepoint")
__description("bounded recursion")
__failure
-/* verifier limitation in detecting max stack depth */
-__msg("the call stack of 8 frames is too deep !")
+__msg("recursive call from")
__naked void bounded_recursion(void)
{
asm volatile (" \
diff --git a/tools/testing/selftests/bpf/progs/verifier_meta_access.c b/tools/testing/selftests/bpf/progs/verifier_meta_access.c
index d81722fb5f19..62235f032ffe 100644
--- a/tools/testing/selftests/bpf/progs/verifier_meta_access.c
+++ b/tools/testing/selftests/bpf/progs/verifier_meta_access.c
@@ -27,7 +27,7 @@ l0_%=: r0 = 0; \
SEC("xdp")
__description("meta access, test2")
-__failure __msg("invalid access to packet, off=-8")
+__failure __msg("R0 min value is negative")
__naked void meta_access_test2(void)
{
asm volatile (" \
diff --git a/tools/testing/selftests/bpf/progs/verifier_private_stack.c b/tools/testing/selftests/bpf/progs/verifier_private_stack.c
index 1ecd34ebde19..646e8ef82051 100644
--- a/tools/testing/selftests/bpf/progs/verifier_private_stack.c
+++ b/tools/testing/selftests/bpf/progs/verifier_private_stack.c
@@ -170,11 +170,11 @@ __jited(" mrs x10, TPIDR_EL{{[0-1]}}")
__jited(" add x27, x27, x10")
__jited(" add x25, x27, {{.*}}")
__jited(" bl 0x{{.*}}")
-__jited(" add x7, x0, #0x0")
+__jited(" mov x7, x0")
__jited(" mov x0, #0x2a")
__jited(" str x0, [x27]")
__jited(" bl 0x{{.*}}")
-__jited(" add x7, x0, #0x0")
+__jited(" mov x7, x0")
__jited(" mov x7, #0x0")
__jited(" ldp x25, x27, [sp], {{.*}}")
__naked void private_stack_callback(void)
@@ -220,7 +220,7 @@ __jited(" mov x0, #0x2a")
__jited(" str x0, [x27]")
__jited(" mov x0, #0x0")
__jited(" bl 0x{{.*}}")
-__jited(" add x7, x0, #0x0")
+__jited(" mov x7, x0")
__jited(" ldp x27, x28, [sp], #0x10")
int private_stack_exception_main_prog(void)
{
@@ -258,7 +258,7 @@ __jited(" add x25, x27, {{.*}}")
__jited(" mov x0, #0x2a")
__jited(" str x0, [x27]")
__jited(" bl 0x{{.*}}")
-__jited(" add x7, x0, #0x0")
+__jited(" mov x7, x0")
__jited(" ldp x27, x28, [sp], #0x10")
int private_stack_exception_sub_prog(void)
{
diff --git a/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
index 58c7704d61cd..70ae14d6084f 100644
--- a/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
+++ b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
@@ -264,13 +264,13 @@ void precision_many_frames__bar(void)
*/
SEC("socket")
__success __log_level(2)
-__msg("11: (0f) r2 += r1")
+__msg("12: (0f) r2 += r1")
/* foo frame */
-__msg("frame1: regs=r1 stack= before 10: (bf) r2 = r10")
-__msg("frame1: regs=r1 stack= before 9: (25) if r1 > 0x7 goto pc+0")
-__msg("frame1: regs=r1 stack=-8,-16 before 8: (7b) *(u64 *)(r10 -16) = r1")
-__msg("frame1: regs=r1 stack=-8 before 7: (7b) *(u64 *)(r10 -8) = r1")
-__msg("frame1: regs=r1 stack= before 4: (85) call pc+2")
+__msg("frame1: regs=r1 stack= before 11: (bf) r2 = r10")
+__msg("frame1: regs=r1 stack= before 10: (25) if r1 > 0x7 goto pc+0")
+__msg("frame1: regs=r1 stack=-8,-16 before 9: (7b) *(u64 *)(r10 -16) = r1")
+__msg("frame1: regs=r1 stack=-8 before 8: (7b) *(u64 *)(r10 -8) = r1")
+__msg("frame1: regs=r1 stack= before 4: (85) call pc+3")
/* main frame */
__msg("frame0: regs=r1 stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r1")
__msg("frame0: regs=r1 stack= before 2: (bf) r1 = r0")
@@ -286,6 +286,7 @@ __naked void precision_stack(void)
"r1 = r0;"
"*(u64*)(r10 - 8) = r1;"
"call precision_stack__foo;"
+ "r0 = *(u64*)(r10 - 8);"
"r0 = 0;"
"exit;"
:
@@ -309,6 +310,8 @@ void precision_stack__foo(void)
*/
"r2 = r10;"
"r2 += r1;"
+ "r0 = *(u64*)(r10 - 8);"
+ "r0 = *(u64*)(r10 - 16);"
"exit"
::: __clobber_all);
}
@@ -592,10 +595,10 @@ __naked void check_ids_in_regsafe_2(void)
*/
SEC("socket")
__success __log_level(2)
-__msg("11: (1d) if r3 == r4 goto pc+0")
+__msg("14: (1d) if r3 == r4 goto pc+0")
__msg("frame 0: propagating r3,r4")
-__msg("11: safe")
-__msg("processed 15 insns")
+__msg("14: safe")
+__msg("processed 18 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void no_scalar_id_for_const(void)
{
@@ -605,6 +608,7 @@ __naked void no_scalar_id_for_const(void)
"if r0 > 7 goto l0_%=;"
/* possibly generate same scalar ids for r3 and r4 */
"r1 = 0;"
+ "r1 ^= r1;" /* prevent bpf_prune_dead_branches from folding the branch */
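+	/* presumably the pruning pass does not track values through ALU
+	 * ops, so after the xor r1 is no longer seen as a known constant
+	 * even though it is still 0 at runtime
+	 */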
"r1 = r1;"
"r3 = r1;"
"r4 = r1;"
@@ -612,7 +616,9 @@ __naked void no_scalar_id_for_const(void)
"l0_%=:"
/* possibly generate different scalar ids for r3 and r4 */
"r1 = 0;"
+ "r1 ^= r1;"
"r2 = 0;"
+ "r2 ^= r2;"
"r3 = r1;"
"r4 = r2;"
"l1_%=:"
@@ -628,10 +634,10 @@ __naked void no_scalar_id_for_const(void)
/* Same as no_scalar_id_for_const() but for 32-bit values */
SEC("socket")
__success __log_level(2)
-__msg("11: (1e) if w3 == w4 goto pc+0")
+__msg("14: (1e) if w3 == w4 goto pc+0")
__msg("frame 0: propagating r3,r4")
-__msg("11: safe")
-__msg("processed 15 insns")
+__msg("14: safe")
+__msg("processed 18 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void no_scalar_id_for_const32(void)
{
@@ -641,6 +647,7 @@ __naked void no_scalar_id_for_const32(void)
"if r0 > 7 goto l0_%=;"
/* possibly generate same scalar ids for r3 and r4 */
"w1 = 0;"
+ "w1 ^= w1;" /* prevent bpf_prune_dead_branches from folding the branch */
"w1 = w1;"
"w3 = w1;"
"w4 = w1;"
@@ -648,11 +655,13 @@ __naked void no_scalar_id_for_const32(void)
"l0_%=:"
/* possibly generate different scalar ids for r3 and r4 */
"w1 = 0;"
+ "w1 ^= w1;"
"w2 = 0;"
+ "w2 ^= w2;"
"w3 = w1;"
"w4 = w2;"
"l1_%=:"
- /* predictable jump, marks r1 and r2 precise */
+ /* predictable jump, marks r3 and r4 precise */
"if w3 == w4 goto +0;"
"r0 = 0;"
"exit;"
@@ -796,9 +805,9 @@ __success __log_level(2)
/* The exit instruction should be reachable from two states,
* use two matches and "processed .. insns" to ensure this.
*/
-__msg("15: (95) exit")
-__msg("15: (95) exit")
-__msg("processed 20 insns")
+__msg("16: (95) exit")
+__msg("16: (95) exit")
+__msg("processed 22 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void two_old_ids_one_cur_id(void)
{
@@ -829,6 +838,11 @@ __naked void two_old_ids_one_cur_id(void)
"r2 = r10;"
"r2 += r6;"
"r2 += r7;"
+ /*
+	 * keep r8 and r9 live, otherwise r6->id and r7->id become
+	 * singular and are reset to zero before the 'if r6 > r7' check
+ */
+ "r9 += r8;"
"exit;"
:
: __imm(bpf_ktime_get_ns)
diff --git a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
index 7a13dbd794b2..6bc721accbae 100644
--- a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
+++ b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
@@ -650,13 +650,13 @@ __msg("mark_precise: frame0: last_idx 9 first_idx 7 subseq_idx -1")
__msg("mark_precise: frame0: regs=r2 stack= before 8: (79) r2 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: regs= stack=-8 before 7: (bf) r1 = r6")
/* note, fp-8 is precise, fp-16 is not yet precise, we'll get there */
-__msg("mark_precise: frame0: parent state regs= stack=-8: R0=1 R1=ctx() R6=map_value(map=.data.two_byte_,ks=4,vs=2) R10=fp0 fp-8=P1 fp-16=1")
+__msg("mark_precise: frame0: parent state regs= stack=-8: R6=map_value(map=.data.two_byte_,ks=4,vs=2) R10=fp0 fp-8=P1 fp-16=1")
__msg("mark_precise: frame0: last_idx 6 first_idx 3 subseq_idx 7")
__msg("mark_precise: frame0: regs= stack=-8 before 6: (05) goto pc+0")
__msg("mark_precise: frame0: regs= stack=-8 before 5: (7b) *(u64 *)(r10 -16) = r0")
__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r0 = 1")
__msg("mark_precise: frame0: regs= stack=-8 before 3: (7a) *(u64 *)(r10 -8) = 1")
-__msg("10: R1=map_value(map=.data.two_byte_,ks=4,vs=2,off=1) R2=1")
+__msg("10: R1=map_value(map=.data.two_byte_,ks=4,vs=2,imm=1) R2=1")
/* validate load from fp-16, which was initialized using BPF_STX_MEM */
__msg("12: (79) r2 = *(u64 *)(r10 -16) ; R2=1 R10=fp0 fp-16=1")
__msg("13: (0f) r1 += r2")
@@ -668,12 +668,12 @@ __msg("mark_precise: frame0: regs= stack=-16 before 9: (0f) r1 += r2")
__msg("mark_precise: frame0: regs= stack=-16 before 8: (79) r2 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: regs= stack=-16 before 7: (bf) r1 = r6")
/* now both fp-8 and fp-16 are precise, very good */
-__msg("mark_precise: frame0: parent state regs= stack=-16: R0=1 R1=ctx() R6=map_value(map=.data.two_byte_,ks=4,vs=2) R10=fp0 fp-8=P1 fp-16=P1")
+__msg("mark_precise: frame0: parent state regs= stack=-16: R6=map_value(map=.data.two_byte_,ks=4,vs=2) R10=fp0 fp-8=P1 fp-16=P1")
__msg("mark_precise: frame0: last_idx 6 first_idx 3 subseq_idx 7")
__msg("mark_precise: frame0: regs= stack=-16 before 6: (05) goto pc+0")
__msg("mark_precise: frame0: regs= stack=-16 before 5: (7b) *(u64 *)(r10 -16) = r0")
__msg("mark_precise: frame0: regs=r0 stack= before 4: (b7) r0 = 1")
-__msg("14: R1=map_value(map=.data.two_byte_,ks=4,vs=2,off=1) R2=1")
+__msg("14: R1=map_value(map=.data.two_byte_,ks=4,vs=2,imm=1) R2=1")
__naked void stack_load_preserves_const_precision(void)
{
asm volatile (
@@ -726,13 +726,13 @@ __msg("9: (0f) r1 += r2")
__msg("mark_precise: frame0: last_idx 9 first_idx 7 subseq_idx -1")
__msg("mark_precise: frame0: regs=r2 stack= before 8: (61) r2 = *(u32 *)(r10 -8)")
__msg("mark_precise: frame0: regs= stack=-8 before 7: (bf) r1 = r6")
-__msg("mark_precise: frame0: parent state regs= stack=-8: R0=1 R1=ctx() R6=map_value(map=.data.two_byte_,ks=4,vs=2) R10=fp0 fp-8=????P1 fp-16=????1")
+__msg("mark_precise: frame0: parent state regs= stack=-8: R6=map_value(map=.data.two_byte_,ks=4,vs=2) R10=fp0 fp-8=????P1 fp-16=????1")
__msg("mark_precise: frame0: last_idx 6 first_idx 3 subseq_idx 7")
__msg("mark_precise: frame0: regs= stack=-8 before 6: (05) goto pc+0")
__msg("mark_precise: frame0: regs= stack=-8 before 5: (63) *(u32 *)(r10 -16) = r0")
__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r0 = 1")
__msg("mark_precise: frame0: regs= stack=-8 before 3: (62) *(u32 *)(r10 -8) = 1")
-__msg("10: R1=map_value(map=.data.two_byte_,ks=4,vs=2,off=1) R2=1")
+__msg("10: R1=map_value(map=.data.two_byte_,ks=4,vs=2,imm=1) R2=1")
/* validate load from fp-16, which was initialized using BPF_STX_MEM */
__msg("12: (61) r2 = *(u32 *)(r10 -16) ; R2=1 R10=fp0 fp-16=????1")
__msg("13: (0f) r1 += r2")
@@ -743,12 +743,12 @@ __msg("mark_precise: frame0: regs= stack=-16 before 10: (73) *(u8 *)(r1 +0) = r2
__msg("mark_precise: frame0: regs= stack=-16 before 9: (0f) r1 += r2")
__msg("mark_precise: frame0: regs= stack=-16 before 8: (61) r2 = *(u32 *)(r10 -8)")
__msg("mark_precise: frame0: regs= stack=-16 before 7: (bf) r1 = r6")
-__msg("mark_precise: frame0: parent state regs= stack=-16: R0=1 R1=ctx() R6=map_value(map=.data.two_byte_,ks=4,vs=2) R10=fp0 fp-8=????P1 fp-16=????P1")
+__msg("mark_precise: frame0: parent state regs= stack=-16: R6=map_value(map=.data.two_byte_,ks=4,vs=2) R10=fp0 fp-8=????P1 fp-16=????P1")
__msg("mark_precise: frame0: last_idx 6 first_idx 3 subseq_idx 7")
__msg("mark_precise: frame0: regs= stack=-16 before 6: (05) goto pc+0")
__msg("mark_precise: frame0: regs= stack=-16 before 5: (63) *(u32 *)(r10 -16) = r0")
__msg("mark_precise: frame0: regs=r0 stack= before 4: (b7) r0 = 1")
-__msg("14: R1=map_value(map=.data.two_byte_,ks=4,vs=2,off=1) R2=1")
+__msg("14: R1=map_value(map=.data.two_byte_,ks=4,vs=2,imm=1) R2=1")
__naked void stack_load_preserves_const_precision_subreg(void)
{
asm volatile (
@@ -780,6 +780,8 @@ __naked void stack_load_preserves_const_precision_subreg(void)
"r1 += r2;"
"*(u8 *)(r1 + 0) = r2;" /* this should be fine */
+ "r2 = *(u64 *)(r10 -8);" /* keep slots alive */
+ "r2 = *(u64 *)(r10 -16);"
"r0 = 0;"
"exit;"
:
@@ -1279,4 +1281,82 @@ __naked void stack_noperfmon_spill_32bit_onto_64bit_slot(void)
: __clobber_all);
}
+/*
+ * stacksafe(): check if 32-bit scalar spill in old state is considered
+ * equivalent to STACK_MISC in cur state.
+ * 32-bit scalar spill creates slot[0-3] = STACK_MISC, slot[4-7] = STACK_SPILL.
+ * Without 32-bit spill support in stacksafe(), the STACK_SPILL vs STACK_MISC
+ * mismatch at slot[4] causes pruning to fail.
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("8: (79) r1 = *(u64 *)(r10 -8)")
+__msg("8: safe")
+__msg("processed 11 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void old_imprecise_scalar32_vs_cur_stack_misc(void)
+{
+ asm volatile(
+ /* get a random value for branching */
+ "call %[bpf_ktime_get_ns];"
+ "if r0 == 0 goto 1f;"
+ /* conjure 32-bit scalar spill at fp-8 */
+ "r0 = 42;"
+ "*(u32*)(r10 - 8) = r0;"
+ "goto 2f;"
+"1:"
+ /* conjure STACK_MISC at fp-8 */
+ "call %[bpf_ktime_get_ns];"
+ "*(u16*)(r10 - 8) = r0;"
+ "*(u16*)(r10 - 6) = r0;"
+"2:"
+ /* read fp-8, should be considered safe on second visit */
+ "r1 = *(u64*)(r10 - 8);"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
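+
+/*
+ * Slot byte types at fp-8 for the two paths above:
+ *   32-bit spill path: slot[0-3] = STACK_MISC, slot[4-7] = STACK_SPILL
+ *   STACK_MISC path:   slot[0-7] = STACK_MISC
+ * For the "8: safe" prune to happen, stacksafe() must treat the old
+ * state's 32-bit spill bytes as compatible with STACK_MISC in cur.
+ */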
+
+SEC("raw_tp")
+__success
+__naked void var_off_write_over_scalar_spill(void)
+{
+ asm volatile (
+ /* Get an unknown value bounded to {0, 4} */
+ "call %[bpf_ktime_get_ns];"
+ "r6 = r0;"
+ "r6 &= 4;"
+
+ /* Spill a scalar to fp-16 */
+ "r7 = 0xdeadbeef00000000 ll;"
+ "*(u64 *)(r10 - 16) = r7;"
+
+ /*
+ * Variable-offset 4-byte write covering [fp-12, fp-4).
+ * This touches stype[3..0] of the spill slot at fp-16 but
+ * leaves stype[7..4] as STACK_SPILL. check_stack_write_var_off()
+ * must scrub the entire slot when setting spilled_ptr to NOT_INIT,
+ * otherwise a subsequent sub-register fill sees a non-scalar
+ * spilled_ptr and is rejected.
+ */
+ "r8 = r10;"
+ "r8 += r6;"
+ "r8 += -12;"
+ "r9 = 0;"
+ "*(u32 *)(r8 + 0) = r9;"
+
+ /*
+ * 4-byte read from fp-16. Without the fix this fails with
+ * "invalid size of register fill" because is_spilled_reg()
+ * sees STACK_SPILL while spilled_ptr.type == NOT_INIT.
+ */
+ "r0 = *(u32 *)(r10 - 16);"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
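+
+/*
+ * Worked bounds for the store above: r6 is masked to {0, 4}, so
+ * r8 = fp - 12 + r6 is either fp-12 or fp-8 and the 4-byte store hits
+ * [fp-12, fp-8) or [fp-8, fp-4). Only the fp-12 case overlaps the spill
+ * slot at [fp-16, fp-8), but a variable-offset write must be handled
+ * conservatively across the whole [fp-12, fp-4) range.
+ */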
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_stack_ptr.c b/tools/testing/selftests/bpf/progs/verifier_stack_ptr.c
index 24aabc6083fd..8e8cf8232255 100644
--- a/tools/testing/selftests/bpf/progs/verifier_stack_ptr.c
+++ b/tools/testing/selftests/bpf/progs/verifier_stack_ptr.c
@@ -37,7 +37,7 @@ __naked void ptr_to_stack_store_load(void)
SEC("socket")
__description("PTR_TO_STACK store/load - bad alignment on off")
-__failure __msg("misaligned stack access off 0+-8+2 size 8")
+__failure __msg("misaligned stack access off -8+2 size 8")
__failure_unpriv
__naked void load_bad_alignment_on_off(void)
{
@@ -53,7 +53,7 @@ __naked void load_bad_alignment_on_off(void)
SEC("socket")
__description("PTR_TO_STACK store/load - bad alignment on reg")
-__failure __msg("misaligned stack access off 0+-10+8 size 8")
+__failure __msg("misaligned stack access off -10+8 size 8")
__failure_unpriv
__naked void load_bad_alignment_on_reg(void)
{
diff --git a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
index 61886ed554de..d21d32f6a676 100644
--- a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
+++ b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
@@ -282,7 +282,7 @@ __msg("mark_precise: frame0: regs=r0,r6 stack= before 10: (bf) r6 = r0")
__msg("mark_precise: frame0: regs=r0 stack= before 9: (85) call bpf_loop")
/* State entering callback body popped from states stack */
__msg("from 9 to 17: frame1:")
-__msg("17: frame1: R1=scalar() R2=0 R10=fp0 cb")
+__msg("17: frame1: R10=fp0 cb")
__msg("17: (b7) r0 = 0")
__msg("18: (95) exit")
__msg("returning from callee:")
@@ -411,7 +411,7 @@ __msg("mark_precise: frame0: regs=r6 stack= before 5: (b7) r1 = 1")
__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
/* State entering callback body popped from states stack */
__msg("from 9 to 15: frame1:")
-__msg("15: frame1: R1=scalar() R2=0 R10=fp0 cb")
+__msg("15: frame1: R10=fp0 cb")
__msg("15: (b7) r0 = 0")
__msg("16: (95) exit")
__msg("returning from callee:")
@@ -567,7 +567,7 @@ __msg("mark_precise: frame0: regs= stack=-8 before 5: (7b) *(u64 *)(r10 -8) = r6
__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
/* State entering callback body popped from states stack */
__msg("from 10 to 17: frame1:")
-__msg("17: frame1: R1=scalar() R2=0 R10=fp0 cb")
+__msg("17: frame1: R10=fp0 cb")
__msg("17: (b7) r0 = 0")
__msg("18: (95) exit")
__msg("returning from callee:")
@@ -681,7 +681,7 @@ __msg("mark_precise: frame0: last_idx 10 first_idx 7 subseq_idx -1")
__msg("mark_precise: frame0: regs=r7 stack= before 9: (bf) r1 = r8")
__msg("mark_precise: frame0: regs=r7 stack= before 8: (27) r7 *= 4")
__msg("mark_precise: frame0: regs=r7 stack= before 7: (79) r7 = *(u64 *)(r10 -8)")
-__msg("mark_precise: frame0: parent state regs= stack=-8: R0=2 R6=1 R8=map_value(map=.data.vals,ks=4,vs=16) R10=fp0 fp-8=P1")
+__msg("mark_precise: frame0: parent state regs= stack=-8: R8=map_value(map=.data.vals,ks=4,vs=16) R10=fp0 fp-8=P1")
__msg("mark_precise: frame0: last_idx 18 first_idx 0 subseq_idx 7")
__msg("mark_precise: frame0: regs= stack=-8 before 18: (95) exit")
__msg("mark_precise: frame1: regs= stack= before 17: (0f) r0 += r2")
diff --git a/tools/testing/selftests/bpf/progs/verifier_subprog_topo.c b/tools/testing/selftests/bpf/progs/verifier_subprog_topo.c
new file mode 100644
index 000000000000..e2b9d14bbc3d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_subprog_topo.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2026 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+/* linear chain main -> A -> B */
+__naked __noinline __used
+static unsigned long linear_b(void)
+{
+ asm volatile (
+ "r0 = 42;"
+ "exit;"
+ );
+}
+
+__naked __noinline __used
+static unsigned long linear_a(void)
+{
+ asm volatile (
+ "call linear_b;"
+ "exit;"
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("topo_order[0] = linear_b")
+__msg("topo_order[1] = linear_a")
+__msg("topo_order[2] = topo_linear")
+__naked int topo_linear(void)
+{
+ asm volatile (
+ "call linear_a;"
+ "exit;"
+ );
+}
+
+/* diamond main -> A, main -> B, A -> C, B -> C */
+__naked __noinline __used
+static unsigned long diamond_c(void)
+{
+ asm volatile (
+ "r0 = 1;"
+ "exit;"
+ );
+}
+
+__naked __noinline __used
+static unsigned long diamond_b(void)
+{
+ asm volatile (
+ "call diamond_c;"
+ "exit;"
+ );
+}
+
+__naked __noinline __used
+static unsigned long diamond_a(void)
+{
+ asm volatile (
+ "call diamond_c;"
+ "exit;"
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("topo_order[0] = diamond_c")
+__msg("topo_order[3] = topo_diamond")
+__naked int topo_diamond(void)
+{
+ asm volatile (
+ "call diamond_a;"
+ "call diamond_b;"
+ "exit;"
+ );
+}
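+
+/*
+ * A callee-first order like the ones checked in this file can be
+ * produced by a DFS postorder over the call graph: a subprog is emitted
+ * only after all of its callees. Minimal sketch with hypothetical names
+ * below; this is documentation only, not the verifier's implementation.
+ */
+#if 0
+#define NR_SUBPROGS 8
+static void dfs_post(int sub, const int adj[NR_SUBPROGS][NR_SUBPROGS],
+		     int *seen, int *order, int *cnt)
+{
+	int callee;
+
+	seen[sub] = 1;
+	for (callee = 0; callee < NR_SUBPROGS; callee++)
+		if (adj[sub][callee] && !seen[callee])
+			dfs_post(callee, adj, seen, order, cnt);
+	order[(*cnt)++] = sub; /* emitted after all callees */
+}
+#endif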
+
+/* main -> global_a (global) -> static_leaf (static, leaf) */
+__naked __noinline __used
+static unsigned long static_leaf(void)
+{
+ asm volatile (
+ "r0 = 7;"
+ "exit;"
+ );
+}
+
+__noinline __used
+int global_a(int x)
+{
+ return static_leaf();
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("topo_order[0] = static_leaf")
+__msg("topo_order[1] = global_a")
+__msg("topo_order[2] = topo_mixed")
+__naked int topo_mixed(void)
+{
+ asm volatile (
+ "r1 = 0;"
+ "call global_a;"
+ "exit;"
+ );
+}
+
+/*
+ * shared static callee from global and main:
+ * main -> shared_leaf (static)
+ * main -> global_b (global) -> shared_leaf (static)
+ */
+__naked __noinline __used
+static unsigned long shared_leaf(void)
+{
+ asm volatile (
+ "r0 = 99;"
+ "exit;"
+ );
+}
+
+__noinline __used
+int global_b(int x)
+{
+ return shared_leaf();
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("topo_order[0] = shared_leaf")
+__msg("topo_order[1] = global_b")
+__msg("topo_order[2] = topo_shared")
+__naked int topo_shared(void)
+{
+ asm volatile (
+ "call shared_leaf;"
+ "r1 = 0;"
+ "call global_b;"
+ "exit;"
+ );
+}
+
+/* duplicate calls to the same subprog */
+__naked __noinline __used
+static unsigned long dup_leaf(void)
+{
+ asm volatile (
+ "r0 = 0;"
+ "exit;"
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("topo_order[0] = dup_leaf")
+__msg("topo_order[1] = topo_dup_calls")
+__naked int topo_dup_calls(void)
+{
+ asm volatile (
+ "call dup_leaf;"
+ "call dup_leaf;"
+ "exit;"
+ );
+}
+
+/* main calls bpf_loop() with loop_cb as the callback */
+static int loop_cb(int idx, void *ctx)
+{
+ return 0;
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("topo_order[0] = loop_cb")
+__msg("topo_order[1] = topo_loop_cb")
+int topo_loop_cb(void)
+{
+ bpf_loop(1, loop_cb, NULL, 0);
+ return 0;
+}
+
+/*
+ * bpf_loop callback calling another subprog
+ * main -> bpf_loop(callback=loop_cb2) -> loop_cb2 -> loop_cb2_leaf
+ */
+__naked __noinline __used
+static unsigned long loop_cb2_leaf(void)
+{
+ asm volatile (
+ "r0 = 0;"
+ "exit;"
+ );
+}
+
+static int loop_cb2(int idx, void *ctx)
+{
+ return loop_cb2_leaf();
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("topo_order[0] = loop_cb2_leaf")
+__msg("topo_order[1] = loop_cb2")
+__msg("topo_order[2] = topo_loop_cb_chain")
+int topo_loop_cb_chain(void)
+{
+ bpf_loop(1, loop_cb2, NULL, 0);
+ return 0;
+}
+
+/* no calls (single subprog) */
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("topo_order[0] = topo_no_calls")
+__naked int topo_no_calls(void)
+{
+ asm volatile (
+ "r0 = 0;"
+ "exit;"
+ );
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_subreg.c b/tools/testing/selftests/bpf/progs/verifier_subreg.c
index be328100ba53..31832a306f91 100644
--- a/tools/testing/selftests/bpf/progs/verifier_subreg.c
+++ b/tools/testing/selftests/bpf/progs/verifier_subreg.c
@@ -823,4 +823,169 @@ __naked void arsh_63_or(void)
: __clobber_all);
}
+SEC("socket")
+__success __retval(42)
+__naked void arsh32_imm1_value(void)
+{
+ asm volatile (" \
+ r0 = 42; \
+ r1 = -2147483648; \
+ w1 s>>= 1; /* r1 = 0xC0000000 */ \
+ r2 = 0xC0000000 ll; \
+ if r1 == r2 goto l0_%=; \
+ r0 /= 0; /* unreachable */ \
+l0_%=: exit; \
+" :
+ :
+ : __clobber_all);
+}
+
+SEC("socket")
+__success __retval(1)
+__naked void lsh32_reg0_zero_extend_check(void)
+{
+ asm volatile (" \
+ r6 = 1; \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x1000000000 ll; \
+ r0 |= r1; \
+ w1 = 0; \
+ w0 <<= w1; /* reg shift by 0 */ \
+ r0 >>= 32; /* must be 0 */ \
+ if r0 == 0 goto l0_%=; \
+ r6 /= 0; /* unreachable */ \
+l0_%=: r0 = r6; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__success __retval(1)
+__naked void rsh32_reg0_zero_extend_check(void)
+{
+ asm volatile (" \
+ r6 = 1; \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x1000000000 ll; \
+ r0 |= r1; \
+ w1 = 0; \
+ w0 >>= w1; /* reg rsh by 0 */ \
+ r0 >>= 32; /* must be 0 */ \
+ if r0 == 0 goto l0_%=; \
+ r6 /= 0; /* unreachable */ \
+l0_%=: r0 = r6; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__success __retval(1)
+__naked void arsh32_reg0_zero_extend_check(void)
+{
+ asm volatile (" \
+ r6 = 1; \
+ call %[bpf_get_prandom_u32]; \
+ r1 = 0x1000000000 ll; \
+ r0 |= r1; \
+ w1 = 0; \
+ w0 s>>= w1; /* reg arsh by 0 */ \
+ r0 >>= 32; /* must be 0 */ \
+ if r0 == 0 goto l0_%=; \
+ r6 /= 0; /* unreachable */ \
+l0_%=: r0 = r6; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__success __retval(42)
+__naked void lsh32_imm31_value(void)
+{
+ asm volatile (" \
+ r0 = 42; \
+ r1 = 1; \
+ w1 <<= 31; /* r1 = 0x80000000 */ \
+ r2 = 0x80000000 ll; \
+ if r1 == r2 goto l0_%=; \
+ r0 /= 0; /* unreachable */ \
+l0_%=: exit; \
+" :
+ :
+ : __clobber_all);
+}
+
+SEC("socket")
+__success __retval(42)
+__naked void rsh32_imm31_value(void)
+{
+ asm volatile (" \
+ r0 = 42; \
+ r1 = -2147483648; /* 0x80000000 */ \
+ w1 >>= 31; /* r1 = 1 */ \
+ if r1 == 1 goto l0_%=; \
+ r0 /= 0; /* unreachable */ \
+l0_%=: exit; \
+" :
+ :
+ : __clobber_all);
+}
+
+SEC("socket")
+__success __retval(42)
+__naked void arsh32_imm31_value(void)
+{
+ asm volatile (" \
+ r0 = 42; \
+ r1 = -2147483648; /* 0x80000000 */ \
+ w1 s>>= 31; /* r1 = 0xFFFFFFFF */ \
+ r2 = 0xFFFFFFFF ll; \
+ if r1 == r2 goto l0_%=; \
+ r0 /= 0; /* unreachable */ \
+l0_%=: exit; \
+" :
+ :
+ : __clobber_all);
+}
+
+SEC("socket")
+__success __retval(1)
+__naked void lsh32_unknown_precise_bounds(void)
+{
+ asm volatile (" \
+ r6 = 1; \
+ call %[bpf_get_prandom_u32]; \
+ w0 &= 3; /* u32: [0, 3] */ \
+ w0 <<= 1; /* u32: [0, 6] */ \
+ if w0 < 7 goto l0_%=; \
+ r6 /= 0; /* unreachable */ \
+l0_%=: r0 = r6; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__success __retval(1)
+__naked void rsh32_unknown_bounds(void)
+{
+ asm volatile (" \
+ r6 = 1; \
+ call %[bpf_get_prandom_u32]; \
+ w0 >>= 28; /* u32: [0, 15] */ \
+ if w0 < 16 goto l0_%=; \
+ r6 /= 0; /* unreachable */ \
+l0_%=: r0 = r6; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_unpriv.c b/tools/testing/selftests/bpf/progs/verifier_unpriv.c
index 8ee1243e62a8..c16f8382cf17 100644
--- a/tools/testing/selftests/bpf/progs/verifier_unpriv.c
+++ b/tools/testing/selftests/bpf/progs/verifier_unpriv.c
@@ -584,7 +584,7 @@ __naked void alu32_mov_u32_const(void)
{
asm volatile (" \
w7 = 0; \
- w7 &= 1; \
+ w7 ^= w7; \
w0 = w7; \
if r0 == 0 goto l0_%=; \
r0 = *(u64*)(r7 + 0); \
@@ -894,7 +894,9 @@ __naked void unpriv_spectre_v1_and_v4_simple(void)
{
asm volatile (" \
r8 = 0; \
+ r8 ^= r8; \
r9 = 0; \
+ r9 ^= r9; \
r0 = r10; \
r1 = 0; \
r2 = r10; \
@@ -932,7 +934,9 @@ __naked void unpriv_ldimm64_spectre_v1_and_v4_simple(void)
{
asm volatile (" \
r8 = 0; \
+ r8 ^= r8; \
r9 = 0; \
+ r9 ^= r9; \
r0 = r10; \
r1 = 0; \
r2 = r10; \
diff --git a/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c b/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c
index af7938ce56cb..b3b701b44550 100644
--- a/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c
+++ b/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c
@@ -346,7 +346,7 @@ l2_%=: r0 = 1; \
SEC("socket")
__description("map access: value_ptr -= known scalar from different maps")
__success __failure_unpriv
-__msg_unpriv("R0 min value is outside of the allowed memory range")
+__msg_unpriv("R0 min value is negative")
__retval(1)
__naked void known_scalar_from_different_maps(void)
{
@@ -683,9 +683,7 @@ l0_%=: r0 = 1; \
SEC("socket")
__description("map access: value_ptr -= known scalar, lower oob arith, test 1")
-__failure __msg("R0 min value is outside of the allowed memory range")
-__failure_unpriv
-__msg_unpriv("R0 pointer arithmetic of map value goes out of range")
+__failure __msg("R0 min value is negative")
__naked void lower_oob_arith_test_1(void)
{
asm volatile (" \
@@ -840,7 +838,7 @@ l0_%=: r0 = 1; \
SEC("socket")
__description("map access: value_ptr += known scalar, 3")
-__failure __msg("invalid access to map value")
+__failure __msg("R0 min value is negative")
__failure_unpriv
__naked void value_ptr_known_scalar_3(void)
{
@@ -1207,7 +1205,7 @@ l0_%=: r0 = 1; \
SEC("socket")
__description("map access: value_ptr -= known scalar")
-__failure __msg("R0 min value is outside of the allowed memory range")
+__failure __msg("R0 min value is negative")
__failure_unpriv
__naked void access_value_ptr_known_scalar(void)
{
diff --git a/tools/testing/selftests/bpf/progs/verifier_xdp_direct_packet_access.c b/tools/testing/selftests/bpf/progs/verifier_xdp_direct_packet_access.c
index df2dfd1b15d1..0b86d95a4133 100644
--- a/tools/testing/selftests/bpf/progs/verifier_xdp_direct_packet_access.c
+++ b/tools/testing/selftests/bpf/progs/verifier_xdp_direct_packet_access.c
@@ -69,7 +69,7 @@ l0_%=: r0 = 0; \
SEC("xdp")
__description("XDP pkt read, pkt_data' > pkt_end, bad access 1")
-__failure __msg("R1 offset is outside of the packet")
+__failure __msg("R1 {{min|max}} value is outside of the allowed memory range")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_end_bad_access_1_1(void)
{
@@ -131,7 +131,7 @@ l0_%=: r0 = 0; \
SEC("xdp")
__description("XDP pkt read, pkt_data' > pkt_end, corner case -1, bad access")
-__failure __msg("R1 offset is outside of the packet")
+__failure __msg("R1 {{min|max}} value is outside of the allowed memory range")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_bad_access_1(void)
{
@@ -173,7 +173,7 @@ l1_%=: r0 = 0; \
SEC("xdp")
__description("XDP pkt read, pkt_end > pkt_data', corner case -1, bad access")
-__failure __msg("R1 offset is outside of the packet")
+__failure __msg("R1 {{min|max}} value is outside of the allowed memory range")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_bad_access_2(void)
{
@@ -279,7 +279,7 @@ l1_%=: r0 = 0; \
SEC("xdp")
__description("XDP pkt read, pkt_data' < pkt_end, corner case -1, bad access")
-__failure __msg("R1 offset is outside of the packet")
+__failure __msg("R1 {{min|max}} value is outside of the allowed memory range")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_bad_access_3(void)
{
@@ -384,7 +384,7 @@ l0_%=: r0 = 0; \
SEC("xdp")
__description("XDP pkt read, pkt_end < pkt_data', bad access 1")
-__failure __msg("R1 offset is outside of the packet")
+__failure __msg("R1 {{min|max}} value is outside of the allowed memory range")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_data_bad_access_1_1(void)
{
@@ -446,7 +446,7 @@ l0_%=: r0 = 0; \
SEC("xdp")
__description("XDP pkt read, pkt_end < pkt_data', corner case -1, bad access")
-__failure __msg("R1 offset is outside of the packet")
+__failure __msg("R1 {{min|max}} value is outside of the allowed memory range")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_bad_access_4(void)
{
@@ -487,7 +487,7 @@ l0_%=: r0 = 0; \
SEC("xdp")
__description("XDP pkt read, pkt_data' >= pkt_end, corner case -1, bad access")
-__failure __msg("R1 offset is outside of the packet")
+__failure __msg("R1 {{min|max}} value is outside of the allowed memory range")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_bad_access_5(void)
{
@@ -590,7 +590,7 @@ l1_%=: r0 = 0; \
SEC("xdp")
__description("XDP pkt read, pkt_end >= pkt_data', bad access 1")
-__failure __msg("R1 offset is outside of the packet")
+__failure __msg("R1 {{min|max}} value is outside of the allowed memory range")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_data_bad_access_1_2(void)
{
@@ -654,7 +654,7 @@ l1_%=: r0 = 0; \
SEC("xdp")
__description("XDP pkt read, pkt_end >= pkt_data', corner case -1, bad access")
-__failure __msg("R1 offset is outside of the packet")
+__failure __msg("R1 {{min|max}} value is outside of the allowed memory range")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_bad_access_6(void)
{
@@ -697,7 +697,7 @@ l1_%=: r0 = 0; \
SEC("xdp")
__description("XDP pkt read, pkt_data' <= pkt_end, bad access 1")
-__failure __msg("R1 offset is outside of the packet")
+__failure __msg("R1 {{min|max}} value is outside of the allowed memory range")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_end_bad_access_1_2(void)
{
@@ -761,7 +761,7 @@ l1_%=: r0 = 0; \
SEC("xdp")
__description("XDP pkt read, pkt_data' <= pkt_end, corner case -1, bad access")
-__failure __msg("R1 offset is outside of the packet")
+__failure __msg("R1 {{min|max}} value is outside of the allowed memory range")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_bad_access_7(void)
{
@@ -803,7 +803,7 @@ l0_%=: r0 = 0; \
SEC("xdp")
__description("XDP pkt read, pkt_end <= pkt_data', corner case -1, bad access")
-__failure __msg("R1 offset is outside of the packet")
+__failure __msg("R1 {{min|max}} value is outside of the allowed memory range")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_bad_access_8(void)
{
@@ -905,7 +905,7 @@ l0_%=: r0 = 0; \
SEC("xdp")
__description("XDP pkt read, pkt_meta' > pkt_data, bad access 1")
-__failure __msg("R1 offset is outside of the packet")
+__failure __msg("R1 {{min|max}} value is outside of the allowed memory range")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_data_bad_access_1_3(void)
{
@@ -926,7 +926,7 @@ l0_%=: r0 = 0; \
SEC("xdp")
__description("XDP pkt read, pkt_meta' > pkt_data, bad access 2")
-__failure __msg("R1 offset is outside of the packet")
+__failure __msg("R1 {{min|max}} value is outside of the allowed memory range")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_data_bad_access_2_5(void)
{
@@ -967,7 +967,7 @@ l0_%=: r0 = 0; \
SEC("xdp")
__description("XDP pkt read, pkt_meta' > pkt_data, corner case -1, bad access")
-__failure __msg("R1 offset is outside of the packet")
+__failure __msg("R1 {{min|max}} value is outside of the allowed memory range")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_bad_access_9(void)
{
@@ -1009,7 +1009,7 @@ l1_%=: r0 = 0; \
SEC("xdp")
__description("XDP pkt read, pkt_data > pkt_meta', corner case -1, bad access")
-__failure __msg("R1 offset is outside of the packet")
+__failure __msg("R1 {{min|max}} value is outside of the allowed memory range")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_bad_access_10(void)
{
@@ -1031,7 +1031,7 @@ l1_%=: r0 = 0; \
SEC("xdp")
__description("XDP pkt read, pkt_data > pkt_meta', bad access 2")
-__failure __msg("R1 offset is outside of the packet")
+__failure __msg("R1 {{min|max}} value is outside of the allowed memory range")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_meta_bad_access_2_1(void)
{
@@ -1115,7 +1115,7 @@ l1_%=: r0 = 0; \
SEC("xdp")
__description("XDP pkt read, pkt_meta' < pkt_data, corner case -1, bad access")
-__failure __msg("R1 offset is outside of the packet")
+__failure __msg("R1 {{min|max}} value is outside of the allowed memory range")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_bad_access_11(void)
{
@@ -1137,7 +1137,7 @@ l1_%=: r0 = 0; \
SEC("xdp")
__description("XDP pkt read, pkt_meta' < pkt_data, bad access 2")
-__failure __msg("R1 offset is outside of the packet")
+__failure __msg("R1 {{min|max}} value is outside of the allowed memory range")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_data_bad_access_2_6(void)
{
@@ -1220,7 +1220,7 @@ l0_%=: r0 = 0; \
SEC("xdp")
__description("XDP pkt read, pkt_data < pkt_meta', bad access 1")
-__failure __msg("R1 offset is outside of the packet")
+__failure __msg("R1 {{min|max}} value is outside of the allowed memory range")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_meta_bad_access_1_1(void)
{
@@ -1241,7 +1241,7 @@ l0_%=: r0 = 0; \
SEC("xdp")
__description("XDP pkt read, pkt_data < pkt_meta', bad access 2")
-__failure __msg("R1 offset is outside of the packet")
+__failure __msg("R1 {{min|max}} value is outside of the allowed memory range")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_meta_bad_access_2_2(void)
{
@@ -1282,7 +1282,7 @@ l0_%=: r0 = 0; \
SEC("xdp")
__description("XDP pkt read, pkt_data < pkt_meta', corner case -1, bad access")
-__failure __msg("R1 offset is outside of the packet")
+__failure __msg("R1 {{min|max}} value is outside of the allowed memory range")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_bad_access_12(void)
{
@@ -1323,7 +1323,7 @@ l0_%=: r0 = 0; \
SEC("xdp")
__description("XDP pkt read, pkt_meta' >= pkt_data, corner case -1, bad access")
-__failure __msg("R1 offset is outside of the packet")
+__failure __msg("R1 {{min|max}} value is outside of the allowed memory range")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_bad_access_13(void)
{
@@ -1344,7 +1344,7 @@ l0_%=: r0 = 0; \
SEC("xdp")
__description("XDP pkt read, pkt_meta' >= pkt_data, bad access 2")
-__failure __msg("R1 offset is outside of the packet")
+__failure __msg("R1 {{min|max}} value is outside of the allowed memory range")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_data_bad_access_2_7(void)
{
@@ -1426,7 +1426,7 @@ l1_%=: r0 = 0; \
SEC("xdp")
__description("XDP pkt read, pkt_data >= pkt_meta', bad access 1")
-__failure __msg("R1 offset is outside of the packet")
+__failure __msg("R1 {{min|max}} value is outside of the allowed memory range")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_meta_bad_access_1_2(void)
{
@@ -1448,7 +1448,7 @@ l1_%=: r0 = 0; \
SEC("xdp")
__description("XDP pkt read, pkt_data >= pkt_meta', bad access 2")
-__failure __msg("R1 offset is outside of the packet")
+__failure __msg("R1 {{min|max}} value is outside of the allowed memory range")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_meta_bad_access_2_3(void)
{
@@ -1490,7 +1490,7 @@ l1_%=: r0 = 0; \
SEC("xdp")
__description("XDP pkt read, pkt_data >= pkt_meta', corner case -1, bad access")
-__failure __msg("R1 offset is outside of the packet")
+__failure __msg("R1 {{min|max}} value is outside of the allowed memory range")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_bad_access_14(void)
{
@@ -1533,7 +1533,7 @@ l1_%=: r0 = 0; \
SEC("xdp")
__description("XDP pkt read, pkt_meta' <= pkt_data, bad access 1")
-__failure __msg("R1 offset is outside of the packet")
+__failure __msg("R1 {{min|max}} value is outside of the allowed memory range")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_data_bad_access_1_4(void)
{
@@ -1555,7 +1555,7 @@ l1_%=: r0 = 0; \
SEC("xdp")
__description("XDP pkt read, pkt_meta' <= pkt_data, bad access 2")
-__failure __msg("R1 offset is outside of the packet")
+__failure __msg("R1 {{min|max}} value is outside of the allowed memory range")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_data_bad_access_2_8(void)
{
@@ -1597,7 +1597,7 @@ l1_%=: r0 = 0; \
SEC("xdp")
__description("XDP pkt read, pkt_meta' <= pkt_data, corner case -1, bad access")
-__failure __msg("R1 offset is outside of the packet")
+__failure __msg("R1 {{min|max}} value is outside of the allowed memory range")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_bad_access_15(void)
{
@@ -1639,7 +1639,7 @@ l0_%=: r0 = 0; \
SEC("xdp")
__description("XDP pkt read, pkt_data <= pkt_meta', corner case -1, bad access")
-__failure __msg("R1 offset is outside of the packet")
+__failure __msg("R1 {{min|max}} value is outside of the allowed memory range")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_bad_access_16(void)
{
@@ -1660,7 +1660,7 @@ l0_%=: r0 = 0; \
SEC("xdp")
__description("XDP pkt read, pkt_data <= pkt_meta', bad access 2")
-__failure __msg("R1 offset is outside of the packet")
+__failure __msg("R1 {{min|max}} value is outside of the allowed memory range")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_meta_bad_access_2_4(void)
{
diff --git a/tools/testing/selftests/bpf/test_bpftool.py b/tools/testing/selftests/bpf/test_bpftool.py
deleted file mode 100644
index 1c2408ee1f5d..000000000000
--- a/tools/testing/selftests/bpf/test_bpftool.py
+++ /dev/null
@@ -1,174 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# Copyright (c) 2020 SUSE LLC.
-
-import collections
-import functools
-import json
-import os
-import socket
-import subprocess
-import unittest
-
-
-# Add the source tree of bpftool and /usr/local/sbin to PATH
-cur_dir = os.path.dirname(os.path.realpath(__file__))
-bpftool_dir = os.path.abspath(os.path.join(cur_dir, "..", "..", "..", "..",
- "tools", "bpf", "bpftool"))
-os.environ["PATH"] = bpftool_dir + ":/usr/local/sbin:" + os.environ["PATH"]
-
-
-class IfaceNotFoundError(Exception):
- pass
-
-
-class UnprivilegedUserError(Exception):
- pass
-
-
-def _bpftool(args, json=True):
- _args = ["bpftool"]
- if json:
- _args.append("-j")
- _args.extend(args)
-
- return subprocess.check_output(_args)
-
-
-def bpftool(args):
- return _bpftool(args, json=False).decode("utf-8")
-
-
-def bpftool_json(args):
- res = _bpftool(args)
- return json.loads(res)
-
-
-def get_default_iface():
- for iface in socket.if_nameindex():
- if iface[1] != "lo":
- return iface[1]
- raise IfaceNotFoundError("Could not find any network interface to probe")
-
-
-def default_iface(f):
- @functools.wraps(f)
- def wrapper(*args, **kwargs):
- iface = get_default_iface()
- return f(*args, iface, **kwargs)
- return wrapper
-
-DMESG_EMITTING_HELPERS = [
- "bpf_probe_write_user",
- "bpf_trace_printk",
- "bpf_trace_vprintk",
- ]
-
-class TestBpftool(unittest.TestCase):
- @classmethod
- def setUpClass(cls):
- if os.getuid() != 0:
- raise UnprivilegedUserError(
- "This test suite needs root privileges")
-
- @default_iface
- def test_feature_dev_json(self, iface):
- unexpected_helpers = DMESG_EMITTING_HELPERS
- expected_keys = [
- "syscall_config",
- "program_types",
- "map_types",
- "helpers",
- "misc",
- ]
-
- res = bpftool_json(["feature", "probe", "dev", iface])
- # Check if the result has all expected keys.
- self.assertCountEqual(res.keys(), expected_keys)
- # Check if unexpected helpers are not included in helpers probes
- # result.
- for helpers in res["helpers"].values():
- for unexpected_helper in unexpected_helpers:
- self.assertNotIn(unexpected_helper, helpers)
-
- def test_feature_kernel(self):
- test_cases = [
- bpftool_json(["feature", "probe", "kernel"]),
- bpftool_json(["feature", "probe"]),
- bpftool_json(["feature"]),
- ]
- unexpected_helpers = DMESG_EMITTING_HELPERS
- expected_keys = [
- "syscall_config",
- "system_config",
- "program_types",
- "map_types",
- "helpers",
- "misc",
- ]
-
- for tc in test_cases:
- # Check if the result has all expected keys.
- self.assertCountEqual(tc.keys(), expected_keys)
- # Check if unexpected helpers are not included in helpers probes
- # result.
- for helpers in tc["helpers"].values():
- for unexpected_helper in unexpected_helpers:
- self.assertNotIn(unexpected_helper, helpers)
-
- def test_feature_kernel_full(self):
- test_cases = [
- bpftool_json(["feature", "probe", "kernel", "full"]),
- bpftool_json(["feature", "probe", "full"]),
- ]
- expected_helpers = DMESG_EMITTING_HELPERS
-
- for tc in test_cases:
- # Check if expected helpers are included at least once in any
- # helpers list for any program type. Unfortunately we cannot assume
- # that they will be included in all program types or a specific
- # subset of programs. It depends on the kernel version and
- # configuration.
- found_helpers = False
-
- for helpers in tc["helpers"].values():
- if all(expected_helper in helpers
- for expected_helper in expected_helpers):
- found_helpers = True
- break
-
- self.assertTrue(found_helpers)
-
- def test_feature_kernel_full_vs_not_full(self):
- full_res = bpftool_json(["feature", "probe", "full"])
- not_full_res = bpftool_json(["feature", "probe"])
- not_full_set = set()
- full_set = set()
-
- for helpers in full_res["helpers"].values():
- for helper in helpers:
- full_set.add(helper)
-
- for helpers in not_full_res["helpers"].values():
- for helper in helpers:
- not_full_set.add(helper)
-
- self.assertCountEqual(full_set - not_full_set,
- set(DMESG_EMITTING_HELPERS))
- self.assertCountEqual(not_full_set - full_set, set())
-
- def test_feature_macros(self):
- expected_patterns = [
- r"/\*\*\* System call availability \*\*\*/",
- r"#define HAVE_BPF_SYSCALL",
- r"/\*\*\* eBPF program types \*\*\*/",
- r"#define HAVE.*PROG_TYPE",
- r"/\*\*\* eBPF map types \*\*\*/",
- r"#define HAVE.*MAP_TYPE",
- r"/\*\*\* eBPF helper functions \*\*\*/",
- r"#define HAVE.*HELPER",
- r"/\*\*\* eBPF misc features \*\*\*/",
- ]
-
- res = bpftool(["feature", "probe", "macros"])
- for pattern in expected_patterns:
- self.assertRegex(res, pattern)
diff --git a/tools/testing/selftests/bpf/test_bpftool.sh b/tools/testing/selftests/bpf/test_bpftool.sh
deleted file mode 100755
index 718f59692ccb..000000000000
--- a/tools/testing/selftests/bpf/test_bpftool.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-# Copyright (c) 2020 SUSE LLC.
-
-# 'make -C tools/testing/selftests/bpf install' will install to SCRIPT_DIR
-SCRIPT_DIR=$(dirname $(realpath $0))
-
-# 'make -C tools/testing/selftests/bpf' will install to BPFTOOL_INSTALL_PATH
-BPFTOOL_INSTALL_PATH="$SCRIPT_DIR"/tools/sbin
-export PATH=$SCRIPT_DIR:$BPFTOOL_INSTALL_PATH:$PATH
-python3 -m unittest -v test_bpftool.TestBpftool
diff --git a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
index e62c6b78657f..d876314a4d67 100644
--- a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
@@ -470,6 +470,11 @@ noinline void bpf_testmod_stacktrace_test_1(void)
int bpf_testmod_fentry_ok;
+noinline int bpf_testmod_trampoline_count_test(void)
+{
+ return 0;
+}
+
noinline ssize_t
bpf_testmod_test_read(struct file *file, struct kobject *kobj,
const struct bin_attribute *bin_attr,
@@ -548,6 +553,8 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj,
21, 22, 23, 24, 25, 26) != 231)
goto out;
+ bpf_testmod_trampoline_count_test();
+
bpf_testmod_stacktrace_test_1();
bpf_testmod_fentry_ok = 1;
@@ -716,6 +723,7 @@ BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_value)
BTF_ID_FLAGS(func, bpf_kfunc_common_test)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_dynptr_test)
BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_nonzero_offset_test, KF_ACQUIRE)
BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_zero_offset_test, KF_ACQUIRE)
@@ -760,12 +768,63 @@ __bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk)
__bpf_kfunc long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d)
{
- /* Provoke the compiler to assume that the caller has sign-extended a,
+ /*
+	 * Make val volatile to avoid compiler optimizations.
+ * Verify that negative signed values remain negative after
+ * sign-extension (JIT must sign-extend, not zero-extend).
+ */
+ volatile long val;
+
+	/* val will be positive if the JIT does zero-extension instead of sign-extension */
+ val = a;
+ if (val >= 0)
+ return 1;
+
+ val = b;
+ if (val >= 0)
+ return 2;
+
+ val = c;
+ if (val >= 0)
+ return 3;
+
+ /*
+ * Provoke the compiler to assume that the caller has sign-extended a,
* b and c on platforms where this is required (e.g. s390x).
*/
return (long)a + (long)b + (long)c + d;
}
+__bpf_kfunc int bpf_kfunc_call_test5(u8 a, u16 b, u32 c)
+{
+ /*
+	 * Make val volatile to avoid compiler optimizations on the checks below.
+	 * In C, assigning u8/u16/u32 to long performs zero-extension.
+ */
+ volatile long val = a;
+
+ /* Check zero-extension */
+ if (val != (unsigned long)a)
+ return 1;
+ /* Check no sign-extension */
+ if (val < 0)
+ return 2;
+
+ val = b;
+ if (val != (unsigned long)b)
+ return 3;
+ if (val < 0)
+ return 4;
+
+ val = c;
+ if (val != (unsigned long)c)
+ return 5;
+ if (val < 0)
+ return 6;
+
+ return 0;
+}
+
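+/*
+ * Worked example for the extension checks above: a low byte of 0x80 is
+ * -128 as s8 and 128 as u8. Sign-extension to a 64-bit long yields
+ * 0xffffffffffffff80 (negative, as test4 expects for signed parameters);
+ * zero-extension yields 0x0000000000000080 (non-negative, as test5
+ * expects for unsigned parameters).
+ */
+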
static struct prog_test_ref_kfunc prog_test_struct = {
.a = 42,
.b = 108,
@@ -1228,7 +1287,7 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test4)
-BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test5)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
@@ -1359,6 +1418,12 @@ static int bpf_testmod_ops__test_refcounted(int dummy,
return 0;
}
+static int bpf_testmod_ops__test_refcounted_multi(int dummy, struct task_struct *task__nullable,
+ struct task_struct *task__ref)
+{
+ return 0;
+}
+
static struct task_struct *
bpf_testmod_ops__test_return_ref_kptr(int dummy, struct task_struct *task__ref,
struct cgroup *cgrp)
@@ -1371,6 +1436,7 @@ static struct bpf_testmod_ops __bpf_testmod_ops = {
.test_2 = bpf_testmod_test_2,
.test_maybe_null = bpf_testmod_ops__test_maybe_null,
.test_refcounted = bpf_testmod_ops__test_refcounted,
+ .test_refcounted_multi = bpf_testmod_ops__test_refcounted_multi,
.test_return_ref_kptr = bpf_testmod_ops__test_return_ref_kptr,
};
@@ -1843,6 +1909,16 @@ struct bpf_struct_ops testmod_multi_st_ops = {
extern int bpf_fentry_test1(int a);
+BTF_KFUNCS_START(bpf_testmod_trampoline_count_ids)
+BTF_ID_FLAGS(func, bpf_testmod_trampoline_count_test)
+BTF_KFUNCS_END(bpf_testmod_trampoline_count_ids)
+
+static const struct btf_kfunc_id_set bpf_testmod_trampoline_count_fmodret_set = {
+ .owner = THIS_MODULE,
+ .set = &bpf_testmod_trampoline_count_ids,
+};
+
static int bpf_testmod_init(void)
{
const struct btf_id_dtor_kfunc bpf_testmod_dtors[] = {
@@ -1859,6 +1935,7 @@ static int bpf_testmod_init(void)
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_testmod_kfunc_set);
+ ret = ret ?: register_btf_fmodret_id_set(&bpf_testmod_trampoline_count_fmodret_set);
ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops);
ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2);
ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops3, bpf_testmod_ops3);
diff --git a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.h b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.h
index f6e492f9d042..863fd10f1619 100644
--- a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.h
+++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.h
@@ -39,6 +39,9 @@ struct bpf_testmod_ops {
int (*unsupported_ops)(void);
/* Used to test ref_acquired arguments. */
int (*test_refcounted)(int dummy, struct task_struct *task);
+	/* Used to test checking of __ref arguments when it is not the first argument. */
+ int (*test_refcounted_multi)(int dummy, struct task_struct *task,
+ struct task_struct *task2);
/* Used to test returning referenced kptr. */
struct task_struct *(*test_return_ref_kptr)(int dummy, struct task_struct *task,
struct cgroup *cgrp);
diff --git a/tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h b/tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h
index b393bf771131..aa0b8d41e71b 100644
--- a/tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h
+++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h
@@ -110,6 +110,7 @@ __u64 bpf_kfunc_call_test1(struct sock *sk, __u32 a, __u64 b,
int bpf_kfunc_call_test2(struct sock *sk, __u32 a, __u32 b) __ksym;
struct sock *bpf_kfunc_call_test3(struct sock *sk) __ksym;
long bpf_kfunc_call_test4(signed char a, short b, int c, long d) __ksym;
+int bpf_kfunc_call_test5(__u8 a, __u16 b, __u32 c) __ksym;
void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb) __ksym;
void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p) __ksym;
diff --git a/tools/testing/selftests/bpf/test_loader.c b/tools/testing/selftests/bpf/test_loader.c
index 338c035c3688..c4c34cae6102 100644
--- a/tools/testing/selftests/bpf/test_loader.c
+++ b/tools/testing/selftests/bpf/test_loader.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <linux/capability.h>
+#include <linux/err.h>
#include <stdlib.h>
#include <test_progs.h>
#include <bpf/btf.h>
@@ -11,39 +12,15 @@
#include "cap_helpers.h"
#include "jit_disasm_helpers.h"
-#define str_has_pfx(str, pfx) \
- (strncmp(str, pfx, __builtin_constant_p(pfx) ? sizeof(pfx) - 1 : strlen(pfx)) == 0)
+static inline const char *str_has_pfx(const char *str, const char *pfx)
+{
+ size_t len = strlen(pfx);
+
+ return strncmp(str, pfx, len) == 0 ? str + len : NULL;
+}
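The rewritten helper returns the suffix on a match instead of a boolean, removing the manual sizeof(pfx) - 1 arithmetic at every call site; a minimal usage sketch mirroring the parser below:

	const char *val;

	/* on a match, val points just past the prefix (here: past the '=') */
	if ((val = str_has_pfx(s, "test_log_level=")))
		err = parse_int(val, &spec->log_level, "test log level");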
#define TEST_LOADER_LOG_BUF_SZ 2097152
-#define TEST_TAG_EXPECT_FAILURE "comment:test_expect_failure"
-#define TEST_TAG_EXPECT_SUCCESS "comment:test_expect_success"
-#define TEST_TAG_EXPECT_MSG_PFX "comment:test_expect_msg="
-#define TEST_TAG_EXPECT_NOT_MSG_PFX "comment:test_expect_not_msg="
-#define TEST_TAG_EXPECT_XLATED_PFX "comment:test_expect_xlated="
-#define TEST_TAG_EXPECT_FAILURE_UNPRIV "comment:test_expect_failure_unpriv"
-#define TEST_TAG_EXPECT_SUCCESS_UNPRIV "comment:test_expect_success_unpriv"
-#define TEST_TAG_EXPECT_MSG_PFX_UNPRIV "comment:test_expect_msg_unpriv="
-#define TEST_TAG_EXPECT_NOT_MSG_PFX_UNPRIV "comment:test_expect_not_msg_unpriv="
-#define TEST_TAG_EXPECT_XLATED_PFX_UNPRIV "comment:test_expect_xlated_unpriv="
-#define TEST_TAG_LOG_LEVEL_PFX "comment:test_log_level="
-#define TEST_TAG_PROG_FLAGS_PFX "comment:test_prog_flags="
-#define TEST_TAG_DESCRIPTION_PFX "comment:test_description="
-#define TEST_TAG_RETVAL_PFX "comment:test_retval="
-#define TEST_TAG_RETVAL_PFX_UNPRIV "comment:test_retval_unpriv="
-#define TEST_TAG_AUXILIARY "comment:test_auxiliary"
-#define TEST_TAG_AUXILIARY_UNPRIV "comment:test_auxiliary_unpriv"
-#define TEST_BTF_PATH "comment:test_btf_path="
-#define TEST_TAG_ARCH "comment:test_arch="
-#define TEST_TAG_JITED_PFX "comment:test_jited="
-#define TEST_TAG_JITED_PFX_UNPRIV "comment:test_jited_unpriv="
-#define TEST_TAG_CAPS_UNPRIV "comment:test_caps_unpriv="
-#define TEST_TAG_LOAD_MODE_PFX "comment:load_mode="
-#define TEST_TAG_EXPECT_STDERR_PFX "comment:test_expect_stderr="
-#define TEST_TAG_EXPECT_STDERR_PFX_UNPRIV "comment:test_expect_stderr_unpriv="
-#define TEST_TAG_EXPECT_STDOUT_PFX "comment:test_expect_stdout="
-#define TEST_TAG_EXPECT_STDOUT_PFX_UNPRIV "comment:test_expect_stdout_unpriv="
-#define TEST_TAG_LINEAR_SIZE "comment:test_linear_size="
/* Warning: duplicated in bpf_misc.h */
#define POINTER_VALUE 0xbadcafe
@@ -69,6 +46,7 @@ enum load_mode {
struct test_subspec {
char *name;
+ char *description;
bool expect_failure;
struct expected_msgs expect_msgs;
struct expected_msgs expect_xlated;
@@ -142,9 +120,13 @@ static void free_test_spec(struct test_spec *spec)
free_msgs(&spec->priv.stdout);
free(spec->priv.name);
+ free(spec->priv.description);
free(spec->unpriv.name);
+ free(spec->unpriv.description);
spec->priv.name = NULL;
+ spec->priv.description = NULL;
spec->unpriv.name = NULL;
+ spec->unpriv.description = NULL;
}
/* Compiles regular expression matching pattern.
@@ -161,21 +143,21 @@ static void free_test_spec(struct test_spec *spec)
static int compile_regex(const char *pattern, regex_t *regex)
{
char err_buf[256], buf[256] = {}, *ptr, *buf_end;
- const char *original_pattern = pattern;
+ const char *original_pattern = pattern, *next;
bool in_regex = false;
int err;
buf_end = buf + sizeof(buf);
ptr = buf;
while (*pattern && ptr < buf_end - 2) {
- if (!in_regex && str_has_pfx(pattern, "{{")) {
+ if (!in_regex && (next = str_has_pfx(pattern, "{{"))) {
in_regex = true;
- pattern += 2;
+ pattern = next;
continue;
}
- if (in_regex && str_has_pfx(pattern, "}}")) {
+ if (in_regex && (next = str_has_pfx(pattern, "}}"))) {
in_regex = false;
- pattern += 2;
+ pattern = next;
continue;
}
if (in_regex) {
@@ -343,33 +325,49 @@ static void update_flags(int *flags, int flag, bool clear)
*flags |= flag;
}
-/* Matches a string of form '<pfx>[^=]=.*' and returns it's suffix.
- * Used to parse btf_decl_tag values.
- * Such values require unique prefix because compiler does not add
- * same __attribute__((btf_decl_tag(...))) twice.
- * Test suite uses two-component tags for such cases:
- *
- * <pfx> __COUNTER__ '='
- *
- * For example, two consecutive __msg tags '__msg("foo") __msg("foo")'
- * would be encoded as:
- *
- * [18] DECL_TAG 'comment:test_expect_msg=0=foo' type_id=15 component_idx=-1
- * [19] DECL_TAG 'comment:test_expect_msg=1=foo' type_id=15 component_idx=-1
- *
- * And the purpose of this function is to extract 'foo' from the above.
- */
-static const char *skip_dynamic_pfx(const char *s, const char *pfx)
+static const char *skip_decl_tag_pfx(const char *s)
{
- const char *msg;
+ int n = 0;
- if (strncmp(s, pfx, strlen(pfx)) != 0)
+ if (sscanf(s, "comment:%*d:%n", &n) < 0 || !n)
return NULL;
- msg = s + strlen(pfx);
- msg = strchr(msg, '=');
- if (!msg)
- return NULL;
- return msg + 1;
+ return s + n;
+}
+
+static int compare_decl_tags(const void *a, const void *b)
+{
+ return strverscmp(*(const char **)a, *(const char **)b);
+}
+
+/*
+ * Compilers don't guarantee the order in which BTF attributes are generated,
+ * but order matters for test tags like __msg.
+ * Each test tag carries the prefix "comment:" __COUNTER__ ":";
+ * sorting with strverscmp() restores the original source order.
+ */
+static const char **collect_decl_tags(struct btf *btf, int id, int *cnt)
+{
+ const char **tmp, **tags = NULL;
+ const struct btf_type *t;
+ int i;
+
+ *cnt = 0;
+ for (i = 1; i < btf__type_cnt(btf); i++) {
+ t = btf__type_by_id(btf, i);
+ if (!btf_is_decl_tag(t) || t->type != id || btf_decl_tag(t)->component_idx != -1)
+ continue;
+ tmp = realloc(tags, (*cnt + 1) * sizeof(*tags));
+ if (!tmp) {
+ free(tags);
+ return ERR_PTR(-ENOMEM);
+ }
+ tags = tmp;
+ tags[(*cnt)++] = btf__str_by_offset(btf, t->name_off);
+ }
+
+ if (*cnt)
+ qsort(tags, *cnt, sizeof(*tags), compare_decl_tags);
+ return tags;
}
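strverscmp() (a GNU extension) compares embedded digit runs numerically rather than byte-by-byte, which is what keeps multi-digit __COUNTER__ values in source order; a small illustration using the comparator above (tag payloads shortened):

	/* strcmp() would sort "comment:10:..." before "comment:2:...";
	 * strverscmp() restores the numeric order the compiler emitted.
	 */
	const char *demo[] = { "comment:10:test_expect_msg=bar",
			       "comment:2:test_expect_msg=foo" };

	qsort(demo, 2, sizeof(demo[0]), compare_decl_tags);
	/* demo[0] is now the "comment:2:..." entry */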
enum arch {
@@ -415,7 +413,9 @@ static int parse_test_spec(struct test_loader *tester,
bool stdout_on_next_line = true;
bool unpriv_stdout_on_next_line = true;
bool collect_jit = false;
- int func_id, i, err = 0;
+ const char **tags = NULL;
+ int func_id, i, nr_tags;
+ int err = 0;
u32 arch_mask = 0;
u32 load_mask = 0;
struct btf *btf;
@@ -438,63 +438,61 @@ static int parse_test_spec(struct test_loader *tester,
return -EINVAL;
}
- for (i = 1; i < btf__type_cnt(btf); i++) {
+ tags = collect_decl_tags(btf, func_id, &nr_tags);
+ if (IS_ERR(tags))
+ return PTR_ERR(tags);
+
+ for (i = 0; i < nr_tags; i++) {
const char *s, *val, *msg;
- const struct btf_type *t;
bool clear;
int flags;
- t = btf__type_by_id(btf, i);
- if (!btf_is_decl_tag(t))
- continue;
-
- if (t->type != func_id || btf_decl_tag(t)->component_idx != -1)
+ s = skip_decl_tag_pfx(tags[i]);
+ if (!s)
continue;
-
- s = btf__str_by_offset(btf, t->name_off);
- if (str_has_pfx(s, TEST_TAG_DESCRIPTION_PFX)) {
- description = s + sizeof(TEST_TAG_DESCRIPTION_PFX) - 1;
- } else if (strcmp(s, TEST_TAG_EXPECT_FAILURE) == 0) {
+ if ((val = str_has_pfx(s, "test_description="))) {
+ description = val;
+ } else if (strcmp(s, "test_expect_failure") == 0) {
spec->priv.expect_failure = true;
spec->mode_mask |= PRIV;
- } else if (strcmp(s, TEST_TAG_EXPECT_SUCCESS) == 0) {
+ } else if (strcmp(s, "test_expect_success") == 0) {
spec->priv.expect_failure = false;
spec->mode_mask |= PRIV;
- } else if (strcmp(s, TEST_TAG_EXPECT_FAILURE_UNPRIV) == 0) {
+ } else if (strcmp(s, "test_expect_failure_unpriv") == 0) {
spec->unpriv.expect_failure = true;
spec->mode_mask |= UNPRIV;
has_unpriv_result = true;
- } else if (strcmp(s, TEST_TAG_EXPECT_SUCCESS_UNPRIV) == 0) {
+ } else if (strcmp(s, "test_expect_success_unpriv") == 0) {
spec->unpriv.expect_failure = false;
spec->mode_mask |= UNPRIV;
has_unpriv_result = true;
- } else if (strcmp(s, TEST_TAG_AUXILIARY) == 0) {
+ } else if (strcmp(s, "test_auxiliary") == 0) {
spec->auxiliary = true;
spec->mode_mask |= PRIV;
- } else if (strcmp(s, TEST_TAG_AUXILIARY_UNPRIV) == 0) {
+ } else if (strcmp(s, "test_auxiliary_unpriv") == 0) {
spec->auxiliary = true;
spec->mode_mask |= UNPRIV;
- } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_MSG_PFX))) {
+ } else if ((msg = str_has_pfx(s, "test_expect_msg="))) {
err = push_msg(msg, false, &spec->priv.expect_msgs);
if (err)
goto cleanup;
spec->mode_mask |= PRIV;
- } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_NOT_MSG_PFX))) {
+ } else if ((msg = str_has_pfx(s, "test_expect_not_msg="))) {
err = push_msg(msg, true, &spec->priv.expect_msgs);
if (err)
goto cleanup;
spec->mode_mask |= PRIV;
- } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_MSG_PFX_UNPRIV))) {
+ } else if ((msg = str_has_pfx(s, "test_expect_msg_unpriv="))) {
err = push_msg(msg, false, &spec->unpriv.expect_msgs);
if (err)
goto cleanup;
spec->mode_mask |= UNPRIV;
- } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_NOT_MSG_PFX_UNPRIV))) {
+ } else if ((msg = str_has_pfx(s, "test_expect_not_msg_unpriv="))) {
err = push_msg(msg, true, &spec->unpriv.expect_msgs);
if (err)
goto cleanup;
spec->mode_mask |= UNPRIV;
- } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_JITED_PFX))) {
+ } else if ((msg = str_has_pfx(s, "test_jited="))) {
if (arch_mask == 0) {
PRINT_FAIL("__jited used before __arch_*");
goto cleanup;
@@ -506,7 +504,7 @@ static int parse_test_spec(struct test_loader *tester,
goto cleanup;
spec->mode_mask |= PRIV;
}
- } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_JITED_PFX_UNPRIV))) {
+ } else if ((msg = str_has_pfx(s, "test_jited_unpriv="))) {
if (arch_mask == 0) {
PRINT_FAIL("__unpriv_jited used before __arch_*");
goto cleanup;
@@ -518,41 +516,36 @@ static int parse_test_spec(struct test_loader *tester,
goto cleanup;
spec->mode_mask |= UNPRIV;
}
- } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_XLATED_PFX))) {
+ } else if ((msg = str_has_pfx(s, "test_expect_xlated="))) {
err = push_disasm_msg(msg, &xlated_on_next_line,
&spec->priv.expect_xlated);
if (err)
goto cleanup;
spec->mode_mask |= PRIV;
- } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_XLATED_PFX_UNPRIV))) {
+ } else if ((msg = str_has_pfx(s, "test_expect_xlated_unpriv="))) {
err = push_disasm_msg(msg, &unpriv_xlated_on_next_line,
&spec->unpriv.expect_xlated);
if (err)
goto cleanup;
spec->mode_mask |= UNPRIV;
- } else if (str_has_pfx(s, TEST_TAG_RETVAL_PFX)) {
- val = s + sizeof(TEST_TAG_RETVAL_PFX) - 1;
+ } else if ((val = str_has_pfx(s, "test_retval="))) {
err = parse_retval(val, &spec->priv.retval, "__retval");
if (err)
goto cleanup;
spec->priv.execute = true;
spec->mode_mask |= PRIV;
- } else if (str_has_pfx(s, TEST_TAG_RETVAL_PFX_UNPRIV)) {
- val = s + sizeof(TEST_TAG_RETVAL_PFX_UNPRIV) - 1;
+ } else if ((val = str_has_pfx(s, "test_retval_unpriv="))) {
err = parse_retval(val, &spec->unpriv.retval, "__retval_unpriv");
if (err)
goto cleanup;
spec->mode_mask |= UNPRIV;
spec->unpriv.execute = true;
has_unpriv_retval = true;
- } else if (str_has_pfx(s, TEST_TAG_LOG_LEVEL_PFX)) {
- val = s + sizeof(TEST_TAG_LOG_LEVEL_PFX) - 1;
+ } else if ((val = str_has_pfx(s, "test_log_level="))) {
err = parse_int(val, &spec->log_level, "test log level");
if (err)
goto cleanup;
- } else if (str_has_pfx(s, TEST_TAG_PROG_FLAGS_PFX)) {
- val = s + sizeof(TEST_TAG_PROG_FLAGS_PFX) - 1;
-
+ } else if ((val = str_has_pfx(s, "test_prog_flags="))) {
clear = val[0] == '!';
if (clear)
val++;
@@ -577,8 +570,7 @@ static int parse_test_spec(struct test_loader *tester,
goto cleanup;
update_flags(&spec->prog_flags, flags, clear);
}
- } else if (str_has_pfx(s, TEST_TAG_ARCH)) {
- val = s + sizeof(TEST_TAG_ARCH) - 1;
+ } else if ((val = str_has_pfx(s, "test_arch="))) {
if (strcmp(val, "X86_64") == 0) {
arch = ARCH_X86_64;
} else if (strcmp(val, "ARM64") == 0) {
@@ -596,16 +588,14 @@ static int parse_test_spec(struct test_loader *tester,
collect_jit = get_current_arch() == arch;
unpriv_jit_on_next_line = true;
jit_on_next_line = true;
- } else if (str_has_pfx(s, TEST_BTF_PATH)) {
- spec->btf_custom_path = s + sizeof(TEST_BTF_PATH) - 1;
- } else if (str_has_pfx(s, TEST_TAG_CAPS_UNPRIV)) {
- val = s + sizeof(TEST_TAG_CAPS_UNPRIV) - 1;
+ } else if ((val = str_has_pfx(s, "test_btf_path="))) {
+ spec->btf_custom_path = val;
+ } else if ((val = str_has_pfx(s, "test_caps_unpriv="))) {
err = parse_caps(val, &spec->unpriv.caps, "test caps");
if (err)
goto cleanup;
spec->mode_mask |= UNPRIV;
- } else if (str_has_pfx(s, TEST_TAG_LOAD_MODE_PFX)) {
- val = s + sizeof(TEST_TAG_LOAD_MODE_PFX) - 1;
+ } else if ((val = str_has_pfx(s, "load_mode="))) {
if (strcmp(val, "jited") == 0) {
load_mask = JITED;
} else if (strcmp(val, "no_jited") == 0) {
@@ -615,32 +605,31 @@ static int parse_test_spec(struct test_loader *tester,
err = -EINVAL;
goto cleanup;
}
- } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_STDERR_PFX))) {
+ } else if ((msg = str_has_pfx(s, "test_expect_stderr="))) {
err = push_disasm_msg(msg, &stderr_on_next_line,
&spec->priv.stderr);
if (err)
goto cleanup;
- } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_STDERR_PFX_UNPRIV))) {
+ } else if ((msg = str_has_pfx(s, "test_expect_stderr_unpriv="))) {
err = push_disasm_msg(msg, &unpriv_stderr_on_next_line,
&spec->unpriv.stderr);
if (err)
goto cleanup;
- } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_STDOUT_PFX))) {
+ } else if ((msg = str_has_pfx(s, "test_expect_stdout="))) {
err = push_disasm_msg(msg, &stdout_on_next_line,
&spec->priv.stdout);
if (err)
goto cleanup;
- } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_STDOUT_PFX_UNPRIV))) {
+ } else if ((msg = str_has_pfx(s, "test_expect_stdout_unpriv="))) {
err = push_disasm_msg(msg, &unpriv_stdout_on_next_line,
&spec->unpriv.stdout);
if (err)
goto cleanup;
- } else if (str_has_pfx(s, TEST_TAG_LINEAR_SIZE)) {
+ } else if ((val = str_has_pfx(s, "test_linear_size="))) {
switch (bpf_program__type(prog)) {
case BPF_PROG_TYPE_SCHED_ACT:
case BPF_PROG_TYPE_SCHED_CLS:
case BPF_PROG_TYPE_CGROUP_SKB:
- val = s + sizeof(TEST_TAG_LINEAR_SIZE) - 1;
err = parse_int(val, &spec->linear_sz, "test linear size");
if (err)
goto cleanup;
@@ -659,33 +648,56 @@ static int parse_test_spec(struct test_loader *tester,
if (spec->mode_mask == 0)
spec->mode_mask = PRIV;
- if (!description)
- description = spec->prog_name;
-
if (spec->mode_mask & PRIV) {
- spec->priv.name = strdup(description);
+ spec->priv.name = strdup(spec->prog_name);
if (!spec->priv.name) {
PRINT_FAIL("failed to allocate memory for priv.name\n");
err = -ENOMEM;
goto cleanup;
}
+
+ if (description) {
+ spec->priv.description = strdup(description);
+ if (!spec->priv.description) {
+ PRINT_FAIL("failed to allocate memory for priv.description\n");
+ err = -ENOMEM;
+ goto cleanup;
+ }
+ }
}
if (spec->mode_mask & UNPRIV) {
- int descr_len = strlen(description);
+ int name_len = strlen(spec->prog_name);
const char *suffix = " @unpriv";
+ int suffix_len = strlen(suffix);
char *name;
- name = malloc(descr_len + strlen(suffix) + 1);
+ name = malloc(name_len + suffix_len + 1);
if (!name) {
PRINT_FAIL("failed to allocate memory for unpriv.name\n");
err = -ENOMEM;
goto cleanup;
}
- strcpy(name, description);
- strcpy(&name[descr_len], suffix);
+ strcpy(name, spec->prog_name);
+ strcpy(&name[name_len], suffix);
spec->unpriv.name = name;
+
+ if (description) {
+ int descr_len = strlen(description);
+ char *descr;
+
+ descr = malloc(descr_len + suffix_len + 1);
+ if (!descr) {
+ PRINT_FAIL("failed to allocate memory for unpriv.description\n");
+ err = -ENOMEM;
+ goto cleanup;
+ }
+
+ strcpy(descr, description);
+ strcpy(&descr[descr_len], suffix);
+ spec->unpriv.description = descr;
+ }
}
if (spec->mode_mask & (PRIV | UNPRIV)) {
@@ -711,9 +723,11 @@ static int parse_test_spec(struct test_loader *tester,
spec->valid = true;
+ free(tags);
return 0;
cleanup:
+ free(tags);
free_test_spec(spec);
return err;
}
@@ -1148,7 +1162,7 @@ void run_subtest(struct test_loader *tester,
int links_cnt = 0;
bool should_load;
- if (!test__start_subtest(subspec->name))
+ if (!test__start_subtest_with_desc(subspec->name, subspec->description))
return;
if ((get_current_arch() & spec->arch_mask) == 0) {
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 0929f4a7bda4..7fe16b5131b1 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -308,16 +308,34 @@ static bool match_subtest(struct test_filter_set *filter,
return false;
}
+static bool match_subtest_desc(struct test_filter_set *filter,
+ const char *test_name,
+ const char *subtest_name,
+ const char *subtest_desc)
+{
+ if (match_subtest(filter, test_name, subtest_name))
+ return true;
+
+ if (!subtest_desc || !subtest_desc[0] ||
+ strcmp(subtest_name, subtest_desc) == 0)
+ return false;
+
+ return match_subtest(filter, test_name, subtest_desc);
+}
+
static bool should_run_subtest(struct test_selector *sel,
struct test_selector *subtest_sel,
int subtest_num,
const char *test_name,
- const char *subtest_name)
+ const char *subtest_name,
+ const char *subtest_desc)
{
- if (match_subtest(&sel->blacklist, test_name, subtest_name))
+ if (match_subtest_desc(&sel->blacklist, test_name,
+ subtest_name, subtest_desc))
return false;
- if (match_subtest(&sel->whitelist, test_name, subtest_name))
+ if (match_subtest_desc(&sel->whitelist, test_name,
+ subtest_name, subtest_desc))
return true;
if (!sel->whitelist.cnt && !subtest_sel->num_set)
@@ -544,11 +562,12 @@ void test__end_subtest(void)
env.subtest_state = NULL;
}
-bool test__start_subtest(const char *subtest_name)
+bool test__start_subtest_with_desc(const char *subtest_name, const char *subtest_desc)
{
struct prog_test_def *test = env.test;
struct test_state *state = env.test_state;
struct subtest_state *subtest_state;
+ const char *subtest_display_name;
size_t sub_state_size = sizeof(*subtest_state);
if (env.subtest_state)
@@ -574,7 +593,9 @@ bool test__start_subtest(const char *subtest_name)
return false;
}
- subtest_state->name = strdup(subtest_name);
+ subtest_display_name = subtest_desc ? subtest_desc : subtest_name;
+
+ subtest_state->name = strdup(subtest_display_name);
if (!subtest_state->name) {
fprintf(env.stderr_saved,
"Subtest #%d: failed to copy subtest name!\n",
@@ -586,14 +607,15 @@ bool test__start_subtest(const char *subtest_name)
&env.subtest_selector,
state->subtest_num,
test->test_name,
- subtest_name)) {
+ subtest_name,
+ subtest_desc)) {
subtest_state->filtered = true;
return false;
}
- subtest_state->should_tmon = match_subtest(&env.tmon_selector.whitelist,
- test->test_name,
- subtest_name);
+ subtest_state->should_tmon = match_subtest_desc(&env.tmon_selector.whitelist,
+ test->test_name, subtest_name,
+ subtest_desc);
env.subtest_state = subtest_state;
stdio_hijack_init(&subtest_state->log_buf, &subtest_state->log_cnt);
@@ -602,6 +624,11 @@ bool test__start_subtest(const char *subtest_name)
return true;
}
+bool test__start_subtest(const char *subtest_name)
+{
+ return test__start_subtest_with_desc(subtest_name, NULL);
+}
+
void test__force_log(void)
{
env.test_state->force_log = true;
diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
index eebfc18cdcd2..1a44467f4310 100644
--- a/tools/testing/selftests/bpf/test_progs.h
+++ b/tools/testing/selftests/bpf/test_progs.h
@@ -181,6 +181,7 @@ struct msg {
extern struct test_env env;
void test__force_log(void);
+bool test__start_subtest_with_desc(const char *name, const char *description);
bool test__start_subtest(const char *name);
void test__end_subtest(void);
void test__skip(void);
diff --git a/tools/testing/selftests/bpf/testing_helpers.c b/tools/testing/selftests/bpf/testing_helpers.c
index 66af0d13751a..6fbe1e995660 100644
--- a/tools/testing/selftests/bpf/testing_helpers.c
+++ b/tools/testing/selftests/bpf/testing_helpers.c
@@ -368,7 +368,7 @@ int delete_module(const char *name, int flags)
return syscall(__NR_delete_module, name, flags);
}
-int unload_module(const char *name, bool verbose)
+int try_unload_module(const char *name, int retries, bool verbose)
{
int ret, cnt = 0;
@@ -379,7 +379,7 @@ int unload_module(const char *name, bool verbose)
ret = delete_module(name, 0);
if (!ret || errno != EAGAIN)
break;
- if (++cnt > 10000) {
+ if (++cnt > retries) {
fprintf(stdout, "Unload of %s timed out\n", name);
break;
}
@@ -400,6 +400,11 @@ int unload_module(const char *name, bool verbose)
return 0;
}
+int unload_module(const char *name, bool verbose)
+{
+ return try_unload_module(name, 10000, verbose);
+}
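Callers that prefer to fail fast rather than wait out the default 10000 retries can use the new entry point directly; a hypothetical example:

	/* hypothetical: give up after 100 EAGAIN retries */
	if (try_unload_module("bpf_testmod", 100, false))
		fprintf(stderr, "bpf_testmod still busy, skipping\n");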
+
static int __load_module(const char *path, const char *param_values, bool verbose)
{
int fd;
diff --git a/tools/testing/selftests/bpf/testing_helpers.h b/tools/testing/selftests/bpf/testing_helpers.h
index eb20d3772218..2ca2356a0b58 100644
--- a/tools/testing/selftests/bpf/testing_helpers.h
+++ b/tools/testing/selftests/bpf/testing_helpers.h
@@ -40,6 +40,7 @@ int finit_module(int fd, const char *param_values, int flags);
int delete_module(const char *name, int flags);
int load_module(const char *path, bool verbose);
int load_module_params(const char *path, const char *param_values, bool verbose);
+int try_unload_module(const char *name, int retries, bool verbose);
int unload_module(const char *name, bool verbose);
static inline __u64 get_time_ns(void)
diff --git a/tools/testing/selftests/bpf/uprobe_multi.c b/tools/testing/selftests/bpf/uprobe_multi.c
index dd38dc68f635..3e58a86b8e25 100644
--- a/tools/testing/selftests/bpf/uprobe_multi.c
+++ b/tools/testing/selftests/bpf/uprobe_multi.c
@@ -100,6 +100,9 @@ int __attribute__((weak)) trigger_uprobe(bool build_id_resident)
int page_sz = sysconf(_SC_PAGESIZE);
void *addr;
+ unsigned char vec[1];
+ int poll = 0;
+
/* page-align build ID start */
addr = (void *)((uintptr_t)&build_id_start & ~(page_sz - 1));
@@ -108,9 +111,19 @@ int __attribute__((weak)) trigger_uprobe(bool build_id_resident)
* do MADV_POPULATE_READ, and then MADV_PAGEOUT, if necessary
*/
madvise(addr, page_sz, MADV_POPULATE_READ);
- if (!build_id_resident)
- madvise(addr, page_sz, MADV_PAGEOUT);
-
+ if (!build_id_resident) {
+ do {
+ madvise(addr, page_sz, MADV_PAGEOUT);
+ /* check if page has been evicted */
+ mincore(addr, page_sz, vec);
+ if (!(vec[0] & 1))
+ break;
+ /* if the page is still resident, re-attempt MADV_POPULATE_READ/MADV_PAGEOUT */
+ madvise(addr, page_sz, MADV_POPULATE_READ);
+ poll++;
+ usleep(100);
+ } while (poll < 500);
+ }
(void)uprobe();
return 0;
diff --git a/tools/testing/selftests/bpf/uprobe_multi.ld b/tools/testing/selftests/bpf/uprobe_multi.ld
index a2e94828bc8c..2063714b2899 100644
--- a/tools/testing/selftests/bpf/uprobe_multi.ld
+++ b/tools/testing/selftests/bpf/uprobe_multi.ld
@@ -1,8 +1,8 @@
SECTIONS
{
- . = ALIGN(4096);
+ . = ALIGN(65536);
.note.gnu.build-id : { *(.note.gnu.build-id) }
- . = ALIGN(4096);
+ . = ALIGN(65536);
}
INSERT AFTER .text;
diff --git a/tools/testing/selftests/bpf/usdt.h b/tools/testing/selftests/bpf/usdt.h
index 549d1f774810..c71e21df38b3 100644
--- a/tools/testing/selftests/bpf/usdt.h
+++ b/tools/testing/selftests/bpf/usdt.h
@@ -312,6 +312,8 @@ struct usdt_sema { volatile unsigned short active; };
#ifndef USDT_NOP
#if defined(__ia64__) || defined(__s390__) || defined(__s390x__)
#define USDT_NOP nop 0
+#elif defined(__x86_64__)
+#define USDT_NOP .byte 0x90, 0x0f, 0x1f, 0x44, 0x00, 0x0 /* nop, nop5 */
#else
#define USDT_NOP nop
#endif
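For reference, the x86-64 byte sequence above decodes as follows (standard encodings; the rationale in the second comment is an assumption based on the optimized-attach tests added below):

/*
 * 0x90                      nop                    (1 byte)
 * 0x0f 0x1f 0x44 0x00 0x00  nopl 0x0(%rax,%rax,1)  (5 bytes)
 *
 * The 1-byte nop marks the probe site; the adjacent 5-byte nop
 * presumably gives the kernel room to patch in a jump when the
 * uprobe is attached in optimized mode.
 */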
diff --git a/tools/testing/selftests/bpf/usdt_1.c b/tools/testing/selftests/bpf/usdt_1.c
new file mode 100644
index 000000000000..4f06e8bcf58b
--- /dev/null
+++ b/tools/testing/selftests/bpf/usdt_1.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#if defined(__x86_64__)
+
+/*
+ * Include usdt.h with the USDT_NOP macro predefined so that a
+ * single nop instruction is used.
+ */
+#define USDT_NOP .byte 0x90
+#include "usdt.h"
+
+__attribute__((aligned(16)))
+void usdt_1(void)
+{
+ USDT(optimized_attach, usdt_1);
+}
+
+#endif
diff --git a/tools/testing/selftests/bpf/usdt_2.c b/tools/testing/selftests/bpf/usdt_2.c
new file mode 100644
index 000000000000..789883aaca4c
--- /dev/null
+++ b/tools/testing/selftests/bpf/usdt_2.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#if defined(__x86_64__)
+
+/*
+ * Include usdt.h with the default nop,nop5 instruction combo.
+ */
+#include "usdt.h"
+
+__attribute__((aligned(16)))
+void usdt_2(void)
+{
+ USDT(optimized_attach, usdt_2);
+}
+
+#endif
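On the userspace side these probes would typically be attached through libbpf's USDT support; a hedged sketch (the binary path and program handle are assumptions, while the provider/name pair comes from the USDT() invocation above):

#include <bpf/libbpf.h>

static struct bpf_link *attach_usdt_1(struct bpf_program *prog)
{
	/* "optimized_attach"/"usdt_1" match USDT(optimized_attach, usdt_1) */
	return bpf_program__attach_usdt(prog, -1 /* any pid */,
					"/proc/self/exe",
					"optimized_attach", "usdt_1", NULL);
}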
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
index 9ca83dce100d..c3164b9b2be5 100644
--- a/tools/testing/selftests/bpf/verifier/calls.c
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -220,7 +220,7 @@
},
.result_unpriv = REJECT,
.result = REJECT,
- .errstr = "variable trusted_ptr_ access var_off=(0x0; 0x7) disallowed",
+ .errstr = "R1 must have zero offset when passed to release func or trusted arg to kfunc",
},
{
"calls: invalid kfunc call: referenced arg needs refcounted PTR_TO_BTF_ID",
@@ -455,7 +455,7 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "the call stack of 9 frames is too deep",
+ .errstr = "recursive call",
.result = REJECT,
},
{
@@ -812,7 +812,7 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "the call stack of 9 frames is too deep",
+ .errstr = "recursive call",
.result = REJECT,
},
{
@@ -824,7 +824,7 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "the call stack of 9 frames is too deep",
+ .errstr = "recursive call",
.result = REJECT,
},
{
@@ -1521,6 +1521,7 @@
.errstr = "R0 invalid mem access 'scalar'",
.result_unpriv = REJECT,
.errstr_unpriv = "invalid read from stack R7 off=-16 size=8",
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
diff --git a/tools/testing/selftests/bpf/verifier/junk_insn.c b/tools/testing/selftests/bpf/verifier/junk_insn.c
index 89d690f1992a..735d3b9510cf 100644
--- a/tools/testing/selftests/bpf/verifier/junk_insn.c
+++ b/tools/testing/selftests/bpf/verifier/junk_insn.c
@@ -10,7 +10,7 @@
{
"junk insn2",
.insns = {
- BPF_RAW_INSN(1, 0, 0, 0, 0),
+ BPF_RAW_INSN(BPF_LDX | BPF_MEM | BPF_W, 0, 0, 0, 1),
BPF_EXIT_INSN(),
},
.errstr = "BPF_LDX uses reserved fields",
@@ -28,7 +28,7 @@
{
"junk insn4",
.insns = {
- BPF_RAW_INSN(-1, -1, -1, -1, -1),
+ BPF_RAW_INSN(-1, 0, 0, -1, -1),
BPF_EXIT_INSN(),
},
.errstr = "unknown opcode ff",
@@ -37,7 +37,7 @@
{
"junk insn5",
.insns = {
- BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
+ BPF_RAW_INSN(0x7f, 0, 0, -1, -1),
BPF_EXIT_INSN(),
},
.errstr = "BPF_ALU uses reserved fields",
diff --git a/tools/testing/selftests/bpf/verifier/sleepable.c b/tools/testing/selftests/bpf/verifier/sleepable.c
index 1f0d2bdc673f..c2b7f5ebf168 100644
--- a/tools/testing/selftests/bpf/verifier/sleepable.c
+++ b/tools/testing/selftests/bpf/verifier/sleepable.c
@@ -85,7 +85,7 @@
.expected_attach_type = BPF_TRACE_RAW_TP,
.kfunc = "sched_switch",
.result = REJECT,
- .errstr = "Only fentry/fexit/fmod_ret, lsm, iter, uprobe, and struct_ops programs can be sleepable",
+ .errstr = "Only fentry/fexit/fsession/fmod_ret, lsm, iter, uprobe, and struct_ops programs can be sleepable",
.flags = BPF_F_SLEEPABLE,
.runs = -1,
},
diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c
index 75f85e0362f5..5c82950e6978 100644
--- a/tools/testing/selftests/bpf/veristat.c
+++ b/tools/testing/selftests/bpf/veristat.c
@@ -1236,7 +1236,7 @@ static void mask_unrelated_struct_ops_progs(struct bpf_object *obj,
}
}
-static void fixup_obj(struct bpf_object *obj, struct bpf_program *prog, const char *filename)
+static void fixup_obj_maps(struct bpf_object *obj)
{
struct bpf_map *map;
@@ -1251,15 +1251,23 @@ static void fixup_obj(struct bpf_object *obj, struct bpf_program *prog, const ch
case BPF_MAP_TYPE_INODE_STORAGE:
case BPF_MAP_TYPE_CGROUP_STORAGE:
case BPF_MAP_TYPE_CGRP_STORAGE:
- break;
case BPF_MAP_TYPE_STRUCT_OPS:
- mask_unrelated_struct_ops_progs(obj, map, prog);
break;
default:
if (bpf_map__max_entries(map) == 0)
bpf_map__set_max_entries(map, 1);
}
}
+}
+
+static void fixup_obj(struct bpf_object *obj, struct bpf_program *prog, const char *filename)
+{
+ struct bpf_map *map;
+
+ bpf_object__for_each_map(map, obj) {
+ if (bpf_map__type(map) == BPF_MAP_TYPE_STRUCT_OPS)
+ mask_unrelated_struct_ops_progs(obj, map, prog);
+ }
/* SEC(freplace) programs can't be loaded with veristat as is,
* but we can try guessing their target program's expected type by
@@ -1608,6 +1616,7 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf
const char *base_filename = basename(strdupa(filename));
const char *prog_name = bpf_program__name(prog);
long mem_peak_a, mem_peak_b, mem_peak = -1;
+ LIBBPF_OPTS(bpf_prog_load_opts, opts);
char *buf;
int buf_sz, log_level;
struct verif_stats *stats;
@@ -1647,9 +1656,6 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf
}
verif_log_buf[0] = '\0';
- bpf_program__set_log_buf(prog, buf, buf_sz);
- bpf_program__set_log_level(prog, log_level);
-
/* increase chances of successful BPF object loading */
fixup_obj(obj, prog, base_filename);
@@ -1658,15 +1664,22 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf
if (env.force_reg_invariants)
bpf_program__set_flags(prog, bpf_program__flags(prog) | BPF_F_TEST_REG_INVARIANTS);
- err = bpf_object__prepare(obj);
- if (!err) {
- cgroup_err = reset_stat_cgroup();
- mem_peak_a = cgroup_memory_peak();
- err = bpf_object__load(obj);
- mem_peak_b = cgroup_memory_peak();
- if (!cgroup_err && mem_peak_a >= 0 && mem_peak_b >= 0)
- mem_peak = mem_peak_b - mem_peak_a;
+ opts.log_buf = buf;
+ opts.log_size = buf_sz;
+ opts.log_level = log_level;
+
+ cgroup_err = reset_stat_cgroup();
+ mem_peak_a = cgroup_memory_peak();
+ fd = bpf_program__clone(prog, &opts);
+ if (fd < 0) {
+ err = fd;
+ if (env.verbose)
+ fprintf(stderr, "Failed to load program %s %d\n", prog_name, err);
}
+ mem_peak_b = cgroup_memory_peak();
+ if (!cgroup_err && mem_peak_a >= 0 && mem_peak_b >= 0)
+ mem_peak = mem_peak_b - mem_peak_a;
+
env.progs_processed++;
stats->file_name = strdup(base_filename);
@@ -1678,7 +1691,6 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf
stats->stats[MEMORY_PEAK] = mem_peak < 0 ? -1 : mem_peak / (1024 * 1024);
memset(&info, 0, info_len);
- fd = bpf_program__fd(prog);
if (fd > 0 && bpf_prog_get_info_by_fd(fd, &info, &info_len) == 0) {
stats->stats[JITED_SIZE] = info.jited_prog_len;
if (env.dump_mode & DUMP_JITED)
@@ -1699,7 +1711,8 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf
if (verif_log_buf != buf)
free(buf);
-
+ if (fd > 0)
+ close(fd);
return 0;
}
@@ -2182,8 +2195,8 @@ static int set_global_vars(struct bpf_object *obj, struct var_preset *presets, i
static int process_obj(const char *filename)
{
const char *base_filename = basename(strdupa(filename));
- struct bpf_object *obj = NULL, *tobj;
- struct bpf_program *prog, *tprog, *lprog;
+ struct bpf_object *obj = NULL;
+ struct bpf_program *prog;
libbpf_print_fn_t old_libbpf_print_fn;
LIBBPF_OPTS(bpf_object_open_opts, opts);
int err = 0, prog_cnt = 0;
@@ -2222,51 +2235,24 @@ static int process_obj(const char *filename)
env.files_processed++;
bpf_object__for_each_program(prog, obj) {
+ bpf_program__set_autoload(prog, true);
prog_cnt++;
}
- if (prog_cnt == 1) {
- prog = bpf_object__next_program(obj, NULL);
- bpf_program__set_autoload(prog, true);
- err = set_global_vars(obj, env.presets, env.npresets);
- if (err) {
- fprintf(stderr, "Failed to set global variables %d\n", err);
- goto cleanup;
- }
- process_prog(filename, obj, prog);
+ fixup_obj_maps(obj);
+
+ err = set_global_vars(obj, env.presets, env.npresets);
+ if (err) {
+ fprintf(stderr, "Failed to set global variables %d\n", err);
goto cleanup;
}
- bpf_object__for_each_program(prog, obj) {
- const char *prog_name = bpf_program__name(prog);
-
- tobj = bpf_object__open_file(filename, &opts);
- if (!tobj) {
- err = -errno;
- fprintf(stderr, "Failed to open '%s': %d\n", filename, err);
- goto cleanup;
- }
-
- err = set_global_vars(tobj, env.presets, env.npresets);
- if (err) {
- fprintf(stderr, "Failed to set global variables %d\n", err);
- goto cleanup;
- }
-
- lprog = NULL;
- bpf_object__for_each_program(tprog, tobj) {
- const char *tprog_name = bpf_program__name(tprog);
-
- if (strcmp(prog_name, tprog_name) == 0) {
- bpf_program__set_autoload(tprog, true);
- lprog = tprog;
- } else {
- bpf_program__set_autoload(tprog, false);
- }
- }
+ err = bpf_object__prepare(obj);
+ if (err && env.verbose) /* run process_prog() anyway to output per-program failures */
+ fprintf(stderr, "Failed to prepare BPF object for loading %d\n", err);
- process_prog(filename, tobj, lprog);
- bpf_object__close(tobj);
+ bpf_object__for_each_program(prog, obj) {
+ process_prog(filename, obj, prog);
}
cleanup:
@@ -3264,17 +3250,14 @@ static int handle_verif_mode(void)
create_stat_cgroup();
for (i = 0; i < env.filename_cnt; i++) {
err = process_obj(env.filenames[i]);
- if (err) {
+ if (err)
fprintf(stderr, "Failed to process '%s': %d\n", env.filenames[i], err);
- goto out;
- }
}
qsort(env.prog_stats, env.prog_stat_cnt, sizeof(*env.prog_stats), cmp_prog_stats);
output_prog_stats();
-out:
destroy_stat_cgroup();
return err;
}