diff options
| author | Alexei Starovoitov <ast@kernel.org> | 2026-04-03 05:44:19 +0300 |
|---|---|---|
| committer | Alexei Starovoitov <ast@kernel.org> | 2026-04-03 18:34:36 +0300 |
| commit | f1606dd0ac49230f5a5fa1a279210fdf0249c20f (patch) | |
| tree | 4544482bf2f4193f74df6b70ebec9d28a982c82d /tools/testing | |
| parent | 427c07ddb9e63dc96488bbf51bb196e7aca19825 (diff) | |
| download | linux-f1606dd0ac49230f5a5fa1a279210fdf0249c20f.tar.xz | |
bpf: Add bpf_compute_const_regs() and bpf_prune_dead_branches() passes
Add two passes before the main verifier pass:
bpf_compute_const_regs() is a forward dataflow analysis that tracks
register values in R0-R9 across the program using fixed-point
iteration in reverse postorder. Each register is tracked with
a six-state lattice:
UNVISITED -> CONST(val) / MAP_PTR(map_index) /
MAP_VALUE(map_index, offset) / SUBPROG(num) -> UNKNOWN
At merge points, if two paths produce the same state and value for
a register, it stays; otherwise it becomes UNKNOWN.
The analysis handles:
- MOV, ADD, SUB, AND with immediate or register operands
- LD_IMM64 for plain constants, map FDs, map values, and subprogs
- LDX from read-only maps: constant-folds the load by reading the
map value directly via bpf_map_direct_read()
Results that fit in 32 bits are stored per-instruction in
insn_aux_data and bitmasks.
bpf_prune_dead_branches() uses the computed constants to evaluate
conditional branches. When both operands of a conditional jump are
known constants, the branch outcome is determined statically and the
instruction is rewritten to an unconditional jump.
The CFG postorder is then recomputed to reflect new control flow.
This eliminates dead edges so that subsequent liveness analysis
doesn't propagate through dead code.
Also add a runtime sanity check to validate that the precomputed
constants match the verifier's tracked state.
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20260403024422.87231-5-alexei.starovoitov@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'tools/testing')
| -rw-r--r-- | tools/testing/selftests/bpf/progs/verifier_scalar_ids.c | 20 | ||||
| -rw-r--r-- | tools/testing/selftests/bpf/progs/verifier_unpriv.c | 6 |
2 files changed, 18 insertions, 8 deletions
diff --git a/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c index 58c7704d61cd..a5b8753ce52c 100644 --- a/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c +++ b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c @@ -592,10 +592,10 @@ __naked void check_ids_in_regsafe_2(void) */ SEC("socket") __success __log_level(2) -__msg("11: (1d) if r3 == r4 goto pc+0") +__msg("14: (1d) if r3 == r4 goto pc+0") __msg("frame 0: propagating r3,r4") -__msg("11: safe") -__msg("processed 15 insns") +__msg("14: safe") +__msg("processed 18 insns") __flag(BPF_F_TEST_STATE_FREQ) __naked void no_scalar_id_for_const(void) { @@ -605,6 +605,7 @@ __naked void no_scalar_id_for_const(void) "if r0 > 7 goto l0_%=;" /* possibly generate same scalar ids for r3 and r4 */ "r1 = 0;" + "r1 ^= r1;" /* prevent bpf_prune_dead_branches from folding the branch */ "r1 = r1;" "r3 = r1;" "r4 = r1;" @@ -612,7 +613,9 @@ __naked void no_scalar_id_for_const(void) "l0_%=:" /* possibly generate different scalar ids for r3 and r4 */ "r1 = 0;" + "r1 ^= r1;" "r2 = 0;" + "r2 ^= r2;" "r3 = r1;" "r4 = r2;" "l1_%=:" @@ -628,10 +631,10 @@ __naked void no_scalar_id_for_const(void) /* Same as no_scalar_id_for_const() but for 32-bit values */ SEC("socket") __success __log_level(2) -__msg("11: (1e) if w3 == w4 goto pc+0") +__msg("14: (1e) if w3 == w4 goto pc+0") __msg("frame 0: propagating r3,r4") -__msg("11: safe") -__msg("processed 15 insns") +__msg("14: safe") +__msg("processed 18 insns") __flag(BPF_F_TEST_STATE_FREQ) __naked void no_scalar_id_for_const32(void) { @@ -641,6 +644,7 @@ __naked void no_scalar_id_for_const32(void) "if r0 > 7 goto l0_%=;" /* possibly generate same scalar ids for r3 and r4 */ "w1 = 0;" + "w1 ^= w1;" /* prevent bpf_prune_dead_branches from folding the branch */ "w1 = w1;" "w3 = w1;" "w4 = w1;" @@ -648,11 +652,13 @@ __naked void no_scalar_id_for_const32(void) "l0_%=:" /* possibly generate different scalar ids for r3 and r4 
*/ "w1 = 0;" + "w1 ^= w1;" "w2 = 0;" + "w2 ^= w2;" "w3 = w1;" "w4 = w2;" "l1_%=:" - /* predictable jump, marks r1 and r2 precise */ + /* predictable jump, marks r3 and r4 precise */ "if w3 == w4 goto +0;" "r0 = 0;" "exit;" diff --git a/tools/testing/selftests/bpf/progs/verifier_unpriv.c b/tools/testing/selftests/bpf/progs/verifier_unpriv.c index 8ee1243e62a8..c16f8382cf17 100644 --- a/tools/testing/selftests/bpf/progs/verifier_unpriv.c +++ b/tools/testing/selftests/bpf/progs/verifier_unpriv.c @@ -584,7 +584,7 @@ __naked void alu32_mov_u32_const(void) { asm volatile (" \ w7 = 0; \ - w7 &= 1; \ + w7 ^= w7; \ w0 = w7; \ if r0 == 0 goto l0_%=; \ r0 = *(u64*)(r7 + 0); \ @@ -894,7 +894,9 @@ __naked void unpriv_spectre_v1_and_v4_simple(void) { asm volatile (" \ r8 = 0; \ + r8 ^= r8; \ r9 = 0; \ + r9 ^= r9; \ r0 = r10; \ r1 = 0; \ r2 = r10; \ @@ -932,7 +934,9 @@ __naked void unpriv_ldimm64_spectre_v1_and_v4_simple(void) { asm volatile (" \ r8 = 0; \ + r8 ^= r8; \ r9 = 0; \ + r9 ^= r9; \ r0 = r10; \ r1 = 0; \ r2 = r10; \ |
