| author | Alexei Starovoitov <ast@kernel.org> | 2026-04-03 18:33:48 +0300 |
|---|---|---|
| committer | Alexei Starovoitov <ast@kernel.org> | 2026-04-03 18:34:47 +0300 |
| commit | 6a14beefab457f267b8cedc6ac697a9562ec1244 (patch) | |
| tree | 91b4f07c35046a6a73a53505555c2799beab5fcd /include/linux | |
| parent | 891a05ccba927050cee17eb90c74692fe083ddaf (diff) | |
| parent | 1a1cadbd5d50b31ae1340c2a9938947719696ca0 (diff) | |
| download | linux-6a14beefab457f267b8cedc6ac697a9562ec1244.tar.xz | |
Merge branch 'bpf-prep-patches-for-static-stack-liveness'
Alexei Starovoitov says:
====================
bpf: Prep patches for static stack liveness.
v4->v5:
- minor test fixup
v3->v4:
- fixed invalid recursion detection when a callback is called multiple times
v3: https://lore.kernel.org/bpf/20260402212856.86606-1-alexei.starovoitov@gmail.com/
v2->v3:
- added recursive call detection
- fixed ubsan warning
- removed double declaration in the header
- added Acks
v2: https://lore.kernel.org/bpf/20260402061744.10885-1-alexei.starovoitov@gmail.com/
v1->v2:
. fixed bugs spotted by Eduard, Mykyta, claude and gemini
. fixed selftests that were failing in unpriv
. gemini (sashiko) found several precision improvements in patch 6,
but they made no difference in real programs.
v1: https://lore.kernel.org/bpf/20260401021635.34636-1-alexei.starovoitov@gmail.com/
First 6 prep patches for static stack liveness.
. Do src/dst_reg validation early and remove defensive checks.
. Sort subprogs in topological order. We have long wanted to do this,
to process global subprogs this way and in other cases.
. Add a constant folding pass that computes map_ptr, subprog_idx,
loads from readonly maps, and other constants that fit into 32 bits.
. Use these constants to eliminate dead code: replace predicted
conditional branches with "jmp always", which reduces JIT prog size
(a sketch of the idea follows below).
. Add two helpers that return the stack access size implied by their
arguments.
====================
Link: https://patch.msgid.link/20260403024422.87231-1-alexei.starovoitov@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
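For illustration, here is a minimal sketch of the dead-branch folding idea
from the cover letter. It assumes the const_reg_* fields added below in
bpf_insn_aux_data; the in-tree passes are bpf_compute_const_regs() and
bpf_prune_dead_branches(), while fold_cond_jmp() here is a hypothetical,
simplified stand-in that handles only BPF_JEQ:

#include <linux/bpf_verifier.h>
#include <linux/filter.h>

/* Hypothetical, simplified stand-in for bpf_prune_dead_branches(): if both
 * operands of a conditional jump are known 32-bit constants in this insn's
 * aux data, the outcome is decided at verification time and the insn can be
 * rewritten as "jmp always" (taken) or a fall-through jump (not taken),
 * which shrinks the JITed program. Only BPF_JEQ is handled here; a real
 * pass must also model 64- vs 32-bit compare width and imm sign extension.
 */
static bool fold_cond_jmp(struct bpf_insn *insn,
                          const struct bpf_insn_aux_data *aux)
{
        u32 dval, sval;

        if (!bpf_insn_is_cond_jump(insn->code) ||
            BPF_OP(insn->code) != BPF_JEQ)
                return false;
        if (!(aux->const_reg_mask & BIT(insn->dst_reg)))
                return false;
        dval = aux->const_reg_vals[insn->dst_reg];
        if (BPF_SRC(insn->code) == BPF_K) {
                sval = insn->imm;
        } else {
                if (!(aux->const_reg_mask & BIT(insn->src_reg)))
                        return false;
                sval = aux->const_reg_vals[insn->src_reg];
        }
        /* branch is now unconditional: either always taken or never taken */
        *insn = BPF_JMP_A(dval == sval ? insn->off : 0);
        return true;
}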
Diffstat (limited to 'include/linux')
| -rw-r--r-- | include/linux/bpf_verifier.h | 59 |
1 file changed, 59 insertions(+), 0 deletions(-)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index b129e0aaee20..36bfd96d4563 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -595,6 +595,18 @@ struct bpf_insn_aux_data {
 	u32 scc;
 	/* registers alive before this instruction. */
 	u16 live_regs_before;
+	/*
+	 * Bitmask of R0-R9 that hold known values at this instruction.
+	 * const_reg_mask: scalar constants that fit in 32 bits.
+	 * const_reg_map_mask: map pointers, val is map_index into used_maps[].
+	 * const_reg_subprog_mask: subprog pointers, val is subprog number.
+	 * const_reg_vals[i] holds the 32-bit value for register i.
+	 * Populated by compute_const_regs() pre-pass.
+	 */
+	u16 const_reg_mask;
+	u16 const_reg_map_mask;
+	u16 const_reg_subprog_mask;
+	u32 const_reg_vals[10];
 };
 
 #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
@@ -787,6 +799,8 @@ struct bpf_verifier_env {
 	const struct bpf_line_info *prev_linfo;
 	struct bpf_verifier_log log;
 	struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 2]; /* max + 2 for the fake and exception subprogs */
+	/* subprog indices sorted in topological order: leaves first, callers last */
+	int subprog_topo_order[BPF_MAX_SUBPROGS + 2];
 	union {
 		struct bpf_idmap idmap_scratch;
 		struct bpf_idset idset_scratch;
@@ -865,6 +879,30 @@ static inline struct bpf_subprog_info *subprog_info(struct bpf_verifier_env *env
 	return &env->subprog_info[subprog];
 }
 
+struct bpf_call_summary {
+	u8 num_params;
+	bool is_void;
+	bool fastcall;
+};
+
+static inline bool bpf_helper_call(const struct bpf_insn *insn)
+{
+	return insn->code == (BPF_JMP | BPF_CALL) &&
+	       insn->src_reg == 0;
+}
+
+static inline bool bpf_pseudo_call(const struct bpf_insn *insn)
+{
+	return insn->code == (BPF_JMP | BPF_CALL) &&
+	       insn->src_reg == BPF_PSEUDO_CALL;
+}
+
+static inline bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn)
+{
+	return insn->code == (BPF_JMP | BPF_CALL) &&
+	       insn->src_reg == BPF_PSEUDO_KFUNC_CALL;
+}
+
 __printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
 				      const char *fmt, va_list args);
 __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
@@ -943,6 +981,10 @@ void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);
 
 int mark_chain_precision(struct bpf_verifier_env *env, int regno);
 
+bool bpf_map_is_rdonly(const struct bpf_map *map);
+int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val,
+			bool is_ldsx);
+
 #define BPF_BASE_TYPE_MASK	GENMASK(BPF_BASE_TYPE_BITS - 1, 0)
 
 /* extract base type from bpf_{arg, return, reg}_type. */
@@ -1086,6 +1128,23 @@ struct bpf_iarray *bpf_insn_successors(struct bpf_verifier_env *env, u32 idx);
 void bpf_fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask);
 bool bpf_calls_callback(struct bpf_verifier_env *env, int insn_idx);
 
+int bpf_find_subprog(struct bpf_verifier_env *env, int off);
+int bpf_compute_const_regs(struct bpf_verifier_env *env);
+int bpf_prune_dead_branches(struct bpf_verifier_env *env);
+int bpf_compute_postorder(struct bpf_verifier_env *env);
+bool bpf_insn_is_cond_jump(u8 code);
+bool bpf_is_may_goto_insn(struct bpf_insn *insn);
+
+void bpf_verbose_insn(struct bpf_verifier_env *env, struct bpf_insn *insn);
+bool bpf_get_call_summary(struct bpf_verifier_env *env, struct bpf_insn *call,
+			  struct bpf_call_summary *cs);
+s64 bpf_helper_stack_access_bytes(struct bpf_verifier_env *env,
+				  struct bpf_insn *insn, int arg,
+				  int insn_idx);
+s64 bpf_kfunc_stack_access_bytes(struct bpf_verifier_env *env,
+				 struct bpf_insn *insn, int arg,
+				 int insn_idx);
+
 int bpf_stack_liveness_init(struct bpf_verifier_env *env);
 void bpf_stack_liveness_free(struct bpf_verifier_env *env);
 int bpf_update_live_stack(struct bpf_verifier_env *env);
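To make the new call-classification helpers concrete, a brief usage sketch;
the describe_call() function and its log strings are illustrative only, not
part of the series:

#include <linux/bpf_verifier.h>

/* Illustrative only: classify a call insn with the inline helpers added
 * above, then ask bpf_get_call_summary() how many argument registers it
 * consumes and whether it returns a value in r0.
 */
static void describe_call(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
        struct bpf_call_summary cs;

        if (bpf_pseudo_call(insn))
                verbose(env, "bpf-to-bpf subprog call\n");
        else if (bpf_pseudo_kfunc_call(insn))
                verbose(env, "call to kernel func (kfunc) %d\n", insn->imm);
        else if (bpf_helper_call(insn))
                verbose(env, "call to helper %d\n", insn->imm);

        if (bpf_get_call_summary(env, insn, &cs))
                verbose(env, "%d params, %s\n", cs.num_params,
                        cs.is_void ? "void" : "returns in r0");
}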

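Finally, a sketch of how a pass might walk subprog_topo_order: per the
comment in the diff it is filled leaves first and callers last, so every
callee is visited before its callers. The for_each_subprog_topo() wrapper
below is hypothetical:

#include <linux/bpf_verifier.h>

/* Hypothetical consumer of subprog_topo_order: because subprogs are sorted
 * leaves first and callers last, any per-subprog summary (e.g. stack depth)
 * computed for a callee is ready by the time its caller is processed.
 */
static int for_each_subprog_topo(struct bpf_verifier_env *env,
                                 int (*process)(struct bpf_verifier_env *env,
                                                int subprog))
{
        u32 i;
        int err;

        for (i = 0; i < env->subprog_cnt; i++) {
                err = process(env, env->subprog_topo_order[i]);
                if (err)
                        return err;
        }
        return 0;
}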