summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAlexei Starovoitov <ast@kernel.org>2026-04-03 05:44:20 +0300
committerAlexei Starovoitov <ast@kernel.org>2026-04-03 18:34:41 +0300
commit19dbb1347481105e8aabc7479af35c09a65333a9 (patch)
tree4eb498bd81ac829d8da02cc240cddfcb7dde2ae7
parentf1606dd0ac49230f5a5fa1a279210fdf0249c20f (diff)
downloadlinux-19dbb1347481105e8aabc7479af35c09a65333a9.tar.xz
bpf: Move verifier helpers to header
Move several helpers to header as preparation for the subsequent stack liveness patches.

Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20260403024422.87231-6-alexei.starovoitov@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
-rw-r--r--include/linux/bpf_verifier.h28
-rw-r--r--kernel/bpf/verifier.c44
2 files changed, 38 insertions(+), 34 deletions(-)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index c5e65cdb6328..7bd32a8a45f6 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -879,6 +879,30 @@ static inline struct bpf_subprog_info *subprog_info(struct bpf_verifier_env *env
return &env->subprog_info[subprog];
}
+struct bpf_call_summary {
+ u8 num_params;
+ bool is_void;
+ bool fastcall;
+};
+
+static inline bool bpf_helper_call(const struct bpf_insn *insn)
+{
+ return insn->code == (BPF_JMP | BPF_CALL) &&
+ insn->src_reg == 0;
+}
+
+static inline bool bpf_pseudo_call(const struct bpf_insn *insn)
+{
+ return insn->code == (BPF_JMP | BPF_CALL) &&
+ insn->src_reg == BPF_PSEUDO_CALL;
+}
+
+static inline bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn)
+{
+ return insn->code == (BPF_JMP | BPF_CALL) &&
+ insn->src_reg == BPF_PSEUDO_KFUNC_CALL;
+}
+
__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
@@ -1111,6 +1135,10 @@ int bpf_compute_postorder(struct bpf_verifier_env *env);
bool bpf_insn_is_cond_jump(u8 code);
bool bpf_is_may_goto_insn(struct bpf_insn *insn);
+void bpf_verbose_insn(struct bpf_verifier_env *env, struct bpf_insn *insn);
+bool bpf_get_call_summary(struct bpf_verifier_env *env, struct bpf_insn *call,
+ struct bpf_call_summary *cs);
+
int bpf_stack_liveness_init(struct bpf_verifier_env *env);
void bpf_stack_liveness_free(struct bpf_verifier_env *env);
int bpf_update_live_stack(struct bpf_verifier_env *env);
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 8d9f7e4574ec..7d4d0f7e2ca1 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -256,24 +256,6 @@ static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
(poisoned ? BPF_MAP_KEY_POISON : 0ULL);
}
-static bool bpf_helper_call(const struct bpf_insn *insn)
-{
- return insn->code == (BPF_JMP | BPF_CALL) &&
- insn->src_reg == 0;
-}
-
-static bool bpf_pseudo_call(const struct bpf_insn *insn)
-{
- return insn->code == (BPF_JMP | BPF_CALL) &&
- insn->src_reg == BPF_PSEUDO_CALL;
-}
-
-static bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn)
-{
- return insn->code == (BPF_JMP | BPF_CALL) &&
- insn->src_reg == BPF_PSEUDO_KFUNC_CALL;
-}
-
struct bpf_map_desc {
struct bpf_map *ptr;
int uid;
@@ -4297,7 +4279,7 @@ static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn)
return btf_name_by_offset(desc_btf, func->name_off);
}
-static void verbose_insn(struct bpf_verifier_env *env, struct bpf_insn *insn)
+void bpf_verbose_insn(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
const struct bpf_insn_cbs cbs = {
.cb_call = disasm_kfunc_name,
@@ -4521,7 +4503,7 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
bpf_fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, bt_stack_mask(bt));
verbose(env, "stack=%s before ", env->tmp_str_buf);
verbose(env, "%d: ", idx);
- verbose_insn(env, insn);
+ bpf_verbose_insn(env, insn);
}
/* If there is a history record that some registers gained range at this insn,
@@ -18582,17 +18564,11 @@ static bool verifier_inlines_helper_call(struct bpf_verifier_env *env, s32 imm)
}
}
-struct call_summary {
- u8 num_params;
- bool is_void;
- bool fastcall;
-};
-
/* If @call is a kfunc or helper call, fills @cs and returns true,
* otherwise returns false.
*/
-static bool get_call_summary(struct bpf_verifier_env *env, struct bpf_insn *call,
- struct call_summary *cs)
+bool bpf_get_call_summary(struct bpf_verifier_env *env, struct bpf_insn *call,
+ struct bpf_call_summary *cs)
{
struct bpf_kfunc_call_arg_meta meta;
const struct bpf_func_proto *fn;
@@ -18713,12 +18689,12 @@ static void mark_fastcall_pattern_for_call(struct bpf_verifier_env *env,
struct bpf_insn *insns = env->prog->insnsi, *stx, *ldx;
struct bpf_insn *call = &env->prog->insnsi[insn_idx];
u32 clobbered_regs_mask;
- struct call_summary cs;
+ struct bpf_call_summary cs;
u32 expected_regs_mask;
s16 off;
int i;
- if (!get_call_summary(env, call, &cs))
+ if (!bpf_get_call_summary(env, call, &cs))
return;
/* A bitmask specifying which caller saved registers are clobbered
@@ -21578,7 +21554,7 @@ static int do_check(struct bpf_verifier_env *env)
verbose_linfo(env, env->insn_idx, "; ");
env->prev_log_pos = env->log.end_pos;
verbose(env, "%d: ", env->insn_idx);
- verbose_insn(env, insn);
+ bpf_verbose_insn(env, insn);
env->prev_insn_print_pos = env->log.end_pos - env->prev_log_pos;
env->prev_log_pos = env->log.end_pos;
}
@@ -25885,7 +25861,7 @@ static void compute_insn_live_regs(struct bpf_verifier_env *env,
struct bpf_insn *insn,
struct insn_live_regs *info)
{
- struct call_summary cs;
+ struct bpf_call_summary cs;
u8 class = BPF_CLASS(insn->code);
u8 code = BPF_OP(insn->code);
u8 mode = BPF_MODE(insn->code);
@@ -26000,7 +25976,7 @@ static void compute_insn_live_regs(struct bpf_verifier_env *env,
case BPF_CALL:
def = ALL_CALLER_SAVED_REGS;
use = def & ~BIT(BPF_REG_0);
- if (get_call_summary(env, insn, &cs))
+ if (bpf_get_call_summary(env, insn, &cs))
use = GENMASK(cs.num_params, 1);
break;
default:
@@ -26100,7 +26076,7 @@ static int compute_live_registers(struct bpf_verifier_env *env)
else
verbose(env, ".");
verbose(env, " ");
- verbose_insn(env, &insns[i]);
+ bpf_verbose_insn(env, &insns[i]);
if (bpf_is_ldimm64(&insns[i]))
i++;
}