summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAlexei Starovoitov <ast@kernel.org>2026-04-03 05:44:17 +0300
committerAlexei Starovoitov <ast@kernel.org>2026-04-03 18:34:30 +0300
commite6898ec751e4d8577b210f8e816ea9f8c2a7158a (patch)
tree8b17f798fa4f8b5580436b6fc13943f0fb682af9
parent503d21ef8eac1437d76919921115acf0aef328a0 (diff)
downloadlinux-e6898ec751e4d8577b210f8e816ea9f8c2a7158a.tar.xz
bpf: Sort subprogs in topological order after check_cfg()
Add a pass that sorts subprogs in topological order so that iterating subprog_topo_order[] walks leaf subprogs first, then their callers. This is computed as a DFS post-order traversal of the CFG. The pass runs after check_cfg() to ensure the CFG has been validated before traversing and after postorder has been computed to avoid walking dead code.

Reviewed-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20260403024422.87231-3-alexei.starovoitov@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
-rw-r--r--include/linux/bpf_verifier.h2
-rw-r--r--kernel/bpf/verifier.c92
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_loops1.c3
-rw-r--r--tools/testing/selftests/bpf/verifier/calls.c6
4 files changed, 98 insertions, 5 deletions
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index b129e0aaee20..d21541f96ee9 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -787,6 +787,8 @@ struct bpf_verifier_env {
const struct bpf_line_info *prev_linfo;
struct bpf_verifier_log log;
struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 2]; /* max + 2 for the fake and exception subprogs */
+ /* subprog indices sorted in topological order: leaves first, callers last */
+ int subprog_topo_order[BPF_MAX_SUBPROGS + 2];
union {
struct bpf_idmap idmap_scratch;
struct bpf_idset idset_scratch;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 9de49d43c21d..f457235c874c 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3770,6 +3770,94 @@ next:
return 0;
}
+/*
+ * Sort subprogs in topological order so that leaf subprogs come first and
+ * their callers come later. This is a DFS post-order traversal of the call
+ * graph. Scan only reachable instructions (those in the computed postorder) of
+ * the current subprog to discover callees (direct subprogs and sync
+ * callbacks).
+ */
+static int sort_subprogs_topo(struct bpf_verifier_env *env)
+{
+ struct bpf_subprog_info *si = env->subprog_info;
+ int *insn_postorder = env->cfg.insn_postorder;
+ struct bpf_insn *insn = env->prog->insnsi;
+ int cnt = env->subprog_cnt;
+ int *dfs_stack = NULL;
+ int top = 0, order = 0;
+ int i, ret = 0;
+ u8 *color = NULL;
+
+ color = kvzalloc_objs(*color, cnt, GFP_KERNEL_ACCOUNT);
+ dfs_stack = kvmalloc_objs(*dfs_stack, cnt, GFP_KERNEL_ACCOUNT);
+ if (!color || !dfs_stack) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * DFS post-order traversal.
+ * Color values: 0 = unvisited, 1 = on stack, 2 = done.
+ */
+ for (i = 0; i < cnt; i++) {
+ if (color[i])
+ continue;
+ color[i] = 1;
+ dfs_stack[top++] = i;
+
+ while (top > 0) {
+ int cur = dfs_stack[top - 1];
+ /*
+ * Limit the scan to the reachable instructions of 'cur',
+ * i.e. its slice [postorder_start, next subprog's
+ * postorder_start) of the per-insn postorder array, so
+ * dead code is never walked.
+ */
+ int po_start = si[cur].postorder_start;
+ int po_end = si[cur + 1].postorder_start;
+ bool pushed = false;
+ int j;
+
+ /*
+ * Re-scan 'cur' each time it is on top of the stack and
+ * descend into the first not-yet-finished callee found;
+ * callees come from direct calls (pseudo_call) and
+ * callback address loads (pseudo_func).
+ */
+ for (j = po_start; j < po_end; j++) {
+ int idx = insn_postorder[j];
+ int callee;
+
+ if (!bpf_pseudo_call(&insn[idx]) && !bpf_pseudo_func(&insn[idx]))
+ continue;
+ callee = find_subprog(env, idx + insn[idx].imm + 1);
+ if (callee < 0) {
+ /* check_cfg() already validated call targets; a miss here is a verifier bug */
+ ret = -EFAULT;
+ goto out;
+ }
+ if (color[callee] == 2)
+ continue;
+ if (color[callee] == 1) {
+ /*
+ * A back-edge via a callback address load is
+ * tolerated (presumably because it is not a
+ * direct call cycle — TODO confirm); only a
+ * direct call to a subprog still on the DFS
+ * stack is rejected as recursion.
+ */
+ if (bpf_pseudo_func(&insn[idx]))
+ continue;
+ verbose(env, "recursive call from %s() to %s()\n",
+ subprog_name(env, cur),
+ subprog_name(env, callee));
+ ret = -EINVAL;
+ goto out;
+ }
+ color[callee] = 1;
+ dfs_stack[top++] = callee;
+ pushed = true;
+ break;
+ }
+
+ /*
+ * All callees of 'cur' are done: emit it in post-order,
+ * so leaves land before their callers.
+ */
+ if (!pushed) {
+ color[cur] = 2;
+ env->subprog_topo_order[order++] = cur;
+ top--;
+ }
+ }
+ }
+
+ if (env->log.level & BPF_LOG_LEVEL2)
+ for (i = 0; i < cnt; i++)
+ verbose(env, "topo_order[%d] = %s\n",
+ i, subprog_name(env, env->subprog_topo_order[i]));
+out:
+ kvfree(dfs_stack);
+ kvfree(color);
+ return ret;
+}
+
static int mark_stack_slot_obj_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
int spi, int nr_slots)
{
@@ -26320,6 +26408,10 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
if (ret)
goto skip_full_check;
+ ret = sort_subprogs_topo(env);
+ if (ret < 0)
+ goto skip_full_check;
+
ret = compute_scc(env);
if (ret < 0)
goto skip_full_check;
diff --git a/tools/testing/selftests/bpf/progs/verifier_loops1.c b/tools/testing/selftests/bpf/progs/verifier_loops1.c
index fbdde80e7b90..d248ce877f14 100644
--- a/tools/testing/selftests/bpf/progs/verifier_loops1.c
+++ b/tools/testing/selftests/bpf/progs/verifier_loops1.c
@@ -138,8 +138,7 @@ l0_%=: exit; \
SEC("tracepoint")
__description("bounded recursion")
__failure
-/* verifier limitation in detecting max stack depth */
-__msg("the call stack of 8 frames is too deep !")
+__msg("recursive call from")
__naked void bounded_recursion(void)
{
asm volatile (" \
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
index 29e57f0e56c3..c3164b9b2be5 100644
--- a/tools/testing/selftests/bpf/verifier/calls.c
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -455,7 +455,7 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "the call stack of 9 frames is too deep",
+ .errstr = "recursive call",
.result = REJECT,
},
{
@@ -812,7 +812,7 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "the call stack of 9 frames is too deep",
+ .errstr = "recursive call",
.result = REJECT,
},
{
@@ -824,7 +824,7 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "the call stack of 9 frames is too deep",
+ .errstr = "recursive call",
.result = REJECT,
},
{