summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--   include/linux/bpf_verifier.h                              |  1
-rw-r--r--   kernel/bpf/log.c                                          |  5
-rw-r--r--   kernel/bpf/verifier.c                                     | 89
-rw-r--r--   tools/testing/selftests/bpf/progs/verifier_spill_fill.c   |  2
4 files changed, 70 insertions(+), 27 deletions(-)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index d3dc46aae2e7..05b9fe98b8f8 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -220,6 +220,7 @@ enum bpf_stack_slot_type {
STACK_DYNPTR,
STACK_ITER,
STACK_IRQ_FLAG,
+ STACK_POISON,
};
#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
diff --git a/kernel/bpf/log.c b/kernel/bpf/log.c
index 803f21e61d92..011e4ec25acd 100644
--- a/kernel/bpf/log.c
+++ b/kernel/bpf/log.c
@@ -501,7 +501,8 @@ static char slot_type_char[] = {
[STACK_ZERO] = '0',
[STACK_DYNPTR] = 'd',
[STACK_ITER] = 'i',
- [STACK_IRQ_FLAG] = 'f'
+ [STACK_IRQ_FLAG] = 'f',
+ [STACK_POISON] = 'p',
};
#define UNUM_MAX_DECIMAL U16_MAX
@@ -738,7 +739,7 @@ void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_verifie
for (j = 0; j < BPF_REG_SIZE; j++) {
slot_type = state->stack[i].slot_type[j];
- if (slot_type != STACK_INVALID)
+ if (slot_type != STACK_INVALID && slot_type != STACK_POISON)
valid = true;
types_buf[j] = slot_type_char[slot_type];
}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 36e697e29e44..566311dd4fba 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1348,6 +1348,7 @@ static bool is_stack_slot_special(const struct bpf_stack_state *stack)
case STACK_IRQ_FLAG:
return true;
case STACK_INVALID:
+ case STACK_POISON:
case STACK_MISC:
case STACK_ZERO:
return false;
@@ -1377,9 +1378,11 @@ static bool is_spilled_scalar_after(const struct bpf_stack_state *stack, int im)
stack->spilled_ptr.type == SCALAR_VALUE;
}
-/* Mark stack slot as STACK_MISC, unless it is already STACK_INVALID, in which
- * case they are equivalent, or it's STACK_ZERO, in which case we preserve
- * more precise STACK_ZERO.
+/*
+ * Mark stack slot as STACK_MISC, unless it is already:
+ * - STACK_INVALID, in which case they are equivalent.
+ * - STACK_ZERO, in which case we preserve more precise STACK_ZERO.
+ * - STACK_POISON, which truly forbids access to the slot.
* Regardless of allow_ptr_leaks setting (i.e., privileged or unprivileged
* mode), we won't promote STACK_INVALID to STACK_MISC. In privileged case it is
* unnecessary as both are considered equivalent when loading data and pruning,
@@ -1390,14 +1393,14 @@ static void mark_stack_slot_misc(struct bpf_verifier_env *env, u8 *stype)
{
if (*stype == STACK_ZERO)
return;
- if (*stype == STACK_INVALID)
+ if (*stype == STACK_INVALID || *stype == STACK_POISON)
return;
*stype = STACK_MISC;
}
static void scrub_spilled_slot(u8 *stype)
{
- if (*stype != STACK_INVALID)
+ if (*stype != STACK_INVALID && *stype != STACK_POISON)
*stype = STACK_MISC;
}
@@ -5586,8 +5589,10 @@ static int check_stack_write_var_off(struct bpf_verifier_env *env,
* For privileged programs, we will accept such reads to slots
* that may or may not be written because, if we're rejecting
* them, the error would be too confusing.
+ * Conservatively, treat STACK_POISON in a similar way.
*/
- if (*stype == STACK_INVALID && !env->allow_uninit_stack) {
+ if ((*stype == STACK_INVALID || *stype == STACK_POISON) &&
+ !env->allow_uninit_stack) {
verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d",
insn_idx, i);
return -EINVAL;
@@ -5723,8 +5728,13 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
}
if (type == STACK_INVALID && env->allow_uninit_stack)
continue;
- verbose(env, "invalid read from stack off %d+%d size %d\n",
- off, i, size);
+ if (type == STACK_POISON) {
+ verbose(env, "reading from stack off %d+%d size %d, slot poisoned by dead code elimination\n",
+ off, i, size);
+ } else {
+ verbose(env, "invalid read from stack off %d+%d size %d\n",
+ off, i, size);
+ }
return -EACCES;
}
@@ -5773,8 +5783,13 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
continue;
if (type == STACK_INVALID && env->allow_uninit_stack)
continue;
- verbose(env, "invalid read from stack off %d+%d size %d\n",
- off, i, size);
+ if (type == STACK_POISON) {
+ verbose(env, "reading from stack off %d+%d size %d, slot poisoned by dead code elimination\n",
+ off, i, size);
+ } else {
+ verbose(env, "invalid read from stack off %d+%d size %d\n",
+ off, i, size);
+ }
return -EACCES;
}
if (dst_regno >= 0)
@@ -8377,16 +8392,22 @@ static int check_stack_range_initialized(
/* Some accesses can write anything into the stack, others are
* read-only.
*/
- bool clobber = false;
+ bool clobber = type == BPF_WRITE;
+ /*
+ * Negative access_size signals global subprog/kfunc arg check where
+ * STACK_POISON slots are acceptable. Static stack liveness
+ * might have determined that subprog doesn't read them,
+ * but BTF based global subprog validation isn't accurate enough.
+ */
+ bool allow_poison = access_size < 0 || clobber;
+
+ access_size = abs(access_size);
if (access_size == 0 && !zero_size_allowed) {
verbose(env, "invalid zero-sized read\n");
return -EACCES;
}
- if (type == BPF_WRITE)
- clobber = true;
-
err = check_stack_access_within_bounds(env, regno, off, access_size, type);
if (err)
return err;
@@ -8485,7 +8506,12 @@ static int check_stack_range_initialized(
goto mark;
}
- if (tnum_is_const(reg->var_off)) {
+ if (*stype == STACK_POISON) {
+ if (allow_poison)
+ goto mark;
+ verbose(env, "reading from stack R%d off %d+%d size %d, slot poisoned by dead code elimination\n",
+ regno, min_off, i - min_off, access_size);
+ } else if (tnum_is_const(reg->var_off)) {
verbose(env, "invalid read from stack R%d off %d+%d size %d\n",
regno, min_off, i - min_off, access_size);
} else {
@@ -8662,8 +8688,10 @@ static int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg
mark_ptr_not_null_reg(reg);
}
- err = check_helper_mem_access(env, regno, mem_size, BPF_READ, true, NULL);
- err = err ?: check_helper_mem_access(env, regno, mem_size, BPF_WRITE, true, NULL);
+ int size = base_type(reg->type) == PTR_TO_STACK ? -(int)mem_size : mem_size;
+
+ err = check_helper_mem_access(env, regno, size, BPF_READ, true, NULL);
+ err = err ?: check_helper_mem_access(env, regno, size, BPF_WRITE, true, NULL);
if (may_be_null)
*reg = saved_reg;
@@ -20183,7 +20211,7 @@ static void __clean_func_state(struct bpf_verifier_env *env,
__mark_reg_not_init(env, spill);
}
for (j = start; j < end; j++)
- st->stack[i].slot_type[j] = STACK_INVALID;
+ st->stack[i].slot_type[j] = STACK_POISON;
}
}
}
@@ -20452,7 +20480,8 @@ static bool is_stack_misc_after(struct bpf_verifier_env *env,
for (i = im; i < ARRAY_SIZE(stack->slot_type); ++i) {
if ((stack->slot_type[i] == STACK_MISC) ||
- (stack->slot_type[i] == STACK_INVALID && env->allow_uninit_stack))
+ ((stack->slot_type[i] == STACK_INVALID || stack->slot_type[i] == STACK_POISON) &&
+ env->allow_uninit_stack))
continue;
return false;
}
@@ -20488,13 +20517,22 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
spi = i / BPF_REG_SIZE;
- if (exact == EXACT &&
- (i >= cur->allocated_stack ||
- old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
- cur->stack[spi].slot_type[i % BPF_REG_SIZE]))
- return false;
+ if (exact == EXACT) {
+ u8 old_type = old->stack[spi].slot_type[i % BPF_REG_SIZE];
+ u8 cur_type = i < cur->allocated_stack ?
+ cur->stack[spi].slot_type[i % BPF_REG_SIZE] : STACK_INVALID;
+
+ /* STACK_INVALID and STACK_POISON are equivalent for pruning */
+ if (old_type == STACK_POISON)
+ old_type = STACK_INVALID;
+ if (cur_type == STACK_POISON)
+ cur_type = STACK_INVALID;
+ if (i >= cur->allocated_stack || old_type != cur_type)
+ return false;
+ }
- if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
+ if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID ||
+ old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_POISON)
continue;
if (env->allow_uninit_stack &&
@@ -20592,6 +20630,7 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
case STACK_MISC:
case STACK_ZERO:
case STACK_INVALID:
+ case STACK_POISON:
continue;
/* Ensure that new unhandled slot types return false by default */
default:
diff --git a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
index c6ae64b99cd6..6bc721accbae 100644
--- a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
+++ b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
@@ -780,6 +780,8 @@ __naked void stack_load_preserves_const_precision_subreg(void)
"r1 += r2;"
"*(u8 *)(r1 + 0) = r2;" /* this should be fine */
+ "r2 = *(u64 *)(r10 -8);" /* keep slots alive */
+ "r2 = *(u64 *)(r10 -16);"
"r0 = 0;"
"exit;"
: