-rw-r--r--	kernel/bpf/verifier.c	13
-rw-r--r--	tools/testing/selftests/bpf/progs/verifier_ctx.c	76
2 files changed, 88 insertions(+), 1 deletion(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index fc4ccd1de569..34f89ed29c47 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -7752,6 +7752,11 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 		if (!err && value_regno >= 0 && (t == BPF_READ || rdonly_mem))
 			mark_reg_unknown(env, regs, value_regno);
 	} else if (reg->type == PTR_TO_CTX) {
+		/*
+		 * Program types that don't rewrite ctx accesses can safely
+		 * dereference ctx pointers with fixed offsets.
+		 */
+		bool fixed_off_ok = !env->ops->convert_ctx_access;
 		struct bpf_retval_range range;
 		struct bpf_insn_access_aux info = {
 			.reg_type = SCALAR_VALUE,
@@ -7765,10 +7770,16 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 			return -EACCES;
 		}
 
-		err = check_ptr_off_reg(env, reg, regno);
+		err = __check_ptr_off_reg(env, reg, regno, fixed_off_ok);
 		if (err < 0)
 			return err;
 
+		/*
+		 * Fold the register's constant offset into the insn offset so
+		 * that is_valid_access() sees the true effective offset.
+		 */
+		if (fixed_off_ok)
+			off += reg->var_off.value;
 		err = check_ctx_access(env, insn_idx, off, size, t, &info);
 		if (err)
 			verbose_linfo(env, insn_idx, "; ");
diff --git a/tools/testing/selftests/bpf/progs/verifier_ctx.c b/tools/testing/selftests/bpf/progs/verifier_ctx.c
index 5ebf7d9bcc55..371780290c0d 100644
--- a/tools/testing/selftests/bpf/progs/verifier_ctx.c
+++ b/tools/testing/selftests/bpf/progs/verifier_ctx.c
@@ -292,4 +292,80 @@ padding_access("cgroup/post_bind4", bpf_sock, dst_port, 2);
 __failure __msg("invalid bpf_context access")
 padding_access("sk_reuseport", sk_reuseport_md, hash, 4);
 
+SEC("syscall")
+__description("syscall: write to ctx with fixed offset")
+__success
+__naked void syscall_ctx_fixed_off_write(void)
+{
+	asm volatile ("					\
+	r0 = 0;						\
+	*(u32*)(r1 + 0) = r0;				\
+	r1 += 4;					\
+	*(u32*)(r1 + 0) = r0;				\
+	exit;						\
+"	::: __clobber_all);
+}
+
+/*
+ * Test that program types without convert_ctx_access can dereference
+ * their ctx pointer after adding a fixed offset. Variable and negative
+ * offsets should still be rejected.
+ */
+#define no_rewrite_ctx_access(type, name, off, ld_op)			\
+	SEC(type)							\
+	__description(type ": read ctx at fixed offset")		\
+	__success							\
+	__naked void no_rewrite_##name##_fixed(void)			\
+	{								\
+		asm volatile ("						\
+		r1 += %[__off];						\
+		r0 = *(" #ld_op " *)(r1 + 0);				\
+		r0 = 0;							\
+		exit;"							\
+		:							\
+		: __imm_const(__off, off)				\
+		: __clobber_all);					\
+	}								\
+	SEC(type)							\
+	__description(type ": reject variable offset ctx access")	\
+	__failure __msg("variable ctx access var_off=")			\
+	__naked void no_rewrite_##name##_var(void)			\
+	{								\
+		asm volatile ("						\
+		r6 = r1;						\
+		call %[bpf_get_prandom_u32];				\
+		r1 = r6;						\
+		r0 &= 4;						\
+		r1 += r0;						\
+		r0 = *(" #ld_op " *)(r1 + 0);				\
+		r0 = 0;							\
+		exit;"							\
+		:							\
+		: __imm(bpf_get_prandom_u32)				\
+		: __clobber_all);					\
+	}								\
+	SEC(type)							\
+	__description(type ": reject negative offset ctx access")	\
+	__failure __msg("negative offset ctx ptr")			\
+	__naked void no_rewrite_##name##_neg(void)			\
+	{								\
+		asm volatile ("						\
+		r1 += %[__neg_off];					\
+		r0 = *(" #ld_op " *)(r1 + 0);				\
+		r0 = 0;							\
+		exit;"							\
+		:							\
+		: __imm_const(__neg_off, -(off))			\
+		: __clobber_all);					\
+	}
+
+no_rewrite_ctx_access("syscall", syscall, 4, u32);
+no_rewrite_ctx_access("kprobe", kprobe, 8, u64);
+no_rewrite_ctx_access("tracepoint", tp, 8, u64);
+no_rewrite_ctx_access("raw_tp", raw_tp, 8, u64);
+no_rewrite_ctx_access("raw_tracepoint.w", raw_tp_w, 8, u64);
+no_rewrite_ctx_access("fentry/bpf_modify_return_test", fentry, 8, u64);
+no_rewrite_ctx_access("cgroup/dev", cgroup_dev, 4, u32);
+no_rewrite_ctx_access("netfilter", netfilter, offsetof(struct bpf_nf_ctx, skb), u64);
+
 char _license[] SEC("license") = "GPL";
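
For illustration (this sketch is not part of the patch; the struct layout and
program name are made up), the C-level pattern the verifier change accepts for
program types without convert_ctx_access looks like the following: taking the
address of a ctx field yields a PTR_TO_CTX register with a fixed offset, which
clang may lower to an explicit "r1 += 4" before the load, the same shape the
selftests above exercise in naked asm.

/* Hypothetical syscall program; its ctx is a caller-supplied buffer. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct ctx_layout {		/* illustrative layout passed via ctx_in */
	__u32 a;
	__u32 b;
};

SEC("syscall")
int read_ctx_fixed_off(struct ctx_layout *ctx)
{
	__u32 *p = &ctx->b;	/* ctx + 4: a fixed, non-negative offset */

	return *p;
}

char LICENSE[] SEC("license") = "GPL";

Such a program can be driven from user space with bpf_prog_test_run_opts(),
passing the input buffer through the ctx_in/ctx_size_in fields of
struct bpf_test_run_opts.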