summaryrefslogtreecommitdiff
path: root/tools
diff options
context:
space:
mode:
authorAlexei Starovoitov <ast@kernel.org>2026-03-03 19:45:16 +0300
committerAlexei Starovoitov <ast@kernel.org>2026-03-03 19:45:16 +0300
commitb0000448094892c9131e485b64be2d5ef62ab73b (patch)
tree817e2fd77f8ec8431d18bea57b8ac8cdf76e6f32 /tools
parentb1d6bd5462f1e16adb805ce293bd11e9d7c47e6c (diff)
parentf6312e71759ddb10b20fbdb9ee01b9546cabd4e3 (diff)
downloadlinux-b0000448094892c9131e485b64be2d5ef62ab73b.tar.xz
Merge branch 'allow-fixed-offsets-for-ptr_to_ctx'
Kumar Kartikeya Dwivedi says: ==================== Allow fixed offsets for PTR_TO_CTX Enable pointer modification with constant offsets accumulated in the register for PTR_TO_CTX for programs where it won't be rewritten. See patches for details. ==================== Link: https://patch.msgid.link/20260227005725.1247305-1-memxor@gmail.com Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'tools')
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_ctx.c76
1 file changed, 76 insertions, 0 deletions
diff --git a/tools/testing/selftests/bpf/progs/verifier_ctx.c b/tools/testing/selftests/bpf/progs/verifier_ctx.c
index 5ebf7d9bcc55..371780290c0d 100644
--- a/tools/testing/selftests/bpf/progs/verifier_ctx.c
+++ b/tools/testing/selftests/bpf/progs/verifier_ctx.c
@@ -292,4 +292,80 @@ padding_access("cgroup/post_bind4", bpf_sock, dst_port, 2);
__failure __msg("invalid bpf_context access")
padding_access("sk_reuseport", sk_reuseport_md, hash, 4);
+/* Check that a "syscall" program may add a constant (4) to its ctx
+ * pointer (r1) and still store through it: two 4-byte writes, one at
+ * ctx+0 and one (after r1 += 4) at ctx+4, must both be accepted.
+ */
+SEC("syscall")
+__description("syscall: write to ctx with fixed offset")
+__success
+__naked void syscall_ctx_fixed_off_write(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ *(u32*)(r1 + 0) = r0; \
+ r1 += 4; \
+ *(u32*)(r1 + 0) = r0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+/*
+ * Test that program types without convert_ctx_access can dereference
+ * their ctx pointer after adding a fixed offset. Variable and negative
+ * offsets should still be rejected.
+ */
+/*
+ * no_rewrite_ctx_access() - expand three verifier test programs for one
+ * program type:
+ *   @type:  ELF section name string (selects the BPF program type)
+ *   @name:  identifier suffix used in the generated function names
+ *   @off:   fixed byte offset added to the ctx pointer (r1)
+ *   @ld_op: load-size spelling for the BPF asm (e.g. u32, u64)
+ *
+ * Generated programs: (1) load at a fixed offset, expected to succeed;
+ * (2) load after adding a bounded random value (r0 &= 4) to the ctx
+ * pointer, expected to fail with "variable ctx access var_off=";
+ * (3) load after adding -(off), expected to fail with
+ * "negative offset ctx ptr".
+ */
+#define no_rewrite_ctx_access(type, name, off, ld_op) \
+ SEC(type) \
+ __description(type ": read ctx at fixed offset") \
+ __success \
+ __naked void no_rewrite_##name##_fixed(void) \
+ { \
+ asm volatile (" \
+ r1 += %[__off]; \
+ r0 = *(" #ld_op " *)(r1 + 0); \
+ r0 = 0; \
+ exit;" \
+ : \
+ : __imm_const(__off, off) \
+ : __clobber_all); \
+ } \
+ SEC(type) \
+ __description(type ": reject variable offset ctx access") \
+ __failure __msg("variable ctx access var_off=") \
+ __naked void no_rewrite_##name##_var(void) \
+ { \
+ asm volatile (" \
+ r6 = r1; \
+ call %[bpf_get_prandom_u32]; \
+ r1 = r6; \
+ r0 &= 4; \
+ r1 += r0; \
+ r0 = *(" #ld_op " *)(r1 + 0); \
+ r0 = 0; \
+ exit;" \
+ : \
+ : __imm(bpf_get_prandom_u32) \
+ : __clobber_all); \
+ } \
+ SEC(type) \
+ __description(type ": reject negative offset ctx access") \
+ __failure __msg("negative offset ctx ptr") \
+ __naked void no_rewrite_##name##_neg(void) \
+ { \
+ asm volatile (" \
+ r1 += %[__neg_off]; \
+ r0 = *(" #ld_op " *)(r1 + 0); \
+ r0 = 0; \
+ exit;" \
+ : \
+ : __imm_const(__neg_off, -(off)) \
+ : __clobber_all); \
+ }
+
+/* Instantiate the fixed/variable/negative offset tests for each program
+ * type under test. Offsets are hard-coded byte values except for the
+ * netfilter case, which uses a real field offset (bpf_nf_ctx::skb).
+ * NOTE(review): presumably these are the types without convert_ctx_access,
+ * per the comment above the macro — confirm against the kernel change.
+ */
+no_rewrite_ctx_access("syscall", syscall, 4, u32);
+no_rewrite_ctx_access("kprobe", kprobe, 8, u64);
+no_rewrite_ctx_access("tracepoint", tp, 8, u64);
+no_rewrite_ctx_access("raw_tp", raw_tp, 8, u64);
+no_rewrite_ctx_access("raw_tracepoint.w", raw_tp_w, 8, u64);
+no_rewrite_ctx_access("fentry/bpf_modify_return_test", fentry, 8, u64);
+no_rewrite_ctx_access("cgroup/dev", cgroup_dev, 4, u32);
+no_rewrite_ctx_access("netfilter", netfilter, offsetof(struct bpf_nf_ctx, skb), u64);
+
char _license[] SEC("license") = "GPL";