summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author	Alexei Starovoitov <ast@kernel.org>	2026-04-07 01:27:27 +0300
committer	Alexei Starovoitov <ast@kernel.org>	2026-04-07 01:27:27 +0300
commit	42e33c9af49c5199504bbfb16f65756a90fe90bf (patch)
tree	3668614b371432e63b8f69cbd67d53a4f373acda
parent	a1aa9ef47c299c5bbc30594d3c2f0589edf908e6 (diff)
parent	171580e432727a9e729f286075ee86070424f490 (diff)
download	linux-42e33c9af49c5199504bbfb16f65756a90fe90bf.tar.xz
Merge branch 'allow-variable-offsets-for-syscall-ptr_to_ctx'
Kumar Kartikeya Dwivedi says: ==================== Allow variable offsets for syscall PTR_TO_CTX Enable pointer modification with variable offsets accumulated in the register for PTR_TO_CTX for syscall programs where it won't be rewritten, and the context is user-supplied and checked against the max offset. See patches for details. Fixed offset support landed in [0]. By combining this set with [0], examples like the one below should succeed verification now. SEC("syscall") int prog(void *ctx) { int *arr = ctx; int i; bpf_for(i, 0, 100) arr[i] *= i; return 0; } [0]: https://lore.kernel.org/bpf/20260227005725.1247305-1-memxor@gmail.com Changelog: ---------- v4 -> v5 v4: https://lore.kernel.org/bpf/20260401122818.2240807-1-memxor@gmail.com * Use is_var_ctx_off_allowed() consistently. * Add acks. (Emil) v3 -> v4 v3: https://lore.kernel.org/bpf/20260318103526.2590079-1-memxor@gmail.com * Drop comment around describing choice of fixed or variable offsets. (Eduard) * Simplify offset adjustment for different cases. (Eduard) * Add PTR_TO_CTX case in __check_mem_access(). (Eduard) * Drop aligned access constraint from syscall_prog_is_valid_access(). * Wrap naked checks for BPF_PROG_TYPE_SYSCALL in a utility function. (Eduard) * Split tests into separate clean up and addition patches. (Eduard) * Remove CAP_SYS_ADMIN changes. (Eduard) * Enable unaligned access to syscall ctx, add tests. * Add more tests for various corner cases. * Add acks. (Puranjay, Mykyta) v2 -> v3 v2: https://lore.kernel.org/bpf/20260318075133.1031781-1-memxor@gmail.com * Prevent arg_type for KF_ARG_PTR_TO_CTX from applying to other cases due to preceding fallthrough. (Gemini/Sashiko) v1 -> v2 v1: https://lore.kernel.org/bpf/20260317111850.2107846-2-memxor@gmail.com * Harden check_func_arg_reg_off check with ARG_PTR_TO_CTX. * Add tests for unmodified ctx into tail calls. * Squash unmodified ctx change into base commit. * Add Reviewed-by's from Emil. 
==================== Link: https://patch.msgid.link/20260406194403.1649608-1-memxor@gmail.com Signed-off-by: Alexei Starovoitov <ast@kernel.org>
-rw-r--r--	kernel/bpf/syscall.c	3
-rw-r--r--	kernel/bpf/verifier.c	103
-rw-r--r--	tools/testing/selftests/bpf/prog_tests/verifier.c	2
-rw-r--r--	tools/testing/selftests/bpf/progs/verifier_ctx.c	591
-rw-r--r--	tools/testing/selftests/bpf/progs/verifier_global_subprogs.c	95
-rw-r--r--	tools/testing/selftests/bpf/test_kmods/bpf_testmod.c	2
6 files changed, 703 insertions, 93 deletions
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 810991709da7..f1044ab9b03b 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -6386,8 +6386,7 @@ static bool syscall_prog_is_valid_access(int off, int size,
{
if (off < 0 || off >= U16_MAX)
return false;
- if (off % size != 0)
- return false;
+ /* No alignment requirements for syscall ctx accesses. */
return true;
}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 84699a428077..7d68f7d1230d 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5983,6 +5983,10 @@ static int __check_mem_access(struct bpf_verifier_env *env, int regno,
verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
off, size, regno, reg->id, off, mem_size);
break;
+ case PTR_TO_CTX:
+ verbose(env, "invalid access to context, ctx_size=%d off=%d size=%d\n",
+ mem_size, off, size);
+ break;
case PTR_TO_MEM:
default:
verbose(env, "invalid access to memory, mem_size=%u off=%d size=%d\n",
@@ -6476,9 +6480,14 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
return 0;
}
+static bool is_var_ctx_off_allowed(struct bpf_prog *prog)
+{
+ return resolve_prog_type(prog) == BPF_PROG_TYPE_SYSCALL;
+}
+
/* check access to 'struct bpf_context' fields. Supports fixed offsets only */
-static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
- enum bpf_access_type t, struct bpf_insn_access_aux *info)
+static int __check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
+ enum bpf_access_type t, struct bpf_insn_access_aux *info)
{
if (env->ops->is_valid_access &&
env->ops->is_valid_access(off, size, t, env->prog, info)) {
@@ -6509,6 +6518,34 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off,
return -EACCES;
}
+static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
+ int off, int access_size, enum bpf_access_type t,
+ struct bpf_insn_access_aux *info)
+{
+ /*
+ * Program types that don't rewrite ctx accesses can safely
+ * dereference ctx pointers with fixed offsets.
+ */
+ bool var_off_ok = is_var_ctx_off_allowed(env->prog);
+ bool fixed_off_ok = !env->ops->convert_ctx_access;
+ struct bpf_reg_state *regs = cur_regs(env);
+ struct bpf_reg_state *reg = regs + regno;
+ int err;
+
+ if (var_off_ok)
+ err = check_mem_region_access(env, regno, off, access_size, U16_MAX, false);
+ else
+ err = __check_ptr_off_reg(env, reg, regno, fixed_off_ok);
+ if (err)
+ return err;
+ off += reg->umax_value;
+
+ err = __check_ctx_access(env, insn_idx, off, access_size, t, info);
+ if (err)
+ verbose_linfo(env, insn_idx, "; ");
+ return err;
+}
+
static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
int size)
{
@@ -7939,17 +7976,12 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
if (!err && value_regno >= 0 && (t == BPF_READ || rdonly_mem))
mark_reg_unknown(env, regs, value_regno);
} else if (reg->type == PTR_TO_CTX) {
- /*
- * Program types that don't rewrite ctx accesses can safely
- * dereference ctx pointers with fixed offsets.
- */
- bool fixed_off_ok = !env->ops->convert_ctx_access;
- struct bpf_retval_range range;
struct bpf_insn_access_aux info = {
.reg_type = SCALAR_VALUE,
.is_ldsx = is_ldsx,
.log = &env->log,
};
+ struct bpf_retval_range range;
if (t == BPF_WRITE && value_regno >= 0 &&
is_pointer_value(env, value_regno)) {
@@ -7957,19 +7989,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
return -EACCES;
}
- err = __check_ptr_off_reg(env, reg, regno, fixed_off_ok);
- if (err < 0)
- return err;
-
- /*
- * Fold the register's constant offset into the insn offset so
- * that is_valid_access() sees the true effective offset.
- */
- if (fixed_off_ok)
- off += reg->var_off.value;
- err = check_ctx_access(env, insn_idx, off, size, t, &info);
- if (err)
- verbose_linfo(env, insn_idx, "; ");
+ err = check_ctx_access(env, insn_idx, regno, off, size, t, &info);
if (!err && t == BPF_READ && value_regno >= 0) {
/* ctx access returns either a scalar, or a
* PTR_TO_PACKET[_META,_END]. In the latter
@@ -8543,22 +8563,16 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
return check_ptr_to_btf_access(env, regs, regno, 0,
access_size, BPF_READ, -1);
case PTR_TO_CTX:
- /* in case the function doesn't know how to access the context,
- * (because we are in a program of type SYSCALL for example), we
- * can not statically check its size.
- * Dynamically check it now.
- */
- if (!env->ops->convert_ctx_access) {
- int offset = access_size - 1;
-
- /* Allow zero-byte read from PTR_TO_CTX */
- if (access_size == 0)
- return zero_size_allowed ? 0 : -EACCES;
-
- return check_mem_access(env, env->insn_idx, regno, offset, BPF_B,
- access_type, -1, false, false);
+ /* Only permit reading or writing syscall context using helper calls. */
+ if (is_var_ctx_off_allowed(env->prog)) {
+ int err = check_mem_region_access(env, regno, 0, access_size, U16_MAX,
+ zero_size_allowed);
+ if (err)
+ return err;
+ if (env->prog->aux->max_ctx_offset < reg->umax_value + access_size)
+ env->prog->aux->max_ctx_offset = reg->umax_value + access_size;
+ return 0;
}
-
fallthrough;
default: /* scalar_value or invalid ptr */
/* Allow zero-byte read from NULL, regardless of pointer type */
@@ -9502,6 +9516,7 @@ static const struct bpf_reg_types mem_types = {
PTR_TO_MEM | MEM_RINGBUF,
PTR_TO_BUF,
PTR_TO_BTF_ID | PTR_TRUSTED,
+ PTR_TO_CTX,
},
};
@@ -9811,6 +9826,16 @@ static int check_func_arg_reg_off(struct bpf_verifier_env *env,
* still need to do checks instead of returning.
*/
return __check_ptr_off_reg(env, reg, regno, true);
+ case PTR_TO_CTX:
+ /*
+ * Allow fixed and variable offsets for syscall context, but
+ * only when the argument is passed as memory, not ctx,
+ * otherwise we may get modified ctx in tail called programs and
+ * global subprogs (that may act as extension prog hooks).
+ */
+ if (arg_type != ARG_PTR_TO_CTX && is_var_ctx_off_allowed(env->prog))
+ return 0;
+ fallthrough;
default:
return __check_ptr_off_reg(env, reg, regno, false);
}
@@ -10858,7 +10883,7 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
* invalid memory access.
*/
} else if (arg->arg_type == ARG_PTR_TO_CTX) {
- ret = check_func_arg_reg_off(env, reg, regno, ARG_DONTCARE);
+ ret = check_func_arg_reg_off(env, reg, regno, ARG_PTR_TO_CTX);
if (ret < 0)
return ret;
/* If function expects ctx type in BTF check that caller
@@ -13732,7 +13757,6 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
}
}
fallthrough;
- case KF_ARG_PTR_TO_CTX:
case KF_ARG_PTR_TO_DYNPTR:
case KF_ARG_PTR_TO_ITER:
case KF_ARG_PTR_TO_LIST_HEAD:
@@ -13750,6 +13774,9 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
case KF_ARG_PTR_TO_IRQ_FLAG:
case KF_ARG_PTR_TO_RES_SPIN_LOCK:
break;
+ case KF_ARG_PTR_TO_CTX:
+ arg_type = ARG_PTR_TO_CTX;
+ break;
default:
verifier_bug(env, "unknown kfunc arg type %d", kf_arg_type);
return -EFAULT;
diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
index 1ac366fd4dae..169cf7fbf40f 100644
--- a/tools/testing/selftests/bpf/prog_tests/verifier.c
+++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
@@ -175,7 +175,7 @@ void test_verifier_cgroup_skb(void) { RUN(verifier_cgroup_skb); }
void test_verifier_cgroup_storage(void) { RUN(verifier_cgroup_storage); }
void test_verifier_const(void) { RUN(verifier_const); }
void test_verifier_const_or(void) { RUN(verifier_const_or); }
-void test_verifier_ctx(void) { RUN(verifier_ctx); }
+void test_verifier_ctx(void) { RUN_TESTS(verifier_ctx); }
void test_verifier_ctx_sk_msg(void) { RUN(verifier_ctx_sk_msg); }
void test_verifier_d_path(void) { RUN(verifier_d_path); }
void test_verifier_default_trusted_ptr(void) { RUN_TESTS(verifier_default_trusted_ptr); }
diff --git a/tools/testing/selftests/bpf/progs/verifier_ctx.c b/tools/testing/selftests/bpf/progs/verifier_ctx.c
index 371780290c0d..7856dad3d1f3 100644
--- a/tools/testing/selftests/bpf/progs/verifier_ctx.c
+++ b/tools/testing/selftests/bpf/progs/verifier_ctx.c
@@ -4,6 +4,10 @@
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+static const char ctx_strncmp_target[] = "ctx";
+static const char ctx_snprintf_fmt[] = "";
SEC("tc")
__description("context stores via BPF_ATOMIC")
@@ -69,7 +73,6 @@ __naked void ctx_pointer_to_helper_1(void)
SEC("socket")
__description("pass modified ctx pointer to helper, 2")
__failure __msg("negative offset ctx ptr R1 off=-612 disallowed")
-__failure_unpriv __msg_unpriv("negative offset ctx ptr R1 off=-612 disallowed")
__naked void ctx_pointer_to_helper_2(void)
{
asm volatile (" \
@@ -292,74 +295,562 @@ padding_access("cgroup/post_bind4", bpf_sock, dst_port, 2);
__failure __msg("invalid bpf_context access")
padding_access("sk_reuseport", sk_reuseport_md, hash, 4);
-SEC("syscall")
+SEC("?syscall")
__description("syscall: write to ctx with fixed offset")
__success
-__naked void syscall_ctx_fixed_off_write(void)
+int syscall_ctx_fixed_off_write(void *ctx)
{
- asm volatile (" \
- r0 = 0; \
- *(u32*)(r1 + 0) = r0; \
- r1 += 4; \
- *(u32*)(r1 + 0) = r0; \
- exit; \
-" ::: __clobber_all);
+ char *p = ctx;
+
+ *(__u32 *)p = 0;
+ *(__u32 *)(p + 4) = 0;
+ return 0;
+}
+
+SEC("?syscall")
+__description("syscall: read ctx with fixed offset")
+__success
+int syscall_ctx_fixed_off_read(void *ctx)
+{
+ char *p = ctx;
+ volatile __u32 val;
+
+ val = *(__u32 *)(p + 4);
+ (void)val;
+ return 0;
+}
+
+SEC("?syscall")
+__description("syscall: unaligned read ctx with fixed offset")
+__success
+int syscall_ctx_unaligned_fixed_off_read(void *ctx)
+{
+ char *p = ctx;
+ volatile __u32 val;
+
+ val = *(__u32 *)(p + 2);
+ (void)val;
+ return 0;
+}
+
+SEC("?syscall")
+__description("syscall: unaligned write ctx with fixed offset")
+__success
+int syscall_ctx_unaligned_fixed_off_write(void *ctx)
+{
+ char *p = ctx;
+
+ *(__u32 *)(p + 2) = 0;
+ return 0;
+}
+
+SEC("?syscall")
+__description("syscall: read ctx with variable offset")
+__success
+int syscall_ctx_var_off_read(void *ctx)
+{
+ __u64 off = bpf_get_prandom_u32();
+ char *p = ctx;
+ volatile __u32 val;
+
+ off &= 0xfc;
+ p += off;
+ val = *(__u32 *)p;
+ (void)val;
+ return 0;
+}
+
+SEC("?syscall")
+__description("syscall: write ctx with variable offset")
+__success
+int syscall_ctx_var_off_write(void *ctx)
+{
+ __u64 off = bpf_get_prandom_u32();
+ char *p = ctx;
+
+ off &= 0xfc;
+ p += off;
+ *(__u32 *)p = 0;
+ return 0;
+}
+
+SEC("?syscall")
+__description("syscall: unaligned read ctx with variable offset")
+__success
+int syscall_ctx_unaligned_var_off_read(void *ctx)
+{
+ __u64 off = bpf_get_prandom_u32();
+ char *p = ctx;
+ volatile __u32 val;
+
+ off &= 0xfc;
+ off += 2;
+ p += off;
+ val = *(__u32 *)p;
+ (void)val;
+ return 0;
+}
+
+SEC("?syscall")
+__description("syscall: unaligned write ctx with variable offset")
+__success
+int syscall_ctx_unaligned_var_off_write(void *ctx)
+{
+ __u64 off = bpf_get_prandom_u32();
+ char *p = ctx;
+
+ off &= 0xfc;
+ off += 2;
+ p += off;
+ *(__u32 *)p = 0;
+ return 0;
+}
+
+SEC("?syscall")
+__description("syscall: reject ctx access past U16_MAX with fixed offset")
+__failure __msg("outside of the allowed memory range")
+int syscall_ctx_u16_max_fixed_off(void *ctx)
+{
+ char *p = ctx;
+ volatile __u32 val;
+
+ p += 65535;
+ val = *(__u32 *)p;
+ (void)val;
+ return 0;
+}
+
+SEC("?syscall")
+__description("syscall: reject ctx access past U16_MAX with variable offset")
+__failure __msg("outside of the allowed memory range")
+int syscall_ctx_u16_max_var_off(void *ctx)
+{
+ __u64 off = bpf_get_prandom_u32();
+ char *p = ctx;
+ volatile __u32 val;
+
+ off &= 0xffff;
+ off += 1;
+ p += off;
+ val = *(__u32 *)p;
+ (void)val;
+ return 0;
+}
+
+SEC("?syscall")
+__description("syscall: reject negative variable offset ctx access")
+__failure __msg("min value is negative")
+int syscall_ctx_neg_var_off(void *ctx)
+{
+ __u64 off = bpf_get_prandom_u32();
+ char *p = ctx;
+
+ off &= 4;
+ p -= off;
+ return *(__u32 *)p;
+}
+
+SEC("?syscall")
+__description("syscall: reject unbounded variable offset ctx access")
+__failure __msg("unbounded memory access")
+int syscall_ctx_unbounded_var_off(void *ctx)
+{
+ __u64 off = (__u32)bpf_get_prandom_u32();
+ char *p = ctx;
+
+ off <<= 2;
+ p += off;
+ return *(__u32 *)p;
+}
+
+SEC("?syscall")
+__description("syscall: helper read ctx with fixed offset")
+__success
+int syscall_ctx_helper_fixed_off_read(void *ctx)
+{
+ char *p = ctx;
+
+ p += 4;
+ return bpf_strncmp(p, 4, ctx_strncmp_target);
+}
+
+SEC("?syscall")
+__description("syscall: helper write ctx with fixed offset")
+__success
+int syscall_ctx_helper_fixed_off_write(void *ctx)
+{
+ char *p = ctx;
+
+ p += 4;
+ return bpf_probe_read_kernel(p, 4, 0);
+}
+
+SEC("?syscall")
+__description("syscall: helper unaligned read ctx with fixed offset")
+__success
+int syscall_ctx_helper_unaligned_fixed_off_read(void *ctx)
+{
+ char *p = ctx;
+
+ p += 2;
+ return bpf_strncmp(p, 4, ctx_strncmp_target);
+}
+
+SEC("?syscall")
+__description("syscall: helper unaligned write ctx with fixed offset")
+__success
+int syscall_ctx_helper_unaligned_fixed_off_write(void *ctx)
+{
+ char *p = ctx;
+
+ p += 2;
+ return bpf_probe_read_kernel(p, 4, 0);
+}
+
+SEC("?syscall")
+__description("syscall: helper read ctx with variable offset")
+__success
+int syscall_ctx_helper_var_off_read(void *ctx)
+{
+ __u64 off = bpf_get_prandom_u32();
+ char *p = ctx;
+
+ off &= 0xfc;
+ p += off;
+ return bpf_strncmp(p, 4, ctx_strncmp_target);
+}
+
+SEC("?syscall")
+__description("syscall: helper write ctx with variable offset")
+__success
+int syscall_ctx_helper_var_off_write(void *ctx)
+{
+ __u64 off = bpf_get_prandom_u32();
+ char *p = ctx;
+
+ off &= 0xfc;
+ p += off;
+ return bpf_probe_read_kernel(p, 4, 0);
+}
+
+SEC("?syscall")
+__description("syscall: helper unaligned read ctx with variable offset")
+__success
+int syscall_ctx_helper_unaligned_var_off_read(void *ctx)
+{
+ __u64 off = bpf_get_prandom_u32();
+ char *p = ctx;
+
+ off &= 0xfc;
+ off += 2;
+ p += off;
+ return bpf_strncmp(p, 4, ctx_strncmp_target);
+}
+
+SEC("?syscall")
+__description("syscall: helper unaligned write ctx with variable offset")
+__success
+int syscall_ctx_helper_unaligned_var_off_write(void *ctx)
+{
+ __u64 off = bpf_get_prandom_u32();
+ char *p = ctx;
+
+ off &= 0xfc;
+ off += 2;
+ p += off;
+ return bpf_probe_read_kernel(p, 4, 0);
+}
+
+SEC("?syscall")
+__description("syscall: reject helper read ctx past U16_MAX with fixed offset")
+__failure __msg("outside of the allowed memory range")
+int syscall_ctx_helper_u16_max_fixed_off_read(void *ctx)
+{
+ char *p = ctx;
+
+ p += 65535;
+ return bpf_strncmp(p, 4, ctx_strncmp_target);
+}
+
+SEC("?syscall")
+__description("syscall: reject helper write ctx past U16_MAX with fixed offset")
+__failure __msg("outside of the allowed memory range")
+int syscall_ctx_helper_u16_max_fixed_off_write(void *ctx)
+{
+ char *p = ctx;
+
+ p += 65535;
+ return bpf_probe_read_kernel(p, 4, 0);
+}
+
+SEC("?syscall")
+__description("syscall: reject helper read ctx past U16_MAX with variable offset")
+__failure __msg("outside of the allowed memory range")
+int syscall_ctx_helper_u16_max_var_off_read(void *ctx)
+{
+ __u64 off = bpf_get_prandom_u32();
+ char *p = ctx;
+
+ off &= 0xffff;
+ off += 1;
+ p += off;
+ return bpf_strncmp(p, 4, ctx_strncmp_target);
+}
+
+SEC("?syscall")
+__description("syscall: reject helper write ctx past U16_MAX with variable offset")
+__failure __msg("outside of the allowed memory range")
+int syscall_ctx_helper_u16_max_var_off_write(void *ctx)
+{
+ __u64 off = bpf_get_prandom_u32();
+ char *p = ctx;
+
+ off &= 0xffff;
+ off += 1;
+ p += off;
+ return bpf_probe_read_kernel(p, 4, 0);
+}
+
+SEC("?syscall")
+__description("syscall: helper read zero-sized ctx access")
+__success
+int syscall_ctx_helper_zero_sized_read(void *ctx)
+{
+ return bpf_snprintf(0, 0, ctx_snprintf_fmt, ctx, 0);
+}
+
+SEC("?syscall")
+__description("syscall: helper write zero-sized ctx access")
+__success
+int syscall_ctx_helper_zero_sized_write(void *ctx)
+{
+ return bpf_probe_read_kernel(ctx, 0, 0);
+}
+
+SEC("?syscall")
+__description("syscall: kfunc access ctx with fixed offset")
+__success
+int syscall_ctx_kfunc_fixed_off(void *ctx)
+{
+ char *p = ctx;
+
+ p += 4;
+ bpf_kfunc_call_test_mem_len_pass1(p, 4);
+ return 0;
+}
+
+SEC("?syscall")
+__description("syscall: kfunc access ctx with variable offset")
+__success
+int syscall_ctx_kfunc_var_off(void *ctx)
+{
+ __u64 off = bpf_get_prandom_u32();
+ char *p = ctx;
+
+ off &= 0xfc;
+ p += off;
+ bpf_kfunc_call_test_mem_len_pass1(p, 4);
+ return 0;
+}
+
+SEC("?syscall")
+__description("syscall: kfunc unaligned access ctx with fixed offset")
+__success
+int syscall_ctx_kfunc_unaligned_fixed_off(void *ctx)
+{
+ char *p = ctx;
+
+ p += 2;
+ bpf_kfunc_call_test_mem_len_pass1(p, 4);
+ return 0;
+}
+
+SEC("?syscall")
+__description("syscall: kfunc unaligned access ctx with variable offset")
+__success
+int syscall_ctx_kfunc_unaligned_var_off(void *ctx)
+{
+ __u64 off = bpf_get_prandom_u32();
+ char *p = ctx;
+
+ off &= 0xfc;
+ off += 2;
+ p += off;
+ bpf_kfunc_call_test_mem_len_pass1(p, 4);
+ return 0;
+}
+
+SEC("?syscall")
+__description("syscall: reject kfunc ctx access past U16_MAX with fixed offset")
+__failure __msg("outside of the allowed memory range")
+int syscall_ctx_kfunc_u16_max_fixed_off(void *ctx)
+{
+ char *p = ctx;
+
+ p += 65535;
+ bpf_kfunc_call_test_mem_len_pass1(p, 4);
+ return 0;
+}
+
+SEC("?syscall")
+__description("syscall: reject kfunc ctx access past U16_MAX with variable offset")
+__failure __msg("outside of the allowed memory range")
+int syscall_ctx_kfunc_u16_max_var_off(void *ctx)
+{
+ __u64 off = bpf_get_prandom_u32();
+ char *p = ctx;
+
+ off &= 0xffff;
+ off += 1;
+ p += off;
+ bpf_kfunc_call_test_mem_len_pass1(p, 4);
+ return 0;
+}
+
+SEC("?syscall")
+__description("syscall: kfunc access zero-sized ctx")
+__success
+int syscall_ctx_kfunc_zero_sized(void *ctx)
+{
+ bpf_kfunc_call_test_mem_len_pass1(ctx, 0);
+ return 0;
}
/*
- * Test that program types without convert_ctx_access can dereference
- * their ctx pointer after adding a fixed offset. Variable and negative
- * offsets should still be rejected.
+ * For non-syscall program types without convert_ctx_access, direct ctx
+ * dereference is still allowed after adding a fixed offset, while variable
+ * and negative direct accesses reject.
+ *
+ * Passing ctx as a helper or kfunc memory argument is only permitted for
+ * syscall programs, so the helper and kfunc cases below validate rejection
+ * for non-syscall ctx pointers at fixed, variable, and zero-sized accesses.
*/
-#define no_rewrite_ctx_access(type, name, off, ld_op) \
- SEC(type) \
+#define no_rewrite_ctx_access(type, name, off, load_t) \
+ SEC("?" type) \
__description(type ": read ctx at fixed offset") \
__success \
- __naked void no_rewrite_##name##_fixed(void) \
+ int no_rewrite_##name##_fixed(void *ctx) \
{ \
- asm volatile (" \
- r1 += %[__off]; \
- r0 = *(" #ld_op " *)(r1 + 0); \
- r0 = 0; \
- exit;" \
- : \
- : __imm_const(__off, off) \
- : __clobber_all); \
+ char *p = ctx; \
+ volatile load_t val; \
+ \
+ val = *(load_t *)(p + off); \
+ (void)val; \
+ return 0; \
} \
- SEC(type) \
+ SEC("?" type) \
__description(type ": reject variable offset ctx access") \
__failure __msg("variable ctx access var_off=") \
- __naked void no_rewrite_##name##_var(void) \
+ int no_rewrite_##name##_var(void *ctx) \
{ \
- asm volatile (" \
- r6 = r1; \
- call %[bpf_get_prandom_u32]; \
- r1 = r6; \
- r0 &= 4; \
- r1 += r0; \
- r0 = *(" #ld_op " *)(r1 + 0); \
- r0 = 0; \
- exit;" \
- : \
- : __imm(bpf_get_prandom_u32) \
- : __clobber_all); \
+ __u64 off_var = bpf_get_prandom_u32(); \
+ char *p = ctx; \
+ \
+ off_var &= 4; \
+ p += off_var; \
+ return *(load_t *)p; \
} \
- SEC(type) \
+ SEC("?" type) \
__description(type ": reject negative offset ctx access") \
- __failure __msg("negative offset ctx ptr") \
- __naked void no_rewrite_##name##_neg(void) \
+ __failure __msg("invalid bpf_context access") \
+ int no_rewrite_##name##_neg(void *ctx) \
{ \
- asm volatile (" \
- r1 += %[__neg_off]; \
- r0 = *(" #ld_op " *)(r1 + 0); \
- r0 = 0; \
- exit;" \
- : \
- : __imm_const(__neg_off, -(off)) \
- : __clobber_all); \
+ char *p = ctx; \
+ \
+ p -= 612; \
+ return *(load_t *)p; \
+ } \
+ SEC("?" type) \
+ __description(type ": reject helper read ctx at fixed offset") \
+ __failure __msg("dereference of modified ctx ptr") \
+ int no_rewrite_##name##_helper_read_fixed(void *ctx) \
+ { \
+ char *p = ctx; \
+ \
+ p += off; \
+ return bpf_strncmp(p, 4, ctx_strncmp_target); \
+ } \
+ SEC("?" type) \
+ __description(type ": reject helper write ctx at fixed offset") \
+ __failure __msg("dereference of modified ctx ptr") \
+ int no_rewrite_##name##_helper_write_fixed(void *ctx) \
+ { \
+ char *p = ctx; \
+ \
+ p += off; \
+ return bpf_probe_read_kernel(p, 4, 0); \
+ } \
+ SEC("?" type) \
+ __description(type ": reject helper read ctx with variable offset") \
+ __failure __msg("variable ctx access var_off=") \
+ int no_rewrite_##name##_helper_read_var(void *ctx) \
+ { \
+ __u64 off_var = bpf_get_prandom_u32(); \
+ char *p = ctx; \
+ \
+ off_var &= 4; \
+ p += off_var; \
+ return bpf_strncmp(p, 4, ctx_strncmp_target); \
+ } \
+ SEC("?" type) \
+ __description(type ": reject helper write ctx with variable offset") \
+ __failure __msg("variable ctx access var_off=") \
+ int no_rewrite_##name##_helper_write_var(void *ctx) \
+ { \
+ __u64 off_var = bpf_get_prandom_u32(); \
+ char *p = ctx; \
+ \
+ off_var &= 4; \
+ p += off_var; \
+ return bpf_probe_read_kernel(p, 4, 0); \
+ } \
+ SEC("?" type) \
+ __description(type ": reject helper read zero-sized ctx access") \
+ __failure __msg("R4 type=ctx expected=fp") \
+ int no_rewrite_##name##_helper_read_zero(void *ctx) \
+ { \
+ return bpf_snprintf(0, 0, ctx_snprintf_fmt, ctx, 0); \
+ } \
+ SEC("?" type) \
+ __description(type ": reject helper write zero-sized ctx access") \
+ __failure __msg("R1 type=ctx expected=fp") \
+ int no_rewrite_##name##_helper_write_zero(void *ctx) \
+ { \
+ return bpf_probe_read_kernel(ctx, 0, 0); \
+ } \
+ SEC("?" type) \
+ __description(type ": reject kfunc ctx at fixed offset") \
+ __failure __msg("dereference of modified ctx ptr") \
+ int no_rewrite_##name##_kfunc_fixed(void *ctx) \
+ { \
+ char *p = ctx; \
+ \
+ p += off; \
+ bpf_kfunc_call_test_mem_len_pass1(p, 4); \
+ return 0; \
+ } \
+ SEC("?" type) \
+ __description(type ": reject kfunc ctx with variable offset") \
+ __failure __msg("variable ctx access var_off=") \
+ int no_rewrite_##name##_kfunc_var(void *ctx) \
+ { \
+ __u64 off_var = bpf_get_prandom_u32(); \
+ char *p = ctx; \
+ \
+ off_var &= 4; \
+ p += off_var; \
+ bpf_kfunc_call_test_mem_len_pass1(p, 4); \
+ return 0; \
+ } \
+ SEC("?" type) \
+ __description(type ": reject kfunc zero-sized ctx access") \
+ __failure __msg("R1 type=ctx expected=fp") \
+ int no_rewrite_##name##_kfunc_zero(void *ctx) \
+ { \
+ bpf_kfunc_call_test_mem_len_pass1(ctx, 0); \
+ return 0; \
}
-no_rewrite_ctx_access("syscall", syscall, 4, u32);
no_rewrite_ctx_access("kprobe", kprobe, 8, u64);
no_rewrite_ctx_access("tracepoint", tp, 8, u64);
no_rewrite_ctx_access("raw_tp", raw_tp, 8, u64);
diff --git a/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c b/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c
index f02012a2fbaa..1e08aff7532e 100644
--- a/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c
+++ b/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c
@@ -134,7 +134,6 @@ __noinline __weak int subprog_user_anon_mem(user_struct_t *t)
SEC("?tracepoint")
__failure __log_level(2)
-__msg("invalid bpf_context access")
__msg("Caller passes invalid args into func#1 ('subprog_user_anon_mem')")
int anon_user_mem_invalid(void *ctx)
{
@@ -358,6 +357,100 @@ int arg_tag_ctx_syscall(void *ctx)
return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx) + tp_whatever(ctx);
}
+__weak int syscall_array_bpf_for(void *ctx __arg_ctx)
+{
+ int *arr = ctx;
+ int i;
+
+ bpf_for(i, 0, 100)
+ arr[i] *= i;
+
+ return 0;
+}
+
+SEC("?syscall")
+__success __log_level(2)
+int arg_tag_ctx_syscall_bpf_for(void *ctx)
+{
+ return syscall_array_bpf_for(ctx);
+}
+
+SEC("syscall")
+__auxiliary
+int syscall_tailcall_target(void *ctx)
+{
+ return syscall_array_bpf_for(ctx);
+}
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __array(values, int (void *));
+} syscall_prog_array SEC(".maps") = {
+ .values = {
+ [0] = (void *)&syscall_tailcall_target,
+ },
+};
+
+SEC("?syscall")
+__success __log_level(2)
+int arg_tag_ctx_syscall_tailcall(void *ctx)
+{
+ bpf_tail_call(ctx, &syscall_prog_array, 0);
+ return 0;
+}
+
+SEC("?syscall")
+__failure __log_level(2)
+__msg("dereference of modified ctx ptr R1 off=8 disallowed")
+int arg_tag_ctx_syscall_tailcall_fixed_off_bad(void *ctx)
+{
+ char *p = ctx;
+
+ p += 8;
+ bpf_tail_call(p, &syscall_prog_array, 0);
+ return 0;
+}
+
+SEC("?syscall")
+__failure __log_level(2)
+__msg("variable ctx access var_off=(0x0; 0x4) disallowed")
+int arg_tag_ctx_syscall_tailcall_var_off_bad(void *ctx)
+{
+ __u64 off = bpf_get_prandom_u32();
+ char *p = ctx;
+
+ off &= 4;
+ p += off;
+ bpf_tail_call(p, &syscall_prog_array, 0);
+ return 0;
+}
+
+SEC("?syscall")
+__failure __log_level(2)
+__msg("dereference of modified ctx ptr R1 off=8 disallowed")
+int arg_tag_ctx_syscall_fixed_off_bad(void *ctx)
+{
+ char *p = ctx;
+
+ p += 8;
+ return subprog_ctx_tag(p);
+}
+
+SEC("?syscall")
+__failure __log_level(2)
+__msg("variable ctx access var_off=(0x0; 0x4) disallowed")
+int arg_tag_ctx_syscall_var_off_bad(void *ctx)
+{
+ __u64 off = bpf_get_prandom_u32();
+ char *p = ctx;
+
+ off &= 4;
+ p += off;
+ return subprog_ctx_tag(p);
+}
+
__weak int subprog_dynptr(struct bpf_dynptr *dptr)
{
long *d, t, buf[1] = {};
diff --git a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
index 061356f10093..d876314a4d67 100644
--- a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
@@ -723,6 +723,7 @@ BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_value)
BTF_ID_FLAGS(func, bpf_kfunc_common_test)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_dynptr_test)
BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_nonzero_offset_test, KF_ACQUIRE)
BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_zero_offset_test, KF_ACQUIRE)
@@ -1287,7 +1288,6 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test4)
BTF_ID_FLAGS(func, bpf_kfunc_call_test5)
-BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)