diff options
| author | Alexei Starovoitov <ast@kernel.org> | 2026-04-07 01:27:27 +0300 |
|---|---|---|
| committer | Alexei Starovoitov <ast@kernel.org> | 2026-04-07 01:27:27 +0300 |
| commit | 42e33c9af49c5199504bbfb16f65756a90fe90bf (patch) | |
| tree | 3668614b371432e63b8f69cbd67d53a4f373acda /tools | |
| parent | a1aa9ef47c299c5bbc30594d3c2f0589edf908e6 (diff) | |
| parent | 171580e432727a9e729f286075ee86070424f490 (diff) | |
| download | linux-42e33c9af49c5199504bbfb16f65756a90fe90bf.tar.xz | |
Merge branch 'allow-variable-offsets-for-syscall-ptr_to_ctx'
Kumar Kartikeya Dwivedi says:
====================
Allow variable offsets for syscall PTR_TO_CTX
Enable pointer modification with variable offsets accumulated in the
register for PTR_TO_CTX for syscall programs where it won't be
rewritten, and the context is user-supplied and checked against the max
offset. See patches for details. Fixed offset support landed in [0].
By combining this set with [0], examples like the one below should
succeed verification now.
SEC("syscall")
int prog(void *ctx) {
int *arr = ctx;
int i;
bpf_for(i, 0, 100)
arr[i] *= i;
return 0;
}
[0]: https://lore.kernel.org/bpf/20260227005725.1247305-1-memxor@gmail.com
Changelog:
----------
v4 -> v5
v4: https://lore.kernel.org/bpf/20260401122818.2240807-1-memxor@gmail.com
* Use is_var_ctx_off_allowed() consistently.
* Add acks. (Emil)
v3 -> v4
v3: https://lore.kernel.org/bpf/20260318103526.2590079-1-memxor@gmail.com
* Drop comment describing choice of fixed or variable offsets. (Eduard)
* Simplify offset adjustment for different cases. (Eduard)
* Add PTR_TO_CTX case in __check_mem_access(). (Eduard)
* Drop aligned access constraint from syscall_prog_is_valid_access().
* Wrap naked checks for BPF_PROG_TYPE_SYSCALL in a utility function. (Eduard)
* Split tests into separate clean up and addition patches. (Eduard)
* Remove CAP_SYS_ADMIN changes. (Eduard)
* Enable unaligned access to syscall ctx, add tests.
* Add more tests for various corner cases.
* Add acks. (Puranjay, Mykyta)
v2 -> v3
v2: https://lore.kernel.org/bpf/20260318075133.1031781-1-memxor@gmail.com
* Prevent arg_type for KF_ARG_PTR_TO_CTX from applying to other cases
due to preceding fallthrough. (Gemini/Sashiko)
v1 -> v2
v1: https://lore.kernel.org/bpf/20260317111850.2107846-2-memxor@gmail.com
* Harden check_func_arg_reg_off check with ARG_PTR_TO_CTX.
* Add tests for unmodified ctx into tail calls.
* Squash unmodified ctx change into base commit.
* Add Reviewed-by's from Emil.
====================
Link: https://patch.msgid.link/20260406194403.1649608-1-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'tools')
4 files changed, 637 insertions, 53 deletions
diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 1ac366fd4dae..169cf7fbf40f 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -175,7 +175,7 @@ void test_verifier_cgroup_skb(void) { RUN(verifier_cgroup_skb); } void test_verifier_cgroup_storage(void) { RUN(verifier_cgroup_storage); } void test_verifier_const(void) { RUN(verifier_const); } void test_verifier_const_or(void) { RUN(verifier_const_or); } -void test_verifier_ctx(void) { RUN(verifier_ctx); } +void test_verifier_ctx(void) { RUN_TESTS(verifier_ctx); } void test_verifier_ctx_sk_msg(void) { RUN(verifier_ctx_sk_msg); } void test_verifier_d_path(void) { RUN(verifier_d_path); } void test_verifier_default_trusted_ptr(void) { RUN_TESTS(verifier_default_trusted_ptr); } diff --git a/tools/testing/selftests/bpf/progs/verifier_ctx.c b/tools/testing/selftests/bpf/progs/verifier_ctx.c index 371780290c0d..7856dad3d1f3 100644 --- a/tools/testing/selftests/bpf/progs/verifier_ctx.c +++ b/tools/testing/selftests/bpf/progs/verifier_ctx.c @@ -4,6 +4,10 @@ #include "vmlinux.h" #include <bpf/bpf_helpers.h> #include "bpf_misc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" + +static const char ctx_strncmp_target[] = "ctx"; +static const char ctx_snprintf_fmt[] = ""; SEC("tc") __description("context stores via BPF_ATOMIC") @@ -69,7 +73,6 @@ __naked void ctx_pointer_to_helper_1(void) SEC("socket") __description("pass modified ctx pointer to helper, 2") __failure __msg("negative offset ctx ptr R1 off=-612 disallowed") -__failure_unpriv __msg_unpriv("negative offset ctx ptr R1 off=-612 disallowed") __naked void ctx_pointer_to_helper_2(void) { asm volatile (" \ @@ -292,74 +295,562 @@ padding_access("cgroup/post_bind4", bpf_sock, dst_port, 2); __failure __msg("invalid bpf_context access") padding_access("sk_reuseport", sk_reuseport_md, hash, 4); -SEC("syscall") +SEC("?syscall") 
__description("syscall: write to ctx with fixed offset") __success -__naked void syscall_ctx_fixed_off_write(void) +int syscall_ctx_fixed_off_write(void *ctx) { - asm volatile (" \ - r0 = 0; \ - *(u32*)(r1 + 0) = r0; \ - r1 += 4; \ - *(u32*)(r1 + 0) = r0; \ - exit; \ -" ::: __clobber_all); + char *p = ctx; + + *(__u32 *)p = 0; + *(__u32 *)(p + 4) = 0; + return 0; +} + +SEC("?syscall") +__description("syscall: read ctx with fixed offset") +__success +int syscall_ctx_fixed_off_read(void *ctx) +{ + char *p = ctx; + volatile __u32 val; + + val = *(__u32 *)(p + 4); + (void)val; + return 0; +} + +SEC("?syscall") +__description("syscall: unaligned read ctx with fixed offset") +__success +int syscall_ctx_unaligned_fixed_off_read(void *ctx) +{ + char *p = ctx; + volatile __u32 val; + + val = *(__u32 *)(p + 2); + (void)val; + return 0; +} + +SEC("?syscall") +__description("syscall: unaligned write ctx with fixed offset") +__success +int syscall_ctx_unaligned_fixed_off_write(void *ctx) +{ + char *p = ctx; + + *(__u32 *)(p + 2) = 0; + return 0; +} + +SEC("?syscall") +__description("syscall: read ctx with variable offset") +__success +int syscall_ctx_var_off_read(void *ctx) +{ + __u64 off = bpf_get_prandom_u32(); + char *p = ctx; + volatile __u32 val; + + off &= 0xfc; + p += off; + val = *(__u32 *)p; + (void)val; + return 0; +} + +SEC("?syscall") +__description("syscall: write ctx with variable offset") +__success +int syscall_ctx_var_off_write(void *ctx) +{ + __u64 off = bpf_get_prandom_u32(); + char *p = ctx; + + off &= 0xfc; + p += off; + *(__u32 *)p = 0; + return 0; +} + +SEC("?syscall") +__description("syscall: unaligned read ctx with variable offset") +__success +int syscall_ctx_unaligned_var_off_read(void *ctx) +{ + __u64 off = bpf_get_prandom_u32(); + char *p = ctx; + volatile __u32 val; + + off &= 0xfc; + off += 2; + p += off; + val = *(__u32 *)p; + (void)val; + return 0; +} + +SEC("?syscall") +__description("syscall: unaligned write ctx with variable offset") 
+__success +int syscall_ctx_unaligned_var_off_write(void *ctx) +{ + __u64 off = bpf_get_prandom_u32(); + char *p = ctx; + + off &= 0xfc; + off += 2; + p += off; + *(__u32 *)p = 0; + return 0; +} + +SEC("?syscall") +__description("syscall: reject ctx access past U16_MAX with fixed offset") +__failure __msg("outside of the allowed memory range") +int syscall_ctx_u16_max_fixed_off(void *ctx) +{ + char *p = ctx; + volatile __u32 val; + + p += 65535; + val = *(__u32 *)p; + (void)val; + return 0; +} + +SEC("?syscall") +__description("syscall: reject ctx access past U16_MAX with variable offset") +__failure __msg("outside of the allowed memory range") +int syscall_ctx_u16_max_var_off(void *ctx) +{ + __u64 off = bpf_get_prandom_u32(); + char *p = ctx; + volatile __u32 val; + + off &= 0xffff; + off += 1; + p += off; + val = *(__u32 *)p; + (void)val; + return 0; +} + +SEC("?syscall") +__description("syscall: reject negative variable offset ctx access") +__failure __msg("min value is negative") +int syscall_ctx_neg_var_off(void *ctx) +{ + __u64 off = bpf_get_prandom_u32(); + char *p = ctx; + + off &= 4; + p -= off; + return *(__u32 *)p; +} + +SEC("?syscall") +__description("syscall: reject unbounded variable offset ctx access") +__failure __msg("unbounded memory access") +int syscall_ctx_unbounded_var_off(void *ctx) +{ + __u64 off = (__u32)bpf_get_prandom_u32(); + char *p = ctx; + + off <<= 2; + p += off; + return *(__u32 *)p; +} + +SEC("?syscall") +__description("syscall: helper read ctx with fixed offset") +__success +int syscall_ctx_helper_fixed_off_read(void *ctx) +{ + char *p = ctx; + + p += 4; + return bpf_strncmp(p, 4, ctx_strncmp_target); +} + +SEC("?syscall") +__description("syscall: helper write ctx with fixed offset") +__success +int syscall_ctx_helper_fixed_off_write(void *ctx) +{ + char *p = ctx; + + p += 4; + return bpf_probe_read_kernel(p, 4, 0); +} + +SEC("?syscall") +__description("syscall: helper unaligned read ctx with fixed offset") +__success +int 
syscall_ctx_helper_unaligned_fixed_off_read(void *ctx) +{ + char *p = ctx; + + p += 2; + return bpf_strncmp(p, 4, ctx_strncmp_target); +} + +SEC("?syscall") +__description("syscall: helper unaligned write ctx with fixed offset") +__success +int syscall_ctx_helper_unaligned_fixed_off_write(void *ctx) +{ + char *p = ctx; + + p += 2; + return bpf_probe_read_kernel(p, 4, 0); +} + +SEC("?syscall") +__description("syscall: helper read ctx with variable offset") +__success +int syscall_ctx_helper_var_off_read(void *ctx) +{ + __u64 off = bpf_get_prandom_u32(); + char *p = ctx; + + off &= 0xfc; + p += off; + return bpf_strncmp(p, 4, ctx_strncmp_target); +} + +SEC("?syscall") +__description("syscall: helper write ctx with variable offset") +__success +int syscall_ctx_helper_var_off_write(void *ctx) +{ + __u64 off = bpf_get_prandom_u32(); + char *p = ctx; + + off &= 0xfc; + p += off; + return bpf_probe_read_kernel(p, 4, 0); +} + +SEC("?syscall") +__description("syscall: helper unaligned read ctx with variable offset") +__success +int syscall_ctx_helper_unaligned_var_off_read(void *ctx) +{ + __u64 off = bpf_get_prandom_u32(); + char *p = ctx; + + off &= 0xfc; + off += 2; + p += off; + return bpf_strncmp(p, 4, ctx_strncmp_target); +} + +SEC("?syscall") +__description("syscall: helper unaligned write ctx with variable offset") +__success +int syscall_ctx_helper_unaligned_var_off_write(void *ctx) +{ + __u64 off = bpf_get_prandom_u32(); + char *p = ctx; + + off &= 0xfc; + off += 2; + p += off; + return bpf_probe_read_kernel(p, 4, 0); +} + +SEC("?syscall") +__description("syscall: reject helper read ctx past U16_MAX with fixed offset") +__failure __msg("outside of the allowed memory range") +int syscall_ctx_helper_u16_max_fixed_off_read(void *ctx) +{ + char *p = ctx; + + p += 65535; + return bpf_strncmp(p, 4, ctx_strncmp_target); +} + +SEC("?syscall") +__description("syscall: reject helper write ctx past U16_MAX with fixed offset") +__failure __msg("outside of the allowed memory 
range") +int syscall_ctx_helper_u16_max_fixed_off_write(void *ctx) +{ + char *p = ctx; + + p += 65535; + return bpf_probe_read_kernel(p, 4, 0); +} + +SEC("?syscall") +__description("syscall: reject helper read ctx past U16_MAX with variable offset") +__failure __msg("outside of the allowed memory range") +int syscall_ctx_helper_u16_max_var_off_read(void *ctx) +{ + __u64 off = bpf_get_prandom_u32(); + char *p = ctx; + + off &= 0xffff; + off += 1; + p += off; + return bpf_strncmp(p, 4, ctx_strncmp_target); +} + +SEC("?syscall") +__description("syscall: reject helper write ctx past U16_MAX with variable offset") +__failure __msg("outside of the allowed memory range") +int syscall_ctx_helper_u16_max_var_off_write(void *ctx) +{ + __u64 off = bpf_get_prandom_u32(); + char *p = ctx; + + off &= 0xffff; + off += 1; + p += off; + return bpf_probe_read_kernel(p, 4, 0); +} + +SEC("?syscall") +__description("syscall: helper read zero-sized ctx access") +__success +int syscall_ctx_helper_zero_sized_read(void *ctx) +{ + return bpf_snprintf(0, 0, ctx_snprintf_fmt, ctx, 0); +} + +SEC("?syscall") +__description("syscall: helper write zero-sized ctx access") +__success +int syscall_ctx_helper_zero_sized_write(void *ctx) +{ + return bpf_probe_read_kernel(ctx, 0, 0); +} + +SEC("?syscall") +__description("syscall: kfunc access ctx with fixed offset") +__success +int syscall_ctx_kfunc_fixed_off(void *ctx) +{ + char *p = ctx; + + p += 4; + bpf_kfunc_call_test_mem_len_pass1(p, 4); + return 0; +} + +SEC("?syscall") +__description("syscall: kfunc access ctx with variable offset") +__success +int syscall_ctx_kfunc_var_off(void *ctx) +{ + __u64 off = bpf_get_prandom_u32(); + char *p = ctx; + + off &= 0xfc; + p += off; + bpf_kfunc_call_test_mem_len_pass1(p, 4); + return 0; +} + +SEC("?syscall") +__description("syscall: kfunc unaligned access ctx with fixed offset") +__success +int syscall_ctx_kfunc_unaligned_fixed_off(void *ctx) +{ + char *p = ctx; + + p += 2; + 
bpf_kfunc_call_test_mem_len_pass1(p, 4); + return 0; +} + +SEC("?syscall") +__description("syscall: kfunc unaligned access ctx with variable offset") +__success +int syscall_ctx_kfunc_unaligned_var_off(void *ctx) +{ + __u64 off = bpf_get_prandom_u32(); + char *p = ctx; + + off &= 0xfc; + off += 2; + p += off; + bpf_kfunc_call_test_mem_len_pass1(p, 4); + return 0; +} + +SEC("?syscall") +__description("syscall: reject kfunc ctx access past U16_MAX with fixed offset") +__failure __msg("outside of the allowed memory range") +int syscall_ctx_kfunc_u16_max_fixed_off(void *ctx) +{ + char *p = ctx; + + p += 65535; + bpf_kfunc_call_test_mem_len_pass1(p, 4); + return 0; +} + +SEC("?syscall") +__description("syscall: reject kfunc ctx access past U16_MAX with variable offset") +__failure __msg("outside of the allowed memory range") +int syscall_ctx_kfunc_u16_max_var_off(void *ctx) +{ + __u64 off = bpf_get_prandom_u32(); + char *p = ctx; + + off &= 0xffff; + off += 1; + p += off; + bpf_kfunc_call_test_mem_len_pass1(p, 4); + return 0; +} + +SEC("?syscall") +__description("syscall: kfunc access zero-sized ctx") +__success +int syscall_ctx_kfunc_zero_sized(void *ctx) +{ + bpf_kfunc_call_test_mem_len_pass1(ctx, 0); + return 0; } /* - * Test that program types without convert_ctx_access can dereference - * their ctx pointer after adding a fixed offset. Variable and negative - * offsets should still be rejected. + * For non-syscall program types without convert_ctx_access, direct ctx + * dereference is still allowed after adding a fixed offset, while variable + * and negative direct accesses reject. + * + * Passing ctx as a helper or kfunc memory argument is only permitted for + * syscall programs, so the helper and kfunc cases below validate rejection + * for non-syscall ctx pointers at fixed, variable, and zero-sized accesses. */ -#define no_rewrite_ctx_access(type, name, off, ld_op) \ - SEC(type) \ +#define no_rewrite_ctx_access(type, name, off, load_t) \ + SEC("?" 
type) \ __description(type ": read ctx at fixed offset") \ __success \ - __naked void no_rewrite_##name##_fixed(void) \ + int no_rewrite_##name##_fixed(void *ctx) \ { \ - asm volatile (" \ - r1 += %[__off]; \ - r0 = *(" #ld_op " *)(r1 + 0); \ - r0 = 0; \ - exit;" \ - : \ - : __imm_const(__off, off) \ - : __clobber_all); \ + char *p = ctx; \ + volatile load_t val; \ + \ + val = *(load_t *)(p + off); \ + (void)val; \ + return 0; \ } \ - SEC(type) \ + SEC("?" type) \ __description(type ": reject variable offset ctx access") \ __failure __msg("variable ctx access var_off=") \ - __naked void no_rewrite_##name##_var(void) \ + int no_rewrite_##name##_var(void *ctx) \ { \ - asm volatile (" \ - r6 = r1; \ - call %[bpf_get_prandom_u32]; \ - r1 = r6; \ - r0 &= 4; \ - r1 += r0; \ - r0 = *(" #ld_op " *)(r1 + 0); \ - r0 = 0; \ - exit;" \ - : \ - : __imm(bpf_get_prandom_u32) \ - : __clobber_all); \ + __u64 off_var = bpf_get_prandom_u32(); \ + char *p = ctx; \ + \ + off_var &= 4; \ + p += off_var; \ + return *(load_t *)p; \ } \ - SEC(type) \ + SEC("?" type) \ __description(type ": reject negative offset ctx access") \ - __failure __msg("negative offset ctx ptr") \ - __naked void no_rewrite_##name##_neg(void) \ + __failure __msg("invalid bpf_context access") \ + int no_rewrite_##name##_neg(void *ctx) \ { \ - asm volatile (" \ - r1 += %[__neg_off]; \ - r0 = *(" #ld_op " *)(r1 + 0); \ - r0 = 0; \ - exit;" \ - : \ - : __imm_const(__neg_off, -(off)) \ - : __clobber_all); \ + char *p = ctx; \ + \ + p -= 612; \ + return *(load_t *)p; \ + } \ + SEC("?" type) \ + __description(type ": reject helper read ctx at fixed offset") \ + __failure __msg("dereference of modified ctx ptr") \ + int no_rewrite_##name##_helper_read_fixed(void *ctx) \ + { \ + char *p = ctx; \ + \ + p += off; \ + return bpf_strncmp(p, 4, ctx_strncmp_target); \ + } \ + SEC("?" 
type) \ + __description(type ": reject helper write ctx at fixed offset") \ + __failure __msg("dereference of modified ctx ptr") \ + int no_rewrite_##name##_helper_write_fixed(void *ctx) \ + { \ + char *p = ctx; \ + \ + p += off; \ + return bpf_probe_read_kernel(p, 4, 0); \ + } \ + SEC("?" type) \ + __description(type ": reject helper read ctx with variable offset") \ + __failure __msg("variable ctx access var_off=") \ + int no_rewrite_##name##_helper_read_var(void *ctx) \ + { \ + __u64 off_var = bpf_get_prandom_u32(); \ + char *p = ctx; \ + \ + off_var &= 4; \ + p += off_var; \ + return bpf_strncmp(p, 4, ctx_strncmp_target); \ + } \ + SEC("?" type) \ + __description(type ": reject helper write ctx with variable offset") \ + __failure __msg("variable ctx access var_off=") \ + int no_rewrite_##name##_helper_write_var(void *ctx) \ + { \ + __u64 off_var = bpf_get_prandom_u32(); \ + char *p = ctx; \ + \ + off_var &= 4; \ + p += off_var; \ + return bpf_probe_read_kernel(p, 4, 0); \ + } \ + SEC("?" type) \ + __description(type ": reject helper read zero-sized ctx access") \ + __failure __msg("R4 type=ctx expected=fp") \ + int no_rewrite_##name##_helper_read_zero(void *ctx) \ + { \ + return bpf_snprintf(0, 0, ctx_snprintf_fmt, ctx, 0); \ + } \ + SEC("?" type) \ + __description(type ": reject helper write zero-sized ctx access") \ + __failure __msg("R1 type=ctx expected=fp") \ + int no_rewrite_##name##_helper_write_zero(void *ctx) \ + { \ + return bpf_probe_read_kernel(ctx, 0, 0); \ + } \ + SEC("?" type) \ + __description(type ": reject kfunc ctx at fixed offset") \ + __failure __msg("dereference of modified ctx ptr") \ + int no_rewrite_##name##_kfunc_fixed(void *ctx) \ + { \ + char *p = ctx; \ + \ + p += off; \ + bpf_kfunc_call_test_mem_len_pass1(p, 4); \ + return 0; \ + } \ + SEC("?" 
type) \ + __description(type ": reject kfunc ctx with variable offset") \ + __failure __msg("variable ctx access var_off=") \ + int no_rewrite_##name##_kfunc_var(void *ctx) \ + { \ + __u64 off_var = bpf_get_prandom_u32(); \ + char *p = ctx; \ + \ + off_var &= 4; \ + p += off_var; \ + bpf_kfunc_call_test_mem_len_pass1(p, 4); \ + return 0; \ + } \ + SEC("?" type) \ + __description(type ": reject kfunc zero-sized ctx access") \ + __failure __msg("R1 type=ctx expected=fp") \ + int no_rewrite_##name##_kfunc_zero(void *ctx) \ + { \ + bpf_kfunc_call_test_mem_len_pass1(ctx, 0); \ + return 0; \ } -no_rewrite_ctx_access("syscall", syscall, 4, u32); no_rewrite_ctx_access("kprobe", kprobe, 8, u64); no_rewrite_ctx_access("tracepoint", tp, 8, u64); no_rewrite_ctx_access("raw_tp", raw_tp, 8, u64); diff --git a/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c b/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c index f02012a2fbaa..1e08aff7532e 100644 --- a/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c +++ b/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c @@ -134,7 +134,6 @@ __noinline __weak int subprog_user_anon_mem(user_struct_t *t) SEC("?tracepoint") __failure __log_level(2) -__msg("invalid bpf_context access") __msg("Caller passes invalid args into func#1 ('subprog_user_anon_mem')") int anon_user_mem_invalid(void *ctx) { @@ -358,6 +357,100 @@ int arg_tag_ctx_syscall(void *ctx) return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx) + tp_whatever(ctx); } +__weak int syscall_array_bpf_for(void *ctx __arg_ctx) +{ + int *arr = ctx; + int i; + + bpf_for(i, 0, 100) + arr[i] *= i; + + return 0; +} + +SEC("?syscall") +__success __log_level(2) +int arg_tag_ctx_syscall_bpf_for(void *ctx) +{ + return syscall_array_bpf_for(ctx); +} + +SEC("syscall") +__auxiliary +int syscall_tailcall_target(void *ctx) +{ + return syscall_array_bpf_for(ctx); +} + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 1); + 
__uint(key_size, sizeof(__u32)); + __array(values, int (void *)); +} syscall_prog_array SEC(".maps") = { + .values = { + [0] = (void *)&syscall_tailcall_target, + }, +}; + +SEC("?syscall") +__success __log_level(2) +int arg_tag_ctx_syscall_tailcall(void *ctx) +{ + bpf_tail_call(ctx, &syscall_prog_array, 0); + return 0; +} + +SEC("?syscall") +__failure __log_level(2) +__msg("dereference of modified ctx ptr R1 off=8 disallowed") +int arg_tag_ctx_syscall_tailcall_fixed_off_bad(void *ctx) +{ + char *p = ctx; + + p += 8; + bpf_tail_call(p, &syscall_prog_array, 0); + return 0; +} + +SEC("?syscall") +__failure __log_level(2) +__msg("variable ctx access var_off=(0x0; 0x4) disallowed") +int arg_tag_ctx_syscall_tailcall_var_off_bad(void *ctx) +{ + __u64 off = bpf_get_prandom_u32(); + char *p = ctx; + + off &= 4; + p += off; + bpf_tail_call(p, &syscall_prog_array, 0); + return 0; +} + +SEC("?syscall") +__failure __log_level(2) +__msg("dereference of modified ctx ptr R1 off=8 disallowed") +int arg_tag_ctx_syscall_fixed_off_bad(void *ctx) +{ + char *p = ctx; + + p += 8; + return subprog_ctx_tag(p); +} + +SEC("?syscall") +__failure __log_level(2) +__msg("variable ctx access var_off=(0x0; 0x4) disallowed") +int arg_tag_ctx_syscall_var_off_bad(void *ctx) +{ + __u64 off = bpf_get_prandom_u32(); + char *p = ctx; + + off &= 4; + p += off; + return subprog_ctx_tag(p); +} + __weak int subprog_dynptr(struct bpf_dynptr *dptr) { long *d, t, buf[1] = {}; diff --git a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c index 061356f10093..d876314a4d67 100644 --- a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c +++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c @@ -723,6 +723,7 @@ BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY) BTF_ID_FLAGS(func, bpf_iter_testmod_seq_value) BTF_ID_FLAGS(func, bpf_kfunc_common_test) 
+BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1) BTF_ID_FLAGS(func, bpf_kfunc_dynptr_test) BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_nonzero_offset_test, KF_ACQUIRE) BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_zero_offset_test, KF_ACQUIRE) @@ -1287,7 +1288,6 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_test2) BTF_ID_FLAGS(func, bpf_kfunc_call_test3) BTF_ID_FLAGS(func, bpf_kfunc_call_test4) BTF_ID_FLAGS(func, bpf_kfunc_call_test5) -BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1) BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1) BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2) BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL) |
