author		Amery Hung <amery.hung@bytedance.com>	2024-08-27 04:13:01 +0300
committer	Alexei Starovoitov <ast@kernel.org>	2024-08-29 22:18:26 +0300
commit		bd0b4836a2333d5c725397d458c8edfa66f1d9bb (patch)
tree		70b91ce44d79541d940dd4010bf1f6be03dcd1d6 /tools
parent		c634d6f4e12d00c954410ba11db45799a8c77b5b (diff)
download	linux-bd0b4836a2333d5c725397d458c8edfa66f1d9bb.tar.xz
selftests/bpf: Make sure stashed kptr in local kptr is freed recursively
When dropping a local kptr, any kptr stashed into it is supposed to be
freed through bpf_obj_free_fields->__bpf_obj_drop_impl recursively. Add a
test to make sure it happens.
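
For context, the stash-and-drop pattern being exercised looks roughly like
this (a minimal sketch, not code from the patch; the struct name
"task_container" is illustrative):

	/* A local kptr type with a stashed task kptr. On bpf_obj_drop(),
	 * bpf_obj_free_fields() walks the object's BTF record, finds the
	 * __kptr field, and puts the stashed reference through its dtor
	 * (recursing via __bpf_obj_drop_impl() where needed).
	 */
	struct task_container {
		struct task_struct __kptr *task;
	};

	struct task_container *c = bpf_obj_new(typeof(*c));

	if (c) {
		struct task_struct *old = bpf_kptr_xchg(&c->task, acquired);

		if (old)		/* xchg returns the previous value */
			bpf_task_release(old);
		bpf_obj_drop(c);	/* also releases c->task recursively */
	}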
The test first stashes a referenced kptr to "struct task_struct" into a local
kptr and gets the reference count of the task. Then, it drops the local
kptr and reads the reference count of the task again. Since
bpf_obj_free_fields and __bpf_obj_drop_impl will go through the local kptr
recursively during bpf_obj_drop, the dtor of the stashed task kptr should
eventually be called. The second reference count should be one less than
the first one.
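
Concretely, the check in the patch below amounts to the following
before/after comparison (a condensed sketch; rcu_users is the task_struct
reference count the test samples with bpf_probe_read_kernel()):

	int before, after;

	/* Sample the refcount while holding an acquired reference. */
	bpf_probe_read_kernel(&before, sizeof(before), &task->rcu_users);

	/* ... stash the acquired reference into the local kptr, then
	 * bpf_obj_drop() the local kptr ...
	 */

	/* If the stashed kptr's dtor ran, exactly one reference was put. */
	bpf_probe_read_kernel(&after, sizeof(after), &task->rcu_users);
	if (before != after + 1)
		err = 9;	/* recursive release did not happen */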
Signed-off-by: Amery Hung <amery.hung@bytedance.com>
Acked-by: Martin KaFai Lau <martin.lau@kernel.org>
Link: https://lore.kernel.org/r/20240827011301.608620-1-amery.hung@bytedance.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'tools')
-rw-r--r--	tools/testing/selftests/bpf/progs/task_kfunc_success.c	30
1 file changed, 29 insertions(+), 1 deletion(-)
diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_success.c b/tools/testing/selftests/bpf/progs/task_kfunc_success.c
index 3138bb689b0b..a55149015063 100644
--- a/tools/testing/selftests/bpf/progs/task_kfunc_success.c
+++ b/tools/testing/selftests/bpf/progs/task_kfunc_success.c
@@ -143,8 +143,9 @@ int BPF_PROG(test_task_acquire_leave_in_map, struct task_struct *task, u64 clone
 SEC("tp_btf/task_newtask")
 int BPF_PROG(test_task_xchg_release, struct task_struct *task, u64 clone_flags)
 {
-	struct task_struct *kptr;
+	struct task_struct *kptr, *acquired;
 	struct __tasks_kfunc_map_value *v, *local;
+	int refcnt, refcnt_after_drop;
 	long status;
 
 	if (!is_test_kfunc_task())
@@ -190,7 +191,34 @@ int BPF_PROG(test_task_xchg_release, struct task_struct *task, u64 clone_flags)
 		return 0;
 	}
 
+	/* Stash a copy into local kptr and check if it is released recursively */
+	acquired = bpf_task_acquire(kptr);
+	if (!acquired) {
+		err = 7;
+		bpf_obj_drop(local);
+		bpf_task_release(kptr);
+		return 0;
+	}
+	bpf_probe_read_kernel(&refcnt, sizeof(refcnt), &acquired->rcu_users);
+
+	acquired = bpf_kptr_xchg(&local->task, acquired);
+	if (acquired) {
+		err = 8;
+		bpf_obj_drop(local);
+		bpf_task_release(kptr);
+		bpf_task_release(acquired);
+		return 0;
+	}
+	bpf_obj_drop(local);
+
+	bpf_probe_read_kernel(&refcnt_after_drop, sizeof(refcnt_after_drop), &kptr->rcu_users);
+	if (refcnt != refcnt_after_drop + 1) {
+		err = 9;
+		bpf_task_release(kptr);
+		return 0;
+	}
+
 	bpf_task_release(kptr);
 	return 0;
 }