| author | Kaitao Cheng <chengkaitao@kylinos.cn> | 2026-02-14 15:40:42 +0300 |
|---|---|---|
| committer | Alexei Starovoitov <ast@kernel.org> | 2026-02-24 04:37:06 +0300 |
| commit | ed8fa4b8894f7e80d3a6bf63e4cb880c7545c63a | |
| tree | 6d7f94232c2320da8a90de8bde29c155d2f24eee /tools/testing | |
| parent | fb1590448ff7e79e304f9bbe662fc20334038b50 | |
selftests/bpf: Add test case for rbtree nodes that contain both bpf_refcount and kptr fields.
Allow bpf_kptr_xchg() to operate directly on pointers marked
NON_OWN_REF | MEM_RCU.

In the example added by this patch, the mere presence of the
"struct bpf_refcount ref" member guarantees that the node pointer
whose __kptr field is exchanged carries the MEM_RCU flag; the ref
member itself never needs to be used explicitly.
Signed-off-by: Kaitao Cheng <chengkaitao@kylinos.cn>
Link: https://lore.kernel.org/r/20260214124042.62229-6-pilgrimtao@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
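
The behavior the message describes can be reduced to a short sketch. The fragment below is illustrative only, not part of the patch: the struct, field, and map names (`blob`, `node`, `groot`, `glock`, `probe`) are hypothetical, and it assumes the usual BPF selftests headers (`bpf_experimental.h` for the rbtree kfuncs, `bpf_misc.h` for `container_of`). The point is that, under the lock, `container_of()` yields a non-owning reference to the tree node, and because the node embeds `struct bpf_refcount` that reference also carries MEM_RCU, which is what lets `bpf_kptr_xchg()` accept it:

```c
/* Illustrative sketch only; names are hypothetical, not from the patch. */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"
#include "bpf_misc.h"

struct blob {
	int data;
};

struct node {
	struct bpf_refcount ref;	/* presence alone makes non-owning refs MEM_RCU */
	struct bpf_rb_node rb;
	struct blob __kptr *blob;
};

#define private(name) SEC(".data." #name) __hidden __aligned(8)
private(S) struct bpf_rb_root groot __contains(node, rb);
private(S) struct bpf_spin_lock glock;

SEC("syscall")
long probe(void *ctx)
{
	struct bpf_rb_node *rb_n;
	struct blob *b = NULL;
	struct node *n;

	bpf_spin_lock(&glock);
	rb_n = bpf_rbtree_first(&groot);
	if (rb_n) {
		/* `n` is NON_OWN_REF here, and MEM_RCU thanks to `ref` above,
		 * so the verifier now accepts it as a bpf_kptr_xchg() base.
		 */
		n = container_of(rb_n, struct node, rb);
		b = bpf_kptr_xchg(&n->blob, NULL);
	}
	bpf_spin_unlock(&glock);

	if (b)
		bpf_obj_drop(b);	/* the xchg handed back an owning reference */
	return 0;
}

char _license[] SEC("license") = "GPL";
```

Note that the lock is still held around the exchange, matching the test below, and that the pointer returned by `bpf_kptr_xchg()` is an owning reference that must be dropped.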
Diffstat (limited to 'tools/testing')
| -rw-r--r-- | tools/testing/selftests/bpf/progs/rbtree_search_kptr.c | 123 |
1 file changed, 123 insertions(+), 0 deletions(-)
```diff
diff --git a/tools/testing/selftests/bpf/progs/rbtree_search_kptr.c b/tools/testing/selftests/bpf/progs/rbtree_search_kptr.c
index 069fc64b0167..610aae45e2dc 100644
--- a/tools/testing/selftests/bpf/progs/rbtree_search_kptr.c
+++ b/tools/testing/selftests/bpf/progs/rbtree_search_kptr.c
@@ -18,11 +18,21 @@ struct tree_node {
 	struct node_data __kptr * node_data;
 };
 
+struct tree_node_ref {
+	struct bpf_refcount ref;
+	struct bpf_rb_node node;
+	u64 key;
+	struct node_data __kptr * node_data;
+};
+
 #define private(name) SEC(".data." #name) __hidden __aligned(8)
 private(A) struct bpf_rb_root root __contains(tree_node, node);
 private(A) struct bpf_spin_lock lock;
 
+private(B) struct bpf_rb_root root_r __contains(tree_node_ref, node);
+private(B) struct bpf_spin_lock lock_r;
+
 static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
 {
 	struct tree_node *node_a, *node_b;
 
@@ -130,6 +140,119 @@ fail:
 	return ret;
 }
 
+static bool less_r(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+	struct tree_node_ref *node_a, *node_b;
+
+	node_a = container_of(a, struct tree_node_ref, node);
+	node_b = container_of(b, struct tree_node_ref, node);
+
+	return node_a->key < node_b->key;
+}
+
+SEC("syscall")
+__retval(0)
+long rbtree_search_kptr_ref(void *ctx)
+{
+	struct tree_node_ref *tnode_r, *tnode_m;
+	struct bpf_rb_node *rb_n;
+	struct node_data __kptr * node_data;
+	int lookup_key = NR_NODES / 2;
+	int lookup_data = NR_NODES / 2;
+	int i, data, ret = 0;
+
+	for (i = 0; i < NR_NODES && can_loop; i++) {
+		tnode_r = bpf_obj_new(typeof(*tnode_r));
+		if (!tnode_r)
+			return __LINE__;
+
+		node_data = bpf_obj_new(typeof(*node_data));
+		if (!node_data) {
+			bpf_obj_drop(tnode_r);
+			return __LINE__;
+		}
+
+		tnode_r->key = i;
+		node_data->data = i;
+
+		node_data = bpf_kptr_xchg(&tnode_r->node_data, node_data);
+		if (node_data)
+			bpf_obj_drop(node_data);
+
+		/* Unused reference */
+		tnode_m = bpf_refcount_acquire(tnode_r);
+		if (!tnode_m)
+			return __LINE__;
+
+		bpf_spin_lock(&lock_r);
+		bpf_rbtree_add(&root_r, &tnode_r->node, less_r);
+		bpf_spin_unlock(&lock_r);
+
+		bpf_obj_drop(tnode_m);
+	}
+
+	bpf_spin_lock(&lock_r);
+	rb_n = bpf_rbtree_root(&root_r);
+	while (rb_n && can_loop) {
+		tnode_r = container_of(rb_n, struct tree_node_ref, node);
+		node_data = bpf_kptr_xchg(&tnode_r->node_data, NULL);
+		if (!node_data) {
+			ret = __LINE__;
+			goto fail;
+		}
+
+		data = node_data->data;
+		node_data = bpf_kptr_xchg(&tnode_r->node_data, node_data);
+		if (node_data) {
+			bpf_spin_unlock(&lock_r);
+			bpf_obj_drop(node_data);
+			return __LINE__;
+		}
+
+		if (lookup_key == tnode_r->key) {
+			if (data == lookup_data)
+				break;
+
+			ret = __LINE__;
+			goto fail;
+		}
+
+		if (lookup_key < tnode_r->key)
+			rb_n = bpf_rbtree_left(&root_r, rb_n);
+		else
+			rb_n = bpf_rbtree_right(&root_r, rb_n);
+	}
+	bpf_spin_unlock(&lock_r);
+
+	while (can_loop) {
+		bpf_spin_lock(&lock_r);
+		rb_n = bpf_rbtree_first(&root_r);
+		if (!rb_n) {
+			bpf_spin_unlock(&lock_r);
+			return 0;
+		}
+
+		rb_n = bpf_rbtree_remove(&root_r, rb_n);
+		if (!rb_n) {
+			ret = __LINE__;
+			goto fail;
+		}
+		bpf_spin_unlock(&lock_r);
+
+		tnode_r = container_of(rb_n, struct tree_node_ref, node);
+		node_data = bpf_kptr_xchg(&tnode_r->node_data, NULL);
+		if (node_data)
+			bpf_obj_drop(node_data);
+
+		bpf_obj_drop(tnode_r);
+	}
+
+	return 0;
+fail:
+	bpf_spin_unlock(&lock_r);
+	return ret;
+}
+
 SEC("syscall")
 __failure
 __msg("R1 type=scalar expected=map_value, ptr_, ptr_")
```
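
For completeness, here is how a program like this is typically driven. The runner below is a sketch, not part of this diff: selftests programs annotated with `__retval()`/`__failure`/`__msg()` are executed by the test_loader harness, and the function name and the exact shape of the `prog_tests` entry for this object are assumptions on my part.

```c
/* Hypothetical prog_tests runner (not part of this diff). The test_loader
 * harness reads the __retval(0) and __failure/__msg annotations out of the
 * BPF object and runs each SEC("syscall") program accordingly.
 */
#include <test_progs.h>
#include "rbtree_search_kptr.skel.h"

void test_rbtree_search(void)
{
	RUN_TESTS(rbtree_search_kptr);
}
```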
