author		Alexei Starovoitov <ast@kernel.org>	2026-02-24 04:37:06 +0300
committer	Alexei Starovoitov <ast@kernel.org>	2026-02-24 04:37:21 +0300
commit		0d6bc03d73673ad54945c0f7d7ee2f4e3979c2a5 (patch)
tree		6d7f94232c2320da8a90de8bde29c155d2f24eee /tools/testing
parent		3ecf0b4a0e0ed4783aa32c5f3e42d23c7021e1c8 (diff)
parent		ed8fa4b8894f7e80d3a6bf63e4cb880c7545c63a (diff)
download	linux-0d6bc03d73673ad54945c0f7d7ee2f4e3979c2a5.tar.xz
Merge branch 'bpf-expand-the-usage-scenarios-of-bpf_kptr_xchg'
Kaitao Cheng says:

====================
When using bpf_kptr_xchg, we triggered the following error:

  31: (85) call bpf_kptr_xchg#194
  function calls are not allowed while holding a lock

In fact, bpf_kptr_xchg can safely be called while holding a bpf_spin_lock, so
[patch 1/5] extends its usage scope accordingly.

Even then, test cases that use bpf_kptr_xchg together with the bpf_rbtree_*
kfuncs still had to follow this approach:

	bpf_spin_lock(&lock);
	rb_n = bpf_rbtree_root(&root);
	while (rb_n && can_loop) {
		rb_n = bpf_rbtree_remove(&root, rb_n);
		if (!rb_n)
			goto fail;
		tnode = container_of(rb_n, struct tree_node, node);
		node_data = bpf_kptr_xchg(&tnode->node_data, NULL);
		if (!node_data)
			goto fail;
		data = node_data->data;
		/* use data to do something */
		node_data = bpf_kptr_xchg(&tnode->node_data, node_data);
		if (node_data)
			goto fail;
		bpf_rbtree_add(&root, rb_n, less);
		if (lookup_key < tnode->key)
			rb_n = bpf_rbtree_left(&root, rb_n);
		else
			rb_n = bpf_rbtree_right(&root, rb_n);
	}
	bpf_spin_unlock(&lock);

This is a lock-remove-read-add-unlock workflow, which is slower than necessary
because every node visited has to be removed from and re-added to the tree. To
address this, [patch 2/5] and [patch 4/5] add support for a streamlined
lock-read-unlock operation.

Changes in v7:
 - Add a comma to the variable declaration in enum bpf_reg_type
 - Modify the prefixes

Changes in v6:
 - Allow using bpf_kptr_xchg even if the MEM_RCU flag is set
 - Add test case

Changes in v5:
 - Add lastname

Changes in v4:
 - Fix the dead logic issue in the test case

Changes in v3:
 - Fix compilation errors

Changes in v2:
 - Allow using bpf_kptr_xchg even if the NON_OWN_REF flag is set
 - Add test case

Link to v6: https://lore.kernel.org/all/20260208024846.18653-1-pilgrimtao@gmail.com/
Link to v5: https://lore.kernel.org/all/20260203022712.99347-1-pilgrimtao@gmail.com/
Link to v4: https://lore.kernel.org/all/20260202090051.87802-1-pilgrimtao@gmail.com/
Link to v3: https://lore.kernel.org/all/20260202055818.78231-1-pilgrimtao@gmail.com/
Link to v2: https://lore.kernel.org/all/20260201031607.32940-1-pilgrimtao@gmail.com/
Link to v1: https://lore.kernel.org/all/20260122081426.78472-1-pilgrimtao@gmail.com/
====================

Link: https://patch.msgid.link/20260214124042.62229-1-pilgrimtao@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
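For reference, a minimal sketch of the streamlined lock-read-unlock pattern the
series enables, mirroring the search loop in the rbtree_search_kptr.c selftest
added below; the tree_node/node_data types, root, lock and the lookup variables
are the ones defined in that selftest, and the error handling is abbreviated:

	bpf_spin_lock(&lock);
	rb_n = bpf_rbtree_root(&root);
	while (rb_n && can_loop) {
		tnode = container_of(rb_n, struct tree_node, node);
		/* Read the kptr in place: no remove/add round trip is needed. */
		node_data = bpf_kptr_xchg(&tnode->node_data, NULL);
		if (!node_data)
			goto fail;
		data = node_data->data;
		/* use data to do something */
		node_data = bpf_kptr_xchg(&tnode->node_data, node_data);
		if (node_data) {
			/* Unexpected: the slot should have been empty.
			 * Drop our reference outside the lock.
			 */
			bpf_spin_unlock(&lock);
			bpf_obj_drop(node_data);
			return -1;
		}
		if (tnode->key == lookup_key)
			break;
		if (lookup_key < tnode->key)
			rb_n = bpf_rbtree_left(&root, rb_n);
		else
			rb_n = bpf_rbtree_right(&root, rb_n);
	}
	bpf_spin_unlock(&lock);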
Diffstat (limited to 'tools/testing')
-rw-r--r--	tools/testing/selftests/bpf/prog_tests/rbtree.c		6
-rw-r--r--	tools/testing/selftests/bpf/progs/bpf_misc.h			4
-rw-r--r--	tools/testing/selftests/bpf/progs/rbtree_search_kptr.c		290
3 files changed, 300 insertions, 0 deletions
diff --git a/tools/testing/selftests/bpf/prog_tests/rbtree.c b/tools/testing/selftests/bpf/prog_tests/rbtree.c
index d8f3d7a45fe9..a854fb38e418 100644
--- a/tools/testing/selftests/bpf/prog_tests/rbtree.c
+++ b/tools/testing/selftests/bpf/prog_tests/rbtree.c
@@ -9,6 +9,7 @@
#include "rbtree_btf_fail__wrong_node_type.skel.h"
#include "rbtree_btf_fail__add_wrong_type.skel.h"
#include "rbtree_search.skel.h"
+#include "rbtree_search_kptr.skel.h"
static void test_rbtree_add_nodes(void)
{
@@ -193,3 +194,8 @@ void test_rbtree_search(void)
{
RUN_TESTS(rbtree_search);
}
+
+void test_rbtree_search_kptr(void)
+{
+ RUN_TESTS(rbtree_search_kptr);
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h
index c9bfbe1bafc1..0904fe14ad1d 100644
--- a/tools/testing/selftests/bpf/progs/bpf_misc.h
+++ b/tools/testing/selftests/bpf/progs/bpf_misc.h
@@ -188,6 +188,10 @@
#define POINTER_VALUE 0xbadcafe
#define TEST_DATA_LEN 64
+#ifndef __aligned
+#define __aligned(x) __attribute__((aligned(x)))
+#endif
+
#ifndef __used
#define __used __attribute__((used))
#endif
diff --git a/tools/testing/selftests/bpf/progs/rbtree_search_kptr.c b/tools/testing/selftests/bpf/progs/rbtree_search_kptr.c
new file mode 100644
index 000000000000..610aae45e2dc
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/rbtree_search_kptr.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2026 KylinSoft Corporation. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+#define NR_NODES 16
+
+struct node_data {
+ int data;
+};
+
+struct tree_node {
+ struct bpf_rb_node node;
+ u64 key;
+ struct node_data __kptr * node_data;
+};
+
+struct tree_node_ref {
+ struct bpf_refcount ref;
+ struct bpf_rb_node node;
+ u64 key;
+ struct node_data __kptr * node_data;
+};
+
+#define private(name) SEC(".data." #name) __hidden __aligned(8)
+
+private(A) struct bpf_rb_root root __contains(tree_node, node);
+private(A) struct bpf_spin_lock lock;
+
+private(B) struct bpf_rb_root root_r __contains(tree_node_ref, node);
+private(B) struct bpf_spin_lock lock_r;
+
+static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct tree_node *node_a, *node_b;
+
+ node_a = container_of(a, struct tree_node, node);
+ node_b = container_of(b, struct tree_node, node);
+
+ return node_a->key < node_b->key;
+}
+
+SEC("syscall")
+__retval(0)
+long rbtree_search_kptr(void *ctx)
+{
+ struct tree_node *tnode;
+ struct bpf_rb_node *rb_n;
+ struct node_data __kptr * node_data;
+ int lookup_key = NR_NODES / 2;
+ int lookup_data = NR_NODES / 2;
+ int i, data, ret = 0;
+
+ for (i = 0; i < NR_NODES && can_loop; i++) {
+ tnode = bpf_obj_new(typeof(*tnode));
+ if (!tnode)
+ return __LINE__;
+
+ node_data = bpf_obj_new(typeof(*node_data));
+ if (!node_data) {
+ bpf_obj_drop(tnode);
+ return __LINE__;
+ }
+
+ tnode->key = i;
+ node_data->data = i;
+
+ node_data = bpf_kptr_xchg(&tnode->node_data, node_data);
+ if (node_data)
+ bpf_obj_drop(node_data);
+
+ bpf_spin_lock(&lock);
+ bpf_rbtree_add(&root, &tnode->node, less);
+ bpf_spin_unlock(&lock);
+ }
+
+ bpf_spin_lock(&lock);
+ rb_n = bpf_rbtree_root(&root);
+ while (rb_n && can_loop) {
+ tnode = container_of(rb_n, struct tree_node, node);
+ node_data = bpf_kptr_xchg(&tnode->node_data, NULL);
+ if (!node_data) {
+ ret = __LINE__;
+ goto fail;
+ }
+
+ data = node_data->data;
+ node_data = bpf_kptr_xchg(&tnode->node_data, node_data);
+ if (node_data) {
+ bpf_spin_unlock(&lock);
+ bpf_obj_drop(node_data);
+ return __LINE__;
+ }
+
+ if (lookup_key == tnode->key) {
+ if (data == lookup_data)
+ break;
+
+ ret = __LINE__;
+ goto fail;
+ }
+
+ if (lookup_key < tnode->key)
+ rb_n = bpf_rbtree_left(&root, rb_n);
+ else
+ rb_n = bpf_rbtree_right(&root, rb_n);
+ }
+ bpf_spin_unlock(&lock);
+
+ while (can_loop) {
+ bpf_spin_lock(&lock);
+ rb_n = bpf_rbtree_first(&root);
+ if (!rb_n) {
+ bpf_spin_unlock(&lock);
+ return 0;
+ }
+
+ rb_n = bpf_rbtree_remove(&root, rb_n);
+ if (!rb_n) {
+ ret = __LINE__;
+ goto fail;
+ }
+ bpf_spin_unlock(&lock);
+
+ tnode = container_of(rb_n, struct tree_node, node);
+
+ node_data = bpf_kptr_xchg(&tnode->node_data, NULL);
+ if (node_data)
+ bpf_obj_drop(node_data);
+
+ bpf_obj_drop(tnode);
+ }
+
+ return 0;
+fail:
+ bpf_spin_unlock(&lock);
+ return ret;
+}
+
+static bool less_r(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct tree_node_ref *node_a, *node_b;
+
+ node_a = container_of(a, struct tree_node_ref, node);
+ node_b = container_of(b, struct tree_node_ref, node);
+
+ return node_a->key < node_b->key;
+}
+
+SEC("syscall")
+__retval(0)
+long rbtree_search_kptr_ref(void *ctx)
+{
+ struct tree_node_ref *tnode_r, *tnode_m;
+ struct bpf_rb_node *rb_n;
+ struct node_data __kptr * node_data;
+ int lookup_key = NR_NODES / 2;
+ int lookup_data = NR_NODES / 2;
+ int i, data, ret = 0;
+
+ for (i = 0; i < NR_NODES && can_loop; i++) {
+ tnode_r = bpf_obj_new(typeof(*tnode_r));
+ if (!tnode_r)
+ return __LINE__;
+
+ node_data = bpf_obj_new(typeof(*node_data));
+ if (!node_data) {
+ bpf_obj_drop(tnode_r);
+ return __LINE__;
+ }
+
+ tnode_r->key = i;
+ node_data->data = i;
+
+ node_data = bpf_kptr_xchg(&tnode_r->node_data, node_data);
+ if (node_data)
+ bpf_obj_drop(node_data);
+
+ /* Unused reference */
+ tnode_m = bpf_refcount_acquire(tnode_r);
+ if (!tnode_m)
+ return __LINE__;
+
+ bpf_spin_lock(&lock_r);
+ bpf_rbtree_add(&root_r, &tnode_r->node, less_r);
+ bpf_spin_unlock(&lock_r);
+
+ bpf_obj_drop(tnode_m);
+ }
+
+ bpf_spin_lock(&lock_r);
+ rb_n = bpf_rbtree_root(&root_r);
+ while (rb_n && can_loop) {
+ tnode_r = container_of(rb_n, struct tree_node_ref, node);
+ node_data = bpf_kptr_xchg(&tnode_r->node_data, NULL);
+ if (!node_data) {
+ ret = __LINE__;
+ goto fail;
+ }
+
+ data = node_data->data;
+ node_data = bpf_kptr_xchg(&tnode_r->node_data, node_data);
+ if (node_data) {
+ bpf_spin_unlock(&lock_r);
+ bpf_obj_drop(node_data);
+ return __LINE__;
+ }
+
+ if (lookup_key == tnode_r->key) {
+ if (data == lookup_data)
+ break;
+
+ ret = __LINE__;
+ goto fail;
+ }
+
+ if (lookup_key < tnode_r->key)
+ rb_n = bpf_rbtree_left(&root_r, rb_n);
+ else
+ rb_n = bpf_rbtree_right(&root_r, rb_n);
+ }
+ bpf_spin_unlock(&lock_r);
+
+ while (can_loop) {
+ bpf_spin_lock(&lock_r);
+ rb_n = bpf_rbtree_first(&root_r);
+ if (!rb_n) {
+ bpf_spin_unlock(&lock_r);
+ return 0;
+ }
+
+ rb_n = bpf_rbtree_remove(&root_r, rb_n);
+ if (!rb_n) {
+ ret = __LINE__;
+ goto fail;
+ }
+ bpf_spin_unlock(&lock_r);
+
+ tnode_r = container_of(rb_n, struct tree_node_ref, node);
+
+ node_data = bpf_kptr_xchg(&tnode_r->node_data, NULL);
+ if (node_data)
+ bpf_obj_drop(node_data);
+
+ bpf_obj_drop(tnode_r);
+ }
+
+ return 0;
+fail:
+ bpf_spin_unlock(&lock_r);
+ return ret;
+}
+
+SEC("syscall")
+__failure __msg("R1 type=scalar expected=map_value, ptr_, ptr_")
+long non_own_ref_kptr_xchg_no_lock(void *ctx)
+{
+ struct tree_node *tnode;
+ struct bpf_rb_node *rb_n;
+ struct node_data __kptr * node_data;
+ int data;
+
+ bpf_spin_lock(&lock);
+ rb_n = bpf_rbtree_first(&root);
+ if (!rb_n) {
+ bpf_spin_unlock(&lock);
+ return __LINE__;
+ }
+ bpf_spin_unlock(&lock);
+
+ tnode = container_of(rb_n, struct tree_node, node);
+ node_data = bpf_kptr_xchg(&tnode->node_data, NULL);
+ if (!node_data)
+ return __LINE__;
+
+ data = node_data->data;
+ if (data < 0)
+ return __LINE__;
+
+ node_data = bpf_kptr_xchg(&tnode->node_data, node_data);
+ if (node_data)
+ return __LINE__;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";