author     Eduard Zingerman <eddyz87@gmail.com>    2024-08-22 11:41:12 +0300
committer  Alexei Starovoitov <ast@kernel.org>     2024-08-22 18:35:21 +0300
commit     8c2e043daadad021fc501ac64cce131f48c3ca46 (patch)
tree       12248bcf91ee0c83c6f25e055a42a85acfac3323 /tools
parent     f406026fefa745ff9a3153507cc3b9a87371d954 (diff)
download   linux-8c2e043daadad021fc501ac64cce131f48c3ca46.tar.xz
selftests/bpf: check if bpf_fastcall is recognized for kfuncs
Use kfunc_bpf_cast_to_kern_ctx() and kfunc_bpf_rdonly_cast() to verify
that the bpf_fastcall pattern is recognized for kfunc calls.

Acked-by: Yonghong Song <yonghong.song@linux.dev>
Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20240822084112.3257995-7-eddyz87@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
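For background, a minimal sketch (not part of this patch) of the spill/fill shape the verifier recognizes around a bpf_fastcall call site: a caller-saved register is spilled to the stack right before the call and filled right after it, and when the callee is bpf_fastcall-eligible both instructions can be removed. The program name below is made up; bpf_get_smp_processor_id() is used as a known bpf_fastcall-eligible helper.

/* Sketch only, assuming the selftests helpers from bpf_misc.h
 * (__naked, __imm, __clobber_all).
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

SEC("raw_tp")
__naked int fastcall_pattern_sketch(void)
{
	asm volatile (
	"r1 = 42;"
	"*(u64 *)(r10 - 8) = r1;"	/* spill r1 before the call */
	"call %[bpf_get_smp_processor_id];"
	"r1 = *(u64 *)(r10 - 8);"	/* fill r1 after the call */
	"r0 = r1;"
	"exit;"
	:
	: __imm(bpf_get_smp_processor_id)
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";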
Diffstat (limited to 'tools')
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c | 55
1 file changed, 55 insertions, 0 deletions
diff --git a/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c b/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c
index e30ab9fe5096..9da97d2efcd9 100644
--- a/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c
+++ b/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c
@@ -2,8 +2,11 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
#include "../../../include/linux/filter.h"
#include "bpf_misc.h"
+#include <stdbool.h>
+#include "bpf_kfuncs.h"
SEC("raw_tp")
__arch_x86_64
@@ -842,4 +845,56 @@ __naked int bpf_fastcall_max_stack_fail(void)
);
}
+SEC("cgroup/getsockname_unix")
+__xlated("0: r2 = 1")
+/* bpf_cast_to_kern_ctx is replaced by a single assignment */
+__xlated("1: r0 = r1")
+__xlated("2: r0 = r2")
+__xlated("3: exit")
+__success
+__naked void kfunc_bpf_cast_to_kern_ctx(void)
+{
+ asm volatile (
+ "r2 = 1;"
+ "*(u64 *)(r10 - 32) = r2;"
+ "call %[bpf_cast_to_kern_ctx];"
+ "r2 = *(u64 *)(r10 - 32);"
+ "r0 = r2;"
+ "exit;"
+ :
+ : __imm(bpf_cast_to_kern_ctx)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__xlated("3: r3 = 1")
+/* bpf_rdonly_cast is replaced by a single assignment */
+__xlated("4: r0 = r1")
+__xlated("5: r0 = r3")
+void kfunc_bpf_rdonly_cast(void)
+{
+ asm volatile (
+ "r2 = %[btf_id];"
+ "r3 = 1;"
+ "*(u64 *)(r10 - 32) = r3;"
+ "call %[bpf_rdonly_cast];"
+ "r3 = *(u64 *)(r10 - 32);"
+ "r0 = r3;"
+ :
+ : __imm(bpf_rdonly_cast),
+ [btf_id]"r"(bpf_core_type_id_kernel(union bpf_attr))
+ : __clobber_common);
+}
+
+/* BTF FUNC records are not generated for kfuncs referenced
+ * from inline assembly. These records are necessary for
+ * libbpf to link the program. The function below is a hack
+ * to ensure that BTF FUNC records are generated.
+ */
+void kfunc_root(void)
+{
+ bpf_cast_to_kern_ctx(0);
+ bpf_rdonly_cast(0, 0);
+}
+
char _license[] SEC("license") = "GPL";
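Assuming the standard selftests workflow, these checks would be exercised via the test_progs runner; the exact test name is inferred from the source file name, so treat this invocation as an assumption:

cd tools/testing/selftests/bpf
make
./test_progs -t verifier_bpf_fastcall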