summaryrefslogtreecommitdiff
path: root/include
diff options
context:
space:
mode:
authorAlexei Starovoitov <ast@kernel.org>2025-12-21 21:54:37 +0300
committerAlexei Starovoitov <ast@kernel.org>2025-12-21 21:54:37 +0300
commitc2f2f005a1c2e2d06f07511068917fa729614c18 (patch)
treee437974093abf3f2c4bd9f3c852e2b3fe9125744 /include
parent3d60306b7bb4a3a3c5f01750a2303f987b7d93fa (diff)
parentc3e34f88f9992866a1fb510850921a8fe299a97b (diff)
downloadlinux-c2f2f005a1c2e2d06f07511068917fa729614c18.tar.xz
Merge branch 'bpf-optimize-recursion-detection-on-arm64'
Puranjay Mohan says: ==================== bpf: Optimize recursion detection on arm64 V2: https://lore.kernel.org/all/20251217233608.2374187-1-puranjay@kernel.org/ Changes in v2->v3: - Added acked by Yonghong - Patch 2: - Change alignment of active from 8 to 4 - Use le32_to_cpu in place of get_unaligned_le32() V1: https://lore.kernel.org/all/20251217162830.2597286-1-puranjay@kernel.org/ Changes in V1->V2: - Patch 2: - Put preempt_enable()/disable() around RMW accesses to mitigate race conditions. Because on CONFIG_PREEMPT_RCU and sleepable bpf programs, preemption can cause no bpf prog to execute in case of recursion. BPF programs detect recursion using a per-CPU 'active' flag in struct bpf_prog. The trampoline currently sets/clears this flag with atomic operations. On some arm64 platforms (e.g., Neoverse V2 with LSE), per-CPU atomic operations are relatively slow. Unlike x86_64 - where per-CPU updates can avoid cross-core atomicity, arm64 LSE atomics are always atomic across all cores, which is unnecessary overhead for strictly per-CPU state. This patch removes atomics from the recursion detection path on arm64. It was discovered in [1] that per-CPU atomics that don't return a value were extremely slow on some arm64 platforms, Catalin added a fix in commit 535fdfc5a228 ("arm64: Use load LSE atomics for the non-return per-CPU atomic operations") to solve this issue, but it seems to have caused a regression on the fentry benchmark. 
Using the fentry benchmark from the bpf selftests shows the following: ./tools/testing/selftests/bpf/bench trig-fentry +---------------------------------------------+------------------------+ | Configuration | Total Operations (M/s) | +---------------------------------------------+------------------------+ | bpf-next/master with Catalin’s fix reverted | 51.770 | |---------------------------------------------|------------------------| | bpf-next/master | 43.271 | | bpf-next/master with this change | 53.948 | +---------------------------------------------+------------------------+ All benchmarks were run on a KVM based vm with Neoverse-V2 and 8 cpus. This patch yields a 25% improvement in this benchmark compared to bpf-next. Notably, reverting Catalin's fix also results in a performance gain for this benchmark, which is interesting but expected. For completeness, this benchmark was also run with the change enabled on x86-64, which resulted in a 30% regression in the fentry benchmark. So, it is only enabled on arm64. P.S. - Here is more data with other program types: +-----------------+-----------+-----------+----------+ | Metric | Before | After | % Diff | +-----------------+-----------+-----------+----------+ | fentry | 43.149 | 53.948 | +25.03% | | fentry.s | 41.831 | 50.937 | +21.76% | | rawtp | 50.834 | 58.731 | +15.53% | | fexit | 31.118 | 34.360 | +10.42% | | tp | 39.536 | 41.632 | +5.30% | | syscall-count | 8.053 | 8.305 | +3.13% | | fmodret | 33.940 | 34.769 | +2.44% | | kprobe | 9.970 | 9.998 | +0.28% | | usermode-count | 224.886 | 224.839 | -0.02% | | kernel-count | 154.229 | 153.043 | -0.77% | +-----------------+-----------+-----------+----------+ [1] https://lore.kernel.org/all/e7d539ed-ced0-4b96-8ecd-048a5b803b85@paulmck-laptop/ ==================== Link: https://patch.msgid.link/20251219184422.2899902-1-puranjay@kernel.org Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'include')
-rw-r--r--include/linux/bpf.h38
1 files changed, 37 insertions, 1 deletions
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index bb3847caeae1..da6a00dd313f 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1746,6 +1746,8 @@ struct bpf_prog_aux {
struct bpf_map __rcu *st_ops_assoc;
};
+#define BPF_NR_CONTEXTS 4 /* normal, softirq, hardirq, NMI */
+
struct bpf_prog {
u16 pages; /* Number of allocated pages */
u16 jited:1, /* Is our filter JIT'ed? */
@@ -1772,7 +1774,7 @@ struct bpf_prog {
u8 tag[BPF_TAG_SIZE];
};
struct bpf_prog_stats __percpu *stats;
- int __percpu *active;
+ u8 __percpu *active; /* u8[BPF_NR_CONTEXTS] for recursion protection */
unsigned int (*bpf_func)(const void *ctx,
const struct bpf_insn *insn);
struct bpf_prog_aux *aux; /* Auxiliary fields */
@@ -2004,6 +2006,40 @@ struct bpf_struct_ops_common_value {
enum bpf_struct_ops_state state;
};
+static inline bool bpf_prog_get_recursion_context(struct bpf_prog *prog)
+{
+#ifdef CONFIG_ARM64
+ u8 rctx = interrupt_context_level();
+ u8 *active = this_cpu_ptr(prog->active);
+ u32 val;
+
+ preempt_disable();
+ active[rctx]++;
+ val = le32_to_cpu(*(__le32 *)active);
+ preempt_enable();
+ if (val != BIT(rctx * 8))
+ return false;
+
+ return true;
+#else
+ return this_cpu_inc_return(*(int __percpu *)(prog->active)) == 1;
+#endif
+}
+
+static inline void bpf_prog_put_recursion_context(struct bpf_prog *prog)
+{
+#ifdef CONFIG_ARM64
+ u8 rctx = interrupt_context_level();
+ u8 *active = this_cpu_ptr(prog->active);
+
+ preempt_disable();
+ active[rctx]--;
+ preempt_enable();
+#else
+ this_cpu_dec(*(int __percpu *)(prog->active));
+#endif
+}
+
#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
/* This macro helps developer to register a struct_ops type and generate
* type information correctly. Developers should use this macro to register