author     Thomas Gleixner <tglx@linutronix.de>     2020-02-24 17:01:38 +0300
committer  Alexei Starovoitov <ast@kernel.org>      2020-02-25 03:18:20 +0300
commit     1d7bf6b7d3e8353c3fac648f3f9b3010458570c2
tree       78fa90b9a7793a15078ea197fc19afb96539e356 /kernel
parent     b0a81b94cc50a112601721fcc2f91fab78d7b9f3
perf/bpf: Remove preempt disable around BPF invocation
The BPF invocation from the perf event overflow handler does not require
disabling preemption, because it is called from NMI or at least hard
interrupt context, which is already non-preemptible.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200224145643.151953573@linutronix.de
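The key assumption is that the overflow handler only ever runs in NMI or
hard interrupt context. A minimal, purely illustrative check (not part of
this patch) that would express that invariant at the top of the handler:

	/* Illustrative only, not in the patch: document the context invariant. */
	WARN_ON_ONCE(!in_nmi() && !in_irq());	/* called from NMI or hard IRQ */
	WARN_ON_ONCE(preemptible());		/* hence never preemptible here */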
Diffstat (limited to 'kernel')
 kernel/events/core.c | 2 --
 1 file changed, 2 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index e453589da97c..bbdfac0182f4 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -9206,7 +9206,6 @@ static void bpf_overflow_handler(struct perf_event *event,
 	int ret = 0;
 
 	ctx.regs = perf_arch_bpf_user_pt_regs(regs);
-	preempt_disable();
 	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
 		goto out;
 	rcu_read_lock();
@@ -9214,7 +9213,6 @@ static void bpf_overflow_handler(struct perf_event *event,
 	rcu_read_unlock();
 out:
 	__this_cpu_dec(bpf_prog_active);
-	preempt_enable();
 	if (!ret)
 		return;
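For context, a sketch of bpf_overflow_handler() as it reads after this
change. Only the lines visible in the two hunks above come from the patch;
the function signature, the ctx initializer, the BPF_PROG_RUN() call and
the wakeup tail are reconstructed from kernel sources of that period and
should be treated as an approximation, not as part of the commit.

static void bpf_overflow_handler(struct perf_event *event,
				 struct perf_sample_data *data,
				 struct pt_regs *regs)
{
	struct bpf_perf_event_data_kern ctx = {
		.data = data,
		.event = event,
	};
	int ret = 0;

	ctx.regs = perf_arch_bpf_user_pt_regs(regs);
	/*
	 * No preempt_disable() needed: this runs in NMI or hard interrupt
	 * context, so the task cannot be preempted or migrated while the
	 * per-CPU recursion counter and the BPF program run are in flight.
	 */
	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
		goto out;
	rcu_read_lock();
	ret = BPF_PROG_RUN(event->prog, &ctx);
	rcu_read_unlock();
out:
	__this_cpu_dec(bpf_prog_active);
	if (!ret)
		return;

	/* ... wakeup / irq_work handling of the overflow event follows ... */
}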