author    Sebastian Andrzej Siewior <bigeasy@linutronix.de>    2021-11-27 19:32:00 +0300
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>      2021-12-14 12:57:09 +0300
commit    e76da2e8a09a4ee289e0e90e61092f84feaeb120 (patch)
tree      5827ece8f127a0dc5e9c36a0ce65e38ff5b3a793 /include
parent    9c983fd7cf97ee1329c5ce70674abf06b99c2222 (diff)
bpf: Make sure bpf_disable_instrumentation() is safe vs preemption.
commit 79364031c5b4365ca28ac0fa00acfab5bf465be1 upstream.

The initial implementation of migrate_disable() for mainline was a
wrapper around preempt_disable(). RT kernels substituted this with a
real migrate disable implementation.

Later on mainline gained true migrate disable support, but neither
documentation nor affected code were updated.

Remove stale comments claiming that migrate_disable() is PREEMPT_RT
only.

Don't use __this_cpu_inc() in the !PREEMPT_RT path because preemption
is not disabled and the RMW operation can be preempted.

Fixes: 74d862b682f51 ("sched: Make migrate_disable/enable() independent of RT")
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20211127163200.10466-3-bigeasy@linutronix.de
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
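A minimal sketch of the lost-update window closed by this patch. The
helper name and the open-coded expansion below are illustrative
assumptions, not the patch's code and not the exact output of
__this_cpu_inc() on any given architecture:

	/*
	 * Hypothetical helper modelling the pre-patch !PREEMPT_RT path.
	 * On mainline, migrate_disable() pins the task to its CPU but
	 * no longer disables preemption.
	 */
	#include <linux/percpu.h>
	#include <linux/preempt.h>

	DECLARE_PER_CPU(int, bpf_prog_active);

	static inline void broken_disable_instrumentation(void)
	{
		migrate_disable();

		/*
		 * __this_cpu_inc(bpf_prog_active) may expand to a plain,
		 * non-atomic read-modify-write sequence:
		 */
		int tmp = raw_cpu_read(bpf_prog_active);	/* load */
		/*
		 * Preemption point: a task preempting us on this same CPU
		 * can run bpf_disable/enable_instrumentation() and move the
		 * counter 0 -> 1 -> 0. The store below writes back the
		 * stale value, so that update is lost and instrumentation
		 * can run where it must not.
		 */
		raw_cpu_write(bpf_prog_active, tmp + 1);	/* store */
	}

this_cpu_inc()/this_cpu_dec() close that window because they are
preemption-safe by contract: a single instruction on x86, and an RMW
performed under internal protection on architectures that need it.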
Diffstat (limited to 'include')
-rw-r--r--  include/linux/bpf.h     | 16 ++--------------
-rw-r--r--  include/linux/filter.h  |  3 ---
2 files changed, 2 insertions(+), 17 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 27850b0b535f..6c4640526f74 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1321,28 +1321,16 @@ extern struct mutex bpf_stats_enabled_mutex;
* kprobes, tracepoints) to prevent deadlocks on map operations as any of
* these events can happen inside a region which holds a map bucket lock
* and can deadlock on it.
- *
- * Use the preemption safe inc/dec variants on RT because migrate disable
- * is preemptible on RT and preemption in the middle of the RMW operation
- * might lead to inconsistent state. Use the raw variants for non RT
- * kernels as migrate_disable() maps to preempt_disable() so the slightly
- * more expensive save operation can be avoided.
*/
static inline void bpf_disable_instrumentation(void)
{
migrate_disable();
- if (IS_ENABLED(CONFIG_PREEMPT_RT))
- this_cpu_inc(bpf_prog_active);
- else
- __this_cpu_inc(bpf_prog_active);
+ this_cpu_inc(bpf_prog_active);
}

static inline void bpf_enable_instrumentation(void)
{
- if (IS_ENABLED(CONFIG_PREEMPT_RT))
- this_cpu_dec(bpf_prog_active);
- else
- __this_cpu_dec(bpf_prog_active);
+ this_cpu_dec(bpf_prog_active);
migrate_enable();
}
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 28391de6cc44..1611dc9d4420 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -639,9 +639,6 @@ static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void
* This uses migrate_disable/enable() explicitly to document that the
* invocation of a BPF program does not require reentrancy protection
* against a BPF program which is invoked from a preempting task.
- *
- * For non RT enabled kernels migrate_disable/enable() maps to
- * preempt_disable/enable(), i.e. it disables also preemption.
*/
static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
const void *ctx)