author		Linus Torvalds <torvalds@linux-foundation.org>	2020-08-30 21:43:50 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-08-30 21:43:50 +0300
commit		b69bea8a657b681442765b06be92a2607b1bd875
tree		8dcecaf4e5eff737500b73d402b4ef81c1b5368c /include
parent		3edd8db2d53fe6c96ad66248bb1479ae62807268
parent		eb1f00237aca2e368b93db79303f8433d1976d10
download	linux-b69bea8a657b681442765b06be92a2607b1bd875.tar.xz
Merge tag 'locking-urgent-2020-08-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking fixes from Thomas Gleixner:
"A set of fixes for lockdep, tracing and RCU:
- Prevent recursion by using raw_cpu_* operations (see the sketch after
this message)
- Fixup the interrupt state in the cpu idle code to be consistent
- Push rcu_idle_enter/exit() invocations deeper into the idle path so
that the lock operations are inside the RCU watching sections
- Move trace_cpu_idle() into generic code so it's called before RCU
goes idle
- Handle raw_local_irq* vs. local_irq* operations correctly
- Move the tracepoints out from under the lockdep recursion handling
which turned out to be fragile and inconsistent"
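On the first item: as the new comment added to lockdep.h in the diff below spells out, this_cpu_read() can itself call into preempt/irq-disable instrumentation, so using it from inside lockdep can recurse back into lockdep; raw_cpu_read() bypasses that instrumentation, and a stale read after a CPU migration is harmless for these checks. A minimal sketch of the distinction (the helper names here are illustrative, not from the patch):

#include <linux/percpu.h>

DECLARE_PER_CPU(int, hardirqs_enabled);

/* Hypothetical helper: how NOT to read the flag from lockdep-internal code. */
static bool irqs_on_recursive(void)
{
	/* this_cpu_read() may be instrumented and re-enter lockdep */
	return this_cpu_read(hardirqs_enabled);
}

/* Hypothetical helper: the pattern this merge switches to. */
static bool irqs_on_raw(void)
{
	/* raw_cpu_read() skips instrumentation; safe inside lockdep */
	return raw_cpu_read(hardirqs_enabled);
}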
* tag 'locking-urgent-2020-08-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
lockdep,trace: Expose tracepoints
lockdep: Only trace IRQ edges
mips: Implement arch_irqs_disabled()
arm64: Implement arch_irqs_disabled()
nds32: Implement arch_irqs_disabled()
locking/lockdep: Cleanup
x86/entry: Remove unused THUNKs
cpuidle: Move trace_cpu_idle() into generic code
cpuidle: Make CPUIDLE_FLAG_TLB_FLUSHED generic
sched,idle,rcu: Push rcu_idle deeper into the idle path
cpuidle: Fixup IRQ state
lockdep: Use raw_cpu_*() for per-cpu variables
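The rcu_idle and trace_cpu_idle entries above combine into one ordering rule for the generic idle path: tracing and lock operations must run while RCU is still watching, so rcu_idle_enter() becomes the last step before the hardware idle state. A simplified sketch of that ordering (illustrative only, not the literal kernel idle loop):

#include <linux/cpu.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <trace/events/power.h>

static void idle_ordering_sketch(int state_index)
{
	trace_cpu_idle(state_index, smp_processor_id());	/* RCU still watching */
	rcu_idle_enter();	/* pushed as deep as possible */
	arch_cpu_idle();	/* from here on, only raw_local_irq_*() is safe */
	rcu_idle_exit();
	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
}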
Diffstat (limited to 'include')
-rw-r--r--	include/linux/cpuidle.h     | 13
-rw-r--r--	include/linux/irqflags.h    | 73
-rw-r--r--	include/linux/lockdep.h     | 18
-rw-r--r--	include/linux/mmu_context.h |  5
4 files changed, 64 insertions(+), 45 deletions(-)
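A reading aid for the irqflags.h hunks below: local_irq_disable() and local_irq_save() now emit trace_hardirqs_off() only on a real enabled-to-disabled edge, and local_irq_restore() emits trace_hardirqs_on() only when it actually re-enables, so nested critical sections no longer produce redundant trace events. Sketch of the resulting behaviour (standard kernel flag semantics assumed):

unsigned long outer, inner;

local_irq_save(outer);		/* IRQs were on: emits trace_hardirqs_off() */
local_irq_save(inner);		/* already off: no duplicate event */
local_irq_restore(inner);	/* stays off: no event */
local_irq_restore(outer);	/* actually re-enables: emits trace_hardirqs_on() */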
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index b65909ae4e20..75895e6363b8 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -75,12 +75,13 @@ struct cpuidle_state {
 };
 
 /* Idle State Flags */
-#define CPUIDLE_FLAG_NONE	(0x00)
-#define CPUIDLE_FLAG_POLLING	BIT(0) /* polling state */
-#define CPUIDLE_FLAG_COUPLED	BIT(1) /* state applies to multiple cpus */
-#define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */
-#define CPUIDLE_FLAG_UNUSABLE	BIT(3) /* avoid using this state */
-#define CPUIDLE_FLAG_OFF	BIT(4) /* disable this state by default */
+#define CPUIDLE_FLAG_NONE		(0x00)
+#define CPUIDLE_FLAG_POLLING		BIT(0) /* polling state */
+#define CPUIDLE_FLAG_COUPLED		BIT(1) /* state applies to multiple cpus */
+#define CPUIDLE_FLAG_TIMER_STOP		BIT(2) /* timer is stopped on this state */
+#define CPUIDLE_FLAG_UNUSABLE		BIT(3) /* avoid using this state */
+#define CPUIDLE_FLAG_OFF		BIT(4) /* disable this state by default */
+#define CPUIDLE_FLAG_TLB_FLUSHED	BIT(5) /* idle-state flushes TLBs */
 
 struct cpuidle_device_kobj;
 struct cpuidle_state_kobj;
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index bd5c55755447..3ed4e8771b64 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -49,17 +49,18 @@ struct irqtrace_events {
 DECLARE_PER_CPU(int, hardirqs_enabled);
 DECLARE_PER_CPU(int, hardirq_context);
 
- extern void trace_hardirqs_on_prepare(void);
- extern void trace_hardirqs_off_finish(void);
- extern void trace_hardirqs_on(void);
- extern void trace_hardirqs_off(void);
-# define lockdep_hardirq_context()	(this_cpu_read(hardirq_context))
+extern void trace_hardirqs_on_prepare(void);
+extern void trace_hardirqs_off_finish(void);
+extern void trace_hardirqs_on(void);
+extern void trace_hardirqs_off(void);
+
+# define lockdep_hardirq_context()	(raw_cpu_read(hardirq_context))
 # define lockdep_softirq_context(p)	((p)->softirq_context)
 # define lockdep_hardirqs_enabled()	(this_cpu_read(hardirqs_enabled))
 # define lockdep_softirqs_enabled(p)	((p)->softirqs_enabled)
 # define lockdep_hardirq_enter()			\
 do {							\
-	if (this_cpu_inc_return(hardirq_context) == 1)	\
+	if (__this_cpu_inc_return(hardirq_context) == 1)\
 		current->hardirq_threaded = 0;		\
 } while (0)
 # define lockdep_hardirq_threaded() \
@@ -68,7 +69,7 @@ do {							\
 } while (0)
 # define lockdep_hardirq_exit()			\
 do {						\
-	this_cpu_dec(hardirq_context);		\
+	__this_cpu_dec(hardirq_context);	\
 } while (0)
 # define lockdep_softirq_enter()		\
 do {						\
@@ -120,17 +121,17 @@ do {						\
 #else
 # define trace_hardirqs_on_prepare()		do { } while (0)
 # define trace_hardirqs_off_finish()		do { } while (0)
-# define trace_hardirqs_on()		do { } while (0)
-# define trace_hardirqs_off()		do { } while (0)
-# define lockdep_hardirq_context()	0
-# define lockdep_softirq_context(p)	0
-# define lockdep_hardirqs_enabled()	0
-# define lockdep_softirqs_enabled(p)	0
-# define lockdep_hardirq_enter()	do { } while (0)
-# define lockdep_hardirq_threaded()	do { } while (0)
-# define lockdep_hardirq_exit()		do { } while (0)
-# define lockdep_softirq_enter()	do { } while (0)
-# define lockdep_softirq_exit()		do { } while (0)
+# define trace_hardirqs_on()			do { } while (0)
+# define trace_hardirqs_off()			do { } while (0)
+# define lockdep_hardirq_context()		0
+# define lockdep_softirq_context(p)		0
+# define lockdep_hardirqs_enabled()		0
+# define lockdep_softirqs_enabled(p)		0
+# define lockdep_hardirq_enter()		do { } while (0)
+# define lockdep_hardirq_threaded()		do { } while (0)
+# define lockdep_hardirq_exit()			do { } while (0)
+# define lockdep_softirq_enter()		do { } while (0)
+# define lockdep_softirq_exit()			do { } while (0)
 # define lockdep_hrtimer_enter(__hrtimer)	false
 # define lockdep_hrtimer_exit(__context)	do { } while (0)
 # define lockdep_posixtimer_enter()		do { } while (0)
@@ -181,26 +182,33 @@ do {						\
  * if !TRACE_IRQFLAGS.
  */
 #ifdef CONFIG_TRACE_IRQFLAGS
-#define local_irq_enable() \
-	do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
-#define local_irq_disable() \
-	do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
+
+#define local_irq_enable()				\
+	do {						\
+		trace_hardirqs_on();			\
+		raw_local_irq_enable();			\
+	} while (0)
+
+#define local_irq_disable()				\
+	do {						\
+		bool was_disabled = raw_irqs_disabled();\
+		raw_local_irq_disable();		\
+		if (!was_disabled)			\
+			trace_hardirqs_off();		\
+	} while (0)
+
 #define local_irq_save(flags)				\
 	do {						\
 		raw_local_irq_save(flags);		\
-		trace_hardirqs_off();			\
+		if (!raw_irqs_disabled_flags(flags))	\
+			trace_hardirqs_off();		\
 	} while (0)
 
-
 #define local_irq_restore(flags)			\
 	do {						\
-		if (raw_irqs_disabled_flags(flags)) {	\
-			raw_local_irq_restore(flags);	\
-			trace_hardirqs_off();		\
-		} else {				\
+		if (!raw_irqs_disabled_flags(flags))	\
 			trace_hardirqs_on();		\
-			raw_local_irq_restore(flags);	\
-		}					\
+		raw_local_irq_restore(flags);		\
 	} while (0)
 
 #define safe_halt()				\
@@ -214,10 +222,7 @@ do {						\
 
 #define local_irq_enable()	do { raw_local_irq_enable(); } while (0)
 #define local_irq_disable()	do { raw_local_irq_disable(); } while (0)
-#define local_irq_save(flags)				\
-	do {						\
-		raw_local_irq_save(flags);		\
-	} while (0)
+#define local_irq_save(flags)	do { raw_local_irq_save(flags); } while (0)
 #define local_irq_restore(flags) do { raw_local_irq_restore(flags); } while (0)
 
 #define safe_halt()		do { raw_safe_halt(); } while (0)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 62a382d1845b..6a584b3e5c74 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -535,19 +535,27 @@ do {									\
 DECLARE_PER_CPU(int, hardirqs_enabled);
 DECLARE_PER_CPU(int, hardirq_context);
 
+/*
+ * The below lockdep_assert_*() macros use raw_cpu_read() to access the above
+ * per-cpu variables. This is required because this_cpu_read() will potentially
+ * call into preempt/irq-disable and that obviously isn't right. This is also
+ * correct because when IRQs are enabled, it doesn't matter if we accidentally
+ * read the value from our previous CPU.
+ */
+
 #define lockdep_assert_irqs_enabled()					\
 do {									\
-	WARN_ON_ONCE(debug_locks && !this_cpu_read(hardirqs_enabled));	\
+	WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirqs_enabled));	\
 } while (0)
 
 #define lockdep_assert_irqs_disabled()					\
 do {									\
-	WARN_ON_ONCE(debug_locks && this_cpu_read(hardirqs_enabled));	\
+	WARN_ON_ONCE(debug_locks && raw_cpu_read(hardirqs_enabled));	\
 } while (0)
 
 #define lockdep_assert_in_irq()						\
 do {									\
-	WARN_ON_ONCE(debug_locks && !this_cpu_read(hardirq_context));	\
+	WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirq_context));	\
 } while (0)
 
 #define lockdep_assert_preemption_enabled()				\
@@ -555,7 +563,7 @@ do {									\
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
 		     debug_locks			&&		\
 		     (preempt_count() != 0		||		\
-		      !this_cpu_read(hardirqs_enabled)));		\
+		      !raw_cpu_read(hardirqs_enabled)));		\
 } while (0)
 
 #define lockdep_assert_preemption_disabled()				\
@@ -563,7 +571,7 @@ do {									\
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
 		     debug_locks			&&		\
 		     (preempt_count() == 0		&&		\
-		      this_cpu_read(hardirqs_enabled)));		\
+		      raw_cpu_read(hardirqs_enabled)));			\
 } while (0)
 
 #else
diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h
index c51a84132d7c..03dee12d2b61 100644
--- a/include/linux/mmu_context.h
+++ b/include/linux/mmu_context.h
@@ -3,10 +3,15 @@
 #define _LINUX_MMU_CONTEXT_H
 
 #include <asm/mmu_context.h>
+#include <asm/mmu.h>
 
 /* Architectures that care about IRQ state in switch_mm can override this. */
 #ifndef switch_mm_irqs_off
 # define switch_mm_irqs_off switch_mm
 #endif
 
+#ifndef leave_mm
+static inline void leave_mm(int cpu) { }
+#endif
+
 #endif
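The cpuidle.h and mmu_context.h hunks above belong together: CPUIDLE_FLAG_TLB_FLUSHED becomes a generic state flag, and the empty leave_mm() fallback lets generic code act on it on architectures that don't provide their own. A hypothetical driver state entry as a sketch (all field values made up for illustration):

#include <linux/cpuidle.h>

static struct cpuidle_state deep_state_sketch = {
	.name			= "C6-sketch",		/* hypothetical state name */
	.desc			= "TLB-flushing deep state",
	.exit_latency		= 100,
	.target_residency	= 400,
	.flags			= CPUIDLE_FLAG_TLB_FLUSHED, /* core may call leave_mm() */
};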