author    Nicholas Piggin <npiggin@gmail.com>  2021-09-22 17:54:48 +0300
committer Michael Ellerman <mpe@ellerman.id.au>  2021-12-16 13:31:45 +0300
commit    ff0b0d6e1a7bc202241a9b1e28d1da4b744e0312 (patch)
tree      55d5219070a8e9d7e50e51141a1d7a4033e5a7be /arch/powerpc/include/asm/interrupt.h
parent    4423eb5ae32ec613af3fceee2fe84234e417ee55 (diff)
powerpc/64s/interrupt: handle MSR EE and RI in interrupt entry wrapper
The mtmsrd to enable MSR[RI] can be combined with the mtmsrd to enable MSR[EE] in interrupt entry code, for those interrupts which enable EE. This helps performance of important synchronous interrupts (e.g., page faults).

This is similar to what commit dd152f70bdc1 ("powerpc/64s: system call avoid setting MSR[RI] until we set MSR[EE]") does for system calls.

Do this by enabling EE and RI together at the beginning of the entry wrapper if PACA_IRQ_HARD_DIS is clear, and only enabling RI if it is set.

Asynchronous interrupts set PACA_IRQ_HARD_DIS, but synchronous ones leave it unchanged, so by default they always get EE=1 unless they have interrupted a caller that is hard disabled. When the sync interrupt later calls interrupt_cond_local_irq_enable(), it will not require another mtmsrd because MSR[EE] was already enabled here.

This avoids one mtmsrd L=1 for synchronous interrupts on 64s, which saves about 20 cycles on POWER9. And for kernel-mode interrupts, both synchronous and asynchronous, this saves an additional 40 cycles due to the mtmsrd being moved ahead of mfspr SPRN_AMR, which prevents a SPR scoreboard stall.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210922145452.352571-3-npiggin@gmail.com
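In outline, the entry wrapper picks one of two paths based on PACA_IRQ_HARD_DIS, as the hunks below show. The following is a minimal, self-contained C sketch of that control flow only: the flat msr and irq_happened variables and the hard_irq_enable()/hard_RI_enable() helpers are simplified stand-ins for the real PACA state and the kernel's __hard_irq_enable()/__hard_RI_enable(), not the actual implementation.

#include <stdio.h>

/* MSR bit encodings as on 64-bit Book3S: EE is bit 15, RI is bit 1 */
#define MSR_EE            (1u << 15)
#define MSR_RI            (1u << 1)
/* Stand-in for the PACA flag tested by the wrapper */
#define PACA_IRQ_HARD_DIS (1u << 0)

static unsigned int msr;          /* models MSR at interrupt entry: EE=0, RI=0 */
static unsigned int irq_happened; /* models local_paca->irq_happened */

/* One mtmsrd L=1 that sets EE and RI together (the combined path) */
static void hard_irq_enable(void) { msr |= MSR_EE | MSR_RI; }

/* Only mark the machine state recoverable */
static void hard_RI_enable(void)  { msr |= MSR_RI; }

static void interrupt_enter_prepare_sketch(void)
{
	if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
		/* Synchronous interrupt from a hard-enabled context:
		 * enable EE and RI with a single mtmsrd. */
		hard_irq_enable();
	} else {
		/* Async interrupt, or the caller was hard-disabled:
		 * become recoverable but leave EE off. */
		hard_RI_enable();
	}
}

int main(void)
{
	/* Synchronous case: HARD_DIS clear, so EE and RI come on together */
	interrupt_enter_prepare_sketch();
	printf("sync:  EE=%d RI=%d\n", !!(msr & MSR_EE), !!(msr & MSR_RI));

	/* Asynchronous case: HARD_DIS was set before the wrapper ran */
	msr = 0;
	irq_happened = PACA_IRQ_HARD_DIS;
	interrupt_enter_prepare_sketch();
	printf("async: EE=%d RI=%d\n", !!(msr & MSR_EE), !!(msr & MSR_RI));
	return 0;
}

Because the synchronous path already leaves MSR[EE] set, the later interrupt_cond_local_irq_enable() call needs no second mtmsrd; that is where the ~20-cycle POWER9 saving described above comes from.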
Diffstat (limited to 'arch/powerpc/include/asm/interrupt.h')
-rw-r--r--  arch/powerpc/include/asm/interrupt.h | 27
1 file changed, 23 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index 2ab7e31c823f..aa65bb774cdb 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -149,8 +149,14 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup
#endif
#ifdef CONFIG_PPC64
- if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
- trace_hardirqs_off();
+ bool trace_enable = false;
+
+ if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS)) {
+ if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
+ trace_enable = true;
+ } else {
+ irq_soft_mask_set(IRQS_ALL_DISABLED);
+ }
/*
* If the interrupt was taken with HARD_DIS clear, then enable MSR[EE].
@@ -164,8 +170,14 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
BUG_ON(!(regs->msr & MSR_EE));
__hard_irq_enable();
+ } else {
+ __hard_RI_enable();
}
+ /* Do this when RI=1 because it can cause SLB faults */
+ if (trace_enable)
+ trace_hardirqs_off();
+
if (user_mode(regs)) {
kuap_lock();
CT_WARN_ON(ct_state() != CONTEXT_USER);
@@ -220,13 +232,16 @@ static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct in
/* Ensure interrupt_enter_prepare does not enable MSR[EE] */
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
#endif
+ interrupt_enter_prepare(regs, state);
#ifdef CONFIG_PPC_BOOK3S_64
+ /*
+ * RI=1 is set by interrupt_enter_prepare, so this thread flags access
+ * has to come afterward (it can cause SLB faults).
+ */
if (cpu_has_feature(CPU_FTR_CTRL) &&
!test_thread_local_flags(_TLF_RUNLATCH))
__ppc64_runlatch_on();
#endif
-
- interrupt_enter_prepare(regs, state);
irq_enter();
}
@@ -296,6 +311,8 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
regs->softe = IRQS_ALL_DISABLED;
}
+ __hard_RI_enable();
+
/* Don't do any per-CPU operations until interrupt state is fixed */
if (nmi_disables_ftrace(regs)) {
@@ -393,6 +410,8 @@ interrupt_handler long func(struct pt_regs *regs) \
{ \
long ret; \
\
+ __hard_RI_enable(); \
+ \
ret = ____##func (regs); \
\
return ret; \