path: root/arch/powerpc/include
author	Nicholas Piggin <npiggin@gmail.com>	2021-06-30 10:46:15 +0300
committer	Michael Ellerman <mpe@ellerman.id.au>	2021-06-30 15:21:20 +0300
commit	9b69d48c7516a29cdaacd18d8bf5f575014a42a1 (patch)
tree	8f685003f411a16b2988ad2de904be45ee094a7e /arch/powerpc/include
parent	fce01acf830a697110ed72ecace4b0afdbcd53cb (diff)
download	linux-9b69d48c7516a29cdaacd18d8bf5f575014a42a1.tar.xz
powerpc/64e: remove implicit soft-masking and interrupt exit restart logic
The implicit soft-masking to speed up interrupt return was going to be used by 64e as well, but it has not been extensively tested on that platform and is not considered ready. It was intended to be disabled before merge. Disable it for now.

Most of the restart code is common with 64s, so with more correctness and performance testing this could be re-enabled again by adding the extra soft-mask checks to interrupt handlers and flipping exit_must_hard_disable().

Fixes: 9d1988ca87dd ("powerpc/64: treat low kernel text as irqs soft-masked")
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210630074621.2109197-4-npiggin@gmail.com
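[Editorial note] exit_must_hard_disable(), mentioned above, is the helper in arch/powerpc/kernel/interrupt.c that decides whether the interrupt-exit path must hard-disable MSR[EE] instead of relying on the soft-masked restart logic. The snippet below is only a simplified sketch of that shape with the restart logic limited to Book3S-64, not the verbatim upstream code; the interrupt_exit_not_reentrant static key is the one declared in the header diff below.

/* Hedged sketch, not verbatim kernel source: exit_must_hard_disable()
 * with implicit soft-masking / restart logic wired up only on Book3S-64.
 */
#ifdef CONFIG_PPC_BOOK3S_64
DEFINE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);

static inline bool exit_must_hard_disable(void)
{
	/* Restart logic is available; hard-disable only when the exit
	 * path is known not to be reentrant.
	 */
	return static_branch_unlikely(&interrupt_exit_not_reentrant);
}
#else
static inline bool exit_must_hard_disable(void)
{
	/* 64e after this patch: no implicit soft-masking or restart
	 * table, so interrupt exit always hard-disables.
	 */
	return true;
}
#endif

Re-enabling this on 64e would then amount to adding the extra soft-mask checks to its interrupt handlers and making this helper use the static-key form there as well, as the message notes.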
Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--	arch/powerpc/include/asm/interrupt.h | 33
1 file changed, 24 insertions(+), 9 deletions(-)
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index 8b4b1e84e110..f13c93b033c7 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -73,20 +73,34 @@
#include <asm/kprobes.h>
#include <asm/runlatch.h>
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S_64
extern char __end_soft_masked[];
unsigned long search_kernel_restart_table(unsigned long addr);
-#endif
-#ifdef CONFIG_PPC_BOOK3S_64
DECLARE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);
+static inline bool is_implicit_soft_masked(struct pt_regs *regs)
+{
+	if (regs->msr & MSR_PR)
+		return false;
+
+	if (regs->nip >= (unsigned long)__end_soft_masked)
+		return false;
+
+	return true;
+}
+
static inline void srr_regs_clobbered(void)
{
	local_paca->srr_valid = 0;
	local_paca->hsrr_valid = 0;
}
#else
+static inline bool is_implicit_soft_masked(struct pt_regs *regs)
+{
+	return false;
+}
+
static inline void srr_regs_clobbered(void)
{
}
@@ -150,11 +164,13 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup
		 */
		if (TRAP(regs) != INTERRUPT_PROGRAM) {
			CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
-			BUG_ON(regs->nip < (unsigned long)__end_soft_masked);
+			BUG_ON(is_implicit_soft_masked(regs));
		}
+#ifdef CONFIG_PPC_BOOK3S
		/* Move this under a debugging check */
		if (arch_irq_disabled_regs(regs))
			BUG_ON(search_kernel_restart_table(regs->nip));
+#endif
}
#endif
@@ -244,10 +260,9 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
	local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
-	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !(regs->msr & MSR_PR) &&
-	    regs->nip < (unsigned long)__end_soft_masked) {
-		// Kernel code running below __end_soft_masked is
-		// implicitly soft-masked.
+	if (is_implicit_soft_masked(regs)) {
+		// Adjust regs->softe to reflect the implicit soft-mask, so
+		// arch_irq_disabled_regs(regs) behaves as expected.
		regs->softe = IRQS_ALL_DISABLED;
	}
@@ -282,6 +297,7 @@ static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct inter
	 */
#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S
	if (arch_irq_disabled_regs(regs)) {
		unsigned long rst = search_kernel_restart_table(regs->nip);
		if (rst)
@@ -289,7 +305,6 @@ static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct inter
	}
#endif
-#ifdef CONFIG_PPC64
	if (nmi_disables_ftrace(regs))
		this_cpu_set_ftrace_enabled(state->ftrace_enabled);