author     Christophe Leroy <christophe.leroy@csgroup.eu>  2021-02-08 18:10:28 +0300
committer  Michael Ellerman <mpe@ellerman.id.au>           2021-02-11 15:35:10 +0300
commit     344bb20b159dd0996e521c0d4c131a6ae10c322a
tree       db99372c01b85f49ab43273d635b5d17e28296f7  /arch/powerpc
parent     ab1a517d55b01b54ba70f5d54f926f5ab4b18339
powerpc/syscall: Make interrupt.c buildable on PPC32
To allow building interrupt.c on PPC32, ifdef out PPC64-specific code or use helpers which are available on both PPC32 and PPC64.

Modify the Makefile to always build interrupt.o.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/ba073ad67bd971a88ce331b65d6655523b54c794.1612796617.git.christophe.leroy@csgroup.eu
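To illustrate the approach, here is a minimal sketch of the kind of helper the patch leans on (assumed, simplified shapes for illustration, not the exact kernel definitions): a predicate provided by both PPC32 and PPC64 lets the common code in interrupt.c test the interrupt state without touching regs->softe, which only exists on PPC64; calls with no 32-bit counterpart, such as kuap_check_amr(), stay behind #ifdef CONFIG_PPC64 instead.

    /*
     * Sketch (assumed, simplified): one helper usable on both configs
     * replaces direct tests of the PPC64-only regs->softe field.
     */
    #ifdef CONFIG_PPC64
    static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
    {
    	/* PPC64 tracks a lazy soft-disable state alongside the regs */
    	return regs->softe == IRQS_DISABLED;
    }
    #else
    static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
    {
    	/* PPC32 has no soft mask; MSR[EE] alone says whether irqs are off */
    	return !(regs->msr & MSR_EE);
    }
    #endif

With a helper shaped like this, the BUG_ON(regs->softe != IRQS_ENABLED) checks in the diff below become BUG_ON(arch_irq_disabled_regs(regs)) and compile on both 32-bit and 64-bit.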
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/kernel/Makefile      4
-rw-r--r--  arch/powerpc/kernel/interrupt.c  31
2 files changed, 26 insertions(+), 9 deletions(-)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index b7a82276d569..9c4d75a1eed2 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -46,10 +46,10 @@ obj-y := cputable.o syscalls.o \
prom.o traps.o setup-common.o \
udbg.o misc.o io.o misc_$(BITS).o \
of_platform.o prom_parse.o firmware.o \
- hw_breakpoint_constraints.o
+ hw_breakpoint_constraints.o interrupt.o
obj-y += ptrace/
obj-$(CONFIG_PPC64) += setup_64.o \
- paca.o nvram_64.o note.o interrupt.o
+ paca.o nvram_64.o note.o
obj-$(CONFIG_COMPAT) += sys_ppc32.o signal_32.o
obj-$(CONFIG_VDSO32) += vdso32/
obj-$(CONFIG_PPC_WATCHDOG) += watchdog.o
diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
index d6be4f9a67e5..2dac4d2bb1cf 100644
--- a/arch/powerpc/kernel/interrupt.c
+++ b/arch/powerpc/kernel/interrupt.c
@@ -39,7 +39,7 @@ notrace long system_call_exception(long r3, long r4, long r5,
BUG_ON(!(regs->msr & MSR_RI));
BUG_ON(!(regs->msr & MSR_PR));
BUG_ON(!FULL_REGS(regs));
- BUG_ON(regs->softe != IRQS_ENABLED);
+ BUG_ON(arch_irq_disabled_regs(regs));
#ifdef CONFIG_PPC_PKEY
if (mmu_has_feature(MMU_FTR_PKEY)) {
@@ -65,7 +65,9 @@ notrace long system_call_exception(long r3, long r4, long r5,
isync();
} else
#endif
+#ifdef CONFIG_PPC64
kuap_check_amr();
+#endif
account_cpu_user_entry();
@@ -77,7 +79,7 @@ notrace long system_call_exception(long r3, long r4, long r5,
* frame, or if the unwinder was taught the first stack frame always
* returns to user with IRQS_ENABLED, this store could be avoided!
*/
- regs->softe = IRQS_ENABLED;
+ irq_soft_mask_regs_set_state(regs, IRQS_ENABLED);
local_irq_enable();
@@ -151,6 +153,7 @@ static notrace inline bool __prep_irq_for_enabled_exit(bool clear_ri)
__hard_EE_RI_disable();
else
__hard_irq_disable();
+#ifdef CONFIG_PPC64
if (unlikely(lazy_irq_pending_nocheck())) {
/* Took an interrupt, may have more exit work to do. */
if (clear_ri)
@@ -162,7 +165,7 @@ static notrace inline bool __prep_irq_for_enabled_exit(bool clear_ri)
}
local_paca->irq_happened = 0;
irq_soft_mask_set(IRQS_ENABLED);
-
+#endif
return true;
}
@@ -216,7 +219,9 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
CT_WARN_ON(ct_state() == CONTEXT_USER);
+#ifdef CONFIG_PPC64
kuap_check_amr();
+#endif
regs->result = r3;
@@ -309,7 +314,7 @@ again:
account_cpu_user_exit();
-#ifdef CONFIG_PPC_BOOK3S /* BOOK3E not yet using this */
+#ifdef CONFIG_PPC_BOOK3S_64 /* BOOK3E and ppc32 not using this */
/*
* We do this at the end so that we do context switch with KERNEL AMR
*/
@@ -318,7 +323,7 @@ again:
return ret;
}
-#ifdef CONFIG_PPC_BOOK3S /* BOOK3E not yet using this */
+#ifndef CONFIG_PPC_BOOK3E_64 /* BOOK3E not yet using this */
notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr)
{
#ifdef CONFIG_PPC_BOOK3E
@@ -333,14 +338,16 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
BUG_ON(!(regs->msr & MSR_RI));
BUG_ON(!(regs->msr & MSR_PR));
BUG_ON(!FULL_REGS(regs));
- BUG_ON(regs->softe != IRQS_ENABLED);
+ BUG_ON(arch_irq_disabled_regs(regs));
CT_WARN_ON(ct_state() == CONTEXT_USER);
/*
* We don't need to restore AMR on the way back to userspace for KUAP.
* AMR can only have been unlocked if we interrupted the kernel.
*/
+#ifdef CONFIG_PPC64
kuap_check_amr();
+#endif
local_irq_save(flags);
@@ -407,7 +414,9 @@ again:
/*
* We do this at the end so that we do context switch with KERNEL AMR
*/
+#ifdef CONFIG_PPC64
kuap_user_restore(regs);
+#endif
return ret;
}
@@ -419,7 +428,9 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
unsigned long *ti_flagsp = &current_thread_info()->flags;
unsigned long flags;
unsigned long ret = 0;
+#ifdef CONFIG_PPC64
unsigned long amr;
+#endif
if (IS_ENABLED(CONFIG_PPC_BOOK3S) && unlikely(!(regs->msr & MSR_RI)))
unrecoverable_exception(regs);
@@ -432,7 +443,9 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
if (TRAP(regs) != 0x700)
CT_WARN_ON(ct_state() == CONTEXT_USER);
+#ifdef CONFIG_PPC64
amr = kuap_get_and_check_amr();
+#endif
if (unlikely(*ti_flagsp & _TIF_EMULATE_STACK_STORE)) {
clear_bits(_TIF_EMULATE_STACK_STORE, ti_flagsp);
@@ -441,7 +454,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
local_irq_save(flags);
- if (regs->softe == IRQS_ENABLED) {
+ if (!arch_irq_disabled_regs(regs)) {
/* Returning to a kernel context with local irqs enabled. */
WARN_ON_ONCE(!(regs->msr & MSR_EE));
again:
@@ -458,8 +471,10 @@ again:
} else {
/* Returning to a kernel context with local irqs disabled. */
__hard_EE_RI_disable();
+#ifdef CONFIG_PPC64
if (regs->msr & MSR_EE)
local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
+#endif
}
@@ -472,7 +487,9 @@ again:
* which would cause Read-After-Write stalls. Hence, we take the AMR
* value from the check above.
*/
+#ifdef CONFIG_PPC64
kuap_kernel_restore(regs, amr);
+#endif
return ret;
}