author    | Linus Torvalds <torvalds@linux-foundation.org> | 2021-02-23 01:34:00 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2021-02-23 01:34:00 +0300
commit    | b12b47249688915e987a9a2a393b522f86f6b7ab (patch)
tree      | eae34f7fa64474bb3123f7b69c411ade6127c41f /arch/powerpc/kernel
parent    | 6ff6f86bc4d02949b5688d69de1c89c310d62c44 (diff)
parent    | 82d2c16b350f72aa21ac2a6860c542aa4b43a51e (diff)
download  | linux-b12b47249688915e987a9a2a393b522f86f6b7ab.tar.xz
Merge tag 'powerpc-5.12-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc updates from Michael Ellerman:
- A large series adding wrappers for our interrupt handlers, so that
  irq/nmi/user tracking can be isolated in the wrappers rather than
  spread across each handler (see the sketch after this message).
- Conversion of the 32-bit syscall handling into C.
- A series from Nick to streamline our TLB flushing when using the
Radix MMU.
- Switch to using queued spinlocks by default for 64-bit server CPUs.
- A rework of our PCI probing so that it happens later in boot, when
more generic infrastructure is available.
- Two small fixes to allow 32-bit little-endian processes to run on
64-bit kernels.
- Other smaller features, fixes & cleanups.
Thanks to: Alexey Kardashevskiy, Ananth N Mavinakayanahalli, Aneesh
Kumar K.V, Athira Rajeev, Bhaskar Chowdhury, Cédric Le Goater, Chengyang
Fan, Christophe Leroy, Christopher M. Riedl, Fabiano Rosas, Florian
Fainelli, Frederic Barrat, Ganesh Goudar, Hari Bathini, Jiapeng Chong,
Joseph J Allen, Kajol Jain, Markus Elfring, Michal Suchanek, Nathan
Lynch, Naveen N. Rao, Nicholas Piggin, Oliver O'Halloran, Pingfan Liu,
Po-Hsu Lin, Qian Cai, Ram Pai, Randy Dunlap, Sandipan Das, Stephen
Rothwell, Tyrel Datwyler, Will Springer, Yury Norov, and Zheng Yongjun.
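
The interrupt-handler wrapper series is visible throughout the diff below; the dbell.c hunk, for example, converts `void doorbell_exception(struct pt_regs *regs)` into `DEFINE_INTERRUPT_HANDLER_ASYNC(doorbell_exception)` and drops the open-coded irq_enter()/irq_exit() pair from the handler body. A rough, simplified sketch of the idea only — the real macros live in arch/powerpc/include/asm/interrupt.h and also handle NMI nesting, context tracking and soft-mask state:

```c
#include <linux/hardirq.h>	/* irq_enter(), irq_exit() */
#include <asm/ptrace.h>		/* struct pt_regs */

/*
 * Simplified sketch: generate an outer handler that performs the common
 * entry/exit bookkeeping exactly once, around a static inner body.
 */
#define DEFINE_INTERRUPT_HANDLER_ASYNC(func)				\
	static void ____##func(struct pt_regs *regs);			\
									\
	void func(struct pt_regs *regs)					\
	{								\
		irq_enter();	/* tracking lives in the wrapper... */	\
		____##func(regs);					\
		irq_exit();	/* ...not in every handler body */	\
	}								\
									\
	static void ____##func(struct pt_regs *regs)

/* A handler definition then supplies only the body: */
DEFINE_INTERRUPT_HANDLER_ASYNC(doorbell_exception)
{
	/* handler work goes here; entry/exit accounting is already done */
}
```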
* tag 'powerpc-5.12-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (188 commits)
powerpc/perf: Adds support for programming of Thresholding in P10
powerpc/pci: Remove unimplemented prototypes
powerpc/uaccess: Merge raw_copy_to_user_allowed() into raw_copy_to_user()
powerpc/uaccess: Merge __put_user_size_allowed() into __put_user_size()
powerpc/uaccess: get rid of small constant size cases in raw_copy_{to,from}_user()
powerpc/64: Fix stack trace not displaying final frame
powerpc/time: Remove get_tbl()
powerpc/time: Avoid using get_tbl()
spi: mpc52xx: Avoid using get_tbl()
powerpc/syscall: Avoid storing 'current' in another pointer
powerpc/32: Handle bookE debugging in C in syscall entry/exit
powerpc/syscall: Do not check unsupported scv vector on PPC32
powerpc/32: Remove the counter in global_dbcr0
powerpc/32: Remove verification of MSR_PR on syscall in the ASM entry
powerpc/syscall: implement system call entry/exit logic in C for PPC32
powerpc/32: Always save non volatile GPRs at syscall entry
powerpc/syscall: Change condition to check MSR_RI
powerpc/syscall: Save r3 in regs->orig_r3
powerpc/syscall: Use is_compat_task()
powerpc/syscall: Make interrupt.c buildable on PPC32
...
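
Several of the syscall commits above converge PPC32 on the C entry/exit path that 64-bit already used: the assembly stub saves registers and calls system_call_exception(), and the return path goes through syscall_exit_prepare() (the file itself is renamed from syscall_64.c to interrupt.c in the diff below). A rough sketch of the entry function's shape, with tracing, seccomp, scv handling and security checks elided, and the table declaration simplified for illustration:

```c
#include <linux/compiler.h>	/* notrace, unlikely() */
#include <linux/errno.h>
#include <asm/ptrace.h>		/* struct pt_regs, orig_gpr3 */
#include <asm/unistd.h>		/* NR_syscalls */

typedef long (*syscall_fn)(long, long, long, long, long, long);
extern const syscall_fn sys_call_table[];	/* simplified declaration */

notrace long system_call_exception(long r3, long r4, long r5,
				   long r6, long r7, long r8,
				   unsigned long r0, struct pt_regs *regs)
{
	regs->orig_gpr3 = r3;	/* saved so syscall restart can replay r3 */

	if (unlikely(r0 >= NR_syscalls))
		return -ENOSYS;

	/* the real code adds barrier_nospec() before this indexed load */
	return sys_call_table[r0](r3, r4, r5, r6, r7, r8);
}
```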
Diffstat (limited to 'arch/powerpc/kernel')
39 files changed, 840 insertions, 1262 deletions
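
One refactoring pattern recurs in the eeh.c hunks below: three debugfs write handlers each duplicated the buffer-parse and PCI device lookup code, and the series hoists it into eeh_debug_lookup_pdev(), which returns either a valid struct pci_dev * or an errno encoded in the pointer. A generic, self-contained sketch of that ERR_PTR()/IS_ERR() idiom (the widget names are hypothetical):

```c
#include <linux/err.h>		/* ERR_PTR(), IS_ERR(), PTR_ERR() */
#include <linux/errno.h>

struct widget { int id; };		/* hypothetical example type */
static struct widget the_widget = { .id = 1 };

/* Lookup that encodes its failure reason into the returned pointer. */
static struct widget *widget_lookup(int id)
{
	if (id < 0)
		return ERR_PTR(-EINVAL);
	if (id != the_widget.id)
		return ERR_PTR(-ENODEV);
	return &the_widget;
}

static int widget_use(int id)
{
	struct widget *w = widget_lookup(id);

	if (IS_ERR(w))		/* one check covers every error code */
		return PTR_ERR(w);
	return w->id;
}
```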
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 79ee7750937d..6084fa499aa3 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -46,10 +46,10 @@ obj-y := cputable.o syscalls.o \ prom.o traps.o setup-common.o \ udbg.o misc.o io.o misc_$(BITS).o \ of_platform.o prom_parse.o firmware.o \ - hw_breakpoint_constraints.o + hw_breakpoint_constraints.o interrupt.o obj-y += ptrace/ obj-$(CONFIG_PPC64) += setup_64.o \ - paca.o nvram_64.o note.o syscall_64.o + paca.o nvram_64.o note.o obj-$(CONFIG_COMPAT) += sys_ppc32.o signal_32.o obj-$(CONFIG_VDSO32) += vdso32_wrapper.o obj-$(CONFIG_PPC_WATCHDOG) += watchdog.o diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index b690c70f061c..f3a662201a9f 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -255,7 +255,6 @@ int main(void) #endif /* CONFIG_PPC_MM_SLICES */ OFFSET(PACA_EXGEN, paca_struct, exgen); OFFSET(PACA_EXMC, paca_struct, exmc); - OFFSET(PACA_EXSLB, paca_struct, exslb); OFFSET(PACA_EXNMI, paca_struct, exnmi); #ifdef CONFIG_PPC_PSERIES OFFSET(PACALPPACAPTR, paca_struct, lppaca_ptr); @@ -309,7 +308,7 @@ int main(void) /* Interrupt register frame */ DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE); - DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs)); + DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_WITH_PT_REGS); STACK_PT_REGS_OFFSET(GPR0, gpr[0]); STACK_PT_REGS_OFFSET(GPR1, gpr[1]); STACK_PT_REGS_OFFSET(GPR2, gpr[2]); diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c index 52680cf07c9d..5545c9cd17c1 100644 --- a/arch/powerpc/kernel/dbell.c +++ b/arch/powerpc/kernel/dbell.c @@ -12,17 +12,17 @@ #include <linux/hardirq.h> #include <asm/dbell.h> +#include <asm/interrupt.h> #include <asm/irq_regs.h> #include <asm/kvm_ppc.h> #include <asm/trace.h> #ifdef CONFIG_SMP -void doorbell_exception(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER_ASYNC(doorbell_exception) { struct pt_regs *old_regs = set_irq_regs(regs); - irq_enter(); trace_doorbell_entry(regs); ppc_msgsync(); @@ -35,13 +35,12 @@ void doorbell_exception(struct pt_regs *regs) smp_ipi_demux_relaxed(); /* already performed the barrier */ trace_doorbell_exit(regs); - irq_exit(); + set_irq_regs(old_regs); } #else /* CONFIG_SMP */ -void doorbell_exception(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER_ASYNC(doorbell_exception) { printk(KERN_WARNING "Received doorbell on non-smp system\n"); } #endif /* CONFIG_SMP */ - diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 813713c9120c..cd60bc1c8701 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -1596,6 +1596,35 @@ static int proc_eeh_show(struct seq_file *m, void *v) } #ifdef CONFIG_DEBUG_FS + + +static struct pci_dev *eeh_debug_lookup_pdev(struct file *filp, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + uint32_t domain, bus, dev, fn; + struct pci_dev *pdev; + char buf[20]; + int ret; + + memset(buf, 0, sizeof(buf)); + ret = simple_write_to_buffer(buf, sizeof(buf)-1, ppos, user_buf, count); + if (!ret) + return ERR_PTR(-EFAULT); + + ret = sscanf(buf, "%x:%x:%x.%x", &domain, &bus, &dev, &fn); + if (ret != 4) { + pr_err("%s: expected 4 args, got %d\n", __func__, ret); + return ERR_PTR(-EINVAL); + } + + pdev = pci_get_domain_bus_and_slot(domain, bus, (dev << 3) | fn); + if (!pdev) + return ERR_PTR(-ENODEV); + + return pdev; +} + static int eeh_enable_dbgfs_set(void *data, u64 val) { if (val) @@ -1688,26 +1717,13 @@ static ssize_t 
eeh_dev_check_write(struct file *filp, const char __user *user_buf, size_t count, loff_t *ppos) { - uint32_t domain, bus, dev, fn; struct pci_dev *pdev; struct eeh_dev *edev; - char buf[20]; int ret; - memset(buf, 0, sizeof(buf)); - ret = simple_write_to_buffer(buf, sizeof(buf)-1, ppos, user_buf, count); - if (!ret) - return -EFAULT; - - ret = sscanf(buf, "%x:%x:%x.%x", &domain, &bus, &dev, &fn); - if (ret != 4) { - pr_err("%s: expected 4 args, got %d\n", __func__, ret); - return -EINVAL; - } - - pdev = pci_get_domain_bus_and_slot(domain, bus, (dev << 3) | fn); - if (!pdev) - return -ENODEV; + pdev = eeh_debug_lookup_pdev(filp, user_buf, count, ppos); + if (IS_ERR(pdev)) + return PTR_ERR(pdev); edev = pci_dev_to_eeh_dev(pdev); if (!edev) { @@ -1717,8 +1733,8 @@ static ssize_t eeh_dev_check_write(struct file *filp, } ret = eeh_dev_check_failure(edev); - pci_info(pdev, "eeh_dev_check_failure(%04x:%02x:%02x.%01x) = %d\n", - domain, bus, dev, fn, ret); + pci_info(pdev, "eeh_dev_check_failure(%s) = %d\n", + pci_name(pdev), ret); pci_dev_put(pdev); @@ -1829,25 +1845,12 @@ static ssize_t eeh_dev_break_write(struct file *filp, const char __user *user_buf, size_t count, loff_t *ppos) { - uint32_t domain, bus, dev, fn; struct pci_dev *pdev; - char buf[20]; int ret; - memset(buf, 0, sizeof(buf)); - ret = simple_write_to_buffer(buf, sizeof(buf)-1, ppos, user_buf, count); - if (!ret) - return -EFAULT; - - ret = sscanf(buf, "%x:%x:%x.%x", &domain, &bus, &dev, &fn); - if (ret != 4) { - pr_err("%s: expected 4 args, got %d\n", __func__, ret); - return -EINVAL; - } - - pdev = pci_get_domain_bus_and_slot(domain, bus, (dev << 3) | fn); - if (!pdev) - return -ENODEV; + pdev = eeh_debug_lookup_pdev(filp, user_buf, count, ppos); + if (IS_ERR(pdev)) + return PTR_ERR(pdev); ret = eeh_debugfs_break_device(pdev); pci_dev_put(pdev); @@ -1865,6 +1868,53 @@ static const struct file_operations eeh_dev_break_fops = { .read = eeh_debugfs_dev_usage, }; +static ssize_t eeh_dev_can_recover(struct file *filp, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct pci_driver *drv; + struct pci_dev *pdev; + size_t ret; + + pdev = eeh_debug_lookup_pdev(filp, user_buf, count, ppos); + if (IS_ERR(pdev)) + return PTR_ERR(pdev); + + /* + * In order for error recovery to work the driver needs to implement + * .error_detected(), so it can quiesce IO to the device, and + * .slot_reset() so it can re-initialise the device after a reset. + * + * Ideally they'd implement .resume() too, but some drivers which + * we need to support (notably IPR) don't so I guess we can tolerate + * that. + * + * .mmio_enabled() is mostly there as a work-around for devices which + * take forever to re-init after a hot reset. Implementing that is + * strictly optional. 
+ */ + drv = pci_dev_driver(pdev); + if (drv && + drv->err_handler && + drv->err_handler->error_detected && + drv->err_handler->slot_reset) { + ret = count; + } else { + ret = -EOPNOTSUPP; + } + + pci_dev_put(pdev); + + return ret; +} + +static const struct file_operations eeh_dev_can_recover_fops = { + .open = simple_open, + .llseek = no_llseek, + .write = eeh_dev_can_recover, + .read = eeh_debugfs_dev_usage, +}; + #endif static int __init eeh_init_proc(void) @@ -1889,6 +1939,9 @@ static int __init eeh_init_proc(void) debugfs_create_file_unsafe("eeh_force_recover", 0600, powerpc_debugfs_root, NULL, &eeh_force_recover_fops); + debugfs_create_file_unsafe("eeh_dev_can_recover", 0600, + powerpc_debugfs_root, NULL, + &eeh_dev_can_recover_fops); eeh_cache_debugfs_init(); #endif } diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 1c9b0ccc2172..78c430b7f9d9 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -175,14 +175,11 @@ transfer_to_handler: addi r11,r11,global_dbcr0@l #ifdef CONFIG_SMP lwz r9,TASK_CPU(r2) - slwi r9,r9,3 + slwi r9,r9,2 add r11,r11,r9 #endif lwz r12,0(r11) mtspr SPRN_DBCR0,r12 - lwz r12,4(r11) - addi r12,r12,-1 - stw r12,4(r11) #endif b 3f @@ -276,8 +273,7 @@ reenable_mmu: * We save a bunch of GPRs, * r3 can be different from GPR3(r1) at this point, r9 and r11 * contains the old MSR and handler address respectively, - * r4 & r5 can contain page fault arguments that need to be passed - * along as well. r0, r6-r8, r12, CCR, CTR, XER etc... are left + * r0, r4-r8, r12, CCR, CTR, XER etc... are left * clobbered as they aren't useful past this point. */ @@ -285,15 +281,11 @@ reenable_mmu: stw r9,8(r1) stw r11,12(r1) stw r3,16(r1) - stw r4,20(r1) - stw r5,24(r1) /* If we are disabling interrupts (normal case), simply log it with * lockdep */ 1: bl trace_hardirqs_off - lwz r5,24(r1) - lwz r4,20(r1) lwz r3,16(r1) lwz r11,12(r1) lwz r9,8(r1) @@ -334,132 +326,29 @@ stack_ovf: _ASM_NOKPROBE_SYMBOL(stack_ovf) #endif -#ifdef CONFIG_TRACE_IRQFLAGS -trace_syscall_entry_irq_off: - /* - * Syscall shouldn't happen while interrupts are disabled, - * so let's do a warning here. - */ -0: trap - EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING - bl trace_hardirqs_on - - /* Now enable for real */ - LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE) - mtmsr r10 - - REST_GPR(0, r1) - REST_4GPRS(3, r1) - REST_2GPRS(7, r1) - b DoSyscall -#endif /* CONFIG_TRACE_IRQFLAGS */ - .globl transfer_to_syscall transfer_to_syscall: -#ifdef CONFIG_TRACE_IRQFLAGS - andi. r12,r9,MSR_EE - beq- trace_syscall_entry_irq_off -#endif /* CONFIG_TRACE_IRQFLAGS */ + SAVE_NVGPRS(r1) +#ifdef CONFIG_PPC_BOOK3S_32 + kuep_lock r11, r12 +#endif -/* - * Handle a system call. - */ - .stabs "arch/powerpc/kernel/",N_SO,0,0,0f - .stabs "entry_32.S",N_SO,0,0,0f -0: - -_GLOBAL(DoSyscall) - stw r3,ORIG_GPR3(r1) - li r12,0 - stw r12,RESULT(r1) -#ifdef CONFIG_TRACE_IRQFLAGS - /* Make sure interrupts are enabled */ - mfmsr r11 - andi. r12,r11,MSR_EE - /* We came in with interrupts disabled, we WARN and mark them enabled - * for lockdep now */ -0: tweqi r12, 0 - EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING -#endif /* CONFIG_TRACE_IRQFLAGS */ - lwz r11,TI_FLAGS(r2) - andi. 
r11,r11,_TIF_SYSCALL_DOTRACE - bne- syscall_dotrace -syscall_dotrace_cont: - cmplwi 0,r0,NR_syscalls - lis r10,sys_call_table@h - ori r10,r10,sys_call_table@l - slwi r0,r0,2 - bge- 66f - - barrier_nospec_asm - /* - * Prevent the load of the handler below (based on the user-passed - * system call number) being speculatively executed until the test - * against NR_syscalls and branch to .66f above has - * committed. - */ + /* Calling convention has r9 = orig r0, r10 = regs */ + addi r10,r1,STACK_FRAME_OVERHEAD + mr r9,r0 + stw r10,THREAD+PT_REGS(r2) + bl system_call_exception - lwzx r10,r10,r0 /* Fetch system call handler [ptr] */ - mtlr r10 - addi r9,r1,STACK_FRAME_OVERHEAD - PPC440EP_ERR42 - blrl /* Call handler */ - .globl ret_from_syscall ret_from_syscall: -#ifdef CONFIG_DEBUG_RSEQ - /* Check whether the syscall is issued inside a restartable sequence */ - stw r3,GPR3(r1) - addi r3,r1,STACK_FRAME_OVERHEAD - bl rseq_syscall - lwz r3,GPR3(r1) -#endif - mr r6,r3 - /* disable interrupts so current_thread_info()->flags can't change */ - LOAD_REG_IMMEDIATE(r10,MSR_KERNEL) /* doesn't include MSR_EE */ - /* Note: We don't bother telling lockdep about it */ - mtmsr r10 - lwz r9,TI_FLAGS(r2) - li r8,-MAX_ERRNO - andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK) - bne- syscall_exit_work - cmplw 0,r3,r8 - blt+ syscall_exit_cont - lwz r11,_CCR(r1) /* Load CR */ - neg r3,r3 - oris r11,r11,0x1000 /* Set SO bit in CR */ - stw r11,_CCR(r1) -syscall_exit_cont: - lwz r8,_MSR(r1) -#ifdef CONFIG_TRACE_IRQFLAGS - /* If we are going to return from the syscall with interrupts - * off, we trace that here. It shouldn't normally happen. - */ - andi. r10,r8,MSR_EE - bne+ 1f - stw r3,GPR3(r1) - bl trace_hardirqs_off - lwz r3,GPR3(r1) -1: -#endif /* CONFIG_TRACE_IRQFLAGS */ -#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) - /* If the process has its own DBCR0 value, load it up. The internal - debug mode bit tells us that dbcr0 should be loaded. */ - lwz r0,THREAD+THREAD_DBCR0(r2) - andis. r10,r0,DBCR0_IDM@h - bnel- load_dbcr0 -#endif + addi r4,r1,STACK_FRAME_OVERHEAD + li r5,0 + bl syscall_exit_prepare #ifdef CONFIG_PPC_47x lis r4,icache_44x_need_flush@ha lwz r5,icache_44x_need_flush@l(r4) cmplwi cr0,r5,0 bne- 2f #endif /* CONFIG_PPC_47x */ -1: -BEGIN_FTR_SECTION - lwarx r7,0,r1 -END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX) - stwcx. r0,0,r1 /* to clear the reservation */ - ACCOUNT_CPU_USER_EXIT(r2, r5, r7) #ifdef CONFIG_PPC_BOOK3S_32 kuep_unlock r5, r7 #endif @@ -467,21 +356,36 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX) lwz r4,_LINK(r1) lwz r5,_CCR(r1) mtlr r4 - mtcr r5 lwz r7,_NIP(r1) - lwz r2,GPR2(r1) - lwz r1,GPR1(r1) + lwz r8,_MSR(r1) + cmpwi r3,0 + lwz r3,GPR3(r1) syscall_exit_finish: -#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS) - mtspr SPRN_NRI, r0 -#endif mtspr SPRN_SRR0,r7 mtspr SPRN_SRR1,r8 + + bne 3f + mtcr r5 + +1: lwz r2,GPR2(r1) + lwz r1,GPR1(r1) rfi #ifdef CONFIG_40x b . 
/* Prevent prefetch past rfi */ #endif -_ASM_NOKPROBE_SYMBOL(syscall_exit_finish) + +3: mtcr r5 + lwz r4,_CTR(r1) + lwz r5,_XER(r1) + REST_NVGPRS(r1) + mtctr r4 + mtxer r5 + lwz r0,GPR0(r1) + lwz r3,GPR3(r1) + REST_8GPRS(4,r1) + lwz r12,GPR12(r1) + b 1b + #ifdef CONFIG_44x 2: li r7,0 iccci r0,r0 @@ -489,9 +393,6 @@ _ASM_NOKPROBE_SYMBOL(syscall_exit_finish) b 1b #endif /* CONFIG_44x */ -66: li r3,-ENOSYS - b ret_from_syscall - .globl ret_from_fork ret_from_fork: REST_NVGPRS(r1) @@ -510,157 +411,6 @@ ret_from_kernel_thread: li r3,0 b ret_from_syscall -/* Traced system call support */ -syscall_dotrace: - SAVE_NVGPRS(r1) - li r0,0xc00 - stw r0,_TRAP(r1) - addi r3,r1,STACK_FRAME_OVERHEAD - bl do_syscall_trace_enter - /* - * Restore argument registers possibly just changed. - * We use the return value of do_syscall_trace_enter - * for call number to look up in the table (r0). - */ - mr r0,r3 - lwz r3,GPR3(r1) - lwz r4,GPR4(r1) - lwz r5,GPR5(r1) - lwz r6,GPR6(r1) - lwz r7,GPR7(r1) - lwz r8,GPR8(r1) - REST_NVGPRS(r1) - - cmplwi r0,NR_syscalls - /* Return code is already in r3 thanks to do_syscall_trace_enter() */ - bge- ret_from_syscall - b syscall_dotrace_cont - -syscall_exit_work: - andi. r0,r9,_TIF_RESTOREALL - beq+ 0f - REST_NVGPRS(r1) - b 2f -0: cmplw 0,r3,r8 - blt+ 1f - andi. r0,r9,_TIF_NOERROR - bne- 1f - lwz r11,_CCR(r1) /* Load CR */ - neg r3,r3 - oris r11,r11,0x1000 /* Set SO bit in CR */ - stw r11,_CCR(r1) - -1: stw r6,RESULT(r1) /* Save result */ - stw r3,GPR3(r1) /* Update return value */ -2: andi. r0,r9,(_TIF_PERSYSCALL_MASK) - beq 4f - - /* Clear per-syscall TIF flags if any are set. */ - - li r11,_TIF_PERSYSCALL_MASK - addi r12,r2,TI_FLAGS -3: lwarx r8,0,r12 - andc r8,r8,r11 - stwcx. r8,0,r12 - bne- 3b - -4: /* Anything which requires enabling interrupts? */ - andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP) - beq ret_from_except - - /* Re-enable interrupts. There is no need to trace that with - * lockdep as we are supposed to have IRQs on at this point - */ - ori r10,r10,MSR_EE - mtmsr r10 - - /* Save NVGPRS if they're not saved already */ - lwz r4,_TRAP(r1) - andi. r4,r4,1 - beq 5f - SAVE_NVGPRS(r1) - li r4,0xc00 - stw r4,_TRAP(r1) -5: - addi r3,r1,STACK_FRAME_OVERHEAD - bl do_syscall_trace_leave - b ret_from_except_full - - /* - * System call was called from kernel. We get here with SRR1 in r9. - * Mark the exception as recoverable once we have retrieved SRR0, - * trap a warning and return ENOSYS with CR[SO] set. - */ - .globl ret_from_kernel_syscall -ret_from_kernel_syscall: - mfspr r9, SPRN_SRR0 - mfspr r10, SPRN_SRR1 -#if !defined(CONFIG_4xx) && !defined(CONFIG_BOOKE) - LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~(MSR_IR|MSR_DR)) - mtmsr r11 -#endif - -0: trap - EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING - - li r3, ENOSYS - crset so -#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS) - mtspr SPRN_NRI, r0 -#endif - mtspr SPRN_SRR0, r9 - mtspr SPRN_SRR1, r10 - rfi -#ifdef CONFIG_40x - b . /* Prevent prefetch past rfi */ -#endif -_ASM_NOKPROBE_SYMBOL(ret_from_kernel_syscall) - -/* - * The fork/clone functions need to copy the full register set into - * the child process. Therefore we need to save all the nonvolatile - * registers (r13 - r31) before calling the C code. 
- */ - .globl ppc_fork -ppc_fork: - SAVE_NVGPRS(r1) - lwz r0,_TRAP(r1) - rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */ - stw r0,_TRAP(r1) /* register set saved */ - b sys_fork - - .globl ppc_vfork -ppc_vfork: - SAVE_NVGPRS(r1) - lwz r0,_TRAP(r1) - rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */ - stw r0,_TRAP(r1) /* register set saved */ - b sys_vfork - - .globl ppc_clone -ppc_clone: - SAVE_NVGPRS(r1) - lwz r0,_TRAP(r1) - rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */ - stw r0,_TRAP(r1) /* register set saved */ - b sys_clone - - .globl ppc_clone3 -ppc_clone3: - SAVE_NVGPRS(r1) - lwz r0,_TRAP(r1) - rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */ - stw r0,_TRAP(r1) /* register set saved */ - b sys_clone3 - - .globl ppc_swapcontext -ppc_swapcontext: - SAVE_NVGPRS(r1) - lwz r0,_TRAP(r1) - rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */ - stw r0,_TRAP(r1) /* register set saved */ - b sys_swapcontext - /* * Top-level page fault handling. * This is in assembler because if do_page_fault tells us that @@ -670,10 +420,6 @@ ppc_swapcontext: .globl handle_page_fault handle_page_fault: addi r3,r1,STACK_FRAME_OVERHEAD -#ifdef CONFIG_PPC_BOOK3S_32 - andis. r0,r5,DSISR_DABRMATCH@h - bne- handle_dabr_fault -#endif bl do_page_fault cmpwi r3,0 beq+ ret_from_except @@ -681,23 +427,11 @@ handle_page_fault: lwz r0,_TRAP(r1) clrrwi r0,r0,1 stw r0,_TRAP(r1) - mr r5,r3 + mr r4,r3 /* err arg for bad_page_fault */ addi r3,r1,STACK_FRAME_OVERHEAD - lwz r4,_DAR(r1) bl __bad_page_fault b ret_from_except_full -#ifdef CONFIG_PPC_BOOK3S_32 - /* We have a data breakpoint exception - handle it */ -handle_dabr_fault: - SAVE_NVGPRS(r1) - lwz r0,_TRAP(r1) - clrrwi r0,r0,1 - stw r0,_TRAP(r1) - bl do_break - b ret_from_except_full -#endif - /* * This routine switches between two different tasks. The process * state of one is saved on its kernel stack. 
Then the state @@ -1237,14 +971,11 @@ load_dbcr0: addi r11,r11,global_dbcr0@l #ifdef CONFIG_SMP lwz r9,TASK_CPU(r2) - slwi r9,r9,3 + slwi r9,r9,2 add r11,r11,r9 #endif stw r10,0(r11) mtspr SPRN_DBCR0,r0 - lwz r10,4(r11) - addi r10,r10,1 - stw r10,4(r11) li r11,-1 mtspr SPRN_DBSR,r11 /* clear all pending debug events */ blr @@ -1253,7 +984,7 @@ load_dbcr0: .align 4 .global global_dbcr0 global_dbcr0: - .space 8*NR_CPUS + .space 4*NR_CPUS .previous #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */ diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 33ddfeef4fe9..6c4d9e276c4d 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -108,7 +108,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM) li r11,\trapnr std r11,_TRAP(r1) std r12,_CCR(r1) - std r3,ORIG_GPR3(r1) addi r10,r1,STACK_FRAME_OVERHEAD ld r11,exception_marker@toc(r2) std r11,-16(r10) /* "regshere" marker */ @@ -226,6 +225,12 @@ _ASM_NOKPROBE_SYMBOL(system_call_vectored_emulate) #endif .balign IFETCH_ALIGN_BYTES + .globl system_call_common_real +system_call_common_real: + ld r10,PACAKMSR(r13) /* get MSR value for kernel */ + mtmsrd r10 + + .balign IFETCH_ALIGN_BYTES .globl system_call_common system_call_common: _ASM_NOKPROBE_SYMBOL(system_call_common) @@ -278,7 +283,6 @@ END_BTB_FLUSH_SECTION std r10,_LINK(r1) std r11,_TRAP(r1) std r12,_CCR(r1) - std r3,ORIG_GPR3(r1) addi r10,r1,STACK_FRAME_OVERHEAD ld r11,exception_marker@toc(r2) std r11,-16(r10) /* "regshere" marker */ diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index 74d07dc0bb48..e8eb9992a270 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -398,7 +398,6 @@ exc_##n##_common: \ std r10,_NIP(r1); /* save SRR0 to stackframe */ \ std r11,_MSR(r1); /* save SRR1 to stackframe */ \ beq 2f; /* if from kernel mode */ \ - ACCOUNT_CPU_USER_ENTRY(r13,r10,r11);/* accounting (uses cr0+eq) */ \ 2: ld r3,excf+EX_R10(r13); /* get back r10 */ \ ld r4,excf+EX_R11(r13); /* get back r11 */ \ mfspr r5,scratch; /* get back r13 */ \ @@ -791,7 +790,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) EXCEPTION_COMMON_CRIT(0xd00) std r14,_DSISR(r1) addi r3,r1,STACK_FRAME_OVERHEAD - mr r4,r14 ld r14,PACA_EXCRIT+EX_R14(r13) ld r15,PACA_EXCRIT+EX_R15(r13) bl save_nvgprs @@ -864,7 +862,6 @@ kernel_dbg_exc: INTS_DISABLE std r14,_DSISR(r1) addi r3,r1,STACK_FRAME_OVERHEAD - mr r4,r14 ld r14,PACA_EXDBG+EX_R14(r13) ld r15,PACA_EXDBG+EX_R15(r13) bl save_nvgprs @@ -1011,8 +1008,6 @@ storage_fault_common: std r14,_DAR(r1) std r15,_DSISR(r1) addi r3,r1,STACK_FRAME_OVERHEAD - mr r4,r14 - mr r5,r15 ld r14,PACA_EXGEN+EX_R14(r13) ld r15,PACA_EXGEN+EX_R15(r13) bl do_page_fault @@ -1020,9 +1015,8 @@ storage_fault_common: bne- 1f b ret_from_except_lite 1: bl save_nvgprs - mr r5,r3 + mr r4,r3 addi r3,r1,STACK_FRAME_OVERHEAD - ld r4,_DAR(r1) bl __bad_page_fault b ret_from_except diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 6e53f7638737..60d3051a8bc8 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -139,7 +139,6 @@ name: #define IKVM_VIRT .L_IKVM_VIRT_\name\() /* Virt entry tests KVM */ #define ISTACK .L_ISTACK_\name\() /* Set regular kernel stack */ #define __ISTACK(name) .L_ISTACK_ ## name -#define IRECONCILE .L_IRECONCILE_\name\() /* Do RECONCILE_IRQ_STATE */ #define IKUAP .L_IKUAP_\name\() /* Do KUAP lock */ #define INT_DEFINE_BEGIN(n) \ @@ -203,9 +202,6 @@ do_define_int n .ifndef ISTACK ISTACK=1 .endif - 
.ifndef IRECONCILE - IRECONCILE=1 - .endif .ifndef IKUAP IKUAP=1 .endif @@ -581,7 +577,6 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real) kuap_save_amr_and_lock r9, r10, cr1, cr0 .endif beq 101f /* if from kernel mode */ - ACCOUNT_CPU_USER_ENTRY(r13, r9, r10) BEGIN_FTR_SECTION ld r9,IAREA+EX_PPR(r13) /* Read PPR from paca */ std r9,_PPR(r1) @@ -649,14 +644,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) ld r11,exception_marker@toc(r2) std r10,RESULT(r1) /* clear regs->result */ std r11,STACK_FRAME_OVERHEAD-16(r1) /* mark the frame */ - - .if ISTACK - ACCOUNT_STOLEN_TIME - .endif - - .if IRECONCILE - RECONCILE_IRQ_STATE(r10, r11) - .endif .endm /* @@ -705,14 +692,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) ld r1,GPR1(r1) .endm -#define RUNLATCH_ON \ -BEGIN_FTR_SECTION \ - ld r3, PACA_THREAD_INFO(r13); \ - ld r4,TI_LOCAL_FLAGS(r3); \ - andi. r0,r4,_TLF_RUNLATCH; \ - beql ppc64_runlatch_on_trampoline; \ -END_FTR_SECTION_IFSET(CPU_FTR_CTRL) - /* * When the idle code in power4_idle puts the CPU into NAP mode, * it has to do so in a loop, and relies on the external interrupt @@ -935,7 +914,6 @@ INT_DEFINE_BEGIN(system_reset) */ ISET_RI=0 ISTACK=0 - IRECONCILE=0 IKVM_REAL=1 INT_DEFINE_END(system_reset) @@ -1022,20 +1000,6 @@ EXC_COMMON_BEGIN(system_reset_common) ld r1,PACA_NMI_EMERG_SP(r13) subi r1,r1,INT_FRAME_SIZE __GEN_COMMON_BODY system_reset - /* - * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does - * the right thing. We do not want to reconcile because that goes - * through irq tracing which we don't want in NMI. - * - * Save PACAIRQHAPPENED to RESULT (otherwise unused), and set HARD_DIS - * as we are running with MSR[EE]=0. - */ - li r10,IRQS_ALL_DISABLED - stb r10,PACAIRQSOFTMASK(r13) - lbz r10,PACAIRQHAPPENED(r13) - std r10,RESULT(r1) - ori r10,r10,PACA_IRQ_HARD_DIS - stb r10,PACAIRQHAPPENED(r13) addi r3,r1,STACK_FRAME_OVERHEAD bl system_reset_exception @@ -1051,14 +1015,6 @@ EXC_COMMON_BEGIN(system_reset_common) subi r10,r10,1 sth r10,PACA_IN_NMI(r13) - /* - * Restore soft mask settings. - */ - ld r10,RESULT(r1) - stb r10,PACAIRQHAPPENED(r13) - ld r10,SOFTE(r1) - stb r10,PACAIRQSOFTMASK(r13) - kuap_kernel_restore r9, r10 EXCEPTION_RESTORE_REGS RFI_TO_USER_OR_KERNEL @@ -1123,7 +1079,6 @@ INT_DEFINE_BEGIN(machine_check_early) ISTACK=0 IDAR=1 IDSISR=1 - IRECONCILE=0 IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */ INT_DEFINE_END(machine_check_early) @@ -1205,30 +1160,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) li r10,MSR_RI mtmsrd r10,1 - /* - * Set IRQS_ALL_DISABLED and save PACAIRQHAPPENED (see - * system_reset_common) - */ - li r10,IRQS_ALL_DISABLED - stb r10,PACAIRQSOFTMASK(r13) - lbz r10,PACAIRQHAPPENED(r13) - std r10,RESULT(r1) - ori r10,r10,PACA_IRQ_HARD_DIS - stb r10,PACAIRQHAPPENED(r13) - addi r3,r1,STACK_FRAME_OVERHEAD bl machine_check_early std r3,RESULT(r1) /* Save result */ ld r12,_MSR(r1) - /* - * Restore soft mask settings. - */ - ld r10,RESULT(r1) - stb r10,PACAIRQHAPPENED(r13) - ld r10,SOFTE(r1) - stb r10,PACAIRQSOFTMASK(r13) - #ifdef CONFIG_PPC_P7_NAP /* * Check if thread was in power saving mode. We come here when any @@ -1401,14 +1337,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) * * Handling: * - Hash MMU - * Go to do_hash_page first to see if the HPT can be filled from an entry in - * the Linux page table. Hash faults can hit in kernel mode in a fairly + * Go to do_hash_fault, which attempts to fill the HPT from an entry in the + * Linux page table. 
Hash faults can hit in kernel mode in a fairly * arbitrary state (e.g., interrupts disabled, locks held) when accessing * "non-bolted" regions, e.g., vmalloc space. However these should always be - * backed by Linux page tables. + * backed by Linux page table entries. * - * If none is found, do a Linux page fault. Linux page faults can happen in - * kernel mode due to user copy operations of course. + * If no entry is found the Linux page fault handler is invoked (by + * do_hash_fault). Linux page faults can happen in kernel mode due to user + * copy operations of course. * * KVM: The KVM HDSI handler may perform a load with MSR[DR]=1 in guest * MMU context, which may cause a DSI in the host, which must go to the @@ -1437,15 +1374,24 @@ EXC_VIRT_BEGIN(data_access, 0x4300, 0x80) EXC_VIRT_END(data_access, 0x4300, 0x80) EXC_COMMON_BEGIN(data_access_common) GEN_COMMON data_access - ld r4,_DAR(r1) - ld r5,_DSISR(r1) + ld r4,_DSISR(r1) + addi r3,r1,STACK_FRAME_OVERHEAD + andis. r0,r4,DSISR_DABRMATCH@h + bne- 1f BEGIN_MMU_FTR_SECTION - ld r6,_MSR(r1) - li r3,0x300 - b do_hash_page /* Try to handle as hpte fault */ + bl do_hash_fault MMU_FTR_SECTION_ELSE - b handle_page_fault + bl do_page_fault ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) + b interrupt_return + +1: bl do_break + /* + * do_break() may have changed the NV GPRS while handling a breakpoint. + * If so, we need to restore them with their updated values. + */ + REST_NVGPRS(r1) + b interrupt_return GEN_KVM data_access @@ -1466,14 +1412,9 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) * on user-handler data structures. * * KVM: Same as 0x300, DSLB must test for KVM guest. - * - * A dedicated save area EXSLB is used (XXX: but it actually need not be - * these days, we could use EXGEN). */ INT_DEFINE_BEGIN(data_access_slb) IVEC=0x380 - IAREA=PACA_EXSLB - IRECONCILE=0 IDAR=1 IKVM_SKIP=1 IKVM_REAL=1 @@ -1487,10 +1428,9 @@ EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80) EXC_VIRT_END(data_access_slb, 0x4380, 0x80) EXC_COMMON_BEGIN(data_access_slb_common) GEN_COMMON data_access_slb - ld r4,_DAR(r1) - addi r3,r1,STACK_FRAME_OVERHEAD BEGIN_MMU_FTR_SECTION /* HPT case, do SLB fault */ + addi r3,r1,STACK_FRAME_OVERHEAD bl do_slb_fault cmpdi r3,0 bne- 1f @@ -1501,9 +1441,6 @@ MMU_FTR_SECTION_ELSE li r3,-EFAULT ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) std r3,RESULT(r1) - RECONCILE_IRQ_STATE(r10, r11) - ld r4,_DAR(r1) - ld r5,RESULT(r1) addi r3,r1,STACK_FRAME_OVERHEAD bl do_bad_slb_fault b interrupt_return @@ -1538,15 +1475,13 @@ EXC_VIRT_BEGIN(instruction_access, 0x4400, 0x80) EXC_VIRT_END(instruction_access, 0x4400, 0x80) EXC_COMMON_BEGIN(instruction_access_common) GEN_COMMON instruction_access - ld r4,_DAR(r1) - ld r5,_DSISR(r1) + addi r3,r1,STACK_FRAME_OVERHEAD BEGIN_MMU_FTR_SECTION - ld r6,_MSR(r1) - li r3,0x400 - b do_hash_page /* Try to handle as hpte fault */ + bl do_hash_fault MMU_FTR_SECTION_ELSE - b handle_page_fault + bl do_page_fault ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) + b interrupt_return GEN_KVM instruction_access @@ -1562,8 +1497,6 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) */ INT_DEFINE_BEGIN(instruction_access_slb) IVEC=0x480 - IAREA=PACA_EXSLB - IRECONCILE=0 IISIDE=1 IDAR=1 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE @@ -1579,10 +1512,9 @@ EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80) EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80) EXC_COMMON_BEGIN(instruction_access_slb_common) GEN_COMMON instruction_access_slb - ld r4,_DAR(r1) - addi r3,r1,STACK_FRAME_OVERHEAD BEGIN_MMU_FTR_SECTION /* HPT 
case, do SLB fault */ + addi r3,r1,STACK_FRAME_OVERHEAD bl do_slb_fault cmpdi r3,0 bne- 1f @@ -1593,9 +1525,6 @@ MMU_FTR_SECTION_ELSE li r3,-EFAULT ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) std r3,RESULT(r1) - RECONCILE_IRQ_STATE(r10, r11) - ld r4,_DAR(r1) - ld r5,RESULT(r1) addi r3,r1,STACK_FRAME_OVERHEAD bl do_bad_slb_fault b interrupt_return @@ -1643,7 +1572,6 @@ EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100) EXC_COMMON_BEGIN(hardware_interrupt_common) GEN_COMMON hardware_interrupt FINISH_NAP - RUNLATCH_ON addi r3,r1,STACK_FRAME_OVERHEAD bl do_IRQ b interrupt_return @@ -1697,6 +1625,51 @@ INT_DEFINE_BEGIN(program_check) INT_DEFINE_END(program_check) EXC_REAL_BEGIN(program_check, 0x700, 0x100) + +#ifdef CONFIG_CPU_LITTLE_ENDIAN + /* + * There's a short window during boot where although the kernel is + * running little endian, any exceptions will cause the CPU to switch + * back to big endian. For example a WARN() boils down to a trap + * instruction, which will cause a program check, and we end up here but + * with the CPU in big endian mode. The first instruction of the program + * check handler (in GEN_INT_ENTRY below) is an mtsprg, which when + * executed in the wrong endian is an lhzu with a ~3GB displacement from + * r3. The content of r3 is random, so that is a load from some random + * location, and depending on the system can easily lead to a checkstop, + * or an infinitely recursive page fault. + * + * So to handle that case we have a trampoline here that can detect we + * are in the wrong endian and flip us back to the correct endian. We + * can't flip MSR[LE] using mtmsr, so we have to use rfid. That requires + * backing up SRR0/1 as well as a GPR. To do that we use SPRG0/2/3, as + * SPRG1 is already used for the paca. SPRG3 is user readable, but this + * trampoline is only active very early in boot, and SPRG3 will be + * reinitialised in vdso_getcpu_init() before userspace starts. + */ +BEGIN_FTR_SECTION + tdi 0,0,0x48 // Trap never, or in reverse endian: b . 
+ 8 + b 1f // Skip trampoline if endian is correct + .long 0xa643707d // mtsprg 0, r11 Backup r11 + .long 0xa6027a7d // mfsrr0 r11 + .long 0xa643727d // mtsprg 2, r11 Backup SRR0 in SPRG2 + .long 0xa6027b7d // mfsrr1 r11 + .long 0xa643737d // mtsprg 3, r11 Backup SRR1 in SPRG3 + .long 0xa600607d // mfmsr r11 + .long 0x01006b69 // xori r11, r11, 1 Invert MSR[LE] + .long 0xa6037b7d // mtsrr1 r11 + .long 0x34076039 // li r11, 0x734 + .long 0xa6037a7d // mtsrr0 r11 + .long 0x2400004c // rfid + mfsprg r11, 3 + mtsrr1 r11 // Restore SRR1 + mfsprg r11, 2 + mtsrr0 r11 // Restore SRR0 + mfsprg r11, 0 // Restore r11 +1: +END_FTR_SECTION(0, 1) // nop out after boot +#endif /* CONFIG_CPU_LITTLE_ENDIAN */ + GEN_INT_ENTRY program_check, virt=0 EXC_REAL_END(program_check, 0x700, 0x100) EXC_VIRT_BEGIN(program_check, 0x4700, 0x100) @@ -1755,7 +1728,6 @@ EXC_COMMON_BEGIN(program_check_common) */ INT_DEFINE_BEGIN(fp_unavailable) IVEC=0x800 - IRECONCILE=0 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE IKVM_REAL=1 #endif @@ -1770,7 +1742,6 @@ EXC_VIRT_END(fp_unavailable, 0x4800, 0x100) EXC_COMMON_BEGIN(fp_unavailable_common) GEN_COMMON fp_unavailable bne 1f /* if from user, just load it up */ - RECONCILE_IRQ_STATE(r10, r11) addi r3,r1,STACK_FRAME_OVERHEAD bl kernel_fp_unavailable_exception 0: trap @@ -1789,7 +1760,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM) b fast_interrupt_return #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2: /* User process was in a transaction */ - RECONCILE_IRQ_STATE(r10, r11) addi r3,r1,STACK_FRAME_OVERHEAD bl fp_unavailable_tm b interrupt_return @@ -1832,7 +1802,6 @@ EXC_VIRT_END(decrementer, 0x4900, 0x80) EXC_COMMON_BEGIN(decrementer_common) GEN_COMMON decrementer FINISH_NAP - RUNLATCH_ON addi r3,r1,STACK_FRAME_OVERHEAD bl timer_interrupt b interrupt_return @@ -1854,7 +1823,6 @@ INT_DEFINE_BEGIN(hdecrementer) IVEC=0x980 IHSRR=1 ISTACK=0 - IRECONCILE=0 IKVM_REAL=1 IKVM_VIRT=1 INT_DEFINE_END(hdecrementer) @@ -1919,12 +1887,11 @@ EXC_VIRT_END(doorbell_super, 0x4a00, 0x100) EXC_COMMON_BEGIN(doorbell_super_common) GEN_COMMON doorbell_super FINISH_NAP - RUNLATCH_ON addi r3,r1,STACK_FRAME_OVERHEAD #ifdef CONFIG_PPC_DOORBELL bl doorbell_exception #else - bl unknown_exception + bl unknown_async_exception #endif b interrupt_return @@ -2001,12 +1968,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) HMT_MEDIUM .if ! \virt - __LOAD_HANDLER(r10, system_call_common) - mtspr SPRN_SRR0,r10 - ld r10,PACAKMSR(r13) - mtspr SPRN_SRR1,r10 - RFI_TO_KERNEL - b . 
/* prevent speculative execution */ + __LOAD_HANDLER(r10, system_call_common_real) + mtctr r10 + bctr .else li r10,MSR_RI mtmsrd r10,1 /* Set RI (EE=0) */ @@ -2137,9 +2101,7 @@ EXC_COMMON_BEGIN(h_data_storage_common) GEN_COMMON h_data_storage addi r3,r1,STACK_FRAME_OVERHEAD BEGIN_MMU_FTR_SECTION - ld r4,_DAR(r1) - li r5,SIGSEGV - bl bad_page_fault + bl do_bad_page_fault_segv MMU_FTR_SECTION_ELSE bl unknown_exception ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX) @@ -2230,7 +2192,6 @@ INT_DEFINE_BEGIN(hmi_exception_early) IHSRR=1 IREALMODE_COMMON=1 ISTACK=0 - IRECONCILE=0 IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */ IKVM_REAL=1 INT_DEFINE_END(hmi_exception_early) @@ -2277,7 +2238,6 @@ EXC_COMMON_BEGIN(hmi_exception_early_common) EXC_COMMON_BEGIN(hmi_exception_common) GEN_COMMON hmi_exception FINISH_NAP - RUNLATCH_ON addi r3,r1,STACK_FRAME_OVERHEAD bl handle_hmi_exception b interrupt_return @@ -2307,12 +2267,11 @@ EXC_VIRT_END(h_doorbell, 0x4e80, 0x20) EXC_COMMON_BEGIN(h_doorbell_common) GEN_COMMON h_doorbell FINISH_NAP - RUNLATCH_ON addi r3,r1,STACK_FRAME_OVERHEAD #ifdef CONFIG_PPC_DOORBELL bl doorbell_exception #else - bl unknown_exception + bl unknown_async_exception #endif b interrupt_return @@ -2341,7 +2300,6 @@ EXC_VIRT_END(h_virt_irq, 0x4ea0, 0x20) EXC_COMMON_BEGIN(h_virt_irq_common) GEN_COMMON h_virt_irq FINISH_NAP - RUNLATCH_ON addi r3,r1,STACK_FRAME_OVERHEAD bl do_IRQ b interrupt_return @@ -2388,7 +2346,6 @@ EXC_VIRT_END(performance_monitor, 0x4f00, 0x20) EXC_COMMON_BEGIN(performance_monitor_common) GEN_COMMON performance_monitor FINISH_NAP - RUNLATCH_ON addi r3,r1,STACK_FRAME_OVERHEAD bl performance_monitor_exception b interrupt_return @@ -2404,7 +2361,6 @@ EXC_COMMON_BEGIN(performance_monitor_common) */ INT_DEFINE_BEGIN(altivec_unavailable) IVEC=0xf20 - IRECONCILE=0 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE IKVM_REAL=1 #endif @@ -2434,7 +2390,6 @@ BEGIN_FTR_SECTION b fast_interrupt_return #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2: /* User process was in a transaction */ - RECONCILE_IRQ_STATE(r10, r11) addi r3,r1,STACK_FRAME_OVERHEAD bl altivec_unavailable_tm b interrupt_return @@ -2442,7 +2397,6 @@ BEGIN_FTR_SECTION 1: END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif - RECONCILE_IRQ_STATE(r10, r11) addi r3,r1,STACK_FRAME_OVERHEAD bl altivec_unavailable_exception b interrupt_return @@ -2458,7 +2412,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) */ INT_DEFINE_BEGIN(vsx_unavailable) IVEC=0xf40 - IRECONCILE=0 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE IKVM_REAL=1 #endif @@ -2487,7 +2440,6 @@ BEGIN_FTR_SECTION b load_up_vsx #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2: /* User process was in a transaction */ - RECONCILE_IRQ_STATE(r10, r11) addi r3,r1,STACK_FRAME_OVERHEAD bl vsx_unavailable_tm b interrupt_return @@ -2495,7 +2447,6 @@ BEGIN_FTR_SECTION 1: END_FTR_SECTION_IFSET(CPU_FTR_VSX) #endif - RECONCILE_IRQ_STATE(r10, r11) addi r3,r1,STACK_FRAME_OVERHEAD bl vsx_unavailable_exception b interrupt_return @@ -2830,7 +2781,6 @@ EXC_VIRT_NONE(0x5800, 0x100) INT_DEFINE_BEGIN(soft_nmi) IVEC=0x900 ISTACK=0 - IRECONCILE=0 /* Soft-NMI may fire under local_irq_disable */ INT_DEFINE_END(soft_nmi) /* @@ -2849,17 +2799,6 @@ EXC_COMMON_BEGIN(soft_nmi_common) subi r1,r1,INT_FRAME_SIZE __GEN_COMMON_BODY soft_nmi - /* - * Set IRQS_ALL_DISABLED and save PACAIRQHAPPENED (see - * system_reset_common) - */ - li r10,IRQS_ALL_DISABLED - stb r10,PACAIRQSOFTMASK(r13) - lbz r10,PACAIRQHAPPENED(r13) - std r10,RESULT(r1) - ori r10,r10,PACA_IRQ_HARD_DIS - stb r10,PACAIRQHAPPENED(r13) - addi 
r3,r1,STACK_FRAME_OVERHEAD bl soft_nmi_interrupt @@ -2867,14 +2806,6 @@ EXC_COMMON_BEGIN(soft_nmi_common) li r9,0 mtmsrd r9,1 - /* - * Restore soft mask settings. - */ - ld r10,RESULT(r1) - stb r10,PACAIRQHAPPENED(r13) - ld r10,SOFTE(r1) - stb r10,PACAIRQSOFTMASK(r13) - kuap_kernel_restore r9, r10 EXCEPTION_RESTORE_REGS hsrr=0 RFI_TO_KERNEL @@ -3148,9 +3079,6 @@ kvmppc_skip_Hinterrupt: * come here. */ -EXC_COMMON_BEGIN(ppc64_runlatch_on_trampoline) - b __ppc64_runlatch_on - USE_FIXED_SECTION(virt_trampolines) /* * All code below __end_interrupts is treated as soft-masked. If @@ -3221,99 +3149,3 @@ disable_machine_check: RFI_TO_KERNEL 1: mtlr r0 blr - -/* - * Hash table stuff - */ - .balign IFETCH_ALIGN_BYTES -do_hash_page: -#ifdef CONFIG_PPC_BOOK3S_64 - lis r0,(DSISR_BAD_FAULT_64S | DSISR_DABRMATCH | DSISR_KEYFAULT)@h - ori r0,r0,DSISR_BAD_FAULT_64S@l - and. r0,r5,r0 /* weird error? */ - bne- handle_page_fault /* if not, try to insert a HPTE */ - - /* - * If we are in an "NMI" (e.g., an interrupt when soft-disabled), then - * don't call hash_page, just fail the fault. This is required to - * prevent re-entrancy problems in the hash code, namely perf - * interrupts hitting while something holds H_PAGE_BUSY, and taking a - * hash fault. See the comment in hash_preload(). - */ - ld r11, PACA_THREAD_INFO(r13) - lwz r0,TI_PREEMPT(r11) - andis. r0,r0,NMI_MASK@h - bne 77f - - /* - * r3 contains the trap number - * r4 contains the faulting address - * r5 contains dsisr - * r6 msr - * - * at return r3 = 0 for success, 1 for page fault, negative for error - */ - bl __hash_page /* build HPTE if possible */ - cmpdi r3,0 /* see if __hash_page succeeded */ - - /* Success */ - beq interrupt_return /* Return from exception on success */ - - /* Error */ - blt- 13f - - /* Reload DAR/DSISR into r4/r5 for the DABR check below */ - ld r4,_DAR(r1) - ld r5,_DSISR(r1) -#endif /* CONFIG_PPC_BOOK3S_64 */ - -/* Here we have a page fault that hash_page can't handle. */ -handle_page_fault: -11: andis. r0,r5,DSISR_DABRMATCH@h - bne- handle_dabr_fault - addi r3,r1,STACK_FRAME_OVERHEAD - bl do_page_fault - cmpdi r3,0 - beq+ interrupt_return - mr r5,r3 - addi r3,r1,STACK_FRAME_OVERHEAD - ld r4,_DAR(r1) - bl __bad_page_fault - b interrupt_return - -/* We have a data breakpoint exception - handle it */ -handle_dabr_fault: - ld r4,_DAR(r1) - ld r5,_DSISR(r1) - addi r3,r1,STACK_FRAME_OVERHEAD - bl do_break - /* - * do_break() may have changed the NV GPRS while handling a breakpoint. - * If so, we need to restore them with their updated values. - */ - REST_NVGPRS(r1) - b interrupt_return - - -#ifdef CONFIG_PPC_BOOK3S_64 -/* We have a page fault that hash_page could handle but HV refused - * the PTE insertion - */ -13: mr r5,r3 - addi r3,r1,STACK_FRAME_OVERHEAD - ld r4,_DAR(r1) - bl low_hash_fault - b interrupt_return -#endif - -/* - * We come here as a result of a DSI at a point where we don't want - * to call hash_page, such as when we are accessing memory (possibly - * user memory) inside a PMU interrupt that occurred while interrupts - * were soft-disabled. We want to invoke the exception handler for - * the access, or panic if there isn't a handler. 
- */ -77: addi r3,r1,STACK_FRAME_OVERHEAD - li r5,SIGSEGV - bl bad_page_fault - b interrupt_return diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h index a2f72c966baf..5d4706c14572 100644 --- a/arch/powerpc/kernel/head_32.h +++ b/arch/powerpc/kernel/head_32.h @@ -47,7 +47,7 @@ lwz r1,TASK_STACK-THREAD(r1) addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE 1: - mtcrf 0x7f, r1 + mtcrf 0x3f, r1 bt 32 - THREAD_ALIGN_SHIFT, stack_overflow #else subi r11, r1, INT_FRAME_SIZE /* use r1 if kernel */ @@ -116,114 +116,44 @@ .endm .macro SYSCALL_ENTRY trapno - mfspr r12,SPRN_SPRG_THREAD mfspr r9, SPRN_SRR1 -#ifdef CONFIG_VMAP_STACK - mfspr r11, SPRN_SRR0 - mtctr r11 - andi. r11, r9, MSR_PR + mfspr r10, SPRN_SRR0 + LOAD_REG_IMMEDIATE(r11, MSR_KERNEL) /* can take exceptions */ + lis r12, 1f@h + ori r12, r12, 1f@l + mtspr SPRN_SRR1, r11 + mtspr SPRN_SRR0, r12 + mfspr r12,SPRN_SPRG_THREAD mr r11, r1 lwz r1,TASK_STACK-THREAD(r12) - beq- 99f - addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE - li r10, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */ - mtmsr r10 - isync tovirt(r12, r12) + addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE + rfi +1: stw r11,GPR1(r1) stw r11,0(r1) mr r11, r1 -#else - andi. r11, r9, MSR_PR - lwz r11,TASK_STACK-THREAD(r12) - beq- 99f - addi r11, r11, THREAD_SIZE - INT_FRAME_SIZE - tophys(r11, r11) - stw r1,GPR1(r11) - stw r1,0(r11) - tovirt(r1, r11) /* set new kernel sp */ -#endif + stw r10,_NIP(r11) mflr r10 stw r10, _LINK(r11) -#ifdef CONFIG_VMAP_STACK - mfctr r10 -#else - mfspr r10,SPRN_SRR0 -#endif - stw r10,_NIP(r11) mfcr r10 rlwinm r10,r10,0,4,2 /* Clear SO bit in CR */ stw r10,_CCR(r11) /* save registers */ #ifdef CONFIG_40x rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */ -#else -#ifdef CONFIG_VMAP_STACK - LOAD_REG_IMMEDIATE(r10, MSR_KERNEL & ~MSR_IR) /* can take exceptions */ -#else - LOAD_REG_IMMEDIATE(r10, MSR_KERNEL & ~(MSR_IR|MSR_DR)) /* can take exceptions */ -#endif - mtmsr r10 /* (except for mach check in rtas) */ #endif lis r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */ stw r2,GPR2(r11) addi r10,r10,STACK_FRAME_REGS_MARKER@l stw r9,_MSR(r11) - li r2, \trapno + 1 + li r2, \trapno stw r10,8(r11) stw r2,_TRAP(r11) SAVE_GPR(0, r11) SAVE_4GPRS(3, r11) SAVE_2GPRS(7, r11) - addi r11,r1,STACK_FRAME_OVERHEAD addi r2,r12,-THREAD - stw r11,PT_REGS(r12) -#if defined(CONFIG_40x) - /* Check to see if the dbcr0 register is set up to debug. Use the - internal debug mode bit to do this. */ - lwz r12,THREAD_DBCR0(r12) - andis. r12,r12,DBCR0_IDM@h -#endif - ACCOUNT_CPU_USER_ENTRY(r2, r11, r12) -#if defined(CONFIG_40x) - beq+ 3f - /* From user and task is ptraced - load up global dbcr0 */ - li r12,-1 /* clear all pending debug events */ - mtspr SPRN_DBSR,r12 - lis r11,global_dbcr0@ha - tophys(r11,r11) - addi r11,r11,global_dbcr0@l - lwz r12,0(r11) - mtspr SPRN_DBCR0,r12 - lwz r12,4(r11) - addi r12,r12,-1 - stw r12,4(r11) -#endif - -3: - tovirt_novmstack r2, r2 /* set r2 to current */ - lis r11, transfer_to_syscall@h - ori r11, r11, transfer_to_syscall@l -#ifdef CONFIG_TRACE_IRQFLAGS - /* - * If MSR is changing we need to keep interrupts disabled at this point - * otherwise we might risk taking an interrupt before we tell lockdep - * they are enabled. 
- */ - LOAD_REG_IMMEDIATE(r10, MSR_KERNEL) - rlwimi r10, r9, 0, MSR_EE -#else - LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE) -#endif -#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS) - mtspr SPRN_NRI, r0 -#endif - mtspr SPRN_SRR1,r10 - mtspr SPRN_SRR0,r11 - rfi /* jump to handler, enable MMU */ -#ifdef CONFIG_40x - b . /* Prevent prefetch past rfi */ -#endif -99: b ret_from_kernel_syscall + b transfer_to_syscall /* jump to handler */ .endm .macro save_dar_dsisr_on_stack reg1, reg2, sp diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index a1ae00689e0f..24724a7dad49 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -179,9 +179,9 @@ _ENTRY(saved_ksp_limit) */ START_EXCEPTION(0x0300, DataStorage) EXCEPTION_PROLOG - mfspr r5, SPRN_ESR /* Grab the ESR, save it, pass arg3 */ + mfspr r5, SPRN_ESR /* Grab the ESR, save it */ stw r5, _ESR(r11) - mfspr r4, SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */ + mfspr r4, SPRN_DEAR /* Grab the DEAR, save it */ stw r4, _DEAR(r11) EXC_XFER_LITE(0x300, handle_page_fault) @@ -191,9 +191,9 @@ _ENTRY(saved_ksp_limit) */ START_EXCEPTION(0x0400, InstructionAccess) EXCEPTION_PROLOG - mr r4,r12 /* Pass SRR0 as arg2 */ - stw r4, _DEAR(r11) - li r5,0 /* Pass zero as arg3 */ + li r5,0 + stw r5, _ESR(r11) /* Zero ESR */ + stw r12, _DEAR(r11) /* SRR0 as DEAR */ EXC_XFER_LITE(0x400, handle_page_fault) /* 0x0500 - External Interrupt Exception */ @@ -476,6 +476,7 @@ _ENTRY(saved_ksp_limit) /* continue normal handling for a critical exception... */ 2: mfspr r4,SPRN_DBSR + stw r4,_ESR(r11) /* DebugException takes DBSR in _ESR */ addi r3,r1,STACK_FRAME_OVERHEAD EXC_XFER_TEMPLATE(DebugException, 0x2002, \ (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \ diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S index 8e36718f3167..813fa305c33b 100644 --- a/arch/powerpc/kernel/head_44x.S +++ b/arch/powerpc/kernel/head_44x.S @@ -376,7 +376,7 @@ interrupt_base: /* Load the next available TLB index */ lwz r13,tlb_44x_index@l(r10) - bne 2f /* Bail if permission mismach */ + bne 2f /* Bail if permission mismatch */ /* Increment, rollover, and store TLB index */ addi r13,r13,1 @@ -471,7 +471,7 @@ interrupt_base: /* Load the next available TLB index */ lwz r13,tlb_44x_index@l(r10) - bne 2f /* Bail if permission mismach */ + bne 2f /* Bail if permission mismatch */ /* Increment, rollover, and store TLB index */ addi r13,r13,1 diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 52702f3db6df..46dff3f9c31f 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -165,7 +165,7 @@ SystemCall: /* On the MPC8xx, this is a software emulation interrupt. It occurs * for all unimplemented and illegal instructions. */ - EXCEPTION(0x1000, SoftEmu, program_check_exception, EXC_XFER_STD) + EXCEPTION(0x1000, SoftEmu, emulation_assist_interrupt, EXC_XFER_STD) . = 0x1100 /* @@ -312,14 +312,14 @@ DataStoreTLBMiss: . = 0x1300 InstructionTLBError: EXCEPTION_PROLOG - mr r4,r12 andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */ andis. r10,r9,SRR1_ISI_NOPT@h beq+ .Litlbie - tlbie r4 + tlbie r12 /* 0x400 is InstructionAccess exception, needed by bad_page_fault() */ .Litlbie: - stw r4, _DAR(r11) + stw r12, _DAR(r11) + stw r5, _DSISR(r11) EXC_XFER_LITE(0x400, handle_page_fault) /* This is the data TLB error on the MPC8xx. 
This could be due to @@ -364,10 +364,9 @@ do_databreakpoint: addi r3,r1,STACK_FRAME_OVERHEAD mfspr r4,SPRN_BAR stw r4,_DAR(r11) -#ifdef CONFIG_VMAP_STACK - lwz r5,_DSISR(r11) -#else +#ifndef CONFIG_VMAP_STACK mfspr r5,SPRN_DSISR + stw r5,_DSISR(r11) #endif EXC_XFER_STD(0x1c00, do_break) diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S index 858fbc8b19f3..727fdab557c9 100644 --- a/arch/powerpc/kernel/head_book3s_32.S +++ b/arch/powerpc/kernel/head_book3s_32.S @@ -238,8 +238,8 @@ __secondary_hold_acknowledge: /* System reset */ /* core99 pmac starts the seconary here by changing the vector, and - putting it back to what it was (unknown_exception) when done. */ - EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD) + putting it back to what it was (unknown_async_exception) when done. */ + EXCEPTION(0x100, Reset, unknown_async_exception, EXC_XFER_STD) /* Machine check */ /* @@ -278,12 +278,6 @@ MachineCheck: 7: EXCEPTION_PROLOG_2 addi r3,r1,STACK_FRAME_OVERHEAD #ifdef CONFIG_PPC_CHRP -#ifdef CONFIG_VMAP_STACK - mfspr r4, SPRN_SPRG_THREAD - tovirt(r4, r4) - lwz r4, RTAS_SP(r4) - cmpwi cr1, r4, 0 -#endif beq cr1, machine_check_tramp twi 31, 0, 0 #else @@ -295,6 +289,7 @@ MachineCheck: DO_KVM 0x300 DataAccess: #ifdef CONFIG_VMAP_STACK +#ifdef CONFIG_PPC_BOOK3S_604 BEGIN_MMU_FTR_SECTION mtspr SPRN_SPRG_SCRATCH2,r10 mfspr r10, SPRN_SPRG_THREAD @@ -311,12 +306,14 @@ BEGIN_MMU_FTR_SECTION MMU_FTR_SECTION_ELSE b 1f ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE) +#endif 1: EXCEPTION_PROLOG_0 handle_dar_dsisr=1 EXCEPTION_PROLOG_1 b handle_page_fault_tramp_1 #else /* CONFIG_VMAP_STACK */ EXCEPTION_PROLOG handle_dar_dsisr=1 get_and_save_dar_dsisr_on_stack r4, r5, r11 +#ifdef CONFIG_PPC_BOOK3S_604 BEGIN_MMU_FTR_SECTION andis. r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h bne handle_page_fault_tramp_2 /* if not, try to put a PTE */ @@ -324,8 +321,11 @@ BEGIN_MMU_FTR_SECTION bl hash_page b handle_page_fault_tramp_1 MMU_FTR_SECTION_ELSE +#endif b handle_page_fault_tramp_2 +#ifdef CONFIG_PPC_BOOK3S_604 ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE) +#endif #endif /* CONFIG_VMAP_STACK */ /* Instruction access exception. */ @@ -341,12 +341,14 @@ InstructionAccess: mfspr r11, SPRN_SRR1 /* check whether user or kernel */ stw r11, SRR1(r10) mfcr r10 +#ifdef CONFIG_PPC_BOOK3S_604 BEGIN_MMU_FTR_SECTION andis. r11, r11, SRR1_ISI_NOPT@h /* no pte found? */ bne hash_page_isi .Lhash_page_isi_cont: mfspr r11, SPRN_SRR1 /* check whether user or kernel */ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) +#endif andi. r11, r11, MSR_PR EXCEPTION_PROLOG_1 @@ -357,13 +359,15 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) beq 1f /* if so, try to put a PTE */ li r3,0 /* into the hash table */ mr r4,r12 /* SRR0 is fault address */ +#ifdef CONFIG_PPC_BOOK3S_604 BEGIN_MMU_FTR_SECTION bl hash_page END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) +#endif #endif /* CONFIG_VMAP_STACK */ -1: mr r4,r12 andis. 
r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */ - stw r4, _DAR(r11) + stw r5, _DSISR(r11) + stw r12, _DAR(r11) EXC_XFER_LITE(0x400, handle_page_fault) /* External interrupt */ @@ -640,7 +644,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU) #endif #ifndef CONFIG_TAU_INT -#define TAUException unknown_exception +#define TAUException unknown_async_exception #endif EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_STD) @@ -685,13 +689,16 @@ handle_page_fault_tramp_1: #ifdef CONFIG_VMAP_STACK EXCEPTION_PROLOG_2 handle_dar_dsisr=1 #endif - lwz r4, _DAR(r11) lwz r5, _DSISR(r11) /* fall through */ handle_page_fault_tramp_2: + andis. r0, r5, DSISR_DABRMATCH@h + bne- 1f EXC_XFER_LITE(0x300, handle_page_fault) +1: EXC_XFER_STD(0x300, do_break) #ifdef CONFIG_VMAP_STACK +#ifdef CONFIG_PPC_BOOK3S_604 .macro save_regs_thread thread stw r0, THR0(\thread) stw r3, THR3(\thread) @@ -763,6 +770,7 @@ fast_hash_page_return: mfspr r11, SPRN_SPRG_SCRATCH1 mfspr r10, SPRN_SPRG_SCRATCH0 rfi +#endif /* CONFIG_PPC_BOOK3S_604 */ stack_overflow: vmap_stack_overflow_exception diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index 74e230c200fb..47857795f50a 100644 --- a/arch/powerpc/kernel/head_booke.h +++ b/arch/powerpc/kernel/head_booke.h @@ -106,10 +106,8 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV) #endif mfspr r9, SPRN_SRR1 BOOKE_CLEAR_BTB(r11) - andi. r11, r9, MSR_PR lwz r11, TASK_STACK - THREAD(r10) rlwinm r12,r12,0,4,2 /* Clear SO bit in CR */ - beq- 99f ALLOC_STACK_FRAME(r11, THREAD_SIZE - INT_FRAME_SIZE) stw r12, _CCR(r11) /* save various registers */ mflr r12 @@ -124,60 +122,15 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV) stw r2,GPR2(r11) addi r12, r12, STACK_FRAME_REGS_MARKER@l stw r9,_MSR(r11) - li r2, \trapno + 1 + li r2, \trapno stw r12, 8(r11) stw r2,_TRAP(r11) SAVE_GPR(0, r11) SAVE_4GPRS(3, r11) SAVE_2GPRS(7, r11) - addi r11,r1,STACK_FRAME_OVERHEAD addi r2,r10,-THREAD - stw r11,PT_REGS(r10) - /* Check to see if the dbcr0 register is set up to debug. Use the - internal debug mode bit to do this. */ - lwz r12,THREAD_DBCR0(r10) - andis. r12,r12,DBCR0_IDM@h - ACCOUNT_CPU_USER_ENTRY(r2, r11, r12) - beq+ 3f - /* From user and task is ptraced - load up global dbcr0 */ - li r12,-1 /* clear all pending debug events */ - mtspr SPRN_DBSR,r12 - lis r11,global_dbcr0@ha - tophys(r11,r11) - addi r11,r11,global_dbcr0@l -#ifdef CONFIG_SMP - lwz r10, TASK_CPU(r2) - slwi r10, r10, 3 - add r11, r11, r10 -#endif - lwz r12,0(r11) - mtspr SPRN_DBCR0,r12 - lwz r12,4(r11) - addi r12,r12,-1 - stw r12,4(r11) - -3: - tovirt(r2, r2) /* set r2 to current */ - lis r11, transfer_to_syscall@h - ori r11, r11, transfer_to_syscall@l -#ifdef CONFIG_TRACE_IRQFLAGS - /* - * If MSR is changing we need to keep interrupts disabled at this point - * otherwise we might risk taking an interrupt before we tell lockdep - * they are enabled. - */ - lis r10, MSR_KERNEL@h - ori r10, r10, MSR_KERNEL@l - rlwimi r10, r9, 0, MSR_EE -#else - lis r10, (MSR_KERNEL | MSR_EE)@h - ori r10, r10, (MSR_KERNEL | MSR_EE)@l -#endif - mtspr SPRN_SRR1,r10 - mtspr SPRN_SRR0,r11 - rfi /* jump to handler, enable MMU */ -99: b ret_from_kernel_syscall + b transfer_to_syscall /* jump to handler */ .endm /* To handle the additional exception priority levels on 40x and Book-E @@ -406,6 +359,7 @@ label: \ /* continue normal handling for a debug exception... 
*/ \ 2: mfspr r4,SPRN_DBSR; \ + stw r4,_ESR(r11); /* DebugException takes DBSR in _ESR */\ addi r3,r1,STACK_FRAME_OVERHEAD; \ EXC_XFER_TEMPLATE(DebugException, 0x2008, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), debug_transfer_to_handler, ret_from_debug_exc) @@ -459,6 +413,7 @@ label: \ /* continue normal handling for a critical exception... */ \ 2: mfspr r4,SPRN_DBSR; \ + stw r4,_ESR(r11); /* DebugException takes DBSR in _ESR */\ addi r3,r1,STACK_FRAME_OVERHEAD; \ EXC_XFER_TEMPLATE(DebugException, 0x2002, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), crit_transfer_to_handler, ret_from_crit_exc) @@ -476,9 +431,7 @@ label: NORMAL_EXCEPTION_PROLOG(INST_STORAGE); \ mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \ stw r5,_ESR(r11); \ - mr r4,r12; /* Pass SRR0 as arg2 */ \ - stw r4, _DEAR(r11); \ - li r5,0; /* Pass zero as arg3 */ \ + stw r12, _DEAR(r11); /* Pass SRR0 as arg2 */ \ EXC_XFER_LITE(0x0400, handle_page_fault) #define ALIGNMENT_EXCEPTION \ diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index fdd4d274c245..3f4a40cccef5 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -364,12 +364,12 @@ interrupt_base: /* Data Storage Interrupt */ START_EXCEPTION(DataStorage) NORMAL_EXCEPTION_PROLOG(DATA_STORAGE) - mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */ + mfspr r5,SPRN_ESR /* Grab the ESR, save it */ stw r5,_ESR(r11) - mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */ + mfspr r4,SPRN_DEAR /* Grab the DEAR, save it */ + stw r4, _DEAR(r11) andis. r10,r5,(ESR_ILK|ESR_DLK)@h bne 1f - stw r4, _DEAR(r11) EXC_XFER_LITE(0x0300, handle_page_fault) 1: addi r3,r1,STACK_FRAME_OVERHEAD diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index 22f249b6f58d..f9e6d83e6720 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S @@ -52,28 +52,32 @@ _GLOBAL(isa300_idle_stop_mayloss) std r1,PACAR1(r13) mflr r4 mfcr r5 - /* use stack red zone rather than a new frame for saving regs */ - std r2,-8*0(r1) - std r14,-8*1(r1) - std r15,-8*2(r1) - std r16,-8*3(r1) - std r17,-8*4(r1) - std r18,-8*5(r1) - std r19,-8*6(r1) - std r20,-8*7(r1) - std r21,-8*8(r1) - std r22,-8*9(r1) - std r23,-8*10(r1) - std r24,-8*11(r1) - std r25,-8*12(r1) - std r26,-8*13(r1) - std r27,-8*14(r1) - std r28,-8*15(r1) - std r29,-8*16(r1) - std r30,-8*17(r1) - std r31,-8*18(r1) - std r4,-8*19(r1) - std r5,-8*20(r1) + /* + * Use the stack red zone rather than a new frame for saving regs since + * in the case of no GPR loss the wakeup code branches directly back to + * the caller without deallocating the stack frame first. + */ + std r2,-8*1(r1) + std r14,-8*2(r1) + std r15,-8*3(r1) + std r16,-8*4(r1) + std r17,-8*5(r1) + std r18,-8*6(r1) + std r19,-8*7(r1) + std r20,-8*8(r1) + std r21,-8*9(r1) + std r22,-8*10(r1) + std r23,-8*11(r1) + std r24,-8*12(r1) + std r25,-8*13(r1) + std r26,-8*14(r1) + std r27,-8*15(r1) + std r28,-8*16(r1) + std r29,-8*17(r1) + std r30,-8*18(r1) + std r31,-8*19(r1) + std r4,-8*20(r1) + std r5,-8*21(r1) /* 168 bytes */ PPC_STOP b . /* catch bugs */ @@ -89,8 +93,8 @@ _GLOBAL(isa300_idle_stop_mayloss) */ _GLOBAL(idle_return_gpr_loss) ld r1,PACAR1(r13) - ld r4,-8*19(r1) - ld r5,-8*20(r1) + ld r4,-8*20(r1) + ld r5,-8*21(r1) mtlr r4 mtcr r5 /* @@ -98,25 +102,25 @@ _GLOBAL(idle_return_gpr_loss) * from PACATOC. This could be avoided for that less common case * if KVM saved its r2. 
*/ - ld r2,-8*0(r1) - ld r14,-8*1(r1) - ld r15,-8*2(r1) - ld r16,-8*3(r1) - ld r17,-8*4(r1) - ld r18,-8*5(r1) - ld r19,-8*6(r1) - ld r20,-8*7(r1) - ld r21,-8*8(r1) - ld r22,-8*9(r1) - ld r23,-8*10(r1) - ld r24,-8*11(r1) - ld r25,-8*12(r1) - ld r26,-8*13(r1) - ld r27,-8*14(r1) - ld r28,-8*15(r1) - ld r29,-8*16(r1) - ld r30,-8*17(r1) - ld r31,-8*18(r1) + ld r2,-8*1(r1) + ld r14,-8*2(r1) + ld r15,-8*3(r1) + ld r16,-8*4(r1) + ld r17,-8*5(r1) + ld r18,-8*6(r1) + ld r19,-8*7(r1) + ld r20,-8*8(r1) + ld r21,-8*9(r1) + ld r22,-8*10(r1) + ld r23,-8*11(r1) + ld r24,-8*12(r1) + ld r25,-8*13(r1) + ld r26,-8*14(r1) + ld r27,-8*15(r1) + ld r28,-8*16(r1) + ld r29,-8*17(r1) + ld r30,-8*18(r1) + ld r31,-8*19(r1) blr /* @@ -154,28 +158,32 @@ _GLOBAL(isa206_idle_insn_mayloss) std r1,PACAR1(r13) mflr r4 mfcr r5 - /* use stack red zone rather than a new frame for saving regs */ - std r2,-8*0(r1) - std r14,-8*1(r1) - std r15,-8*2(r1) - std r16,-8*3(r1) - std r17,-8*4(r1) - std r18,-8*5(r1) - std r19,-8*6(r1) - std r20,-8*7(r1) - std r21,-8*8(r1) - std r22,-8*9(r1) - std r23,-8*10(r1) - std r24,-8*11(r1) - std r25,-8*12(r1) - std r26,-8*13(r1) - std r27,-8*14(r1) - std r28,-8*15(r1) - std r29,-8*16(r1) - std r30,-8*17(r1) - std r31,-8*18(r1) - std r4,-8*19(r1) - std r5,-8*20(r1) + /* + * Use the stack red zone rather than a new frame for saving regs since + * in the case of no GPR loss the wakeup code branches directly back to + * the caller without deallocating the stack frame first. + */ + std r2,-8*1(r1) + std r14,-8*2(r1) + std r15,-8*3(r1) + std r16,-8*4(r1) + std r17,-8*5(r1) + std r18,-8*6(r1) + std r19,-8*7(r1) + std r20,-8*8(r1) + std r21,-8*9(r1) + std r22,-8*10(r1) + std r23,-8*11(r1) + std r24,-8*12(r1) + std r25,-8*13(r1) + std r26,-8*14(r1) + std r27,-8*15(r1) + std r28,-8*16(r1) + std r29,-8*17(r1) + std r30,-8*18(r1) + std r31,-8*19(r1) + std r4,-8*20(r1) + std r5,-8*21(r1) cmpwi r3,PNV_THREAD_NAP bne 1f IDLE_STATE_ENTER_SEQ_NORET(PPC_NAP) diff --git a/arch/powerpc/kernel/syscall_64.c b/arch/powerpc/kernel/interrupt.c index 7c85ed04a164..398cd86b6ada 100644 --- a/arch/powerpc/kernel/syscall_64.c +++ b/arch/powerpc/kernel/interrupt.c @@ -1,10 +1,15 @@ // SPDX-License-Identifier: GPL-2.0-or-later +#include <linux/context_tracking.h> #include <linux/err.h> +#include <linux/compat.h> + #include <asm/asm-prototypes.h> #include <asm/kup.h> #include <asm/cputime.h> +#include <asm/interrupt.h> #include <asm/hw_irq.h> +#include <asm/interrupt.h> #include <asm/kprobes.h> #include <asm/paca.h> #include <asm/ptrace.h> @@ -24,16 +29,21 @@ notrace long system_call_exception(long r3, long r4, long r5, { syscall_fn f; + regs->orig_gpr3 = r3; + if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED); + CT_WARN_ON(ct_state() == CONTEXT_KERNEL); + user_exit_irqoff(); + trace_hardirqs_off(); /* finish reconciling */ - if (IS_ENABLED(CONFIG_PPC_BOOK3S)) + if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x)) BUG_ON(!(regs->msr & MSR_RI)); BUG_ON(!(regs->msr & MSR_PR)); BUG_ON(!FULL_REGS(regs)); - BUG_ON(regs->softe != IRQS_ENABLED); + BUG_ON(arch_irq_disabled_regs(regs)); #ifdef CONFIG_PPC_PKEY if (mmu_has_feature(MMU_FTR_PKEY)) { @@ -59,19 +69,15 @@ notrace long system_call_exception(long r3, long r4, long r5, isync(); } else #endif +#ifdef CONFIG_PPC64 kuap_check_amr(); +#endif - account_cpu_user_entry(); + booke_restore_dbcr0(); -#ifdef CONFIG_PPC_SPLPAR - if (IS_ENABLED(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && - firmware_has_feature(FW_FEATURE_SPLPAR)) { - struct lppaca 
*lp = local_paca->lppaca_ptr; + account_cpu_user_entry(); - if (unlikely(local_paca->dtl_ridx != be64_to_cpu(lp->dtl_idx))) - accumulate_stolen_time(); - } -#endif + account_stolen_time(); /* * This is not required for the syscall exit path, but makes the * stack frame look nicer. If this was initialised in the first stack * frame, or if the unwinder was taught the first stack frame always * returns to user with IRQS_ENABLED, this store could be avoided! */ - regs->softe = IRQS_ENABLED; + irq_soft_mask_regs_set_state(regs, IRQS_ENABLED); local_irq_enable(); if (unlikely(current_thread_info()->flags & _TIF_SYSCALL_DOTRACE)) { - if (unlikely(regs->trap == 0x7ff0)) { + if (unlikely(trap_is_unsupported_scv(regs))) { /* Unsupported scv vector */ _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); return regs->gpr[3]; @@ -107,7 +113,7 @@ notrace long system_call_exception(long r3, long r4, long r5, r8 = regs->gpr[8]; } else if (unlikely(r0 >= NR_syscalls)) { - if (unlikely(regs->trap == 0x7ff0)) { + if (unlikely(trap_is_unsupported_scv(regs))) { /* Unsupported scv vector */ _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); return regs->gpr[3]; @@ -118,7 +124,7 @@ notrace long system_call_exception(long r3, long r4, long r5, /* May be faster to do array_index_nospec? */ barrier_nospec(); - if (unlikely(is_32bit_task())) { + if (unlikely(is_compat_task())) { f = (void *)compat_sys_call_table[r0]; r3 &= 0x00000000ffffffffULL; @@ -138,8 +144,12 @@ notrace long system_call_exception(long r3, long r4, long r5, /* * local irqs must be disabled. Returns false if the caller must re-enable * them, check for new work, and try again. + * + * This should be called with local irqs disabled, but if they were previously + * enabled when the interrupt handler returns (indicating a process-context / + * synchronous interrupt) then irqs_enabled should be true. */ -static notrace inline bool prep_irq_for_enabled_exit(bool clear_ri) +static notrace inline bool __prep_irq_for_enabled_exit(bool clear_ri) { /* This must be done with RI=1 because tracing may touch vmaps */ trace_hardirqs_on(); @@ -149,6 +159,7 @@ static notrace inline bool prep_irq_for_enabled_exit(bool clear_ri) __hard_EE_RI_disable(); else __hard_irq_disable(); +#ifdef CONFIG_PPC64 if (unlikely(lazy_irq_pending_nocheck())) { /* Took an interrupt, may have more exit work to do. */ if (clear_ri) @@ -160,10 +171,63 @@ static notrace inline bool prep_irq_for_enabled_exit(bool clear_ri) } local_paca->irq_happened = 0; irq_soft_mask_set(IRQS_ENABLED); - +#endif return true; } +static notrace inline bool prep_irq_for_enabled_exit(bool clear_ri, bool irqs_enabled) +{ + if (__prep_irq_for_enabled_exit(clear_ri)) + return true; + + /* + * Must replay pending soft-masked interrupts now. Don't just + * local_irq_enable(); local_irq_disable(); because if we are + * returning from an asynchronous interrupt here, another one + * might hit after irqs are enabled, and it would exit via this + * same path allowing another to fire, and so on unbounded. + * + * If interrupts were enabled when this interrupt exited, + * indicating a process context (synchronous) interrupt, + * local_irq_enable/disable can be used, which will enable + * interrupts rather than keeping them masked (unclear how + * much benefit this is over just replaying for all cases, + * because we immediately disable again, so all we're really + * doing is allowing hard interrupts to execute directly for + * a very small time, rather than being masked and replayed).
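A compressed user-space sketch of the two-level exit helper being introduced here; prep_irq_for_enabled_exit(), __prep_irq_for_enabled_exit() and replay_soft_interrupts() are names from the patch, while pending_irq_arrived() is a hypothetical stand-in for the lazy_irq_pending check:

	#include <stdbool.h>

	extern void local_irq_enable(void);
	extern void local_irq_disable(void);
	extern void replay_soft_interrupts(void);
	extern bool pending_irq_arrived(void); /* stand-in for lazy_irq_pending_nocheck() */

	static bool prep_irq_for_enabled_exit_sketch(bool irqs_enabled)
	{
		if (!pending_irq_arrived())
			return true;	/* nothing slipped in: the exit may proceed */

		if (irqs_enabled) {
			/* synchronous interrupt: enable/disable replays for us */
			local_irq_enable();
			local_irq_disable();
		} else {
			/* async interrupt: replay in place to bound the recursion */
			replay_soft_interrupts();
		}
		return false;	/* caller redoes its exit work and retries */
	}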
+ */ + if (irqs_enabled) { + local_irq_enable(); + local_irq_disable(); + } else { + replay_soft_interrupts(); + } + + return false; +} + +static notrace void booke_load_dbcr0(void) +{ +#ifdef CONFIG_PPC_ADV_DEBUG_REGS + unsigned long dbcr0 = current->thread.debug.dbcr0; + + if (likely(!(dbcr0 & DBCR0_IDM))) + return; + + /* + * Check to see if the dbcr0 register is set up to debug. + * Use the internal debug mode bit to do this. + */ + mtmsr(mfmsr() & ~MSR_DE); + if (IS_ENABLED(CONFIG_PPC32)) { + isync(); + global_dbcr0[smp_processor_id()] = mfspr(SPRN_DBCR0); + } + mtspr(SPRN_DBCR0, dbcr0); + mtspr(SPRN_DBSR, -1); +#endif +} + /* * This should be called after a syscall returns, with r3 the return value * from the syscall. If this function returns non-zero, the system call @@ -177,20 +241,24 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3, struct pt_regs *regs, long scv) { - unsigned long *ti_flagsp = ¤t_thread_info()->flags; unsigned long ti_flags; unsigned long ret = 0; + bool is_not_scv = !IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !scv; + CT_WARN_ON(ct_state() == CONTEXT_USER); + +#ifdef CONFIG_PPC64 kuap_check_amr(); +#endif regs->result = r3; /* Check whether the syscall is issued inside a restartable sequence */ rseq_syscall(regs); - ti_flags = *ti_flagsp; + ti_flags = current_thread_info()->flags; - if (unlikely(r3 >= (unsigned long)-MAX_ERRNO) && !scv) { + if (unlikely(r3 >= (unsigned long)-MAX_ERRNO) && is_not_scv) { if (likely(!(ti_flags & (_TIF_NOERROR | _TIF_RESTOREALL)))) { r3 = -r3; regs->ccr |= 0x10000000; /* Set SO bit in CR */ @@ -202,7 +270,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3, ret = _TIF_RESTOREALL; else regs->gpr[3] = r3; - clear_bits(_TIF_PERSYSCALL_MASK, ti_flagsp); + clear_bits(_TIF_PERSYSCALL_MASK, ¤t_thread_info()->flags); } else { regs->gpr[3] = r3; } @@ -212,9 +280,10 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3, ret |= _TIF_RESTOREALL; } -again: local_irq_disable(); - ti_flags = READ_ONCE(*ti_flagsp); + +again: + ti_flags = READ_ONCE(current_thread_info()->flags); while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) { local_irq_enable(); if (ti_flags & _TIF_NEED_RESCHED) { @@ -230,7 +299,7 @@ again: do_notify_resume(regs, ti_flags); } local_irq_disable(); - ti_flags = READ_ONCE(*ti_flagsp); + ti_flags = READ_ONCE(current_thread_info()->flags); } if (IS_ENABLED(CONFIG_PPC_BOOK3S) && IS_ENABLED(CONFIG_PPC_FPU)) { @@ -257,9 +326,13 @@ again: } } + user_enter_irqoff(); + /* scv need not set RI=0 because SRRs are not used */ - if (unlikely(!prep_irq_for_enabled_exit(!scv))) { + if (unlikely(!__prep_irq_for_enabled_exit(is_not_scv))) { + user_exit_irqoff(); local_irq_enable(); + local_irq_disable(); goto again; } @@ -267,9 +340,11 @@ again: local_paca->tm_scratch = regs->msr; #endif + booke_load_dbcr0(); + account_cpu_user_exit(); -#ifdef CONFIG_PPC_BOOK3S /* BOOK3E not yet using this */ +#ifdef CONFIG_PPC_BOOK3S_64 /* BOOK3E and ppc32 not using this */ /* * We do this at the end so that we do context switch with KERNEL AMR */ @@ -278,33 +353,32 @@ again: return ret; } -#ifdef CONFIG_PPC_BOOK3S /* BOOK3E not yet using this */ +#ifndef CONFIG_PPC_BOOK3E_64 /* BOOK3E not yet using this */ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr) { -#ifdef CONFIG_PPC_BOOK3E - struct thread_struct *ts = ¤t->thread; -#endif - unsigned long *ti_flagsp = ¤t_thread_info()->flags; unsigned long ti_flags; unsigned long flags; unsigned long ret = 0; - if 
(IS_ENABLED(CONFIG_PPC_BOOK3S)) + if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x)) BUG_ON(!(regs->msr & MSR_RI)); BUG_ON(!(regs->msr & MSR_PR)); BUG_ON(!FULL_REGS(regs)); - BUG_ON(regs->softe != IRQS_ENABLED); + BUG_ON(arch_irq_disabled_regs(regs)); + CT_WARN_ON(ct_state() == CONTEXT_USER); /* * We don't need to restore AMR on the way back to userspace for KUAP. * AMR can only have been unlocked if we interrupted the kernel. */ +#ifdef CONFIG_PPC64 kuap_check_amr(); +#endif local_irq_save(flags); again: - ti_flags = READ_ONCE(*ti_flagsp); + ti_flags = READ_ONCE(current_thread_info()->flags); while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) { local_irq_enable(); /* returning to user: may enable */ if (ti_flags & _TIF_NEED_RESCHED) { @@ -315,7 +389,7 @@ again: do_notify_resume(regs, ti_flags); } local_irq_disable(); - ti_flags = READ_ONCE(*ti_flagsp); + ti_flags = READ_ONCE(current_thread_info()->flags); } if (IS_ENABLED(CONFIG_PPC_BOOK3S) && IS_ENABLED(CONFIG_PPC_FPU)) { @@ -336,23 +410,16 @@ again: } } - if (unlikely(!prep_irq_for_enabled_exit(true))) { + user_enter_irqoff(); + + if (unlikely(!__prep_irq_for_enabled_exit(true))) { + user_exit_irqoff(); local_irq_enable(); local_irq_disable(); goto again; } -#ifdef CONFIG_PPC_BOOK3E - if (unlikely(ts->debug.dbcr0 & DBCR0_IDM)) { - /* - * Check to see if the dbcr0 register is set up to debug. - * Use the internal debug mode bit to do this. - */ - mtmsr(mfmsr() & ~MSR_DE); - mtspr(SPRN_DBCR0, ts->debug.dbcr0); - mtspr(SPRN_DBSR, -1); - } -#endif + booke_load_dbcr0(); #ifdef CONFIG_PPC_TRANSACTIONAL_MEM local_paca->tm_scratch = regs->msr; @@ -363,7 +430,9 @@ again: /* * We do this at the end so that we do context switch with KERNEL AMR */ +#ifdef CONFIG_PPC64 kuap_user_restore(regs); +#endif return ret; } @@ -372,56 +441,56 @@ void preempt_schedule_irq(void); notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsigned long msr) { - unsigned long *ti_flagsp = ¤t_thread_info()->flags; unsigned long flags; unsigned long ret = 0; +#ifdef CONFIG_PPC64 unsigned long amr; +#endif - if (IS_ENABLED(CONFIG_PPC_BOOK3S) && unlikely(!(regs->msr & MSR_RI))) + if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x) && + unlikely(!(regs->msr & MSR_RI))) unrecoverable_exception(regs); BUG_ON(regs->msr & MSR_PR); BUG_ON(!FULL_REGS(regs)); + /* + * CT_WARN_ON comes here via program_check_exception, + * so avoid recursion. + */ + if (TRAP(regs) != 0x700) + CT_WARN_ON(ct_state() == CONTEXT_USER); +#ifdef CONFIG_PPC64 amr = kuap_get_and_check_amr(); +#endif - if (unlikely(*ti_flagsp & _TIF_EMULATE_STACK_STORE)) { - clear_bits(_TIF_EMULATE_STACK_STORE, ti_flagsp); + if (unlikely(current_thread_info()->flags & _TIF_EMULATE_STACK_STORE)) { + clear_bits(_TIF_EMULATE_STACK_STORE, ¤t_thread_info()->flags); ret = 1; } local_irq_save(flags); - if (regs->softe == IRQS_ENABLED) { + if (!arch_irq_disabled_regs(regs)) { /* Returning to a kernel context with local irqs enabled. */ WARN_ON_ONCE(!(regs->msr & MSR_EE)); again: if (IS_ENABLED(CONFIG_PREEMPT)) { /* Return to preemptible kernel context */ - if (unlikely(*ti_flagsp & _TIF_NEED_RESCHED)) { + if (unlikely(current_thread_info()->flags & _TIF_NEED_RESCHED)) { if (preempt_count() == 0) preempt_schedule_irq(); } } - if (unlikely(!prep_irq_for_enabled_exit(true))) { - /* - * Can't local_irq_restore to replay if we were in - * interrupt context. Must replay directly. 
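Both interrupt_exit_user_prepare() above and syscall_exit_prepare() earlier follow the same shape: sample the work flags with interrupts disabled, enable interrupts to do the (possibly sleeping) work, then disable and re-check before committing to the exit. A generic sketch, with hypothetical flag names standing in for _TIF_NEED_RESCHED and friends:

	#include <stdbool.h>

	extern void local_irq_enable(void);
	extern void local_irq_disable(void);
	extern unsigned long read_thread_flags(void); /* stand-in for READ_ONCE(ti->flags) */
	extern void schedule(void);
	extern void do_signal_work(void);             /* stand-in for do_notify_resume() */

	#define WORK_RESCHED (1UL << 0)               /* illustrative bits */
	#define WORK_SIGNAL  (1UL << 1)

	static void exit_to_user_loop(void)
	{
		unsigned long flags;

		local_irq_disable();
		while ((flags = read_thread_flags()) & (WORK_RESCHED | WORK_SIGNAL)) {
			local_irq_enable();	/* the work itself may sleep */
			if (flags & WORK_RESCHED)
				schedule();
			else
				do_signal_work();
			local_irq_disable();	/* re-check with irqs masked */
		}
		/* leave with irqs disabled so no new work can sneak in */
	}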
- */ - if (irqs_disabled_flags(flags)) { - replay_soft_interrupts(); - } else { - local_irq_restore(flags); - local_irq_save(flags); - } - /* Took an interrupt, may have more exit work to do. */ + if (unlikely(!prep_irq_for_enabled_exit(true, !irqs_disabled_flags(flags)))) goto again; - } } else { /* Returning to a kernel context with local irqs disabled. */ __hard_EE_RI_disable(); +#ifdef CONFIG_PPC64 if (regs->msr & MSR_EE) local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS; +#endif } @@ -434,7 +503,9 @@ again: * which would cause Read-After-Write stalls. Hence, we take the AMR * value from the check above. */ +#ifdef CONFIG_PPC64 kuap_kernel_restore(regs, amr); +#endif return ret; } diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index 5b69a6a72a0e..c00214a4355c 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -25,6 +25,7 @@ #include <linux/pci.h> #include <linux/iommu.h> #include <linux/sched.h> +#include <linux/debugfs.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/iommu.h> @@ -38,6 +39,47 @@ #define DBG(...) +#ifdef CONFIG_IOMMU_DEBUGFS +static int iommu_debugfs_weight_get(void *data, u64 *val) +{ + struct iommu_table *tbl = data; + *val = bitmap_weight(tbl->it_map, tbl->it_size); + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(iommu_debugfs_fops_weight, iommu_debugfs_weight_get, NULL, "%llu\n"); + +static void iommu_debugfs_add(struct iommu_table *tbl) +{ + char name[10]; + struct dentry *liobn_entry; + + sprintf(name, "%08lx", tbl->it_index); + liobn_entry = debugfs_create_dir(name, iommu_debugfs_dir); + + debugfs_create_file_unsafe("weight", 0400, liobn_entry, tbl, &iommu_debugfs_fops_weight); + debugfs_create_ulong("it_size", 0400, liobn_entry, &tbl->it_size); + debugfs_create_ulong("it_page_shift", 0400, liobn_entry, &tbl->it_page_shift); + debugfs_create_ulong("it_reserved_start", 0400, liobn_entry, &tbl->it_reserved_start); + debugfs_create_ulong("it_reserved_end", 0400, liobn_entry, &tbl->it_reserved_end); + debugfs_create_ulong("it_indirect_levels", 0400, liobn_entry, &tbl->it_indirect_levels); + debugfs_create_ulong("it_level_size", 0400, liobn_entry, &tbl->it_level_size); +} + +static void iommu_debugfs_del(struct iommu_table *tbl) +{ + char name[10]; + struct dentry *liobn_entry; + + sprintf(name, "%08lx", tbl->it_index); + liobn_entry = debugfs_lookup(name, iommu_debugfs_dir); + if (liobn_entry) + debugfs_remove(liobn_entry); +} +#else +static void iommu_debugfs_add(struct iommu_table *tbl){} +static void iommu_debugfs_del(struct iommu_table *tbl){} +#endif + static int novmerge; static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int); @@ -725,6 +767,8 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid, welcomed = 1; } + iommu_debugfs_add(tbl); + return tbl; } @@ -744,6 +788,8 @@ static void iommu_table_free(struct kref *kref) return; } + iommu_debugfs_del(tbl); + iommu_table_release_pages(tbl); /* verify that table contains no entries */ diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index cc7a6271b6b4..086b0a743329 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -54,6 +54,7 @@ #include <linux/pgtable.h> #include <linux/uaccess.h> +#include <asm/interrupt.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/cache.h> @@ -269,6 +270,31 @@ again: } } +#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_KUAP) +static inline void replay_soft_interrupts_irqrestore(void) +{ + unsigned long kuap_state = get_kuap(); + + /* + * Check 
if anything calls local_irq_enable/restore() when KUAP is + * disabled (user access enabled). We handle that case here by saving + * and re-locking AMR but we shouldn't get here in the first place, + * hence the warning. + */ + kuap_check_amr(); + + if (kuap_state != AMR_KUAP_BLOCKED) + set_kuap(AMR_KUAP_BLOCKED); + + replay_soft_interrupts(); + + if (kuap_state != AMR_KUAP_BLOCKED) + set_kuap(kuap_state); +} +#else +#define replay_soft_interrupts_irqrestore() replay_soft_interrupts() +#endif + notrace void arch_local_irq_restore(unsigned long mask) { unsigned char irq_happened; @@ -332,7 +358,7 @@ notrace void arch_local_irq_restore(unsigned long mask) irq_soft_mask_set(IRQS_ALL_DISABLED); trace_hardirqs_off(); - replay_soft_interrupts(); + replay_soft_interrupts_irqrestore(); local_paca->irq_happened = 0; trace_hardirqs_on(); @@ -644,8 +670,6 @@ void __do_irq(struct pt_regs *regs) { unsigned int irq; - irq_enter(); - trace_irq_entry(regs); /* @@ -665,11 +689,9 @@ void __do_irq(struct pt_regs *regs) generic_handle_irq(irq); trace_irq_exit(regs); - - irq_exit(); } -void do_IRQ(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ) { struct pt_regs *old_regs = set_irq_regs(regs); void *cursp, *irqsp, *sirqsp; diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c index 9f3e133b57b7..11f0cae086ed 100644 --- a/arch/powerpc/kernel/mce.c +++ b/arch/powerpc/kernel/mce.c @@ -17,22 +17,15 @@ #include <linux/irq_work.h> #include <linux/extable.h> #include <linux/ftrace.h> +#include <linux/memblock.h> +#include <asm/interrupt.h> #include <asm/machdep.h> #include <asm/mce.h> #include <asm/nmi.h> +#include <asm/asm-prototypes.h> -static DEFINE_PER_CPU(int, mce_nest_count); -static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event); - -/* Queue for delayed MCE events. */ -static DEFINE_PER_CPU(int, mce_queue_count); -static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event_queue); - -/* Queue for delayed MCE UE events. */ -static DEFINE_PER_CPU(int, mce_ue_count); -static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], - mce_ue_event_queue); +#include "setup.h" static void machine_check_process_queued_event(struct irq_work *work); static void machine_check_ue_irq_work(struct irq_work *work); @@ -103,9 +96,10 @@ void save_mce_event(struct pt_regs *regs, long handled, struct mce_error_info *mce_err, uint64_t nip, uint64_t addr, uint64_t phys_addr) { - int index = __this_cpu_inc_return(mce_nest_count) - 1; - struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]); + int index = local_paca->mce_info->mce_nest_count++; + struct machine_check_event *mce; + mce = &local_paca->mce_info->mce_event[index]; /* * Return if we don't have enough space to log mce event. * mce_nest_count may go beyond MAX_MC_EVT but that's ok, @@ -191,7 +185,7 @@ void save_mce_event(struct pt_regs *regs, long handled, */ int get_mce_event(struct machine_check_event *mce, bool release) { - int index = __this_cpu_read(mce_nest_count) - 1; + int index = local_paca->mce_info->mce_nest_count - 1; struct machine_check_event *mc_evt; int ret = 0; @@ -201,7 +195,7 @@ int get_mce_event(struct machine_check_event *mce, bool release) /* Check if we have MCE info to process. 
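The mce.c hunks that follow move the per-cpu MCE event arrays into paca->mce_info, presumably so the nested-event bookkeeping is reachable from any context the early handler runs in (the backing allocation, added at the end of this file's changes, is kept low in memory for real-mode access). A user-space sketch of the nest-count push/pop discipline these hunks implement; the struct and the MAX_MC_EVT value here are illustrative:

	#include <stdbool.h>

	#define MAX_MC_EVT 10	/* illustrative bound */

	struct mce_event { int dummy; /* stand-in for machine_check_event */ };

	struct mce_info_sketch {
		int nest_count;
		struct mce_event event[MAX_MC_EVT];
	};

	/* push: claim a slot; on overflow the event is dropped, but the
	 * counter still records the nesting depth */
	static void save_event(struct mce_info_sketch *mi, const struct mce_event *evt)
	{
		int index = mi->nest_count++;

		if (index >= MAX_MC_EVT)
			return;
		mi->event[index] = *evt;
	}

	/* pop: copy out the newest event, optionally releasing its slot */
	static bool get_event(struct mce_info_sketch *mi, struct mce_event *out, bool release)
	{
		int index = mi->nest_count - 1;
		bool ret = false;

		if (index >= 0 && index < MAX_MC_EVT) {
			*out = mi->event[index];
			ret = true;
		}
		if (release)
			mi->nest_count--;
		return ret;
	}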
*/ if (index < MAX_MC_EVT) { - mc_evt = this_cpu_ptr(&mce_event[index]); + mc_evt = &local_paca->mce_info->mce_event[index]; /* Copy the event structure and release the original */ if (mce) *mce = *mc_evt; @@ -211,7 +205,7 @@ int get_mce_event(struct machine_check_event *mce, bool release) } /* Decrement the count to free the slot. */ if (release) - __this_cpu_dec(mce_nest_count); + local_paca->mce_info->mce_nest_count--; return ret; } @@ -233,13 +227,14 @@ static void machine_check_ue_event(struct machine_check_event *evt) { int index; - index = __this_cpu_inc_return(mce_ue_count) - 1; + index = local_paca->mce_info->mce_ue_count++; /* If queue is full, just return for now. */ if (index >= MAX_MC_EVT) { - __this_cpu_dec(mce_ue_count); + local_paca->mce_info->mce_ue_count--; return; } - memcpy(this_cpu_ptr(&mce_ue_event_queue[index]), evt, sizeof(*evt)); + memcpy(&local_paca->mce_info->mce_ue_event_queue[index], + evt, sizeof(*evt)); /* Queue work to process this event later. */ irq_work_queue(&mce_ue_event_irq_work); @@ -256,13 +251,14 @@ void machine_check_queue_event(void) if (!get_mce_event(&evt, MCE_EVENT_RELEASE)) return; - index = __this_cpu_inc_return(mce_queue_count) - 1; + index = local_paca->mce_info->mce_queue_count++; /* If queue is full, just return for now. */ if (index >= MAX_MC_EVT) { - __this_cpu_dec(mce_queue_count); + local_paca->mce_info->mce_queue_count--; return; } - memcpy(this_cpu_ptr(&mce_event_queue[index]), &evt, sizeof(evt)); + memcpy(&local_paca->mce_info->mce_event_queue[index], + &evt, sizeof(evt)); /* Queue irq work to process this event later. */ irq_work_queue(&mce_event_process_work); @@ -289,9 +285,9 @@ static void machine_process_ue_event(struct work_struct *work) int index; struct machine_check_event *evt; - while (__this_cpu_read(mce_ue_count) > 0) { - index = __this_cpu_read(mce_ue_count) - 1; - evt = this_cpu_ptr(&mce_ue_event_queue[index]); + while (local_paca->mce_info->mce_ue_count > 0) { + index = local_paca->mce_info->mce_ue_count - 1; + evt = &local_paca->mce_info->mce_ue_event_queue[index]; blocking_notifier_call_chain(&mce_notifier_list, 0, evt); #ifdef CONFIG_MEMORY_FAILURE /* @@ -304,7 +300,7 @@ static void machine_process_ue_event(struct work_struct *work) */ if (evt->error_type == MCE_ERROR_TYPE_UE) { if (evt->u.ue_error.ignore_event) { - __this_cpu_dec(mce_ue_count); + local_paca->mce_info->mce_ue_count--; continue; } @@ -320,7 +316,7 @@ static void machine_process_ue_event(struct work_struct *work) "was generated\n"); } #endif - __this_cpu_dec(mce_ue_count); + local_paca->mce_info->mce_ue_count--; } } /* @@ -338,17 +334,17 @@ static void machine_check_process_queued_event(struct irq_work *work) * For now just print it to console. * TODO: log this error event to FSP or nvram. */ - while (__this_cpu_read(mce_queue_count) > 0) { - index = __this_cpu_read(mce_queue_count) - 1; - evt = this_cpu_ptr(&mce_event_queue[index]); + while (local_paca->mce_info->mce_queue_count > 0) { + index = local_paca->mce_info->mce_queue_count - 1; + evt = &local_paca->mce_info->mce_event_queue[index]; if (evt->error_type == MCE_ERROR_TYPE_UE && evt->u.ue_error.ignore_event) { - __this_cpu_dec(mce_queue_count); + local_paca->mce_info->mce_queue_count--; continue; } machine_check_print_event_info(evt, false, false); - __this_cpu_dec(mce_queue_count); + local_paca->mce_info->mce_queue_count--; } } @@ -588,15 +584,9 @@ EXPORT_SYMBOL_GPL(machine_check_print_event_info); * * regs->nip and regs->msr contains srr0 and ssr1. 
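The next hunk converts machine_check_early() to the new DEFINE_INTERRUPT_HANDLER_NMI wrapper, which is why its open-coded nmi_enter/nmi_exit and ftrace save/restore simply disappear. A stripped-down illustration of the wrapper pattern; the real macros in <asm/interrupt.h> carry considerably more state (soft-mask reconciliation, ftrace enable/disable, the pseries real-mode special case):

	struct pt_regs;
	extern void nmi_entry_sketch(struct pt_regs *regs);	/* stand-ins */
	extern void nmi_exit_sketch(struct pt_regs *regs);

	#define DEFINE_NMI_HANDLER_SKETCH(func)				\
		static long ____##func(struct pt_regs *regs);		\
		long func(struct pt_regs *regs)				\
		{							\
			long ret;					\
			nmi_entry_sketch(regs);	/* common entry */	\
			ret = ____##func(regs);	/* handler body */	\
			nmi_exit_sketch(regs);	/* common exit */	\
			return ret;					\
		}							\
		static long ____##func(struct pt_regs *regs)

	/* usage: the handler body follows the macro, as in the patch */
	DEFINE_NMI_HANDLER_SKETCH(example_nmi)
	{
		(void)regs;
		return 0;
	}

The payoff is that irq/nmi/user tracking lives in one place instead of being repeated, slightly differently, in every handler.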
*/ -long notrace machine_check_early(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER_NMI(machine_check_early) { long handled = 0; - u8 ftrace_enabled = this_cpu_get_ftrace_enabled(); - - this_cpu_set_ftrace_enabled(0); - /* Do not use nmi_enter/exit for pseries hpte guest */ - if (radix_enabled() || !firmware_has_feature(FW_FEATURE_LPAR)) - nmi_enter(); hv_nmi_check_nonrecoverable(regs); @@ -606,11 +596,6 @@ long notrace machine_check_early(struct pt_regs *regs) if (ppc_md.machine_check_early) handled = ppc_md.machine_check_early(regs); - if (radix_enabled() || !firmware_has_feature(FW_FEATURE_LPAR)) - nmi_exit(); - - this_cpu_set_ftrace_enabled(ftrace_enabled); - return handled; } @@ -722,7 +707,7 @@ long hmi_handle_debugtrig(struct pt_regs *regs) /* * Return values: */ -long hmi_exception_realmode(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode) { int ret; @@ -741,3 +726,24 @@ long hmi_exception_realmode(struct pt_regs *regs) return 1; } + +void __init mce_init(void) +{ + struct mce_info *mce_info; + u64 limit; + int i; + + limit = min(ppc64_bolted_size(), ppc64_rma_size); + for_each_possible_cpu(i) { + mce_info = memblock_alloc_try_nid(sizeof(*mce_info), + __alignof__(*mce_info), + MEMBLOCK_LOW_LIMIT, + limit, cpu_to_node(i)); + if (!mce_info) + goto err; + paca_ptrs[i]->mce_info = mce_info; + } + return; +err: + panic("Failed to allocate memory for MCE event data\n"); +} diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c index 69bfe96884e2..7f7cdbeacd1a 100644 --- a/arch/powerpc/kernel/optprobes.c +++ b/arch/powerpc/kernel/optprobes.c @@ -142,29 +142,10 @@ void arch_remove_optimized_kprobe(struct optimized_kprobe *op) } /* - * emulate_step() requires insn to be emulated as - * second parameter. Load register 'r4' with the - * instruction. - */ -void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr) -{ - /* addis r4,0,(insn)@h */ - patch_instruction((struct ppc_inst *)addr, - ppc_inst(PPC_INST_ADDIS | ___PPC_RT(4) | - ((val >> 16) & 0xffff))); - addr++; - - /* ori r4,r4,(insn)@l */ - patch_instruction((struct ppc_inst *)addr, - ppc_inst(PPC_INST_ORI | ___PPC_RA(4) | - ___PPC_RS(4) | (val & 0xffff))); -} - -/* * Generate instructions to load provided immediate 64-bit value * to register 'reg' and patch these instructions at 'addr'. 
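For reference, this is what the removed patch_imm32_load_insns() computed: the classic lis/ori pair for a 32-bit immediate (its 64-bit sibling above chains further ori/shift steps). A standalone sketch; the opcode constants are stated from the ISA rather than quoted from this patch:

	#include <stdint.h>
	#include <stdio.h>

	/* Encodings assumed from the ISA: addis = primary opcode 15, ori = 24 */
	#define OP_ADDIS	0x3c000000u
	#define OP_ORI		0x60000000u
	#define RT(r)		((uint32_t)(r) << 21)
	#define RS(r)		((uint32_t)(r) << 21)
	#define RA(r)		((uint32_t)(r) << 16)

	/* Build the two instructions that load a 32-bit immediate into r4,
	 * mirroring what the removed helper emitted via patch_instruction() */
	static void imm32_load_insns(uint32_t val, uint32_t insn[2])
	{
		insn[0] = OP_ADDIS | RT(4) | (val >> 16);          /* lis r4,hi */
		insn[1] = OP_ORI | RS(4) | RA(4) | (val & 0xffff); /* ori r4,r4,lo */
	}

	int main(void)
	{
		uint32_t insn[2];

		imm32_load_insns(0xdeadbeef, insn);
		printf("%08x %08x\n", (unsigned)insn[0], (unsigned)insn[1]); /* 3c80dead 6084beef */
		return 0;
	}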
*/ -void patch_imm64_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr) +static void patch_imm64_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr) { /* lis reg,(op)@highest */ patch_instruction((struct ppc_inst *)addr, diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 2b555997b295..001e90cd8948 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -1699,3 +1699,13 @@ static void fixup_hide_host_resource_fsl(struct pci_dev *dev) } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl); + + +static int __init discover_phbs(void) +{ + if (ppc_md.discover_phbs) + ppc_md.discover_phbs(); + + return 0; +} +core_initcall(discover_phbs); diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c index e99b7c547d7e..61571ae23953 100644 --- a/arch/powerpc/kernel/pci_dn.c +++ b/arch/powerpc/kernel/pci_dn.c @@ -443,46 +443,6 @@ void *pci_traverse_device_nodes(struct device_node *start, } EXPORT_SYMBOL_GPL(pci_traverse_device_nodes); -static struct pci_dn *pci_dn_next_one(struct pci_dn *root, - struct pci_dn *pdn) -{ - struct list_head *next = pdn->child_list.next; - - if (next != &pdn->child_list) - return list_entry(next, struct pci_dn, list); - - while (1) { - if (pdn == root) - return NULL; - - next = pdn->list.next; - if (next != &pdn->parent->child_list) - break; - - pdn = pdn->parent; - } - - return list_entry(next, struct pci_dn, list); -} - -void *traverse_pci_dn(struct pci_dn *root, - void *(*fn)(struct pci_dn *, void *), - void *data) -{ - struct pci_dn *pdn = root; - void *ret; - - /* Only scan the child nodes */ - for (pdn = pci_dn_next_one(root, pdn); pdn; - pdn = pci_dn_next_one(root, pdn)) { - ret = fn(pdn, data); - if (ret) - return ret; - } - - return NULL; -} - static void *add_pdn(struct device_node *dn, void *data) { struct pci_controller *hose = data; @@ -521,28 +481,6 @@ void pci_devs_phb_init_dynamic(struct pci_controller *phb) pci_traverse_device_nodes(dn, add_pdn, phb); } -/** - * pci_devs_phb_init - Initialize phbs and pci devs under them. - * - * This routine walks over all phb's (pci-host bridges) on the - * system, and sets up assorted pci-related structures - * (including pci info in the device node structs) for each - * pci device found underneath. This routine runs once, - * early in the boot sequence. - */ -static int __init pci_devs_phb_init(void) -{ - struct pci_controller *phb, *tmp; - - /* This must be done first so the device nodes have valid pci info! 
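The pci_dn.c removal below is possible because PHB discovery is now driven from the discover_phbs() core_initcall added in pci-common.c above, at a point in boot where more generic infrastructure is available. A hypothetical platform would opt in roughly like this (names invented for illustration; .discover_phbs is the new machdep_calls member this series introduces):

	#include <asm/machdep.h>

	static void __init myplat_discover_phbs(void)
	{
		/*
		 * Walk the device tree and create the PHBs here, e.g. via
		 * pcibios_alloc_controller(); this now runs from
		 * core_initcall() rather than from setup_arch().
		 */
	}

	define_machine(myplat) {
		.name		= "MyPlat",
		.discover_phbs	= myplat_discover_phbs,
		/* ... other machdep callbacks ... */
	};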
*/ - list_for_each_entry_safe(phb, tmp, &hose_list, list_node) - pci_devs_phb_init_dynamic(phb); - - return 0; -} - -core_initcall(pci_devs_phb_init); - static void pci_dev_pdn_setup(struct pci_dev *pdev) { struct pci_dn *pdn; diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index a66f435dabbf..924d023dad0a 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -41,6 +41,7 @@ #include <linux/pkeys.h> #include <linux/seq_buf.h> +#include <asm/interrupt.h> #include <asm/io.h> #include <asm/processor.h> #include <asm/mmu.h> @@ -659,11 +660,10 @@ static void do_break_handler(struct pt_regs *regs) } } -void do_break (struct pt_regs *regs, unsigned long address, - unsigned long error_code) +DEFINE_INTERRUPT_HANDLER(do_break) { current->thread.trap_nr = TRAP_HWBKPT; - if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, + if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, regs->dsisr, 11, SIGSEGV) == NOTIFY_STOP) return; @@ -681,7 +681,7 @@ void do_break (struct pt_regs *regs, unsigned long address, do_break_handler(regs); /* Deliver the signal to userspace */ - force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)address); + force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)regs->dar); } #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ @@ -2047,6 +2047,9 @@ static inline int valid_emergency_stack(unsigned long sp, struct task_struct *p, unsigned long stack_page; unsigned long cpu = task_cpu(p); + if (!paca_ptrs) + return 0; + stack_page = (unsigned long)paca_ptrs[cpu]->emergency_sp - THREAD_SIZE; if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes) return 1; @@ -2176,7 +2179,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack, * See if this is an exception frame. * We look for the "regshere" marker in the current frame. 
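The show_stack() change just below addresses the "stack trace not displaying final frame" fix from this merge: the unwinder only needs room for STACK_FRAME_OVERHEAD plus a pt_regs, and demanding the larger STACK_INT_FRAME_SIZE rejected the outermost exception frame. The frame is recognised by the marker word, which is plain ASCII, as this sketch shows (value assumed from asm/ptrace.h, not quoted in this hunk):

	#include <stdint.h>
	#include <stdio.h>

	/* "regshere": the marker the exception prologue stores at
	 * STACK_FRAME_MARKER so unwinders can recognise a register frame */
	#define STACK_FRAME_REGS_MARKER 0x7265677368657265ULL

	int main(void)
	{
		for (int shift = 56; shift >= 0; shift -= 8)
			putchar((int)((STACK_FRAME_REGS_MARKER >> shift) & 0xff));
		putchar('\n');	/* prints: regshere */
		return 0;
	}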
*/ - if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE) + if (validate_sp(sp, tsk, STACK_FRAME_WITH_PT_REGS) && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) { struct pt_regs *regs = (struct pt_regs *) (sp + STACK_FRAME_OVERHEAD); diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index ae3c41730367..9a4797d1d40d 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -707,7 +707,7 @@ static void __init save_fscr_to_task(void) init_task.thread.fscr = mfspr(SPRN_FSCR); } #else -static inline void save_fscr_to_task(void) {}; +static inline void save_fscr_to_task(void) {} #endif diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index e9d4eb6144e1..ccf77b985c8f 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -1331,14 +1331,10 @@ static void __init prom_check_platform_support(void) if (prop_len > sizeof(vec)) prom_printf("WARNING: ibm,arch-vec-5-platform-support longer than expected (len: %d)\n", prop_len); - prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", - &vec, sizeof(vec)); - for (i = 0; i < sizeof(vec); i += 2) { - prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2 - , vec[i] - , vec[i + 1]); - prom_parse_platform_support(vec[i], vec[i + 1], - &supported); + prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", &vec, sizeof(vec)); + for (i = 0; i < prop_len; i += 2) { + prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2, vec[i], vec[i + 1]); + prom_parse_platform_support(vec[i], vec[i + 1], &supported); } } diff --git a/arch/powerpc/kernel/ptrace/ptrace.c b/arch/powerpc/kernel/ptrace/ptrace.c index 3d44b73adb83..4f3d4ff3728c 100644 --- a/arch/powerpc/kernel/ptrace/ptrace.c +++ b/arch/powerpc/kernel/ptrace/ptrace.c @@ -262,8 +262,6 @@ long do_syscall_trace_enter(struct pt_regs *regs) { u32 flags; - user_exit(); - flags = READ_ONCE(current_thread_info()->flags) & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE); @@ -340,8 +338,6 @@ void do_syscall_trace_leave(struct pt_regs *regs) step = test_thread_flag(TIF_SINGLESTEP); if (step || test_thread_flag(TIF_SYSCALL_TRACE)) tracehook_report_syscall_exit(regs, step); - - user_enter(); } void __init pt_regs_check(void); diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 71f38e9248be..bee984b1887b 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -64,6 +64,7 @@ #include <asm/mmu_context.h> #include <asm/cpu_has_feature.h> #include <asm/kasan.h> +#include <asm/mce.h> #include "setup.h" @@ -237,18 +238,17 @@ static int show_cpuinfo(struct seq_file *m, void *v) maj = (pvr >> 8) & 0xFF; min = pvr & 0xFF; - seq_printf(m, "processor\t: %lu\n", cpu_id); - seq_printf(m, "cpu\t\t: "); + seq_printf(m, "processor\t: %lu\ncpu\t\t: ", cpu_id); if (cur_cpu_spec->pvr_mask && cur_cpu_spec->cpu_name) - seq_printf(m, "%s", cur_cpu_spec->cpu_name); + seq_puts(m, cur_cpu_spec->cpu_name); else seq_printf(m, "unknown (%08x)", pvr); if (cpu_has_feature(CPU_FTR_ALTIVEC)) - seq_printf(m, ", altivec supported"); + seq_puts(m, ", altivec supported"); - seq_printf(m, "\n"); + seq_putc(m, '\n'); #ifdef CONFIG_TAU if (cpu_has_feature(CPU_FTR_TAU)) { @@ -327,7 +327,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "bogomips\t: %lu.%02lu\n", loops_per_jiffy / (500000 / HZ), (loops_per_jiffy / (5000 / HZ)) % 100); - seq_printf(m, "\n"); + seq_putc(m, '\n'); /* If this is the last cpu, print the summary */ if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids) @@ 
-938,6 +938,7 @@ void __init setup_arch(char **cmdline_p) exc_lvl_early_init(); emergency_stack_init(); + mce_init(); smp_release_cpus(); initmem_init(); diff --git a/arch/powerpc/kernel/setup.h b/arch/powerpc/kernel/setup.h index 2dd0d9cb5a20..84058bbc8fe9 100644 --- a/arch/powerpc/kernel/setup.h +++ b/arch/powerpc/kernel/setup.h @@ -14,31 +14,31 @@ void irqstack_early_init(void); #ifdef CONFIG_PPC32 void setup_power_save(void); #else -static inline void setup_power_save(void) { }; +static inline void setup_power_save(void) { } #endif #if defined(CONFIG_PPC64) && defined(CONFIG_SMP) void check_smt_enabled(void); #else -static inline void check_smt_enabled(void) { }; +static inline void check_smt_enabled(void) { } #endif #if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP) void setup_tlb_core_data(void); #else -static inline void setup_tlb_core_data(void) { }; +static inline void setup_tlb_core_data(void) { } #endif #if defined(CONFIG_PPC_BOOK3E) || defined(CONFIG_BOOKE) || defined(CONFIG_40x) void exc_lvl_early_init(void); #else -static inline void exc_lvl_early_init(void) { }; +static inline void exc_lvl_early_init(void) { } #endif #if defined(CONFIG_PPC64) || defined(CONFIG_VMAP_STACK) void emergency_stack_init(void); #else -static inline void emergency_stack_init(void) { }; +static inline void emergency_stack_init(void) { } #endif #ifdef CONFIG_PPC64 @@ -55,7 +55,7 @@ extern unsigned long spr_default_dscr; #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE void kvm_cma_reserve(void); #else -static inline void kvm_cma_reserve(void) { }; +static inline void kvm_cma_reserve(void) { } #endif #ifdef CONFIG_TAU diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index c28e949cc222..560ed8b975e7 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -67,6 +67,7 @@ #include <asm/kup.h> #include <asm/early_ioremap.h> #include <asm/pgalloc.h> +#include <asm/asm-prototypes.h> #include "setup.h" @@ -258,7 +259,7 @@ static void cpu_ready_for_interrupts(void) unsigned long spr_default_dscr = 0; -void __init record_spr_defaults(void) +static void __init record_spr_defaults(void) { if (early_cpu_has_feature(CPU_FTR_DSCR)) spr_default_dscr = mfspr(SPRN_DSCR); @@ -1008,7 +1009,7 @@ void rfi_flush_enable(bool enable) rfi_flush = enable; } -void entry_flush_enable(bool enable) +static void entry_flush_enable(bool enable) { if (enable) { do_entry_flush_fixups(enabled_flush_types); @@ -1020,7 +1021,7 @@ void entry_flush_enable(bool enable) entry_flush = enable; } -void uaccess_flush_enable(bool enable) +static void uaccess_flush_enable(bool enable) { if (enable) { do_uaccess_flush_fixups(enabled_flush_types); diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index 53782aa60ade..9ded046edb0e 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c @@ -282,8 +282,6 @@ static void do_signal(struct task_struct *tsk) void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) { - user_exit(); - if (thread_info_flags & _TIF_UPROBE) uprobe_notify_resume(regs); @@ -299,8 +297,6 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) tracehook_notify_resume(regs); rseq_handle_notify_resume(NULL, regs); } - - user_enter(); } static unsigned long get_tm_stackpointer(struct task_struct *tsk) diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 934cbdf6dd10..75ee918a120a 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -929,8 
+929,9 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset, regs->gpr[3] = ksig->sig; regs->gpr[4] = (unsigned long) sc; regs->nip = (unsigned long)ksig->ka.sa.sa_handler; - /* enter the signal handler in big-endian mode */ + /* enter the signal handler in native-endian mode */ regs->msr &= ~MSR_LE; + regs->msr |= (MSR_KERNEL & MSR_LE); return 0; failed: diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 9e2246e80efd..5a4d59a1070d 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -34,6 +34,7 @@ #include <linux/random.h> #include <linux/stackprotector.h> #include <linux/pgtable.h> +#include <linux/clockchips.h> #include <asm/ptrace.h> #include <linux/atomic.h> @@ -576,7 +577,7 @@ void tick_broadcast(const struct cpumask *mask) #endif #ifdef CONFIG_DEBUGGER -void debugger_ipi_callback(struct pt_regs *regs) +static void debugger_ipi_callback(struct pt_regs *regs) { debugger_ipi(regs); } diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c index d36c6391eaf5..16ff0399a257 100644 --- a/arch/powerpc/kernel/sys_ppc32.c +++ b/arch/powerpc/kernel/sys_ppc32.c @@ -59,57 +59,64 @@ unsigned long compat_sys_mmap2(unsigned long addr, size_t len, /* * long long munging: * The 32 bit ABI passes long longs in an odd even register pair. + * High and low parts are swapped depending on endian mode, + * so define a macro (similar to mips linux32) to handle that. */ +#ifdef __LITTLE_ENDIAN__ +#define merge_64(low, high) ((u64)high << 32) | low +#else +#define merge_64(high, low) ((u64)high << 32) | low +#endif compat_ssize_t compat_sys_pread64(unsigned int fd, char __user *ubuf, compat_size_t count, - u32 reg6, u32 poshi, u32 poslo) + u32 reg6, u32 pos1, u32 pos2) { - return ksys_pread64(fd, ubuf, count, ((loff_t)poshi << 32) | poslo); + return ksys_pread64(fd, ubuf, count, merge_64(pos1, pos2)); } compat_ssize_t compat_sys_pwrite64(unsigned int fd, const char __user *ubuf, compat_size_t count, - u32 reg6, u32 poshi, u32 poslo) + u32 reg6, u32 pos1, u32 pos2) { - return ksys_pwrite64(fd, ubuf, count, ((loff_t)poshi << 32) | poslo); + return ksys_pwrite64(fd, ubuf, count, merge_64(pos1, pos2)); } -compat_ssize_t compat_sys_readahead(int fd, u32 r4, u32 offhi, u32 offlo, u32 count) +compat_ssize_t compat_sys_readahead(int fd, u32 r4, u32 offset1, u32 offset2, u32 count) { - return ksys_readahead(fd, ((loff_t)offhi << 32) | offlo, count); + return ksys_readahead(fd, merge_64(offset1, offset2), count); } asmlinkage int compat_sys_truncate64(const char __user * path, u32 reg4, - unsigned long high, unsigned long low) + unsigned long len1, unsigned long len2) { - return ksys_truncate(path, (high << 32) | low); + return ksys_truncate(path, merge_64(len1, len2)); } -asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offhi, u32 offlo, - u32 lenhi, u32 lenlo) +asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offset1, u32 offset2, + u32 len1, u32 len2) { - return ksys_fallocate(fd, mode, ((loff_t)offhi << 32) | offlo, - ((loff_t)lenhi << 32) | lenlo); + return ksys_fallocate(fd, mode, ((loff_t)offset1 << 32) | offset2, + merge_64(len1, len2)); } -asmlinkage int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long high, - unsigned long low) +asmlinkage int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long len1, + unsigned long len2) { - return ksys_ftruncate(fd, (high << 32) | low); + return ksys_ftruncate(fd, merge_64(len1, len2)); } -long ppc32_fadvise64(int fd, u32 unused, u32 offset_high, u32 
offset_low, +long ppc32_fadvise64(int fd, u32 unused, u32 offset1, u32 offset2, size_t len, int advice) { - return ksys_fadvise64_64(fd, (u64)offset_high << 32 | offset_low, len, + return ksys_fadvise64_64(fd, merge_64(offset1, offset2), len, advice); } asmlinkage long compat_sys_sync_file_range2(int fd, unsigned int flags, - unsigned offset_hi, unsigned offset_lo, - unsigned nbytes_hi, unsigned nbytes_lo) + unsigned offset1, unsigned offset2, + unsigned nbytes1, unsigned nbytes2) { - loff_t offset = ((loff_t)offset_hi << 32) | offset_lo; - loff_t nbytes = ((loff_t)nbytes_hi << 32) | nbytes_lo; + loff_t offset = merge_64(offset1, offset2); + loff_t nbytes = merge_64(nbytes1, nbytes2); return ksys_sync_file_range(fd, offset, nbytes, flags); } diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl index f744eb5cba88..96b2157f0371 100644 --- a/arch/powerpc/kernel/syscalls/syscall.tbl +++ b/arch/powerpc/kernel/syscalls/syscall.tbl @@ -9,9 +9,7 @@ # 0 nospu restart_syscall sys_restart_syscall 1 nospu exit sys_exit -2 32 fork ppc_fork sys_fork -2 64 fork sys_fork -2 spu fork sys_ni_syscall +2 nospu fork sys_fork 3 common read sys_read 4 common write sys_write 5 common open sys_open compat_sys_open @@ -160,9 +158,7 @@ 119 32 sigreturn sys_sigreturn compat_sys_sigreturn 119 64 sigreturn sys_ni_syscall 119 spu sigreturn sys_ni_syscall -120 32 clone ppc_clone sys_clone -120 64 clone sys_clone -120 spu clone sys_ni_syscall +120 nospu clone sys_clone 121 common setdomainname sys_setdomainname 122 common uname sys_newuname 123 common modify_ldt sys_ni_syscall @@ -244,9 +240,7 @@ 186 spu sendfile sys_sendfile64 187 common getpmsg sys_ni_syscall 188 common putpmsg sys_ni_syscall -189 32 vfork ppc_vfork sys_vfork -189 64 vfork sys_vfork -189 spu vfork sys_ni_syscall +189 nospu vfork sys_vfork 190 common ugetrlimit sys_getrlimit compat_sys_getrlimit 191 common readahead sys_readahead compat_sys_readahead 192 32 mmap2 sys_mmap2 compat_sys_mmap2 @@ -322,9 +316,7 @@ 248 32 clock_nanosleep sys_clock_nanosleep_time32 248 64 clock_nanosleep sys_clock_nanosleep 248 spu clock_nanosleep sys_clock_nanosleep -249 32 swapcontext ppc_swapcontext compat_sys_swapcontext -249 64 swapcontext sys_swapcontext -249 spu swapcontext sys_ni_syscall +249 nospu swapcontext sys_swapcontext compat_sys_swapcontext 250 common tgkill sys_tgkill 251 32 utimes sys_utimes_time32 251 64 utimes sys_utimes @@ -522,9 +514,7 @@ 432 common fsmount sys_fsmount 433 common fspick sys_fspick 434 common pidfd_open sys_pidfd_open -435 32 clone3 ppc_clone3 sys_clone3 -435 64 clone3 sys_clone3 -435 spu clone3 sys_ni_syscall +435 nospu clone3 sys_clone3 436 common close_range sys_close_range 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c index 0b4694b8d248..6c31af7f4fa8 100644 --- a/arch/powerpc/kernel/tau_6xx.c +++ b/arch/powerpc/kernel/tau_6xx.c @@ -22,6 +22,7 @@ #include <linux/delay.h> #include <linux/workqueue.h> +#include <asm/interrupt.h> #include <asm/io.h> #include <asm/reg.h> #include <asm/nvram.h> @@ -100,16 +101,13 @@ static void TAUupdate(int cpu) * with interrupts disabled */ -void TAUException(struct pt_regs * regs) +DEFINE_INTERRUPT_HANDLER_ASYNC(TAUException) { int cpu = smp_processor_id(); - irq_enter(); tau[cpu].interrupts++; TAUupdate(cpu); - - irq_exit(); } #endif /* CONFIG_TAU_INT */ diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 67feb3524460..b67d93a609a2 100644 --- 
a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -53,9 +53,11 @@ #include <linux/of_clk.h> #include <linux/suspend.h> #include <linux/sched/cputime.h> +#include <linux/sched/clock.h> #include <linux/processor.h> #include <asm/trace.h> +#include <asm/interrupt.h> #include <asm/io.h> #include <asm/nvram.h> #include <asm/cache.h> @@ -570,7 +572,7 @@ void arch_irq_work_raise(void) * timer_interrupt - gets called when the decrementer overflows, * with interrupts disabled. */ -void timer_interrupt(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt) { struct clock_event_device *evt = this_cpu_ptr(&decrementers); u64 *next_tb = this_cpu_ptr(&decrementers_next_tb); @@ -609,7 +611,7 @@ void timer_interrupt(struct pt_regs *regs) #endif old_regs = set_irq_regs(regs); - irq_enter(); + trace_timer_interrupt_entry(regs); if (test_irq_work_pending()) { @@ -634,7 +636,7 @@ void timer_interrupt(struct pt_regs *regs) } trace_timer_interrupt_exit(regs); - irq_exit(); + set_irq_regs(old_regs); } EXPORT_SYMBOL(timer_interrupt); @@ -1030,6 +1032,7 @@ void __init time_init(void) tick_setup_hrtimer_broadcast(); of_clk_init(NULL); + enable_sched_clock_irqtime(); } /* diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 3ec7b443fe6b..1583fd1c6010 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -41,6 +41,7 @@ #include <asm/emulated_ops.h> #include <linux/uaccess.h> #include <asm/debugfs.h> +#include <asm/interrupt.h> #include <asm/io.h> #include <asm/machdep.h> #include <asm/rtas.h> @@ -342,8 +343,8 @@ static bool exception_common(int signr, struct pt_regs *regs, int code, show_signal_msg(signr, regs, code, addr); - if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs)) - local_irq_enable(); + if (arch_irqs_disabled()) + interrupt_cond_local_irq_enable(regs); current->thread.trap_nr = code; @@ -430,16 +431,10 @@ nonrecoverable: regs->msr &= ~MSR_RI; #endif } - -void system_reset_exception(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER_NMI(system_reset_exception) { unsigned long hsrr0, hsrr1; bool saved_hsrrs = false; - u8 ftrace_enabled = this_cpu_get_ftrace_enabled(); - - this_cpu_set_ftrace_enabled(0); - - nmi_enter(); /* * System reset can interrupt code where HSRRs are live and MSR[RI]=1. @@ -503,19 +498,20 @@ out: die("Unrecoverable nested System Reset", regs, SIGABRT); #endif /* Must die if the interrupt is not recoverable */ - if (!(regs->msr & MSR_RI)) + if (!(regs->msr & MSR_RI)) { + /* For the reason explained in die_mce, nmi_exit before die */ + nmi_exit(); die("Unrecoverable System Reset", regs, SIGABRT); + } if (saved_hsrrs) { mtspr(SPRN_HSRR0, hsrr0); mtspr(SPRN_HSRR1, hsrr1); } - nmi_exit(); - - this_cpu_set_ftrace_enabled(ftrace_enabled); - /* What should we do here? We could issue a shutdown or hard reset. */ + + return 0; } /* @@ -788,23 +784,33 @@ int machine_check_generic(struct pt_regs *regs) } #endif /* everything else */ -void machine_check_exception(struct pt_regs *regs) +void die_mce(const char *str, struct pt_regs *regs, long err) { - int recover = 0; - /* - * BOOK3S_64 does not call this handler as a non-maskable interrupt - * (it uses its own early real-mode handler to handle the MCE proper - * and then raises irq_work to call this handler when interrupts are - * enabled). - * - * This is silly. The BOOK3S_64 should just call a different function - * rather than expecting semantics to magically change. Something - * like 'non_nmi_machine_check_exception()', perhaps? 
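Stepping back briefly to the sys_ppc32.c change a few hunks up: merge_64() exists because the 32-bit ABI's odd/even register pairing puts the 64-bit argument's high word in the first register on big-endian but in the second on little-endian, so the macro's parameter names flip while every call site stays identical. A user-space demonstration of the same macro (parenthesised a little more defensively than the kernel version):

	#include <stdint.h>
	#include <stdio.h>

	#ifdef __LITTLE_ENDIAN__
	#define merge_64(low, high) (((uint64_t)(high) << 32) | (low))
	#else
	#define merge_64(high, low) (((uint64_t)(high) << 32) | (low))
	#endif

	int main(void)
	{
		/* e.g. the pos argument of compat_sys_pread64() arriving as pos1/pos2 */
		uint32_t pos1 = 0x00000001, pos2 = 0x80000000;

		/* BE: pos1 is the high word -> 0x180000000.
		 * LE: pos2 is the high word -> 0x8000000000000001. */
		printf("%#llx\n", (unsigned long long)merge_64(pos1, pos2));
		return 0;
	}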
+ * The machine check wants to kill the interrupted context, but + * do_exit() checks for in_interrupt() and panics in that case, so + * exit the irq/nmi before calling die. */ - const bool nmi = !IS_ENABLED(CONFIG_PPC_BOOK3S_64); + if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) + irq_exit(); + else + nmi_exit(); + die(str, regs, err); +} - if (nmi) nmi_enter(); +/* + * BOOK3S_64 does not call this handler as a non-maskable interrupt + * (it uses its own early real-mode handler to handle the MCE proper + * and then raises irq_work to call this handler when interrupts are + * enabled). + */ +#ifdef CONFIG_PPC_BOOK3S_64 +DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception) +#else +DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception) +#endif +{ + int recover = 0; __this_cpu_inc(irq_stat.mce_exceptions); @@ -830,21 +836,21 @@ void machine_check_exception(struct pt_regs *regs) if (check_io_access(regs)) goto bail; - if (nmi) nmi_exit(); - - die("Machine check", regs, SIGBUS); + die_mce("Machine check", regs, SIGBUS); +bail: /* Must die if the interrupt is not recoverable */ if (!(regs->msr & MSR_RI)) - die("Unrecoverable Machine check", regs, SIGBUS); + die_mce("Unrecoverable Machine check", regs, SIGBUS); +#ifdef CONFIG_PPC_BOOK3S_64 return; - -bail: - if (nmi) nmi_exit(); +#else + return 0; +#endif } -void SMIException(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER(SMIException) /* async? */ { die("System Management Interrupt", regs, SIGABRT); } @@ -1030,12 +1036,11 @@ static void p9_hmi_special_emu(struct pt_regs *regs) } #endif /* CONFIG_VSX */ -void handle_hmi_exception(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER_ASYNC(handle_hmi_exception) { struct pt_regs *old_regs; old_regs = set_irq_regs(regs); - irq_enter(); #ifdef CONFIG_VSX /* Real mode flagged P9 special emu is needed */ @@ -1055,46 +1060,42 @@ void handle_hmi_exception(struct pt_regs *regs) if (ppc_md.handle_hmi_exception) ppc_md.handle_hmi_exception(regs); - irq_exit(); set_irq_regs(old_regs); } -void unknown_exception(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER(unknown_exception) { - enum ctx_state prev_state = exception_enter(); - printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n", regs->nip, regs->msr, regs->trap); _exception(SIGTRAP, regs, TRAP_UNK, 0); - - exception_exit(prev_state); } -void instruction_breakpoint_exception(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception) { - enum ctx_state prev_state = exception_enter(); + printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n", + regs->nip, regs->msr, regs->trap); + _exception(SIGTRAP, regs, TRAP_UNK, 0); +} + +DEFINE_INTERRUPT_HANDLER(instruction_breakpoint_exception) +{ if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5, 5, SIGTRAP) == NOTIFY_STOP) - goto bail; + return; if (debugger_iabr_match(regs)) - goto bail; + return; _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); - -bail: - exception_exit(prev_state); } -void RunModeException(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER(RunModeException) { _exception(SIGTRAP, regs, TRAP_UNK, 0); } -void single_step_exception(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER(single_step_exception) { - enum ctx_state prev_state = exception_enter(); - clear_single_step(regs); clear_br_trace(regs); @@ -1103,16 +1104,12 @@ void single_step_exception(struct pt_regs *regs) if (notify_die(DIE_SSTEP, "single_step", regs, 5, 5, SIGTRAP) == NOTIFY_STOP) - goto bail; + return; if (debugger_sstep(regs)) - goto bail; + return; _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); - -bail: - 
exception_exit(prev_state); } -NOKPROBE_SYMBOL(single_step_exception); /* * After we have successfully emulated an instruction, we have to @@ -1436,9 +1433,8 @@ static int emulate_math(struct pt_regs *regs) static inline int emulate_math(struct pt_regs *regs) { return -1; } #endif -void program_check_exception(struct pt_regs *regs) +static void do_program_check(struct pt_regs *regs) { - enum ctx_state prev_state = exception_enter(); unsigned int reason = get_reason(regs); /* We can now get here via a FP Unavailable exception if the core @@ -1447,22 +1443,22 @@ void program_check_exception(struct pt_regs *regs) if (reason & REASON_FP) { /* IEEE FP exception */ parse_fpe(regs); - goto bail; + return; } if (reason & REASON_TRAP) { unsigned long bugaddr; /* Debugger is first in line to stop recursive faults in * rcu_lock, notify_die, or atomic_notifier_call_chain */ if (debugger_bpt(regs)) - goto bail; + return; if (kprobe_handler(regs)) - goto bail; + return; /* trap exception */ if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP) == NOTIFY_STOP) - goto bail; + return; bugaddr = regs->nip; /* @@ -1474,10 +1470,10 @@ void program_check_exception(struct pt_regs *regs) if (!(regs->msr & MSR_PR) && /* not user-mode */ report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) { regs->nip += 4; - goto bail; + return; } _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); - goto bail; + return; } #ifdef CONFIG_PPC_TRANSACTIONAL_MEM if (reason & REASON_TM) { @@ -1498,7 +1494,7 @@ void program_check_exception(struct pt_regs *regs) */ if (user_mode(regs)) { _exception(SIGILL, regs, ILL_ILLOPN, regs->nip); - goto bail; + return; } else { printk(KERN_EMERG "Unexpected TM Bad Thing exception " "at %lx (msr 0x%lx) tm_scratch=%llx\n", @@ -1518,9 +1514,7 @@ void program_check_exception(struct pt_regs *regs) if (!user_mode(regs)) goto sigill; - /* We restore the interrupt state now */ - if (!arch_irq_disabled_regs(regs)) - local_irq_enable(); + interrupt_cond_local_irq_enable(regs); /* (reason & REASON_ILLEGAL) would be the obvious thing here, * but there seems to be a hardware bug on the 405GP (RevD) @@ -1531,7 +1525,7 @@ void program_check_exception(struct pt_regs *regs) * pattern to occurrences etc. -dgibson 31/Mar/2003 */ if (!emulate_math(regs)) - goto bail; + return; /* Try to emulate it if we should. */ if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) { @@ -1539,10 +1533,10 @@ void program_check_exception(struct pt_regs *regs) case 0: regs->nip += 4; emulate_single_step(regs); - goto bail; + return; case -EFAULT: _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); - goto bail; + return; } } @@ -1552,34 +1546,31 @@ sigill: else _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); -bail: - exception_exit(prev_state); } -NOKPROBE_SYMBOL(program_check_exception); + +DEFINE_INTERRUPT_HANDLER(program_check_exception) +{ + do_program_check(regs); +} /* * This occurs when running in hypervisor mode on POWER6 or later * and an illegal instruction is encountered. 
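interrupt_cond_local_irq_enable(), used above and in several later hunks, presumably wraps exactly the open-coded sequence it replaces (the real definition lives in <asm/interrupt.h>; this is a sketch):

	#include <stdbool.h>

	struct pt_regs;
	extern bool arch_irq_disabled_regs(struct pt_regs *regs);
	extern void local_irq_enable(void);

	/* Re-enable interrupts only if they were enabled in the context the
	 * exception interrupted, matching the removed open-coded form */
	static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
	{
		if (!arch_irq_disabled_regs(regs))
			local_irq_enable();
	}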
*/ -void emulation_assist_interrupt(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER(emulation_assist_interrupt) { regs->msr |= REASON_ILLEGAL; - program_check_exception(regs); + do_program_check(regs); } -NOKPROBE_SYMBOL(emulation_assist_interrupt); -void alignment_exception(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER(alignment_exception) { - enum ctx_state prev_state = exception_enter(); int sig, code, fixed = 0; unsigned long reason; - /* We restore the interrupt state now */ - if (!arch_irq_disabled_regs(regs)) - local_irq_enable(); + interrupt_cond_local_irq_enable(regs); reason = get_reason(regs); - if (reason & REASON_BOUNDARY) { sig = SIGBUS; code = BUS_ADRALN; @@ -1587,7 +1578,7 @@ void alignment_exception(struct pt_regs *regs) } if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT)) - goto bail; + return; /* we don't implement logging of alignment exceptions */ if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS)) @@ -1597,7 +1588,7 @@ void alignment_exception(struct pt_regs *regs) /* skip over emulated instruction */ regs->nip += inst_length(reason); emulate_single_step(regs); - goto bail; + return; } /* Operand address was bad */ @@ -1612,13 +1603,10 @@ bad: if (user_mode(regs)) _exception(sig, regs, code, regs->dar); else - bad_page_fault(regs, regs->dar, sig); - -bail: - exception_exit(prev_state); + bad_page_fault(regs, sig); } -void StackOverflow(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER(StackOverflow) { pr_crit("Kernel stack overflow in process %s[%d], r1=%lx\n", current->comm, task_pid_nr(current), regs->gpr[1]); @@ -1627,46 +1615,33 @@ void StackOverflow(struct pt_regs *regs) panic("kernel stack overflow"); } -void stack_overflow_exception(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER(stack_overflow_exception) { - enum ctx_state prev_state = exception_enter(); - die("Kernel stack overflow", regs, SIGSEGV); - - exception_exit(prev_state); } -void kernel_fp_unavailable_exception(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception) { - enum ctx_state prev_state = exception_enter(); - printk(KERN_EMERG "Unrecoverable FP Unavailable Exception " "%lx at %lx\n", regs->trap, regs->nip); die("Unrecoverable FP Unavailable Exception", regs, SIGABRT); - - exception_exit(prev_state); } -void altivec_unavailable_exception(struct pt_regs *regs) +DEFINE_INTERRUPT_HANDLER(altivec_unavailable_exception) { - enum ctx_state prev_state = exception_enter(); - if (user_mode(regs)) { /* A user program has executed an altivec instruction, but this kernel doesn't support altivec. 
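In alignment_exception() above, the emulated instruction is skipped with regs->nip += inst_length(reason) rather than a fixed 4, since ISA v3.1 prefixed instructions are 8 bytes long. inst_length() is not shown in this diff, but plausibly reduces to something like the following (the bit value is illustrative; the real one comes from an SRR1 status bit):

	#include <stdio.h>

	#define REASON_PREFIXED	0x20000000UL	/* illustrative SRR1 bit */
	#define inst_length(reason)	(((reason) & REASON_PREFIXED) ? 8 : 4)

	int main(void)
	{
		printf("%d %d\n", inst_length(0UL), inst_length(REASON_PREFIXED)); /* 4 8 */
		return 0;
	}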
@@ -1697,7 +1672,7 @@ static void tm_unavailable(struct pt_regs *regs)
 	die("Unrecoverable TM Unavailable Exception", regs, SIGABRT);
 }
 
-void facility_unavailable_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(facility_unavailable_exception)
 {
 	static char *facility_strings[] = {
 		[FSCR_FP_LG] = "FPU",
@@ -1737,9 +1712,7 @@ void facility_unavailable_exception(struct pt_regs *regs)
 		die("Unexpected facility unavailable exception", regs, SIGABRT);
 	}
 
-	/* We restore the interrupt state now */
-	if (!arch_irq_disabled_regs(regs))
-		local_irq_enable();
+	interrupt_cond_local_irq_enable(regs);
 
 	if (status == FSCR_DSCR_LG) {
 		/*
@@ -1817,7 +1790,7 @@ out:
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 
-void fp_unavailable_tm(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(fp_unavailable_tm)
 {
 	/* Note:  This does not handle any kind of FP laziness. */
 
@@ -1850,7 +1823,7 @@ void fp_unavailable_tm(struct pt_regs *regs)
 	tm_recheckpoint(&current->thread);
 }
 
-void altivec_unavailable_tm(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(altivec_unavailable_tm)
 {
 	/* See the comments in fp_unavailable_tm().  This function operates
 	 * the same way.
@@ -1865,7 +1838,7 @@
 	current->thread.used_vr = 1;
 }
 
-void vsx_unavailable_tm(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(vsx_unavailable_tm)
 {
 	/* See the comments in fp_unavailable_tm().  This works similarly,
 	 * though we're loading both FP and VEC registers in here.
@@ -1890,11 +1863,40 @@
 }
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
-void performance_monitor_exception(struct pt_regs *regs)
+#ifdef CONFIG_PPC64
+DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
+DEFINE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi)
 {
 	__this_cpu_inc(irq_stat.pmu_irqs);
 
 	perf_irq(regs);
+
+	return 0;
+}
+#endif
+
+DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async);
+DEFINE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async)
+{
+	__this_cpu_inc(irq_stat.pmu_irqs);
+
+	perf_irq(regs);
+}
+
+DEFINE_INTERRUPT_HANDLER_RAW(performance_monitor_exception)
+{
+	/*
+	 * On 64-bit, if perf interrupts hit in a local_irq_disable
+	 * (soft-masked) region, we consider them as NMIs. This is required to
+	 * prevent hash faults on user addresses when reading callchains (and
+	 * looks better from an irq tracing perspective).
+	 */
+	if (IS_ENABLED(CONFIG_PPC64) && unlikely(arch_irq_disabled_regs(regs)))
+		performance_monitor_exception_nmi(regs);
+	else
+		performance_monitor_exception_async(regs);
+
+	return 0;
 }
 
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
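The performance monitor interrupt is the one conversion that picks its flavor at run time: the RAW wrapper does no entry/exit bookkeeping of its own, so the body can first decide whether this occurrence counts as an NMI (it hit inside a soft-masked region) or as an ordinary async interrupt, and only then enter through the matching wrapper. A simplified sketch of the NMI flavor, assuming the same naming scheme as the plain wrapper above (the real macro tracks additional state):

#define DEFINE_INTERRUPT_HANDLER_NMI(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
long func(struct pt_regs *regs)						\
{									\
	struct interrupt_nmi_state state;				\
	long ret;							\
									\
	interrupt_nmi_enter_prepare(regs, &state);	/* wraps nmi_enter() */ \
	ret = ____##func(regs);						\
	interrupt_nmi_exit_prepare(regs, &state);	/* wraps nmi_exit() */	\
									\
	return ret;							\
}									\
									\
static __always_inline long ____##func(struct pt_regs *regs)

The ASYNC flavor is analogous but void-returning, with irq_enter()/irq_exit()-style accounting instead.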
@@ -1957,8 +1959,10 @@ static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
 	mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
 }
 
-void DebugException(struct pt_regs *regs, unsigned long debug_status)
+DEFINE_INTERRUPT_HANDLER(DebugException)
 {
+	unsigned long debug_status = regs->dsisr;
+
 	current->thread.debug.dbsr = debug_status;
 
 	/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
@@ -2024,11 +2028,10 @@
 	} else
 		handle_debug(regs, debug_status);
 }
-NOKPROBE_SYMBOL(DebugException);
 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
 
 #ifdef CONFIG_ALTIVEC
-void altivec_assist_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(altivec_assist_exception)
 {
 	int err;
 
@@ -2062,9 +2065,10 @@
 #endif /* CONFIG_ALTIVEC */
 
 #ifdef CONFIG_FSL_BOOKE
-void CacheLockingException(struct pt_regs *regs, unsigned long address,
-			   unsigned long error_code)
+DEFINE_INTERRUPT_HANDLER(CacheLockingException)
 {
+	unsigned long error_code = regs->dsisr;
+
 	/* We treat cache locking instructions from the user
 	 * as priv ops, in the future we could try to do
 	 * something smarter
@@ -2076,7 +2080,7 @@
 #endif /* CONFIG_FSL_BOOKE */
 
 #ifdef CONFIG_SPE
-void SPEFloatingPointException(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(SPEFloatingPointException)
 {
 	extern int do_spe_mathemu(struct pt_regs *regs);
 	unsigned long spefscr;
@@ -2084,9 +2088,7 @@
 	int code = FPE_FLTUNK;
 	int err;
 
-	/* We restore the interrupt state now */
-	if (!arch_irq_disabled_regs(regs))
-		local_irq_enable();
+	interrupt_cond_local_irq_enable(regs);
 
 	flush_spe_to_thread(current);
 
@@ -2128,14 +2130,12 @@
 	return;
 }
 
-void SPEFloatingPointRoundException(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(SPEFloatingPointRoundException)
 {
 	extern int speround_handler(struct pt_regs *regs);
 	int err;
 
-	/* We restore the interrupt state now */
-	if (!arch_irq_disabled_regs(regs))
-		local_irq_enable();
+	interrupt_cond_local_irq_enable(regs);
 
 	preempt_disable();
 	if (regs->msr & MSR_SPE)
@@ -2170,13 +2170,12 @@
  * in the MSR is 0. This indicates that SRR0/1 are live, and that
  * we therefore lost state by taking this exception.
  */
-void unrecoverable_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(unrecoverable_exception)
 {
 	pr_emerg("Unrecoverable exception %lx at %lx (msr=%lx)\n",
 		 regs->trap, regs->nip, regs->msr);
 	die("Unrecoverable exception", regs, SIGABRT);
 }
-NOKPROBE_SYMBOL(unrecoverable_exception);
 
 #if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
 /*
@@ -2190,7 +2189,7 @@ void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
 	return;
 }
 
-void WatchdogException(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(WatchdogException) /* XXX NMI? async? */
 {
 	printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
 	WatchdogHandler(regs);
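Note the second half of the calling-convention change here: handlers such as DebugException() and CacheLockingException() no longer take SPR values as extra C arguments; the low-level entry code saves DBSR/ESR into the register frame and the handler reads regs->dsisr itself. That gives every handler the single (struct pt_regs *) prototype the wrapper macros require. A hypothetical new BookE handler would take the same shape (the name and body below are illustrative only, not part of this patch):

DEFINE_INTERRUPT_HANDLER(example_booke_handler)	/* hypothetical */
{
	unsigned long status = regs->dsisr;	/* SPR value saved by entry code */

	if (status & DBSR_IC)			/* e.g. instruction-complete */
		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
}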
@@ -2201,13 +2200,12 @@
  * We enter here if we discover during exception entry that we are
  * running in supervisor mode with a userspace value in the stack pointer.
  */
-void kernel_bad_stack(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(kernel_bad_stack)
 {
 	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
 	       regs->gpr[1], regs->nip);
 	die("Bad kernel stack pointer", regs, SIGABRT);
 }
-NOKPROBE_SYMBOL(kernel_bad_stack);
 
 void __init trap_init(void)
 {
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
index af3c15a1d41e..c9a8f4781a10 100644
--- a/arch/powerpc/kernel/watchdog.c
+++ b/arch/powerpc/kernel/watchdog.c
@@ -26,7 +26,9 @@
 #include <linux/delay.h>
 #include <linux/smp.h>
 
+#include <asm/interrupt.h>
 #include <asm/paca.h>
+#include <asm/nmi.h>
 
 /*
  * The powerpc watchdog ensures that each CPU is able to service timers.
@@ -247,16 +249,17 @@ static void watchdog_timer_interrupt(int cpu)
 		watchdog_smp_panic(cpu, tb);
 }
 
-void soft_nmi_interrupt(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER_NMI(soft_nmi_interrupt)
 {
 	unsigned long flags;
 	int cpu = raw_smp_processor_id();
 	u64 tb;
 
-	if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
-		return;
+	/* should only arrive from kernel, with irqs disabled */
+	WARN_ON_ONCE(!arch_irq_disabled_regs(regs));
 
-	nmi_enter();
+	if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
+		return 0;
 
 	__this_cpu_inc(irq_stat.soft_nmi_irqs);
 
@@ -265,7 +268,7 @@
 	wd_smp_lock(&flags);
 	if (cpumask_test_cpu(cpu, &wd_smp_cpus_stuck)) {
 		wd_smp_unlock(&flags);
-		goto out;
+		return 0;
 	}
 	set_cpu_stuck(cpu, tb);
 
@@ -289,8 +292,7 @@
 	if (wd_panic_timeout_tb < 0x7fffffff)
 		mtspr(SPRN_DEC, wd_panic_timeout_tb);
 
-out:
-	nmi_exit();
+	return 0;
 }
 
 static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
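The watchdog conversion shows the handler-side consequences of the NMI wrapper: the explicit nmi_enter()/nmi_exit() calls and the out: label go away, every early exit becomes return 0 (NMI handlers return a long), and the new WARN_ON_ONCE documents that the soft-NMI can only arrive with interrupts soft-disabled. The arch_irq_disabled_regs() test used both here and in the performance monitor dispatch inspects the soft-mask state captured in the frame, not MSR[EE]; roughly, on 64-bit (a sketch; see asm/hw_irq.h for the real definition):

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	/* regs->softe records the soft-mask state at interrupt entry. */
	return !(regs->softe & IRQS_ENABLED);
}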