Diffstat (limited to 'arch/powerpc/kernel/exceptions-64s.S')
-rw-r--r-- | arch/powerpc/kernel/exceptions-64s.S | 1437
1 file changed, 992 insertions(+), 445 deletions(-)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 73ba246ca11d..eee5bef736c8 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -21,6 +21,698 @@
 #include <asm/feature-fixups.h>
 #include <asm/kup.h>
 
+/* PACA save area offsets (exgen, exmc, etc) */
+#define EX_R9		0
+#define EX_R10		8
+#define EX_R11		16
+#define EX_R12		24
+#define EX_R13		32
+#define EX_DAR		40
+#define EX_DSISR	48
+#define EX_CCR		52
+#define EX_CFAR		56
+#define EX_PPR		64
+#if defined(CONFIG_RELOCATABLE)
+#define EX_CTR		72
+.if EX_SIZE != 10
+	.error "EX_SIZE is wrong"
+.endif
+#else
+.if EX_SIZE != 9
+	.error "EX_SIZE is wrong"
+.endif
+#endif
+
+/*
+ * We're short on space and time in the exception prolog, so we can't
+ * use the normal LOAD_REG_IMMEDIATE macro to load the address of label.
+ * Instead we get the base of the kernel from paca->kernelbase and or in the low
+ * part of label. This requires that the label be within 64KB of kernelbase, and
+ * that kernelbase be 64K aligned.
+ */
+#define LOAD_HANDLER(reg, label) \
+	ld	reg,PACAKBASE(r13);	/* get high part of &label */ \
+	ori	reg,reg,FIXED_SYMBOL_ABS_ADDR(label)
+
+#define __LOAD_HANDLER(reg, label) \
+	ld	reg,PACAKBASE(r13); \
+	ori	reg,reg,(ABS_ADDR(label))@l
+
+/*
+ * Branches from unrelocated code (e.g., interrupts) to labels outside
+ * head-y require >64K offsets.
+ */
+#define __LOAD_FAR_HANDLER(reg, label) \
+	ld	reg,PACAKBASE(r13); \
+	ori	reg,reg,(ABS_ADDR(label))@l; \
+	addis	reg,reg,(ABS_ADDR(label))@h
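
[Editor's note] A worked example may help here; the addresses are purely illustrative. Suppose paca->kernelbase holds 0xc000000000000000 (64K aligned) and the label sits at 0xc000000000004e00, i.e. within 64KB of kernelbase. LOAD_HANDLER then reduces to:

	ld	r10,PACAKBASE(r13)	# r10 = 0xc000000000000000
	ori	r10,r10,0x4e00		# r10 = 0xc000000000004e00

Because ori can only OR in a 16-bit immediate, both constraints in the comment above are load-bearing: if kernelbase were not 64K aligned, or the label lay 64KB or more away, the low 16 bits would not suffice and __LOAD_FAR_HANDLER's extra addis would be needed instead.
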
+
+/* Exception register prefixes */
+#define EXC_HV	1
+#define EXC_STD	0
+
+#if defined(CONFIG_RELOCATABLE)
+/*
+ * If we support interrupts with relocation on AND we're a relocatable kernel,
+ * we need to use CTR to get to the 2nd level handler. So, save/restore it
+ * when required.
+ */
+#define SAVE_CTR(reg, area)	mfctr	reg ;	std	reg,area+EX_CTR(r13)
+#define GET_CTR(reg, area)	ld	reg,area+EX_CTR(r13)
+#define RESTORE_CTR(reg, area)	ld	reg,area+EX_CTR(r13) ; mtctr reg
+#else
+/* ...else CTR is unused and in register. */
+#define SAVE_CTR(reg, area)
+#define GET_CTR(reg, area)	mfctr	reg
+#define RESTORE_CTR(reg, area)
+#endif
+
+/*
+ * PPR save/restore macros used in exceptions-64s.S
+ * Used for P7 or later processors
+ */
+#define SAVE_PPR(area, ra) \
+BEGIN_FTR_SECTION_NESTED(940) \
+	ld	ra,area+EX_PPR(r13);	/* Read PPR from paca */ \
+	std	ra,_PPR(r1); \
+END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,940)
+
+#define RESTORE_PPR_PACA(area, ra) \
+BEGIN_FTR_SECTION_NESTED(941) \
+	ld	ra,area+EX_PPR(r13); \
+	mtspr	SPRN_PPR,ra; \
+END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,941)
+
+/*
+ * Get an SPR into a register if the CPU has the given feature
+ */
+#define OPT_GET_SPR(ra, spr, ftr) \
+BEGIN_FTR_SECTION_NESTED(943) \
+	mfspr	ra,spr; \
+END_FTR_SECTION_NESTED(ftr,ftr,943)
+
+/*
+ * Set an SPR from a register if the CPU has the given feature
+ */
+#define OPT_SET_SPR(ra, spr, ftr) \
+BEGIN_FTR_SECTION_NESTED(943) \
+	mtspr	spr,ra; \
+END_FTR_SECTION_NESTED(ftr,ftr,943)
+
+/*
+ * Save a register to the PACA if the CPU has the given feature
+ */
+#define OPT_SAVE_REG_TO_PACA(offset, ra, ftr) \
+BEGIN_FTR_SECTION_NESTED(943) \
+	std	ra,offset(r13); \
+END_FTR_SECTION_NESTED(ftr,ftr,943)
+
+.macro EXCEPTION_PROLOG_0 area
+	SET_SCRATCH0(r13)			/* save r13 */
+	GET_PACA(r13)
+	std	r9,\area\()+EX_R9(r13)		/* save r9 */
+	OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR)
+	HMT_MEDIUM
+	std	r10,\area\()+EX_R10(r13)	/* save r10 - r12 */
+	OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR)
+.endm
+
+.macro EXCEPTION_PROLOG_1 hsrr, area, kvm, vec, dar, dsisr, bitmask
+	OPT_SAVE_REG_TO_PACA(\area\()+EX_PPR, r9, CPU_FTR_HAS_PPR)
+	OPT_SAVE_REG_TO_PACA(\area\()+EX_CFAR, r10, CPU_FTR_CFAR)
+	INTERRUPT_TO_KERNEL
+	SAVE_CTR(r10, \area\())
+	mfcr	r9
+	.if \kvm
+		KVMTEST \hsrr \vec
+	.endif
+	.if \bitmask
+		lbz	r10,PACAIRQSOFTMASK(r13)
+		andi.	r10,r10,\bitmask
+		/* Associate vector numbers with bits in paca->irq_happened */
+		.if \vec == 0x500 || \vec == 0xea0
+		li	r10,PACA_IRQ_EE
+		.elseif \vec == 0x900
+		li	r10,PACA_IRQ_DEC
+		.elseif \vec == 0xa00 || \vec == 0xe80
+		li	r10,PACA_IRQ_DBELL
+		.elseif \vec == 0xe60
+		li	r10,PACA_IRQ_HMI
+		.elseif \vec == 0xf00
+		li	r10,PACA_IRQ_PMI
+		.else
+		.abort "Bad maskable vector"
+		.endif
+
+		.if \hsrr
+		bne	masked_Hinterrupt
+		.else
+		bne	masked_interrupt
+		.endif
+	.endif
+
+	std	r11,\area\()+EX_R11(r13)
+	std	r12,\area\()+EX_R12(r13)
+
+	/*
+	 * DAR/DSISR, SCRATCH0 must be read before setting MSR[RI],
+	 * because a d-side MCE will clobber those registers so is
+	 * not recoverable if they are live.
+	 */
+	GET_SCRATCH0(r10)
+	std	r10,\area\()+EX_R13(r13)
+	.if \dar
+	mfspr	r10,SPRN_DAR
+	std	r10,\area\()+EX_DAR(r13)
+	.endif
+	.if \dsisr
+	mfspr	r10,SPRN_DSISR
+	stw	r10,\area\()+EX_DSISR(r13)
+	.endif
+.endm
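
[Editor's note] To see the masking logic concretely, here is roughly what the assembler emits for a maskable decrementer entry such as `EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, 0x900, 0, 0, IRQS_DISABLED` (a sketch; only the \bitmask section is shown):

	lbz	r10,PACAIRQSOFTMASK(r13)	# current soft-mask state
	andi.	r10,r10,IRQS_DISABLED		# is this interrupt class masked?
	li	r10,PACA_IRQ_DEC		# \vec == 0x900 selects the DEC bit
	bne	masked_interrupt		# masked: record in irq_happened and return

Note that r10 is loaded with the paca->irq_happened bit unconditionally, but it is only consumed on the masked path.
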
+
+.macro EXCEPTION_PROLOG_2_REAL label, hsrr, set_ri
+	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
+	.if ! \set_ri
+	xori	r10,r10,MSR_RI		/* Clear MSR_RI */
+	.endif
+	.if \hsrr
+	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
+	mfspr	r12,SPRN_HSRR1		/* and HSRR1 */
+	mtspr	SPRN_HSRR1,r10
+	.else
+	mfspr	r11,SPRN_SRR0		/* save SRR0 */
+	mfspr	r12,SPRN_SRR1		/* and SRR1 */
+	mtspr	SPRN_SRR1,r10
+	.endif
+	LOAD_HANDLER(r10, \label\())
+	.if \hsrr
+	mtspr	SPRN_HSRR0,r10
+	HRFI_TO_KERNEL
+	.else
+	mtspr	SPRN_SRR0,r10
+	RFI_TO_KERNEL
+	.endif
+	b	.	/* prevent speculative execution */
+.endm
+
+.macro EXCEPTION_PROLOG_2_VIRT label, hsrr
+#ifdef CONFIG_RELOCATABLE
+	.if \hsrr
+	mfspr	r11,SPRN_HSRR0	/* save HSRR0 */
+	.else
+	mfspr	r11,SPRN_SRR0	/* save SRR0 */
+	.endif
+	LOAD_HANDLER(r12, \label\())
+	mtctr	r12
+	.if \hsrr
+	mfspr	r12,SPRN_HSRR1	/* and HSRR1 */
+	.else
+	mfspr	r12,SPRN_SRR1	/* and SRR1 */
+	.endif
+	li	r10,MSR_RI
+	mtmsrd	r10,1		/* Set RI (EE=0) */
+	bctr
+#else
+	.if \hsrr
+	mfspr	r11,SPRN_HSRR0	/* save HSRR0 */
+	mfspr	r12,SPRN_HSRR1	/* and HSRR1 */
+	.else
+	mfspr	r11,SPRN_SRR0	/* save SRR0 */
+	mfspr	r12,SPRN_SRR1	/* and SRR1 */
+	.endif
+	li	r10,MSR_RI
+	mtmsrd	r10,1		/* Set RI (EE=0) */
+	b	\label
+#endif
+.endm
+
+/*
+ * Branch to label using its 0xC000 address. This results in instruction
+ * address suitable for MSR[IR]=0 or 1, which allows relocation to be turned
+ * on using mtmsr rather than rfid.
+ *
+ * This could set the 0xc bits for !RELOCATABLE as an immediate, rather than
+ * load KBASE for a slight optimisation.
+ */
+#define BRANCH_TO_C000(reg, label) \
+	__LOAD_FAR_HANDLER(reg, label); \
+	mtctr	reg; \
+	bctr
+
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+/*
+ * If hv is possible, interrupts come into the hv version
+ * of the kvmppc_interrupt code, which then jumps to the PR handler,
+ * kvmppc_interrupt_pr, if the guest is a PR guest.
+ */
+#define kvmppc_interrupt kvmppc_interrupt_hv
+#else
+#define kvmppc_interrupt kvmppc_interrupt_pr
+#endif
+
+.macro KVMTEST hsrr, n
+	lbz	r10,HSTATE_IN_GUEST(r13)
+	cmpwi	r10,0
+	.if \hsrr
+	bne	do_kvm_H\n
+	.else
+	bne	do_kvm_\n
+	.endif
+.endm
+
+.macro KVM_HANDLER area, hsrr, n, skip
+	.if \skip
+	cmpwi	r10,KVM_GUEST_MODE_SKIP
+	beq	89f
+	.else
+BEGIN_FTR_SECTION_NESTED(947)
+	ld	r10,\area+EX_CFAR(r13)
+	std	r10,HSTATE_CFAR(r13)
+END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947)
+	.endif
+
+BEGIN_FTR_SECTION_NESTED(948)
+	ld	r10,\area+EX_PPR(r13)
+	std	r10,HSTATE_PPR(r13)
+END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948)
+	ld	r10,\area+EX_R10(r13)
+	std	r12,HSTATE_SCRATCH0(r13)
+	sldi	r12,r9,32
+	/* HSRR variants have the 0x2 bit added to their trap number */
+	.if \hsrr
+	ori	r12,r12,(\n + 0x2)
+	.else
+	ori	r12,r12,(\n)
+	.endif
+
+#ifdef CONFIG_RELOCATABLE
+	/*
+	 * KVM requires __LOAD_FAR_HANDLER because kvmppc_interrupt lives
+	 * outside the head section. CONFIG_RELOCATABLE KVM expects CTR
+	 * to be saved in HSTATE_SCRATCH1.
+	 */
+	mfctr	r9
+	std	r9,HSTATE_SCRATCH1(r13)
+	__LOAD_FAR_HANDLER(r9, kvmppc_interrupt)
+	mtctr	r9
+	ld	r9,\area+EX_R9(r13)
+	bctr
+#else
+	ld	r9,\area+EX_R9(r13)
+	b	kvmppc_interrupt
+#endif
+
+	.if \skip
+89:	mtocrf	0x80,r9
+	ld	r9,\area+EX_R9(r13)
+	ld	r10,\area+EX_R10(r13)
+	.if \hsrr
+	b	kvmppc_skip_Hinterrupt
+	.else
+	b	kvmppc_skip_interrupt
+	.endif
+	.endif
+.endm
+
+#else
+.macro KVMTEST hsrr, n
+.endm
+.macro KVM_HANDLER area, hsrr, n, skip
+.endm
+#endif
+
+#define EXCEPTION_PROLOG_COMMON_1() \
+	std	r9,_CCR(r1);		/* save CR in stackframe	*/ \
+	std	r11,_NIP(r1);		/* save SRR0 in stackframe	*/ \
+	std	r12,_MSR(r1);		/* save SRR1 in stackframe	*/ \
+	std	r10,0(r1);		/* make stack chain pointer	*/ \
+	std	r0,GPR0(r1);		/* save r0 in stackframe	*/ \
+	std	r10,GPR1(r1);		/* save r1 in stackframe	*/
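
[Editor's note] The trap-number encoding in KVM_HANDLER packs two things into r12: the saved CR in the upper 32 bits and the vector number in the lower. For, say, the 0xe80 HV doorbell (values illustrative):

	sldi	r12,r9,32	# r12 = saved CR << 32
	ori	r12,r12,0xe82	# \n + 0x2: the HSRR flavour of vector 0xe80

kvmppc_interrupt can then tell an HV-delivered 0xe80 apart from a standard one by the 0x2 bit in the low half.
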
+
+/* Save original regs values from save area to stack frame. */
+#define EXCEPTION_PROLOG_COMMON_2(area) \
+	ld	r9,area+EX_R9(r13);	/* move r9, r10 to stackframe	*/ \
+	ld	r10,area+EX_R10(r13); \
+	std	r9,GPR9(r1); \
+	std	r10,GPR10(r1); \
+	ld	r9,area+EX_R11(r13);	/* move r11 - r13 to stackframe	*/ \
+	ld	r10,area+EX_R12(r13); \
+	ld	r11,area+EX_R13(r13); \
+	std	r9,GPR11(r1); \
+	std	r10,GPR12(r1); \
+	std	r11,GPR13(r1); \
+BEGIN_FTR_SECTION_NESTED(66); \
+	ld	r10,area+EX_CFAR(r13); \
+	std	r10,ORIG_GPR3(r1); \
+END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \
+	GET_CTR(r10, area); \
+	std	r10,_CTR(r1);
+
+#define EXCEPTION_PROLOG_COMMON_3(trap) \
+	std	r2,GPR2(r1);		/* save r2 in stackframe	*/ \
+	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe	*/ \
+	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe	*/ \
+	mflr	r9;			/* Get LR, later save to stack	*/ \
+	ld	r2,PACATOC(r13);	/* get kernel TOC into r2	*/ \
+	std	r9,_LINK(r1); \
+	lbz	r10,PACAIRQSOFTMASK(r13); \
+	mfspr	r11,SPRN_XER;		/* save XER in stackframe	*/ \
+	std	r10,SOFTE(r1); \
+	std	r11,_XER(r1); \
+	li	r9,(trap)+1; \
+	std	r9,_TRAP(r1);		/* set trap number		*/ \
+	li	r10,0; \
+	ld	r11,exception_marker@toc(r2); \
+	std	r10,RESULT(r1);		/* clear regs->result		*/ \
+	std	r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame	*/
+
+/*
+ * On entry r13 points to the paca, r9-r13 are saved in the paca,
+ * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
+ * SRR1, and relocation is on.
+ */
+#define EXCEPTION_COMMON(area, trap) \
+	andi.	r10,r12,MSR_PR;		/* See if coming from user	*/ \
+	mr	r10,r1;			/* Save r1			*/ \
+	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack	*/ \
+	beq-	1f; \
+	ld	r1,PACAKSAVE(r13);	/* kernel stack to use		*/ \
+1:	tdgei	r1,-INT_FRAME_SIZE;	/* trap if r1 is in userspace	*/ \
+	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0; \
+3:	EXCEPTION_PROLOG_COMMON_1(); \
+	kuap_save_amr_and_lock r9, r10, cr1, cr0; \
+	beq	4f;			/* if from kernel mode		*/ \
+	ACCOUNT_CPU_USER_ENTRY(r13, r9, r10); \
+	SAVE_PPR(area, r9); \
+4:	EXCEPTION_PROLOG_COMMON_2(area); \
+	EXCEPTION_PROLOG_COMMON_3(trap); \
+	ACCOUNT_STOLEN_TIME
+
+/*
+ * Exception where stack is already set in r1, r1 is saved in r10.
+ * PPR save and CPU accounting is not done (for some reason).
+ */
+#define EXCEPTION_COMMON_STACK(area, trap) \
+	EXCEPTION_PROLOG_COMMON_1(); \
+	kuap_save_amr_and_lock r9, r10, cr1; \
+	EXCEPTION_PROLOG_COMMON_2(area); \
+	EXCEPTION_PROLOG_COMMON_3(trap)
+
+/*
+ * Restore all registers including H/SRR0/1 saved in a stack frame of a
+ * standard exception.
+ */
+.macro EXCEPTION_RESTORE_REGS hsrr
+	/* Move original SRR0 and SRR1 into the respective regs */
+	ld	r9,_MSR(r1)
+	.if \hsrr
+	mtspr	SPRN_HSRR1,r9
+	.else
+	mtspr	SPRN_SRR1,r9
+	.endif
+	ld	r9,_NIP(r1)
+	.if \hsrr
+	mtspr	SPRN_HSRR0,r9
+	.else
+	mtspr	SPRN_SRR0,r9
+	.endif
+	ld	r9,_CTR(r1)
+	mtctr	r9
+	ld	r9,_XER(r1)
+	mtxer	r9
+	ld	r9,_LINK(r1)
+	mtlr	r9
+	ld	r9,_CCR(r1)
+	mtcr	r9
+	REST_8GPRS(2, r1)
+	REST_4GPRS(10, r1)
+	REST_GPR(0, r1)
+	/* restore original r1. */
+	ld	r1,GPR1(r1)
+.endm
+
+#define RUNLATCH_ON \
+BEGIN_FTR_SECTION \
+	ld	r3, PACA_THREAD_INFO(r13); \
+	ld	r4,TI_LOCAL_FLAGS(r3); \
+	andi.	r0,r4,_TLF_RUNLATCH; \
+	beql	ppc64_runlatch_on_trampoline; \
+END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
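
[Editor's note] The `tdgei r1,-INT_FRAME_SIZE` line in EXCEPTION_COMMON is a compact stack sanity check: tdgei traps if r1, compared as a signed 64-bit value, is greater than or equal to the immediate. Kernel stack pointers (0xc000...) are hugely negative when viewed as signed, so they never trap; a userspace-range pointer (positive as signed) does, and the EMIT_BUG_ENTRY attached to the trap turns it into a BUG report. Roughly (values illustrative):

	# r1 = 0xc00000000f000000 -> signed negative, < -INT_FRAME_SIZE: no trap
	# r1 = 0x00007fffdeadbeef -> signed positive, >= -INT_FRAME_SIZE: trap (BUG)
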
+
+/*
+ * When the idle code in power4_idle puts the CPU into NAP mode,
+ * it has to do so in a loop, and relies on the external interrupt
+ * and decrementer interrupt entry code to get it out of the loop.
+ * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
+ * to signal that it is in the loop and needs help to get out.
+ */
+#ifdef CONFIG_PPC_970_NAP
+#define FINISH_NAP \
+BEGIN_FTR_SECTION \
+	ld	r11, PACA_THREAD_INFO(r13); \
+	ld	r9,TI_LOCAL_FLAGS(r11); \
+	andi.	r10,r9,_TLF_NAPPING; \
+	bnel	power4_fixup_nap; \
+END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
+#else
+#define FINISH_NAP
+#endif
+
+/*
+ * Following are the BOOK3S exception handler helper macros.
+ * Handlers come in a number of types, and each type has a number of varieties.
+ *
+ * EXC_REAL_*   - real, unrelocated exception vectors
+ * EXC_VIRT_*   - virt (AIL), unrelocated exception vectors
+ * TRAMP_REAL_* - real, unrelocated helpers (virt can call these)
+ * TRAMP_VIRT_* - virt, unreloc helpers (in practice, real can use)
+ * TRAMP_KVM    - KVM handlers that get put into real, unrelocated
+ * EXC_COMMON   - virt, relocated common handlers
+ *
+ * The EXC handlers are given a name, and branch to name_common, or the
+ * appropriate KVM or masking function. Vector handler varieties are as
+ * follows:
+ *
+ * EXC_{REAL|VIRT}_BEGIN/END - used to open-code the exception
+ *
+ * EXC_{REAL|VIRT} - standard exception
+ *
+ * EXC_{REAL|VIRT}_suffix
+ *     where _suffix is:
+ *   - _MASKABLE - maskable exception
+ *   - _OOL      - out of line with trampoline to common handler
+ *   - _HV       - HV exception
+ *
+ * There can be combinations, e.g., EXC_VIRT_OOL_MASKABLE_HV
+ *
+ * KVM handlers come in the following varieties:
+ * TRAMP_KVM
+ * TRAMP_KVM_SKIP
+ * TRAMP_KVM_HV
+ * TRAMP_KVM_HV_SKIP
+ *
+ * COMMON handlers come in the following varieties:
+ * EXC_COMMON_BEGIN/END - used to open-code the handler
+ * EXC_COMMON
+ * EXC_COMMON_ASYNC
+ *
+ * TRAMP_REAL and TRAMP_VIRT can be used with BEGIN/END. KVM
+ * and OOL handlers are implemented as types of TRAMP and TRAMP_VIRT handlers.
+ */
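
[Editor's note] As a concrete instance of the naming scheme described above, a plain standard vector such as `EXC_REAL(instruction_access, 0x400, 0x80)` expands, via __EXC_REAL with PACA_EXGEN, to approximately:

	EXC_REAL_BEGIN(instruction_access, 0x400, 0x80)
	EXCEPTION_PROLOG_0 PACA_EXGEN
	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, 0x400, 0, 0, 0
	EXCEPTION_PROLOG_2_REAL instruction_access_common, EXC_STD, 1
	EXC_REAL_END(instruction_access, 0x400, 0x80)

i.e. the vector saves scratch state, runs the KVM test for vector 0x400, and rfids to instruction_access_common.
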
+
+#define __EXC_REAL(name, start, size, area) \
+	EXC_REAL_BEGIN(name, start, size); \
+	EXCEPTION_PROLOG_0 area ; \
+	EXCEPTION_PROLOG_1 EXC_STD, area, 1, start, 0, 0, 0 ; \
+	EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1 ; \
+	EXC_REAL_END(name, start, size)
+
+#define EXC_REAL(name, start, size) \
+	__EXC_REAL(name, start, size, PACA_EXGEN)
+
+#define __EXC_VIRT(name, start, size, realvec, area) \
+	EXC_VIRT_BEGIN(name, start, size); \
+	EXCEPTION_PROLOG_0 area ; \
+	EXCEPTION_PROLOG_1 EXC_STD, area, 0, realvec, 0, 0, 0; \
+	EXCEPTION_PROLOG_2_VIRT name##_common, EXC_STD ; \
+	EXC_VIRT_END(name, start, size)
+
+#define EXC_VIRT(name, start, size, realvec) \
+	__EXC_VIRT(name, start, size, realvec, PACA_EXGEN)
+
+#define EXC_REAL_MASKABLE(name, start, size, bitmask) \
+	EXC_REAL_BEGIN(name, start, size); \
+	EXCEPTION_PROLOG_0 PACA_EXGEN ; \
+	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, start, 0, 0, bitmask ; \
+	EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1 ; \
+	EXC_REAL_END(name, start, size)
+
+#define EXC_VIRT_MASKABLE(name, start, size, realvec, bitmask) \
+	EXC_VIRT_BEGIN(name, start, size); \
+	EXCEPTION_PROLOG_0 PACA_EXGEN ; \
+	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, realvec, 0, 0, bitmask ; \
+	EXCEPTION_PROLOG_2_VIRT name##_common, EXC_STD ; \
+	EXC_VIRT_END(name, start, size)
+
+#define EXC_REAL_HV(name, start, size) \
+	EXC_REAL_BEGIN(name, start, size); \
+	EXCEPTION_PROLOG_0 PACA_EXGEN; \
+	EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, start, 0, 0, 0 ; \
+	EXCEPTION_PROLOG_2_REAL name##_common, EXC_HV, 1 ; \
+	EXC_REAL_END(name, start, size)
+
+#define EXC_VIRT_HV(name, start, size, realvec) \
+	EXC_VIRT_BEGIN(name, start, size); \
+	EXCEPTION_PROLOG_0 PACA_EXGEN; \
+	EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, realvec, 0, 0, 0 ; \
+	EXCEPTION_PROLOG_2_VIRT name##_common, EXC_HV ; \
+	EXC_VIRT_END(name, start, size)
+
+#define __EXC_REAL_OOL(name, start, size) \
+	EXC_REAL_BEGIN(name, start, size); \
+	EXCEPTION_PROLOG_0 PACA_EXGEN ; \
+	b	tramp_real_##name ; \
+	EXC_REAL_END(name, start, size)
+
+#define __TRAMP_REAL_OOL(name, vec) \
+	TRAMP_REAL_BEGIN(tramp_real_##name); \
+	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, vec, 0, 0, 0 ; \
+	EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1
+
+#define EXC_REAL_OOL(name, start, size) \
+	__EXC_REAL_OOL(name, start, size); \
+	__TRAMP_REAL_OOL(name, start)
+
+#define __EXC_REAL_OOL_MASKABLE(name, start, size) \
+	__EXC_REAL_OOL(name, start, size)
+
+#define __TRAMP_REAL_OOL_MASKABLE(name, vec, bitmask) \
+	TRAMP_REAL_BEGIN(tramp_real_##name); \
+	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, vec, 0, 0, bitmask ; \
+	EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1
+
+#define EXC_REAL_OOL_MASKABLE(name, start, size, bitmask) \
+	__EXC_REAL_OOL_MASKABLE(name, start, size); \
+	__TRAMP_REAL_OOL_MASKABLE(name, start, bitmask)
+
+#define __EXC_REAL_OOL_HV(name, start, size) \
+	__EXC_REAL_OOL(name, start, size)
+
+#define __TRAMP_REAL_OOL_HV(name, vec) \
+	TRAMP_REAL_BEGIN(tramp_real_##name); \
+	EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, vec, 0, 0, 0 ; \
+	EXCEPTION_PROLOG_2_REAL name##_common, EXC_HV, 1
+
+#define EXC_REAL_OOL_HV(name, start, size) \
+	__EXC_REAL_OOL_HV(name, start, size); \
+	__TRAMP_REAL_OOL_HV(name, start)
+
+#define __EXC_REAL_OOL_MASKABLE_HV(name, start, size) \
+	__EXC_REAL_OOL(name, start, size)
+
+#define __TRAMP_REAL_OOL_MASKABLE_HV(name, vec, bitmask) \
+	TRAMP_REAL_BEGIN(tramp_real_##name); \
+	EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, vec, 0, 0, bitmask ; \
+	EXCEPTION_PROLOG_2_REAL name##_common, EXC_HV, 1
+
+#define EXC_REAL_OOL_MASKABLE_HV(name, start, size, bitmask) \
+	__EXC_REAL_OOL_MASKABLE_HV(name, start, size); \
+	__TRAMP_REAL_OOL_MASKABLE_HV(name, start, bitmask)
+
+#define __EXC_VIRT_OOL(name, start, size) \
+	EXC_VIRT_BEGIN(name, start, size); \
+	EXCEPTION_PROLOG_0 PACA_EXGEN ; \
+	b	tramp_virt_##name; \
+	EXC_VIRT_END(name, start, size)
+
+#define __TRAMP_VIRT_OOL(name, realvec) \
+	TRAMP_VIRT_BEGIN(tramp_virt_##name); \
+	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, realvec, 0, 0, 0 ; \
+	EXCEPTION_PROLOG_2_VIRT name##_common, EXC_STD
+
+#define EXC_VIRT_OOL(name, start, size, realvec) \
+	__EXC_VIRT_OOL(name, start, size); \
+	__TRAMP_VIRT_OOL(name, realvec)
+
+#define __EXC_VIRT_OOL_MASKABLE(name, start, size) \
+	__EXC_VIRT_OOL(name, start, size)
+
+#define __TRAMP_VIRT_OOL_MASKABLE(name, realvec, bitmask) \
+	TRAMP_VIRT_BEGIN(tramp_virt_##name); \
+	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, realvec, 0, 0, bitmask ; \
+	EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1
+
+#define EXC_VIRT_OOL_MASKABLE(name, start, size, realvec, bitmask) \
+	__EXC_VIRT_OOL_MASKABLE(name, start, size); \
+	__TRAMP_VIRT_OOL_MASKABLE(name, realvec, bitmask)
+
+#define __EXC_VIRT_OOL_HV(name, start, size) \
+	__EXC_VIRT_OOL(name, start, size)
+
+#define __TRAMP_VIRT_OOL_HV(name, realvec) \
+	TRAMP_VIRT_BEGIN(tramp_virt_##name); \
+	EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, realvec, 0, 0, 0 ; \
+	EXCEPTION_PROLOG_2_VIRT name##_common, EXC_HV
+
+#define EXC_VIRT_OOL_HV(name, start, size, realvec) \
+	__EXC_VIRT_OOL_HV(name, start, size); \
+	__TRAMP_VIRT_OOL_HV(name, realvec)
+
+#define __EXC_VIRT_OOL_MASKABLE_HV(name, start, size) \
+	__EXC_VIRT_OOL(name, start, size)
+
+#define __TRAMP_VIRT_OOL_MASKABLE_HV(name, realvec, bitmask) \
+	TRAMP_VIRT_BEGIN(tramp_virt_##name); \
+	EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, realvec, 0, 0, bitmask ; \
+	EXCEPTION_PROLOG_2_VIRT name##_common, EXC_HV
+
+#define EXC_VIRT_OOL_MASKABLE_HV(name, start, size, realvec, bitmask) \
+	__EXC_VIRT_OOL_MASKABLE_HV(name, start, size); \
+	__TRAMP_VIRT_OOL_MASKABLE_HV(name, realvec, bitmask)
+
+#define TRAMP_KVM(area, n) \
+	TRAMP_KVM_BEGIN(do_kvm_##n); \
+	KVM_HANDLER area, EXC_STD, n, 0
+
+#define TRAMP_KVM_SKIP(area, n) \
+	TRAMP_KVM_BEGIN(do_kvm_##n); \
+	KVM_HANDLER area, EXC_STD, n, 1
+
+#define TRAMP_KVM_HV(area, n) \
+	TRAMP_KVM_BEGIN(do_kvm_H##n); \
+	KVM_HANDLER area, EXC_HV, n, 0
+
+#define TRAMP_KVM_HV_SKIP(area, n) \
+	TRAMP_KVM_BEGIN(do_kvm_H##n); \
+	KVM_HANDLER area, EXC_HV, n, 1
+
+#define EXC_COMMON(name, realvec, hdlr) \
+	EXC_COMMON_BEGIN(name); \
+	EXCEPTION_COMMON(PACA_EXGEN, realvec); \
+	bl	save_nvgprs; \
+	RECONCILE_IRQ_STATE(r10, r11); \
+	addi	r3,r1,STACK_FRAME_OVERHEAD; \
+	bl	hdlr; \
+	b	ret_from_except
+
+/*
+ * Like EXC_COMMON, but for exceptions that can occur in the idle task and
+ * therefore need the special idle handling (finish nap and runlatch)
+ */
+#define EXC_COMMON_ASYNC(name, realvec, hdlr) \
+	EXC_COMMON_BEGIN(name); \
+	EXCEPTION_COMMON(PACA_EXGEN, realvec); \
+	FINISH_NAP; \
+	RECONCILE_IRQ_STATE(r10, r11); \
+	RUNLATCH_ON; \
+	addi	r3,r1,STACK_FRAME_OVERHEAD; \
+	bl	hdlr; \
+	b	ret_from_except_lite
+
 
 /*
  * There are a few constraints to be concerned with.
  * - Real mode exceptions code/data must be located at their physical location.
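
[Editor's note] Tying the pieces together: a complete asynchronous handler needs only one line at the common level, e.g. the external-interrupt handler later in this patch:

	EXC_COMMON_ASYNC(hardware_interrupt_common, 0x500, do_IRQ)

which yields an EXC_COMMON_BEGIN(hardware_interrupt_common) that builds the pt_regs frame via EXCEPTION_COMMON, runs the FINISH_NAP/RUNLATCH_ON idle bookkeeping, and calls the C handler do_IRQ before branching to ret_from_except_lite.
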
@@ -107,6 +799,7 @@ __start_interrupts:
 
 EXC_VIRT_NONE(0x4000, 0x100)
 
+EXC_REAL_BEGIN(system_reset, 0x100, 0x100)
 #ifdef CONFIG_PPC_P7_NAP
 /*
  * If running native on arch 2.06 or later, check if we are waking up
@@ -114,60 +807,72 @@ EXC_VIRT_NONE(0x4000, 0x100)
  * bits 46:47. A non-0 value indicates that we are coming from a power
  * saving state. The idle wakeup handler initially runs in real mode,
  * but we branch to the 0xc000... address so we can turn on relocation
- * with mtmsr.
+ * with mtmsrd later, after SPRs are restored.
+ *
+ * Careful to minimise cost for the fast path (idle wakeup) while
+ * also avoiding clobbering CFAR for the debug path (non-idle).
+ *
+ * For the idle wake case volatile registers can be clobbered, which
+ * is why we use those initially. If it turns out to not be an idle
+ * wake, carefully put everything back the way it was, so we can use
+ * common exception macros to handle it.
  */
-#define IDLETEST(n) \
-	BEGIN_FTR_SECTION ; \
-	mfspr	r10,SPRN_SRR1 ; \
-	rlwinm.	r10,r10,47-31,30,31 ; \
-	beq-	1f ; \
-	cmpwi	cr1,r10,2 ; \
-	mfspr	r3,SPRN_SRR1 ; \
-	bltlr	cr1 ;	/* no state loss, return to idle caller */ \
-	BRANCH_TO_C000(r10, system_reset_idle_common) ; \
-1: \
-	KVMTEST_PR(n) ; \
-	END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
-#else
-#define IDLETEST NOTEST
+BEGIN_FTR_SECTION
+	SET_SCRATCH0(r13)
+	GET_PACA(r13)
+	std	r3,PACA_EXNMI+0*8(r13)
+	std	r4,PACA_EXNMI+1*8(r13)
+	std	r5,PACA_EXNMI+2*8(r13)
+	mfspr	r3,SPRN_SRR1
+	mfocrf	r4,0x80
+	rlwinm.	r5,r3,47-31,30,31
+	bne+	system_reset_idle_wake
+	/* Not powersave wakeup. Restore regs for regular interrupt handler. */
+	mtocrf	0x80,r4
+	ld	r3,PACA_EXNMI+0*8(r13)
+	ld	r4,PACA_EXNMI+1*8(r13)
+	ld	r5,PACA_EXNMI+2*8(r13)
+	GET_SCRATCH0(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
 #endif
 
-EXC_REAL_BEGIN(system_reset, 0x100, 0x100)
-	SET_SCRATCH0(r13)
+	EXCEPTION_PROLOG_0 PACA_EXNMI
+	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXNMI, 1, 0x100, 0, 0, 0
+	EXCEPTION_PROLOG_2_REAL system_reset_common, EXC_STD, 0
 	/*
 	 * MSR_RI is not enabled, because PACA_EXNMI and nmi stack is
 	 * being used, so a nested NMI exception would corrupt it.
+	 *
+	 * In theory, we should not enable relocation here if it was disabled
+	 * in SRR1, because the MMU may not be configured to support it (e.g.,
+	 * SLB may have been cleared). In practice, there should only be a few
+	 * small windows where that's the case, and sreset is considered to
+	 * be dangerous anyway.
 	 */
-	EXCEPTION_PROLOG_NORI(PACA_EXNMI, system_reset_common, EXC_STD,
-			      IDLETEST, 0x100)
-
 EXC_REAL_END(system_reset, 0x100, 0x100)
+
 EXC_VIRT_NONE(0x4100, 0x100)
 TRAMP_KVM(PACA_EXNMI, 0x100)
 
 #ifdef CONFIG_PPC_P7_NAP
-EXC_COMMON_BEGIN(system_reset_idle_common)
-	/*
-	 * This must be a direct branch (without linker branch stub) because
-	 * we can not use TOC at this point as r2 may not be restored yet.
-	 */
-	b	idle_return_gpr_loss
+TRAMP_REAL_BEGIN(system_reset_idle_wake)
+	/* We are waking up from idle, so may clobber any volatile register */
+	cmpwi	cr1,r5,2
+	bltlr	cr1	/* no state loss, return to idle caller with r3=SRR1 */
+	BRANCH_TO_C000(r12, DOTSYM(idle_return_gpr_loss))
 #endif
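
[Editor's note] The `rlwinm. r5,r3,47-31,30,31` test above extracts SRR1 bits 46:47 (IBM numbering), the power-saving wakeup reason field. Rotating the low word left by 16 brings those two bits to the least-significant positions, and the 30,31 mask keeps only them:

	mfspr	r3,SPRN_SRR1
	rlwinm.	r5,r3,16,30,31	# r5 = SRR1[46:47]; CR0 set from the result

A non-zero r5 means a powersave wakeup (branch to system_reset_idle_wake); r5 >= 2 additionally means state loss, which is what the later `cmpwi cr1,r5,2` in system_reset_idle_wake checks.
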
 
+#ifdef CONFIG_PPC_PSERIES
 /*
- * Set IRQS_ALL_DISABLED unconditionally so arch_irqs_disabled does
- * the right thing. We do not want to reconcile because that goes
- * through irq tracing which we don't want in NMI.
- *
- * Save PACAIRQHAPPENED because some code will do a hard disable
- * (e.g., xmon). So we want to restore this back to where it was
- * when we return. DAR is unused in the stack, so save it there.
+ * Vectors for the FWNMI option. Share common code.
  */
-#define ADD_RECONCILE_NMI \
-	li	r10,IRQS_ALL_DISABLED; \
-	stb	r10,PACAIRQSOFTMASK(r13); \
-	lbz	r10,PACAIRQHAPPENED(r13); \
-	std	r10,_DAR(r1)
+TRAMP_REAL_BEGIN(system_reset_fwnmi)
+	/* See comment at system_reset exception, don't turn on RI */
+	EXCEPTION_PROLOG_0 PACA_EXNMI
+	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXNMI, 0, 0x100, 0, 0, 0
+	EXCEPTION_PROLOG_2_REAL system_reset_common, EXC_STD, 0
+
+#endif /* CONFIG_PPC_PSERIES */
 
 EXC_COMMON_BEGIN(system_reset_common)
 	/*
@@ -185,15 +890,27 @@ EXC_COMMON_BEGIN(system_reset_common)
 	mr	r10,r1
 	ld	r1,PACA_NMI_EMERG_SP(r13)
 	subi	r1,r1,INT_FRAME_SIZE
-	EXCEPTION_COMMON_NORET_STACK(PACA_EXNMI, 0x100,
-			system_reset, system_reset_exception,
-			ADD_NVGPRS;ADD_RECONCILE_NMI)
+	EXCEPTION_COMMON_STACK(PACA_EXNMI, 0x100)
+	bl	save_nvgprs
+	/*
+	 * Set IRQS_ALL_DISABLED unconditionally so arch_irqs_disabled does
+	 * the right thing. We do not want to reconcile because that goes
+	 * through irq tracing which we don't want in NMI.
+	 *
+	 * Save PACAIRQHAPPENED because some code will do a hard disable
+	 * (e.g., xmon). So we want to restore this back to where it was
+	 * when we return. DAR is unused in the stack, so save it there.
+	 */
+	li	r10,IRQS_ALL_DISABLED
+	stb	r10,PACAIRQSOFTMASK(r13)
+	lbz	r10,PACAIRQHAPPENED(r13)
+	std	r10,_DAR(r1)
+
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	system_reset_exception
 
-	/* This (and MCE) can be simplified with mtmsrd L=1 */
 	/* Clear MSR_RI before setting SRR0 and SRR1. */
-	li	r0,MSR_RI
-	mfmsr	r9
-	andc	r9,r9,r0
+	li	r9,0
 	mtmsrd	r9,1
 
 	/*
@@ -211,52 +928,16 @@ EXC_COMMON_BEGIN(system_reset_common)
 	ld	r10,SOFTE(r1)
 	stb	r10,PACAIRQSOFTMASK(r13)
 
-	/*
-	 * Keep below code in synch with MACHINE_CHECK_HANDLER_WINDUP.
-	 * Should share common bits...
-	 */
-
-	/* Move original SRR0 and SRR1 into the respective regs */
-	ld	r9,_MSR(r1)
-	mtspr	SPRN_SRR1,r9
-	ld	r3,_NIP(r1)
-	mtspr	SPRN_SRR0,r3
-	ld	r9,_CTR(r1)
-	mtctr	r9
-	ld	r9,_XER(r1)
-	mtxer	r9
-	ld	r9,_LINK(r1)
-	mtlr	r9
-	REST_GPR(0, r1)
-	REST_8GPRS(2, r1)
-	REST_GPR(10, r1)
-	ld	r11,_CCR(r1)
-	mtcr	r11
-	REST_GPR(11, r1)
-	REST_2GPRS(12, r1)
-	/* restore original r1. */
-	ld	r1,GPR1(r1)
+	EXCEPTION_RESTORE_REGS EXC_STD
 	RFI_TO_USER_OR_KERNEL
 
-#ifdef CONFIG_PPC_PSERIES
-/*
- * Vectors for the FWNMI option. Share common code.
- */
-TRAMP_REAL_BEGIN(system_reset_fwnmi)
-	SET_SCRATCH0(r13)		/* save r13 */
-	/* See comment at system_reset exception */
-	EXCEPTION_PROLOG_NORI(PACA_EXNMI, system_reset_common, EXC_STD,
-			      NOTEST, 0x100)
-
-#endif /* CONFIG_PPC_PSERIES */
-
 EXC_REAL_BEGIN(machine_check, 0x200, 0x100)
 	/* This is moved out of line as it can be patched by FW, but
 	 * some code path might still want to branch into the original
 	 * vector
 	 */
-	SET_SCRATCH0(r13)		/* save r13 */
-	EXCEPTION_PROLOG_0(PACA_EXMC)
+	EXCEPTION_PROLOG_0 PACA_EXMC
 BEGIN_FTR_SECTION
 	b	machine_check_common_early
 FTR_SECTION_ELSE
@@ -265,7 +946,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
 EXC_REAL_END(machine_check, 0x200, 0x100)
 EXC_VIRT_NONE(0x4200, 0x100)
 TRAMP_REAL_BEGIN(machine_check_common_early)
-	EXCEPTION_PROLOG_1(PACA_EXMC, NOTEST, 0x200)
+	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXMC, 0, 0x200, 0, 0, 0
 	/*
 	 * Register contents:
 	 * R13		= PACA
@@ -344,19 +1025,18 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
 TRAMP_REAL_BEGIN(machine_check_pSeries)
 .globl machine_check_fwnmi
 machine_check_fwnmi:
-	SET_SCRATCH0(r13)		/* save r13 */
-	EXCEPTION_PROLOG_0(PACA_EXMC)
+	EXCEPTION_PROLOG_0 PACA_EXMC
 BEGIN_FTR_SECTION
 	b	machine_check_common_early
 END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
 machine_check_pSeries_0:
-	EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST_PR, 0x200)
+	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXMC, 1, 0x200, 1, 1, 0
 	/*
 	 * MSR_RI is not enabled, because PACA_EXMC is being used, so a
 	 * nested machine check corrupts it. machine_check_common enables
 	 * MSR_RI.
 	 */
-	EXCEPTION_PROLOG_2_NORI(machine_check_common, EXC_STD)
+	EXCEPTION_PROLOG_2_REAL machine_check_common, EXC_STD, 0
 
 TRAMP_KVM_SKIP(PACA_EXMC, 0x200)
 
@@ -365,11 +1045,7 @@ EXC_COMMON_BEGIN(machine_check_common)
 	 * Machine check is different because we use a different
 	 * save area: PACA_EXMC instead of PACA_EXGEN.
 	 */
-	mfspr	r10,SPRN_DAR
-	std	r10,PACA_EXMC+EX_DAR(r13)
-	mfspr	r10,SPRN_DSISR
-	stw	r10,PACA_EXMC+EX_DSISR(r13)
-	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
+	EXCEPTION_COMMON(PACA_EXMC, 0x200)
 	FINISH_NAP
 	RECONCILE_IRQ_STATE(r10, r11)
 	ld	r3,PACA_EXMC+EX_DAR(r13)
@@ -386,34 +1062,13 @@ EXC_COMMON_BEGIN(machine_check_common)
 
 #define MACHINE_CHECK_HANDLER_WINDUP \
 	/* Clear MSR_RI before setting SRR0 and SRR1. */ \
-	li	r0,MSR_RI; \
-	mfmsr	r9;		/* get MSR value */ \
-	andc	r9,r9,r0; \
+	li	r9,0; \
 	mtmsrd	r9,1;		/* Clear MSR_RI */ \
-	/* Move original SRR0 and SRR1 into the respective regs */ \
-	ld	r9,_MSR(r1); \
-	mtspr	SPRN_SRR1,r9; \
-	ld	r3,_NIP(r1); \
-	mtspr	SPRN_SRR0,r3; \
-	ld	r9,_CTR(r1); \
-	mtctr	r9; \
-	ld	r9,_XER(r1); \
-	mtxer	r9; \
-	ld	r9,_LINK(r1); \
-	mtlr	r9; \
-	REST_GPR(0, r1); \
-	REST_8GPRS(2, r1); \
-	REST_GPR(10, r1); \
-	ld	r11,_CCR(r1); \
-	mtcr	r11; \
-	/* Decrement paca->in_mce. */ \
+	/* Decrement paca->in_mce now RI is clear. */ \
 	lhz	r12,PACA_IN_MCE(r13); \
 	subi	r12,r12,1; \
 	sth	r12,PACA_IN_MCE(r13); \
-	REST_GPR(11, r1); \
-	REST_2GPRS(12, r1); \
-	/* restore original r1. */ \
-	ld	r1,GPR1(r1)
+	EXCEPTION_RESTORE_REGS EXC_STD
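
[Editor's note] The simplification from mfmsr/andc to `li r9,0; mtmsrd r9,1` leans on the mtmsrd L=1 form, which updates only MSR[RI] and MSR[EE] from the source register and leaves every other MSR bit alone, so no MSR read is needed:

	li	r9,0
	mtmsrd	r9,1	# clear RI (and EE, already 0 here); rest of MSR untouched

This is exactly the "This (and MCE) can be simplified with mtmsrd L=1" cleanup that the old comment, removed earlier in this patch, had flagged.
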
 
 #ifdef CONFIG_PPC_P7_NAP
 /*
@@ -472,10 +1127,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
 	 *
 	 * Go back to nap/sleep/winkle mode again if (b) is true.
 	 */
-	BEGIN_FTR_SECTION
+BEGIN_FTR_SECTION
 	rlwinm.	r11,r12,47-31,30,31
 	bne	machine_check_idle_common
-	END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
 #endif
 
 	/*
@@ -557,8 +1212,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
 9:
 	/* Deliver the machine check to host kernel in V mode. */
 	MACHINE_CHECK_HANDLER_WINDUP
-	SET_SCRATCH0(r13)		/* save r13 */
-	EXCEPTION_PROLOG_0(PACA_EXMC)
+	EXCEPTION_PROLOG_0 PACA_EXMC
 	b	machine_check_pSeries_0
 
 EXC_COMMON_BEGIN(unrecover_mce)
@@ -582,33 +1236,18 @@ EXC_COMMON_BEGIN(mce_return)
 	b	.
 
 EXC_REAL_BEGIN(data_access, 0x300, 0x80)
-SET_SCRATCH0(r13)		/* save r13 */
-EXCEPTION_PROLOG_0(PACA_EXGEN)
+	EXCEPTION_PROLOG_0 PACA_EXGEN
 	b	tramp_real_data_access
 EXC_REAL_END(data_access, 0x300, 0x80)
 
 TRAMP_REAL_BEGIN(tramp_real_data_access)
-EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, 0x300)
-	/*
-	 * DAR/DSISR must be read before setting MSR[RI], because
-	 * a d-side MCE will clobber those registers so is not
-	 * recoverable if they are live.
-	 */
-	mfspr	r10,SPRN_DAR
-	mfspr	r11,SPRN_DSISR
-	std	r10,PACA_EXGEN+EX_DAR(r13)
-	stw	r11,PACA_EXGEN+EX_DSISR(r13)
-EXCEPTION_PROLOG_2(data_access_common, EXC_STD)
+	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, 0x300, 1, 1, 0
+	EXCEPTION_PROLOG_2_REAL data_access_common, EXC_STD, 1
 
 EXC_VIRT_BEGIN(data_access, 0x4300, 0x80)
-SET_SCRATCH0(r13)		/* save r13 */
-EXCEPTION_PROLOG_0(PACA_EXGEN)
-EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x300)
-	mfspr	r10,SPRN_DAR
-	mfspr	r11,SPRN_DSISR
-	std	r10,PACA_EXGEN+EX_DAR(r13)
-	stw	r11,PACA_EXGEN+EX_DSISR(r13)
-EXCEPTION_PROLOG_2_RELON(data_access_common, EXC_STD)
+	EXCEPTION_PROLOG_0 PACA_EXGEN
+	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, 0x300, 1, 1, 0
+	EXCEPTION_PROLOG_2_VIRT data_access_common, EXC_STD
 EXC_VIRT_END(data_access, 0x4300, 0x80)
 
 TRAMP_KVM_SKIP(PACA_EXGEN, 0x300)
 
@@ -620,7 +1259,7 @@ EXC_COMMON_BEGIN(data_access_common)
 	 * r9 - r13 are saved in paca->exgen.
 	 * EX_DAR and EX_DSISR have saved DAR/DSISR
 	 */
-	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
+	EXCEPTION_COMMON(PACA_EXGEN, 0x300)
 	RECONCILE_IRQ_STATE(r10, r11)
 	ld	r12,_MSR(r1)
 	ld	r3,PACA_EXGEN+EX_DAR(r13)
@@ -636,30 +1275,24 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
 
 
 EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
-SET_SCRATCH0(r13)		/* save r13 */
-EXCEPTION_PROLOG_0(PACA_EXSLB)
+	EXCEPTION_PROLOG_0 PACA_EXSLB
 	b	tramp_real_data_access_slb
 EXC_REAL_END(data_access_slb, 0x380, 0x80)
 
 TRAMP_REAL_BEGIN(tramp_real_data_access_slb)
-EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380)
-	mfspr	r10,SPRN_DAR
-	std	r10,PACA_EXSLB+EX_DAR(r13)
-EXCEPTION_PROLOG_2(data_access_slb_common, EXC_STD)
+	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXSLB, 1, 0x380, 1, 0, 0
+	EXCEPTION_PROLOG_2_REAL data_access_slb_common, EXC_STD, 1
 
 EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80)
-SET_SCRATCH0(r13)		/* save r13 */
-EXCEPTION_PROLOG_0(PACA_EXSLB)
-EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
-	mfspr	r10,SPRN_DAR
-	std	r10,PACA_EXSLB+EX_DAR(r13)
-EXCEPTION_PROLOG_2_RELON(data_access_slb_common, EXC_STD)
+	EXCEPTION_PROLOG_0 PACA_EXSLB
+	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXSLB, 0, 0x380, 1, 0, 0
+	EXCEPTION_PROLOG_2_VIRT data_access_slb_common, EXC_STD
 EXC_VIRT_END(data_access_slb, 0x4380, 0x80)
 
 TRAMP_KVM_SKIP(PACA_EXSLB, 0x380)
 
 EXC_COMMON_BEGIN(data_access_slb_common)
-	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXSLB)
+	EXCEPTION_COMMON(PACA_EXSLB, 0x380)
 	ld	r4,PACA_EXSLB+EX_DAR(r13)
 	std	r4,_DAR(r1)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
@@ -689,7 +1322,7 @@ EXC_VIRT(instruction_access, 0x4400, 0x80, 0x400)
 TRAMP_KVM(PACA_EXGEN, 0x400)
 
 EXC_COMMON_BEGIN(instruction_access_common)
-	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
+	EXCEPTION_COMMON(PACA_EXGEN, 0x400)
 	RECONCILE_IRQ_STATE(r10, r11)
 	ld	r12,_MSR(r1)
 	ld	r3,_NIP(r1)
@@ -704,18 +1337,12 @@ MMU_FTR_SECTION_ELSE
 ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
 
-EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80)
-EXCEPTION_PROLOG(PACA_EXSLB, instruction_access_slb_common, EXC_STD, KVMTEST_PR, 0x480);
-EXC_REAL_END(instruction_access_slb, 0x480, 0x80)
-
-EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80)
-EXCEPTION_RELON_PROLOG(PACA_EXSLB, instruction_access_slb_common, EXC_STD, NOTEST, 0x480);
-EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80)
-
+__EXC_REAL(instruction_access_slb, 0x480, 0x80, PACA_EXSLB)
+__EXC_VIRT(instruction_access_slb, 0x4480, 0x80, 0x480, PACA_EXSLB)
 TRAMP_KVM(PACA_EXSLB, 0x480)
 
 EXC_COMMON_BEGIN(instruction_access_slb_common)
-	EXCEPTION_PROLOG_COMMON(0x480, PACA_EXSLB)
+	EXCEPTION_COMMON(PACA_EXSLB, 0x480)
 	ld	r4,_NIP(r1)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 BEGIN_MMU_FTR_SECTION
@@ -740,25 +1367,25 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
 
 
 EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100)
-	.globl hardware_interrupt_hv;
-hardware_interrupt_hv:
-	BEGIN_FTR_SECTION
-		MASKABLE_EXCEPTION_HV(0x500, hardware_interrupt_common, IRQS_DISABLED)
-	FTR_SECTION_ELSE
-		MASKABLE_EXCEPTION(0x500, hardware_interrupt_common, IRQS_DISABLED)
-	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
+	EXCEPTION_PROLOG_0 PACA_EXGEN
+BEGIN_FTR_SECTION
+	EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, 0x500, 0, 0, IRQS_DISABLED
+	EXCEPTION_PROLOG_2_REAL hardware_interrupt_common, EXC_HV, 1
+FTR_SECTION_ELSE
+	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, 0x500, 0, 0, IRQS_DISABLED
+	EXCEPTION_PROLOG_2_REAL hardware_interrupt_common, EXC_STD, 1
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
 EXC_REAL_END(hardware_interrupt, 0x500, 0x100)
 
 EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100)
-	.globl hardware_interrupt_relon_hv;
-hardware_interrupt_relon_hv:
-	BEGIN_FTR_SECTION
-		MASKABLE_RELON_EXCEPTION_HV(0x500, hardware_interrupt_common,
-					    IRQS_DISABLED)
-	FTR_SECTION_ELSE
-		__MASKABLE_RELON_EXCEPTION(0x500, hardware_interrupt_common,
-					   EXC_STD, SOFTEN_TEST_PR, IRQS_DISABLED)
-	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
+	EXCEPTION_PROLOG_0 PACA_EXGEN
+BEGIN_FTR_SECTION
+	EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, 0x500, 0, 0, IRQS_DISABLED
+	EXCEPTION_PROLOG_2_VIRT hardware_interrupt_common, EXC_HV
+FTR_SECTION_ELSE
+	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, 0x500, 0, 0, IRQS_DISABLED
+	EXCEPTION_PROLOG_2_VIRT hardware_interrupt_common, EXC_STD
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
 EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100)
 
 TRAMP_KVM(PACA_EXGEN, 0x500)
@@ -767,30 +1394,20 @@ EXC_COMMON_ASYNC(hardware_interrupt_common, 0x500, do_IRQ)
 
 
 EXC_REAL_BEGIN(alignment, 0x600, 0x100)
-SET_SCRATCH0(r13)		/* save r13 */
-EXCEPTION_PROLOG_0(PACA_EXGEN)
-EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, 0x600)
-	mfspr	r10,SPRN_DAR
-	mfspr	r11,SPRN_DSISR
-	std	r10,PACA_EXGEN+EX_DAR(r13)
-	stw	r11,PACA_EXGEN+EX_DSISR(r13)
-EXCEPTION_PROLOG_2(alignment_common, EXC_STD)
+	EXCEPTION_PROLOG_0 PACA_EXGEN
+	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, 0x600, 1, 1, 0
+	EXCEPTION_PROLOG_2_REAL alignment_common, EXC_STD, 1
 EXC_REAL_END(alignment, 0x600, 0x100)
 
 EXC_VIRT_BEGIN(alignment, 0x4600, 0x100)
-SET_SCRATCH0(r13)		/* save r13 */
-EXCEPTION_PROLOG_0(PACA_EXGEN)
-EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x600)
-	mfspr	r10,SPRN_DAR
-	mfspr	r11,SPRN_DSISR
-	std	r10,PACA_EXGEN+EX_DAR(r13)
-	stw	r11,PACA_EXGEN+EX_DSISR(r13)
-EXCEPTION_PROLOG_2_RELON(alignment_common, EXC_STD)
+	EXCEPTION_PROLOG_0 PACA_EXGEN
+	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, 0x600, 1, 1, 0
+	EXCEPTION_PROLOG_2_VIRT alignment_common, EXC_STD
EXC_VIRT_END(alignment, 0x4600, 0x100)
 
 TRAMP_KVM(PACA_EXGEN, 0x600)
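
[Editor's note] The repeated open-coded mfspr SPRN_DAR/SPRN_DSISR sequences that these hunks delete are now expressed as the `dar` and `dsisr` flags of EXCEPTION_PROLOG_1. For the alignment vector above, `EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, 0x600, 1, 1, 0` makes the macro itself emit, in its \dar/\dsisr sections:

	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)

with the added guarantee (see the comment in the macro) that these SPRs are read before MSR[RI] is set, so a d-side machine check cannot clobber them while they are live.
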
 EXC_COMMON_BEGIN(alignment_common)
-	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
+	EXCEPTION_COMMON(PACA_EXGEN, 0x600)
 	ld	r3,PACA_EXGEN+EX_DAR(r13)
 	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
 	std	r3,_DAR(r1)
@@ -814,21 +1431,25 @@ EXC_COMMON_BEGIN(program_check_common)
 	 * we switch to the emergency stack if we're taking a TM Bad Thing from
 	 * the kernel.
 	 */
-	li	r10,MSR_PR		/* Build a mask of MSR_PR ..	*/
-	oris	r10,r10,0x200000@h	/* .. and SRR1_PROGTM		*/
-	and	r10,r10,r12		/* Mask SRR1 with that.		*/
-	srdi	r10,r10,8		/* Shift it so we can compare	*/
-	cmpldi	r10,(0x200000 >> 8)	/* .. with an immediate.	*/
-	bne	1f			/* If != go to normal path.	*/
-
-	/* SRR1 had PR=0 and SRR1_PROGTM=1, so use the emergency stack	*/
-	andi.	r10,r12,MSR_PR;		/* Set CR0 correctly for label	*/
+
+	andi.	r10,r12,MSR_PR
+	bne	2f			/* If userspace, go normal path */
+
+	andis.	r10,r12,(SRR1_PROGTM)@h
+	bne	1f			/* If TM, emergency		*/
+
+	cmpdi	r1,-INT_FRAME_SIZE	/* check if r1 is in userspace	*/
+	blt	2f			/* normal path if not		*/
+
+	/* Use the emergency stack					*/
+1:	andi.	r10,r12,MSR_PR		/* Set CR0 correctly for label	*/
 					/* 3 in EXCEPTION_PROLOG_COMMON	*/
 	mr	r10,r1			/* Save r1			*/
 	ld	r1,PACAEMERGSP(r13)	/* Use emergency stack		*/
 	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame		*/
 	b	3f			/* Jump into the macro !!	*/
-1:	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
+2:
+	EXCEPTION_COMMON(PACA_EXGEN, 0x700)
 	bl	save_nvgprs
 	RECONCILE_IRQ_STATE(r10, r11)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
@@ -840,7 +1461,7 @@ EXC_REAL(fp_unavailable, 0x800, 0x100)
 EXC_VIRT(fp_unavailable, 0x4800, 0x100, 0x800)
 TRAMP_KVM(PACA_EXGEN, 0x800)
 EXC_COMMON_BEGIN(fp_unavailable_common)
-	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
+	EXCEPTION_COMMON(PACA_EXGEN, 0x800)
 	bne	1f			/* if from user, just load it up */
 	bl	save_nvgprs
 	RECONCILE_IRQ_STATE(r10, r11)
@@ -932,6 +1553,7 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
  * without saving, though xer is not a good idea to use, as hardware may
  * interpret some bits so it may be costly to change them.
  */
+.macro SYSTEM_CALL virt
 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
 	/*
 	 * There is a little bit of juggling to get syscall and hcall
@@ -941,95 +1563,67 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
 	 * Userspace syscalls have already saved the PPR, hcalls must save
 	 * it before setting HMT_MEDIUM.
 	 */
-#define SYSCALL_KVMTEST \
-	mtctr	r13; \
-	GET_PACA(r13); \
-	std	r10,PACA_EXGEN+EX_R10(r13); \
-	INTERRUPT_TO_KERNEL; \
-	KVMTEST_PR(0xc00); /* uses r10, branch to do_kvm_0xc00_system_call */ \
-	HMT_MEDIUM; \
-	mfctr	r9;
-
+	mtctr	r13
+	GET_PACA(r13)
+	std	r10,PACA_EXGEN+EX_R10(r13)
+	INTERRUPT_TO_KERNEL
+	KVMTEST EXC_STD 0xc00 /* uses r10, branch to do_kvm_0xc00_system_call */
+	mfctr	r9
 #else
-#define SYSCALL_KVMTEST \
-	HMT_MEDIUM; \
-	mr	r9,r13; \
-	GET_PACA(r13); \
-	INTERRUPT_TO_KERNEL;
+	mr	r9,r13
+	GET_PACA(r13)
+	INTERRUPT_TO_KERNEL
 #endif
-
-#define LOAD_SYSCALL_HANDLER(reg) \
-	__LOAD_HANDLER(reg, system_call_common)
-
-/*
- * After SYSCALL_KVMTEST, we reach here with PACA in r13, r13 in r9,
- * and HMT_MEDIUM.
- */
-#define SYSCALL_REAL \
-	mfspr	r11,SPRN_SRR0 ; \
-	mfspr	r12,SPRN_SRR1 ; \
-	LOAD_SYSCALL_HANDLER(r10) ; \
-	mtspr	SPRN_SRR0,r10 ; \
-	ld	r10,PACAKMSR(r13) ; \
-	mtspr	SPRN_SRR1,r10 ; \
-	RFI_TO_KERNEL ; \
-	b	. ;	/* prevent speculative execution */
 
 #ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
-#define SYSCALL_FASTENDIAN_TEST \
-BEGIN_FTR_SECTION \
-	cmpdi	r0,0x1ebe ; \
-	beq-	1f ; \
-END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
-
-#define SYSCALL_FASTENDIAN \
-	/* Fast LE/BE switch system call */ \
-1:	mfspr	r12,SPRN_SRR1 ; \
-	xori	r12,r12,MSR_LE ; \
-	mtspr	SPRN_SRR1,r12 ; \
-	mr	r13,r9 ; \
-	RFI_TO_USER ;	/* return to userspace */ \
-	b	. ;	/* prevent speculative execution */
-#else
-#define SYSCALL_FASTENDIAN_TEST
-#define SYSCALL_FASTENDIAN
-#endif /* CONFIG_PPC_FAST_ENDIAN_SWITCH */
+BEGIN_FTR_SECTION
+	cmpdi	r0,0x1ebe
+	beq-	1f
+END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
+#endif
 
-#if defined(CONFIG_RELOCATABLE)
-	/*
-	 * We can't branch directly so we do it via the CTR which
-	 * is volatile across system calls.
-	 */
-#define SYSCALL_VIRT \
-	LOAD_SYSCALL_HANDLER(r10) ; \
-	mtctr	r10 ; \
-	mfspr	r11,SPRN_SRR0 ; \
-	mfspr	r12,SPRN_SRR1 ; \
-	li	r10,MSR_RI ; \
-	mtmsrd	r10,1 ; \
-	bctr ;
+	/* We reach here with PACA in r13, r13 in r9. */
+	mfspr	r11,SPRN_SRR0
+	mfspr	r12,SPRN_SRR1
+
+	HMT_MEDIUM
+
+	.if ! \virt
+	__LOAD_HANDLER(r10, system_call_common)
+	mtspr	SPRN_SRR0,r10
+	ld	r10,PACAKMSR(r13)
+	mtspr	SPRN_SRR1,r10
+	RFI_TO_KERNEL
+	b	.	/* prevent speculative execution */
+	.else
+	li	r10,MSR_RI
+	mtmsrd	r10,1	/* Set RI (EE=0) */
+#ifdef CONFIG_RELOCATABLE
+	__LOAD_HANDLER(r10, system_call_common)
+	mtctr	r10
+	bctr
 #else
-	/* We can branch directly */
-#define SYSCALL_VIRT \
-	mfspr	r11,SPRN_SRR0 ; \
-	mfspr	r12,SPRN_SRR1 ; \
-	li	r10,MSR_RI ; \
-	mtmsrd	r10,1 ;		/* Set RI (EE=0) */ \
-	b	system_call_common ;
+	b	system_call_common
+#endif
+	.endif
+
+#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
+	/* Fast LE/BE switch system call */
+1:	mfspr	r12,SPRN_SRR1
+	xori	r12,r12,MSR_LE
+	mtspr	SPRN_SRR1,r12
+	mr	r13,r9
+	RFI_TO_USER	/* return to userspace */
+	b	.	/* prevent speculative execution */
 #endif
+.endm
 
 EXC_REAL_BEGIN(system_call, 0xc00, 0x100)
-	SYSCALL_KVMTEST /* loads PACA into r13, and saves r13 to r9 */
-	SYSCALL_FASTENDIAN_TEST
-	SYSCALL_REAL
-	SYSCALL_FASTENDIAN
+	SYSTEM_CALL 0
 EXC_REAL_END(system_call, 0xc00, 0x100)
 EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100)
-	SYSCALL_KVMTEST /* loads PACA into r13, and saves r13 to r9 */
-	SYSCALL_FASTENDIAN_TEST
-	SYSCALL_VIRT
-	SYSCALL_FASTENDIAN
+	SYSTEM_CALL 1
 EXC_VIRT_END(system_call, 0x4c00, 0x100)
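
[Editor's note] The single SYSTEM_CALL macro now covers both entry flavours, so the two instantiations above differ only in how they reach system_call_common. A sketch of the tail each expands to:

	# SYSTEM_CALL 0 (real):  SRR0 <- handler, SRR1 <- PACAKMSR, RFI_TO_KERNEL
	# SYSTEM_CALL 1 (virt):  mtmsrd MSR_RI,1 then a direct branch
	#                        (or mtctr/bctr under CONFIG_RELOCATABLE)

The RELOCATABLE case must go via CTR because, as the old SYSCALL_VIRT comment put it, we can't branch directly, and CTR is volatile across system calls anyway.
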
 
 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
@@ -1053,7 +1647,7 @@ TRAMP_KVM_BEGIN(do_kvm_0xc00)
 	SET_SCRATCH0(r10)
 	std	r9,PACA_EXGEN+EX_R9(r13)
 	mfcr	r9
-	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
+	KVM_HANDLER PACA_EXGEN, EXC_STD, 0xc00, 0
 #endif
 
@@ -1070,7 +1664,7 @@ EXC_COMMON_BEGIN(h_data_storage_common)
 	std	r10,PACA_EXGEN+EX_DAR(r13)
 	mfspr	r10,SPRN_HDSISR
 	stw	r10,PACA_EXGEN+EX_DSISR(r13)
-	EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
+	EXCEPTION_COMMON(PACA_EXGEN, 0xe00)
 	bl	save_nvgprs
 	RECONCILE_IRQ_STATE(r10, r11)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
@@ -1104,65 +1698,55 @@ EXC_COMMON(emulation_assist_common, 0xe40, emulation_assist_interrupt)
  * first, and then eventually from there to the trampoline to get into virtual
  * mode.
  */
-__EXC_REAL_OOL_HV_DIRECT(hmi_exception, 0xe60, 0x20, hmi_exception_early)
-__TRAMP_REAL_OOL_MASKABLE_HV(hmi_exception, 0xe60, IRQS_DISABLED)
+EXC_REAL_BEGIN(hmi_exception, 0xe60, 0x20)
+	EXCEPTION_PROLOG_0 PACA_EXGEN
+	b	hmi_exception_early
+EXC_REAL_END(hmi_exception, 0xe60, 0x20)
 EXC_VIRT_NONE(0x4e60, 0x20)
 TRAMP_KVM_HV(PACA_EXGEN, 0xe60)
 TRAMP_REAL_BEGIN(hmi_exception_early)
-	EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_HV, 0xe60)
+	EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, 0xe60, 0, 0, 0
+	mfctr	r10			/* save ctr, even for !RELOCATABLE */
+	BRANCH_TO_C000(r11, hmi_exception_early_common)
+
+EXC_COMMON_BEGIN(hmi_exception_early_common)
+	mtctr	r10			/* Restore ctr */
+	mfspr	r11,SPRN_HSRR0		/* Save HSRR0 */
+	mfspr	r12,SPRN_HSRR1		/* Save HSRR1 */
 	mr	r10,r1			/* Save r1 */
 	ld	r1,PACAEMERGSP(r13)	/* Use emergency stack for realmode */
 	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */
-	mfspr	r11,SPRN_HSRR0		/* Save HSRR0 */
-	mfspr	r12,SPRN_HSRR1		/* Save HSRR1 */
 	EXCEPTION_PROLOG_COMMON_1()
 	/* We don't touch AMR here, we never go to virtual mode */
 	EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN)
 	EXCEPTION_PROLOG_COMMON_3(0xe60)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	BRANCH_LINK_TO_FAR(DOTSYM(hmi_exception_realmode)) /* Function call ABI */
+	bl	hmi_exception_realmode
 	cmpdi	cr0,r3,0
-
-	/* Windup the stack. */
-	/* Move original HSRR0 and HSRR1 into the respective regs */
-	ld	r9,_MSR(r1)
-	mtspr	SPRN_HSRR1,r9
-	ld	r3,_NIP(r1)
-	mtspr	SPRN_HSRR0,r3
-	ld	r9,_CTR(r1)
-	mtctr	r9
-	ld	r9,_XER(r1)
-	mtxer	r9
-	ld	r9,_LINK(r1)
-	mtlr	r9
-	REST_GPR(0, r1)
-	REST_8GPRS(2, r1)
-	REST_GPR(10, r1)
-	ld	r11,_CCR(r1)
-	REST_2GPRS(12, r1)
 	bne	1f
-	mtcr	r11
-	REST_GPR(11, r1)
-	ld	r1,GPR1(r1)
-	HRFI_TO_USER_OR_KERNEL
 
-1:	mtcr	r11
-	REST_GPR(11, r1)
-	ld	r1,GPR1(r1)
+	EXCEPTION_RESTORE_REGS EXC_HV
+	HRFI_TO_USER_OR_KERNEL
+
+1:
 	/*
 	 * Go to virtual mode and pull the HMI event information from
 	 * firmware.
 	 */
-	.globl hmi_exception_after_realmode
-hmi_exception_after_realmode:
-	SET_SCRATCH0(r13)
-	EXCEPTION_PROLOG_0(PACA_EXGEN)
-	b	tramp_real_hmi_exception
+	EXCEPTION_RESTORE_REGS EXC_HV
+	EXCEPTION_PROLOG_0 PACA_EXGEN
+	EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, 0xe60, 0, 0, IRQS_DISABLED
+	EXCEPTION_PROLOG_2_REAL hmi_exception_common, EXC_HV, 1
 
 EXC_COMMON_BEGIN(hmi_exception_common)
-EXCEPTION_COMMON(PACA_EXGEN, 0xe60, hmi_exception_common, handle_hmi_exception,
-		ret_from_except, FINISH_NAP;ADD_NVGPRS;ADD_RECONCILE;RUNLATCH_ON)
+	EXCEPTION_COMMON(PACA_EXGEN, 0xe60)
+	FINISH_NAP
+	bl	save_nvgprs
+	RECONCILE_IRQ_STATE(r10, r11)
+	RUNLATCH_ON
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	handle_hmi_exception
+	b	ret_from_except
 
 EXC_REAL_OOL_MASKABLE_HV(h_doorbell, 0xe80, 0x20, IRQS_DISABLED)
 EXC_VIRT_OOL_MASKABLE_HV(h_doorbell, 0x4e80, 0x20, 0xe80, IRQS_DISABLED)
@@ -1196,7 +1780,7 @@ EXC_REAL_OOL(altivec_unavailable, 0xf20, 0x20)
 EXC_VIRT_OOL(altivec_unavailable, 0x4f20, 0x20, 0xf20)
 TRAMP_KVM(PACA_EXGEN, 0xf20)
 EXC_COMMON_BEGIN(altivec_unavailable_common)
-	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
+	EXCEPTION_COMMON(PACA_EXGEN, 0xf20)
 #ifdef CONFIG_ALTIVEC
 BEGIN_FTR_SECTION
 	beq	1f
@@ -1233,7 +1817,7 @@ EXC_REAL_OOL(vsx_unavailable, 0xf40, 0x20)
 EXC_VIRT_OOL(vsx_unavailable, 0x4f40, 0x20, 0xf40)
 TRAMP_KVM(PACA_EXGEN, 0xf40)
 EXC_COMMON_BEGIN(vsx_unavailable_common)
-	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
+	EXCEPTION_COMMON(PACA_EXGEN, 0xf40)
 #ifdef CONFIG_VSX
 BEGIN_FTR_SECTION
 	beq	1f
@@ -1309,9 +1893,8 @@ EXC_REAL_NONE(0x1400, 0x100)
 EXC_VIRT_NONE(0x5400, 0x100)
 
 EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100)
-	mtspr	SPRN_SPRG_HSCRATCH0,r13
-	EXCEPTION_PROLOG_0(PACA_EXGEN)
-	EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x1500)
+	EXCEPTION_PROLOG_0 PACA_EXGEN
+	EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 0, 0x1500, 0, 0, 0
 
 #ifdef CONFIG_PPC_DENORMALISATION
 	mfspr	r10,SPRN_HSRR1
@@ -1319,8 +1902,8 @@ EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100)
 	bne+	denorm_assist
 #endif
 
-	KVMTEST_HV(0x1500)
-	EXCEPTION_PROLOG_2(denorm_common, EXC_HV)
+	KVMTEST EXC_HV 0x1500
+	EXCEPTION_PROLOG_2_REAL denorm_common, EXC_HV, 1
 EXC_REAL_END(denorm_exception_hv, 0x1500, 0x100)
 
 #ifdef CONFIG_PPC_DENORMALISATION
@@ -1346,12 +1929,11 @@ BEGIN_FTR_SECTION
 	mtmsrd	r10
 	sync
 
-#define FMR2(n)  fmr  (n), (n) ; fmr n+1, n+1
-#define FMR4(n)  FMR2(n) ; FMR2(n+2)
-#define FMR8(n)  FMR4(n) ; FMR4(n+4)
-#define FMR16(n) FMR8(n) ; FMR8(n+8)
-#define FMR32(n) FMR16(n) ; FMR16(n+16)
-	FMR32(0)
+	.Lreg=0
+	.rept 32
+	fmr	.Lreg,.Lreg
+	.Lreg=.Lreg+1
+	.endr
 
 FTR_SECTION_ELSE
 /*
@@ -1363,12 +1945,11 @@ FTR_SECTION_ELSE
 	mtmsrd	r10
 	sync
 
-#define XVCPSGNDP2(n) XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
-#define XVCPSGNDP4(n) XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
-#define XVCPSGNDP8(n) XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
-#define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
-#define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
-	XVCPSGNDP32(0)
+	.Lreg=0
+	.rept 32
+	XVCPSGNDP(.Lreg,.Lreg,.Lreg)
+	.Lreg=.Lreg+1
+	.endr
 
 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
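
[Editor's note] The .rept conversions here are behaviour-preserving. The deleted FMR2/4/8/16/32 #define pyramid was just a doubling unroller; both it and the new assembler-time loop emit the same 32 instructions:

	fmr	0,0
	fmr	1,1
	# ... and so on through ...
	fmr	31,31

moving each register to itself, which is how this workaround denormalises the register contents (see the comment in the next hunk). The hunk that follows restarts the same loop at a base of 32 to cover the upper VSX registers.
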
@@ -1379,7 +1960,12 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
  * To denormalise we need to move a copy of the register to itself.
  * For POWER8 we need to do that for all 64 VSX registers
  */
-	XVCPSGNDP32(32)
+	.Lreg=32
+	.rept 32
+	XVCPSGNDP(.Lreg,.Lreg,.Lreg)
+	.Lreg=.Lreg+1
+	.endr
+
 denorm_done:
 	mfspr	r11,SPRN_HSRR0
 	subi	r11,r11,4
@@ -1442,7 +2028,7 @@ EXC_VIRT_NONE(0x5800, 0x100)
 	std	r12,PACA_EXGEN+EX_R12(r13); \
 	GET_SCRATCH0(r10); \
 	std	r10,PACA_EXGEN+EX_R13(r13); \
-	EXCEPTION_PROLOG_2(soft_nmi_common, _H)
+	EXCEPTION_PROLOG_2_REAL soft_nmi_common, _H, 1
 
 /*
  * Branch to soft_nmi_interrupt using the emergency stack. The emergency
@@ -1457,9 +2043,11 @@ EXC_COMMON_BEGIN(soft_nmi_common)
 	mr	r10,r1
 	ld	r1,PACAEMERGSP(r13)
 	subi	r1,r1,INT_FRAME_SIZE
-	EXCEPTION_COMMON_NORET_STACK(PACA_EXGEN, 0x900,
-			system_reset, soft_nmi_interrupt,
-			ADD_NVGPRS;ADD_RECONCILE)
+	EXCEPTION_COMMON_STACK(PACA_EXGEN, 0x900)
+	bl	save_nvgprs
+	RECONCILE_IRQ_STATE(r10, r11)
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	soft_nmi_interrupt
 	b	ret_from_except
 
 #else /* CONFIG_PPC_WATCHDOG */
@@ -1477,35 +2065,50 @@ EXC_COMMON_BEGIN(soft_nmi_common)
  *   - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return.
 * This is called with r10 containing the value to OR to the paca field.
 */
-#define MASKED_INTERRUPT(_H) \
-masked_##_H##interrupt: \
-	std	r11,PACA_EXGEN+EX_R11(r13); \
-	lbz	r11,PACAIRQHAPPENED(r13); \
-	or	r11,r11,r10; \
-	stb	r11,PACAIRQHAPPENED(r13); \
-	cmpwi	r10,PACA_IRQ_DEC; \
-	bne	1f; \
-	lis	r10,0x7fff; \
-	ori	r10,r10,0xffff; \
-	mtspr	SPRN_DEC,r10; \
-	b	MASKED_DEC_HANDLER_LABEL; \
-1:	andi.	r10,r10,PACA_IRQ_MUST_HARD_MASK; \
-	beq	2f; \
-	mfspr	r10,SPRN_##_H##SRR1; \
-	xori	r10,r10,MSR_EE;	/* clear MSR_EE */ \
-	mtspr	SPRN_##_H##SRR1,r10; \
-	ori	r11,r11,PACA_IRQ_HARD_DIS; \
-	stb	r11,PACAIRQHAPPENED(r13); \
-2:	/* done */ \
-	mtcrf	0x80,r9; \
-	std	r1,PACAR1(r13); \
-	ld	r9,PACA_EXGEN+EX_R9(r13); \
-	ld	r10,PACA_EXGEN+EX_R10(r13); \
-	ld	r11,PACA_EXGEN+EX_R11(r13); \
-	/* returns to kernel where r13 must be set up, so don't restore it */ \
-	##_H##RFI_TO_KERNEL; \
-	b	.; \
-	MASKED_DEC_HANDLER(_H)
+.macro MASKED_INTERRUPT hsrr
+	.if \hsrr
+masked_Hinterrupt:
+	.else
+masked_interrupt:
+	.endif
+	std	r11,PACA_EXGEN+EX_R11(r13)
+	lbz	r11,PACAIRQHAPPENED(r13)
+	or	r11,r11,r10
+	stb	r11,PACAIRQHAPPENED(r13)
+	cmpwi	r10,PACA_IRQ_DEC
+	bne	1f
+	lis	r10,0x7fff
+	ori	r10,r10,0xffff
+	mtspr	SPRN_DEC,r10
+	b	MASKED_DEC_HANDLER_LABEL
+1:	andi.	r10,r10,PACA_IRQ_MUST_HARD_MASK
+	beq	2f
+	.if \hsrr
+	mfspr	r10,SPRN_HSRR1
+	xori	r10,r10,MSR_EE	/* clear MSR_EE */
+	mtspr	SPRN_HSRR1,r10
+	.else
+	mfspr	r10,SPRN_SRR1
+	xori	r10,r10,MSR_EE	/* clear MSR_EE */
+	mtspr	SPRN_SRR1,r10
+	.endif
+	ori	r11,r11,PACA_IRQ_HARD_DIS
+	stb	r11,PACAIRQHAPPENED(r13)
+2:	/* done */
+	mtcrf	0x80,r9
+	std	r1,PACAR1(r13)
+	ld	r9,PACA_EXGEN+EX_R9(r13)
+	ld	r10,PACA_EXGEN+EX_R10(r13)
+	ld	r11,PACA_EXGEN+EX_R11(r13)
+	/* returns to kernel where r13 must be set up, so don't restore it */
+	.if \hsrr
+	HRFI_TO_KERNEL
+	.else
+	RFI_TO_KERNEL
+	.endif
+	b	.
+	MASKED_DEC_HANDLER(\hsrr\())
+.endm
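
[Editor's note] One detail worth calling out in MASKED_INTERRUPT: a masked decrementer cannot simply be ignored, because DEC keeps counting down and would immediately re-fire. The macro therefore rearms it with the largest positive 32-bit value before taking the masked path:

	lis	r10,0x7fff
	ori	r10,r10,0xffff	# r10 = 0x7fffffff
	mtspr	SPRN_DEC,r10	# postpone the next decrementer as far as possible
	b	MASKED_DEC_HANDLER_LABEL

Other PACA_IRQ_MUST_HARD_MASK sources instead get MSR[EE] cleared in the saved SRR1/HSRR1, so the interrupt stays disabled until the soft mask is lifted.
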
 
 TRAMP_REAL_BEGIN(stf_barrier_fallback)
 	std	r9,PACA_EXRFI+EX_R9(r13)
@@ -1612,8 +2215,8 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback)
  * cannot reach these if they are put there.
  */
 USE_FIXED_SECTION(virt_trampolines)
-	MASKED_INTERRUPT()
-	MASKED_INTERRUPT(H)
+	MASKED_INTERRUPT EXC_STD
+	MASKED_INTERRUPT EXC_HV
 
 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
 TRAMP_REAL_BEGIN(kvmppc_skip_interrupt)
@@ -1746,7 +2349,7 @@ handle_page_fault:
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	do_page_fault
 	cmpdi	r3,0
-	beq+	12f
+	beq+	ret_from_except_lite
 	bl	save_nvgprs
 	mr	r5,r3
 	addi	r3,r1,STACK_FRAME_OVERHEAD
@@ -1761,7 +2364,12 @@ handle_dabr_fault:
 	ld	r5,_DSISR(r1)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	do_break
-12:	b	ret_from_except_lite
+	/*
+	 * do_break() may have changed the NV GPRS while handling a breakpoint.
+	 * If so, we need to restore them with their updated values. Don't use
+	 * ret_from_except_lite here.
+	 */
+	b	ret_from_except
 
 #ifdef CONFIG_PPC_BOOK3S_64
@@ -1791,67 +2399,6 @@ handle_dabr_fault:
 	b	ret_from_except
 
 /*
- * Here we have detected that the kernel stack pointer is bad.
- * R9 contains the saved CR, r13 points to the paca,
- * r10 contains the (bad) kernel stack pointer,
- * r11 and r12 contain the saved SRR0 and SRR1.
- * We switch to using an emergency stack, save the registers there,
- * and call kernel_bad_stack(), which panics.
- */
-bad_stack:
-	ld	r1,PACAEMERGSP(r13)
-	subi	r1,r1,64+INT_FRAME_SIZE
-	std	r9,_CCR(r1)
-	std	r10,GPR1(r1)
-	std	r11,_NIP(r1)
-	std	r12,_MSR(r1)
-	mfspr	r11,SPRN_DAR
-	mfspr	r12,SPRN_DSISR
-	std	r11,_DAR(r1)
-	std	r12,_DSISR(r1)
-	mflr	r10
-	mfctr	r11
-	mfxer	r12
-	std	r10,_LINK(r1)
-	std	r11,_CTR(r1)
-	std	r12,_XER(r1)
-	SAVE_GPR(0,r1)
-	SAVE_GPR(2,r1)
-	ld	r10,EX_R3(r3)
-	std	r10,GPR3(r1)
-	SAVE_GPR(4,r1)
-	SAVE_4GPRS(5,r1)
-	ld	r9,EX_R9(r3)
-	ld	r10,EX_R10(r3)
-	SAVE_2GPRS(9,r1)
-	ld	r9,EX_R11(r3)
-	ld	r10,EX_R12(r3)
-	ld	r11,EX_R13(r3)
-	std	r9,GPR11(r1)
-	std	r10,GPR12(r1)
-	std	r11,GPR13(r1)
-BEGIN_FTR_SECTION
-	ld	r10,EX_CFAR(r3)
-	std	r10,ORIG_GPR3(r1)
-END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
-	SAVE_8GPRS(14,r1)
-	SAVE_10GPRS(22,r1)
-	lhz	r12,PACA_TRAP_SAVE(r13)
-	std	r12,_TRAP(r1)
-	addi	r11,r1,INT_FRAME_SIZE
-	std	r11,0(r1)
-	li	r12,0
-	std	r12,0(r11)
-	ld	r2,PACATOC(r13)
-	ld	r11,exception_marker@toc(r2)
-	std	r12,RESULT(r1)
-	std	r11,STACK_FRAME_OVERHEAD-16(r1)
-1:	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	kernel_bad_stack
-	b	1b
-_ASM_NOKPROBE_SYMBOL(bad_stack);
-
-/*
  * When doorbell is triggered from system reset wakeup, the message is
  * not cleared, so it would fire again when EE is enabled.
  *