Diffstat (limited to 'arch/powerpc/include')
23 files changed, 229 insertions, 894 deletions
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index 74d24201fc4f..23b83d3593e2 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -116,8 +116,6 @@ typedef struct {
 	/* Number of users of the external (Nest) MMU */
 	atomic_t copros;
 
-	/* NPU NMMU context */
-	struct npu_context *npu_context;
 	struct hash_mm_context *hash_context;
 
 	unsigned long vdso_base;
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index ccf00a8b98c6..62e6ea0a7650 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -274,8 +274,15 @@ extern unsigned long __vmalloc_end;
 #define VMALLOC_START	__vmalloc_start
 #define VMALLOC_END	__vmalloc_end
 
+static inline unsigned int ioremap_max_order(void)
+{
+	if (radix_enabled())
+		return PUD_SHIFT;
+	return 7 + PAGE_SHIFT; /* default from linux/vmalloc.h */
+}
+#define IOREMAP_MAX_ORDER ioremap_max_order()
+
 extern unsigned long __kernel_virt_start;
-extern unsigned long __kernel_virt_size;
 extern unsigned long __kernel_io_start;
 extern unsigned long __kernel_io_end;
 #define KERN_VIRT_START __kernel_virt_start
@@ -1343,5 +1350,26 @@ static inline bool is_pte_rw_upgrade(unsigned long old_val, unsigned long new_va
 	return false;
 }
 
+/*
+ * Like pmd_huge() and pmd_large(), but works regardless of config options
+ */
+#define pmd_is_leaf pmd_is_leaf
+static inline bool pmd_is_leaf(pmd_t pmd)
+{
+	return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
+}
+
+#define pud_is_leaf pud_is_leaf
+static inline bool pud_is_leaf(pud_t pud)
+{
+	return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
+}
+
+#define pgd_is_leaf pgd_is_leaf
+static inline bool pgd_is_leaf(pgd_t pgd)
+{
+	return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PTE));
+}
+
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */
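[Editor aside, not part of the commit: the point of the new p?d_is_leaf() helpers is that a page-table walker can test for a huge leaf entry at any level without CONFIG_HUGETLB_PAGE / CONFIG_TRANSPARENT_HUGEPAGE ifdefs; the book3s/64 versions just test _PAGE_PTE. A minimal sketch of such a walk, assuming the kernel accessors of this era (pgd_offset(), pud_offset(), pmd_offset()); mapping_shift() is a hypothetical name:

#include <linux/mm.h>

/* Hypothetical: return the mapping shift for addr, or -1 if unmapped. */
static int mapping_shift(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return -1;
	if (pgd_is_leaf(*pgd))		/* huge leaf at the top level */
		return PGDIR_SHIFT;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return -1;
	if (pud_is_leaf(*pud))		/* e.g. a 1G radix mapping */
		return PUD_SHIFT;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return -1;
	if (pmd_is_leaf(*pmd))		/* e.g. a 2M radix mapping */
		return PMD_SHIFT;

	return PAGE_SHIFT;		/* normal PTE-level mapping */
}

The generic fallbacks added to asm/pgtable.h further below return false, so this compiles on subarchitectures without huge leaf entries.]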
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 574eca33f893..e04a839cb5b9 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -266,6 +266,9 @@ extern void radix__vmemmap_remove_mapping(unsigned long start,
 extern int radix__map_kernel_page(unsigned long ea, unsigned long pa,
 				 pgprot_t flags, unsigned int psz);
 
+extern int radix__ioremap_range(unsigned long ea, phys_addr_t pa,
+				unsigned long size, pgprot_t prot, int nid);
+
 static inline unsigned long radix__get_tree_size(void)
 {
 	unsigned long rts_field;
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index 40ea5b3781c6..b3388d95f451 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -33,7 +33,8 @@
 
 #define IFETCH_ALIGN_BYTES	(1 << IFETCH_ALIGN_SHIFT)
 
-#if defined(__powerpc64__) && !defined(__ASSEMBLY__)
+#if !defined(__ASSEMBLY__)
+#ifdef CONFIG_PPC64
 
 struct ppc_cache_info {
 	u32 size;
@@ -53,7 +54,28 @@ struct ppc64_caches {
 };
 
 extern struct ppc64_caches ppc64_caches;
-#endif /* __powerpc64__ && ! __ASSEMBLY__ */
+
+static inline u32 l1_cache_shift(void)
+{
+	return ppc64_caches.l1d.log_block_size;
+}
+
+static inline u32 l1_cache_bytes(void)
+{
+	return ppc64_caches.l1d.block_size;
+}
+#else
+static inline u32 l1_cache_shift(void)
+{
+	return L1_CACHE_SHIFT;
+}
+
+static inline u32 l1_cache_bytes(void)
+{
+	return L1_CACHE_BYTES;
+}
+#endif
+#endif /* ! __ASSEMBLY__ */
 
 #if defined(__ASSEMBLY__)
 /*
@@ -85,22 +107,22 @@ extern void _set_L3CR(unsigned long);
 
 static inline void dcbz(void *addr)
 {
-	__asm__ __volatile__ ("dcbz 0, %0" : : "r"(addr) : "memory");
+	__asm__ __volatile__ ("dcbz %y0" : : "Z"(*(u8 *)addr) : "memory");
 }
 
 static inline void dcbi(void *addr)
 {
-	__asm__ __volatile__ ("dcbi 0, %0" : : "r"(addr) : "memory");
+	__asm__ __volatile__ ("dcbi %y0" : : "Z"(*(u8 *)addr) : "memory");
 }
 
 static inline void dcbf(void *addr)
 {
-	__asm__ __volatile__ ("dcbf 0, %0" : : "r"(addr) : "memory");
+	__asm__ __volatile__ ("dcbf %y0" : : "Z"(*(u8 *)addr) : "memory");
 }
 
 static inline void dcbst(void *addr)
 {
-	__asm__ __volatile__ ("dcbst 0, %0" : : "r"(addr) : "memory");
+	__asm__ __volatile__ ("dcbst %y0" : : "Z"(*(u8 *)addr) : "memory");
 }
 #endif /* !__ASSEMBLY__ */
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index 74d60cfe8ce5..eef388f2659f 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -29,9 +29,12 @@
  * not expect this type of fault. flush_cache_vmap is not exactly the right
  * place to put this, but it seems to work well enough.
  */
-#define flush_cache_vmap(start, end)	do { asm volatile("ptesync" ::: "memory"); } while (0)
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+	asm volatile("ptesync" ::: "memory");
+}
 #else
-#define flush_cache_vmap(start, end)	do { } while (0)
+static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
 #endif
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
@@ -54,20 +57,29 @@ static inline void __flush_dcache_icache_phys(unsigned long physaddr)
 }
 #endif
 
-#ifdef CONFIG_PPC32
 /*
  * Write any modified data cache blocks out to memory and invalidate them.
  * Does not invalidate the corresponding instruction cache blocks.
  */
 static inline void flush_dcache_range(unsigned long start, unsigned long stop)
 {
-	void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
-	unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
+	unsigned long shift = l1_cache_shift();
+	unsigned long bytes = l1_cache_bytes();
+	void *addr = (void *)(start & ~(bytes - 1));
+	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
 	unsigned long i;
 
-	for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
+	if (IS_ENABLED(CONFIG_PPC64)) {
+		mb();	/* sync */
+		isync();
+	}
+
+	for (i = 0; i < size >> shift; i++, addr += bytes)
 		dcbf(addr);
 	mb();	/* sync */
+
+	if (IS_ENABLED(CONFIG_PPC64))
+		isync();
 }
 
 /*
@@ -77,11 +89,13 @@ static inline void flush_dcache_range(unsigned long start, unsigned long stop)
  */
 static inline void clean_dcache_range(unsigned long start, unsigned long stop)
 {
-	void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
-	unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
+	unsigned long shift = l1_cache_shift();
+	unsigned long bytes = l1_cache_bytes();
+	void *addr = (void *)(start & ~(bytes - 1));
+	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
 	unsigned long i;
 
-	for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
+	for (i = 0; i < size >> shift; i++, addr += bytes)
 		dcbst(addr);
 	mb();	/* sync */
 }
@@ -94,21 +108,17 @@ static inline void clean_dcache_range(unsigned long start, unsigned long stop)
 static inline void invalidate_dcache_range(unsigned long start,
 					   unsigned long stop)
 {
-	void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
-	unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
+	unsigned long shift = l1_cache_shift();
+	unsigned long bytes = l1_cache_bytes();
+	void *addr = (void *)(start & ~(bytes - 1));
+	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
 	unsigned long i;
 
-	for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
+	for (i = 0; i < size >> shift; i++, addr += bytes)
 		dcbi(addr);
 	mb();	/* sync */
 }
-#endif /* CONFIG_PPC32 */
-
-#ifdef CONFIG_PPC64
-extern void flush_dcache_range(unsigned long start, unsigned long stop);
-extern void flush_inval_dcache_range(unsigned long start, unsigned long stop);
-#endif
-
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 	do { \
 		memcpy(dst, src, len); \
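[Editor aside, not in the patch: with the PPC32/PPC64 unification above there is now a single C implementation, and the arithmetic rounds the requested range out to whole cache blocks before issuing dcbf. A hypothetical caller (names are illustrative only):

/* Write back a CPU-filled buffer before handing it to a
 * non-coherent device; addresses are byte granular.
 */
static void publish_buffer(void *buf, size_t len)
{
	flush_dcache_range((unsigned long)buf, (unsigned long)buf + len);
}

Note how the PPC64 path brackets the dcbf loop with sync/isync while PPC32 only needs the closing sync, mirroring the removed 64-bit assembly helper that these externs used to declare.]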
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 841a0be6c1b2..33f4f72eb035 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -30,25 +30,13 @@
  * exception handlers (including pSeries LPAR) and iSeries LPAR
  * implementations as possible.
  */
-#include <asm/head-64.h>
 #include <asm/feature-fixups.h>
 
-/* PACA save area offsets (exgen, exmc, etc) */
-#define EX_R9		0
-#define EX_R10		8
-#define EX_R11		16
-#define EX_R12		24
-#define EX_R13		32
-#define EX_DAR		40
-#define EX_DSISR	48
-#define EX_CCR		52
-#define EX_CFAR		56
-#define EX_PPR		64
+/* PACA save area size in u64 units (exgen, exmc, etc) */
 #if defined(CONFIG_RELOCATABLE)
-#define EX_CTR		72
-#define EX_SIZE		10	/* size in u64 units */
+#define EX_SIZE		10
 #else
-#define EX_SIZE		9	/* size in u64 units */
+#define EX_SIZE		9
 #endif
 
 /*
@@ -56,12 +44,7 @@
  */
 #define MAX_MCE_DEPTH	4
 
-/*
- * EX_R3 is only used by the bad_stack handler. bad_stack reloads and
- * saves DAR from SPRN_DAR, and EX_DAR is not used. So EX_R3 can overlap
- * with EX_DAR.
- */
-#define EX_R3		EX_DAR
+#ifdef __ASSEMBLY__
 
 #define STF_ENTRY_BARRIER_SLOT					\
 	STF_ENTRY_BARRIER_FIXUP_SECTION;			\
@@ -144,588 +127,6 @@
 	hrfid;							\
 	b	hrfi_flush_fallback
 
-#ifdef CONFIG_RELOCATABLE
-#define __EXCEPTION_PROLOG_2_RELON(label, h)			\
-	mfspr	r11,SPRN_##h##SRR0;	/* save SRR0 */		\
-	LOAD_HANDLER(r12,label);				\
-	mtctr	r12;						\
-	mfspr	r12,SPRN_##h##SRR1;	/* and SRR1 */		\
-	li	r10,MSR_RI;					\
-	mtmsrd	r10,1;		/* Set RI (EE=0) */		\
-	bctr;
-#else
-/* If not relocatable, we can jump directly -- and save messing with LR */
-#define __EXCEPTION_PROLOG_2_RELON(label, h)			\
-	mfspr	r11,SPRN_##h##SRR0;	/* save SRR0 */		\
-	mfspr	r12,SPRN_##h##SRR1;	/* and SRR1 */		\
-	li	r10,MSR_RI;					\
-	mtmsrd	r10,1;		/* Set RI (EE=0) */		\
-	b	label;
-#endif
-#define EXCEPTION_PROLOG_2_RELON(label, h)			\
-	__EXCEPTION_PROLOG_2_RELON(label, h)
-
-/*
- * As EXCEPTION_PROLOG(), except we've already got relocation on so no need to
- * rfid. Save LR in case we're CONFIG_RELOCATABLE, in which case
- * EXCEPTION_PROLOG_2_RELON will be using LR.
- */
-#define EXCEPTION_RELON_PROLOG(area, label, h, extra, vec)	\
-	SET_SCRATCH0(r13);		/* save r13 */		\
-	EXCEPTION_PROLOG_0(area);				\
-	EXCEPTION_PROLOG_1(area, extra, vec);			\
-	EXCEPTION_PROLOG_2_RELON(label, h)
-
-/*
- * We're short on space and time in the exception prolog, so we can't
- * use the normal LOAD_REG_IMMEDIATE macro to load the address of label.
- * Instead we get the base of the kernel from paca->kernelbase and or in the low
- * part of label. This requires that the label be within 64KB of kernelbase, and
- * that kernelbase be 64K aligned.
- */
-#define LOAD_HANDLER(reg, label)				\
-	ld	reg,PACAKBASE(r13);	/* get high part of &label */ \
-	ori	reg,reg,FIXED_SYMBOL_ABS_ADDR(label);
-
-#define __LOAD_HANDLER(reg, label)				\
-	ld	reg,PACAKBASE(r13);				\
-	ori	reg,reg,(ABS_ADDR(label))@l;
-
-/*
- * Branches from unrelocated code (e.g., interrupts) to labels outside
- * head-y require >64K offsets.
- */
-#define __LOAD_FAR_HANDLER(reg, label)				\
-	ld	reg,PACAKBASE(r13);				\
-	ori	reg,reg,(ABS_ADDR(label))@l;			\
-	addis	reg,reg,(ABS_ADDR(label))@h;
-
-/* Exception register prefixes */
-#define EXC_HV	H
-#define EXC_STD
-
-#if defined(CONFIG_RELOCATABLE)
-/*
- * If we support interrupts with relocation on AND we're a relocatable kernel,
- * we need to use CTR to get to the 2nd level handler.  So, save/restore it
- * when required.
- */
-#define SAVE_CTR(reg, area)	mfctr	reg ;	std	reg,area+EX_CTR(r13)
-#define GET_CTR(reg, area)	ld	reg,area+EX_CTR(r13)
-#define RESTORE_CTR(reg, area)	ld	reg,area+EX_CTR(r13) ; mtctr reg
-#else
-/* ...else CTR is unused and in register. */
-#define SAVE_CTR(reg, area)
-#define GET_CTR(reg, area)	mfctr	reg
-#define RESTORE_CTR(reg, area)
-#endif
-
-/*
- * PPR save/restore macros used in exceptions_64s.S
- * Used for P7 or later processors
- */
-#define SAVE_PPR(area, ra)					\
-BEGIN_FTR_SECTION_NESTED(940)					\
-	ld	ra,area+EX_PPR(r13);	/* Read PPR from paca */ \
-	std	ra,_PPR(r1);					\
-END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,940)
-
-#define RESTORE_PPR_PACA(area, ra)				\
-BEGIN_FTR_SECTION_NESTED(941)					\
-	ld	ra,area+EX_PPR(r13);				\
-	mtspr	SPRN_PPR,ra;					\
-END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,941)
-
-/*
- * Get an SPR into a register if the CPU has the given feature
- */
-#define OPT_GET_SPR(ra, spr, ftr)				\
-BEGIN_FTR_SECTION_NESTED(943)					\
-	mfspr	ra,spr;						\
-END_FTR_SECTION_NESTED(ftr,ftr,943)
-
-/*
- * Set an SPR from a register if the CPU has the given feature
- */
-#define OPT_SET_SPR(ra, spr, ftr)				\
-BEGIN_FTR_SECTION_NESTED(943)					\
-	mtspr	spr,ra;						\
-END_FTR_SECTION_NESTED(ftr,ftr,943)
-
-/*
- * Save a register to the PACA if the CPU has the given feature
- */
-#define OPT_SAVE_REG_TO_PACA(offset, ra, ftr)			\
-BEGIN_FTR_SECTION_NESTED(943)					\
-	std	ra,offset(r13);					\
-END_FTR_SECTION_NESTED(ftr,ftr,943)
-
-#define EXCEPTION_PROLOG_0(area)				\
-	GET_PACA(r13);						\
-	std	r9,area+EX_R9(r13);	/* save r9 */		\
-	OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR);		\
-	HMT_MEDIUM;						\
-	std	r10,area+EX_R10(r13);	/* save r10 - r12 */	\
-	OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR)
-
-#define __EXCEPTION_PROLOG_1_PRE(area)				\
-	OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR);	\
-	OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR);	\
-	INTERRUPT_TO_KERNEL;					\
-	SAVE_CTR(r10, area);					\
-	mfcr	r9;
-
-#define __EXCEPTION_PROLOG_1_POST(area)				\
-	std	r11,area+EX_R11(r13);				\
-	std	r12,area+EX_R12(r13);				\
-	GET_SCRATCH0(r10);					\
-	std	r10,area+EX_R13(r13)
-
-/*
- * This version of the EXCEPTION_PROLOG_1 will carry
- * addition parameter called "bitmask" to support
- * checking of the interrupt maskable level in the SOFTEN_TEST.
- * Intended to be used in MASKABLE_EXCPETION_* macros.
- */
-#define MASKABLE_EXCEPTION_PROLOG_1(area, extra, vec, bitmask)	\
-	__EXCEPTION_PROLOG_1_PRE(area);				\
-	extra(vec, bitmask);					\
-	__EXCEPTION_PROLOG_1_POST(area);
-
-/*
- * This version of the EXCEPTION_PROLOG_1 is intended
- * to be used in STD_EXCEPTION* macros
- */
-#define _EXCEPTION_PROLOG_1(area, extra, vec)			\
-	__EXCEPTION_PROLOG_1_PRE(area);				\
-	extra(vec);						\
-	__EXCEPTION_PROLOG_1_POST(area);
-
-#define EXCEPTION_PROLOG_1(area, extra, vec)			\
-	_EXCEPTION_PROLOG_1(area, extra, vec)
-
-#define __EXCEPTION_PROLOG_2(label, h)				\
-	ld	r10,PACAKMSR(r13);	/* get MSR value for kernel */ \
-	mfspr	r11,SPRN_##h##SRR0;	/* save SRR0 */		\
-	LOAD_HANDLER(r12,label)					\
-	mtspr	SPRN_##h##SRR0,r12;				\
-	mfspr	r12,SPRN_##h##SRR1;	/* and SRR1 */		\
-	mtspr	SPRN_##h##SRR1,r10;				\
-	h##RFI_TO_KERNEL;					\
-	b	.	/* prevent speculative execution */
-#define EXCEPTION_PROLOG_2(label, h)				\
-	__EXCEPTION_PROLOG_2(label, h)
-
-/* _NORI variant keeps MSR_RI clear */
-#define __EXCEPTION_PROLOG_2_NORI(label, h)			\
-	ld	r10,PACAKMSR(r13);	/* get MSR value for kernel */ \
-	xori	r10,r10,MSR_RI;		/* Clear MSR_RI */	\
-	mfspr	r11,SPRN_##h##SRR0;	/* save SRR0 */		\
-	LOAD_HANDLER(r12,label)					\
-	mtspr	SPRN_##h##SRR0,r12;				\
-	mfspr	r12,SPRN_##h##SRR1;	/* and SRR1 */		\
-	mtspr	SPRN_##h##SRR1,r10;				\
-	h##RFI_TO_KERNEL;					\
-	b	.	/* prevent speculative execution */
-
-#define EXCEPTION_PROLOG_2_NORI(label, h)			\
-	__EXCEPTION_PROLOG_2_NORI(label, h)
-
-#define EXCEPTION_PROLOG(area, label, h, extra, vec)		\
-	SET_SCRATCH0(r13);		/* save r13 */		\
-	EXCEPTION_PROLOG_0(area);				\
-	EXCEPTION_PROLOG_1(area, extra, vec);			\
-	EXCEPTION_PROLOG_2(label, h);
-
-#define __KVMTEST(h, n)						\
-	lbz	r10,HSTATE_IN_GUEST(r13);			\
-	cmpwi	r10,0;						\
-	bne	do_kvm_##h##n
-
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-/*
- * If hv is possible, interrupts come into to the hv version
- * of the kvmppc_interrupt code, which then jumps to the PR handler,
- * kvmppc_interrupt_pr, if the guest is a PR guest.
- */
-#define kvmppc_interrupt kvmppc_interrupt_hv
-#else
-#define kvmppc_interrupt kvmppc_interrupt_pr
-#endif
-
-/*
- * Branch to label using its 0xC000 address. This results in instruction
- * address suitable for MSR[IR]=0 or 1, which allows relocation to be turned
- * on using mtmsr rather than rfid.
- *
- * This could set the 0xc bits for !RELOCATABLE as an immediate, rather than
- * load KBASE for a slight optimisation.
- */
-#define BRANCH_TO_C000(reg, label)				\
-	__LOAD_HANDLER(reg, label);				\
-	mtctr	reg;						\
-	bctr
-
-#ifdef CONFIG_RELOCATABLE
-#define BRANCH_TO_COMMON(reg, label)				\
-	__LOAD_HANDLER(reg, label);				\
-	mtctr	reg;						\
-	bctr
-
-#define BRANCH_LINK_TO_FAR(label)				\
-	__LOAD_FAR_HANDLER(r12, label);				\
-	mtctr	r12;						\
-	bctrl
-
-/*
- * KVM requires __LOAD_FAR_HANDLER.
- *
- * __BRANCH_TO_KVM_EXIT branches are also a special case because they
- * explicitly use r9 then reload it from PACA before branching. Hence
- * the double-underscore.
- */
-#define __BRANCH_TO_KVM_EXIT(area, label)			\
-	mfctr	r9;						\
-	std	r9,HSTATE_SCRATCH1(r13);			\
-	__LOAD_FAR_HANDLER(r9, label);				\
-	mtctr	r9;						\
-	ld	r9,area+EX_R9(r13);				\
-	bctr
-
-#else
-#define BRANCH_TO_COMMON(reg, label)				\
-	b	label
-
-#define BRANCH_LINK_TO_FAR(label)				\
-	bl	label
-
-#define __BRANCH_TO_KVM_EXIT(area, label)			\
-	ld	r9,area+EX_R9(r13);				\
-	b	label
-
-#endif
-
-/* Do not enable RI */
-#define EXCEPTION_PROLOG_NORI(area, label, h, extra, vec)	\
-	EXCEPTION_PROLOG_0(area);				\
-	EXCEPTION_PROLOG_1(area, extra, vec);			\
-	EXCEPTION_PROLOG_2_NORI(label, h);
-
-
-#define __KVM_HANDLER(area, h, n)				\
-	BEGIN_FTR_SECTION_NESTED(947)				\
-	ld	r10,area+EX_CFAR(r13);				\
-	std	r10,HSTATE_CFAR(r13);				\
-	END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947);	\
-	BEGIN_FTR_SECTION_NESTED(948)				\
-	ld	r10,area+EX_PPR(r13);				\
-	std	r10,HSTATE_PPR(r13);				\
-	END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948); \
-	ld	r10,area+EX_R10(r13);				\
-	std	r12,HSTATE_SCRATCH0(r13);			\
-	sldi	r12,r9,32;					\
-	ori	r12,r12,(n);					\
-	/* This reloads r9 before branching to kvmppc_interrupt */ \
-	__BRANCH_TO_KVM_EXIT(area, kvmppc_interrupt)
-
-#define __KVM_HANDLER_SKIP(area, h, n)				\
-	cmpwi	r10,KVM_GUEST_MODE_SKIP;			\
-	beq	89f;						\
-	BEGIN_FTR_SECTION_NESTED(948)				\
-	ld	r10,area+EX_PPR(r13);				\
-	std	r10,HSTATE_PPR(r13);				\
-	END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948); \
-	ld	r10,area+EX_R10(r13);				\
-	std	r12,HSTATE_SCRATCH0(r13);			\
-	sldi	r12,r9,32;					\
-	ori	r12,r12,(n);					\
-	/* This reloads r9 before branching to kvmppc_interrupt */ \
-	__BRANCH_TO_KVM_EXIT(area, kvmppc_interrupt);		\
-89:	mtocrf	0x80,r9;					\
-	ld	r9,area+EX_R9(r13);				\
-	ld	r10,area+EX_R10(r13);				\
-	b	kvmppc_skip_##h##interrupt
-
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-#define KVMTEST(h, n)			__KVMTEST(h, n)
-#define KVM_HANDLER(area, h, n)		__KVM_HANDLER(area, h, n)
-#define KVM_HANDLER_SKIP(area, h, n)	__KVM_HANDLER_SKIP(area, h, n)
-
-#else
-#define KVMTEST(h, n)
-#define KVM_HANDLER(area, h, n)
-#define KVM_HANDLER_SKIP(area, h, n)
-#endif
-
-#define NOTEST(n)
-
-#define EXCEPTION_PROLOG_COMMON_1()				\
-	std	r9,_CCR(r1);		/* save CR in stackframe	*/ \
-	std	r11,_NIP(r1);		/* save SRR0 in stackframe	*/ \
-	std	r12,_MSR(r1);		/* save SRR1 in stackframe	*/ \
-	std	r10,0(r1);		/* make stack chain pointer	*/ \
-	std	r0,GPR0(r1);		/* save r0 in stackframe	*/ \
-	std	r10,GPR1(r1);		/* save r1 in stackframe	*/ \
-
-
-/*
- * The common exception prolog is used for all except a few exceptions
- * such as a segment miss on a kernel address.  We have to be prepared
- * to take another exception from the point where we first touch the
- * kernel stack onwards.
- *
- * On entry r13 points to the paca, r9-r13 are saved in the paca,
- * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
- * SRR1, and relocation is on.
- */
-#define EXCEPTION_PROLOG_COMMON(n, area)			\
-	andi.	r10,r12,MSR_PR;		/* See if coming from user	*/ \
-	mr	r10,r1;			/* Save r1			*/ \
-	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack	*/ \
-	beq-	1f;						\
-	ld	r1,PACAKSAVE(r13);	/* kernel stack to use		*/ \
-1:	cmpdi	cr1,r1,-INT_FRAME_SIZE;	/* check if r1 is in userspace	*/ \
-	blt+	cr1,3f;			/* abort if it is		*/ \
-	li	r1,(n);			/* will be reloaded later	*/ \
-	sth	r1,PACA_TRAP_SAVE(r13);				\
-	std	r3,area+EX_R3(r13);				\
-	addi	r3,r13,area;		/* r3 -> where regs are saved*/	\
-	RESTORE_CTR(r1, area);					\
-	b	bad_stack;					\
-3:	EXCEPTION_PROLOG_COMMON_1();				\
-	kuap_save_amr_and_lock r9, r10, cr1, cr0;		\
-	beq	4f;			/* if from kernel mode		*/ \
-	ACCOUNT_CPU_USER_ENTRY(r13, r9, r10);			\
-	SAVE_PPR(area, r9);					\
-4:	EXCEPTION_PROLOG_COMMON_2(area)				\
-	EXCEPTION_PROLOG_COMMON_3(n)				\
-	ACCOUNT_STOLEN_TIME
-
-/* Save original regs values from save area to stack frame. */
-#define EXCEPTION_PROLOG_COMMON_2(area)				\
-	ld	r9,area+EX_R9(r13);	/* move r9, r10 to stackframe	*/ \
-	ld	r10,area+EX_R10(r13);				\
-	std	r9,GPR9(r1);					\
-	std	r10,GPR10(r1);					\
-	ld	r9,area+EX_R11(r13);	/* move r11 - r13 to stackframe	*/ \
-	ld	r10,area+EX_R12(r13);				\
-	ld	r11,area+EX_R13(r13);				\
-	std	r9,GPR11(r1);					\
-	std	r10,GPR12(r1);					\
-	std	r11,GPR13(r1);					\
-	BEGIN_FTR_SECTION_NESTED(66);				\
-	ld	r10,area+EX_CFAR(r13);				\
-	std	r10,ORIG_GPR3(r1);				\
-	END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66);	\
-	GET_CTR(r10, area);					\
-	std	r10,_CTR(r1);
-
-#define EXCEPTION_PROLOG_COMMON_3(n)				\
-	std	r2,GPR2(r1);		/* save r2 in stackframe	*/ \
-	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe   */ \
-	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe	*/ \
-	mflr	r9;			/* Get LR, later save to stack	*/ \
-	ld	r2,PACATOC(r13);	/* get kernel TOC into r2	*/ \
-	std	r9,_LINK(r1);					\
-	lbz	r10,PACAIRQSOFTMASK(r13);			\
-	mfspr	r11,SPRN_XER;		/* save XER in stackframe	*/ \
-	std	r10,SOFTE(r1);					\
-	std	r11,_XER(r1);					\
-	li	r9,(n)+1;					\
-	std	r9,_TRAP(r1);		/* set trap number		*/ \
-	li	r10,0;						\
-	ld	r11,exception_marker@toc(r2);			\
-	std	r10,RESULT(r1);		/* clear regs->result		*/ \
-	std	r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame	*/
-
-/*
- * Exception vectors.
- */
-#define STD_EXCEPTION(vec, label)				\
-	EXCEPTION_PROLOG(PACA_EXGEN, label, EXC_STD, KVMTEST_PR, vec);
-
-/* Version of above for when we have to branch out-of-line */
-#define __OOL_EXCEPTION(vec, label, hdlr)			\
-	SET_SCRATCH0(r13)					\
-	EXCEPTION_PROLOG_0(PACA_EXGEN)				\
-	b hdlr;
-
-#define STD_EXCEPTION_OOL(vec, label)				\
-	EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, vec);	\
-	EXCEPTION_PROLOG_2(label, EXC_STD)
-
-#define STD_EXCEPTION_HV(loc, vec, label)			\
-	EXCEPTION_PROLOG(PACA_EXGEN, label, EXC_HV, KVMTEST_HV, vec);
-
-#define STD_EXCEPTION_HV_OOL(vec, label)			\
-	EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_HV, vec);	\
-	EXCEPTION_PROLOG_2(label, EXC_HV)
-
-#define STD_RELON_EXCEPTION(loc, vec, label)			\
-	/* No guest interrupts come through here */		\
-	EXCEPTION_RELON_PROLOG(PACA_EXGEN, label, EXC_STD, NOTEST, vec);
-
-#define STD_RELON_EXCEPTION_OOL(vec, label)			\
-	EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, vec);		\
-	EXCEPTION_PROLOG_2_RELON(label, EXC_STD)
-
-#define STD_RELON_EXCEPTION_HV(loc, vec, label)			\
-	EXCEPTION_RELON_PROLOG(PACA_EXGEN, label, EXC_HV, KVMTEST_HV, vec);
-
-#define STD_RELON_EXCEPTION_HV_OOL(vec, label)			\
-	EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_HV, vec);	\
-	EXCEPTION_PROLOG_2_RELON(label, EXC_HV)
-
-/* This associate vector numbers with bits in paca->irq_happened */
-#define SOFTEN_VALUE_0x500	PACA_IRQ_EE
-#define SOFTEN_VALUE_0x900	PACA_IRQ_DEC
-#define SOFTEN_VALUE_0x980	PACA_IRQ_DEC
-#define SOFTEN_VALUE_0xa00	PACA_IRQ_DBELL
-#define SOFTEN_VALUE_0xe80	PACA_IRQ_DBELL
-#define SOFTEN_VALUE_0xe60	PACA_IRQ_HMI
-#define SOFTEN_VALUE_0xea0	PACA_IRQ_EE
-#define SOFTEN_VALUE_0xf00	PACA_IRQ_PMI
-
-#define __SOFTEN_TEST(h, vec, bitmask)				\
-	lbz	r10,PACAIRQSOFTMASK(r13);			\
-	andi.	r10,r10,bitmask;				\
-	li	r10,SOFTEN_VALUE_##vec;				\
-	bne	masked_##h##interrupt
-
-#define _SOFTEN_TEST(h, vec, bitmask)	__SOFTEN_TEST(h, vec, bitmask)
-
-#define SOFTEN_TEST_PR(vec, bitmask)				\
-	KVMTEST(EXC_STD, vec);					\
-	_SOFTEN_TEST(EXC_STD, vec, bitmask)
-
-#define SOFTEN_TEST_HV(vec, bitmask)				\
-	KVMTEST(EXC_HV, vec);					\
-	_SOFTEN_TEST(EXC_HV, vec, bitmask)
-
-#define KVMTEST_PR(vec)						\
-	KVMTEST(EXC_STD, vec)
-
-#define KVMTEST_HV(vec)						\
-	KVMTEST(EXC_HV, vec)
-
-#define SOFTEN_NOTEST_PR(vec, bitmask)	_SOFTEN_TEST(EXC_STD, vec, bitmask)
-#define SOFTEN_NOTEST_HV(vec, bitmask)	_SOFTEN_TEST(EXC_HV, vec, bitmask)
-
-#define __MASKABLE_EXCEPTION(vec, label, h, extra, bitmask)	\
-	SET_SCRATCH0(r13);    /* save r13 */			\
-	EXCEPTION_PROLOG_0(PACA_EXGEN);				\
-	MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec, bitmask); \
-	EXCEPTION_PROLOG_2(label, h);
-
-#define MASKABLE_EXCEPTION(vec, label, bitmask)			\
-	__MASKABLE_EXCEPTION(vec, label, EXC_STD, SOFTEN_TEST_PR, bitmask)
-
-#define MASKABLE_EXCEPTION_OOL(vec, label, bitmask)		\
-	MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_PR, vec, bitmask);\
-	EXCEPTION_PROLOG_2(label, EXC_STD)
-
-#define MASKABLE_EXCEPTION_HV(vec, label, bitmask)		\
-	__MASKABLE_EXCEPTION(vec, label, EXC_HV, SOFTEN_TEST_HV, bitmask)
-
-#define MASKABLE_EXCEPTION_HV_OOL(vec, label, bitmask)		\
-	MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec, bitmask);\
-	EXCEPTION_PROLOG_2(label, EXC_HV)
-
-#define __MASKABLE_RELON_EXCEPTION(vec, label, h, extra, bitmask) \
-	SET_SCRATCH0(r13);	/* save r13 */			\
-	EXCEPTION_PROLOG_0(PACA_EXGEN);				\
-	MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec, bitmask); \
-	EXCEPTION_PROLOG_2_RELON(label, h)
-
-#define MASKABLE_RELON_EXCEPTION(vec, label, bitmask)		\
-	__MASKABLE_RELON_EXCEPTION(vec, label, EXC_STD, SOFTEN_NOTEST_PR, bitmask)
-
-#define MASKABLE_RELON_EXCEPTION_OOL(vec, label, bitmask)	\
-	MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_NOTEST_PR, vec, bitmask);\
-	EXCEPTION_PROLOG_2(label, EXC_STD);
-
-#define MASKABLE_RELON_EXCEPTION_HV(vec, label, bitmask)	\
-	__MASKABLE_RELON_EXCEPTION(vec, label, EXC_HV, SOFTEN_TEST_HV, bitmask)
-
-#define MASKABLE_RELON_EXCEPTION_HV_OOL(vec, label, bitmask)	\
-	MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec, bitmask);\
-	EXCEPTION_PROLOG_2_RELON(label, EXC_HV)
-
-/*
- * Our exception common code can be passed various "additions"
- * to specify the behaviour of interrupts, whether to kick the
- * runlatch, etc...
- */
-
-/*
- * This addition reconciles our actual IRQ state with the various software
- * flags that track it. This may call C code.
- */
-#define ADD_RECONCILE	RECONCILE_IRQ_STATE(r10,r11)
-
-#define ADD_NVGPRS				\
-	bl	save_nvgprs
-
-#define RUNLATCH_ON				\
-BEGIN_FTR_SECTION				\
-	ld	r3, PACA_THREAD_INFO(r13);	\
-	ld	r4,TI_LOCAL_FLAGS(r3);		\
-	andi.	r0,r4,_TLF_RUNLATCH;		\
-	beql	ppc64_runlatch_on_trampoline;	\
-END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
-
-#define EXCEPTION_COMMON(area, trap, label, hdlr, ret, additions) \
-	EXCEPTION_PROLOG_COMMON(trap, area);			\
-	/* Volatile regs are potentially clobbered here */	\
-	additions;						\
-	addi	r3,r1,STACK_FRAME_OVERHEAD;			\
-	bl	hdlr;						\
-	b	ret
-
-/*
- * Exception where stack is already set in r1, r1 is saved in r10, and it
- * continues rather than returns.
- */
-#define EXCEPTION_COMMON_NORET_STACK(area, trap, label, hdlr, additions) \
-	EXCEPTION_PROLOG_COMMON_1();				\
-	kuap_save_amr_and_lock r9, r10, cr1;			\
-	EXCEPTION_PROLOG_COMMON_2(area);			\
-	EXCEPTION_PROLOG_COMMON_3(trap);			\
-	/* Volatile regs are potentially clobbered here */	\
-	additions;						\
-	addi	r3,r1,STACK_FRAME_OVERHEAD;			\
-	bl	hdlr
-
-#define STD_EXCEPTION_COMMON(trap, label, hdlr)			\
-	EXCEPTION_COMMON(PACA_EXGEN, trap, label, hdlr,		\
-			ret_from_except, ADD_NVGPRS;ADD_RECONCILE)
-
-/*
- * Like STD_EXCEPTION_COMMON, but for exceptions that can occur
- * in the idle task and therefore need the special idle handling
- * (finish nap and runlatch)
- */
-#define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr)		\
-	EXCEPTION_COMMON(PACA_EXGEN, trap, label, hdlr,		\
-			ret_from_except_lite, FINISH_NAP;ADD_RECONCILE;RUNLATCH_ON)
-
-/*
- * When the idle code in power4_idle puts the CPU into NAP mode,
- * it has to do so in a loop, and relies on the external interrupt
- * and decrementer interrupt entry code to get it out of the loop.
- * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
- * to signal that it is in the loop and needs help to get out.
- */
-#ifdef CONFIG_PPC_970_NAP
-#define FINISH_NAP				\
-BEGIN_FTR_SECTION				\
-	ld	r11, PACA_THREAD_INFO(r13);	\
-	ld	r9,TI_LOCAL_FLAGS(r11);		\
-	andi.	r10,r9,_TLF_NAPPING;		\
-	bnel	power4_fixup_nap;		\
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
-#else
-#define FINISH_NAP
-#endif
+#endif /* __ASSEMBLY__ */
 
 #endif	/* _ASM_POWERPC_EXCEPTION_H */
diff --git a/arch/powerpc/include/asm/head-64.h b/arch/powerpc/include/asm/head-64.h
index a4f947888744..a466765709a9 100644
--- a/arch/powerpc/include/asm/head-64.h
+++ b/arch/powerpc/include/asm/head-64.h
@@ -169,53 +169,6 @@ name:
 
 #define ABS_ADDR(label) (label - fs_label + fs_start)
 
-/*
- * Following are the BOOK3S exception handler helper macros.
- * Handlers come in a number of types, and each type has a number of varieties.
- *
- * EXC_REAL_*        - real, unrelocated exception vectors
- * EXC_VIRT_*        - virt (AIL), unrelocated exception vectors
- * TRAMP_REAL_*      - real, unrelocated helpers (virt can call these)
- * TRAMP_VIRT_*      - virt, unreloc helpers (in practice, real can use)
- * TRAMP_KVM         - KVM handlers that get put into real, unrelocated
- * EXC_COMMON        - virt, relocated common handlers
- *
- * The EXC handlers are given a name, and branch to name_common, or the
- * appropriate KVM or masking function. Vector handler verieties are as
- * follows:
- *
- * EXC_{REAL|VIRT}_BEGIN/END - used to open-code the exception
- *
- * EXC_{REAL|VIRT}  - standard exception
- *
- * EXC_{REAL|VIRT}_suffix
- *     where _suffix is:
- *   - _MASKABLE               - maskable exception
- *   - _OOL                    - out of line with trampoline to common handler
- *   - _HV                     - HV exception
- *
- * There can be combinations, e.g., EXC_VIRT_OOL_MASKABLE_HV
- *
- * The one unusual case is __EXC_REAL_OOL_HV_DIRECT, which is
- * an OOL vector that branches to a specified handler rather than the usual
- * trampoline that goes to common. It, and other underscore macros, should
- * be used with care.
- *
- * KVM handlers come in the following verieties:
- * TRAMP_KVM
- * TRAMP_KVM_SKIP
- * TRAMP_KVM_HV
- * TRAMP_KVM_HV_SKIP
- *
- * COMMON handlers come in the following verieties:
- * EXC_COMMON_BEGIN/END - used to open-code the handler
- * EXC_COMMON
- * EXC_COMMON_ASYNC
- *
- * TRAMP_REAL and TRAMP_VIRT can be used with BEGIN/END. KVM
- * and OOL handlers are implemented as types of TRAMP and TRAMP_VIRT handlers.
- */
-
 #define EXC_REAL_BEGIN(name, start, size)			\
 	FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##name, start, size)
@@ -255,162 +208,7 @@ name:
 #define EXC_VIRT_NONE(start, size)				\
 	FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size); \
-	FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size);
-
-
-#define EXC_REAL(name, start, size)				\
-	EXC_REAL_BEGIN(name, start, size);			\
-	STD_EXCEPTION(start, name##_common);			\
-	EXC_REAL_END(name, start, size);
-
-#define EXC_VIRT(name, start, size, realvec)			\
-	EXC_VIRT_BEGIN(name, start, size);			\
-	STD_RELON_EXCEPTION(start, realvec, name##_common);	\
-	EXC_VIRT_END(name, start, size);
-
-#define EXC_REAL_MASKABLE(name, start, size, bitmask)		\
-	EXC_REAL_BEGIN(name, start, size);			\
-	MASKABLE_EXCEPTION(start, name##_common, bitmask);	\
-	EXC_REAL_END(name, start, size);
-
-#define EXC_VIRT_MASKABLE(name, start, size, realvec, bitmask)	\
-	EXC_VIRT_BEGIN(name, start, size);			\
-	MASKABLE_RELON_EXCEPTION(realvec, name##_common, bitmask); \
-	EXC_VIRT_END(name, start, size);
-
-#define EXC_REAL_HV(name, start, size)				\
-	EXC_REAL_BEGIN(name, start, size);			\
-	STD_EXCEPTION_HV(start, start, name##_common);		\
-	EXC_REAL_END(name, start, size);
-
-#define EXC_VIRT_HV(name, start, size, realvec)			\
-	EXC_VIRT_BEGIN(name, start, size);			\
-	STD_RELON_EXCEPTION_HV(start, realvec, name##_common);	\
-	EXC_VIRT_END(name, start, size);
-
-#define __EXC_REAL_OOL(name, start, size)			\
-	EXC_REAL_BEGIN(name, start, size);			\
-	__OOL_EXCEPTION(start, label, tramp_real_##name);	\
-	EXC_REAL_END(name, start, size);
-
-#define __TRAMP_REAL_OOL(name, vec)				\
-	TRAMP_REAL_BEGIN(tramp_real_##name);			\
-	STD_EXCEPTION_OOL(vec, name##_common);
-
-#define EXC_REAL_OOL(name, start, size)				\
-	__EXC_REAL_OOL(name, start, size);			\
-	__TRAMP_REAL_OOL(name, start);
-
-#define __EXC_REAL_OOL_MASKABLE(name, start, size)		\
-	__EXC_REAL_OOL(name, start, size);
-
-#define __TRAMP_REAL_OOL_MASKABLE(name, vec, bitmask)		\
-	TRAMP_REAL_BEGIN(tramp_real_##name);			\
-	MASKABLE_EXCEPTION_OOL(vec, name##_common, bitmask);
-
-#define EXC_REAL_OOL_MASKABLE(name, start, size, bitmask)	\
-	__EXC_REAL_OOL_MASKABLE(name, start, size);		\
-	__TRAMP_REAL_OOL_MASKABLE(name, start, bitmask);
-
-#define __EXC_REAL_OOL_HV_DIRECT(name, start, size, handler)	\
-	EXC_REAL_BEGIN(name, start, size);			\
-	__OOL_EXCEPTION(start, label, handler);			\
-	EXC_REAL_END(name, start, size);
-
-#define __EXC_REAL_OOL_HV(name, start, size)			\
-	__EXC_REAL_OOL(name, start, size);
-
-#define __TRAMP_REAL_OOL_HV(name, vec)				\
-	TRAMP_REAL_BEGIN(tramp_real_##name);			\
-	STD_EXCEPTION_HV_OOL(vec, name##_common);		\
-
-#define EXC_REAL_OOL_HV(name, start, size)			\
-	__EXC_REAL_OOL_HV(name, start, size);			\
-	__TRAMP_REAL_OOL_HV(name, start);
-
-#define __EXC_REAL_OOL_MASKABLE_HV(name, start, size)		\
-	__EXC_REAL_OOL(name, start, size);
-
-#define __TRAMP_REAL_OOL_MASKABLE_HV(name, vec, bitmask)	\
-	TRAMP_REAL_BEGIN(tramp_real_##name);			\
-	MASKABLE_EXCEPTION_HV_OOL(vec, name##_common, bitmask);	\
-
-#define EXC_REAL_OOL_MASKABLE_HV(name, start, size, bitmask)	\
-	__EXC_REAL_OOL_MASKABLE_HV(name, start, size);		\
-	__TRAMP_REAL_OOL_MASKABLE_HV(name, start, bitmask);
-
-#define __EXC_VIRT_OOL(name, start, size)			\
-	EXC_VIRT_BEGIN(name, start, size);			\
-	__OOL_EXCEPTION(start, label, tramp_virt_##name);	\
-	EXC_VIRT_END(name, start, size);
-
-#define __TRAMP_VIRT_OOL(name, realvec)				\
-	TRAMP_VIRT_BEGIN(tramp_virt_##name);			\
-	STD_RELON_EXCEPTION_OOL(realvec, name##_common);
-
-#define EXC_VIRT_OOL(name, start, size, realvec)		\
-	__EXC_VIRT_OOL(name, start, size);			\
-	__TRAMP_VIRT_OOL(name, realvec);
-
-#define __EXC_VIRT_OOL_MASKABLE(name, start, size)		\
-	__EXC_VIRT_OOL(name, start, size);
-
-#define __TRAMP_VIRT_OOL_MASKABLE(name, realvec, bitmask)	\
-	TRAMP_VIRT_BEGIN(tramp_virt_##name);			\
-	MASKABLE_RELON_EXCEPTION_OOL(realvec, name##_common, bitmask);
-
-#define EXC_VIRT_OOL_MASKABLE(name, start, size, realvec, bitmask) \
-	__EXC_VIRT_OOL_MASKABLE(name, start, size);		\
-	__TRAMP_VIRT_OOL_MASKABLE(name, realvec, bitmask);
-
-#define __EXC_VIRT_OOL_HV(name, start, size)			\
-	__EXC_VIRT_OOL(name, start, size);
-
-#define __TRAMP_VIRT_OOL_HV(name, realvec)			\
-	TRAMP_VIRT_BEGIN(tramp_virt_##name);			\
-	STD_RELON_EXCEPTION_HV_OOL(realvec, name##_common);	\
-
-#define EXC_VIRT_OOL_HV(name, start, size, realvec)		\
-	__EXC_VIRT_OOL_HV(name, start, size);			\
-	__TRAMP_VIRT_OOL_HV(name, realvec);
-
-#define __EXC_VIRT_OOL_MASKABLE_HV(name, start, size)		\
-	__EXC_VIRT_OOL(name, start, size);
-
-#define __TRAMP_VIRT_OOL_MASKABLE_HV(name, realvec, bitmask)	\
-	TRAMP_VIRT_BEGIN(tramp_virt_##name);			\
-	MASKABLE_RELON_EXCEPTION_HV_OOL(realvec, name##_common, bitmask);\
-
-#define EXC_VIRT_OOL_MASKABLE_HV(name, start, size, realvec, bitmask) \
-	__EXC_VIRT_OOL_MASKABLE_HV(name, start, size);		\
-	__TRAMP_VIRT_OOL_MASKABLE_HV(name, realvec, bitmask);
-
-#define TRAMP_KVM(area, n)					\
-	TRAMP_KVM_BEGIN(do_kvm_##n);				\
-	KVM_HANDLER(area, EXC_STD, n);				\
-
-#define TRAMP_KVM_SKIP(area, n)					\
-	TRAMP_KVM_BEGIN(do_kvm_##n);				\
-	KVM_HANDLER_SKIP(area, EXC_STD, n);			\
-
-/*
- * HV variant exceptions get the 0x2 bit added to their trap number.
- */
-#define TRAMP_KVM_HV(area, n)					\
-	TRAMP_KVM_BEGIN(do_kvm_H##n);				\
-	KVM_HANDLER(area, EXC_HV, n + 0x2);			\
-
-#define TRAMP_KVM_HV_SKIP(area, n)				\
-	TRAMP_KVM_BEGIN(do_kvm_H##n);				\
-	KVM_HANDLER_SKIP(area, EXC_HV, n + 0x2);		\
-
-#define EXC_COMMON(name, realvec, hdlr)				\
-	EXC_COMMON_BEGIN(name);					\
-	STD_EXCEPTION_COMMON(realvec, name, hdlr);		\
-
-#define EXC_COMMON_ASYNC(name, realvec, hdlr)			\
-	EXC_COMMON_BEGIN(name);					\
-	STD_EXCEPTION_COMMON_ASYNC(realvec, name, hdlr);	\
+	FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size)
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
index 78202d5fb13a..67e2da195eae 100644
--- a/arch/powerpc/include/asm/hw_breakpoint.h
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -76,18 +76,25 @@ static inline void hw_breakpoint_disable(void)
 extern void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs);
 int hw_breakpoint_handler(struct die_args *args);
 
-extern int set_dawr(struct arch_hw_breakpoint *brk);
+#else	/* CONFIG_HAVE_HW_BREAKPOINT */
+static inline void hw_breakpoint_disable(void) { }
+static inline void thread_change_pc(struct task_struct *tsk,
+					struct pt_regs *regs) { }
+
+#endif	/* CONFIG_HAVE_HW_BREAKPOINT */
+
+
+#ifdef CONFIG_PPC_DAWR
 extern bool dawr_force_enable;
 static inline bool dawr_enabled(void)
 {
 	return dawr_force_enable;
 }
-
-#else	/* CONFIG_HAVE_HW_BREAKPOINT */
-static inline void hw_breakpoint_disable(void) { }
-static inline void thread_change_pc(struct task_struct *tsk,
-					struct pt_regs *regs) { }
-static inline bool dawr_enabled(void) { return false; }
-#endif	/* CONFIG_HAVE_HW_BREAKPOINT */
+int set_dawr(struct arch_hw_breakpoint *brk);
+#else
+static inline bool dawr_enabled(void) { return false; }
+static inline int set_dawr(struct arch_hw_breakpoint *brk) { return -1; }
+#endif
+
 #endif	/* __KERNEL__ */
 #endif	/* _PPC_BOOK3S_64_HW_BREAKPOINT_H */
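[Editor aside, not part of the diff: moving the DAWR declarations under their own CONFIG_PPC_DAWR switch gives callers compile-time stubs, so call sites need no ifdefs. A hypothetical call site for illustration only (the function name and error choice are invented):

#include <linux/errno.h>

/* Hypothetical caller: builds with or without CONFIG_PPC_DAWR. */
static int try_set_watchpoint(struct arch_hw_breakpoint *brk)
{
	if (!dawr_enabled())	/* constant false when DAWR is compiled out */
		return -ENODEV;
	return set_dawr(brk);	/* stub returns -1 when compiled out */
}
]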
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 2c1845e5e851..18d342b815e4 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -314,13 +314,5 @@ extern bool iommu_fixed_is_weak;
 
 extern const struct dma_map_ops dma_iommu_ops;
 
-static inline unsigned long device_to_mask(struct device *dev)
-{
-	if (dev->dma_mask && *dev->dma_mask)
-		return *dev->dma_mask;
-	/* Assume devices without mask can take 32 bit addresses */
-	return 0xfffffffful;
-}
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_IOMMU_H */
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 806494283e2a..3b4b305796ae 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -5,6 +5,29 @@
  */
 #ifndef _ASM_POWERPC_LPPACA_H
 #define _ASM_POWERPC_LPPACA_H
+
+/*
+ * The below VPHN macros are outside the __KERNEL__ check since these are
+ * used for compiling the vphn selftest in userspace
+ */
+
+/* The H_HOME_NODE_ASSOCIATIVITY h_call returns 6 64-bit registers. */
+#define VPHN_REGISTER_COUNT 6
+
+/*
+ * 6 64-bit registers unpacked into up to 24 be32 associativity values. To
+ * form the complete property we have to add the length in the first cell.
+ */
+#define VPHN_ASSOC_BUFSIZE (VPHN_REGISTER_COUNT*sizeof(u64)/sizeof(u16) + 1)
+
+/*
+ * The H_HOME_NODE_ASSOCIATIVITY hcall takes two values for flags:
+ * 1 for retrieving associativity information for a guest cpu
+ * 2 for retrieving associativity information for a host/hypervisor cpu
+ */
+#define VPHN_FLAG_VCPU	1
+#define VPHN_FLAG_PCPU	2
+
 #ifdef __KERNEL__
 
 /*
@@ -19,6 +42,7 @@
  */
 #include <linux/cache.h>
 #include <linux/threads.h>
+#include <linux/spinlock_types.h>
 #include <asm/types.h>
 #include <asm/mmu.h>
 #include <asm/firmware.h>
@@ -141,7 +165,19 @@ struct dtl_entry {
 #define DISPATCH_LOG_BYTES	4096	/* bytes per cpu */
 #define N_DISPATCH_LOG		(DISPATCH_LOG_BYTES / sizeof(struct dtl_entry))
 
+/*
+ * Dispatch trace log event enable mask:
+ *   0x1: voluntary virtual processor waits
+ *   0x2: time-slice preempts
+ *   0x4: virtual partition memory page faults
+ */
+#define DTL_LOG_CEDE		0x1
+#define DTL_LOG_PREEMPT		0x2
+#define DTL_LOG_FAULT		0x4
+#define DTL_LOG_ALL		(DTL_LOG_CEDE | DTL_LOG_PREEMPT | DTL_LOG_FAULT)
+
 extern struct kmem_cache *dtl_cache;
+extern rwlock_t dtl_access_lock;
 
 /*
  * When CONFIG_VIRT_CPU_ACCOUNTING_NATIVE = y, the cpu accounting code controls
@@ -151,6 +187,10 @@ extern struct kmem_cache *dtl_cache;
  */
 extern void (*dtl_consumer)(struct dtl_entry *entry, u64 index);
 
+extern void register_dtl_buffer(int cpu);
+extern void alloc_dtl_buffers(unsigned long *time_limit);
+extern long hcall_vphn(unsigned long cpu, u64 flags, __be32 *associativity);
+
 #endif /* CONFIG_PPC_BOOK3S */
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_LPPACA_H */
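[Editor aside, not from the commit: the VPHN_ASSOC_BUFSIZE arithmetic is easy to check by hand; six 8-byte registers unpack into 24 two-byte cells, plus one leading length cell. The same arithmetic as a standalone userspace check, which is exactly the environment the comment says these macros must build in:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 6 is VPHN_REGISTER_COUNT */
	unsigned int cells = 6 * sizeof(uint64_t) / sizeof(uint16_t); /* 24 */

	printf("%u data cells + 1 length cell = %u\n", cells, cells + 1);
	return 0;	/* prints: 24 data cells + 1 length cell = 25 */
}
]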
diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h
index 09a8553833d1..383242eb0dea 100644
--- a/arch/powerpc/include/asm/opal-api.h
+++ b/arch/powerpc/include/asm/opal-api.h
@@ -564,6 +564,7 @@ enum OpalHMI_XstopType {
 	CHECKSTOP_TYPE_UNKNOWN	=	0,
 	CHECKSTOP_TYPE_CORE	=	1,
 	CHECKSTOP_TYPE_NX	=	2,
+	CHECKSTOP_TYPE_NPU	=	3
 };
 
 enum OpalHMI_CoreXstopReason {
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 4ed5d57f2359..57bd029c715e 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -283,8 +283,6 @@ int64_t opal_xive_set_queue_state(uint64_t vp, uint32_t prio,
 				  uint32_t qtoggle,
 				  uint32_t qindex);
 int64_t opal_xive_get_vp_state(uint64_t vp, __be64 *out_w01);
-int64_t opal_pci_set_p2p(uint64_t phb_init, uint64_t phb_target,
-			uint64_t desc, uint16_t pe_number);
 
 int64_t opal_imc_counters_init(uint32_t type, uint64_t address,
 							uint64_t cpu_pir);
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 9bd2326bef6f..e3cc9eb9204d 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -166,7 +166,9 @@ struct paca_struct {
 	u64 kstack;			/* Saved Kernel stack addr */
 	u64 saved_r1;			/* r1 save for RTAS calls or PM or EE=0 */
 	u64 saved_msr;			/* MSR saved here by enter_rtas */
+#ifdef CONFIG_PPC_BOOK3E
 	u16 trap_save;			/* Used when bad stack is encountered */
+#endif
 	u8 irq_soft_mask;		/* mask for irq soft masking */
 	u8 irq_happened;		/* irq happened while soft-disabled */
 	u8 irq_work_pending;		/* IRQ_WORK interrupt while soft-disable */
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 64145751b2fd..c58ba7963688 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -140,6 +140,30 @@ static inline void pte_frag_set(mm_context_t *ctx, void *p)
 }
 #endif
 
+#ifndef pmd_is_leaf
+#define pmd_is_leaf pmd_is_leaf
+static inline bool pmd_is_leaf(pmd_t pmd)
+{
+	return false;
+}
+#endif
+
+#ifndef pud_is_leaf
+#define pud_is_leaf pud_is_leaf
+static inline bool pud_is_leaf(pud_t pud)
+{
+	return false;
+}
+#endif
+
+#ifndef pgd_is_leaf
+#define pgd_is_leaf pgd_is_leaf
+static inline bool pgd_is_leaf(pgd_t pgd)
+{
+	return false;
+}
+#endif
+
 #ifdef CONFIG_PPC64
 #define is_ioremap_addr is_ioremap_addr
 static inline bool is_ioremap_addr(const void *x)
diff --git a/arch/powerpc/include/asm/pnv-ocxl.h b/arch/powerpc/include/asm/pnv-ocxl.h
index 208b5503f4ed..7de82647e761 100644
--- a/arch/powerpc/include/asm/pnv-ocxl.h
+++ b/arch/powerpc/include/asm/pnv-ocxl.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
 // Copyright 2017 IBM Corp.
 #ifndef _ASM_PNV_OCXL_H
 #define _ASM_PNV_OCXL_H
diff --git a/arch/powerpc/include/asm/pnv-pci.h b/arch/powerpc/include/asm/pnv-pci.h
index b5a85f1bb305..edcb1fc50aeb 100644
--- a/arch/powerpc/include/asm/pnv-pci.h
+++ b/arch/powerpc/include/asm/pnv-pci.h
@@ -22,15 +22,9 @@ extern int pnv_pci_get_presence_state(uint64_t id, uint8_t *state);
 extern int pnv_pci_get_power_state(uint64_t id, uint8_t *state);
 extern int pnv_pci_set_power_state(uint64_t id, uint8_t state,
 				   struct opal_msg *msg);
-extern int pnv_pci_set_p2p(struct pci_dev *initiator, struct pci_dev *target,
-			   u64 desc);
 
-extern int pnv_pci_enable_tunnel(struct pci_dev *dev, uint64_t *asnind);
-extern int pnv_pci_disable_tunnel(struct pci_dev *dev);
 extern int pnv_pci_set_tunnel_bar(struct pci_dev *dev, uint64_t addr,
 				  int enable);
-extern int pnv_pci_get_as_notify_info(struct task_struct *task, u32 *lpid,
-				      u32 *pid, u32 *tid);
 int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode);
 int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
 			   unsigned int virq);
diff --git a/arch/powerpc/include/asm/powernv.h b/arch/powerpc/include/asm/powernv.h
index bc69ed2d952c..e1a858718716 100644
--- a/arch/powerpc/include/asm/powernv.h
+++ b/arch/powerpc/include/asm/powernv.h
@@ -7,35 +7,13 @@
 #define _ASM_POWERNV_H
 
 #ifdef CONFIG_PPC_POWERNV
-#define NPU2_WRITE 1
 extern void powernv_set_nmmu_ptcr(unsigned long ptcr);
-extern struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
-			unsigned long flags,
-			void (*cb)(struct npu_context *, void *),
-			void *priv);
-extern void pnv_npu2_destroy_context(struct npu_context *context,
-			struct pci_dev *gpdev);
-extern int pnv_npu2_handle_fault(struct npu_context *context, uintptr_t *ea,
-			unsigned long *flags, unsigned long *status,
-			int count);
 
 void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val);
 
 void pnv_tm_init(void);
 #else
 static inline void powernv_set_nmmu_ptcr(unsigned long ptcr) { }
-static inline struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
-			unsigned long flags,
-			struct npu_context *(*cb)(struct npu_context *, void *),
-			void *priv) { return ERR_PTR(-ENODEV); }
-
-static inline void pnv_npu2_destroy_context(struct npu_context *context,
-			struct pci_dev *gpdev) { }
-
-static inline int pnv_npu2_handle_fault(struct npu_context *context,
-			uintptr_t *ea, unsigned long *flags,
-			unsigned long *status, int count) {
-	return -ENODEV;
-}
 
 static inline void pnv_tm_init(void) { }
 #endif
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 2291daf39cd1..c1df75edde44 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -410,6 +410,15 @@
 #define __PPC_RC21	(0x1 << 10)
 
 /*
+ * Both low and high 16 bits are added as SIGNED additions, so if low 16 bits
+ * has high bit set, high 16 bits must be adjusted. These macros do that (stolen
+ * from binutils).
+ */
+#define PPC_LO(v)	((v) & 0xffff)
+#define PPC_HI(v)	(((v) >> 16) & 0xffff)
+#define PPC_HA(v)	PPC_HI((v) + 0x8000)
+
+/*
 * Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a
 * larx with EH set as an illegal instruction.
 */
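[Editor aside, a worked example that is not part of the patch: in an addis/addi pair the low half is consumed as a signed 16-bit immediate, so a value whose low half has the sign bit set needs its high half bumped by one, which is what the +0x8000 in PPC_HA() does. A standalone check:

#include <assert.h>
#include <stdint.h>

#define PPC_LO(v)	((v) & 0xffff)
#define PPC_HI(v)	(((v) >> 16) & 0xffff)
#define PPC_HA(v)	PPC_HI((v) + 0x8000)

int main(void)
{
	uint32_t v = 0x12348000;	/* low half is negative as an s16 */

	/* "addis rD,0,PPC_HA(v); addi rD,rD,PPC_LO(v)" reconstructs v:
	 * (0x1235 << 16) + (int16_t)0x8000 = 0x12350000 - 0x8000 = 0x12348000
	 */
	assert((uint32_t)((PPC_HA(v) << 16) + (int16_t)PPC_LO(v)) == v);
	return 0;
}

With plain PPC_HI() the pair would reconstruct 0x1234 << 16 plus a negative low half, landing 0x10000 short.]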
@@ -588,7 +597,16 @@
 #define PPC_SLBIA(IH)	stringify_in_c(.long PPC_INST_SLBIA | \
 				       ((IH & 0x7) << 21))
-#define PPC_INVALIDATE_ERAT	PPC_SLBIA(7)
+
+/*
+ * These may only be used on ISA v3.0 or later (aka. CPU_FTR_ARCH_300, radix
+ * implies CPU_FTR_ARCH_300). USER/GUEST invalidates may only be used by radix
+ * mode (on HPT these would also invalidate various SLBEs which may not be
+ * desired).
+ */
+#define PPC_ISA_3_0_INVALIDATE_ERAT	PPC_SLBIA(7)
+#define PPC_RADIX_INVALIDATE_ERAT_USER	PPC_SLBIA(3)
+#define PPC_RADIX_INVALIDATE_ERAT_GUEST	PPC_SLBIA(6)
 
 #define VCMPEQUD_RC(vrt, vra, vrb)	stringify_in_c(.long PPC_INST_VCMPEQUD | \
 			      ___PPC_RT(vrt) | ___PPC_RA(vra) | \
diff --git a/arch/powerpc/include/asm/ps3stor.h b/arch/powerpc/include/asm/ps3stor.h
index d9f6589bc107..1d8279014f22 100644
--- a/arch/powerpc/include/asm/ps3stor.h
+++ b/arch/powerpc/include/asm/ps3stor.h
@@ -39,7 +39,7 @@ struct ps3_storage_device {
 	unsigned int num_regions;
 	unsigned long accessible_regions;
 	unsigned int region_idx;		/* first accessible region */
-	struct ps3_storage_region regions[0];	/* Must be last */
+	struct ps3_storage_region regions[];	/* Must be last */
 };
 
 static inline struct ps3_storage_device *to_ps3_storage_device(struct device *dev)
diff --git a/arch/powerpc/include/asm/pte-walk.h b/arch/powerpc/include/asm/pte-walk.h
index 2d633e9d686c..33fa5dd8ee6a 100644
--- a/arch/powerpc/include/asm/pte-walk.h
+++ b/arch/powerpc/include/asm/pte-walk.h
@@ -10,8 +10,20 @@ extern pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
 static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea,
 				    bool *is_thp, unsigned *hshift)
 {
+	pte_t *pte;
+
 	VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__);
-	return __find_linux_pte(pgdir, ea, is_thp, hshift);
+	pte = __find_linux_pte(pgdir, ea, is_thp, hshift);
+
+#if defined(CONFIG_DEBUG_VM) &&					\
+	!(defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE))
+	/*
+	 * We should not find huge page if these configs are not enabled.
+	 */
+	if (hshift)
+		WARN_ON(*hshift);
+#endif
+	return pte;
 }
 
 static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
@@ -26,10 +38,22 @@ static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
 static inline pte_t *find_current_mm_pte(pgd_t *pgdir, unsigned long ea,
 					 bool *is_thp, unsigned *hshift)
 {
+	pte_t *pte;
+
 	VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__);
 	VM_WARN(pgdir != current->mm->pgd,
 		"%s lock less page table lookup called on wrong mm\n", __func__);
-	return __find_linux_pte(pgdir, ea, is_thp, hshift);
+	pte = __find_linux_pte(pgdir, ea, is_thp, hshift);
+
+#if defined(CONFIG_DEBUG_VM) &&					\
+	!(defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE))
+	/*
+	 * We should not find huge page if these configs are not enabled.
+	 */
+	if (hshift)
+		WARN_ON(*hshift);
+#endif
+	return pte;
 }
 
 #endif /* _ASM_POWERPC_PTE_WALK_H */
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index f85e2b01c3df..2f7e1ea5089e 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -35,6 +35,7 @@ static inline int pcibus_to_node(struct pci_bus *bus)
 				 cpu_all_mask :				\
 				 cpumask_of_node(pcibus_to_node(bus)))
 
+extern int cpu_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc);
 extern int __node_distance(int, int);
 #define node_distance(a, b) __node_distance(a, b)
 
@@ -84,6 +85,11 @@ static inline int numa_update_cpu_topology(bool cpus_locked)
 static inline void update_numa_cpu_lookup_table(unsigned int cpu,
 						int node) {}
 
+static inline int cpu_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
+{
+	return 0;
+}
+
 #endif /* CONFIG_NUMA */
 
 #if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 76f34346b642..8b03eb44e876 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -312,6 +312,7 @@ raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
 	unsigned long ret;
 
+	barrier_nospec();
 	allow_user_access(to, from, n);
 	ret = __copy_tofrom_user(to, from, n);
 	prevent_user_access(to, from, n);
diff --git a/arch/powerpc/include/asm/vas.h b/arch/powerpc/include/asm/vas.h
index da0b19870570..f93e6b0f5c84 100644
--- a/arch/powerpc/include/asm/vas.h
+++ b/arch/powerpc/include/asm/vas.h
@@ -163,14 +163,4 @@ int vas_copy_crb(void *crb, int offset);
  */
 int vas_paste_crb(struct vas_window *win, int offset, bool re);
 
-/*
- * Return a system-wide unique id for the VAS window @win.
- */
-extern u32 vas_win_id(struct vas_window *win);
-
-/*
- * Return the power bus paste address associated with @win so the caller
- * can map that address into their address space.
- */
-extern u64 vas_win_paste_addr(struct vas_window *win);
 #endif /* __ASM_POWERPC_VAS_H */