Diffstat (limited to 'arch/powerpc')
434 files changed, 2924 insertions, 4427 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 22e2f1113c4c..3eaddb8997a9 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -109,6 +109,7 @@ config PPC # Please keep this list sorted alphabetically. # select ARCH_32BIT_OFF_T if PPC32 + select ARCH_DISABLE_KASAN_INLINE if PPC_RADIX_MMU select ARCH_ENABLE_MEMORY_HOTPLUG select ARCH_ENABLE_MEMORY_HOTREMOVE select ARCH_HAS_COPY_MC if PPC64 @@ -118,7 +119,6 @@ config PPC select ARCH_HAS_DEBUG_WX if STRICT_KERNEL_RWX select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_DMA_MAP_DIRECT if PPC_PSERIES - select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_HUGEPD if HUGETLB_PAGE @@ -155,10 +155,12 @@ config PPC select ARCH_USE_MEMTEST select ARCH_USE_QUEUED_RWLOCKS if PPC_QUEUED_SPINLOCKS select ARCH_USE_QUEUED_SPINLOCKS if PPC_QUEUED_SPINLOCKS + select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT select ARCH_WANT_IPC_PARSE_VERSION select ARCH_WANT_IRQS_OFF_ACTIVATE_MM select ARCH_WANT_LD_ORPHAN_WARN select ARCH_WANTS_MODULES_DATA_IN_VMALLOC if PPC_BOOK3S_32 || PPC_8xx + select ARCH_WANTS_NO_INSTR select ARCH_WEAK_RELEASE_ACQUIRE select BINFMT_ELF select BUILDTIME_TABLE_SORT @@ -190,7 +192,8 @@ config PPC select HAVE_ARCH_JUMP_LABEL select HAVE_ARCH_JUMP_LABEL_RELATIVE select HAVE_ARCH_KASAN if PPC32 && PPC_PAGE_SHIFT <= 14 - select HAVE_ARCH_KASAN_VMALLOC if PPC32 && PPC_PAGE_SHIFT <= 14 + select HAVE_ARCH_KASAN if PPC_RADIX_MMU + select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN select HAVE_ARCH_KFENCE if PPC_BOOK3S_32 || PPC_8xx || 40x select HAVE_ARCH_KGDB select HAVE_ARCH_MMAP_RND_BITS @@ -210,7 +213,7 @@ config PPC select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU) select HAVE_FAST_GUP select HAVE_FTRACE_MCOUNT_RECORD - select HAVE_FUNCTION_DESCRIPTORS if PPC64 && !CPU_LITTLE_ENDIAN + select HAVE_FUNCTION_DESCRIPTORS if PPC64_ELF_ABI_V1 select HAVE_FUNCTION_ERROR_INJECTION select HAVE_FUNCTION_GRAPH_TRACER select HAVE_FUNCTION_TRACER @@ -760,6 +763,22 @@ config PPC_256K_PAGES endchoice +config PAGE_SIZE_4KB + def_bool y + depends on PPC_4K_PAGES + +config PAGE_SIZE_16KB + def_bool y + depends on PPC_16K_PAGES + +config PAGE_SIZE_64KB + def_bool y + depends on PPC_64K_PAGES + +config PAGE_SIZE_256KB + def_bool y + depends on PPC_256K_PAGES + config PPC_PAGE_SHIFT int default 18 if PPC_256K_PAGES diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug index 192f0ed0097f..9f363c143d86 100644 --- a/arch/powerpc/Kconfig.debug +++ b/arch/powerpc/Kconfig.debug @@ -374,4 +374,5 @@ config PPC_FAST_ENDIAN_SWITCH config KASAN_SHADOW_OFFSET hex depends on KASAN - default 0xe0000000 + default 0xe0000000 if PPC32 + default 0xa80e000000000000 if PPC64 diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 45a9caa37b4e..a0cd70712061 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -89,10 +89,10 @@ endif ifdef CONFIG_PPC64 ifndef CONFIG_CC_IS_CLANG -cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1) -cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mcall-aixdesc) -aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1) -aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mabi=elfv2 +cflags-$(CONFIG_PPC64_ELF_ABI_V1) += $(call cc-option,-mabi=elfv1) +cflags-$(CONFIG_PPC64_ELF_ABI_V1) += $(call cc-option,-mcall-aixdesc) +aflags-$(CONFIG_PPC64_ELF_ABI_V1) += $(call cc-option,-mabi=elfv1) +aflags-$(CONFIG_PPC64_ELF_ABI_V2) += -mabi=elfv2 endif endif @@ -141,7 +141,7 @@ endif CFLAGS-$(CONFIG_PPC64) := $(call 
cc-option,-mtraceback=no) ifndef CONFIG_CC_IS_CLANG -ifdef CONFIG_CPU_LITTLE_ENDIAN +ifdef CONFIG_PPC64_ELF_ABI_V2 CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2,$(call cc-option,-mcall-aixdesc)) AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2) else @@ -213,7 +213,7 @@ CHECKFLAGS += -m$(BITS) -D__powerpc__ -D__powerpc$(BITS)__ ifdef CONFIG_CPU_BIG_ENDIAN CHECKFLAGS += -D__BIG_ENDIAN__ else -CHECKFLAGS += -D__LITTLE_ENDIAN__ -D_CALL_ELF=2 +CHECKFLAGS += -D__LITTLE_ENDIAN__ endif ifdef CONFIG_476FPE_ERR46 diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index 008bf0bff186..a9cd2ea4a861 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -38,9 +38,13 @@ BOOTCFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ $(LINUXINCLUDE) ifdef CONFIG_PPC64_BOOT_WRAPPER -BOOTCFLAGS += -m64 +ifdef CONFIG_CPU_LITTLE_ENDIAN +BOOTCFLAGS += -m64 -mcpu=powerpc64le else -BOOTCFLAGS += -m32 +BOOTCFLAGS += -m64 -mcpu=powerpc64 +endif +else +BOOTCFLAGS += -m32 -mcpu=powerpc endif BOOTCFLAGS += -isystem $(shell $(BOOTCC) -print-file-name=include) @@ -49,6 +53,8 @@ ifdef CONFIG_CPU_BIG_ENDIAN BOOTCFLAGS += -mbig-endian else BOOTCFLAGS += -mlittle-endian +endif +ifdef CONFIG_PPC64_ELF_ABI_V2 BOOTCFLAGS += $(call cc-option,-mabi=elfv2) endif diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S index feadee18e271..44544720daae 100644 --- a/arch/powerpc/boot/crt0.S +++ b/arch/powerpc/boot/crt0.S @@ -8,7 +8,8 @@ #include "ppc_asm.h" RELA = 7 -RELACOUNT = 0x6ffffff9 +RELASZ = 8 +RELAENT = 9 .data /* A procedure descriptor used when booting this as a COFF file. @@ -75,34 +76,39 @@ p_base: mflr r10 /* r10 now points to runtime addr of p_base */ bne 11f lwz r9,4(r12) /* get RELA pointer in r9 */ b 12f -11: addis r8,r8,(-RELACOUNT)@ha - cmpwi r8,RELACOUNT@l +11: cmpwi r8,RELASZ + bne .Lcheck_for_relaent + lwz r0,4(r12) /* get RELASZ value in r0 */ + b 12f +.Lcheck_for_relaent: + cmpwi r8,RELAENT bne 12f - lwz r0,4(r12) /* get RELACOUNT value in r0 */ + lwz r14,4(r12) /* get RELAENT value in r14 */ 12: addi r12,r12,8 b 9b /* The relocation section contains a list of relocations. * We now do the R_PPC_RELATIVE ones, which point to words - * which need to be initialized with addend + offset. - * The R_PPC_RELATIVE ones come first and there are RELACOUNT - * of them. 
*/ + * which need to be initialized with addend + offset */ 10: /* skip relocation if we don't have both */ cmpwi r0,0 beq 3f cmpwi r9,0 beq 3f + cmpwi r14,0 + beq 3f add r9,r9,r11 /* Relocate RELA pointer */ + divwu r0,r0,r14 /* RELASZ / RELAENT */ mtctr r0 2: lbz r0,4+3(r9) /* ELF32_R_INFO(reloc->r_info) */ cmpwi r0,22 /* R_PPC_RELATIVE */ - bne 3f + bne .Lnext lwz r12,0(r9) /* reloc->r_offset */ lwz r0,8(r9) /* reloc->r_addend */ add r0,r0,r11 stwx r0,r11,r12 - addi r9,r9,12 +.Lnext: add r9,r9,r14 bdnz 2b /* Do a cache flush for our text, in case the loader didn't */ @@ -160,32 +166,39 @@ p_base: mflr r10 /* r10 now points to runtime addr of p_base */ bne 10f ld r13,8(r11) /* get RELA pointer in r13 */ b 11f -10: addis r12,r12,(-RELACOUNT)@ha - cmpdi r12,RELACOUNT@l - bne 11f - ld r8,8(r11) /* get RELACOUNT value in r8 */ +10: cmpwi r12,RELASZ + bne .Lcheck_for_relaent + lwz r8,8(r11) /* get RELASZ pointer in r8 */ + b 11f +.Lcheck_for_relaent: + cmpwi r12,RELAENT + bne 11f + lwz r14,8(r11) /* get RELAENT pointer in r14 */ 11: addi r11,r11,16 b 9b 12: - cmpdi r13,0 /* check we have both RELA and RELACOUNT */ + cmpdi r13,0 /* check we have both RELA, RELASZ, RELAENT*/ cmpdi cr1,r8,0 beq 3f beq cr1,3f + cmpdi r14,0 + beq 3f /* Calcuate the runtime offset. */ subf r13,r13,r9 /* Run through the list of relocations and process the * R_PPC64_RELATIVE ones. */ + divdu r8,r8,r14 /* RELASZ / RELAENT */ mtctr r8 13: ld r0,8(r9) /* ELF64_R_TYPE(reloc->r_info) */ cmpdi r0,22 /* R_PPC64_RELATIVE */ - bne 3f + bne .Lnext ld r12,0(r9) /* reloc->r_offset */ ld r0,16(r9) /* reloc->r_addend */ add r0,r0,r13 stdx r0,r13,r12 - addi r9,r9,24 +.Lnext: add r9,r9,r14 bdnz 13b /* Do a cache flush for our text, in case the loader didn't */ diff --git a/arch/powerpc/boot/cuboot-hotfoot.c b/arch/powerpc/boot/cuboot-hotfoot.c index 888a6b9bfead..0e5532f855d6 100644 --- a/arch/powerpc/boot/cuboot-hotfoot.c +++ b/arch/powerpc/boot/cuboot-hotfoot.c @@ -70,7 +70,7 @@ static void hotfoot_fixups(void) printf("Fixing devtree for 4M Flash\n"); - /* First fix up the base addresse */ + /* First fix up the base address */ getprop(devp, "reg", regs, sizeof(regs)); regs[0] = 0; regs[1] = 0xffc00000; diff --git a/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi b/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi index 884e01bcb243..7a590c92fe56 100644 --- a/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi @@ -198,4 +198,9 @@ reg = <0xe0000 0x1000>; fsl,has-rstcr; }; + + pmc: power@e0070 { + compatible = "fsl,mpc8548-pmc"; + reg = <0xe0070 0x20>; + }; }; diff --git a/arch/powerpc/boot/dts/microwatt.dts b/arch/powerpc/boot/dts/microwatt.dts index 65b270a90f94..b69db1d275cd 100644 --- a/arch/powerpc/boot/dts/microwatt.dts +++ b/arch/powerpc/boot/dts/microwatt.dts @@ -90,6 +90,8 @@ 64-bit; d-cache-size = <0x1000>; ibm,chip-id = <0>; + ibm,mmu-lpid-bits = <12>; + ibm,mmu-pid-bits = <20>; }; }; diff --git a/arch/powerpc/boot/ops.h b/arch/powerpc/boot/ops.h index 6455fc9a244f..8334bc3cbe49 100644 --- a/arch/powerpc/boot/ops.h +++ b/arch/powerpc/boot/ops.h @@ -200,12 +200,6 @@ void __dt_fixup_mac_addresses(u32 startindex, ...); __dt_fixup_mac_addresses(0, __VA_ARGS__, NULL) -static inline void *find_node_by_linuxphandle(const u32 linuxphandle) -{ - return find_node_by_prop_value(NULL, "linux,phandle", - (char *)&linuxphandle, sizeof(u32)); -} - static inline char *get_path(const void *phandle, char *buf, int len) { if (dt_ops.get_path) diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper 
index 9184eda780fd..55978f32fa77 100755 --- a/arch/powerpc/boot/wrapper +++ b/arch/powerpc/boot/wrapper @@ -162,7 +162,7 @@ while [ "$#" -gt 0 ]; do fi ;; --no-gzip) - # a "feature" of the the wrapper script is that it can be used outside + # a "feature" of the wrapper script is that it can be used outside # the kernel tree. So keeping this around for backwards compatibility. compression= uboot_comp=none diff --git a/arch/powerpc/crypto/aes-spe-glue.c b/arch/powerpc/crypto/aes-spe-glue.c index c2b23b69d7b1..e8dfe9fb0266 100644 --- a/arch/powerpc/crypto/aes-spe-glue.c +++ b/arch/powerpc/crypto/aes-spe-glue.c @@ -404,7 +404,7 @@ static int ppc_xts_decrypt(struct skcipher_request *req) /* * Algorithm definitions. Disabling alignment (cra_alignmask=0) was chosen - * because the e500 platform can handle unaligned reads/writes very efficently. + * because the e500 platform can handle unaligned reads/writes very efficiently. * This improves IPsec thoughput by another few percent. Additionally we assume * that AES context is always aligned to at least 8 bytes because it is created * with kmalloc() in the crypto infrastructure diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h index a7a0572f3846..17e7a778c856 100644 --- a/arch/powerpc/include/asm/book3s/64/hash.h +++ b/arch/powerpc/include/asm/book3s/64/hash.h @@ -18,6 +18,10 @@ #include <asm/book3s/64/hash-4k.h> #endif +#define H_PTRS_PER_PTE (1 << H_PTE_INDEX_SIZE) +#define H_PTRS_PER_PMD (1 << H_PMD_INDEX_SIZE) +#define H_PTRS_PER_PUD (1 << H_PUD_INDEX_SIZE) + /* Bits to set in a PMD/PUD/PGD entry valid bit*/ #define HASH_PMD_VAL_BITS (0x8000000000000000UL) #define HASH_PUD_VAL_BITS (0x8000000000000000UL) diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h index 12e150e615b7..b37a28f62cf6 100644 --- a/arch/powerpc/include/asm/book3s/64/hugetlb.h +++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h @@ -8,10 +8,6 @@ */ void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr); void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr); -extern unsigned long -radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr, - unsigned long len, unsigned long pgoff, - unsigned long flags); extern void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h index 21f780942911..1c4eebbc69c9 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h +++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h @@ -18,6 +18,7 @@ * complete pgtable.h but only a portion of it. 
*/ #include <asm/book3s/64/pgtable.h> +#include <asm/book3s/64/slice.h> #include <asm/task_size_64.h> #include <asm/cpu_has_feature.h> diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index 006cbec70ffe..570a4960cf17 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -4,12 +4,6 @@ #include <asm/page.h> -#ifdef CONFIG_HUGETLB_PAGE -#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA -#endif -#define HAVE_ARCH_UNMAPPED_AREA -#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN - #ifndef __ASSEMBLY__ /* * Page size definition diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index eecff2036869..cb9d5fd39d7f 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -231,6 +231,9 @@ extern unsigned long __pmd_frag_size_shift; #define PTRS_PER_PUD (1 << PUD_INDEX_SIZE) #define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) +#define MAX_PTRS_PER_PTE ((H_PTRS_PER_PTE > R_PTRS_PER_PTE) ? H_PTRS_PER_PTE : R_PTRS_PER_PTE) +#define MAX_PTRS_PER_PMD ((H_PTRS_PER_PMD > R_PTRS_PER_PMD) ? H_PTRS_PER_PMD : R_PTRS_PER_PMD) +#define MAX_PTRS_PER_PUD ((H_PTRS_PER_PUD > R_PTRS_PER_PUD) ? H_PTRS_PER_PUD : R_PTRS_PER_PUD) #define MAX_PTRS_PER_PGD (1 << (H_PGD_INDEX_SIZE > RADIX_PGD_INDEX_SIZE ? \ H_PGD_INDEX_SIZE : RADIX_PGD_INDEX_SIZE)) diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h index d090d9612348..686001eda936 100644 --- a/arch/powerpc/include/asm/book3s/64/radix.h +++ b/arch/powerpc/include/asm/book3s/64/radix.h @@ -35,6 +35,11 @@ #define RADIX_PMD_SHIFT (PAGE_SHIFT + RADIX_PTE_INDEX_SIZE) #define RADIX_PUD_SHIFT (RADIX_PMD_SHIFT + RADIX_PMD_INDEX_SIZE) #define RADIX_PGD_SHIFT (RADIX_PUD_SHIFT + RADIX_PUD_INDEX_SIZE) + +#define R_PTRS_PER_PTE (1 << RADIX_PTE_INDEX_SIZE) +#define R_PTRS_PER_PMD (1 << RADIX_PMD_INDEX_SIZE) +#define R_PTRS_PER_PUD (1 << RADIX_PUD_INDEX_SIZE) + /* * Size of EA range mapped by our pagetables. */ @@ -68,11 +73,11 @@ * * * 3rd quadrant expanded: - * +------------------------------+ - * | | + * +------------------------------+ Highest address (0xc010000000000000) + * +------------------------------+ KASAN shadow end (0xc00fc00000000000) * | | * | | - * +------------------------------+ Kernel vmemmap end (0xc010000000000000) + * +------------------------------+ Kernel vmemmap end/shadow start (0xc00e000000000000) * | | * | 512TB | * | | @@ -91,6 +96,7 @@ * +------------------------------+ Kernel linear (0xc.....) 
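 *
 * (Illustrative arithmetic, not part of the patch: with the generic KASAN
 *  scale shift of 3, shadow(addr) = (addr >> 3) + KASAN_SHADOW_OFFSET, so
 *  the shadow of the shadow start is
 *  (0xc00e000000000000 >> 3) + 0xa80e000000000000 = 0xc00fc00000000000,
 *  which is why the shadow region can end there.)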
*/ +/* For the sizes of the shadow area, see kasan.h */ /* * If we store section details in page->flags we can't increase the MAX_PHYSMEM_BITS diff --git a/arch/powerpc/include/asm/book3s/64/slice.h b/arch/powerpc/include/asm/book3s/64/slice.h index f0d3194ba41b..5fbe18544cbd 100644 --- a/arch/powerpc/include/asm/book3s/64/slice.h +++ b/arch/powerpc/include/asm/book3s/64/slice.h @@ -2,6 +2,16 @@ #ifndef _ASM_POWERPC_BOOK3S_64_SLICE_H #define _ASM_POWERPC_BOOK3S_64_SLICE_H +#ifndef __ASSEMBLY__ + +#ifdef CONFIG_PPC_64S_HASH_MMU +#ifdef CONFIG_HUGETLB_PAGE +#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA +#endif +#define HAVE_ARCH_UNMAPPED_AREA +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN +#endif + #define SLICE_LOW_SHIFT 28 #define SLICE_LOW_TOP (0x100000000ul) #define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT) @@ -13,4 +23,20 @@ #define SLB_ADDR_LIMIT_DEFAULT DEFAULT_MAP_WINDOW_USER64 +struct mm_struct; + +unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, + unsigned long flags, unsigned int psize, + int topdown); + +unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr); + +void slice_set_range_psize(struct mm_struct *mm, unsigned long start, + unsigned long len, unsigned int psize); + +void slice_init_new_context_exec(struct mm_struct *mm); +void slice_setup_new_exec(void); + +#endif /* __ASSEMBLY__ */ + #endif /* _ASM_POWERPC_BOOK3S_64_SLICE_H */ diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h index ab3832b93f0a..4b573a3b7e17 100644 --- a/arch/powerpc/include/asm/checksum.h +++ b/arch/powerpc/include/asm/checksum.h @@ -38,14 +38,15 @@ extern __wsum csum_and_copy_to_user(const void *src, void __user *dst, */ static inline __sum16 csum_fold(__wsum sum) { - unsigned int tmp; - - /* swap the two 16-bit halves of sum */ - __asm__("rlwinm %0,%1,16,0,31" : "=r" (tmp) : "r" (sum)); - /* if there is a carry from adding the two 16-bit halves, - it will carry from the lower half into the upper half, - giving us the correct sum in the upper half. */ - return (__force __sum16)(~((__force u32)sum + tmp) >> 16); + u32 tmp = (__force u32)sum; + + /* + * swap the two 16-bit halves of sum + * if there is a carry from adding the two 16-bit halves, + * it will carry from the lower half into the upper half, + * giving us the correct sum in the upper half. 
+ */ + return (__force __sum16)(~(tmp + rol32(tmp, 16)) >> 16); } static inline u32 from64to32(u64 x) @@ -95,16 +96,15 @@ static __always_inline __wsum csum_add(__wsum csum, __wsum addend) { #ifdef __powerpc64__ u64 res = (__force u64)csum; -#endif + + res += (__force u64)addend; + return (__force __wsum)((u32)res + (res >> 32)); +#else if (__builtin_constant_p(csum) && csum == 0) return addend; if (__builtin_constant_p(addend) && addend == 0) return csum; -#ifdef __powerpc64__ - res += (__force u64)addend; - return (__force __wsum)((u32)res + (res >> 32)); -#else asm("addc %0,%0,%1;" "addze %0,%0;" : "+r" (csum) : "r" (addend) : "xer"); diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h index 409483b2d0ce..1c6316ec4b74 100644 --- a/arch/powerpc/include/asm/code-patching.h +++ b/arch/powerpc/include/asm/code-patching.h @@ -22,10 +22,55 @@ #define BRANCH_SET_LINK 0x1 #define BRANCH_ABSOLUTE 0x2 -bool is_offset_in_branch_range(long offset); -bool is_offset_in_cond_branch_range(long offset); -int create_branch(ppc_inst_t *instr, const u32 *addr, - unsigned long target, int flags); +DECLARE_STATIC_KEY_FALSE(init_mem_is_free); + +/* + * Powerpc branch instruction is : + * + * 0 6 30 31 + * +---------+----------------+---+---+ + * | opcode | LI |AA |LK | + * +---------+----------------+---+---+ + * Where AA = 0 and LK = 0 + * + * LI is a signed 24 bits integer. The real branch offset is computed + * by: imm32 = SignExtend(LI:'0b00', 32); + * + * So the maximum forward branch should be: + * (0x007fffff << 2) = 0x01fffffc = 0x1fffffc + * The maximum backward branch should be: + * (0xff800000 << 2) = 0xfe000000 = -0x2000000 + */ +static inline bool is_offset_in_branch_range(long offset) +{ + return (offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3)); +} + +static inline bool is_offset_in_cond_branch_range(long offset) +{ + return offset >= -0x8000 && offset <= 0x7fff && !(offset & 0x3); +} + +static inline int create_branch(ppc_inst_t *instr, const u32 *addr, + unsigned long target, int flags) +{ + long offset; + + *instr = ppc_inst(0); + offset = target; + if (! (flags & BRANCH_ABSOLUTE)) + offset = offset - (unsigned long)addr; + + /* Check we can represent the target in the instruction format */ + if (!is_offset_in_branch_range(offset)) + return 1; + + /* Mask out the flags and target, so they don't step on each other. */ + *instr = ppc_inst(0x48000000 | (flags & 0x3) | (offset & 0x03FFFFFC)); + + return 0; +} + int create_cond_branch(ppc_inst_t *instr, const u32 *addr, unsigned long target, int flags); int patch_branch(u32 *addr, unsigned long target, int flags); @@ -87,7 +132,7 @@ bool is_conditional_branch(ppc_inst_t instr); static inline unsigned long ppc_function_entry(void *func) { -#ifdef PPC64_ELF_ABI_v2 +#ifdef CONFIG_PPC64_ELF_ABI_V2 u32 *insn = func; /* @@ -112,7 +157,7 @@ static inline unsigned long ppc_function_entry(void *func) return (unsigned long)(insn + 2); else return (unsigned long)func; -#elif defined(PPC64_ELF_ABI_v1) +#elif defined(CONFIG_PPC64_ELF_ABI_V1) /* * On PPC64 ABIv1 the function pointer actually points to the * function's descriptor. 
The first entry in the descriptor is the @@ -126,7 +171,7 @@ static inline unsigned long ppc_function_entry(void *func) static inline unsigned long ppc_global_function_entry(void *func) { -#ifdef PPC64_ELF_ABI_v2 +#ifdef CONFIG_PPC64_ELF_ABI_V2 /* PPC64 ABIv2 the global entry point is at the address */ return (unsigned long)func; #else @@ -143,7 +188,7 @@ static inline unsigned long ppc_global_function_entry(void *func) static inline unsigned long ppc_kallsyms_lookup_name(const char *name) { unsigned long addr; -#ifdef PPC64_ELF_ABI_v1 +#ifdef CONFIG_PPC64_ELF_ABI_V1 /* check for dot variant */ char dot_name[1 + KSYM_NAME_LEN]; bool dot_appended = false; @@ -164,7 +209,7 @@ static inline unsigned long ppc_kallsyms_lookup_name(const char *name) if (!addr && dot_appended) /* Let's try the original non-dot symbol lookup */ addr = kallsyms_lookup_name(name); -#elif defined(PPC64_ELF_ABI_v2) +#elif defined(CONFIG_PPC64_ELF_ABI_V2) addr = kallsyms_lookup_name(name); if (addr) addr = ppc_function_entry((void *)addr); @@ -174,14 +219,13 @@ static inline unsigned long ppc_kallsyms_lookup_name(const char *name) return addr; } -#ifdef CONFIG_PPC64 /* * Some instruction encodings commonly used in dynamic ftracing * and function live patching. */ /* This must match the definition of STK_GOT in <asm/ppc_asm.h> */ -#ifdef PPC64_ELF_ABI_v2 +#ifdef CONFIG_PPC64_ELF_ABI_V2 #define R2_STACK_OFFSET 24 #else #define R2_STACK_OFFSET 40 @@ -191,6 +235,5 @@ static inline unsigned long ppc_kallsyms_lookup_name(const char *name) /* usually preceded by a mflr r0 */ #define PPC_INST_STD_LR PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF) -#endif /* CONFIG_PPC64 */ #endif /* _ASM_POWERPC_CODE_PATCHING_H */ diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index e85c849214a2..549eb6dd146f 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -440,6 +440,10 @@ static inline void cpu_feature_keys_init(void) { } #define CPU_FTRS_POWER9_DD2_2 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1 | \ CPU_FTR_P9_TM_HV_ASSIST | \ CPU_FTR_P9_TM_XER_SO_BUG) +#define CPU_FTRS_POWER9_DD2_3 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1 | \ + CPU_FTR_P9_TM_HV_ASSIST | \ + CPU_FTR_P9_TM_XER_SO_BUG | \ + CPU_FTR_DAWR) #define CPU_FTRS_POWER10 (CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\ CPU_FTR_MMCRA | CPU_FTR_SMT | \ @@ -469,14 +473,16 @@ static inline void cpu_feature_keys_init(void) { } #define CPU_FTRS_POSSIBLE \ (CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | CPU_FTRS_POWER8 | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_VSX_COMP | CPU_FTRS_POWER9 | \ - CPU_FTRS_POWER9_DD2_1 | CPU_FTRS_POWER9_DD2_2 | CPU_FTRS_POWER10) + CPU_FTRS_POWER9_DD2_1 | CPU_FTRS_POWER9_DD2_2 | \ + CPU_FTRS_POWER9_DD2_3 | CPU_FTRS_POWER10) #else #define CPU_FTRS_POSSIBLE \ (CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | \ CPU_FTRS_POWER6 | CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | \ CPU_FTRS_POWER8 | CPU_FTRS_CELL | CPU_FTRS_PA6T | \ CPU_FTR_VSX_COMP | CPU_FTR_ALTIVEC_COMP | CPU_FTRS_POWER9 | \ - CPU_FTRS_POWER9_DD2_1 | CPU_FTRS_POWER9_DD2_2 | CPU_FTRS_POWER10) + CPU_FTRS_POWER9_DD2_1 | CPU_FTRS_POWER9_DD2_2 | \ + CPU_FTRS_POWER9_DD2_3 | CPU_FTRS_POWER10) #endif /* CONFIG_CPU_LITTLE_ENDIAN */ #endif #else @@ -541,14 +547,16 @@ enum { #define CPU_FTRS_ALWAYS \ (CPU_FTRS_POSSIBLE & ~CPU_FTR_HVMODE & CPU_FTRS_POWER7 & \ CPU_FTRS_POWER8E & CPU_FTRS_POWER8 & CPU_FTRS_POWER9 & \ - CPU_FTRS_POWER9_DD2_1 & CPU_FTRS_DT_CPU_BASE) + CPU_FTRS_POWER9_DD2_1 & CPU_FTRS_POWER9_DD2_2 & \ + CPU_FTRS_POWER10 & CPU_FTRS_DT_CPU_BASE) 
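/*
 * (Clarifying note, not part of the patch: CPU_FTRS_POSSIBLE above is the
 * bitwise OR, i.e. the union, of the listed feature sets, while
 * CPU_FTRS_ALWAYS is their bitwise AND, i.e. the intersection, so a
 * feature is treated as compile-time guaranteed only if every supported
 * CPU, now including POWER9 DD2.2 and POWER10, provides it.)
 */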
#else #define CPU_FTRS_ALWAYS \ (CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & \ CPU_FTRS_POWER6 & CPU_FTRS_POWER7 & CPU_FTRS_CELL & \ CPU_FTRS_PA6T & CPU_FTRS_POWER8 & CPU_FTRS_POWER8E & \ ~CPU_FTR_HVMODE & CPU_FTRS_POSSIBLE & CPU_FTRS_POWER9 & \ - CPU_FTRS_POWER9_DD2_1 & CPU_FTRS_DT_CPU_BASE) + CPU_FTRS_POWER9_DD2_1 & CPU_FTRS_POWER9_DD2_2 & \ + CPU_FTRS_POWER10 & CPU_FTRS_DT_CPU_BASE) #endif /* CONFIG_CPU_LITTLE_ENDIAN */ #endif #else diff --git a/arch/powerpc/include/asm/drmem.h b/arch/powerpc/include/asm/drmem.h index 4265d5e95c2c..13bf6dee8e2d 100644 --- a/arch/powerpc/include/asm/drmem.h +++ b/arch/powerpc/include/asm/drmem.h @@ -23,6 +23,9 @@ struct drmem_lmb_info { u64 lmb_size; }; +struct device_node; +struct property; + extern struct drmem_lmb_info *drmem_info; static inline struct drmem_lmb *drmem_lmb_next(struct drmem_lmb *lmb, diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index bd513fd49be9..514dd056c2c8 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -333,8 +333,6 @@ static inline bool eeh_enabled(void) static inline void eeh_show_enabled(void) { } -static inline void eeh_dev_phb_init_dynamic(struct pci_controller *phb) { } - static inline int eeh_check_failure(const volatile void __iomem *token) { return 0; @@ -354,11 +352,7 @@ static inline int eeh_phb_pe_create(struct pci_controller *phb) { return 0; } #endif /* CONFIG_EEH */ #if defined(CONFIG_PPC_PSERIES) && defined(CONFIG_EEH) -void pseries_eeh_init_edev(struct pci_dn *pdn); void pseries_eeh_init_edev_recursive(struct pci_dn *pdn); -#else -static inline void pseries_eeh_add_device_early(struct pci_dn *pdn) { } -static inline void pseries_eeh_add_device_tree_early(struct pci_dn *pdn) { } #endif #ifdef CONFIG_PPC64 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h index 971589a21bc0..79f1c480b5eb 100644 --- a/arch/powerpc/include/asm/elf.h +++ b/arch/powerpc/include/asm/elf.h @@ -160,7 +160,7 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, * even if DLINFO_ARCH_ITEMS goes to zero or is undefined. * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */ -#define ARCH_DLINFO \ +#define COMMON_ARCH_DLINFO \ do { \ /* Handle glibc compatibility. */ \ NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \ @@ -173,6 +173,18 @@ do { \ ARCH_DLINFO_CACHE_GEOMETRY; \ } while (0) +#define ARCH_DLINFO \ +do { \ + COMMON_ARCH_DLINFO; \ + NEW_AUX_ENT(AT_MINSIGSTKSZ, get_min_sigframe_size()); \ +} while (0) + +#define COMPAT_ARCH_DLINFO \ +do { \ + COMMON_ARCH_DLINFO; \ + NEW_AUX_ENT(AT_MINSIGSTKSZ, get_min_sigframe_size_compat()); \ +} while (0) + /* Relocate the kernel image to @final_address */ void relocate(unsigned long final_address); diff --git a/arch/powerpc/include/asm/fadump-internal.h b/arch/powerpc/include/asm/fadump-internal.h index 81bcb9abb371..27f9e11eda28 100644 --- a/arch/powerpc/include/asm/fadump-internal.h +++ b/arch/powerpc/include/asm/fadump-internal.h @@ -50,7 +50,7 @@ struct fadump_crash_info_header { u64 elfcorehdr_addr; u32 crashing_cpu; struct pt_regs regs; - struct cpumask online_mask; + struct cpumask cpu_mask; }; struct fadump_memory_range { diff --git a/arch/powerpc/include/asm/fsl_85xx_cache_sram.h b/arch/powerpc/include/asm/fsl_85xx_cache_sram.h deleted file mode 100644 index 0235a0447baa..000000000000 --- a/arch/powerpc/include/asm/fsl_85xx_cache_sram.h +++ /dev/null @@ -1,35 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright 2009 Freescale Semiconductor, Inc. 
- * - * Cache SRAM handling for QorIQ platform - * - * Author: Vivek Mahajan <vivek.mahajan@freescale.com> - - * This file is derived from the original work done - * by Sylvain Munaut for the Bestcomm SRAM allocator. - */ - -#ifndef __ASM_POWERPC_FSL_85XX_CACHE_SRAM_H__ -#define __ASM_POWERPC_FSL_85XX_CACHE_SRAM_H__ - -#include <asm/rheap.h> -#include <linux/spinlock.h> - -/* - * Cache-SRAM - */ - -struct mpc85xx_cache_sram { - phys_addr_t base_phys; - void *base_virt; - unsigned int size; - rh_info_t *rh; - spinlock_t lock; -}; - -extern void mpc85xx_cache_sram_free(void *ptr); -extern void *mpc85xx_cache_sram_alloc(unsigned int size, - phys_addr_t *phys, unsigned int align); - -#endif /* __AMS_POWERPC_FSL_85XX_CACHE_SRAM_H__ */ diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h index d83758acd1c7..3cee7115441b 100644 --- a/arch/powerpc/include/asm/ftrace.h +++ b/arch/powerpc/include/asm/ftrace.h @@ -64,7 +64,7 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, * those. */ #define ARCH_HAS_SYSCALL_MATCH_SYM_NAME -#ifdef PPC64_ELF_ABI_v1 +#ifdef CONFIG_PPC64_ELF_ABI_V1 static inline bool arch_syscall_match_sym_name(const char *sym, const char *name) { /* We need to skip past the initial dot, and the __se_sys alias */ @@ -83,10 +83,10 @@ static inline bool arch_syscall_match_sym_name(const char *sym, const char *name (!strncmp(sym, "ppc32_", 6) && !strcmp(sym + 6, name + 4)) || (!strncmp(sym, "ppc64_", 6) && !strcmp(sym + 6, name + 4)); } -#endif /* PPC64_ELF_ABI_v1 */ +#endif /* CONFIG_PPC64_ELF_ABI_V1 */ #endif /* CONFIG_FTRACE_SYSCALLS */ -#ifdef CONFIG_PPC64 +#if defined(CONFIG_PPC64) && defined(CONFIG_FUNCTION_TRACER) #include <asm/paca.h> static inline void this_cpu_disable_ftrace(void) @@ -110,11 +110,13 @@ static inline u8 this_cpu_get_ftrace_enabled(void) return get_paca()->ftrace_enabled; } +void ftrace_free_init_tramp(void); #else /* CONFIG_PPC64 */ static inline void this_cpu_disable_ftrace(void) { } static inline void this_cpu_enable_ftrace(void) { } static inline void this_cpu_set_ftrace_enabled(u8 ftrace_enabled) { } static inline u8 this_cpu_get_ftrace_enabled(void) { return 1; } +static inline void ftrace_free_init_tramp(void) { } #endif /* CONFIG_PPC64 */ #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index 8a5674fd120d..32ce0fb7548f 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -24,7 +24,7 @@ static inline int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, unsigned long len) { - if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled()) + if (IS_ENABLED(CONFIG_PPC_64S_HASH_MMU) && !radix_enabled()) return slice_is_hugepage_only_range(mm, addr, len); return 0; } diff --git a/arch/powerpc/include/asm/inst.h b/arch/powerpc/include/asm/inst.h index 80b6d74146c6..b49aae9f6f27 100644 --- a/arch/powerpc/include/asm/inst.h +++ b/arch/powerpc/include/asm/inst.h @@ -158,13 +158,10 @@ static inline char *__ppc_inst_as_str(char str[PPC_INST_STR_LEN], ppc_inst_t x) __str; \ }) -static inline int copy_inst_from_kernel_nofault(ppc_inst_t *inst, u32 *src) +static inline int __copy_inst_from_kernel_nofault(ppc_inst_t *inst, u32 *src) { unsigned int val, suffix; - if (unlikely(!is_kernel_addr((unsigned long)src))) - return -ERANGE; - /* See https://github.com/ClangBuiltLinux/linux/issues/1521 */ #if defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION < 140000 val = suffix = 0; @@ -181,4 +178,12 @@ Efault: return 
-EFAULT; } +static inline int copy_inst_from_kernel_nofault(ppc_inst_t *inst, u32 *src) +{ + if (unlikely(!is_kernel_addr((unsigned long)src))) + return -ERANGE; + + return __copy_inst_from_kernel_nofault(inst, src); +} + #endif /* _ASM_POWERPC_INST_H */ diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h index f964ef5c57d8..b14f54d789d2 100644 --- a/arch/powerpc/include/asm/interrupt.h +++ b/arch/powerpc/include/asm/interrupt.h @@ -324,22 +324,46 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte } #endif + /* If data relocations are enabled, it's safe to use nmi_enter() */ + if (mfmsr() & MSR_DR) { + nmi_enter(); + return; + } + /* - * Do not use nmi_enter() for pseries hash guest taking a real-mode + * But do not use nmi_enter() for pseries hash guest taking a real-mode * NMI because not everything it touches is within the RMA limit. */ - if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) || - !firmware_has_feature(FW_FEATURE_LPAR) || - radix_enabled() || (mfmsr() & MSR_DR)) - nmi_enter(); + if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && + firmware_has_feature(FW_FEATURE_LPAR) && + !radix_enabled()) + return; + + /* + * Likewise, don't use it if we have some form of instrumentation (like + * KASAN shadow) that is not safe to access in real mode (even on radix) + */ + if (IS_ENABLED(CONFIG_KASAN)) + return; + + /* Otherwise, it should be safe to call it */ + nmi_enter(); } static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state) { - if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) || - !firmware_has_feature(FW_FEATURE_LPAR) || - radix_enabled() || (mfmsr() & MSR_DR)) + if (mfmsr() & MSR_DR) { + // nmi_exit if relocations are on nmi_exit(); + } else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && + firmware_has_feature(FW_FEATURE_LPAR) && + !radix_enabled()) { + // no nmi_exit for a pseries hash guest taking a real mode exception + } else if (IS_ENABLED(CONFIG_KASAN)) { + // no nmi_exit for KASAN in real mode + } else { + nmi_exit(); + } /* * nmi does not call nap_adjust_return because nmi should not create @@ -407,7 +431,8 @@ static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct inter * Specific handlers may have additional restrictions. */ #define DEFINE_INTERRUPT_HANDLER_RAW(func) \ -static __always_inline long ____##func(struct pt_regs *regs); \ +static __always_inline __no_sanitize_address __no_kcsan long \ +____##func(struct pt_regs *regs); \ \ interrupt_handler long func(struct pt_regs *regs) \ { \ @@ -421,7 +446,8 @@ interrupt_handler long func(struct pt_regs *regs) \ } \ NOKPROBE_SYMBOL(func); \ \ -static __always_inline long ____##func(struct pt_regs *regs) +static __always_inline __no_sanitize_address __no_kcsan long \ +____##func(struct pt_regs *regs) /** * DECLARE_INTERRUPT_HANDLER - Declare synchronous interrupt handler function @@ -541,7 +567,8 @@ static __always_inline void ____##func(struct pt_regs *regs) * body with a pair of curly brackets. 
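 *
 * A minimal usage sketch (the handler name here is illustrative, not one
 * added by this patch):
 *
 *	DEFINE_INTERRUPT_HANDLER_NMI(example_nmi)
 *	{
 *		return 0;	// NMI-safe work only; must return long
 *	}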
*/ #define DEFINE_INTERRUPT_HANDLER_NMI(func) \ -static __always_inline long ____##func(struct pt_regs *regs); \ +static __always_inline __no_sanitize_address __no_kcsan long \ +____##func(struct pt_regs *regs); \ \ interrupt_handler long func(struct pt_regs *regs) \ { \ @@ -558,7 +585,8 @@ interrupt_handler long func(struct pt_regs *regs) \ } \ NOKPROBE_SYMBOL(func); \ \ -static __always_inline long ____##func(struct pt_regs *regs) +static __always_inline __no_sanitize_address __no_kcsan long \ +____##func(struct pt_regs *regs) /* Interrupt handlers */ diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index fee979d3a1aa..c5a5f7c9b231 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h @@ -38,8 +38,6 @@ extern struct pci_dev *isa_bridge_pcidev; #define SIO_CONFIG_RA 0x398 #define SIO_CONFIG_RD 0x399 -#define SLOW_DOWN_IO - /* 32 bits uses slightly different variables for the various IO * bases. Most of this file only uses _IO_BASE though which we * define properly based on the platform diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index d7912b66c874..7e29c73e3dd4 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -51,13 +51,11 @@ struct iommu_table_ops { int (*xchg_no_kill)(struct iommu_table *tbl, long index, unsigned long *hpa, - enum dma_data_direction *direction, - bool realmode); + enum dma_data_direction *direction); void (*tce_kill)(struct iommu_table *tbl, unsigned long index, - unsigned long pages, - bool realmode); + unsigned long pages); __be64 *(*useraddrptr)(struct iommu_table *tbl, long index, bool alloc); #endif diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h index 3c478e5ef24c..a6be4025cba2 100644 --- a/arch/powerpc/include/asm/kasan.h +++ b/arch/powerpc/include/asm/kasan.h @@ -30,9 +30,31 @@ #define KASAN_SHADOW_OFFSET ASM_CONST(CONFIG_KASAN_SHADOW_OFFSET) +#ifdef CONFIG_PPC32 #define KASAN_SHADOW_END (-(-KASAN_SHADOW_START >> KASAN_SHADOW_SCALE_SHIFT)) +#elif defined(CONFIG_PPC_BOOK3S_64) +/* + * The shadow ends before the highest accessible address + * because we don't need a shadow for the shadow. 
Instead: + * c00e000000000000 << 3 + a80e000000000000 = c00fc00000000000 + */ +#define KASAN_SHADOW_END 0xc00fc00000000000UL +#endif #ifdef CONFIG_KASAN +#ifdef CONFIG_PPC_BOOK3S_64 +DECLARE_STATIC_KEY_FALSE(powerpc_kasan_enabled_key); + +static __always_inline bool kasan_arch_is_ready(void) +{ + if (static_branch_likely(&powerpc_kasan_enabled_key)) + return true; + return false; +} + +#define kasan_arch_is_ready kasan_arch_is_ready +#endif + void kasan_early_init(void); void kasan_mmu_init(void); void kasan_init(void); diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h index fb2237809d63..d751ddd08110 100644 --- a/arch/powerpc/include/asm/kup.h +++ b/arch/powerpc/include/asm/kup.h @@ -52,7 +52,6 @@ __bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write) return false; } -static inline void __kuap_assert_locked(void) { } static inline void __kuap_lock(void) { } static inline void __kuap_save_and_lock(struct pt_regs *regs) { } static inline void kuap_user_restore(struct pt_regs *regs) { } diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h index b6d31bff5209..c8882d9b86c2 100644 --- a/arch/powerpc/include/asm/kvm_book3s_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h @@ -14,9 +14,6 @@ #define XICS_MFRR 0xc #define XICS_IPI 2 /* interrupt source # for IPIs */ -/* LPIDs we support with this build -- runtime limit may be lower */ -#define KVMPPC_NR_LPIDS (LPID_RSVD + 1) - /* Maximum number of threads per physical core */ #define MAX_SMT_THREADS 8 diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index faf301d0dec0..2909a88acd16 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -36,7 +36,12 @@ #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE #include <asm/kvm_book3s_asm.h> /* for MAX_SMT_THREADS */ #define KVM_MAX_VCPU_IDS (MAX_SMT_THREADS * KVM_MAX_VCORES) -#define KVM_MAX_NESTED_GUESTS KVMPPC_NR_LPIDS + +/* + * Limit the nested partition table to 4096 entries (because that's what + * hardware supports). Both guest and host use this value. + */ +#define KVM_MAX_NESTED_GUESTS_SHIFT 12 #else #define KVM_MAX_VCPU_IDS KVM_MAX_VCPUS @@ -327,8 +332,7 @@ struct kvm_arch { struct list_head uvmem_pfns; struct mutex mmu_setup_lock; /* nests inside vcpu mutexes */ u64 l1_ptcr; - int max_nested_lpid; - struct kvm_nested_guest *nested_guests[KVM_MAX_NESTED_GUESTS]; + struct idr kvm_nested_guest_idr; /* This array can grow quite large, keep it at the end */ struct kvmppc_vcore *vcores[KVM_MAX_VCORES]; #endif diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 838d4cb460b7..9f625af3b65b 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -177,8 +177,6 @@ extern void kvmppc_setup_partition_table(struct kvm *kvm); extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, struct kvm_create_spapr_tce_64 *args); -extern struct kvmppc_spapr_tce_table *kvmppc_find_table( - struct kvm *kvm, unsigned long liobn); #define kvmppc_ioba_validate(stt, ioba, npages) \ (iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \ (stt)->size, (ioba), (npages)) ? 
\ @@ -685,7 +683,7 @@ extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, bool line_status); extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu); extern void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu); -extern void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu); +extern bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu); static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu) { @@ -723,7 +721,7 @@ static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 ir int level, bool line_status) { return -ENODEV; } static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { } static inline void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu) { } -static inline void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) { } +static inline bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) { return true; } static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu) { return 0; } @@ -789,13 +787,6 @@ long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags, unsigned long dest, unsigned long src); long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr, unsigned long slb_v, unsigned int status, bool data); -unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu); -unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu); -unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server); -int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, - unsigned long mfrr); -int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr); -int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr); void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu); /* @@ -877,7 +868,6 @@ int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu, struct kvm_dirty_tlb *cfg); long kvmppc_alloc_lpid(void); -void kvmppc_claim_lpid(long lpid); void kvmppc_free_lpid(long lpid); void kvmppc_init_lpid(unsigned long nr_lpids); diff --git a/arch/powerpc/include/asm/linkage.h b/arch/powerpc/include/asm/linkage.h index 1f00d2891d69..b71b9582e754 100644 --- a/arch/powerpc/include/asm/linkage.h +++ b/arch/powerpc/include/asm/linkage.h @@ -4,7 +4,7 @@ #include <asm/types.h> -#ifdef PPC64_ELF_ABI_v1 +#ifdef CONFIG_PPC64_ELF_ABI_V1 #define cond_syscall(x) \ asm ("\t.weak " #x "\n\t.set " #x ", sys_ni_syscall\n" \ "\t.weak ." #x "\n\t.set ." 
#x ", .sys_ni_syscall\n") diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index b8527a74bd4d..3f25bd3e14eb 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -34,15 +34,10 @@ extern void mm_iommu_init(struct mm_struct *mm); extern void mm_iommu_cleanup(struct mm_struct *mm); extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm, unsigned long ua, unsigned long size); -extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm( - struct mm_struct *mm, unsigned long ua, unsigned long size); extern struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries); extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, unsigned long ua, unsigned int pageshift, unsigned long *hpa); -extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, - unsigned long ua, unsigned int pageshift, unsigned long *hpa); -extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua); extern bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa, unsigned int pageshift, unsigned long *size); extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem); diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h index 857d9ff24295..09e2ffd360bb 100644 --- a/arch/powerpc/include/asm/module.h +++ b/arch/powerpc/include/asm/module.h @@ -41,10 +41,8 @@ struct mod_arch_specific { #ifdef CONFIG_DYNAMIC_FTRACE unsigned long tramp; -#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS unsigned long tramp_regs; #endif -#endif /* List of BUG addresses, source line numbers and filenames */ struct list_head bug_list; diff --git a/arch/powerpc/include/asm/nohash/tlbflush.h b/arch/powerpc/include/asm/nohash/tlbflush.h index c08d25e3e626..698935d4f72d 100644 --- a/arch/powerpc/include/asm/nohash/tlbflush.h +++ b/arch/powerpc/include/asm/nohash/tlbflush.h @@ -30,7 +30,6 @@ struct mm_struct; extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); -extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); #ifdef CONFIG_PPC_8xx static inline void local_flush_tlb_mm(struct mm_struct *mm) @@ -45,7 +44,18 @@ static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned lon { asm volatile ("tlbie %0; sync" : : "r" (vmaddr) : "memory"); } + +static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end) +{ + start &= PAGE_MASK; + + if (end - start <= PAGE_SIZE) + asm volatile ("tlbie %0; sync" : : "r" (start) : "memory"); + else + asm volatile ("sync; tlbia; isync" : : : "memory"); +} #else +extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); extern void local_flush_tlb_mm(struct mm_struct *mm); extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 8330968ca346..4d7aaab82702 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -12,6 +12,7 @@ #ifdef CONFIG_PPC64 +#include <linux/cache.h> #include <linux/string.h> #include <asm/types.h> #include <asm/lppaca.h> @@ -152,16 +153,9 @@ struct paca_struct { struct tlb_core_data tcd; #endif /* CONFIG_PPC_BOOK3E */ -#ifdef CONFIG_PPC_BOOK3S #ifdef CONFIG_PPC_64S_HASH_MMU -#ifdef CONFIG_PPC_MM_SLICES unsigned char mm_ctx_low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE]; unsigned char 
mm_ctx_high_slices_psize[SLICE_ARRAY_SIZE]; -#else - u16 mm_ctx_user_psize; - u16 mm_ctx_sllp; -#endif -#endif #endif /* diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index f2c5c26869f1..e5f75c70eda8 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -216,6 +216,9 @@ static inline bool pfn_valid(unsigned long pfn) #define __pa(x) ((phys_addr_t)(unsigned long)(x) - VIRT_PHYS_OFFSET) #else #ifdef CONFIG_PPC64 + +#define VIRTUAL_WARN_ON(x) WARN_ON(IS_ENABLED(CONFIG_DEBUG_VIRTUAL) && (x)) + /* * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit. @@ -223,13 +226,13 @@ static inline bool pfn_valid(unsigned long pfn) */ #define __va(x) \ ({ \ - VIRTUAL_BUG_ON((unsigned long)(x) >= PAGE_OFFSET); \ + VIRTUAL_WARN_ON((unsigned long)(x) >= PAGE_OFFSET); \ (void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET); \ }) #define __pa(x) \ ({ \ - VIRTUAL_BUG_ON((unsigned long)(x) < PAGE_OFFSET); \ + VIRTUAL_WARN_ON((unsigned long)(x) < PAGE_OFFSET); \ (unsigned long)(x) & 0x0fffffffffffffffUL; \ }) @@ -333,6 +336,5 @@ static inline unsigned long kaslr_offset(void) #include <asm-generic/memory_model.h> #endif /* __ASSEMBLY__ */ -#include <asm/slice.h> #endif /* _ASM_POWERPC_PAGE_H */ diff --git a/arch/powerpc/include/asm/parport.h b/arch/powerpc/include/asm/parport.h index 8abfb8f7c33d..42cc321ed754 100644 --- a/arch/powerpc/include/asm/parport.h +++ b/arch/powerpc/include/asm/parport.h @@ -11,7 +11,7 @@ #define _ASM_POWERPC_PARPORT_H #ifdef __KERNEL__ -#include <asm/prom.h> +#include <linux/of_irq.h> static int parport_pc_find_nonpci_ports (int autoirq, int autodma) { diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index 90f488fa4c17..c85f901227c9 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h @@ -170,10 +170,10 @@ static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus) return bus->sysdata; } -#ifndef CONFIG_PPC64 - extern int pci_device_from_OF_node(struct device_node *node, u8 *bus, u8 *devfn); +#ifndef CONFIG_PPC64 + extern void pci_create_OF_bus_map(void); #else /* CONFIG_PPC64 */ @@ -235,16 +235,6 @@ struct pci_dn *add_sriov_vf_pdns(struct pci_dev *pdev); void remove_sriov_vf_pdns(struct pci_dev *pdev); #endif -static inline int pci_device_from_OF_node(struct device_node *np, - u8 *bus, u8 *devfn) -{ - if (!PCI_DN(np)) - return -ENODEV; - *bus = PCI_DN(np)->busno; - *devfn = PCI_DN(np)->devfn; - return 0; -} - #if defined(CONFIG_EEH) static inline struct eeh_dev *pdn_to_eeh_dev(struct pci_dn *pdn) { diff --git a/arch/powerpc/include/asm/pnv-pci.h b/arch/powerpc/include/asm/pnv-pci.h index b3f480799352..8afc92860dbb 100644 --- a/arch/powerpc/include/asm/pnv-pci.h +++ b/arch/powerpc/include/asm/pnv-pci.h @@ -9,6 +9,7 @@ #include <linux/pci.h> #include <linux/pci_hotplug.h> #include <linux/irq.h> +#include <linux/of.h> #include <misc/cxl-base.h> #include <asm/opal-api.h> diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 82f1f0041c6f..89beabf5325c 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -127,8 +127,53 @@ /* opcode and xopcode for instructions */ -#define OP_TRAP 3 -#define OP_TRAP_64 2 +#define OP_PREFIX 1 +#define OP_TRAP_64 2 +#define OP_TRAP 3 +#define OP_SC 17 +#define OP_19 19 +#define OP_31 31 +#define OP_LWZ 32 +#define OP_LWZU 33 
+#define OP_LBZ 34 +#define OP_LBZU 35 +#define OP_STW 36 +#define OP_STWU 37 +#define OP_STB 38 +#define OP_STBU 39 +#define OP_LHZ 40 +#define OP_LHZU 41 +#define OP_LHA 42 +#define OP_LHAU 43 +#define OP_STH 44 +#define OP_STHU 45 +#define OP_LMW 46 +#define OP_STMW 47 +#define OP_LFS 48 +#define OP_LFSU 49 +#define OP_LFD 50 +#define OP_LFDU 51 +#define OP_STFS 52 +#define OP_STFSU 53 +#define OP_STFD 54 +#define OP_STFDU 55 +#define OP_LQ 56 +#define OP_LD 58 +#define OP_STD 62 + +#define OP_19_XOP_RFID 18 +#define OP_19_XOP_RFMCI 38 +#define OP_19_XOP_RFDI 39 +#define OP_19_XOP_RFI 50 +#define OP_19_XOP_RFCI 51 +#define OP_19_XOP_RFSCV 82 +#define OP_19_XOP_HRFID 274 +#define OP_19_XOP_URFID 306 +#define OP_19_XOP_STOP 370 +#define OP_19_XOP_DOZE 402 +#define OP_19_XOP_NAP 434 +#define OP_19_XOP_SLEEP 466 +#define OP_19_XOP_RVWINKLE 498 #define OP_31_XOP_TRAP 4 #define OP_31_XOP_LDX 21 @@ -150,6 +195,8 @@ #define OP_31_XOP_LHZUX 311 #define OP_31_XOP_MSGSNDP 142 #define OP_31_XOP_MSGCLRP 174 +#define OP_31_XOP_MTMSR 146 +#define OP_31_XOP_MTMSRD 178 #define OP_31_XOP_TLBIE 306 #define OP_31_XOP_MFSPR 339 #define OP_31_XOP_LWAX 341 @@ -208,42 +255,6 @@ /* VMX Vector Store Instructions */ #define OP_31_XOP_STVX 231 -/* Prefixed Instructions */ -#define OP_PREFIX 1 - -#define OP_31 31 -#define OP_LWZ 32 -#define OP_STFS 52 -#define OP_STFSU 53 -#define OP_STFD 54 -#define OP_STFDU 55 -#define OP_LD 58 -#define OP_LWZU 33 -#define OP_LBZ 34 -#define OP_LBZU 35 -#define OP_STW 36 -#define OP_STWU 37 -#define OP_STD 62 -#define OP_STB 38 -#define OP_STBU 39 -#define OP_LHZ 40 -#define OP_LHZU 41 -#define OP_LHA 42 -#define OP_LHAU 43 -#define OP_STH 44 -#define OP_STHU 45 -#define OP_LMW 46 -#define OP_STMW 47 -#define OP_LFS 48 -#define OP_LFSU 49 -#define OP_LFD 50 -#define OP_LFDU 51 -#define OP_STFS 52 -#define OP_STFSU 53 -#define OP_STFD 54 -#define OP_STFDU 55 -#define OP_LQ 56 - /* sorted alphabetically */ #define PPC_INST_BCCTR_FLUSH 0x4c400420 #define PPC_INST_COPY 0x7c20060c @@ -285,13 +296,6 @@ #define PPC_INST_TRECHKPT 0x7c0007dd #define PPC_INST_TRECLAIM 0x7c00075d #define PPC_INST_TSR 0x7c0005dd -#define PPC_INST_LD 0xe8000000 -#define PPC_INST_STD 0xf8000000 -#define PPC_INST_ADDIS 0x3c000000 -#define PPC_INST_ADD 0x7c000214 -#define PPC_INST_DIVD 0x7c0003d2 -#define PPC_INST_BRANCH 0x48000000 -#define PPC_INST_BL 0x48000001 #define PPC_INST_BRANCH_COND 0x40800000 /* Prefixes */ @@ -352,6 +356,10 @@ #define PPC_HIGHER(v) (((v) >> 32) & 0xffff) #define PPC_HIGHEST(v) (((v) >> 48) & 0xffff) +/* LI Field */ +#define PPC_LI_MASK 0x03fffffc +#define PPC_LI(v) ((v) & PPC_LI_MASK) + /* * Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a * larx with EH set as an illegal instruction. 
@@ -460,10 +468,10 @@ (0x100000c7 | ___PPC_RT(vrt) | ___PPC_RA(vra) | ___PPC_RB(vrb) | __PPC_RC21) #define PPC_RAW_VCMPEQUB_RC(vrt, vra, vrb) \ (0x10000006 | ___PPC_RT(vrt) | ___PPC_RA(vra) | ___PPC_RB(vrb) | __PPC_RC21) -#define PPC_RAW_LD(r, base, i) (PPC_INST_LD | ___PPC_RT(r) | ___PPC_RA(base) | IMM_DS(i)) +#define PPC_RAW_LD(r, base, i) (0xe8000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_DS(i)) #define PPC_RAW_LWZ(r, base, i) (0x80000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i)) #define PPC_RAW_LWZX(t, a, b) (0x7c00002e | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_RAW_STD(r, base, i) (PPC_INST_STD | ___PPC_RS(r) | ___PPC_RA(base) | IMM_DS(i)) +#define PPC_RAW_STD(r, base, i) (0xf8000000 | ___PPC_RS(r) | ___PPC_RA(base) | IMM_DS(i)) #define PPC_RAW_STDCX(s, a, b) (0x7c0001ad | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_RAW_LFSX(t, a, b) (0x7c00042e | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_RAW_STFSX(s, a, b) (0x7c00052e | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b)) @@ -474,8 +482,8 @@ #define PPC_RAW_ADDE(t, a, b) (0x7c000114 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_RAW_ADDZE(t, a) (0x7c000194 | ___PPC_RT(t) | ___PPC_RA(a)) #define PPC_RAW_ADDME(t, a) (0x7c0001d4 | ___PPC_RT(t) | ___PPC_RA(a)) -#define PPC_RAW_ADD(t, a, b) (PPC_INST_ADD | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_RAW_ADD_DOT(t, a, b) (PPC_INST_ADD | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1) +#define PPC_RAW_ADD(t, a, b) (0x7c000214 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_ADD_DOT(t, a, b) (0x7c000214 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1) #define PPC_RAW_ADDC(t, a, b) (0x7c000014 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_RAW_ADDC_DOT(t, a, b) (0x7c000014 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1) #define PPC_RAW_NOP() PPC_RAW_ORI(0, 0, 0) @@ -571,7 +579,8 @@ #define PPC_RAW_MTSPR(spr, d) (0x7c0003a6 | ___PPC_RS(d) | __PPC_SPR(spr)) #define PPC_RAW_EIEIO() (0x7c0006ac) -#define PPC_RAW_BRANCH(addr) (PPC_INST_BRANCH | ((addr) & 0x03fffffc)) +#define PPC_RAW_BRANCH(offset) (0x48000000 | PPC_LI(offset)) +#define PPC_RAW_BL(offset) (0x48000001 | PPC_LI(offset)) /* Deal with instructions that older assemblers aren't aware of */ #define PPC_BCCTR_FLUSH stringify_in_c(.long PPC_INST_BCCTR_FLUSH) diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 4dea2d963738..83c02f5a7f2a 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -149,7 +149,7 @@ #define __STK_REG(i) (112 + ((i)-14)*8) #define STK_REG(i) __STK_REG(__REG_##i) -#ifdef PPC64_ELF_ABI_v2 +#ifdef CONFIG_PPC64_ELF_ABI_V2 #define STK_GOT 24 #define __STK_PARAM(i) (32 + ((i)-3)*8) #else @@ -158,7 +158,7 @@ #endif #define STK_PARAM(i) __STK_PARAM(__REG_##i) -#ifdef PPC64_ELF_ABI_v2 +#ifdef CONFIG_PPC64_ELF_ABI_V2 #define _GLOBAL(name) \ .align 2 ; \ diff --git a/arch/powerpc/include/asm/probes.h b/arch/powerpc/include/asm/probes.h index c5d984700d24..6f66e358aa37 100644 --- a/arch/powerpc/include/asm/probes.h +++ b/arch/powerpc/include/asm/probes.h @@ -8,6 +8,7 @@ * Copyright IBM Corporation, 2012 */ #include <linux/types.h> +#include <asm/disassemble.h> typedef u32 ppc_opcode_t; #define BREAKPOINT_INSTRUCTION 0x7fe00008 /* trap */ @@ -31,6 +32,41 @@ typedef u32 ppc_opcode_t; #define MSR_SINGLESTEP (MSR_SE) #endif +static inline bool can_single_step(u32 inst) +{ + switch (get_op(inst)) { + case OP_TRAP_64: return false; + case OP_TRAP: return false; + case 
OP_SC: return false; + case OP_19: + switch (get_xop(inst)) { + case OP_19_XOP_RFID: return false; + case OP_19_XOP_RFMCI: return false; + case OP_19_XOP_RFDI: return false; + case OP_19_XOP_RFI: return false; + case OP_19_XOP_RFCI: return false; + case OP_19_XOP_RFSCV: return false; + case OP_19_XOP_HRFID: return false; + case OP_19_XOP_URFID: return false; + case OP_19_XOP_STOP: return false; + case OP_19_XOP_DOZE: return false; + case OP_19_XOP_NAP: return false; + case OP_19_XOP_SLEEP: return false; + case OP_19_XOP_RVWINKLE: return false; + } + break; + case OP_31: + switch (get_xop(inst)) { + case OP_31_XOP_TRAP: return false; + case OP_31_XOP_TRAP_64: return false; + case OP_31_XOP_MTMSR: return false; + case OP_31_XOP_MTMSRD: return false; + } + break; + } + return true; +} + /* Enable single stepping for the current task */ static inline void enable_single_step(struct pt_regs *regs) { diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 39c25021030f..fdfaae194ddd 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -392,8 +392,6 @@ static inline void prefetchw(const void *x) #define spin_lock_prefetch(x) prefetchw(x) -#define HAVE_ARCH_PICK_MMAP_LAYOUT - /* asm stubs */ extern unsigned long isa300_idle_stop_noloss(unsigned long psscr_val); extern unsigned long isa300_idle_stop_mayloss(unsigned long psscr_val); diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index 42f89e2d8f04..a03403695cd4 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h @@ -120,7 +120,7 @@ struct pt_regs STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE) #define STACK_FRAME_MARKER 12 -#ifdef PPC64_ELF_ABI_v2 +#ifdef CONFIG_PPC64_ELF_ABI_V2 #define STACK_FRAME_MIN_SIZE 32 #else #define STACK_FRAME_MIN_SIZE STACK_FRAME_OVERHEAD diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 2835f6363228..1e8b2e04e626 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -417,7 +417,6 @@ #define FSCR_DSCR __MASK(FSCR_DSCR_LG) #define FSCR_INTR_CAUSE (ASM_CONST(0xFF) << 56) /* interrupt cause */ #define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */ -#define HFSCR_PREFIX __MASK(FSCR_PREFIX_LG) #define HFSCR_MSGP __MASK(FSCR_MSGP_LG) #define HFSCR_TAR __MASK(FSCR_TAR_LG) #define HFSCR_EBB __MASK(FSCR_EBB_LG) @@ -474,8 +473,6 @@ #ifndef SPRN_LPID #define SPRN_LPID 0x13F /* Logical Partition Identifier */ #endif -#define LPID_RSVD_POWER7 0x3ff /* Reserved LPID for partn switching */ -#define LPID_RSVD 0xfff /* Reserved LPID for partn switching */ #define SPRN_HMER 0x150 /* Hypervisor maintenance exception reg */ #define HMER_DEBUG_TRIG (1ul << (63 - 17)) /* Debug trigger */ #define SPRN_HMEER 0x151 /* Hyp maintenance exception enable reg */ diff --git a/arch/powerpc/include/asm/signal.h b/arch/powerpc/include/asm/signal.h index 99e1c6de27bc..922d43700fb4 100644 --- a/arch/powerpc/include/asm/signal.h +++ b/arch/powerpc/include/asm/signal.h @@ -9,4 +9,9 @@ struct pt_regs; void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags); +unsigned long get_min_sigframe_size_32(void); +unsigned long get_min_sigframe_size_64(void); +unsigned long get_min_sigframe_size(void); +unsigned long get_min_sigframe_size_compat(void); + #endif /* _ASM_POWERPC_SIGNAL_H */ diff --git a/arch/powerpc/include/asm/slice.h b/arch/powerpc/include/asm/slice.h deleted file mode 100644 index 0bdd9c62eca0..000000000000 --- 
a/arch/powerpc/include/asm/slice.h +++ /dev/null @@ -1,46 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_POWERPC_SLICE_H -#define _ASM_POWERPC_SLICE_H - -#ifdef CONFIG_PPC_BOOK3S_64 -#include <asm/book3s/64/slice.h> -#endif - -#ifndef __ASSEMBLY__ - -struct mm_struct; - -#ifdef CONFIG_PPC_MM_SLICES - -#ifdef CONFIG_HUGETLB_PAGE -#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA -#endif -#define HAVE_ARCH_UNMAPPED_AREA -#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN - -unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, - unsigned long flags, unsigned int psize, - int topdown); - -unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr); - -void slice_set_range_psize(struct mm_struct *mm, unsigned long start, - unsigned long len, unsigned int psize); - -void slice_init_new_context_exec(struct mm_struct *mm); -void slice_setup_new_exec(void); - -#else /* CONFIG_PPC_MM_SLICES */ - -static inline void slice_init_new_context_exec(struct mm_struct *mm) {} - -static inline unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr) -{ - return 0; -} - -#endif /* CONFIG_PPC_MM_SLICES */ - -#endif /* __ASSEMBLY__ */ - -#endif /* _ASM_POWERPC_SLICE_H */ diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index 60ab739a5e3b..f63505d74932 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -189,8 +189,6 @@ extern void __cpu_die(unsigned int cpu); #define smp_setup_cpu_maps() #define thread_group_shares_l2 0 #define thread_group_shares_l3 0 -static inline void inhibit_secondary_onlining(void) {} -static inline void uninhibit_secondary_onlining(void) {} static inline const struct cpumask *cpu_sibling_mask(int cpu) { return cpumask_of(cpu); diff --git a/arch/powerpc/include/asm/svm.h b/arch/powerpc/include/asm/svm.h index 85580b30aba4..a02bd54b8948 100644 --- a/arch/powerpc/include/asm/svm.h +++ b/arch/powerpc/include/asm/svm.h @@ -10,6 +10,8 @@ #ifdef CONFIG_PPC_SVM +#include <asm/reg.h> + static inline bool is_secure_guest(void) { return mfmsr() & MSR_S; diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h index 1f43ef696033..aee25e3ebf96 100644 --- a/arch/powerpc/include/asm/switch_to.h +++ b/arch/powerpc/include/asm/switch_to.h @@ -62,6 +62,15 @@ static inline void disable_kernel_altivec(void) #else static inline void save_altivec(struct task_struct *t) { } static inline void __giveup_altivec(struct task_struct *t) { } +static inline void enable_kernel_altivec(void) +{ + BUILD_BUG(); +} + +static inline void disable_kernel_altivec(void) +{ + BUILD_BUG(); +} #endif #ifdef CONFIG_VSX diff --git a/arch/powerpc/include/asm/task_size_64.h b/arch/powerpc/include/asm/task_size_64.h index 38fdf8041d12..5a709951c901 100644 --- a/arch/powerpc/include/asm/task_size_64.h +++ b/arch/powerpc/include/asm/task_size_64.h @@ -72,4 +72,12 @@ #define STACK_TOP_MAX TASK_SIZE_USER64 #define STACK_TOP (is_32bit_task() ? STACK_TOP_USER32 : STACK_TOP_USER64) +#define arch_get_mmap_base(addr, base) \ + (((addr) > DEFAULT_MAP_WINDOW) ? (base) + TASK_SIZE - DEFAULT_MAP_WINDOW : (base)) + +#define arch_get_mmap_end(addr, len, flags) \ + (((addr) > DEFAULT_MAP_WINDOW) || \ + (((flags) & MAP_FIXED) && ((addr) + (len) > DEFAULT_MAP_WINDOW)) ? 
TASK_SIZE : \ + DEFAULT_MAP_WINDOW) + #endif /* _ASM_POWERPC_TASK_SIZE_64_H */ diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h index 924b2157882f..1e5643a9b1f2 100644 --- a/arch/powerpc/include/asm/time.h +++ b/arch/powerpc/include/asm/time.h @@ -24,6 +24,7 @@ extern unsigned long tb_ticks_per_jiffy; extern unsigned long tb_ticks_per_usec; extern unsigned long tb_ticks_per_sec; extern struct clock_event_device decrementer_clockevent; +extern u64 decrementer_max; extern void generic_calibrate_decr(void); diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h index 36fcafb1fd6d..8a4d4f4d9749 100644 --- a/arch/powerpc/include/asm/topology.h +++ b/arch/powerpc/include/asm/topology.h @@ -111,14 +111,10 @@ static inline void unmap_cpu_from_node(unsigned long cpu) {} #endif /* CONFIG_NUMA */ #if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR) -extern int find_and_online_cpu_nid(int cpu); +void find_and_update_cpu_nid(int cpu); extern int cpu_to_coregroup_id(int cpu); #else -static inline int find_and_online_cpu_nid(int cpu) -{ - return 0; -} - +static inline void find_and_update_cpu_nid(int cpu) {} static inline int cpu_to_coregroup_id(int cpu) { #ifdef CONFIG_SMP diff --git a/arch/powerpc/include/asm/types.h b/arch/powerpc/include/asm/types.h index 84078c28c1a2..93157a661dcc 100644 --- a/arch/powerpc/include/asm/types.h +++ b/arch/powerpc/include/asm/types.h @@ -11,14 +11,6 @@ #include <uapi/asm/types.h> -#ifdef __powerpc64__ -#if defined(_CALL_ELF) && _CALL_ELF == 2 -#define PPC64_ELF_ABI_v2 1 -#else -#define PPC64_ELF_ABI_v1 1 -#endif -#endif /* __powerpc64__ */ - #ifndef __ASSEMBLY__ typedef __vector128 vector128; diff --git a/arch/powerpc/include/asm/vas.h b/arch/powerpc/include/asm/vas.h index 83afcb6c194b..c36f71e01c0f 100644 --- a/arch/powerpc/include/asm/vas.h +++ b/arch/powerpc/include/asm/vas.h @@ -126,7 +126,7 @@ static inline void vas_user_win_add_mm_context(struct vas_user_win_ref *ref) * Receive window attributes specified by the (in-kernel) owner of window. 
*/ struct vas_rx_win_attr { - void *rx_fifo; + u64 rx_fifo; int rx_fifo_size; int wcreds_max; diff --git a/arch/powerpc/include/uapi/asm/auxvec.h b/arch/powerpc/include/uapi/asm/auxvec.h index 7af21dc0e320..aa7c16215453 100644 --- a/arch/powerpc/include/uapi/asm/auxvec.h +++ b/arch/powerpc/include/uapi/asm/auxvec.h @@ -48,6 +48,8 @@ #define AT_L3_CACHESIZE 46 #define AT_L3_CACHEGEOMETRY 47 -#define AT_VECTOR_SIZE_ARCH 14 /* entries in ARCH_DLINFO */ +#define AT_MINSIGSTKSZ 51 /* stack needed for signal delivery */ + +#define AT_VECTOR_SIZE_ARCH 15 /* entries in ARCH_DLINFO */ #endif diff --git a/arch/powerpc/include/uapi/asm/signal.h b/arch/powerpc/include/uapi/asm/signal.h index 37d41d87c45b..a5dfe84f50ab 100644 --- a/arch/powerpc/include/uapi/asm/signal.h +++ b/arch/powerpc/include/uapi/asm/signal.h @@ -62,8 +62,13 @@ typedef struct { #define SA_RESTORER 0x04000000U +#ifdef __powerpc64__ +#define MINSIGSTKSZ 8192 +#define SIGSTKSZ 32768 +#else #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 +#endif #include <asm-generic/signal-defs.h> diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 4ddd161aef32..2e2a2a9bcf43 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -33,6 +33,17 @@ KASAN_SANITIZE_early_32.o := n KASAN_SANITIZE_cputable.o := n KASAN_SANITIZE_prom_init.o := n KASAN_SANITIZE_btext.o := n +KASAN_SANITIZE_paca.o := n +KASAN_SANITIZE_setup_64.o := n +KASAN_SANITIZE_mce.o := n +KASAN_SANITIZE_mce_power.o := n + +# we have to be particularly careful in ppc64 to exclude code that +# runs with translations off, as we cannot access the shadow with +# translations off. However, ppc32 can sanitize this. +ifdef CONFIG_PPC64 +KASAN_SANITIZE_traps.o := n +endif ifdef CONFIG_KASAN CFLAGS_early_32.o += -DDISABLE_BRANCH_PROFILING @@ -68,7 +79,7 @@ obj-$(CONFIG_PPC_BOOK3S_IDLE) += idle_book3s.o procfs-y := proc_powerpc.o obj-$(CONFIG_PROC_FS) += $(procfs-y) rtaspci-$(CONFIG_PPC64)-$(CONFIG_PCI) := rtas_pci.o -obj-$(CONFIG_PPC_RTAS) += rtas.o rtas-rtc.o $(rtaspci-y-y) +obj-$(CONFIG_PPC_RTAS) += rtas_entry.o rtas.o rtas-rtc.o $(rtaspci-y-y) obj-$(CONFIG_PPC_RTAS_DAEMON) += rtasd.o obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o obj-$(CONFIG_RTAS_PROC) += rtas-proc.o diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c index 9d9d56b574cc..8f69bb07e500 100644 --- a/arch/powerpc/kernel/btext.c +++ b/arch/powerpc/kernel/btext.c @@ -10,9 +10,9 @@ #include <linux/export.h> #include <linux/memblock.h> #include <linux/pgtable.h> +#include <linux/of.h> #include <asm/sections.h> -#include <asm/prom.h> #include <asm/btext.h> #include <asm/page.h> #include <asm/mmu.h> @@ -45,8 +45,7 @@ unsigned long disp_BAT[2] __initdata = {0, 0}; static unsigned char vga_font[cmapsz]; -int boot_text_mapped __force_data = 0; -int force_printk_to_btext = 0; +static int boot_text_mapped __force_data; extern void rmci_on(void); extern void rmci_off(void); diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c index 00b0992be3e7..f502337dd37d 100644 --- a/arch/powerpc/kernel/cacheinfo.c +++ b/arch/powerpc/kernel/cacheinfo.c @@ -18,7 +18,6 @@ #include <linux/of.h> #include <linux/percpu.h> #include <linux/slab.h> -#include <asm/prom.h> #include <asm/cputhreads.h> #include <asm/smp.h> diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index ae0fdef0ac11..a5dbfccd2047 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -12,9 +12,9 @@ #include <linux/init.h> #include <linux/export.h> 
#include <linux/jump_label.h> +#include <linux/of.h> #include <asm/cputable.h> -#include <asm/prom.h> /* for PTRRELOC on ARCH=ppc */ #include <asm/mce.h> #include <asm/mmu.h> #include <asm/setup.h> @@ -487,11 +487,29 @@ static struct cpu_spec __initdata cpu_specs[] = { .machine_check_early = __machine_check_early_realmode_p9, .platform = "power9", }, - { /* Power9 DD2.2 or later */ + { /* Power9 DD2.2 */ + .pvr_mask = 0xffffefff, + .pvr_value = 0x004e0202, + .cpu_name = "POWER9 (raw)", + .cpu_features = CPU_FTRS_POWER9_DD2_2, + .cpu_user_features = COMMON_USER_POWER9, + .cpu_user_features2 = COMMON_USER2_POWER9, + .mmu_features = MMU_FTRS_POWER9, + .icache_bsize = 128, + .dcache_bsize = 128, + .num_pmcs = 6, + .pmc_type = PPC_PMC_IBM, + .oprofile_cpu_type = "ppc64/power9", + .cpu_setup = __setup_cpu_power9, + .cpu_restore = __restore_cpu_power9, + .machine_check_early = __machine_check_early_realmode_p9, + .platform = "power9", + }, + { /* Power9 DD2.3 or later */ .pvr_mask = 0xffff0000, .pvr_value = 0x004e0000, .cpu_name = "POWER9 (raw)", - .cpu_features = CPU_FTRS_POWER9_DD2_2, + .cpu_features = CPU_FTRS_POWER9_DD2_3, .cpu_user_features = COMMON_USER_POWER9, .cpu_user_features2 = COMMON_USER2_POWER9, .mmu_features = MMU_FTRS_POWER9, @@ -2025,7 +2043,7 @@ static struct cpu_spec * __init setup_cpu_spec(unsigned long offset, * oprofile_cpu_type already has a value, then we are * possibly overriding a real PVR with a logical one, * and, in that case, keep the current value for - * oprofile_cpu_type. Futhermore, let's ensure that the + * oprofile_cpu_type. Furthermore, let's ensure that the * fix for the PMAO bug is enabled on compatibility mode. */ if (old.oprofile_cpu_type != NULL) { @@ -2119,7 +2137,7 @@ void __init cpu_feature_keys_init(void) struct static_key_true mmu_feature_keys[NUM_MMU_FTR_KEYS] = { [0 ... NUM_MMU_FTR_KEYS - 1] = STATIC_KEY_TRUE_INIT }; -EXPORT_SYMBOL_GPL(mmu_feature_keys); +EXPORT_SYMBOL(mmu_feature_keys); void __init mmu_feature_keys_init(void) { diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index 32b4a97f1b79..9a3b85bfc83f 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c @@ -12,9 +12,9 @@ #include <linux/crash_dump.h> #include <linux/io.h> #include <linux/memblock.h> +#include <linux/of.h> #include <asm/code-patching.h> #include <asm/kdump.h> -#include <asm/prom.h> #include <asm/firmware.h> #include <linux/uio.h> #include <asm/rtas.h> diff --git a/arch/powerpc/kernel/dawr.c b/arch/powerpc/kernel/dawr.c index 64e423d2fe0f..30d4eca88d17 100644 --- a/arch/powerpc/kernel/dawr.c +++ b/arch/powerpc/kernel/dawr.c @@ -27,7 +27,7 @@ int set_dawr(int nr, struct arch_hw_breakpoint *brk) dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) >> 3; /* * DAWR length is stored in field MDR bits 48:53. Matches range in - * doublewords (64 bits) baised by -1 eg. 0b000000=1DW and + * doublewords (64 bits) biased by -1 eg. 0b000000=1DW and * 0b111111=64DW. * brk->hw_len is in bytes. * This aligns up to double word size, shifts and does the bias. 
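The dawr.c hunk just above ends on the comment describing how DAWR watchpoint lengths are encoded: a count of 64-bit doublewords, biased by -1. A minimal sketch of that encoding, using a hypothetical helper name (the actual computation lives inline in set_dawr() after the comment and is not part of this hunk):

static unsigned long dawr_len_encode(unsigned long hw_len_bytes)
{
	/* Round the byte length up to whole 64-bit doublewords. */
	unsigned long len_dw = (hw_len_bytes + 7) >> 3;

	/* Apply the -1 bias: 1 DW -> 0b000000, 64 DW -> 0b111111. */
	return len_dw - 1;
}

So an 8-byte watchpoint encodes as 0 and a 512-byte one as 63, matching the 0b000000=1DW and 0b111111=64DW examples in the comment; per that comment the result is then shifted into the MDR field (bits 48:53) of the DAWRX image.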
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index 7d1b2c4a4891..2ad365c21afa 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -10,6 +10,7 @@ #include <linux/jump_label.h> #include <linux/libfdt.h> #include <linux/memblock.h> +#include <linux/of_fdt.h> #include <linux/printk.h> #include <linux/sched.h> #include <linux/string.h> @@ -19,7 +20,6 @@ #include <asm/dt_cpu_ftrs.h> #include <asm/mce.h> #include <asm/mmu.h> -#include <asm/prom.h> #include <asm/setup.h> @@ -774,20 +774,26 @@ static __init void cpufeatures_cpu_quirks(void) if ((version & 0xffffefff) == 0x004e0200) { /* DD2.0 has no feature flag */ cur_cpu_spec->cpu_features |= CPU_FTR_P9_RADIX_PREFETCH_BUG; + cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR); } else if ((version & 0xffffefff) == 0x004e0201) { cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1; cur_cpu_spec->cpu_features |= CPU_FTR_P9_RADIX_PREFETCH_BUG; + cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR); } else if ((version & 0xffffefff) == 0x004e0202) { cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST; cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_XER_SO_BUG; cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1; + cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR); + } else if ((version & 0xffffefff) == 0x004e0203) { + cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST; + cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_XER_SO_BUG; + cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1; } else if ((version & 0xffff0000) == 0x004e0000) { /* DD2.1 and up have DD2_1 */ cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1; } if ((version & 0xffff0000) == 0x004e0000) { - cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR); cur_cpu_spec->cpu_features |= CPU_FTR_P9_TIDR; } diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 28bb1e7263a6..ab316e155ea9 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -1329,7 +1329,7 @@ int eeh_pe_set_option(struct eeh_pe *pe, int option) /* * EEH functionality could possibly be disabled, just - * return error for the case. And the EEH functinality + * return error for the case. And the EEH functionality * isn't expected to be disabled on one specific PE. */ switch (option) { @@ -1804,7 +1804,7 @@ static int eeh_debugfs_break_device(struct pci_dev *pdev) * PE freeze. Using the in_8() accessor skips the eeh detection hook * so the freeze hook so the EEH Detection machinery won't be * triggered here. This is to match the usual behaviour of EEH - * where the HW will asyncronously freeze a PE and it's up to + * where the HW will asynchronously freeze a PE and it's up to * the kernel to notice and deal with it. * * 3. Turn Memory space back on. 
This is more important for VFs diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 422f80b5b27b..260273e56431 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -16,7 +16,6 @@ #include <asm/eeh_event.h> #include <asm/ppc-pci.h> #include <asm/pci-bridge.h> -#include <asm/prom.h> #include <asm/rtas.h> struct eeh_rmv_data { diff --git a/arch/powerpc/kernel/eeh_event.c b/arch/powerpc/kernel/eeh_event.c index a7a8dc182efb..c23a454af08a 100644 --- a/arch/powerpc/kernel/eeh_event.c +++ b/arch/powerpc/kernel/eeh_event.c @@ -143,7 +143,7 @@ int __eeh_send_failure_event(struct eeh_pe *pe) int eeh_send_failure_event(struct eeh_pe *pe) { /* - * If we've manually supressed recovery events via debugfs + * If we've manually suppressed recovery events via debugfs * then just drop it on the floor. */ if (eeh_debugfs_no_recover) { diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c index 845e024321d4..d2873d17d2b1 100644 --- a/arch/powerpc/kernel/eeh_pe.c +++ b/arch/powerpc/kernel/eeh_pe.c @@ -13,6 +13,7 @@ #include <linux/export.h> #include <linux/gfp.h> #include <linux/kernel.h> +#include <linux/of.h> #include <linux/pci.h> #include <linux/string.h> @@ -301,7 +302,7 @@ struct eeh_pe *eeh_pe_get(struct pci_controller *phb, int pe_no) * @new_pe_parent. * * If @new_pe_parent is NULL then the new PE will be inserted under - * directly under the the PHB. + * directly under the PHB. */ int eeh_pe_tree_insert(struct eeh_dev *edev, struct eeh_pe *new_pe_parent) { diff --git a/arch/powerpc/kernel/eeh_sysfs.c b/arch/powerpc/kernel/eeh_sysfs.c index 429620da73ba..706e1eb95efe 100644 --- a/arch/powerpc/kernel/eeh_sysfs.c +++ b/arch/powerpc/kernel/eeh_sysfs.c @@ -6,6 +6,7 @@ * * Send comments and feedback to Linas Vepstas <linas@austin.ibm.com> */ +#include <linux/of.h> #include <linux/pci.h> #include <linux/stat.h> #include <asm/ppc-pci.h> diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 7748c278d13c..1d599df6f169 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -555,52 +555,3 @@ ret_from_mcheck_exc: _ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc) #endif /* CONFIG_BOOKE */ #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */ - -/* - * PROM code for specific machines follows. Put it - * here so it's easy to add arch-specific sections later. - * -- Cort - */ -#ifdef CONFIG_PPC_RTAS -/* - * On CHRP, the Run-Time Abstraction Services (RTAS) have to be - * called with the MMU off. 
- */ -_GLOBAL(enter_rtas) - stwu r1,-INT_FRAME_SIZE(r1) - mflr r0 - stw r0,INT_FRAME_SIZE+4(r1) - LOAD_REG_ADDR(r4, rtas) - lis r6,1f@ha /* physical return address for rtas */ - addi r6,r6,1f@l - tophys(r6,r6) - lwz r8,RTASENTRY(r4) - lwz r4,RTASBASE(r4) - mfmsr r9 - stw r9,8(r1) - LOAD_REG_IMMEDIATE(r0,MSR_KERNEL) - mtmsr r0 /* disable interrupts so SRR0/1 don't get trashed */ - li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR) - mtlr r6 - stw r1, THREAD + RTAS_SP(r2) - mtspr SPRN_SRR0,r8 - mtspr SPRN_SRR1,r9 - rfi -1: - lis r8, 1f@h - ori r8, r8, 1f@l - LOAD_REG_IMMEDIATE(r9,MSR_KERNEL) - mtspr SPRN_SRR0,r8 - mtspr SPRN_SRR1,r9 - rfi /* Reactivate MMU translation */ -1: - lwz r8,INT_FRAME_SIZE+4(r1) /* get return address */ - lwz r9,8(r1) /* original msr value */ - addi r1,r1,INT_FRAME_SIZE - li r0,0 - stw r0, THREAD + RTAS_SP(r2) - mtlr r8 - mtmsr r9 - blr /* return to caller */ -_ASM_NOKPROBE_SYMBOL(enter_rtas) -#endif /* CONFIG_PPC_RTAS */ diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 9581906b5ee9..01ace4c56104 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -264,156 +264,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) addi r1,r1,SWITCH_FRAME_SIZE blr -#ifdef CONFIG_PPC_RTAS -/* - * On CHRP, the Run-Time Abstraction Services (RTAS) have to be - * called with the MMU off. - * - * In addition, we need to be in 32b mode, at least for now. - * - * Note: r3 is an input parameter to rtas, so don't trash it... - */ -_GLOBAL(enter_rtas) - mflr r0 - std r0,16(r1) - stdu r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space. */ - - /* Because RTAS is running in 32b mode, it clobbers the high order half - * of all registers that it saves. We therefore save those registers - * RTAS might touch to the stack. (r0, r3-r13 are caller saved) - */ - SAVE_GPR(2, r1) /* Save the TOC */ - SAVE_GPR(13, r1) /* Save paca */ - SAVE_NVGPRS(r1) /* Save the non-volatiles */ - - mfcr r4 - std r4,_CCR(r1) - mfctr r5 - std r5,_CTR(r1) - mfspr r6,SPRN_XER - std r6,_XER(r1) - mfdar r7 - std r7,_DAR(r1) - mfdsisr r8 - std r8,_DSISR(r1) - - /* Temporary workaround to clear CR until RTAS can be modified to - * ignore all bits. - */ - li r0,0 - mtcr r0 - -#ifdef CONFIG_BUG - /* There is no way it is acceptable to get here with interrupts enabled, - * check it with the asm equivalent of WARN_ON - */ - lbz r0,PACAIRQSOFTMASK(r13) -1: tdeqi r0,IRQS_ENABLED - EMIT_WARN_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING -#endif - - /* Hard-disable interrupts */ - mfmsr r6 - rldicl r7,r6,48,1 - rotldi r7,r7,16 - mtmsrd r7,1 - - /* Unfortunately, the stack pointer and the MSR are also clobbered, - * so they are saved in the PACA which allows us to restore - * our original state after RTAS returns. - */ - std r1,PACAR1(r13) - std r6,PACASAVEDMSR(r13) - - /* Setup our real return addr */ - LOAD_REG_ADDR(r4,rtas_return_loc) - clrldi r4,r4,2 /* convert to realmode address */ - mtlr r4 - - li r0,0 - ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI - andc r0,r6,r0 - - li r9,1 - rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG) - ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE - andc r6,r0,r9 - -__enter_rtas: - sync /* disable interrupts so SRR0/1 */ - mtmsrd r0 /* don't get trashed */ - - LOAD_REG_ADDR(r4, rtas) - ld r5,RTASENTRY(r4) /* get the rtas->entry value */ - ld r4,RTASBASE(r4) /* get the rtas->base value */ - - mtspr SPRN_SRR0,r5 - mtspr SPRN_SRR1,r6 - RFI_TO_KERNEL - b . 
/* prevent speculative execution */ - -rtas_return_loc: - FIXUP_ENDIAN - - /* - * Clear RI and set SF before anything. - */ - mfmsr r6 - li r0,MSR_RI - andc r6,r6,r0 - sldi r0,r0,(MSR_SF_LG - MSR_RI_LG) - or r6,r6,r0 - sync - mtmsrd r6 - - /* relocation is off at this point */ - GET_PACA(r4) - clrldi r4,r4,2 /* convert to realmode address */ - - bcl 20,31,$+4 -0: mflr r3 - ld r3,(1f-0b)(r3) /* get &rtas_restore_regs */ - - ld r1,PACAR1(r4) /* Restore our SP */ - ld r4,PACASAVEDMSR(r4) /* Restore our MSR */ - - mtspr SPRN_SRR0,r3 - mtspr SPRN_SRR1,r4 - RFI_TO_KERNEL - b . /* prevent speculative execution */ -_ASM_NOKPROBE_SYMBOL(__enter_rtas) -_ASM_NOKPROBE_SYMBOL(rtas_return_loc) - - .align 3 -1: .8byte rtas_restore_regs - -rtas_restore_regs: - /* relocation is on at this point */ - REST_GPR(2, r1) /* Restore the TOC */ - REST_GPR(13, r1) /* Restore paca */ - REST_NVGPRS(r1) /* Restore the non-volatiles */ - - GET_PACA(r13) - - ld r4,_CCR(r1) - mtcr r4 - ld r5,_CTR(r1) - mtctr r5 - ld r6,_XER(r1) - mtspr SPRN_XER,r6 - ld r7,_DAR(r1) - mtdar r7 - ld r8,_DSISR(r1) - mtdsisr r8 - - addi r1,r1,SWITCH_FRAME_SIZE /* Unstack our frame */ - ld r0,16(r1) /* get return address */ - - mtlr r0 - blr /* return to caller */ - -#endif /* CONFIG_PPC_RTAS */ - _GLOBAL(enter_prom) mflr r0 std r0,16(r1) diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 4c09c6688ac6..ea0a073abd96 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -25,9 +25,10 @@ #include <linux/cma.h> #include <linux/hugetlb.h> #include <linux/debugfs.h> +#include <linux/of.h> +#include <linux/of_fdt.h> #include <asm/page.h> -#include <asm/prom.h> #include <asm/fadump.h> #include <asm/fadump-internal.h> #include <asm/setup.h> @@ -73,8 +74,8 @@ static struct cma *fadump_cma; * The total size of fadump reserved memory covers for boot memory size * + cpu data size + hpte size and metadata. * Initialize only the area equivalent to boot memory size for CMA use. - * The reamining portion of fadump reserved memory will be not given - * to CMA and pages for thoes will stay reserved. boot memory size is + * The remaining portion of fadump reserved memory will be not given + * to CMA and pages for those will stay reserved. boot memory size is * aligned per CMA requirement to satisfy cma_init_reserved_mem() call. * But for some reason even if it fails we still have the memory reservation * with us and we can still continue doing fadump. @@ -365,6 +366,11 @@ static unsigned long __init get_fadump_area_size(void) size += fw_dump.cpu_state_data_size; size += fw_dump.hpte_region_size; + /* + * Account for pagesize alignment of boot memory area destination address. + * This facilitates in mmap reading of first kernel's memory.
+ */ + size = PAGE_ALIGN(size); size += fw_dump.boot_memory_size; size += sizeof(struct fadump_crash_info_header); size += sizeof(struct elfhdr); /* ELF core header.*/ @@ -728,7 +734,7 @@ void crash_fadump(struct pt_regs *regs, const char *str) else ppc_save_regs(&fdh->regs); - fdh->online_mask = *cpu_online_mask; + fdh->cpu_mask = *cpu_online_mask; /* * If we came in via system reset, wait a while for the secondary @@ -867,7 +873,6 @@ static int fadump_alloc_mem_ranges(struct fadump_mrange_info *mrange_info) sizeof(struct fadump_memory_range)); return 0; } - static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info, u64 base, u64 end) { @@ -886,7 +891,12 @@ static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info, start = mem_ranges[mrange_info->mem_range_cnt - 1].base; size = mem_ranges[mrange_info->mem_range_cnt - 1].size; - if ((start + size) == base) + /* + * Boot memory area needs separate PT_LOAD segment(s) as it + * is moved to a different location at the time of crash. + * So, fold only if the region is not boot memory area. + */ + if ((start + size) == base && start >= fw_dump.boot_mem_top) is_adjacent = true; } if (!is_adjacent) { @@ -968,11 +978,14 @@ static int fadump_init_elfcore_header(char *bufp) elf->e_entry = 0; elf->e_phoff = sizeof(struct elfhdr); elf->e_shoff = 0; -#if defined(_CALL_ELF) - elf->e_flags = _CALL_ELF; -#else - elf->e_flags = 0; -#endif + + if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2)) + elf->e_flags = 2; + else if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1)) + elf->e_flags = 1; + else + elf->e_flags = 0; + elf->e_ehsize = sizeof(struct elfhdr); elf->e_phentsize = sizeof(struct elf_phdr); elf->e_phnum = 0; @@ -1164,6 +1177,11 @@ static unsigned long init_fadump_header(unsigned long addr) fdh->elfcorehdr_addr = addr; /* We will set the crashing cpu id in crash_fadump() during crash. */ fdh->crashing_cpu = FADUMP_CPU_UNKNOWN; + /* + * When LPAR is terminated by PHYP, ensure all possible CPUs' + * register data is processed while exporting the vmcore. + */ + fdh->cpu_mask = *cpu_possible_mask; return addr; } @@ -1271,7 +1289,6 @@ static void fadump_release_reserved_area(u64 start, u64 end) static void sort_and_merge_mem_ranges(struct fadump_mrange_info *mrange_info) { struct fadump_memory_range *mem_ranges; - struct fadump_memory_range tmp_range; u64 base, size; int i, j, idx; @@ -1286,11 +1303,8 @@ static void sort_and_merge_mem_ranges(struct fadump_mrange_info *mrange_info) if (mem_ranges[idx].base > mem_ranges[j].base) idx = j; } - if (idx != i) { - tmp_range = mem_ranges[idx]; - mem_ranges[idx] = mem_ranges[i]; - mem_ranges[i] = tmp_range; - } + if (idx != i) + swap(mem_ranges[idx], mem_ranges[i]); } /* Merge adjacent reserved ranges */ @@ -1661,8 +1675,8 @@ int __init setup_fadump(void) } /* * Use subsys_initcall_sync() here because there is dependency with - * crash_save_vmcoreinfo_init(), which mush run first to ensure vmcoreinfo initialization - * is done before regisering with f/w. + * crash_save_vmcoreinfo_init(), which must run first to ensure vmcoreinfo initialization + * is done before registering with f/w.
*/ subsys_initcall_sync(setup_fadump); #else /* !CONFIG_PRESERVE_FA_DUMP */ diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 5c5181e8d5f1..d3eea633d11a 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -111,7 +111,7 @@ __secondary_hold_acknowledge: #ifdef CONFIG_RELOCATABLE /* This flag is set to 1 by a loader if the kernel should run * at the loaded address instead of the linked address. This - * is used by kexec-tools to keep the the kdump kernel in the + * is used by kexec-tools to keep the kdump kernel in the * crash_kernel region. The loader is responsible for * observing the alignment requirement. */ @@ -435,7 +435,7 @@ generic_secondary_common_init: ld r12,CPU_SPEC_RESTORE(r23) cmpdi 0,r12,0 beq 3f -#ifdef PPC64_ELF_ABI_v1 +#ifdef CONFIG_PPC64_ELF_ABI_V1 ld r12,0(r12) #endif mtctr r12 diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c index 4ad79eb638c6..77cd4c5a2d63 100644 --- a/arch/powerpc/kernel/idle.c +++ b/arch/powerpc/kernel/idle.c @@ -37,7 +37,7 @@ static int __init powersave_off(char *arg) { ppc_md.power_save = NULL; cpuidle_disable = IDLE_POWERSAVE_OFF; - return 0; + return 1; } __setup("powersave=off", powersave_off); diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S index 7bab2d7de372..ce25b28cf418 100644 --- a/arch/powerpc/kernel/interrupt_64.S +++ b/arch/powerpc/kernel/interrupt_64.S @@ -219,16 +219,6 @@ system_call_vectored common 0x3000 */ system_call_vectored sigill 0x7ff0 - -/* - * Entered via kernel return set up by kernel/sstep.c, must match entry regs - */ - .globl system_call_vectored_emulate -system_call_vectored_emulate: -_ASM_NOKPROBE_SYMBOL(system_call_vectored_emulate) - li r10,IRQS_ALL_DISABLED - stb r10,PACAIRQSOFTMASK(r13) - b system_call_vectored_common #endif /* CONFIG_PPC_BOOK3S */ .balign IFETCH_ALIGN_BYTES @@ -721,7 +711,7 @@ _GLOBAL(ret_from_kernel_thread) REST_NVGPRS(r1) mtctr r14 mr r3,r15 -#ifdef PPC64_ELF_ABI_v2 +#ifdef CONFIG_PPC64_ELF_ABI_V2 mr r12,r14 #endif bctrl diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index 07093b7cdcb9..7e56ddb3e0b9 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -27,7 +27,6 @@ #include <linux/sched.h> #include <linux/debugfs.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/iommu.h> #include <asm/pci-bridge.h> #include <asm/machdep.h> @@ -1065,7 +1064,7 @@ extern long iommu_tce_xchg_no_kill(struct mm_struct *mm, long ret; unsigned long size = 0; - ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, false); + ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction); if (!ret && ((*direction == DMA_FROM_DEVICE) || (*direction == DMA_BIDIRECTIONAL)) && !mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift, @@ -1080,7 +1079,7 @@ void iommu_tce_kill(struct iommu_table *tbl, unsigned long entry, unsigned long pages) { if (tbl->it_ops->tce_kill) - tbl->it_ops->tce_kill(tbl, entry, pages, false); + tbl->it_ops->tce_kill(tbl, entry, pages); } EXPORT_SYMBOL_GPL(iommu_tce_kill); diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 752fb182eacb..ea38c13936c7 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -52,13 +52,13 @@ #include <linux/of_irq.h> #include <linux/vmalloc.h> #include <linux/pgtable.h> +#include <linux/static_call.h> #include <linux/uaccess.h> #include <asm/interrupt.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/cache.h> -#include <asm/prom.h> #include <asm/ptrace.h> 
#include <asm/machdep.h> #include <asm/udbg.h> @@ -217,7 +217,6 @@ static inline void replay_soft_interrupts_irqrestore(void) #define replay_soft_interrupts_irqrestore() replay_soft_interrupts() #endif -#ifdef CONFIG_CC_HAS_ASM_GOTO notrace void arch_local_irq_restore(unsigned long mask) { unsigned char irq_happened; @@ -313,82 +312,6 @@ happened: __hard_irq_enable(); preempt_enable(); } -#else -notrace void arch_local_irq_restore(unsigned long mask) -{ - unsigned char irq_happened; - - /* Write the new soft-enabled value */ - irq_soft_mask_set(mask); - if (mask) - return; - - if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) - WARN_ON_ONCE(in_nmi() || in_hardirq()); - - /* - * From this point onward, we can take interrupts, preempt, - * etc... unless we got hard-disabled. We check if an event - * happened. If none happened, we know we can just return. - * - * We may have preempted before the check below, in which case - * we are checking the "new" CPU instead of the old one. This - * is only a problem if an event happened on the "old" CPU. - * - * External interrupt events will have caused interrupts to - * be hard-disabled, so there is no problem, we - * cannot have preempted. - */ - irq_happened = get_irq_happened(); - if (!irq_happened) { - if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) - WARN_ON_ONCE(!(mfmsr() & MSR_EE)); - return; - } - - /* We need to hard disable to replay. */ - if (!(irq_happened & PACA_IRQ_HARD_DIS)) { - if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) - WARN_ON_ONCE(!(mfmsr() & MSR_EE)); - __hard_irq_disable(); - local_paca->irq_happened |= PACA_IRQ_HARD_DIS; - } else { - /* - * We should already be hard disabled here. We had bugs - * where that wasn't the case so let's dbl check it and - * warn if we are wrong. Only do that when IRQ tracing - * is enabled as mfmsr() can be costly. - */ - if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) { - if (WARN_ON_ONCE(mfmsr() & MSR_EE)) - __hard_irq_disable(); - } - - if (irq_happened == PACA_IRQ_HARD_DIS) { - local_paca->irq_happened = 0; - __hard_irq_enable(); - return; - } - } - - /* - * Disable preempt here, so that the below preempt_enable will - * perform resched if required (a replayed interrupt may set - * need_resched). 
- */ - preempt_disable(); - irq_soft_mask_set(IRQS_ALL_DISABLED); - trace_hardirqs_off(); - - replay_soft_interrupts_irqrestore(); - local_paca->irq_happened = 0; - - trace_hardirqs_on(); - irq_soft_mask_set(IRQS_ENABLED); - __hard_irq_enable(); - preempt_enable(); -} -#endif EXPORT_SYMBOL(arch_local_irq_restore); /* @@ -730,6 +653,8 @@ static __always_inline void call_do_irq(struct pt_regs *regs, void *sp) ); } +DEFINE_STATIC_CALL_RET0(ppc_get_irq, *ppc_md.get_irq); + void __do_irq(struct pt_regs *regs) { unsigned int irq; @@ -741,7 +666,7 @@ void __do_irq(struct pt_regs *regs) * * This will typically lower the interrupt line to the CPU */ - irq = ppc_md.get_irq(); + irq = static_call(ppc_get_irq)(); /* We can hard enable interrupts now to allow perf interrupts */ if (should_hard_irq_enable()) @@ -809,6 +734,9 @@ void __init init_IRQ(void) if (ppc_md.init_IRQ) ppc_md.init_IRQ(); + + if (!WARN_ON(!ppc_md.get_irq)) + static_call_update(ppc_get_irq, ppc_md.get_irq); } #ifdef CONFIG_BOOKE_OR_40x diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c index 39c625737c09..dc746611ebc0 100644 --- a/arch/powerpc/kernel/isa-bridge.c +++ b/arch/powerpc/kernel/isa-bridge.c @@ -18,11 +18,11 @@ #include <linux/init.h> #include <linux/mm.h> #include <linux/notifier.h> +#include <linux/of_address.h> #include <linux/vmalloc.h> #include <asm/processor.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/pci-bridge.h> #include <asm/machdep.h> #include <asm/ppc-pci.h> diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 7dae0b01abfb..1c97c0f177ae 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -45,7 +45,7 @@ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset) { kprobe_opcode_t *addr = NULL; -#ifdef PPC64_ELF_ABI_v2 +#ifdef CONFIG_PPC64_ELF_ABI_V2 /* PPC64 ABIv2 needs local entry point */ addr = (kprobe_opcode_t *)kallsyms_lookup_name(name); if (addr && !offset) { @@ -63,7 +63,7 @@ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset) #endif addr = (kprobe_opcode_t *)ppc_function_entry(addr); } -#elif defined(PPC64_ELF_ABI_v1) +#elif defined(CONFIG_PPC64_ELF_ABI_V1) /* * 64bit powerpc ABIv1 uses function descriptors: * - Check for the dot variant of the symbol first. 
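The kprobes conversion above turns on the difference between the two 64-bit ELF ABIs: v1 reaches functions through descriptors, v2 uses direct text addresses plus a local entry offset. A sketch of the conventional v1 descriptor layout and the dereference involved (illustrative types and names, not the kernel's func_desc_t):

struct opd_entry {			/* conventional ELFv1 .opd layout */
	unsigned long entry;		/* address of the function's first instruction */
	unsigned long toc;		/* TOC base the callee expects in r2 */
	unsigned long env;		/* environment pointer, unused by C */
};

static unsigned long text_address(unsigned long sym_addr)
{
	/* Under ABI v1 a function symbol names an .opd descriptor, so a
	 * load is needed to reach the code, the same dereference the
	 * "ld r12,0(r12)" above performs in assembly. Under ABI v2 the
	 * symbol already points at the text. */
	return ((struct opd_entry *)sym_addr)->entry;
}

This is also why kprobe_lookup_name() checks the dot variant of a symbol first on ABI v1: the dot symbol names the code itself, while the plain symbol names the descriptor.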
@@ -107,7 +107,7 @@ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset) static bool arch_kprobe_on_func_entry(unsigned long offset) { -#ifdef PPC64_ELF_ABI_v2 +#ifdef CONFIG_PPC64_ELF_ABI_V2 #ifdef CONFIG_KPROBES_ON_FTRACE return offset <= 16; #else @@ -150,8 +150,8 @@ int arch_prepare_kprobe(struct kprobe *p) if ((unsigned long)p->addr & 0x03) { printk("Attempt to register kprobe at an unaligned address\n"); ret = -EINVAL; - } else if (IS_MTMSRD(insn) || IS_RFID(insn)) { - printk("Cannot register a kprobe on mtmsr[d]/rfi[d]\n"); + } else if (!can_single_step(ppc_inst_val(insn))) { + printk("Cannot register a kprobe on instructions that can't be single stepped\n"); ret = -EINVAL; } else if ((unsigned long)p->addr & ~PAGE_MASK && ppc_inst_prefixed(ppc_inst_read(p->addr - 1))) { diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c index cfc03e016ff2..5c58460b269a 100644 --- a/arch/powerpc/kernel/legacy_serial.c +++ b/arch/powerpc/kernel/legacy_serial.c @@ -7,10 +7,10 @@ #include <linux/pci.h> #include <linux/of_address.h> #include <linux/of_device.h> +#include <linux/of_irq.h> #include <linux/serial_reg.h> #include <asm/io.h> #include <asm/mmu.h> -#include <asm/prom.h> #include <asm/serial.h> #include <asm/udbg.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index d38a019b38e1..fd6d8d3a548e 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -454,7 +454,7 @@ _GLOBAL(kexec_sequence) beq 1f /* clear out hardware hash page table and tlb */ -#ifdef PPC64_ELF_ABI_v1 +#ifdef CONFIG_PPC64_ELF_ABI_V1 ld r12,0(r27) /* deref function descriptor */ #else mr r12,r27 diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c index 97a76a8619fb..f6d6ae0a1692 100644 --- a/arch/powerpc/kernel/module.c +++ b/arch/powerpc/kernel/module.c @@ -64,13 +64,13 @@ int module_finalize(const Elf_Ehdr *hdr, (void *)sect->sh_addr + sect->sh_size); #endif /* CONFIG_PPC64 */ -#ifdef PPC64_ELF_ABI_v1 +#ifdef CONFIG_PPC64_ELF_ABI_V1 sect = find_section(hdr, sechdrs, ".opd"); if (sect != NULL) { me->arch.start_opd = sect->sh_addr; me->arch.end_opd = sect->sh_addr + sect->sh_size; } -#endif /* PPC64_ELF_ABI_v1 */ +#endif /* CONFIG_PPC64_ELF_ABI_V1 */ #ifdef CONFIG_PPC_BARRIER_NOSPEC sect = find_section(hdr, sechdrs, "__spec_barrier_fixup"); diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c index a0432ef46967..ea6536171778 100644 --- a/arch/powerpc/kernel/module_32.c +++ b/arch/powerpc/kernel/module_32.c @@ -99,7 +99,7 @@ static unsigned long get_plt_size(const Elf32_Ehdr *hdr, /* Sort the relocation information based on a symbol and * addend key. This is a stable O(n*log n) complexity - * alogrithm but it will reduce the complexity of + * algorithm but it will reduce the complexity of * count_relocs() to linear complexity O(n) */ sort((void *)hdr + sechdrs[i].sh_offset, @@ -256,9 +256,8 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, value, (uint32_t)location); pr_debug("Location before: %08X.\n", *(uint32_t *)location); - value = (*(uint32_t *)location & ~0x03fffffc) - | ((value - (uint32_t)location) - & 0x03fffffc); + value = (*(uint32_t *)location & ~PPC_LI_MASK) | + PPC_LI(value - (uint32_t)location); if (patch_instruction(location, ppc_inst(value))) return -EFAULT; @@ -266,10 +265,8 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, pr_debug("Location after: %08X.\n", *(uint32_t *)location); pr_debug("ie. 
jump to %08X+%08X = %08X\n", - *(uint32_t *)location & 0x03fffffc, - (uint32_t)location, - (*(uint32_t *)location & 0x03fffffc) - + (uint32_t)location); + *(uint32_t *)PPC_LI((uint32_t)location), (uint32_t)location, + (*(uint32_t *)PPC_LI((uint32_t)location)) + (uint32_t)location); break; case R_PPC_REL32: @@ -289,23 +286,32 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, } #ifdef CONFIG_DYNAMIC_FTRACE -int module_trampoline_target(struct module *mod, unsigned long addr, - unsigned long *target) +notrace int module_trampoline_target(struct module *mod, unsigned long addr, + unsigned long *target) { - unsigned int jmp[4]; + ppc_inst_t jmp[4]; /* Find where the trampoline jumps to */ - if (copy_from_kernel_nofault(jmp, (void *)addr, sizeof(jmp))) + if (copy_inst_from_kernel_nofault(jmp, (void *)addr)) + return -EFAULT; + if (__copy_inst_from_kernel_nofault(jmp + 1, (void *)addr + 4)) + return -EFAULT; + if (__copy_inst_from_kernel_nofault(jmp + 2, (void *)addr + 8)) + return -EFAULT; + if (__copy_inst_from_kernel_nofault(jmp + 3, (void *)addr + 12)) return -EFAULT; /* verify that this is what we expect it to be */ - if ((jmp[0] & 0xffff0000) != PPC_RAW_LIS(_R12, 0) || - (jmp[1] & 0xffff0000) != PPC_RAW_ADDI(_R12, _R12, 0) || - jmp[2] != PPC_RAW_MTCTR(_R12) || - jmp[3] != PPC_RAW_BCTR()) + if ((ppc_inst_val(jmp[0]) & 0xffff0000) != PPC_RAW_LIS(_R12, 0)) + return -EINVAL; + if ((ppc_inst_val(jmp[1]) & 0xffff0000) != PPC_RAW_ADDI(_R12, _R12, 0)) + return -EINVAL; + if (ppc_inst_val(jmp[2]) != PPC_RAW_MTCTR(_R12)) + return -EINVAL; + if (ppc_inst_val(jmp[3]) != PPC_RAW_BCTR()) return -EINVAL; - addr = (jmp[1] & 0xffff) | ((jmp[0] & 0xffff) << 16); + addr = (ppc_inst_val(jmp[1]) & 0xffff) | ((ppc_inst_val(jmp[0]) & 0xffff) << 16); if (addr & 0x8000) addr -= 0x10000; diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index 794720530442..7e45dc98df8a 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -31,7 +31,7 @@ this, and makes other things simpler. Anton? --RR. */ -#ifdef PPC64_ELF_ABI_v2 +#ifdef CONFIG_PPC64_ELF_ABI_V2 static func_desc_t func_desc(unsigned long addr) { @@ -122,7 +122,7 @@ static u32 ppc64_stub_insns[] = { /* Save current r2 value in magic place on the stack. */ PPC_RAW_STD(_R2, _R1, R2_STACK_OFFSET), PPC_RAW_LD(_R12, _R11, 32), -#ifdef PPC64_ELF_ABI_v1 +#ifdef CONFIG_PPC64_ELF_ABI_V1 /* Set up new r2 from function descriptor */ PPC_RAW_LD(_R2, _R11, 40), #endif @@ -194,7 +194,7 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr, /* Sort the relocation information based on a symbol and * addend key. This is a stable O(n*log n) complexity - * alogrithm but it will reduce the complexity of + * algorithm but it will reduce the complexity of * count_relocs() to linear complexity O(n) */ sort((void *)sechdrs[i].sh_addr, @@ -361,7 +361,7 @@ static inline int create_ftrace_stub(struct ppc64_stub_entry *entry, entry->jump[1] |= PPC_HA(reladdr); entry->jump[2] |= PPC_LO(reladdr); - /* Eventhough we don't use funcdata in the stub, it's needed elsewhere. */ + /* Even though we don't use funcdata in the stub, it's needed elsewhere. 
*/ entry->funcdata = func_desc(addr); entry->magic = STUB_MAGIC; @@ -653,8 +653,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, } /* Only replace bits 2 through 26 */ - value = (*(uint32_t *)location & ~0x03fffffc) - | (value & 0x03fffffc); + value = (*(uint32_t *)location & ~PPC_LI_MASK) | PPC_LI(value); if (patch_instruction((u32 *)location, ppc_inst(value))) return -EFAULT; diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c index 0d9f9cd41e13..e385d3164648 100644 --- a/arch/powerpc/kernel/nvram_64.c +++ b/arch/powerpc/kernel/nvram_64.c @@ -19,9 +19,9 @@ #include <linux/pstore.h> #include <linux/zlib.h> #include <linux/uaccess.h> +#include <linux/of.h> #include <asm/nvram.h> #include <asm/rtas.h> -#include <asm/prom.h> #include <asm/machdep.h> #undef DEBUG_NVRAM diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index 39da688a9455..ba593fd60124 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -344,15 +344,10 @@ void copy_mm_to_paca(struct mm_struct *mm) { mm_context_t *context = &mm->context; -#ifdef CONFIG_PPC_MM_SLICES VM_BUG_ON(!mm_ctx_slb_addr_limit(context)); memcpy(&get_paca()->mm_ctx_low_slices_psize, mm_ctx_low_slices(context), LOW_SLICE_ARRAY_SZ); memcpy(&get_paca()->mm_ctx_high_slices_psize, mm_ctx_high_slices(context), TASK_SLICE_ARRAY_SZ(context)); -#else /* CONFIG_PPC_MM_SLICES */ - get_paca()->mm_ctx_user_psize = context->user_psize; - get_paca()->mm_ctx_sllp = context->sllp; -#endif } #endif /* CONFIG_PPC_64S_HASH_MMU */ diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 8bc9cf62cd93..068410cd54a3 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -30,10 +30,10 @@ #include <linux/vgaarb.h> #include <linux/numa.h> #include <linux/msi.h> +#include <linux/irqdomain.h> #include <asm/processor.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/pci-bridge.h> #include <asm/byteorder.h> #include <asm/machdep.h> @@ -42,7 +42,7 @@ #include "../../../drivers/pci/pci.h" -/* hose_spinlock protects accesses to the the phb_bitmap. */ +/* hose_spinlock protects accesses to the phb_bitmap. 
*/ static DEFINE_SPINLOCK(hose_spinlock); LIST_HEAD(hose_list); @@ -1688,7 +1688,7 @@ EXPORT_SYMBOL_GPL(pcibios_scan_phb); static void fixup_hide_host_resource_fsl(struct pci_dev *dev) { int i, class = dev->class >> 8; - /* When configured as agent, programing interface = 1 */ + /* When configured as agent, programming interface = 1 */ int prog_if = dev->class & 0xf; if ((class == PCI_CLASS_PROCESSOR_POWERPC || diff --git a/arch/powerpc/kernel/pci-hotplug.c b/arch/powerpc/kernel/pci-hotplug.c index 2fc12198ec07..0fe251c6ac2c 100644 --- a/arch/powerpc/kernel/pci-hotplug.c +++ b/arch/powerpc/kernel/pci-hotplug.c @@ -12,6 +12,7 @@ #include <linux/pci.h> #include <linux/export.h> +#include <linux/of.h> #include <asm/pci-bridge.h> #include <asm/ppc-pci.h> #include <asm/firmware.h> diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index 48537964fba1..5a174936c9a0 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c @@ -21,7 +21,6 @@ #include <asm/processor.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/sections.h> #include <asm/pci-bridge.h> #include <asm/ppc-pci.h> diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index 3fb7e572abed..19b03ddf5631 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c @@ -19,10 +19,10 @@ #include <linux/syscalls.h> #include <linux/irq.h> #include <linux/vmalloc.h> +#include <linux/of.h> #include <asm/processor.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/pci-bridge.h> #include <asm/byteorder.h> #include <asm/machdep.h> @@ -285,3 +285,12 @@ int pcibus_to_node(struct pci_bus *bus) } EXPORT_SYMBOL(pcibus_to_node); #endif + +int pci_device_from_OF_node(struct device_node *np, u8 *bus, u8 *devfn) +{ + if (!PCI_DN(np)) + return -ENODEV; + *bus = PCI_DN(np)->busno; + *devfn = PCI_DN(np)->devfn; + return 0; +} diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c index 61571ae23953..938ab8838ab5 100644 --- a/arch/powerpc/kernel/pci_dn.c +++ b/arch/powerpc/kernel/pci_dn.c @@ -12,9 +12,9 @@ #include <linux/export.h> #include <linux/init.h> #include <linux/gfp.h> +#include <linux/of.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/pci-bridge.h> #include <asm/ppc-pci.h> #include <asm/firmware.h> diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c index c3024f104765..756043dd06e9 100644 --- a/arch/powerpc/kernel/pci_of_scan.c +++ b/arch/powerpc/kernel/pci_of_scan.c @@ -13,8 +13,8 @@ #include <linux/pci.h> #include <linux/export.h> +#include <linux/of.h> #include <asm/pci-bridge.h> -#include <asm/prom.h> /** * get_int_prop - Decode a u32 from a device tree property @@ -244,7 +244,7 @@ EXPORT_SYMBOL(of_create_pci_dev); * @dev: pci_dev structure for the bridge * * of_scan_bus() calls this routine for each PCI bridge that it finds, and - * this routine in turn call of_scan_bus() recusively to scan for more child + * this routine in turn call of_scan_bus() recursively to scan for more child * devices. 
*/ void of_scan_pci_bridge(struct pci_dev *dev) diff --git a/arch/powerpc/kernel/proc_powerpc.c b/arch/powerpc/kernel/proc_powerpc.c index 6a029f2378e1..b109cd7b5d01 100644 --- a/arch/powerpc/kernel/proc_powerpc.c +++ b/arch/powerpc/kernel/proc_powerpc.c @@ -7,12 +7,12 @@ #include <linux/mm.h> #include <linux/proc_fs.h> #include <linux/kernel.h> +#include <linux/of.h> #include <asm/machdep.h> #include <asm/vdso_datapage.h> #include <asm/rtas.h> #include <linux/uaccess.h> -#include <asm/prom.h> #ifdef CONFIG_PPC64 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 984813a4d5dc..d00b20c65966 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -34,10 +34,8 @@ #include <linux/ftrace.h> #include <linux/kernel_stat.h> #include <linux/personality.h> -#include <linux/random.h> #include <linux/hw_breakpoint.h> #include <linux/uaccess.h> -#include <linux/elf-randomize.h> #include <linux/pkeys.h> #include <linux/seq_buf.h> @@ -45,7 +43,6 @@ #include <asm/io.h> #include <asm/processor.h> #include <asm/mmu.h> -#include <asm/prom.h> #include <asm/machdep.h> #include <asm/time.h> #include <asm/runlatch.h> @@ -307,7 +304,7 @@ static void __giveup_vsx(struct task_struct *tsk) unsigned long msr = tsk->thread.regs->msr; /* - * We should never be ssetting MSR_VSX without also setting + * We should never be setting MSR_VSX without also setting * MSR_FP and MSR_VEC */ WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC))); @@ -645,7 +642,7 @@ static void do_break_handler(struct pt_regs *regs) return; } - /* Otherwise findout which DAWR caused exception and disable it. */ + /* Otherwise find out which DAWR caused exception and disable it. */ wp_get_instr_detail(regs, &instr, &type, &size, &ea); for (i = 0; i < nr_wp_slots(); i++) { @@ -2313,42 +2310,3 @@ unsigned long arch_align_stack(unsigned long sp) sp -= get_random_int() & ~PAGE_MASK; return sp & ~0xf; } - -static inline unsigned long brk_rnd(void) -{ - unsigned long rnd = 0; - - /* 8MB for 32bit, 1GB for 64bit */ - if (is_32bit_task()) - rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT))); - else - rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT))); - - return rnd << PAGE_SHIFT; -} - -unsigned long arch_randomize_brk(struct mm_struct *mm) -{ - unsigned long base = mm->brk; - unsigned long ret; - -#ifdef CONFIG_PPC_BOOK3S_64 - /* - * If we are using 1TB segments and we are allowed to randomise - * the heap, we can put it above 1TB so it is backed by a 1TB - * segment. Otherwise the heap will be in the bottom 1TB - * which always uses 256MB segments and this may result in a - * performance penalty. 
- */ - if (!radix_enabled() && !is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T)) - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T); -#endif - - ret = PAGE_ALIGN(base + brk_rnd()); - - if (ret < mm->brk) - return mm->brk; - - return ret; -} - diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 86c4f009563d..feae8509b59c 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -31,7 +31,6 @@ #include <linux/cpu.h> #include <linux/pgtable.h> -#include <asm/prom.h> #include <asm/rtas.h> #include <asm/page.h> #include <asm/processor.h> diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 0ac5faacc909..04694ec423f6 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -28,6 +28,8 @@ #include <linux/bitops.h> #include <linux/pgtable.h> #include <linux/printk.h> +#include <linux/of.h> +#include <linux/of_fdt.h> #include <asm/prom.h> #include <asm/rtas.h> #include <asm/page.h> @@ -3416,7 +3418,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, * * PowerMacs use a different mechanism to spin CPUs * - * (This must be done after instanciating RTAS) + * (This must be done after instantiating RTAS) */ if (of_platform != PLATFORM_POWERMAC) prom_hold_cpus(); diff --git a/arch/powerpc/kernel/ptrace/ptrace-view.c b/arch/powerpc/kernel/ptrace/ptrace-view.c index f15bc78caf71..076d867412c7 100644 --- a/arch/powerpc/kernel/ptrace/ptrace-view.c +++ b/arch/powerpc/kernel/ptrace/ptrace-view.c @@ -174,7 +174,7 @@ int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data) /* * softe copies paca->irq_soft_mask variable state. Since irq_soft_mask is - * no more used as a flag, lets force usr to alway see the softe value as 1 + * no more used as a flag, lets force usr to always see the softe value as 1 * which means interrupts are not soft disabled. */ if (IS_ENABLED(CONFIG_PPC64) && regno == PT_SOFTE) { diff --git a/arch/powerpc/kernel/ptrace/ptrace.c b/arch/powerpc/kernel/ptrace/ptrace.c index 6d5026a9db4f..4d2dc22d4a2d 100644 --- a/arch/powerpc/kernel/ptrace/ptrace.c +++ b/arch/powerpc/kernel/ptrace/ptrace.c @@ -444,10 +444,4 @@ void __init pt_regs_check(void) * real registers. 
*/ BUILD_BUG_ON(PT_DSCR < sizeof(struct user_pt_regs) / sizeof(unsigned long)); - -#ifdef PPC64_ELF_ABI_v1 - BUILD_BUG_ON(!IS_ENABLED(CONFIG_HAVE_FUNCTION_DESCRIPTORS)); -#else - BUILD_BUG_ON(IS_ENABLED(CONFIG_HAVE_FUNCTION_DESCRIPTORS)); -#endif } diff --git a/arch/powerpc/kernel/rtas-proc.c b/arch/powerpc/kernel/rtas-proc.c index 6857a5b0a1c3..081b2b741a8c 100644 --- a/arch/powerpc/kernel/rtas-proc.c +++ b/arch/powerpc/kernel/rtas-proc.c @@ -24,11 +24,11 @@ #include <linux/seq_file.h> #include <linux/bitops.h> #include <linux/rtc.h> +#include <linux/of.h> #include <linux/uaccess.h> #include <asm/processor.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/rtas.h> #include <asm/machdep.h> /* for ppc_md */ #include <asm/time.h> @@ -259,7 +259,6 @@ __initcall(proc_rtas_init); static int parse_number(const char __user *p, size_t count, u64 *val) { char buf[40]; - char *end; if (count > 39) return -EINVAL; @@ -269,11 +268,7 @@ static int parse_number(const char __user *p, size_t count, u64 *val) buf[count] = 0; - *val = simple_strtoull(buf, &end, 10); - if (*end && *end != '\n') - return -EINVAL; - - return 0; + return kstrtoull(buf, 10, val); } /* ****************************************************************** */ diff --git a/arch/powerpc/kernel/rtas-rtc.c b/arch/powerpc/kernel/rtas-rtc.c index 33c07c8af6c8..5a31d1829bca 100644 --- a/arch/powerpc/kernel/rtas-rtc.c +++ b/arch/powerpc/kernel/rtas-rtc.c @@ -6,7 +6,6 @@ #include <linux/rtc.h> #include <linux/delay.h> #include <linux/ratelimit.h> -#include <asm/prom.h> #include <asm/rtas.h> #include <asm/time.h> diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 1f42aabbbab3..9bb43aa53d43 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -24,9 +24,10 @@ #include <linux/slab.h> #include <linux/reboot.h> #include <linux/syscalls.h> +#include <linux/of.h> +#include <linux/of_fdt.h> #include <asm/interrupt.h> -#include <asm/prom.h> #include <asm/rtas.h> #include <asm/hvcall.h> #include <asm/machdep.h> @@ -49,6 +50,19 @@ void enter_rtas(unsigned long); static inline void do_enter_rtas(unsigned long args) { + unsigned long msr; + + /* + * Make sure MSR[RI] is currently enabled as it will be forced later + * in enter_rtas. + */ + msr = mfmsr(); + BUG_ON(!(msr & MSR_RI)); + + BUG_ON(!irqs_disabled()); + + hard_irq_disable(); /* Ensure MSR[EE] is disabled on PPC64 */ + enter_rtas(args); srr_regs_clobbered(); /* rtas uses SRRs, invalidate */ @@ -462,6 +476,11 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...) if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE) return -1; + if ((mfmsr() & (MSR_IR|MSR_DR)) != (MSR_IR|MSR_DR)) { + WARN_ON_ONCE(1); + return -1; + } + s = lock_rtas(); /* We use the global rtas args buffer */ diff --git a/arch/powerpc/kernel/rtas_entry.S b/arch/powerpc/kernel/rtas_entry.S new file mode 100644 index 000000000000..9a434d42e660 --- /dev/null +++ b/arch/powerpc/kernel/rtas_entry.S @@ -0,0 +1,172 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include <asm/asm-offsets.h> +#include <asm/bug.h> +#include <asm/page.h> +#include <asm/ppc_asm.h> + +/* + * RTAS is called with MSR IR, DR, EE disabled, and LR in the return address. + * + * Note: r3 is an input parameter to rtas, so don't trash it... 
+ */ + +#ifdef CONFIG_PPC32 +_GLOBAL(enter_rtas) + stwu r1,-INT_FRAME_SIZE(r1) + mflr r0 + stw r0,INT_FRAME_SIZE+4(r1) + LOAD_REG_ADDR(r4, rtas) + lis r6,1f@ha /* physical return address for rtas */ + addi r6,r6,1f@l + tophys(r6,r6) + lwz r8,RTASENTRY(r4) + lwz r4,RTASBASE(r4) + mfmsr r9 + stw r9,8(r1) + li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR) + mtlr r6 + stw r1, THREAD + RTAS_SP(r2) + mtspr SPRN_SRR0,r8 + mtspr SPRN_SRR1,r9 + rfi +1: + lis r8, 1f@h + ori r8, r8, 1f@l + LOAD_REG_IMMEDIATE(r9,MSR_KERNEL) + mtspr SPRN_SRR0,r8 + mtspr SPRN_SRR1,r9 + rfi /* Reactivate MMU translation */ +1: + lwz r8,INT_FRAME_SIZE+4(r1) /* get return address */ + lwz r9,8(r1) /* original msr value */ + addi r1,r1,INT_FRAME_SIZE + li r0,0 + stw r0, THREAD + RTAS_SP(r2) + mtlr r8 + mtmsr r9 + blr /* return to caller */ +_ASM_NOKPROBE_SYMBOL(enter_rtas) + +#else /* CONFIG_PPC32 */ +#include <asm/exception-64s.h> + +/* + * 32-bit rtas on 64-bit machines has the additional problem that RTAS may + * not preserve the upper parts of registers it uses. + */ +_GLOBAL(enter_rtas) + mflr r0 + std r0,16(r1) + stdu r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space. */ + + /* Because RTAS is running in 32b mode, it clobbers the high order half + * of all registers that it saves. We therefore save those registers + * RTAS might touch to the stack. (r0, r3-r12 are caller saved) + */ + SAVE_GPR(2, r1) /* Save the TOC */ + SAVE_NVGPRS(r1) /* Save the non-volatiles */ + + mfcr r4 + std r4,_CCR(r1) + mfctr r5 + std r5,_CTR(r1) + mfspr r6,SPRN_XER + std r6,_XER(r1) + mfdar r7 + std r7,_DAR(r1) + mfdsisr r8 + std r8,_DSISR(r1) + + /* Temporary workaround to clear CR until RTAS can be modified to + * ignore all bits. + */ + li r0,0 + mtcr r0 + + mfmsr r6 + + /* Unfortunately, the stack pointer and the MSR are also clobbered, + * so they are saved in the PACA which allows us to restore + * our original state after RTAS returns. + */ + std r1,PACAR1(r13) + std r6,PACASAVEDMSR(r13) + + /* Setup our real return addr */ + LOAD_REG_ADDR(r4,rtas_return_loc) + clrldi r4,r4,2 /* convert to realmode address */ + mtlr r4 + +__enter_rtas: + LOAD_REG_ADDR(r4, rtas) + ld r5,RTASENTRY(r4) /* get the rtas->entry value */ + ld r4,RTASBASE(r4) /* get the rtas->base value */ + + /* + * RTAS runs in 32-bit big endian real mode, but leave MSR[RI] on as we + * may hit NMI (SRESET or MCE) while in RTAS. RTAS should disable RI in + * its critical regions (as specified in PAPR+ section 7.2.1). MSR[S] + * is not impacted by RFI_TO_KERNEL (only urfid can unset it). So if + * MSR[S] is set, it will remain when entering RTAS. + */ + LOAD_REG_IMMEDIATE(r6, MSR_ME | MSR_RI) + + li r0,0 + mtmsrd r0,1 /* disable RI before using SRR0/1 */ + + mtspr SPRN_SRR0,r5 + mtspr SPRN_SRR1,r6 + RFI_TO_KERNEL + b . /* prevent speculative execution */ +rtas_return_loc: + FIXUP_ENDIAN + + /* Set SF before anything. */ + LOAD_REG_IMMEDIATE(r6, MSR_KERNEL & ~(MSR_IR|MSR_DR)) + mtmsrd r6 + + /* relocation is off at this point */ + GET_PACA(r13) + + bcl 20,31,$+4 +0: mflr r3 + ld r3,(1f-0b)(r3) /* get &rtas_restore_regs */ + + ld r1,PACAR1(r13) /* Restore our SP */ + ld r4,PACASAVEDMSR(r13) /* Restore our MSR */ + + mtspr SPRN_SRR0,r3 + mtspr SPRN_SRR1,r4 + RFI_TO_KERNEL + b . 
/* prevent speculative execution */ +_ASM_NOKPROBE_SYMBOL(enter_rtas) +_ASM_NOKPROBE_SYMBOL(__enter_rtas) +_ASM_NOKPROBE_SYMBOL(rtas_return_loc) + + .align 3 +1: .8byte rtas_restore_regs + +rtas_restore_regs: + /* relocation is on at this point */ + REST_GPR(2, r1) /* Restore the TOC */ + REST_NVGPRS(r1) /* Restore the non-volatiles */ + + ld r4,_CCR(r1) + mtcr r4 + ld r5,_CTR(r1) + mtctr r5 + ld r6,_XER(r1) + mtspr SPRN_XER,r6 + ld r7,_DAR(r1) + mtdar r7 + ld r8,_DSISR(r1) + mtdsisr r8 + + addi r1,r1,SWITCH_FRAME_SIZE /* Unstack our frame */ + ld r0,16(r1) /* get return address */ + + mtlr r0 + blr /* return to caller */ + +#endif /* CONFIG_PPC32 */ diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c index a99179d83538..bc817a5619d6 100644 --- a/arch/powerpc/kernel/rtas_flash.c +++ b/arch/powerpc/kernel/rtas_flash.c @@ -120,7 +120,7 @@ static struct kmem_cache *flash_block_cache = NULL; /* * Local copy of the flash block list. * - * The rtas_firmware_flash_list varable will be + * The rtas_firmware_flash_list variable will be * set once the data is fully read. * * For convenience as we build the list we use virtual addrs, diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c index 781c1869902e..5a2f5ea3b054 100644 --- a/arch/powerpc/kernel/rtas_pci.c +++ b/arch/powerpc/kernel/rtas_pci.c @@ -14,10 +14,11 @@ #include <linux/string.h> #include <linux/init.h> #include <linux/pgtable.h> +#include <linux/of_address.h> +#include <linux/of_fdt.h> #include <asm/io.h> #include <asm/irq.h> -#include <asm/prom.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <asm/iommu.h> diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c index cf0f42909ddf..5270b450bbde 100644 --- a/arch/powerpc/kernel/rtasd.c +++ b/arch/powerpc/kernel/rtasd.c @@ -22,7 +22,6 @@ #include <linux/uaccess.h> #include <asm/io.h> #include <asm/rtas.h> -#include <asm/prom.h> #include <asm/nvram.h> #include <linux/atomic.h> #include <asm/machdep.h> diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 518ae5aa9410..9d83d16fef9a 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -23,19 +23,19 @@ #include <linux/console.h> #include <linux/screen_info.h> #include <linux/root_dev.h> -#include <linux/notifier.h> #include <linux/cpu.h> #include <linux/unistd.h> #include <linux/serial.h> #include <linux/serial_8250.h> #include <linux/percpu.h> #include <linux/memblock.h> +#include <linux/of_irq.h> +#include <linux/of_fdt.h> #include <linux/of_platform.h> #include <linux/hugetlb.h> #include <linux/pgtable.h> #include <asm/io.h> #include <asm/paca.h> -#include <asm/prom.h> #include <asm/processor.h> #include <asm/vdso_datapage.h> #include <asm/smp.h> @@ -279,7 +279,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) proc_freq / 1000000, proc_freq % 1000000); /* If we are a Freescale core do a simple check so - * we dont have to keep adding cases in the future */ + * we don't have to keep adding cases in the future */ if (PVR_VER(pvr) & 0x8000) { switch (PVR_VER(pvr)) { case 0x8000: /* 7441/7450/7451, Voyager */ @@ -680,8 +680,25 @@ int check_legacy_ioport(unsigned long base_port) } EXPORT_SYMBOL(check_legacy_ioport); -static int ppc_panic_event(struct notifier_block *this, - unsigned long event, void *ptr) +/* + * Panic notifiers setup + * + * We have 3 notifiers for powerpc, each one from a different "nature": + * + * - ppc_panic_fadump_handler() is a hypervisor notifier, which 
hard-disables
+ *   IRQs and deals with the Firmware-Assisted dump, when it is configured;
+ *   should run early in the panic path.
+ *
+ * - dump_kernel_offset() is an informative notifier, just showing the KASLR
+ *   offset if we have RANDOMIZE_BASE set.
+ *
+ * - ppc_panic_platform_handler() is a low-level handler that's registered
+ *   only if the platform wishes to perform final actions in the panic path,
+ *   hence it should run late and might not even return. Currently, only
+ *   pseries and ps3 platforms register callbacks.
+ */
+static int ppc_panic_fadump_handler(struct notifier_block *this,
+				    unsigned long event, void *ptr)
 {
 	/*
 	 * panic does a local_irq_disable, but we really
@@ -691,45 +708,63 @@ static int ppc_panic_event(struct notifier_block *this,
 	/*
 	 * If firmware-assisted dump has been registered then trigger
-	 * firmware-assisted dump and let firmware handle everything else.
+	 * its callback and let the firmware handle everything else.
 	 */
 	crash_fadump(NULL, ptr);
-	if (ppc_md.panic)
-		ppc_md.panic(ptr);  /* May not return */
+
 	return NOTIFY_DONE;
 }
-static struct notifier_block ppc_panic_block = {
-	.notifier_call = ppc_panic_event,
-	.priority = INT_MIN /* may not return; must be done last */
-};
-
-/*
- * Dump out kernel offset information on panic.
- */
 static int dump_kernel_offset(struct notifier_block *self, unsigned long v,
 			      void *p)
 {
 	pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
 		 kaslr_offset(), KERNELBASE);
-	return 0;
+	return NOTIFY_DONE;
 }
+static int ppc_panic_platform_handler(struct notifier_block *this,
+				      unsigned long event, void *ptr)
+{
+	/*
+	 * This handler is only registered if we have a panic callback
+	 * on ppc_md, hence the NULL check is not needed.
+	 * Also, it may not return, so it runs really late in the panic path.
+	 */
+	ppc_md.panic(ptr);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block ppc_fadump_block = {
+	.notifier_call = ppc_panic_fadump_handler,
+	.priority = INT_MAX, /* run early, to notify the firmware ASAP */
+};
+
 static struct notifier_block kernel_offset_notifier = {
-	.notifier_call = dump_kernel_offset
+	.notifier_call = dump_kernel_offset,
+};
+
+static struct notifier_block ppc_panic_block = {
+	.notifier_call = ppc_panic_platform_handler,
+	.priority = INT_MIN, /* may not return; must be done last */
 };
 void __init setup_panic(void)
 {
+	/* Hard-disables IRQs + deals with FW-assisted dump (fadump) */
+	atomic_notifier_chain_register(&panic_notifier_list,
+				       &ppc_fadump_block);
+
 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0)
 		atomic_notifier_chain_register(&panic_notifier_list,
 					       &kernel_offset_notifier);
-	/* PPC64 always does a hard irq disable in its panic handler */
-	if (!IS_ENABLED(CONFIG_PPC64) && !ppc_md.panic)
-		return;
-	atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block);
+	/* Low-level platform-specific routines that should run on panic */
+	if (ppc_md.panic)
+		atomic_notifier_chain_register(&panic_notifier_list,
+					       &ppc_panic_block);
 }
 #ifdef CONFIG_CHECK_CACHE_COHERENCY
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index a6e9d36d7c01..813261789303 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -20,9 +20,10 @@
 #include <linux/export.h>
 #include <linux/nvram.h>
 #include <linux/pgtable.h>
+#include <linux/of_fdt.h>
+#include <linux/irq.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/processor.h>
 #include <asm/setup.h>
 #include <asm/smp.h>
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
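For readers unfamiliar with the pattern used in setup_panic() above, the following is a minimal, self-contained sketch of registering a prioritized panic notifier; the example_* names are hypothetical and not part of this patch, and it assumes a kernel where panic_notifier_list is declared in <linux/panic_notifier.h>:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/panic_notifier.h>

/* Hypothetical handler; a high priority makes it run early in the chain. */
static int example_panic_handler(struct notifier_block *nb,
				 unsigned long event, void *ptr)
{
	/* ptr carries the panic message string */
	return NOTIFY_DONE;
}

static struct notifier_block example_panic_nb = {
	.notifier_call	= example_panic_handler,
	.priority	= INT_MAX,	/* higher priority runs earlier */
};

static int __init example_panic_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &example_panic_nb);
	return 0;
}
late_initcall(example_panic_init);

The priority field is what orders the three powerpc notifiers above: INT_MAX for fadump (first), default for the KASLR offset dump, and INT_MIN for the platform handler that may not return (last).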
index a96f05063bc9..0e8fc1cd1c55 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -31,11 +31,12 @@ #include <linux/memory.h> #include <linux/nmi.h> #include <linux/pgtable.h> +#include <linux/of.h> +#include <linux/of_fdt.h> #include <asm/kvm_guest.h> #include <asm/io.h> #include <asm/kdump.h> -#include <asm/prom.h> #include <asm/processor.h> #include <asm/smp.h> #include <asm/elf.h> diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index f7f8620663c7..68a91e553e14 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c @@ -141,6 +141,21 @@ unsigned long copy_ckvsx_from_user(struct task_struct *task, int show_unhandled_signals = 1; +unsigned long get_min_sigframe_size(void) +{ + if (IS_ENABLED(CONFIG_PPC64)) + return get_min_sigframe_size_64(); + else + return get_min_sigframe_size_32(); +} + +#ifdef CONFIG_COMPAT +unsigned long get_min_sigframe_size_compat(void) +{ + return get_min_sigframe_size_32(); +} +#endif + /* * Allocate space for the signal frame */ diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index d84c434b2b78..157a7403e3eb 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -233,6 +233,12 @@ struct rt_sigframe { int abigap[56]; }; +unsigned long get_min_sigframe_size_32(void) +{ + return max(sizeof(struct rt_sigframe) + __SIGNAL_FRAMESIZE + 16, + sizeof(struct sigframe) + __SIGNAL_FRAMESIZE); +} + /* * Save the current user registers on the user stack. * We only save the altivec/spe registers if the process has used diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 73d483b07ff3..472596a109e2 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -66,6 +66,11 @@ struct rt_sigframe { char abigap[USER_REDZONE_SIZE]; } __attribute__ ((aligned (16))); +unsigned long get_min_sigframe_size_64(void) +{ + return sizeof(struct rt_sigframe) + __SIGNAL_FRAMESIZE; +} + /* * This computes a quad word aligned pointer inside the vmx_reserve array * element. 
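 *
 * (As an illustration of the quad-word alignment mentioned here, the usual
 * 16-byte round-up idiom, a sketch rather than code from this patch, is:
 *
 *	aligned = ((unsigned long)ptr + 15) & ~0xfUL;
 *
 * which is the computation the helper applies inside vmx_reserve.)
 *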
For historical reasons sigcontext might not be quad word aligned, @@ -123,7 +128,7 @@ static long notrace __unsafe_setup_sigcontext(struct sigcontext __user *sc, #endif struct pt_regs *regs = tsk->thread.regs; unsigned long msr = regs->msr; - /* Force usr to alway see softe as 1 (interrupts enabled) */ + /* Force usr to always see softe as 1 (interrupts enabled) */ unsigned long softe = 0x1; BUG_ON(tsk != current); diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index de0f6f09a5dd..bcefab484ea6 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -43,7 +43,6 @@ #include <asm/kvm_ppc.h> #include <asm/dbell.h> #include <asm/page.h> -#include <asm/prom.h> #include <asm/smp.h> #include <asm/time.h> #include <asm/machdep.h> @@ -412,32 +411,32 @@ static struct cpumask nmi_ipi_pending_mask; static bool nmi_ipi_busy = false; static void (*nmi_ipi_function)(struct pt_regs *) = NULL; -static void nmi_ipi_lock_start(unsigned long *flags) +noinstr static void nmi_ipi_lock_start(unsigned long *flags) { raw_local_irq_save(*flags); hard_irq_disable(); - while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) { + while (arch_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) { raw_local_irq_restore(*flags); - spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0); + spin_until_cond(arch_atomic_read(&__nmi_ipi_lock) == 0); raw_local_irq_save(*flags); hard_irq_disable(); } } -static void nmi_ipi_lock(void) +noinstr static void nmi_ipi_lock(void) { - while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) - spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0); + while (arch_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) + spin_until_cond(arch_atomic_read(&__nmi_ipi_lock) == 0); } -static void nmi_ipi_unlock(void) +noinstr static void nmi_ipi_unlock(void) { smp_mb(); - WARN_ON(atomic_read(&__nmi_ipi_lock) != 1); - atomic_set(&__nmi_ipi_lock, 0); + WARN_ON(arch_atomic_read(&__nmi_ipi_lock) != 1); + arch_atomic_set(&__nmi_ipi_lock, 0); } -static void nmi_ipi_unlock_end(unsigned long *flags) +noinstr static void nmi_ipi_unlock_end(unsigned long *flags) { nmi_ipi_unlock(); raw_local_irq_restore(*flags); @@ -446,7 +445,7 @@ static void nmi_ipi_unlock_end(unsigned long *flags) /* * Platform NMI handler calls this to ack */ -int smp_handle_nmi_ipi(struct pt_regs *regs) +noinstr int smp_handle_nmi_ipi(struct pt_regs *regs) { void (*fn)(struct pt_regs *) = NULL; unsigned long flags; @@ -875,7 +874,7 @@ out_free: * @tg : The thread-group structure of the CPU node which @cpu belongs * to. * - * Returns the index to tg->thread_list that points to the the start + * Returns the index to tg->thread_list that points to the start * of the thread_group that @cpu belongs to. * * Returns -1 if cpu doesn't belong to any of the groups pointed to by @@ -1102,7 +1101,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) DBG("smp_prepare_cpus\n"); /* - * setup_cpu may need to be called on the boot cpu. We havent + * setup_cpu may need to be called on the boot cpu. We haven't * spun any cpus up but lets be paranoid. 
 */
	BUG_ON(boot_cpuid != smp_processor_id());
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index c4f5b4ce926f..fc999140bc27 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -73,7 +73,7 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, size_t, len,
 int ppc_select(int n, fd_set __user *inp, fd_set __user *outp,
 	       fd_set __user *exp, struct __kernel_old_timeval __user *tvp)
 {
-	if ( (unsigned long)n >= 4096 )
+	if ((unsigned long)n >= 4096)
 		return sys_old_select((void __user *)n);
 	return sys_select(n, inp, outp, exp, tvp);
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 2069bbb90a9a..3a10cda9c05e 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -9,12 +9,12 @@
 #include <linux/nodemask.h>
 #include <linux/cpumask.h>
 #include <linux/notifier.h>
+#include <linux/of.h>
 #include <asm/current.h>
 #include <asm/processor.h>
 #include <asm/cputable.h>
 #include <asm/hvcall.h>
-#include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/smp.h>
 #include <asm/pmc.h>
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index f80cce0e3899..587adcc12860 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -54,8 +54,10 @@
 #include <linux/of_clk.h>
 #include <linux/suspend.h>
 #include <linux/processor.h>
-#include <asm/trace.h>
+#include <linux/mc146818rtc.h>
+#include <linux/platform_device.h>
+
+#include <asm/trace.h>
 #include <asm/interrupt.h>
 #include <asm/io.h>
 #include <asm/nvram.h>
@@ -63,7 +65,6 @@
 #include <asm/machdep.h>
 #include <linux/uaccess.h>
 #include <asm/time.h>
-#include <asm/prom.h>
 #include <asm/irq.h>
 #include <asm/div64.h>
 #include <asm/smp.h>
@@ -156,10 +157,6 @@ bool tb_invalid;
 u64 __cputime_usec_factor;
 EXPORT_SYMBOL(__cputime_usec_factor);
-#ifdef CONFIG_PPC_SPLPAR
-void (*dtl_consumer)(struct dtl_entry *, u64);
-#endif
-
 static void calc_cputime_factors(void)
 {
 	struct div_result res;
@@ -185,6 +182,8 @@ static inline unsigned long read_spurr(unsigned long tb)
 #include <asm/dtl.h>
+void (*dtl_consumer)(struct dtl_entry *, u64);
+
 /*
  * Scan the dispatch trace log and count up the stolen time.
  * Should be called with interrupts disabled.
@@ -829,7 +828,7 @@ static void __read_persistent_clock(struct timespec64 *ts)
 	static int first = 1;
 	ts->tv_nsec = 0;
-	/* XXX this is a litle fragile but will work okay in the short term */
+	/* XXX this is a little fragile but will work okay in the short term */
 	if (first) {
 		first = 0;
 		if (ppc_md.time_init)
@@ -974,7 +973,7 @@ void secondary_cpu_time_init(void)
 	 */
 	start_cpu_decrementer();
-	/* FIME: Should make unrelatred change to move snapshot_timebase
+	/* FIXME: Should make unrelated change to move snapshot_timebase
 	 * call here !
*/ register_decrementer_clockevent(smp_processor_id()); } diff --git a/arch/powerpc/kernel/trace/Makefile b/arch/powerpc/kernel/trace/Makefile index 542aa7a8b2b4..af8527538fe4 100644 --- a/arch/powerpc/kernel/trace/Makefile +++ b/arch/powerpc/kernel/trace/Makefile @@ -14,10 +14,7 @@ obj64-$(CONFIG_FUNCTION_TRACER) += ftrace_mprofile.o else obj64-$(CONFIG_FUNCTION_TRACER) += ftrace_64_pg.o endif -obj-$(CONFIG_FUNCTION_TRACER) += ftrace_low.o -obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o -obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o -obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o +obj-$(CONFIG_FUNCTION_TRACER) += ftrace_low.o ftrace.o obj-$(CONFIG_TRACING) += trace_clock.o obj-$(CONFIG_PPC64) += $(obj64-y) diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c index 4ee04aacf9f1..2a893e06e4f1 100644 --- a/arch/powerpc/kernel/trace/ftrace.c +++ b/arch/powerpc/kernel/trace/ftrace.c @@ -28,9 +28,6 @@ #include <asm/syscall.h> #include <asm/inst.h> - -#ifdef CONFIG_DYNAMIC_FTRACE - /* * We generally only have a single long_branch tramp and at most 2 or 3 plt * tramps generated. But, we don't use the plt tramps currently. We also allot @@ -48,12 +45,12 @@ ftrace_call_replace(unsigned long ip, unsigned long addr, int link) addr = ppc_function_entry((void *)addr); /* if (link) set op to 'bl' else 'b' */ - create_branch(&op, (u32 *)ip, addr, link ? 1 : 0); + create_branch(&op, (u32 *)ip, addr, link ? BRANCH_SET_LINK : 0); return op; } -static int +static inline int ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new) { ppc_inst_t replaced; @@ -78,10 +75,7 @@ ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new) } /* replace the text with the new text */ - if (patch_instruction((u32 *)ip, new)) - return -EPERM; - - return 0; + return patch_instruction((u32 *)ip, new); } /* @@ -89,28 +83,26 @@ ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new) */ static int test_24bit_addr(unsigned long ip, unsigned long addr) { - ppc_inst_t op; addr = ppc_function_entry((void *)addr); - /* use the create_branch to verify that this offset can be branched */ - return create_branch(&op, (u32 *)ip, addr, 0) == 0; + return is_offset_in_branch_range(addr - ip); } static int is_bl_op(ppc_inst_t op) { - return (ppc_inst_val(op) & 0xfc000003) == 0x48000001; + return (ppc_inst_val(op) & ~PPC_LI_MASK) == PPC_RAW_BL(0); } static int is_b_op(ppc_inst_t op) { - return (ppc_inst_val(op) & 0xfc000003) == 0x48000000; + return (ppc_inst_val(op) & ~PPC_LI_MASK) == PPC_RAW_BRANCH(0); } static unsigned long find_bl_target(unsigned long ip, ppc_inst_t op) { int offset; - offset = (ppc_inst_val(op) & 0x03fffffc); + offset = PPC_LI(ppc_inst_val(op)); /* make it signed */ if (offset & 0x02000000) offset |= 0xfe000000; @@ -119,7 +111,6 @@ static unsigned long find_bl_target(unsigned long ip, ppc_inst_t op) } #ifdef CONFIG_MODULES -#ifdef CONFIG_PPC64 static int __ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) @@ -159,25 +150,39 @@ __ftrace_make_nop(struct module *mod, return -EINVAL; } -#ifdef CONFIG_MPROFILE_KERNEL - /* When using -mkernel_profile there is no load to jump over */ - pop = ppc_inst(PPC_RAW_NOP()); + if (IS_ENABLED(CONFIG_MPROFILE_KERNEL)) { + if (copy_inst_from_kernel_nofault(&op, (void *)(ip - 4))) { + pr_err("Fetching instruction at %lx failed.\n", ip - 4); + return -EFAULT; + } - if (copy_inst_from_kernel_nofault(&op, (void *)(ip - 4))) { - pr_err("Fetching instruction at %lx failed.\n", ip - 4); - return -EFAULT; - } + 
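		/*
		 * For context, -mprofile-kernel makes the compiler emit the
		 * following profiling sequence at function entry (a sketch of
		 * generated code, not an addition to this patch):
		 *
		 *	mflr	r0
		 *	bl	_mcount
		 *
		 * so the instruction preceding the bl must be the mflr, or the
		 * std r0,LRSAVE(r1) it may already have been patched to.
		 */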
/* We expect either a mflr r0, or a std r0, LRSAVE(r1) */ + if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_MFLR(_R0))) && + !ppc_inst_equal(op, ppc_inst(PPC_INST_STD_LR))) { + pr_err("Unexpected instruction %s around bl _mcount\n", + ppc_inst_as_str(op)); + return -EINVAL; + } + } else if (IS_ENABLED(CONFIG_PPC64)) { + /* + * Check what is in the next instruction. We can see ld r2,40(r1), but + * on first pass after boot we will see mflr r0. + */ + if (copy_inst_from_kernel_nofault(&op, (void *)(ip + 4))) { + pr_err("Fetching op failed.\n"); + return -EFAULT; + } - /* We expect either a mflr r0, or a std r0, LRSAVE(r1) */ - if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_MFLR(_R0))) && - !ppc_inst_equal(op, ppc_inst(PPC_INST_STD_LR))) { - pr_err("Unexpected instruction %s around bl _mcount\n", - ppc_inst_as_str(op)); - return -EINVAL; + if (!ppc_inst_equal(op, ppc_inst(PPC_INST_LD_TOC))) { + pr_err("Expected %08lx found %s\n", PPC_INST_LD_TOC, ppc_inst_as_str(op)); + return -EINVAL; + } } -#else + /* - * Our original call site looks like: + * When using -mprofile-kernel or PPC32 there is no load to jump over. + * + * Otherwise our original call site looks like: * * bl <tramp> * ld r2,XX(r1) @@ -189,23 +194,10 @@ __ftrace_make_nop(struct module *mod, * * Use a b +8 to jump over the load. */ - - pop = ppc_inst(PPC_INST_BRANCH | 8); /* b +8 */ - - /* - * Check what is in the next instruction. We can see ld r2,40(r1), but - * on first pass after boot we will see mflr r0. - */ - if (copy_inst_from_kernel_nofault(&op, (void *)(ip + 4))) { - pr_err("Fetching op failed.\n"); - return -EFAULT; - } - - if (!ppc_inst_equal(op, ppc_inst(PPC_INST_LD_TOC))) { - pr_err("Expected %08lx found %s\n", PPC_INST_LD_TOC, ppc_inst_as_str(op)); - return -EINVAL; - } -#endif /* CONFIG_MPROFILE_KERNEL */ + if (IS_ENABLED(CONFIG_MPROFILE_KERNEL) || IS_ENABLED(CONFIG_PPC32)) + pop = ppc_inst(PPC_RAW_NOP()); + else + pop = ppc_inst(PPC_RAW_BRANCH(8)); /* b +8 */ if (patch_instruction((u32 *)ip, pop)) { pr_err("Patching NOP failed.\n"); @@ -214,54 +206,16 @@ __ftrace_make_nop(struct module *mod, return 0; } - -#else /* !PPC64 */ -static int -__ftrace_make_nop(struct module *mod, - struct dyn_ftrace *rec, unsigned long addr) +#else +static int __ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) { - ppc_inst_t op; - unsigned long ip = rec->ip; - unsigned long tramp, ptr; - - if (copy_from_kernel_nofault(&op, (void *)ip, MCOUNT_INSN_SIZE)) - return -EFAULT; - - /* Make sure that that this is still a 24bit jump */ - if (!is_bl_op(op)) { - pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op)); - return -EINVAL; - } - - /* lets find where the pointer goes */ - tramp = find_bl_target(ip, op); - - /* Find where the trampoline jumps to */ - if (module_trampoline_target(mod, tramp, &ptr)) { - pr_err("Failed to get trampoline target\n"); - return -EFAULT; - } - - if (ptr != addr) { - pr_err("Trampoline location %08lx does not match addr\n", - tramp); - return -EINVAL; - } - - op = ppc_inst(PPC_RAW_NOP()); - - if (patch_instruction((u32 *)ip, op)) - return -EPERM; - return 0; } -#endif /* PPC64 */ #endif /* CONFIG_MODULES */ static unsigned long find_ftrace_tramp(unsigned long ip) { int i; - ppc_inst_t instr; /* * We have the compiler generated long_branch tramps at the end @@ -270,8 +224,7 @@ static unsigned long find_ftrace_tramp(unsigned long ip) for (i = NUM_FTRACE_TRAMPS - 1; i >= 0; i--) if (!ftrace_tramps[i]) continue; - else if (create_branch(&instr, (void *)ip, - ftrace_tramps[i], 0) == 0) + else 
if (is_offset_in_branch_range(ftrace_tramps[i] - ip)) return ftrace_tramps[i]; return 0; @@ -301,23 +254,12 @@ static int setup_mcount_compiler_tramp(unsigned long tramp) int i; ppc_inst_t op; unsigned long ptr; - ppc_inst_t instr; - static unsigned long ftrace_plt_tramps[NUM_FTRACE_TRAMPS]; /* Is this a known long jump tramp? */ for (i = 0; i < NUM_FTRACE_TRAMPS; i++) - if (!ftrace_tramps[i]) - break; - else if (ftrace_tramps[i] == tramp) + if (ftrace_tramps[i] == tramp) return 0; - /* Is this a known plt tramp? */ - for (i = 0; i < NUM_FTRACE_TRAMPS; i++) - if (!ftrace_plt_tramps[i]) - break; - else if (ftrace_plt_tramps[i] == tramp) - return -1; - /* New trampoline -- read where this goes */ if (copy_inst_from_kernel_nofault(&op, (void *)tramp)) { pr_debug("Fetching opcode failed.\n"); @@ -339,16 +281,10 @@ static int setup_mcount_compiler_tramp(unsigned long tramp) } /* Let's re-write the tramp to go to ftrace_[regs_]caller */ -#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS - ptr = ppc_global_function_entry((void *)ftrace_regs_caller); -#else - ptr = ppc_global_function_entry((void *)ftrace_caller); -#endif - if (create_branch(&instr, (void *)tramp, ptr, 0)) { - pr_debug("%ps is not reachable from existing mcount tramp\n", - (void *)ptr); - return -1; - } + if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS)) + ptr = ppc_global_function_entry((void *)ftrace_regs_caller); + else + ptr = ppc_global_function_entry((void *)ftrace_caller); if (patch_branch((u32 *)tramp, ptr, 0)) { pr_debug("REL24 out of range!\n"); @@ -418,10 +354,12 @@ int ftrace_make_nop(struct module *mod, old = ftrace_call_replace(ip, addr, 1); new = ppc_inst(PPC_RAW_NOP()); return ftrace_modify_code(ip, old, new); - } else if (core_kernel_text(ip)) + } else if (core_kernel_text(ip)) { return __ftrace_make_nop_kernel(rec, addr); + } else if (!IS_ENABLED(CONFIG_MODULES)) { + return -EINVAL; + } -#ifdef CONFIG_MODULES /* * Out of range jumps are called from modules. * We should either already have a pointer to the module @@ -444,53 +382,27 @@ int ftrace_make_nop(struct module *mod, mod = rec->arch.mod; return __ftrace_make_nop(mod, rec, addr); -#else - /* We should not get here without modules */ - return -EINVAL; -#endif /* CONFIG_MODULES */ } #ifdef CONFIG_MODULES -#ifdef CONFIG_PPC64 /* * Examine the existing instructions for __ftrace_make_call. * They should effectively be a NOP, and follow formal constraints, * depending on the ABI. Return false if they don't. */ -#ifndef CONFIG_MPROFILE_KERNEL -static int -expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1) +static bool expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1) { - /* - * We expect to see: - * - * b +8 - * ld r2,XX(r1) - * - * The load offset is different depending on the ABI. For simplicity - * just mask it out when doing the compare. 
- */ - if (!ppc_inst_equal(op0, ppc_inst(0x48000008)) || - (ppc_inst_val(op1) & 0xffff0000) != 0xe8410000) - return 0; - return 1; -} -#else -static int -expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1) -{ - /* look for patched "NOP" on ppc64 with -mprofile-kernel */ - if (!ppc_inst_equal(op0, ppc_inst(PPC_RAW_NOP()))) - return 0; - return 1; + if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1)) + return ppc_inst_equal(op0, ppc_inst(PPC_RAW_BRANCH(8))) && + ppc_inst_equal(op1, ppc_inst(PPC_INST_LD_TOC)); + else + return ppc_inst_equal(op0, ppc_inst(PPC_RAW_NOP())); } -#endif static int __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { ppc_inst_t op[2]; - ppc_inst_t instr; void *ip = (void *)rec->ip; unsigned long entry, ptr, tramp; struct module *mod = rec->arch.mod; @@ -499,7 +411,8 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) if (copy_inst_from_kernel_nofault(op, ip)) return -EFAULT; - if (copy_inst_from_kernel_nofault(op + 1, ip + 4)) + if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1) && + copy_inst_from_kernel_nofault(op + 1, ip + 4)) return -EFAULT; if (!expected_nop_sequence(ip, op[0], op[1])) { @@ -509,20 +422,15 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) } /* If we never set up ftrace trampoline(s), then bail */ -#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS - if (!mod->arch.tramp || !mod->arch.tramp_regs) { -#else - if (!mod->arch.tramp) { -#endif + if (!mod->arch.tramp || + (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && !mod->arch.tramp_regs)) { pr_err("No ftrace trampoline\n"); return -EINVAL; } -#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS - if (rec->flags & FTRACE_FL_REGS) + if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && rec->flags & FTRACE_FL_REGS) tramp = mod->arch.tramp_regs; else -#endif tramp = mod->arch.tramp; if (module_trampoline_target(mod, tramp, &ptr)) { @@ -539,12 +447,6 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) return -EINVAL; } - /* Ensure branch is within 24 bits */ - if (create_branch(&instr, ip, tramp, BRANCH_SET_LINK)) { - pr_err("Branch out of range\n"); - return -EINVAL; - } - if (patch_branch(ip, tramp, BRANCH_SET_LINK)) { pr_err("REL24 out of range!\n"); return -EINVAL; @@ -552,58 +454,11 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) return 0; } - -#else /* !CONFIG_PPC64: */ -static int -__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) -{ - int err; - ppc_inst_t op; - u32 *ip = (u32 *)rec->ip; - struct module *mod = rec->arch.mod; - unsigned long tramp; - - /* read where this goes */ - if (copy_inst_from_kernel_nofault(&op, ip)) - return -EFAULT; - - /* It should be pointing to a nop */ - if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_NOP()))) { - pr_err("Expected NOP but have %s\n", ppc_inst_as_str(op)); - return -EINVAL; - } - - /* If we never set up a trampoline to ftrace_caller, then bail */ -#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS - if (!mod->arch.tramp || !mod->arch.tramp_regs) { #else - if (!mod->arch.tramp) { -#endif - pr_err("No ftrace trampoline\n"); - return -EINVAL; - } - -#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS - if (rec->flags & FTRACE_FL_REGS) - tramp = mod->arch.tramp_regs; - else -#endif - tramp = mod->arch.tramp; - /* create the branch to the trampoline */ - err = create_branch(&op, ip, tramp, BRANCH_SET_LINK); - if (err) { - pr_err("REL24 out of range!\n"); - return -EINVAL; - } - - pr_devel("write to %lx\n", rec->ip); - - if (patch_instruction(ip, op)) - return -EPERM; - +static int __ftrace_make_call(struct dyn_ftrace *rec, unsigned long 
addr) +{ return 0; } -#endif /* CONFIG_PPC64 */ #endif /* CONFIG_MODULES */ static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr) @@ -616,16 +471,12 @@ static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr) entry = ppc_global_function_entry((void *)ftrace_caller); ptr = ppc_global_function_entry((void *)addr); - if (ptr != entry) { -#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS + if (ptr != entry && IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS)) entry = ppc_global_function_entry((void *)ftrace_regs_caller); - if (ptr != entry) { -#endif - pr_err("Unknown ftrace addr to patch: %ps\n", (void *)ptr); - return -EINVAL; -#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS - } -#endif + + if (ptr != entry) { + pr_err("Unknown ftrace addr to patch: %ps\n", (void *)ptr); + return -EINVAL; } /* Make sure we have a nop */ @@ -668,10 +519,13 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) old = ppc_inst(PPC_RAW_NOP()); new = ftrace_call_replace(ip, addr, 1); return ftrace_modify_code(ip, old, new); - } else if (core_kernel_text(ip)) + } else if (core_kernel_text(ip)) { return __ftrace_make_call_kernel(rec, addr); + } else if (!IS_ENABLED(CONFIG_MODULES)) { + /* We should not get here without modules */ + return -EINVAL; + } -#ifdef CONFIG_MODULES /* * Out of range jumps are called from modules. * Being that we are converting from nop, it had better @@ -683,10 +537,6 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) } return __ftrace_make_call(rec, addr); -#else - /* We should not get here without modules */ - return -EINVAL; -#endif /* CONFIG_MODULES */ } #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS @@ -770,12 +620,6 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, return -EINVAL; } - /* Ensure branch is within 24 bits */ - if (create_branch(&op, (u32 *)ip, tramp, BRANCH_SET_LINK)) { - pr_err("Branch out of range\n"); - return -EINVAL; - } - if (patch_branch((u32 *)ip, tramp, BRANCH_SET_LINK)) { pr_err("REL24 out of range!\n"); return -EINVAL; @@ -783,6 +627,11 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, return 0; } +#else +static int __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr) +{ + return 0; +} #endif int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, @@ -807,9 +656,11 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, * variant, so there is nothing to do here */ return 0; + } else if (!IS_ENABLED(CONFIG_MODULES)) { + /* We should not get here without modules */ + return -EINVAL; } -#ifdef CONFIG_MODULES /* * Out of range jumps are called from modules. 
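 *
 * Conversion note: the #ifdef that used to guard this path is replaced by a
 * compile-time check, which keeps the modules path visible to the compiler
 * even when CONFIG_MODULES is off. The shape of the check (taken from this
 * patch, shown here as a sketch) is:
 *
 *	if (!IS_ENABLED(CONFIG_MODULES))
 *		return -EINVAL;
 *	return __ftrace_modify_call(rec, old_addr, addr);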
*/ @@ -819,10 +670,6 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, } return __ftrace_modify_call(rec, old_addr, addr); -#else - /* We should not get here without modules */ - return -EINVAL; -#endif /* CONFIG_MODULES */ } #endif @@ -836,15 +683,13 @@ int ftrace_update_ftrace_func(ftrace_func_t func) new = ftrace_call_replace(ip, (unsigned long)func, 1); ret = ftrace_modify_code(ip, old, new); -#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS /* Also update the regs callback function */ - if (!ret) { + if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && !ret) { ip = (unsigned long)(&ftrace_regs_call); old = ppc_inst_read((u32 *)&ftrace_regs_call); new = ftrace_call_replace(ip, (unsigned long)func, 1); ret = ftrace_modify_code(ip, old, new); } -#endif return ret; } @@ -863,25 +708,39 @@ void arch_ftrace_update_code(int command) extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[]; +void ftrace_free_init_tramp(void) +{ + int i; + + for (i = 0; i < NUM_FTRACE_TRAMPS && ftrace_tramps[i]; i++) + if (ftrace_tramps[i] == (unsigned long)ftrace_tramp_init) { + ftrace_tramps[i] = 0; + return; + } +} + int __init ftrace_dyn_arch_init(void) { int i; unsigned int *tramp[] = { ftrace_tramp_text, ftrace_tramp_init }; u32 stub_insns[] = { - 0xe98d0000 | PACATOC, /* ld r12,PACATOC(r13) */ - 0x3d8c0000, /* addis r12,r12,<high> */ - 0x398c0000, /* addi r12,r12,<low> */ - 0x7d8903a6, /* mtctr r12 */ - 0x4e800420, /* bctr */ + PPC_RAW_LD(_R12, _R13, PACATOC), + PPC_RAW_ADDIS(_R12, _R12, 0), + PPC_RAW_ADDI(_R12, _R12, 0), + PPC_RAW_MTCTR(_R12), + PPC_RAW_BCTR() }; -#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS - unsigned long addr = ppc_global_function_entry((void *)ftrace_regs_caller); -#else - unsigned long addr = ppc_global_function_entry((void *)ftrace_caller); -#endif - long reladdr = addr - kernel_toc_addr(); + unsigned long addr; + long reladdr; + + if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS)) + addr = ppc_global_function_entry((void *)ftrace_regs_caller); + else + addr = ppc_global_function_entry((void *)ftrace_caller); - if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) { + reladdr = addr - kernel_toc_addr(); + + if (reladdr >= SZ_2G || reladdr < -(long)SZ_2G) { pr_err("Address of %ps out of range of kernel_toc.\n", (void *)addr); return -1; @@ -896,13 +755,7 @@ int __init ftrace_dyn_arch_init(void) return 0; } -#else -int __init ftrace_dyn_arch_init(void) -{ - return 0; -} #endif -#endif /* CONFIG_DYNAMIC_FTRACE */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER @@ -939,8 +792,8 @@ int ftrace_disable_ftrace_graph_caller(void) * Hook the return address and push it in the stack of return addrs * in current thread info. Return the address we want to divert to. 
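 *
 * Schematically, the logic below (__prepare_ftrace_return()) amounts to the
 * following sketch, assuming the usual return_to_handler trampoline:
 *
 *	if (!function_graph_enter(parent, ip, 0, (unsigned long *)sp))
 *		parent = ppc_function_entry(return_to_handler);
 *	return parent;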
*/ -unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip, - unsigned long sp) +static unsigned long +__prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp) { unsigned long return_hooker; int bit; @@ -969,12 +822,18 @@ out: void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op, struct ftrace_regs *fregs) { - fregs->regs.link = prepare_ftrace_return(parent_ip, ip, fregs->regs.gpr[1]); + fregs->regs.link = __prepare_ftrace_return(parent_ip, ip, fregs->regs.gpr[1]); +} +#else +unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip, + unsigned long sp) +{ + return __prepare_ftrace_return(parent, ip, sp); } #endif #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ -#ifdef PPC64_ELF_ABI_v1 +#ifdef CONFIG_PPC64_ELF_ABI_V1 char *arch_ftrace_match_adjust(char *str, const char *search) { if (str[0] == '.' && search[0] != '.') @@ -982,4 +841,4 @@ char *arch_ftrace_match_adjust(char *str, const char *search) else return str; } -#endif /* PPC64_ELF_ABI_v1 */ +#endif /* CONFIG_PPC64_ELF_ABI_V1 */ diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index a08bb7cefdc5..3aaa50e5c72f 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -393,7 +393,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) * Builds that do not support KVM could take this second option to increase * the recoverability of NMIs. */ -void hv_nmi_check_nonrecoverable(struct pt_regs *regs) +noinstr void hv_nmi_check_nonrecoverable(struct pt_regs *regs) { #ifdef CONFIG_PPC_POWERNV unsigned long kbase = (unsigned long)_stext; @@ -433,7 +433,9 @@ void hv_nmi_check_nonrecoverable(struct pt_regs *regs) return; nonrecoverable: - regs_set_unrecoverable(regs); + regs->msr &= ~MSR_RI; + local_paca->hsrr_valid = 0; + local_paca->srr_valid = 0; #endif } DEFINE_INTERRUPT_HANDLER_NMI(system_reset_exception) diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c index c6975467d9ff..95a41ae9dfa7 100644 --- a/arch/powerpc/kernel/uprobes.c +++ b/arch/powerpc/kernel/uprobes.c @@ -48,6 +48,11 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, return -EINVAL; } + if (!can_single_step(ppc_inst_val(ppc_inst_read(auprobe->insn)))) { + pr_info_ratelimited("Cannot register a uprobe on instructions that can't be single stepped\n"); + return -ENOTSUPP; + } + return 0; } diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index 717f2c9a7573..0da287544054 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -25,7 +25,6 @@ #include <asm/processor.h> #include <asm/mmu.h> #include <asm/mmu_context.h> -#include <asm/prom.h> #include <asm/machdep.h> #include <asm/cputable.h> #include <asm/sections.h> diff --git a/arch/powerpc/kernel/vdso/Makefile b/arch/powerpc/kernel/vdso/Makefile index 954974287ee7..096b0bf1335f 100644 --- a/arch/powerpc/kernel/vdso/Makefile +++ b/arch/powerpc/kernel/vdso/Makefile @@ -48,6 +48,7 @@ UBSAN_SANITIZE := n KASAN_SANITIZE := n ccflags-y := -shared -fno-common -fno-builtin -nostdlib -Wl,--hash-style=both +ccflags-$(CONFIG_LD_IS_LLD) += $(call cc-option,--ld-path=$(LD),-fuse-ld=lld) CC32FLAGS := -Wl,-soname=linux-vdso32.so.1 -m32 AS32FLAGS := -D__VDSO32__ -s diff --git a/arch/powerpc/kernel/vdso/vdso32.lds.S b/arch/powerpc/kernel/vdso/vdso32.lds.S index 58e0099f70f4..e0d19d74455f 100644 --- a/arch/powerpc/kernel/vdso/vdso32.lds.S +++ b/arch/powerpc/kernel/vdso/vdso32.lds.S @@ -13,7 +13,6 @@ 
OUTPUT_FORMAT("elf32-powerpcle", "elf32-powerpcle", "elf32-powerpcle") OUTPUT_FORMAT("elf32-powerpc", "elf32-powerpc", "elf32-powerpc") #endif OUTPUT_ARCH(powerpc:common) -ENTRY(_start) SECTIONS { diff --git a/arch/powerpc/kernel/vdso/vdso64.lds.S b/arch/powerpc/kernel/vdso/vdso64.lds.S index 0288cad428b0..1a4a7bc4c815 100644 --- a/arch/powerpc/kernel/vdso/vdso64.lds.S +++ b/arch/powerpc/kernel/vdso/vdso64.lds.S @@ -13,7 +13,6 @@ OUTPUT_FORMAT("elf64-powerpcle", "elf64-powerpcle", "elf64-powerpcle") OUTPUT_FORMAT("elf64-powerpc", "elf64-powerpc", "elf64-powerpc") #endif OUTPUT_ARCH(powerpc:common64) -ENTRY(_start) SECTIONS { diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c index bfc27496fe7e..7d28b9553654 100644 --- a/arch/powerpc/kernel/watchdog.c +++ b/arch/powerpc/kernel/watchdog.c @@ -56,7 +56,7 @@ * solved by also having a SMP watchdog where all CPUs check all other * CPUs heartbeat. * - * The SMP checker can detect lockups on other CPUs. A gobal "pending" + * The SMP checker can detect lockups on other CPUs. A global "pending" * cpumask is kept, containing all CPUs which enable the watchdog. Each * CPU clears their pending bit in their heartbeat timer. When the bitmask * becomes empty, the last CPU to clear its pending bit updates a global diff --git a/arch/powerpc/kexec/Makefile b/arch/powerpc/kexec/Makefile index b6c52608cb49..0c2abe7f9908 100644 --- a/arch/powerpc/kexec/Makefile +++ b/arch/powerpc/kexec/Makefile @@ -13,3 +13,5 @@ obj-$(CONFIG_KEXEC_FILE) += file_load.o ranges.o file_load_$(BITS).o elf_$(BITS) GCOV_PROFILE_core_$(BITS).o := n KCOV_INSTRUMENT_core_$(BITS).o := n UBSAN_SANITIZE_core_$(BITS).o := n +KASAN_SANITIZE_core.o := n +KASAN_SANITIZE_core_$(BITS) := n diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c index abf5897ae88c..7ab4980fe13a 100644 --- a/arch/powerpc/kexec/core.c +++ b/arch/powerpc/kexec/core.c @@ -18,7 +18,6 @@ #include <asm/kdump.h> #include <asm/machdep.h> #include <asm/pgalloc.h> -#include <asm/prom.h> #include <asm/sections.h> void machine_kexec_mask_interrupts(void) { diff --git a/arch/powerpc/kexec/core_64.c b/arch/powerpc/kexec/core_64.c index 6cc7793b8420..c2bea9db1c1e 100644 --- a/arch/powerpc/kexec/core_64.c +++ b/arch/powerpc/kexec/core_64.c @@ -16,6 +16,7 @@ #include <linux/kernel.h> #include <linux/cpu.h> #include <linux/hardirq.h> +#include <linux/of.h> #include <asm/page.h> #include <asm/current.h> @@ -25,7 +26,6 @@ #include <asm/paca.h> #include <asm/mmu.h> #include <asm/sections.h> /* _end */ -#include <asm/prom.h> #include <asm/smp.h> #include <asm/hw_breakpoint.h> #include <asm/svm.h> @@ -406,7 +406,7 @@ static int __init export_htab_values(void) if (!node) return -ENODEV; - /* remove any stale propertys so ours can be found */ + /* remove any stale properties so ours can be found */ of_remove_property(node, of_find_property(node, htab_base_prop.name, NULL)); of_remove_property(node, of_find_property(node, htab_size_prop.name, NULL)); diff --git a/arch/powerpc/kexec/crash.c b/arch/powerpc/kexec/crash.c index 22ceeeb705ab..d85fa9fc6f3c 100644 --- a/arch/powerpc/kexec/crash.c +++ b/arch/powerpc/kexec/crash.c @@ -20,7 +20,6 @@ #include <asm/processor.h> #include <asm/machdep.h> #include <asm/kexec.h> -#include <asm/prom.h> #include <asm/smp.h> #include <asm/setjmp.h> #include <asm/debug.h> diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile index 9bdfc8b50899..0cd23ce07d68 100644 --- a/arch/powerpc/kvm/Makefile +++ b/arch/powerpc/kvm/Makefile @@ -37,9 +37,6 @@ kvm-e500mc-objs 
:= \ e500_emulate.o kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs) -kvm-book3s_64-builtin-objs-$(CONFIG_SPAPR_TCE_IOMMU) := \ - book3s_64_vio_hv.o - kvm-pr-y := \ fpu.o \ emulate.o \ @@ -76,7 +73,7 @@ kvm-hv-$(CONFIG_PPC_TRANSACTIONAL_MEM) += \ book3s_hv_tm.o kvm-book3s_64-builtin-xics-objs-$(CONFIG_KVM_XICS) := \ - book3s_hv_rm_xics.o book3s_hv_rm_xive.o + book3s_hv_rm_xics.o kvm-book3s_64-builtin-tm-objs-$(CONFIG_PPC_TRANSACTIONAL_MEM) += \ book3s_hv_tm_builtin.o @@ -134,3 +131,8 @@ obj-$(CONFIG_KVM_BOOK3S_64_PR) += kvm-pr.o obj-$(CONFIG_KVM_BOOK3S_64_HV) += kvm-hv.o obj-y += $(kvm-book3s_64-builtin-objs-y) + +# KVM does a lot in real-mode, and 64-bit Book3S KASAN doesn't support that +ifdef CONFIG_PPC_BOOK3S_64 +KASAN_SANITIZE := n +endif diff --git a/arch/powerpc/kvm/book3s_64_entry.S b/arch/powerpc/kvm/book3s_64_entry.S index e42d1c609e47..e43704547a1e 100644 --- a/arch/powerpc/kvm/book3s_64_entry.S +++ b/arch/powerpc/kvm/book3s_64_entry.S @@ -124,7 +124,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) /* * "Skip" interrupts are part of a trick KVM uses a with hash guests to load - * the faulting instruction in guest memory from the the hypervisor without + * the faulting instruction in guest memory from the hypervisor without * walking page tables. * * When the guest takes a fault that requires the hypervisor to load the diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 0aeb51738ca9..514fd45c1994 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -58,7 +58,7 @@ struct kvm_resize_hpt { /* Possible values and their usage: * <0 an error occurred during allocation, * -EBUSY allocation is in the progress, - * 0 allocation made successfuly. + * 0 allocation made successfully. */ int error; @@ -256,26 +256,34 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, int kvmppc_mmu_hv_init(void) { - unsigned long host_lpid, rsvd_lpid; + unsigned long nr_lpids; if (!mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE)) return -EINVAL; - host_lpid = 0; - if (cpu_has_feature(CPU_FTR_HVMODE)) - host_lpid = mfspr(SPRN_LPID); + if (cpu_has_feature(CPU_FTR_HVMODE)) { + if (WARN_ON(mfspr(SPRN_LPID) != 0)) + return -EINVAL; + nr_lpids = 1UL << mmu_lpid_bits; + } else { + nr_lpids = 1UL << KVM_MAX_NESTED_GUESTS_SHIFT; + } - /* POWER8 and above have 12-bit LPIDs (10-bit in POWER7) */ - if (cpu_has_feature(CPU_FTR_ARCH_207S)) - rsvd_lpid = LPID_RSVD; - else - rsvd_lpid = LPID_RSVD_POWER7; + if (!cpu_has_feature(CPU_FTR_ARCH_300)) { + /* POWER7 has 10-bit LPIDs, POWER8 has 12-bit LPIDs */ + if (cpu_has_feature(CPU_FTR_ARCH_207S)) + WARN_ON(nr_lpids != 1UL << 12); + else + WARN_ON(nr_lpids != 1UL << 10); - kvmppc_init_lpid(rsvd_lpid + 1); + /* + * Reserve the last implemented LPID use in partition + * switching for POWER7 and POWER8. 
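		 *
		 * Worked example: with 12-bit LPIDs (POWER8), nr_lpids starts
		 * as 1UL << 12 = 4096; reserving the last one leaves LPIDs
		 * 0..4094 for the allocator.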
+ */ + nr_lpids -= 1; + } - kvmppc_claim_lpid(host_lpid); - /* rsvd_lpid is reserved for use in partition switching */ - kvmppc_claim_lpid(rsvd_lpid); + kvmppc_init_lpid(nr_lpids); return 0; } @@ -879,7 +887,7 @@ static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, struct revmap_entry *rev = kvm->arch.hpt.rev; unsigned long head, i, j; __be64 *hptep; - int ret = 0; + bool ret = false; unsigned long *rmapp; rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; @@ -887,7 +895,7 @@ static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, lock_rmap(rmapp); if (*rmapp & KVMPPC_RMAP_REFERENCED) { *rmapp &= ~KVMPPC_RMAP_REFERENCED; - ret = 1; + ret = true; } if (!(*rmapp & KVMPPC_RMAP_PRESENT)) { unlock_rmap(rmapp); @@ -919,7 +927,7 @@ static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, rev[i].guest_rpte |= HPTE_R_R; note_hpte_modification(kvm, &rev[i]); } - ret = 1; + ret = true; } __unlock_hpte(hptep, be64_to_cpu(hptep[0])); } while ((i = j) != head); diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index 85cfa6328222..d6589c4fe889 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -32,6 +32,18 @@ #include <asm/tce.h> #include <asm/mmu_context.h> +static struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm, + unsigned long liobn) +{ + struct kvmppc_spapr_tce_table *stt; + + list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list) + if (stt->liobn == liobn) + return stt; + + return NULL; +} + static unsigned long kvmppc_tce_pages(unsigned long iommu_pages) { return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE; @@ -753,3 +765,34 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu, return ret; } EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce); + +long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn, + unsigned long ioba) +{ + struct kvmppc_spapr_tce_table *stt; + long ret; + unsigned long idx; + struct page *page; + u64 *tbl; + + stt = kvmppc_find_table(vcpu->kvm, liobn); + if (!stt) + return H_TOO_HARD; + + ret = kvmppc_ioba_validate(stt, ioba, 1); + if (ret != H_SUCCESS) + return ret; + + idx = (ioba >> stt->page_shift) - stt->offset; + page = stt->pages[idx / TCES_PER_PAGE]; + if (!page) { + vcpu->arch.regs.gpr[4] = 0; + return H_SUCCESS; + } + tbl = (u64 *)page_address(page); + + vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE]; + + return H_SUCCESS; +} +EXPORT_SYMBOL_GPL(kvmppc_h_get_tce); diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c deleted file mode 100644 index fdeda6a9cff4..000000000000 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ /dev/null @@ -1,672 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * - * Copyright 2010 Paul Mackerras, IBM Corp. 
<paulus@au1.ibm.com> - * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com> - * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com> - */ - -#include <linux/types.h> -#include <linux/string.h> -#include <linux/kvm.h> -#include <linux/kvm_host.h> -#include <linux/highmem.h> -#include <linux/gfp.h> -#include <linux/slab.h> -#include <linux/hugetlb.h> -#include <linux/list.h> -#include <linux/stringify.h> - -#include <asm/kvm_ppc.h> -#include <asm/kvm_book3s.h> -#include <asm/book3s/64/mmu-hash.h> -#include <asm/mmu_context.h> -#include <asm/hvcall.h> -#include <asm/synch.h> -#include <asm/ppc-opcode.h> -#include <asm/udbg.h> -#include <asm/iommu.h> -#include <asm/tce.h> -#include <asm/pte-walk.h> - -#ifdef CONFIG_BUG - -#define WARN_ON_ONCE_RM(condition) ({ \ - static bool __section(".data.unlikely") __warned; \ - int __ret_warn_once = !!(condition); \ - \ - if (unlikely(__ret_warn_once && !__warned)) { \ - __warned = true; \ - pr_err("WARN_ON_ONCE_RM: (%s) at %s:%u\n", \ - __stringify(condition), \ - __func__, __LINE__); \ - dump_stack(); \ - } \ - unlikely(__ret_warn_once); \ -}) - -#else - -#define WARN_ON_ONCE_RM(condition) ({ \ - int __ret_warn_on = !!(condition); \ - unlikely(__ret_warn_on); \ -}) - -#endif - -/* - * Finds a TCE table descriptor by LIOBN. - * - * WARNING: This will be called in real or virtual mode on HV KVM and virtual - * mode on PR KVM - */ -struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm, - unsigned long liobn) -{ - struct kvmppc_spapr_tce_table *stt; - - list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list) - if (stt->liobn == liobn) - return stt; - - return NULL; -} -EXPORT_SYMBOL_GPL(kvmppc_find_table); - -#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE -static long kvmppc_rm_tce_to_ua(struct kvm *kvm, - unsigned long tce, unsigned long *ua) -{ - unsigned long gfn = tce >> PAGE_SHIFT; - struct kvm_memory_slot *memslot; - - memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn); - if (!memslot) - return -EINVAL; - - *ua = __gfn_to_hva_memslot(memslot, gfn) | - (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE)); - - return 0; -} - -/* - * Validates TCE address. - * At the moment flags and page mask are validated. - * As the host kernel does not access those addresses (just puts them - * to the table and user space is supposed to process them), we can skip - * checking other things (such as TCE is a guest RAM address or the page - * was actually allocated). 
- */ -static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt, - unsigned long tce) -{ - unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE); - enum dma_data_direction dir = iommu_tce_direction(tce); - struct kvmppc_spapr_tce_iommu_table *stit; - unsigned long ua = 0; - - /* Allow userspace to poison TCE table */ - if (dir == DMA_NONE) - return H_SUCCESS; - - if (iommu_tce_check_gpa(stt->page_shift, gpa)) - return H_PARAMETER; - - if (kvmppc_rm_tce_to_ua(stt->kvm, tce, &ua)) - return H_TOO_HARD; - - list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { - unsigned long hpa = 0; - struct mm_iommu_table_group_mem_t *mem; - long shift = stit->tbl->it_page_shift; - - mem = mm_iommu_lookup_rm(stt->kvm->mm, ua, 1ULL << shift); - if (!mem) - return H_TOO_HARD; - - if (mm_iommu_ua_to_hpa_rm(mem, ua, shift, &hpa)) - return H_TOO_HARD; - } - - return H_SUCCESS; -} - -/* Note on the use of page_address() in real mode, - * - * It is safe to use page_address() in real mode on ppc64 because - * page_address() is always defined as lowmem_page_address() - * which returns __va(PFN_PHYS(page_to_pfn(page))) which is arithmetic - * operation and does not access page struct. - * - * Theoretically page_address() could be defined different - * but either WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL - * would have to be enabled. - * WANT_PAGE_VIRTUAL is never enabled on ppc32/ppc64, - * HASHED_PAGE_VIRTUAL could be enabled for ppc32 only and only - * if CONFIG_HIGHMEM is defined. As CONFIG_SPARSEMEM_VMEMMAP - * is not expected to be enabled on ppc32, page_address() - * is safe for ppc32 as well. - * - * WARNING: This will be called in real-mode on HV KVM and virtual - * mode on PR KVM - */ -static u64 *kvmppc_page_address(struct page *page) -{ -#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL) -#error TODO: fix to avoid page_address() here -#endif - return (u64 *) page_address(page); -} - -/* - * Handles TCE requests for emulated devices. - * Puts guest TCE values to the table and expects user space to convert them. - * Cannot fail so kvmppc_rm_tce_validate must be called before it. - */ -static void kvmppc_rm_tce_put(struct kvmppc_spapr_tce_table *stt, - unsigned long idx, unsigned long tce) -{ - struct page *page; - u64 *tbl; - - idx -= stt->offset; - page = stt->pages[idx / TCES_PER_PAGE]; - /* - * kvmppc_rm_ioba_validate() allows pages not be allocated if TCE is - * being cleared, otherwise it returns H_TOO_HARD and we skip this. - */ - if (!page) { - WARN_ON_ONCE_RM(tce != 0); - return; - } - tbl = kvmppc_page_address(page); - - tbl[idx % TCES_PER_PAGE] = tce; -} - -/* - * TCEs pages are allocated in kvmppc_rm_tce_put() which won't be able to do so - * in real mode. - * Check if kvmppc_rm_tce_put() can succeed in real mode, i.e. a TCEs page is - * allocated or not required (when clearing a tce entry). - */ -static long kvmppc_rm_ioba_validate(struct kvmppc_spapr_tce_table *stt, - unsigned long ioba, unsigned long npages, bool clearing) -{ - unsigned long i, idx, sttpage, sttpages; - unsigned long ret = kvmppc_ioba_validate(stt, ioba, npages); - - if (ret) - return ret; - /* - * clearing==true says kvmppc_rm_tce_put won't be allocating pages - * for empty tces. 
- */ - if (clearing) - return H_SUCCESS; - - idx = (ioba >> stt->page_shift) - stt->offset; - sttpage = idx / TCES_PER_PAGE; - sttpages = ALIGN(idx % TCES_PER_PAGE + npages, TCES_PER_PAGE) / - TCES_PER_PAGE; - for (i = sttpage; i < sttpage + sttpages; ++i) - if (!stt->pages[i]) - return H_TOO_HARD; - - return H_SUCCESS; -} - -static long iommu_tce_xchg_no_kill_rm(struct mm_struct *mm, - struct iommu_table *tbl, - unsigned long entry, unsigned long *hpa, - enum dma_data_direction *direction) -{ - long ret; - - ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, true); - - if (!ret && ((*direction == DMA_FROM_DEVICE) || - (*direction == DMA_BIDIRECTIONAL))) { - __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry); - /* - * kvmppc_rm_tce_iommu_do_map() updates the UA cache after - * calling this so we still get here a valid UA. - */ - if (pua && *pua) - mm_iommu_ua_mark_dirty_rm(mm, be64_to_cpu(*pua)); - } - - return ret; -} - -static void iommu_tce_kill_rm(struct iommu_table *tbl, - unsigned long entry, unsigned long pages) -{ - if (tbl->it_ops->tce_kill) - tbl->it_ops->tce_kill(tbl, entry, pages, true); -} - -static void kvmppc_rm_clear_tce(struct kvm *kvm, struct kvmppc_spapr_tce_table *stt, - struct iommu_table *tbl, unsigned long entry) -{ - unsigned long i; - unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift); - unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift); - - for (i = 0; i < subpages; ++i) { - unsigned long hpa = 0; - enum dma_data_direction dir = DMA_NONE; - - iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, io_entry + i, &hpa, &dir); - } -} - -static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm, - struct iommu_table *tbl, unsigned long entry) -{ - struct mm_iommu_table_group_mem_t *mem = NULL; - const unsigned long pgsize = 1ULL << tbl->it_page_shift; - __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry); - - if (!pua) - /* it_userspace allocation might be delayed */ - return H_TOO_HARD; - - mem = mm_iommu_lookup_rm(kvm->mm, be64_to_cpu(*pua), pgsize); - if (!mem) - return H_TOO_HARD; - - mm_iommu_mapped_dec(mem); - - *pua = cpu_to_be64(0); - - return H_SUCCESS; -} - -static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm, - struct iommu_table *tbl, unsigned long entry) -{ - enum dma_data_direction dir = DMA_NONE; - unsigned long hpa = 0; - long ret; - - if (iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir)) - /* - * real mode xchg can fail if struct page crosses - * a page boundary - */ - return H_TOO_HARD; - - if (dir == DMA_NONE) - return H_SUCCESS; - - ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry); - if (ret) - iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir); - - return ret; -} - -static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm, - struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl, - unsigned long entry) -{ - unsigned long i, ret = H_SUCCESS; - unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift); - unsigned long io_entry = entry * subpages; - - for (i = 0; i < subpages; ++i) { - ret = kvmppc_rm_tce_iommu_do_unmap(kvm, tbl, io_entry + i); - if (ret != H_SUCCESS) - break; - } - - iommu_tce_kill_rm(tbl, io_entry, subpages); - - return ret; -} - -static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl, - unsigned long entry, unsigned long ua, - enum dma_data_direction dir) -{ - long ret; - unsigned long hpa = 0; - __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry); - struct mm_iommu_table_group_mem_t *mem; - - if (!pua) - /* 
it_userspace allocation might be delayed */ - return H_TOO_HARD; - - mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift); - if (!mem) - return H_TOO_HARD; - - if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift, - &hpa))) - return H_TOO_HARD; - - if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem))) - return H_TOO_HARD; - - ret = iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir); - if (ret) { - mm_iommu_mapped_dec(mem); - /* - * real mode xchg can fail if struct page crosses - * a page boundary - */ - return H_TOO_HARD; - } - - if (dir != DMA_NONE) - kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry); - - *pua = cpu_to_be64(ua); - - return 0; -} - -static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, - struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl, - unsigned long entry, unsigned long ua, - enum dma_data_direction dir) -{ - unsigned long i, pgoff, ret = H_SUCCESS; - unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift); - unsigned long io_entry = entry * subpages; - - for (i = 0, pgoff = 0; i < subpages; - ++i, pgoff += IOMMU_PAGE_SIZE(tbl)) { - - ret = kvmppc_rm_tce_iommu_do_map(kvm, tbl, - io_entry + i, ua + pgoff, dir); - if (ret != H_SUCCESS) - break; - } - - iommu_tce_kill_rm(tbl, io_entry, subpages); - - return ret; -} - -long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, - unsigned long ioba, unsigned long tce) -{ - struct kvmppc_spapr_tce_table *stt; - long ret; - struct kvmppc_spapr_tce_iommu_table *stit; - unsigned long entry, ua = 0; - enum dma_data_direction dir; - - /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */ - /* liobn, ioba, tce); */ - - stt = kvmppc_find_table(vcpu->kvm, liobn); - if (!stt) - return H_TOO_HARD; - - ret = kvmppc_rm_ioba_validate(stt, ioba, 1, tce == 0); - if (ret != H_SUCCESS) - return ret; - - ret = kvmppc_rm_tce_validate(stt, tce); - if (ret != H_SUCCESS) - return ret; - - dir = iommu_tce_direction(tce); - if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua)) - return H_PARAMETER; - - entry = ioba >> stt->page_shift; - - list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { - if (dir == DMA_NONE) - ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt, - stit->tbl, entry); - else - ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt, - stit->tbl, entry, ua, dir); - - if (ret != H_SUCCESS) { - kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry); - return ret; - } - } - - kvmppc_rm_tce_put(stt, entry, tce); - - return H_SUCCESS; -} - -static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu, unsigned long mmu_seq, - unsigned long ua, unsigned long *phpa) -{ - pte_t *ptep, pte; - unsigned shift = 0; - - /* - * Called in real mode with MSR_EE = 0. We are safe here. - * It is ok to do the lookup with arch.pgdir here, because - * we are doing this on secondary cpus and current task there - * is not the hypervisor. Also this is safe against THP in the - * host, because an IPI to primary thread will wait for the secondary - * to exit which will agains result in the below page table walk - * to finish. - */ - /* an rmap lock won't make it safe. because that just ensure hash - * page table entries are removed with rmap lock held. After that - * mmu notifier returns and we go ahead and removing ptes from Qemu page table. 
- */ - ptep = find_kvm_host_pte(vcpu->kvm, mmu_seq, ua, &shift); - if (!ptep) - return -ENXIO; - - pte = READ_ONCE(*ptep); - if (!pte_present(pte)) - return -ENXIO; - - if (!shift) - shift = PAGE_SHIFT; - - /* Avoid handling anything potentially complicated in realmode */ - if (shift > PAGE_SHIFT) - return -EAGAIN; - - if (!pte_young(pte)) - return -EAGAIN; - - *phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) | - (ua & ~PAGE_MASK); - - return 0; -} - -long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, - unsigned long liobn, unsigned long ioba, - unsigned long tce_list, unsigned long npages) -{ - struct kvm *kvm = vcpu->kvm; - struct kvmppc_spapr_tce_table *stt; - long i, ret = H_SUCCESS; - unsigned long tces, entry, ua = 0; - unsigned long mmu_seq; - bool prereg = false; - struct kvmppc_spapr_tce_iommu_table *stit; - - /* - * used to check for invalidations in progress - */ - mmu_seq = kvm->mmu_notifier_seq; - smp_rmb(); - - stt = kvmppc_find_table(vcpu->kvm, liobn); - if (!stt) - return H_TOO_HARD; - - entry = ioba >> stt->page_shift; - /* - * The spec says that the maximum size of the list is 512 TCEs - * so the whole table addressed resides in 4K page - */ - if (npages > 512) - return H_PARAMETER; - - if (tce_list & (SZ_4K - 1)) - return H_PARAMETER; - - ret = kvmppc_rm_ioba_validate(stt, ioba, npages, false); - if (ret != H_SUCCESS) - return ret; - - if (mm_iommu_preregistered(vcpu->kvm->mm)) { - /* - * We get here if guest memory was pre-registered which - * is normally VFIO case and gpa->hpa translation does not - * depend on hpt. - */ - struct mm_iommu_table_group_mem_t *mem; - - if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua)) - return H_TOO_HARD; - - mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K); - if (mem) - prereg = mm_iommu_ua_to_hpa_rm(mem, ua, - IOMMU_PAGE_SHIFT_4K, &tces) == 0; - } - - if (!prereg) { - /* - * This is usually a case of a guest with emulated devices only - * when TCE list is not in preregistered memory. - * We do not require memory to be preregistered in this case - * so lock rmap and do __find_linux_pte_or_hugepte(). 
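The two validation checks just above encode a PAPR invariant worth spelling out: an H_PUT_TCE_INDIRECT list holds at most 512 TCEs, each a __be64, so the list occupies at most one 4K page, and requiring tce_list to be 4K-aligned guarantees it never straddles a page boundary — a single ua-to-hpa translation then covers the whole list. Illustrative assertion (not in the patch):

	BUILD_BUG_ON(512 * sizeof(__be64) != SZ_4K);	/* 512 * 8 == 4096 */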
- */ - if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua)) - return H_TOO_HARD; - - arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock); - if (kvmppc_rm_ua_to_hpa(vcpu, mmu_seq, ua, &tces)) { - ret = H_TOO_HARD; - goto unlock_exit; - } - } - - for (i = 0; i < npages; ++i) { - unsigned long tce = be64_to_cpu(((u64 *)tces)[i]); - - ret = kvmppc_rm_tce_validate(stt, tce); - if (ret != H_SUCCESS) - goto unlock_exit; - } - - for (i = 0; i < npages; ++i) { - unsigned long tce = be64_to_cpu(((u64 *)tces)[i]); - - ua = 0; - if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua)) { - ret = H_PARAMETER; - goto unlock_exit; - } - - list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { - ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt, - stit->tbl, entry + i, ua, - iommu_tce_direction(tce)); - - if (ret != H_SUCCESS) { - kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, - entry + i); - goto unlock_exit; - } - } - - kvmppc_rm_tce_put(stt, entry + i, tce); - } - -unlock_exit: - if (!prereg) - arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock); - return ret; -} - -long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu, - unsigned long liobn, unsigned long ioba, - unsigned long tce_value, unsigned long npages) -{ - struct kvmppc_spapr_tce_table *stt; - long i, ret; - struct kvmppc_spapr_tce_iommu_table *stit; - - stt = kvmppc_find_table(vcpu->kvm, liobn); - if (!stt) - return H_TOO_HARD; - - ret = kvmppc_rm_ioba_validate(stt, ioba, npages, tce_value == 0); - if (ret != H_SUCCESS) - return ret; - - /* Check permission bits only to allow userspace poison TCE for debug */ - if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ)) - return H_PARAMETER; - - list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { - unsigned long entry = ioba >> stt->page_shift; - - for (i = 0; i < npages; ++i) { - ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt, - stit->tbl, entry + i); - - if (ret == H_SUCCESS) - continue; - - if (ret == H_TOO_HARD) - return ret; - - WARN_ON_ONCE_RM(1); - kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry + i); - } - } - - for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift)) - kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value); - - return ret; -} - -/* This can be called in either virtual mode or real mode */ -long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn, - unsigned long ioba) -{ - struct kvmppc_spapr_tce_table *stt; - long ret; - unsigned long idx; - struct page *page; - u64 *tbl; - - stt = kvmppc_find_table(vcpu->kvm, liobn); - if (!stt) - return H_TOO_HARD; - - ret = kvmppc_ioba_validate(stt, ioba, 1); - if (ret != H_SUCCESS) - return ret; - - idx = (ioba >> stt->page_shift) - stt->offset; - page = stt->pages[idx / TCES_PER_PAGE]; - if (!page) { - vcpu->arch.regs.gpr[4] = 0; - return H_SUCCESS; - } - tbl = (u64 *)page_address(page); - - vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE]; - - return H_SUCCESS; -} -EXPORT_SYMBOL_GPL(kvmppc_h_get_tce); - -#endif /* KVM_BOOK3S_HV_POSSIBLE */ diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index fdb57be71aa6..5bbfb2eed127 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c @@ -268,7 +268,7 @@ int kvmppc_core_emulate_op_pr(struct kvm_vcpu *vcpu, /* * add rules to fit in ISA specification regarding TM - * state transistion in TM disable/Suspended state, + * state transition in TM disable/Suspended state, * and target TM state is TM inactive(00) state. (the * change should be suppressed). 
*/ diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 6fa518f6501d..e08fb3124dca 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -42,6 +42,7 @@ #include <linux/module.h> #include <linux/compiler.h> #include <linux/of.h> +#include <linux/irqdomain.h> #include <asm/ftrace.h> #include <asm/reg.h> @@ -1326,6 +1327,12 @@ static int kvmppc_hcall_impl_hv(unsigned long cmd) case H_CONFER: case H_REGISTER_VPA: case H_SET_MODE: +#ifdef CONFIG_SPAPR_TCE_IOMMU + case H_GET_TCE: + case H_PUT_TCE: + case H_PUT_TCE_INDIRECT: + case H_STUFF_TCE: +#endif case H_LOGICAL_CI_LOAD: case H_LOGICAL_CI_STORE: #ifdef CONFIG_KVM_XICS @@ -2834,7 +2841,7 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu) * to trap and then we emulate them. */ vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB | - HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP | HFSCR_PREFIX; + HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP; if (cpu_has_feature(CPU_FTR_HVMODE)) { vcpu->arch.hfscr &= mfspr(SPRN_HFSCR); #ifdef CONFIG_PPC_TRANSACTIONAL_MEM @@ -3967,6 +3974,7 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns kvmhv_save_hv_regs(vcpu, &hvregs); hvregs.lpcr = lpcr; + hvregs.amor = ~0; vcpu->arch.regs.msr = vcpu->arch.shregs.msr; hvregs.version = HV_GUEST_STATE_VERSION; if (vcpu->arch.nested) { @@ -4029,6 +4037,8 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb) { + struct kvm *kvm = vcpu->kvm; + struct kvm_nested_guest *nested = vcpu->arch.nested; u64 next_timer; int trap; @@ -4048,34 +4058,61 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit, trap = kvmhv_vcpu_entry_p9_nested(vcpu, time_limit, lpcr, tb); /* H_CEDE has to be handled now, not later */ - if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested && + if (trap == BOOK3S_INTERRUPT_SYSCALL && !nested && kvmppc_get_gpr(vcpu, 3) == H_CEDE) { kvmppc_cede(vcpu); kvmppc_set_gpr(vcpu, 3, 0); trap = 0; } - } else { - struct kvm *kvm = vcpu->kvm; + } else if (nested) { + __this_cpu_write(cpu_in_guest, kvm); + trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr, tb); + __this_cpu_write(cpu_in_guest, NULL); + } else { kvmppc_xive_push_vcpu(vcpu); __this_cpu_write(cpu_in_guest, kvm); trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr, tb); __this_cpu_write(cpu_in_guest, NULL); - if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested && + if (trap == BOOK3S_INTERRUPT_SYSCALL && !(vcpu->arch.shregs.msr & MSR_PR)) { unsigned long req = kvmppc_get_gpr(vcpu, 3); - /* H_CEDE has to be handled now, not later */ + /* + * XIVE rearm and XICS hcalls must be handled + * before xive context is pulled (is this + * true?) + */ if (req == H_CEDE) { + /* H_CEDE has to be handled now */ kvmppc_cede(vcpu); - kvmppc_xive_rearm_escalation(vcpu); /* may un-cede */ + if (!kvmppc_xive_rearm_escalation(vcpu)) { + /* + * Pending escalation so abort + * the cede. + */ + vcpu->arch.ceded = 0; + } kvmppc_set_gpr(vcpu, 3, 0); trap = 0; - /* XICS hcalls must be handled before xive is pulled */ + } else if (req == H_ENTER_NESTED) { + /* + * L2 should not run with the L1 + * context so rearm and pull it. + */ + if (!kvmppc_xive_rearm_escalation(vcpu)) { + /* + * Pending escalation so abort + * H_ENTER_NESTED. 
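For context, hcall_is_xics() used at the end of this hunk is an existing helper in book3s_hv.c; it is essentially a whitelist of the XICS interrupt-controller hcall numbers, roughly as follows (paraphrased from memory, so verify against the tree):

	static bool hcall_is_xics(unsigned long req)
	{
		return req == H_EOI || req == H_CPPR || req == H_IPI ||
			req == H_IPOLL || req == H_XIRR || req == H_XIRR_X;
	}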
+ */ + kvmppc_set_gpr(vcpu, 3, 0); + trap = 0; + } + } else if (hcall_is_xics(req)) { int ret; @@ -4233,13 +4270,13 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc) start_wait = ktime_get(); vc->vcore_state = VCORE_SLEEPING; - trace_kvmppc_vcore_blocked(vc, 0); + trace_kvmppc_vcore_blocked(vc->runner, 0); spin_unlock(&vc->lock); schedule(); finish_rcuwait(&vc->wait); spin_lock(&vc->lock); vc->vcore_state = VCORE_INACTIVE; - trace_kvmppc_vcore_blocked(vc, 1); + trace_kvmppc_vcore_blocked(vc->runner, 1); ++vc->runner->stat.halt_successful_wait; cur = ktime_get(); @@ -4519,9 +4556,14 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, if (!nested) { kvmppc_core_prepare_to_enter(vcpu); - if (test_bit(BOOK3S_IRQPRIO_EXTERNAL, - &vcpu->arch.pending_exceptions)) + if (vcpu->arch.shregs.msr & MSR_EE) { + if (xive_interrupt_pending(vcpu)) + kvmppc_inject_interrupt_hv(vcpu, + BOOK3S_INTERRUPT_EXTERNAL, 0); + } else if (test_bit(BOOK3S_IRQPRIO_EXTERNAL, + &vcpu->arch.pending_exceptions)) { lpcr |= LPCR_MER; + } } else if (vcpu->arch.pending_exceptions || vcpu->arch.doorbell_request || xive_interrupt_pending(vcpu)) { @@ -4619,9 +4661,9 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, if (kvmppc_vcpu_check_block(vcpu)) break; - trace_kvmppc_vcore_blocked(vc, 0); + trace_kvmppc_vcore_blocked(vcpu, 0); schedule(); - trace_kvmppc_vcore_blocked(vc, 1); + trace_kvmppc_vcore_blocked(vcpu, 1); } finish_rcuwait(wait); } @@ -5283,6 +5325,10 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm) kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); lpcr &= LPCR_PECE | LPCR_LPES; } else { + /* + * The L2 LPES mode will be set by the L0 according to whether + * or not it needs to take external interrupts in HV mode. + */ lpcr = 0; } lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE | diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index 7e52d0beee77..88a8f6473c4e 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -489,70 +489,6 @@ static long kvmppc_read_one_intr(bool *again) return kvmppc_check_passthru(xisr, xirr, again); } -#ifdef CONFIG_KVM_XICS -unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu) -{ - if (!kvmppc_xics_enabled(vcpu)) - return H_TOO_HARD; - if (xics_on_xive()) - return xive_rm_h_xirr(vcpu); - else - return xics_rm_h_xirr(vcpu); -} - -unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu) -{ - if (!kvmppc_xics_enabled(vcpu)) - return H_TOO_HARD; - vcpu->arch.regs.gpr[5] = get_tb(); - if (xics_on_xive()) - return xive_rm_h_xirr(vcpu); - else - return xics_rm_h_xirr(vcpu); -} - -unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server) -{ - if (!kvmppc_xics_enabled(vcpu)) - return H_TOO_HARD; - if (xics_on_xive()) - return xive_rm_h_ipoll(vcpu, server); - else - return H_TOO_HARD; -} - -int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, - unsigned long mfrr) -{ - if (!kvmppc_xics_enabled(vcpu)) - return H_TOO_HARD; - if (xics_on_xive()) - return xive_rm_h_ipi(vcpu, server, mfrr); - else - return xics_rm_h_ipi(vcpu, server, mfrr); -} - -int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr) -{ - if (!kvmppc_xics_enabled(vcpu)) - return H_TOO_HARD; - if (xics_on_xive()) - return xive_rm_h_cppr(vcpu, cppr); - else - return xics_rm_h_cppr(vcpu, cppr); -} - -int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr) -{ - if (!kvmppc_xics_enabled(vcpu)) - return H_TOO_HARD; - if (xics_on_xive()) - return xive_rm_h_eoi(vcpu, xirr); - else - 
return xics_rm_h_eoi(vcpu, xirr); -} -#endif /* CONFIG_KVM_XICS */ - void kvmppc_bad_interrupt(struct pt_regs *regs) { /* diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index c943a051c6e7..0644732d1a25 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -261,8 +261,7 @@ static void load_l2_hv_regs(struct kvm_vcpu *vcpu, /* * Don't let L1 change LPCR bits for the L2 except these: */ - mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD | - LPCR_LPES | LPCR_MER; + mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD | LPCR_MER; /* * Additional filtering is required depending on hardware @@ -439,10 +438,11 @@ long kvmhv_nested_init(void) if (!radix_enabled()) return -ENODEV; - /* find log base 2 of KVMPPC_NR_LPIDS, rounding up */ - ptb_order = __ilog2(KVMPPC_NR_LPIDS - 1) + 1; - if (ptb_order < 8) - ptb_order = 8; + /* Partition table entry is 1<<4 bytes in size, hence the 4. */ + ptb_order = KVM_MAX_NESTED_GUESTS_SHIFT + 4; + /* Minimum partition table size is 1<<12 bytes */ + if (ptb_order < 12) + ptb_order = 12; pseries_partition_tb = kmalloc(sizeof(struct patb_entry) << ptb_order, GFP_KERNEL); if (!pseries_partition_tb) { @@ -450,7 +450,7 @@ long kvmhv_nested_init(void) return -ENOMEM; } - ptcr = __pa(pseries_partition_tb) | (ptb_order - 8); + ptcr = __pa(pseries_partition_tb) | (ptb_order - 12); rc = plpar_hcall_norets(H_SET_PARTITION_TABLE, ptcr); if (rc != H_SUCCESS) { pr_err("kvm-hv: Parent hypervisor does not support nesting (rc=%ld)\n", @@ -521,11 +521,6 @@ static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp) kvmhv_set_ptbl_entry(gp->shadow_lpid, dw0, gp->process_table); } -void kvmhv_vm_nested_init(struct kvm *kvm) -{ - kvm->arch.max_nested_lpid = -1; -} - /* * Handle the H_SET_PARTITION_TABLE hcall. * r4 = guest real address of partition table + log_2(size) - 12 @@ -539,16 +534,14 @@ long kvmhv_set_partition_table(struct kvm_vcpu *vcpu) long ret = H_SUCCESS; srcu_idx = srcu_read_lock(&kvm->srcu); - /* - * Limit the partition table to 4096 entries (because that's what - * hardware supports), and check the base address. - */ - if ((ptcr & PRTS_MASK) > 12 - 8 || + /* Check partition size and base address. 
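The sizing arithmetic in kvmhv_nested_init() above, unpacked: a partition-table entry (struct patb_entry) is two doublewords, i.e. 1 << 4 = 16 bytes, so a table with 1 << KVM_MAX_NESTED_GUESTS_SHIFT entries needs an allocation order of SHIFT + 4; the architectural minimum table size is 4KB (order 12), and the PTCR size field holds log2(bytes) - 12, hence the ptb_order - 12. A worked example with a hypothetical SHIFT of 10 (the real constant lives elsewhere in the headers):

	/*
	 * entries   = 1 << 10                          = 1024
	 * bytes     = 1024 * sizeof(struct patb_entry) = 1024 * 16 = 16384
	 * ptb_order = 10 + 4 = 14                      (>= 12, no rounding up)
	 * PTCR size = 14 - 12 = 2
	 */
	BUILD_BUG_ON(sizeof(struct patb_entry) != (1UL << 4));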
*/ + if ((ptcr & PRTS_MASK) + 12 - 4 > KVM_MAX_NESTED_GUESTS_SHIFT || !kvm_is_visible_gfn(vcpu->kvm, (ptcr & PRTB_MASK) >> PAGE_SHIFT)) ret = H_PARAMETER; srcu_read_unlock(&kvm->srcu, srcu_idx); if (ret == H_SUCCESS) kvm->arch.l1_ptcr = ptcr; + return ret; } @@ -644,7 +637,7 @@ static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp) ret = -EFAULT; ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4); - if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 8))) { + if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4))) { int srcu_idx = srcu_read_lock(&kvm->srcu); ret = kvm_read_guest(kvm, ptbl_addr, &ptbl_entry, sizeof(ptbl_entry)); @@ -660,6 +653,35 @@ static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp) kvmhv_set_nested_ptbl(gp); } +void kvmhv_vm_nested_init(struct kvm *kvm) +{ + idr_init(&kvm->arch.kvm_nested_guest_idr); +} + +static struct kvm_nested_guest *__find_nested(struct kvm *kvm, int lpid) +{ + return idr_find(&kvm->arch.kvm_nested_guest_idr, lpid); +} + +static bool __prealloc_nested(struct kvm *kvm, int lpid) +{ + if (idr_alloc(&kvm->arch.kvm_nested_guest_idr, + NULL, lpid, lpid + 1, GFP_KERNEL) != lpid) + return false; + return true; +} + +static void __add_nested(struct kvm *kvm, int lpid, struct kvm_nested_guest *gp) +{ + if (idr_replace(&kvm->arch.kvm_nested_guest_idr, gp, lpid)) + WARN_ON(1); +} + +static void __remove_nested(struct kvm *kvm, int lpid) +{ + idr_remove(&kvm->arch.kvm_nested_guest_idr, lpid); +} + static struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid) { struct kvm_nested_guest *gp; @@ -720,13 +742,8 @@ static void kvmhv_remove_nested(struct kvm_nested_guest *gp) long ref; spin_lock(&kvm->mmu_lock); - if (gp == kvm->arch.nested_guests[lpid]) { - kvm->arch.nested_guests[lpid] = NULL; - if (lpid == kvm->arch.max_nested_lpid) { - while (--lpid >= 0 && !kvm->arch.nested_guests[lpid]) - ; - kvm->arch.max_nested_lpid = lpid; - } + if (gp == __find_nested(kvm, lpid)) { + __remove_nested(kvm, lpid); --gp->refcnt; } ref = gp->refcnt; @@ -743,24 +760,22 @@ static void kvmhv_remove_nested(struct kvm_nested_guest *gp) */ void kvmhv_release_all_nested(struct kvm *kvm) { - int i; + int lpid; struct kvm_nested_guest *gp; struct kvm_nested_guest *freelist = NULL; struct kvm_memory_slot *memslot; int srcu_idx, bkt; spin_lock(&kvm->mmu_lock); - for (i = 0; i <= kvm->arch.max_nested_lpid; i++) { - gp = kvm->arch.nested_guests[i]; - if (!gp) - continue; - kvm->arch.nested_guests[i] = NULL; + idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) { + __remove_nested(kvm, lpid); if (--gp->refcnt == 0) { gp->next = freelist; freelist = gp; } } - kvm->arch.max_nested_lpid = -1; + idr_destroy(&kvm->arch.kvm_nested_guest_idr); + /* idr is empty and may be reused at this point */ spin_unlock(&kvm->mmu_lock); while ((gp = freelist) != NULL) { freelist = gp->next; @@ -792,12 +807,11 @@ struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid, { struct kvm_nested_guest *gp, *newgp; - if (l1_lpid >= KVM_MAX_NESTED_GUESTS || - l1_lpid >= (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4))) + if (l1_lpid >= (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4))) return NULL; spin_lock(&kvm->mmu_lock); - gp = kvm->arch.nested_guests[l1_lpid]; + gp = __find_nested(kvm, l1_lpid); if (gp) ++gp->refcnt; spin_unlock(&kvm->mmu_lock); @@ -808,17 +822,19 @@ struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid, newgp = kvmhv_alloc_nested(kvm, l1_lpid); if (!newgp) return NULL; + 
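The __prealloc_nested()/__add_nested() pair above is the standard two-phase <linux/idr.h> idiom: reserve the exact id with a NULL pointer while it is still legal to sleep (idr_alloc with GFP_KERNEL), then publish the real pointer under kvm->mmu_lock with idr_replace, so readers doing idr_find() either see NULL (reserved, not ready) or a fully constructed guest. A condensed sketch of the same lifecycle, with locking elided and a bare struct idr standing in for kvm->arch.kvm_nested_guest_idr:

	#include <linux/idr.h>

	static struct kvm_nested_guest *publish_nested(struct idr *map, int lpid,
						       struct kvm_nested_guest *gp)
	{
		/* Reserve exactly [lpid, lpid + 1); may sleep, so done unlocked. */
		if (idr_alloc(map, NULL, lpid, lpid + 1, GFP_KERNEL) != lpid)
			return NULL;			/* id already taken */

		idr_replace(map, gp, lpid);		/* publish under the lock */
		return gp;				/* lookup: idr_find(map, lpid) */
	}

The same structure is what lets kvmhv_release_all_nested() and kvmhv_nested_next_lpid() below replace the old max_nested_lpid bookkeeping with idr_for_each_entry() and idr_get_next().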
+ if (!__prealloc_nested(kvm, l1_lpid)) { + kvmhv_release_nested(newgp); + return NULL; + } + spin_lock(&kvm->mmu_lock); - if (kvm->arch.nested_guests[l1_lpid]) { - /* someone else beat us to it */ - gp = kvm->arch.nested_guests[l1_lpid]; - } else { - kvm->arch.nested_guests[l1_lpid] = newgp; + gp = __find_nested(kvm, l1_lpid); + if (!gp) { + __add_nested(kvm, l1_lpid, newgp); ++newgp->refcnt; gp = newgp; newgp = NULL; - if (l1_lpid > kvm->arch.max_nested_lpid) - kvm->arch.max_nested_lpid = l1_lpid; } ++gp->refcnt; spin_unlock(&kvm->mmu_lock); @@ -841,20 +857,13 @@ void kvmhv_put_nested(struct kvm_nested_guest *gp) kvmhv_release_nested(gp); } -static struct kvm_nested_guest *kvmhv_find_nested(struct kvm *kvm, int lpid) -{ - if (lpid > kvm->arch.max_nested_lpid) - return NULL; - return kvm->arch.nested_guests[lpid]; -} - pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid, unsigned long ea, unsigned *hshift) { struct kvm_nested_guest *gp; pte_t *pte; - gp = kvmhv_find_nested(kvm, lpid); + gp = __find_nested(kvm, lpid); if (!gp) return NULL; @@ -960,7 +969,7 @@ static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap, gpa = n_rmap & RMAP_NESTED_GPA_MASK; lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT; - gp = kvmhv_find_nested(kvm, lpid); + gp = __find_nested(kvm, lpid); if (!gp) return; @@ -1152,16 +1161,13 @@ static void kvmhv_emulate_tlbie_all_lpid(struct kvm_vcpu *vcpu, int ric) { struct kvm *kvm = vcpu->kvm; struct kvm_nested_guest *gp; - int i; + int lpid; spin_lock(&kvm->mmu_lock); - for (i = 0; i <= kvm->arch.max_nested_lpid; i++) { - gp = kvm->arch.nested_guests[i]; - if (gp) { - spin_unlock(&kvm->mmu_lock); - kvmhv_emulate_tlbie_lpid(vcpu, gp, ric); - spin_lock(&kvm->mmu_lock); - } + idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) { + spin_unlock(&kvm->mmu_lock); + kvmhv_emulate_tlbie_lpid(vcpu, gp, ric); + spin_lock(&kvm->mmu_lock); } spin_unlock(&kvm->mmu_lock); } @@ -1313,7 +1319,7 @@ long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid, * H_ENTER_NESTED call. Since we can't differentiate this case from * the invalid case, we ignore such flush requests and return success. */ - if (!kvmhv_find_nested(vcpu->kvm, lpid)) + if (!__find_nested(vcpu->kvm, lpid)) return H_SUCCESS; /* @@ -1657,15 +1663,12 @@ long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu) int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid) { - int ret = -1; + int ret = lpid + 1; spin_lock(&kvm->mmu_lock); - while (++lpid <= kvm->arch.max_nested_lpid) { - if (kvm->arch.nested_guests[lpid]) { - ret = lpid; - break; - } - } + if (!idr_get_next(&kvm->arch.kvm_nested_guest_idr, &ret)) + ret = -1; spin_unlock(&kvm->mmu_lock); + return ret; } diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c index a28e5b3daabd..112a09b33328 100644 --- a/arch/powerpc/kvm/book3s_hv_p9_entry.c +++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c @@ -379,7 +379,7 @@ void restore_p9_host_os_sprs(struct kvm_vcpu *vcpu, { /* * current->thread.xxx registers must all be restored to host - * values before a potential context switch, othrewise the context + * values before a potential context switch, otherwise the context * switch itself will overwrite current->thread.xxx with the values * from the guest SPRs. */ @@ -539,8 +539,10 @@ static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u6 { struct kvm_nested_guest *nested = vcpu->arch.nested; u32 lpid; + u32 pid; lpid = nested ? 
nested->shadow_lpid : kvm->arch.lpid; + pid = vcpu->arch.pid; /* * Prior memory accesses to host PID Q3 must be completed before we @@ -551,7 +553,7 @@ static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u6 isync(); mtspr(SPRN_LPID, lpid); mtspr(SPRN_LPCR, lpcr); - mtspr(SPRN_PID, vcpu->arch.pid); + mtspr(SPRN_PID, pid); /* * isync not required here because we are HRFID'ing to guest before * any guest context access, which is context synchronising. @@ -561,9 +563,11 @@ static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u6 static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr) { u32 lpid; + u32 pid; int i; lpid = kvm->arch.lpid; + pid = vcpu->arch.pid; /* * See switch_mmu_to_guest_radix. ptesync should not be required here @@ -574,7 +578,7 @@ static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 isync(); mtspr(SPRN_LPID, lpid); mtspr(SPRN_LPCR, lpcr); - mtspr(SPRN_PID, vcpu->arch.pid); + mtspr(SPRN_PID, pid); for (i = 0; i < vcpu->arch.slb_max; i++) mtslb(vcpu->arch.slb[i].orige, vcpu->arch.slb[i].origv); @@ -585,6 +589,9 @@ static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 static void switch_mmu_to_host(struct kvm *kvm, u32 pid) { + u32 lpid = kvm->arch.host_lpid; + u64 lpcr = kvm->arch.host_lpcr; + /* * The guest has exited, so guest MMU context is no longer being * non-speculatively accessed, but a hwsync is needed before the @@ -594,8 +601,8 @@ static void switch_mmu_to_host(struct kvm *kvm, u32 pid) asm volatile("hwsync" ::: "memory"); isync(); mtspr(SPRN_PID, pid); - mtspr(SPRN_LPID, kvm->arch.host_lpid); - mtspr(SPRN_LPCR, kvm->arch.host_lpcr); + mtspr(SPRN_LPID, lpid); + mtspr(SPRN_LPCR, lpcr); /* * isync is not required after the switch, because mtmsrd with L=0 * is performed after this switch, which is context synchronising. diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c index 587c33fc4564..e165bfa842bf 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_xics.c +++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c @@ -479,6 +479,11 @@ static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp, } } +unsigned long xics_rm_h_xirr_x(struct kvm_vcpu *vcpu) +{ + vcpu->arch.regs.gpr[5] = get_tb(); + return xics_rm_h_xirr(vcpu); +} unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu) { @@ -883,7 +888,7 @@ long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, /* --- Non-real mode XICS-related built-in routines --- */ -/** +/* * Host Operations poked by RM KVM */ static void rm_host_ipi_action(int action, void *data) diff --git a/arch/powerpc/kvm/book3s_hv_rm_xive.c b/arch/powerpc/kvm/book3s_hv_rm_xive.c deleted file mode 100644 index dd9880731bd6..000000000000 --- a/arch/powerpc/kvm/book3s_hv_rm_xive.c +++ /dev/null @@ -1,46 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include <linux/kernel.h> -#include <linux/kvm_host.h> -#include <linux/err.h> -#include <linux/kernel_stat.h> -#include <linux/pgtable.h> - -#include <asm/kvm_book3s.h> -#include <asm/kvm_ppc.h> -#include <asm/hvcall.h> -#include <asm/xics.h> -#include <asm/debug.h> -#include <asm/synch.h> -#include <asm/cputhreads.h> -#include <asm/ppc-opcode.h> -#include <asm/pnv-pci.h> -#include <asm/opal.h> -#include <asm/smp.h> -#include <asm/xive.h> -#include <asm/xive-regs.h> - -#include "book3s_xive.h" - -/* XXX */ -#include <asm/udbg.h> -//#define DBG(fmt...) udbg_printf(fmt) -#define DBG(fmt...) 
do { } while(0) - -static inline void __iomem *get_tima_phys(void) -{ - return local_paca->kvm_hstate.xive_tima_phys; -} - -#undef XIVE_RUNTIME_CHECKS -#define X_PFX xive_rm_ -#define X_STATIC -#define X_STAT_PFX stat_rm_ -#define __x_tima get_tima_phys() -#define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_page)) -#define __x_trig_page(xd) ((void __iomem *)((xd)->trig_page)) -#define __x_writeb __raw_rm_writeb -#define __x_readw __raw_rm_readw -#define __x_readq __raw_rm_readq -#define __x_writeq __raw_rm_writeq - -#include "book3s_xive_template.c" diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index d185dee26026..0fc0e68d20d0 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -51,6 +51,14 @@ #define STACK_SLOT_FSCR (SFS-96) /* + * Use the last LPID (all implemented LPID bits = 1) for partition switching. + * This is reserved in the LPID allocator. POWER7 only implements 0x3ff, but + * we write 0xfff into the LPID SPR anyway, which seems to work and just + * ignores the top bits. + */ +#define LPID_RSVD 0xfff + +/* * Call kvmppc_hv_entry in real mode. * Must be called with interrupts hard-disabled. * @@ -1784,13 +1792,8 @@ hcall_real_table: .long DOTSYM(kvmppc_h_clear_mod) - hcall_real_table .long DOTSYM(kvmppc_h_clear_ref) - hcall_real_table .long DOTSYM(kvmppc_h_protect) - hcall_real_table -#ifdef CONFIG_SPAPR_TCE_IOMMU - .long DOTSYM(kvmppc_h_get_tce) - hcall_real_table - .long DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table -#else .long 0 /* 0x1c */ .long 0 /* 0x20 */ -#endif .long 0 /* 0x24 - H_SET_SPRG0 */ .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table .long DOTSYM(kvmppc_rm_h_page_init) - hcall_real_table @@ -1808,11 +1811,11 @@ hcall_real_table: .long 0 /* 0x5c */ .long 0 /* 0x60 */ #ifdef CONFIG_KVM_XICS - .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table - .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table - .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table - .long DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table - .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table + .long DOTSYM(xics_rm_h_eoi) - hcall_real_table + .long DOTSYM(xics_rm_h_cppr) - hcall_real_table + .long DOTSYM(xics_rm_h_ipi) - hcall_real_table + .long 0 /* 0x70 - H_IPOLL */ + .long DOTSYM(xics_rm_h_xirr) - hcall_real_table #else .long 0 /* 0x64 - H_EOI */ .long 0 /* 0x68 - H_CPPR */ @@ -1868,13 +1871,8 @@ hcall_real_table: .long 0 /* 0x12c */ .long 0 /* 0x130 */ .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table -#ifdef CONFIG_SPAPR_TCE_IOMMU - .long DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table - .long DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table -#else .long 0 /* 0x138 */ .long 0 /* 0x13c */ -#endif .long 0 /* 0x140 */ .long 0 /* 0x144 */ .long 0 /* 0x148 */ @@ -1987,7 +1985,7 @@ hcall_real_table: .long 0 /* 0x2f4 */ .long 0 /* 0x2f8 */ #ifdef CONFIG_KVM_XICS - .long DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table + .long DOTSYM(xics_rm_h_xirr_x) - hcall_real_table #else .long 0 /* 0x2fc - H_XIRR_X*/ #endif diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c index 45c993dd05f5..598006301620 100644 --- a/arch/powerpc/kvm/book3s_hv_uvmem.c +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c @@ -120,7 +120,7 @@ static DEFINE_SPINLOCK(kvmppc_uvmem_bitmap_lock); * content is un-encrypted. * * (c) Normal - The GFN is a normal. The GFN is associated with - * a normal VM. The contents of the GFN is accesible to + * a normal VM. The contents of the GFN is accessible to * the Hypervisor. 
Its content is never encrypted. * * States of a VM. @@ -361,13 +361,15 @@ static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm, static bool kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot, struct kvm *kvm, unsigned long *gfn) { - struct kvmppc_uvmem_slot *p; + struct kvmppc_uvmem_slot *p = NULL, *iter; bool ret = false; unsigned long i; - list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) - if (*gfn >= p->base_pfn && *gfn < p->base_pfn + p->nr_pfns) + list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list) + if (*gfn >= iter->base_pfn && *gfn < iter->base_pfn + iter->nr_pfns) { + p = iter; break; + } if (!p) return ret; /* diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S index 25a3679fb590..f4bec2fc51aa 100644 --- a/arch/powerpc/kvm/book3s_interrupts.S +++ b/arch/powerpc/kvm/book3s_interrupts.S @@ -15,7 +15,7 @@ #include <asm/asm-compat.h> #if defined(CONFIG_PPC_BOOK3S_64) -#ifdef PPC64_ELF_ABI_v2 +#ifdef CONFIG_PPC64_ELF_ABI_V2 #define FUNC(name) name #else #define FUNC(name) GLUE(.,name) diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 7bf9e6ca5c2d..d6abed6e51e6 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -1287,7 +1287,7 @@ int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr) /* Get last sc for papr */ if (vcpu->arch.papr_enabled) { - /* The sc instuction points SRR0 to the next inst */ + /* The sc instruction points SRR0 to the next inst */ emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc); if (emul != EMULATE_DONE) { kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4); diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c index dc4f51ac84bc..a1f2978b2a86 100644 --- a/arch/powerpc/kvm/book3s_pr_papr.c +++ b/arch/powerpc/kvm/book3s_pr_papr.c @@ -433,9 +433,12 @@ int kvmppc_hcall_impl_pr(unsigned long cmd) case H_REMOVE: case H_PROTECT: case H_BULK_REMOVE: +#ifdef CONFIG_SPAPR_TCE_IOMMU + case H_GET_TCE: case H_PUT_TCE: case H_PUT_TCE_INDIRECT: case H_STUFF_TCE: +#endif case H_CEDE: case H_LOGICAL_CI_LOAD: case H_LOGICAL_CI_STORE: @@ -464,7 +467,10 @@ static unsigned int default_hcall_list[] = { H_REMOVE, H_PROTECT, H_BULK_REMOVE, +#ifdef CONFIG_SPAPR_TCE_IOMMU + H_GET_TCE, H_PUT_TCE, +#endif H_CEDE, H_SET_MODE, #ifdef CONFIG_KVM_XICS diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S index b45b750fa77a..03886ca24498 100644 --- a/arch/powerpc/kvm/book3s_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_rmhandlers.S @@ -26,7 +26,7 @@ #if defined(CONFIG_PPC_BOOK3S_64) -#ifdef PPC64_ELF_ABI_v2 +#ifdef CONFIG_PPC64_ELF_ABI_V2 #define FUNC(name) name #else #define FUNC(name) GLUE(.,name) diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c index ab6d37d78c62..589a8f257120 100644 --- a/arch/powerpc/kvm/book3s_xics.c +++ b/arch/powerpc/kvm/book3s_xics.c @@ -462,7 +462,7 @@ static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp, * new guy. We cannot assume that the rejected interrupt is less * favored than the new one, and thus doesn't need to be delivered, * because by the time we exit icp_try_to_deliver() the target - * processor may well have alrady consumed & completed it, and thus + * processor may well have already consumed & completed it, and thus * the rejected interrupt might actually be already acceptable. 
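The book3s_hv_uvmem.c hunk above is an instance of the tree-wide list-iterator hardening: when list_for_each_entry() finishes without hitting break, the iterator does not point at a real element (it is a container_of() of the list head itself), so the old "if (!p)" test could never trigger; the fix keeps the result in a separate pointer that stays NULL unless the loop matched. The pattern in miniature, with a hypothetical match() predicate:

	struct kvmppc_uvmem_slot *found = NULL, *iter;

	list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list)
		if (match(iter, *gfn)) {
			found = iter;
			break;
		}

	if (!found)
		return false;	/* ran off the end: iter is not a valid slot */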
*/ if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) { diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index c0ce5531d9bc..4ca23644f752 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -30,27 +30,629 @@ #include "book3s_xive.h" - -/* - * Virtual mode variants of the hcalls for use on radix/radix - * with AIL. They require the VCPU's VP to be "pushed" - * - * We still instantiate them here because we use some of the - * generated utility functions as well in this file. - */ -#define XIVE_RUNTIME_CHECKS -#define X_PFX xive_vm_ -#define X_STATIC static -#define X_STAT_PFX stat_vm_ -#define __x_tima xive_tima #define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_mmio)) #define __x_trig_page(xd) ((void __iomem *)((xd)->trig_mmio)) -#define __x_writeb __raw_writeb -#define __x_readw __raw_readw -#define __x_readq __raw_readq -#define __x_writeq __raw_writeq -#include "book3s_xive_template.c" +/* Dummy interrupt used when taking interrupts out of a queue in H_CPPR */ +#define XICS_DUMMY 1 + +static void xive_vm_ack_pending(struct kvmppc_xive_vcpu *xc) +{ + u8 cppr; + u16 ack; + + /* + * Ensure any previous store to CPPR is ordered vs. + * the subsequent loads from PIPR or ACK. + */ + eieio(); + + /* Perform the acknowledge OS to register cycle. */ + ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_OS_REG)); + + /* Synchronize subsequent queue accesses */ + mb(); + + /* XXX Check grouping level */ + + /* Anything ? */ + if (!((ack >> 8) & TM_QW1_NSR_EO)) + return; + + /* Grab CPPR of the most favored pending interrupt */ + cppr = ack & 0xff; + if (cppr < 8) + xc->pending |= 1 << cppr; + + /* Check consistency */ + if (cppr >= xc->hw_cppr) + pr_warn("KVM-XIVE: CPU %d odd ack CPPR, got %d at %d\n", + smp_processor_id(), cppr, xc->hw_cppr); + + /* + * Update our image of the HW CPPR. We don't yet modify + * xc->cppr, this will be done as we scan for interrupts + * in the queues. + */ + xc->hw_cppr = cppr; +} + +static u8 xive_vm_esb_load(struct xive_irq_data *xd, u32 offset) +{ + u64 val; + + if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI) + offset |= XIVE_ESB_LD_ST_MO; + + val = __raw_readq(__x_eoi_page(xd) + offset); +#ifdef __LITTLE_ENDIAN__ + val >>= 64-8; +#endif + return (u8)val; +} + + +static void xive_vm_source_eoi(u32 hw_irq, struct xive_irq_data *xd) +{ + /* If the XIVE supports the new "store EOI facility, use it */ + if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) + __raw_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI); + else if (xd->flags & XIVE_IRQ_FLAG_LSI) { + /* + * For LSIs the HW EOI cycle is used rather than PQ bits, + * as they are automatically re-triggred in HW when still + * pending. + */ + __raw_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI); + } else { + uint64_t eoi_val; + + /* + * Otherwise for EOI, we use the special MMIO that does + * a clear of both P and Q and returns the old Q, + * except for LSIs where we use the "EOI cycle" special + * load. 
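A brief primer on the P/Q bits driving xive_vm_esb_load() and xive_vm_source_eoi() above, since the code relies on them silently: each source's ESB tracks a 2-bit state, and the XIVE_ESB_SET_PQ_* MMIO offsets atomically load the old value while storing a new one. A summary in comment form (my paraphrase, not from the patch):

	/*
	 * P = interrupt presented, awaiting EOI; Q = further trigger coalesced.
	 *   PQ = 00  idle, the next trigger will be delivered
	 *   PQ = 10  pending (P set)
	 *   PQ = 11  pending with a coalesced trigger queued (P and Q set)
	 *   PQ = 01  queued only, must be re-fired
	 * Hence the EOI path: a load with SET_PQ_00 clears both bits and
	 * returns the old state; if Q (bit 0) was set, a write to the trigger
	 * page re-fires the interrupt instead of synthesizing it in software.
	 */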
+ * + * This allows us to then do a re-trigger if Q was set + * rather than synthetizing an interrupt in software + */ + eoi_val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_00); + + /* Re-trigger if needed */ + if ((eoi_val & 1) && __x_trig_page(xd)) + __raw_writeq(0, __x_trig_page(xd)); + } +} + +enum { + scan_fetch, + scan_poll, + scan_eoi, +}; + +static u32 xive_vm_scan_interrupts(struct kvmppc_xive_vcpu *xc, + u8 pending, int scan_type) +{ + u32 hirq = 0; + u8 prio = 0xff; + + /* Find highest pending priority */ + while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) { + struct xive_q *q; + u32 idx, toggle; + __be32 *qpage; + + /* + * If pending is 0 this will return 0xff which is what + * we want + */ + prio = ffs(pending) - 1; + + /* Don't scan past the guest cppr */ + if (prio >= xc->cppr || prio > 7) { + if (xc->mfrr < xc->cppr) { + prio = xc->mfrr; + hirq = XICS_IPI; + } + break; + } + + /* Grab queue and pointers */ + q = &xc->queues[prio]; + idx = q->idx; + toggle = q->toggle; + + /* + * Snapshot the queue page. The test further down for EOI + * must use the same "copy" that was used by __xive_read_eq + * since qpage can be set concurrently and we don't want + * to miss an EOI. + */ + qpage = READ_ONCE(q->qpage); + +skip_ipi: + /* + * Try to fetch from the queue. Will return 0 for a + * non-queueing priority (ie, qpage = 0). + */ + hirq = __xive_read_eq(qpage, q->msk, &idx, &toggle); + + /* + * If this was a signal for an MFFR change done by + * H_IPI we skip it. Additionally, if we were fetching + * we EOI it now, thus re-enabling reception of a new + * such signal. + * + * We also need to do that if prio is 0 and we had no + * page for the queue. In this case, we have non-queued + * IPI that needs to be EOId. + * + * This is safe because if we have another pending MFRR + * change that wasn't observed above, the Q bit will have + * been set and another occurrence of the IPI will trigger. + */ + if (hirq == XICS_IPI || (prio == 0 && !qpage)) { + if (scan_type == scan_fetch) { + xive_vm_source_eoi(xc->vp_ipi, + &xc->vp_ipi_data); + q->idx = idx; + q->toggle = toggle; + } + /* Loop back on same queue with updated idx/toggle */ + WARN_ON(hirq && hirq != XICS_IPI); + if (hirq) + goto skip_ipi; + } + + /* If it's the dummy interrupt, continue searching */ + if (hirq == XICS_DUMMY) + goto skip_ipi; + + /* Clear the pending bit if the queue is now empty */ + if (!hirq) { + pending &= ~(1 << prio); + + /* + * Check if the queue count needs adjusting due to + * interrupts being moved away. + */ + if (atomic_read(&q->pending_count)) { + int p = atomic_xchg(&q->pending_count, 0); + + if (p) { + WARN_ON(p > atomic_read(&q->count)); + atomic_sub(p, &q->count); + } + } + } + + /* + * If the most favoured prio we found pending is less + * favored (or equal) than a pending IPI, we return + * the IPI instead. + */ + if (prio >= xc->mfrr && xc->mfrr < xc->cppr) { + prio = xc->mfrr; + hirq = XICS_IPI; + break; + } + + /* If fetching, update queue pointers */ + if (scan_type == scan_fetch) { + q->idx = idx; + q->toggle = toggle; + } + } + + /* If we are just taking a "peek", do nothing else */ + if (scan_type == scan_poll) + return hirq; + + /* Update the pending bits */ + xc->pending = pending; + + /* + * If this is an EOI that's it, no CPPR adjustment done here, + * all we needed was cleanup the stale pending bits and check + * if there's anything left. 
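A concrete trace of the priority arithmetic in xive_vm_scan_interrupts() may help: pending is a bitmap with bit N set when priority N has work, lower numbers are more favored, and ffs() is 1-based. Illustrative values:

	u8 pending = 0x28;		/* bits 3 and 5: priorities 3 and 5 pending */
	u8 prio = ffs(pending) - 1;	/* ffs(0x28) == 4, so prio == 3 */

	/* Priority 3 is scanned first; if its queue drains, bit 3 is cleared
	 * and the loop moves on to priority 5 -- unless cppr or a lower mfrr
	 * cuts the scan short. ffs(0) - 1 wraps to 0xff in a u8, matching the
	 * "no pending priority" convention noted in the code.
	 */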
+ */ + if (scan_type == scan_eoi) + return hirq; + + /* + * If we found an interrupt, adjust what the guest CPPR should + * be as if we had just fetched that interrupt from HW. + * + * Note: This can only make xc->cppr smaller as the previous + * loop will only exit with hirq != 0 if prio is lower than + * the current xc->cppr. Thus we don't need to re-check xc->mfrr + * for pending IPIs. + */ + if (hirq) + xc->cppr = prio; + /* + * If it was an IPI the HW CPPR might have been lowered too much + * as the HW interrupt we use for IPIs is routed to priority 0. + * + * We re-sync it here. + */ + if (xc->cppr != xc->hw_cppr) { + xc->hw_cppr = xc->cppr; + __raw_writeb(xc->cppr, xive_tima + TM_QW1_OS + TM_CPPR); + } + + return hirq; +} + +static unsigned long xive_vm_h_xirr(struct kvm_vcpu *vcpu) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + u8 old_cppr; + u32 hirq; + + pr_devel("H_XIRR\n"); + + xc->stat_vm_h_xirr++; + + /* First collect pending bits from HW */ + xive_vm_ack_pending(xc); + + pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n", + xc->pending, xc->hw_cppr, xc->cppr); + + /* Grab previous CPPR and reverse map it */ + old_cppr = xive_prio_to_guest(xc->cppr); + + /* Scan for actual interrupts */ + hirq = xive_vm_scan_interrupts(xc, xc->pending, scan_fetch); + + pr_devel(" got hirq=0x%x hw_cppr=%d cppr=%d\n", + hirq, xc->hw_cppr, xc->cppr); + + /* That should never hit */ + if (hirq & 0xff000000) + pr_warn("XIVE: Weird guest interrupt number 0x%08x\n", hirq); + + /* + * XXX We could check if the interrupt is masked here and + * filter it. If we chose to do so, we would need to do: + * + * if (masked) { + * lock(); + * if (masked) { + * old_Q = true; + * hirq = 0; + * } + * unlock(); + * } + */ + + /* Return interrupt and old CPPR in GPR4 */ + vcpu->arch.regs.gpr[4] = hirq | (old_cppr << 24); + + return H_SUCCESS; +} + +static unsigned long xive_vm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + u8 pending = xc->pending; + u32 hirq; + + pr_devel("H_IPOLL(server=%ld)\n", server); + + xc->stat_vm_h_ipoll++; + + /* Grab the target VCPU if not the current one */ + if (xc->server_num != server) { + vcpu = kvmppc_xive_find_server(vcpu->kvm, server); + if (!vcpu) + return H_PARAMETER; + xc = vcpu->arch.xive_vcpu; + + /* Scan all priorities */ + pending = 0xff; + } else { + /* Grab pending interrupt if any */ + __be64 qw1 = __raw_readq(xive_tima + TM_QW1_OS); + u8 pipr = be64_to_cpu(qw1) & 0xff; + + if (pipr < 8) + pending |= 1 << pipr; + } + + hirq = xive_vm_scan_interrupts(xc, pending, scan_poll); + + /* Return interrupt and old CPPR in GPR4 */ + vcpu->arch.regs.gpr[4] = hirq | (xc->cppr << 24); + + return H_SUCCESS; +} + +static void xive_vm_push_pending_to_hw(struct kvmppc_xive_vcpu *xc) +{ + u8 pending, prio; + + pending = xc->pending; + if (xc->mfrr != 0xff) { + if (xc->mfrr < 8) + pending |= 1 << xc->mfrr; + else + pending |= 0x80; + } + if (!pending) + return; + prio = ffs(pending) - 1; + + __raw_writeb(prio, xive_tima + TM_SPC_SET_OS_PENDING); +} + +static void xive_vm_scan_for_rerouted_irqs(struct kvmppc_xive *xive, + struct kvmppc_xive_vcpu *xc) +{ + unsigned int prio; + + /* For each priority that is now masked */ + for (prio = xc->cppr; prio < KVMPPC_XIVE_Q_COUNT; prio++) { + struct xive_q *q = &xc->queues[prio]; + struct kvmppc_xive_irq_state *state; + struct kvmppc_xive_src_block *sb; + u32 idx, toggle, entry, irq, hw_num; + struct xive_irq_data *xd; + __be32 *qpage; + u16 src; + + idx = q->idx; + toggle 
= q->toggle; + qpage = READ_ONCE(q->qpage); + if (!qpage) + continue; + + /* For each interrupt in the queue */ + for (;;) { + entry = be32_to_cpup(qpage + idx); + + /* No more ? */ + if ((entry >> 31) == toggle) + break; + irq = entry & 0x7fffffff; + + /* Skip dummies and IPIs */ + if (irq == XICS_DUMMY || irq == XICS_IPI) + goto next; + sb = kvmppc_xive_find_source(xive, irq, &src); + if (!sb) + goto next; + state = &sb->irq_state[src]; + + /* Has it been rerouted ? */ + if (xc->server_num == state->act_server) + goto next; + + /* + * Allright, it *has* been re-routed, kill it from + * the queue. + */ + qpage[idx] = cpu_to_be32((entry & 0x80000000) | XICS_DUMMY); + + /* Find the HW interrupt */ + kvmppc_xive_select_irq(state, &hw_num, &xd); + + /* If it's not an LSI, set PQ to 11 the EOI will force a resend */ + if (!(xd->flags & XIVE_IRQ_FLAG_LSI)) + xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11); + + /* EOI the source */ + xive_vm_source_eoi(hw_num, xd); + +next: + idx = (idx + 1) & q->msk; + if (idx == 0) + toggle ^= 1; + } + } +} + +static int xive_vm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + struct kvmppc_xive *xive = vcpu->kvm->arch.xive; + u8 old_cppr; + + pr_devel("H_CPPR(cppr=%ld)\n", cppr); + + xc->stat_vm_h_cppr++; + + /* Map CPPR */ + cppr = xive_prio_from_guest(cppr); + + /* Remember old and update SW state */ + old_cppr = xc->cppr; + xc->cppr = cppr; + + /* + * Order the above update of xc->cppr with the subsequent + * read of xc->mfrr inside push_pending_to_hw() + */ + smp_mb(); + + if (cppr > old_cppr) { + /* + * We are masking less, we need to look for pending things + * to deliver and set VP pending bits accordingly to trigger + * a new interrupt otherwise we might miss MFRR changes for + * which we have optimized out sending an IPI signal. + */ + xive_vm_push_pending_to_hw(xc); + } else { + /* + * We are masking more, we need to check the queue for any + * interrupt that has been routed to another CPU, take + * it out (replace it with the dummy) and retrigger it. + * + * This is necessary since those interrupts may otherwise + * never be processed, at least not until this CPU restores + * its CPPR. + * + * This is in theory racy vs. HW adding new interrupts to + * the queue. In practice this works because the interesting + * cases are when the guest has done a set_xive() to move the + * interrupt away, which flushes the xive, followed by the + * target CPU doing a H_CPPR. So any new interrupt coming into + * the queue must still be routed to us and isn't a source + * of concern. + */ + xive_vm_scan_for_rerouted_irqs(xive, xc); + } + + /* Apply new CPPR */ + xc->hw_cppr = cppr; + __raw_writeb(cppr, xive_tima + TM_QW1_OS + TM_CPPR); + + return H_SUCCESS; +} + +static int xive_vm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr) +{ + struct kvmppc_xive *xive = vcpu->kvm->arch.xive; + struct kvmppc_xive_src_block *sb; + struct kvmppc_xive_irq_state *state; + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + struct xive_irq_data *xd; + u8 new_cppr = xirr >> 24; + u32 irq = xirr & 0x00ffffff, hw_num; + u16 src; + int rc = 0; + + pr_devel("H_EOI(xirr=%08lx)\n", xirr); + + xc->stat_vm_h_eoi++; + + xc->cppr = xive_prio_from_guest(new_cppr); + + /* + * IPIs are synthetized from MFRR and thus don't need + * any special EOI handling. The underlying interrupt + * used to signal MFRR changes is EOId when fetched from + * the queue. 
+ */ + if (irq == XICS_IPI || irq == 0) { + /* + * This barrier orders the setting of xc->cppr vs. + * subsquent test of xc->mfrr done inside + * scan_interrupts and push_pending_to_hw + */ + smp_mb(); + goto bail; + } + + /* Find interrupt source */ + sb = kvmppc_xive_find_source(xive, irq, &src); + if (!sb) { + pr_devel(" source not found !\n"); + rc = H_PARAMETER; + /* Same as above */ + smp_mb(); + goto bail; + } + state = &sb->irq_state[src]; + kvmppc_xive_select_irq(state, &hw_num, &xd); + + state->in_eoi = true; + + /* + * This barrier orders both setting of in_eoi above vs, + * subsequent test of guest_priority, and the setting + * of xc->cppr vs. subsquent test of xc->mfrr done inside + * scan_interrupts and push_pending_to_hw + */ + smp_mb(); + +again: + if (state->guest_priority == MASKED) { + arch_spin_lock(&sb->lock); + if (state->guest_priority != MASKED) { + arch_spin_unlock(&sb->lock); + goto again; + } + pr_devel(" EOI on saved P...\n"); + + /* Clear old_p, that will cause unmask to perform an EOI */ + state->old_p = false; + + arch_spin_unlock(&sb->lock); + } else { + pr_devel(" EOI on source...\n"); + + /* Perform EOI on the source */ + xive_vm_source_eoi(hw_num, xd); + + /* If it's an emulated LSI, check level and resend */ + if (state->lsi && state->asserted) + __raw_writeq(0, __x_trig_page(xd)); + + } + + /* + * This barrier orders the above guest_priority check + * and spin_lock/unlock with clearing in_eoi below. + * + * It also has to be a full mb() as it must ensure + * the MMIOs done in source_eoi() are completed before + * state->in_eoi is visible. + */ + mb(); + state->in_eoi = false; +bail: + + /* Re-evaluate pending IRQs and update HW */ + xive_vm_scan_interrupts(xc, xc->pending, scan_eoi); + xive_vm_push_pending_to_hw(xc); + pr_devel(" after scan pending=%02x\n", xc->pending); + + /* Apply new CPPR */ + xc->hw_cppr = xc->cppr; + __raw_writeb(xc->cppr, xive_tima + TM_QW1_OS + TM_CPPR); + + return rc; +} + +static int xive_vm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, + unsigned long mfrr) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + + pr_devel("H_IPI(server=%08lx,mfrr=%ld)\n", server, mfrr); + + xc->stat_vm_h_ipi++; + + /* Find target */ + vcpu = kvmppc_xive_find_server(vcpu->kvm, server); + if (!vcpu) + return H_PARAMETER; + xc = vcpu->arch.xive_vcpu; + + /* Locklessly write over MFRR */ + xc->mfrr = mfrr; + + /* + * The load of xc->cppr below and the subsequent MMIO store + * to the IPI must happen after the above mfrr update is + * globally visible so that: + * + * - Synchronize with another CPU doing an H_EOI or a H_CPPR + * updating xc->cppr then reading xc->mfrr. + * + * - The target of the IPI sees the xc->mfrr update + */ + mb(); + + /* Shoot the IPI if most favored than target cppr */ + if (mfrr < xc->cppr) + __raw_writeq(0, __x_trig_page(&xc->vp_ipi_data)); + + return H_SUCCESS; +} /* * We leave a gap of a couple of interrupts in the queue to @@ -124,7 +726,7 @@ void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) * interrupt might have fired and be on its way to the * host queue while we mask it, and if we unmask it * early enough (re-cede right away), there is a - * theorical possibility that it fires again, thus + * theoretical possibility that it fires again, thus * landing in the target queue more than once which is * a big no-no. 
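The bare mb() at the end of xive_vm_h_ipi() above pairs with the smp_mb() in xive_vm_h_cppr() and xive_vm_h_eoi(): each side stores its own field first, issues a full barrier, then reads the other's — the classic store-buffering pattern. Sketched as comments:

	/*
	 *   sender: H_IPI                    target: H_CPPR / H_EOI
	 *   xc->mfrr = mfrr;                 xc->cppr = new_cppr;
	 *   mb();                            smp_mb();
	 *   if (mfrr < xc->cppr)             scan queues, read xc->mfrr,
	 *           fire IPI trigger;                push pending to HW;
	 *
	 * Whichever side runs second must observe the other's store, so a
	 * raised MFRR and a lowered CPPR can never both be missed.
	 */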
* @@ -179,12 +781,13 @@ void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvmppc_xive_pull_vcpu); -void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) +bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) { void __iomem *esc_vaddr = (void __iomem *)vcpu->arch.xive_esc_vaddr; + bool ret = true; if (!esc_vaddr) - return; + return ret; /* we are using XIVE with single escalation */ @@ -197,7 +800,7 @@ void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) * we also don't want to set xive_esc_on to 1 here in * case we race with xive_esc_irq(). */ - vcpu->arch.ceded = 0; + ret = false; /* * The escalation interrupts are special as we don't EOI them. * There is no need to use the load-after-store ordering offset @@ -210,6 +813,8 @@ void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) __raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_00); } mb(); + + return ret; } EXPORT_SYMBOL_GPL(kvmppc_xive_rearm_escalation); @@ -238,7 +843,7 @@ static irqreturn_t xive_esc_irq(int irq, void *data) vcpu->arch.irq_pending = 1; smp_mb(); - if (vcpu->arch.ceded) + if (vcpu->arch.ceded || vcpu->arch.nested) kvmppc_fast_vcpu_kick(vcpu); /* Since we have the no-EOI flag, the interrupt is effectively @@ -622,7 +1227,7 @@ static int xive_target_interrupt(struct kvm *kvm, /* * Targetting rules: In order to avoid losing track of - * pending interrupts accross mask and unmask, which would + * pending interrupts across mask and unmask, which would * allow queue overflows, we implement the following rules: * * - Unless it was never enabled (or we run out of capacity) @@ -1073,7 +1678,7 @@ int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq, /* * If old_p is set, the interrupt is pending, we switch it to * PQ=11. This will force a resend in the host so the interrupt - * isn't lost to whatver host driver may pick it up + * isn't lost to whatever host driver may pick it up */ if (state->old_p) xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11); diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h index 09d0657596c3..1e48f72e8aa5 100644 --- a/arch/powerpc/kvm/book3s_xive.h +++ b/arch/powerpc/kvm/book3s_xive.h @@ -285,13 +285,6 @@ static inline u32 __xive_read_eq(__be32 *qpage, u32 msk, u32 *idx, u32 *toggle) return cur & 0x7fffffff; } -extern unsigned long xive_rm_h_xirr(struct kvm_vcpu *vcpu); -extern unsigned long xive_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server); -extern int xive_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, - unsigned long mfrr); -extern int xive_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr); -extern int xive_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr); - /* * Common Xive routines for XICS-over-XIVE and XIVE native */ diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c index f81ba6f84e72..5271c33fe79e 100644 --- a/arch/powerpc/kvm/book3s_xive_native.c +++ b/arch/powerpc/kvm/book3s_xive_native.c @@ -209,7 +209,7 @@ static int kvmppc_xive_native_reset_mapped(struct kvm *kvm, unsigned long irq) /* * Clear the ESB pages of the IRQ number being mapped (or - * unmapped) into the guest and let the the VM fault handler + * unmapped) into the guest and let the VM fault handler * repopulate with the appropriate ESB pages (device or IC) */ pr_debug("clearing esb pages for girq 0x%lx\n", irq); diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c deleted file mode 100644 index b0015e05d99a..000000000000 --- 
a/arch/powerpc/kvm/book3s_xive_template.c +++ /dev/null @@ -1,636 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation - */ - -/* File to be included by other .c files */ - -#define XGLUE(a,b) a##b -#define GLUE(a,b) XGLUE(a,b) - -/* Dummy interrupt used when taking interrupts out of a queue in H_CPPR */ -#define XICS_DUMMY 1 - -static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc) -{ - u8 cppr; - u16 ack; - - /* - * Ensure any previous store to CPPR is ordered vs. - * the subsequent loads from PIPR or ACK. - */ - eieio(); - - /* Perform the acknowledge OS to register cycle. */ - ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG)); - - /* Synchronize subsequent queue accesses */ - mb(); - - /* XXX Check grouping level */ - - /* Anything ? */ - if (!((ack >> 8) & TM_QW1_NSR_EO)) - return; - - /* Grab CPPR of the most favored pending interrupt */ - cppr = ack & 0xff; - if (cppr < 8) - xc->pending |= 1 << cppr; - -#ifdef XIVE_RUNTIME_CHECKS - /* Check consistency */ - if (cppr >= xc->hw_cppr) - pr_warn("KVM-XIVE: CPU %d odd ack CPPR, got %d at %d\n", - smp_processor_id(), cppr, xc->hw_cppr); -#endif - - /* - * Update our image of the HW CPPR. We don't yet modify - * xc->cppr, this will be done as we scan for interrupts - * in the queues. - */ - xc->hw_cppr = cppr; -} - -static u8 GLUE(X_PFX,esb_load)(struct xive_irq_data *xd, u32 offset) -{ - u64 val; - - if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI) - offset |= XIVE_ESB_LD_ST_MO; - - val =__x_readq(__x_eoi_page(xd) + offset); -#ifdef __LITTLE_ENDIAN__ - val >>= 64-8; -#endif - return (u8)val; -} - - -static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd) -{ - /* If the XIVE supports the new "store EOI facility, use it */ - if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) - __x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI); - else if (xd->flags & XIVE_IRQ_FLAG_LSI) { - /* - * For LSIs the HW EOI cycle is used rather than PQ bits, - * as they are automatically re-triggred in HW when still - * pending. - */ - __x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI); - } else { - uint64_t eoi_val; - - /* - * Otherwise for EOI, we use the special MMIO that does - * a clear of both P and Q and returns the old Q, - * except for LSIs where we use the "EOI cycle" special - * load. - * - * This allows us to then do a re-trigger if Q was set - * rather than synthetizing an interrupt in software - */ - eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00); - - /* Re-trigger if needed */ - if ((eoi_val & 1) && __x_trig_page(xd)) - __x_writeq(0, __x_trig_page(xd)); - } -} - -enum { - scan_fetch, - scan_poll, - scan_eoi, -}; - -static u32 GLUE(X_PFX,scan_interrupts)(struct kvmppc_xive_vcpu *xc, - u8 pending, int scan_type) -{ - u32 hirq = 0; - u8 prio = 0xff; - - /* Find highest pending priority */ - while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) { - struct xive_q *q; - u32 idx, toggle; - __be32 *qpage; - - /* - * If pending is 0 this will return 0xff which is what - * we want - */ - prio = ffs(pending) - 1; - - /* Don't scan past the guest cppr */ - if (prio >= xc->cppr || prio > 7) { - if (xc->mfrr < xc->cppr) { - prio = xc->mfrr; - hirq = XICS_IPI; - } - break; - } - - /* Grab queue and pointers */ - q = &xc->queues[prio]; - idx = q->idx; - toggle = q->toggle; - - /* - * Snapshot the queue page. 
The test further down for EOI - * must use the same "copy" that was used by __xive_read_eq - * since qpage can be set concurrently and we don't want - * to miss an EOI. - */ - qpage = READ_ONCE(q->qpage); - -skip_ipi: - /* - * Try to fetch from the queue. Will return 0 for a - * non-queueing priority (ie, qpage = 0). - */ - hirq = __xive_read_eq(qpage, q->msk, &idx, &toggle); - - /* - * If this was a signal for an MFFR change done by - * H_IPI we skip it. Additionally, if we were fetching - * we EOI it now, thus re-enabling reception of a new - * such signal. - * - * We also need to do that if prio is 0 and we had no - * page for the queue. In this case, we have non-queued - * IPI that needs to be EOId. - * - * This is safe because if we have another pending MFRR - * change that wasn't observed above, the Q bit will have - * been set and another occurrence of the IPI will trigger. - */ - if (hirq == XICS_IPI || (prio == 0 && !qpage)) { - if (scan_type == scan_fetch) { - GLUE(X_PFX,source_eoi)(xc->vp_ipi, - &xc->vp_ipi_data); - q->idx = idx; - q->toggle = toggle; - } - /* Loop back on same queue with updated idx/toggle */ -#ifdef XIVE_RUNTIME_CHECKS - WARN_ON(hirq && hirq != XICS_IPI); -#endif - if (hirq) - goto skip_ipi; - } - - /* If it's the dummy interrupt, continue searching */ - if (hirq == XICS_DUMMY) - goto skip_ipi; - - /* Clear the pending bit if the queue is now empty */ - if (!hirq) { - pending &= ~(1 << prio); - - /* - * Check if the queue count needs adjusting due to - * interrupts being moved away. - */ - if (atomic_read(&q->pending_count)) { - int p = atomic_xchg(&q->pending_count, 0); - if (p) { -#ifdef XIVE_RUNTIME_CHECKS - WARN_ON(p > atomic_read(&q->count)); -#endif - atomic_sub(p, &q->count); - } - } - } - - /* - * If the most favoured prio we found pending is less - * favored (or equal) than a pending IPI, we return - * the IPI instead. - */ - if (prio >= xc->mfrr && xc->mfrr < xc->cppr) { - prio = xc->mfrr; - hirq = XICS_IPI; - break; - } - - /* If fetching, update queue pointers */ - if (scan_type == scan_fetch) { - q->idx = idx; - q->toggle = toggle; - } - } - - /* If we are just taking a "peek", do nothing else */ - if (scan_type == scan_poll) - return hirq; - - /* Update the pending bits */ - xc->pending = pending; - - /* - * If this is an EOI that's it, no CPPR adjustment done here, - * all we needed was cleanup the stale pending bits and check - * if there's anything left. - */ - if (scan_type == scan_eoi) - return hirq; - - /* - * If we found an interrupt, adjust what the guest CPPR should - * be as if we had just fetched that interrupt from HW. - * - * Note: This can only make xc->cppr smaller as the previous - * loop will only exit with hirq != 0 if prio is lower than - * the current xc->cppr. Thus we don't need to re-check xc->mfrr - * for pending IPIs. - */ - if (hirq) - xc->cppr = prio; - /* - * If it was an IPI the HW CPPR might have been lowered too much - * as the HW interrupt we use for IPIs is routed to priority 0. - * - * We re-sync it here. 
- */ - if (xc->cppr != xc->hw_cppr) { - xc->hw_cppr = xc->cppr; - __x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR); - } - - return hirq; -} - -X_STATIC unsigned long GLUE(X_PFX,h_xirr)(struct kvm_vcpu *vcpu) -{ - struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; - u8 old_cppr; - u32 hirq; - - pr_devel("H_XIRR\n"); - - xc->GLUE(X_STAT_PFX,h_xirr)++; - - /* First collect pending bits from HW */ - GLUE(X_PFX,ack_pending)(xc); - - pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n", - xc->pending, xc->hw_cppr, xc->cppr); - - /* Grab previous CPPR and reverse map it */ - old_cppr = xive_prio_to_guest(xc->cppr); - - /* Scan for actual interrupts */ - hirq = GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_fetch); - - pr_devel(" got hirq=0x%x hw_cppr=%d cppr=%d\n", - hirq, xc->hw_cppr, xc->cppr); - -#ifdef XIVE_RUNTIME_CHECKS - /* That should never hit */ - if (hirq & 0xff000000) - pr_warn("XIVE: Weird guest interrupt number 0x%08x\n", hirq); -#endif - - /* - * XXX We could check if the interrupt is masked here and - * filter it. If we chose to do so, we would need to do: - * - * if (masked) { - * lock(); - * if (masked) { - * old_Q = true; - * hirq = 0; - * } - * unlock(); - * } - */ - - /* Return interrupt and old CPPR in GPR4 */ - vcpu->arch.regs.gpr[4] = hirq | (old_cppr << 24); - - return H_SUCCESS; -} - -X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server) -{ - struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; - u8 pending = xc->pending; - u32 hirq; - - pr_devel("H_IPOLL(server=%ld)\n", server); - - xc->GLUE(X_STAT_PFX,h_ipoll)++; - - /* Grab the target VCPU if not the current one */ - if (xc->server_num != server) { - vcpu = kvmppc_xive_find_server(vcpu->kvm, server); - if (!vcpu) - return H_PARAMETER; - xc = vcpu->arch.xive_vcpu; - - /* Scan all priorities */ - pending = 0xff; - } else { - /* Grab pending interrupt if any */ - __be64 qw1 = __x_readq(__x_tima + TM_QW1_OS); - u8 pipr = be64_to_cpu(qw1) & 0xff; - if (pipr < 8) - pending |= 1 << pipr; - } - - hirq = GLUE(X_PFX,scan_interrupts)(xc, pending, scan_poll); - - /* Return interrupt and old CPPR in GPR4 */ - vcpu->arch.regs.gpr[4] = hirq | (xc->cppr << 24); - - return H_SUCCESS; -} - -static void GLUE(X_PFX,push_pending_to_hw)(struct kvmppc_xive_vcpu *xc) -{ - u8 pending, prio; - - pending = xc->pending; - if (xc->mfrr != 0xff) { - if (xc->mfrr < 8) - pending |= 1 << xc->mfrr; - else - pending |= 0x80; - } - if (!pending) - return; - prio = ffs(pending) - 1; - - __x_writeb(prio, __x_tima + TM_SPC_SET_OS_PENDING); -} - -static void GLUE(X_PFX,scan_for_rerouted_irqs)(struct kvmppc_xive *xive, - struct kvmppc_xive_vcpu *xc) -{ - unsigned int prio; - - /* For each priority that is now masked */ - for (prio = xc->cppr; prio < KVMPPC_XIVE_Q_COUNT; prio++) { - struct xive_q *q = &xc->queues[prio]; - struct kvmppc_xive_irq_state *state; - struct kvmppc_xive_src_block *sb; - u32 idx, toggle, entry, irq, hw_num; - struct xive_irq_data *xd; - __be32 *qpage; - u16 src; - - idx = q->idx; - toggle = q->toggle; - qpage = READ_ONCE(q->qpage); - if (!qpage) - continue; - - /* For each interrupt in the queue */ - for (;;) { - entry = be32_to_cpup(qpage + idx); - - /* No more ? */ - if ((entry >> 31) == toggle) - break; - irq = entry & 0x7fffffff; - - /* Skip dummies and IPIs */ - if (irq == XICS_DUMMY || irq == XICS_IPI) - goto next; - sb = kvmppc_xive_find_source(xive, irq, &src); - if (!sb) - goto next; - state = &sb->irq_state[src]; - - /* Has it been rerouted ? 
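scan_for_rerouted_irqs() below walks the event queue with an index plus a generation ("toggle") bit instead of head/tail pointers: an entry is live while its top bit differs from the current toggle, and the toggle flips on every wrap of the ring. A minimal userspace model of that traversal (the queue contents are invented):

#include <stdio.h>
#include <stdint.h>

#define Q_ENTRIES 8    /* power of two, so the mask is entries - 1 */

/* Print the live entries of a XIVE-style event queue. Bit 31 of an
 * entry is its generation bit: the entry is valid while it differs
 * from 'toggle', and 'toggle' flips each time the index wraps. */
static void walk_queue(const uint32_t *qpage, uint32_t idx, uint32_t toggle)
{
    const uint32_t msk = Q_ENTRIES - 1;

    for (;;) {
        uint32_t entry = qpage[idx];

        if ((entry >> 31) == toggle)    /* stale generation: done */
            break;
        printf("irq %u at idx %u\n",
               (unsigned)(entry & 0x7fffffff), (unsigned)idx);

        idx = (idx + 1) & msk;
        if (idx == 0)
            toggle ^= 1;
    }
}

int main(void)
{
    const uint32_t qpage[Q_ENTRIES] = { 0x80000005, 0x80000009 };

    walk_queue(qpage, 0, 0);    /* prints irq 5, then irq 9 */
    return 0;
}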
*/ - if (xc->server_num == state->act_server) - goto next; - - /* - * Allright, it *has* been re-routed, kill it from - * the queue. - */ - qpage[idx] = cpu_to_be32((entry & 0x80000000) | XICS_DUMMY); - - /* Find the HW interrupt */ - kvmppc_xive_select_irq(state, &hw_num, &xd); - - /* If it's not an LSI, set PQ to 11 the EOI will force a resend */ - if (!(xd->flags & XIVE_IRQ_FLAG_LSI)) - GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_11); - - /* EOI the source */ - GLUE(X_PFX,source_eoi)(hw_num, xd); - - next: - idx = (idx + 1) & q->msk; - if (idx == 0) - toggle ^= 1; - } - } -} - -X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr) -{ - struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; - struct kvmppc_xive *xive = vcpu->kvm->arch.xive; - u8 old_cppr; - - pr_devel("H_CPPR(cppr=%ld)\n", cppr); - - xc->GLUE(X_STAT_PFX,h_cppr)++; - - /* Map CPPR */ - cppr = xive_prio_from_guest(cppr); - - /* Remember old and update SW state */ - old_cppr = xc->cppr; - xc->cppr = cppr; - - /* - * Order the above update of xc->cppr with the subsequent - * read of xc->mfrr inside push_pending_to_hw() - */ - smp_mb(); - - if (cppr > old_cppr) { - /* - * We are masking less, we need to look for pending things - * to deliver and set VP pending bits accordingly to trigger - * a new interrupt otherwise we might miss MFRR changes for - * which we have optimized out sending an IPI signal. - */ - GLUE(X_PFX,push_pending_to_hw)(xc); - } else { - /* - * We are masking more, we need to check the queue for any - * interrupt that has been routed to another CPU, take - * it out (replace it with the dummy) and retrigger it. - * - * This is necessary since those interrupts may otherwise - * never be processed, at least not until this CPU restores - * its CPPR. - * - * This is in theory racy vs. HW adding new interrupts to - * the queue. In practice this works because the interesting - * cases are when the guest has done a set_xive() to move the - * interrupt away, which flushes the xive, followed by the - * target CPU doing a H_CPPR. So any new interrupt coming into - * the queue must still be routed to us and isn't a source - * of concern. - */ - GLUE(X_PFX,scan_for_rerouted_irqs)(xive, xc); - } - - /* Apply new CPPR */ - xc->hw_cppr = cppr; - __x_writeb(cppr, __x_tima + TM_QW1_OS + TM_CPPR); - - return H_SUCCESS; -} - -X_STATIC int GLUE(X_PFX,h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr) -{ - struct kvmppc_xive *xive = vcpu->kvm->arch.xive; - struct kvmppc_xive_src_block *sb; - struct kvmppc_xive_irq_state *state; - struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; - struct xive_irq_data *xd; - u8 new_cppr = xirr >> 24; - u32 irq = xirr & 0x00ffffff, hw_num; - u16 src; - int rc = 0; - - pr_devel("H_EOI(xirr=%08lx)\n", xirr); - - xc->GLUE(X_STAT_PFX,h_eoi)++; - - xc->cppr = xive_prio_from_guest(new_cppr); - - /* - * IPIs are synthetized from MFRR and thus don't need - * any special EOI handling. The underlying interrupt - * used to signal MFRR changes is EOId when fetched from - * the queue. - */ - if (irq == XICS_IPI || irq == 0) { - /* - * This barrier orders the setting of xc->cppr vs. 
- * subsquent test of xc->mfrr done inside - * scan_interrupts and push_pending_to_hw - */ - smp_mb(); - goto bail; - } - - /* Find interrupt source */ - sb = kvmppc_xive_find_source(xive, irq, &src); - if (!sb) { - pr_devel(" source not found !\n"); - rc = H_PARAMETER; - /* Same as above */ - smp_mb(); - goto bail; - } - state = &sb->irq_state[src]; - kvmppc_xive_select_irq(state, &hw_num, &xd); - - state->in_eoi = true; - - /* - * This barrier orders both setting of in_eoi above vs, - * subsequent test of guest_priority, and the setting - * of xc->cppr vs. subsquent test of xc->mfrr done inside - * scan_interrupts and push_pending_to_hw - */ - smp_mb(); - -again: - if (state->guest_priority == MASKED) { - arch_spin_lock(&sb->lock); - if (state->guest_priority != MASKED) { - arch_spin_unlock(&sb->lock); - goto again; - } - pr_devel(" EOI on saved P...\n"); - - /* Clear old_p, that will cause unmask to perform an EOI */ - state->old_p = false; - - arch_spin_unlock(&sb->lock); - } else { - pr_devel(" EOI on source...\n"); - - /* Perform EOI on the source */ - GLUE(X_PFX,source_eoi)(hw_num, xd); - - /* If it's an emulated LSI, check level and resend */ - if (state->lsi && state->asserted) - __x_writeq(0, __x_trig_page(xd)); - - } - - /* - * This barrier orders the above guest_priority check - * and spin_lock/unlock with clearing in_eoi below. - * - * It also has to be a full mb() as it must ensure - * the MMIOs done in source_eoi() are completed before - * state->in_eoi is visible. - */ - mb(); - state->in_eoi = false; -bail: - - /* Re-evaluate pending IRQs and update HW */ - GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_eoi); - GLUE(X_PFX,push_pending_to_hw)(xc); - pr_devel(" after scan pending=%02x\n", xc->pending); - - /* Apply new CPPR */ - xc->hw_cppr = xc->cppr; - __x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR); - - return rc; -} - -X_STATIC int GLUE(X_PFX,h_ipi)(struct kvm_vcpu *vcpu, unsigned long server, - unsigned long mfrr) -{ - struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; - - pr_devel("H_IPI(server=%08lx,mfrr=%ld)\n", server, mfrr); - - xc->GLUE(X_STAT_PFX,h_ipi)++; - - /* Find target */ - vcpu = kvmppc_xive_find_server(vcpu->kvm, server); - if (!vcpu) - return H_PARAMETER; - xc = vcpu->arch.xive_vcpu; - - /* Locklessly write over MFRR */ - xc->mfrr = mfrr; - - /* - * The load of xc->cppr below and the subsequent MMIO store - * to the IPI must happen after the above mfrr update is - * globally visible so that: - * - * - Synchronize with another CPU doing an H_EOI or a H_CPPR - * updating xc->cppr then reading xc->mfrr. - * - * - The target of the IPI sees the xc->mfrr update - */ - mb(); - - /* Shoot the IPI if most favored than target cppr */ - if (mfrr < xc->cppr) - __x_writeq(0, __x_trig_page(&xc->vp_ipi_data)); - - return H_SUCCESS; -} diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c index fa0d8dbbe484..57e0ad6a2ca3 100644 --- a/arch/powerpc/kvm/e500mc.c +++ b/arch/powerpc/kvm/e500mc.c @@ -309,7 +309,7 @@ static int kvmppc_core_vcpu_create_e500mc(struct kvm_vcpu *vcpu) BUILD_BUG_ON(offsetof(struct kvmppc_vcpu_e500, vcpu) != 0); vcpu_e500 = to_e500(vcpu); - /* Invalid PIR value -- this LPID dosn't have valid state on any cpu */ + /* Invalid PIR value -- this LPID doesn't have valid state on any cpu */ vcpu->arch.oldpir = 0xffffffff; err = kvmppc_e500_tlb_init(vcpu_e500); @@ -399,7 +399,6 @@ static int __init kvmppc_e500mc_init(void) * allocator. 
*/ kvmppc_init_lpid(KVMPPC_NR_LPIDS/threads_per_core); - kvmppc_claim_lpid(0); /* host */ r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE); if (r) diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 533c4232e5ab..191992fcb2c2 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -19,6 +19,7 @@ #include <linux/module.h> #include <linux/irqbypass.h> #include <linux/kvm_irqfd.h> +#include <linux/of.h> #include <asm/cputable.h> #include <linux/uaccess.h> #include <asm/kvm_ppc.h> @@ -2496,41 +2497,37 @@ out: return r; } -static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)]; +static DEFINE_IDA(lpid_inuse); static unsigned long nr_lpids; long kvmppc_alloc_lpid(void) { - long lpid; + int lpid; - do { - lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS); - if (lpid >= nr_lpids) { + /* The host LPID must always be 0 (allocation starts at 1) */ + lpid = ida_alloc_range(&lpid_inuse, 1, nr_lpids - 1, GFP_KERNEL); + if (lpid < 0) { + if (lpid == -ENOMEM) + pr_err("%s: Out of memory\n", __func__); + else pr_err("%s: No LPIDs free\n", __func__); - return -ENOMEM; - } - } while (test_and_set_bit(lpid, lpid_inuse)); + return -ENOMEM; + } return lpid; } EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid); -void kvmppc_claim_lpid(long lpid) -{ - set_bit(lpid, lpid_inuse); -} -EXPORT_SYMBOL_GPL(kvmppc_claim_lpid); - void kvmppc_free_lpid(long lpid) { - clear_bit(lpid, lpid_inuse); + ida_free(&lpid_inuse, lpid); } EXPORT_SYMBOL_GPL(kvmppc_free_lpid); +/* nr_lpids_param includes the host LPID */ void kvmppc_init_lpid(unsigned long nr_lpids_param) { - nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param); - memset(lpid_inuse, 0, sizeof(lpid_inuse)); + nr_lpids = nr_lpids_param; } EXPORT_SYMBOL_GPL(kvmppc_init_lpid); diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h index 38cd0ed0a617..32e2cb5811cc 100644 --- a/arch/powerpc/kvm/trace_hv.h +++ b/arch/powerpc/kvm/trace_hv.h @@ -409,9 +409,9 @@ TRACE_EVENT(kvmppc_run_core, ); TRACE_EVENT(kvmppc_vcore_blocked, - TP_PROTO(struct kvmppc_vcore *vc, int where), + TP_PROTO(struct kvm_vcpu *vcpu, int where), - TP_ARGS(vc, where), + TP_ARGS(vcpu, where), TP_STRUCT__entry( __field(int, n_runnable) @@ -421,8 +421,8 @@ TRACE_EVENT(kvmppc_vcore_blocked, ), TP_fast_assign( - __entry->runner_vcpu = vc->runner->vcpu_id; - __entry->n_runnable = vc->n_runnable; + __entry->runner_vcpu = vcpu->vcpu_id; + __entry->n_runnable = vcpu->arch.vcore->n_runnable; __entry->where = where; __entry->tgid = current->tgid; ), diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 5d1881d2e39a..8560c912186d 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -13,6 +13,9 @@ CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE) KASAN_SANITIZE_code-patching.o := n KASAN_SANITIZE_feature-fixups.o := n +# restart_table.o contains functions called in the NMI interrupt path +# which can be in real mode. Disable KASAN. 
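The powerpc.c hunk above swaps the open-coded lpid_inuse bitmap for an IDA and reserves LPID 0 for the host simply by starting allocation at 1, which is what lets kvmppc_claim_lpid() go away. A toy userspace model of the allocator's contract (not the kernel's IDA implementation, just the semantics):

#include <stdio.h>

#define NR_LPIDS 64

static unsigned long long inuse = 1;    /* bit 0 set: LPID 0 is the host */

/* Lowest free LPID in [1, NR_LPIDS), mirroring
 * ida_alloc_range(&lpid_inuse, 1, nr_lpids - 1, GFP_KERNEL). */
static int alloc_lpid(void)
{
    for (int lpid = 1; lpid < NR_LPIDS; lpid++) {
        if (!(inuse & (1ULL << lpid))) {
            inuse |= 1ULL << lpid;
            return lpid;
        }
    }
    return -1;    /* no LPIDs free */
}

static void free_lpid(int lpid)
{
    inuse &= ~(1ULL << lpid);
}

int main(void)
{
    int a = alloc_lpid(), b = alloc_lpid();

    printf("%d %d\n", a, b);        /* 1 2 */
    free_lpid(a);
    printf("%d\n", alloc_lpid());   /* 1: the lowest free ID is reused */
    return 0;
}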
+KASAN_SANITIZE_restart_table.o := n ifdef CONFIG_KASAN CFLAGS_code-patching.o += -DDISABLE_BRANCH_PROFILING diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c index 00c68e7fb11e..6edf0697a526 100644 --- a/arch/powerpc/lib/code-patching.c +++ b/arch/powerpc/lib/code-patching.c @@ -8,6 +8,7 @@ #include <linux/init.h> #include <linux/cpuhotplug.h> #include <linux/uaccess.h> +#include <linux/jump_label.h> #include <asm/tlbflush.h> #include <asm/page.h> @@ -32,7 +33,7 @@ static int __patch_instruction(u32 *exec_addr, ppc_inst_t instr, u32 *patch_addr return 0; failed: - return -EFAULT; + return -EPERM; } int raw_patch_instruction(u32 *addr, ppc_inst_t instr) @@ -78,6 +79,8 @@ static int text_area_cpu_down(unsigned int cpu) return 0; } +static __ro_after_init DEFINE_STATIC_KEY_FALSE(poking_init_done); + /* * Although BUG_ON() is rude, in this case it should only happen if ENOMEM, and * we judge it as being preferable to a kernel that will crash later when @@ -88,6 +91,7 @@ void __init poking_init(void) BUG_ON(!cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/text_poke:online", text_area_cpu_up, text_area_cpu_down)); + static_branch_enable(&poking_init_done); } /* @@ -97,7 +101,7 @@ static int map_patch_area(void *addr, unsigned long text_poke_addr) { unsigned long pfn; - if (is_vmalloc_or_module_addr(addr)) + if (IS_ENABLED(CONFIG_MODULES) && is_vmalloc_or_module_addr(addr)) pfn = vmalloc_to_pfn(addr); else pfn = __pa_symbol(addr) >> PAGE_SHIFT; @@ -170,7 +174,7 @@ static int do_patch_instruction(u32 *addr, ppc_inst_t instr) * when text_poke_area is not ready, but we still need * to allow patching. We just do the plain old patching */ - if (!this_cpu_read(text_poke_area)) + if (!static_branch_likely(&poking_init_done)) return raw_patch_instruction(addr, instr); local_irq_save(flags); @@ -188,10 +192,12 @@ static int do_patch_instruction(u32 *addr, ppc_inst_t instr) #endif /* CONFIG_STRICT_KERNEL_RWX */ +__ro_after_init DEFINE_STATIC_KEY_FALSE(init_mem_is_free); + int patch_instruction(u32 *addr, ppc_inst_t instr) { /* Make sure we aren't patching a freed init section */ - if (system_state >= SYSTEM_FREEING_INITMEM && init_section_contains(addr, 4)) + if (static_branch_likely(&init_mem_is_free) && init_section_contains(addr, 4)) return 0; return do_patch_instruction(addr, instr); @@ -208,33 +214,6 @@ int patch_branch(u32 *addr, unsigned long target, int flags) return patch_instruction(addr, instr); } -bool is_offset_in_branch_range(long offset) -{ - /* - * Powerpc branch instruction is : - * - * 0 6 30 31 - * +---------+----------------+---+---+ - * | opcode | LI |AA |LK | - * +---------+----------------+---+---+ - * Where AA = 0 and LK = 0 - * - * LI is a signed 24 bits integer. 
The real branch offset is computed - * by: imm32 = SignExtend(LI:'0b00', 32); - * - * So the maximum forward branch should be: - * (0x007fffff << 2) = 0x01fffffc = 0x1fffffc - * The maximum backward branch should be: - * (0xff800000 << 2) = 0xfe000000 = -0x2000000 - */ - return (offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3)); -} - -bool is_offset_in_cond_branch_range(long offset) -{ - return offset >= -0x8000 && offset <= 0x7fff && !(offset & 0x3); -} - /* * Helper to check if a given instruction is a conditional branch * Derived from the conditional checks in analyse_instr() @@ -257,26 +236,6 @@ bool is_conditional_branch(ppc_inst_t instr) } NOKPROBE_SYMBOL(is_conditional_branch); -int create_branch(ppc_inst_t *instr, const u32 *addr, - unsigned long target, int flags) -{ - long offset; - - *instr = ppc_inst(0); - offset = target; - if (! (flags & BRANCH_ABSOLUTE)) - offset = offset - (unsigned long)addr; - - /* Check we can represent the target in the instruction format */ - if (!is_offset_in_branch_range(offset)) - return 1; - - /* Mask out the flags and target, so they don't step on each other. */ - *instr = ppc_inst(0x48000000 | (flags & 0x3) | (offset & 0x03FFFFFC)); - - return 0; -} - int create_cond_branch(ppc_inst_t *instr, const u32 *addr, unsigned long target, int flags) { diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 343a78826035..993d3f31832a 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -451,7 +451,7 @@ static int __do_rfi_flush_fixups(void *data) if (types & L1D_FLUSH_FALLBACK) /* b .+16 to fallback flush */ - instrs[0] = PPC_INST_BRANCH | 16; + instrs[0] = PPC_RAW_BRANCH(16); i = 0; if (types & L1D_FLUSH_ORI) { diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index 6f79bde6d6c2..398b5694aeb7 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -15,9 +15,6 @@ #include <asm/cputable.h> #include <asm/disassemble.h> -extern char system_call_common[]; -extern char system_call_vectored_emulate[]; - #ifdef CONFIG_PPC64 /* Bits in SRR1 that are copied from MSR */ #define MSR_MASK 0xffffffff87c0ffffUL @@ -1166,7 +1163,7 @@ static nokprobe_inline void add_with_carry(const struct pt_regs *regs, if (carry_in) ++val; - op->type = COMPUTE + SETREG + SETXER; + op->type = COMPUTE | SETREG | SETXER; op->reg = rd; op->val = val; val = truncate_if_32bit(regs->msr, val); @@ -1187,7 +1184,7 @@ static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs, { unsigned int crval, shift; - op->type = COMPUTE + SETCC; + op->type = COMPUTE | SETCC; crval = (regs->xer >> 31) & 1; /* get SO bit */ if (v1 < v2) crval |= 8; @@ -1206,7 +1203,7 @@ static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs, { unsigned int crval, shift; - op->type = COMPUTE + SETCC; + op->type = COMPUTE | SETCC; crval = (regs->xer >> 31) & 1; /* get SO bit */ if (v1 < v2) crval |= 8; @@ -1376,7 +1373,6 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, if (branch_taken(word, regs, op)) op->type |= BRTAKEN; return 1; -#ifdef CONFIG_PPC64 case 17: /* sc */ if ((word & 0xfe2) == 2) op->type = SYSCALL; @@ -1388,7 +1384,6 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, } else op->type = UNKNOWN; return 0; -#endif case 18: /* b */ op->type = BRANCH | BRTAKEN; imm = word & 0x03fffffc; @@ -3643,43 +3638,22 @@ int emulate_step(struct pt_regs *regs, ppc_inst_t instr) regs_set_return_msr(regs, (regs->msr & ~op.val) | (val & 
op.val)); goto instr_done; -#ifdef CONFIG_PPC64 case SYSCALL: /* sc */ /* - * N.B. this uses knowledge about how the syscall - * entry code works. If that is changed, this will - * need to be changed also. + * Per ISA v3.1, section 7.5.15 'Trace Interrupt', we can't + * single step a system call instruction: + * + * Successful completion for an instruction means that the + * instruction caused no other interrupt. Thus a Trace + * interrupt never occurs for a System Call or System Call + * Vectored instruction, or for a Trap instruction that + * traps. */ - if (IS_ENABLED(CONFIG_PPC_FAST_ENDIAN_SWITCH) && - cpu_has_feature(CPU_FTR_REAL_LE) && - regs->gpr[0] == 0x1ebe) { - regs_set_return_msr(regs, regs->msr ^ MSR_LE); - goto instr_done; - } - regs->gpr[9] = regs->gpr[13]; - regs->gpr[10] = MSR_KERNEL; - regs->gpr[11] = regs->nip + 4; - regs->gpr[12] = regs->msr & MSR_MASK; - regs->gpr[13] = (unsigned long) get_paca(); - regs_set_return_ip(regs, (unsigned long) &system_call_common); - regs_set_return_msr(regs, MSR_KERNEL); - return 1; - -#ifdef CONFIG_PPC_BOOK3S_64 + return -1; case SYSCALL_VECTORED_0: /* scv 0 */ - regs->gpr[9] = regs->gpr[13]; - regs->gpr[10] = MSR_KERNEL; - regs->gpr[11] = regs->nip + 4; - regs->gpr[12] = regs->msr & MSR_MASK; - regs->gpr[13] = (unsigned long) get_paca(); - regs_set_return_ip(regs, (unsigned long) &system_call_vectored_emulate); - regs_set_return_msr(regs, MSR_KERNEL); - return 1; -#endif - + return -1; case RFI: return -1; -#endif } return 0; diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile index df8172da2301..503a6e249940 100644 --- a/arch/powerpc/mm/Makefile +++ b/arch/powerpc/mm/Makefile @@ -5,7 +5,7 @@ ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) -obj-y := fault.o mem.o pgtable.o mmap.o maccess.o pageattr.o \ +obj-y := fault.o mem.o pgtable.o maccess.o pageattr.o \ init_$(BITS).o pgtable_$(BITS).o \ pgtable-frag.o ioremap.o ioremap_$(BITS).o \ init-common.o mmu_context.o drmem.o \ @@ -14,7 +14,6 @@ obj-$(CONFIG_PPC_MMU_NOHASH) += nohash/ obj-$(CONFIG_PPC_BOOK3S_32) += book3s32/ obj-$(CONFIG_PPC_BOOK3S_64) += book3s64/ obj-$(CONFIG_NUMA) += numa.o -obj-$(CONFIG_PPC_MM_SLICES) += slice.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o obj-$(CONFIG_PPC_COPRO_BASE) += copro_fault.o diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c index 203735caf691..49a737fbbd18 100644 --- a/arch/powerpc/mm/book3s32/mmu.c +++ b/arch/powerpc/mm/book3s32/mmu.c @@ -23,7 +23,6 @@ #include <linux/highmem.h> #include <linux/memblock.h> -#include <asm/prom.h> #include <asm/mmu.h> #include <asm/machdep.h> #include <asm/code-patching.h> diff --git a/arch/powerpc/mm/book3s64/Makefile b/arch/powerpc/mm/book3s64/Makefile index 2d50cac499c5..cad2abc1730f 100644 --- a/arch/powerpc/mm/book3s64/Makefile +++ b/arch/powerpc/mm/book3s64/Makefile @@ -5,7 +5,7 @@ ccflags-y := $(NO_MINIMAL_TOC) obj-y += mmu_context.o pgtable.o trace.o ifdef CONFIG_PPC_64S_HASH_MMU CFLAGS_REMOVE_slb.o = $(CC_FLAGS_FTRACE) -obj-y += hash_pgtable.o hash_utils.o hash_tlb.o slb.o +obj-y += hash_pgtable.o hash_utils.o hash_tlb.o slb.o slice.o obj-$(CONFIG_PPC_HASH_MMU_NATIVE) += hash_native.o obj-$(CONFIG_PPC_4K_PAGES) += hash_4k.o obj-$(CONFIG_PPC_64K_PAGES) += hash_64k.o @@ -24,3 +24,12 @@ obj-$(CONFIG_PPC_PKEY) += pkeys.o # Instrumenting the SLB fault path can lead to duplicate SLB entries KCOV_INSTRUMENT_slb.o := n + +# Parts of these can run in real mode and therefore are +# not safe with the current outline KASAN 
implementation +KASAN_SANITIZE_mmu_context.o := n +KASAN_SANITIZE_pgtable.o := n +KASAN_SANITIZE_radix_pgtable.o := n +KASAN_SANITIZE_radix_tlb.o := n +KASAN_SANITIZE_slb.o := n +KASAN_SANITIZE_pkeys.o := n diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c index 7ce8914992e3..2e0cad5817ba 100644 --- a/arch/powerpc/mm/book3s64/hash_pgtable.c +++ b/arch/powerpc/mm/book3s64/hash_pgtable.c @@ -377,7 +377,7 @@ int hash__has_transparent_hugepage(void) if (mmu_psize_defs[MMU_PAGE_16M].shift != PMD_SHIFT) return 0; /* - * We need to make sure that we support 16MB hugepage in a segement + * We need to make sure that we support 16MB hugepage in a segment * with base page size 64K or 4K. We only enable THP with a PAGE_SIZE * of 64K. */ diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c index 985cabdd7f67..fc92613dc2bf 100644 --- a/arch/powerpc/mm/book3s64/hash_utils.c +++ b/arch/powerpc/mm/book3s64/hash_utils.c @@ -37,6 +37,9 @@ #include <linux/cpu.h> #include <linux/pgtable.h> #include <linux/debugfs.h> +#include <linux/random.h> +#include <linux/elf-randomize.h> +#include <linux/of_fdt.h> #include <asm/interrupt.h> #include <asm/processor.h> @@ -46,7 +49,6 @@ #include <asm/types.h> #include <linux/uaccess.h> #include <asm/machdep.h> -#include <asm/prom.h> #include <asm/io.h> #include <asm/eeh.h> #include <asm/tlb.h> @@ -1264,7 +1266,6 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap) return pp; } -#ifdef CONFIG_PPC_MM_SLICES static unsigned int get_paca_psize(unsigned long addr) { unsigned char *psizes; @@ -1281,12 +1282,6 @@ static unsigned int get_paca_psize(unsigned long addr) return (psizes[index >> 1] >> (mask_index * 4)) & 0xF; } -#else -unsigned int get_paca_psize(unsigned long addr) -{ - return get_paca()->mm_ctx_user_psize; -} -#endif /* * Demote a segment to using 4k pages. @@ -1343,7 +1338,7 @@ static int subpage_protection(struct mm_struct *mm, unsigned long ea) spp >>= 30 - 2 * ((ea >> 12) & 0xf); /* - * 0 -> full premission + * 0 -> full permission * 1 -> Read only * 2 -> no access. * We return the flag that need to be cleared. @@ -1664,7 +1659,7 @@ DEFINE_INTERRUPT_HANDLER(do_hash_fault) err = hash_page_mm(mm, ea, access, TRAP(regs), flags); if (unlikely(err < 0)) { - // failed to instert a hash PTE due to an hypervisor error + // failed to insert a hash PTE due to an hypervisor error if (user_mode(regs)) { if (IS_ENABLED(CONFIG_PPC_SUBPAGE_PROT) && err == -2) _exception(SIGSEGV, regs, SEGV_ACCERR, ea); @@ -1680,7 +1675,6 @@ DEFINE_INTERRUPT_HANDLER(do_hash_fault) } } -#ifdef CONFIG_PPC_MM_SLICES static bool should_hash_preload(struct mm_struct *mm, unsigned long ea) { int psize = get_slice_psize(mm, ea); @@ -1697,12 +1691,6 @@ static bool should_hash_preload(struct mm_struct *mm, unsigned long ea) return true; } -#else -static bool should_hash_preload(struct mm_struct *mm, unsigned long ea) -{ - return true; -} -#endif static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea, bool is_exec, unsigned long trap) @@ -2147,3 +2135,20 @@ void __init print_system_hash_info(void) if (htab_hash_mask) pr_info("htab_hash_mask = 0x%lx\n", htab_hash_mask); } + +unsigned long arch_randomize_brk(struct mm_struct *mm) +{ + /* + * If we are using 1TB segments and we are allowed to randomise + * the heap, we can put it above 1TB so it is backed by a 1TB + * segment. 
Otherwise the heap will be in the bottom 1TB + * which always uses 256MB segments and this may result in a + * performance penalty. + */ + if (is_32bit_task()) + return randomize_page(mm->brk, SZ_32M); + else if (!radix_enabled() && mmu_highuser_ssize == MMU_SEGSIZE_1T) + return randomize_page(max_t(unsigned long, mm->brk, SZ_1T), SZ_1G); + else + return randomize_page(mm->brk, SZ_1G); +} diff --git a/arch/powerpc/mm/book3s64/iommu_api.c b/arch/powerpc/mm/book3s64/iommu_api.c index cd18e94d0843..7fcfba162e0d 100644 --- a/arch/powerpc/mm/book3s64/iommu_api.c +++ b/arch/powerpc/mm/book3s64/iommu_api.c @@ -305,24 +305,6 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm, } EXPORT_SYMBOL_GPL(mm_iommu_lookup); -struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm, - unsigned long ua, unsigned long size) -{ - struct mm_iommu_table_group_mem_t *mem, *ret = NULL; - - list_for_each_entry_lockless(mem, &mm->context.iommu_group_mem_list, - next) { - if ((mem->ua <= ua) && - (ua + size <= mem->ua + - (mem->entries << PAGE_SHIFT))) { - ret = mem; - break; - } - } - - return ret; -} - struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries) { @@ -369,56 +351,6 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, } EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa); -long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, - unsigned long ua, unsigned int pageshift, unsigned long *hpa) -{ - const long entry = (ua - mem->ua) >> PAGE_SHIFT; - unsigned long *pa; - - if (entry >= mem->entries) - return -EFAULT; - - if (pageshift > mem->pageshift) - return -EFAULT; - - if (!mem->hpas) { - *hpa = mem->dev_hpa + (ua - mem->ua); - return 0; - } - - pa = (void *) vmalloc_to_phys(&mem->hpas[entry]); - if (!pa) - return -EFAULT; - - *hpa = (*pa & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK); - - return 0; -} - -extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua) -{ - struct mm_iommu_table_group_mem_t *mem; - long entry; - void *va; - unsigned long *pa; - - mem = mm_iommu_lookup_rm(mm, ua, PAGE_SIZE); - if (!mem) - return; - - if (mem->dev_hpa != MM_IOMMU_TABLE_INVALID_HPA) - return; - - entry = (ua - mem->ua) >> PAGE_SHIFT; - va = &mem->hpas[entry]; - - pa = (void *) vmalloc_to_phys(va); - if (!pa) - return; - - *pa |= MM_IOMMU_TABLE_GROUP_PAGE_DIRTY; -} - bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa, unsigned int pageshift, unsigned long *size) { diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c index 8b474ab32f67..7b9966402b25 100644 --- a/arch/powerpc/mm/book3s64/pgtable.c +++ b/arch/powerpc/mm/book3s64/pgtable.c @@ -332,7 +332,7 @@ static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm) spin_lock(&mm->page_table_lock); /* * If we find pgtable_page set, we return - * the allocated page with single fragement + * the allocated page with single fragment * count. */ if (likely(!mm->context.pmd_frag)) { diff --git a/arch/powerpc/mm/book3s64/radix_hugetlbpage.c b/arch/powerpc/mm/book3s64/radix_hugetlbpage.c index 23d3e08911d3..d2fb776febb4 100644 --- a/arch/powerpc/mm/book3s64/radix_hugetlbpage.c +++ b/arch/powerpc/mm/book3s64/radix_hugetlbpage.c @@ -41,61 +41,6 @@ void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long st radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize); } -/* - * A vairant of hugetlb_get_unmapped_area doing topdown search - * FIXME!! 
should we do as x86 does or non hugetlb area does ? - * ie, use topdown or not based on mmap_is_legacy check ? - */ -unsigned long -radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr, - unsigned long len, unsigned long pgoff, - unsigned long flags) -{ - struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; - struct hstate *h = hstate_file(file); - int fixed = (flags & MAP_FIXED); - unsigned long high_limit; - struct vm_unmapped_area_info info; - - high_limit = DEFAULT_MAP_WINDOW; - if (addr >= high_limit || (fixed && (addr + len > high_limit))) - high_limit = TASK_SIZE; - - if (len & ~huge_page_mask(h)) - return -EINVAL; - if (len > high_limit) - return -ENOMEM; - - if (fixed) { - if (addr > high_limit - len) - return -ENOMEM; - if (prepare_hugepage_range(file, addr, len)) - return -EINVAL; - return addr; - } - - if (addr) { - addr = ALIGN(addr, huge_page_size(h)); - vma = find_vma(mm, addr); - if (high_limit - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vm_start_gap(vma))) - return addr; - } - /* - * We are always doing an topdown search here. Slice code - * does that too. - */ - info.flags = VM_UNMAPPED_AREA_TOPDOWN; - info.length = len; - info.low_limit = max(PAGE_SIZE, mmap_min_addr); - info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW); - info.align_mask = PAGE_MASK & ~huge_page_mask(h); - info.align_offset = 0; - - return vm_unmapped_area(&info); -} - void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t old_pte, pte_t pte) diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index def04631a74d..db2f3d193448 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -359,7 +359,7 @@ static void __init radix_init_pgtable(void) if (!cpu_has_feature(CPU_FTR_HVMODE) && cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) { /* - * Older versions of KVM on these machines perfer if the + * Older versions of KVM on these machines prefer if the * guest only uses the low 19 PID bits. */ mmu_pid_bits = 19; diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c index 7724af19ed7e..dda51fef2d2e 100644 --- a/arch/powerpc/mm/book3s64/radix_tlb.c +++ b/arch/powerpc/mm/book3s64/radix_tlb.c @@ -397,7 +397,7 @@ static inline void _tlbie_pid(unsigned long pid, unsigned long ric) /* * Workaround the fact that the "ric" argument to __tlbie_pid - * must be a compile-time contraint to match the "i" constraint + * must be a compile-time constraint to match the "i" constraint * in the asm statement. */ switch (ric) { diff --git a/arch/powerpc/mm/book3s64/slb.c b/arch/powerpc/mm/book3s64/slb.c index 81091b9587f6..6956f637a38c 100644 --- a/arch/powerpc/mm/book3s64/slb.c +++ b/arch/powerpc/mm/book3s64/slb.c @@ -347,7 +347,7 @@ void slb_setup_new_exec(void) /* * We have no good place to clear the slb preload cache on exec, * flush_thread is about the earliest arch hook but that happens - * after we switch to the mm and have aleady preloaded the SLBEs. + * after we switch to the mm and have already preloaded the SLBEs. * * For the most part that's probably okay to use entries from the * previous exec, they will age out if unused. It may turn out to @@ -615,7 +615,7 @@ static void slb_cache_update(unsigned long esid_data) } else { /* * Our cache is full and the current cache content strictly - * doesn't indicate the active SLB conents. 
Bump the ptr + * doesn't indicate the active SLB contents. Bump the ptr * so that switch_slb() will ignore the cache. */ local_paca->slb_cache_ptr = SLB_CACHE_ENTRIES + 1; diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/book3s64/slice.c index f42711f865f3..c0b58afb9a47 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/book3s64/slice.c @@ -276,20 +276,18 @@ static bool slice_scan_available(unsigned long addr, } static unsigned long slice_find_area_bottomup(struct mm_struct *mm, - unsigned long len, + unsigned long addr, unsigned long len, const struct slice_mask *available, int psize, unsigned long high_limit) { int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT); - unsigned long addr, found, next_end; + unsigned long found, next_end; struct vm_unmapped_area_info info; info.flags = 0; info.length = len; info.align_mask = PAGE_MASK & ((1ul << pshift) - 1); info.align_offset = 0; - - addr = TASK_UNMAPPED_BASE; /* * Check till the allow max value for this mmap request */ @@ -322,12 +320,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm, } static unsigned long slice_find_area_topdown(struct mm_struct *mm, - unsigned long len, + unsigned long addr, unsigned long len, const struct slice_mask *available, int psize, unsigned long high_limit) { int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT); - unsigned long addr, found, prev; + unsigned long found, prev; struct vm_unmapped_area_info info; unsigned long min_addr = max(PAGE_SIZE, mmap_min_addr); @@ -335,8 +333,6 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm, info.length = len; info.align_mask = PAGE_MASK & ((1ul << pshift) - 1); info.align_offset = 0; - - addr = mm->mmap_base; /* * If we are trying to allocate above DEFAULT_MAP_WINDOW * Add the different to the mmap_base. @@ -377,7 +373,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm, * can happen with large stack limits and large mmap() * allocations. 
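The fallback at the end of slice_find_area_topdown() below is the classic pattern: search top-down below mmap_base first, and only when that fails retry bottom-up from TASK_UNMAPPED_BASE. A toy model over a page-granular bitmap (illustrative only; the real code goes through vm_unmapped_area() and honors slice masks and alignment):

#include <stdio.h>
#include <string.h>

#define PAGES 16
static char used[PAGES];    /* 1 = page taken */

static int fits(int start, int len)
{
    for (int i = start; i < start + len; i++)
        if (used[i])
            return 0;
    return 1;
}

/* Bottom-up first fit starting at 'base'. */
static int bottomup(int base, int len)
{
    for (int s = base; s + len <= PAGES; s++)
        if (fits(s, len))
            return s;
    return -1;
}

/* Top-down first fit below 'limit'. */
static int topdown(int limit, int len)
{
    for (int s = limit - len; s >= 0; s--)
        if (fits(s, len))
            return s;
    return -1;
}

/* The policy above: top-down below mmap_base, then a bottom-up
 * retry from the unmapped base as the fallback. */
static int find_area(int mmap_base, int unmapped_base, int len)
{
    int addr = topdown(mmap_base, len);
    return addr >= 0 ? addr : bottomup(unmapped_base, len);
}

int main(void)
{
    memset(used, 1, 8);                       /* low half busy */
    printf("%d\n", find_area(PAGES, 2, 3));   /* 13: topdown wins */
    memset(used + 8, 1, 8);                   /* everything busy */
    printf("%d\n", find_area(PAGES, 2, 3));   /* -1: both searches fail */
    return 0;
}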
*/ - return slice_find_area_bottomup(mm, len, available, psize, high_limit); + return slice_find_area_bottomup(mm, TASK_UNMAPPED_BASE, len, available, psize, high_limit); } @@ -386,9 +382,9 @@ static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len, int topdown, unsigned long high_limit) { if (topdown) - return slice_find_area_topdown(mm, len, mask, psize, high_limit); + return slice_find_area_topdown(mm, mm->mmap_base, len, mask, psize, high_limit); else - return slice_find_area_bottomup(mm, len, mask, psize, high_limit); + return slice_find_area_bottomup(mm, mm->mmap_base, len, mask, psize, high_limit); } static inline void slice_copy_mask(struct slice_mask *dst, @@ -639,6 +635,32 @@ return_addr: } EXPORT_SYMBOL_GPL(slice_get_unmapped_area); +unsigned long arch_get_unmapped_area(struct file *filp, + unsigned long addr, + unsigned long len, + unsigned long pgoff, + unsigned long flags) +{ + if (radix_enabled()) + return generic_get_unmapped_area(filp, addr, len, pgoff, flags); + + return slice_get_unmapped_area(addr, len, flags, + mm_ctx_user_psize(&current->mm->context), 0); +} + +unsigned long arch_get_unmapped_area_topdown(struct file *filp, + const unsigned long addr0, + const unsigned long len, + const unsigned long pgoff, + const unsigned long flags) +{ + if (radix_enabled()) + return generic_get_unmapped_area_topdown(filp, addr0, len, pgoff, flags); + + return slice_get_unmapped_area(addr0, len, flags, + mm_ctx_user_psize(&current->mm->context), 1); +} + unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr) { unsigned char *psizes; @@ -692,7 +714,6 @@ void slice_init_new_context_exec(struct mm_struct *mm) bitmap_fill(mask->high_slices, SLICE_NUM_HIGH); } -#ifdef CONFIG_PPC_BOOK3S_64 void slice_setup_new_exec(void) { struct mm_struct *mm = current->mm; @@ -704,7 +725,6 @@ void slice_setup_new_exec(void) mm_ctx_set_slb_addr_limit(&mm->context, DEFAULT_MAP_WINDOW); } -#endif void slice_set_range_psize(struct mm_struct *mm, unsigned long start, unsigned long len, unsigned int psize) @@ -759,4 +779,29 @@ int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, return !slice_check_range_fits(mm, maskp, addr, len); } + +unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) +{ + /* With radix we don't use slice, so derive it from vma*/ + if (radix_enabled()) + return vma_kernel_pagesize(vma); + + return 1UL << mmu_psize_to_shift(get_slice_psize(vma->vm_mm, vma->vm_start)); +} + +static int file_to_psize(struct file *file) +{ + struct hstate *hstate = hstate_file(file); + return shift_to_mmu_psize(huge_page_shift(hstate)); +} + +unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags) +{ + if (radix_enabled()) + return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags); + + return slice_get_unmapped_area(addr, len, flags, file_to_psize(file), 1); +} #endif diff --git a/arch/powerpc/mm/cacheflush.c b/arch/powerpc/mm/cacheflush.c index 63363787e000..0e9b4879c0f9 100644 --- a/arch/powerpc/mm/cacheflush.c +++ b/arch/powerpc/mm/cacheflush.c @@ -12,7 +12,7 @@ static inline bool flush_coherent_icache(void) /* * For a snooping icache, we still need a dummy icbi to purge all the * prefetched instructions from the ifetch buffers. We also need a sync - * before the icbi to order the the actual stores to memory that might + * before the icbi to order the actual stores to memory that might * have modified instructions with the icbi.
*/ if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) { diff --git a/arch/powerpc/mm/drmem.c b/arch/powerpc/mm/drmem.c index 22197b18d85e..2369d1bf2411 100644 --- a/arch/powerpc/mm/drmem.c +++ b/arch/powerpc/mm/drmem.c @@ -11,7 +11,7 @@ #include <linux/of.h> #include <linux/of_fdt.h> #include <linux/memblock.h> -#include <asm/prom.h> +#include <linux/slab.h> #include <asm/drmem.h> static int n_root_addr_cells, n_root_size_cells; diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index b642a5a8668f..b282af39fcf6 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -542,40 +542,6 @@ retry: return page; } -#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA -static inline int file_to_psize(struct file *file) -{ - struct hstate *hstate = hstate_file(file); - return shift_to_mmu_psize(huge_page_shift(hstate)); -} - -unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, - unsigned long len, unsigned long pgoff, - unsigned long flags) -{ -#ifdef CONFIG_PPC_RADIX_MMU - if (radix_enabled()) - return radix__hugetlb_get_unmapped_area(file, addr, len, - pgoff, flags); -#endif -#ifdef CONFIG_PPC_MM_SLICES - return slice_get_unmapped_area(addr, len, flags, file_to_psize(file), 1); -#endif - BUG(); -} -#endif - -unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) -{ - /* With radix we don't use slice, so derive it from vma*/ - if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled()) { - unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start); - - return 1UL << mmu_psize_to_shift(psize); - } - return vma_kernel_pagesize(vma); -} - bool __init arch_hugetlb_valid_size(unsigned long size) { int shift = __ffs(size); diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index 3d690be48e84..693a3a7a9463 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c @@ -29,7 +29,6 @@ #include <linux/slab.h> #include <linux/hugetlb.h> -#include <asm/prom.h> #include <asm/io.h> #include <asm/mmu.h> #include <asm/smp.h> diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 83c0ee9fbf05..05b0d584e50b 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -111,7 +111,7 @@ static int __meminit vmemmap_populated(unsigned long vmemmap_addr, int vmemmap_m } /* - * vmemmap virtual address space management does not have a traditonal page + * vmemmap virtual address space management does not have a traditional page * table to track which virtual struct pages are backed by physical mapping. * The virtual to physical mappings are tracked in a simple linked list * format. 'vmemmap_list' maintains the entire vmemmap physical mapping at @@ -128,7 +128,7 @@ static struct vmemmap_backing *next; /* * The same pointer 'next' tracks individual chunks inside the allocated - * full page during the boot time and again tracks the freeed nodes during + * full page during the boot time and again tracks the freed nodes during * runtime. It is racy but it does not happen as they are separated by the * boot process. Will create problem if some how we have memory hotplug * operation during boot !! 
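For the vmemmap_list comments above: the virtual-to-physical tracking really is just a head-inserted singly-linked list. A freestanding sketch of the same structure (field names follow the kernel's struct vmemmap_backing; the addresses are illustrative):

#include <stdio.h>
#include <stdlib.h>

/* Minimal model of the vmemmap_list idea: remember which virtual
 * chunks are backed by which physical allocations in a simple
 * singly-linked list (no page tables involved here). */
struct vmemmap_backing {
    struct vmemmap_backing *list;
    unsigned long phys;
    unsigned long virt_addr;
};

static struct vmemmap_backing *vmemmap_list;

static void vmemmap_list_add(unsigned long phys, unsigned long virt)
{
    struct vmemmap_backing *vmem = malloc(sizeof(*vmem));

    if (!vmem)
        return;
    vmem->phys = phys;
    vmem->virt_addr = virt;
    vmem->list = vmemmap_list;    /* push at the head */
    vmemmap_list = vmem;
}

static unsigned long vmemmap_list_lookup(unsigned long virt)
{
    for (struct vmemmap_backing *v = vmemmap_list; v; v = v->list)
        if (v->virt_addr == virt)
            return v->phys;
    return 0;
}

int main(void)
{
    vmemmap_list_add(0x1000, 0xc00c000000000000UL);  /* made-up pair */
    printf("phys=%#lx\n", vmemmap_list_lookup(0xc00c000000000000UL));
    return 0;
}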
@@ -372,6 +372,9 @@ void register_page_bootmem_memmap(unsigned long section_nr, #ifdef CONFIG_PPC_BOOK3S_64 unsigned int mmu_lpid_bits; +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE +EXPORT_SYMBOL_GPL(mmu_lpid_bits); +#endif unsigned int mmu_pid_bits; static bool disable_radix = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT); diff --git a/arch/powerpc/mm/kasan/Makefile b/arch/powerpc/mm/kasan/Makefile index bb1a5408b86b..4999aadb1867 100644 --- a/arch/powerpc/mm/kasan/Makefile +++ b/arch/powerpc/mm/kasan/Makefile @@ -2,6 +2,7 @@ KASAN_SANITIZE := n -obj-$(CONFIG_PPC32) += kasan_init_32.o +obj-$(CONFIG_PPC32) += init_32.o obj-$(CONFIG_PPC_8xx) += 8xx.o obj-$(CONFIG_PPC_BOOK3S_32) += book3s_32.o +obj-$(CONFIG_PPC_BOOK3S_64) += init_book3s_64.o diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/init_32.c index f3e4d069e0ba..f3e4d069e0ba 100644 --- a/arch/powerpc/mm/kasan/kasan_init_32.c +++ b/arch/powerpc/mm/kasan/init_32.c diff --git a/arch/powerpc/mm/kasan/init_book3s_64.c b/arch/powerpc/mm/kasan/init_book3s_64.c new file mode 100644 index 000000000000..0da5566d6b84 --- /dev/null +++ b/arch/powerpc/mm/kasan/init_book3s_64.c @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * KASAN for 64-bit Book3S powerpc + * + * Copyright 2019-2022, Daniel Axtens, IBM Corporation. + */ + +/* + * ppc64 turns on virtual memory late in boot, after calling into generic code + * like the device-tree parser, so it uses this in conjunction with a hook in + * outline mode to avoid invalid access early in boot. + */ + +#define DISABLE_BRANCH_PROFILING + +#include <linux/kasan.h> +#include <linux/printk.h> +#include <linux/sched/task.h> +#include <linux/memblock.h> +#include <asm/pgalloc.h> + +DEFINE_STATIC_KEY_FALSE(powerpc_kasan_enabled_key); + +static void __init kasan_init_phys_region(void *start, void *end) +{ + unsigned long k_start, k_end, k_cur; + void *va; + + if (start >= end) + return; + + k_start = ALIGN_DOWN((unsigned long)kasan_mem_to_shadow(start), PAGE_SIZE); + k_end = ALIGN((unsigned long)kasan_mem_to_shadow(end), PAGE_SIZE); + + va = memblock_alloc(k_end - k_start, PAGE_SIZE); + for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE, va += PAGE_SIZE) + map_kernel_page(k_cur, __pa(va), PAGE_KERNEL); +} + +void __init kasan_init(void) +{ + /* + * We want to do the following things: + * 1) Map real memory into the shadow for all physical memblocks + * This takes us from c000... to c008... + * 2) Leave a hole over the shadow of vmalloc space. KASAN_VMALLOC + * will manage this for us. + * This takes us from c008... to c00a... + * 3) Map the 'early shadow'/zero page over iomap and vmemmap space. + * This takes us up to where we start at c00e... 
+ */ + + void *k_start = kasan_mem_to_shadow((void *)RADIX_VMALLOC_END); + void *k_end = kasan_mem_to_shadow((void *)RADIX_VMEMMAP_END); + phys_addr_t start, end; + u64 i; + pte_t zero_pte = pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL); + + if (!early_radix_enabled()) { + pr_warn("KASAN not enabled as it requires radix!"); + return; + } + + for_each_mem_range(i, &start, &end) + kasan_init_phys_region((void *)start, (void *)end); + + for (i = 0; i < PTRS_PER_PTE; i++) + __set_pte_at(&init_mm, (unsigned long)kasan_early_shadow_page, + &kasan_early_shadow_pte[i], zero_pte, 0); + + for (i = 0; i < PTRS_PER_PMD; i++) + pmd_populate_kernel(&init_mm, &kasan_early_shadow_pmd[i], + kasan_early_shadow_pte); + + for (i = 0; i < PTRS_PER_PUD; i++) + pud_populate(&init_mm, &kasan_early_shadow_pud[i], + kasan_early_shadow_pmd); + + /* map the early shadow over the iomap and vmemmap space */ + kasan_populate_early_shadow(k_start, k_end); + + /* mark early shadow region as RO and wipe it */ + zero_pte = pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL_RO); + for (i = 0; i < PTRS_PER_PTE; i++) + __set_pte_at(&init_mm, (unsigned long)kasan_early_shadow_page, + &kasan_early_shadow_pte[i], zero_pte, 0); + + /* + * clear_page relies on some cache info that hasn't been set up yet. + * It ends up looping ~forever and blows up other data. + * Use memset instead. + */ + memset(kasan_early_shadow_page, 0, PAGE_SIZE); + + static_branch_inc(&powerpc_kasan_enabled_key); + + /* Enable error messages */ + init_task.kasan_depth = 0; + pr_info("KASAN init done\n"); +} + +void __init kasan_late_init(void) { } diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 46fb78e3bb36..52b77684acda 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -23,6 +23,8 @@ #include <asm/kasan.h> #include <asm/svm.h> #include <asm/mmzone.h> +#include <asm/ftrace.h> +#include <asm/code-patching.h> #include <mm/mmu_decl.h> @@ -309,7 +311,9 @@ void free_initmem(void) { ppc_md.progress = ppc_printk_progress; mark_initmem_nx(); + static_branch_enable(&init_mem_is_free); free_initmem_default(POISON_FREE_INITMEM); + ftrace_free_init_tramp(); } /* diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c deleted file mode 100644 index c475cf810aa8..000000000000 --- a/arch/powerpc/mm/mmap.c +++ /dev/null @@ -1,256 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * flexible mmap layout support - * - * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina. - * All Rights Reserved. - * - * Started by Ingo Molnar <mingo@elte.hu> - */ - -#include <linux/personality.h> -#include <linux/mm.h> -#include <linux/random.h> -#include <linux/sched/signal.h> -#include <linux/sched/mm.h> -#include <linux/elf-randomize.h> -#include <linux/security.h> -#include <linux/mman.h> - -/* - * Top of mmap area (just below the process stack). - * - * Leave at least a ~128 MB hole. 
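The layout comment in kasan_init() above fixes the shadow arithmetic: with one shadow byte covering eight bytes of memory, shadow(addr) = (addr >> 3) + offset, and a linear map at c000... whose shadow begins at c00e... implies an offset of 0xa80e000000000000. A small userspace check that reproduces those numbers (arithmetic only, no kernel code):

#include <stdio.h>

/* Outline KASAN shadow arithmetic for the Book3S 64 layout described
 * above; the offset is derived from the comment (shadow of the linear
 * map at c000... starts at c00e...). */
#define KASAN_SHADOW_SCALE_SHIFT 3
#define KASAN_SHADOW_OFFSET      0xa80e000000000000UL

static unsigned long kasan_mem_to_shadow(unsigned long addr)
{
    return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

int main(void)
{
    /* start of the linear map, then of vmalloc space (radix layout) */
    printf("%#lx\n", kasan_mem_to_shadow(0xc000000000000000UL)); /* 0xc00e000000000000 */
    printf("%#lx\n", kasan_mem_to_shadow(0xc008000000000000UL)); /* 0xc00f000000000000 */
    return 0;
}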
- */ -#define MIN_GAP (128*1024*1024) -#define MAX_GAP (TASK_SIZE/6*5) - -static inline int mmap_is_legacy(struct rlimit *rlim_stack) -{ - if (current->personality & ADDR_COMPAT_LAYOUT) - return 1; - - if (rlim_stack->rlim_cur == RLIM_INFINITY) - return 1; - - return sysctl_legacy_va_layout; -} - -unsigned long arch_mmap_rnd(void) -{ - unsigned long shift, rnd; - - shift = mmap_rnd_bits; -#ifdef CONFIG_COMPAT - if (is_32bit_task()) - shift = mmap_rnd_compat_bits; -#endif - rnd = get_random_long() % (1ul << shift); - - return rnd << PAGE_SHIFT; -} - -static inline unsigned long stack_maxrandom_size(void) -{ - if (!(current->flags & PF_RANDOMIZE)) - return 0; - - /* 8MB for 32bit, 1GB for 64bit */ - if (is_32bit_task()) - return (1<<23); - else - return (1<<30); -} - -static inline unsigned long mmap_base(unsigned long rnd, - struct rlimit *rlim_stack) -{ - unsigned long gap = rlim_stack->rlim_cur; - unsigned long pad = stack_maxrandom_size() + stack_guard_gap; - - /* Values close to RLIM_INFINITY can overflow. */ - if (gap + pad > gap) - gap += pad; - - if (gap < MIN_GAP) - gap = MIN_GAP; - else if (gap > MAX_GAP) - gap = MAX_GAP; - - return PAGE_ALIGN(DEFAULT_MAP_WINDOW - gap - rnd); -} - -#ifdef HAVE_ARCH_UNMAPPED_AREA -#ifdef CONFIG_PPC_RADIX_MMU -/* - * Same function as generic code used only for radix, because we don't need to overload - * the generic one. But we will have to duplicate, because hash select - * HAVE_ARCH_UNMAPPED_AREA - */ -static unsigned long -radix__arch_get_unmapped_area(struct file *filp, unsigned long addr, - unsigned long len, unsigned long pgoff, - unsigned long flags) -{ - struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; - int fixed = (flags & MAP_FIXED); - unsigned long high_limit; - struct vm_unmapped_area_info info; - - high_limit = DEFAULT_MAP_WINDOW; - if (addr >= high_limit || (fixed && (addr + len > high_limit))) - high_limit = TASK_SIZE; - - if (len > high_limit) - return -ENOMEM; - - if (fixed) { - if (addr > high_limit - len) - return -ENOMEM; - return addr; - } - - if (addr) { - addr = PAGE_ALIGN(addr); - vma = find_vma(mm, addr); - if (high_limit - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vm_start_gap(vma))) - return addr; - } - - info.flags = 0; - info.length = len; - info.low_limit = mm->mmap_base; - info.high_limit = high_limit; - info.align_mask = 0; - - return vm_unmapped_area(&info); -} - -static unsigned long -radix__arch_get_unmapped_area_topdown(struct file *filp, - const unsigned long addr0, - const unsigned long len, - const unsigned long pgoff, - const unsigned long flags) -{ - struct vm_area_struct *vma; - struct mm_struct *mm = current->mm; - unsigned long addr = addr0; - int fixed = (flags & MAP_FIXED); - unsigned long high_limit; - struct vm_unmapped_area_info info; - - high_limit = DEFAULT_MAP_WINDOW; - if (addr >= high_limit || (fixed && (addr + len > high_limit))) - high_limit = TASK_SIZE; - - if (len > high_limit) - return -ENOMEM; - - if (fixed) { - if (addr > high_limit - len) - return -ENOMEM; - return addr; - } - - if (addr) { - addr = PAGE_ALIGN(addr); - vma = find_vma(mm, addr); - if (high_limit - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vm_start_gap(vma))) - return addr; - } - - info.flags = VM_UNMAPPED_AREA_TOPDOWN; - info.length = len; - info.low_limit = max(PAGE_SIZE, mmap_min_addr); - info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW); - info.align_mask = 0; - - addr = vm_unmapped_area(&info); - if (!(addr & ~PAGE_MASK)) - return addr; - 
VM_BUG_ON(addr != -ENOMEM); - - /* - * A failed mmap() very likely causes application failure, - * so fall back to the bottom-up function here. This scenario - * can happen with large stack limits and large mmap() - * allocations. - */ - return radix__arch_get_unmapped_area(filp, addr0, len, pgoff, flags); -} -#endif - -unsigned long arch_get_unmapped_area(struct file *filp, - unsigned long addr, - unsigned long len, - unsigned long pgoff, - unsigned long flags) -{ -#ifdef CONFIG_PPC_MM_SLICES - return slice_get_unmapped_area(addr, len, flags, - mm_ctx_user_psize(&current->mm->context), 0); -#else - BUG(); -#endif -} - -unsigned long arch_get_unmapped_area_topdown(struct file *filp, - const unsigned long addr0, - const unsigned long len, - const unsigned long pgoff, - const unsigned long flags) -{ -#ifdef CONFIG_PPC_MM_SLICES - return slice_get_unmapped_area(addr0, len, flags, - mm_ctx_user_psize(&current->mm->context), 1); -#else - BUG(); -#endif -} -#endif /* HAVE_ARCH_UNMAPPED_AREA */ - -static void radix__arch_pick_mmap_layout(struct mm_struct *mm, - unsigned long random_factor, - struct rlimit *rlim_stack) -{ -#ifdef CONFIG_PPC_RADIX_MMU - if (mmap_is_legacy(rlim_stack)) { - mm->mmap_base = TASK_UNMAPPED_BASE; - mm->get_unmapped_area = radix__arch_get_unmapped_area; - } else { - mm->mmap_base = mmap_base(random_factor, rlim_stack); - mm->get_unmapped_area = radix__arch_get_unmapped_area_topdown; - } -#endif -} - -/* - * This function, called very early during the creation of a new - * process VM image, sets up which VM layout function to use: - */ -void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) -{ - unsigned long random_factor = 0UL; - - if (current->flags & PF_RANDOMIZE) - random_factor = arch_mmap_rnd(); - - if (radix_enabled()) - return radix__arch_pick_mmap_layout(mm, random_factor, - rlim_stack); - /* - * Fall back to the standard layout if the personality - * bit is set, or if the expected stack growth is unlimited: - */ - if (mmap_is_legacy(rlim_stack)) { - mm->mmap_base = TASK_UNMAPPED_BASE; - mm->get_unmapped_area = arch_get_unmapped_area; - } else { - mm->mmap_base = mmap_base(random_factor, rlim_stack); - mm->get_unmapped_area = arch_get_unmapped_area_topdown; - } -} diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h index 0dd4c18f8363..63c4b1a4d435 100644 --- a/arch/powerpc/mm/mmu_decl.h +++ b/arch/powerpc/mm/mmu_decl.h @@ -155,6 +155,10 @@ struct tlbcam { u32 MAS3; u32 MAS7; }; + +#define NUM_TLBCAMS 64 + +extern struct tlbcam TLBCAM[NUM_TLBCAMS]; #endif #if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_FSL_BOOKE) || defined(CONFIG_PPC_8xx) diff --git a/arch/powerpc/mm/nohash/40x.c b/arch/powerpc/mm/nohash/40x.c index 95751c322f6c..b32e465a3d52 100644 --- a/arch/powerpc/mm/nohash/40x.c +++ b/arch/powerpc/mm/nohash/40x.c @@ -32,7 +32,6 @@ #include <linux/highmem.h> #include <linux/memblock.h> -#include <asm/prom.h> #include <asm/io.h> #include <asm/mmu_context.h> #include <asm/mmu.h> diff --git a/arch/powerpc/mm/nohash/book3e_hugetlbpage.c b/arch/powerpc/mm/nohash/book3e_hugetlbpage.c index 8b88be91b622..307ca919d393 100644 --- a/arch/powerpc/mm/nohash/book3e_hugetlbpage.c +++ b/arch/powerpc/mm/nohash/book3e_hugetlbpage.c @@ -142,7 +142,7 @@ book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte) tsize = shift - 10; /* * We can't be interrupted while we're setting up the MAS - * regusters or after we've confirmed that no tlb exists. + * registers or after we've confirmed that no tlb exists.
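The mmap.c deleted above computed mmap_base by clamping the stack rlimit between MIN_GAP and MAX_GAP and placing the base that far, plus the random factor, below the top of the map window; the generic top-down layout code now provides the equivalent. A simplified standalone version of that calculation (the map-window constant is a stand-in and the stack-padding overflow guard is omitted):

#include <stdio.h>

#define PAGE_SIZE   0x1000UL
#define MAP_WINDOW  0x0000800000000000UL    /* illustrative stand-in */
#define MIN_GAP     (128UL * 1024 * 1024)   /* leave a ~128 MB hole */
#define MAX_GAP     (MAP_WINDOW / 6 * 5)

static unsigned long page_align(unsigned long x)
{
    return x & ~(PAGE_SIZE - 1);
}

/* Clamp the stack gap and place mmap_base below the window top. */
static unsigned long mmap_base(unsigned long stack_limit, unsigned long rnd)
{
    unsigned long gap = stack_limit;

    if (gap < MIN_GAP)
        gap = MIN_GAP;
    else if (gap > MAX_GAP)
        gap = MAX_GAP;

    return page_align(MAP_WINDOW - gap - rnd);
}

int main(void)
{
    /* an 8 MB stack rlimit is clamped up to MIN_GAP */
    printf("%#lx\n", mmap_base(8UL << 20, 0));  /* 0x7ffff8000000 */
    return 0;
}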
*/ local_irq_save(flags); diff --git a/arch/powerpc/mm/nohash/fsl_book3e.c b/arch/powerpc/mm/nohash/fsl_book3e.c index dfe715e0f70a..b8ae6c08c06f 100644 --- a/arch/powerpc/mm/nohash/fsl_book3e.c +++ b/arch/powerpc/mm/nohash/fsl_book3e.c @@ -36,8 +36,8 @@ #include <linux/delay.h> #include <linux/highmem.h> #include <linux/memblock.h> +#include <linux/of_fdt.h> -#include <asm/prom.h> #include <asm/io.h> #include <asm/mmu_context.h> #include <asm/mmu.h> @@ -51,10 +51,9 @@ unsigned int tlbcam_index; -#define NUM_TLBCAMS (64) struct tlbcam TLBCAM[NUM_TLBCAMS]; -struct tlbcamrange { +static struct { unsigned long start; unsigned long limit; phys_addr_t phys; @@ -274,7 +273,7 @@ void __init adjust_total_lowmem(void) i = switch_to_as1(); __max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, false, true); - restore_to_as0(i, 0, 0, 1); + restore_to_as0(i, 0, NULL, 1); pr_info("Memory CAM mapping: "); for (i = 0; i < tlbcam_index - 1; i++) @@ -288,21 +287,18 @@ void __init adjust_total_lowmem(void) #ifdef CONFIG_STRICT_KERNEL_RWX void mmu_mark_rodata_ro(void) { - /* Everything is done in mmu_mark_initmem_nx() */ -} -#endif - -void mmu_mark_initmem_nx(void) -{ unsigned long remapped; - if (!strict_kernel_rwx_enabled()) - return; - remapped = map_mem_in_cams(__max_low_memory, CONFIG_LOWMEM_CAM_NUM, false, false); WARN_ON(__max_low_memory != remapped); } +#endif + +void mmu_mark_initmem_nx(void) +{ + /* Everything is done in mmu_mark_rodata_ro() */ +} void setup_initial_memory_limit(phys_addr_t first_memblock_base, phys_addr_t first_memblock_size) diff --git a/arch/powerpc/mm/nohash/kaslr_booke.c b/arch/powerpc/mm/nohash/kaslr_booke.c index 96c38f971603..1f3f9fedf1bc 100644 --- a/arch/powerpc/mm/nohash/kaslr_booke.c +++ b/arch/powerpc/mm/nohash/kaslr_booke.c @@ -14,8 +14,9 @@ #include <linux/memblock.h> #include <linux/libfdt.h> #include <linux/crash_core.h> +#include <linux/of.h> +#include <linux/of_fdt.h> #include <asm/cacheflush.h> -#include <asm/prom.h> #include <asm/kdump.h> #include <mm/mmu_decl.h> #include <generated/compile.h> @@ -315,7 +316,7 @@ static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true, true); linear_sz = min_t(unsigned long, ram, SZ_512M); - /* If the linear size is smaller than 64M, do not randmize */ + /* If the linear size is smaller than 64M, do not randomize */ if (linear_sz < SZ_64M) return 0; diff --git a/arch/powerpc/mm/nohash/mmu_context.c b/arch/powerpc/mm/nohash/mmu_context.c index 85b048f04c56..ccd5819b1bd9 100644 --- a/arch/powerpc/mm/nohash/mmu_context.c +++ b/arch/powerpc/mm/nohash/mmu_context.c @@ -317,15 +317,6 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next, */ int init_new_context(struct task_struct *t, struct mm_struct *mm) { - /* - * We have MMU_NO_CONTEXT set to be ~0. Hence check - * explicitly against context.id == 0. This ensures that we properly - * initialize context slice details for newly allocated mm's (which will - * have id == 0) and don't alter context slice inherited via fork (which - * will have id != 0). 
- */ - if (mm->context.id == 0) - slice_init_new_context_exec(mm); mm->context.id = MMU_NO_CONTEXT; mm->context.active = 0; pte_frag_set(&mm->context, NULL); diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c index fd2c77af5c55..5e7ccb48b79c 100644 --- a/arch/powerpc/mm/nohash/tlb.c +++ b/arch/powerpc/mm/nohash/tlb.c @@ -358,6 +358,7 @@ void __init early_init_mmu_47x(void) /* * Flush kernel TLB entries in the given range */ +#ifndef CONFIG_PPC_8xx void flush_tlb_kernel_range(unsigned long start, unsigned long end) { #ifdef CONFIG_SMP @@ -370,6 +371,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end) #endif } EXPORT_SYMBOL(flush_tlb_kernel_range); +#endif /* * Currently, for range flushing, we just do a full mm flush. This should @@ -773,9 +775,5 @@ void __init early_init_mmu(void) #ifdef CONFIG_PPC_47x early_init_mmu_47x(); #endif - -#ifdef CONFIG_PPC_MM_SLICES - mm_ctx_set_slb_addr_limit(&init_mm.context, SLB_ADDR_LIMIT_DEFAULT); -#endif } #endif /* CONFIG_PPC64 */ diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 13022d734951..0801b2ce9b7d 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -26,7 +26,6 @@ #include <linux/slab.h> #include <asm/cputhreads.h> #include <asm/sparsemem.h> -#include <asm/prom.h> #include <asm/smp.h> #include <asm/topology.h> #include <asm/firmware.h> @@ -1423,43 +1422,26 @@ out: return rc; } -int find_and_online_cpu_nid(int cpu) +void find_and_update_cpu_nid(int cpu) { __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0}; int new_nid; /* Use associativity from first thread for all siblings */ if (vphn_get_associativity(cpu, associativity)) - return cpu_to_node(cpu); + return; + /* Do not have previous associativity, so find it now. */ new_nid = associativity_to_nid(associativity); - if (new_nid < 0 || !node_possible(new_nid)) - new_nid = first_online_node; - if (!node_online(new_nid)) { -#ifdef CONFIG_MEMORY_HOTPLUG - /* - * Need to ensure that NODE_DATA is initialized for a node from - * available memory (see memblock_alloc_try_nid). If unable to - * init the node, then default to nearest node that has memory - * installed. Skip onlining a node if the subsystems are not - * yet initialized. - */ - if (!topology_inited || try_online_node(new_nid)) - new_nid = first_online_node; -#else - /* - * Default to using the nearest node that has memory installed. - * Otherwise, it would be necessary to patch the kernel MM code - * to deal with more memoryless-node error conditions. - */ + if (new_nid < 0 || !node_possible(new_nid)) new_nid = first_online_node; -#endif - } + else + // Associate node <-> cpu, so cpu_up() calls + // try_online_node() on the right node. 
+ set_cpu_numa_node(cpu, new_nid); - pr_debug("%s:%d cpu %d nid %d\n", __FUNCTION__, __LINE__, - cpu, new_nid); - return new_nid; + pr_debug("%s:%d cpu %d nid %d\n", __func__, __LINE__, cpu, new_nid); } int cpu_to_coregroup_id(int cpu) diff --git a/arch/powerpc/mm/pageattr.c b/arch/powerpc/mm/pageattr.c index 85753e32a4de..6163e484bc6d 100644 --- a/arch/powerpc/mm/pageattr.c +++ b/arch/powerpc/mm/pageattr.c @@ -31,6 +31,7 @@ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data) { long action = (long)data; + addr &= PAGE_MASK; /* modify the PTE bits as desired */ switch (action) { case SET_MEMORY_RO: diff --git a/arch/powerpc/mm/pgtable-frag.c b/arch/powerpc/mm/pgtable-frag.c index 97ae4935da79..20652daa1d7e 100644 --- a/arch/powerpc/mm/pgtable-frag.c +++ b/arch/powerpc/mm/pgtable-frag.c @@ -83,7 +83,7 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel) spin_lock(&mm->page_table_lock); /* * If we find pgtable_page set, we return - * the allocated page with single fragement + * the allocated page with single fragment * count. */ if (likely(!pte_frag_get(&mm->context))) { diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c index 6ec5a7dd7913..e6166b71d36d 100644 --- a/arch/powerpc/mm/pgtable.c +++ b/arch/powerpc/mm/pgtable.c @@ -351,7 +351,7 @@ EXPORT_SYMBOL_GPL(vmalloc_to_phys); * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table * * So long as we atomically load page table pointers we are safe against teardown, - * we can follow the address down to the the page and take a ref on it. + * we can follow the address down to the page and take a ref on it. * This function need to be called with interrupts disabled. We use this variant * when we have MSR[EE] = 0 but the paca->irq_soft_mask = IRQS_ENABLED */ diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index 175aabf101e8..5ac1fd30341b 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c @@ -32,7 +32,6 @@ #include <linux/hugetlb.h> #include <asm/page.h> -#include <asm/prom.h> #include <asm/mmu_context.h> #include <asm/mmu.h> #include <asm/smp.h> diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c index 8c846982766f..2313053fe679 100644 --- a/arch/powerpc/mm/ptdump/ptdump.c +++ b/arch/powerpc/mm/ptdump/ptdump.c @@ -21,6 +21,7 @@ #include <linux/seq_file.h> #include <asm/fixmap.h> #include <linux/const.h> +#include <linux/kasan.h> #include <asm/page.h> #include <asm/hugetlb.h> @@ -289,11 +290,11 @@ static void populate_markers(void) #endif address_markers[i++].start_address = FIXADDR_START; address_markers[i++].start_address = FIXADDR_TOP; +#endif /* CONFIG_PPC64 */ #ifdef CONFIG_KASAN address_markers[i++].start_address = KASAN_SHADOW_START; address_markers[i++].start_address = KASAN_SHADOW_END; #endif -#endif /* CONFIG_PPC64 */ } static int ptdump_show(struct seq_file *m, void *v) diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h index 979701d360da..a4f7880f959d 100644 --- a/arch/powerpc/net/bpf_jit.h +++ b/arch/powerpc/net/bpf_jit.h @@ -13,7 +13,7 @@ #include <asm/types.h> #include <asm/ppc-opcode.h> -#ifdef PPC64_ELF_ABI_v1 +#ifdef CONFIG_PPC64_ELF_ABI_V1 #define FUNCTION_DESCR_SIZE 24 #else #define FUNCTION_DESCR_SIZE 0 @@ -35,7 +35,7 @@ } while (0) /* bl (unconditional 'branch' with link) */ -#define PPC_BL(dest) EMIT(PPC_INST_BL | (((dest) - (unsigned long)(image + ctx->idx)) & 0x03fffffc)) +#define PPC_BL(dest) EMIT(PPC_RAW_BL((dest) - (unsigned long)(image + ctx->idx))) 
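The PPC_BL() rework just above replaces an open-coded instruction mask with the PPC_RAW_BL() helper. For readers unfamiliar with the encoding, here is a sketch of what such a helper has to produce, using the standard Power I-form branch layout (illustrative, not the ppc-opcode.h definition verbatim):

/*
 * I-form branch: primary opcode 18 in the top six bits, a 24-bit
 * signed word displacement below it, AA = 0 (PC-relative), LK = 1
 * (save the return address in LR). 0x48000001 == (18 << 26) | LK.
 */
#define DEMO_RAW_BL(offset)     (0x48000001 | ((offset) & 0x03fffffc))

The displacement is a byte offset whose low two bits are zero (instructions are word aligned), giving bl a reach of +/- 32 MB from the call site, hence the macro's dest-minus-address-of-the-emitted-instruction computation.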
/* "cond" here covers BO:BI fields. */ #define PPC_BCC_SHORT(cond, dest) \ diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index 427185256216..43e634126514 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -276,7 +276,7 @@ skip_codegen_passes: */ bpf_jit_dump(flen, proglen, pass, code_base); -#ifdef PPC64_ELF_ABI_v1 +#ifdef CONFIG_PPC64_ELF_ABI_V1 /* Function descriptor nastiness: Address + TOC */ ((u64 *)image)[0] = (u64)code_base; ((u64 *)image)[1] = local_paca->kernel_toc; diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index 585f257da045..594c54931e20 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -126,7 +126,7 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx) { int i; - if (__is_defined(PPC64_ELF_ABI_v2)) + if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2)) EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc))); /* @@ -266,7 +266,7 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o int b2p_index = bpf_to_ppc(BPF_REG_3); int bpf_tailcall_prologue_size = 8; - if (__is_defined(PPC64_ELF_ABI_v2)) + if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2)) bpf_tailcall_prologue_size += 4; /* skip past the toc load */ /* diff --git a/arch/powerpc/perf/8xx-pmu.c b/arch/powerpc/perf/8xx-pmu.c index 4738c4dbf567..308a2e40d7be 100644 --- a/arch/powerpc/perf/8xx-pmu.c +++ b/arch/powerpc/perf/8xx-pmu.c @@ -157,7 +157,7 @@ static void mpc8xx_pmu_del(struct perf_event *event, int flags) mpc8xx_pmu_read(event); - /* If it was the last user, stop counting to avoid useles overhead */ + /* If it was the last user, stop counting to avoid useless overhead */ switch (event_type(event)) { case PERF_8xx_ID_CPU_CYCLES: break; diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index b5b42cf0a703..140502a7fdf8 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -1142,7 +1142,7 @@ static u64 check_and_compute_delta(u64 prev, u64 val) /* * POWER7 can roll back counter values, if the new value is smaller * than the previous value it will cause the delta and the counter to - * have bogus values unless we rolled a counter over. If a coutner is + * have bogus values unless we rolled a counter over. If a counter is * rolled back, it will be smaller, but within 256, which is the maximum * number of events to rollback at once. If we detect a rollback * return 0. This can lead to a small lack of precision in the @@ -2057,7 +2057,7 @@ static int power_pmu_event_init(struct perf_event *event) /* * PMU config registers have fields that are * reserved and some specific values for bit fields are reserved. - * For ex., MMCRA[61:62] is Randome Sampling Mode (SM) + * For ex., MMCRA[61:62] is Random Sampling Mode (SM) * and value of 0b11 to this field is reserved. * Check for invalid values in attr.config. */ @@ -2447,7 +2447,7 @@ static void __perf_event_interrupt(struct pt_regs *regs) } /* - * During system wide profling or while specific CPU is monitored for an + * During system wide profiling or while specific CPU is monitored for an * event, some corner cases could cause PMC to overflow in idle path. This * will trigger a PMI after waking up from idle. Since counter values are _not_ * saved/restored in idle path, can lead to below "Can't find PMC" message. 
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c index 12c1777187fc..cf5406b31e27 100644 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c @@ -33,7 +33,7 @@ static bool aggregate_result_elements; static cpumask_t hv_24x7_cpumask; -static bool domain_is_valid(unsigned domain) +static bool domain_is_valid(unsigned int domain) { switch (domain) { #define DOMAIN(n, v, x, c) \ @@ -47,7 +47,7 @@ static bool domain_is_valid(unsigned domain) } } -static bool is_physical_domain(unsigned domain) +static bool is_physical_domain(unsigned int domain) { switch (domain) { #define DOMAIN(n, v, x, c) \ @@ -128,7 +128,7 @@ static bool domain_needs_aggregation(unsigned int domain) domain <= HV_PERF_DOMAIN_VCPU_REMOTE_NODE)); } -static const char *domain_name(unsigned domain) +static const char *domain_name(unsigned int domain) { if (!domain_is_valid(domain)) return NULL; @@ -146,7 +146,7 @@ static const char *domain_name(unsigned domain) return NULL; } -static bool catalog_entry_domain_is_valid(unsigned domain) +static bool catalog_entry_domain_is_valid(unsigned int domain) { /* POWER8 doesn't support virtual domains. */ if (interface_version == 1) @@ -258,7 +258,7 @@ static char *event_name(struct hv_24x7_event_data *ev, int *len) static char *event_desc(struct hv_24x7_event_data *ev, int *len) { - unsigned nl = be16_to_cpu(ev->event_name_len); + unsigned int nl = be16_to_cpu(ev->event_name_len); __be16 *desc_len = (__be16 *)(ev->remainder + nl - 2); *len = be16_to_cpu(*desc_len) - 2; @@ -267,9 +267,9 @@ static char *event_desc(struct hv_24x7_event_data *ev, int *len) static char *event_long_desc(struct hv_24x7_event_data *ev, int *len) { - unsigned nl = be16_to_cpu(ev->event_name_len); + unsigned int nl = be16_to_cpu(ev->event_name_len); __be16 *desc_len_ = (__be16 *)(ev->remainder + nl - 2); - unsigned desc_len = be16_to_cpu(*desc_len_); + unsigned int desc_len = be16_to_cpu(*desc_len_); __be16 *long_desc_len = (__be16 *)(ev->remainder + nl + desc_len - 2); *len = be16_to_cpu(*long_desc_len) - 2; @@ -296,8 +296,8 @@ static void *event_end(struct hv_24x7_event_data *ev, void *end) { void *start = ev; __be16 *dl_, *ldl_; - unsigned dl, ldl; - unsigned nl = be16_to_cpu(ev->event_name_len); + unsigned int dl, ldl; + unsigned int nl = be16_to_cpu(ev->event_name_len); if (nl < 2) { pr_debug("%s: name length too short: %d", __func__, nl); @@ -398,7 +398,7 @@ static long h_get_24x7_catalog_page(char page[], u64 version, u32 index) * - Specifying (i.e overriding) values for other parameters * is undefined. 
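The pointer arithmetic in event_desc() and event_long_desc() above implies a packed catalog layout that is easy to misread. Spelled out as a comment, reconstructed from the arithmetic shown here rather than taken from the 24x7 catalog specification:

/*
 * How the accessors walk a catalog entry:
 *
 *   event_name_len (__be16)   covers the name plus the next length word
 *   remainder[]:
 *     name          : event_name_len - 2 bytes
 *     desc_len      : __be16 at remainder + event_name_len - 2
 *     desc          : desc_len - 2 bytes from remainder + event_name_len
 *     long_desc_len : __be16 at remainder + event_name_len + desc_len - 2
 *     long_desc     : starts at remainder + event_name_len + desc_len
 *
 * Each 16-bit length counts its own string plus the 2-byte length word
 * that follows it, which is why every *len computation subtracts 2.
 */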
*/ -static char *event_fmt(struct hv_24x7_event_data *event, unsigned domain) +static char *event_fmt(struct hv_24x7_event_data *event, unsigned int domain) { const char *sindex; const char *lpar; @@ -529,9 +529,9 @@ out_s: return NULL; } -static struct attribute *event_to_attr(unsigned ix, +static struct attribute *event_to_attr(unsigned int ix, struct hv_24x7_event_data *event, - unsigned domain, + unsigned int domain, int nonce) { int event_name_len; @@ -599,8 +599,8 @@ event_to_long_desc_attr(struct hv_24x7_event_data *event, int nonce) return device_str_attr_create(name, nl, nonce, desc, dl); } -static int event_data_to_attrs(unsigned ix, struct attribute **attrs, - struct hv_24x7_event_data *event, int nonce) +static int event_data_to_attrs(unsigned int ix, struct attribute **attrs, + struct hv_24x7_event_data *event, int nonce) { *attrs = event_to_attr(ix, event, event->domain, nonce); if (!*attrs) @@ -614,8 +614,8 @@ struct event_uniq { struct rb_node node; const char *name; int nl; - unsigned ct; - unsigned domain; + unsigned int ct; + unsigned int domain; }; static int memord(const void *d1, size_t s1, const void *d2, size_t s2) @@ -628,8 +628,8 @@ static int memord(const void *d1, size_t s1, const void *d2, size_t s2) return memcmp(d1, d2, s1); } -static int ev_uniq_ord(const void *v1, size_t s1, unsigned d1, const void *v2, - size_t s2, unsigned d2) +static int ev_uniq_ord(const void *v1, size_t s1, unsigned int d1, + const void *v2, size_t s2, unsigned int d2) { int r = memord(v1, s1, v2, s2); @@ -643,7 +643,7 @@ static int ev_uniq_ord(const void *v1, size_t s1, unsigned d1, const void *v2, } static int event_uniq_add(struct rb_root *root, const char *name, int nl, - unsigned domain) + unsigned int domain) { struct rb_node **new = &(root->rb_node), *parent = NULL; struct event_uniq *data; @@ -1398,7 +1398,7 @@ out: static int h_24x7_event_init(struct perf_event *event) { struct hv_perf_caps caps; - unsigned domain; + unsigned int domain; unsigned long hret; u64 ct; diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c index 526d4b767534..d7976ab40d38 100644 --- a/arch/powerpc/perf/imc-pmu.c +++ b/arch/powerpc/perf/imc-pmu.c @@ -6,6 +6,7 @@ * (C) 2017 Anju T Sudhakar, IBM Corporation. * (C) 2017 Hemant K Shaw, IBM Corporation. */ +#include <linux/of.h> #include <linux/perf_event.h> #include <linux/slab.h> #include <asm/opal.h> @@ -521,7 +522,7 @@ static int nest_imc_event_init(struct perf_event *event) /* * Nest HW counter memory resides in a per-chip reserve-memory (HOMER). - * Get the base memory addresss for this cpu. + * Get the base memory address for this cpu. */ chip_id = cpu_to_chip_id(event->cpu); @@ -674,7 +675,7 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu) /* * Check whether core_imc is registered. We could end up here * if the cpuhotplug callback registration fails. i.e, callback - * invokes the offline path for all sucessfully registered cpus. + * invokes the offline path for all successfully registered cpus. * At this stage, core_imc pmu will not be registered and we * should return here. * diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c index a74d382ecbb7..42abbcfc73da 100644 --- a/arch/powerpc/perf/isa207-common.c +++ b/arch/powerpc/perf/isa207-common.c @@ -82,11 +82,11 @@ static unsigned long sdar_mod_val(u64 event) static void mmcra_sdar_mode(u64 event, unsigned long *mmcra) { /* - * MMCRA[SDAR_MODE] specifices how the SDAR should be updated in - * continous sampling mode. 
+ * MMCRA[SDAR_MODE] specifies how the SDAR should be updated in + * continuous sampling mode. * * Incase of Power8: - * MMCRA[SDAR_MODE] will be programmed as "0b01" for continous sampling + * MMCRA[SDAR_MODE] will be programmed as "0b01" for continuous sampling * mode and will be un-changed when setting MMCRA[63] (Marked events). * * Incase of Power9/power10: @@ -108,7 +108,7 @@ static void mmcra_sdar_mode(u64 event, unsigned long *mmcra) *mmcra |= MMCRA_SDAR_MODE_TLB; } -static u64 p10_thresh_cmp_val(u64 value) +static int p10_thresh_cmp_val(u64 value) { int exp = 0; u64 result = value; @@ -139,7 +139,7 @@ static u64 p10_thresh_cmp_val(u64 value) * exponent is also zero. */ if (!(value & 0xC0) && exp) - result = 0; + result = -1; else result = (exp << 8) | value; } @@ -187,7 +187,7 @@ static bool is_thresh_cmp_valid(u64 event) unsigned int cmp, exp; if (cpu_has_feature(CPU_FTR_ARCH_31)) - return p10_thresh_cmp_val(event) != 0; + return p10_thresh_cmp_val(event) >= 0; /* * Check the mantissa upper two bits are not zero, unless the @@ -502,12 +502,14 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp, value |= CNST_THRESH_CTL_SEL_VAL(event >> EVENT_THRESH_SHIFT); mask |= p10_CNST_THRESH_CMP_MASK; value |= p10_CNST_THRESH_CMP_VAL(p10_thresh_cmp_val(event_config1)); - } + } else if (event_is_threshold(event)) + return -1; } else if (cpu_has_feature(CPU_FTR_ARCH_300)) { if (event_is_threshold(event) && is_thresh_cmp_valid(event)) { mask |= CNST_THRESH_MASK; value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT); - } + } else if (event_is_threshold(event)) + return -1; } else { /* * Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC, diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c index c393e837648e..3ad40ffb9256 100644 --- a/arch/powerpc/perf/power9-pmu.c +++ b/arch/powerpc/perf/power9-pmu.c @@ -98,7 +98,7 @@ extern u64 PERF_REG_EXTENDED_MASK; /* PowerISA v2.07 format attribute structure*/ extern const struct attribute_group isa207_pmu_format_group; -int p9_dd21_bl_ev[] = { +static int p9_dd21_bl_ev[] = { PM_MRK_ST_DONE_L2, PM_RADIX_PWC_L1_HIT, PM_FLOP_CMPL, @@ -112,7 +112,7 @@ int p9_dd21_bl_ev[] = { PM_DISP_HELD_SYNC_HOLD, }; -int p9_dd22_bl_ev[] = { +static int p9_dd22_bl_ev[] = { PM_DTLB_MISS_16G, PM_DERAT_MISS_2M, PM_DTLB_MISS_2M, diff --git a/arch/powerpc/platforms/40x/ppc40x_simple.c b/arch/powerpc/platforms/40x/ppc40x_simple.c index e70b42729322..dce696c32679 100644 --- a/arch/powerpc/platforms/40x/ppc40x_simple.c +++ b/arch/powerpc/platforms/40x/ppc40x_simple.c @@ -13,7 +13,6 @@ #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <asm/ppc4xx.h> -#include <asm/prom.h> #include <asm/time.h> #include <asm/udbg.h> #include <asm/uic.h> diff --git a/arch/powerpc/platforms/44x/canyonlands.c b/arch/powerpc/platforms/44x/canyonlands.c index 807968a755ef..5b23aef8bdef 100644 --- a/arch/powerpc/platforms/44x/canyonlands.c +++ b/arch/powerpc/platforms/44x/canyonlands.c @@ -12,6 +12,7 @@ #include <asm/ppc4xx.h> #include <asm/udbg.h> #include <asm/uic.h> +#include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/delay.h> #include "44x.h" diff --git a/arch/powerpc/platforms/44x/fsp2.c b/arch/powerpc/platforms/44x/fsp2.c index af13a59d2f60..e2e4f6d8150d 100644 --- a/arch/powerpc/platforms/44x/fsp2.c +++ b/arch/powerpc/platforms/44x/fsp2.c @@ -14,11 +14,11 @@ */ #include <linux/init.h> +#include <linux/of_fdt.h> #include <linux/of_platform.h> #include <linux/rtc.h> #include <asm/machdep.h> 
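One functional change in the isa207-common.c hunks above deserves a gloss: p10_thresh_cmp_val() now returns int and reports an un-encodable threshold-compare field as -1 rather than 0, because 0 (exponent and mantissa both zero) is itself a legal encoding and the old sentinel made it indistinguishable from failure. The decisive tail of the helper after the change:

        /*
         * The mantissa's upper two bits must be non-zero unless the
         * exponent is also zero; otherwise the field is not
         * representable, and failure is signalled out of band.
         */
        if (!(value & 0xC0) && exp)
                result = -1;            /* was 0, clashing with a valid 0 */
        else
                result = (exp << 8) | value;

Correspondingly, is_thresh_cmp_valid() tests >= 0 instead of != 0, and isa207_get_constraint() now fails the constraint outright (returns -1) for a threshold event whose compare field cannot be encoded, instead of silently programming zero.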
-#include <asm/prom.h> #include <asm/udbg.h> #include <asm/time.h> #include <asm/uic.h> diff --git a/arch/powerpc/platforms/44x/ppc44x_simple.c b/arch/powerpc/platforms/44x/ppc44x_simple.c index 3dbd8ddd734a..2a0dcdf04b21 100644 --- a/arch/powerpc/platforms/44x/ppc44x_simple.c +++ b/arch/powerpc/platforms/44x/ppc44x_simple.c @@ -13,7 +13,6 @@ #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <asm/ppc4xx.h> -#include <asm/prom.h> #include <asm/time.h> #include <asm/udbg.h> #include <asm/uic.h> diff --git a/arch/powerpc/platforms/44x/ppc476.c b/arch/powerpc/platforms/44x/ppc476.c index fb7db5cedd4e..20cc8f80b086 100644 --- a/arch/powerpc/platforms/44x/ppc476.c +++ b/arch/powerpc/platforms/44x/ppc476.c @@ -19,11 +19,11 @@ #include <linux/init.h> #include <linux/of.h> +#include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/rtc.h> #include <asm/machdep.h> -#include <asm/prom.h> #include <asm/udbg.h> #include <asm/time.h> #include <asm/uic.h> diff --git a/arch/powerpc/platforms/44x/sam440ep.c b/arch/powerpc/platforms/44x/sam440ep.c index 68ba4b009da0..ed854b53877e 100644 --- a/arch/powerpc/platforms/44x/sam440ep.c +++ b/arch/powerpc/platforms/44x/sam440ep.c @@ -17,7 +17,6 @@ #include <linux/of_platform.h> #include <asm/machdep.h> -#include <asm/prom.h> #include <asm/udbg.h> #include <asm/time.h> #include <asm/uic.h> diff --git a/arch/powerpc/platforms/44x/warp.c b/arch/powerpc/platforms/44x/warp.c index 665f18e37efb..f03432ef010b 100644 --- a/arch/powerpc/platforms/44x/warp.c +++ b/arch/powerpc/platforms/44x/warp.c @@ -11,12 +11,13 @@ #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/delay.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/of_gpio.h> #include <linux/slab.h> #include <linux/export.h> #include <asm/machdep.h> -#include <asm/prom.h> #include <asm/udbg.h> #include <asm/time.h> #include <asm/uic.h> diff --git a/arch/powerpc/platforms/4xx/cpm.c b/arch/powerpc/platforms/4xx/cpm.c index 2571841625a2..1d3bc35ee1a7 100644 --- a/arch/powerpc/platforms/4xx/cpm.c +++ b/arch/powerpc/platforms/4xx/cpm.c @@ -327,6 +327,6 @@ late_initcall(cpm_init); static int __init cpm_powersave_off(char *arg) { cpm.powersave_off = 1; - return 0; + return 1; } __setup("powersave=off", cpm_powersave_off); diff --git a/arch/powerpc/platforms/4xx/hsta_msi.c b/arch/powerpc/platforms/4xx/hsta_msi.c index fee430eadcc6..d4f7fff1fc87 100644 --- a/arch/powerpc/platforms/4xx/hsta_msi.c +++ b/arch/powerpc/platforms/4xx/hsta_msi.c @@ -10,6 +10,7 @@ #include <linux/interrupt.h> #include <linux/msi.h> #include <linux/of.h> +#include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/pci.h> #include <linux/semaphore.h> diff --git a/arch/powerpc/platforms/4xx/pci.c b/arch/powerpc/platforms/4xx/pci.c index 24f41e178cbc..ca5dd7a5842a 100644 --- a/arch/powerpc/platforms/4xx/pci.c +++ b/arch/powerpc/platforms/4xx/pci.c @@ -22,6 +22,7 @@ #include <linux/pci.h> #include <linux/init.h> #include <linux/of.h> +#include <linux/of_address.h> #include <linux/delay.h> #include <linux/slab.h> diff --git a/arch/powerpc/platforms/4xx/uic.c b/arch/powerpc/platforms/4xx/uic.c index 89e2587b1a59..d667ad039bd3 100644 --- a/arch/powerpc/platforms/4xx/uic.c +++ b/arch/powerpc/platforms/4xx/uic.c @@ -19,9 +19,10 @@ #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/kernel_stat.h> +#include <linux/of.h> +#include <linux/of_irq.h> #include <asm/irq.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/dcr.h> #define NR_UIC_INTS 32 diff 
--git a/arch/powerpc/platforms/512x/clock-commonclk.c b/arch/powerpc/platforms/512x/clock-commonclk.c index 0b03d812baae..0652c7e69225 100644 --- a/arch/powerpc/platforms/512x/clock-commonclk.c +++ b/arch/powerpc/platforms/512x/clock-commonclk.c @@ -663,7 +663,7 @@ static void __init mpc512x_clk_setup_mclk(struct mclk_setup_data *entry, size_t * the PSC/MSCAN/SPDIF (serial drivers et al) need the MCLK * for their bitrate * - in the absence of "aliases" for clocks we need to create - * individial 'struct clk' items for whatever might get + * individual 'struct clk' items for whatever might get * referenced or looked up, even if several of those items are * identical from the logical POV (their rate value) * - for easier future maintenance and for better reflection of diff --git a/arch/powerpc/platforms/512x/mpc5121_ads.c b/arch/powerpc/platforms/512x/mpc5121_ads.c index 9d030c2e0004..fc3fb999cd74 100644 --- a/arch/powerpc/platforms/512x/mpc5121_ads.c +++ b/arch/powerpc/platforms/512x/mpc5121_ads.c @@ -14,7 +14,6 @@ #include <asm/machdep.h> #include <asm/ipic.h> -#include <asm/prom.h> #include <asm/time.h> #include <sysdev/fsl_pci.h> diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c index ea46870e5d6e..6f08d07aee3b 100644 --- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c +++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c @@ -14,7 +14,8 @@ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/io.h> -#include <asm/prom.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> static struct device_node *cpld_pic_node; static struct irq_domain *cpld_pic_host; diff --git a/arch/powerpc/platforms/512x/mpc512x_generic.c b/arch/powerpc/platforms/512x/mpc512x_generic.c index 303bc308b2e6..364564c995bd 100644 --- a/arch/powerpc/platforms/512x/mpc512x_generic.c +++ b/arch/powerpc/platforms/512x/mpc512x_generic.c @@ -13,7 +13,6 @@ #include <asm/machdep.h> #include <asm/ipic.h> -#include <asm/prom.h> #include <asm/time.h> #include "mpc512x.h" diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c index e3411663edad..5ac0ead2540f 100644 --- a/arch/powerpc/platforms/512x/mpc512x_shared.c +++ b/arch/powerpc/platforms/512x/mpc512x_shared.c @@ -12,6 +12,7 @@ #include <linux/kernel.h> #include <linux/io.h> #include <linux/irq.h> +#include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/fsl-diu-fb.h> #include <linux/memblock.h> @@ -20,7 +21,6 @@ #include <asm/cacheflush.h> #include <asm/machdep.h> #include <asm/ipic.h> -#include <asm/prom.h> #include <asm/time.h> #include <asm/mpc5121.h> #include <asm/mpc52xx_psc.h> @@ -289,7 +289,7 @@ static void __init mpc512x_setup_diu(void) /* * We do not allocate and configure new area for bitmap buffer - * because it would requere copying bitmap data (splash image) + * because it would require copying bitmap data (splash image) * and so negatively affect boot time. Instead we reserve the * already configured frame buffer area so that it won't be * destroyed. 
The starting address of the area to reserve and diff --git a/arch/powerpc/platforms/52xx/efika.c b/arch/powerpc/platforms/52xx/efika.c index 3b7d70d71692..e0647720ed5e 100644 --- a/arch/powerpc/platforms/52xx/efika.c +++ b/arch/powerpc/platforms/52xx/efika.c @@ -14,7 +14,6 @@ #include <linux/pci.h> #include <linux/of.h> #include <asm/dma.h> -#include <asm/prom.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/rtas.h> diff --git a/arch/powerpc/platforms/52xx/lite5200.c b/arch/powerpc/platforms/52xx/lite5200.c index 04cc97397095..7ea9b6ce0591 100644 --- a/arch/powerpc/platforms/52xx/lite5200.c +++ b/arch/powerpc/platforms/52xx/lite5200.c @@ -21,7 +21,6 @@ #include <asm/time.h> #include <asm/io.h> #include <asm/machdep.h> -#include <asm/prom.h> #include <asm/mpc52xx.h> /* ************************************************************************ diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c index e7da22d1df87..129313b1d021 100644 --- a/arch/powerpc/platforms/52xx/lite5200_pm.c +++ b/arch/powerpc/platforms/52xx/lite5200_pm.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/init.h> #include <linux/suspend.h> +#include <linux/of_address.h> + #include <asm/io.h> #include <asm/time.h> #include <asm/mpc52xx.h> diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c index 110c444f4bc7..ee367ff3ec8a 100644 --- a/arch/powerpc/platforms/52xx/media5200.c +++ b/arch/powerpc/platforms/52xx/media5200.c @@ -20,8 +20,9 @@ #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/io.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <asm/time.h> -#include <asm/prom.h> #include <asm/machdep.h> #include <asm/mpc52xx.h> diff --git a/arch/powerpc/platforms/52xx/mpc5200_simple.c b/arch/powerpc/platforms/52xx/mpc5200_simple.c index b9f5675b0a1d..cc349d579061 100644 --- a/arch/powerpc/platforms/52xx/mpc5200_simple.c +++ b/arch/powerpc/platforms/52xx/mpc5200_simple.c @@ -22,8 +22,8 @@ */ #undef DEBUG +#include <linux/of.h> #include <asm/time.h> -#include <asm/prom.h> #include <asm/machdep.h> #include <asm/mpc52xx.h> diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c index 565e3a83dc9e..4348506d667d 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_common.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c @@ -15,11 +15,11 @@ #include <linux/gpio.h> #include <linux/kernel.h> #include <linux/spinlock.h> +#include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/of_gpio.h> #include <linux/export.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/mpc52xx.h> /* MPC5200 device tree match tables */ @@ -308,7 +308,7 @@ int mpc5200_psc_ac97_gpio_reset(int psc_number) spin_lock_irqsave(&gpio_lock, flags); - /* Reconfiure pin-muxing to gpio */ + /* Reconfigure pin-muxing to gpio */ mux = in_be32(&simple_gpio->port_config); out_be32(&simple_gpio->port_config, mux & (~gpio)); diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c index f862b48b4824..968f5b727273 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c @@ -5,7 +5,7 @@ * Copyright (c) 2009 Secret Lab Technologies Ltd. 
* Copyright (c) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix * - * This file is a driver for the the General Purpose Timer (gpt) devices + * This file is a driver for the General Purpose Timer (gpt) devices * found on the MPC5200 SoC. Each timer has an IO pin which can be used * for GPIO or can be used to raise interrupts. The timer function can * be used independently from the IO pin, or it can be used to control @@ -55,6 +55,8 @@ #include <linux/list.h> #include <linux/mutex.h> #include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/of_gpio.h> #include <linux/kernel.h> @@ -398,7 +400,7 @@ static int mpc52xx_gpt_do_start(struct mpc52xx_gpt_priv *gpt, u64 period, set |= MPC52xx_GPT_MODE_CONTINUOUS; /* Determine the number of clocks in the requested period. 64 bit - * arithmatic is done here to preserve the precision until the value + * arithmetic is done here to preserve the precision until the value * is scaled back down into the u32 range. Period is in 'ns', bus * frequency is in Hz. */ clocks = period * (u64)gpt->ipb_freq; @@ -502,7 +504,7 @@ u64 mpc52xx_gpt_timer_period(struct mpc52xx_gpt_priv *gpt) if (prescale == 0) prescale = 0x10000; period = period * prescale * 1000000000ULL; - do_div(period, (u64)gpt->ipb_freq); + do_div(period, gpt->ipb_freq); return period; } EXPORT_SYMBOL(mpc52xx_gpt_timer_period); diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c index b91ebebd9ff2..48038aaedbd3 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c @@ -11,11 +11,12 @@ #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/spinlock.h> #include <linux/module.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/mpc52xx.h> #include <asm/time.h> @@ -104,7 +105,7 @@ static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req) * * Configure the watermarks so DMA will always complete correctly. * It may be worth experimenting with the ALARM value to see if - * there is a performance impacit. However, if it is wrong there + * there is a performance impact. 
However, if it is wrong there * is a risk of DMA not transferring the last chunk of data */ if (write) { diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pci.c b/arch/powerpc/platforms/52xx/mpc52xx_pci.c index af0f79995214..859e2818c43d 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_pci.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pci.c @@ -13,6 +13,7 @@ #undef DEBUG #include <linux/pci.h> +#include <linux/of_address.h> #include <asm/mpc52xx.h> #include <asm/delay.h> #include <asm/machdep.h> @@ -242,7 +243,7 @@ mpc52xx_pci_setup(struct pci_controller *hose, u32 tmp; int iwcr0 = 0, iwcr1 = 0, iwcr2 = 0; - pr_debug("mpc52xx_pci_setup(hose=%p, pci_regs=%p)\n", hose, pci_regs); + pr_debug("%s(hose=%p, pci_regs=%p)\n", __func__, hose, pci_regs); /* pci_process_bridge_OF_ranges() found all our addresses for us; * now store them in the right places */ @@ -257,11 +258,7 @@ mpc52xx_pci_setup(struct pci_controller *hose, /* Memory windows */ res = &hose->mem_resources[0]; if (res->flags) { - pr_debug("mem_resource[0] = " - "{.start=%llx, .end=%llx, .flags=%llx}\n", - (unsigned long long)res->start, - (unsigned long long)res->end, - (unsigned long long)res->flags); + pr_debug("mem_resource[0] = %pr\n", res); out_be32(&pci_regs->iw0btar, MPC52xx_PCI_IWBTAR_TRANSLATION(res->start, res->start, resource_size(res))); @@ -274,8 +271,7 @@ mpc52xx_pci_setup(struct pci_controller *hose, res = &hose->mem_resources[1]; if (res->flags) { - pr_debug("mem_resource[1] = {.start=%x, .end=%x, .flags=%lx}\n", - res->start, res->end, res->flags); + pr_debug("mem_resource[1] = %pr\n", res); out_be32(&pci_regs->iw1btar, MPC52xx_PCI_IWBTAR_TRANSLATION(res->start, res->start, resource_size(res))); @@ -292,11 +288,8 @@ mpc52xx_pci_setup(struct pci_controller *hose, printk(KERN_ERR "%s: Didn't find IO resources\n", __FILE__); return; } - pr_debug(".io_resource={.start=%llx,.end=%llx,.flags=%llx} " - ".io_base_phys=0x%p\n", - (unsigned long long)res->start, - (unsigned long long)res->end, - (unsigned long long)res->flags, (void*)hose->io_base_phys); + pr_debug(".io_resource = %pr .io_base_phys=0x%pa\n", + res, &hose->io_base_phys); out_be32(&pci_regs->iw2btar, MPC52xx_PCI_IWBTAR_TRANSLATION(hose->io_base_phys, res->start, @@ -336,8 +329,7 @@ mpc52xx_pci_fixup_resources(struct pci_dev *dev) { int i; - pr_debug("mpc52xx_pci_fixup_resources() %.4x:%.4x\n", - dev->vendor, dev->device); + pr_debug("%s() %.4x:%.4x\n", __func__, dev->vendor, dev->device); /* We don't rely on boot loader for PCI and resets all devices */ diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c index 76a8102bdb98..1e0a5e9644dc 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c @@ -101,8 +101,9 @@ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/mpc52xx.h> /* HW IRQ mapping */ diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c index b1d208ded981..549b3629e39a 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c @@ -2,6 +2,8 @@ #include <linux/init.h> #include <linux/suspend.h> #include <linux/io.h> +#include <linux/of_address.h> + #include <asm/time.h> #include <asm/cacheflush.h> #include <asm/mpc52xx.h> diff --git a/arch/powerpc/platforms/82xx/ep8248e.c b/arch/powerpc/platforms/82xx/ep8248e.c index 369ebb1b7af1..28e627f8a320 
100644 --- a/arch/powerpc/platforms/82xx/ep8248e.c +++ b/arch/powerpc/platforms/82xx/ep8248e.c @@ -20,7 +20,6 @@ #include <asm/machdep.h> #include <asm/time.h> #include <asm/mpc8260.h> -#include <asm/prom.h> #include <sysdev/fsl_soc.h> #include <sysdev/cpm2_pic.h> diff --git a/arch/powerpc/platforms/82xx/km82xx.c b/arch/powerpc/platforms/82xx/km82xx.c index 745ed61df5d8..1c8bbf4251d9 100644 --- a/arch/powerpc/platforms/82xx/km82xx.c +++ b/arch/powerpc/platforms/82xx/km82xx.c @@ -20,7 +20,6 @@ #include <asm/machdep.h> #include <linux/time.h> #include <asm/mpc8260.h> -#include <asm/prom.h> #include <sysdev/fsl_soc.h> #include <sysdev/cpm2_pic.h> diff --git a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c index 285bfe19b798..cf3210042a2e 100644 --- a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c +++ b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c @@ -14,9 +14,9 @@ #include <linux/irq.h> #include <linux/types.h> #include <linux/slab.h> +#include <linux/of_irq.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/cpm2.h> #include "pq2.h" diff --git a/arch/powerpc/platforms/83xx/km83xx.c b/arch/powerpc/platforms/83xx/km83xx.c index d9eed0decb28..907acdecc94a 100644 --- a/arch/powerpc/platforms/83xx/km83xx.c +++ b/arch/powerpc/platforms/83xx/km83xx.c @@ -29,7 +29,6 @@ #include <asm/machdep.h> #include <asm/ipic.h> #include <asm/irq.h> -#include <asm/prom.h> #include <asm/udbg.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> diff --git a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c index a38372f9ac12..abb62fa630ef 100644 --- a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c +++ b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c @@ -8,17 +8,16 @@ */ #include <linux/kernel.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/device.h> #include <linux/mutex.h> #include <linux/i2c.h> #include <linux/gpio/driver.h> -#include <linux/of.h> -#include <linux/of_gpio.h> #include <linux/slab.h> #include <linux/kthread.h> +#include <linux/property.h> #include <linux/reboot.h> -#include <asm/prom.h> #include <asm/machdep.h> /* @@ -116,21 +115,17 @@ static int mcu_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) static int mcu_gpiochip_add(struct mcu *mcu) { - struct device_node *np; + struct device *dev = &mcu->client->dev; struct gpio_chip *gc = &mcu->gc; - np = of_find_compatible_node(NULL, NULL, "fsl,mcu-mpc8349emitx"); - if (!np) - return -ENODEV; - gc->owner = THIS_MODULE; - gc->label = kasprintf(GFP_KERNEL, "%pOF", np); + gc->label = kasprintf(GFP_KERNEL, "%pfw", dev_fwnode(dev)); gc->can_sleep = 1; gc->ngpio = MCU_NUM_GPIO; gc->base = -1; gc->set = mcu_gpio_set; gc->direction_output = mcu_gpio_dir_out; - gc->of_node = np; + gc->parent = dev; return gpiochip_add_data(gc, mcu); } diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.c b/arch/powerpc/platforms/83xx/mpc832x_mds.c index 850d566ef900..435344405d2c 100644 --- a/arch/powerpc/platforms/83xx/mpc832x_mds.c +++ b/arch/powerpc/platforms/83xx/mpc832x_mds.c @@ -28,7 +28,6 @@ #include <asm/machdep.h> #include <asm/ipic.h> #include <asm/irq.h> -#include <asm/prom.h> #include <asm/udbg.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c index b6133a237a70..bb8caa5071f8 100644 --- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c +++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c @@ -15,6 +15,7 @@ #include 
<linux/spi/spi.h> #include <linux/spi/mmc_spi.h> #include <linux/mmc/host.h> +#include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/fsl_devices.h> diff --git a/arch/powerpc/platforms/83xx/mpc834x_itx.c b/arch/powerpc/platforms/83xx/mpc834x_itx.c index 9630f3aa4d9c..6a110f275304 100644 --- a/arch/powerpc/platforms/83xx/mpc834x_itx.c +++ b/arch/powerpc/platforms/83xx/mpc834x_itx.c @@ -27,7 +27,6 @@ #include <asm/machdep.h> #include <asm/ipic.h> #include <asm/irq.h> -#include <asm/prom.h> #include <asm/udbg.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> diff --git a/arch/powerpc/platforms/83xx/mpc834x_mds.c b/arch/powerpc/platforms/83xx/mpc834x_mds.c index 0713deffb40c..7dde5a75332b 100644 --- a/arch/powerpc/platforms/83xx/mpc834x_mds.c +++ b/arch/powerpc/platforms/83xx/mpc834x_mds.c @@ -19,6 +19,7 @@ #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/root_dev.h> +#include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/atomic.h> @@ -27,7 +28,6 @@ #include <asm/machdep.h> #include <asm/ipic.h> #include <asm/irq.h> -#include <asm/prom.h> #include <asm/udbg.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> diff --git a/arch/powerpc/platforms/83xx/mpc836x_mds.c b/arch/powerpc/platforms/83xx/mpc836x_mds.c index da4cf52cb55b..b1e6665be5d3 100644 --- a/arch/powerpc/platforms/83xx/mpc836x_mds.c +++ b/arch/powerpc/platforms/83xx/mpc836x_mds.c @@ -35,7 +35,6 @@ #include <asm/machdep.h> #include <asm/ipic.h> #include <asm/irq.h> -#include <asm/prom.h> #include <asm/udbg.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> diff --git a/arch/powerpc/platforms/83xx/mpc836x_rdk.c b/arch/powerpc/platforms/83xx/mpc836x_rdk.c index 3427ad0d9d38..731bc5ce726d 100644 --- a/arch/powerpc/platforms/83xx/mpc836x_rdk.c +++ b/arch/powerpc/platforms/83xx/mpc836x_rdk.c @@ -12,7 +12,6 @@ #include <linux/pci.h> #include <linux/of_platform.h> #include <linux/io.h> -#include <asm/prom.h> #include <asm/time.h> #include <asm/ipic.h> #include <asm/udbg.h> diff --git a/arch/powerpc/platforms/83xx/mpc837x_mds.c b/arch/powerpc/platforms/83xx/mpc837x_mds.c index fc88ab97f6e3..fa3538803af7 100644 --- a/arch/powerpc/platforms/83xx/mpc837x_mds.c +++ b/arch/powerpc/platforms/83xx/mpc837x_mds.c @@ -9,12 +9,12 @@ #include <linux/pci.h> #include <linux/of.h> +#include <linux/of_address.h> #include <linux/of_platform.h> #include <asm/time.h> #include <asm/ipic.h> #include <asm/udbg.h> -#include <asm/prom.h> #include <sysdev/fsl_pci.h> #include "mpc83xx.h" diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c index bb147d34d4a6..6d47a5b81485 100644 --- a/arch/powerpc/platforms/83xx/suspend.c +++ b/arch/powerpc/platforms/83xx/suspend.c @@ -322,18 +322,15 @@ static const struct platform_suspend_ops mpc83xx_suspend_ops = { static const struct of_device_id pmc_match[]; static int pmc_probe(struct platform_device *ofdev) { - const struct of_device_id *match; struct device_node *np = ofdev->dev.of_node; struct resource res; const struct pmc_type *type; int ret = 0; - match = of_match_device(pmc_match, &ofdev->dev); - if (!match) + type = of_device_get_match_data(&ofdev->dev); + if (!type) return -EINVAL; - type = match->data; - if (!of_device_is_available(np)) return -ENODEV; diff --git a/arch/powerpc/platforms/83xx/usb.c b/arch/powerpc/platforms/83xx/usb.c index b0bda20aaccf..e2a13a052f96 100644 --- a/arch/powerpc/platforms/83xx/usb.c +++ b/arch/powerpc/platforms/83xx/usb.c @@ -11,9 +11,9 @@ #include <linux/kernel.h> 
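The pmc_probe() cleanup above (in 83xx/suspend.c) shows the modern idiom: of_device_get_match_data() returns the .data pointer of the of_device_id the device matched, removing the of_match_device() boilerplate. A self-contained sketch of the pattern; the driver name, compatible string and quirk struct below are invented for illustration:

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct demo_type {
        int has_deep_sleep;     /* per-compatible quirk flag */
};

static const struct demo_type demo_type_a = { .has_deep_sleep = 1 };

static const struct of_device_id demo_match[] = {
        { .compatible = "vendor,demo-pmc-a", .data = &demo_type_a },
        { }
};

static int demo_probe(struct platform_device *ofdev)
{
        /* NULL here means the device was bound without match data */
        const struct demo_type *type = of_device_get_match_data(&ofdev->dev);

        if (!type)
                return -EINVAL;
        return 0;
}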
#include <linux/errno.h> #include <linux/of.h> +#include <linux/of_address.h> #include <asm/io.h> -#include <asm/prom.h> #include <sysdev/fsl_soc.h> #include "mpc83xx.h" diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig index 4142ebf01382..2be17ffe8714 100644 --- a/arch/powerpc/platforms/85xx/Kconfig +++ b/arch/powerpc/platforms/85xx/Kconfig @@ -16,15 +16,6 @@ if FSL_SOC_BOOKE if PPC32 -config FSL_85XX_CACHE_SRAM - bool - select PPC_LIB_RHEAP - help - When selected, this option enables cache-sram support - for memory allocation on P1/P2 QorIQ platforms. - cache-sram-size and cache-sram-offset kernel boot - parameters should be passed when this option is enabled. - config BSC9131_RDB bool "Freescale BSC9131RDB" select DEFAULT_UIMAGE diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c index 17ae75d62518..28d6b36f1ccd 100644 --- a/arch/powerpc/platforms/85xx/corenet_generic.c +++ b/arch/powerpc/platforms/85xx/corenet_generic.c @@ -19,7 +19,6 @@ #include <asm/pci-bridge.h> #include <asm/ppc-pci.h> #include <mm/mmu_decl.h> -#include <asm/prom.h> #include <asm/udbg.h> #include <asm/mpic.h> #include <asm/ehv_pic.h> diff --git a/arch/powerpc/platforms/85xx/ge_imp3a.c b/arch/powerpc/platforms/85xx/ge_imp3a.c index 743c65e4d8e4..8e827376d97b 100644 --- a/arch/powerpc/platforms/85xx/ge_imp3a.c +++ b/arch/powerpc/platforms/85xx/ge_imp3a.c @@ -17,13 +17,13 @@ #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/interrupt.h> +#include <linux/of_address.h> #include <linux/of_platform.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <mm/mmu_decl.h> -#include <asm/prom.h> #include <asm/udbg.h> #include <asm/mpic.h> #include <asm/swiotlb.h> diff --git a/arch/powerpc/platforms/85xx/ksi8560.c b/arch/powerpc/platforms/85xx/ksi8560.c index 6ef8580fdc0e..bdf9d42f8521 100644 --- a/arch/powerpc/platforms/85xx/ksi8560.c +++ b/arch/powerpc/platforms/85xx/ksi8560.c @@ -26,7 +26,6 @@ #include <asm/mpic.h> #include <mm/mmu_decl.h> #include <asm/udbg.h> -#include <asm/prom.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> diff --git a/arch/powerpc/platforms/85xx/mpc8536_ds.c b/arch/powerpc/platforms/85xx/mpc8536_ds.c index 53bccb8bbcbe..e5d7386ad612 100644 --- a/arch/powerpc/platforms/85xx/mpc8536_ds.c +++ b/arch/powerpc/platforms/85xx/mpc8536_ds.c @@ -18,7 +18,6 @@ #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <mm/mmu_decl.h> -#include <asm/prom.h> #include <asm/udbg.h> #include <asm/mpic.h> #include <asm/swiotlb.h> diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c index 5bd487030256..48f3acfece0b 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c @@ -21,6 +21,8 @@ #include <linux/initrd.h> #include <linux/interrupt.h> #include <linux/fsl_devices.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/pgtable.h> @@ -33,7 +35,6 @@ #include <asm/pci-bridge.h> #include <asm/irq.h> #include <mm/mmu_decl.h> -#include <asm/prom.h> #include <asm/udbg.h> #include <asm/mpic.h> #include <asm/i8259.h> @@ -151,7 +152,7 @@ static void __init mpc85xx_cds_pci_irq_fixup(struct pci_dev *dev) */ case PCI_DEVICE_ID_VIA_82C586_2: /* There are two USB controllers. 
- * Identify them by functon number + * Identify them by function number */ if (PCI_FUNC(dev->devfn) == 3) dev->irq = 11; diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c index 2157a8017aa4..f8d2c97f39bd 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c @@ -15,13 +15,13 @@ #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/interrupt.h> +#include <linux/of_irq.h> #include <linux/of_platform.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <mm/mmu_decl.h> -#include <asm/prom.h> #include <asm/udbg.h> #include <asm/mpic.h> #include <asm/i8259.h> diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c index 7759eca7d535..3a2ac410af18 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c @@ -39,7 +39,6 @@ #include <asm/pci-bridge.h> #include <asm/irq.h> #include <mm/mmu_decl.h> -#include <asm/prom.h> #include <asm/udbg.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c index 80a80174768c..d99aba158235 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c @@ -19,7 +19,6 @@ #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <mm/mmu_decl.h> -#include <asm/prom.h> #include <asm/udbg.h> #include <asm/mpic.h> #include <soc/fsl/qe/qe.h> diff --git a/arch/powerpc/platforms/85xx/p1010rdb.c b/arch/powerpc/platforms/85xx/p1010rdb.c index 24855284b14a..8ba9306a96b6 100644 --- a/arch/powerpc/platforms/85xx/p1010rdb.c +++ b/arch/powerpc/platforms/85xx/p1010rdb.c @@ -16,7 +16,6 @@ #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <mm/mmu_decl.h> -#include <asm/prom.h> #include <asm/udbg.h> #include <asm/mpic.h> diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c index 1f1af0557470..537599906146 100644 --- a/arch/powerpc/platforms/85xx/p1022_ds.c +++ b/arch/powerpc/platforms/85xx/p1022_ds.c @@ -18,6 +18,7 @@ #include <linux/fsl/guts.h> #include <linux/pci.h> +#include <linux/of_address.h> #include <linux/of_platform.h> #include <asm/div64.h> #include <asm/mpic.h> diff --git a/arch/powerpc/platforms/85xx/p1022_rdk.c b/arch/powerpc/platforms/85xx/p1022_rdk.c index fd9e3e7ef234..bc58a99164c9 100644 --- a/arch/powerpc/platforms/85xx/p1022_rdk.c +++ b/arch/powerpc/platforms/85xx/p1022_rdk.c @@ -14,6 +14,7 @@ #include <linux/fsl/guts.h> #include <linux/pci.h> +#include <linux/of_address.h> #include <linux/of_platform.h> #include <asm/div64.h> #include <asm/mpic.h> diff --git a/arch/powerpc/platforms/85xx/p1023_rdb.c b/arch/powerpc/platforms/85xx/p1023_rdb.c index 3b9cc4979641..c04868eb2eb1 100644 --- a/arch/powerpc/platforms/85xx/p1023_rdb.c +++ b/arch/powerpc/platforms/85xx/p1023_rdb.c @@ -15,6 +15,7 @@ #include <linux/delay.h> #include <linux/module.h> #include <linux/fsl_devices.h> +#include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/of_device.h> @@ -22,7 +23,6 @@ #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <mm/mmu_decl.h> -#include <asm/prom.h> #include <asm/udbg.h> #include <asm/mpic.h> #include "smp.h" diff --git a/arch/powerpc/platforms/85xx/qemu_e500.c b/arch/powerpc/platforms/85xx/qemu_e500.c index 4c4d577effd9..64109ad6736c 100644 --- a/arch/powerpc/platforms/85xx/qemu_e500.c +++ 
b/arch/powerpc/platforms/85xx/qemu_e500.c @@ -12,6 +12,7 @@ */ #include <linux/kernel.h> +#include <linux/of.h> #include <linux/of_fdt.h> #include <linux/pgtable.h> #include <asm/machdep.h> diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c index a1c6a7827c8f..9c43cf32f4c9 100644 --- a/arch/powerpc/platforms/85xx/smp.c +++ b/arch/powerpc/platforms/85xx/smp.c @@ -208,7 +208,7 @@ static int smp_85xx_start_cpu(int cpu) * The bootpage and highmem can be accessed via ioremap(), but * we need to directly access the spinloop if its in lowmem. */ - ioremappable = *cpu_rel_addr > virt_to_phys(high_memory); + ioremappable = *cpu_rel_addr > virt_to_phys(high_memory - 1); /* Map the spin table */ if (ioremappable) diff --git a/arch/powerpc/platforms/85xx/socrates.c b/arch/powerpc/platforms/85xx/socrates.c index 166b3515ba73..09f64470c765 100644 --- a/arch/powerpc/platforms/85xx/socrates.c +++ b/arch/powerpc/platforms/85xx/socrates.c @@ -29,7 +29,6 @@ #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <asm/mpic.h> -#include <asm/prom.h> #include <mm/mmu_decl.h> #include <asm/udbg.h> diff --git a/arch/powerpc/platforms/85xx/stx_gp3.c b/arch/powerpc/platforms/85xx/stx_gp3.c index 69e917e3ba1c..6b1fe7bb3a8c 100644 --- a/arch/powerpc/platforms/85xx/stx_gp3.c +++ b/arch/powerpc/platforms/85xx/stx_gp3.c @@ -28,7 +28,6 @@ #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <asm/mpic.h> -#include <asm/prom.h> #include <mm/mmu_decl.h> #include <asm/udbg.h> diff --git a/arch/powerpc/platforms/85xx/tqm85xx.c b/arch/powerpc/platforms/85xx/tqm85xx.c index 95a1a1118a31..d187f4b8bff6 100644 --- a/arch/powerpc/platforms/85xx/tqm85xx.c +++ b/arch/powerpc/platforms/85xx/tqm85xx.c @@ -26,7 +26,6 @@ #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <asm/mpic.h> -#include <asm/prom.h> #include <mm/mmu_decl.h> #include <asm/udbg.h> diff --git a/arch/powerpc/platforms/85xx/xes_mpc85xx.c b/arch/powerpc/platforms/85xx/xes_mpc85xx.c index 397e158c1edb..5836e4ecb7a0 100644 --- a/arch/powerpc/platforms/85xx/xes_mpc85xx.c +++ b/arch/powerpc/platforms/85xx/xes_mpc85xx.c @@ -16,13 +16,13 @@ #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/interrupt.h> +#include <linux/of_address.h> #include <linux/of_platform.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <mm/mmu_decl.h> -#include <asm/prom.h> #include <asm/udbg.h> #include <asm/mpic.h> diff --git a/arch/powerpc/platforms/86xx/gef_ppc9a.c b/arch/powerpc/platforms/86xx/gef_ppc9a.c index 44bbbc535e1d..8e358fa0bc41 100644 --- a/arch/powerpc/platforms/86xx/gef_ppc9a.c +++ b/arch/powerpc/platforms/86xx/gef_ppc9a.c @@ -18,12 +18,12 @@ #include <linux/kdev_t.h> #include <linux/delay.h> #include <linux/seq_file.h> +#include <linux/of_address.h> #include <linux/of_platform.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> -#include <asm/prom.h> #include <mm/mmu_decl.h> #include <asm/udbg.h> @@ -180,7 +180,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB, * * This function is called to determine whether the BSP is compatible with the * supplied device-tree, which is assumed to be the correct one for the actual - * board. It is expected thati, in the future, a kernel may support multiple + * board. It is expected that, in the future, a kernel may support multiple * boards. 
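The 85xx/smp.c change above is a subtle boundary fix: high_memory points one byte past the end of lowmem, so it is not itself a directly mapped address, and feeding it to virt_to_phys() reads outside the linear mapping (when lowmem runs to the very top of the 32-bit address space, the one-past-the-end pointer can even wrap). Translating the last mapped byte keeps the test safe, and also classifies a spin table starting exactly at the end of lowmem as needing ioremap():

        /* spin table above lowmem? then it must be ioremap()ed */
        ioremappable = *cpu_rel_addr > virt_to_phys(high_memory - 1);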
*/ static int __init gef_ppc9a_probe(void) diff --git a/arch/powerpc/platforms/86xx/gef_sbc310.c b/arch/powerpc/platforms/86xx/gef_sbc310.c index 46d6d3d4957a..b5b2733567cb 100644 --- a/arch/powerpc/platforms/86xx/gef_sbc310.c +++ b/arch/powerpc/platforms/86xx/gef_sbc310.c @@ -18,12 +18,12 @@ #include <linux/kdev_t.h> #include <linux/delay.h> #include <linux/seq_file.h> +#include <linux/of_address.h> #include <linux/of_platform.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> -#include <asm/prom.h> #include <mm/mmu_decl.h> #include <asm/udbg.h> @@ -167,7 +167,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB, * * This function is called to determine whether the BSP is compatible with the * supplied device-tree, which is assumed to be the correct one for the actual - * board. It is expected thati, in the future, a kernel may support multiple + * board. It is expected that, in the future, a kernel may support multiple * boards. */ static int __init gef_sbc310_probe(void) diff --git a/arch/powerpc/platforms/86xx/gef_sbc610.c b/arch/powerpc/platforms/86xx/gef_sbc610.c index acf2c6c3c1eb..bb4c8e6b44d0 100644 --- a/arch/powerpc/platforms/86xx/gef_sbc610.c +++ b/arch/powerpc/platforms/86xx/gef_sbc610.c @@ -18,12 +18,12 @@ #include <linux/kdev_t.h> #include <linux/delay.h> #include <linux/seq_file.h> +#include <linux/of_address.h> #include <linux/of_platform.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> -#include <asm/prom.h> #include <mm/mmu_decl.h> #include <asm/udbg.h> @@ -157,7 +157,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB, * * This function is called to determine whether the BSP is compatible with the * supplied device-tree, which is assumed to be the correct one for the actual - * board. It is expected thati, in the future, a kernel may support multiple + * board. It is expected that, in the future, a kernel may support multiple * boards. 
*/ static int __init gef_sbc610_probe(void) diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c index 7733d0607da2..b593b9afd30a 100644 --- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c +++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c @@ -20,12 +20,13 @@ #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/fsl/guts.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> -#include <asm/prom.h> #include <mm/mmu_decl.h> #include <asm/udbg.h> diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c index a6b8ffcbf01a..5294394c9c07 100644 --- a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c +++ b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c @@ -19,7 +19,6 @@ #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> -#include <asm/prom.h> #include <mm/mmu_decl.h> #include <asm/udbg.h> #include <asm/swiotlb.h> diff --git a/arch/powerpc/platforms/86xx/mvme7100.c b/arch/powerpc/platforms/86xx/mvme7100.c index ee983613570c..b2cc32a32d0b 100644 --- a/arch/powerpc/platforms/86xx/mvme7100.c +++ b/arch/powerpc/platforms/86xx/mvme7100.c @@ -19,6 +19,7 @@ #include <linux/pci.h> #include <linux/of.h> +#include <linux/of_fdt.h> #include <linux/of_platform.h> #include <linux/of_address.h> #include <asm/udbg.h> diff --git a/arch/powerpc/platforms/8xx/Makefile b/arch/powerpc/platforms/8xx/Makefile index 27a7c6f828e0..5a098f7d5d31 100644 --- a/arch/powerpc/platforms/8xx/Makefile +++ b/arch/powerpc/platforms/8xx/Makefile @@ -3,7 +3,7 @@ # Makefile for the PowerPC 8xx linux kernel. # obj-y += m8xx_setup.o machine_check.o pic.o -obj-$(CONFIG_CPM1) += cpm1.o +obj-$(CONFIG_CPM1) += cpm1.o cpm1-ic.o obj-$(CONFIG_UCODE_PATCH) += micropatch.o obj-$(CONFIG_MPC885ADS) += mpc885ads_setup.o obj-$(CONFIG_MPC86XADS) += mpc86xads_setup.o diff --git a/arch/powerpc/platforms/8xx/adder875.c b/arch/powerpc/platforms/8xx/adder875.c index 651486acb896..10e6e4fe77fc 100644 --- a/arch/powerpc/platforms/8xx/adder875.c +++ b/arch/powerpc/platforms/8xx/adder875.c @@ -15,9 +15,9 @@ #include <asm/cpm1.h> #include <asm/fs_pd.h> #include <asm/udbg.h> -#include <asm/prom.h> #include "mpc8xx.h" +#include "pic.h" struct cpm_pin { int port, pin, flags; @@ -104,7 +104,7 @@ define_machine(adder875) { .name = "Adder MPC875", .probe = adder875_probe, .setup_arch = adder875_setup, - .init_IRQ = mpc8xx_pics_init, + .init_IRQ = mpc8xx_pic_init, .get_irq = mpc8xx_get_irq, .restart = mpc8xx_restart, .calibrate_decr = generic_calibrate_decr, diff --git a/arch/powerpc/platforms/8xx/cpm1-ic.c b/arch/powerpc/platforms/8xx/cpm1-ic.c new file mode 100644 index 000000000000..a18fc7c99f83 --- /dev/null +++ b/arch/powerpc/platforms/8xx/cpm1-ic.c @@ -0,0 +1,188 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Interrupt controller for the + * Communication Processor Module. 
+ * Copyright (c) 1997 Dan error_act (dmalek@jlc.net) + */ +#include <linux/kernel.h> +#include <linux/interrupt.h> +#include <linux/irqdomain.h> +#include <linux/platform_device.h> +#include <asm/cpm1.h> + +struct cpm_pic_data { + cpic8xx_t __iomem *reg; + struct irq_domain *host; +}; + +static void cpm_mask_irq(struct irq_data *d) +{ + struct cpm_pic_data *data = irq_data_get_irq_chip_data(d); + unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d); + + clrbits32(&data->reg->cpic_cimr, (1 << cpm_vec)); +} + +static void cpm_unmask_irq(struct irq_data *d) +{ + struct cpm_pic_data *data = irq_data_get_irq_chip_data(d); + unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d); + + setbits32(&data->reg->cpic_cimr, (1 << cpm_vec)); +} + +static void cpm_end_irq(struct irq_data *d) +{ + struct cpm_pic_data *data = irq_data_get_irq_chip_data(d); + unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d); + + out_be32(&data->reg->cpic_cisr, (1 << cpm_vec)); +} + +static struct irq_chip cpm_pic = { + .name = "CPM PIC", + .irq_mask = cpm_mask_irq, + .irq_unmask = cpm_unmask_irq, + .irq_eoi = cpm_end_irq, +}; + +static int cpm_get_irq(struct irq_desc *desc) +{ + struct cpm_pic_data *data = irq_desc_get_handler_data(desc); + int cpm_vec; + + /* + * Get the vector by setting the ACK bit and then reading + * the register. + */ + out_be16(&data->reg->cpic_civr, 1); + cpm_vec = in_be16(&data->reg->cpic_civr); + cpm_vec >>= 11; + + return irq_linear_revmap(data->host, cpm_vec); +} + +static void cpm_cascade(struct irq_desc *desc) +{ + generic_handle_irq(cpm_get_irq(desc)); +} + +static int cpm_pic_host_map(struct irq_domain *h, unsigned int virq, + irq_hw_number_t hw) +{ + irq_set_chip_data(virq, h->host_data); + irq_set_status_flags(virq, IRQ_LEVEL); + irq_set_chip_and_handler(virq, &cpm_pic, handle_fasteoi_irq); + return 0; +} + +static const struct irq_domain_ops cpm_pic_host_ops = { + .map = cpm_pic_host_map, +}; + +static int cpm_pic_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + int irq; + struct cpm_pic_data *data; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->reg = devm_ioremap(dev, res->start, resource_size(res)); + if (!data->reg) + return -ENODEV; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + /* Initialize the CPM interrupt controller. */ + out_be32(&data->reg->cpic_cicr, + (CICR_SCD_SCC4 | CICR_SCC_SCC3 | CICR_SCB_SCC2 | CICR_SCA_SCC1) | + ((virq_to_hw(irq) / 2) << 13) | CICR_HP_MASK); + + out_be32(&data->reg->cpic_cimr, 0); + + data->host = irq_domain_add_linear(dev->of_node, 64, &cpm_pic_host_ops, data); + if (!data->host) + return -ENODEV; + + irq_set_handler_data(irq, data); + irq_set_chained_handler(irq, cpm_cascade); + + setbits32(&data->reg->cpic_cicr, CICR_IEN); + + return 0; +} + +static const struct of_device_id cpm_pic_match[] = { + { + .compatible = "fsl,cpm1-pic", + }, { + .type = "cpm-pic", + .compatible = "CPM", + }, {}, +}; + +static struct platform_driver cpm_pic_driver = { + .driver = { + .name = "cpm-pic", + .of_match_table = cpm_pic_match, + }, + .probe = cpm_pic_probe, +}; + +static int __init cpm_pic_init(void) +{ + return platform_driver_register(&cpm_pic_driver); +} +arch_initcall(cpm_pic_init); + +/* + * The CPM can generate the error interrupt when there is a race condition + * between generating and masking interrupts. 
All we have to do is ACK it + * and return. This is a no-op function so we don't need any special + * tests in the interrupt handler. + */ +static irqreturn_t cpm_error_interrupt(int irq, void *dev) +{ + return IRQ_HANDLED; +} + +static int cpm_error_probe(struct platform_device *pdev) +{ + int irq; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + return request_irq(irq, cpm_error_interrupt, IRQF_NO_THREAD, "error", NULL); +} + +static const struct of_device_id cpm_error_ids[] = { + { .compatible = "fsl,cpm1" }, + { .type = "cpm" }, + {}, +}; + +static struct platform_driver cpm_error_driver = { + .driver = { + .name = "cpm-error", + .of_match_table = cpm_error_ids, + }, + .probe = cpm_error_probe, +}; + +static int __init cpm_error_init(void) +{ + return platform_driver_register(&cpm_error_driver); +} +subsys_initcall(cpm_error_init); diff --git a/arch/powerpc/platforms/8xx/cpm1.c b/arch/powerpc/platforms/8xx/cpm1.c index c58b6f1c40e3..bb38c8d8f8de 100644 --- a/arch/powerpc/platforms/8xx/cpm1.c +++ b/arch/powerpc/platforms/8xx/cpm1.c @@ -33,12 +33,12 @@ #include <linux/module.h> #include <linux/spinlock.h> #include <linux/slab.h> +#include <linux/of_irq.h> #include <asm/page.h> #include <asm/8xx_immap.h> #include <asm/cpm1.h> #include <asm/io.h> #include <asm/rheap.h> -#include <asm/prom.h> #include <asm/cpm.h> #include <asm/fs_pd.h> @@ -51,145 +51,6 @@ cpm8xx_t __iomem *cpmp; /* Pointer to comm processor space */ immap_t __iomem *mpc8xx_immr = (void __iomem *)VIRT_IMMR_BASE; -static cpic8xx_t __iomem *cpic_reg; - -static struct irq_domain *cpm_pic_host; - -static void cpm_mask_irq(struct irq_data *d) -{ - unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d); - - clrbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec)); -} - -static void cpm_unmask_irq(struct irq_data *d) -{ - unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d); - - setbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec)); -} - -static void cpm_end_irq(struct irq_data *d) -{ - unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d); - - out_be32(&cpic_reg->cpic_cisr, (1 << cpm_vec)); -} - -static struct irq_chip cpm_pic = { - .name = "CPM PIC", - .irq_mask = cpm_mask_irq, - .irq_unmask = cpm_unmask_irq, - .irq_eoi = cpm_end_irq, -}; - -int cpm_get_irq(void) -{ - int cpm_vec; - - /* - * Get the vector by setting the ACK bit and then reading - * the register. - */ - out_be16(&cpic_reg->cpic_civr, 1); - cpm_vec = in_be16(&cpic_reg->cpic_civr); - cpm_vec >>= 11; - - return irq_linear_revmap(cpm_pic_host, cpm_vec); -} - -static int cpm_pic_host_map(struct irq_domain *h, unsigned int virq, - irq_hw_number_t hw) -{ - pr_debug("cpm_pic_host_map(%d, 0x%lx)\n", virq, hw); - - irq_set_status_flags(virq, IRQ_LEVEL); - irq_set_chip_and_handler(virq, &cpm_pic, handle_fasteoi_irq); - return 0; -} - -/* - * The CPM can generate the error interrupt when there is a race condition - * between generating and masking interrupts. All we have to do is ACK it - * and return. This is a no-op function so we don't need any special - * tests in the interrupt handler. 
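The platform driver added in cpm1-ic.c above replaces this open-coded setup. For orientation, here is a condensed sketch of its cascade pattern, with invented names rather than the driver's own:

#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <asm/io.h>

struct example_pic {
	struct irq_domain *domain;
	u16 __iomem *civr;			/* vector register, assumed layout */
};

static unsigned int example_read_vector(struct example_pic *pic)
{
	out_be16(pic->civr, 1);			/* set the ACK bit, latching the vector */
	return in_be16(pic->civr) >> 11;	/* vector sits in the top bits */
}

/* Chained handler on the parent interrupt: decode the child vector and
 * re-dispatch it through the child domain's fast reverse map. */
static void example_cascade(struct irq_desc *desc)
{
	struct example_pic *pic = irq_desc_get_handler_data(desc);
	unsigned int virq = irq_linear_revmap(pic->domain,
					      example_read_vector(pic));

	if (virq)
		generic_handle_irq(virq);
}

Carrying the controller state in a per-instance struct handed to irq_set_handler_data(), as the new driver does, is what lets the file-scope cpic_reg/cpm_pic_host globals below be deleted.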
- */ -static irqreturn_t cpm_error_interrupt(int irq, void *dev) -{ - return IRQ_HANDLED; -} - -static const struct irq_domain_ops cpm_pic_host_ops = { - .map = cpm_pic_host_map, -}; - -unsigned int __init cpm_pic_init(void) -{ - struct device_node *np = NULL; - struct resource res; - unsigned int sirq = 0, hwirq, eirq; - int ret; - - pr_debug("cpm_pic_init\n"); - - np = of_find_compatible_node(NULL, NULL, "fsl,cpm1-pic"); - if (np == NULL) - np = of_find_compatible_node(NULL, "cpm-pic", "CPM"); - if (np == NULL) { - printk(KERN_ERR "CPM PIC init: can not find cpm-pic node\n"); - return sirq; - } - - ret = of_address_to_resource(np, 0, &res); - if (ret) - goto end; - - cpic_reg = ioremap(res.start, resource_size(&res)); - if (cpic_reg == NULL) - goto end; - - sirq = irq_of_parse_and_map(np, 0); - if (!sirq) - goto end; - - /* Initialize the CPM interrupt controller. */ - hwirq = (unsigned int)virq_to_hw(sirq); - out_be32(&cpic_reg->cpic_cicr, - (CICR_SCD_SCC4 | CICR_SCC_SCC3 | CICR_SCB_SCC2 | CICR_SCA_SCC1) | - ((hwirq/2) << 13) | CICR_HP_MASK); - - out_be32(&cpic_reg->cpic_cimr, 0); - - cpm_pic_host = irq_domain_add_linear(np, 64, &cpm_pic_host_ops, NULL); - if (cpm_pic_host == NULL) { - printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n"); - sirq = 0; - goto end; - } - - /* Install our own error handler. */ - np = of_find_compatible_node(NULL, NULL, "fsl,cpm1"); - if (np == NULL) - np = of_find_node_by_type(NULL, "cpm"); - if (np == NULL) { - printk(KERN_ERR "CPM PIC init: can not find cpm node\n"); - goto end; - } - - eirq = irq_of_parse_and_map(np, 0); - if (!eirq) - goto end; - - if (request_irq(eirq, cpm_error_interrupt, IRQF_NO_THREAD, "error", - NULL)) - printk(KERN_ERR "Could not allocate CPM error IRQ!"); - - setbits32(&cpic_reg->cpic_cicr, CICR_IEN); - -end: - of_node_put(np); - return sirq; -} void __init cpm_reset(void) { @@ -280,6 +141,7 @@ cpm_setbrg(uint brg, uint rate) out_be32(bp, (((BRG_UART_CLK_DIV16 / rate) - 1) << 1) | CPM_BRG_EN | CPM_BRG_DIV16); } +EXPORT_SYMBOL(cpm_setbrg); struct cpm_ioport16 { __be16 dir, par, odr_sor, dat, intr; diff --git a/arch/powerpc/platforms/8xx/ep88xc.c b/arch/powerpc/platforms/8xx/ep88xc.c index ebcf34a14789..b3b22520b435 100644 --- a/arch/powerpc/platforms/8xx/ep88xc.c +++ b/arch/powerpc/platforms/8xx/ep88xc.c @@ -20,6 +20,7 @@ #include <asm/cpm1.h> #include "mpc8xx.h" +#include "pic.h" struct cpm_pin { int port, pin, flags; @@ -166,7 +167,7 @@ define_machine(ep88xc) { .name = "Embedded Planet EP88xC", .probe = ep88xc_probe, .setup_arch = ep88xc_setup_arch, - .init_IRQ = mpc8xx_pics_init, + .init_IRQ = mpc8xx_pic_init, .get_irq = mpc8xx_get_irq, .restart = mpc8xx_restart, .calibrate_decr = mpc8xx_calibrate_decr, diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c index df4d57d07f9a..24f358f86d16 100644 --- a/arch/powerpc/platforms/8xx/m8xx_setup.c +++ b/arch/powerpc/platforms/8xx/m8xx_setup.c @@ -17,10 +17,11 @@ #include <linux/time.h> #include <linux/rtc.h> #include <linux/fsl_devices.h> +#include <linux/of.h> +#include <linux/of_irq.h> #include <asm/io.h> #include <asm/8xx_immap.h> -#include <asm/prom.h> #include <asm/fs_pd.h> #include <mm/mmu_decl.h> @@ -28,9 +29,6 @@ #include "mpc8xx.h" -extern int cpm_pic_init(void); -extern int cpm_get_irq(void); - /* A place holder for time base interrupts, if they are ever enabled. 
*/ static irqreturn_t timebase_interrupt(int irq, void *dev) { @@ -207,28 +205,3 @@ void __noreturn mpc8xx_restart(char *cmd) in_8(&clk_r->res[0]); panic("Restart failed\n"); } - -static void cpm_cascade(struct irq_desc *desc) -{ - generic_handle_irq(cpm_get_irq()); -} - -/* Initialize the internal interrupt controllers. The number of - * interrupts supported can vary with the processor type, and the - * 82xx family can have up to 64. - * External interrupts can be either edge or level triggered, and - * need to be initialized by the appropriate driver. - */ -void __init mpc8xx_pics_init(void) -{ - int irq; - - if (mpc8xx_pic_init()) { - printk(KERN_ERR "Failed interrupt 8xx controller initialization\n"); - return; - } - - irq = cpm_pic_init(); - if (irq) - irq_set_chained_handler(irq, cpm_cascade); -} diff --git a/arch/powerpc/platforms/8xx/mpc86xads_setup.c b/arch/powerpc/platforms/8xx/mpc86xads_setup.c index 8d02f5ff4481..03267e4a44a9 100644 --- a/arch/powerpc/platforms/8xx/mpc86xads_setup.c +++ b/arch/powerpc/platforms/8xx/mpc86xads_setup.c @@ -29,6 +29,7 @@ #include "mpc86xads.h" #include "mpc8xx.h" +#include "pic.h" struct cpm_pin { int port, pin, flags; @@ -140,7 +141,7 @@ define_machine(mpc86x_ads) { .name = "MPC86x ADS", .probe = mpc86xads_probe, .setup_arch = mpc86xads_setup_arch, - .init_IRQ = mpc8xx_pics_init, + .init_IRQ = mpc8xx_pic_init, .get_irq = mpc8xx_get_irq, .restart = mpc8xx_restart, .calibrate_decr = mpc8xx_calibrate_decr, diff --git a/arch/powerpc/platforms/8xx/mpc885ads_setup.c b/arch/powerpc/platforms/8xx/mpc885ads_setup.c index a0c83c1905c6..b1e39f96de00 100644 --- a/arch/powerpc/platforms/8xx/mpc885ads_setup.c +++ b/arch/powerpc/platforms/8xx/mpc885ads_setup.c @@ -42,6 +42,7 @@ #include "mpc885ads.h" #include "mpc8xx.h" +#include "pic.h" static u32 __iomem *bcsr, *bcsr5; @@ -216,7 +217,7 @@ define_machine(mpc885_ads) { .name = "Freescale MPC885 ADS", .probe = mpc885ads_probe, .setup_arch = mpc885ads_setup_arch, - .init_IRQ = mpc8xx_pics_init, + .init_IRQ = mpc8xx_pic_init, .get_irq = mpc8xx_get_irq, .restart = mpc8xx_restart, .calibrate_decr = mpc8xx_calibrate_decr, diff --git a/arch/powerpc/platforms/8xx/mpc8xx.h b/arch/powerpc/platforms/8xx/mpc8xx.h index 31cc2ecace42..79fae3324866 100644 --- a/arch/powerpc/platforms/8xx/mpc8xx.h +++ b/arch/powerpc/platforms/8xx/mpc8xx.h @@ -15,7 +15,6 @@ extern void __noreturn mpc8xx_restart(char *cmd); extern void mpc8xx_calibrate_decr(void); extern int mpc8xx_set_rtc_time(struct rtc_time *tm); extern void mpc8xx_get_rtc_time(struct rtc_time *tm); -extern void mpc8xx_pics_init(void); extern unsigned int mpc8xx_get_irq(void); #endif /* __MPC8xx_H */ diff --git a/arch/powerpc/platforms/8xx/pic.c b/arch/powerpc/platforms/8xx/pic.c index 04a6abf14c29..ea6b0e523c60 100644 --- a/arch/powerpc/platforms/8xx/pic.c +++ b/arch/powerpc/platforms/8xx/pic.c @@ -4,7 +4,8 @@ #include <linux/signal.h> #include <linux/irq.h> #include <linux/dma-mapping.h> -#include <asm/prom.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <asm/irq.h> #include <asm/io.h> #include <asm/8xx_immap.h> @@ -14,8 +15,6 @@ #define PIC_VEC_SPURRIOUS 15 -extern int cpm_get_irq(struct pt_regs *regs); - static struct irq_domain *mpc8xx_pic_host; static unsigned long mpc8xx_cached_irq_mask; static sysconf8xx_t __iomem *siu_reg; @@ -125,7 +124,7 @@ static const struct irq_domain_ops mpc8xx_pic_host_ops = { .xlate = mpc8xx_pic_host_xlate, }; -int __init mpc8xx_pic_init(void) +void __init mpc8xx_pic_init(void) { struct resource res; struct device_node 
*np; @@ -136,7 +135,7 @@ int __init mpc8xx_pic_init(void) np = of_find_node_by_type(NULL, "mpc8xx-pic"); if (np == NULL) { printk(KERN_ERR "Could not find fsl,pq1-pic node\n"); - return -ENOMEM; + return; } ret = of_address_to_resource(np, 0, &res); @@ -144,20 +143,13 @@ int __init mpc8xx_pic_init(void) goto out; siu_reg = ioremap(res.start, resource_size(&res)); - if (siu_reg == NULL) { - ret = -EINVAL; + if (!siu_reg) goto out; - } mpc8xx_pic_host = irq_domain_add_linear(np, 64, &mpc8xx_pic_host_ops, NULL); - if (mpc8xx_pic_host == NULL) { + if (!mpc8xx_pic_host) printk(KERN_ERR "MPC8xx PIC: failed to allocate irq host!\n"); - ret = -ENOMEM; - goto out; - } - ret = 0; out: of_node_put(np); - return ret; } diff --git a/arch/powerpc/platforms/8xx/pic.h b/arch/powerpc/platforms/8xx/pic.h index 9fe00eebdc8b..c70f1b446f94 100644 --- a/arch/powerpc/platforms/8xx/pic.h +++ b/arch/powerpc/platforms/8xx/pic.h @@ -4,7 +4,7 @@ #include <linux/irq.h> #include <linux/interrupt.h> -int mpc8xx_pic_init(void); +void mpc8xx_pic_init(void); unsigned int mpc8xx_get_irq(void); /* diff --git a/arch/powerpc/platforms/8xx/tqm8xx_setup.c b/arch/powerpc/platforms/8xx/tqm8xx_setup.c index 4cea8b1afa44..3725d51248df 100644 --- a/arch/powerpc/platforms/8xx/tqm8xx_setup.c +++ b/arch/powerpc/platforms/8xx/tqm8xx_setup.c @@ -43,6 +43,7 @@ #include <asm/udbg.h> #include "mpc8xx.h" +#include "pic.h" struct cpm_pin { int port, pin, flags; @@ -142,7 +143,7 @@ define_machine(tqm8xx) { .name = "TQM8xx", .probe = tqm8xx_probe, .setup_arch = tqm8xx_setup_arch, - .init_IRQ = mpc8xx_pics_init, + .init_IRQ = mpc8xx_pic_init, .get_irq = mpc8xx_get_irq, .restart = mpc8xx_restart, .calibrate_decr = mpc8xx_calibrate_decr, diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index e2e1fec91c6e..9e2df4b66478 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -104,6 +104,7 @@ config PPC_BOOK3S_64 select HAVE_MOVE_PUD select IRQ_WORK select PPC_64S_HASH_MMU if !PPC_RADIX_MMU + select KASAN_VMALLOC if KASAN config PPC_BOOK3E_64 bool "Embedded processors" @@ -377,7 +378,6 @@ config SPE config PPC_64S_HASH_MMU bool "Hash MMU Support" depends on PPC_BOOK3S_64 - select PPC_MM_SLICES default y help Enable support for the Power ISA Hash style MMU. This is implemented @@ -451,9 +451,6 @@ config PPC_BOOK3E_MMU def_bool y depends on FSL_BOOKE || PPC_BOOK3E -config PPC_MM_SLICES - bool - config PPC_HAVE_PMU_SUPPORT bool @@ -556,6 +553,12 @@ config CPU_LITTLE_ENDIAN endchoice +config PPC64_ELF_ABI_V1 + def_bool PPC64 && CPU_BIG_ENDIAN + +config PPC64_ELF_ABI_V2 + def_bool PPC64 && CPU_LITTLE_ENDIAN + config PPC64_BOOT_WRAPPER def_bool n depends on CPU_LITTLE_ENDIAN diff --git a/arch/powerpc/platforms/amigaone/setup.c b/arch/powerpc/platforms/amigaone/setup.c index 9d252c554f7f..397ce6a40bd0 100644 --- a/arch/powerpc/platforms/amigaone/setup.c +++ b/arch/powerpc/platforms/amigaone/setup.c @@ -8,6 +8,7 @@ * Copyright 2003 by Hans-Joerg Frieden and Thomas Frieden */ +#include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/of.h> #include <linux/of_address.h> diff --git a/arch/powerpc/platforms/book3s/vas-api.c b/arch/powerpc/platforms/book3s/vas-api.c index f9a1615b74da..c0799fb26b6d 100644 --- a/arch/powerpc/platforms/book3s/vas-api.c +++ b/arch/powerpc/platforms/book3s/vas-api.c @@ -30,7 +30,7 @@ * * where "vas_copy" and "vas_paste" are defined in copy-paste.h. * copy/paste returns to the user space directly. 
So refer NX hardware - * documententation for exact copy/paste usage and completion / error + * documentation for exact copy/paste usage and completion / error * conditions. */ diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c index 354a58c1e6f2..f3291e957a19 100644 --- a/arch/powerpc/platforms/cell/axon_msi.c +++ b/arch/powerpc/platforms/cell/axon_msi.c @@ -13,10 +13,10 @@ #include <linux/of_platform.h> #include <linux/slab.h> #include <linux/debugfs.h> +#include <linux/of_irq.h> #include <asm/dcr.h> #include <asm/machdep.h> -#include <asm/prom.h> #include "cell.h" diff --git a/arch/powerpc/platforms/cell/cbe_powerbutton.c b/arch/powerpc/platforms/cell/cbe_powerbutton.c index bda589dfb051..a3ee397486f6 100644 --- a/arch/powerpc/platforms/cell/cbe_powerbutton.c +++ b/arch/powerpc/platforms/cell/cbe_powerbutton.c @@ -9,9 +9,9 @@ #include <linux/input.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/platform_device.h> #include <asm/pmi.h> -#include <asm/prom.h> static struct input_dev *button_dev; static struct platform_device *button_pdev; diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c index 1c4c53bec66c..316e533afc00 100644 --- a/arch/powerpc/platforms/cell/cbe_regs.c +++ b/arch/powerpc/platforms/cell/cbe_regs.c @@ -10,12 +10,12 @@ #include <linux/percpu.h> #include <linux/types.h> #include <linux/export.h> +#include <linux/of_address.h> #include <linux/of_device.h> #include <linux/of_platform.h> #include <linux/pgtable.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/ptrace.h> #include <asm/cell-regs.h> @@ -23,7 +23,7 @@ * Current implementation uses "cpu" nodes. We build our own mapping * array of cpu numbers to cpu nodes locally for now to allow interrupt * time code to have a fast path rather than call of_get_cpu_node(). 
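The caching idea that the cbe_regs comment describes can be sketched as follows, with assumed names — resolve each CPU's node once at boot so interrupt-time code only indexes an array:

#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/of.h>

static struct device_node *example_cpu_node[NR_CPUS];

static void __init example_build_cpu_map(void)
{
	unsigned int cpu;

	/* of_get_cpu_node() walks the device tree, so do it once up
	 * front rather than from the interrupt fast path. */
	for_each_possible_cpu(cpu)
		example_cpu_node[cpu] = of_get_cpu_node(cpu, NULL);
}

As the comment notes, a scheme like this holds node references, which is why CPU hotplug would need a notifier to drop the reference for a departing CPU.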
If - * we implement cpu hotplug, we'll have to install an appropriate norifier + * we implement cpu hotplug, we'll have to install an appropriate notifier * in order to release references to the cpu going away */ static struct cbe_regs_map diff --git a/arch/powerpc/platforms/cell/cbe_thermal.c b/arch/powerpc/platforms/cell/cbe_thermal.c index abb5e527b4db..2f45428e32c8 100644 --- a/arch/powerpc/platforms/cell/cbe_thermal.c +++ b/arch/powerpc/platforms/cell/cbe_thermal.c @@ -39,7 +39,6 @@ #include <linux/stringify.h> #include <asm/spu.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/cell-regs.h> #include "spu_priv1_mmio.h" diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index 0873a7a20271..03ee8152ee97 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ b/arch/powerpc/platforms/cell/interrupt.c @@ -18,15 +18,16 @@ #include <linux/interrupt.h> #include <linux/irq.h> +#include <linux/irqdomain.h> #include <linux/export.h> #include <linux/percpu.h> #include <linux/types.h> #include <linux/ioport.h> #include <linux/kernel_stat.h> #include <linux/pgtable.h> +#include <linux/of_address.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/ptrace.h> #include <asm/machdep.h> #include <asm/cell-regs.h> diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index 25e726bf0172..0ca3efeef293 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c @@ -12,8 +12,10 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/interrupt.h> +#include <linux/irqdomain.h> #include <linux/notifier.h> #include <linux/of.h> +#include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/slab.h> #include <linux/memblock.h> @@ -582,7 +584,7 @@ static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action, { struct device *dev = data; - /* We are only intereted in device addition */ + /* We are only interested in device addition */ if (action != BUS_NOTIFY_ADD_DEVICE) return 0; diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c index dff8d5e7ab82..58d967ee38b3 100644 --- a/arch/powerpc/platforms/cell/pervasive.c +++ b/arch/powerpc/platforms/cell/pervasive.c @@ -19,7 +19,6 @@ #include <asm/io.h> #include <asm/machdep.h> -#include <asm/prom.h> #include <asm/reg.h> #include <asm/cell-regs.h> #include <asm/cpu_has_feature.h> diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c index 4325c05bedd9..8d934ea6270c 100644 --- a/arch/powerpc/platforms/cell/ras.c +++ b/arch/powerpc/platforms/cell/ras.c @@ -12,11 +12,11 @@ #include <linux/reboot.h> #include <linux/kexec.h> #include <linux/crash_dump.h> +#include <linux/of.h> #include <asm/kexec.h> #include <asm/reg.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/machdep.h> #include <asm/rtas.h> #include <asm/cell-regs.h> diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c index edefa785d2ef..52de014983c9 100644 --- a/arch/powerpc/platforms/cell/setup.c +++ b/arch/powerpc/platforms/cell/setup.c @@ -31,7 +31,6 @@ #include <asm/mmu.h> #include <asm/processor.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/rtas.h> #include <asm/pci-bridge.h> #include <asm/iommu.h> diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c index d7ab868aab54..31ce00b52a32 100644 --- a/arch/powerpc/platforms/cell/smp.c +++ b/arch/powerpc/platforms/cell/smp.c @@ -28,7 +28,6 @@ 
#include <asm/irq.h> #include <asm/page.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/smp.h> #include <asm/paca.h> #include <asm/machdep.h> diff --git a/arch/powerpc/platforms/cell/spider-pci.c b/arch/powerpc/platforms/cell/spider-pci.c index a1c293f42a1f..e36ebd84f55b 100644 --- a/arch/powerpc/platforms/cell/spider-pci.c +++ b/arch/powerpc/platforms/cell/spider-pci.c @@ -8,6 +8,7 @@ #undef DEBUG #include <linux/kernel.h> +#include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/slab.h> #include <linux/io.h> @@ -81,7 +82,7 @@ static int __init spiderpci_pci_setup_chip(struct pci_controller *phb, /* * On CellBlade, we can't know that which XDR memory is used by * kmalloc() to allocate dummy_page_va. - * In order to imporve the performance, the XDR which is used to + * In order to improve the performance, the XDR which is used to * allocate dummy_page_va is the nearest the spider-pci. * We have to select the CBE which is the nearest the spider-pci * to allocate memory from the best XDR, but I don't know that diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c index 8af75867cb42..11df737c8c6a 100644 --- a/arch/powerpc/platforms/cell/spider-pic.c +++ b/arch/powerpc/platforms/cell/spider-pic.c @@ -10,9 +10,10 @@ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/ioport.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/pgtable.h> -#include <asm/prom.h> #include <asm/io.h> #include "interrupt.h" diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c index 2eecba3345c3..7bd0b563e163 100644 --- a/arch/powerpc/platforms/cell/spu_base.c +++ b/arch/powerpc/platforms/cell/spu_base.c @@ -24,7 +24,6 @@ #include <asm/spu_priv1.h> #include <asm/spu_csa.h> #include <asm/xmon.h> -#include <asm/prom.h> #include <asm/kexec.h> const struct spu_management_ops *spu_management_ops; diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c index ddf8742f09a3..ae09c5a91b40 100644 --- a/arch/powerpc/platforms/cell/spu_manage.c +++ b/arch/powerpc/platforms/cell/spu_manage.c @@ -16,11 +16,12 @@ #include <linux/io.h> #include <linux/mutex.h> #include <linux/device.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <asm/spu.h> #include <asm/spu_priv1.h> #include <asm/firmware.h> -#include <asm/prom.h> #include "spufs/spufs.h" #include "interrupt.h" @@ -457,7 +458,7 @@ static void __init init_affinity_node(int cbe) /* * Walk through each phandle in vicinity property of the spu - * (tipically two vicinity phandles per spe node) + * (typically two vicinity phandles per spe node) */ for (i = 0; i < (lenp / sizeof(phandle)); i++) { if (vic_handles[i] == avoid_ph) diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.c b/arch/powerpc/platforms/cell/spu_priv1_mmio.c index 0c2e6bb6fe51..d150e3987304 100644 --- a/arch/powerpc/platforms/cell/spu_priv1_mmio.c +++ b/arch/powerpc/platforms/cell/spu_priv1_mmio.c @@ -19,7 +19,6 @@ #include <asm/spu.h> #include <asm/spu_priv1.h> #include <asm/firmware.h> -#include <asm/prom.h> #include "interrupt.h" #include "spu_priv1_mmio.h" diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index 4c702192412f..34334c32b7f5 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -21,10 +21,10 @@ #include <linux/namei.h> #include <linux/pagemap.h> #include <linux/poll.h> +#include 
<linux/of.h> #include <linux/seq_file.h> #include <linux/slab.h> -#include <asm/prom.h> #include <asm/spu.h> #include <asm/spu_priv1.h> #include <linux/uaccess.h> diff --git a/arch/powerpc/platforms/chrp/nvram.c b/arch/powerpc/platforms/chrp/nvram.c index e820332b59a0..dab78076fedb 100644 --- a/arch/powerpc/platforms/chrp/nvram.c +++ b/arch/powerpc/platforms/chrp/nvram.c @@ -10,7 +10,7 @@ #include <linux/init.h> #include <linux/spinlock.h> #include <linux/uaccess.h> -#include <asm/prom.h> +#include <linux/of.h> #include <asm/machdep.h> #include <asm/rtas.h> #include "chrp.h" diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c index 76e6256cb0a7..6f6598e771ff 100644 --- a/arch/powerpc/platforms/chrp/pci.c +++ b/arch/powerpc/platforms/chrp/pci.c @@ -9,11 +9,11 @@ #include <linux/string.h> #include <linux/init.h> #include <linux/pgtable.h> +#include <linux/of_address.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/hydra.h> -#include <asm/prom.h> #include <asm/machdep.h> #include <asm/sections.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c index 3cfc382841e5..ec63c0558db6 100644 --- a/arch/powerpc/platforms/chrp/setup.c +++ b/arch/powerpc/platforms/chrp/setup.c @@ -32,9 +32,11 @@ #include <linux/root_dev.h> #include <linux/initrd.h> #include <linux/timer.h> +#include <linux/of_address.h> +#include <linux/of_fdt.h> +#include <linux/of_irq.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/pci-bridge.h> #include <asm/dma.h> #include <asm/machdep.h> @@ -251,7 +253,7 @@ static void __noreturn briq_restart(char *cmd) * Per default, input/output-device points to the keyboard/screen * If no card is installed, the built-in serial port is used as a fallback. * But unfortunately, the firmware does not connect /chosen/{stdin,stdout} - * the the built-in serial node. Instead, a /failsafe node is created. + * to the built-in serial node. Instead, a /failsafe node is created. 
*/ static __init void chrp_init(void) { diff --git a/arch/powerpc/platforms/chrp/smp.c b/arch/powerpc/platforms/chrp/smp.c index e30cd2915e54..ab95155647a4 100644 --- a/arch/powerpc/platforms/chrp/smp.c +++ b/arch/powerpc/platforms/chrp/smp.c @@ -24,7 +24,6 @@ #include <asm/page.h> #include <asm/sections.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/smp.h> #include <asm/machdep.h> #include <asm/mpic.h> diff --git a/arch/powerpc/platforms/chrp/time.c b/arch/powerpc/platforms/chrp/time.c index acde7bbe0716..d46417e3d8e0 100644 --- a/arch/powerpc/platforms/chrp/time.c +++ b/arch/powerpc/platforms/chrp/time.c @@ -21,17 +21,15 @@ #include <linux/init.h> #include <linux/bcd.h> #include <linux/ioport.h> +#include <linux/of_address.h> #include <asm/io.h> #include <asm/nvram.h> -#include <asm/prom.h> #include <asm/sections.h> #include <asm/time.h> #include <platforms/chrp/chrp.h> -extern spinlock_t rtc_lock; - #define NVRAM_AS0 0x74 #define NVRAM_AS1 0x75 #define NVRAM_DATA 0x77 diff --git a/arch/powerpc/platforms/embedded6xx/gamecube.c b/arch/powerpc/platforms/embedded6xx/gamecube.c index ade928f7ea73..5c2575adcc7e 100644 --- a/arch/powerpc/platforms/embedded6xx/gamecube.c +++ b/arch/powerpc/platforms/embedded6xx/gamecube.c @@ -16,7 +16,6 @@ #include <asm/io.h> #include <asm/machdep.h> -#include <asm/prom.h> #include <asm/time.h> #include <asm/udbg.h> diff --git a/arch/powerpc/platforms/embedded6xx/holly.c b/arch/powerpc/platforms/embedded6xx/holly.c index 07e71ba3e846..78f2378d9223 100644 --- a/arch/powerpc/platforms/embedded6xx/holly.c +++ b/arch/powerpc/platforms/embedded6xx/holly.c @@ -22,12 +22,13 @@ #include <linux/serial.h> #include <linux/tty.h> #include <linux/serial_core.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/extable.h> #include <asm/time.h> #include <asm/machdep.h> -#include <asm/prom.h> #include <asm/udbg.h> #include <asm/tsi108.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/embedded6xx/linkstation.c b/arch/powerpc/platforms/embedded6xx/linkstation.c index eb8342e7f84e..1830e1ac1f8f 100644 --- a/arch/powerpc/platforms/embedded6xx/linkstation.c +++ b/arch/powerpc/platforms/embedded6xx/linkstation.c @@ -15,7 +15,6 @@ #include <linux/of_platform.h> #include <asm/time.h> -#include <asm/prom.h> #include <asm/mpic.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/embedded6xx/ls_uart.c b/arch/powerpc/platforms/embedded6xx/ls_uart.c index 9d891bd5df5a..0133e175a0fc 100644 --- a/arch/powerpc/platforms/embedded6xx/ls_uart.c +++ b/arch/powerpc/platforms/embedded6xx/ls_uart.c @@ -14,8 +14,8 @@ #include <linux/delay.h> #include <linux/serial_reg.h> #include <linux/serial_8250.h> +#include <linux/of.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/termbits.h> #include "mpc10x.h" diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c index 9eb9abb5bce2..8b2b42210356 100644 --- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c +++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c @@ -27,10 +27,10 @@ #include <linux/serial.h> #include <linux/tty.h> #include <linux/serial_core.h> +#include <linux/of_irq.h> #include <asm/time.h> #include <asm/machdep.h> -#include <asm/prom.h> #include <asm/udbg.h> #include <asm/tsi108.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/embedded6xx/mvme5100.c b/arch/powerpc/platforms/embedded6xx/mvme5100.c index c06a0490d157..4854cc592cec 100644 --- 
a/arch/powerpc/platforms/embedded6xx/mvme5100.c +++ b/arch/powerpc/platforms/embedded6xx/mvme5100.c @@ -12,12 +12,12 @@ * Author: Stephen Chivers <schivers@csc.com> */ +#include <linux/of_irq.h> #include <linux/of_platform.h> #include <asm/i8259.h> #include <asm/pci-bridge.h> #include <asm/mpic.h> -#include <asm/prom.h> #include <mm/mmu_decl.h> #include <asm/udbg.h> diff --git a/arch/powerpc/platforms/embedded6xx/storcenter.c b/arch/powerpc/platforms/embedded6xx/storcenter.c index e188b90f7016..5f16e80b6ed6 100644 --- a/arch/powerpc/platforms/embedded6xx/storcenter.c +++ b/arch/powerpc/platforms/embedded6xx/storcenter.c @@ -17,7 +17,6 @@ #include <linux/of_platform.h> #include <asm/time.h> -#include <asm/prom.h> #include <asm/mpic.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c b/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c index 5aea46566233..e02bdabf358c 100644 --- a/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c +++ b/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c @@ -7,10 +7,11 @@ * Copyright (C) 2008,2009 Albert Herranz */ +#include <linux/of_address.h> + #include <mm/mmu_decl.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/udbg.h> #include <asm/fixmap.h> diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c index f60ade584bb2..9e03ff8f631c 100644 --- a/arch/powerpc/platforms/embedded6xx/wii.c +++ b/arch/powerpc/platforms/embedded6xx/wii.c @@ -13,13 +13,13 @@ #include <linux/init.h> #include <linux/irq.h> #include <linux/seq_file.h> +#include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/memblock.h> #include <mm/mmu_decl.h> #include <asm/io.h> #include <asm/machdep.h> -#include <asm/prom.h> #include <asm/time.h> #include <asm/udbg.h> diff --git a/arch/powerpc/platforms/fsl_uli1575.c b/arch/powerpc/platforms/fsl_uli1575.c index 044a20c1fbde..84afae7a2561 100644 --- a/arch/powerpc/platforms/fsl_uli1575.c +++ b/arch/powerpc/platforms/fsl_uli1575.c @@ -10,6 +10,7 @@ #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/mc146818rtc.h> +#include <linux/of_irq.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c index 37875e478b3a..b911b31717cc 100644 --- a/arch/powerpc/platforms/maple/pci.c +++ b/arch/powerpc/platforms/maple/pci.c @@ -12,10 +12,10 @@ #include <linux/string.h> #include <linux/init.h> #include <linux/irq.h> +#include <linux/of_irq.h> #include <asm/sections.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/pci-bridge.h> #include <asm/machdep.h> #include <asm/iommu.h> diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c index 4e9ad5bf3efb..c26c379e1cc8 100644 --- a/arch/powerpc/platforms/maple/setup.c +++ b/arch/powerpc/platforms/maple/setup.c @@ -36,12 +36,12 @@ #include <linux/serial.h> #include <linux/smp.h> #include <linux/bitops.h> +#include <linux/of_address.h> #include <linux/of_device.h> #include <linux/memblock.h> #include <asm/processor.h> #include <asm/sections.h> -#include <asm/prom.h> #include <asm/io.h> #include <asm/pci-bridge.h> #include <asm/iommu.h> diff --git a/arch/powerpc/platforms/maple/time.c b/arch/powerpc/platforms/maple/time.c index 78209bb7629c..823e219ef8ee 100644 --- a/arch/powerpc/platforms/maple/time.c +++ b/arch/powerpc/platforms/maple/time.c @@ -19,9 +19,9 @@ #include <linux/interrupt.h> #include <linux/mc146818rtc.h> #include <linux/bcd.h> +#include <linux/of_address.h> #include 
<asm/sections.h> -#include <asm/prom.h> #include <asm/io.h> #include <asm/machdep.h> #include <asm/time.h> diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c index 26427311fc72..1be1f18f6f09 100644 --- a/arch/powerpc/platforms/pasemi/dma_lib.c +++ b/arch/powerpc/platforms/pasemi/dma_lib.c @@ -10,6 +10,8 @@ #include <linux/pci.h> #include <linux/slab.h> #include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/sched.h> #include <asm/pasemi_dma.h> diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c index 5be7242fbd86..0a38663d44ed 100644 --- a/arch/powerpc/platforms/pasemi/iommu.c +++ b/arch/powerpc/platforms/pasemi/iommu.c @@ -11,6 +11,7 @@ #include <linux/types.h> #include <linux/spinlock.h> #include <linux/pci.h> +#include <linux/of.h> #include <asm/iommu.h> #include <asm/machdep.h> #include <asm/firmware.h> diff --git a/arch/powerpc/platforms/pasemi/misc.c b/arch/powerpc/platforms/pasemi/misc.c index 1bf65d02d3ba..f859ada29074 100644 --- a/arch/powerpc/platforms/pasemi/misc.c +++ b/arch/powerpc/platforms/pasemi/misc.c @@ -11,6 +11,7 @@ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/of.h> +#include <linux/of_irq.h> #include <linux/i2c.h> #ifdef CONFIG_I2C_BOARDINFO diff --git a/arch/powerpc/platforms/pasemi/msi.c b/arch/powerpc/platforms/pasemi/msi.c index ea1e41451408..dc1846660005 100644 --- a/arch/powerpc/platforms/pasemi/msi.c +++ b/arch/powerpc/platforms/pasemi/msi.c @@ -9,9 +9,9 @@ */ #include <linux/irq.h> +#include <linux/irqdomain.h> #include <linux/msi.h> #include <asm/mpic.h> -#include <asm/prom.h> #include <asm/hw_irq.h> #include <asm/ppc-pci.h> #include <asm/msi_bitmap.h> diff --git a/arch/powerpc/platforms/pasemi/pci.c b/arch/powerpc/platforms/pasemi/pci.c index d4b922759d6e..55f0160910bf 100644 --- a/arch/powerpc/platforms/pasemi/pci.c +++ b/arch/powerpc/platforms/pasemi/pci.c @@ -12,6 +12,7 @@ #include <linux/kernel.h> +#include <linux/of_address.h> #include <linux/pci.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c index f974bfe7fde1..2aef49e04dd4 100644 --- a/arch/powerpc/platforms/pasemi/setup.c +++ b/arch/powerpc/platforms/pasemi/setup.c @@ -18,8 +18,8 @@ #include <linux/pci.h> #include <linux/of_platform.h> #include <linux/gfp.h> +#include <linux/irqdomain.h> -#include <asm/prom.h> #include <asm/iommu.h> #include <asm/machdep.h> #include <asm/i8259.h> diff --git a/arch/powerpc/platforms/powermac/backlight.c b/arch/powerpc/platforms/powermac/backlight.c index 32224cb489d7..aeb79a8b3e10 100644 --- a/arch/powerpc/platforms/powermac/backlight.c +++ b/arch/powerpc/platforms/powermac/backlight.c @@ -15,7 +15,6 @@ #include <linux/pmu.h> #include <linux/atomic.h> #include <linux/export.h> -#include <asm/prom.h> #include <asm/backlight.h> #define OLD_BACKLIGHT_MAX 15 diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c index d20ef35e6d9d..72eb99aba40f 100644 --- a/arch/powerpc/platforms/powermac/bootx_init.c +++ b/arch/powerpc/platforms/powermac/bootx_init.c @@ -8,6 +8,7 @@ #include <linux/kernel.h> #include <linux/string.h> #include <linux/init.h> +#include <linux/of_fdt.h> #include <generated/utsrelease.h> #include <asm/sections.h> #include <asm/prom.h> @@ -243,7 +244,7 @@ static void __init bootx_scan_dt_build_strings(unsigned long base, DBG(" detected display ! 
adding properties names !\n"); bootx_dt_add_string("linux,boot-display", mem_end); bootx_dt_add_string("linux,opened", mem_end); - strlcpy(bootx_disp_path, namep, sizeof(bootx_disp_path)); + strscpy(bootx_disp_path, namep, sizeof(bootx_disp_path)); } /* get and store all property names */ diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c index e67c624f35a2..5cc958adba13 100644 --- a/arch/powerpc/platforms/powermac/feature.c +++ b/arch/powerpc/platforms/powermac/feature.c @@ -31,7 +31,6 @@ #include <asm/keylargo.h> #include <asm/uninorth.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/machdep.h> #include <asm/pmac_feature.h> #include <asm/dbdma.h> diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c index df89d916236d..c1c430c66dc9 100644 --- a/arch/powerpc/platforms/powermac/low_i2c.c +++ b/arch/powerpc/platforms/powermac/low_i2c.c @@ -40,10 +40,10 @@ #include <linux/mutex.h> #include <linux/i2c.h> #include <linux/slab.h> +#include <linux/of_irq.h> #include <asm/keylargo.h> #include <asm/uninorth.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/machdep.h> #include <asm/smu.h> #include <asm/pmac_pfunc.h> @@ -1472,7 +1472,7 @@ int __init pmac_i2c_init(void) smu_i2c_probe(); #endif - /* Now add plaform functions for some known devices */ + /* Now add platform functions for some known devices */ pmac_i2c_devscan(pmac_i2c_dev_create); return 0; diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c index de8fcb607290..fe2e0249cbc2 100644 --- a/arch/powerpc/platforms/powermac/nvram.c +++ b/arch/powerpc/platforms/powermac/nvram.c @@ -17,9 +17,9 @@ #include <linux/memblock.h> #include <linux/completion.h> #include <linux/spinlock.h> +#include <linux/of_address.h> #include <asm/sections.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/machdep.h> #include <asm/nvram.h> @@ -71,7 +71,7 @@ struct core99_header { static int nvram_naddrs; static volatile unsigned char __iomem *nvram_data; static int is_core_99; -static int core99_bank = 0; +static int core99_bank; static int nvram_partitions[3]; // XXX Turn that into a sem static DEFINE_RAW_SPINLOCK(nv_lock); diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c index e9abe0f2e7f0..d71359b5331c 100644 --- a/arch/powerpc/platforms/powermac/pci.c +++ b/arch/powerpc/platforms/powermac/pci.c @@ -12,11 +12,12 @@ #include <linux/string.h> #include <linux/init.h> #include <linux/irq.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/of_pci.h> #include <asm/sections.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/pci-bridge.h> #include <asm/machdep.h> #include <asm/pmac_feature.h> diff --git a/arch/powerpc/platforms/powermac/pfunc_core.c b/arch/powerpc/platforms/powermac/pfunc_core.c index 94df0a91b46f..22741ddfd5b2 100644 --- a/arch/powerpc/platforms/powermac/pfunc_core.c +++ b/arch/powerpc/platforms/powermac/pfunc_core.c @@ -12,8 +12,8 @@ #include <linux/slab.h> #include <linux/module.h> #include <linux/mutex.h> +#include <linux/of.h> -#include <asm/prom.h> #include <asm/pmac_pfunc.h> /* Debug */ @@ -685,7 +685,7 @@ static int pmf_add_functions(struct pmf_device *dev, void *driverdata) const int plen = strlen(PP_PREFIX); int count = 0; - for (pp = dev->node->properties; pp != 0; pp = pp->next) { + for_each_property_of_node(dev->node, pp) { const char *name; if (strncmp(pp->name, PP_PREFIX, plen) != 0) continue; diff 
--git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c index bb0566633af5..8c8d8e0a7d13 100644 --- a/arch/powerpc/platforms/powermac/pic.c +++ b/arch/powerpc/platforms/powermac/pic.c @@ -20,11 +20,13 @@ #include <linux/adb.h> #include <linux/minmax.h> #include <linux/pmu.h> +#include <linux/irqdomain.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <asm/sections.h> #include <asm/io.h> #include <asm/smp.h> -#include <asm/prom.h> #include <asm/pci-bridge.h> #include <asm/time.h> #include <asm/pmac_feature.h> @@ -382,7 +384,7 @@ static void __init pmac_pic_probe_oldstyle(void) #endif } -int of_irq_parse_oldworld(struct device_node *device, int index, +int of_irq_parse_oldworld(const struct device_node *device, int index, struct of_phandle_args *out_irq) { const u32 *ints = NULL; diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h index ba8d4e97095b..1b696f352640 100644 --- a/arch/powerpc/platforms/powermac/pmac.h +++ b/arch/powerpc/platforms/powermac/pmac.h @@ -16,6 +16,8 @@ struct rtc_time; extern int pmac_newworld; +void g5_phy_disable_cpu1(void); + extern long pmac_time_init(void); extern time64_t pmac_get_boot_time(void); extern void pmac_get_rtc_time(struct rtc_time *); diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index 974d4b49867b..f71735ec449f 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c @@ -50,7 +50,6 @@ #include <asm/reg.h> #include <asm/sections.h> -#include <asm/prom.h> #include <asm/io.h> #include <asm/pci-bridge.h> #include <asm/ohare.h> @@ -81,10 +80,6 @@ static int current_root_goodness = -1; #define DEFAULT_ROOT_DEVICE Root_SDA1 /* sda1 - slightly silly choice */ -#ifdef CONFIG_PPC64 -int sccdbg; -#endif - sys_ctrler_t sys_ctrler = SYS_CTRLER_UNKNOWN; EXPORT_SYMBOL(sys_ctrler); diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index da1efdc30d6c..d9df45741ece 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -22,6 +22,7 @@ #include <linux/sched/hotplug.h> #include <linux/smp.h> #include <linux/interrupt.h> +#include <linux/irqdomain.h> #include <linux/kernel_stat.h> #include <linux/delay.h> #include <linux/init.h> @@ -39,7 +40,6 @@ #include <asm/page.h> #include <asm/sections.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/smp.h> #include <asm/machdep.h> #include <asm/pmac_feature.h> @@ -875,8 +875,6 @@ static int smp_core99_cpu_online(unsigned int cpu) static void __init smp_core99_bringup_done(void) { - extern void __init g5_phy_disable_cpu1(void); - /* Close i2c bus if it was used for tb sync */ if (pmac_tb_clock_chip_host) pmac_i2c_close(pmac_tb_clock_chip_host); diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c index 31d6213a6c8f..4c5790aff1b5 100644 --- a/arch/powerpc/platforms/powermac/time.c +++ b/arch/powerpc/platforms/powermac/time.c @@ -24,9 +24,9 @@ #include <linux/interrupt.h> #include <linux/hardirq.h> #include <linux/rtc.h> +#include <linux/of_address.h> #include <asm/sections.h> -#include <asm/prom.h> #include <asm/io.h> #include <asm/machdep.h> #include <asm/time.h> diff --git a/arch/powerpc/platforms/powermac/udbg_adb.c b/arch/powerpc/platforms/powermac/udbg_adb.c index 12158bb4fed7..b4756defd596 100644 --- a/arch/powerpc/platforms/powermac/udbg_adb.c +++ b/arch/powerpc/platforms/powermac/udbg_adb.c @@ -7,11 +7,11 @@ 
#include <linux/adb.h> #include <linux/pmu.h> #include <linux/cuda.h> +#include <linux/of.h> #include <asm/machdep.h> #include <asm/io.h> #include <asm/page.h> #include <asm/xmon.h> -#include <asm/prom.h> #include <asm/bootx.h> #include <asm/errno.h> #include <asm/pmac_feature.h> diff --git a/arch/powerpc/platforms/powermac/udbg_scc.c b/arch/powerpc/platforms/powermac/udbg_scc.c index 965827ac2e9c..734df5a32f99 100644 --- a/arch/powerpc/platforms/powermac/udbg_scc.c +++ b/arch/powerpc/platforms/powermac/udbg_scc.c @@ -5,10 +5,10 @@ * Copyright (C) 2001-2005 PPC 64 Team, IBM Corp */ #include <linux/types.h> +#include <linux/of.h> #include <asm/udbg.h> #include <asm/processor.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/pmac_feature.h> extern u8 real_readb(volatile u8 __iomem *addr); diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile index dc7b37c23b60..6488b3842199 100644 --- a/arch/powerpc/platforms/powernv/Makefile +++ b/arch/powerpc/platforms/powernv/Makefile @@ -1,4 +1,12 @@ # SPDX-License-Identifier: GPL-2.0 + +# nothing that deals with real mode is safe to KASAN +# in particular, idle code runs a bunch of things in real mode +KASAN_SANITIZE_idle.o := n +KASAN_SANITIZE_pci-ioda.o := n +# pnv_machine_check_early +KASAN_SANITIZE_setup.o := n + obj-y += setup.o opal-call.o opal-wrappers.o opal.o opal-async.o obj-y += idle.o opal-rtc.o opal-nvram.o opal-lpc.o opal-flash.o obj-y += rng.o opal-elog.o opal-dump.o opal-sysparam.o opal-sensor.o diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index 89e22c460ebf..a83cb679dd59 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c @@ -11,6 +11,7 @@ #include <linux/export.h> #include <linux/init.h> #include <linux/interrupt.h> +#include <linux/irqdomain.h> #include <linux/list.h> #include <linux/msi.h> #include <linux/of.h> @@ -390,7 +391,7 @@ static struct eeh_dev *pnv_eeh_probe(struct pci_dev *pdev) * should be blocked until PE reset. MMIO access is dropped * by hardware certainly. In order to drop PCI config requests, * one more flag (EEH_PE_CFG_RESTRICTED) is introduced, which - * will be checked in the backend for PE state retrival. If + * will be checked in the backend for PE state retrieval. If * the PE becomes frozen for the first time and the flag has * been set for the PE, we will set EEH_PE_CFG_BLOCKED for * that PE to block its config space. @@ -981,7 +982,7 @@ static int pnv_eeh_do_af_flr(struct pci_dn *pdn, int option) case EEH_RESET_FUNDAMENTAL: /* * Wait for Transaction Pending bit to clear. A word-aligned - * test is used, so we use the conrol offset rather than status + * test is used, so we use the control offset rather than status * and shift the test bit to match. */ pnv_eeh_wait_for_pending(pdn, "AF", @@ -1048,7 +1049,7 @@ static int pnv_eeh_reset(struct eeh_pe *pe, int option) * frozen state during PE reset. However, the good idea here from * benh is to keep frozen state before we get PE reset done completely * (until BAR restore). With the frozen state, HW drops illegal IO - * or MMIO access, which can incur recrusive frozen PE during PE + * or MMIO access, which can incur recursive frozen PE during PE * reset. The side effect is that EEH core has to clear the frozen * state explicitly after BAR restore. 
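The ordering that comment describes — keep the PE frozen across the reset so stray MMIO is dropped, and unfreeze only after BARs are restored — can be sketched as below. Every helper here is hypothetical; the real flow is spread across several EEH callbacks:

#include <linux/types.h>

struct example_pe { bool frozen; };	/* stand-in for struct eeh_pe */

static void example_set_frozen(struct example_pe *pe)   { pe->frozen = true; }
static void example_clear_frozen(struct example_pe *pe) { pe->frozen = false; }
static void example_assert_reset(struct example_pe *pe)   { /* platform hook */ }
static void example_deassert_reset(struct example_pe *pe) { /* platform hook */ }
static void example_restore_bars(struct example_pe *pe)   { /* config-space restore */ }

static int example_pe_reset(struct example_pe *pe)
{
	example_set_frozen(pe);		/* HW drops illegal IO/MMIO meanwhile */
	example_assert_reset(pe);
	example_deassert_reset(pe);
	example_restore_bars(pe);
	example_clear_frozen(pe);	/* explicit unfreeze, per the note above */
	return 0;
}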
*/ @@ -1095,8 +1096,8 @@ static int pnv_eeh_reset(struct eeh_pe *pe, int option) * bus is behind a hotplug slot and it will use the slot provided * reset methods to prevent spurious hotplug events during the reset. * - * Fundemental resets need to be handled internally to EEH since the - * PCI core doesn't really have a concept of a fundemental reset, + * Fundamental resets need to be handled internally to EEH since the + * PCI core doesn't really have a concept of a fundamental reset, * mainly because there's no standard way to generate one. Only a * few devices require an FRESET so it should be fine. */ @@ -1640,24 +1641,6 @@ static struct eeh_ops pnv_eeh_ops = { .notify_resume = NULL }; -#ifdef CONFIG_PCI_IOV -static void pnv_pci_fixup_vf_mps(struct pci_dev *pdev) -{ - struct pci_dn *pdn = pci_get_pdn(pdev); - int parent_mps; - - if (!pdev->is_virtfn) - return; - - /* Synchronize MPS for VF and PF */ - parent_mps = pcie_get_mps(pdev->physfn); - if ((128 << pdev->pcie_mpss) >= parent_mps) - pcie_set_mps(pdev, parent_mps); - pdn->mps = pcie_get_mps(pdev); -} -DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pnv_pci_fixup_vf_mps); -#endif /* CONFIG_PCI_IOV */ - /** * eeh_powernv_init - Register platform dependent EEH operations * diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c index a6677a111aca..6f94b808dd39 100644 --- a/arch/powerpc/platforms/powernv/idle.c +++ b/arch/powerpc/platforms/powernv/idle.c @@ -112,7 +112,7 @@ static int __init pnv_save_sprs_for_deep_states(void) if (rc != 0) return rc; - /* Only p8 needs to set extra HID regiters */ + /* Only p8 needs to set extra HID registers */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) { uint64_t hid1_val = mfspr(SPRN_HID1); uint64_t hid4_val = mfspr(SPRN_HID4); @@ -1204,7 +1204,7 @@ static void __init pnv_arch300_idle_init(void) * The idle code does not deal with TB loss occurring * in a shallower state than SPR loss, so force it to * behave like SPRs are lost if TB is lost. POWER9 would - * never encouter this, but a POWER8 core would if it + * never encounter this, but a POWER8 core would if it * implemented the stop instruction. So this is for forward * compatibility. */ diff --git a/arch/powerpc/platforms/powernv/ocxl.c b/arch/powerpc/platforms/powernv/ocxl.c index 28b009b46464..27c936075031 100644 --- a/arch/powerpc/platforms/powernv/ocxl.c +++ b/arch/powerpc/platforms/powernv/ocxl.c @@ -289,7 +289,7 @@ int pnv_ocxl_get_pasid_count(struct pci_dev *dev, int *count) * be used by a function depends on how many functions exist * on the device. The NPU needs to be configured to know how * many bits are available to PASIDs and how many are to be - * used by the function BDF indentifier. + * used by the function BDF identifier. * * We only support one AFU-carrying function for now. 
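One plausible reading of that PASID bit-budget, sketched with invented numbers — the real split is device-specific and not shown in this diff:

#include <linux/types.h>

/* If "pasid_bits" are available in total and "bdf_bits" of them are
 * spent identifying the function, each function gets the remainder. */
static inline u32 example_pasids_per_function(u32 pasid_bits, u32 bdf_bits)
{
	return 1u << (pasid_bits - bdf_bits);	/* e.g. 20 - 4 -> 65536 */
}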
*/ diff --git a/arch/powerpc/platforms/powernv/opal-fadump.c b/arch/powerpc/platforms/powernv/opal-fadump.c index c8ad057c7221..964f464b1b0e 100644 --- a/arch/powerpc/platforms/powernv/opal-fadump.c +++ b/arch/powerpc/platforms/powernv/opal-fadump.c @@ -60,7 +60,7 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) addr = be64_to_cpu(addr); pr_debug("Kernel metadata addr: %llx\n", addr); opal_fdm_active = (void *)addr; - if (opal_fdm_active->registered_regions == 0) + if (be16_to_cpu(opal_fdm_active->registered_regions) == 0) return; ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_BOOT_MEM, &addr); @@ -95,17 +95,17 @@ static int opal_fadump_unregister(struct fw_dump *fadump_conf); static void opal_fadump_update_config(struct fw_dump *fadump_conf, const struct opal_fadump_mem_struct *fdm) { - pr_debug("Boot memory regions count: %d\n", fdm->region_cnt); + pr_debug("Boot memory regions count: %d\n", be16_to_cpu(fdm->region_cnt)); /* * The destination address of the first boot memory region is the * destination address of boot memory regions. */ - fadump_conf->boot_mem_dest_addr = fdm->rgn[0].dest; + fadump_conf->boot_mem_dest_addr = be64_to_cpu(fdm->rgn[0].dest); pr_debug("Destination address of boot memory regions: %#016llx\n", fadump_conf->boot_mem_dest_addr); - fadump_conf->fadumphdr_addr = fdm->fadumphdr_addr; + fadump_conf->fadumphdr_addr = be64_to_cpu(fdm->fadumphdr_addr); } /* @@ -126,9 +126,9 @@ static void __init opal_fadump_get_config(struct fw_dump *fadump_conf, fadump_conf->boot_memory_size = 0; pr_debug("Boot memory regions:\n"); - for (i = 0; i < fdm->region_cnt; i++) { - base = fdm->rgn[i].src; - size = fdm->rgn[i].size; + for (i = 0; i < be16_to_cpu(fdm->region_cnt); i++) { + base = be64_to_cpu(fdm->rgn[i].src); + size = be64_to_cpu(fdm->rgn[i].size); pr_debug("\t[%03d] base: 0x%lx, size: 0x%lx\n", i, base, size); fadump_conf->boot_mem_addr[i] = base; @@ -143,7 +143,7 @@ static void __init opal_fadump_get_config(struct fw_dump *fadump_conf, * Start address of reserve dump area (permanent reservation) for * re-registering FADump after dump capture. */ - fadump_conf->reserve_dump_area_start = fdm->rgn[0].dest; + fadump_conf->reserve_dump_area_start = be64_to_cpu(fdm->rgn[0].dest); /* * Rarely, but it can so happen that system crashes before all @@ -155,13 +155,14 @@ static void __init opal_fadump_get_config(struct fw_dump *fadump_conf, * Hope the memory that could not be preserved only has pages * that are usually filtered out while saving the vmcore. 
*/ - if (fdm->region_cnt > fdm->registered_regions) { + if (be16_to_cpu(fdm->region_cnt) > be16_to_cpu(fdm->registered_regions)) { pr_warn("Not all memory regions were saved!!!\n"); pr_warn(" Unsaved memory regions:\n"); - i = fdm->registered_regions; - while (i < fdm->region_cnt) { + i = be16_to_cpu(fdm->registered_regions); + while (i < be16_to_cpu(fdm->region_cnt)) { pr_warn("\t[%03d] base: 0x%llx, size: 0x%llx\n", - i, fdm->rgn[i].src, fdm->rgn[i].size); + i, be64_to_cpu(fdm->rgn[i].src), + be64_to_cpu(fdm->rgn[i].size)); i++; } @@ -170,7 +171,7 @@ static void __init opal_fadump_get_config(struct fw_dump *fadump_conf, } fadump_conf->boot_mem_top = (fadump_conf->boot_memory_size + hole_size); - fadump_conf->boot_mem_regs_cnt = fdm->region_cnt; + fadump_conf->boot_mem_regs_cnt = be16_to_cpu(fdm->region_cnt); opal_fadump_update_config(fadump_conf, fdm); } @@ -178,35 +179,38 @@ static void __init opal_fadump_get_config(struct fw_dump *fadump_conf, static void opal_fadump_init_metadata(struct opal_fadump_mem_struct *fdm) { fdm->version = OPAL_FADUMP_VERSION; - fdm->region_cnt = 0; - fdm->registered_regions = 0; - fdm->fadumphdr_addr = 0; + fdm->region_cnt = cpu_to_be16(0); + fdm->registered_regions = cpu_to_be16(0); + fdm->fadumphdr_addr = cpu_to_be64(0); } static u64 opal_fadump_init_mem_struct(struct fw_dump *fadump_conf) { u64 addr = fadump_conf->reserve_dump_area_start; + u16 reg_cnt; int i; opal_fdm = __va(fadump_conf->kernel_metadata); opal_fadump_init_metadata(opal_fdm); /* Boot memory regions */ + reg_cnt = be16_to_cpu(opal_fdm->region_cnt); for (i = 0; i < fadump_conf->boot_mem_regs_cnt; i++) { - opal_fdm->rgn[i].src = fadump_conf->boot_mem_addr[i]; - opal_fdm->rgn[i].dest = addr; - opal_fdm->rgn[i].size = fadump_conf->boot_mem_sz[i]; + opal_fdm->rgn[i].src = cpu_to_be64(fadump_conf->boot_mem_addr[i]); + opal_fdm->rgn[i].dest = cpu_to_be64(addr); + opal_fdm->rgn[i].size = cpu_to_be64(fadump_conf->boot_mem_sz[i]); - opal_fdm->region_cnt++; + reg_cnt++; addr += fadump_conf->boot_mem_sz[i]; } + opal_fdm->region_cnt = cpu_to_be16(reg_cnt); /* - * Kernel metadata is passed to f/w and retrieved in capture kerenl. + * Kernel metadata is passed to f/w and retrieved in capture kernel. * So, use it to save fadump header address instead of calculating it. */ - opal_fdm->fadumphdr_addr = (opal_fdm->rgn[0].dest + - fadump_conf->boot_memory_size); + opal_fdm->fadumphdr_addr = cpu_to_be64(be64_to_cpu(opal_fdm->rgn[0].dest) + + fadump_conf->boot_memory_size); opal_fadump_update_config(fadump_conf, opal_fdm); @@ -269,18 +273,21 @@ static u64 opal_fadump_get_bootmem_min(void) static int opal_fadump_register(struct fw_dump *fadump_conf) { s64 rc = OPAL_PARAMETER; + u16 registered_regs; int i, err = -EIO; - for (i = 0; i < opal_fdm->region_cnt; i++) { + registered_regs = be16_to_cpu(opal_fdm->registered_regions); + for (i = 0; i < be16_to_cpu(opal_fdm->region_cnt); i++) { rc = opal_mpipl_update(OPAL_MPIPL_ADD_RANGE, - opal_fdm->rgn[i].src, - opal_fdm->rgn[i].dest, - opal_fdm->rgn[i].size); + be64_to_cpu(opal_fdm->rgn[i].src), + be64_to_cpu(opal_fdm->rgn[i].dest), + be64_to_cpu(opal_fdm->rgn[i].size)); if (rc != OPAL_SUCCESS) break; - opal_fdm->registered_regions++; + registered_regs++; } + opal_fdm->registered_regions = cpu_to_be16(registered_regs); switch (rc) { case OPAL_SUCCESS: @@ -291,7 +298,8 @@ static int opal_fadump_register(struct fw_dump *fadump_conf) case OPAL_RESOURCE: /* If MAX regions limit in f/w is hit, warn and proceed. 
*/ pr_warn("%d regions could not be registered for MPIPL as MAX limit is reached!\n", - (opal_fdm->region_cnt - opal_fdm->registered_regions)); + (be16_to_cpu(opal_fdm->region_cnt) - + be16_to_cpu(opal_fdm->registered_regions))); fadump_conf->dump_registered = 1; err = 0; break; @@ -312,7 +320,7 @@ static int opal_fadump_register(struct fw_dump *fadump_conf) * If some regions were registered before OPAL_MPIPL_ADD_RANGE * OPAL call failed, unregister all regions. */ - if ((err < 0) && (opal_fdm->registered_regions > 0)) + if ((err < 0) && (be16_to_cpu(opal_fdm->registered_regions) > 0)) opal_fadump_unregister(fadump_conf); return err; @@ -328,7 +336,7 @@ static int opal_fadump_unregister(struct fw_dump *fadump_conf) return -EIO; } - opal_fdm->registered_regions = 0; + opal_fdm->registered_regions = cpu_to_be16(0); fadump_conf->dump_registered = 0; return 0; } @@ -563,25 +571,26 @@ static void opal_fadump_region_show(struct fw_dump *fadump_conf, else fdm_ptr = opal_fdm; - for (i = 0; i < fdm_ptr->region_cnt; i++) { + for (i = 0; i < be16_to_cpu(fdm_ptr->region_cnt); i++) { /* * Only regions that are registered for MPIPL * would have dump data. */ if ((fadump_conf->dump_active) && - (i < fdm_ptr->registered_regions)) - dumped_bytes = fdm_ptr->rgn[i].size; + (i < be16_to_cpu(fdm_ptr->registered_regions))) + dumped_bytes = be64_to_cpu(fdm_ptr->rgn[i].size); seq_printf(m, "DUMP: Src: %#016llx, Dest: %#016llx, ", - fdm_ptr->rgn[i].src, fdm_ptr->rgn[i].dest); + be64_to_cpu(fdm_ptr->rgn[i].src), + be64_to_cpu(fdm_ptr->rgn[i].dest)); seq_printf(m, "Size: %#llx, Dumped: %#llx bytes\n", - fdm_ptr->rgn[i].size, dumped_bytes); + be64_to_cpu(fdm_ptr->rgn[i].size), dumped_bytes); } - /* Dump is active. Show reserved area start address. */ + /* Dump is active. Show preserved area start address. 
*/ if (fadump_conf->dump_active) { - seq_printf(m, "\nMemory above %#016lx is reserved for saving crash dump\n", - fadump_conf->reserve_dump_area_start); + seq_printf(m, "\nMemory above %#016llx is reserved for saving crash dump\n", + fadump_conf->boot_mem_top); } } @@ -624,6 +633,7 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) { const __be32 *prop; unsigned long dn; + __be64 be_addr; u64 addr = 0; int i, len; s64 ret; @@ -680,13 +690,13 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) if (!prop) return; - ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_KERNEL, &addr); - if ((ret != OPAL_SUCCESS) || !addr) { + ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_KERNEL, &be_addr); + if ((ret != OPAL_SUCCESS) || !be_addr) { pr_err("Failed to get Kernel metadata (%lld)\n", ret); return; } - addr = be64_to_cpu(addr); + addr = be64_to_cpu(be_addr); pr_debug("Kernel metadata addr: %llx\n", addr); opal_fdm_active = __va(addr); @@ -697,14 +707,14 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) } /* Kernel regions not registered with f/w for MPIPL */ - if (opal_fdm_active->registered_regions == 0) { + if (be16_to_cpu(opal_fdm_active->registered_regions) == 0) { opal_fdm_active = NULL; return; } - ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_CPU, &addr); - if (addr) { - addr = be64_to_cpu(addr); + ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_CPU, &be_addr); + if (be_addr) { + addr = be64_to_cpu(be_addr); pr_debug("CPU metadata addr: %llx\n", addr); opal_cpu_metadata = __va(addr); } diff --git a/arch/powerpc/platforms/powernv/opal-fadump.h b/arch/powerpc/platforms/powernv/opal-fadump.h index f1e9ecf548c5..3f715efb0aa6 100644 --- a/arch/powerpc/platforms/powernv/opal-fadump.h +++ b/arch/powerpc/platforms/powernv/opal-fadump.h @@ -31,14 +31,14 @@ * OPAL FADump kernel metadata * * The address of this structure will be registered with f/w for retrieving - * and processing during crash dump. + * in the capture kernel to process the crash dump. */ struct opal_fadump_mem_struct { u8 version; u8 reserved[3]; - u16 region_cnt; /* number of regions */ - u16 registered_regions; /* Regions registered for MPIPL */ - u64 fadumphdr_addr; + __be16 region_cnt; /* number of regions */ + __be16 registered_regions; /* Regions registered for MPIPL */ + __be64 fadumphdr_addr; struct opal_mpipl_region rgn[FADUMP_MAX_MEM_REGS]; } __packed; @@ -135,7 +135,7 @@ static inline void opal_fadump_read_regs(char *bufp, unsigned int regs_cnt, for (i = 0; i < regs_cnt; i++, bufp += reg_entry_size) { reg_entry = (struct hdat_fadump_reg_entry *)bufp; val = (cpu_endian ? 
be64_to_cpu(reg_entry->reg_val) : - reg_entry->reg_val); + (u64)(reg_entry->reg_val)); opal_fadump_set_regval_regnum(regs, be32_to_cpu(reg_entry->reg_type), be32_to_cpu(reg_entry->reg_num), diff --git a/arch/powerpc/platforms/powernv/opal-flash.c b/arch/powerpc/platforms/powernv/opal-flash.c index 18481a8c52fa..d5ea04e8e4c5 100644 --- a/arch/powerpc/platforms/powernv/opal-flash.c +++ b/arch/powerpc/platforms/powernv/opal-flash.c @@ -520,6 +520,10 @@ void __init opal_flash_update_init(void) { int ret; + /* Firmware update is not supported by firmware */ + if (!opal_check_token(OPAL_FLASH_VALIDATE)) + return; + /* Allocate validate image buffer */ validate_flash_data.buf = kzalloc(VALIDATE_BUF_SIZE, GFP_KERNEL); if (!validate_flash_data.buf) { diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c index 3fea5da6d1b3..348a8cdaecd6 100644 --- a/arch/powerpc/platforms/powernv/opal-imc.c +++ b/arch/powerpc/platforms/powernv/opal-imc.c @@ -211,7 +211,7 @@ static void disable_core_pmu_counters(void) get_hard_smp_processor_id(cpu)); if (rc) pr_err("%s: Failed to stop Core (cpu = %d)\n", - __FUNCTION__, cpu); + __func__, cpu); } cpus_read_unlock(); } diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c index 5390c888db16..d129d6d45a50 100644 --- a/arch/powerpc/platforms/powernv/opal-lpc.c +++ b/arch/powerpc/platforms/powernv/opal-lpc.c @@ -197,7 +197,7 @@ static ssize_t lpc_debug_read(struct file *filp, char __user *ubuf, /* * Select access size based on count and alignment and - * access type. IO and MEM only support byte acceses, + * access type. IO and MEM only support byte accesses, * FW supports all 3. */ len = 1; diff --git a/arch/powerpc/platforms/powernv/opal-memory-errors.c b/arch/powerpc/platforms/powernv/opal-memory-errors.c index 1e8e17df9ce8..a1754a28265d 100644 --- a/arch/powerpc/platforms/powernv/opal-memory-errors.c +++ b/arch/powerpc/platforms/powernv/opal-memory-errors.c @@ -82,7 +82,7 @@ static DECLARE_WORK(mem_error_work, mem_error_handler); /* * opal_memory_err_event - notifier handler that queues up the opal message - * to be preocessed later. + * to be processed later. 
+ * to be processed later.
*/ static int opal_memory_err_event(struct notifier_block *nb, unsigned long msg_type, void *msg) diff --git a/arch/powerpc/platforms/powernv/pci-cxl.c b/arch/powerpc/platforms/powernv/pci-cxl.c index 53172862d23b..7e419de71db8 100644 --- a/arch/powerpc/platforms/powernv/pci-cxl.c +++ b/arch/powerpc/platforms/powernv/pci-cxl.c @@ -4,6 +4,7 @@ */ #include <linux/module.h> +#include <misc/cxl-base.h> #include <asm/pnv-pci.h> #include <asm/opal.h> diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c index 30551bbd7988..e96324502db0 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c +++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c @@ -145,8 +145,7 @@ int pnv_tce_build(struct iommu_table *tbl, long index, long npages, #ifdef CONFIG_IOMMU_API int pnv_tce_xchg(struct iommu_table *tbl, long index, - unsigned long *hpa, enum dma_data_direction *direction, - bool alloc) + unsigned long *hpa, enum dma_data_direction *direction) { u64 proto_tce = iommu_direction_to_tce_perm(*direction); unsigned long newtce = *hpa | proto_tce, oldtce; @@ -164,7 +163,7 @@ int pnv_tce_xchg(struct iommu_table *tbl, long index, } if (!ptce) { - ptce = pnv_tce(tbl, false, idx, alloc); + ptce = pnv_tce(tbl, false, idx, true); if (!ptce) return -ENOMEM; } diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index b722ac902269..c8cf2728031a 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -21,10 +21,11 @@ #include <linux/rculist.h> #include <linux/sizes.h> #include <linux/debugfs.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <asm/sections.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/pci-bridge.h> #include <asm/machdep.h> #include <asm/msi_bitmap.h> @@ -1267,22 +1268,20 @@ static bool pnv_pci_ioda_iommu_bypass_supported(struct pci_dev *pdev, return false; } -static inline __be64 __iomem *pnv_ioda_get_inval_reg(struct pnv_phb *phb, - bool real_mode) +static inline __be64 __iomem *pnv_ioda_get_inval_reg(struct pnv_phb *phb) { - return real_mode ? 
(__be64 __iomem *)(phb->regs_phys + 0x210) : - (phb->regs + 0x210); + return phb->regs + 0x210; } static void pnv_pci_p7ioc_tce_invalidate(struct iommu_table *tbl, - unsigned long index, unsigned long npages, bool rm) + unsigned long index, unsigned long npages) { struct iommu_table_group_link *tgl = list_first_entry_or_null( &tbl->it_group_list, struct iommu_table_group_link, next); struct pnv_ioda_pe *pe = container_of(tgl->table_group, struct pnv_ioda_pe, table_group); - __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm); + __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb); unsigned long start, end, inc; start = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset); @@ -1297,11 +1296,7 @@ static void pnv_pci_p7ioc_tce_invalidate(struct iommu_table *tbl, mb(); /* Ensure above stores are visible */ while (start <= end) { - if (rm) - __raw_rm_writeq_be(start, invalidate); - else - __raw_writeq_be(start, invalidate); - + __raw_writeq_be(start, invalidate); start += inc; } @@ -1320,7 +1315,7 @@ static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index, attrs); if (!ret) - pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false); + pnv_pci_p7ioc_tce_invalidate(tbl, index, npages); return ret; } @@ -1328,10 +1323,9 @@ static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index, #ifdef CONFIG_IOMMU_API /* Common for IODA1 and IODA2 */ static int pnv_ioda_tce_xchg_no_kill(struct iommu_table *tbl, long index, - unsigned long *hpa, enum dma_data_direction *direction, - bool realmode) + unsigned long *hpa, enum dma_data_direction *direction) { - return pnv_tce_xchg(tbl, index, hpa, direction, !realmode); + return pnv_tce_xchg(tbl, index, hpa, direction); } #endif @@ -1340,7 +1334,7 @@ static void pnv_ioda1_tce_free(struct iommu_table *tbl, long index, { pnv_tce_free(tbl, index, npages); - pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false); + pnv_pci_p7ioc_tce_invalidate(tbl, index, npages); } static struct iommu_table_ops pnv_ioda1_iommu_ops = { @@ -1361,18 +1355,18 @@ static struct iommu_table_ops pnv_ioda1_iommu_ops = { static inline void pnv_pci_phb3_tce_invalidate_pe(struct pnv_ioda_pe *pe) { /* 01xb - invalidate TCEs that match the specified PE# */ - __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, false); + __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb); unsigned long val = PHB3_TCE_KILL_INVAL_PE | (pe->pe_number & 0xFF); mb(); /* Ensure above stores are visible */ __raw_writeq_be(val, invalidate); } -static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, bool rm, +static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, unsigned shift, unsigned long index, unsigned long npages) { - __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm); + __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb); unsigned long start, end, inc; /* We'll invalidate DMA address in PE scope */ @@ -1387,10 +1381,7 @@ static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, bool rm, mb(); while (start <= end) { - if (rm) - __raw_rm_writeq_be(start, invalidate); - else - __raw_writeq_be(start, invalidate); + __raw_writeq_be(start, invalidate); start += inc; } } @@ -1407,7 +1398,7 @@ static inline void pnv_pci_ioda2_tce_invalidate_pe(struct pnv_ioda_pe *pe) } static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl, - unsigned long index, unsigned long npages, bool rm) + unsigned long index, unsigned long npages) { struct iommu_table_group_link *tgl; @@ -1418,7 +1409,7 @@ static void 
pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl, unsigned int shift = tbl->it_page_shift; if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs) - pnv_pci_phb3_tce_invalidate(pe, rm, shift, + pnv_pci_phb3_tce_invalidate(pe, shift, index, npages); else opal_pci_tce_kill(phb->opal_id, @@ -1437,7 +1428,7 @@ static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index, attrs); if (!ret) - pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false); + pnv_pci_ioda2_tce_invalidate(tbl, index, npages); return ret; } @@ -1447,7 +1438,7 @@ static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index, { pnv_tce_free(tbl, index, npages); - pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false); + pnv_pci_ioda2_tce_invalidate(tbl, index, npages); } static struct iommu_table_ops pnv_ioda2_iommu_ops = { @@ -2383,7 +2374,7 @@ static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe, /* * This function is supposed to be called on basis of PE from top - * to bottom style. So the the I/O or MMIO segment assigned to + * to bottom style. So the I/O or MMIO segment assigned to * parent PE could be overridden by its child PEs if necessary. */ static void pnv_ioda_setup_pe_seg(struct pnv_ioda_pe *pe) @@ -2738,7 +2729,7 @@ static void pnv_pci_ioda1_release_pe_dma(struct pnv_ioda_pe *pe) if (rc != OPAL_SUCCESS) return; - pnv_pci_p7ioc_tce_invalidate(tbl, tbl->it_offset, tbl->it_size, false); + pnv_pci_p7ioc_tce_invalidate(tbl, tbl->it_offset, tbl->it_size); if (pe->table_group.group) { iommu_group_put(pe->table_group.group); WARN_ON(pe->table_group.group); diff --git a/arch/powerpc/platforms/powernv/pci-sriov.c b/arch/powerpc/platforms/powernv/pci-sriov.c index 04155aaaadb1..7195133b26bb 100644 --- a/arch/powerpc/platforms/powernv/pci-sriov.c +++ b/arch/powerpc/platforms/powernv/pci-sriov.c @@ -22,7 +22,7 @@ * have the same requirement. * * For a SR-IOV BAR things are a little more awkward since size and alignment - * are not coupled. The alignment is set based on the the per-VF BAR size, but + * are not coupled. The alignment is set based on the per-VF BAR size, but * the total BAR area is: number-of-vfs * per-vf-size. The number of VFs * isn't necessarily a power of two, so neither is the total size. 
To fix that * we need to finesse (read: hack) the Linux BAR allocator so that it will @@ -699,7 +699,7 @@ static int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs) return -ENOSPC; } - /* allocate a contigious block of PEs for our VFs */ + /* allocate a contiguous block of PEs for our VFs */ base_pe = pnv_ioda_alloc_pe(phb, num_vfs); if (!base_pe) { pci_err(pdev, "Unable to allocate PEs for %d VFs\n", num_vfs); diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index f7054879ecd4..233a50e65fce 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -18,7 +18,6 @@ #include <asm/sections.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/pci-bridge.h> #include <asm/machdep.h> #include <asm/msi_bitmap.h> diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 966a9eb64339..f12643958b8d 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -311,8 +311,7 @@ extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages, unsigned long attrs); extern void pnv_tce_free(struct iommu_table *tbl, long index, long npages); extern int pnv_tce_xchg(struct iommu_table *tbl, long index, - unsigned long *hpa, enum dma_data_direction *direction, - bool alloc); + unsigned long *hpa, enum dma_data_direction *direction); extern __be64 *pnv_tce_useraddrptr(struct iommu_table *tbl, long index, bool alloc); extern unsigned long pnv_tce_get(struct iommu_table *tbl, long index); diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index 105d889abd51..824c3ad7a0fa 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -96,6 +96,15 @@ static void __init init_fw_feat_flags(struct device_node *np) if (fw_feature_is("disabled", "needs-spec-barrier-for-bound-checks", np)) security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR); + + if (fw_feature_is("enabled", "no-need-l1d-flush-msr-pr-1-to-0", np)) + security_ftr_clear(SEC_FTR_L1D_FLUSH_ENTRY); + + if (fw_feature_is("enabled", "no-need-l1d-flush-kernel-on-user-access", np)) + security_ftr_clear(SEC_FTR_L1D_FLUSH_UACCESS); + + if (fw_feature_is("enabled", "no-need-store-drain-on-priv-state-switch", np)) + security_ftr_clear(SEC_FTR_STF_BARRIER); } static void __init pnv_setup_security_mitigations(void) diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index cbb67813cd5d..9e1a25398f98 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c @@ -345,7 +345,7 @@ static void __init pnv_smp_probe(void) } } -static int pnv_system_reset_exception(struct pt_regs *regs) +noinstr static int pnv_system_reset_exception(struct pt_regs *regs) { if (smp_handle_nmi_ipi(regs)) return 1; diff --git a/arch/powerpc/platforms/powernv/ultravisor.c b/arch/powerpc/platforms/powernv/ultravisor.c index e4a00ad06f9d..67c8c4b2d8b1 100644 --- a/arch/powerpc/platforms/powernv/ultravisor.c +++ b/arch/powerpc/platforms/powernv/ultravisor.c @@ -55,6 +55,7 @@ static int __init uv_init(void) return -ENODEV; uv_memcons = memcons_init(node, "memcons"); + of_node_put(node); if (!uv_memcons) return -ENOENT; diff --git a/arch/powerpc/platforms/powernv/vas-fault.c b/arch/powerpc/platforms/powernv/vas-fault.c index a7aabc18039e..c1bfad56447d 100644 --- a/arch/powerpc/platforms/powernv/vas-fault.c +++ b/arch/powerpc/platforms/powernv/vas-fault.c @@ -216,7 +216,7 @@ int 
vas_setup_fault_window(struct vas_instance *vinst) vas_init_rx_win_attr(&attr, VAS_COP_TYPE_FAULT); attr.rx_fifo_size = vinst->fault_fifo_size; - attr.rx_fifo = vinst->fault_fifo; + attr.rx_fifo = __pa(vinst->fault_fifo); /* * Max creds is based on number of CRBs can fit in the FIFO. diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c index 0f8d39fbf2b2..0072682531d8 100644 --- a/arch/powerpc/platforms/powernv/vas-window.c +++ b/arch/powerpc/platforms/powernv/vas-window.c @@ -404,7 +404,7 @@ static void init_winctx_regs(struct pnv_vas_window *window, * * See also: Design note in function header. */ - val = __pa(winctx->rx_fifo); + val = winctx->rx_fifo; val = SET_FIELD(VAS_PAGE_MIGRATION_SELECT, val, 0); write_hvwc_reg(window, VREG(LFIFO_BAR), val); @@ -739,7 +739,7 @@ static void init_winctx_for_rxwin(struct pnv_vas_window *rxwin, */ winctx->fifo_disable = true; winctx->intr_disable = true; - winctx->rx_fifo = NULL; + winctx->rx_fifo = 0; } winctx->lnotify_lpid = rxattr->lnotify_lpid; diff --git a/arch/powerpc/platforms/powernv/vas.h b/arch/powerpc/platforms/powernv/vas.h index 8bb08e395de0..08d9d3d5a22b 100644 --- a/arch/powerpc/platforms/powernv/vas.h +++ b/arch/powerpc/platforms/powernv/vas.h @@ -376,7 +376,7 @@ struct pnv_vas_window { * is a container for the register fields in the window context. */ struct vas_winctx { - void *rx_fifo; + u64 rx_fifo; int rx_fifo_size; int wcreds_max; int rsvd_txbuf_count; diff --git a/arch/powerpc/platforms/ps3/Kconfig b/arch/powerpc/platforms/ps3/Kconfig index a4048b8c8c50..610682caabc4 100644 --- a/arch/powerpc/platforms/ps3/Kconfig +++ b/arch/powerpc/platforms/ps3/Kconfig @@ -90,7 +90,7 @@ config PS3_VERBOSE_RESULT bool "PS3 Verbose LV1 hypercall results" if PS3_ADVANCED depends on PPC_PS3 help - Enables more verbose log mesages for LV1 hypercall results. + Enables more verbose log messages for LV1 hypercall results. If in doubt, say N here and reduce the size of the kernel by a small amount. diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c index ef710a715903..c27e6cf85272 100644 --- a/arch/powerpc/platforms/ps3/htab.c +++ b/arch/powerpc/platforms/ps3/htab.c @@ -10,7 +10,6 @@ #include <linux/memblock.h> #include <asm/machdep.h> -#include <asm/prom.h> #include <asm/udbg.h> #include <asm/lv1call.h> #include <asm/ps3fb.h> diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c index 5ce924611b94..1326de55fda6 100644 --- a/arch/powerpc/platforms/ps3/mm.c +++ b/arch/powerpc/platforms/ps3/mm.c @@ -14,7 +14,6 @@ #include <asm/cell-regs.h> #include <asm/firmware.h> -#include <asm/prom.h> #include <asm/udbg.h> #include <asm/lv1call.h> #include <asm/setup.h> @@ -364,7 +363,7 @@ static void __maybe_unused _dma_dump_region(const struct ps3_dma_region *r, * @bus_addr: Starting ioc bus address of the area to map. * @len: Length in bytes of the area to map. * @link: A struct list_head used with struct ps3_dma_region.chunk_list, the - * list of all chuncks owned by the region. + * list of all chunks owned by the region. * * This implementation uses a very simple dma page manager * based on the dma_chunk structure. 
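
The vas-fault.c, vas-window.c and vas.h changes above move the virtual-to-physical conversion to the point where the FIFO buffer is handed over (`__pa()` at assignment) and change the stored type from `void *` to `u64`, so a physical address is never carried around in a dereferenceable pointer type. A sketch of that pattern; mock_pa() is an identity-mapped stand-in for the kernel's __pa(), which cannot exist outside the kernel:

```c
#include <stdint.h>
#include <stdio.h>

/* Identity-mapped mock of the kernel's __pa(); the real one is MMU-specific. */
static uint64_t mock_pa(const void *virt)
{
	return (uint64_t)(uintptr_t)virt;
}

struct winctx {
	uint64_t rx_fifo;	/* physical address as a plain integer, not a pointer */
};

/* Convert exactly once, at the point where the buffer is handed to hardware. */
static void setup_window(struct winctx *ctx, void *fifo_buf)
{
	ctx->rx_fifo = fifo_buf ? mock_pa(fifo_buf) : 0;
}

int main(void)
{
	static char fifo[256];
	struct winctx ctx;

	setup_window(&ctx, fifo);
	printf("rx_fifo: 0x%llx\n", (unsigned long long)ctx.rx_fifo);
	return 0;
}
```
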
This scheme assumes diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c index cb844e0add2b..b384cd2d6b99 100644 --- a/arch/powerpc/platforms/ps3/os-area.c +++ b/arch/powerpc/platforms/ps3/os-area.c @@ -17,8 +17,6 @@ #include <linux/of.h> #include <linux/slab.h> -#include <asm/prom.h> - #include "platform.h" enum { diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c index 3de9145c20bc..d7495785fe47 100644 --- a/arch/powerpc/platforms/ps3/setup.c +++ b/arch/powerpc/platforms/ps3/setup.c @@ -13,13 +13,13 @@ #include <linux/console.h> #include <linux/export.h> #include <linux/memblock.h> +#include <linux/of.h> #include <asm/machdep.h> #include <asm/firmware.h> #include <asm/time.h> #include <asm/iommu.h> #include <asm/udbg.h> -#include <asm/prom.h> #include <asm/lv1call.h> #include <asm/ps3gpu.h> diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c index b637bf292047..2502e9b17df4 100644 --- a/arch/powerpc/platforms/ps3/system-bus.c +++ b/arch/powerpc/platforms/ps3/system-bus.c @@ -601,7 +601,7 @@ static dma_addr_t ps3_ioc0_map_page(struct device *_dev, struct page *page, iopte_flag |= CBE_IOPTE_PP_W | CBE_IOPTE_SO_RW; break; default: - /* not happned */ + /* not happened */ BUG(); } result = ps3_dma_map(dev->d_region, (unsigned long)ptr, size, diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile index 9764e1a2ed5c..7aaff5323544 100644 --- a/arch/powerpc/platforms/pseries/Makefile +++ b/arch/powerpc/platforms/pseries/Makefile @@ -33,3 +33,7 @@ obj-$(CONFIG_SUSPEND) += suspend.o obj-$(CONFIG_PPC_VAS) += vas.o vas-sysfs.o obj-$(CONFIG_ARCH_HAS_CC_PLATFORM) += cc_platform.o + +# nothing that operates in real mode is safe for KASAN +KASAN_SANITIZE_ras.o := n +KASAN_SANITIZE_kexec.o := n diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c index 45a3a3022a85..15ed8206c463 100644 --- a/arch/powerpc/platforms/pseries/cmm.c +++ b/arch/powerpc/platforms/pseries/cmm.c @@ -475,8 +475,6 @@ static struct notifier_block cmm_reboot_nb = { static int cmm_memory_cb(struct notifier_block *self, unsigned long action, void *arg) { - int ret = 0; - switch (action) { case MEM_GOING_OFFLINE: mutex_lock(&hotplug_mutex); @@ -493,7 +491,7 @@ static int cmm_memory_cb(struct notifier_block *self, break; } - return notifier_from_errno(ret); + return NOTIFY_OK; } static struct notifier_block cmm_mem_nb = { diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index b1f01ac0c29e..498d6efcb5ae 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -19,7 +19,6 @@ #include "of_helpers.h" #include "pseries.h" -#include <asm/prom.h> #include <asm/machdep.h> #include <linux/uaccess.h> #include <asm/rtas.h> @@ -389,7 +388,7 @@ static void pseries_hp_work_fn(struct work_struct *work) handle_dlpar_errorlog(hp_work->errlog); kfree(hp_work->errlog); - kfree((void *)work); + kfree(work); } void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog) diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index 09fafcf2d3a0..1b0c901a6f3b 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -43,6 +43,8 @@ static int ibm_get_config_addr_info; static int ibm_get_config_addr_info2; static int ibm_configure_pe; +static void pseries_eeh_init_edev(struct pci_dn 
*pdn); + static void pseries_pcibios_bus_add_device(struct pci_dev *pdev) { struct pci_dn *pdn = pci_get_pdn(pdev); @@ -359,7 +361,7 @@ static struct eeh_pe *pseries_eeh_pe_get_parent(struct eeh_dev *edev) * This function takes care of the initialisation and inserts the eeh_dev * into the correct eeh_pe. If no eeh_pe exists we'll allocate one. */ -void pseries_eeh_init_edev(struct pci_dn *pdn) +static void pseries_eeh_init_edev(struct pci_dn *pdn) { struct eeh_pe pe, *parent; struct eeh_dev *edev; @@ -510,7 +512,7 @@ static int pseries_eeh_set_option(struct eeh_pe *pe, int option) int ret = 0; /* - * When we're enabling or disabling EEH functioality on + * When we're enabling or disabling EEH functionality on * the particular PE, the PE config address is possibly * unavailable. Therefore, we have to figure it out from * the FDT node. @@ -845,8 +847,7 @@ static int __init eeh_pseries_init(void) return -EINVAL; } - /* Initialize error log lock and size */ - spin_lock_init(&slot_errbuf_lock); + /* Initialize error log size */ eeh_error_buf_size = rtas_token("rtas-error-log-max"); if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) { pr_info("%s: unknown EEH error log size\n", diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index b81fc846d99c..0f8cd8b06432 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -398,7 +398,7 @@ static int dlpar_online_cpu(struct device_node *dn) if (get_hard_smp_processor_id(cpu) != thread) continue; cpu_maps_update_done(); - find_and_online_cpu_nid(cpu); + find_and_update_cpu_nid(cpu); rc = device_online(get_cpu_device(cpu)); if (rc) { dlpar_offline_cpu(dn); diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index 91cf23495ccb..2e3a317722a8 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -16,7 +16,6 @@ #include <asm/firmware.h> #include <asm/machdep.h> -#include <asm/prom.h> #include <asm/sparsemem.h> #include <asm/fadump.h> #include <asm/drmem.h> diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 4d991cf840d9..fba64304e859 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -666,8 +666,7 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus) #ifdef CONFIG_IOMMU_API static int tce_exchange_pseries(struct iommu_table *tbl, long index, unsigned - long *tce, enum dma_data_direction *direction, - bool realmode) + long *tce, enum dma_data_direction *direction) { long rc; unsigned long ioba = (unsigned long) index << tbl->it_page_shift; @@ -1430,7 +1429,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) pci->table_group->tables[1] = newtbl; - /* Keep default DMA window stuct if removed */ + /* Keep default DMA window struct if removed */ if (default_win_removed) { tbl->it_size = 0; vfree(tbl->it_map); diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c index 145fcfbc017f..ab6cdbebb35e 100644 --- a/arch/powerpc/platforms/pseries/kexec.c +++ b/arch/powerpc/platforms/pseries/kexec.c @@ -61,3 +61,11 @@ void pseries_kexec_cpu_down(int crash_shutdown, int secondary) } else xics_kexec_teardown_cpu(secondary); } + +void pseries_machine_kexec(struct kimage *image) +{ + if (firmware_has_feature(FW_FEATURE_SET_MODE)) + pseries_disable_reloc_on_exc(); + + 
default_machine_kexec(image); +} diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 760581c5752f..937f9c010b22 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -31,7 +31,6 @@ #include <asm/mmu_context.h> #include <asm/iommu.h> #include <asm/tlb.h> -#include <asm/prom.h> #include <asm/cputable.h> #include <asm/udbg.h> #include <asm/smp.h> diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c index 2119c003fcf9..507dc0b5987d 100644 --- a/arch/powerpc/platforms/pseries/lparcfg.c +++ b/arch/powerpc/platforms/pseries/lparcfg.c @@ -28,7 +28,6 @@ #include <asm/firmware.h> #include <asm/rtas.h> #include <asm/time.h> -#include <asm/prom.h> #include <asm/vdso_datapage.h> #include <asm/vio.h> #include <asm/mmu.h> diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c index fb2919fd6bc0..a3a71d37cb9a 100644 --- a/arch/powerpc/platforms/pseries/msi.c +++ b/arch/powerpc/platforms/pseries/msi.c @@ -7,6 +7,7 @@ #include <linux/crash_dump.h> #include <linux/device.h> #include <linux/irq.h> +#include <linux/irqdomain.h> #include <linux/msi.h> #include <asm/rtas.h> diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c index 69db2eca367f..cbf1720eb4aa 100644 --- a/arch/powerpc/platforms/pseries/nvram.c +++ b/arch/powerpc/platforms/pseries/nvram.c @@ -13,9 +13,9 @@ #include <linux/slab.h> #include <linux/ctype.h> #include <linux/uaccess.h> +#include <linux/of.h> #include <asm/nvram.h> #include <asm/rtas.h> -#include <asm/prom.h> #include <asm/machdep.h> /* Max bytes to read/write in one go */ diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c index 39962c905542..181b855b3050 100644 --- a/arch/powerpc/platforms/pseries/papr_scm.c +++ b/arch/powerpc/platforms/pseries/papr_scm.c @@ -125,8 +125,8 @@ struct papr_scm_priv { /* The bits which needs to be overridden */ u64 health_bitmap_inject_mask; - /* array to have event_code and stat_id mappings */ - char **nvdimm_events_map; + /* array to have event_code and stat_id mappings */ + u8 *nvdimm_events_map; }; static int papr_scm_pmem_flush(struct nd_region *nd_region, @@ -370,7 +370,7 @@ static int papr_scm_pmu_get_value(struct perf_event *event, struct device *dev, stat = &stats->scm_statistic[0]; memcpy(&stat->stat_id, - p->nvdimm_events_map[event->attr.config], + &p->nvdimm_events_map[event->attr.config * sizeof(stat->stat_id)], sizeof(stat->stat_id)); stat->stat_val = 0; @@ -462,14 +462,13 @@ static int papr_scm_pmu_check_events(struct papr_scm_priv *p, struct nvdimm_pmu { struct papr_scm_perf_stat *stat; struct papr_scm_perf_stats *stats; - int index, rc, count; u32 available_events; - - if (!p->stat_buffer_len) - return -ENOENT; + int index, rc = 0; available_events = (p->stat_buffer_len - sizeof(struct papr_scm_perf_stats)) / sizeof(struct papr_scm_perf_stat); + if (available_events == 0) + return -EOPNOTSUPP; /* Allocate the buffer for phyp where stats are written */ stats = kzalloc(p->stat_buffer_len, GFP_KERNEL); @@ -478,35 +477,30 @@ static int papr_scm_pmu_check_events(struct papr_scm_priv *p, struct nvdimm_pmu return rc; } - /* Allocate memory to nvdimm_event_map */ - p->nvdimm_events_map = kcalloc(available_events, sizeof(char *), GFP_KERNEL); - if (!p->nvdimm_events_map) { - rc = -ENOMEM; - goto out_stats; - } - /* Called to get list of events supported */ rc = drc_pmem_query_stats(p, stats, 0); if 
(rc) - goto out_nvdimm_events_map; - - for (index = 0, stat = stats->scm_statistic, count = 0; - index < available_events; index++, ++stat) { - p->nvdimm_events_map[count] = kmemdup_nul(stat->stat_id, 8, GFP_KERNEL); - if (!p->nvdimm_events_map[count]) { - rc = -ENOMEM; - goto out_nvdimm_events_map; - } + goto out; - count++; + /* + * Allocate memory and populate nvdimm_event_map. + * Allocate an extra element for NULL entry + */ + p->nvdimm_events_map = kcalloc(available_events + 1, + sizeof(stat->stat_id), + GFP_KERNEL); + if (!p->nvdimm_events_map) { + rc = -ENOMEM; + goto out; } - p->nvdimm_events_map[count] = NULL; - kfree(stats); - return 0; -out_nvdimm_events_map: - kfree(p->nvdimm_events_map); -out_stats: + /* Copy all stat_ids to event map */ + for (index = 0, stat = stats->scm_statistic; + index < available_events; index++, ++stat) { + memcpy(&p->nvdimm_events_map[index * sizeof(stat->stat_id)], + &stat->stat_id, sizeof(stat->stat_id)); + } +out: kfree(stats); return rc; } diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c index 3b6800f774c2..6e671c3809ec 100644 --- a/arch/powerpc/platforms/pseries/pci.c +++ b/arch/powerpc/platforms/pseries/pci.c @@ -14,7 +14,6 @@ #include <asm/eeh.h> #include <asm/pci-bridge.h> -#include <asm/prom.h> #include <asm/ppc-pci.h> #include <asm/pci.h> #include "pseries.h" diff --git a/arch/powerpc/platforms/pseries/pmem.c b/arch/powerpc/platforms/pseries/pmem.c index 439ac72c2470..3c290b9ed01b 100644 --- a/arch/powerpc/platforms/pseries/pmem.c +++ b/arch/powerpc/platforms/pseries/pmem.c @@ -15,7 +15,6 @@ #include <linux/of.h> #include <linux/of_platform.h> #include <linux/slab.h> -#include <asm/prom.h> #include <asm/rtas.h> #include <asm/firmware.h> #include <asm/machdep.h> diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h index af162aeeae86..f5c916c839c9 100644 --- a/arch/powerpc/platforms/pseries/pseries.h +++ b/arch/powerpc/platforms/pseries/pseries.h @@ -38,6 +38,7 @@ static inline void smp_init_pseries(void) { } #endif extern void pseries_kexec_cpu_down(int crash_shutdown, int secondary); +void pseries_machine_kexec(struct kimage *image); extern void pSeries_final_fixup(void); diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c index 7f7369fec46b..cad7a0c93117 100644 --- a/arch/powerpc/platforms/pseries/reconfig.c +++ b/arch/powerpc/platforms/pseries/reconfig.c @@ -13,7 +13,6 @@ #include <linux/slab.h> #include <linux/of.h> -#include <asm/prom.h> #include <asm/machdep.h> #include <linux/uaccess.h> #include <asm/mmu.h> diff --git a/arch/powerpc/platforms/pseries/rtas-fadump.c b/arch/powerpc/platforms/pseries/rtas-fadump.c index 35f9cb602c30..b5853e9fcc3c 100644 --- a/arch/powerpc/platforms/pseries/rtas-fadump.c +++ b/arch/powerpc/platforms/pseries/rtas-fadump.c @@ -13,9 +13,10 @@ #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/crash_dump.h> +#include <linux/of.h> +#include <linux/of_fdt.h> #include <asm/page.h> -#include <asm/prom.h> #include <asm/rtas.h> #include <asm/fadump.h> #include <asm/fadump-internal.h> @@ -108,6 +109,12 @@ static u64 rtas_fadump_init_mem_struct(struct fw_dump *fadump_conf) fdm.hpte_region.destination_address = cpu_to_be64(addr); addr += fadump_conf->hpte_region_size; + /* + * Align boot memory area destination address to page boundary to + * be able to mmap read this area in the vmcore. 
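
The PAGE_ALIGN() added in the rtas-fadump.c hunk rounds the boot-memory destination up to the next page boundary, so the saved region can be mmap-read from the vmcore as the new comment says. The kernel macro reduces to standard round-up mask arithmetic; a standalone sketch assuming 64 KB pages (a common ppc64 configuration):

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE     0x10000ULL	/* assume 64KB pages for this example */
#define PAGE_ALIGN(a) (((a) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	uint64_t addr = 0x12345678ULL;

	/* Rounds up to the next page boundary; an aligned address stays put. */
	printf("0x%llx -> 0x%llx\n", (unsigned long long)addr,
	       (unsigned long long)PAGE_ALIGN(addr));
	printf("0x20000 -> 0x%llx\n", (unsigned long long)PAGE_ALIGN(0x20000ULL));
	return 0;
}
```
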
+ */ + addr = PAGE_ALIGN(addr); + /* RMA region section */ fdm.rmr_region.request_flag = cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG); fdm.rmr_region.source_data_type = @@ -351,7 +358,7 @@ static int __init rtas_fadump_build_cpu_notes(struct fw_dump *fadump_conf) /* Lower 4 bytes of reg_value contains logical cpu id */ cpu = (be64_to_cpu(reg_entry->reg_value) & RTAS_FADUMP_CPU_ID_MASK); - if (fdh && !cpumask_test_cpu(cpu, &fdh->online_mask)) { + if (fdh && !cpumask_test_cpu(cpu, &fdh->cpu_mask)) { RTAS_FADUMP_SKIP_TO_NEXT_CPU(reg_entry); continue; } @@ -462,10 +469,10 @@ static void rtas_fadump_region_show(struct fw_dump *fadump_conf, be64_to_cpu(fdm_ptr->rmr_region.source_len), be64_to_cpu(fdm_ptr->rmr_region.bytes_dumped)); - /* Dump is active. Show reserved area start address. */ + /* Dump is active. Show preserved area start address. */ if (fdm_active) { - seq_printf(m, "\nMemory above %#016lx is reserved for saving crash dump\n", - fadump_conf->reserve_dump_area_start); + seq_printf(m, "\nMemory above %#016llx is reserved for saving crash dump\n", + fadump_conf->boot_mem_top); } } diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 0f74b2284773..afb074269b42 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -36,6 +36,7 @@ #include <linux/seq_file.h> #include <linux/root_dev.h> #include <linux/of.h> +#include <linux/of_irq.h> #include <linux/of_pci.h> #include <linux/memblock.h> #include <linux/swiotlb.h> @@ -43,7 +44,6 @@ #include <asm/mmu.h> #include <asm/processor.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/rtas.h> #include <asm/pci-bridge.h> #include <asm/iommu.h> @@ -421,16 +421,6 @@ void pseries_disable_reloc_on_exc(void) } EXPORT_SYMBOL(pseries_disable_reloc_on_exc); -#ifdef CONFIG_KEXEC_CORE -static void pSeries_machine_kexec(struct kimage *image) -{ - if (firmware_has_feature(FW_FEATURE_SET_MODE)) - pseries_disable_reloc_on_exc(); - - default_machine_kexec(image); -} -#endif - #ifdef __LITTLE_ENDIAN__ void pseries_big_endian_exceptions(void) { @@ -658,7 +648,7 @@ static resource_size_t pseries_get_iov_fw_value(struct pci_dev *dev, int resno, */ num_res = of_read_number(&indexes[NUM_RES_PROPERTY], 1); if (resno >= num_res) - return 0; /* or an errror */ + return 0; /* or an error */ i = START_OF_ENTRIES + NEXT_ENTRY * resno; switch (value) { @@ -762,7 +752,7 @@ static void pseries_pci_fixup_iov_resources(struct pci_dev *pdev) if (!pdev->is_physfn) return; - /*Firmware must support open sriov otherwise dont configure*/ + /*Firmware must support open sriov otherwise don't configure*/ indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL); if (indexes) of_pci_parse_iov_addrs(pdev, indexes); @@ -1096,7 +1086,7 @@ define_machine(pseries) { .machine_check_exception = pSeries_machine_check_exception, .machine_check_log_err = pSeries_machine_check_log_err, #ifdef CONFIG_KEXEC_CORE - .machine_kexec = pSeries_machine_kexec, + .machine_kexec = pseries_machine_kexec, .kexec_cpu_down = pseries_kexec_cpu_down, #endif #ifdef CONFIG_MEMORY_HOTPLUG diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index f47429323eee..fd2174edfa1d 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c @@ -27,7 +27,6 @@ #include <asm/irq.h> #include <asm/page.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/smp.h> #include <asm/paca.h> #include <asm/machdep.h> diff --git 
a/arch/powerpc/platforms/pseries/vas-sysfs.c b/arch/powerpc/platforms/pseries/vas-sysfs.c index ec65586cbeb3..f9f682724e77 100644 --- a/arch/powerpc/platforms/pseries/vas-sysfs.c +++ b/arch/powerpc/platforms/pseries/vas-sysfs.c @@ -74,26 +74,26 @@ struct vas_sysfs_entry { /* * Create sysfs interface: - * /sys/devices/vas/vas0/gzip/default_capabilities + * /sys/devices/virtual/misc/vas/vas0/gzip/default_capabilities * This directory contains the following VAS GZIP capabilities - * for the defaule credit type. - * /sys/devices/vas/vas0/gzip/default_capabilities/nr_total_credits + * for the default credit type. + * /sys/devices/virtual/misc/vas/vas0/gzip/default_capabilities/nr_total_credits * Total number of default credits assigned to the LPAR which * can be changed with DLPAR operation. - * /sys/devices/vas/vas0/gzip/default_capabilities/nr_used_credits + * /sys/devices/virtual/misc/vas/vas0/gzip/default_capabilities/nr_used_credits * Number of credits used by the user space. One credit will * be assigned for each window open. * - * /sys/devices/vas/vas0/gzip/qos_capabilities + * /sys/devices/virtual/misc/vas/vas0/gzip/qos_capabilities * This directory contains the following VAS GZIP capabilities * for the Quality of Service (QoS) credit type. - * /sys/devices/vas/vas0/gzip/qos_capabilities/nr_total_credits + * /sys/devices/virtual/misc/vas/vas0/gzip/qos_capabilities/nr_total_credits * Total number of QoS credits assigned to the LPAR. The user * has to define this value using HMC interface. It can be * changed dynamically by the user. - * /sys/devices/vas/vas0/gzip/qos_capabilities/nr_used_credits + * /sys/devices/virtual/misc/vas/vas0/gzip/qos_capabilities/nr_used_credits * Number of credits used by the user space. - * /sys/devices/vas/vas0/gzip/qos_capabilities/update_total_credits + * /sys/devices/virtual/misc/vas/vas0/gzip/qos_capabilities/update_total_credits * Update total QoS credits dynamically */ @@ -248,6 +248,7 @@ int __init sysfs_pseries_vas_init(struct vas_all_caps *vas_caps) pseries_vas_kobj = kobject_create_and_add("vas0", &vas_miscdev.this_device->kobj); if (!pseries_vas_kobj) { + misc_deregister(&vas_miscdev); pr_err("Failed to create VAS sysfs entry\n"); return -ENOMEM; } @@ -259,6 +260,7 @@ int __init sysfs_pseries_vas_init(struct vas_all_caps *vas_caps) if (!gzip_caps_kobj) { pr_err("Failed to create VAS GZIP capability entry\n"); kobject_put(pseries_vas_kobj); + misc_deregister(&vas_miscdev); return -ENOMEM; } } diff --git a/arch/powerpc/platforms/pseries/vas.c b/arch/powerpc/platforms/pseries/vas.c index ec643bbdb67f..500a1fc4a1d7 100644 --- a/arch/powerpc/platforms/pseries/vas.c +++ b/arch/powerpc/platforms/pseries/vas.c @@ -801,7 +801,7 @@ int vas_reconfig_capabilties(u8 type, int new_nr_creds) atomic_set(&caps->nr_total_credits, new_nr_creds); /* * The total number of available credits may be decreased or - * inceased with DLPAR operation. Means some windows have to be + * increased with DLPAR operation. Means some windows have to be * closed / reopened. Hold the vas_pseries_mutex so that the * the user space can not open new windows. 
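
The two misc_deregister() calls added in the vas-sysfs.c hunk above complete the unwind chain: once kobject creation fails, everything registered earlier in sysfs_pseries_vas_init() must be torn down in reverse order, or the half-initialized misc device leaks. The canonical shape of that pattern, sketched with stub setup/teardown steps in place of misc_register()/kobject_create_and_add():

```c
#include <stdio.h>

/* Stub setup/teardown steps standing in for the real registration calls. */
static int  step_a_init(void) { puts("A up");   return 0; }
static void step_a_exit(void) { puts("A down"); }
static int  step_b_init(void) { puts("B up");   return -1; /* simulate failure */ }

static int init_all(void)
{
	int rc = step_a_init();
	if (rc)
		return rc;

	rc = step_b_init();
	if (rc)
		goto undo_a;	/* unwind everything B depended on */

	return 0;

undo_a:
	step_a_exit();
	return rc;
}

int main(void)
{
	printf("init_all() = %d\n", init_all());
	return 0;
}
```
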
*/ diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c index c9f9be4ea26a..00ecac2c205b 100644 --- a/arch/powerpc/platforms/pseries/vio.c +++ b/arch/powerpc/platforms/pseries/vio.c @@ -23,6 +23,7 @@ #include <linux/dma-map-ops.h> #include <linux/kobject.h> #include <linux/kexec.h> +#include <linux/of_irq.h> #include <asm/iommu.h> #include <asm/dma.h> diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile index 026b3f01a991..9cb1d029511a 100644 --- a/arch/powerpc/sysdev/Makefile +++ b/arch/powerpc/sysdev/Makefile @@ -23,7 +23,6 @@ obj-$(CONFIG_FSL_PMC) += fsl_pmc.o obj-$(CONFIG_FSL_CORENET_RCPM) += fsl_rcpm.o obj-$(CONFIG_FSL_LBC) += fsl_lbc.o obj-$(CONFIG_FSL_GTM) += fsl_gtm.o -obj-$(CONFIG_FSL_85XX_CACHE_SRAM) += fsl_85xx_l2ctlr.o fsl_85xx_cache_sram.o obj-$(CONFIG_FSL_RIO) += fsl_rio.o fsl_rmu.o obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o obj-$(CONFIG_RTC_DRV_CMOS) += rtc_cmos_setup.o diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c index 9e86074719a9..cb9ba4ef557a 100644 --- a/arch/powerpc/sysdev/cpm2_pic.c +++ b/arch/powerpc/sysdev/cpm2_pic.c @@ -30,11 +30,11 @@ #include <linux/sched.h> #include <linux/signal.h> #include <linux/irq.h> +#include <linux/irqdomain.h> #include <asm/immap_cpm2.h> #include <asm/mpc8260.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/fs_pd.h> #include "cpm2_pic.h" diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c index be6b99b1b352..98096bbfd62e 100644 --- a/arch/powerpc/sysdev/dart_iommu.c +++ b/arch/powerpc/sysdev/dart_iommu.c @@ -25,8 +25,8 @@ #include <linux/memblock.h> #include <linux/gfp.h> #include <linux/kmemleak.h> +#include <linux/of_address.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/iommu.h> #include <asm/pci-bridge.h> #include <asm/machdep.h> @@ -404,9 +404,10 @@ void __init iommu_init_early_dart(struct pci_controller_ops *controller_ops) } /* Initialize the DART HW */ - if (dart_init(dn) != 0) + if (dart_init(dn) != 0) { + of_node_put(dn); return; - + } /* * U4 supports a DART bypass, we use it for 64-bit capable devices to * improve performance. 
However, that only works for devices connected @@ -419,6 +420,7 @@ void __init iommu_init_early_dart(struct pci_controller_ops *controller_ops) /* Setup pci_dma ops */ set_pci_dma_ops(&dma_iommu_ops); + of_node_put(dn); } #ifdef CONFIG_PM diff --git a/arch/powerpc/sysdev/dcr.c b/arch/powerpc/sysdev/dcr.c index 22991e1128e3..3093f14111e6 100644 --- a/arch/powerpc/sysdev/dcr.c +++ b/arch/powerpc/sysdev/dcr.c @@ -8,7 +8,7 @@ #include <linux/kernel.h> #include <linux/export.h> -#include <asm/prom.h> +#include <linux/of_address.h> #include <asm/dcr.h> #ifdef CONFIG_PPC_DCR_MMIO diff --git a/arch/powerpc/sysdev/fsl_85xx_cache_ctlr.h b/arch/powerpc/sysdev/fsl_85xx_cache_ctlr.h deleted file mode 100644 index ce370749add9..000000000000 --- a/arch/powerpc/sysdev/fsl_85xx_cache_ctlr.h +++ /dev/null @@ -1,88 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright 2009-2010, 2012 Freescale Semiconductor, Inc - * - * QorIQ based Cache Controller Memory Mapped Registers - * - * Author: Vivek Mahajan <vivek.mahajan@freescale.com> - */ - -#ifndef __FSL_85XX_CACHE_CTLR_H__ -#define __FSL_85XX_CACHE_CTLR_H__ - -#define L2CR_L2FI 0x40000000 /* L2 flash invalidate */ -#define L2CR_L2IO 0x00200000 /* L2 instruction only */ -#define L2CR_SRAM_ZERO 0x00000000 /* L2SRAM zero size */ -#define L2CR_SRAM_FULL 0x00010000 /* L2SRAM full size */ -#define L2CR_SRAM_HALF 0x00020000 /* L2SRAM half size */ -#define L2CR_SRAM_TWO_HALFS 0x00030000 /* L2SRAM two half sizes */ -#define L2CR_SRAM_QUART 0x00040000 /* L2SRAM one quarter size */ -#define L2CR_SRAM_TWO_QUARTS 0x00050000 /* L2SRAM two quarter size */ -#define L2CR_SRAM_EIGHTH 0x00060000 /* L2SRAM one eighth size */ -#define L2CR_SRAM_TWO_EIGHTH 0x00070000 /* L2SRAM two eighth size */ - -#define L2SRAM_OPTIMAL_SZ_SHIFT 0x00000003 /* Optimum size for L2SRAM */ - -#define L2SRAM_BAR_MSK_LO18 0xFFFFC000 /* Lower 18 bits */ -#define L2SRAM_BARE_MSK_HI4 0x0000000F /* Upper 4 bits */ - -enum cache_sram_lock_ways { - LOCK_WAYS_ZERO, - LOCK_WAYS_EIGHTH, - LOCK_WAYS_TWO_EIGHTH, - LOCK_WAYS_HALF = 4, - LOCK_WAYS_FULL = 8, -}; - -struct mpc85xx_l2ctlr { - u32 ctl; /* 0x000 - L2 control */ - u8 res1[0xC]; - u32 ewar0; /* 0x010 - External write address 0 */ - u32 ewarea0; /* 0x014 - External write address extended 0 */ - u32 ewcr0; /* 0x018 - External write ctrl */ - u8 res2[4]; - u32 ewar1; /* 0x020 - External write address 1 */ - u32 ewarea1; /* 0x024 - External write address extended 1 */ - u32 ewcr1; /* 0x028 - External write ctrl 1 */ - u8 res3[4]; - u32 ewar2; /* 0x030 - External write address 2 */ - u32 ewarea2; /* 0x034 - External write address extended 2 */ - u32 ewcr2; /* 0x038 - External write ctrl 2 */ - u8 res4[4]; - u32 ewar3; /* 0x040 - External write address 3 */ - u32 ewarea3; /* 0x044 - External write address extended 3 */ - u32 ewcr3; /* 0x048 - External write ctrl 3 */ - u8 res5[0xB4]; - u32 srbar0; /* 0x100 - SRAM base address 0 */ - u32 srbarea0; /* 0x104 - SRAM base addr reg ext address 0 */ - u32 srbar1; /* 0x108 - SRAM base address 1 */ - u32 srbarea1; /* 0x10C - SRAM base addr reg ext address 1 */ - u8 res6[0xCF0]; - u32 errinjhi; /* 0xE00 - Error injection mask high */ - u32 errinjlo; /* 0xE04 - Error injection mask low */ - u32 errinjctl; /* 0xE08 - Error injection tag/ecc control */ - u8 res7[0x14]; - u32 captdatahi; /* 0xE20 - Error data high capture */ - u32 captdatalo; /* 0xE24 - Error data low capture */ - u32 captecc; /* 0xE28 - Error syndrome */ - u8 res8[0x14]; - u32 errdet; /* 0xE40 - Error detect */ - u32 errdis; /* 0xE44 - 
Error disable */ - u32 errinten; /* 0xE48 - Error interrupt enable */ - u32 errattr; /* 0xE4c - Error attribute capture */ - u32 erradrrl; /* 0xE50 - Error address capture low */ - u32 erradrrh; /* 0xE54 - Error address capture high */ - u32 errctl; /* 0xE58 - Error control */ - u8 res9[0x1A4]; -}; - -struct sram_parameters { - unsigned int sram_size; - phys_addr_t sram_offset; -}; - -extern int instantiate_cache_sram(struct platform_device *dev, - struct sram_parameters sram_params); -extern void remove_cache_sram(struct platform_device *dev); - -#endif /* __FSL_85XX_CACHE_CTLR_H__ */ diff --git a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c b/arch/powerpc/sysdev/fsl_85xx_cache_sram.c deleted file mode 100644 index a3aeaa5f0f1b..000000000000 --- a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c +++ /dev/null @@ -1,147 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2009-2010 Freescale Semiconductor, Inc. - * - * Simple memory allocator abstraction for QorIQ (P1/P2) based Cache-SRAM - * - * Author: Vivek Mahajan <vivek.mahajan@freescale.com> - * - * This file is derived from the original work done - * by Sylvain Munaut for the Bestcomm SRAM allocator. - */ - -#include <linux/kernel.h> -#include <linux/export.h> -#include <linux/slab.h> -#include <linux/err.h> -#include <linux/of_platform.h> -#include <linux/pgtable.h> -#include <asm/fsl_85xx_cache_sram.h> - -#include "fsl_85xx_cache_ctlr.h" - -struct mpc85xx_cache_sram *cache_sram; - -void *mpc85xx_cache_sram_alloc(unsigned int size, - phys_addr_t *phys, unsigned int align) -{ - unsigned long offset; - unsigned long flags; - - if (unlikely(cache_sram == NULL)) - return NULL; - - if (!size || (size > cache_sram->size) || (align > cache_sram->size)) { - pr_err("%s(): size(=%x) or align(=%x) zero or too big\n", - __func__, size, align); - return NULL; - } - - if ((align & (align - 1)) || align <= 1) { - pr_err("%s(): align(=%x) must be power of two and >1\n", - __func__, align); - return NULL; - } - - spin_lock_irqsave(&cache_sram->lock, flags); - offset = rh_alloc_align(cache_sram->rh, size, align, NULL); - spin_unlock_irqrestore(&cache_sram->lock, flags); - - if (IS_ERR_VALUE(offset)) - return NULL; - - *phys = cache_sram->base_phys + offset; - - return (unsigned char *)cache_sram->base_virt + offset; -} -EXPORT_SYMBOL(mpc85xx_cache_sram_alloc); - -void mpc85xx_cache_sram_free(void *ptr) -{ - unsigned long flags; - BUG_ON(!ptr); - - spin_lock_irqsave(&cache_sram->lock, flags); - rh_free(cache_sram->rh, ptr - cache_sram->base_virt); - spin_unlock_irqrestore(&cache_sram->lock, flags); -} -EXPORT_SYMBOL(mpc85xx_cache_sram_free); - -int __init instantiate_cache_sram(struct platform_device *dev, - struct sram_parameters sram_params) -{ - int ret = 0; - - if (cache_sram) { - dev_err(&dev->dev, "Already initialized cache-sram\n"); - return -EBUSY; - } - - cache_sram = kzalloc(sizeof(struct mpc85xx_cache_sram), GFP_KERNEL); - if (!cache_sram) { - dev_err(&dev->dev, "Out of memory for cache_sram structure\n"); - return -ENOMEM; - } - - cache_sram->base_phys = sram_params.sram_offset; - cache_sram->size = sram_params.sram_size; - - if (!request_mem_region(cache_sram->base_phys, cache_sram->size, - "fsl_85xx_cache_sram")) { - dev_err(&dev->dev, "%pOF: request memory failed\n", - dev->dev.of_node); - ret = -ENXIO; - goto out_free; - } - - cache_sram->base_virt = ioremap_coherent(cache_sram->base_phys, - cache_sram->size); - if (!cache_sram->base_virt) { - dev_err(&dev->dev, "%pOF: ioremap_coherent failed\n", - dev->dev.of_node); - ret = 
-ENOMEM; - goto out_release; - } - - cache_sram->rh = rh_create(sizeof(unsigned int)); - if (IS_ERR(cache_sram->rh)) { - dev_err(&dev->dev, "%pOF: Unable to create remote heap\n", - dev->dev.of_node); - ret = PTR_ERR(cache_sram->rh); - goto out_unmap; - } - - rh_attach_region(cache_sram->rh, 0, cache_sram->size); - spin_lock_init(&cache_sram->lock); - - dev_info(&dev->dev, "[base:0x%llx, size:0x%x] configured and loaded\n", - (unsigned long long)cache_sram->base_phys, cache_sram->size); - - return 0; - -out_unmap: - iounmap(cache_sram->base_virt); - -out_release: - release_mem_region(cache_sram->base_phys, cache_sram->size); - -out_free: - kfree(cache_sram); - return ret; -} - -void remove_cache_sram(struct platform_device *dev) -{ - BUG_ON(!cache_sram); - - rh_detach_region(cache_sram->rh, 0, cache_sram->size); - rh_destroy(cache_sram->rh); - - iounmap(cache_sram->base_virt); - release_mem_region(cache_sram->base_phys, cache_sram->size); - - kfree(cache_sram); - cache_sram = NULL; - - dev_info(&dev->dev, "MPC85xx Cache-SRAM driver unloaded\n"); -} diff --git a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c deleted file mode 100644 index 2d0af0c517bb..000000000000 --- a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c +++ /dev/null @@ -1,216 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2009-2010, 2012 Freescale Semiconductor, Inc. - * - * QorIQ (P1/P2) L2 controller init for Cache-SRAM instantiation - * - * Author: Vivek Mahajan <vivek.mahajan@freescale.com> - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/of_platform.h> -#include <asm/io.h> - -#include "fsl_85xx_cache_ctlr.h" - -static char *sram_size; -static char *sram_offset; -struct mpc85xx_l2ctlr __iomem *l2ctlr; - -static int get_cache_sram_params(struct sram_parameters *sram_params) -{ - unsigned long long addr; - unsigned int size; - - if (!sram_size || (kstrtouint(sram_size, 0, &size) < 0)) - return -EINVAL; - - if (!sram_offset || (kstrtoull(sram_offset, 0, &addr) < 0)) - return -EINVAL; - - sram_params->sram_offset = addr; - sram_params->sram_size = size; - - return 0; -} - -static int __init get_size_from_cmdline(char *str) -{ - if (!str) - return 0; - - sram_size = str; - return 1; -} - -static int __init get_offset_from_cmdline(char *str) -{ - if (!str) - return 0; - - sram_offset = str; - return 1; -} - -__setup("cache-sram-size=", get_size_from_cmdline); -__setup("cache-sram-offset=", get_offset_from_cmdline); - -static int mpc85xx_l2ctlr_of_probe(struct platform_device *dev) -{ - long rval; - unsigned int rem; - unsigned char ways; - const unsigned int *prop; - unsigned int l2cache_size; - struct sram_parameters sram_params; - - if (!dev->dev.of_node) { - dev_err(&dev->dev, "Device's OF-node is NULL\n"); - return -EINVAL; - } - - prop = of_get_property(dev->dev.of_node, "cache-size", NULL); - if (!prop) { - dev_err(&dev->dev, "Missing L2 cache-size\n"); - return -EINVAL; - } - l2cache_size = *prop; - - if (get_cache_sram_params(&sram_params)) - return 0; /* fall back to L2 cache only */ - - rem = l2cache_size % sram_params.sram_size; - ways = LOCK_WAYS_FULL * sram_params.sram_size / l2cache_size; - if (rem || (ways & (ways - 1))) { - dev_err(&dev->dev, "Illegal cache-sram-size in command line\n"); - return -EINVAL; - } - - l2ctlr = of_iomap(dev->dev.of_node, 0); - if (!l2ctlr) { - dev_err(&dev->dev, "Can't map L2 controller\n"); - return -EINVAL; - } - - /* - * Write bits[0-17] to srbar0 - */ - out_be32(&l2ctlr->srbar0, - 
lower_32_bits(sram_params.sram_offset) & L2SRAM_BAR_MSK_LO18); - - /* - * Write bits[18-21] to srbare0 - */ -#ifdef CONFIG_PHYS_64BIT - out_be32(&l2ctlr->srbarea0, - upper_32_bits(sram_params.sram_offset) & L2SRAM_BARE_MSK_HI4); -#endif - - clrsetbits_be32(&l2ctlr->ctl, L2CR_L2E, L2CR_L2FI); - - switch (ways) { - case LOCK_WAYS_EIGHTH: - setbits32(&l2ctlr->ctl, - L2CR_L2E | L2CR_L2FI | L2CR_SRAM_EIGHTH); - break; - - case LOCK_WAYS_TWO_EIGHTH: - setbits32(&l2ctlr->ctl, - L2CR_L2E | L2CR_L2FI | L2CR_SRAM_QUART); - break; - - case LOCK_WAYS_HALF: - setbits32(&l2ctlr->ctl, - L2CR_L2E | L2CR_L2FI | L2CR_SRAM_HALF); - break; - - case LOCK_WAYS_FULL: - default: - setbits32(&l2ctlr->ctl, - L2CR_L2E | L2CR_L2FI | L2CR_SRAM_FULL); - break; - } - eieio(); - - rval = instantiate_cache_sram(dev, sram_params); - if (rval < 0) { - dev_err(&dev->dev, "Can't instantiate Cache-SRAM\n"); - iounmap(l2ctlr); - return -EINVAL; - } - - return 0; -} - -static int mpc85xx_l2ctlr_of_remove(struct platform_device *dev) -{ - BUG_ON(!l2ctlr); - - iounmap(l2ctlr); - remove_cache_sram(dev); - dev_info(&dev->dev, "MPC85xx L2 controller unloaded\n"); - - return 0; -} - -static const struct of_device_id mpc85xx_l2ctlr_of_match[] = { - { - .compatible = "fsl,p2020-l2-cache-controller", - }, - { - .compatible = "fsl,p2010-l2-cache-controller", - }, - { - .compatible = "fsl,p1020-l2-cache-controller", - }, - { - .compatible = "fsl,p1011-l2-cache-controller", - }, - { - .compatible = "fsl,p1013-l2-cache-controller", - }, - { - .compatible = "fsl,p1022-l2-cache-controller", - }, - { - .compatible = "fsl,mpc8548-l2-cache-controller", - }, - { .compatible = "fsl,mpc8544-l2-cache-controller",}, - { .compatible = "fsl,mpc8572-l2-cache-controller",}, - { .compatible = "fsl,mpc8536-l2-cache-controller",}, - { .compatible = "fsl,p1021-l2-cache-controller",}, - { .compatible = "fsl,p1012-l2-cache-controller",}, - { .compatible = "fsl,p1025-l2-cache-controller",}, - { .compatible = "fsl,p1016-l2-cache-controller",}, - { .compatible = "fsl,p1024-l2-cache-controller",}, - { .compatible = "fsl,p1015-l2-cache-controller",}, - { .compatible = "fsl,p1010-l2-cache-controller",}, - { .compatible = "fsl,bsc9131-l2-cache-controller",}, - {}, -}; - -static struct platform_driver mpc85xx_l2ctlr_of_platform_driver = { - .driver = { - .name = "fsl-l2ctlr", - .of_match_table = mpc85xx_l2ctlr_of_match, - }, - .probe = mpc85xx_l2ctlr_of_probe, - .remove = mpc85xx_l2ctlr_of_remove, -}; - -static __init int mpc85xx_l2ctlr_of_init(void) -{ - return platform_driver_register(&mpc85xx_l2ctlr_of_platform_driver); -} - -static void __exit mpc85xx_l2ctlr_of_exit(void) -{ - platform_driver_unregister(&mpc85xx_l2ctlr_of_platform_driver); -} - -subsys_initcall(mpc85xx_l2ctlr_of_init); -module_exit(mpc85xx_l2ctlr_of_exit); - -MODULE_DESCRIPTION("Freescale MPC85xx L2 controller init"); -MODULE_LICENSE("GPL v2"); diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c index 1985e067e952..217cea150987 100644 --- a/arch/powerpc/sysdev/fsl_lbc.c +++ b/arch/powerpc/sysdev/fsl_lbc.c @@ -18,13 +18,14 @@ #include <linux/types.h> #include <linux/io.h> #include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/mod_devicetable.h> #include <linux/syscore_ops.h> -#include <asm/prom.h> #include <asm/fsl_lbc.h> static DEFINE_SPINLOCK(fsl_lbc_lock); @@ -37,7 +38,7 @@ EXPORT_SYMBOL(fsl_lbc_ctrl_dev); * * This 
function converts a base address of lbc into the right format for the * BR register. If the SOC has eLBC then it returns 32bit physical address - * else it convers a 34bit local bus physical address to correct format of + * else it converts a 34bit local bus physical address to correct format of * 32bit address for BR register (Example: MPC8641). */ u32 fsl_lbc_addr(phys_addr_t addr_base) diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index b3475ae9f236..ef9a5999fa93 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c @@ -11,11 +11,13 @@ #include <linux/msi.h> #include <linux/pci.h> #include <linux/slab.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/interrupt.h> +#include <linux/irqdomain.h> #include <linux/seq_file.h> #include <sysdev/fsl_soc.h> -#include <asm/prom.h> #include <asm/hw_irq.h> #include <asm/ppc-pci.h> #include <asm/mpic.h> diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c index a97ce602394e..1011cfea2e32 100644 --- a/arch/powerpc/sysdev/fsl_pci.c +++ b/arch/powerpc/sysdev/fsl_pci.c @@ -22,6 +22,8 @@ #include <linux/interrupt.h> #include <linux/memblock.h> #include <linux/log2.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/suspend.h> @@ -29,7 +31,6 @@ #include <linux/uaccess.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/pci-bridge.h> #include <asm/ppc-pci.h> #include <asm/machdep.h> @@ -218,7 +219,7 @@ static void setup_pci_atmu(struct pci_controller *hose) * windows have implemented the default target value as 0xf * for CCSR space.In all Freescale legacy devices the target * of 0xf is reserved for local memory space. 9132 Rev1.0 - * now has local mempry space mapped to target 0x0 instead of + * now has local memory space mapped to target 0x0 instead of * 0xf. Hence adding a workaround to remove the target 0xf * defined for memory space from Inbound window attributes. 
*/ diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c index ff7906b48ca1..1bfc9afa8a1a 100644 --- a/arch/powerpc/sysdev/fsl_rio.c +++ b/arch/powerpc/sysdev/fsl_rio.c @@ -505,8 +505,10 @@ int fsl_rio_setup(struct platform_device *dev) if (rc) { dev_err(&dev->dev, "Can't get %pOF property 'reg'\n", rmu_node); + of_node_put(rmu_node); goto err_rmu; } + of_node_put(rmu_node); rmu_regs_win = ioremap(rmu_regs.start, resource_size(&rmu_regs)); if (!rmu_regs_win) { dev_err(&dev->dev, "Unable to map rmu register window\n"); diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c index 90ad16161604..78118c188993 100644 --- a/arch/powerpc/sysdev/fsl_soc.c +++ b/arch/powerpc/sysdev/fsl_soc.c @@ -31,7 +31,6 @@ #include <asm/io.h> #include <asm/irq.h> #include <asm/time.h> -#include <asm/prom.h> #include <asm/machdep.h> #include <sysdev/fsl_soc.h> #include <mm/mmu_decl.h> diff --git a/arch/powerpc/sysdev/ge/ge_pic.c b/arch/powerpc/sysdev/ge/ge_pic.c index 02553a8ce191..a6c424680c37 100644 --- a/arch/powerpc/sysdev/ge/ge_pic.c +++ b/arch/powerpc/sysdev/ge/ge_pic.c @@ -14,12 +14,14 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/irq.h> +#include <linux/irqdomain.h> #include <linux/interrupt.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/spinlock.h> #include <asm/byteorder.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/irq.h> #include "ge_pic.h" @@ -150,7 +152,7 @@ static struct irq_chip gef_pic_chip = { }; -/* When an interrupt is being configured, this call allows some flexibilty +/* When an interrupt is being configured, this call allows some flexibility * in deciding which irq_chip structure is used */ static int gef_pic_host_map(struct irq_domain *h, unsigned int virq, diff --git a/arch/powerpc/sysdev/grackle.c b/arch/powerpc/sysdev/grackle.c index aaba0b809032..fd2f94a884f0 100644 --- a/arch/powerpc/sysdev/grackle.c +++ b/arch/powerpc/sysdev/grackle.c @@ -9,9 +9,9 @@ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/init.h> +#include <linux/of.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/pci-bridge.h> #include <asm/grackle.h> diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c index 3b1ae98e3ce9..06e391485da7 100644 --- a/arch/powerpc/sysdev/i8259.c +++ b/arch/powerpc/sysdev/i8259.c @@ -6,11 +6,11 @@ #include <linux/ioport.h> #include <linux/interrupt.h> +#include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/delay.h> #include <asm/io.h> #include <asm/i8259.h> -#include <asm/prom.h> static volatile void __iomem *pci_intack; /* RO, gives us the irq vector */ diff --git a/arch/powerpc/sysdev/indirect_pci.c b/arch/powerpc/sysdev/indirect_pci.c index 09b36617425e..1aacb403a010 100644 --- a/arch/powerpc/sysdev/indirect_pci.c +++ b/arch/powerpc/sysdev/indirect_pci.c @@ -12,7 +12,6 @@ #include <linux/init.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/pci-bridge.h> #include <asm/machdep.h> diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c index 3f10c9fc3b68..5f69e2d50f26 100644 --- a/arch/powerpc/sysdev/ipic.c +++ b/arch/powerpc/sysdev/ipic.c @@ -18,9 +18,10 @@ #include <linux/device.h> #include <linux/spinlock.h> #include <linux/fsl_devices.h> +#include <linux/irqdomain.h> +#include <linux/of_address.h> #include <asm/irq.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/ipic.h> #include "ipic.h" diff --git a/arch/powerpc/sysdev/mmio_nvram.c b/arch/powerpc/sysdev/mmio_nvram.c index 
628f9b759c84..eb48210ef98e 100644 --- a/arch/powerpc/sysdev/mmio_nvram.c +++ b/arch/powerpc/sysdev/mmio_nvram.c @@ -10,12 +10,12 @@ #include <linux/fs.h> #include <linux/init.h> #include <linux/kernel.h> +#include <linux/of_address.h> #include <linux/spinlock.h> #include <linux/types.h> #include <asm/machdep.h> #include <asm/nvram.h> -#include <asm/prom.h> static void __iomem *mmio_nvram_start; static long mmio_nvram_len; diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index dbcbaa4c0663..9a9381f102d6 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c @@ -30,6 +30,8 @@ #include <linux/syscore_ops.h> #include <linux/ratelimit.h> #include <linux/pgtable.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <asm/ptrace.h> #include <asm/signal.h> diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c index 36ec0bdd8b63..698fefaaa6dd 100644 --- a/arch/powerpc/sysdev/mpic_msgr.c +++ b/arch/powerpc/sysdev/mpic_msgr.c @@ -7,12 +7,13 @@ */ #include <linux/list.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/export.h> #include <linux/slab.h> -#include <asm/prom.h> #include <asm/hw_irq.h> #include <asm/ppc-pci.h> #include <asm/mpic_msgr.h> @@ -99,7 +100,7 @@ void mpic_msgr_disable(struct mpic_msgr *msgr) EXPORT_SYMBOL_GPL(mpic_msgr_disable); /* The following three functions are used to compute the order and number of - * the message register blocks. They are clearly very inefficent. However, + * the message register blocks. They are clearly very inefficient. However, * they are called *only* a few times during device initialization. */ static unsigned int mpic_msgr_number_of_blocks(void) diff --git a/arch/powerpc/sysdev/mpic_msi.c b/arch/powerpc/sysdev/mpic_msi.c index f412d6ad0b66..34246c8e01c2 100644 --- a/arch/powerpc/sysdev/mpic_msi.c +++ b/arch/powerpc/sysdev/mpic_msi.c @@ -4,10 +4,11 @@ */ #include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/of_irq.h> #include <linux/bitmap.h> #include <linux/msi.h> #include <asm/mpic.h> -#include <asm/prom.h> #include <asm/hw_irq.h> #include <asm/ppc-pci.h> #include <asm/msi_bitmap.h> @@ -37,7 +38,7 @@ static int __init mpic_msi_reserve_u3_hwirqs(struct mpic *mpic) /* Reserve source numbers we know are reserved in the HW. * * This is a bit of a mix of U3 and U4 reserves but that's going - * to work fine, we have plenty enugh numbers left so let's just + * to work fine, we have plenty enough numbers left so let's just * mark anything we don't like reserved. */ for (i = 0; i < 8; i++) diff --git a/arch/powerpc/sysdev/mpic_timer.c b/arch/powerpc/sysdev/mpic_timer.c index 444e9ce42d0a..b2f0a73e8f93 100644 --- a/arch/powerpc/sysdev/mpic_timer.c +++ b/arch/powerpc/sysdev/mpic_timer.c @@ -255,7 +255,7 @@ EXPORT_SYMBOL(mpic_start_timer); /** * mpic_stop_timer - stop hardware timer - * @handle: the timer to be stoped + * @handle: the timer to be stopped * * The timer periodically generates an interrupt. Unless user stops the timer. 
*/ diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c index 3f4841dfefb5..1d8cfdfdf115 100644 --- a/arch/powerpc/sysdev/mpic_u3msi.c +++ b/arch/powerpc/sysdev/mpic_u3msi.c @@ -5,9 +5,9 @@ */ #include <linux/irq.h> +#include <linux/irqdomain.h> #include <linux/msi.h> #include <asm/mpic.h> -#include <asm/prom.h> #include <asm/hw_irq.h> #include <asm/ppc-pci.h> #include <asm/msi_bitmap.h> @@ -78,7 +78,7 @@ static u64 find_u4_magic_addr(struct pci_dev *pdev, unsigned int hwirq) /* U4 PCIe MSIs need to write to the special register in * the bridge that generates interrupts. There should be - * theorically a register at 0xf8005000 where you just write + * theoretically a register at 0xf8005000 where you just write * the MSI number and that triggers the right interrupt, but * unfortunately, this is busted in HW, the bridge endian swaps * the value and hits the wrong nibble in the register. diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c index fdd3e17150fc..0b6e37f3ffb8 100644 --- a/arch/powerpc/sysdev/msi_bitmap.c +++ b/arch/powerpc/sysdev/msi_bitmap.c @@ -8,6 +8,7 @@ #include <linux/kmemleak.h> #include <linux/bitmap.h> #include <linux/memblock.h> +#include <linux/of.h> #include <asm/msi_bitmap.h> #include <asm/setup.h> diff --git a/arch/powerpc/sysdev/pmi.c b/arch/powerpc/sysdev/pmi.c index 9c8744e09a9c..9dabb50c36eb 100644 --- a/arch/powerpc/sysdev/pmi.c +++ b/arch/powerpc/sysdev/pmi.c @@ -17,12 +17,13 @@ #include <linux/spinlock.h> #include <linux/module.h> #include <linux/workqueue.h> +#include <linux/of_address.h> #include <linux/of_device.h> +#include <linux/of_irq.h> #include <linux/of_platform.h> #include <asm/io.h> #include <asm/pmi.h> -#include <asm/prom.h> struct pmi_data { struct list_head handler; diff --git a/arch/powerpc/sysdev/rtc_cmos_setup.c b/arch/powerpc/sysdev/rtc_cmos_setup.c index af0f9beddca9..47cc87bd6a33 100644 --- a/arch/powerpc/sysdev/rtc_cmos_setup.c +++ b/arch/powerpc/sysdev/rtc_cmos_setup.c @@ -14,8 +14,8 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/mc146818rtc.h> +#include <linux/of_address.h> -#include <asm/prom.h> static int __init add_rtc(void) { diff --git a/arch/powerpc/sysdev/tsi108_dev.c b/arch/powerpc/sysdev/tsi108_dev.c index 9e13fb35ed5c..30051397292f 100644 --- a/arch/powerpc/sysdev/tsi108_dev.c +++ b/arch/powerpc/sysdev/tsi108_dev.c @@ -16,13 +16,14 @@ #include <linux/device.h> #include <linux/etherdevice.h> #include <linux/platform_device.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/of_net.h> #include <asm/tsi108.h> #include <linux/atomic.h> #include <asm/io.h> #include <asm/irq.h> -#include <asm/prom.h> #include <mm/mmu_decl.h> #undef DEBUG diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c index 1070220f15d5..5af4c35ff584 100644 --- a/arch/powerpc/sysdev/tsi108_pci.c +++ b/arch/powerpc/sysdev/tsi108_pci.c @@ -12,7 +12,9 @@ #include <linux/init.h> #include <linux/pci.h> #include <linux/irq.h> +#include <linux/irqdomain.h> #include <linux/interrupt.h> +#include <linux/of_address.h> #include <asm/byteorder.h> #include <asm/io.h> @@ -23,7 +25,6 @@ #include <asm/tsi108.h> #include <asm/tsi108_pci.h> #include <asm/tsi108_irq.h> -#include <asm/prom.h> #undef DEBUG #ifdef DEBUG diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c index 7d13d2ef5a90..edc17b6b1cc2 100644 --- a/arch/powerpc/sysdev/xics/icp-native.c +++ b/arch/powerpc/sysdev/xics/icp-native.c @@ -6,15 +6,16 @@ 
#include <linux/types.h> #include <linux/kernel.h> #include <linux/irq.h> +#include <linux/irqdomain.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/cpu.h> #include <linux/of.h> +#include <linux/of_address.h> #include <linux/spinlock.h> #include <linux/module.h> -#include <asm/prom.h> #include <asm/io.h> #include <asm/smp.h> #include <asm/irq.h> diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c index bda4c32582d9..4dae624b9f2f 100644 --- a/arch/powerpc/sysdev/xics/icp-opal.c +++ b/arch/powerpc/sysdev/xics/icp-opal.c @@ -196,6 +196,7 @@ int __init icp_opal_init(void) printk("XICS: Using OPAL ICP fallbacks\n"); + of_node_put(np); return 0; } diff --git a/arch/powerpc/sysdev/xics/ics-native.c b/arch/powerpc/sysdev/xics/ics-native.c index dec7d93a8ba1..112c8a1e8159 100644 --- a/arch/powerpc/sysdev/xics/ics-native.c +++ b/arch/powerpc/sysdev/xics/ics-native.c @@ -15,11 +15,11 @@ #include <linux/init.h> #include <linux/cpu.h> #include <linux/of.h> +#include <linux/of_address.h> #include <linux/spinlock.h> #include <linux/msi.h> #include <linux/list.h> -#include <asm/prom.h> #include <asm/smp.h> #include <asm/machdep.h> #include <asm/irq.h> diff --git a/arch/powerpc/sysdev/xics/ics-opal.c b/arch/powerpc/sysdev/xics/ics-opal.c index c4d95d8beb6f..6cfbb4fac7fb 100644 --- a/arch/powerpc/sysdev/xics/ics-opal.c +++ b/arch/powerpc/sysdev/xics/ics-opal.c @@ -18,7 +18,6 @@ #include <linux/spinlock.h> #include <linux/msi.h> -#include <asm/prom.h> #include <asm/smp.h> #include <asm/machdep.h> #include <asm/irq.h> diff --git a/arch/powerpc/sysdev/xics/ics-rtas.c b/arch/powerpc/sysdev/xics/ics-rtas.c index b9da317b7a2d..9e7007f9aca5 100644 --- a/arch/powerpc/sysdev/xics/ics-rtas.c +++ b/arch/powerpc/sysdev/xics/ics-rtas.c @@ -10,7 +10,6 @@ #include <linux/spinlock.h> #include <linux/msi.h> -#include <asm/prom.h> #include <asm/smp.h> #include <asm/machdep.h> #include <asm/irq.h> diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c index f3fb2a12124c..d3a4156e8788 100644 --- a/arch/powerpc/sysdev/xics/xics-common.c +++ b/arch/powerpc/sysdev/xics/xics-common.c @@ -6,6 +6,7 @@ #include <linux/threads.h> #include <linux/kernel.h> #include <linux/irq.h> +#include <linux/irqdomain.h> #include <linux/debugfs.h> #include <linux/smp.h> #include <linux/interrupt.h> @@ -17,7 +18,6 @@ #include <linux/spinlock.h> #include <linux/delay.h> -#include <asm/prom.h> #include <asm/io.h> #include <asm/smp.h> #include <asm/machdep.h> @@ -146,7 +146,7 @@ void __init xics_smp_probe(void) #endif /* CONFIG_SMP */ -void xics_teardown_cpu(void) +noinstr void xics_teardown_cpu(void) { struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr); @@ -159,7 +159,7 @@ void xics_teardown_cpu(void) icp_ops->teardown_cpu(); } -void xics_kexec_teardown_cpu(int secondary) +noinstr void xics_kexec_teardown_cpu(int secondary) { xics_teardown_cpu(); diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index bb5bda6b2357..61b9f98dfd4a 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c @@ -9,6 +9,7 @@ #include <linux/threads.h> #include <linux/kernel.h> #include <linux/irq.h> +#include <linux/irqdomain.h> #include <linux/debugfs.h> #include <linux/smp.h> #include <linux/interrupt.h> @@ -21,7 +22,6 @@ #include <linux/msi.h> #include <linux/vmalloc.h> -#include <asm/prom.h> #include <asm/io.h> #include <asm/smp.h> #include <asm/machdep.h> @@ -1241,7 +1241,7 @@ static int 
xive_setup_cpu_ipi(unsigned int cpu) return 0; } -static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc) +noinstr static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc) { unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu); @@ -1634,7 +1634,7 @@ void xive_flush_interrupt(void) #endif /* CONFIG_SMP */ -void xive_teardown_cpu(void) +noinstr void xive_teardown_cpu(void) { struct xive_cpu *xc = __this_cpu_read(xive_cpu); unsigned int cpu = smp_processor_id(); diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c index f940428ad13f..d25d8c692909 100644 --- a/arch/powerpc/sysdev/xive/native.c +++ b/arch/powerpc/sysdev/xive/native.c @@ -13,6 +13,7 @@ #include <linux/seq_file.h> #include <linux/init.h> #include <linux/of.h> +#include <linux/of_address.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/delay.h> @@ -21,7 +22,6 @@ #include <linux/kmemleak.h> #include <asm/machdep.h> -#include <asm/prom.h> #include <asm/io.h> #include <asm/smp.h> #include <asm/irq.h> @@ -617,7 +617,7 @@ bool __init xive_native_init(void) xive_tima_os = r.start; - /* Grab size of provisionning pages */ + /* Grab size of provisioning pages */ xive_parse_provisioning(np); /* Switch the XIVE to exploitation mode */ diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c index 29456c255f9f..7d5128676e83 100644 --- a/arch/powerpc/sysdev/xive/spapr.c +++ b/arch/powerpc/sysdev/xive/spapr.c @@ -11,6 +11,8 @@ #include <linux/interrupt.h> #include <linux/init.h> #include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_fdt.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/cpumask.h> @@ -830,12 +832,12 @@ bool __init xive_spapr_init(void) /* Resource 1 is the OS ring TIMA */ if (of_address_to_resource(np, 1, &r)) { pr_err("Failed to get thread mgmnt area resource\n"); - return false; + goto err_put; } tima = ioremap(r.start, resource_size(&r)); if (!tima) { pr_err("Failed to map thread mgmnt area\n"); - return false; + goto err_put; } if (!xive_get_max_prio(&max_prio)) @@ -871,6 +873,7 @@ bool __init xive_spapr_init(void) if (!xive_core_init(np, &xive_spapr_ops, tima, TM_QW1_OS, max_prio)) goto err_mem_free; + of_node_put(np); pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10)); return true; @@ -878,6 +881,8 @@ err_mem_free: xive_irq_bitmap_remove_all(); err_unmap: iounmap(tima); +err_put: + of_node_put(np); return false; } diff --git a/arch/powerpc/xmon/ppc-opc.c b/arch/powerpc/xmon/ppc-opc.c index dfb80810b16c..0774d711453e 100644 --- a/arch/powerpc/xmon/ppc-opc.c +++ b/arch/powerpc/xmon/ppc-opc.c @@ -408,7 +408,7 @@ const struct powerpc_operand powerpc_operands[] = #define FXM4 FXM + 1 { 0xff, 12, insert_fxm, extract_fxm, PPC_OPERAND_OPTIONAL | PPC_OPERAND_OPTIONAL_VALUE}, - /* If the FXM4 operand is ommitted, use the sentinel value -1. */ + /* If the FXM4 operand is omitted, use the sentinel value -1. */ { -1, -1, NULL, NULL, 0}, /* The IMM20 field in an LI instruction. */ diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index fd72753e8ad5..fff81c2300fa 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -31,7 +31,6 @@ #include <asm/ptrace.h> #include <asm/smp.h> #include <asm/string.h> -#include <asm/prom.h> #include <asm/machdep.h> #include <asm/xmon.h> #include <asm/processor.h> @@ -373,7 +372,7 @@ static void write_ciabr(unsigned long ciabr) * set_ciabr() - set the CIABR * @addr: The value to set. 
* - * This function sets the correct privilege value into the the HW + * This function sets the correct privilege value into the HW * breakpoint address before writing it up in the CIABR register. */ static void set_ciabr(unsigned long addr) @@ -921,9 +920,9 @@ static void insert_bpts(void) bp->enabled = 0; continue; } - if (IS_MTMSRD(instr) || IS_RFID(instr)) { - printf("Breakpoint at %lx is on an mtmsrd or rfid " - "instruction, disabling it\n", bp->address); + if (!can_single_step(ppc_inst_val(instr))) { + printf("Breakpoint at %lx is on an instruction that can't be single stepped, disabling it\n", + bp->address); bp->enabled = 0; continue; } @@ -1470,9 +1469,8 @@ static long check_bp_loc(unsigned long addr) printf("Can't read instruction at address %lx\n", addr); return 0; } - if (IS_MTMSRD(instr) || IS_RFID(instr)) { - printf("Breakpoints may not be placed on mtmsrd or rfid " - "instructions\n"); + if (!can_single_step(ppc_inst_val(instr))) { + printf("Breakpoints may not be placed on instructions that can't be single stepped\n"); return 0; } return 1; @@ -2024,7 +2022,7 @@ static void dump_206_sprs(void) if (!cpu_has_feature(CPU_FTR_ARCH_206)) return; - /* Actually some of these pre-date 2.06, but whatevs */ + /* Actually some of these pre-date 2.06, but whatever */ printf("srr0 = %.16lx srr1 = %.16lx dsisr = %.8lx\n", mfspr(SPRN_SRR0), mfspr(SPRN_SRR1), mfspr(SPRN_DSISR));
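
Note on the removed fsl_85xx_l2ctlr.c probe above: it validated the cache-sram-size= command-line value by requiring that the carve-out divide the L2 evenly and occupy a power-of-two number of the LOCK_WAYS_FULL lockable ways, using the standard `ways & (ways - 1)` power-of-two test. A minimal standalone sketch of that validation; the helper name, the main() harness, and the 512 KiB L2 figures are illustrative, not from the driver:

#include <stdbool.h>
#include <stdio.h>

#define LOCK_WAYS_FULL 8 /* all lockable L2 ways, as in the removed driver */

/*
 * Mirror of the removed probe's sanity check: the SRAM carve-out must
 * divide the L2 evenly and map to a power-of-two number of ways.
 */
static bool cache_sram_size_valid(unsigned int l2cache_size, unsigned int sram_size)
{
	unsigned int ways = LOCK_WAYS_FULL * sram_size / l2cache_size;

	if (l2cache_size % sram_size)
		return false;
	return ways && !(ways & (ways - 1)); /* power-of-two test */
}

int main(void)
{
	/* 512 KiB L2: 128 KiB locks two ways (valid); 192 KiB does not
	 * divide the cache evenly, so it is rejected. */
	printf("128K: %s\n", cache_sram_size_valid(512 << 10, 128 << 10) ? "valid" : "invalid");
	printf("192K: %s\n", cache_sram_size_valid(512 << 10, 192 << 10) ? "valid" : "invalid");
	return 0;
}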
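
Several hunks above (fsl_rio.c, icp-opal.c, xive/spapr.c) add of_node_put() calls. OF lookup helpers such as of_find_compatible_node() return a device_node with its reference count raised, and every exit path, error paths included, must drop that reference. A minimal sketch of the pattern those fixes restore, assuming a hypothetical "vendor,example-device" compatible string and helper name:

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/io.h>

/*
 * Hypothetical lookup-and-map helper. of_find_compatible_node() returns
 * the node with an elevated refcount, so both the error path and the
 * success path must release it with of_node_put().
 */
static void __iomem *example_map_regs(void)
{
	struct device_node *np;
	struct resource res;
	void __iomem *base = NULL;

	np = of_find_compatible_node(NULL, NULL, "vendor,example-device");
	if (!np)
		return NULL;

	if (of_address_to_resource(np, 0, &res))
		goto out_put; /* error path: the reference still has to go */

	base = ioremap(res.start, resource_size(&res));

out_put:
	of_node_put(np); /* drop the reference once np is no longer needed */
	return base;
}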
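
The xics-common.c and xive hunks tag the CPU teardown paths noinstr. That annotation places a function in the .noinstr.text section and builds it without compiler instrumentation (ftrace, KASAN, KCSAN, profiling), which matters here because these functions can run on a CPU late in kexec teardown, where instrumentation callbacks are no longer safe. A sketch of how the attribute is applied; the function below is hypothetical, not from the patch:

#include <linux/compiler.h> /* pulls in the noinstr attribute */

/*
 * Hypothetical late CPU-teardown hook. noinstr puts it in .noinstr.text
 * and compiles it without tracer/sanitizer hooks, since those callbacks
 * must not fire once the CPU is this far into teardown.
 */
noinstr void example_teardown_cpu(void)
{
	/* only other noinstr or __always_inline code is safe to call here */
}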