Diffstat (limited to 'arch/powerpc')
67 files changed, 462 insertions, 429 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 1f48bbfb3ce9..1f0bd7e223f5 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -116,7 +116,6 @@ config PPC # select ARCH_32BIT_OFF_T if PPC32 select ARCH_HAS_DEBUG_VIRTUAL - select ARCH_HAS_DEBUG_VM_PGTABLE select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_FORTIFY_SOURCE @@ -136,7 +135,7 @@ config PPC select ARCH_HAS_STRICT_KERNEL_RWX if (PPC32 && !HIBERNATION) select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAS_UACCESS_FLUSHCACHE - select ARCH_HAS_UACCESS_MCSAFE if PPC64 + select ARCH_HAS_COPY_MC if PPC64 select ARCH_HAS_UBSAN_SANITIZE_ALL select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_KEEP_MEMBLOCK @@ -246,6 +245,7 @@ config PPC select OLD_SIGACTION if PPC32 select OLD_SIGSUSPEND select PCI_DOMAINS if PCI + select PCI_MSI_ARCH_FALLBACKS if PCI_MSI select PCI_SYSCALL if PCI select PPC_DAWR if PPC64 select RTC_LIB @@ -860,6 +860,18 @@ config PPC_SUBPAGE_PROT If unsure, say N here. +config PPC_PROT_SAO_LPAR + bool "Support PROT_SAO mappings in LPARs" + depends on PPC_BOOK3S_64 + help + This option adds support for PROT_SAO mappings from userspace + inside LPARs on supported CPUs. + + This may cause issues when performing guest migration from + a CPU that supports SAO to one that does not. + + If unsure, say N here. + config PPC_COPRO_BASE bool @@ -934,23 +946,6 @@ config ARCH_WANTS_FREEZER_CONTROL source "kernel/power/Kconfig" -config SECCOMP - bool "Enable seccomp to safely compute untrusted bytecode" - depends on PROC_FS - default y - help - This kernel feature is useful for number crunching applications - that may need to compute untrusted bytecode during their - execution. By using pipes or other transports made available to - the process as file descriptors supporting the read/write - syscalls, it's possible to isolate those applications in - their own address space using seccomp. Once seccomp is - enabled via /proc/<pid>/seccomp, it cannot be disabled - and the task is only allowed to execute a few safe syscalls - defined by each seccomp mode. - - If unsure, say Y. Only embedded should say N here. 
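The PPC_PROT_SAO_LPAR help text above describes PROT_SAO mappings being requested from userspace. A minimal, hypothetical userspace sketch of that flow follows; it is not part of the patch. It assumes the libc headers do not expose PROT_SAO (so it is defined locally from the 0x10 value restored in uapi/asm/mman.h later in this diff), and that the kernel rejects the request with EINVAL via arch_validate_prot() on CPUs without CPU_FTR_SAO, or inside an LPAR when PPC_PROT_SAO_LPAR is not enabled.

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>

    #ifndef PROT_SAO
    #define PROT_SAO 0x10   /* powerpc Strong Access Ordering, from uapi/asm/mman.h */
    #endif

    int main(void)
    {
            size_t len = 64 * 1024;
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED)
                    return 1;

            /* Ask for strong access ordering on the existing mapping. */
            if (mprotect(p, len, PROT_READ | PROT_WRITE | PROT_SAO) != 0) {
                    /* Expected to fail (EINVAL) where SAO is unsupported. */
                    perror("mprotect(PROT_SAO)");
                    munmap(p, len);
                    return 1;
            }

            munmap(p, len);
            return 0;
    }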
- config PPC_MEM_KEYS prompt "PowerPC Memory Protection Keys" def_bool y diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig index af9af03059e4..15ed8d0aa014 100644 --- a/arch/powerpc/configs/pasemi_defconfig +++ b/arch/powerpc/configs/pasemi_defconfig @@ -108,7 +108,6 @@ CONFIG_FB_NVIDIA=y CONFIG_FB_NVIDIA_I2C=y CONFIG_FB_RADEON=y # CONFIG_LCD_CLASS_DEVICE is not set -CONFIG_VGACON_SOFT_SCROLLBACK=y CONFIG_LOGO=y CONFIG_SOUND=y CONFIG_SND=y diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index 5e6f92ba3210..66e9a0fd64ff 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig @@ -743,7 +743,6 @@ CONFIG_FB_TRIDENT=m CONFIG_FB_SM501=m CONFIG_FB_IBM_GXT4500=y CONFIG_LCD_PLATFORM=m -CONFIG_VGACON_SOFT_SCROLLBACK=y CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y CONFIG_LOGO=y diff --git a/arch/powerpc/crypto/crc-vpmsum_test.c b/arch/powerpc/crypto/crc-vpmsum_test.c index dce86e75f1a8..c1c1ef9457fb 100644 --- a/arch/powerpc/crypto/crc-vpmsum_test.c +++ b/arch/powerpc/crypto/crc-vpmsum_test.c @@ -9,6 +9,7 @@ #include <crypto/internal/hash.h> #include <linux/init.h> #include <linux/module.h> +#include <linux/random.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/cpufeature.h> @@ -22,10 +23,11 @@ static unsigned long iterations = 10000; static int __init crc_test_init(void) { u16 crc16 = 0, verify16 = 0; - u32 crc32 = 0, verify32 = 0; __le32 verify32le = 0; unsigned char *data; + u32 verify32 = 0; unsigned long i; + __le32 crc32; int ret; struct crypto_shash *crct10dif_tfm; @@ -98,7 +100,7 @@ static int __init crc_test_init(void) crypto_shash_final(crc32c_shash, (u8 *)(&crc32)); verify32 = le32_to_cpu(verify32le); verify32le = ~cpu_to_le32(__crc32c_le(~verify32, data+offset, len)); - if (crc32 != (u32)verify32le) { + if (crc32 != verify32le) { pr_err("FAILURE in CRC32: got 0x%08x expected 0x%08x (len %lu)\n", crc32, verify32, len); break; diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index 55442d45c597..b392384a3b15 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -239,14 +239,14 @@ static inline void early_init_mmu_secondary(void) extern void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base, phys_addr_t first_memblock_size); -extern void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base, - phys_addr_t first_memblock_size); static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base, phys_addr_t first_memblock_size) { - if (early_radix_enabled()) - return radix__setup_initial_memory_limit(first_memblock_base, - first_memblock_size); + /* + * Hash has more strict restrictions. At this point we don't + * know which translations we will pick. Hence go with hash + * restrictions. 
+ */ return hash__setup_initial_memory_limit(first_memblock_base, first_memblock_size); } diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 6de56c3b33c4..495fc0ccb453 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -20,13 +20,9 @@ #define _PAGE_RW (_PAGE_READ | _PAGE_WRITE) #define _PAGE_RWX (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC) #define _PAGE_PRIVILEGED 0x00008 /* kernel access only */ - -#define _PAGE_CACHE_CTL 0x00030 /* Bits for the folowing cache modes */ - /* No bits set is normal cacheable memory */ - /* 0x00010 unused, is SAO bit on radix POWER9 */ +#define _PAGE_SAO 0x00010 /* Strong access order */ #define _PAGE_NON_IDEMPOTENT 0x00020 /* non idempotent memory */ #define _PAGE_TOLERANT 0x00030 /* tolerant memory, cache inhibited */ - #define _PAGE_DIRTY 0x00080 /* C: page changed */ #define _PAGE_ACCESSED 0x00100 /* R: page referenced */ /* @@ -828,6 +824,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, return hash__set_pte_at(mm, addr, ptep, pte, percpu); } +#define _PAGE_CACHE_CTL (_PAGE_SAO | _PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT) + #define pgprot_noncached pgprot_noncached static inline pgprot_t pgprot_noncached(pgprot_t prot) { diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h index 9cce06194dcc..82f099ba2411 100644 --- a/arch/powerpc/include/asm/checksum.h +++ b/arch/powerpc/include/asm/checksum.h @@ -18,19 +18,18 @@ * Like csum_partial, this must be called with even lengths, * except for the last fragment. */ -extern __wsum csum_partial_copy_generic(const void *src, void *dst, - int len, __wsum sum, - int *src_err, int *dst_err); +extern __wsum csum_partial_copy_generic(const void *src, void *dst, int len); #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER extern __wsum csum_and_copy_from_user(const void __user *src, void *dst, - int len, __wsum sum, int *err_ptr); + int len); #define HAVE_CSUM_COPY_USER extern __wsum csum_and_copy_to_user(const void *src, void __user *dst, - int len, __wsum sum, int *err_ptr); + int len); -#define csum_partial_copy_nocheck(src, dst, len, sum) \ - csum_partial_copy_generic((src), (dst), (len), (sum), NULL, NULL) +#define _HAVE_ARCH_CSUM_AND_COPY +#define csum_partial_copy_nocheck(src, dst, len) \ + csum_partial_copy_generic((src), (dst), (len)) /* diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h index 3e3cdfaa76c6..9191fc29e6ed 100644 --- a/arch/powerpc/include/asm/compat.h +++ b/arch/powerpc/include/asm/compat.h @@ -27,8 +27,6 @@ typedef s16 compat_nlink_t; typedef u16 compat_ipc_pid_t; typedef u32 compat_caddr_t; typedef __kernel_fsid_t compat_fsid_t; -typedef s64 compat_s64; -typedef u64 compat_u64; struct compat_stat { compat_dev_t st_dev; diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index fdddb822d564..32a15dc49e8c 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -9,6 +9,11 @@ #ifndef __ASSEMBLY__ +/* + * Added to include __machine_check_early_realmode_* functions + */ +#include <asm/mce.h> + /* This structure can grow, it's real size is used by head.S code * via the mkdefs mechanism. 
*/ @@ -191,7 +196,7 @@ static inline void cpu_feature_keys_init(void) { } #define CPU_FTR_SPURR LONG_ASM_CONST(0x0000000001000000) #define CPU_FTR_DSCR LONG_ASM_CONST(0x0000000002000000) #define CPU_FTR_VSX LONG_ASM_CONST(0x0000000004000000) -// Free LONG_ASM_CONST(0x0000000008000000) +#define CPU_FTR_SAO LONG_ASM_CONST(0x0000000008000000) #define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0000000010000000) #define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0000000020000000) #define CPU_FTR_ASYM_SMT LONG_ASM_CONST(0x0000000040000000) @@ -436,7 +441,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | \ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ - CPU_FTR_DSCR | CPU_FTR_ASYM_SMT | \ + CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_CFAR | CPU_FTR_HVMODE | \ CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX ) @@ -445,7 +450,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | \ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ - CPU_FTR_DSCR | \ + CPU_FTR_DSCR | CPU_FTR_SAO | \ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \ @@ -456,7 +461,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | \ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ - CPU_FTR_DSCR | \ + CPU_FTR_DSCR | CPU_FTR_SAO | \ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \ @@ -474,7 +479,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | \ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ - CPU_FTR_DSCR | \ + CPU_FTR_DSCR | CPU_FTR_SAO | \ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \ diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h index 925cf89cbf4b..6bfc87915d5d 100644 --- a/arch/powerpc/include/asm/fixmap.h +++ b/arch/powerpc/include/asm/fixmap.h @@ -52,7 +52,7 @@ enum fixed_addresses { FIX_HOLE, /* reserve the top 128K for early debugging purposes */ FIX_EARLY_DEBUG_TOP = FIX_HOLE, - FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128, PAGE_SIZE)/PAGE_SIZE)-1, + FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128K, PAGE_SIZE)/PAGE_SIZE)-1, #ifdef CONFIG_HIGHMEM FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index 3a0db7b0b46e..35060be09073 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -200,17 +200,14 @@ static inline bool arch_irqs_disabled(void) #define powerpc_local_irq_pmu_save(flags) \ do { \ raw_local_irq_pmu_save(flags); \ - trace_hardirqs_off(); \ + if (!raw_irqs_disabled_flags(flags)) \ + trace_hardirqs_off(); \ } while(0) #define powerpc_local_irq_pmu_restore(flags) \ do { \ - if (raw_irqs_disabled_flags(flags)) { \ - raw_local_irq_pmu_restore(flags); \ - trace_hardirqs_off(); \ - } else { \ + if (!raw_irqs_disabled_flags(flags)) \ trace_hardirqs_on(); \ - 
raw_local_irq_pmu_restore(flags); \ - } \ + raw_local_irq_pmu_restore(flags); \ } while(0) #else #define powerpc_local_irq_pmu_save(flags) \ diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h index d635b96c7ea6..7355ed05e65e 100644 --- a/arch/powerpc/include/asm/kasan.h +++ b/arch/powerpc/include/asm/kasan.h @@ -15,11 +15,18 @@ #ifndef __ASSEMBLY__ #include <asm/page.h> +#include <linux/sizes.h> #define KASAN_SHADOW_SCALE_SHIFT 3 +#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_MODULES) && defined(CONFIG_STRICT_KERNEL_RWX) +#define KASAN_KERN_START ALIGN_DOWN(PAGE_OFFSET - SZ_256M, SZ_256M) +#else +#define KASAN_KERN_START PAGE_OFFSET +#endif + #define KASAN_SHADOW_START (KASAN_SHADOW_OFFSET + \ - (PAGE_OFFSET >> KASAN_SHADOW_SCALE_SHIFT)) + (KASAN_KERN_START >> KASAN_SHADOW_SCALE_SHIFT)) #define KASAN_SHADOW_OFFSET ASM_CONST(CONFIG_KASAN_SHADOW_OFFSET) diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index e020d269416d..10ded83414de 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -58,7 +58,8 @@ #define KVM_ARCH_WANT_MMU_NOTIFIER extern int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end); + unsigned long start, unsigned long end, + unsigned flags); extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); extern int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h index adf2cda67f9a..89aa8248a57d 100644 --- a/arch/powerpc/include/asm/mce.h +++ b/arch/powerpc/include/asm/mce.h @@ -210,6 +210,9 @@ struct mce_error_info { #define MCE_EVENT_RELEASE true #define MCE_EVENT_DONTRELEASE false +struct pt_regs; +struct notifier_block; + extern void save_mce_event(struct pt_regs *regs, long handled, struct mce_error_info *mce_err, uint64_t nip, uint64_t addr, uint64_t phys_addr); @@ -225,5 +228,9 @@ int mce_register_notifier(struct notifier_block *nb); int mce_unregister_notifier(struct notifier_block *nb); #ifdef CONFIG_PPC_BOOK3S_64 void flush_and_reload_slb(void); +long __machine_check_early_realmode_p7(struct pt_regs *regs); +long __machine_check_early_realmode_p8(struct pt_regs *regs); +long __machine_check_early_realmode_p9(struct pt_regs *regs); +long __machine_check_early_realmode_p10(struct pt_regs *regs); #endif /* CONFIG_PPC_BOOK3S_64 */ #endif /* __ASM_PPC64_MCE_H__ */ diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h index 7c07728af300..7cb6d18f5cd6 100644 --- a/arch/powerpc/include/asm/mman.h +++ b/arch/powerpc/include/asm/mman.h @@ -13,20 +13,43 @@ #include <linux/pkeys.h> #include <asm/cpu_has_feature.h> -#ifdef CONFIG_PPC_MEM_KEYS static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, unsigned long pkey) { - return pkey_to_vmflag_bits(pkey); +#ifdef CONFIG_PPC_MEM_KEYS + return (((prot & PROT_SAO) ? VM_SAO : 0) | pkey_to_vmflag_bits(pkey)); +#else + return ((prot & PROT_SAO) ? VM_SAO : 0); +#endif } #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey) static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags) { - return __pgprot(vmflag_to_pte_pkey_bits(vm_flags)); +#ifdef CONFIG_PPC_MEM_KEYS + return (vm_flags & VM_SAO) ? + __pgprot(_PAGE_SAO | vmflag_to_pte_pkey_bits(vm_flags)) : + __pgprot(0 | vmflag_to_pte_pkey_bits(vm_flags)); +#else + return (vm_flags & VM_SAO) ? 
__pgprot(_PAGE_SAO) : __pgprot(0); +#endif } #define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags) -#endif + +static inline bool arch_validate_prot(unsigned long prot, unsigned long addr) +{ + if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_SAO)) + return false; + if (prot & PROT_SAO) { + if (!cpu_has_feature(CPU_FTR_SAO)) + return false; + if (firmware_has_feature(FW_FEATURE_LPAR) && + !IS_ENABLED(CONFIG_PPC_PROT_SAO_LPAR)) + return false; + } + return true; +} +#define arch_validate_prot arch_validate_prot #endif /* CONFIG_PPC64 */ #endif /* _ASM_POWERPC_MMAN_H */ diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h index 59ee9fa4ae09..6cb8aa357191 100644 --- a/arch/powerpc/include/asm/nohash/64/pgtable.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h @@ -82,6 +82,8 @@ */ #include <asm/nohash/pte-book3e.h> +#define _PAGE_SAO 0 + #define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1)) /* diff --git a/arch/powerpc/include/asm/perf_event.h b/arch/powerpc/include/asm/perf_event.h index 1e8b2e1ec1db..daec64d41b44 100644 --- a/arch/powerpc/include/asm/perf_event.h +++ b/arch/powerpc/include/asm/perf_event.h @@ -40,4 +40,7 @@ static inline bool is_sier_available(void) { return false; } /* To support perf_regs sier update */ extern bool is_sier_available(void); +/* To define perf extended regs mask value */ +extern u64 PERF_REG_EXTENDED_MASK; +#define PERF_REG_EXTENDED_MASK PERF_REG_EXTENDED_MASK #endif diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h index 86c9eb064b22..f6acabb6c9be 100644 --- a/arch/powerpc/include/asm/perf_event_server.h +++ b/arch/powerpc/include/asm/perf_event_server.h @@ -62,6 +62,11 @@ struct power_pmu { int *blacklist_ev; /* BHRB entries in the PMU */ int bhrb_nr; + /* + * set this flag with `PERF_PMU_CAP_EXTENDED_REGS` if + * the pmu supports extended perf regs capability + */ + int capabilities; }; /* diff --git a/arch/powerpc/include/asm/string.h b/arch/powerpc/include/asm/string.h index 283552cd0e58..2aa0e31e6884 100644 --- a/arch/powerpc/include/asm/string.h +++ b/arch/powerpc/include/asm/string.h @@ -53,9 +53,7 @@ void *__memmove(void *to, const void *from, __kernel_size_t n); #ifndef CONFIG_KASAN #define __HAVE_ARCH_MEMSET32 #define __HAVE_ARCH_MEMSET64 -#define __HAVE_ARCH_MEMCPY_MCSAFE -extern int memcpy_mcsafe(void *dst, const void *src, __kernel_size_t sz); extern void *__memset16(uint16_t *, uint16_t v, __kernel_size_t); extern void *__memset32(uint32_t *, uint32_t v, __kernel_size_t); extern void *__memset64(uint64_t *, uint64_t v, __kernel_size_t); diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index 00699903f1ef..20a35373cafc 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -435,6 +435,32 @@ do { \ extern unsigned long __copy_tofrom_user(void __user *to, const void __user *from, unsigned long size); +#ifdef CONFIG_ARCH_HAS_COPY_MC +unsigned long __must_check +copy_mc_generic(void *to, const void *from, unsigned long size); + +static inline unsigned long __must_check +copy_mc_to_kernel(void *to, const void *from, unsigned long size) +{ + return copy_mc_generic(to, from, size); +} +#define copy_mc_to_kernel copy_mc_to_kernel + +static inline unsigned long __must_check +copy_mc_to_user(void __user *to, const void *from, unsigned long n) +{ + if (likely(check_copy_size(from, n, true))) { + if (access_ok(to, n)) { + allow_write_to_user(to, 
n); + n = copy_mc_generic((void *)to, from, n); + prevent_write_to_user(to, n); + } + } + + return n; +} +#endif + #ifdef __powerpc64__ static inline unsigned long raw_copy_in_user(void __user *to, const void __user *from, unsigned long n) @@ -523,20 +549,6 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n) return ret; } -static __always_inline unsigned long __must_check -copy_to_user_mcsafe(void __user *to, const void *from, unsigned long n) -{ - if (likely(check_copy_size(from, n, true))) { - if (access_ok(to, n)) { - allow_write_to_user(to, n); - n = memcpy_mcsafe((void *)to, from, n); - prevent_write_to_user(to, n); - } - } - - return n; -} - unsigned long __arch_clear_user(void __user *addr, unsigned long size); static inline unsigned long clear_user(void __user *addr, unsigned long size) diff --git a/arch/powerpc/include/uapi/asm/mman.h b/arch/powerpc/include/uapi/asm/mman.h index 3a700351feca..c0c737215b00 100644 --- a/arch/powerpc/include/uapi/asm/mman.h +++ b/arch/powerpc/include/uapi/asm/mman.h @@ -11,7 +11,7 @@ #include <asm-generic/mman-common.h> -#define PROT_SAO 0x10 /* Unsupported since v5.9 */ +#define PROT_SAO 0x10 /* Strong Access Ordering */ #define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */ #define MAP_NORESERVE 0x40 /* don't reserve swap pages */ diff --git a/arch/powerpc/include/uapi/asm/perf_regs.h b/arch/powerpc/include/uapi/asm/perf_regs.h index f599064dd8dc..bdf5f10f8b9f 100644 --- a/arch/powerpc/include/uapi/asm/perf_regs.h +++ b/arch/powerpc/include/uapi/asm/perf_regs.h @@ -48,6 +48,24 @@ enum perf_event_powerpc_regs { PERF_REG_POWERPC_DSISR, PERF_REG_POWERPC_SIER, PERF_REG_POWERPC_MMCRA, - PERF_REG_POWERPC_MAX, + /* Extended registers */ + PERF_REG_POWERPC_MMCR0, + PERF_REG_POWERPC_MMCR1, + PERF_REG_POWERPC_MMCR2, + PERF_REG_POWERPC_MMCR3, + PERF_REG_POWERPC_SIER2, + PERF_REG_POWERPC_SIER3, + /* Max regs without the extended regs */ + PERF_REG_POWERPC_MAX = PERF_REG_POWERPC_MMCRA + 1, }; + +#define PERF_REG_PMU_MASK ((1ULL << PERF_REG_POWERPC_MAX) - 1) + +/* PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_300 */ +#define PERF_REG_PMU_MASK_300 (((1ULL << (PERF_REG_POWERPC_MMCR2 + 1)) - 1) - PERF_REG_PMU_MASK) +/* PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_31 */ +#define PERF_REG_PMU_MASK_31 (((1ULL << (PERF_REG_POWERPC_SIER3 + 1)) - 1) - PERF_REG_PMU_MASK) + +#define PERF_REG_MAX_ISA_300 (PERF_REG_POWERPC_MMCR2 + 1) +#define PERF_REG_MAX_ISA_31 (PERF_REG_POWERPC_SIER3 + 1) #endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */ diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 3d406a9626e8..2aa89c6b2896 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -72,9 +72,6 @@ extern void __setup_cpu_power9(unsigned long offset, struct cpu_spec* spec); extern void __restore_cpu_power9(void); extern void __setup_cpu_power10(unsigned long offset, struct cpu_spec* spec); extern void __restore_cpu_power10(void); -extern long __machine_check_early_realmode_p7(struct pt_regs *regs); -extern long __machine_check_early_realmode_p8(struct pt_regs *regs); -extern long __machine_check_early_realmode_p9(struct pt_regs *regs); #endif /* CONFIG_PPC64 */ #if defined(CONFIG_E500) extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec); @@ -542,6 +539,25 @@ static struct cpu_spec __initdata cpu_specs[] = { .machine_check_early = __machine_check_early_realmode_p9, .platform = "power9", }, + { /* Power10 */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x00800000, + .cpu_name = "POWER10 
(raw)", + .cpu_features = CPU_FTRS_POWER10, + .cpu_user_features = COMMON_USER_POWER10, + .cpu_user_features2 = COMMON_USER2_POWER10, + .mmu_features = MMU_FTRS_POWER10, + .icache_bsize = 128, + .dcache_bsize = 128, + .num_pmcs = 6, + .pmc_type = PPC_PMC_IBM, + .oprofile_cpu_type = "ppc64/power10", + .oprofile_type = PPC_OPROFILE_INVALID, + .cpu_setup = __setup_cpu_power10, + .cpu_restore = __restore_cpu_power10, + .machine_check_early = __machine_check_early_realmode_p10, + .platform = "power10", + }, { /* Cell Broadband Engine */ .pvr_mask = 0xffff0000, .pvr_value = 0x00700000, diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c index 569fecd7b5b2..9053fc9d20c7 100644 --- a/arch/powerpc/kernel/dma-iommu.c +++ b/arch/powerpc/kernel/dma-iommu.c @@ -120,7 +120,8 @@ u64 dma_iommu_get_required_mask(struct device *dev) if (!tbl) return 0; - mask = 1ULL < (fls_long(tbl->it_offset + tbl->it_size) - 1); + mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) + + tbl->it_page_shift - 1); mask += mask - 1; return mask; diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index 6f8c0c6b937a..f204ad79b6b5 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -64,10 +64,6 @@ struct dt_cpu_feature { * Set up the base CPU */ -extern long __machine_check_early_realmode_p8(struct pt_regs *regs); -extern long __machine_check_early_realmode_p9(struct pt_regs *regs); -extern long __machine_check_early_realmode_p10(struct pt_regs *regs); - static int hv_mode; static struct { @@ -657,7 +653,7 @@ static struct dt_cpu_feature_match __initdata {"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL}, {"processor-utilization-of-resources-register", feat_enable_purr, 0}, {"no-execute", feat_enable, 0}, - /* strong-access-ordering is unused */ + {"strong-access-ordering", feat_enable, CPU_FTR_SAO}, {"cache-inhibited-large-page", feat_enable_large_ci, 0}, {"coprocessor-icswx", feat_enable, 0}, {"hypervisor-virtualization-interrupt", feat_enable_hvi, 0}, diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 33a42e42c56f..733e40eba4eb 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -113,6 +113,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM) ld r11,exception_marker@toc(r2) std r11,-16(r10) /* "regshere" marker */ +BEGIN_FTR_SECTION + HMT_MEDIUM +END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) + /* * RECONCILE_IRQ_STATE without calling trace_hardirqs_off(), which * would clobber syscall parameters. 
Also we always enter with IRQs diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 10ebb4bf71ad..5cdf4168a61a 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -191,13 +191,13 @@ int is_fadump_active(void) */ static bool is_fadump_mem_area_contiguous(u64 d_start, u64 d_end) { - struct memblock_region *reg; + phys_addr_t reg_start, reg_end; bool ret = false; - u64 start, end; + u64 i, start, end; - for_each_memblock(memory, reg) { - start = max_t(u64, d_start, reg->base); - end = min_t(u64, d_end, (reg->base + reg->size)); + for_each_mem_range(i, ®_start, ®_end) { + start = max_t(u64, d_start, reg_start); + end = min_t(u64, d_end, reg_end); if (d_start < end) { /* Memory hole from d_start to start */ if (start > d_start) @@ -422,34 +422,34 @@ static int __init add_boot_mem_regions(unsigned long mstart, static int __init fadump_get_boot_mem_regions(void) { - unsigned long base, size, cur_size, hole_size, last_end; + unsigned long size, cur_size, hole_size, last_end; unsigned long mem_size = fw_dump.boot_memory_size; - struct memblock_region *reg; + phys_addr_t reg_start, reg_end; int ret = 1; + u64 i; fw_dump.boot_mem_regs_cnt = 0; last_end = 0; hole_size = 0; cur_size = 0; - for_each_memblock(memory, reg) { - base = reg->base; - size = reg->size; - hole_size += (base - last_end); + for_each_mem_range(i, ®_start, ®_end) { + size = reg_end - reg_start; + hole_size += (reg_start - last_end); if ((cur_size + size) >= mem_size) { size = (mem_size - cur_size); - ret = add_boot_mem_regions(base, size); + ret = add_boot_mem_regions(reg_start, size); break; } mem_size -= size; cur_size += size; - ret = add_boot_mem_regions(base, size); + ret = add_boot_mem_regions(reg_start, size); if (!ret) break; - last_end = base + size; + last_end = reg_end; } fw_dump.boot_mem_top = PAGE_ALIGN(fw_dump.boot_memory_size + hole_size); @@ -985,9 +985,8 @@ static int fadump_init_elfcore_header(char *bufp) */ static int fadump_setup_crash_memory_ranges(void) { - struct memblock_region *reg; - u64 start, end; - int i, ret; + u64 i, start, end; + int ret; pr_debug("Setup crash memory ranges.\n"); crash_mrange_info.mem_range_cnt = 0; @@ -1005,10 +1004,7 @@ static int fadump_setup_crash_memory_ranges(void) return ret; } - for_each_memblock(memory, reg) { - start = (u64)reg->base; - end = start + (u64)reg->size; - + for_each_mem_range(i, &start, &end) { /* * skip the memory chunk that is already added * (0 through boot_memory_top). 
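Several files in this patch (fadump.c above, plus the kexec and mm/ code further down) replace open-coded for_each_memblock() walks over struct memblock_region with the for_each_mem_range() helpers. A short sketch of the new iteration idiom, written for illustration only and not part of the patch, assuming only the signature visible in these hunks:

    #include <linux/memblock.h>
    #include <linux/printk.h>

    /* Illustrative only: walk every memory range memblock knows about. */
    static void __init dump_memblock_ranges(void)
    {
            phys_addr_t start, end;
            u64 i;

            for_each_mem_range(i, &start, &end) {
                    /* [start, end) — end is the first byte after the range */
                    pr_debug("memory range: %pa .. %pa\n", &start, &end);
            }
    }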
@@ -1242,14 +1238,17 @@ static void fadump_free_reserved_memory(unsigned long start_pfn, */ static void fadump_release_reserved_area(u64 start, u64 end) { + unsigned long reg_spfn, reg_epfn; u64 tstart, tend, spfn, epfn; - struct memblock_region *reg; + int i; spfn = PHYS_PFN(start); epfn = PHYS_PFN(end); - for_each_memblock(memory, reg) { - tstart = max_t(u64, spfn, memblock_region_memory_base_pfn(reg)); - tend = min_t(u64, epfn, memblock_region_memory_end_pfn(reg)); + + for_each_mem_pfn_range(i, MAX_NUMNODES, ®_spfn, ®_epfn, NULL) { + tstart = max_t(u64, spfn, reg_spfn); + tend = min_t(u64, epfn, reg_epfn); + if (tstart < tend) { fadump_free_reserved_memory(tstart, tend); @@ -1684,12 +1683,10 @@ int __init fadump_reserve_mem(void) /* Preserve everything above the base address */ static void __init fadump_reserve_crash_area(u64 base) { - struct memblock_region *reg; - u64 mstart, msize; + u64 i, mstart, mend, msize; - for_each_memblock(memory, reg) { - mstart = reg->base; - msize = reg->size; + for_each_mem_range(i, &mstart, &mend) { + msize = mend - mstart; if ((mstart + msize) < base) continue; diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 6ab9b4d037c3..01ab2163659e 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -218,6 +218,7 @@ bool arch_kprobe_on_func_entry(unsigned long offset) void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { ri->ret_addr = (kprobe_opcode_t *)regs->link; + ri->fp = NULL; /* Replace the return addr with trampoline addr */ regs->link = (unsigned long)kretprobe_trampoline; @@ -396,50 +397,9 @@ asm(".global kretprobe_trampoline\n" */ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) { - struct kretprobe_instance *ri = NULL; - struct hlist_head *head, empty_rp; - struct hlist_node *tmp; - unsigned long flags, orig_ret_address = 0; - unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline; - - INIT_HLIST_HEAD(&empty_rp); - kretprobe_hash_lock(current, &head, &flags); - - /* - * It is possible to have multiple instances associated with a given - * task either because an multiple functions in the call path - * have a return probe installed on them, and/or more than one return - * return probe was registered for a target function. - * - * We can handle this because: - * - instances are always inserted at the head of the list - * - when multiple return probes are registered for the same - * function, the first instance's ret_addr will point to the - * real return address, and all the rest will point to - * kretprobe_trampoline - */ - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - if (ri->rp && ri->rp->handler) - ri->rp->handler(ri, regs); - - orig_ret_address = (unsigned long)ri->ret_addr; - recycle_rp_inst(ri, &empty_rp); - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - kretprobe_assert(ri, orig_ret_address, trampoline_address); + unsigned long orig_ret_address; + orig_ret_address = __kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL); /* * We get here through one of two paths: * 1. 
by taking a trap -> kprobe_handler() -> here @@ -458,13 +418,6 @@ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) regs->nip = orig_ret_address - 4; regs->link = orig_ret_address; - kretprobe_hash_unlock(current, &flags); - - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { - hlist_del(&ri->hlist); - kfree(ri); - } - return 0; } NOKPROBE_SYMBOL(trampoline_probe_handler); diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 016bd831908e..73a57043ee66 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -548,7 +548,7 @@ void notrace restore_math(struct pt_regs *regs) * are live for the user thread). */ if ((!(msr & MSR_FP)) && should_restore_fp()) - new_msr |= MSR_FP | current->thread.fpexc_mode; + new_msr |= MSR_FP; if ((!(msr & MSR_VEC)) && should_restore_altivec()) new_msr |= MSR_VEC; @@ -559,11 +559,17 @@ void notrace restore_math(struct pt_regs *regs) } if (new_msr) { + unsigned long fpexc_mode = 0; + msr_check_and_set(new_msr); - if (new_msr & MSR_FP) + if (new_msr & MSR_FP) { do_restore_fp(); + // This also covers VSX, because VSX implies FP + fpexc_mode = current->thread.fpexc_mode; + } + if (new_msr & MSR_VEC) do_restore_altivec(); @@ -572,7 +578,7 @@ void notrace restore_math(struct pt_regs *regs) msr_check_and_clear(new_msr); - regs->msr |= new_msr; + regs->msr |= new_msr | fpexc_mode; } } #endif diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index b198b0ff25bc..808ec9fab605 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -311,6 +311,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) min = pvr & 0xFF; break; case 0x004e: /* POWER9 bits 12-15 give chip type */ + case 0x0080: /* POWER10 bit 12 gives SMT8/4 */ maj = (pvr >> 8) & 0x0F; min = pvr & 0xFF; break; diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl index c2d737ff2e7b..9d7fb4ced290 100644 --- a/arch/powerpc/kernel/syscalls/syscall.tbl +++ b/arch/powerpc/kernel/syscalls/syscall.tbl @@ -34,7 +34,7 @@ 18 spu oldstat sys_ni_syscall 19 common lseek sys_lseek compat_sys_lseek 20 common getpid sys_getpid -21 nospu mount sys_mount compat_sys_mount +21 nospu mount sys_mount 22 32 umount sys_oldumount 22 64 umount sys_ni_syscall 22 spu umount sys_ni_syscall @@ -193,8 +193,8 @@ 142 common _newselect sys_select compat_sys_select 143 common flock sys_flock 144 common msync sys_msync -145 common readv sys_readv compat_sys_readv -146 common writev sys_writev compat_sys_writev +145 common readv sys_readv +146 common writev sys_writev 147 common getsid sys_getsid 148 common fdatasync sys_fdatasync 149 nospu _sysctl sys_ni_syscall @@ -369,7 +369,7 @@ 282 common unshare sys_unshare 283 common splice sys_splice 284 common tee sys_tee -285 common vmsplice sys_vmsplice compat_sys_vmsplice +285 common vmsplice sys_vmsplice 286 common openat sys_openat compat_sys_openat 287 common mkdirat sys_mkdirat 288 common mknodat sys_mknodat @@ -449,8 +449,8 @@ 348 common syncfs sys_syncfs 349 common sendmmsg sys_sendmmsg compat_sys_sendmmsg 350 common setns sys_setns -351 nospu process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv -352 nospu process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev +351 nospu process_vm_readv sys_process_vm_readv +352 nospu process_vm_writev sys_process_vm_writev 353 nospu finit_module sys_finit_module 354 nospu kcmp sys_kcmp 355 common sched_setattr sys_sched_setattr diff --git 
a/arch/powerpc/kernel/vdso32/Makefile b/arch/powerpc/kernel/vdso32/Makefile index 87ab1152d5ce..e147bbdc12cd 100644 --- a/arch/powerpc/kernel/vdso32/Makefile +++ b/arch/powerpc/kernel/vdso32/Makefile @@ -50,7 +50,7 @@ $(obj-vdso32): %.o: %.S FORCE # actual build commands quiet_cmd_vdso32ld = VDSO32L $@ - cmd_vdso32ld = $(VDSOCC) $(c_flags) $(CC32FLAGS) -o $@ $(call cc-ldoption, -Wl$(comma)--orphan-handling=warn) -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) + cmd_vdso32ld = $(VDSOCC) $(c_flags) $(CC32FLAGS) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) quiet_cmd_vdso32as = VDSO32A $@ cmd_vdso32as = $(VDSOCC) $(a_flags) $(CC32FLAGS) -c -o $@ $< diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S index 4c985467a668..5206c2eb2a1d 100644 --- a/arch/powerpc/kernel/vdso32/vdso32.lds.S +++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S @@ -111,7 +111,6 @@ SECTIONS *(.note.GNU-stack) *(.data .data.* .gnu.linkonce.d.* .sdata*) *(.bss .sbss .dynbss .dynsbss) - *(.glink .iplt .plt .rela*) } } diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile index 38c317f25141..32ebb3522ea1 100644 --- a/arch/powerpc/kernel/vdso64/Makefile +++ b/arch/powerpc/kernel/vdso64/Makefile @@ -34,7 +34,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE # actual build commands quiet_cmd_vdso64ld = VDSO64L $@ - cmd_vdso64ld = $(CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) $(call cc-ldoption, -Wl$(comma)--orphan-handling=warn) + cmd_vdso64ld = $(CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) # install commands for the unstripped file quiet_cmd_vdso_install = INSTALL $@ diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S index 4e3a8d4ee614..256fb9720298 100644 --- a/arch/powerpc/kernel/vdso64/vdso64.lds.S +++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S @@ -30,7 +30,7 @@ SECTIONS . = ALIGN(16); .text : { *(.text .stub .text.* .gnu.linkonce.t.* __ftr_alt_*) - *(.sfpr) + *(.sfpr .glink) } :text PROVIDE(__etext = .); PROVIDE(_etext = .); @@ -111,7 +111,6 @@ SECTIONS *(.branch_lt) *(.data .data.* .gnu.linkonce.d.* .sdata*) *(.bss .sbss .dynbss .dynsbss) - *(.glink .iplt .plt .rela*) } } diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 326e113d2e45..e0548b4950de 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -360,8 +360,8 @@ SECTIONS PROVIDE32 (end = .); STABS_DEBUG - DWARF_DEBUG + ELF_DETAILS DISCARDS /DISCARD/ : { diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c index 53bb71e3a2e1..c69bcf9b547a 100644 --- a/arch/powerpc/kexec/file_load_64.c +++ b/arch/powerpc/kexec/file_load_64.c @@ -138,15 +138,13 @@ out: */ static int get_crash_memory_ranges(struct crash_mem **mem_ranges) { - struct memblock_region *reg; + phys_addr_t base, end; struct crash_mem *tmem; + u64 i; int ret; - for_each_memblock(memory, reg) { - u64 base, size; - - base = (u64)reg->base; - size = (u64)reg->size; + for_each_mem_range(i, &base, &end) { + u64 size = end - base; /* Skip backup memory region, which needs a separate entry */ if (base == BACKUP_SRC_START) { @@ -250,8 +248,7 @@ static int __locate_mem_hole_top_down(struct kexec_buf *kbuf, phys_addr_t start, end; u64 i; - for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, - MEMBLOCK_NONE, &start, &end, NULL) { + for_each_mem_range_rev(i, &start, &end) { /* * memblock uses [start, end) convention while it is * [start, end] here. 
Fix the off-by-one to have the @@ -350,8 +347,7 @@ static int __locate_mem_hole_bottom_up(struct kexec_buf *kbuf, phys_addr_t start, end; u64 i; - for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, - MEMBLOCK_NONE, &start, &end, NULL) { + for_each_mem_range(i, &start, &end) { /* * memblock uses [start, end) convention while it is * [start, end] here. Fix the off-by-one to have the diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 41fedec69ac3..49db50d1db04 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -834,7 +834,8 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm, kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change); } -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, + unsigned flags) { return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end); } diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index 073617ce83e0..8f58dd20b362 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -95,23 +95,15 @@ EXPORT_SYMBOL_GPL(kvm_free_hpt_cma); void __init kvm_cma_reserve(void) { unsigned long align_size; - struct memblock_region *reg; - phys_addr_t selected_size = 0; + phys_addr_t selected_size; /* * We need CMA reservation only when we are in HV mode */ if (!cpu_has_feature(CPU_FTR_HVMODE)) return; - /* - * We cannot use memblock_phys_mem_size() here, because - * memblock_analyze() has not been called yet. - */ - for_each_memblock(memory, reg) - selected_size += memblock_region_memory_end_pfn(reg) - - memblock_region_memory_base_pfn(reg); - selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT; + selected_size = PAGE_ALIGN(memblock_phys_mem_size() * kvm_cma_resv_ratio / 100); if (selected_size) { pr_info("%s: reserving %ld MiB for global area\n", __func__, (unsigned long)selected_size / SZ_1M); diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c index 7705d5557239..84e5a2dc8be5 100644 --- a/arch/powerpc/kvm/book3s_hv_uvmem.c +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c @@ -687,9 +687,9 @@ static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm) struct kvmppc_uvmem_page_pvt *pvt; unsigned long pfn_last, pfn_first; - pfn_first = kvmppc_uvmem_pgmap.res.start >> PAGE_SHIFT; + pfn_first = kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT; pfn_last = pfn_first + - (resource_size(&kvmppc_uvmem_pgmap.res) >> PAGE_SHIFT); + (range_len(&kvmppc_uvmem_pgmap.range) >> PAGE_SHIFT); spin_lock(&kvmppc_uvmem_bitmap_lock); bit = find_first_zero_bit(kvmppc_uvmem_bitmap, @@ -1007,7 +1007,7 @@ static vm_fault_t kvmppc_uvmem_migrate_to_ram(struct vm_fault *vmf) static void kvmppc_uvmem_page_free(struct page *page) { unsigned long pfn = page_to_pfn(page) - - (kvmppc_uvmem_pgmap.res.start >> PAGE_SHIFT); + (kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT); struct kvmppc_uvmem_page_pvt *pvt; spin_lock(&kvmppc_uvmem_bitmap_lock); @@ -1170,7 +1170,9 @@ int kvmppc_uvmem_init(void) } kvmppc_uvmem_pgmap.type = MEMORY_DEVICE_PRIVATE; - kvmppc_uvmem_pgmap.res = *res; + kvmppc_uvmem_pgmap.range.start = res->start; + kvmppc_uvmem_pgmap.range.end = res->end; + kvmppc_uvmem_pgmap.nr_range = 1; kvmppc_uvmem_pgmap.ops = &kvmppc_uvmem_ops; /* just one global instance: */ kvmppc_uvmem_pgmap.owner = &kvmppc_uvmem_pgmap; @@ -1205,7 +1207,7 @@ void kvmppc_uvmem_free(void) return; 
memunmap_pages(&kvmppc_uvmem_pgmap); - release_mem_region(kvmppc_uvmem_pgmap.res.start, - resource_size(&kvmppc_uvmem_pgmap.res)); + release_mem_region(kvmppc_uvmem_pgmap.range.start, + range_len(&kvmppc_uvmem_pgmap.range)); kfree(kvmppc_uvmem_bitmap); } diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index d6c1069e9954..ed0c9c43d0cf 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -734,7 +734,8 @@ static int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) return 0; } -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, + unsigned flags) { /* kvm_unmap_hva flushes everything anyways */ kvm_unmap_hva(kvm, start); diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index d66a645503eb..69a91b571845 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -39,7 +39,7 @@ obj-$(CONFIG_PPC_BOOK3S_64) += copyuser_power7.o copypage_power7.o \ memcpy_power7.o obj64-y += copypage_64.o copyuser_64.o mem_64.o hweight_64.o \ - memcpy_64.o memcpy_mcsafe_64.o + memcpy_64.o copy_mc_64.o ifndef CONFIG_PPC_QUEUED_SPINLOCKS obj64-$(CONFIG_SMP) += locks.o diff --git a/arch/powerpc/lib/checksum_32.S b/arch/powerpc/lib/checksum_32.S index ecd150dc3ed9..ec5cd2dede35 100644 --- a/arch/powerpc/lib/checksum_32.S +++ b/arch/powerpc/lib/checksum_32.S @@ -78,12 +78,10 @@ EXPORT_SYMBOL(__csum_partial) /* * Computes the checksum of a memory block at src, length len, - * and adds in "sum" (32-bit), while copying the block to dst. - * If an access exception occurs on src or dst, it stores -EFAULT - * to *src_err or *dst_err respectively, and (for an error on - * src) zeroes the rest of dst. + * and adds in 0xffffffff, while copying the block to dst. + * If an access exception occurs it returns zero. 
* - * csum_partial_copy_generic(src, dst, len, sum, src_err, dst_err) + * csum_partial_copy_generic(src, dst, len) */ #define CSUM_COPY_16_BYTES_WITHEX(n) \ 8 ## n ## 0: \ @@ -108,14 +106,14 @@ EXPORT_SYMBOL(__csum_partial) adde r12,r12,r10 #define CSUM_COPY_16_BYTES_EXCODE(n) \ - EX_TABLE(8 ## n ## 0b, src_error); \ - EX_TABLE(8 ## n ## 1b, src_error); \ - EX_TABLE(8 ## n ## 2b, src_error); \ - EX_TABLE(8 ## n ## 3b, src_error); \ - EX_TABLE(8 ## n ## 4b, dst_error); \ - EX_TABLE(8 ## n ## 5b, dst_error); \ - EX_TABLE(8 ## n ## 6b, dst_error); \ - EX_TABLE(8 ## n ## 7b, dst_error); + EX_TABLE(8 ## n ## 0b, fault); \ + EX_TABLE(8 ## n ## 1b, fault); \ + EX_TABLE(8 ## n ## 2b, fault); \ + EX_TABLE(8 ## n ## 3b, fault); \ + EX_TABLE(8 ## n ## 4b, fault); \ + EX_TABLE(8 ## n ## 5b, fault); \ + EX_TABLE(8 ## n ## 6b, fault); \ + EX_TABLE(8 ## n ## 7b, fault); .text .stabs "arch/powerpc/lib/",N_SO,0,0,0f @@ -127,11 +125,8 @@ LG_CACHELINE_BYTES = L1_CACHE_SHIFT CACHELINE_MASK = (L1_CACHE_BYTES-1) _GLOBAL(csum_partial_copy_generic) - stwu r1,-16(r1) - stw r7,12(r1) - stw r8,8(r1) - - addic r12,r6,0 + li r12,-1 + addic r0,r0,0 /* clear carry */ addi r6,r4,-4 neg r0,r4 addi r4,r3,-4 @@ -246,34 +241,19 @@ _GLOBAL(csum_partial_copy_generic) rlwinm r3,r3,8,0,31 /* odd destination address: rotate one byte */ blr -/* read fault */ -src_error: - lwz r7,12(r1) - addi r1,r1,16 - cmpwi cr0,r7,0 - beqlr - li r0,-EFAULT - stw r0,0(r7) - blr -/* write fault */ -dst_error: - lwz r8,8(r1) - addi r1,r1,16 - cmpwi cr0,r8,0 - beqlr - li r0,-EFAULT - stw r0,0(r8) +fault: + li r3,0 blr - EX_TABLE(70b, src_error); - EX_TABLE(71b, dst_error); - EX_TABLE(72b, src_error); - EX_TABLE(73b, dst_error); - EX_TABLE(54b, dst_error); + EX_TABLE(70b, fault); + EX_TABLE(71b, fault); + EX_TABLE(72b, fault); + EX_TABLE(73b, fault); + EX_TABLE(54b, fault); /* * this stuff handles faults in the cacheline loop and branches to either - * src_error (if in read part) or dst_error (if in write part) + * fault (if in read part) or fault (if in write part) */ CSUM_COPY_16_BYTES_EXCODE(0) #if L1_CACHE_BYTES >= 32 @@ -290,12 +270,12 @@ dst_error: #endif #endif - EX_TABLE(30b, src_error); - EX_TABLE(31b, dst_error); - EX_TABLE(40b, src_error); - EX_TABLE(41b, dst_error); - EX_TABLE(50b, src_error); - EX_TABLE(51b, dst_error); + EX_TABLE(30b, fault); + EX_TABLE(31b, fault); + EX_TABLE(40b, fault); + EX_TABLE(41b, fault); + EX_TABLE(50b, fault); + EX_TABLE(51b, fault); EXPORT_SYMBOL(csum_partial_copy_generic) diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S index 514978f908d4..98ff51bd2f7d 100644 --- a/arch/powerpc/lib/checksum_64.S +++ b/arch/powerpc/lib/checksum_64.S @@ -182,34 +182,33 @@ EXPORT_SYMBOL(__csum_partial) .macro srcnr 100: - EX_TABLE(100b,.Lsrc_error_nr) + EX_TABLE(100b,.Lerror_nr) .endm .macro source 150: - EX_TABLE(150b,.Lsrc_error) + EX_TABLE(150b,.Lerror) .endm .macro dstnr 200: - EX_TABLE(200b,.Ldest_error_nr) + EX_TABLE(200b,.Lerror_nr) .endm .macro dest 250: - EX_TABLE(250b,.Ldest_error) + EX_TABLE(250b,.Lerror) .endm /* * Computes the checksum of a memory block at src, length len, - * and adds in "sum" (32-bit), while copying the block to dst. - * If an access exception occurs on src or dst, it stores -EFAULT - * to *src_err or *dst_err respectively. The caller must take any action - * required in this case (zeroing memory, recalculating partial checksum etc). + * and adds in 0xffffffff (32-bit), while copying the block to dst. + * If an access exception occurs, it returns 0. 
* - * csum_partial_copy_generic(r3=src, r4=dst, r5=len, r6=sum, r7=src_err, r8=dst_err) + * csum_partial_copy_generic(r3=src, r4=dst, r5=len) */ _GLOBAL(csum_partial_copy_generic) + li r6,-1 addic r0,r6,0 /* clear carry */ srdi. r6,r5,3 /* less than 8 bytes? */ @@ -401,29 +400,15 @@ dstnr; stb r6,0(r4) srdi r3,r3,32 blr -.Lsrc_error: +.Lerror: ld r14,STK_REG(R14)(r1) ld r15,STK_REG(R15)(r1) ld r16,STK_REG(R16)(r1) addi r1,r1,STACKFRAMESIZE -.Lsrc_error_nr: - cmpdi 0,r7,0 - beqlr - li r6,-EFAULT - stw r6,0(r7) +.Lerror_nr: + li r3,0 blr -.Ldest_error: - ld r14,STK_REG(R14)(r1) - ld r15,STK_REG(R15)(r1) - ld r16,STK_REG(R16)(r1) - addi r1,r1,STACKFRAMESIZE -.Ldest_error_nr: - cmpdi 0,r8,0 - beqlr - li r6,-EFAULT - stw r6,0(r8) - blr EXPORT_SYMBOL(csum_partial_copy_generic) /* diff --git a/arch/powerpc/lib/checksum_wrappers.c b/arch/powerpc/lib/checksum_wrappers.c index fabe4db28726..b895166afc82 100644 --- a/arch/powerpc/lib/checksum_wrappers.c +++ b/arch/powerpc/lib/checksum_wrappers.c @@ -12,83 +12,37 @@ #include <linux/uaccess.h> __wsum csum_and_copy_from_user(const void __user *src, void *dst, - int len, __wsum sum, int *err_ptr) + int len) { - unsigned int csum; + __wsum csum; might_sleep(); - allow_read_from_user(src, len); - - *err_ptr = 0; - - if (!len) { - csum = 0; - goto out; - } - if (unlikely((len < 0) || !access_ok(src, len))) { - *err_ptr = -EFAULT; - csum = (__force unsigned int)sum; - goto out; - } + if (unlikely(!access_ok(src, len))) + return 0; - csum = csum_partial_copy_generic((void __force *)src, dst, - len, sum, err_ptr, NULL); - - if (unlikely(*err_ptr)) { - int missing = __copy_from_user(dst, src, len); - - if (missing) { - memset(dst + len - missing, 0, missing); - *err_ptr = -EFAULT; - } else { - *err_ptr = 0; - } + allow_read_from_user(src, len); - csum = csum_partial(dst, len, sum); - } + csum = csum_partial_copy_generic((void __force *)src, dst, len); -out: prevent_read_from_user(src, len); - return (__force __wsum)csum; + return csum; } EXPORT_SYMBOL(csum_and_copy_from_user); -__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len, - __wsum sum, int *err_ptr) +__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len) { - unsigned int csum; + __wsum csum; might_sleep(); - allow_write_to_user(dst, len); - - *err_ptr = 0; - - if (!len) { - csum = 0; - goto out; - } + if (unlikely(!access_ok(dst, len))) + return 0; - if (unlikely((len < 0) || !access_ok(dst, len))) { - *err_ptr = -EFAULT; - csum = -1; /* invalid checksum */ - goto out; - } - - csum = csum_partial_copy_generic(src, (void __force *)dst, - len, sum, NULL, err_ptr); - - if (unlikely(*err_ptr)) { - csum = csum_partial(src, len, sum); + allow_write_to_user(dst, len); - if (copy_to_user(dst, src, len)) { - *err_ptr = -EFAULT; - csum = -1; /* invalid checksum */ - } - } + csum = csum_partial_copy_generic(src, (void __force *)dst, len); -out: prevent_write_to_user(dst, len); - return (__force __wsum)csum; + return csum; } EXPORT_SYMBOL(csum_and_copy_to_user); diff --git a/arch/powerpc/lib/memcpy_mcsafe_64.S b/arch/powerpc/lib/copy_mc_64.S index cb882d9a6d8a..88d46c471493 100644 --- a/arch/powerpc/lib/memcpy_mcsafe_64.S +++ b/arch/powerpc/lib/copy_mc_64.S @@ -50,7 +50,7 @@ err3; stb r0,0(r3) blr -_GLOBAL(memcpy_mcsafe) +_GLOBAL(copy_mc_generic) mr r7,r5 cmpldi r5,16 blt .Lshort_copy @@ -239,4 +239,4 @@ err1; stb r0,0(r3) 15: li r3,0 blr -EXPORT_SYMBOL_GPL(memcpy_mcsafe); +EXPORT_SYMBOL_GPL(copy_mc_generic); diff --git a/arch/powerpc/mm/book3s32/mmu.c 
b/arch/powerpc/mm/book3s32/mmu.c index c0162911f6cb..d426eaf76bb0 100644 --- a/arch/powerpc/mm/book3s32/mmu.c +++ b/arch/powerpc/mm/book3s32/mmu.c @@ -191,10 +191,17 @@ static bool is_module_segment(unsigned long addr) { if (!IS_ENABLED(CONFIG_MODULES)) return false; +#ifdef MODULES_VADDR + if (addr < ALIGN_DOWN(MODULES_VADDR, SZ_256M)) + return false; + if (addr > ALIGN(MODULES_END, SZ_256M) - 1) + return false; +#else if (addr < ALIGN_DOWN(VMALLOC_START, SZ_256M)) return false; - if (addr >= ALIGN(VMALLOC_END, SZ_256M)) + if (addr > ALIGN(VMALLOC_END, SZ_256M) - 1) return false; +#endif return true; } diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c index 1da9dbba9217..b830adee51f5 100644 --- a/arch/powerpc/mm/book3s64/hash_utils.c +++ b/arch/powerpc/mm/book3s64/hash_utils.c @@ -7,7 +7,7 @@ * * SMP scalability work: * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM - * + * * Module name: htab.c * * Description: @@ -232,6 +232,8 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags) rflags |= HPTE_R_I; else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT) rflags |= (HPTE_R_I | HPTE_R_G); + else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO) + rflags |= (HPTE_R_W | HPTE_R_I | HPTE_R_M); else /* * Add memory coherence if cache inhibited is not set @@ -865,8 +867,8 @@ static void __init htab_initialize(void) unsigned long table; unsigned long pteg_count; unsigned long prot; - unsigned long base = 0, size = 0; - struct memblock_region *reg; + phys_addr_t base = 0, size = 0, end; + u64 i; DBG(" -> htab_initialize()\n"); @@ -882,7 +884,7 @@ static void __init htab_initialize(void) /* * Calculate the required size of the htab. We want the number of * PTEGs to equal one half the number of real pages. 
- */ + */ htab_size_bytes = htab_get_table_size(); pteg_count = htab_size_bytes >> 7; @@ -892,7 +894,7 @@ static void __init htab_initialize(void) firmware_has_feature(FW_FEATURE_PS3_LV1)) { /* Using a hypervisor which owns the htab */ htab_address = NULL; - _SDR1 = 0; + _SDR1 = 0; #ifdef CONFIG_FA_DUMP /* * If firmware assisted dump is active firmware preserves @@ -958,9 +960,9 @@ static void __init htab_initialize(void) #endif /* CONFIG_DEBUG_PAGEALLOC */ /* create bolted the linear mapping in the hash table */ - for_each_memblock(memory, reg) { - base = (unsigned long)__va(reg->base); - size = reg->size; + for_each_mem_range(i, &base, &end) { + size = end - base; + base = (unsigned long)__va(base); DBG("creating mapping for region: %lx..%lx (prot: %lx)\n", base, size, prot); @@ -1115,8 +1117,10 @@ void hash__early_init_mmu_secondary(void) && cpu_has_feature(CPU_FTR_HVMODE)) tlbiel_all(); - if (IS_ENABLED(CONFIG_PPC_MEM_KEYS) && mmu_has_feature(MMU_FTR_PKEY)) +#ifdef CONFIG_PPC_MEM_KEYS + if (mmu_has_feature(MMU_FTR_PKEY)) mtspr(SPRN_UAMOR, default_uamor); +#endif } #endif /* CONFIG_SMP */ diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index 28c784976bed..cc72666e891a 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -329,7 +329,8 @@ static int __meminit create_physical_mapping(unsigned long start, static void __init radix_init_pgtable(void) { unsigned long rts_field; - struct memblock_region *reg; + phys_addr_t start, end; + u64 i; /* We don't support slb for radix */ mmu_slb_size = 0; @@ -337,20 +338,19 @@ static void __init radix_init_pgtable(void) /* * Create the linear mapping */ - for_each_memblock(memory, reg) { + for_each_mem_range(i, &start, &end) { /* * The memblock allocator is up at this point, so the * page tables will be allocated within the range. No * need or a node (which we don't have yet). */ - if ((reg->base + reg->size) >= RADIX_VMALLOC_START) { + if (end >= RADIX_VMALLOC_START) { pr_warn("Outside the supported range\n"); continue; } - WARN_ON(create_physical_mapping(reg->base, - reg->base + reg->size, + WARN_ON(create_physical_mapping(start, end, radix_mem_block_size, -1, PAGE_KERNEL)); } @@ -734,21 +734,6 @@ void radix__mmu_cleanup_all(void) } } -void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base, - phys_addr_t first_memblock_size) -{ - /* - * We don't currently support the first MEMBLOCK not mapping 0 - * physical on those processors - */ - BUG_ON(first_memblock_base != 0); - - /* - * Radix mode is not limited by RMA / VRMA addressing. - */ - ppc64_rma_size = ULONG_MAX; -} - #ifdef CONFIG_MEMORY_HOTPLUG static void free_pte_table(pte_t *pte_start, pmd_t *pmd) { diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 02e127fa5777..8459056cce67 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -433,9 +433,16 @@ void __init mmu_early_init_devtree(void) if (!(mfmsr() & MSR_HV)) early_check_vec5(); - if (early_radix_enabled()) + if (early_radix_enabled()) { radix__early_init_devtree(); - else + /* + * We have finalized the translation we are going to use by now. + * Radix mode is not limited by RMA / VRMA addressing. + * Hence don't limit memblock allocations. 
+ */ + ppc64_rma_size = ULONG_MAX; + memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE); + } else hash__early_init_devtree(); } #endif /* CONFIG_PPC_BOOK3S_64 */ diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c index fb294046e00e..26fda3203320 100644 --- a/arch/powerpc/mm/kasan/kasan_init_32.c +++ b/arch/powerpc/mm/kasan/kasan_init_32.c @@ -138,11 +138,11 @@ void __init kasan_mmu_init(void) void __init kasan_init(void) { - struct memblock_region *reg; + phys_addr_t base, end; + u64 i; - for_each_memblock(memory, reg) { - phys_addr_t base = reg->base; - phys_addr_t top = min(base + reg->size, total_lowmem); + for_each_mem_range(i, &base, &end) { + phys_addr_t top = min(end, total_lowmem); int ret; if (base >= top) diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 42e25874f5a8..5e2e7c0a8f1a 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -184,15 +184,16 @@ void __init initmem_init(void) /* mark pages that don't exist as nosave */ static int __init mark_nonram_nosave(void) { - struct memblock_region *reg, *prev = NULL; - - for_each_memblock(memory, reg) { - if (prev && - memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg)) - register_nosave_region(memblock_region_memory_end_pfn(prev), - memblock_region_memory_base_pfn(reg)); - prev = reg; + unsigned long spfn, epfn, prev = 0; + int i; + + for_each_mem_pfn_range(i, MAX_NUMNODES, &spfn, &epfn, NULL) { + if (prev && prev < spfn) + register_nosave_region(prev, spfn); + + prev = epfn; } + return 0; } #else /* CONFIG_NEED_MULTIPLE_NODES */ @@ -584,20 +585,24 @@ void flush_icache_user_page(struct vm_area_struct *vma, struct page *page, */ static int __init add_system_ram_resources(void) { - struct memblock_region *reg; + phys_addr_t start, end; + u64 i; - for_each_memblock(memory, reg) { + for_each_mem_range(i, &start, &end) { struct resource *res; - unsigned long base = reg->base; - unsigned long size = reg->size; res = kzalloc(sizeof(struct resource), GFP_KERNEL); WARN_ON(!res); if (res) { res->name = "System RAM"; - res->start = base; - res->end = base + size - 1; + res->start = start; + /* + * In memblock, end points to the first byte after + * the range while in resourses, end points to the + * last byte in the range. 
+ */ + res->end = end - 1; res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; WARN_ON(request_resource(&iomem_resource, res) < 0); } diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 1f61fa2148b5..f4e20d8e6c02 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -804,17 +804,14 @@ static void __init setup_nonnuma(void) unsigned long total_ram = memblock_phys_mem_size(); unsigned long start_pfn, end_pfn; unsigned int nid = 0; - struct memblock_region *reg; + int i; printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n", top_of_ram, total_ram); printk(KERN_DEBUG "Memory hole size: %ldMB\n", (top_of_ram - total_ram) >> 20); - for_each_memblock(memory, reg) { - start_pfn = memblock_region_memory_base_pfn(reg); - end_pfn = memblock_region_memory_end_pfn(reg); - + for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) { fake_numa_create_new_node(end_pfn, &nid); memblock_set_node(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn), diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index 6eb4eab79385..079159e97bca 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -123,11 +123,11 @@ static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top) void __init mapin_ram(void) { - struct memblock_region *reg; + phys_addr_t base, end; + u64 i; - for_each_memblock(memory, reg) { - phys_addr_t base = reg->base; - phys_addr_t top = min(base + reg->size, total_lowmem); + for_each_mem_range(i, &base, &end) { + phys_addr_t top = min(end, total_lowmem); if (base >= top) continue; diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index 16d09b36fe06..e809cb5a1631 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -475,7 +475,6 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, case BPF_JMP | BPF_JSET | BPF_K: case BPF_JMP | BPF_JSET | BPF_X: true_cond = COND_NE; - /* Fall through */ cond_branch: /* same targets, can avoid doing the test :) */ if (filter[i].jt == filter[i].jf) { diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 78fe34986594..08643cba1494 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -1557,9 +1557,16 @@ nocheck: ret = 0; out: if (has_branch_stack(event)) { - power_pmu_bhrb_enable(event); - cpuhw->bhrb_filter = ppmu->bhrb_filter_map( - event->attr.branch_sample_type); + u64 bhrb_filter = -1; + + if (ppmu->bhrb_filter_map) + bhrb_filter = ppmu->bhrb_filter_map( + event->attr.branch_sample_type); + + if (bhrb_filter != -1) { + cpuhw->bhrb_filter = bhrb_filter; + power_pmu_bhrb_enable(event); + } } perf_pmu_enable(event->pmu); @@ -1881,7 +1888,6 @@ static int power_pmu_event_init(struct perf_event *event) int n; int err; struct cpu_hw_events *cpuhw; - u64 bhrb_filter; if (!ppmu) return -ENOENT; @@ -1987,7 +1993,10 @@ static int power_pmu_event_init(struct perf_event *event) err = power_check_constraints(cpuhw, events, cflags, n + 1); if (has_branch_stack(event)) { - bhrb_filter = ppmu->bhrb_filter_map( + u64 bhrb_filter = -1; + + if (ppmu->bhrb_filter_map) + bhrb_filter = ppmu->bhrb_filter_map( event->attr.branch_sample_type); if (bhrb_filter == -1) { @@ -2141,6 +2150,10 @@ static void record_and_restart(struct perf_event *event, unsigned long val, if (perf_event_overflow(event, &data, regs)) power_pmu_stop(event, 0); + } else if (period) { + /* Account for interrupt in case of invalid SIAR */ + if (perf_event_account_interrupt(event)) 
+ power_pmu_stop(event, 0); } } @@ -2323,6 +2336,7 @@ int register_power_pmu(struct power_pmu *pmu) pmu->name); power_pmu.attr_groups = ppmu->attr_groups; + power_pmu.capabilities |= (ppmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS); #ifdef MSR_HV /* diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c index cdb7bfbd157e..6e7e820508df 100644 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c @@ -1128,6 +1128,15 @@ static struct bin_attribute *if_bin_attrs[] = { NULL, }; +static struct attribute *cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static struct attribute_group cpumask_attr_group = { + .attrs = cpumask_attrs, +}; + static struct attribute *if_attrs[] = { &dev_attr_catalog_len.attr, &dev_attr_catalog_version.attr, @@ -1135,7 +1144,6 @@ static struct attribute *if_attrs[] = { &dev_attr_sockets.attr, &dev_attr_chipspersocket.attr, &dev_attr_coresperchip.attr, - &dev_attr_cpumask.attr, NULL, }; @@ -1151,6 +1159,7 @@ static const struct attribute_group *attr_groups[] = { &event_desc_group, &event_long_desc_group, &if_group, + &cpumask_attr_group, NULL, }; diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c index a45d694a5d5d..62d0b54086f8 100644 --- a/arch/powerpc/perf/imc-pmu.c +++ b/arch/powerpc/perf/imc-pmu.c @@ -1289,7 +1289,7 @@ static int trace_imc_prepare_sample(struct trace_imc_data *mem, header->misc = 0; if (cpu_has_feature(CPU_FTR_ARCH_31)) { - switch (IMC_TRACE_RECORD_VAL_HVPR(mem->val)) { + switch (IMC_TRACE_RECORD_VAL_HVPR(be64_to_cpu(READ_ONCE(mem->val)))) { case 0:/* when MSR HV and PR not set in the trace-record */ header->misc |= PERF_RECORD_MISC_GUEST_KERNEL; break; @@ -1297,7 +1297,7 @@ static int trace_imc_prepare_sample(struct trace_imc_data *mem, header->misc |= PERF_RECORD_MISC_GUEST_USER; break; case 2: /* MSR HV is 1 and PR is 0 */ - header->misc |= PERF_RECORD_MISC_HYPERVISOR; + header->misc |= PERF_RECORD_MISC_KERNEL; break; case 3: /* MSR HV is 1 and PR is 1 */ header->misc |= PERF_RECORD_MISC_USER; diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c index a213a0aa5d25..8e53f2fc3fe0 100644 --- a/arch/powerpc/perf/perf_regs.c +++ b/arch/powerpc/perf/perf_regs.c @@ -13,9 +13,11 @@ #include <asm/ptrace.h> #include <asm/perf_regs.h> +u64 PERF_REG_EXTENDED_MASK; + #define PT_REGS_OFFSET(id, r) [id] = offsetof(struct pt_regs, r) -#define REG_RESERVED (~((1ULL << PERF_REG_POWERPC_MAX) - 1)) +#define REG_RESERVED (~(PERF_REG_EXTENDED_MASK | PERF_REG_PMU_MASK)) static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = { PT_REGS_OFFSET(PERF_REG_POWERPC_R0, gpr[0]), @@ -69,10 +71,36 @@ static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = { PT_REGS_OFFSET(PERF_REG_POWERPC_MMCRA, dsisr), }; +/* Function to return the extended register values */ +static u64 get_ext_regs_value(int idx) +{ + switch (idx) { + case PERF_REG_POWERPC_MMCR0: + return mfspr(SPRN_MMCR0); + case PERF_REG_POWERPC_MMCR1: + return mfspr(SPRN_MMCR1); + case PERF_REG_POWERPC_MMCR2: + return mfspr(SPRN_MMCR2); +#ifdef CONFIG_PPC64 + case PERF_REG_POWERPC_MMCR3: + return mfspr(SPRN_MMCR3); + case PERF_REG_POWERPC_SIER2: + return mfspr(SPRN_SIER2); + case PERF_REG_POWERPC_SIER3: + return mfspr(SPRN_SIER3); +#endif + default: return 0; + } +} + u64 perf_reg_value(struct pt_regs *regs, int idx) { - if (WARN_ON_ONCE(idx >= PERF_REG_POWERPC_MAX)) - return 0; + u64 perf_reg_extended_max = PERF_REG_POWERPC_MAX; + + if (cpu_has_feature(CPU_FTR_ARCH_31)) + perf_reg_extended_max = PERF_REG_MAX_ISA_31; + else if 
(cpu_has_feature(CPU_FTR_ARCH_300)) + perf_reg_extended_max = PERF_REG_MAX_ISA_300; if (idx == PERF_REG_POWERPC_SIER && (IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) || @@ -85,6 +113,16 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) IS_ENABLED(CONFIG_PPC32))) return 0; + if (idx >= PERF_REG_POWERPC_MAX && idx < perf_reg_extended_max) + return get_ext_regs_value(idx); + + /* + * If the idx is referring to value beyond the + * supported registers, return 0 with a warning + */ + if (WARN_ON_ONCE(idx >= perf_reg_extended_max)) + return 0; + return regs_get_register(regs, pt_regs_offset[idx]); } diff --git a/arch/powerpc/perf/power10-pmu.c b/arch/powerpc/perf/power10-pmu.c index f7cff7f36a1c..83148656b524 100644 --- a/arch/powerpc/perf/power10-pmu.c +++ b/arch/powerpc/perf/power10-pmu.c @@ -87,6 +87,8 @@ #define POWER10_MMCRA_IFM3 0x00000000C0000000UL #define POWER10_MMCRA_BHRB_MASK 0x00000000C0000000UL +extern u64 PERF_REG_EXTENDED_MASK; + /* Table of alternatives, sorted by column 0 */ static const unsigned int power10_event_alternatives[][MAX_ALT] = { { PM_RUN_CYC_ALT, PM_RUN_CYC }, @@ -397,6 +399,7 @@ static struct power_pmu power10_pmu = { .cache_events = &power10_cache_events, .attr_groups = power10_pmu_attr_groups, .bhrb_nr = 32, + .capabilities = PERF_PMU_CAP_EXTENDED_REGS, }; int init_power10_pmu(void) @@ -408,6 +411,9 @@ int init_power10_pmu(void) strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power10")) return -ENODEV; + /* Set the PERF_REG_EXTENDED_MASK here */ + PERF_REG_EXTENDED_MASK = PERF_REG_PMU_MASK_31; + rc = register_power_pmu(&power10_pmu); if (rc) return rc; diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c index 05dae38b969a..2a57e93a79dc 100644 --- a/arch/powerpc/perf/power9-pmu.c +++ b/arch/powerpc/perf/power9-pmu.c @@ -90,6 +90,8 @@ enum { #define POWER9_MMCRA_IFM3 0x00000000C0000000UL #define POWER9_MMCRA_BHRB_MASK 0x00000000C0000000UL +extern u64 PERF_REG_EXTENDED_MASK; + /* Nasty Power9 specific hack */ #define PVR_POWER9_CUMULUS 0x00002000 @@ -434,6 +436,7 @@ static struct power_pmu power9_pmu = { .cache_events = &power9_cache_events, .attr_groups = power9_pmu_attr_groups, .bhrb_nr = 32, + .capabilities = PERF_PMU_CAP_EXTENDED_REGS, }; int init_power9_pmu(void) @@ -457,6 +460,9 @@ int init_power9_pmu(void) } } + /* Set the PERF_REG_EXTENDED_MASK here */ + PERF_REG_EXTENDED_MASK = PERF_REG_PMU_MASK_300; + rc = register_power_pmu(&power9_pmu); if (rc) return rc; diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 87737ec86d39..1dc9d3c81872 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -36,7 +36,7 @@ config PPC_BOOK3S_6xx select PPC_HAVE_PMU_SUPPORT select PPC_HAVE_KUEP select PPC_HAVE_KUAP - select HAVE_ARCH_VMAP_STACK + select HAVE_ARCH_VMAP_STACK if !ADB_PMU config PPC_BOOK3S_601 bool "PowerPC 601" diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c index 77513a80cef9..345ab062b21a 100644 --- a/arch/powerpc/platforms/powernv/idle.c +++ b/arch/powerpc/platforms/powernv/idle.c @@ -1223,7 +1223,7 @@ static void __init pnv_probe_idle_states(void) return; } - if (pvr_version_is(PVR_POWER9)) + if (cpu_has_feature(CPU_FTR_ARCH_300)) pnv_power9_idle_init(); for (i = 0; i < nr_pnv_idle_states; i++) diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index c9c25fb0783c..023a4f987bb2 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ 
b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -2705,7 +2705,7 @@ void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe) struct iommu_table *tbl = pe->table_group.tables[0]; int64_t rc; - if (pe->dma_setup_done) + if (!pe->dma_setup_done) return; rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0); diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index c6e0d8abf75e..7a974ed6b240 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -107,22 +107,28 @@ static int pseries_cpu_disable(void) */ static void pseries_cpu_die(unsigned int cpu) { - int tries; int cpu_status = 1; unsigned int pcpu = get_hard_smp_processor_id(cpu); + unsigned long timeout = jiffies + msecs_to_jiffies(120000); - for (tries = 0; tries < 25; tries++) { + while (true) { cpu_status = smp_query_cpu_stopped(pcpu); if (cpu_status == QCSS_STOPPED || cpu_status == QCSS_HARDWARE_ERROR) break; - cpu_relax(); + if (time_after(jiffies, timeout)) { + pr_warn("CPU %i (hwid %i) didn't die after 120 seconds\n", + cpu, pcpu); + timeout = jiffies + msecs_to_jiffies(120000); + } + + cond_resched(); } - if (cpu_status != 0) { - printk("Querying DEAD? cpu %i (%i) shows %i\n", - cpu, pcpu, cpu_status); + if (cpu_status == QCSS_HARDWARE_ERROR) { + pr_warn("CPU %i (hwid %i) reported error while dying\n", + cpu, pcpu); } /* Isolation and deallocation are definitely done by diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c index f439f0dfea7d..a88a707a608a 100644 --- a/arch/powerpc/platforms/pseries/papr_scm.c +++ b/arch/powerpc/platforms/pseries/papr_scm.c @@ -822,7 +822,7 @@ free_stats: kfree(stats); return rc ? rc : seq_buf_used(&s); } -DEVICE_ATTR_RO(perf_stats); +DEVICE_ATTR_ADMIN_RO(perf_stats); static ssize_t flags_show(struct device *dev, struct device_attribute *attr, char *buf) diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index f3736fcd98fc..13c86a292c6d 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c @@ -184,7 +184,6 @@ static void handle_system_shutdown(char event_modifier) case EPOW_SHUTDOWN_ON_UPS: pr_emerg("Loss of system power detected. System is running on" " UPS/battery. Check RTAS error log for details\n"); - orderly_poweroff(true); break; case EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS: |
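
The conversions above from for_each_memblock(memory, reg) to for_each_mem_range(i, &base, &end) (htab_initialize, radix_init_pgtable, kasan_init, mapin_ram, add_system_ram_resources) all follow the same shape: the iterator now hands back half-open physical ranges directly instead of a struct memblock_region pointer. The hash hunk also shows the one ordering detail to watch: the region size has to be computed from the physical end before base is overwritten with its __va() alias. A minimal sketch of the converted loop, assuming kernel context; map_linear_chunk() is a hypothetical stand-in for the per-region work (htab_bolt_mapping(), __mapin_ram_chunk(), ...):

#include <linux/memblock.h>	/* for_each_mem_range() */
#include <asm/page.h>		/* __va() */

static void __init map_all_memory_sketch(void)
{
	phys_addr_t start, end;
	u64 i;

	for_each_mem_range(i, &start, &end) {
		/* end is exclusive: the range is [start, end) */
		unsigned long size = end - start;
		/* take the size before start is turned into a virtual address */
		unsigned long base = (unsigned long)__va(start);

		map_linear_chunk(base, size);	/* hypothetical helper */
	}
}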
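
The add_system_ram_resources() hunk relies on the same half-open convention: memblock's end is the first byte after the range, while struct resource stores the last byte in the range, hence res->end = end - 1. A small self-contained illustration of that off-by-one conversion, with made-up addresses:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* assumed example range: 2 GiB of RAM starting at 0 */
	uint64_t start = 0x00000000;	/* first byte of the range */
	uint64_t end   = 0x80000000;	/* memblock-style: first byte after it */

	uint64_t res_start = start;	/* resource-style start is the same */
	uint64_t res_end   = end - 1;	/* resource-style end is inclusive */

	printf("System RAM: %#llx-%#llx\n",
	       (unsigned long long)res_start, (unsigned long long)res_end);
	return 0;
}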
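
The pseries_cpu_die() hunk drops the fixed 25-iteration cpu_relax() retry in favour of an open-ended poll that reschedules between queries and only warns, every 120 seconds, while the CPU has still not stopped. The loop shape, as a minimal sketch assuming kernel context; cpu_reports_stopped() is a hypothetical boolean stand-in for the smp_query_cpu_stopped() status check:

#include <linux/jiffies.h>	/* jiffies, time_after(), msecs_to_jiffies() */
#include <linux/sched.h>	/* cond_resched() */
#include <linux/printk.h>

static void wait_for_cpu_stop_sketch(unsigned int cpu, unsigned int pcpu)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(120000);

	while (!cpu_reports_stopped(pcpu)) {	/* hypothetical helper */
		if (time_after(jiffies, timeout)) {
			pr_warn("CPU %u (hwid %u) didn't die after 120 seconds\n",
				cpu, pcpu);
			/* keep polling, but wait another 120 s before warning again */
			timeout = jiffies + msecs_to_jiffies(120000);
		}
		cond_resched();
	}
}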
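
The perf_regs.c hunk is split awkwardly across the listing (its "else if" lands on the next physical line), so the resulting perf_reg_value() flow is pieced together here as a sketch rather than a verbatim copy: the ceiling is picked from the ISA level, indices at or above PERF_REG_POWERPC_MAX but below that ceiling are read live from the extra monitor SPRs via get_ext_regs_value(), and only indices beyond the ceiling warn and return 0.

u64 perf_reg_value(struct pt_regs *regs, int idx)
{
	u64 perf_reg_extended_max = PERF_REG_POWERPC_MAX;

	if (cpu_has_feature(CPU_FTR_ARCH_31))
		perf_reg_extended_max = PERF_REG_MAX_ISA_31;
	else if (cpu_has_feature(CPU_FTR_ARCH_300))
		perf_reg_extended_max = PERF_REG_MAX_ISA_300;

	/* the SIER/SIAR/SDAR/MMCRA special cases from the hunk are elided here */

	if (idx >= PERF_REG_POWERPC_MAX && idx < perf_reg_extended_max)
		return get_ext_regs_value(idx);	/* mfspr() on MMCR0..SIER3 */

	if (WARN_ON_ONCE(idx >= perf_reg_extended_max))
		return 0;

	return regs_get_register(regs, pt_regs_offset[idx]);
}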