author		Linus Torvalds <torvalds@linux-foundation.org>	2018-08-17 21:32:50 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-08-17 21:32:50 +0300
commit		5e2d059b52e397d9ac42f4c4d9d9a841887b5818 (patch)
tree		c8cd8fd7187113be33e29fcc75f45a8bbc27e6b2 /arch/powerpc/include
parent		d190775206d06397a9309421cac5ba2f2c243521 (diff)
parent		a2dc009afa9ae8b92305be7728676562a104cb40 (diff)
download	linux-5e2d059b52e397d9ac42f4c4d9d9a841887b5818.tar.xz
Merge tag 'powerpc-4.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc updates from Michael Ellerman:
"Notable changes:
- A fix for a bug in our page table fragment allocator, where a page
table page could be freed and reallocated for something else while
still in use, leading to memory corruption etc. The fix reuses
pt_mm in struct page (x86 only) for a powerpc only refcount.
- Fixes to our pkey support. Several are user-visible changes, but
bring us into line with x86 behaviour and/or fix outright bugs.
Thanks to Florian Weimer for reporting many of these.
- A series to improve the hvc driver & related OPAL console code,
which have been seen to cause hardlockups at times. The hvc driver
changes in particular have been in linux-next for about a month.
- Increase our MAX_PHYSMEM_BITS to 128TB when SPARSEMEM_VMEMMAP=y.
- Remove Power8 DD1 and Power9 DD1 support; neither chip should be in
use anywhere other than as a paperweight.
- An optimised memcmp implementation using Power7-or-later VMX
instructions.
- Support for barrier_nospec on some NXP CPUs.
- Support for flushing the count cache on context switch on some IBM
CPUs (controlled by firmware), as a Spectre v2 mitigation.
- A series to enhance the information we print on unhandled signals
to bring it into line with other arches, including showing the
offending VMA and dumping the instructions around the fault.
Thanks to: Aaro Koskinen, Akshay Adiga, Alastair D'Silva, Alexey
Kardashevskiy, Alexey Spirkov, Alistair Popple, Andrew Donnellan,
Aneesh Kumar K.V, Anju T Sudhakar, Arnd Bergmann, Bartosz Golaszewski,
Benjamin Herrenschmidt, Bharat Bhushan, Bjoern Noetel, Boqun Feng,
Breno Leitao, Bryant G. Ly, Camelia Groza, Christophe Leroy, Christoph
Hellwig, Cyril Bur, Dan Carpenter, Daniel Klamt, Darren Stevens, Dave
Young, David Gibson, Diana Craciun, Finn Thain, Florian Weimer,
Frederic Barrat, Gautham R. Shenoy, Geert Uytterhoeven, Geoff Levand,
Guenter Roeck, Gustavo Romero, Haren Myneni, Hari Bathini, Joel
Stanley, Jonathan Neuschäfer, Kees Cook, Madhavan Srinivasan, Mahesh
Salgaonkar, Markus Elfring, Mathieu Malaterre, Mauro S. M. Rodrigues,
Michael Hanselmann, Michael Neuling, Michael Schmitz, Mukesh Ojha,
Murilo Opsfelder Araujo, Nicholas Piggin, Parth Y Shah, Paul
Mackerras, Paul Menzel, Ram Pai, Randy Dunlap, Rashmica Gupta, Reza
Arbab, Rodrigo R. Galvao, Russell Currey, Sam Bobroff, Scott Wood,
Shilpasri G Bhat, Simon Guo, Souptick Joarder, Stan Johnson, Thiago
Jung Bauermann, Tyrel Datwyler, Vaibhav Jain, Vasant Hegde, Venkat
Rao, zhong jiang"
* tag 'powerpc-4.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (234 commits)
powerpc/mm/book3s/radix: Add mapping statistics
powerpc/uaccess: Enable get_user(u64, *p) on 32-bit
powerpc/mm/hash: Remove unnecessary do { } while(0) loop
powerpc/64s: move machine check SLB flushing to mm/slb.c
powerpc/powernv/idle: Fix build error
powerpc/mm/tlbflush: update the mmu_gather page size while iterating address range
powerpc/mm: remove warning about ‘type’ being set
powerpc/32: Include setup.h header file to fix warnings
powerpc: Move `path` variable inside DEBUG_PROM
powerpc/powermac: Make some functions static
powerpc/powermac: Remove variable x that's never read
cxl: remove a dead branch
powerpc/powermac: Add missing include of header pmac.h
powerpc/kexec: Use common error handling code in setup_new_fdt()
powerpc/xmon: Add address lookup for percpu symbols
powerpc/mm: remove huge_pte_offset_and_shift() prototype
powerpc/lib: Use patch_site to patch copy_32 functions once cache is enabled
powerpc/pseries: Fix endianness while restoring of r3 in MCE handler.
powerpc/fadump: merge adjacent memory ranges to reduce PT_LOAD segments
powerpc/fadump: handle crash memory ranges array index overflow
...
Diffstat (limited to 'arch/powerpc/include')
77 files changed, 471 insertions, 384 deletions
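A quick orientation before the diff: the first hunks below move the stringify_in_c() and ASM_CONST() helpers out of asm-compat.h into a new, dependency-free asm-const.h. As a standalone sketch of why the pattern exists (the example is mine, not from the patch): the same header can be included from both C and assembly, but only C accepts the UL suffix on constants.

    /* hypothetical shared header */
    #include <asm/asm-const.h>

    #define MY_REGION_BASE	ASM_CONST(0xc000000000000000)

    #ifndef __ASSEMBLY__
    /* C sees 0xc000000000000000UL, suitable for 64-bit arithmetic */
    static inline int my_in_region(unsigned long ea)
    {
    	return ea >= MY_REGION_BASE;
    }
    #endif
    /* assembly sees the bare constant 0xc000000000000000 */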
diff --git a/arch/powerpc/include/asm/asm-405.h b/arch/powerpc/include/asm/asm-405.h
new file mode 100644
index 000000000000..7270d3ae7c8e
--- /dev/null
+++ b/arch/powerpc/include/asm/asm-405.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_POWERPC_ASM_405_H
+#define _ASM_POWERPC_ASM_405_H
+
+#include <asm/asm-const.h>
+
+#ifdef __KERNEL__
+#ifdef CONFIG_IBM405_ERR77
+/* Erratum #77 on the 405 means we need a sync or dcbt before every
+ * stwcx. The old ATOMIC_SYNC_FIX covered some but not all of this.
+ */
+#define PPC405_ERR77(ra,rb) stringify_in_c(dcbt ra, rb;)
+#define PPC405_ERR77_SYNC stringify_in_c(sync;)
+#else
+#define PPC405_ERR77(ra,rb)
+#define PPC405_ERR77_SYNC
+#endif
+#endif
+
+#endif /* _ASM_POWERPC_ASM_405_H */
diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h
index 7f2a7702596c..19b70c5b5f18 100644
--- a/arch/powerpc/include/asm/asm-compat.h
+++ b/arch/powerpc/include/asm/asm-compat.h
@@ -1,21 +1,10 @@
 #ifndef _ASM_POWERPC_ASM_COMPAT_H
 #define _ASM_POWERPC_ASM_COMPAT_H
 
+#include <asm/asm-const.h>
 #include <asm/types.h>
 #include <asm/ppc-opcode.h>
 
-#ifdef __ASSEMBLY__
-# define stringify_in_c(...) __VA_ARGS__
-# define ASM_CONST(x) x
-#else
-/* This version of stringify will deal with commas... */
-# define __stringify_in_c(...) #__VA_ARGS__
-# define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " "
-# define __ASM_CONST(x) x##UL
-# define ASM_CONST(x) __ASM_CONST(x)
-#endif
-
-
 #ifdef __powerpc64__
 
 /* operations for longs and pointers */
@@ -70,17 +59,4 @@
 
 #endif
 
-#ifdef __KERNEL__
-#ifdef CONFIG_IBM405_ERR77
-/* Erratum #77 on the 405 means we need a sync or dcbt before every
- * stwcx. The old ATOMIC_SYNC_FIX covered some but not all of this.
- */
-#define PPC405_ERR77(ra,rb) stringify_in_c(dcbt ra, rb;)
-#define PPC405_ERR77_SYNC stringify_in_c(sync;)
-#else
-#define PPC405_ERR77(ra,rb)
-#define PPC405_ERR77_SYNC
-#endif
-#endif
-
 #endif /* _ASM_POWERPC_ASM_COMPAT_H */
diff --git a/arch/powerpc/include/asm/asm-const.h b/arch/powerpc/include/asm/asm-const.h
new file mode 100644
index 000000000000..082c1538c562
--- /dev/null
+++ b/arch/powerpc/include/asm/asm-const.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_POWERPC_ASM_CONST_H
+#define _ASM_POWERPC_ASM_CONST_H
+
+#ifdef __ASSEMBLY__
+# define stringify_in_c(...) __VA_ARGS__
+# define ASM_CONST(x) x
+#else
+/* This version of stringify will deal with commas... */
+# define __stringify_in_c(...) #__VA_ARGS__
+# define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " "
+# define __ASM_CONST(x) x##UL
+# define ASM_CONST(x) __ASM_CONST(x)
+#endif
+#endif /* _ASM_POWERPC_ASM_CONST_H */
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index 7841b8a60657..1f4691ce4126 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -48,8 +48,8 @@ void __trace_opal_exit(long opcode, unsigned long retval);
 /* VMX copying */
 int enter_vmx_usercopy(void);
 int exit_vmx_usercopy(void);
-int enter_vmx_copy(void);
-void * exit_vmx_copy(void *dest);
+int enter_vmx_ops(void);
+void *exit_vmx_ops(void *dest);
 
 /* Traps */
 long machine_check_early(struct pt_regs *regs);
@@ -143,4 +143,11 @@ struct kvm_vcpu;
 void _kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
 void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
 
+/* Patch sites */
+extern s32 patch__call_flush_count_cache;
+extern s32 patch__flush_count_cache_return;
+extern s32 patch__memset_nocache, patch__memcpy_nocache;
+
+extern long flush_count_cache;
+
 #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 963abf8bf1c0..52eafaf74054 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -10,6 +10,7 @@
 #include <linux/types.h>
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
+#include <asm/asm-405.h>
 
 #define ATOMIC_INIT(i) { (i) }
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index f67b3f6e36be..fbe8df433019 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -5,6 +5,8 @@
 #ifndef _ASM_POWERPC_BARRIER_H
 #define _ASM_POWERPC_BARRIER_H
 
+#include <asm/asm-const.h>
+
 /*
  * Memory barrier.
  * The sync instruction guarantees that all memory accesses initiated
@@ -77,19 +79,25 @@ do { \
 })
 
 #ifdef CONFIG_PPC_BOOK3S_64
+#define NOSPEC_BARRIER_SLOT nop
+#elif defined(CONFIG_PPC_FSL_BOOK3E)
+#define NOSPEC_BARRIER_SLOT nop; nop
+#endif
+
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
 /*
  * Prevent execution of subsequent instructions until preceding branches have
  * been fully resolved and are no longer executing speculatively.
  */
-#define barrier_nospec_asm NOSPEC_BARRIER_FIXUP_SECTION; nop
+#define barrier_nospec_asm NOSPEC_BARRIER_FIXUP_SECTION; NOSPEC_BARRIER_SLOT
 
 // This also acts as a compiler barrier due to the memory clobber.
 #define barrier_nospec() asm (stringify_in_c(barrier_nospec_asm) ::: "memory")
 
-#else /* !CONFIG_PPC_BOOK3S_64 */
+#else /* !CONFIG_PPC_BARRIER_NOSPEC */
 #define barrier_nospec_asm
 #define barrier_nospec()
-#endif
+#endif /* CONFIG_PPC_BARRIER_NOSPEC */
 
 #include <asm-generic/barrier.h>
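The barrier.h rework above gates barrier_nospec() on the new CONFIG_PPC_BARRIER_NOSPEC symbol and lets each platform supply its own speculation-barrier slot (one nop on Book3S-64, two on the NXP Book3E parts, rewritten at boot via the fixup section). A minimal sketch of the kind of call site this is meant for (hypothetical code, not from this series):

    static int table[16];

    static int lookup(unsigned long idx)
    {
    	if (idx >= 16)
    		return -EINVAL;
    	/*
    	 * Without this, the load below may execute speculatively with an
    	 * out-of-bounds idx even though the bounds check will eventually
    	 * fail. Compiles to nothing unless CONFIG_PPC_BARRIER_NOSPEC=y.
    	 */
    	barrier_nospec();
    	return table[idx];
    }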
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index b750ffef83c7..ff71566dadee 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -45,6 +45,7 @@
 #include <linux/compiler.h>
 #include <asm/asm-compat.h>
 #include <asm/synch.h>
+#include <asm/asm-405.h>
 
 /* PPC bit number conversion */
 #define PPC_BITLSHIFT(be) (BITS_PER_LONG - 1 - (be))
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 02f5acd7ccc4..751cf931bb3f 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -84,17 +84,12 @@
  * of RAM. -- Cort
  */
 #define VMALLOC_OFFSET (0x1000000) /* 16M */
-#ifdef PPC_PIN_SIZE
-#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
-#else
 #define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
-#endif
 #define VMALLOC_END ioremap_bot
 
 #ifndef __ASSEMBLY__
 #include <linux/sched.h>
 #include <linux/threads.h>
-#include <asm/io.h> /* For sub-arch specific PPC_PIN_SIZE */
 
 extern unsigned long ioremap_bot;
 
@@ -164,7 +159,6 @@ static inline unsigned long pte_update(pte_t *p,
 1: lwarx %0,0,%3\n\
  andc %1,%0,%4\n\
  or %1,%1,%5\n"
- PPC405_ERR77(0,%3)
 " stwcx. %1,0,%3\n\
  bne- 1b"
  : "=&r" (old), "=&r" (tmp), "=m" (*p)
@@ -186,7 +180,6 @@ static inline unsigned long long pte_update(pte_t *p,
  lwzx %0,0,%3\n\
  andc %1,%L0,%5\n\
  or %1,%1,%6\n"
- PPC405_ERR77(0,%3)
 " stwcx. %1,0,%4\n\
  bne- 1b"
  : "=&r" (old), "=&r" (tmp), "=m" (*p)
diff --git a/arch/powerpc/include/asm/book3s/32/tlbflush.h b/arch/powerpc/include/asm/book3s/32/tlbflush.h
new file mode 100644
index 000000000000..068085b709fb
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/32/tlbflush.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_32_TLBFLUSH_H
+#define _ASM_POWERPC_BOOK3S_32_TLBFLUSH_H
+
+#define MMU_NO_CONTEXT (0)
+/*
+ * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
+ */
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+			    unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+static inline void local_flush_tlb_page(struct vm_area_struct *vma,
+					unsigned long vmaddr)
+{
+	flush_tlb_page(vma, vmaddr);
+}
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+	flush_tlb_mm(mm);
+}
+
+#endif /* _ASM_POWERPC_TLBFLUSH_H */
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
index c81793d47af9..f82ee8a3b561 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -137,10 +137,9 @@ extern bool __rpte_sub_valid(real_pte_t rpte, unsigned long index);
  shift = mmu_psize_defs[psize].shift; \
  for (index = 0; vpn < __end; index++, \
       vpn += (1L << (shift - VPN_SHIFT))) { \
- if (!__split || __rpte_sub_valid(rpte, index)) \
-  do {
+ if (!__split || __rpte_sub_valid(rpte, index))
 
-#define pte_iterate_hashed_end() } while(0); } } while(0)
+#define pte_iterate_hashed_end() } } while(0)
 
 #define pte_pagesize_index(mm, addr, pte) \
  (((pte) & H_PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K)
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index 0387b155f13d..d52a51b2ce7b 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -3,6 +3,8 @@
 #define _ASM_POWERPC_BOOK3S_64_HASH_H
 #ifdef __KERNEL__
 
+#include <asm/asm-const.h>
+
 /*
  * Common bits between 4K and 64K pages in a linux-style PTE.
  * Additional bits may be defined in pgtable-hash64-*.h
diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
index c459f937d484..50888388a359 100644
--- a/arch/powerpc/include/asm/book3s/64/hugetlb.h
+++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
@@ -32,26 +32,6 @@ static inline int hstate_get_psize(struct hstate *hstate)
  }
 }
 
-#define arch_make_huge_pte arch_make_huge_pte
-static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
-           struct page *page, int writable)
-{
- unsigned long page_shift;
-
- if (!cpu_has_feature(CPU_FTR_POWER9_DD1))
-  return entry;
-
- page_shift = huge_page_shift(hstate_vma(vma));
- /*
-  * We don't support 1G hugetlb pages yet.
-  */
- VM_WARN_ON(page_shift == mmu_psize_defs[MMU_PAGE_1G].shift);
- if (page_shift == mmu_psize_defs[MMU_PAGE_2M].shift)
-  return __pte(pte_val(entry) | R_PAGE_LARGE);
- else
-  return entry;
-}
-
 #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
 static inline bool gigantic_page_supported(void)
 {
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 50ed64fba4ae..b3520b549cba 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -12,9 +12,9 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#include <asm/asm-compat.h>
 #include <asm/page.h>
 #include <asm/bug.h>
+#include <asm/asm-const.h>
 
 /*
  * This is necessary to get the definition of PGTABLE_RANGE which we
@@ -364,6 +364,16 @@ static inline unsigned long hpte_new_to_old_r(unsigned long r)
  return r & ~HPTE_R_3_0_SSIZE_MASK;
 }
 
+static inline unsigned long hpte_get_old_v(struct hash_pte *hptep)
+{
+ unsigned long hpte_v;
+
+ hpte_v = be64_to_cpu(hptep->v);
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+  hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
+ return hpte_v;
+}
+
 /*
  * This function sets the AVPN and L fields of the HPTE appropriately
  * using the base page size and actual page size.
@@ -487,6 +497,9 @@ extern void hpte_init_native(void);
 
 extern void slb_initialize(void);
 extern void slb_flush_and_rebolt(void);
+void slb_flush_all_realmode(void);
+void __slb_restore_bolted_realmode(void);
+void slb_restore_bolted_realmode(void);
 
 extern void slb_vmalloc_update(void);
 extern void slb_set_size(u16 size);
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index 01ee40f11f3a..391ed2c3b697 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -9,6 +9,7 @@
 
 #include <linux/slab.h>
 #include <linux/cpumask.h>
+#include <linux/kmemleak.h>
 #include <linux/percpu.h>
 
 struct vmemmap_backing {
@@ -83,6 +84,13 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
  pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
    pgtable_gfp_flags(mm, GFP_KERNEL));
  /*
+  * Don't scan the PGD for pointers, it contains references to PUDs but
+  * those references are not full pointers and so can't be recognised by
+  * kmemleak.
+  */
+ kmemleak_no_scan(pgd);
+
+ /*
   * With hugetlb, we don't clear the second half of the page table.
   * If we share the same slab cache with the pmd or pud level table,
   * we need to make sure we zero out the full table on alloc.
@@ -110,8 +118,19 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
- return kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
-    pgtable_gfp_flags(mm, GFP_KERNEL));
+ pud_t *pud;
+
+ pud = kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
+   pgtable_gfp_flags(mm, GFP_KERNEL));
+ /*
+  * Tell kmemleak to ignore the PUD, that means don't scan it for
+  * pointers and don't consider it a leak. PUDs are typically only
+  * referred to by their PGD, but kmemleak is not able to recognise those
+  * as pointers, leading to false leak reports.
+  */
+ kmemleak_ignore(pud);
+
+ return pud;
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -208,4 +227,11 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 
 #define check_pgt_cache() do { } while (0)
 
+extern atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
+static inline void update_page_count(int psize, long count)
+{
+ if (IS_ENABLED(CONFIG_PROC_FS))
+  atomic_long_add(count, &direct_pages_count[psize]);
+}
+
 #endif /* _ASM_POWERPC_BOOK3S_64_PGALLOC_H */
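The pgalloc.h hunks above are related to the page-table-fragment fix called out in the pull message: page-table pages hold encoded entries rather than plain pointers, so kmemleak cannot follow them. A condensed sketch of the two annotations used (cache and helper names here are hypothetical):

    #include <linux/kmemleak.h>
    #include <linux/slab.h>

    static void *pgd_like_alloc(struct kmem_cache *cache)
    {
    	void *pgd = kmem_cache_alloc(cache, GFP_KERNEL);

    	if (pgd)	/* still leak-checked, but contents never scanned */
    		kmemleak_no_scan(pgd);
    	return pgd;
    }

    static void *pud_like_alloc(struct kmem_cache *cache)
    {
    	void *pud = kmem_cache_alloc(cache, GFP_KERNEL);

    	/*
    	 * Only reachable via encoded PGD entries, which kmemleak cannot
    	 * recognise, so suppress scanning and reporting entirely.
    	 */
    	if (pud)
    		kmemleak_ignore(pud);
    	return pud;
    }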
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 42aafba7a308..676118743a06 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -479,9 +479,8 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 {
  if (full && radix_enabled()) {
   /*
-   * Let's skip the DD1 style pte update here. We know that
-   * this is a full mm pte clear and hence can be sure there is
-   * no parallel set_pte.
+   * We know that this is a full mm pte clear and
+   * hence can be sure there is no parallel set_pte.
    */
   return radix__ptep_get_and_clear_full(mm, addr, ptep, full);
  }
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index ef9f96742ce1..7d1a3d1543fc 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -2,6 +2,8 @@
 #ifndef _ASM_POWERPC_PGTABLE_RADIX_H
 #define _ASM_POWERPC_PGTABLE_RADIX_H
 
+#include <asm/asm-const.h>
+
 #ifndef __ASSEMBLY__
 #include <asm/cmpxchg.h>
 #endif
@@ -12,12 +14,6 @@
 #include <asm/book3s/64/radix-4k.h>
 #endif
 
-/*
- * For P9 DD1 only, we need to track whether the pte's huge.
- */
-#define R_PAGE_LARGE _RPAGE_RSV1
-
-
 #ifndef __ASSEMBLY__
 #include <asm/book3s/64/tlbflush-radix.h>
 #include <asm/cpu_has_feature.h>
@@ -36,6 +32,9 @@
 #define RADIX_PUD_BAD_BITS 0x60000000000000e0UL
 #define RADIX_PGD_BAD_BITS 0x60000000000000e0UL
 
+#define RADIX_PMD_SHIFT (PAGE_SHIFT + RADIX_PTE_INDEX_SIZE)
+#define RADIX_PUD_SHIFT (RADIX_PMD_SHIFT + RADIX_PMD_INDEX_SIZE)
+#define RADIX_PGD_SHIFT (RADIX_PUD_SHIFT + RADIX_PUD_INDEX_SIZE)
 /*
  * Size of EA range mapped by our pagetables.
  */
@@ -154,20 +153,7 @@ static inline unsigned long radix__pte_update(struct mm_struct *mm,
 {
  unsigned long old_pte;
 
- if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
-
-  unsigned long new_pte;
-
-  old_pte = __radix_pte_update(ptep, ~0ul, 0);
-  /*
-   * new value of pte
-   */
-  new_pte = (old_pte | set) & ~clr;
-  radix__flush_tlb_pte_p9_dd1(old_pte, mm, addr);
-  if (new_pte)
-   __radix_pte_update(ptep, 0, new_pte);
- } else
-  old_pte = __radix_pte_update(ptep, clr, set);
+ old_pte = __radix_pte_update(ptep, clr, set);
 
  if (!huge)
   assert_pte_locked(mm, addr);
@@ -253,8 +239,6 @@ static inline int radix__pmd_trans_huge(pmd_t pmd)
 
 static inline pmd_t radix__pmd_mkhuge(pmd_t pmd)
 {
- if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-  return __pmd(pmd_val(pmd) | _PAGE_PTE | R_PAGE_LARGE);
  return __pmd(pmd_val(pmd) | _PAGE_PTE);
 }
 
@@ -285,18 +269,14 @@ static inline unsigned long radix__get_tree_size(void)
  unsigned long rts_field;
  /*
   * We support 52 bits, hence:
-  * DD1    52-28 = 24, 0b11000
-  * Others 52-31 = 21, 0b10101
+  *  bits 52 - 31 = 21, 0b10101
   * RTS encoding details
   * bits 0 - 3 of rts -> bits 6 - 8 unsigned long
   * bits 4 - 5 of rts -> bits 62 - 63 of unsigned long
   */
- if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-  rts_field = (0x3UL << 61);
- else {
-  rts_field = (0x5UL << 5); /* 6 - 8 bits */
-  rts_field |= (0x2UL << 61);
- }
+ rts_field = (0x5UL << 5); /* 6 - 8 bits */
+ rts_field |= (0x2UL << 61);
+
  return rts_field;
 }
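The radix__get_tree_size() change above drops the P9 DD1 special case, leaving only the 52-bit encoding. Working the arithmetic through (my reading of the comment): RTS = 52 - 31 = 21 = 0b10101; the low three bits (0b101 = 0x5) land in the field at bit 5 and the high two bits (0b10 = 0x2) in the field at bit 61, which is exactly what the two constants express. A throwaway userspace check (assumes a 64-bit unsigned long):

    #include <assert.h>

    int main(void)
    {
    	unsigned long rts = 52 - 31;		/* 21 = 0b10101 */
    	unsigned long rts_field;

    	rts_field  = (rts & 0x7) << 5;		/* 0x5UL << 5  */
    	rts_field |= ((rts >> 3) & 0x3) << 61;	/* 0x2UL << 61 */

    	assert(rts_field == ((0x5UL << 5) | (0x2UL << 61)));
    	return 0;
    }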
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index ef5c3f2994c9..1154a6dc6d26 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -48,8 +48,6 @@ extern void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmad
 extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
 extern void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr);
 extern void radix__flush_tlb_all(void);
-extern void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
-     unsigned long address);
 
 extern void radix__flush_tlb_lpid_page(unsigned int lpid,
      unsigned long addr,
diff --git a/arch/powerpc/include/asm/book3s/tlbflush.h b/arch/powerpc/include/asm/book3s/tlbflush.h
new file mode 100644
index 000000000000..dec11de41055
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/tlbflush.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_TLBFLUSH_H
+#define _ASM_POWERPC_BOOK3S_TLBFLUSH_H
+
+#ifdef CONFIG_PPC64
+#include <asm/book3s/64/tlbflush.h>
+#else
+#include <asm/book3s/32/tlbflush.h>
+#endif
+
+#endif /* _ASM_POWERPC_BOOK3S_TLBFLUSH_H */
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index 0d72ec75da63..d5a8d7bf0759 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -11,7 +11,6 @@
 
 #include <linux/mm.h>
 #include <asm/cputable.h>
-#include <asm/cpu_has_feature.h>
 
 /*
  * No cache flushing is required when address mappings are changed,
diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
index 9b001f1f6b32..27183871eb3b 100644
--- a/arch/powerpc/include/asm/cmpxchg.h
+++ b/arch/powerpc/include/asm/cmpxchg.h
@@ -5,8 +5,8 @@
 #ifdef __KERNEL__
 #include <linux/compiler.h>
 #include <asm/synch.h>
-#include <asm/asm-compat.h>
 #include <linux/bug.h>
+#include <asm/asm-405.h>
 
 #ifdef __BIG_ENDIAN
 #define BITOFF_CAL(size, off) ((sizeof(u32) - size - off) * BITS_PER_BYTE)
diff --git a/arch/powerpc/include/asm/code-patching-asm.h b/arch/powerpc/include/asm/code-patching-asm.h
new file mode 100644
index 000000000000..ed7b1448493a
--- /dev/null
+++ b/arch/powerpc/include/asm/code-patching-asm.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2018, Michael Ellerman, IBM Corporation.
+ */
+#ifndef _ASM_POWERPC_CODE_PATCHING_ASM_H
+#define _ASM_POWERPC_CODE_PATCHING_ASM_H
+
+/* Define a "site" that can be patched */
+.macro patch_site label name
+	.pushsection ".rodata"
+	.balign 4
+	.global \name
+\name:
+	.4byte	\label - .
+	.popsection
+.endm
+
+#endif /* _ASM_POWERPC_CODE_PATCHING_ASM_H */
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index 812535f40124..31733a95bbd0 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -14,6 +14,7 @@
 #include <asm/ppc-opcode.h>
 #include <linux/string.h>
 #include <linux/kallsyms.h>
+#include <asm/asm-compat.h>
 
 /* Flags for create_branch:
  * "b"   == create_branch(addr, target, 0);
@@ -32,6 +33,8 @@ unsigned int create_cond_branch(const unsigned int *addr,
 int patch_branch(unsigned int *addr, unsigned long target, int flags);
 int patch_instruction(unsigned int *addr, unsigned int instr);
 int raw_patch_instruction(unsigned int *addr, unsigned int instr);
+int patch_instruction_site(s32 *addr, unsigned int instr);
+int patch_branch_site(s32 *site, unsigned long target, int flags);
 
 int instr_is_relative_branch(unsigned int instr);
 int instr_is_relative_link_branch(unsigned int instr);
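Together, the patch_site assembler macro and the patch_instruction_site()/patch_branch_site() helpers above are the plumbing behind the count-cache-flush mitigation mentioned in the pull message: assembly marks an instruction as patchable, and C code rewrites it once firmware has said which mitigation applies. A rough sketch of the pattern, with hypothetical names:

    /* foo.S -- declare the site next to the instruction it names */
    #include <asm/code-patching-asm.h>
    1:	nop			/* flipped at boot if the feature is on */
    	patch_site 1b, patch__example_feature

    /* foo.c -- patch it once the decision is known */
    extern s32 patch__example_feature;
    extern void example_handler(void);

    static void example_feature_enable(void)
    {
    	patch_branch_site(&patch__example_feature,
    			  (unsigned long)&example_handler, BRANCH_SET_LINK);
    }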
diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h
index e210a83eb196..43e5f31fe64d 100644
--- a/arch/powerpc/include/asm/cpuidle.h
+++ b/arch/powerpc/include/asm/cpuidle.h
@@ -79,6 +79,19 @@ struct stop_sprs {
  u64 mmcra;
 };
 
+#define PNV_IDLE_NAME_LEN 16
+struct pnv_idle_states_t {
+ char name[PNV_IDLE_NAME_LEN];
+ u32 latency_ns;
+ u32 residency_ns;
+ u64 psscr_val;
+ u64 psscr_mask;
+ u32 flags;
+ bool valid;
+};
+
+extern struct pnv_idle_states_t *pnv_idle_states;
+extern int nr_pnv_idle_states;
 extern u32 pnv_fastsleep_workaround_at_entry[];
 extern u32 pnv_fastsleep_workaround_at_exit[];
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 9c0a3083571b..29f49a35d6ee 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -4,9 +4,8 @@
 
 #include <linux/types.h>
 
-#include <asm/asm-compat.h>
-#include <asm/feature-fixups.h>
 #include <uapi/asm/cputable.h>
+#include <asm/asm-const.h>
 
 #ifndef __ASSEMBLY__
 
@@ -210,7 +209,6 @@ static inline void cpu_feature_keys_init(void) { }
 #define CPU_FTR_DAWR LONG_ASM_CONST(0x0000008000000000)
 #define CPU_FTR_DABRX LONG_ASM_CONST(0x0000010000000000)
 #define CPU_FTR_PMAO_BUG LONG_ASM_CONST(0x0000020000000000)
-#define CPU_FTR_POWER9_DD1 LONG_ASM_CONST(0x0000040000000000)
 #define CPU_FTR_POWER9_DD2_1 LONG_ASM_CONST(0x0000080000000000)
 #define CPU_FTR_P9_TM_HV_ASSIST LONG_ASM_CONST(0x0000100000000000)
 #define CPU_FTR_P9_TM_XER_SO_BUG LONG_ASM_CONST(0x0000200000000000)
@@ -452,7 +450,6 @@ static inline void cpu_feature_keys_init(void) { }
     CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
     CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_PKEY)
 #define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
-#define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
 #define CPU_FTRS_POWER9 (CPU_FTR_LWSYNC | \
     CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
     CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -464,8 +461,6 @@ static inline void cpu_feature_keys_init(void) { }
     CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \
     CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_PKEY | \
     CPU_FTR_P9_TLBIE_BUG | CPU_FTR_P9_TIDR)
-#define CPU_FTRS_POWER9_DD1 ((CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD1) & \
-        (~CPU_FTR_SAO))
 #define CPU_FTRS_POWER9_DD2_0 CPU_FTRS_POWER9
 #define CPU_FTRS_POWER9_DD2_1 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1)
 #define CPU_FTRS_POWER9_DD2_2 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1 | \
@@ -488,17 +483,15 @@ static inline void cpu_feature_keys_init(void) { }
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
 #define CPU_FTRS_POSSIBLE \
     (CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | CPU_FTRS_POWER8 | \
-     CPU_FTRS_POWER8_DD1 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_VSX_COMP | \
-     CPU_FTRS_POWER9 | CPU_FTRS_POWER9_DD1 | CPU_FTRS_POWER9_DD2_1 | \
-     CPU_FTRS_POWER9_DD2_2)
+     CPU_FTR_ALTIVEC_COMP | CPU_FTR_VSX_COMP | CPU_FTRS_POWER9 | \
+     CPU_FTRS_POWER9_DD2_1 | CPU_FTRS_POWER9_DD2_2)
 #else
 #define CPU_FTRS_POSSIBLE \
     (CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | \
      CPU_FTRS_POWER6 | CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | \
-     CPU_FTRS_POWER8 | CPU_FTRS_POWER8_DD1 | CPU_FTRS_CELL | \
-     CPU_FTRS_PA6T | CPU_FTR_VSX_COMP | CPU_FTR_ALTIVEC_COMP | \
-     CPU_FTRS_POWER9 | CPU_FTRS_POWER9_DD1 | CPU_FTRS_POWER9_DD2_1 | \
-     CPU_FTRS_POWER9_DD2_2)
+     CPU_FTRS_POWER8 | CPU_FTRS_CELL | CPU_FTRS_PA6T | \
+     CPU_FTR_VSX_COMP | CPU_FTR_ALTIVEC_COMP | CPU_FTRS_POWER9 | \
+     CPU_FTRS_POWER9_DD2_1 | CPU_FTRS_POWER9_DD2_2)
 #endif /* CONFIG_CPU_LITTLE_ENDIAN */
 #endif
 #else
@@ -566,17 +559,15 @@ enum {
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
 #define CPU_FTRS_ALWAYS \
     (CPU_FTRS_POSSIBLE & ~CPU_FTR_HVMODE & CPU_FTRS_POWER7 & \
-     CPU_FTRS_POWER8E & CPU_FTRS_POWER8 & CPU_FTRS_POWER8_DD1 & \
-     CPU_FTRS_POWER9 & CPU_FTRS_POWER9_DD1 & CPU_FTRS_POWER9_DD2_1 & \
-     CPU_FTRS_DT_CPU_BASE)
+     CPU_FTRS_POWER8E & CPU_FTRS_POWER8 & CPU_FTRS_POWER9 & \
+     CPU_FTRS_POWER9_DD2_1 & CPU_FTRS_DT_CPU_BASE)
 #else
 #define CPU_FTRS_ALWAYS \
     (CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & \
      CPU_FTRS_POWER6 & CPU_FTRS_POWER7 & CPU_FTRS_CELL & \
      CPU_FTRS_PA6T & CPU_FTRS_POWER8 & CPU_FTRS_POWER8E & \
-     CPU_FTRS_POWER8_DD1 & ~CPU_FTR_HVMODE & CPU_FTRS_POSSIBLE & \
-     CPU_FTRS_POWER9 & CPU_FTRS_POWER9_DD1 & CPU_FTRS_POWER9_DD2_1 & \
-     CPU_FTRS_DT_CPU_BASE)
+     ~CPU_FTR_HVMODE & CPU_FTRS_POSSIBLE & CPU_FTRS_POWER9 & \
+     CPU_FTRS_POWER9_DD2_1 & CPU_FTRS_DT_CPU_BASE)
 #endif /* CONFIG_CPU_LITTLE_ENDIAN */
 #endif
 #else
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index bc4903badb3f..133672744b2e 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -23,7 +23,6 @@
 #include <asm/div64.h>
 #include <asm/time.h>
 #include <asm/param.h>
-#include <asm/cpu_has_feature.h>
 
 typedef u64 __nocast cputime_t;
 typedef u64 __nocast cputime64_t;
diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h
index 9f2ae0d25e15..99b84db23e8c 100644
--- a/arch/powerpc/include/asm/dbell.h
+++ b/arch/powerpc/include/asm/dbell.h
@@ -16,7 +16,7 @@
 #include <linux/threads.h>
 
 #include <asm/ppc-opcode.h>
-#include <asm/cpu_has_feature.h>
+#include <asm/feature-fixups.h>
 
 #define PPC_DBELL_MSG_BRDCAST (0x04000000)
 #define PPC_DBELL_TYPE(x) (((x) & 0xf) << (63-36))
diff --git a/arch/powerpc/include/asm/dcr-native.h b/arch/powerpc/include/asm/dcr-native.h
index 4a2beef74277..151dff555f50 100644
--- a/arch/powerpc/include/asm/dcr-native.h
+++ b/arch/powerpc/include/asm/dcr-native.h
@@ -25,6 +25,7 @@
 #include <linux/spinlock.h>
 #include <asm/cputable.h>
 #include <asm/cpu_has_feature.h>
+#include <linux/stringify.h>
 
 typedef struct {
  unsigned int base;
diff --git a/arch/powerpc/include/asm/debug.h b/arch/powerpc/include/asm/debug.h
index ce5da214ffe5..7756026b95ca 100644
--- a/arch/powerpc/include/asm/debug.h
+++ b/arch/powerpc/include/asm/debug.h
@@ -45,7 +45,6 @@ static inline int debugger_break_match(struct pt_regs *regs) { return 0; }
 static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
 #endif
 
-void set_breakpoint(struct arch_hw_breakpoint *brk);
 void __set_breakpoint(struct arch_hw_breakpoint *brk);
 bool ppc_breakpoint_available(void);
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
diff --git a/arch/powerpc/include/asm/dt_cpu_ftrs.h b/arch/powerpc/include/asm/dt_cpu_ftrs.h
index 71515d909ed1..0c729e2d0e8a 100644
--- a/arch/powerpc/include/asm/dt_cpu_ftrs.h
+++ b/arch/powerpc/include/asm/dt_cpu_ftrs.h
@@ -10,8 +10,6 @@
  */
 
 #include <linux/types.h>
-#include <asm/asm-compat.h>
-#include <asm/feature-fixups.h>
 #include <uapi/asm/cputable.h>
 
 #ifdef CONFIG_PPC_DT_CPU_FTRS
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index 677102baf3cd..219637ea69a1 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -36,13 +36,14 @@ struct pci_dn;
 #ifdef CONFIG_EEH
 
 /* EEH subsystem flags */
-#define EEH_ENABLED 0x01 /* EEH enabled */
-#define EEH_FORCE_DISABLED 0x02 /* EEH disabled */
-#define EEH_PROBE_MODE_DEV 0x04 /* From PCI device */
-#define EEH_PROBE_MODE_DEVTREE 0x08 /* From device tree */
-#define EEH_VALID_PE_ZERO 0x10 /* PE#0 is valid */
-#define EEH_ENABLE_IO_FOR_LOG 0x20 /* Enable IO for log */
-#define EEH_EARLY_DUMP_LOG 0x40 /* Dump log immediately */
+#define EEH_ENABLED 0x01 /* EEH enabled */
+#define EEH_FORCE_DISABLED 0x02 /* EEH disabled */
+#define EEH_PROBE_MODE_DEV 0x04 /* From PCI device */
+#define EEH_PROBE_MODE_DEVTREE 0x08 /* From device tree */
+#define EEH_VALID_PE_ZERO 0x10 /* PE#0 is valid */
+#define EEH_ENABLE_IO_FOR_LOG 0x20 /* Enable IO for log */
+#define EEH_EARLY_DUMP_LOG 0x40 /* Dump log immediately */
+#define EEH_POSTPONED_PROBE 0x80 /* Powernv may postpone device probe */
 
 /*
  * Delay for PE reset, all in ms
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index c40b4380951c..a86feddddad0 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -35,6 +35,7 @@
  * implementations as possible.
  */
 #include <asm/head-64.h>
+#include <asm/feature-fixups.h>
 
 /* PACA save area offsets (exgen, exmc, etc) */
 #define EX_R9 0
@@ -156,7 +157,7 @@
  b hrfi_flush_fallback
 
 #ifdef CONFIG_RELOCATABLE
-#define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
+#define __EXCEPTION_PROLOG_2_RELON(label, h) \
  mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
  LOAD_HANDLER(r12,label); \
  mtctr r12; \
@@ -166,25 +167,26 @@
  bctr;
 #else
 /* If not relocatable, we can jump directly -- and save messing with LR */
-#define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
+#define __EXCEPTION_PROLOG_2_RELON(label, h) \
  mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
  mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
  li r10,MSR_RI; \
  mtmsrd r10,1; /* Set RI (EE=0) */ \
  b label;
 #endif
 
-#define EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
- __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h)
+#define EXCEPTION_PROLOG_2_RELON(label, h) \
+ __EXCEPTION_PROLOG_2_RELON(label, h)
 
 /*
- * As EXCEPTION_PROLOG_PSERIES(), except we've already got relocation on
- * so no need to rfid. Save lr in case we're CONFIG_RELOCATABLE, in which
- * case EXCEPTION_RELON_PROLOG_PSERIES_1 will be using lr.
+ * As EXCEPTION_PROLOG(), except we've already got relocation on so no need to
+ * rfid. Save LR in case we're CONFIG_RELOCATABLE, in which case
+ * EXCEPTION_PROLOG_2_RELON will be using LR.
  */
-#define EXCEPTION_RELON_PROLOG_PSERIES(area, label, h, extra, vec) \
+#define EXCEPTION_RELON_PROLOG(area, label, h, extra, vec) \
+ SET_SCRATCH0(r13); /* save r13 */ \
  EXCEPTION_PROLOG_0(area); \
  EXCEPTION_PROLOG_1(area, extra, vec); \
- EXCEPTION_RELON_PROLOG_PSERIES_1(label, h)
+ EXCEPTION_PROLOG_2_RELON(label, h)
 
 /*
  * We're short on space and time in the exception prolog, so we can't
@@ -315,7 +317,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 #define EXCEPTION_PROLOG_1(area, extra, vec) \
  _EXCEPTION_PROLOG_1(area, extra, vec)
 
-#define __EXCEPTION_PROLOG_PSERIES_1(label, h) \
+#define __EXCEPTION_PROLOG_2(label, h) \
  ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \
  mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
  LOAD_HANDLER(r12,label) \
@@ -324,11 +326,11 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
  mtspr SPRN_##h##SRR1,r10; \
  h##RFI_TO_KERNEL; \
  b . /* prevent speculative execution */
-#define EXCEPTION_PROLOG_PSERIES_1(label, h) \
- __EXCEPTION_PROLOG_PSERIES_1(label, h)
+#define EXCEPTION_PROLOG_2(label, h) \
+ __EXCEPTION_PROLOG_2(label, h)
 
 /* _NORI variant keeps MSR_RI clear */
-#define __EXCEPTION_PROLOG_PSERIES_1_NORI(label, h) \
+#define __EXCEPTION_PROLOG_2_NORI(label, h) \
  ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \
  xori r10,r10,MSR_RI; /* Clear MSR_RI */ \
  mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
@@ -339,13 +341,14 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
  h##RFI_TO_KERNEL; \
  b . /* prevent speculative execution */
 
-#define EXCEPTION_PROLOG_PSERIES_1_NORI(label, h) \
- __EXCEPTION_PROLOG_PSERIES_1_NORI(label, h)
+#define EXCEPTION_PROLOG_2_NORI(label, h) \
+ __EXCEPTION_PROLOG_2_NORI(label, h)
 
-#define EXCEPTION_PROLOG_PSERIES(area, label, h, extra, vec) \
+#define EXCEPTION_PROLOG(area, label, h, extra, vec) \
+ SET_SCRATCH0(r13); /* save r13 */ \
  EXCEPTION_PROLOG_0(area); \
  EXCEPTION_PROLOG_1(area, extra, vec); \
- EXCEPTION_PROLOG_PSERIES_1(label, h);
+ EXCEPTION_PROLOG_2(label, h);
 
 #define __KVMTEST(h, n) \
  lbz r10,HSTATE_IN_GUEST(r13); \
@@ -416,10 +419,10 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 #endif
 
 /* Do not enable RI */
-#define EXCEPTION_PROLOG_PSERIES_NORI(area, label, h, extra, vec) \
+#define EXCEPTION_PROLOG_NORI(area, label, h, extra, vec) \
  EXCEPTION_PROLOG_0(area); \
  EXCEPTION_PROLOG_1(area, extra, vec); \
- EXCEPTION_PROLOG_PSERIES_1_NORI(label, h);
+ EXCEPTION_PROLOG_2_NORI(label, h);
 
 #define __KVM_HANDLER(area, h, n) \
@@ -550,10 +553,8 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 /*
  * Exception vectors.
  */
-#define STD_EXCEPTION_PSERIES(vec, label) \
- SET_SCRATCH0(r13); /* save r13 */ \
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label, \
-     EXC_STD, KVMTEST_PR, vec); \
+#define STD_EXCEPTION(vec, label) \
+ EXCEPTION_PROLOG(PACA_EXGEN, label, EXC_STD, KVMTEST_PR, vec);
 
 /* Version of above for when we have to branch out-of-line */
 #define __OOL_EXCEPTION(vec, label, hdlr) \
@@ -561,36 +562,31 @@
  EXCEPTION_PROLOG_0(PACA_EXGEN) \
  b hdlr;
 
-#define STD_EXCEPTION_PSERIES_OOL(vec, label) \
+#define STD_EXCEPTION_OOL(vec, label) \
  EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, vec); \
- EXCEPTION_PROLOG_PSERIES_1(label, EXC_STD)
+ EXCEPTION_PROLOG_2(label, EXC_STD)
 
 #define STD_EXCEPTION_HV(loc, vec, label) \
- SET_SCRATCH0(r13); /* save r13 */ \
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label, \
-     EXC_HV, KVMTEST_HV, vec);
+ EXCEPTION_PROLOG(PACA_EXGEN, label, EXC_HV, KVMTEST_HV, vec);
 
 #define STD_EXCEPTION_HV_OOL(vec, label) \
  EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_HV, vec); \
- EXCEPTION_PROLOG_PSERIES_1(label, EXC_HV)
+ EXCEPTION_PROLOG_2(label, EXC_HV)
 
-#define STD_RELON_EXCEPTION_PSERIES(loc, vec, label) \
+#define STD_RELON_EXCEPTION(loc, vec, label) \
  /* No guest interrupts come through here */ \
- SET_SCRATCH0(r13); /* save r13 */ \
- EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label, EXC_STD, NOTEST, vec);
+ EXCEPTION_RELON_PROLOG(PACA_EXGEN, label, EXC_STD, NOTEST, vec);
 
-#define STD_RELON_EXCEPTION_PSERIES_OOL(vec, label) \
+#define STD_RELON_EXCEPTION_OOL(vec, label) \
  EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, vec); \
- EXCEPTION_RELON_PROLOG_PSERIES_1(label, EXC_STD)
+ EXCEPTION_PROLOG_2_RELON(label, EXC_STD)
 
 #define STD_RELON_EXCEPTION_HV(loc, vec, label) \
- SET_SCRATCH0(r13); /* save r13 */ \
- EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label, \
-     EXC_HV, KVMTEST_HV, vec);
+ EXCEPTION_RELON_PROLOG(PACA_EXGEN, label, EXC_HV, KVMTEST_HV, vec);
 
 #define STD_RELON_EXCEPTION_HV_OOL(vec, label) \
  EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_HV, vec); \
- EXCEPTION_RELON_PROLOG_PSERIES_1(label, EXC_HV)
+ EXCEPTION_PROLOG_2_RELON(label, EXC_HV)
 
 /* This associate vector numbers with bits in paca->irq_happened */
 #define SOFTEN_VALUE_0x500 PACA_IRQ_EE
@@ -627,55 +623,45 @@
 #define SOFTEN_NOTEST_PR(vec, bitmask) _SOFTEN_TEST(EXC_STD, vec, bitmask)
 #define SOFTEN_NOTEST_HV(vec, bitmask) _SOFTEN_TEST(EXC_HV, vec, bitmask)
 
-#define __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra, bitmask) \
+#define __MASKABLE_EXCEPTION(vec, label, h, extra, bitmask) \
  SET_SCRATCH0(r13); /* save r13 */ \
  EXCEPTION_PROLOG_0(PACA_EXGEN); \
  MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec, bitmask); \
- EXCEPTION_PROLOG_PSERIES_1(label, h);
+ EXCEPTION_PROLOG_2(label, h);
 
-#define _MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra, bitmask) \
- __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra, bitmask)
+#define MASKABLE_EXCEPTION(vec, label, bitmask) \
+ __MASKABLE_EXCEPTION(vec, label, EXC_STD, SOFTEN_TEST_PR, bitmask)
 
-#define MASKABLE_EXCEPTION_PSERIES(loc, vec, label, bitmask) \
- _MASKABLE_EXCEPTION_PSERIES(vec, label, \
-     EXC_STD, SOFTEN_TEST_PR, bitmask)
-
-#define MASKABLE_EXCEPTION_PSERIES_OOL(vec, label, bitmask) \
+#define MASKABLE_EXCEPTION_OOL(vec, label, bitmask) \
  MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_PR, vec, bitmask);\
- EXCEPTION_PROLOG_PSERIES_1(label, EXC_STD)
+ EXCEPTION_PROLOG_2(label, EXC_STD)
 
-#define MASKABLE_EXCEPTION_HV(loc, vec, label, bitmask) \
- _MASKABLE_EXCEPTION_PSERIES(vec, label, \
-     EXC_HV, SOFTEN_TEST_HV, bitmask)
+#define MASKABLE_EXCEPTION_HV(vec, label, bitmask) \
+ __MASKABLE_EXCEPTION(vec, label, EXC_HV, SOFTEN_TEST_HV, bitmask)
 
 #define MASKABLE_EXCEPTION_HV_OOL(vec, label, bitmask) \
  MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec, bitmask);\
- EXCEPTION_PROLOG_PSERIES_1(label, EXC_HV)
+ EXCEPTION_PROLOG_2(label, EXC_HV)
 
-#define __MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra, bitmask) \
+#define __MASKABLE_RELON_EXCEPTION(vec, label, h, extra, bitmask) \
  SET_SCRATCH0(r13); /* save r13 */ \
  EXCEPTION_PROLOG_0(PACA_EXGEN); \
  MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec, bitmask); \
- EXCEPTION_RELON_PROLOG_PSERIES_1(label, h)
-
-#define _MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra, bitmask)\
- __MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra, bitmask)
+ EXCEPTION_PROLOG_2_RELON(label, h)
 
-#define MASKABLE_RELON_EXCEPTION_PSERIES(loc, vec, label, bitmask) \
- _MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, \
-     EXC_STD, SOFTEN_NOTEST_PR, bitmask)
+#define MASKABLE_RELON_EXCEPTION(vec, label, bitmask) \
+ __MASKABLE_RELON_EXCEPTION(vec, label, EXC_STD, SOFTEN_NOTEST_PR, bitmask)
 
-#define MASKABLE_RELON_EXCEPTION_PSERIES_OOL(vec, label, bitmask) \
+#define MASKABLE_RELON_EXCEPTION_OOL(vec, label, bitmask) \
  MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_NOTEST_PR, vec, bitmask);\
- EXCEPTION_PROLOG_PSERIES_1(label, EXC_STD);
+ EXCEPTION_PROLOG_2(label, EXC_STD);
 
-#define MASKABLE_RELON_EXCEPTION_HV(loc, vec, label, bitmask) \
- _MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, \
-     EXC_HV, SOFTEN_TEST_HV, bitmask)
+#define MASKABLE_RELON_EXCEPTION_HV(vec, label, bitmask) \
+ __MASKABLE_RELON_EXCEPTION(vec, label, EXC_HV, SOFTEN_TEST_HV, bitmask)
 
 #define MASKABLE_RELON_EXCEPTION_HV_OOL(vec, label, bitmask) \
  MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec, bitmask);\
- EXCEPTION_RELON_PROLOG_PSERIES_1(label, EXC_HV)
+ EXCEPTION_PROLOG_2_RELON(label, EXC_HV)
 
 /*
  * Our exception common code can be passed various "additions"
diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
index 5a23010af600..1e7a33592e29 100644
--- a/arch/powerpc/include/asm/fadump.h
+++ b/arch/powerpc/include/asm/fadump.h
@@ -195,9 +195,6 @@ struct fadump_crash_info_header {
  struct cpumask online_mask;
 };
 
-/* Crash memory ranges */
-#define INIT_CRASHMEM_RANGES (INIT_MEMBLOCK_REGIONS + 2)
-
 struct fad_crash_memory_ranges {
  unsigned long long base;
  unsigned long long size;
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index fcfd05672b1b..33b6f9c892c8 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -1,6 +1,8 @@
 #ifndef __ASM_POWERPC_FEATURE_FIXUPS_H
 #define __ASM_POWERPC_FEATURE_FIXUPS_H
 
+#include <asm/asm-const.h>
+
 /*
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index 535add3f7791..7a051bd21f87 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -14,8 +14,7 @@
 
 #ifdef __KERNEL__
 
-#include <asm/asm-compat.h>
-#include <asm/feature-fixups.h>
+#include <asm/asm-const.h>
 
 /* firmware feature bitmask values */
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
index 6c40dfda5912..41cc15c14eee 100644
--- a/arch/powerpc/include/asm/fixmap.h
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -15,7 +15,6 @@
 #define _ASM_FIXMAP_H
 
 #ifndef __ASSEMBLY__
-#include <linux/kernel.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #ifdef CONFIG_HIGHMEM
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index 1a944c18c539..94542776a62d 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -8,7 +8,7 @@
 #include <linux/uaccess.h>
 #include <asm/errno.h>
 #include <asm/synch.h>
-#include <asm/asm-compat.h>
+#include <asm/asm-405.h>
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
  __asm__ __volatile ( \
diff --git a/arch/powerpc/include/asm/head-64.h b/arch/powerpc/include/asm/head-64.h
index 7e0e93f24cb7..a4f947888744 100644
--- a/arch/powerpc/include/asm/head-64.h
+++ b/arch/powerpc/include/asm/head-64.h
@@ -260,22 +260,22 @@ name:
 #define EXC_REAL(name, start, size) \
  EXC_REAL_BEGIN(name, start, size); \
- STD_EXCEPTION_PSERIES(start, name##_common); \
+ STD_EXCEPTION(start, name##_common); \
  EXC_REAL_END(name, start, size);
 
 #define EXC_VIRT(name, start, size, realvec) \
  EXC_VIRT_BEGIN(name, start, size); \
- STD_RELON_EXCEPTION_PSERIES(start, realvec, name##_common); \
+ STD_RELON_EXCEPTION(start, realvec, name##_common); \
  EXC_VIRT_END(name, start, size);
 
 #define EXC_REAL_MASKABLE(name, start, size, bitmask) \
  EXC_REAL_BEGIN(name, start, size); \
- MASKABLE_EXCEPTION_PSERIES(start, start, name##_common, bitmask);\
+ MASKABLE_EXCEPTION(start, name##_common, bitmask); \
  EXC_REAL_END(name, start, size);
 
 #define EXC_VIRT_MASKABLE(name, start, size, realvec, bitmask) \
  EXC_VIRT_BEGIN(name, start, size); \
- MASKABLE_RELON_EXCEPTION_PSERIES(start, realvec, name##_common, bitmask);\
+ MASKABLE_RELON_EXCEPTION(realvec, name##_common, bitmask); \
  EXC_VIRT_END(name, start, size);
 
 #define EXC_REAL_HV(name, start, size) \
@@ -295,7 +295,7 @@ name:
 
 #define __TRAMP_REAL_OOL(name, vec) \
  TRAMP_REAL_BEGIN(tramp_real_##name); \
- STD_EXCEPTION_PSERIES_OOL(vec, name##_common); \
+ STD_EXCEPTION_OOL(vec, name##_common);
 
 #define EXC_REAL_OOL(name, start, size) \
  __EXC_REAL_OOL(name, start, size); \
@@ -306,7 +306,7 @@ name:
 
 #define __TRAMP_REAL_OOL_MASKABLE(name, vec, bitmask) \
  TRAMP_REAL_BEGIN(tramp_real_##name); \
- MASKABLE_EXCEPTION_PSERIES_OOL(vec, name##_common, bitmask); \
+ MASKABLE_EXCEPTION_OOL(vec, name##_common, bitmask);
 
 #define EXC_REAL_OOL_MASKABLE(name, start, size, bitmask) \
  __EXC_REAL_OOL_MASKABLE(name, start, size); \
@@ -346,7 +346,7 @@ name:
 
 #define __TRAMP_VIRT_OOL(name, realvec) \
  TRAMP_VIRT_BEGIN(tramp_virt_##name); \
- STD_RELON_EXCEPTION_PSERIES_OOL(realvec, name##_common); \
+ STD_RELON_EXCEPTION_OOL(realvec, name##_common);
 
 #define EXC_VIRT_OOL(name, start, size, realvec) \
  __EXC_VIRT_OOL(name, start, size); \
@@ -357,7 +357,7 @@ name:
 
 #define __TRAMP_VIRT_OOL_MASKABLE(name, realvec, bitmask) \
  TRAMP_VIRT_BEGIN(tramp_virt_##name); \
- MASKABLE_RELON_EXCEPTION_PSERIES_OOL(realvec, name##_common, bitmask);\
+ MASKABLE_RELON_EXCEPTION_OOL(realvec, name##_common, bitmask);
 
 #define EXC_VIRT_OOL_MASKABLE(name, start, size, realvec, bitmask) \
  __EXC_VIRT_OOL_MASKABLE(name, start, size); \
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index cec820f961da..a4b65b186ec6 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -25,7 +25,7 @@
 
 #include <linux/interrupt.h>
 #include <asm/kmap_types.h>
-#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
 #include <asm/page.h>
 #include <asm/fixmap.h>
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 3225eb6402cc..2d00cc530083 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -84,9 +84,6 @@ static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
  return dir + idx;
 }
 
-pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
-     unsigned long addr, unsigned *shift);
-
 void flush_dcache_icache_hugepage(struct page *page);
 
 int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 662c8347d699..a0b17f9f1ea4 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -342,10 +342,12 @@
 #define H_CPU_CHAR_BRANCH_HINTS_HONORED (1ull << 58) // IBM bit 5
 #define H_CPU_CHAR_THREAD_RECONFIG_CTRL (1ull << 57) // IBM bit 6
 #define H_CPU_CHAR_COUNT_CACHE_DISABLED (1ull << 56) // IBM bit 7
+#define H_CPU_CHAR_BCCTR_FLUSH_ASSIST (1ull << 54) // IBM bit 9
 
 #define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0
 #define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1
 #define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2
+#define H_CPU_BEHAV_FLUSH_COUNT_CACHE (1ull << 58) // IBM bit 5
 
 /* Flag values used in H_REGISTER_PROC_TBL hcall */
 #define PROC_TABLE_OP_MASK 0x18
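The "// IBM bit N" comments in the hvcall.h hunk above follow the Power convention of numbering bits from the most-significant end, so a 64-bit flag for IBM bit N is 1ull << (63 - N). A sketch of the arithmetic (the macro name is mine; the kernel's PPC_BIT() plays the same role):

    #define IBM_BIT(n)	(1ull << (63 - (n)))

    /* IBM_BIT(9) == 1ull << 54 == H_CPU_CHAR_BCCTR_FLUSH_ASSIST */
    /* IBM_BIT(5) == 1ull << 58 == H_CPU_BEHAV_FLUSH_COUNT_CACHE */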
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
index 27d6e3c8fde9..ece4dc89c90b 100644
--- a/arch/powerpc/include/asm/hw_breakpoint.h
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -56,6 +56,7 @@ struct perf_event_attr;
 struct perf_event;
 struct pmu;
 struct perf_sample_data;
+struct task_struct;
 
 #define HW_BREAKPOINT_ALIGN 0x7
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index e151774cb577..32a18f2f49bc 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -253,14 +253,16 @@ static inline bool lazy_irq_pending(void)
 
 /*
  * This is called by asynchronous interrupts to conditionally
- * re-enable hard interrupts when soft-disabled after having
- * cleared the source of the interrupt
+ * re-enable hard interrupts after having cleared the source
+ * of the interrupt. They are kept disabled if there is a different
+ * soft-masked interrupt pending that requires hard masking.
  */
 static inline void may_hard_irq_enable(void)
 {
- get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
- if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK))
+ if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)) {
+  get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
   __hard_irq_enable();
+ }
 }
 
 static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 20febe0b7f32..ab3a4fba38e3 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -30,6 +30,7 @@
 #include <asm/machdep.h>
 #include <asm/types.h>
 #include <asm/pci-bridge.h>
+#include <asm/asm-const.h>
 
 #define IOMMU_PAGE_SHIFT_4K 12
 #define IOMMU_PAGE_SIZE_4K (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K)
@@ -69,6 +70,8 @@ struct iommu_table_ops {
    long index,
    unsigned long *hpa,
    enum dma_data_direction *direction);
+
+ __be64 *(*useraddrptr)(struct iommu_table *tbl, long index, bool alloc);
 #endif
  void (*clear)(struct iommu_table *tbl,
    long index, long npages);
@@ -117,15 +120,16 @@ struct iommu_table {
  unsigned long *it_map; /* A simple allocation bitmap for now */
  unsigned long it_page_shift;/* table iommu page size */
  struct list_head it_group_list;/* List of iommu_table_group_link */
- unsigned long *it_userspace; /* userspace view of the table */
+ __be64 *it_userspace; /* userspace view of the table */
  struct iommu_table_ops *it_ops;
  struct kref it_kref;
+ int it_nid;
 };
 
+#define IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry) \
+ ((tbl)->it_ops->useraddrptr((tbl), (entry), false))
 #define IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry) \
- ((tbl)->it_userspace ? \
-  &((tbl)->it_userspace[(entry) - (tbl)->it_offset]) : \
-  NULL)
+ ((tbl)->it_ops->useraddrptr((tbl), (entry), true))
 
 /* Pure 2^n version of get_order */
 static inline __attribute_const__
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index 9a287e0ac8b1..a3b2cf940b4e 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -14,7 +14,7 @@
 #include <linux/types.h>
 
 #include <asm/feature-fixups.h>
-#include <asm/asm-compat.h>
+#include <asm/asm-const.h>
 
 #define JUMP_ENTRY_TYPE stringify_in_c(FTR_ENTRY_LONG)
 #define JUMP_LABEL_NOP_SIZE 4
diff --git a/arch/powerpc/include/asm/kvm_booke_hv_asm.h b/arch/powerpc/include/asm/kvm_booke_hv_asm.h
index e5f048bbcb7c..931260b59ac6 100644
--- a/arch/powerpc/include/asm/kvm_booke_hv_asm.h
+++ b/arch/powerpc/include/asm/kvm_booke_hv_asm.h
@@ -9,6 +9,8 @@
 #ifndef ASM_KVM_BOOKE_HV_ASM_H
 #define ASM_KVM_BOOKE_HV_ASM_H
 
+#include <asm/feature-fixups.h>
+
 #ifdef __ASSEMBLY__
 
 /*
diff --git a/arch/powerpc/include/asm/mmu-44x.h b/arch/powerpc/include/asm/mmu-44x.h
index cb57f29f531d..295b3dbb2698 100644
--- a/arch/powerpc/include/asm/mmu-44x.h
+++ b/arch/powerpc/include/asm/mmu-44x.h
@@ -5,7 +5,7 @@
  * PPC440 support
  */
 
-#include <asm/page.h>
+#include <asm/asm-const.h>
 
 #define PPC44x_MMUCR_TID 0x000000ff
 #define PPC44x_MMUCR_STS 0x00010000
@@ -124,19 +124,19 @@ typedef struct {
 /* Size of the TLBs used for pinning in lowmem */
 #define PPC_PIN_SIZE (1 << 28) /* 256M */
 
-#if (PAGE_SHIFT == 12)
+#if defined(CONFIG_PPC_4K_PAGES)
 #define PPC44x_TLBE_SIZE PPC44x_TLB_4K
 #define PPC47x_TLBE_SIZE PPC47x_TLB0_4K
 #define mmu_virtual_psize MMU_PAGE_4K
-#elif (PAGE_SHIFT == 14)
+#elif defined(CONFIG_PPC_16K_PAGES)
 #define PPC44x_TLBE_SIZE PPC44x_TLB_16K
 #define PPC47x_TLBE_SIZE PPC47x_TLB0_16K
 #define mmu_virtual_psize MMU_PAGE_16K
-#elif (PAGE_SHIFT == 16)
+#elif defined(CONFIG_PPC_64K_PAGES)
 #define PPC44x_TLBE_SIZE PPC44x_TLB_64K
 #define PPC47x_TLBE_SIZE PPC47x_TLB0_64K
 #define mmu_virtual_psize MMU_PAGE_64K
-#elif (PAGE_SHIFT == 18)
+#elif defined(CONFIG_PPC_256K_PAGES)
 #define PPC44x_TLBE_SIZE PPC44x_TLB_256K
 #define mmu_virtual_psize MMU_PAGE_256K
 #else
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 61d15ce92278..13ea441ac531 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -5,8 +5,7 @@
 
 #include <linux/types.h>
 
-#include <asm/asm-compat.h>
-#include <asm/feature-fixups.h>
+#include <asm/asm-const.h>
 
 /*
  * MMU features bit definitions
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 7c46a98cc7f4..a507a65b0866 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -8,7 +8,8 @@
 #ifndef __ASSEMBLY__
 #include <linux/sched.h>
 #include <linux/threads.h>
-#include <asm/io.h> /* For sub-arch specific PPC_PIN_SIZE */
+#include <asm/mmu.h> /* For sub-arch specific PPC_PIN_SIZE */
+#include <asm/asm-405.h>
 
 extern unsigned long ioremap_bot;
 
@@ -222,10 +223,6 @@ static inline unsigned long long pte_update(pte_t *p,
 }
 #endif /* CONFIG_PTE_64BIT */
 
-/*
- * 2.6 calls this without flushing the TLB entry; this is wrong
- * for our hash-based implementation, we fix that up here.
- */
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
 {
- * - * We should be more intelligent about this but for the moment we override - * these functions and force a tlb flush unconditionally - */ #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH #define ptep_clear_flush_young(__vma, __address, __ptep) \ ({ \ @@ -278,9 +271,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, } -/* Set the dirty and/or accessed bits atomically in a linux PTE, this - * function doesn't need to flush the hash entry - */ +/* Set the dirty and/or accessed bits atomically in a linux PTE */ static inline void __ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep, pte_t entry, unsigned long address, diff --git a/arch/powerpc/include/asm/nohash/tlbflush.h b/arch/powerpc/include/asm/nohash/tlbflush.h new file mode 100644 index 000000000000..b1d8fec29169 --- /dev/null +++ b/arch/powerpc/include/asm/nohash/tlbflush.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_NOHASH_TLBFLUSH_H +#define _ASM_POWERPC_NOHASH_TLBFLUSH_H + +/* + * TLB flushing: + * + * - flush_tlb_mm(mm) flushes the specified mm context TLB's + * - flush_tlb_page(vma, vmaddr) flushes one page + * - local_flush_tlb_mm(mm, full) flushes the specified mm context on + * the local processor + * - local_flush_tlb_page(vma, vmaddr) flushes one page on the local processor + * - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB + * - flush_tlb_range(vma, start, end) flushes a range of pages + * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages + * + */ + +/* + * TLB flushing for software loaded TLB chips + * + * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range & + * flush_tlb_kernel_range are best implemented as tlbia vs + * specific tlbie's + */ + +struct vm_area_struct; +struct mm_struct; + +#define MMU_NO_CONTEXT ((unsigned int)-1) + +extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end); +extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); + +extern void local_flush_tlb_mm(struct mm_struct *mm); +extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); + +extern void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, + int tsize, int ind); + +#ifdef CONFIG_SMP +extern void flush_tlb_mm(struct mm_struct *mm); +extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); +extern void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, + int tsize, int ind); +#else +#define flush_tlb_mm(mm) local_flush_tlb_mm(mm) +#define flush_tlb_page(vma,addr) local_flush_tlb_page(vma,addr) +#define __flush_tlb_page(mm,addr,p,i) __local_flush_tlb_page(mm,addr,p,i) +#endif + +#endif /* _ASM_POWERPC_NOHASH_TLBFLUSH_H */ diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h index 3bab299eda49..8365353330b4 100644 --- a/arch/powerpc/include/asm/opal-api.h +++ b/arch/powerpc/include/asm/opal-api.h @@ -206,9 +206,11 @@ #define OPAL_NPU_SPA_CLEAR_CACHE 160 #define OPAL_NPU_TL_SET 161 #define OPAL_SENSOR_READ_U64 162 +#define OPAL_SENSOR_GROUP_ENABLE 163 #define OPAL_PCI_GET_PBCQ_TUNNEL_BAR 164 #define OPAL_PCI_SET_PBCQ_TUNNEL_BAR 165 -#define OPAL_LAST 165 +#define OPAL_NX_COPROC_INIT 167 +#define OPAL_LAST 167 #define QUIESCE_HOLD 1 /* Spin all calls at entry */ #define QUIESCE_REJECT 2 /* Fail all calls with OPAL_BUSY */ diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index e1b2910c6e81..834e7e29f1e4 100644 --- 
a/arch/powerpc/include/asm/opal-api.h
+++ b/arch/powerpc/include/asm/opal-api.h
@@ -206,9 +206,11 @@
 #define OPAL_NPU_SPA_CLEAR_CACHE		160
 #define OPAL_NPU_TL_SET				161
 #define OPAL_SENSOR_READ_U64			162
+#define OPAL_SENSOR_GROUP_ENABLE		163
 #define OPAL_PCI_GET_PBCQ_TUNNEL_BAR		164
 #define OPAL_PCI_SET_PBCQ_TUNNEL_BAR		165
-#define OPAL_LAST				165
+#define OPAL_NX_COPROC_INIT			167
+#define OPAL_LAST				167
 
 #define QUIESCE_HOLD			1 /* Spin all calls at entry */
 #define QUIESCE_REJECT			2 /* Fail all calls with OPAL_BUSY */
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index e1b2910c6e81..834e7e29f1e4 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -292,6 +292,8 @@ int opal_set_powercap(u32 handle, int token, u32 pcap);
 int opal_get_power_shift_ratio(u32 handle, int token, u32 *psr);
 int opal_set_power_shift_ratio(u32 handle, int token, u32 psr);
 int opal_sensor_group_clear(u32 group_hndl, int token);
+int opal_sensor_group_enable(u32 group_hndl, int token, bool enable);
+int opal_nx_coproc_init(uint32_t chip_id, uint32_t ct);
 
 s64 opal_signal_system_reset(s32 cpu);
 s64 opal_quiesce(u64 shutdown_type, s32 cpu);
@@ -305,6 +307,8 @@ extern void opal_configure_cores(void);
 
 extern int opal_get_chars(uint32_t vtermno, char *buf, int count);
 extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len);
+extern int opal_put_chars_atomic(uint32_t vtermno, const char *buf, int total_len);
+extern int opal_flush_console(uint32_t vtermno);
 
 extern void hvc_opal_init_early(void);
 
@@ -326,6 +330,7 @@ extern int opal_async_wait_response_interruptible(uint64_t token,
						  struct opal_msg *msg);
 extern int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data);
 extern int opal_get_sensor_data_u64(u32 sensor_hndl, u64 *sensor_data);
+extern int sensor_group_enable(u32 grp_hndl, bool enable);
 
 struct rtc_time;
 extern time64_t opal_get_boot_time(void);
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 6d34bd71139d..ad4f16164619 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -187,11 +187,6 @@ struct paca_struct {
 	u8 subcore_sibling_mask;
 	/* Flag to request this thread not to stop */
 	atomic_t dont_stop;
 
-	/*
-	 * Pointer to an array which contains pointer
-	 * to the sibling threads' paca.
-	 */
-	struct paca_struct **thread_sibling_pacas;
 	/* The PSSCR value that the kernel requested before going to stop */
 	u64 requested_psscr;
 
@@ -252,6 +247,9 @@ struct paca_struct {
 	void *rfi_flush_fallback_area;
 	u64 l1d_flush_size;
 #endif
+#ifdef CONFIG_PPC_PSERIES
+	u8 *mce_data_buf;		/* buffer to hold per cpu rtas errlog */
+#endif /* CONFIG_PPC_PSERIES */
 } ____cacheline_aligned;
 
 extern void copy_mm_to_paca(struct mm_struct *mm);
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index db7be0779d55..f6a1265face2 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -16,8 +16,7 @@
 #else
 #include <asm/types.h>
 #endif
-#include <asm/asm-compat.h>
-#include <asm/kdump.h>
+#include <asm/asm-const.h>
 
 /*
  * On regular PPC32 page size is 4K (but we support 4K/16K/64K/256K pages
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index af04acdb873f..c0ce17e909ef 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -10,6 +10,8 @@
  * 2 of the License, or (at your option) any later version.
 */
 
+#include <asm/asm-const.h>
+
 /*
  * We always define HW_PAGE_SHIFT to 12 as use of 64K pages remains Linux
  * specific, every notion of page number shared with the firmware, TCEs,
diff --git a/arch/powerpc/include/asm/pkeys.h b/arch/powerpc/include/asm/pkeys.h
index 5ba80cffb505..20ebf153c871 100644
--- a/arch/powerpc/include/asm/pkeys.h
+++ b/arch/powerpc/include/asm/pkeys.h
@@ -13,7 +13,8 @@
 
 DECLARE_STATIC_KEY_TRUE(pkey_disabled);
 extern int pkeys_total; /* total pkeys as per device tree */
-extern u32 initial_allocation_mask; /* bits set for reserved keys */
+extern u32 initial_allocation_mask; /* bits set for the initially allocated keys */
+extern u32 reserved_allocation_mask; /* bits set for reserved keys */
 
 #define ARCH_VM_PKEY_FLAGS (VM_PKEY_BIT0 | VM_PKEY_BIT1 | VM_PKEY_BIT2 | \
			    VM_PKEY_BIT3 | VM_PKEY_BIT4)
@@ -83,19 +84,21 @@ static inline u16 pte_to_pkey_bits(u64 pteflags)
 #define __mm_pkey_is_allocated(mm, pkey)	\
	(mm_pkey_allocation_map(mm) & pkey_alloc_mask(pkey))
 
-#define __mm_pkey_is_reserved(pkey) (initial_allocation_mask & \
+#define __mm_pkey_is_reserved(pkey) (reserved_allocation_mask & \
				       pkey_alloc_mask(pkey))
 
 static inline bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
 {
-	/* A reserved key is never considered as 'explicitly allocated' */
-	return ((pkey < arch_max_pkey()) &&
-		!__mm_pkey_is_reserved(pkey) &&
-		__mm_pkey_is_allocated(mm, pkey));
+	if (pkey < 0 || pkey >= arch_max_pkey())
+		return false;
+
+	/* Reserved keys are never allocated. */
+	if (__mm_pkey_is_reserved(pkey))
+		return false;
+
+	return __mm_pkey_is_allocated(mm, pkey);
 }
 
-extern void __arch_activate_pkey(int pkey);
-extern void __arch_deactivate_pkey(int pkey);
-
 /*
  * Returns a positive, 5-bit key on success, or -1 on failure.
  * Relies on the mmap_sem to protect against concurrency in mm_pkey_alloc() and
@@ -124,11 +127,6 @@ static inline int mm_pkey_alloc(struct mm_struct *mm)
 	ret = ffz((u32)mm_pkey_allocation_map(mm));
 	__mm_pkey_allocated(mm, ret);
 
-	/*
-	 * Enable the key in the hardware
-	 */
-	if (ret > 0)
-		__arch_activate_pkey(ret);
 	return ret;
 }
 
@@ -140,10 +138,6 @@ static inline int mm_pkey_free(struct mm_struct *mm, int pkey)
 	if (!mm_pkey_is_allocated(mm, pkey))
		return -EINVAL;
 
-	/*
-	 * Disable the key in the hardware
-	 */
-	__arch_deactivate_pkey(pkey);
 	__mm_pkey_free(mm, pkey);
 
 	return 0;
@@ -187,6 +181,16 @@ static inline int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
 {
	if (static_branch_likely(&pkey_disabled))
		return -EINVAL;
+
+	/*
+	 * userspace should not change pkey-0 permissions.
+	 * pkey-0 is associated with every page in the kernel.
+	 * If userspace denies any permission on pkey-0, the
+	 * kernel cannot operate.
+	 */
+	if (pkey == 0)
+		return init_val ? -EINVAL : 0;
+
	return __arch_set_user_pkey_access(tsk, pkey, init_val);
 }
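The key-0 guard above is kernel-internal; from userspace the same lifecycle looks like the following sketch. It assumes glibc's pkey_alloc()/pkey_free() wrappers and a kernel with this series applied; since key 0 is reserved, the allocator never hands it out.

#define _GNU_SOURCE
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	/* Allocates the first free, non-reserved key (never key 0). */
	int pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);

	if (pkey < 0) {
		perror("pkey_alloc");	/* e.g. pkeys disabled or exhausted */
		return 1;
	}
	printf("got pkey %d\n", pkey);
	pkey_free(pkey);
	return 0;
}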
diff --git a/arch/powerpc/include/asm/pnv-pci.h b/arch/powerpc/include/asm/pnv-pci.h
index d2d8c28db336..7f627e3f4da4 100644
--- a/arch/powerpc/include/asm/pnv-pci.h
+++ b/arch/powerpc/include/asm/pnv-pci.h
@@ -50,13 +50,6 @@ int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs,
			       struct pci_dev *dev, int num);
 void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs,
				  struct pci_dev *dev);
-
-/* Support for the cxl kernel api on the real PHB (instead of vPHB) */
-int pnv_cxl_enable_phb_kernel_api(struct pci_controller *hose, bool enable);
-bool pnv_pci_on_cxl_phb(struct pci_dev *dev);
-struct cxl_afu *pnv_cxl_phb_to_afu(struct pci_controller *hose);
-void pnv_cxl_phb_set_peer_afu(struct pci_dev *dev, struct cxl_afu *afu);
-
 #endif
 
 struct pnv_php_slot {
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 4436887bc415..665af14850e4 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -12,8 +12,7 @@
 #ifndef _ASM_POWERPC_PPC_OPCODE_H
 #define _ASM_POWERPC_PPC_OPCODE_H
 
-#include <linux/stringify.h>
-#include <asm/asm-compat.h>
+#include <asm/asm-const.h>
 
 #define	__REG_R0	0
 #define	__REG_R1	1
@@ -367,6 +366,8 @@
 #define PPC_INST_STFDX			0x7c0005ae
 #define PPC_INST_LVX			0x7c0000ce
 #define PPC_INST_STVX			0x7c0001ce
+#define PPC_INST_VCMPEQUD		0x100000c7
+#define PPC_INST_VCMPEQUB		0x10000006
 
 /* macros to insert fields into opcodes */
 #define ___PPC_RA(a)	(((a) & 0x1f) << 16)
@@ -397,6 +398,7 @@
 #define __PPC_BI(s)	(((s) & 0x1f) << 16)
 #define __PPC_CT(t)	(((t) & 0x0f) << 21)
 #define __PPC_SPR(r)	((((r) & 0x1f) << 16) | ((((r) >> 5) & 0x1f) << 11))
+#define __PPC_RC21	(0x1 << 10)
 
 /*
  * Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a
@@ -568,4 +570,12 @@
				       ((IH & 0x7) << 21))
 #define PPC_INVALIDATE_ERAT	PPC_SLBIA(7)
 
+#define VCMPEQUD_RC(vrt, vra, vrb)	stringify_in_c(.long PPC_INST_VCMPEQUD | \
+			      ___PPC_RT(vrt) | ___PPC_RA(vra) | \
+			      ___PPC_RB(vrb) | __PPC_RC21)
+
+#define VCMPEQUB_RC(vrt, vra, vrb)	stringify_in_c(.long PPC_INST_VCMPEQUB | \
+			      ___PPC_RT(vrt) | ___PPC_RA(vra) | \
+			      ___PPC_RB(vrb) | __PPC_RC21)
+
 #endif /* _ASM_POWERPC_PPC_OPCODE_H */
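A worked expansion of the new macros may help (illustrative; it assumes the usual kernel field encodings ___PPC_RT(t) = t << 21 and ___PPC_RB(b) = b << 11, of which only ___PPC_RA is visible in this hunk):

/*
 * VCMPEQUD_RC(0, 1, 2) assembles to:
 *
 *   PPC_INST_VCMPEQUD  0x100000c7
 * | ___PPC_RT(0)       0x00000000   (VRT = v0)
 * | ___PPC_RA(1)       0x00010000   (VRA = v1)
 * | ___PPC_RB(2)       0x00001000   (VRB = v2)
 * | __PPC_RC21         0x00000400   (Rc = 1)
 *   ------------------------------
 *                      0x100114c7   -> ".long 0x100114c7"
 *
 * With Rc=1 the comparison result lands in CR6, so the VMX memcmp
 * loop can branch on "all elements equal" directly instead of
 * extracting the result from a vector register.
 */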
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 75ece56dcd62..b5d023680801 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -9,6 +9,7 @@
 #include <asm/processor.h>
 #include <asm/ppc-opcode.h>
 #include <asm/firmware.h>
+#include <asm/feature-fixups.h>
 
 #ifdef __ASSEMBLY__
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 5debe337ea9d..52fadded5c1e 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -39,10 +39,9 @@
 #endif /* CONFIG_PPC64 */
 
 #ifndef __ASSEMBLY__
-#include <linux/compiler.h>
-#include <linux/cache.h>
+#include <linux/types.h>
+#include <asm/thread_info.h>
 #include <asm/ptrace.h>
-#include <asm/types.h>
 #include <asm/hw_breakpoint.h>
 
 /* We do _not_ want to define new machine types at all, those must die
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index e4923686e43a..447cbd1bee99 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -24,6 +24,7 @@
 #define _ASM_POWERPC_PTRACE_H
 
 #include <uapi/asm/ptrace.h>
+#include <asm/asm-const.h>
 
 #ifdef __powerpc64__
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 562568414cf4..486b7c83b8c5 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -13,6 +13,8 @@
 
 #include <linux/stringify.h>
 #include <asm/cputable.h>
+#include <asm/asm-const.h>
+#include <asm/feature-fixups.h>
 
 /* Pickup Book E specific registers. */
 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
diff --git a/arch/powerpc/include/asm/reg_a2.h b/arch/powerpc/include/asm/reg_a2.h
index 3ba9c6f096fc..74c2c57c492a 100644
--- a/arch/powerpc/include/asm/reg_a2.h
+++ b/arch/powerpc/include/asm/reg_a2.h
@@ -12,6 +12,8 @@
 #ifndef __ASM_POWERPC_REG_A2_H__
 #define __ASM_POWERPC_REG_A2_H__
 
+#include <asm/asm-const.h>
+
 #define SPRN_TENSR	0x1b5
 #define SPRN_TENS	0x1b6	/* Thread ENable Set */
 #define SPRN_TENC	0x1b7	/* Thread ENable Clear */
diff --git a/arch/powerpc/include/asm/reg_fsl_emb.h b/arch/powerpc/include/asm/reg_fsl_emb.h
index d7ccf93e6279..a21f529c43d9 100644
--- a/arch/powerpc/include/asm/reg_fsl_emb.h
+++ b/arch/powerpc/include/asm/reg_fsl_emb.h
@@ -7,6 +7,8 @@
 #ifndef __ASM_POWERPC_REG_FSL_EMB_H__
 #define __ASM_POWERPC_REG_FSL_EMB_H__
 
+#include <linux/stringify.h>
+
 #ifndef __ASSEMBLY__
 /* Performance Monitor Registers */
 #define mfpmr(rn)	({unsigned int rval; \
diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
index 44989b22383c..759597bf0fd8 100644
--- a/arch/powerpc/include/asm/security_features.h
+++ b/arch/powerpc/include/asm/security_features.h
@@ -22,6 +22,7 @@ enum stf_barrier_type {
 
 void setup_stf_barrier(void);
 void do_stf_barrier_fixups(enum stf_barrier_type types);
+void setup_count_cache_flush(void);
 
 static inline void security_ftr_set(unsigned long feature)
 {
@@ -59,6 +60,9 @@ static inline bool security_ftr_enabled(unsigned long feature)
 // Indirect branch prediction cache disabled
 #define SEC_FTR_COUNT_CACHE_DISABLED	0x0000000000000020ull
 
+// bcctr 2,0,0 triggers a hardware assisted count cache flush
+#define SEC_FTR_BCCTR_FLUSH_ASSIST	0x0000000000000800ull
+
 
 // Features indicating need for Spectre/Meltdown mitigations
 
@@ -74,6 +78,9 @@ static inline bool security_ftr_enabled(unsigned long feature)
 // Firmware configuration indicates user favours security over performance
 #define SEC_FTR_FAVOUR_SECURITY		0x0000000000000200ull
 
+// Software required to flush count cache on context switch
+#define SEC_FTR_FLUSH_COUNT_CACHE	0x0000000000000400ull
+
 
 // Features enabled by default
 #define SEC_FTR_DEFAULT \
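A sketch of how setup code might consult these bits (illustrative only; the real policy lives elsewhere in the series, and the function name here is made up):

/* Pick the count cache mitigation flavour from the feature bits above. */
static void choose_count_cache_mitigation(void)
{
	if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE))
		return;	/* firmware says no software flush is needed */

	if (security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
		/* hardware assist: one patched "bcctr 2,0,0" flushes */
	} else {
		/* fall back to the software flush sequence */
	}
}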
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index 8721fd004291..1a951b00465d 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -52,11 +52,15 @@ enum l1d_flush_type {
 
 void setup_rfi_flush(enum l1d_flush_type, bool enable);
 void do_rfi_flush_fixups(enum l1d_flush_type types);
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
 void setup_barrier_nospec(void);
+#else
+static inline void setup_barrier_nospec(void) { };
+#endif
 void do_barrier_nospec_fixups(bool enable);
 extern bool barrier_nospec_enabled;
 
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
 void do_barrier_nospec_fixups_range(bool enable, void *start, void *end);
 #else
 static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { };
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 29ffaabdf75b..95b66a0c639b 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -56,7 +56,6 @@ struct smp_ops_t {
 	int   (*cpu_bootable)(unsigned int nr);
 };
 
-extern void smp_flush_nmi_ipi(u64 delay_us);
 extern int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
 extern int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
 extern void smp_send_debugger_break(void);
diff --git a/arch/powerpc/include/asm/sparsemem.h b/arch/powerpc/include/asm/sparsemem.h
index bc66712bdc3c..28f5dae25db6 100644
--- a/arch/powerpc/include/asm/sparsemem.h
+++ b/arch/powerpc/include/asm/sparsemem.h
@@ -6,13 +6,20 @@
 #ifdef CONFIG_SPARSEMEM
 /*
  * SECTION_SIZE_BITS		2^N: how big each section will be
- * MAX_PHYSADDR_BITS		2^N: how much physical address space we have
  * MAX_PHYSMEM_BITS		2^N: how much memory we can have in that space
  */
 #define SECTION_SIZE_BITS       24
-
-#define MAX_PHYSADDR_BITS       46
+/*
+ * If we store section details in page->flags we can't increase the MAX_PHYSMEM_BITS
+ * if we increase SECTIONS_WIDTH we will not store node details in page->flags and
+ * page_to_nid does a page->section->node lookup
+ * Hence only increase for VMEMMAP.
+ */
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+#define MAX_PHYSMEM_BITS        47
+#else
 #define MAX_PHYSMEM_BITS        46
+#endif
 
 #endif /* CONFIG_SPARSEMEM */
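A back-of-envelope check of the new limits (not part of the patch):

/*
 * SECTION_SIZE_BITS = 24  ->  each mem_section covers 2^24 B = 16 MiB
 * MAX_PHYSMEM_BITS  = 47  ->  2^47 B = 128 TiB of addressable RAM
 * sections needed        =   2^(47 - 24) = 2^23, i.e. 23 bits of
 *                            section index in page->flags for classic
 *                            sparsemem
 *
 * With SPARSEMEM_VMEMMAP the section index is not kept in page->flags
 * at all, which is why only that configuration can grow to 47 bits.
 */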
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 72dc4ddc2972..685c72310f5d 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -24,9 +24,9 @@
 #include <asm/paca.h>
 #include <asm/hvcall.h>
 #endif
-#include <asm/asm-compat.h>
 #include <asm/synch.h>
 #include <asm/ppc-opcode.h>
+#include <asm/asm-405.h>
 
 #ifdef CONFIG_PPC64
 /* use 0x800000yy when locked, where yy == CPU number */
diff --git a/arch/powerpc/include/asm/stacktrace.h b/arch/powerpc/include/asm/stacktrace.h
new file mode 100644
index 000000000000..6149b53b3bc8
--- /dev/null
+++ b/arch/powerpc/include/asm/stacktrace.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Stack trace functions.
+ *
+ * Copyright 2018, Murilo Opsfelder Araujo, IBM Corporation.
+ */
+
+#ifndef _ASM_POWERPC_STACKTRACE_H
+#define _ASM_POWERPC_STACKTRACE_H
+
+void show_user_instructions(struct pt_regs *regs);
+
+#endif /* _ASM_POWERPC_STACKTRACE_H */
diff --git a/arch/powerpc/include/asm/string.h b/arch/powerpc/include/asm/string.h
index 9b8cedf618f4..1647de15a31e 100644
--- a/arch/powerpc/include/asm/string.h
+++ b/arch/powerpc/include/asm/string.h
@@ -50,6 +50,8 @@ static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n)
	return __memset64(p, v, n * 8);
 }
 #else
+#define __HAVE_ARCH_STRLEN
+
 extern void *memset16(uint16_t *, uint16_t, __kernel_size_t);
 #endif
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
index 6ec546090ba1..aca70fb43147 100644
--- a/arch/powerpc/include/asm/synch.h
+++ b/arch/powerpc/include/asm/synch.h
@@ -3,8 +3,8 @@
 #define _ASM_POWERPC_SYNCH_H
 #ifdef __KERNEL__
 
-#include <linux/stringify.h>
 #include <asm/feature-fixups.h>
+#include <asm/asm-const.h>
 
 #ifndef __ASSEMBLY__
 extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index f308dfeb2746..3c0002044bc9 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -9,6 +9,8 @@
 #ifndef _ASM_POWERPC_THREAD_INFO_H
 #define _ASM_POWERPC_THREAD_INFO_H
 
+#include <asm/asm-const.h>
+
 #ifdef __KERNEL__
 
 #define THREAD_SHIFT		CONFIG_THREAD_SHIFT
@@ -25,7 +27,6 @@
 #include <linux/cache.h>
 #include <asm/processor.h>
 #include <asm/page.h>
-#include <linux/stringify.h>
 #include <asm/accounting.h>
 
 /*
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index 9138baccebb0..f0e571b2dc7c 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -17,7 +17,6 @@
 #include <asm/pgtable.h>
 #endif
 #include <asm/pgalloc.h>
-#include <asm/tlbflush.h>
 #ifndef __powerpc64__
 #include <asm/page.h>
 #include <asm/mmu.h>
@@ -53,7 +52,8 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
	if (!tlb->page_size)
		tlb->page_size = page_size;
	else if (tlb->page_size != page_size) {
-		tlb_flush_mmu(tlb);
+		if (!tlb->fullmm)
+			tlb_flush_mmu(tlb);
		/*
		 * update the page size after flush for the new
		 * mmu_gather.
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index 7d5a157c7832..61fba43bf8b2 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -1,87 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _ASM_POWERPC_TLBFLUSH_H
 #define _ASM_POWERPC_TLBFLUSH_H
 
-/*
- * TLB flushing:
- *
- *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
- *  - flush_tlb_page(vma, vmaddr) flushes one page
- *  - local_flush_tlb_mm(mm, full) flushes the specified mm context on
- *                           the local processor
- *  - local_flush_tlb_page(vma, vmaddr) flushes one page on the local processor
- *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
- *  - flush_tlb_range(vma, start, end) flushes a range of pages
- *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#ifdef __KERNEL__
-
-#ifdef CONFIG_PPC_MMU_NOHASH
-/*
- * TLB flushing for software loaded TLB chips
- *
- * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
- * flush_tlb_kernel_range are best implemented as tlbia vs
- * specific tlbie's
- */
-
-struct vm_area_struct;
-struct mm_struct;
-
-#define MMU_NO_CONTEXT	((unsigned int)-1)
-
-extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-			    unsigned long end);
-extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
-
-extern void local_flush_tlb_mm(struct mm_struct *mm);
-extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-
-extern void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
-				   int tsize, int ind);
-
-#ifdef CONFIG_SMP
-extern void flush_tlb_mm(struct mm_struct *mm);
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
-			     int tsize, int ind);
-#else
-#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
-#define flush_tlb_page(vma,addr)	local_flush_tlb_page(vma,addr)
-#define __flush_tlb_page(mm,addr,p,i)	__local_flush_tlb_page(mm,addr,p,i)
-#endif
-
-#elif defined(CONFIG_PPC_STD_MMU_32)
-
-#define MMU_NO_CONTEXT	(0)
-/*
- * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
- */
-extern void flush_tlb_mm(struct mm_struct *mm);
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
-extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-			    unsigned long end);
-extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
-static inline void local_flush_tlb_page(struct vm_area_struct *vma,
-					unsigned long vmaddr)
-{
-	flush_tlb_page(vma, vmaddr);
-}
-static inline void local_flush_tlb_mm(struct mm_struct *mm)
-{
-	flush_tlb_mm(mm);
-}
-
-#elif defined(CONFIG_PPC_BOOK3S_64)
-#include <asm/book3s/64/tlbflush.h>
+#ifdef CONFIG_PPC_BOOK3S
+#include <asm/book3s/tlbflush.h>
 #else
-#error Unsupported MMU type
-#endif
+#include <asm/nohash/tlbflush.h>
+#endif /* !CONFIG_PPC_BOOK3S */
 
-#endif /*__KERNEL__ */
 #endif /* _ASM_POWERPC_TLBFLUSH_H */
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 468653ce844c..bac225bb7f64 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -2,7 +2,6 @@
 #ifndef _ARCH_POWERPC_UACCESS_H
 #define _ARCH_POWERPC_UACCESS_H
 
-#include <asm/asm-compat.h>
 #include <asm/ppc_asm.h>
 #include <asm/processor.h>
 #include <asm/page.h>
@@ -250,10 +249,17 @@ do {								\
	}							\
 } while (0)
 
+/*
+ * This is a type: either unsigned long, if the argument fits into
+ * that type, or otherwise unsigned long long.
+ */
+#define __long_type(x) \
+	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+
 #define __get_user_nocheck(x, ptr, size)			\
 ({								\
	long __gu_err;						\
-	unsigned long __gu_val;					\
+	__long_type(*(ptr)) __gu_val;				\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__chk_user_ptr(ptr);					\
	if (!is_kernel_addr((unsigned long)__gu_addr))		\
@@ -267,7 +273,7 @@ do {								\
 #define __get_user_check(x, ptr, size)					\
 ({									\
	long __gu_err = -EFAULT;					\
-	unsigned long __gu_val = 0;					\
+	__long_type(*(ptr)) __gu_val = 0;				\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	might_fault();							\
	if (access_ok(VERIFY_READ, __gu_addr, (size))) {		\
@@ -281,7 +287,7 @@ do {								\
 #define __get_user_nosleep(x, ptr, size)			\
 ({								\
	long __gu_err;						\
-	unsigned long __gu_val;					\
+	__long_type(*(ptr)) __gu_val;				\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__chk_user_ptr(ptr);					\
	barrier_nospec();					\
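The effect of __long_type() is easiest to see outside the kernel. The following standalone program (assuming the GCC/Clang __builtin_choose_expr builtin) shows why get_user(u64, p) no longer truncates on 32-bit: the temporary becomes unsigned long long when the destination is wider than unsigned long.

#include <stdio.h>

#define __long_type(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

int main(void)
{
	unsigned long long v64 = 0;
	unsigned int v32 = 0;

	/* On 32-bit, sizeof(v64) > sizeof(0UL), so the temp is 8 bytes... */
	printf("u64 temp is %zu bytes\n", sizeof(__long_type(v64)));
	/* ...while a 32-bit value keeps the cheap unsigned long temp. */
	printf("u32 temp is %zu bytes\n", sizeof(__long_type(v32)));
	return 0;
}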
diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h
index 8d1a2792484f..3c704f5dd3ae 100644
--- a/arch/powerpc/include/asm/xive.h
+++ b/arch/powerpc/include/asm/xive.h
@@ -87,7 +87,6 @@ extern int xive_smp_prepare_cpu(unsigned int cpu);
 extern void xive_smp_setup_cpu(void);
 extern void xive_smp_disable_cpu(void);
 extern void xive_teardown_cpu(void);
-extern void xive_kexec_teardown_cpu(int secondary);
 extern void xive_shutdown(void);
 extern void xive_flush_interrupt(void);