Diffstat (limited to 'arch/powerpc/include')
69 files changed, 681 insertions, 636 deletions
diff --git a/arch/powerpc/include/asm/8xx_immap.h b/arch/powerpc/include/asm/8xx_immap.h index bdf0563ba423..f9cac46a95cb 100644 --- a/arch/powerpc/include/asm/8xx_immap.h +++ b/arch/powerpc/include/asm/8xx_immap.h @@ -560,5 +560,7 @@ typedef struct immap { cpm8xx_t im_cpm; /* Communication processor */ } immap_t; +extern immap_t __iomem *mpc8xx_immr; + #endif /* __IMMAP_8XX__ */ #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild index 419319c4963c..61a8d5555cd7 100644 --- a/arch/powerpc/include/asm/Kbuild +++ b/arch/powerpc/include/asm/Kbuild @@ -3,7 +3,6 @@ generated-y += syscall_table_32.h generated-y += syscall_table_64.h generated-y += syscall_table_spu.h generic-y += agp.h -generic-y += export.h generic-y += kvm_types.h generic-y += mcs_spinlock.h generic-y += qrwlock.h diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h index 7e0f0322912b..671ecc6711e3 100644 --- a/arch/powerpc/include/asm/bitops.h +++ b/arch/powerpc/include/asm/bitops.h @@ -233,35 +233,24 @@ static inline int arch_test_and_change_bit(unsigned long nr, return test_and_change_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0; } -#ifdef CONFIG_PPC64 -static inline unsigned long -clear_bit_unlock_return_word(int nr, volatile unsigned long *addr) +static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask, + volatile unsigned long *p) { unsigned long old, t; - unsigned long *p = (unsigned long *)addr + BIT_WORD(nr); - unsigned long mask = BIT_MASK(nr); __asm__ __volatile__ ( PPC_RELEASE_BARRIER "1:" PPC_LLARX "%0,0,%3,0\n" - "andc %1,%0,%2\n" + "xor %1,%0,%2\n" PPC_STLCX "%1,0,%3\n" "bne- 1b\n" : "=&r" (old), "=&r" (t) : "r" (mask), "r" (p) : "cc", "memory"); - return old; + return (old & BIT_MASK(7)) != 0; } - -/* - * This is a special function for mm/filemap.c - * Bit 7 corresponds to PG_waiters. 
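The bitops.h change replaces clear_bit_unlock_return_word() and the arch_clear_bit_unlock_is_negative_byte() wrapper with arch_xor_unlock_is_negative_byte(): a release-ordered atomic xor whose caller cares only whether bit 7 (PG_waiters in mm/filemap.c's usage) was set in the old word, and which now builds on 32-bit too since the asm uses the PPC_LLARX/PPC_STLCX width macros. A minimal userspace C11 model of the semantics, not the kernel implementation:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Release-ordered atomic xor; report bit 7 of the old value. */
static bool xor_unlock_is_negative_byte(unsigned long mask,
                                        _Atomic unsigned long *p)
{
        unsigned long old = atomic_fetch_xor_explicit(p, mask,
                                                      memory_order_release);
        return (old & (1UL << 7)) != 0;
}

int main(void)
{
        /* Bit 0 models PG_locked, bit 7 models PG_waiters. */
        _Atomic unsigned long flags = (1UL << 0) | (1UL << 7);

        printf("waiters were pending: %d\n",
               xor_unlock_is_negative_byte(1UL << 0, &flags));
        printf("flags now: %#lx\n", (unsigned long)atomic_load(&flags));
        return 0;
}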
- */ -#define arch_clear_bit_unlock_is_negative_byte(nr, addr) \ - (clear_bit_unlock_return_word(nr, addr) & BIT_MASK(7)) - -#endif /* CONFIG_PPC64 */ +#define arch_xor_unlock_is_negative_byte arch_xor_unlock_is_negative_byte #include <asm-generic/bitops/non-atomic.h> diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h index 678f9c9d89b6..4e14a5427a63 100644 --- a/arch/powerpc/include/asm/book3s/32/kup.h +++ b/arch/powerpc/include/asm/book3s/32/kup.h @@ -9,79 +9,53 @@ #ifndef __ASSEMBLY__ -#include <linux/jump_label.h> - -extern struct static_key_false disable_kuap_key; - -static __always_inline bool kuep_is_disabled(void) -{ - return !IS_ENABLED(CONFIG_PPC_KUEP); -} - #ifdef CONFIG_PPC_KUAP #include <linux/sched.h> #define KUAP_NONE (~0UL) -#define KUAP_ALL (~1UL) -static __always_inline bool kuap_is_disabled(void) -{ - return static_branch_unlikely(&disable_kuap_key); -} - -static inline void kuap_lock_one(unsigned long addr) +static __always_inline void kuap_lock_one(unsigned long addr) { mtsr(mfsr(addr) | SR_KS, addr); isync(); /* Context sync required after mtsr() */ } -static inline void kuap_unlock_one(unsigned long addr) +static __always_inline void kuap_unlock_one(unsigned long addr) { mtsr(mfsr(addr) & ~SR_KS, addr); isync(); /* Context sync required after mtsr() */ } -static inline void kuap_lock_all(void) +static __always_inline void uaccess_begin_32s(unsigned long addr) { - update_user_segments(mfsr(0) | SR_KS); - isync(); /* Context sync required after mtsr() */ -} + unsigned long tmp; -static inline void kuap_unlock_all(void) -{ - update_user_segments(mfsr(0) & ~SR_KS); - isync(); /* Context sync required after mtsr() */ + asm volatile(ASM_MMU_FTR_IFSET( + "mfsrin %0, %1;" + "rlwinm %0, %0, 0, %2;" + "mtsrin %0, %1;" + "isync", "", %3) + : "=&r"(tmp) + : "r"(addr), "i"(~SR_KS), "i"(MMU_FTR_KUAP) + : "memory"); } -void kuap_lock_all_ool(void); -void kuap_unlock_all_ool(void); - -static inline void kuap_lock_addr(unsigned long addr, bool ool) +static __always_inline void uaccess_end_32s(unsigned long addr) { - if (likely(addr != KUAP_ALL)) - kuap_lock_one(addr); - else if (!ool) - kuap_lock_all(); - else - kuap_lock_all_ool(); -} + unsigned long tmp; -static inline void kuap_unlock(unsigned long addr, bool ool) -{ - if (likely(addr != KUAP_ALL)) - kuap_unlock_one(addr); - else if (!ool) - kuap_unlock_all(); - else - kuap_unlock_all_ool(); + asm volatile(ASM_MMU_FTR_IFSET( + "mfsrin %0, %1;" + "oris %0, %0, %2;" + "mtsrin %0, %1;" + "isync", "", %3) + : "=&r"(tmp) + : "r"(addr), "i"(SR_KS >> 16), "i"(MMU_FTR_KUAP) + : "memory"); } -static inline void __kuap_lock(void) -{ -} - -static inline void __kuap_save_and_lock(struct pt_regs *regs) +static __always_inline void __kuap_save_and_lock(struct pt_regs *regs) { unsigned long kuap = current->thread.kuap; @@ -90,18 +64,19 @@ static inline void __kuap_save_and_lock(struct pt_regs *regs) return; current->thread.kuap = KUAP_NONE; - kuap_lock_addr(kuap, false); + kuap_lock_one(kuap); } +#define __kuap_save_and_lock __kuap_save_and_lock -static inline void kuap_user_restore(struct pt_regs *regs) +static __always_inline void kuap_user_restore(struct pt_regs *regs) { } -static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap) +static __always_inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap) { if (unlikely(kuap != KUAP_NONE)) { current->thread.kuap = KUAP_NONE; - kuap_lock_addr(kuap, false); + kuap_lock_one(kuap); } if (likely(regs->kuap == 
KUAP_NONE)) @@ -109,10 +84,10 @@ static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kua current->thread.kuap = regs->kuap; - kuap_unlock(regs->kuap, false); + kuap_unlock_one(regs->kuap); } -static inline unsigned long __kuap_get_and_assert_locked(void) +static __always_inline unsigned long __kuap_get_and_assert_locked(void) { unsigned long kuap = current->thread.kuap; @@ -120,9 +95,10 @@ static inline unsigned long __kuap_get_and_assert_locked(void) return kuap; } +#define __kuap_get_and_assert_locked __kuap_get_and_assert_locked -static __always_inline void __allow_user_access(void __user *to, const void __user *from, - u32 size, unsigned long dir) +static __always_inline void allow_user_access(void __user *to, const void __user *from, + u32 size, unsigned long dir) { BUILD_BUG_ON(!__builtin_constant_p(dir)); @@ -130,10 +106,10 @@ static __always_inline void __allow_user_access(void __user *to, const void __us return; current->thread.kuap = (__force u32)to; - kuap_unlock_one((__force u32)to); + uaccess_begin_32s((__force u32)to); } -static __always_inline void __prevent_user_access(unsigned long dir) +static __always_inline void prevent_user_access(unsigned long dir) { u32 kuap = current->thread.kuap; @@ -143,42 +119,51 @@ static __always_inline void __prevent_user_access(unsigned long dir) return; current->thread.kuap = KUAP_NONE; - kuap_lock_addr(kuap, true); + uaccess_end_32s(kuap); } -static inline unsigned long __prevent_user_access_return(void) +static __always_inline unsigned long prevent_user_access_return(void) { unsigned long flags = current->thread.kuap; if (flags != KUAP_NONE) { current->thread.kuap = KUAP_NONE; - kuap_lock_addr(flags, true); + uaccess_end_32s(flags); } return flags; } -static inline void __restore_user_access(unsigned long flags) +static __always_inline void restore_user_access(unsigned long flags) { if (flags != KUAP_NONE) { current->thread.kuap = flags; - kuap_unlock(flags, true); + uaccess_begin_32s(flags); } } -static inline bool +static __always_inline bool __bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write) { unsigned long kuap = regs->kuap; - if (!is_write || kuap == KUAP_ALL) + if (!is_write) return false; if (kuap == KUAP_NONE) return true; - /* If faulting address doesn't match unlocked segment, unlock all */ - if ((kuap ^ address) & 0xf0000000) - regs->kuap = KUAP_ALL; + /* + * If faulting address doesn't match unlocked segment, change segment. + * In case of unaligned store crossing two segments, emulate store. 
+ */ + if ((kuap ^ address) & 0xf0000000) { + if (!(kuap & 0x0fffffff) && address > kuap - 4 && fix_alignment(regs)) { + regs_add_return_ip(regs, 4); + emulate_single_step(regs); + } else { + regs->kuap = address; + } + } return false; } diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index 7bf1fe7297c6..9b13eb14e21b 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -462,11 +462,6 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) pgprot_val(pgprot)); } -static inline unsigned long pte_pfn(pte_t pte) -{ - return pte_val(pte) >> PTE_RPN_SHIFT; -} - /* Generic modifiers for PTE bits */ static inline pte_t pte_wrprotect(pte_t pte) { @@ -498,7 +493,7 @@ static inline pte_t pte_mkpte(pte_t pte) return pte; } -static inline pte_t pte_mkwrite(pte_t pte) +static inline pte_t pte_mkwrite_novma(pte_t pte) { return __pte(pte_val(pte) | _PAGE_RW); } @@ -541,58 +536,43 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) /* This low level function performs the actual PTE insertion - * Setting the PTE depends on the MMU type and other factors. It's - * an horrible mess that I'm not going to try to clean up now but - * I'm keeping it in one place rather than spread around + * Setting the PTE depends on the MMU type and other factors. + * + * First case is 32-bit in UP mode with 32-bit PTEs, we need to preserve + * the _PAGE_HASHPTE bit since we may not have invalidated the previous + * translation in the hash yet (done in a subsequent flush_tlb_xxx()) + * and see we need to keep track that this PTE needs invalidating. + * + * Second case is 32-bit with 64-bit PTE. In this case, we + * can just store as long as we do the two halves in the right order + * with a barrier in between. This is possible because we take care, + * in the hash code, to pre-invalidate if the PTE was already hashed, + * which synchronizes us with any concurrent invalidation. + * In the percpu case, we fallback to the simple update preserving + * the hash bits (ie, same as the non-SMP case). + * + * Third case is 32-bit in SMP mode with 32-bit PTEs. We use the + * helper pte_update() which does an atomic update. We need to do that + * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a + * per-CPU PTE such as a kmap_atomic, we also do a simple update preserving + * the hash bits instead. */ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte, int percpu) { -#if defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT) - /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the - * helper pte_update() which does an atomic update. We need to do that - * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a - * per-CPU PTE such as a kmap_atomic, we do a simple update preserving - * the hash bits instead (ie, same as the non-SMP case) - */ - if (percpu) - *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) - | (pte_val(pte) & ~_PAGE_HASHPTE)); - else - pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, pte_val(pte), 0); + if ((!IS_ENABLED(CONFIG_SMP) && !IS_ENABLED(CONFIG_PTE_64BIT)) || percpu) { + *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) | + (pte_val(pte) & ~_PAGE_HASHPTE)); + } else if (IS_ENABLED(CONFIG_PTE_64BIT)) { + if (pte_val(*ptep) & _PAGE_HASHPTE) + flush_hash_entry(mm, ptep, addr); -#elif defined(CONFIG_PTE_64BIT) - /* Second case is 32-bit with 64-bit PTE. 
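The __bad_kuap_fault() rewrite in the book3s/32 kup.h hunk above distinguishes three outcomes for a write fault: no segment unlocked means a real KUAP violation; a fault in a different 256MB segment normally just switches which segment is unlocked; and the special case of an unaligned store that starts in the last bytes below the unlocked segment's base and crosses into it is emulated via fix_alignment(). A standalone sketch of that classification, with KUAP_NONE and the segment mask taken from the hunk and the enum names invented for illustration:

#include <stdbool.h>
#include <stdio.h>

#define KUAP_NONE (~0UL)

enum kuap_action {
        NOT_KUAP,               /* fault is not a KUAP violation */
        KUAP_VIOLATION,         /* nothing was unlocked: real fault */
        SWITCH_SEGMENT,         /* unlock the faulting segment instead */
        EMULATE_UNALIGNED,      /* store crosses into the unlocked segment */
};

static enum kuap_action classify(unsigned long kuap, unsigned long address,
                                 bool is_write)
{
        if (!is_write)
                return NOT_KUAP;
        if (kuap == KUAP_NONE)
                return KUAP_VIOLATION;
        if ((kuap ^ address) & 0xf0000000) {
                /*
                 * kuap is segment-aligned only if its low 28 bits are clear;
                 * a fault at kuap - 3 .. kuap - 1 is a store that began
                 * below the boundary and crosses it.
                 */
                if (!(kuap & 0x0fffffff) && address > kuap - 4)
                        return EMULATE_UNALIGNED;
                return SWITCH_SEGMENT;
        }
        return NOT_KUAP;
}

int main(void)
{
        printf("%d\n", classify(0x20000000, 0x1ffffffe, true)); /* emulate */
        printf("%d\n", classify(0x20001000, 0x30000000, true)); /* switch */
        printf("%d\n", classify(KUAP_NONE,  0x30000000, true)); /* violation */
        return 0;
}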
In this case, we - * can just store as long as we do the two halves in the right order - * with a barrier in between. This is possible because we take care, - * in the hash code, to pre-invalidate if the PTE was already hashed, - * which synchronizes us with any concurrent invalidation. - * In the percpu case, we also fallback to the simple update preserving - * the hash bits - */ - if (percpu) { - *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) - | (pte_val(pte) & ~_PAGE_HASHPTE)); - return; + asm volatile("stw%X0 %2,%0; eieio; stw%X1 %L2,%1" : + "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) : + "r" (pte) : "memory"); + } else { + pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, pte_val(pte), 0); } - if (pte_val(*ptep) & _PAGE_HASHPTE) - flush_hash_entry(mm, ptep, addr); - __asm__ __volatile__("\ - stw%X0 %2,%0\n\ - eieio\n\ - stw%X1 %L2,%1" - : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) - : "r" (pte) : "memory"); - -#else - /* Third case is 32-bit hash table in UP mode, we need to preserve - * the _PAGE_HASHPTE bit since we may not have invalidated the previous - * translation in the hash yet (done in a subsequent flush_tlb_xxx()) - * and see we need to keep track that this PTE needs invalidating - */ - *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) - | (pte_val(pte) & ~_PAGE_HASHPTE)); -#endif } /* diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h index b6ac4f86c87b..6472b08fa1b0 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-4k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h @@ -136,12 +136,6 @@ static inline int hash__pmd_trans_huge(pmd_t pmd) return 0; } -static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b) -{ - BUG(); - return 0; -} - static inline pmd_t hash__pmd_mkhuge(pmd_t pmd) { BUG(); diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h index 338e62fbea0b..0bf6fd0bf42a 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-64k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h @@ -263,11 +263,6 @@ static inline int hash__pmd_trans_huge(pmd_t pmd) (_PAGE_PTE | H_PAGE_THP_HUGE)); } -static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b) -{ - return (((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0); -} - static inline pmd_t hash__pmd_mkhuge(pmd_t pmd) { return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE)); diff --git a/arch/powerpc/include/asm/book3s/64/hash-pkey.h b/arch/powerpc/include/asm/book3s/64/hash-pkey.h index f1e60d579f6c..6c5564c4fae4 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-pkey.h +++ b/arch/powerpc/include/asm/book3s/64/hash-pkey.h @@ -24,7 +24,7 @@ static inline u64 pte_to_hpte_pkey_bits(u64 pteflags, unsigned long flags) ((pteflags & H_PTE_PKEY_BIT1) ? HPTE_R_KEY_BIT1 : 0x0UL) | ((pteflags & H_PTE_PKEY_BIT0) ? 
HPTE_R_KEY_BIT0 : 0x0UL)); - if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP) || + if (mmu_has_feature(MMU_FTR_KUAP) || mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) { if ((pte_pkey == 0) && (flags & HPTE_USE_KERNEL_KEY)) return HASH_DEFAULT_KERNEL_KEY; diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h index 17e7a778c856..6e70ae511631 100644 --- a/arch/powerpc/include/asm/book3s/64/hash.h +++ b/arch/powerpc/include/asm/book3s/64/hash.h @@ -132,8 +132,22 @@ static inline int get_region_id(unsigned long ea) return region_id; } +static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b) +{ + return (((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0); +} + #define hash__pmd_bad(pmd) (pmd_val(pmd) & H_PMD_BAD_BITS) + +/* + * pud comparison that will work with both pte and page table pointer. + */ +static inline int hash__pud_same(pud_t pud_a, pud_t pud_b) +{ + return (((pud_raw(pud_a) ^ pud_raw(pud_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0); +} #define hash__pud_bad(pud) (pud_val(pud) & H_PUD_BAD_BITS) + static inline int hash__p4d_bad(p4d_t p4d) { return (p4d_val(p4d) == 0); diff --git a/arch/powerpc/include/asm/book3s/64/kup.h b/arch/powerpc/include/asm/book3s/64/kup.h index 84c09e546115..497a7bd31ecc 100644 --- a/arch/powerpc/include/asm/book3s/64/kup.h +++ b/arch/powerpc/include/asm/book3s/64/kup.h @@ -31,7 +31,7 @@ mfspr \gpr2, SPRN_AMR cmpd \gpr1, \gpr2 beq 99f - END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUAP, 68) + END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_KUAP, 68) isync mtspr SPRN_AMR, \gpr1 @@ -78,7 +78,7 @@ * No need to restore IAMR when returning to kernel space. */ 100: - END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67) + END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_KUAP, 67) #endif .endm @@ -91,7 +91,7 @@ LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED) 999: tdne \gpr1, \gpr2 EMIT_WARN_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE) - END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67) + END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_KUAP, 67) #endif .endm #endif @@ -130,7 +130,7 @@ */ BEGIN_MMU_FTR_SECTION_NESTED(68) b 100f // skip_save_amr - END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY | MMU_FTR_BOOK3S_KUAP, 68) + END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY | MMU_FTR_KUAP, 68) /* * if pkey is disabled and we are entering from userspace @@ -166,7 +166,7 @@ mtspr SPRN_AMR, \gpr2 isync 102: - END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 69) + END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_KUAP, 69) /* * if entering from kernel we don't need save IAMR @@ -213,14 +213,14 @@ extern u64 __ro_after_init default_iamr; * access restrictions. Because of this ignore AMR value when accessing * userspace via kernel thread. 
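hash__pmd_same() moves out of the hash-4k.h/hash-64k.h variants into the shared hash.h, and hash__pud_same() joins it: both compare entries with the _PAGE_HPTEFLAGS bookkeeping bits masked out, since the hash-table code may flip those without the entry being meaningfully different. A simplified model; the real code compares big-endian raw values, and the mask value below is made up:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_HPTEFLAGS 0x0000000000000ff0ULL   /* placeholder mask */

static bool hash_entry_same(uint64_t a, uint64_t b)
{
        /* Equal once the volatile bookkeeping bits are ignored. */
        return ((a ^ b) & ~PAGE_HPTEFLAGS) == 0;
}

int main(void)
{
        uint64_t pmd = 0x8000000012340001ULL;

        printf("%d\n", hash_entry_same(pmd, pmd | 0x10));   /* 1: same */
        printf("%d\n", hash_entry_same(pmd, pmd | 0x1000)); /* 0: differs */
        return 0;
}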
*/ -static inline u64 current_thread_amr(void) +static __always_inline u64 current_thread_amr(void) { if (current->thread.regs) return current->thread.regs->amr; return default_amr; } -static inline u64 current_thread_iamr(void) +static __always_inline u64 current_thread_iamr(void) { if (current->thread.regs) return current->thread.regs->iamr; @@ -230,12 +230,7 @@ static inline u64 current_thread_iamr(void) #ifdef CONFIG_PPC_KUAP -static __always_inline bool kuap_is_disabled(void) -{ - return !mmu_has_feature(MMU_FTR_BOOK3S_KUAP); -} - -static inline void kuap_user_restore(struct pt_regs *regs) +static __always_inline void kuap_user_restore(struct pt_regs *regs) { bool restore_amr = false, restore_iamr = false; unsigned long amr, iamr; @@ -243,7 +238,7 @@ static inline void kuap_user_restore(struct pt_regs *regs) if (!mmu_has_feature(MMU_FTR_PKEY)) return; - if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) { + if (!mmu_has_feature(MMU_FTR_KUAP)) { amr = mfspr(SPRN_AMR); if (amr != regs->amr) restore_amr = true; @@ -274,7 +269,7 @@ static inline void kuap_user_restore(struct pt_regs *regs) */ } -static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr) +static __always_inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr) { if (likely(regs->amr == amr)) return; @@ -290,7 +285,7 @@ static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr */ } -static inline unsigned long __kuap_get_and_assert_locked(void) +static __always_inline unsigned long __kuap_get_and_assert_locked(void) { unsigned long amr = mfspr(SPRN_AMR); @@ -298,22 +293,16 @@ static inline unsigned long __kuap_get_and_assert_locked(void) WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED); return amr; } +#define __kuap_get_and_assert_locked __kuap_get_and_assert_locked -/* Do nothing, book3s/64 does that in ASM */ -static inline void __kuap_lock(void) -{ -} - -static inline void __kuap_save_and_lock(struct pt_regs *regs) -{ -} +/* __kuap_lock() not required, book3s/64 does that in ASM */ /* * We support individually allowing read or write, but we don't support nesting * because that would require an expensive read/modify write of the AMR. */ -static inline unsigned long get_kuap(void) +static __always_inline unsigned long get_kuap(void) { /* * We return AMR_KUAP_BLOCKED when we don't support KUAP because @@ -323,7 +312,7 @@ static inline unsigned long get_kuap(void) * This has no effect in terms of actually blocking things on hash, * so it doesn't break anything. */ - if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) + if (!mmu_has_feature(MMU_FTR_KUAP)) return AMR_KUAP_BLOCKED; return mfspr(SPRN_AMR); @@ -331,7 +320,7 @@ static inline unsigned long get_kuap(void) static __always_inline void set_kuap(unsigned long value) { - if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) + if (!mmu_has_feature(MMU_FTR_KUAP)) return; /* @@ -343,7 +332,8 @@ static __always_inline void set_kuap(unsigned long value) isync(); } -static inline bool __bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write) +static __always_inline bool +__bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write) { /* * For radix this will be a storage protection fault (DSISR_PROTFAULT). 
@@ -386,12 +376,12 @@ static __always_inline void allow_user_access(void __user *to, const void __user #else /* CONFIG_PPC_KUAP */ -static inline unsigned long get_kuap(void) +static __always_inline unsigned long get_kuap(void) { return AMR_KUAP_BLOCKED; } -static inline void set_kuap(unsigned long value) { } +static __always_inline void set_kuap(unsigned long value) { } static __always_inline void allow_user_access(void __user *to, const void __user *from, unsigned long size, unsigned long dir) @@ -406,7 +396,7 @@ static __always_inline void prevent_user_access(unsigned long dir) do_uaccess_flush(); } -static inline unsigned long prevent_user_access_return(void) +static __always_inline unsigned long prevent_user_access_return(void) { unsigned long flags = get_kuap(); @@ -417,7 +407,7 @@ static inline unsigned long prevent_user_access_return(void) return flags; } -static inline void restore_user_access(unsigned long flags) +static __always_inline void restore_user_access(unsigned long flags) { set_kuap(flags); if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED) diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index 570a4960cf17..fedbc5d38191 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -71,10 +71,7 @@ extern unsigned int mmu_pid_bits; /* Base PID to allocate from */ extern unsigned int mmu_base_pid; -/* - * memory block size used with radix translation. - */ -extern unsigned long __ro_after_init radix_mem_block_size; +extern unsigned long __ro_after_init memory_block_size; #define PRTB_SIZE_SHIFT (mmu_pid_bits + 4) #define PRTB_ENTRIES (1ul << mmu_pid_bits) @@ -261,7 +258,7 @@ static inline void radix_init_pseries(void) { } #define arch_clear_mm_cpumask_cpu(cpu, mm) \ do { \ if (cpumask_test_cpu(cpu, mm_cpumask(mm))) { \ - atomic_dec(&(mm)->context.active_cpus); \ + dec_mm_active_cpus(mm); \ cpumask_clear_cpu(cpu, mm_cpumask(mm)); \ } \ } while (0) diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 4acc9690f599..5c497c862d75 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -104,6 +104,7 @@ * and every thing below PAGE_SHIFT; */ #define PTE_RPN_MASK (((1UL << _PAGE_PA_MAX) - 1) & (PAGE_MASK)) +#define PTE_RPN_SHIFT PAGE_SHIFT /* * set of bits not changed in pmd_modify. Even though we have hash specific bits * in here, on radix we expect them to be zero. 
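The 64s pgtable.h hunk adds PTE_RPN_SHIFT (equal to PAGE_SHIFT here) next to PTE_RPN_MASK and, like the 32-bit header earlier in this diff, drops its private pte_pfn(), presumably so one generic definition built on those two macros can serve everywhere. A round-trip sketch with assumed constants; PAGE_SHIFT, the physical-address width, and the _PAGE_PTE position are placeholders:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      16                      /* 64K pages, as on book3s/64 */
#define PTE_RPN_SHIFT   PAGE_SHIFT
#define PAGE_PA_MAX     56                      /* assumed physical bits */
#define PAGE_MASK       (~((1ULL << PAGE_SHIFT) - 1))
#define PTE_RPN_MASK    (((1ULL << PAGE_PA_MAX) - 1) & PAGE_MASK)
#define PAGE_PTE        (1ULL << 62)            /* placeholder _PAGE_PTE bit */

static uint64_t pfn_pte(uint64_t pfn, uint64_t prot)
{
        return (pfn << PAGE_SHIFT) | prot | PAGE_PTE;
}

static uint64_t pte_pfn(uint64_t pte)
{
        return (pte & PTE_RPN_MASK) >> PTE_RPN_SHIFT;
}

int main(void)
{
        uint64_t pte = pfn_pte(0x12345, 0x3);   /* 0x3: placeholder prot bits */

        assert(pte_pfn(pte) == 0x12345);
        printf("pte %#lx -> pfn %#lx\n", (unsigned long)pte,
               (unsigned long)pte_pfn(pte));
        return 0;
}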
@@ -569,11 +570,6 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) return __pte(((pte_basic_t)pfn << PAGE_SHIFT) | pgprot_val(pgprot) | _PAGE_PTE); } -static inline unsigned long pte_pfn(pte_t pte) -{ - return (pte_val(pte) & PTE_RPN_MASK) >> PAGE_SHIFT; -} - /* Generic modifiers for PTE bits */ static inline pte_t pte_wrprotect(pte_t pte) { @@ -600,7 +596,7 @@ static inline pte_t pte_mkexec(pte_t pte) return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_EXEC)); } -static inline pte_t pte_mkwrite(pte_t pte) +static inline pte_t pte_mkwrite_novma(pte_t pte) { /* * write implies read, hence set both @@ -921,8 +917,29 @@ static inline pud_t pte_pud(pte_t pte) { return __pud_raw(pte_raw(pte)); } + +static inline pte_t *pudp_ptep(pud_t *pud) +{ + return (pte_t *)pud; +} + +#define pud_pfn(pud) pte_pfn(pud_pte(pud)) +#define pud_dirty(pud) pte_dirty(pud_pte(pud)) +#define pud_young(pud) pte_young(pud_pte(pud)) +#define pud_mkold(pud) pte_pud(pte_mkold(pud_pte(pud))) +#define pud_wrprotect(pud) pte_pud(pte_wrprotect(pud_pte(pud))) +#define pud_mkdirty(pud) pte_pud(pte_mkdirty(pud_pte(pud))) +#define pud_mkclean(pud) pte_pud(pte_mkclean(pud_pte(pud))) +#define pud_mkyoung(pud) pte_pud(pte_mkyoung(pud_pte(pud))) +#define pud_mkwrite(pud) pte_pud(pte_mkwrite_novma(pud_pte(pud))) #define pud_write(pud) pte_write(pud_pte(pud)) +#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY +#define pud_soft_dirty(pmd) pte_soft_dirty(pud_pte(pud)) +#define pud_mksoft_dirty(pmd) pte_pud(pte_mksoft_dirty(pud_pte(pud))) +#define pud_clear_soft_dirty(pmd) pte_pud(pte_clear_soft_dirty(pud_pte(pud))) +#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */ + static inline int pud_bad(pud_t pud) { if (radix_enabled()) @@ -1071,7 +1088,7 @@ static inline pte_t *pmdp_ptep(pmd_t *pmd) #define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd))) #define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd))) #define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd))) -#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd))) +#define pmd_mkwrite_novma(pmd) pte_pmd(pte_mkwrite_novma(pmd_pte(pmd))) #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY #define pmd_soft_dirty(pmd) pte_soft_dirty(pmd_pte(pmd)) @@ -1115,15 +1132,24 @@ static inline bool pmd_access_permitted(pmd_t pmd, bool write) #ifdef CONFIG_TRANSPARENT_HUGEPAGE extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot); +extern pud_t pfn_pud(unsigned long pfn, pgprot_t pgprot); extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot); extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot); extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd); +extern void set_pud_at(struct mm_struct *mm, unsigned long addr, + pud_t *pudp, pud_t pud); + static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd) { } +static inline void update_mmu_cache_pud(struct vm_area_struct *vma, + unsigned long addr, pud_t *pud) +{ +} + extern int hash__has_transparent_hugepage(void); static inline int has_transparent_hugepage(void) { @@ -1133,6 +1159,14 @@ static inline int has_transparent_hugepage(void) } #define has_transparent_hugepage has_transparent_hugepage +static inline int has_transparent_pud_hugepage(void) +{ + if (radix_enabled()) + return radix__has_transparent_pud_hugepage(); + return 0; +} +#define has_transparent_pud_hugepage has_transparent_pud_hugepage + static inline unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, unsigned long clr, unsigned long set) @@ -1142,6 +1176,16 @@ 
pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, return hash__pmd_hugepage_update(mm, addr, pmdp, clr, set); } +static inline unsigned long +pud_hugepage_update(struct mm_struct *mm, unsigned long addr, pud_t *pudp, + unsigned long clr, unsigned long set) +{ + if (radix_enabled()) + return radix__pud_hugepage_update(mm, addr, pudp, clr, set); + BUG(); + return pud_val(*pudp); +} + /* * returns true for pmd migration entries, THP, devmap, hugetlb * But compile time dependent on THP config @@ -1151,6 +1195,11 @@ static inline int pmd_large(pmd_t pmd) return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE)); } +static inline int pud_large(pud_t pud) +{ + return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE)); +} + /* * For radix we should always find H_PAGE_HASHPTE zero. Hence * the below will work for radix too @@ -1166,6 +1215,17 @@ static inline int __pmdp_test_and_clear_young(struct mm_struct *mm, return ((old & _PAGE_ACCESSED) != 0); } +static inline int __pudp_test_and_clear_young(struct mm_struct *mm, + unsigned long addr, pud_t *pudp) +{ + unsigned long old; + + if ((pud_raw(*pudp) & cpu_to_be64(_PAGE_ACCESSED | H_PAGE_HASHPTE)) == 0) + return 0; + old = pud_hugepage_update(mm, addr, pudp, _PAGE_ACCESSED, 0); + return ((old & _PAGE_ACCESSED) != 0); +} + #define __HAVE_ARCH_PMDP_SET_WRPROTECT static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp) @@ -1174,6 +1234,14 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr, pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0); } +#define __HAVE_ARCH_PUDP_SET_WRPROTECT +static inline void pudp_set_wrprotect(struct mm_struct *mm, unsigned long addr, + pud_t *pudp) +{ + if (pud_write(*pudp)) + pud_hugepage_update(mm, addr, pudp, _PAGE_WRITE, 0); +} + /* * Only returns true for a THP. False for pmd migration entry. * We also need to return true when we come across a pte that @@ -1195,6 +1263,17 @@ static inline int pmd_trans_huge(pmd_t pmd) return hash__pmd_trans_huge(pmd); } +static inline int pud_trans_huge(pud_t pud) +{ + if (!pud_present(pud)) + return false; + + if (radix_enabled()) + return radix__pud_trans_huge(pud); + return 0; +} + + #define __HAVE_ARCH_PMD_SAME static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) { @@ -1203,6 +1282,15 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) return hash__pmd_same(pmd_a, pmd_b); } +#define pud_same pud_same +static inline int pud_same(pud_t pud_a, pud_t pud_b) +{ + if (radix_enabled()) + return radix__pud_same(pud_a, pud_b); + return hash__pud_same(pud_a, pud_b); +} + + static inline pmd_t __pmd_mkhuge(pmd_t pmd) { if (radix_enabled()) @@ -1210,6 +1298,14 @@ static inline pmd_t __pmd_mkhuge(pmd_t pmd) return hash__pmd_mkhuge(pmd); } +static inline pud_t __pud_mkhuge(pud_t pud) +{ + if (radix_enabled()) + return radix__pud_mkhuge(pud); + BUG(); + return pud; +} + /* * pfn_pmd return a pmd_t that can be used as pmd pte entry. 
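__pudp_test_and_clear_young() above follows the PMD pattern exactly: peek at the entry first and return early when the accessed bit is already clear, so the atomic read-modify-write only happens when there is something to clear. A C11 model of that shape; the bit position is invented and the H_PAGE_HASHPTE part of the kernel's pre-check is omitted:

#include <stdatomic.h>
#include <stdio.h>

#define PAGE_ACCESSED (1UL << 8)        /* illustrative bit position */

static int test_and_clear_young(_Atomic unsigned long *entry)
{
        unsigned long old;

        /* Skip the atomic RMW when the bit is already clear. */
        if (!(atomic_load_explicit(entry, memory_order_relaxed) & PAGE_ACCESSED))
                return 0;

        old = atomic_fetch_and_explicit(entry, ~PAGE_ACCESSED,
                                        memory_order_relaxed);
        return (old & PAGE_ACCESSED) != 0;
}

int main(void)
{
        _Atomic unsigned long entry = PAGE_ACCESSED | 0x1;

        printf("%d\n", test_and_clear_young(&entry));   /* 1: was young */
        printf("%d\n", test_and_clear_young(&entry));   /* 0: early return */
        return 0;
}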
*/ @@ -1225,14 +1321,34 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd) return pmd; } +static inline pud_t pud_mkhuge(pud_t pud) +{ +#ifdef CONFIG_DEBUG_VM + if (radix_enabled()) + WARN_ON((pud_raw(pud) & cpu_to_be64(_PAGE_PTE)) == 0); + else + WARN_ON(1); +#endif + return pud; +} + + #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS extern int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, pmd_t entry, int dirty); +#define __HAVE_ARCH_PUDP_SET_ACCESS_FLAGS +extern int pudp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pud_t *pudp, + pud_t entry, int dirty); #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG extern int pmdp_test_and_clear_young(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp); +#define __HAVE_ARCH_PUDP_TEST_AND_CLEAR_YOUNG +extern int pudp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long address, pud_t *pudp); + #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, @@ -1243,6 +1359,16 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, return hash__pmdp_huge_get_and_clear(mm, addr, pmdp); } +#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR +static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm, + unsigned long addr, pud_t *pudp) +{ + if (radix_enabled()) + return radix__pudp_huge_get_and_clear(mm, addr, pudp); + BUG(); + return *pudp; +} + static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { @@ -1257,6 +1383,11 @@ pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmdp, int full); +#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL +pud_t pudp_huge_get_and_clear_full(struct vm_area_struct *vma, + unsigned long addr, + pud_t *pudp, int full); + #define __HAVE_ARCH_PGTABLE_DEPOSIT static inline void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, pgtable_t pgtable) @@ -1305,6 +1436,14 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd) return hash__pmd_mkdevmap(pmd); } +static inline pud_t pud_mkdevmap(pud_t pud) +{ + if (radix_enabled()) + return radix__pud_mkdevmap(pud); + BUG(); + return pud; +} + static inline int pmd_devmap(pmd_t pmd) { return pte_devmap(pmd_pte(pmd)); @@ -1312,7 +1451,7 @@ static inline int pmd_devmap(pmd_t pmd) static inline int pud_devmap(pud_t pud) { - return 0; + return pte_devmap(pud_pte(pud)); } static inline int pgd_devmap(pgd_t pgd) @@ -1321,16 +1460,6 @@ static inline int pgd_devmap(pgd_t pgd) } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ -static inline int pud_pfn(pud_t pud) -{ - /* - * Currently all calls to pud_pfn() are gated around a pud_devmap() - * check so this should never be used. If it grows another user we - * want to know about it. 
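Most of the new pud_* accessors in this series are one-liners that reuse the pte_* implementations through the pud_pte()/pte_pud() raw-value conversions, so the bit logic exists in exactly one place. The same trick was already used for the pmd_* ops. The pattern in isolation, with an invented dirty-bit position:

#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t val; } pte_t;
typedef struct { uint64_t val; } pud_t;

#define PAGE_DIRTY (1ULL << 7)          /* illustrative bit position */

static pte_t pud_pte(pud_t pud) { return (pte_t){ pud.val }; }
static pud_t pte_pud(pte_t pte) { return (pud_t){ pte.val }; }

static pte_t pte_mkdirty(pte_t pte) { pte.val |= PAGE_DIRTY; return pte; }
static int   pte_dirty(pte_t pte)   { return (pte.val & PAGE_DIRTY) != 0; }

/* PUD variants fall out of the PTE ones, as in the hunks above. */
static pud_t pud_mkdirty(pud_t pud) { return pte_pud(pte_mkdirty(pud_pte(pud))); }
static int   pud_dirty(pud_t pud)   { return pte_dirty(pud_pte(pud)); }

int main(void)
{
        pud_t pud = { 0 };

        pud = pud_mkdirty(pud);
        printf("dirty: %d\n", pud_dirty(pud));
        return 0;
}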
- */ - BUILD_BUG(); - return 0; -} #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *); void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long, diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h index 686001eda936..357e23a403d3 100644 --- a/arch/powerpc/include/asm/book3s/64/radix.h +++ b/arch/powerpc/include/asm/book3s/64/radix.h @@ -250,6 +250,10 @@ static inline int radix__pud_bad(pud_t pud) return !!(pud_val(pud) & RADIX_PUD_BAD_BITS); } +static inline int radix__pud_same(pud_t pud_a, pud_t pud_b) +{ + return ((pud_raw(pud_a) ^ pud_raw(pud_b)) == 0); +} static inline int radix__p4d_bad(p4d_t p4d) { @@ -268,9 +272,22 @@ static inline pmd_t radix__pmd_mkhuge(pmd_t pmd) return __pmd(pmd_val(pmd) | _PAGE_PTE); } +static inline int radix__pud_trans_huge(pud_t pud) +{ + return (pud_val(pud) & (_PAGE_PTE | _PAGE_DEVMAP)) == _PAGE_PTE; +} + +static inline pud_t radix__pud_mkhuge(pud_t pud) +{ + return __pud(pud_val(pud) | _PAGE_PTE); +} + extern unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, unsigned long clr, unsigned long set); +extern unsigned long radix__pud_hugepage_update(struct mm_struct *mm, unsigned long addr, + pud_t *pudp, unsigned long clr, + unsigned long set); extern pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp); extern void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, @@ -278,6 +295,9 @@ extern void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, extern pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); extern pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp); +pud_t radix__pudp_huge_get_and_clear(struct mm_struct *mm, + unsigned long addr, pud_t *pudp); + static inline int radix__has_transparent_hugepage(void) { /* For radix 2M at PMD level means thp */ @@ -285,6 +305,14 @@ static inline int radix__has_transparent_hugepage(void) return 1; return 0; } + +static inline int radix__has_transparent_pud_hugepage(void) +{ + /* For radix 1G at PUD level means pud hugepage support */ + if (mmu_psize_defs[MMU_PAGE_1G].shift == PUD_SHIFT) + return 1; + return 0; +} #endif static inline pmd_t radix__pmd_mkdevmap(pmd_t pmd) @@ -292,9 +320,20 @@ static inline pmd_t radix__pmd_mkdevmap(pmd_t pmd) return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP)); } +static inline pud_t radix__pud_mkdevmap(pud_t pud) +{ + return __pud(pud_val(pud) | (_PAGE_PTE | _PAGE_DEVMAP)); +} + +struct vmem_altmap; +struct dev_pagemap; extern int __meminit radix__vmemmap_create_mapping(unsigned long start, unsigned long page_size, unsigned long phys); +int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, + int node, struct vmem_altmap *altmap); +void __ref radix__vmemmap_free(unsigned long start, unsigned long end, + struct vmem_altmap *altmap); extern void radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size); @@ -325,5 +364,15 @@ int radix__remove_section_mapping(unsigned long start, unsigned long end); void radix__kernel_map_pages(struct page *page, int numpages, int enable); +#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP +#define vmemmap_can_optimize vmemmap_can_optimize +bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap); +#endif + +#define vmemmap_populate_compound_pages 
vmemmap_populate_compound_pages +int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn, + unsigned long start, + unsigned long end, int node, + struct dev_pagemap *pgmap); #endif /* __ASSEMBLY__ */ #endif diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h index 77797a2a82eb..a38542259fab 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h @@ -68,6 +68,8 @@ void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start, unsigned long end, int psize); extern void radix__flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); +extern void radix__flush_pud_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end); extern void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end); diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h index 0d0c1447ecf0..1950c1b825b4 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h @@ -5,6 +5,7 @@ #define MMU_NO_CONTEXT ~0UL #include <linux/mm_types.h> +#include <linux/mmu_notifier.h> #include <asm/book3s/64/tlbflush-hash.h> #include <asm/book3s/64/tlbflush-radix.h> @@ -50,6 +51,14 @@ static inline void flush_pmd_tlb_range(struct vm_area_struct *vma, radix__flush_pmd_tlb_range(vma, start, end); } +#define __HAVE_ARCH_FLUSH_PUD_TLB_RANGE +static inline void flush_pud_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ + if (radix_enabled()) + radix__flush_pud_tlb_range(vma, start, end); +} + #define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long start, diff --git a/arch/powerpc/include/asm/book3s/pgtable.h b/arch/powerpc/include/asm/book3s/pgtable.h index d18b748ea3ae..3b7bd36a2321 100644 --- a/arch/powerpc/include/asm/book3s/pgtable.h +++ b/arch/powerpc/include/asm/book3s/pgtable.h @@ -9,13 +9,6 @@ #endif #ifndef __ASSEMBLY__ -/* Insert a PTE, top-level function is out of line. It uses an inline - * low level function in the respective pgtable-* files - */ -extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, - pte_t pte); - - #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, pte_t *ptep, pte_t entry, int dirty); @@ -36,7 +29,9 @@ void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t * corresponding HPTE into the hash table ahead of time, instead of * waiting for the inevitable extra hash-table miss exception. 
*/ -static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) +static inline void update_mmu_cache_range(struct vm_fault *vmf, + struct vm_area_struct *vma, unsigned long address, + pte_t *ptep, unsigned int nr) { if (IS_ENABLED(CONFIG_PPC32) && !mmu_has_feature(MMU_FTR_HPTE_TABLE)) return; diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h index ef42adb44aa3..1db485aacbd9 100644 --- a/arch/powerpc/include/asm/bug.h +++ b/arch/powerpc/include/asm/bug.h @@ -4,14 +4,13 @@ #ifdef __KERNEL__ #include <asm/asm-compat.h> -#include <asm/extable.h> #ifdef CONFIG_BUG #ifdef __ASSEMBLY__ #include <asm/asm-offsets.h> #ifdef CONFIG_DEBUG_BUGVERBOSE -.macro __EMIT_BUG_ENTRY addr,file,line,flags +.macro EMIT_BUG_ENTRY addr,file,line,flags .section __bug_table,"aw" 5001: .4byte \addr - . .4byte 5002f - . @@ -23,7 +22,7 @@ .previous .endm #else -.macro __EMIT_BUG_ENTRY addr,file,line,flags +.macro EMIT_BUG_ENTRY addr,file,line,flags .section __bug_table,"aw" 5001: .4byte \addr - . .short \flags @@ -32,18 +31,6 @@ .endm #endif /* verbose */ -.macro EMIT_WARN_ENTRY addr,file,line,flags - EX_TABLE(\addr,\addr+4) - __EMIT_BUG_ENTRY \addr,\file,\line,\flags -.endm - -.macro EMIT_BUG_ENTRY addr,file,line,flags - .if \flags & 1 /* BUGFLAG_WARNING */ - .err /* Use EMIT_WARN_ENTRY for warnings */ - .endif - __EMIT_BUG_ENTRY \addr,\file,\line,\flags -.endm - #else /* !__ASSEMBLY__ */ /* _EMIT_BUG_ENTRY expects args %0,%1,%2,%3 to be FILE, LINE, flags and sizeof(struct bug_entry), respectively */ @@ -73,16 +60,6 @@ "i" (sizeof(struct bug_entry)), \ ##__VA_ARGS__) -#define WARN_ENTRY(insn, flags, label, ...) \ - asm_volatile_goto( \ - "1: " insn "\n" \ - EX_TABLE(1b, %l[label]) \ - _EMIT_BUG_ENTRY \ - : : "i" (__FILE__), "i" (__LINE__), \ - "i" (flags), \ - "i" (sizeof(struct bug_entry)), \ - ##__VA_ARGS__ : : label) - /* * BUG_ON() and WARN_ON() do their best to cooperate with compile-time * optimisations. 
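book3s/pgtable.h converts update_mmu_cache() into update_mmu_cache_range(), which takes the originating fault (possibly NULL), the first PTE, and a count, so a run of consecutive PTEs can be preloaded in one call. In the generic scheme a single-page update then becomes the range call with nr == 1; a stubbed sketch of that shape, where the compatibility #define is an assumption and not part of this hunk:

#include <stddef.h>
#include <stdio.h>

struct vm_fault;                        /* opaque here */
struct vm_area_struct { int dummy; };
typedef unsigned long pte_t;

/* Range hook: preload MMU state for @nr consecutive PTEs from @ptep. */
static void update_mmu_cache_range(struct vm_fault *vmf,
                                   struct vm_area_struct *vma,
                                   unsigned long address, pte_t *ptep,
                                   unsigned int nr)
{
        printf("preload %u pte(s) at %#lx\n", nr, address);
        (void)vmf; (void)vma; (void)ptep;
}

/* Single-page callers keep working via an nr == 1 wrapper (assumed form). */
#define update_mmu_cache(vma, addr, ptep) \
        update_mmu_cache_range(NULL, vma, addr, ptep, 1)

int main(void)
{
        struct vm_area_struct vma;
        pte_t pte = 0;

        update_mmu_cache(&vma, 0x10000UL, &pte);
        return 0;
}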
However depending on the complexity of the condition @@ -95,16 +72,7 @@ } while (0) #define HAVE_ARCH_BUG -#define __WARN_FLAGS(flags) do { \ - __label__ __label_warn_on; \ - \ - WARN_ENTRY("twi 31, 0, 0", BUGFLAG_WARNING | (flags), __label_warn_on); \ - barrier_before_unreachable(); \ - __builtin_unreachable(); \ - \ -__label_warn_on: \ - break; \ -} while (0) +#define __WARN_FLAGS(flags) BUG_ENTRY("twi 31, 0, 0", BUGFLAG_WARNING | (flags)) #ifdef CONFIG_PPC64 #define BUG_ON(x) do { \ @@ -117,25 +85,15 @@ __label_warn_on: \ } while (0) #define WARN_ON(x) ({ \ - bool __ret_warn_on = false; \ - do { \ - if (__builtin_constant_p((x))) { \ - if (!(x)) \ - break; \ + int __ret_warn_on = !!(x); \ + if (__builtin_constant_p(__ret_warn_on)) { \ + if (__ret_warn_on) \ __WARN(); \ - __ret_warn_on = true; \ - } else { \ - __label__ __label_warn_on; \ - \ - WARN_ENTRY(PPC_TLNEI " %4, 0", \ - BUGFLAG_WARNING | BUGFLAG_TAINT(TAINT_WARN), \ - __label_warn_on, \ - "r" ((__force long)(x))); \ - break; \ -__label_warn_on: \ - __ret_warn_on = true; \ - } \ - } while (0); \ + } else { \ + BUG_ENTRY(PPC_TLNEI " %4, 0", \ + BUGFLAG_WARNING | BUGFLAG_TAINT(TAINT_WARN), \ + "r" (__ret_warn_on)); \ + } \ unlikely(__ret_warn_on); \ }) @@ -148,14 +106,13 @@ __label_warn_on: \ #ifdef __ASSEMBLY__ .macro EMIT_BUG_ENTRY addr,file,line,flags .endm -.macro EMIT_WARN_ENTRY addr,file,line,flags -.endm #else /* !__ASSEMBLY__ */ #define _EMIT_BUG_ENTRY -#define _EMIT_WARN_ENTRY #endif #endif /* CONFIG_BUG */ +#define EMIT_WARN_ENTRY EMIT_BUG_ENTRY + #include <asm-generic/bug.h> #ifndef __ASSEMBLY__ @@ -163,6 +120,7 @@ __label_warn_on: \ struct pt_regs; void hash__do_page_fault(struct pt_regs *); void bad_page_fault(struct pt_regs *, int); +void emulate_single_step(struct pt_regs *regs); extern void _exception(int, struct pt_regs *, int, unsigned long); extern void _exception_pkey(struct pt_regs *, unsigned long, int); extern void die(const char *, struct pt_regs *, long); diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h index 7564dd4fd12b..ef7d2de33b89 100644 --- a/arch/powerpc/include/asm/cacheflush.h +++ b/arch/powerpc/include/asm/cacheflush.h @@ -35,13 +35,19 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end) * It just marks the page as not i-cache clean. We do the i-cache * flush later when the page is given to a user process, if necessary. 
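The bug.h rework above drops the asm-goto WARN_ENTRY variants and returns WARN_ON() to a simpler shape: evaluate the condition once into an int, let __builtin_constant_p() pick a compile-time path (plain __WARN() when the condition folds to true), and otherwise hand the value to a conditional trap (PPC_TLNEI) via BUG_ENTRY. A compilable model of the constant-folding structure, with the trap replaced by a stub; it uses a GNU C statement expression, as the original does:

#include <stdio.h>

static void warn_slowpath(const char *file, int line)
{
        fprintf(stderr, "WARNING at %s:%d\n", file, line);
}

#define WARN_ON(x) ({                                           \
        int __ret_warn_on = !!(x);                              \
        if (__builtin_constant_p(__ret_warn_on)) {              \
                if (__ret_warn_on)                              \
                        warn_slowpath(__FILE__, __LINE__);      \
        } else if (__ret_warn_on) {                             \
                /* stands in for the conditional trap */        \
                warn_slowpath(__FILE__, __LINE__);              \
        }                                                       \
        __ret_warn_on;                                          \
})

int main(void)
{
        int x = 41;

        if (WARN_ON(x != 42))
                puts("carrying on after the warning");
        return 0;
}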
*/ -static inline void flush_dcache_page(struct page *page) +static inline void flush_dcache_folio(struct folio *folio) { if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) return; /* avoid an atomic op if possible */ - if (test_bit(PG_dcache_clean, &page->flags)) - clear_bit(PG_dcache_clean, &page->flags); + if (test_bit(PG_dcache_clean, &folio->flags)) + clear_bit(PG_dcache_clean, &folio->flags); +} +#define flush_dcache_folio flush_dcache_folio + +static inline void flush_dcache_page(struct page *page) +{ + flush_dcache_folio(page_folio(page)); } void flush_icache_range(unsigned long start, unsigned long stop); @@ -51,7 +57,7 @@ void flush_icache_user_page(struct vm_area_struct *vma, struct page *page, unsigned long addr, int len); #define flush_icache_user_page flush_icache_user_page -void flush_dcache_icache_page(struct page *page); +void flush_dcache_icache_folio(struct folio *folio); /** * flush_dcache_range(): Write any modified data cache blocks out to memory and diff --git a/arch/powerpc/include/asm/cpm2.h b/arch/powerpc/include/asm/cpm2.h index 9ee192a6c5d7..249d43cc6427 100644 --- a/arch/powerpc/include/asm/cpm2.h +++ b/arch/powerpc/include/asm/cpm2.h @@ -1080,6 +1080,9 @@ typedef struct im_idma { #define FCC2_MEM_OFFSET FCC_MEM_OFFSET(1) #define FCC3_MEM_OFFSET FCC_MEM_OFFSET(2) +/* Pipeline Maximum Depth */ +#define MPC82XX_BCR_PLDP 0x00800000 + /* Clocks and GRG's */ enum cpm_clk_dir { diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 443a9d482b15..8765d5158324 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -252,7 +252,7 @@ static inline void cpu_feature_keys_init(void) { } * This is also required by 52xx family. */ #if defined(CONFIG_SMP) || defined(CONFIG_MPC10X_BRIDGE) \ - || defined(CONFIG_PPC_83xx) || defined(CONFIG_8260) \ + || defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_82xx) \ || defined(CONFIG_PPC_MPC52xx) #define CPU_FTR_COMMON CPU_FTR_NEED_COHERENT #else diff --git a/arch/powerpc/include/asm/dtl.h b/arch/powerpc/include/asm/dtl.h index 4bcb9f9ac764..d6f43d149f8d 100644 --- a/arch/powerpc/include/asm/dtl.h +++ b/arch/powerpc/include/asm/dtl.h @@ -39,6 +39,5 @@ extern rwlock_t dtl_access_lock; extern void register_dtl_buffer(int cpu); extern void alloc_dtl_buffers(unsigned long *time_limit); -extern long hcall_vphn(unsigned long cpu, u64 flags, __be32 *associativity); #endif /* _ASM_POWERPC_DTL_H */ diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h index a26ca097d032..79f1c480b5eb 100644 --- a/arch/powerpc/include/asm/elf.h +++ b/arch/powerpc/include/asm/elf.h @@ -12,14 +12,8 @@ /* * This is used to ensure we don't load something for the wrong architecture. - * 64le only supports ELFv2 64-bit binaries (64be supports v1 and v2). 
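In the cacheflush.h hunk above, the deferred-icache bookkeeping moves from the page to the folio: flush_dcache_folio() clears PG_dcache_clean, testing the bit first to avoid a needless atomic op, and flush_dcache_page() shrinks to a wrapper that resolves page_folio(). A userspace model with stand-in structures:

#include <stdatomic.h>
#include <stdio.h>

struct folio { _Atomic unsigned long flags; };
struct page  { struct folio *folio; };

#define PG_dcache_clean (1UL << 0)

static struct folio *page_folio(struct page *page) { return page->folio; }

static void flush_dcache_folio(struct folio *folio)
{
        /* Avoid the atomic op when the bit is already clear. */
        if (atomic_load(&folio->flags) & PG_dcache_clean)
                atomic_fetch_and(&folio->flags, ~PG_dcache_clean);
}

static void flush_dcache_page(struct page *page)
{
        flush_dcache_folio(page_folio(page));
}

int main(void)
{
        struct folio f = { PG_dcache_clean };
        struct page p = { &f };

        flush_dcache_page(&p);
        printf("PG_dcache_clean now: %lu\n",
               (unsigned long)(atomic_load(&f.flags) & PG_dcache_clean));
        return 0;
}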
*/ -#if defined(CONFIG_PPC64) && defined(CONFIG_CPU_LITTLE_ENDIAN) -#define elf_check_arch(x) (((x)->e_machine == ELF_ARCH) && \ - (((x)->e_flags & 0x3) == 0x2)) -#else #define elf_check_arch(x) ((x)->e_machine == ELF_ARCH) -#endif #define compat_elf_check_arch(x) ((x)->e_machine == EM_PPC) #define CORE_DUMP_USE_REGSET diff --git a/arch/powerpc/include/asm/fb.h b/arch/powerpc/include/asm/fb.h index 5f1a2e5f7654..3cecf14d51de 100644 --- a/arch/powerpc/include/asm/fb.h +++ b/arch/powerpc/include/asm/fb.h @@ -2,18 +2,20 @@ #ifndef _ASM_FB_H_ #define _ASM_FB_H_ -#include <linux/fs.h> - #include <asm/page.h> -static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, - unsigned long off) +static inline pgprot_t pgprot_framebuffer(pgprot_t prot, + unsigned long vm_start, unsigned long vm_end, + unsigned long offset) { - vma->vm_page_prot = phys_mem_access_prot(file, off >> PAGE_SHIFT, - vma->vm_end - vma->vm_start, - vma->vm_page_prot); + /* + * PowerPC's implementation of phys_mem_access_prot() does + * not use the file argument. Set it to NULL in preparation + * of later updates to the interface. + */ + return phys_mem_access_prot(NULL, PHYS_PFN(offset), vm_end - vm_start, prot); } -#define fb_pgprotect fb_pgprotect +#define pgprot_framebuffer pgprot_framebuffer #include <asm-generic/fb.h> diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h index ac605fc369c4..77824bd289a3 100644 --- a/arch/powerpc/include/asm/feature-fixups.h +++ b/arch/powerpc/include/asm/feature-fixups.h @@ -292,6 +292,7 @@ extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup; extern long __start__btb_flush_fixup, __stop__btb_flush_fixup; void apply_feature_fixups(void); +void update_mmu_feature_fixups(unsigned long mask); void setup_feature_keys(void); #endif diff --git a/arch/powerpc/include/asm/fs_pd.h b/arch/powerpc/include/asm/fs_pd.h deleted file mode 100644 index 8def56ec05c6..000000000000 --- a/arch/powerpc/include/asm/fs_pd.h +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Platform information definitions. - * - * 2006 (c) MontaVista Software, Inc. - * Vitaly Bordug <vbordug@ru.mvista.com> - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. 
- */ - -#ifndef FS_PD_H -#define FS_PD_H -#include <sysdev/fsl_soc.h> -#include <asm/time.h> - -#ifdef CONFIG_CPM2 -#include <asm/cpm2.h> - -#if defined(CONFIG_8260) -#include <asm/mpc8260.h> -#endif - -#define cpm2_map(member) (&cpm2_immr->member) -#define cpm2_map_size(member, size) (&cpm2_immr->member) -#define cpm2_unmap(addr) do {} while(0) -#endif - -#ifdef CONFIG_PPC_8xx -#include <asm/8xx_immap.h> - -extern immap_t __iomem *mpc8xx_immr; - -#define immr_map(member) (&mpc8xx_immr->member) -#define immr_map_size(member, size) (&mpc8xx_immr->member) -#define immr_unmap(addr) do {} while (0) -#endif - -static inline int uart_baudrate(void) -{ - return get_baudrate(); -} - -static inline int uart_clock(void) -{ - return ppc_proc_freq; -} - -#endif diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h index 91c049d51d0e..9e5a39b6a311 100644 --- a/arch/powerpc/include/asm/ftrace.h +++ b/arch/powerpc/include/asm/ftrace.h @@ -11,8 +11,8 @@ #define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR /* Ignore unused weak functions which will have larger offsets */ -#ifdef CONFIG_MPROFILE_KERNEL -#define FTRACE_MCOUNT_MAX_OFFSET 12 +#if defined(CONFIG_MPROFILE_KERNEL) || defined(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY) +#define FTRACE_MCOUNT_MAX_OFFSET 16 #elif defined(CONFIG_PPC32) #define FTRACE_MCOUNT_MAX_OFFSET 8 #endif @@ -22,18 +22,26 @@ extern void _mcount(void); static inline unsigned long ftrace_call_adjust(unsigned long addr) { - /* relocation of mcount call site is the same as the address */ + if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY)) + addr += MCOUNT_INSN_SIZE; + return addr; } unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp); +struct module; +struct dyn_ftrace; struct dyn_arch_ftrace { struct module *mod; }; #ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS +#define ftrace_need_init_nop() (true) +int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec); +#define ftrace_init_nop ftrace_init_nop + struct ftrace_regs { struct pt_regs regs; }; @@ -124,15 +132,19 @@ static inline u8 this_cpu_get_ftrace_enabled(void) { return get_paca()->ftrace_enabled; } - -void ftrace_free_init_tramp(void); #else /* CONFIG_PPC64 */ static inline void this_cpu_disable_ftrace(void) { } static inline void this_cpu_enable_ftrace(void) { } static inline void this_cpu_set_ftrace_enabled(u8 ftrace_enabled) { } static inline u8 this_cpu_get_ftrace_enabled(void) { return 1; } -static inline void ftrace_free_init_tramp(void) { } #endif /* CONFIG_PPC64 */ + +#ifdef CONFIG_FUNCTION_TRACER +extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[]; +void ftrace_free_init_tramp(void); +#else +static inline void ftrace_free_init_tramp(void) { } +#endif #endif /* !__ASSEMBLY__ */ #endif /* _ASM_POWERPC_FTRACE */ diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h index 84d39fd42f71..66db0147d5b4 100644 --- a/arch/powerpc/include/asm/hw_breakpoint.h +++ b/arch/powerpc/include/asm/hw_breakpoint.h @@ -18,6 +18,7 @@ struct arch_hw_breakpoint { u16 len; /* length of the target data symbol */ u16 hw_len; /* length programmed in hw */ u8 flags; + bool perf_single_step; /* temporarily uninstalled for a perf single step */ }; /* Note: Don't change the first 6 bits below as they are in the same order diff --git a/arch/powerpc/include/asm/ibmebus.h b/arch/powerpc/include/asm/ibmebus.h index 088f95b2e14f..6f33253a364a 100644 --- a/arch/powerpc/include/asm/ibmebus.h +++ b/arch/powerpc/include/asm/ibmebus.h @@ 
-46,6 +46,8 @@ #include <linux/of_device.h> #include <linux/of_platform.h> +struct platform_driver; + extern struct bus_type ibmebus_bus_type; int ibmebus_register_driver(struct platform_driver *drv); diff --git a/arch/powerpc/include/asm/ide.h b/arch/powerpc/include/asm/ide.h deleted file mode 100644 index ce87a4441ca3..000000000000 --- a/arch/powerpc/include/asm/ide.h +++ /dev/null @@ -1,18 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 1994-1996 Linus Torvalds & authors - * - * This file contains the powerpc architecture specific IDE code. - */ -#ifndef _ASM_POWERPC_IDE_H -#define _ASM_POWERPC_IDE_H - -#include <linux/compiler.h> -#include <asm/io.h> - -#define __ide_mm_insw(p, a, c) readsw((void __iomem *)(p), (a), (c)) -#define __ide_mm_insl(p, a, c) readsl((void __iomem *)(p), (a), (c)) -#define __ide_mm_outsw(p, a, c) writesw((void __iomem *)(p), (a), (c)) -#define __ide_mm_outsl(p, a, c) writesl((void __iomem *)(p), (a), (c)) - -#endif /* _ASM_POWERPC_IDE_H */ diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index f1e657c9bbe8..0732b743e099 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h @@ -3,11 +3,6 @@ #define _ASM_POWERPC_IO_H #ifdef __KERNEL__ -#define ARCH_HAS_IOREMAP_WC -#ifdef CONFIG_PPC32 -#define ARCH_HAS_IOREMAP_WT -#endif - /* */ @@ -732,9 +727,7 @@ static inline void name at \ #define writel_relaxed(v, addr) writel(v, addr) #define writeq_relaxed(v, addr) writeq(v, addr) -#ifdef CONFIG_GENERIC_IOMAP -#include <asm-generic/iomap.h> -#else +#ifndef CONFIG_GENERIC_IOMAP /* * Here comes the implementation of the IOMAP interfaces. */ @@ -896,8 +889,8 @@ static inline void iosync(void) * */ extern void __iomem *ioremap(phys_addr_t address, unsigned long size); -extern void __iomem *ioremap_prot(phys_addr_t address, unsigned long size, - unsigned long flags); +#define ioremap ioremap +#define ioremap_prot ioremap_prot extern void __iomem *ioremap_wc(phys_addr_t address, unsigned long size); #define ioremap_wc ioremap_wc @@ -911,14 +904,12 @@ void __iomem *ioremap_coherent(phys_addr_t address, unsigned long size); #define ioremap_cache(addr, size) \ ioremap_prot((addr), (size), pgprot_val(PAGE_KERNEL)) -extern void iounmap(volatile void __iomem *addr); +#define iounmap iounmap void __iomem *ioremap_phb(phys_addr_t paddr, unsigned long size); int early_ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size, pgprot_t prot); -void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size, - pgprot_t prot, void *caller); extern void __iomem *__ioremap_caller(phys_addr_t, unsigned long size, pgprot_t prot, void *caller); diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index 34e14dfd8e04..026695943550 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -28,6 +28,9 @@ #define IOMMU_PAGE_MASK(tblptr) (~((1 << (tblptr)->it_page_shift) - 1)) #define IOMMU_PAGE_ALIGN(addr, tblptr) ALIGN(addr, IOMMU_PAGE_SIZE(tblptr)) +#define DIRECT64_PROPNAME "linux,direct64-ddr-window-info" +#define DMA64_PROPNAME "linux,dma64-ddr-window-info" + /* Boot time flags */ extern int iommu_is_off; extern int iommu_force_on; diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h index f257cacb49a9..ba1a5974e714 100644 --- a/arch/powerpc/include/asm/irq.h +++ b/arch/powerpc/include/asm/irq.h @@ -55,7 +55,7 @@ int irq_choose_cpu(const struct cpumask *mask); #if defined(CONFIG_PPC_BOOK3S_64) && 
defined(CONFIG_NMI_IPI) extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask, - bool exclude_self); + int exclude_cpu); #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace #endif diff --git a/arch/powerpc/include/asm/kfence.h b/arch/powerpc/include/asm/kfence.h index 6fd2b4d486c5..424ceef82ae6 100644 --- a/arch/powerpc/include/asm/kfence.h +++ b/arch/powerpc/include/asm/kfence.h @@ -23,7 +23,7 @@ static inline bool arch_kfence_init_pool(void) #ifdef CONFIG_PPC64 static inline bool kfence_protect_page(unsigned long addr, bool protect) { - struct page *page = virt_to_page(addr); + struct page *page = virt_to_page((void *)addr); __kernel_map_pages(page, 1, !protect); diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h index d751ddd08110..ad7e8c5aec3f 100644 --- a/arch/powerpc/include/asm/kup.h +++ b/arch/powerpc/include/asm/kup.h @@ -6,6 +6,12 @@ #define KUAP_WRITE 2 #define KUAP_READ_WRITE (KUAP_READ | KUAP_WRITE) +#ifndef __ASSEMBLY__ +#include <linux/types.h> + +static __always_inline bool kuap_is_disabled(void); +#endif + #ifdef CONFIG_PPC_BOOK3S_64 #include <asm/book3s/64/kup.h> #endif @@ -41,26 +47,24 @@ void setup_kuep(bool disabled); #ifdef CONFIG_PPC_KUAP void setup_kuap(bool disabled); + +static __always_inline bool kuap_is_disabled(void) +{ + return !mmu_has_feature(MMU_FTR_KUAP); +} #else static inline void setup_kuap(bool disabled) { } static __always_inline bool kuap_is_disabled(void) { return true; } -static inline bool +static __always_inline bool __bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write) { return false; } -static inline void __kuap_lock(void) { } -static inline void __kuap_save_and_lock(struct pt_regs *regs) { } -static inline void kuap_user_restore(struct pt_regs *regs) { } -static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr) { } - -static inline unsigned long __kuap_get_and_assert_locked(void) -{ - return 0; -} +static __always_inline void kuap_user_restore(struct pt_regs *regs) { } +static __always_inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr) { } /* * book3s/64/kup-radix.h defines these functions for the !KUAP case to flush @@ -68,11 +72,11 @@ static inline unsigned long __kuap_get_and_assert_locked(void) * platforms. 
*/ #ifndef CONFIG_PPC_BOOK3S_64 -static inline void __allow_user_access(void __user *to, const void __user *from, - unsigned long size, unsigned long dir) { } -static inline void __prevent_user_access(unsigned long dir) { } -static inline unsigned long __prevent_user_access_return(void) { return 0UL; } -static inline void __restore_user_access(unsigned long flags) { } +static __always_inline void allow_user_access(void __user *to, const void __user *from, + unsigned long size, unsigned long dir) { } +static __always_inline void prevent_user_access(unsigned long dir) { } +static __always_inline unsigned long prevent_user_access_return(void) { return 0UL; } +static __always_inline void restore_user_access(unsigned long flags) { } #endif /* CONFIG_PPC_BOOK3S_64 */ #endif /* CONFIG_PPC_KUAP */ @@ -85,29 +89,24 @@ bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write) return __bad_kuap_fault(regs, address, is_write); } -static __always_inline void kuap_assert_locked(void) -{ - if (kuap_is_disabled()) - return; - - if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) - __kuap_get_and_assert_locked(); -} - static __always_inline void kuap_lock(void) { +#ifdef __kuap_lock if (kuap_is_disabled()) return; __kuap_lock(); +#endif } static __always_inline void kuap_save_and_lock(struct pt_regs *regs) { +#ifdef __kuap_save_and_lock if (kuap_is_disabled()) return; __kuap_save_and_lock(regs); +#endif } static __always_inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long amr) @@ -120,46 +119,18 @@ static __always_inline void kuap_kernel_restore(struct pt_regs *regs, unsigned l static __always_inline unsigned long kuap_get_and_assert_locked(void) { - if (kuap_is_disabled()) - return 0; - - return __kuap_get_and_assert_locked(); -} - -#ifndef CONFIG_PPC_BOOK3S_64 -static __always_inline void allow_user_access(void __user *to, const void __user *from, - unsigned long size, unsigned long dir) -{ - if (kuap_is_disabled()) - return; - - __allow_user_access(to, from, size, dir); -} - -static __always_inline void prevent_user_access(unsigned long dir) -{ - if (kuap_is_disabled()) - return; - - __prevent_user_access(dir); -} - -static __always_inline unsigned long prevent_user_access_return(void) -{ - if (kuap_is_disabled()) - return 0; - - return __prevent_user_access_return(); +#ifdef __kuap_get_and_assert_locked + if (!kuap_is_disabled()) + return __kuap_get_and_assert_locked(); +#endif + return 0; } -static __always_inline void restore_user_access(unsigned long flags) +static __always_inline void kuap_assert_locked(void) { - if (kuap_is_disabled()) - return; - - __restore_user_access(flags); + if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) + kuap_get_and_assert_locked(); } -#endif /* CONFIG_PPC_BOOK3S_64 */ static __always_inline void allow_read_from_user(const void __user *from, unsigned long size) { diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index d16d80ad2ae4..b4da8514af43 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -894,7 +894,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids); static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn) { - struct page *page; + struct folio *folio; /* * We can only access pages that the kernel maps * as memory. Bail out for unmapped ones. 
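(Aside: the kuap_lock()/kuap_save_and_lock() wrappers in the kup.h hunks above replace the old static-key test with a preprocessor probe: a platform header that supplies an override such as __kuap_save_and_lock() also defines a macro of the same name, so the generic header can compile the call in only under #ifdef. Below is a minimal userspace sketch of that idiom, assuming nothing beyond standard C; plat_lock and generic_lock are invented names, not kernel API.)

#include <stdio.h>

/* "platform" header: supply an override and advertise it. */
static inline void plat_lock(void)
{
	puts("platform lock");
}
#define plat_lock plat_lock	/* same-name macro marks the override present */

/* "generic" header: compile the call in only when an override exists. */
static inline void generic_lock(void)
{
#ifdef plat_lock
	plat_lock();	/* expands to the function; a self-referential macro does not recurse */
#endif
}

int main(void)
{
	generic_lock();	/* prints "platform lock"; a no-op without the #define */
	return 0;
}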
@@ -903,10 +903,10 @@ static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn) return; /* Clear i-cache for new pages */ - page = pfn_to_page(pfn); - if (!test_bit(PG_dcache_clean, &page->flags)) { - flush_dcache_icache_page(page); - set_bit(PG_dcache_clean, &page->flags); + folio = page_folio(pfn_to_page(pfn)); + if (!test_bit(PG_dcache_clean, &folio->flags)) { + flush_dcache_icache_folio(folio); + set_bit(PG_dcache_clean, &folio->flags); } } diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h index 45492fb5bf22..ec6ced6d7ced 100644 --- a/arch/powerpc/include/asm/local.h +++ b/arch/powerpc/include/asm/local.h @@ -115,23 +115,23 @@ static __inline__ long local_xchg(local_t *l, long n) } /** - * local_add_unless - add unless the number is a given value + * local_add_unless - add unless the number is already a given value * @l: pointer of type local_t * @a: the amount to add to v... * @u: ...unless v is equal to u. * - * Atomically adds @a to @l, so long as it was not @u. - * Returns non-zero if @l was not @u, and zero otherwise. + * Atomically adds @a to @l, if @v was not already @u. + * Returns true if the addition was done. */ -static __inline__ int local_add_unless(local_t *l, long a, long u) +static __inline__ bool local_add_unless(local_t *l, long a, long u) { unsigned long flags; - int ret = 0; + bool ret = false; powerpc_local_irq_pmu_save(flags); if (l->v != u) { l->v += a; - ret = 1; + ret = true; } powerpc_local_irq_pmu_restore(flags); diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h index 34d44cb17c87..61ec2447dabf 100644 --- a/arch/powerpc/include/asm/lppaca.h +++ b/arch/powerpc/include/asm/lppaca.h @@ -6,28 +6,6 @@ #ifndef _ASM_POWERPC_LPPACA_H #define _ASM_POWERPC_LPPACA_H -/* - * The below VPHN macros are outside the __KERNEL__ check since these are - * used for compiling the vphn selftest in userspace - */ - -/* The H_HOME_NODE_ASSOCIATIVITY h_call returns 6 64-bit registers. */ -#define VPHN_REGISTER_COUNT 6 - -/* - * 6 64-bit registers unpacked into up to 24 be32 associativity values. To - * form the complete property we have to add the length in the first cell. - */ -#define VPHN_ASSOC_BUFSIZE (VPHN_REGISTER_COUNT*sizeof(u64)/sizeof(u16) + 1) - -/* - * The H_HOME_NODE_ASSOCIATIVITY hcall takes two values for flags: - * 1 for retrieving associativity information for a guest cpu - * 2 for retrieving associativity information for a host/hypervisor cpu - */ -#define VPHN_FLAG_VCPU 1 -#define VPHN_FLAG_PCPU 2 - #ifdef __KERNEL__ /* @@ -45,6 +23,7 @@ #include <asm/types.h> #include <asm/mmu.h> #include <asm/firmware.h> +#include <asm/paca.h> /* * The lppaca is the "virtual processor area" registered with the hypervisor, @@ -127,13 +106,23 @@ struct lppaca { */ #define LPPACA_OLD_SHARED_PROC 2 -static inline bool lppaca_shared_proc(struct lppaca *l) +#ifdef CONFIG_PPC_PSERIES +/* + * All CPUs should have the same shared proc value, so directly access the PACA + * to avoid false positives from DEBUG_PREEMPT. + */ +static inline bool lppaca_shared_proc(void) { + struct lppaca *l = local_paca->lppaca_ptr; + if (!firmware_has_feature(FW_FEATURE_SPLPAR)) return false; return !!(l->__old_status & LPPACA_OLD_SHARED_PROC); } +#define get_lppaca() (get_paca()->lppaca_ptr) +#endif + /* * SLB shadow buffer structure as defined in the PAPR. The save_area * contains adjacent ESID and VSID pairs for each shadowed SLB. 
The @@ -149,8 +138,6 @@ struct slb_shadow { } save_area[SLB_NUM_BOLTED]; } ____cacheline_aligned; -extern long hcall_vphn(unsigned long cpu, u64 flags, __be32 *associativity); - #endif /* CONFIG_PPC_BOOK3S */ #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_LPPACA_H */ diff --git a/arch/powerpc/include/asm/macio.h b/arch/powerpc/include/asm/macio.h index ff5fd82d9ff0..3a07c62973aa 100644 --- a/arch/powerpc/include/asm/macio.h +++ b/arch/powerpc/include/asm/macio.h @@ -3,7 +3,8 @@ #define __MACIO_ASIC_H__ #ifdef __KERNEL__ -#include <linux/of_device.h> +#include <linux/of.h> +#include <linux/platform_device.h> extern struct bus_type macio_bus_type; diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 94b981152667..52cc25864a1b 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -33,7 +33,7 @@ * key 0 controlling userspace addresses on radix * Key 3 on hash */ -#define MMU_FTR_BOOK3S_KUAP ASM_CONST(0x00000200) +#define MMU_FTR_KUAP ASM_CONST(0x00000200) /* * Supports KUEP feature @@ -144,11 +144,6 @@ typedef pte_t *pgtable_t; -#ifdef CONFIG_PPC_E500 -#include <asm/percpu.h> -DECLARE_PER_CPU(int, next_tlbcam_idx); -#endif - enum { MMU_FTRS_POSSIBLE = #if defined(CONFIG_PPC_BOOK3S_604) @@ -188,7 +183,7 @@ enum { #endif /* CONFIG_PPC_RADIX_MMU */ #endif #ifdef CONFIG_PPC_KUAP - MMU_FTR_BOOK3S_KUAP | + MMU_FTR_KUAP | #endif /* CONFIG_PPC_KUAP */ #ifdef CONFIG_PPC_MEM_KEYS MMU_FTR_PKEY | diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index 57f5017111f4..37bffa0f7918 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -127,6 +127,7 @@ static inline void inc_mm_active_cpus(struct mm_struct *mm) static inline void dec_mm_active_cpus(struct mm_struct *mm) { + VM_WARN_ON_ONCE(atomic_read(&mm->context.active_cpus) <= 0); atomic_dec(&mm->context.active_cpus); } diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h index ac53606c2594..a8e2e8339fb7 100644 --- a/arch/powerpc/include/asm/module.h +++ b/arch/powerpc/include/asm/module.h @@ -75,10 +75,6 @@ struct mod_arch_specific { #endif #ifdef CONFIG_DYNAMIC_FTRACE -# ifdef MODULE - asm(".section .ftrace.tramp,\"ax\",@nobits; .align 3; .previous"); -# endif /* MODULE */ - int module_trampoline_target(struct module *mod, unsigned long trampoline, unsigned long *target); int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs); diff --git a/arch/powerpc/include/asm/mpc8260.h b/arch/powerpc/include/asm/mpc8260.h deleted file mode 100644 index 155114bbd1a2..000000000000 --- a/arch/powerpc/include/asm/mpc8260.h +++ /dev/null @@ -1,22 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Since there are many different boards and no standard configuration, - * we have a unique include file for each. Rather than change every - * file that has to include MPC8260 configuration, they all include - * this one and the configuration switching is done here. 
- */ -#ifdef __KERNEL__ -#ifndef __ASM_POWERPC_MPC8260_H__ -#define __ASM_POWERPC_MPC8260_H__ - -#define MPC82XX_BCR_PLDP 0x00800000 /* Pipeline Maximum Depth */ - -#ifdef CONFIG_8260 - -#ifdef CONFIG_PCI_8260 -#include <platforms/82xx/m82xx_pci.h> -#endif - -#endif /* CONFIG_8260 */ -#endif /* !__ASM_POWERPC_MPC8260_H__ */ -#endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h index de092b04ee1a..92df40c6cc6b 100644 --- a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h @@ -46,7 +46,8 @@ static inline int check_and_get_huge_psize(int shift) } #define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT -void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte); +void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, + pte_t pte, unsigned long sz); #define __HAVE_ARCH_HUGE_PTE_CLEAR static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, diff --git a/arch/powerpc/include/asm/nohash/32/kup-8xx.h b/arch/powerpc/include/asm/nohash/32/kup-8xx.h index c44d97751723..46bc5925e5fd 100644 --- a/arch/powerpc/include/asm/nohash/32/kup-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/kup-8xx.h @@ -9,76 +9,74 @@ #ifndef __ASSEMBLY__ -#include <linux/jump_label.h> - #include <asm/reg.h> -extern struct static_key_false disable_kuap_key; - -static __always_inline bool kuap_is_disabled(void) -{ - return static_branch_unlikely(&disable_kuap_key); -} - -static inline void __kuap_lock(void) -{ -} - -static inline void __kuap_save_and_lock(struct pt_regs *regs) +static __always_inline void __kuap_save_and_lock(struct pt_regs *regs) { regs->kuap = mfspr(SPRN_MD_AP); mtspr(SPRN_MD_AP, MD_APG_KUAP); } +#define __kuap_save_and_lock __kuap_save_and_lock -static inline void kuap_user_restore(struct pt_regs *regs) +static __always_inline void kuap_user_restore(struct pt_regs *regs) { } -static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap) +static __always_inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap) { mtspr(SPRN_MD_AP, regs->kuap); } -static inline unsigned long __kuap_get_and_assert_locked(void) +#ifdef CONFIG_PPC_KUAP_DEBUG +static __always_inline unsigned long __kuap_get_and_assert_locked(void) { - unsigned long kuap; + WARN_ON_ONCE(mfspr(SPRN_MD_AP) >> 16 != MD_APG_KUAP >> 16); - kuap = mfspr(SPRN_MD_AP); + return 0; +} +#define __kuap_get_and_assert_locked __kuap_get_and_assert_locked +#endif - if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) - WARN_ON_ONCE(kuap >> 16 != MD_APG_KUAP >> 16); +static __always_inline void uaccess_begin_8xx(unsigned long val) +{ + asm(ASM_MMU_FTR_IFSET("mtspr %0, %1", "", %2) : : + "i"(SPRN_MD_AP), "r"(val), "i"(MMU_FTR_KUAP) : "memory"); +} - return kuap; +static __always_inline void uaccess_end_8xx(void) +{ + asm(ASM_MMU_FTR_IFSET("mtspr %0, %1", "", %2) : : + "i"(SPRN_MD_AP), "r"(MD_APG_KUAP), "i"(MMU_FTR_KUAP) : "memory"); } -static inline void __allow_user_access(void __user *to, const void __user *from, - unsigned long size, unsigned long dir) +static __always_inline void allow_user_access(void __user *to, const void __user *from, + unsigned long size, unsigned long dir) { - mtspr(SPRN_MD_AP, MD_APG_INIT); + uaccess_begin_8xx(MD_APG_INIT); } -static inline void __prevent_user_access(unsigned long dir) +static __always_inline void prevent_user_access(unsigned long dir) { - mtspr(SPRN_MD_AP, MD_APG_KUAP); + uaccess_end_8xx(); } -static inline 
unsigned long __prevent_user_access_return(void) +static __always_inline unsigned long prevent_user_access_return(void) { unsigned long flags; flags = mfspr(SPRN_MD_AP); - mtspr(SPRN_MD_AP, MD_APG_KUAP); + uaccess_end_8xx(); return flags; } -static inline void __restore_user_access(unsigned long flags) +static __always_inline void restore_user_access(unsigned long flags) { - mtspr(SPRN_MD_AP, flags); + uaccess_begin_8xx(flags); } -static inline bool +static __always_inline bool __bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write) { return !((regs->kuap ^ MD_APG_KUAP) & 0xff000000); diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index fec56d965f00..f99c53a5f184 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -170,8 +170,8 @@ void unmap_kernel_page(unsigned long va); #define pte_clear(mm, addr, ptep) \ do { pte_update(mm, addr, ptep, ~0, 0, 0); } while (0) -#ifndef pte_mkwrite -static inline pte_t pte_mkwrite(pte_t pte) +#ifndef pte_mkwrite_novma +static inline pte_t pte_mkwrite_novma(pte_t pte) { return __pte(pte_val(pte) | _PAGE_RW); } @@ -355,7 +355,7 @@ static inline int pte_young(pte_t pte) #define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT) #else #define pmd_page_vaddr(pmd) \ - ((unsigned long)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1))) + ((const void *)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1))) #define pmd_pfn(pmd) (__pa(pmd_val(pmd)) >> PAGE_SHIFT) #endif diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h index 1a89ebdc3acc..e6fe1d5731f2 100644 --- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h @@ -94,6 +94,13 @@ static inline pte_t pte_wrprotect(pte_t pte) #define pte_wrprotect pte_wrprotect +static inline int pte_read(pte_t pte) +{ + return (pte_val(pte) & _PAGE_RO) != _PAGE_NA; +} + +#define pte_read pte_read + static inline int pte_write(pte_t pte) { return !(pte_val(pte) & _PAGE_RO); @@ -101,12 +108,12 @@ static inline int pte_write(pte_t pte) #define pte_write pte_write -static inline pte_t pte_mkwrite(pte_t pte) +static inline pte_t pte_mkwrite_novma(pte_t pte) { return __pte(pte_val(pte) & ~_PAGE_RO); } -#define pte_mkwrite pte_mkwrite +#define pte_mkwrite_novma pte_mkwrite_novma static inline bool pte_user(pte_t pte) { diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h index 287e25864ffa..eb6891e34cbd 100644 --- a/arch/powerpc/include/asm/nohash/64/pgtable.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h @@ -85,7 +85,7 @@ #ifndef __ASSEMBLY__ /* pte_clear moved to later in this file */ -static inline pte_t pte_mkwrite(pte_t pte) +static inline pte_t pte_mkwrite_novma(pte_t pte) { return __pte(pte_val(pte) | _PAGE_RW); } @@ -127,7 +127,7 @@ static inline pte_t pmd_pte(pmd_t pmd) #define pmd_bad(pmd) (!is_kernel_addr(pmd_val(pmd)) \ || (pmd_val(pmd) & PMD_BAD_BITS)) #define pmd_present(pmd) (!pmd_none(pmd)) -#define pmd_page_vaddr(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS) +#define pmd_page_vaddr(pmd) ((const void *)(pmd_val(pmd) & ~PMD_MASKED_BITS)) extern struct page *pmd_page(pmd_t pmd); #define pmd_pfn(pmd) (page_to_pfn(pmd_page(pmd))) @@ -197,7 +197,7 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm, { unsigned long old; - if (pte_young(*ptep)) + if (!pte_young(*ptep)) return 0; old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0); return (old & 
_PAGE_ACCESSED) != 0; diff --git a/arch/powerpc/include/asm/nohash/kup-booke.h b/arch/powerpc/include/asm/nohash/kup-booke.h index 49bb41ed0816..0c7c3258134c 100644 --- a/arch/powerpc/include/asm/nohash/kup-booke.h +++ b/arch/powerpc/include/asm/nohash/kup-booke.h @@ -3,6 +3,7 @@ #define _ASM_POWERPC_KUP_BOOKE_H_ #include <asm/bug.h> +#include <asm/mmu.h> #ifdef CONFIG_PPC_KUAP @@ -13,32 +14,26 @@ #else -#include <linux/jump_label.h> #include <linux/sched.h> #include <asm/reg.h> -extern struct static_key_false disable_kuap_key; - -static __always_inline bool kuap_is_disabled(void) -{ - return static_branch_unlikely(&disable_kuap_key); -} - -static inline void __kuap_lock(void) +static __always_inline void __kuap_lock(void) { mtspr(SPRN_PID, 0); isync(); } +#define __kuap_lock __kuap_lock -static inline void __kuap_save_and_lock(struct pt_regs *regs) +static __always_inline void __kuap_save_and_lock(struct pt_regs *regs) { regs->kuap = mfspr(SPRN_PID); mtspr(SPRN_PID, 0); isync(); } +#define __kuap_save_and_lock __kuap_save_and_lock -static inline void kuap_user_restore(struct pt_regs *regs) +static __always_inline void kuap_user_restore(struct pt_regs *regs) { if (kuap_is_disabled()) return; @@ -48,7 +43,7 @@ static inline void kuap_user_restore(struct pt_regs *regs) /* Context synchronisation is performed by rfi */ } -static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap) +static __always_inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap) { if (regs->kuap) mtspr(SPRN_PID, current->thread.pid); @@ -56,48 +51,55 @@ static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kua /* Context synchronisation is performed by rfi */ } -static inline unsigned long __kuap_get_and_assert_locked(void) +#ifdef CONFIG_PPC_KUAP_DEBUG +static __always_inline unsigned long __kuap_get_and_assert_locked(void) { - unsigned long kuap = mfspr(SPRN_PID); + WARN_ON_ONCE(mfspr(SPRN_PID)); - if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) - WARN_ON_ONCE(kuap); + return 0; +} +#define __kuap_get_and_assert_locked __kuap_get_and_assert_locked +#endif - return kuap; +static __always_inline void uaccess_begin_booke(unsigned long val) +{ + asm(ASM_MMU_FTR_IFSET("mtspr %0, %1; isync", "", %2) : : + "i"(SPRN_PID), "r"(val), "i"(MMU_FTR_KUAP) : "memory"); } -static inline void __allow_user_access(void __user *to, const void __user *from, - unsigned long size, unsigned long dir) +static __always_inline void uaccess_end_booke(void) { - mtspr(SPRN_PID, current->thread.pid); - isync(); + asm(ASM_MMU_FTR_IFSET("mtspr %0, %1; isync", "", %2) : : + "i"(SPRN_PID), "r"(0), "i"(MMU_FTR_KUAP) : "memory"); } -static inline void __prevent_user_access(unsigned long dir) +static __always_inline void allow_user_access(void __user *to, const void __user *from, + unsigned long size, unsigned long dir) { - mtspr(SPRN_PID, 0); - isync(); + uaccess_begin_booke(current->thread.pid); +} + +static __always_inline void prevent_user_access(unsigned long dir) +{ + uaccess_end_booke(); } -static inline unsigned long __prevent_user_access_return(void) +static __always_inline unsigned long prevent_user_access_return(void) { unsigned long flags = mfspr(SPRN_PID); - mtspr(SPRN_PID, 0); - isync(); + uaccess_end_booke(); return flags; } -static inline void __restore_user_access(unsigned long flags) +static __always_inline void restore_user_access(unsigned long flags) { - if (flags) { - mtspr(SPRN_PID, current->thread.pid); - isync(); - } + if (flags) + uaccess_begin_booke(current->thread.pid); } 
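(Aside: the kup-booke.h conversion above keeps the same lock/unlock contract while routing the SPR writes through the feature-patched uaccess_begin_booke()/uaccess_end_booke() helpers: prevent_user_access_return() closes the user window and hands back the previous state, and restore_user_access() reopens it only if it had been open. A rough userspace model of that pairing follows, with the SPRN_PID SPR replaced by a plain variable; every name here is invented for illustration.)

#include <assert.h>
#include <stdio.h>

static unsigned long fake_pid_spr;		/* stands in for mfspr/mtspr(SPRN_PID) */
static const unsigned long fake_current_pid = 42;	/* ~ current->thread.pid */

/* ~ prevent_user_access_return(): lock and report the prior state */
static unsigned long fake_prevent_return(void)
{
	unsigned long flags = fake_pid_spr;

	fake_pid_spr = 0;	/* PID 0: user accesses would now fault */
	return flags;
}

/* ~ restore_user_access(): reopen only if the window had been open */
static void fake_restore(unsigned long flags)
{
	if (flags)
		fake_pid_spr = fake_current_pid;
}

int main(void)
{
	unsigned long flags;

	fake_pid_spr = fake_current_pid;	/* window open around a user copy */
	flags = fake_prevent_return();		/* e.g. taken on interrupt entry */
	assert(fake_pid_spr == 0);
	fake_restore(flags);			/* back to the pre-interrupt state */
	assert(fake_pid_spr == fake_current_pid);
	puts("save/restore pairing ok");
	return 0;
}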
-static inline bool +static __always_inline bool __bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write) { return !regs->kuap; diff --git a/arch/powerpc/include/asm/nohash/mmu-e500.h b/arch/powerpc/include/asm/nohash/mmu-e500.h index e43a418d3ccd..6ddced0415cb 100644 --- a/arch/powerpc/include/asm/nohash/mmu-e500.h +++ b/arch/powerpc/include/asm/nohash/mmu-e500.h @@ -319,6 +319,9 @@ extern int book3e_htw_mode; #endif +#include <asm/percpu.h> +DECLARE_PER_CPU(int, next_tlbcam_idx); + #endif /* !__ASSEMBLY__ */ #endif /* _ASM_POWERPC_MMU_BOOK3E_H_ */ diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h index a6caaaab6f92..c721478c5934 100644 --- a/arch/powerpc/include/asm/nohash/pgtable.h +++ b/arch/powerpc/include/asm/nohash/pgtable.h @@ -25,7 +25,9 @@ static inline int pte_write(pte_t pte) return pte_val(pte) & _PAGE_RW; } #endif +#ifndef pte_read static inline int pte_read(pte_t pte) { return 1; } +#endif static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; } static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; } @@ -101,8 +103,6 @@ static inline bool pte_access_permitted(pte_t pte, bool write) static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) { return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) | pgprot_val(pgprot)); } -static inline unsigned long pte_pfn(pte_t pte) { - return pte_val(pte) >> PTE_RPN_SHIFT; } /* Generic modifiers for PTE bits */ static inline pte_t pte_exprotect(pte_t pte) @@ -166,12 +166,6 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte) return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE); } -/* Insert a PTE, top-level function is out of line. It uses an inline - * low level function in the respective pgtable-* files - */ -extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, - pte_t pte); - /* This low level function performs the actual PTE insertion * Setting the PTE depends on the MMU type and other factors. It's * an horrible mess that I'm not going to try to clean up now but @@ -282,10 +276,12 @@ static inline int pud_huge(pud_t pud) * for the page which has just been mapped in. 
*/ #if defined(CONFIG_PPC_E500) && defined(CONFIG_HUGETLB_PAGE) -void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep); +void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma, + unsigned long address, pte_t *ptep, unsigned int nr); #else -static inline -void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) {} +static inline void update_mmu_cache_range(struct vm_fault *vmf, + struct vm_area_struct *vma, unsigned long address, + pte_t *ptep, unsigned int nr) {} #endif #endif /* __ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index cb325938766a..e667d455ecb4 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -15,7 +15,6 @@ #include <linux/cache.h> #include <linux/string.h> #include <asm/types.h> -#include <asm/lppaca.h> #include <asm/mmu.h> #include <asm/page.h> #ifdef CONFIG_PPC_BOOK3E_64 @@ -47,14 +46,11 @@ extern unsigned int debug_smp_processor_id(void); /* from linux/smp.h */ #define get_paca() local_paca #endif -#ifdef CONFIG_PPC_PSERIES -#define get_lppaca() (get_paca()->lppaca_ptr) -#endif - #define get_slb_shadow() (get_paca()->slb_shadow_ptr) struct task_struct; struct rtas_args; +struct lppaca; /* * Defines the layout of the paca. diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index f2b6bf5687d0..e5fcc79b5bfb 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -9,6 +9,7 @@ #ifndef __ASSEMBLY__ #include <linux/types.h> #include <linux/kernel.h> +#include <linux/bug.h> #else #include <asm/types.h> #endif @@ -119,16 +120,6 @@ extern long long virt_phys_offset; #define ARCH_PFN_OFFSET ((unsigned long)(MEMORY_START >> PAGE_SHIFT)) #endif -#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) -#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr)) -#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) - -#define virt_addr_valid(vaddr) ({ \ - unsigned long _addr = (unsigned long)vaddr; \ - _addr >= PAGE_OFFSET && _addr < (unsigned long)high_memory && \ - pfn_valid(virt_to_pfn(_addr)); \ -}) - /* * On Book-E parts we need __va to parse the device tree and we can't * determine MEMORY_START until then. However we can determine PHYSICAL_START @@ -233,6 +224,25 @@ extern long long virt_phys_offset; #endif #endif +#ifndef __ASSEMBLY__ +static inline unsigned long virt_to_pfn(const void *kaddr) +{ + return __pa(kaddr) >> PAGE_SHIFT; +} + +static inline const void *pfn_to_kaddr(unsigned long pfn) +{ + return __va(pfn << PAGE_SHIFT); +} +#endif + +#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr)) +#define virt_addr_valid(vaddr) ({ \ + unsigned long _addr = (unsigned long)vaddr; \ + _addr >= PAGE_OFFSET && _addr < (unsigned long)high_memory && \ + pfn_valid(virt_to_pfn((void *)_addr)); \ +}) + /* * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI, * and needs to be executable. 
This means the whole heap ends diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h index f5ba1a3c41f8..e08513d73119 100644 --- a/arch/powerpc/include/asm/paravirt.h +++ b/arch/powerpc/include/asm/paravirt.h @@ -6,6 +6,7 @@ #include <asm/smp.h> #ifdef CONFIG_PPC64 #include <asm/paca.h> +#include <asm/lppaca.h> #include <asm/hvcall.h> #endif diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index 289f1ec85bc5..f5078a7dd85a 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h @@ -82,7 +82,8 @@ extern int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, extern int pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma, enum pci_mmap_state mmap_state); - +extern void pci_adjust_legacy_attr(struct pci_bus *bus, + enum pci_mmap_state mmap_type); #define HAVE_PCI_LEGACY 1 extern void pcibios_claim_one_bus(struct pci_bus *b); diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h index 3360cad78ace..3a971e2a8c73 100644 --- a/arch/powerpc/include/asm/pgalloc.h +++ b/arch/powerpc/include/asm/pgalloc.h @@ -45,6 +45,10 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) pte_fragment_free((unsigned long *)ptepage, 0); } +/* arch use pte_free_defer() implementation in arch/powerpc/mm/pgtable-frag.c */ +#define pte_free_defer pte_free_defer +void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable); + /* * Functions that deal with pagetables that could be at any level of * the table need to be passed an "index_size" so they know how to diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index 6a88bfdaa69b..d0ee46de248e 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -41,6 +41,12 @@ struct mm_struct; #ifndef __ASSEMBLY__ +void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, + pte_t pte, unsigned int nr); +#define set_ptes set_ptes +#define update_mmu_cache(vma, addr, ptep) \ + update_mmu_cache_range(NULL, vma, addr, ptep, 1) + #ifndef MAX_PTRS_PER_PGD #define MAX_PTRS_PER_PGD PTRS_PER_PGD #endif @@ -48,6 +54,12 @@ struct mm_struct; /* Keep these as a macros to avoid include dependency mess */ #define pte_page(x) pfn_to_page(pte_pfn(x)) #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) + +static inline unsigned long pte_pfn(pte_t pte) +{ + return (pte_val(pte) & PTE_RPN_MASK) >> PTE_RPN_SHIFT; +} + /* * Select all bits except the pfn */ @@ -60,9 +72,9 @@ static inline pgprot_t pte_pgprot(pte_t pte) } #ifndef pmd_page_vaddr -static inline unsigned long pmd_page_vaddr(pmd_t pmd) +static inline const void *pmd_page_vaddr(pmd_t pmd) { - return ((unsigned long)__va(pmd_val(pmd) & ~PMD_MASKED_BITS)); + return __va(pmd_val(pmd) & ~PMD_MASKED_BITS); } #define pmd_page_vaddr pmd_page_vaddr #endif @@ -158,13 +170,30 @@ static inline pgtable_t pmd_pgtable(pmd_t pmd) } #ifdef CONFIG_PPC64 -#define is_ioremap_addr is_ioremap_addr -static inline bool is_ioremap_addr(const void *x) +int __meminit vmemmap_populated(unsigned long vmemmap_addr, int vmemmap_map_size); +bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start, + unsigned long page_size); +/* + * mm/memory_hotplug.c:mhp_supports_memmap_on_memory goes into details + * some of the restrictions. We don't check for PMD_SIZE because our + * vmemmap allocation code can fallback correctly. The pageblock + * alignment requirement is met using altmap->reserve blocks. 
+ */ +#define arch_supports_memmap_on_memory arch_supports_memmap_on_memory +static inline bool arch_supports_memmap_on_memory(unsigned long vmemmap_size) { - unsigned long addr = (unsigned long)x; - - return addr >= IOREMAP_BASE && addr < IOREMAP_END; + if (!radix_enabled()) + return false; + /* + * With 4K page size and 2M PMD_SIZE, we can align + * things better with memory block size value + * starting from 128MB. Hence align things with PMD_SIZE. + */ + if (IS_ENABLED(CONFIG_PPC_4K_PAGES)) + return IS_ALIGNED(vmemmap_size, PMD_SIZE); + return true; } + #endif /* CONFIG_PPC64 */ #endif /* __ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h index 8239c0af5eb2..fe3d0ea0058a 100644 --- a/arch/powerpc/include/asm/plpar_wrappers.h +++ b/arch/powerpc/include/asm/plpar_wrappers.h @@ -9,6 +9,7 @@ #include <asm/hvcall.h> #include <asm/paca.h> +#include <asm/lppaca.h> #include <asm/page.h> static inline long poll_pending(void) diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index ef6972aa33b9..005601243dda 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -397,6 +397,7 @@ #define PPC_RAW_RFCI (0x4c000066) #define PPC_RAW_RFDI (0x4c00004e) #define PPC_RAW_RFMCI (0x4c00004c) +#define PPC_RAW_TLBILX_LPID (0x7c000024) #define PPC_RAW_TLBILX(t, a, b) (0x7c000024 | __PPC_T_TLB(t) | __PPC_RA0(a) | __PPC_RB(b)) #define PPC_RAW_WAIT_v203 (0x7c00007c) #define PPC_RAW_WAIT(w, p) (0x7c00003c | __PPC_WC(w) | __PPC_PL(p)) @@ -616,6 +617,7 @@ #define PPC_TLBILX(t, a, b) stringify_in_c(.long PPC_RAW_TLBILX(t, a, b)) #define PPC_TLBILX_ALL(a, b) PPC_TLBILX(0, a, b) #define PPC_TLBILX_PID(a, b) PPC_TLBILX(1, a, b) +#define PPC_TLBILX_LPID stringify_in_c(.long PPC_RAW_TLBILX_LPID) #define PPC_TLBILX_VA(a, b) PPC_TLBILX(3, a, b) #define PPC_WAIT_v203 stringify_in_c(.long PPC_RAW_WAIT_v203) #define PPC_WAIT(w, p) stringify_in_c(.long PPC_RAW_WAIT(w, p)) diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 8a6754ffdc7e..b2c51d337e60 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -172,11 +172,6 @@ struct thread_struct { unsigned int align_ctl; /* alignment handling control */ #ifdef CONFIG_HAVE_HW_BREAKPOINT struct perf_event *ptrace_bps[HBP_NUM_MAX]; - /* - * Helps identify source of single-step exception and subsequent - * hw-breakpoint enablement - */ - struct perf_event *last_hit_ubp[HBP_NUM_MAX]; #endif /* CONFIG_HAVE_HW_BREAKPOINT */ struct arch_hw_breakpoint hw_brk[HBP_NUM_MAX]; /* hardware breakpoint info */ unsigned long trap_nr; /* last trap # on this thread */ @@ -393,7 +388,6 @@ int validate_sp_size(unsigned long sp, struct task_struct *p, */ #define ARCH_HAS_PREFETCH #define ARCH_HAS_PREFETCHW -#define ARCH_HAS_SPINLOCK_PREFETCH static inline void prefetch(const void *x) { @@ -411,8 +405,6 @@ static inline void prefetchw(const void *x) __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x)); } -#define spin_lock_prefetch(x) prefetchw(x) - /* asm stubs */ extern unsigned long isa300_idle_stop_noloss(unsigned long psscr_val); extern unsigned long isa300_idle_stop_mayloss(unsigned long psscr_val); diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index bb0121222ee3..4ae4ab9090a2 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -1414,11 +1414,9 @@ static inline void mtmsr_isync(unsigned long val) #define mfspr(rn) 
({unsigned long rval; \ asm volatile("mfspr %0," __stringify(rn) \ : "=r" (rval)); rval;}) -#ifndef mtspr #define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : \ : "r" ((unsigned long)(v)) \ : "memory") -#endif #define wrtspr(rn) asm volatile("mtspr " __stringify(rn) ",2" : : : "memory") static inline void wrtee(unsigned long val) diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 3abe15ac79db..c697c3c74694 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -202,7 +202,9 @@ typedef struct { #define RTAS_USER_REGION_SIZE (64 * 1024) /* RTAS return status codes */ +#define RTAS_HARDWARE_ERROR -1 /* Hardware Error */ #define RTAS_BUSY -2 /* RTAS Busy */ +#define RTAS_INVALID_PARAMETER -3 /* Invalid indicator/domain/sensor etc. */ #define RTAS_EXTENDED_DELAY_MIN 9900 #define RTAS_EXTENDED_DELAY_MAX 9905 @@ -425,6 +427,7 @@ extern int rtas_set_indicator(int indicator, int index, int new_value); extern int rtas_set_indicator_fast(int indicator, int index, int new_value); extern void rtas_progress(char *s, unsigned short hex); int rtas_ibm_suspend_me(int *fw_status); +int rtas_error_rc(int rtas_rc); struct rtc_time; extern time64_t rtas_get_boot_time(void); diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h index 4e1f548c8d37..ea26665f82cf 100644 --- a/arch/powerpc/include/asm/sections.h +++ b/arch/powerpc/include/asm/sections.h @@ -74,6 +74,8 @@ static inline int overlaps_kernel_text(unsigned long start, unsigned long end) (unsigned long)_stext < end; } +#else +static inline unsigned long kernel_toc_addr(void) { BUILD_BUG(); return -1UL; } #endif #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index e29e83f8a89c..eed74c1fb832 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -8,7 +8,6 @@ extern void ppc_printk_progress(char *s, unsigned short hex); extern unsigned long long memory_limit; -extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); struct device_node; diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index bc5d39a835fe..bf5dde1a4114 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -183,13 +183,9 @@ static inline bool test_thread_local_flags(unsigned int flags) #define clear_tsk_compat_task(tsk) do { } while (0) #endif -#ifdef CONFIG_PPC64 -#ifdef CONFIG_CPU_BIG_ENDIAN +#if defined(CONFIG_PPC64) #define is_elf2_task() (test_thread_flag(TIF_ELF2ABI)) #else -#define is_elf2_task() (1) -#endif -#else #define is_elf2_task() (0) #endif diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h index 8a4d4f4d9749..f4e6f2dd04b7 100644 --- a/arch/powerpc/include/asm/topology.h +++ b/arch/powerpc/include/asm/topology.h @@ -143,5 +143,20 @@ static inline int cpu_to_coregroup_id(int cpu) #endif #endif +#ifdef CONFIG_HOTPLUG_SMT +#include <linux/cpu_smt.h> +#include <asm/cputhreads.h> + +static inline bool topology_is_primary_thread(unsigned int cpu) +{ + return cpu == cpu_first_thread_sibling(cpu); +} + +static inline bool topology_smt_thread_allowed(unsigned int cpu) +{ + return cpu_thread_in_core(cpu) < cpu_smt_num_threads; +} +#endif + #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_TOPOLOGY_H */ diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index a2d255aa9627..fb725ec77926 100644 --- a/arch/powerpc/include/asm/uaccess.h 
+++ b/arch/powerpc/include/asm/uaccess.h @@ -386,7 +386,7 @@ copy_mc_to_user(void __user *to, const void *from, unsigned long n) extern long __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size); -static __must_check inline bool user_access_begin(const void __user *ptr, size_t len) +static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len) { if (unlikely(!access_ok(ptr, len))) return false; @@ -401,7 +401,7 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t #define user_access_save prevent_user_access_return #define user_access_restore restore_user_access -static __must_check inline bool +static __must_check __always_inline bool user_read_access_begin(const void __user *ptr, size_t len) { if (unlikely(!access_ok(ptr, len))) @@ -415,7 +415,7 @@ user_read_access_begin(const void __user *ptr, size_t len) #define user_read_access_begin user_read_access_begin #define user_read_access_end prevent_current_read_from_user -static __must_check inline bool +static __must_check __always_inline bool user_write_access_begin(const void __user *ptr, size_t len) { if (unlikely(!access_ok(ptr, len))) diff --git a/arch/powerpc/include/asm/vermagic.h b/arch/powerpc/include/asm/vermagic.h index b054a8576e5d..6f250fe506bd 100644 --- a/arch/powerpc/include/asm/vermagic.h +++ b/arch/powerpc/include/asm/vermagic.h @@ -2,7 +2,9 @@ #ifndef _ASM_VERMAGIC_H #define _ASM_VERMAGIC_H -#ifdef CONFIG_MPROFILE_KERNEL +#ifdef CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY +#define MODULE_ARCH_VERMAGIC_FTRACE "patchable-function-entry " +#elif defined(CONFIG_MPROFILE_KERNEL) #define MODULE_ARCH_VERMAGIC_FTRACE "mprofile-kernel " #else #define MODULE_ARCH_VERMAGIC_FTRACE "" diff --git a/arch/powerpc/include/asm/vphn.h b/arch/powerpc/include/asm/vphn.h new file mode 100644 index 000000000000..8c2f795eea68 --- /dev/null +++ b/arch/powerpc/include/asm/vphn.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _ASM_POWERPC_VPHN_H +#define _ASM_POWERPC_VPHN_H + +/* The H_HOME_NODE_ASSOCIATIVITY h_call returns 6 64-bit registers. */ +#define VPHN_REGISTER_COUNT 6 + +/* + * 6 64-bit registers unpacked into up to 24 be32 associativity values. To + * form the complete property we have to add the length in the first cell. + */ +#define VPHN_ASSOC_BUFSIZE (VPHN_REGISTER_COUNT*sizeof(u64)/sizeof(u16) + 1) + +/* + * The H_HOME_NODE_ASSOCIATIVITY hcall takes two values for flags: + * 1 for retrieving associativity information for a guest cpu + * 2 for retrieving associativity information for a host/hypervisor cpu + */ +#define VPHN_FLAG_VCPU 1 +#define VPHN_FLAG_PCPU 2 + +long hcall_vphn(unsigned long cpu, u64 flags, __be32 *associativity); + +#endif // _ASM_POWERPC_VPHN_H diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h index 46c31fb8748d..30a12d208687 100644 --- a/arch/powerpc/include/asm/word-at-a-time.h +++ b/arch/powerpc/include/asm/word-at-a-time.h @@ -34,7 +34,7 @@ static inline long find_zero(unsigned long mask) return leading_zero_bits >> 3; } -static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c) +static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c) { unsigned long rhs = val | c->low_bits; *data = rhs;
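(Aside: the word-at-a-time.h hunk that closes the diff only widens has_zero()'s return type from bool to unsigned long, but the trick behind it is worth recalling: find a zero byte in a whole word at once by letting only a 0x00 byte carry into its high bit. The demo below is the classic generic little-endian form, (v - ONES) & ~v & HIGHS, not the powerpc big-endian variant in the header; it is freestanding and all names are local to the example.)

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ONES	0x0101010101010101ULL	/* 0x01 repeated in every byte */
#define HIGHS	0x8080808080808080ULL	/* 0x80 repeated in every byte */

/* Nonzero result means some byte of v was 0x00 (its 0x80 bit gets set). */
static uint64_t has_zero_byte(uint64_t v)
{
	return (v - ONES) & ~v & HIGHS;
}

int main(void)
{
	uint64_t w;

	memcpy(&w, "abc\0defg", 8);	/* NUL in byte 3 */
	printf("mask = %#llx\n", (unsigned long long)has_zero_byte(w));	/* 0x80000000 on LE */
	memcpy(&w, "abcdefgh", 8);	/* no NUL anywhere */
	printf("mask = %#llx\n", (unsigned long long)has_zero_byte(w));	/* 0 */
	return 0;
}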