Diffstat (limited to 'arch/powerpc/include/asm')
34 files changed, 207 insertions, 133 deletions
diff --git a/arch/powerpc/include/asm/asm-const.h b/arch/powerpc/include/asm/asm-const.h
index dbfa5e1e3198..bfb3c3534877 100644
--- a/arch/powerpc/include/asm/asm-const.h
+++ b/arch/powerpc/include/asm/asm-const.h
@@ -12,6 +12,4 @@
 #  define ASM_CONST(x)	__ASM_CONST(x)
 #endif
 
-#define UPD_CONSTR "<>"
-
 #endif /* _ASM_POWERPC_ASM_CONST_H */
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 6a53ef178bfd..fd594fdbd84d 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -27,14 +27,14 @@ static __inline__ int arch_atomic_read(const atomic_t *v)
 {
 	int t;
 
-	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"UPD_CONSTR(v->counter));
+	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
 
 	return t;
 }
 
 static __inline__ void arch_atomic_set(atomic_t *v, int i)
 {
-	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"UPD_CONSTR(v->counter) : "r"(i));
+	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
 }
 
 #define ATOMIC_OP(op, asm_op)						\
@@ -320,14 +320,14 @@ static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
 {
 	s64 t;
 
-	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"UPD_CONSTR(v->counter));
+	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
 
 	return t;
 }
 
 static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
 {
-	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"UPD_CONSTR(v->counter) : "r"(i));
+	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
 }
 
 #define ATOMIC64_OP(op, asm_op)						\
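Note: the "m<>" constraints above spell out what the old "m"UPD_CONSTR token
pasting produced; UPD_CONSTR reportedly existed only to omit "<>" on
toolchains that could not parse it. A minimal sketch (not from this patch)
of how the constraint pairs with the %U/%X operand modifiers:

/*
 * "m" accepts any memory operand; the "<>" modifiers additionally allow
 * pre-increment/pre-decrement (update-form) addresses. %U1 then expands
 * to the "u" suffix (e.g. lwzu) when the compiler picked an update-form
 * address for operand 1, and %X1 selects the indexed ("x") form.
 */
static inline int load_one(const int *p)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m<>"(*p));
	return t;
}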
diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
index d4b145b279f6..9f38040f0641 100644
--- a/arch/powerpc/include/asm/book3s/32/kup.h
+++ b/arch/powerpc/include/asm/book3s/32/kup.h
@@ -136,6 +136,14 @@ static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
 	if (kuap_is_disabled())
 		return;
 
+	if (unlikely(kuap != KUAP_NONE)) {
+		current->thread.kuap = KUAP_NONE;
+		kuap_lock(kuap, false);
+	}
+
+	if (likely(regs->kuap == KUAP_NONE))
+		return;
+
 	current->thread.kuap = regs->kuap;
 
 	kuap_unlock(regs->kuap, false);
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index d959b0195ad9..674fe0e890dc 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -255,6 +255,8 @@ int hash__create_section_mapping(unsigned long start, unsigned long end,
 				 int nid, pgprot_t prot);
 int hash__remove_section_mapping(unsigned long start, unsigned long end);
 
+void hash__kernel_map_pages(struct page *page, int numpages, int enable);
+
 #endif /* !__ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 5d34a8646f08..33e073d6b0c4 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1101,6 +1101,16 @@ static inline void vmemmap_remove_mapping(unsigned long start,
 }
 #endif
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+static inline void __kernel_map_pages(struct page *page, int numpages, int enable)
+{
+	if (radix_enabled())
+		radix__kernel_map_pages(page, numpages, enable);
+	else
+		hash__kernel_map_pages(page, numpages, enable);
+}
+#endif
+
 static inline pte_t pmd_pte(pmd_t pmd)
 {
 	return __pte_raw(pmd_raw(pmd));
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 59cab558e2f0..d090d9612348 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -316,5 +316,8 @@ int radix__create_section_mapping(unsigned long start, unsigned long end,
 				  int nid, pgprot_t prot);
 int radix__remove_section_mapping(unsigned long start, unsigned long end);
 #endif /* CONFIG_MEMORY_HOTPLUG */
+
+void radix__kernel_map_pages(struct page *page, int numpages, int enable);
+
 #endif /* __ASSEMBLY__ */
 #endif
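Note: the new __kernel_map_pages() above is the hook behind
CONFIG_DEBUG_PAGEALLOC. A simplified sketch of the generic caller side
(adapted from the core mm helpers, not part of this patch): freed pages are
unmapped from the kernel linear mapping so stray accesses fault immediately,
and are mapped again on allocation.

static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
{
	if (debug_pagealloc_enabled_static())
		__kernel_map_pages(page, numpages, 1);	/* map on alloc */
}

static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
{
	if (debug_pagealloc_enabled_static())
		__kernel_map_pages(page, numpages, 0);	/* unmap on free */
}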
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index a95f63788c6b..4ba834599c4d 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -23,6 +23,7 @@
 #define BRANCH_ABSOLUTE	0x2
 
 bool is_offset_in_branch_range(long offset);
+bool is_offset_in_cond_branch_range(long offset);
 int create_branch(struct ppc_inst *instr, const u32 *addr,
 		  unsigned long target, int flags);
 int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index 6b800d3e2681..a1d238255f07 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -265,13 +265,16 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
 	local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
 	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
 
-	if (is_implicit_soft_masked(regs)) {
-		// Adjust regs->softe soft implicit soft-mask, so
-		// arch_irq_disabled_regs(regs) behaves as expected.
+	if (!(regs->msr & MSR_EE) || is_implicit_soft_masked(regs)) {
+		/*
+		 * Adjust regs->softe to be soft-masked if it had not been
+		 * reconciled (e.g., interrupt entry with MSR[EE]=0 but softe
+		 * not yet set disabled), or if it was in an implicit soft
+		 * masked state. This makes arch_irq_disabled_regs(regs)
+		 * behave as expected.
+		 */
 		regs->softe = IRQS_ALL_DISABLED;
 	}
-	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
-		BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));
 
 	/* Don't do any per-CPU operations until interrupt state is fixed */
@@ -525,10 +528,9 @@ static __always_inline long ____##func(struct pt_regs *regs)
 
 /* kernel/traps.c */
 DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
 #ifdef CONFIG_PPC_BOOK3S_64
-DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception);
-#else
-DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
+DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async);
 #endif
+DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
 DECLARE_INTERRUPT_HANDLER(SMIException);
 DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
 DECLARE_INTERRUPT_HANDLER(unknown_exception);
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index f130783c8301..beba4979bff9 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -122,7 +122,7 @@ static inline u##size name(const volatile u##size __iomem *addr)	\
 {									\
 	u##size ret;							\
 	__asm__ __volatile__("sync;"#insn"%U1%X1 %0,%1;twi 0,%0,0;isync"\
-		: "=r" (ret) : "m"UPD_CONSTR (*addr) : "memory");	\
+		: "=r" (ret) : "m<>" (*addr) : "memory");		\
 	return ret;							\
 }
 
@@ -130,7 +130,7 @@ static inline u##size name(const volatile u##size __iomem *addr)	\
 static inline void name(volatile u##size __iomem *addr, u##size val)	\
 {									\
 	__asm__ __volatile__("sync;"#insn"%U0%X0 %1,%0"			\
-		: "=m"UPD_CONSTR (*addr) : "r" (val) : "memory");	\
+		: "=m<>" (*addr) : "r" (val) : "memory");		\
 	mmiowb_set_pending();						\
}
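Note: in the read accessor above, "sync" orders the MMIO load against
preceding storage accesses, while the "twi 0,%0,0; isync" pair forces the
CPU to consume the loaded value before executing past the accessor, so the
device read cannot be speculated around. A hypothetical expansion of the
macro for a 32-bit load (the function name is illustrative, not the real
generated accessor):

static inline u32 mmio_read32(const volatile u32 __iomem *addr)
{
	u32 ret;

	__asm__ __volatile__("sync;lwz%U1%X1 %0,%1;twi 0,%0,0;isync"
			     : "=r" (ret) : "m<>" (*addr) : "memory");
	return ret;
}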
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index bf3b84128525..c361212ac160 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -280,12 +280,6 @@ extern void iommu_init_early_dart(struct pci_controller_ops *controller_ops);
 extern void iommu_init_early_pasemi(void);
 
 #if defined(CONFIG_PPC64) && defined(CONFIG_PM)
-static inline void iommu_save(void)
-{
-	if (ppc_md.iommu_save)
-		ppc_md.iommu_save();
-}
-
 static inline void iommu_restore(void)
 {
 	if (ppc_md.iommu_restore)
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index 88d0d7cf3a79..c6f250eca3fb 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -79,7 +79,6 @@ extern int crash_wake_offline;
 struct kimage;
 struct pt_regs;
 extern void default_machine_kexec(struct kimage *image);
-extern int default_machine_kexec_prepare(struct kimage *image);
 extern void default_machine_crash_shutdown(struct pt_regs *regs);
 extern int crash_shutdown_register(crash_shutdown_t handler);
 extern int crash_shutdown_unregister(crash_shutdown_t handler);
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
index 4fc0e15e23a5..bab364152b29 100644
--- a/arch/powerpc/include/asm/kprobes.h
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -51,7 +51,7 @@ extern kprobe_opcode_t optprobe_template_end[];
 #define flush_insn_slot(p)	do { } while (0)
 #define kretprobe_blacklist_size 0
 
-void kretprobe_trampoline(void);
+void __kretprobe_trampoline(void);
 extern void arch_remove_kprobe(struct kprobe *p);
 
 /* Architecture specific copy of original instruction */
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index caaa0f592d8e..3d31f2c59e43 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -434,7 +434,7 @@ extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu);
 #define SPLIT_HACK_OFFS			0xfb000000
 
 /*
- * This packs a VCPU ID from the [0..KVM_MAX_VCPU_ID) space down to the
+ * This packs a VCPU ID from the [0..KVM_MAX_VCPU_IDS) space down to the
  * [0..KVM_MAX_VCPUS) space, using knowledge of the guest's core stride
  * (but not its actual threading mode, which is not available) to avoid
  * collisions.
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 19b6942c6969..fff391b9b97b 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -378,6 +378,10 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
 		rb |= 1;		/* L field */
 		rb |= r & 0xff000 & ((1ul << a_pgshift) - 1); /* LP field */
 	}
+	/*
+	 * This sets both bits of the B field in the PTE. 0b1x values are
+	 * reserved, but those will have been filtered by kvmppc_do_h_enter.
+	 */
 	rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8;	/* B field */
 	return rb;
 }
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 080a7feb7731..e4d23193eba7 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -33,11 +33,11 @@
 
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 #include <asm/kvm_book3s_asm.h>		/* for MAX_SMT_THREADS */
-#define KVM_MAX_VCPU_ID		(MAX_SMT_THREADS * KVM_MAX_VCORES)
+#define KVM_MAX_VCPU_IDS	(MAX_SMT_THREADS * KVM_MAX_VCORES)
 #define KVM_MAX_NESTED_GUESTS	KVMPPC_NR_LPIDS
 
 #else
-#define KVM_MAX_VCPU_ID		KVM_MAX_VCPUS
+#define KVM_MAX_VCPU_IDS	KVM_MAX_VCPUS
 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 
 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@ -190,7 +190,7 @@ struct kvmppc_spapr_tce_table {
 	u64 size;		/* window size in pages */
 	struct list_head iommu_tables;
 	struct mutex alloc_lock;
-	struct page *pages[0];
+	struct page *pages[];
 };
 
 /* XICS components, defined in book3s_xics.c */
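Note: changing pages[0] to pages[] converts a GNU zero-length array into a
C99 flexible array member; the layout is unchanged, but the compiler can
now diagnose out-of-bounds accesses. The idiomatic allocation pairing is
struct_size() from <linux/overflow.h>. A hypothetical helper (the function
name is invented for illustration):

static struct kvmppc_spapr_tce_table *alloc_tce_table(unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;

	/* sizeof(*stt) + npages * sizeof(stt->pages[0]), overflow-checked */
	stt = kzalloc(struct_size(stt, pages, npages), GFP_KERNEL);
	return stt;
}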
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 764f2732a821..9c3c9f04129f 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -29,10 +29,9 @@ struct machdep_calls {
 	char		*name;
 #ifdef CONFIG_PPC64
 #ifdef CONFIG_PM
-	void		(*iommu_save)(void);
 	void		(*iommu_restore)(void);
 #endif
-#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
+#ifdef CONFIG_MEMORY_HOTPLUG
 	unsigned long	(*memory_block_size)(void);
 #endif
 #endif /* CONFIG_PPC64 */
@@ -43,7 +42,6 @@ struct machdep_calls {
 	void		(*setup_arch)(void); /* Optional, may be NULL */
 	/* Optional, may be NULL. */
 	void		(*show_cpuinfo)(struct seq_file *m);
-	void		(*show_percpuinfo)(struct seq_file *m, int i);
 	/* Returns the current operating frequency of "cpu" in Hz */
 	unsigned long	(*get_proc_freq)(unsigned int cpu);
@@ -74,8 +72,6 @@ struct machdep_calls {
 	int		(*set_rtc_time)(struct rtc_time *);
 	void		(*get_rtc_time)(struct rtc_time *);
 	time64_t	(*get_boot_time)(void);
-	unsigned char	(*rtc_read_val)(int addr);
-	void		(*rtc_write_val)(int addr, unsigned char val);
 
 	void		(*calibrate_decr)(void);
@@ -141,8 +137,6 @@ struct machdep_calls {
 	   May be NULL. */
 	void		(*init)(void);
 
-	void		(*kgdb_map_scc)(void);
-
 	/*
 	 * optional PCI "hooks"
 	 */
@@ -187,13 +181,6 @@ struct machdep_calls {
 #ifdef CONFIG_KEXEC_CORE
 	void (*kexec_cpu_down)(int crash_shutdown, int secondary);
 
-	/* Called to do what every setup is needed on image and the
-	 * reboot code buffer. Returns 0 on success.
-	 * Provide your own (maybe dummy) implementation if your platform
-	 * claims to support kexec.
-	 */
-	int (*machine_kexec_prepare)(struct kimage *image);
-
 	/* Called to perform the _real_ kexec.
 	 * Do NOT allocate memory or fail here. We are past the point of
 	 * no return.
diff --git a/arch/powerpc/include/asm/mem_encrypt.h b/arch/powerpc/include/asm/mem_encrypt.h
index ba9dab07c1be..2f26b8fc8d29 100644
--- a/arch/powerpc/include/asm/mem_encrypt.h
+++ b/arch/powerpc/include/asm/mem_encrypt.h
@@ -10,11 +10,6 @@
 
 #include <asm/svm.h>
 
-static inline bool mem_encrypt_active(void)
-{
-	return is_secure_guest();
-}
-
 static inline bool force_dma_unencrypted(struct device *dev)
 {
 	return is_secure_guest();
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index f06ae00f2a65..b67742e2a9b2 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -193,10 +193,12 @@ static inline pte_t pte_wrprotect(pte_t pte)
 }
 #endif
 
+#ifndef pte_mkexec
 static inline pte_t pte_mkexec(pte_t pte)
 {
 	return __pte(pte_val(pte) | _PAGE_EXEC);
 }
+#endif
 
 #define pmd_none(pmd)		(!pmd_val(pmd))
 #define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
@@ -245,7 +247,7 @@ static int number_of_cells_per_pte(pmd_t *pmd, pte_basic_t val, int huge)
 static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
 				     unsigned long clr, unsigned long set, int huge)
 {
-	pte_basic_t *entry = &p->pte;
+	pte_basic_t *entry = (pte_basic_t *)p;
 	pte_basic_t old = pte_val(*p);
 	pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
 	int num, i;
@@ -306,30 +308,29 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 }
 
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
+#ifndef ptep_set_wrprotect
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 				      pte_t *ptep)
 {
-	unsigned long clr = ~pte_val(pte_wrprotect(__pte(~0)));
-	unsigned long set = pte_val(pte_wrprotect(__pte(0)));
-
-	pte_update(mm, addr, ptep, clr, set, 0);
+	pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
 }
+#endif
 
+#ifndef __ptep_set_access_flags
 static inline void __ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
 					   pte_t entry, unsigned long address,
 					   int psize)
 {
-	pte_t pte_set = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(0)))));
-	pte_t pte_clr = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(~0)))));
-	unsigned long set = pte_val(entry) & pte_val(pte_set);
-	unsigned long clr = ~pte_val(entry) & ~pte_val(pte_clr);
+	unsigned long set = pte_val(entry) &
+			    (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
 	int huge = psize > mmu_virtual_psize ? 1 : 0;
 
-	pte_update(vma->vm_mm, address, ptep, clr, set, huge);
+	pte_update(vma->vm_mm, address, ptep, 0, set, huge);
 
 	flush_tlb_page(vma, address);
 }
+#endif
 
 static inline int pte_young(pte_t pte)
 {
diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
index fcc48d590d88..1a89ebdc3acc 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
@@ -136,6 +136,28 @@ static inline pte_t pte_mkhuge(pte_t pte)
 
 #define pte_mkhuge pte_mkhuge
 
+static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
+				     unsigned long clr, unsigned long set, int huge);
+
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+	pte_update(mm, addr, ptep, 0, _PAGE_RO, 0);
+}
+#define ptep_set_wrprotect ptep_set_wrprotect
+
+static inline void __ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
+					   pte_t entry, unsigned long address, int psize)
+{
+	unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_EXEC);
+	unsigned long clr = ~pte_val(entry) & _PAGE_RO;
+	int huge = psize > mmu_virtual_psize ? 1 : 0;
+
+	pte_update(vma->vm_mm, address, ptep, clr, set, huge);
+
+	flush_tlb_page(vma, address);
+}
+#define __ptep_set_access_flags __ptep_set_access_flags
+
 static inline unsigned long pgd_leaf_size(pgd_t pgd)
 {
 	if (pgd_val(pgd) & _PMD_PAGE_8M)
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index d081704b13fb..9d2905a47410 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -118,11 +118,6 @@ static inline pte_t pte_wrprotect(pte_t pte)
 	return __pte(pte_val(pte) & ~_PAGE_RW);
 }
 
-static inline pte_t pte_mkexec(pte_t pte)
-{
-	return __pte(pte_val(pte) | _PAGE_EXEC);
-}
-
 #define PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
 #define PUD_BAD_BITS		(PMD_TABLE_SIZE-1)
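Note: both ptep_set_wrprotect() variants above reduce to pte_update()'s
clear/set contract. As a plain-C model (the real function also handles
multi-cell and huge PTEs):

/*
 * new = (old & ~clr) | set
 *
 * So write-protecting is "clear _PAGE_RW" on most nohash/32 platforms,
 * and "set _PAGE_RO" on 8xx, where the bit has inverted polarity.
 */
static unsigned long pte_update_model(unsigned long old,
				      unsigned long clr, unsigned long set)
{
	return (old & ~clr) | set;
}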
diff --git a/arch/powerpc/include/asm/nohash/pte-book3e.h b/arch/powerpc/include/asm/nohash/pte-book3e.h
index 813918f40765..f798640422c2 100644
--- a/arch/powerpc/include/asm/nohash/pte-book3e.h
+++ b/arch/powerpc/include/asm/nohash/pte-book3e.h
@@ -48,7 +48,7 @@
 #define _PAGE_WRITETHRU	0x800000 /* W: cache write-through */
 
 /* "Higher level" linux bit combinations */
-#define _PAGE_EXEC	_PAGE_BAP_UX /* .. and was cache cleaned */
+#define _PAGE_EXEC	(_PAGE_BAP_SX | _PAGE_BAP_UX) /* .. and was cache cleaned */
 #define _PAGE_RW	(_PAGE_BAP_SW | _PAGE_BAP_UW) /* User write permission */
 #define _PAGE_KERNEL_RW	(_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY)
 #define _PAGE_KERNEL_RO	(_PAGE_BAP_SR)
@@ -93,11 +93,11 @@
 /* Permission masks used to generate the __P and __S table */
 #define PAGE_NONE	__pgprot(_PAGE_BASE)
 #define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
+#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_BAP_UX)
 #define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_BAP_UX)
 #define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_BAP_UX)
 
 #ifndef __ASSEMBLY__
 static inline pte_t pte_mkprivileged(pte_t pte)
@@ -113,6 +113,16 @@ static inline pte_t pte_mkuser(pte_t pte)
 }
 #define pte_mkuser pte_mkuser
 
+static inline pte_t pte_mkexec(pte_t pte)
+{
+	if (pte_val(pte) & _PAGE_BAP_UR)
+		return __pte((pte_val(pte) & ~_PAGE_BAP_SX) | _PAGE_BAP_UX);
+	else
+		return __pte((pte_val(pte) & ~_PAGE_BAP_UX) | _PAGE_BAP_SX);
+}
+#define pte_mkexec pte_mkexec
+
 #endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/nohash/tlbflush.h b/arch/powerpc/include/asm/nohash/tlbflush.h
index 1edb7243e515..c08d25e3e626 100644
--- a/arch/powerpc/include/asm/nohash/tlbflush.h
+++ b/arch/powerpc/include/asm/nohash/tlbflush.h
@@ -32,11 +32,26 @@ extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			    unsigned long end);
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
+#ifdef CONFIG_PPC_8xx
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+	unsigned int pid = READ_ONCE(mm->context.id);
+
+	if (pid != MMU_NO_CONTEXT)
+		asm volatile ("sync; tlbia; isync" : : : "memory");
+}
+
+static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+{
+	asm volatile ("tlbie %0; sync" : : "r" (vmaddr) : "memory");
+}
+#else
 extern void local_flush_tlb_mm(struct mm_struct *mm);
 extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 
 extern void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
 				   int tsize, int ind);
+#endif
 
 #ifdef CONFIG_SMP
 extern void flush_tlb_mm(struct mm_struct *mm);
diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h
index bcb7b5f917be..eb7df559ae74 100644
--- a/arch/powerpc/include/asm/paravirt.h
+++ b/arch/powerpc/include/asm/paravirt.h
@@ -21,7 +21,7 @@ static inline bool is_shared_processor(void)
 	return static_branch_unlikely(&shared_processor);
 }
 
-/* If bit 0 is set, the cpu has been preempted */
+/* If bit 0 is set, the cpu has been ceded, conferred, or preempted */
 static inline u32 yield_count_of(int cpu)
 {
 	__be32 yield_count = READ_ONCE(lppaca_of(cpu).yield_count);
@@ -92,17 +92,47 @@ static inline void prod_cpu(int cpu)
 #define vcpu_is_preempted vcpu_is_preempted
 static inline bool vcpu_is_preempted(int cpu)
 {
+	/*
+	 * The dispatch/yield bit alone is an imperfect indicator of
+	 * whether the hypervisor has dispatched @cpu to run on a physical
+	 * processor. When it is clear, @cpu is definitely not preempted.
+	 * But when it is set, it means only that it *might* be, subject to
+	 * other conditions. So we check other properties of the VM and
+	 * @cpu first, resorting to the yield count last.
+	 */

+	/*
+	 * Hypervisor preemption isn't possible in dedicated processor
+	 * mode by definition.
+	 */
 	if (!is_shared_processor())
 		return false;
 
 #ifdef CONFIG_PPC_SPLPAR
 	if (!is_kvm_guest()) {
-		int first_cpu = cpu_first_thread_sibling(smp_processor_id());
+		int first_cpu;
 
 		/*
-		 * Preemption can only happen at core granularity. This CPU
-		 * is not preempted if one of the CPU of this core is not
-		 * preempted.
+		 * The result of vcpu_is_preempted() is used in a
+		 * speculative way, and is always subject to invalidation
+		 * by events internal and external to Linux. While we can
+		 * be called in preemptable context (in the Linux sense),
+		 * we're not accessing per-cpu resources in a way that can
+		 * race destructively with Linux scheduler preemption and
+		 * migration, and callers can tolerate the potential for
+		 * error introduced by sampling the CPU index without
+		 * pinning the task to it. So it is permissible to use
+		 * raw_smp_processor_id() here to defeat the preempt debug
+		 * warnings that can arise from using smp_processor_id()
+		 * in arbitrary contexts.
+		 */
+		first_cpu = cpu_first_thread_sibling(raw_smp_processor_id());
+
+		/*
+		 * The PowerVM hypervisor dispatches VMs on a whole core
+		 * basis. So we know that a thread sibling of the local CPU
+		 * cannot have been preempted by the hypervisor, even if it
+		 * has called H_CONFER, which will set the yield bit.
 		 */
 		if (cpu_first_thread_sibling(cpu) == first_cpu)
 			return false;
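Note: vcpu_is_preempted() feeds spin-wait heuristics. A sketch of the kind
of caller it serves (illustrative only, not a kernel API): spinning on a
resource whose holder's CPU has been preempted by the hypervisor only burns
cycles the holder needs to make progress.

static bool spin_until_done(int owner_cpu, bool (*done)(void))
{
	while (!done()) {
		if (vcpu_is_preempted(owner_cpu))
			return false;	/* back off; let the owner run */
		cpu_relax();
	}
	return true;
}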
diff --git a/arch/powerpc/include/asm/pgtable-types.h b/arch/powerpc/include/asm/pgtable-types.h
index d11b4c61d686..efed0db7b1db 100644
--- a/arch/powerpc/include/asm/pgtable-types.h
+++ b/arch/powerpc/include/asm/pgtable-types.h
@@ -2,17 +2,33 @@
 #ifndef _ASM_POWERPC_PGTABLE_TYPES_H
 #define _ASM_POWERPC_PGTABLE_TYPES_H
 
+#if defined(__CHECKER__) || !defined(CONFIG_PPC32)
+#define STRICT_MM_TYPECHECKS
+#endif
+
 /* PTE level */
 #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
 typedef struct { pte_basic_t pte, pte1, pte2, pte3; } pte_t;
-#else
+#elif defined(STRICT_MM_TYPECHECKS)
 typedef struct { pte_basic_t pte; } pte_t;
+#else
+typedef pte_basic_t pte_t;
 #endif
+
+#if defined(STRICT_MM_TYPECHECKS) || \
+    (defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES))
 #define __pte(x)	((pte_t) { (x) })
 static inline pte_basic_t pte_val(pte_t x)
 {
 	return x.pte;
 }
+#else
+#define __pte(x)	((pte_t)(x))
+static inline pte_basic_t pte_val(pte_t x)
+{
+	return x;
+}
+#endif
 
 /* PMD level */
 #ifdef CONFIG_PPC64
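Note: what STRICT_MM_TYPECHECKS buys, in one illustration (not from this
patch): with the struct wrapper, raw values and page-table entries no
longer convert silently.

static inline pte_t make_pte(pte_basic_t raw)
{
	pte_t pte = __pte(raw);		/* OK: explicit construction */
	/* pte_t bad = raw; */		/* compile error when pte_t is a struct */
	return pte;
}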
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
index 2b9edbf6e929..f6cf0159024e 100644
--- a/arch/powerpc/include/asm/ppc-pci.h
+++ b/arch/powerpc/include/asm/ppc-pci.h
@@ -55,11 +55,6 @@ void eeh_pe_dev_mode_mark(struct eeh_pe *pe, int mode);
 void eeh_sysfs_add_device(struct pci_dev *pdev);
 void eeh_sysfs_remove_device(struct pci_dev *pdev);
 
-static inline const char *eeh_driver_name(struct pci_dev *pdev)
-{
-	return (pdev && pdev->driver) ? pdev->driver->name : "<null>";
-}
-
 #endif /* CONFIG_EEH */
 
 #define PCI_BUSNO(bdfn) ((bdfn >> 8) & 0xff)
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 1c538a9a11e0..7be24048b8d1 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -28,8 +28,8 @@
 #else
 #define SAVE_GPR(n, base)	stw	n,GPR0+4*(n)(base)
 #define REST_GPR(n, base)	lwz	n,GPR0+4*(n)(base)
-#define SAVE_NVGPRS(base)	stmw	13, GPR0+4*13(base)
-#define REST_NVGPRS(base)	lmw	13, GPR0+4*13(base)
+#define SAVE_NVGPRS(base)	SAVE_GPR(13, base); SAVE_8GPRS(14, base); SAVE_10GPRS(22, base)
+#define REST_NVGPRS(base)	REST_GPR(13, base); REST_8GPRS(14, base); REST_10GPRS(22, base)
 #endif
 
 #define SAVE_2GPRS(n, base)	SAVE_GPR(n, base); SAVE_GPR(n+1, base)
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index f348e564f7dd..e39bd0ff69f3 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -300,7 +300,7 @@ struct thread_struct {
 
 #define task_pt_regs(tsk)	((tsk)->thread.regs)
 
-unsigned long get_wchan(struct task_struct *p);
+unsigned long __get_wchan(struct task_struct *p);
 
 #define KSTK_EIP(tsk)  ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
 #define KSTK_ESP(tsk)  ((tsk)->thread.regs? (tsk)->thread.regs->gpr[1]: 0)
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index 6e4af4492a14..79cb7a25a5fb 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -6,21 +6,8 @@
 #include <linux/elf.h>
 #include <linux/uaccess.h>
 
-#define arch_is_kernel_initmem_freed arch_is_kernel_initmem_freed
-
 #include <asm-generic/sections.h>
 
-extern bool init_mem_is_free;
-
-static inline int arch_is_kernel_initmem_freed(unsigned long addr)
-{
-	if (!init_mem_is_free)
-		return 0;
-
-	return addr >= (unsigned long)__init_begin &&
-		addr < (unsigned long)__init_end;
-}
-
 extern char __head_end[];
 
 #ifdef __powerpc64__
diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
index 792eefaf230b..27574f218b37 100644
--- a/arch/powerpc/include/asm/security_features.h
+++ b/arch/powerpc/include/asm/security_features.h
@@ -39,6 +39,11 @@ static inline bool security_ftr_enabled(u64 feature)
 	return !!(powerpc_security_features & feature);
 }
 
+#ifdef CONFIG_PPC_BOOK3S_64
+enum stf_barrier_type stf_barrier_type_get(void);
+#else
+static inline enum stf_barrier_type stf_barrier_type_get(void) { return STF_BARRIER_NONE; }
+#endif
 
 // Features indicating support for Spectre/Meltdown mitigations
diff --git a/arch/powerpc/include/asm/simple_spinlock.h b/arch/powerpc/include/asm/simple_spinlock.h
index 8985791a2ba5..7ae6aeef8464 100644
--- a/arch/powerpc/include/asm/simple_spinlock.h
+++ b/arch/powerpc/include/asm/simple_spinlock.h
@@ -123,27 +123,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 	}
 }
 
-static inline
-void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
-{
-	unsigned long flags_dis;
-
-	while (1) {
-		if (likely(__arch_spin_trylock(lock) == 0))
-			break;
-		local_save_flags(flags_dis);
-		local_irq_restore(flags);
-		do {
-			HMT_low();
-			if (is_shared_processor())
-				splpar_spin_yield(lock);
-		} while (unlikely(lock->slock != 0));
-		HMT_medium();
-		local_irq_restore(flags_dis);
-	}
-}
-#define arch_spin_lock_flags arch_spin_lock_flags
-
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__asm__ __volatile__("# arch_spin_unlock\n\t"
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 7ef1cd8168a0..007332a4a732 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -87,22 +87,7 @@ int is_cpu_dead(unsigned int cpu);
 /* 32-bit */
 extern int smp_hw_index[];
 
-/*
- * This is particularly ugly: it appears we can't actually get the definition
- * of task_struct here, but we need access to the CPU this task is running on.
- * Instead of using task_struct we're using _TASK_CPU which is extracted from
- * asm-offsets.h by kbuild to get the current processor ID.
- *
- * This also needs to be safeguarded when building asm-offsets.s because at
- * that time _TASK_CPU is not defined yet. It could have been guarded by
- * _TASK_CPU itself, but we want the build to fail if _TASK_CPU is missing
- * when building something else than asm-offsets.s
- */
-#ifdef GENERATING_ASM_OFFSETS
-#define raw_smp_processor_id()		(0)
-#else
-#define raw_smp_processor_id()	(*(unsigned int *)((void *)current + _TASK_CPU))
-#endif
+#define raw_smp_processor_id()		(current_thread_info()->cpu)
 #define hard_smp_processor_id()	(smp_hw_index[smp_processor_id()])
 
 static inline int get_hard_smp_processor_id(int cpu)
diff --git a/arch/powerpc/include/asm/static_call.h b/arch/powerpc/include/asm/static_call.h
new file mode 100644
index 000000000000..0a0bc79bd1fa
--- /dev/null
+++ b/arch/powerpc/include/asm/static_call.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_STATIC_CALL_H
+#define _ASM_POWERPC_STATIC_CALL_H
+
+#define __PPC_SCT(name, inst)					\
+	asm(".pushsection .text, \"ax\"			\n"	\
+	    ".align 5					\n"	\
+	    ".globl " STATIC_CALL_TRAMP_STR(name) "	\n"	\
+	    STATIC_CALL_TRAMP_STR(name) ":		\n"	\
+	    inst "					\n"	\
+	    "	lis	12,2f@ha			\n"	\
+	    "	lwz	12,2f@l(12)			\n"	\
+	    "	mtctr	12				\n"	\
+	    "	bctr					\n"	\
+	    "1:	li	3, 0				\n"	\
+	    "	blr					\n"	\
+	    "2:	.long 0					\n"	\
+	    ".type " STATIC_CALL_TRAMP_STR(name) ", @function	\n"	\
+	    ".size " STATIC_CALL_TRAMP_STR(name) ", . - " STATIC_CALL_TRAMP_STR(name) "	\n"	\
+	    ".popsection				\n")
+
+#define PPC_SCT_RET0		20	/* Offset of label 1 */
+#define PPC_SCT_DATA		28	/* Offset of label 2 */
+
+#define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func)	__PPC_SCT(name, "b " #func)
+#define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)	__PPC_SCT(name, "blr")
+
+#endif /* _ASM_POWERPC_STATIC_CALL_H */
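Note: the trampoline above is five instructions (20 bytes) before label 1
and 28 bytes before label 2, matching PPC_SCT_RET0 and PPC_SCT_DATA.
Generic usage of the static_call API these trampolines back, with invented
names (see include/linux/static_call.h):

#include <linux/static_call.h>

static int my_handler(int x)
{
	return x + 1;
}

DEFINE_STATIC_CALL(my_call, my_handler);

int use_it(int x)
{
	/* Call site is patched at runtime to reach the current target. */
	return static_call(my_call)(x);
}

/* Retarget later with: static_call_update(my_call, other_handler); */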
- " STATIC_CALL_TRAMP_STR(name) " \n" \ + ".popsection \n") + +#define PPC_SCT_RET0 20 /* Offset of label 1 */ +#define PPC_SCT_DATA 28 /* Offset of label 2 */ + +#define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func) __PPC_SCT(name, "b " #func) +#define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) __PPC_SCT(name, "blr") + +#endif /* _ASM_POWERPC_STATIC_CALL_H */ diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index b4ec6c7dd72e..5725029aaa29 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -47,6 +47,9 @@ struct thread_info { int preempt_count; /* 0 => preemptable, <0 => BUG */ +#ifdef CONFIG_SMP + unsigned int cpu; +#endif unsigned long local_flags; /* private flags for thread */ #ifdef CONFIG_LIVEPATCH unsigned long *livepatch_sp; diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index 22c79ab40006..63316100080c 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -86,7 +86,7 @@ __pu_failed: \ "1: " op "%U1%X1 %0,%1 # put_user\n" \ EX_TABLE(1b, %l2) \ : \ - : "r" (x), "m"UPD_CONSTR (*addr) \ + : "r" (x), "m<>" (*addr) \ : \ : label) @@ -143,7 +143,7 @@ do { \ "1: "op"%U1%X1 %0, %1 # get_user\n" \ EX_TABLE(1b, %l2) \ : "=r" (x) \ - : "m"UPD_CONSTR (*addr) \ + : "m<>" (*addr) \ : \ : label) @@ -200,7 +200,7 @@ __gus_failed: \ ".previous\n" \ EX_TABLE(1b, 3b) \ : "=r" (err), "=r" (x) \ - : "m"UPD_CONSTR (*addr), "i" (-EFAULT), "0" (err)) + : "m<>" (*addr), "i" (-EFAULT), "0" (err)) #ifdef __powerpc64__ #define __get_user_asm2(x, addr, err) \ |