Diffstat (limited to 'arch/powerpc/include')
33 files changed, 380 insertions, 164 deletions
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index c0deafc212b8..25d42bd3f114 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -74,6 +74,11 @@ do {			\
 	___p1;			\
 })
 
+/*
+ * This must resolve to hwsync on SMP for the context switch path.
+ * See _switch, and core scheduler context switch memory ordering
+ * comments.
+ */
 #define smp_mb__before_spinlock()   smp_mb()
 
 #include <asm-generic/barrier.h>
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index 33a24fdd7958..b750ffef83c7 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -206,68 +206,13 @@ static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr)
  * Return the zero-based bit position (LE, not IBM bit numbering) of
  * the most significant 1-bit in a double word.
  */
-static __inline__ __attribute__((const))
-int __ilog2(unsigned long x)
-{
-	int lz;
+#define __ilog2(x)	ilog2(x)
 
-	asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (x));
-	return BITS_PER_LONG - 1 - lz;
-}
+#include <asm-generic/bitops/ffz.h>
 
-static inline __attribute__((const))
-int __ilog2_u32(u32 n)
-{
-	int bit;
-	asm ("cntlzw %0,%1" : "=r" (bit) : "r" (n));
-	return 31 - bit;
-}
+#include <asm-generic/bitops/builtin-__ffs.h>
 
-#ifdef __powerpc64__
-static inline __attribute__((const))
-int __ilog2_u64(u64 n)
-{
-	int bit;
-	asm ("cntlzd %0,%1" : "=r" (bit) : "r" (n));
-	return 63 - bit;
-}
-#endif
-
-/*
- * Determines the bit position of the least significant 0 bit in the
- * specified double word. The returned bit position will be
- * zero-based, starting from the right side (63/31 - 0).
- */
-static __inline__ unsigned long ffz(unsigned long x)
-{
-	/* no zero exists anywhere in the 8 byte area. */
-	if ((x = ~x) == 0)
-		return BITS_PER_LONG;
-
-	/*
-	 * Calculate the bit position of the least significant '1' bit in x
-	 * (since x has been changed this will actually be the least significant
-	 * '0' bit in * the original x).  Note: (x & -x) gives us a mask that
-	 * is the least significant * (RIGHT-most) 1-bit of the value in x.
-	 */
-	return __ilog2(x & -x);
-}
-
-static __inline__ unsigned long __ffs(unsigned long x)
-{
-	return __ilog2(x & -x);
-}
-
-/*
- * ffs: find first bit set. This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
- */
-static __inline__ int ffs(int x)
-{
-	unsigned long i = (unsigned long)x;
-	return __ilog2(i & -i) + 1;
-}
+#include <asm-generic/bitops/builtin-ffs.h>
 
 /*
  * fls: find last (most-significant) bit set.
@@ -275,33 +220,15 @@ static __inline__ int ffs(int x)
  */
 static __inline__ int fls(unsigned int x)
 {
-	int lz;
-
-	asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
-	return 32 - lz;
+	return 32 - __builtin_clz(x);
 }
 
-static __inline__ unsigned long __fls(unsigned long x)
-{
-	return __ilog2(x);
-}
+#include <asm-generic/bitops/builtin-__fls.h>
 
-/*
- * 64-bit can do this using one cntlzd (count leading zeroes doubleword)
- * instruction; for 32-bit we use the generic version, which does two
- * 32-bit fls calls.
- */
-#ifdef __powerpc64__
 static __inline__ int fls64(__u64 x)
 {
-	int lz;
-
-	asm ("cntlzd %0,%1" : "=r" (lz) : "r" (x));
-	return 64 - lz;
+	return 64 - __builtin_clzll(x);
 }
-#else
-#include <asm-generic/bitops/fls64.h>
-#endif /* __powerpc64__ */
 
 #ifdef CONFIG_PPC64
 unsigned int __arch_hweight8(unsigned int w);
diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h
index d310546e5d9d..a120e7f8d535 100644
--- a/arch/powerpc/include/asm/book3s/32/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h
@@ -31,7 +31,8 @@ extern struct kmem_cache *pgtable_cache[];
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
+	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
+			pgtable_gfp_flags(mm, GFP_KERNEL));
 }
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 26ed228d4dc6..7fb755880409 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -297,6 +297,8 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm,
 extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
 		      pmd_t **pmdp);
 
+int map_kernel_page(unsigned long va, phys_addr_t pa, int flags);
+
 /* Generic accessors to PTE bits */
 static inline int pte_write(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_RW);}
 static inline int pte_dirty(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_DIRTY); }
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index 4e957b027fe0..0ce513f2926f 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -89,6 +89,9 @@ static inline int hash__pgd_bad(pgd_t pgd)
 {
 	return (pgd_val(pgd) == 0);
 }
+#ifdef CONFIG_STRICT_KERNEL_RWX
+extern void hash__mark_rodata_ro(void);
+#endif
 
 extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 			    pte_t *ptep, unsigned long pte, int huge);
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index cd5e7aa8cc34..20b1485ff1e8 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -53,10 +53,11 @@ extern void __tlb_remove_table(void *_table);
 static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
 {
 #ifdef CONFIG_PPC_64K_PAGES
-	return (pgd_t *)__get_free_page(PGALLOC_GFP);
+	return (pgd_t *)__get_free_page(pgtable_gfp_flags(mm, PGALLOC_GFP));
 #else
 	struct page *page;
-	page = alloc_pages(PGALLOC_GFP | __GFP_REPEAT, 4);
+	page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_REPEAT),
+				4);
 	if (!page)
 		return NULL;
 	return (pgd_t *) page_address(page);
@@ -76,7 +77,8 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	if (radix_enabled())
 		return radix__pgd_alloc(mm);
-	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
+	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
+		pgtable_gfp_flags(mm, GFP_KERNEL));
 }
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
@@ -93,7 +95,8 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
+	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
+		pgtable_gfp_flags(mm, GFP_KERNEL));
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -119,7 +122,8 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL);
+	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
+		pgtable_gfp_flags(mm, GFP_KERNEL));
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
@@ -168,7 +172,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 	struct page *page;
 	pte_t *pte;
 
-	pte = pte_alloc_one_kernel(mm, address);
+	pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT);
 	if (!pte)
 		return NULL;
 	page = virt_to_page(pte);
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 85bc9875c3be..c0737c86a362 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -5,6 +5,7 @@
 
 #ifndef __ASSEMBLY__
 #include <linux/mmdebug.h>
+#include <linux/bug.h>
 #endif
 
 /*
@@ -79,6 +80,9 @@
 
 #define _PAGE_SOFT_DIRTY	_RPAGE_SW3 /* software: software dirty tracking */
 #define _PAGE_SPECIAL		_RPAGE_SW2 /* software: special page */
+#define _PAGE_DEVMAP		_RPAGE_SW1 /* software: ZONE_DEVICE page */
+#define __HAVE_ARCH_PTE_DEVMAP
+
 /*
  * Drivers request for cache inhibited pte mapping using _PAGE_NO_CACHE
  * Instead of fixing all of them, add an alternate define which
@@ -599,6 +603,16 @@ static inline pte_t pte_mkhuge(pte_t pte)
 	return pte;
 }
 
+static inline pte_t pte_mkdevmap(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_SPECIAL|_PAGE_DEVMAP);
+}
+
+static inline int pte_devmap(pte_t pte)
+{
+	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_DEVMAP));
+}
+
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	/* FIXME!! check whether this need to be a conditional */
@@ -1146,6 +1160,37 @@ static inline bool arch_needs_pgtable_deposit(void)
 	return true;
 }
+
+static inline pmd_t pmd_mkdevmap(pmd_t pmd)
+{
+	return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP));
+}
+
+static inline int pmd_devmap(pmd_t pmd)
+{
+	return pte_devmap(pmd_pte(pmd));
+}
+
+static inline int pud_devmap(pud_t pud)
+{
+	return 0;
+}
+
+static inline int pgd_devmap(pgd_t pgd)
+{
+	return 0;
+}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+static inline const int pud_pfn(pud_t pud)
+{
+	/*
+	 * Currently all calls to pud_pfn() are gated around a pud_devmap()
+	 * check so this should never be used. If it grows another user we
+	 * want to know about it.
+	 */
+	BUILD_BUG();
+	return 0;
+}
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index ac16d1943022..487709ff6875 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -116,6 +116,10 @@
 #define RADIX_PUD_TABLE_SIZE	(sizeof(pud_t) << RADIX_PUD_INDEX_SIZE)
 #define RADIX_PGD_TABLE_SIZE	(sizeof(pgd_t) << RADIX_PGD_INDEX_SIZE)
 
+#ifdef CONFIG_STRICT_KERNEL_RWX
+extern void radix__mark_rodata_ro(void);
+#endif
+
 static inline unsigned long __radix_pte_update(pte_t *ptep, unsigned long clr,
 					       unsigned long set)
 {
@@ -252,7 +256,7 @@ static inline int radix__pgd_bad(pgd_t pgd)
 
 static inline int radix__pmd_trans_huge(pmd_t pmd)
 {
-	return !!(pmd_val(pmd) & _PAGE_PTE);
+	return (pmd_val(pmd) & (_PAGE_PTE | _PAGE_DEVMAP)) == _PAGE_PTE;
 }
 
 static inline pmd_t radix__pmd_mkhuge(pmd_t pmd)
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index abef812de7f8..5482928eea1b 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -83,8 +83,16 @@ static inline unsigned long ppc_function_entry(void *func)
 	 * On PPC64 ABIv1 the function pointer actually points to the
 	 * function's descriptor. The first entry in the descriptor is the
 	 * address of the function text.
+	 *
+	 * However, we may also receive pointer to an assembly symbol. To
+	 * detect that, we first check if the function pointer we receive
+	 * already points to kernel/module text and we only dereference it
+	 * if it doesn't.
 	 */
-	return ((func_descr_t *)func)->entry;
+	if (kernel_text_address((unsigned long)func))
+		return (unsigned long)func;
+	else
+		return ((func_descr_t *)func)->entry;
 #else
 	return (unsigned long)func;
 #endif
diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h
index f70cbfe0ec04..9f2ae0d25e15 100644
--- a/arch/powerpc/include/asm/dbell.h
+++ b/arch/powerpc/include/asm/dbell.h
@@ -56,6 +56,19 @@ static inline void ppc_msgsync(void)
 				: : "i" (CPU_FTR_HVMODE|CPU_FTR_ARCH_300));
 }
 
+static inline void _ppc_msgclr(u32 msg)
+{
+	__asm__ __volatile__ (ASM_FTR_IFSET(PPC_MSGCLR(%1), PPC_MSGCLRP(%1), %0)
+				: : "i" (CPU_FTR_HVMODE), "r" (msg));
+}
+
+static inline void ppc_msgclr(enum ppc_dbell type)
+{
+	u32 msg = PPC_DBELL_TYPE(type);
+
+	_ppc_msgclr(msg);
+}
+
 #else /* CONFIG_PPC_BOOK3S */
 
 #define PPC_DBELL_MSGTYPE		PPC_DBELL
diff --git a/arch/powerpc/include/asm/delay.h b/arch/powerpc/include/asm/delay.h
index 52e4d54da2a9..3df4417dd9c8 100644
--- a/arch/powerpc/include/asm/delay.h
+++ b/arch/powerpc/include/asm/delay.h
@@ -2,6 +2,7 @@
 #define _ASM_POWERPC_DELAY_H
 #ifdef __KERNEL__
+#include <linux/processor.h>
 #include <asm/time.h>
 
 /*
@@ -58,11 +59,18 @@ extern void udelay(unsigned long usecs);
 	typeof(condition) __ret;				\
 	unsigned long __loops = tb_ticks_per_usec * timeout;	\
 	unsigned long __start = get_tbl();			\
-	while (!(__ret = (condition)) && (tb_ticks_since(__start) <= __loops)) \
-		if (delay)					\
+								\
+	if (delay) {						\
+		while (!(__ret = (condition)) &&		\
+				(tb_ticks_since(__start) <= __loops)) \
 			udelay(delay);				\
-		else						\
-			cpu_relax();				\
+	} else {						\
+		spin_begin();					\
+		while (!(__ret = (condition)) &&		\
+				(tb_ticks_since(__start) <= __loops)) \
+			spin_cpu_relax();			\
+		spin_end();					\
+	}							\
 	if (!__ret)						\
 		__ret = (condition);				\
 	__ret;							\
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 183d73b6ed99..9a318973af05 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -36,20 +36,38 @@
  */
 #include <asm/head-64.h>
 
+/* PACA save area offsets (exgen, exmc, etc) */
 #define EX_R9		0
 #define EX_R10		8
 #define EX_R11		16
 #define EX_R12		24
 #define EX_R13		32
-#define EX_SRR0		40
-#define EX_DAR		48
-#define EX_DSISR	56
-#define EX_CCR		60
-#define EX_R3		64
-#define EX_LR		72
-#define EX_CFAR		80
-#define EX_PPR		88	/* SMT thread status register (priority) */
-#define EX_CTR		96
+#define EX_DAR		40
+#define EX_DSISR	48
+#define EX_CCR		52
+#define EX_CFAR		56
+#define EX_PPR		64
+#if defined(CONFIG_RELOCATABLE)
+#define EX_CTR		72
+#define EX_SIZE		10	/* size in u64 units */
+#else
+#define EX_SIZE		9	/* size in u64 units */
+#endif
+
+/*
+ * EX_LR is only used in EXSLB and where it does not overlap with EX_DAR
+ * EX_CCR similarly with DSISR, but being 4 byte registers there is a hole
+ * in the save area so it's not necessary to overlap them. Could be used
+ * for future savings though if another 4 byte register was to be saved.
+ */
+#define EX_LR		EX_DAR
+
+/*
+ * EX_R3 is only used by the bad_stack handler. bad_stack reloads and
+ * saves DAR from SPRN_DAR, and EX_DAR is not used. So EX_R3 can overlap
+ * with EX_DAR.
+ */
+#define EX_R3		EX_DAR
 
 #ifdef CONFIG_RELOCATABLE
 #define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h)	\
@@ -236,6 +254,19 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 #define kvmppc_interrupt kvmppc_interrupt_pr
 #endif
 
+/*
+ * Branch to label using its 0xC000 address. This results in instruction
+ * address suitable for MSR[IR]=0 or 1, which allows relocation to be turned
+ * on using mtmsr rather than rfid.
+ *
+ * This could set the 0xc bits for !RELOCATABLE as an immediate, rather than
+ * load KBASE for a slight optimisation.
+ */
+#define BRANCH_TO_C000(reg, label)	\
+	__LOAD_HANDLER(reg, label);	\
+	mtctr	reg;			\
+	bctr
+
 #ifdef CONFIG_RELOCATABLE
 #define BRANCH_TO_COMMON(reg, label)	\
 	__LOAD_HANDLER(reg, label);	\
diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
index 60b91084f33c..ce88bbe1d809 100644
--- a/arch/powerpc/include/asm/fadump.h
+++ b/arch/powerpc/include/asm/fadump.h
@@ -43,6 +43,9 @@
 #define MIN_BOOT_MEM	(((RMA_END < (0x1UL << 28)) ? (0x1UL << 28) : RMA_END) \
			+ (0x1UL << 26))
 
+/* The upper limit percentage for user specified boot memory size (25%) */
+#define MAX_BOOT_MEM_RATIO			4
+
 #define memblock_num_regions(memblock_type)	(memblock.memblock_type.cnt)
 
 /* Firmware provided dump sections */
@@ -200,6 +203,7 @@ struct fad_crash_memory_ranges {
 	unsigned long long	size;
 };
 
+extern int is_fadump_boot_memory_area(u64 addr, ulong size);
 extern int early_init_dt_scan_fw_dump(unsigned long node,
 		const char *uname, int depth, void *data);
 extern int fadump_reserve_mem(void);
diff --git a/arch/powerpc/include/asm/head-64.h b/arch/powerpc/include/asm/head-64.h
index 86eb87382031..d81eac5b509f 100644
--- a/arch/powerpc/include/asm/head-64.h
+++ b/arch/powerpc/include/asm/head-64.h
@@ -3,6 +3,7 @@
 
 #include <asm/cache.h>
 
+#ifdef __ASSEMBLY__
 /*
  * We can't do CPP stringification and concatination directly into the section
  * name for some reason, so these macros can do it for us.
@@ -49,8 +50,8 @@
  *   CLOSE_FIXED_SECTION() or elsewhere, there may be something
  *   unexpected being added there. Remove the '. = x_len' line, rebuild, and
  *   check what is pushing the section down.
- * - If the build dies in linking, check arch/powerpc/kernel/vmlinux.lds.S
- *   for instructions.
+ * - If the build dies in linking, check arch/powerpc/tools/head_check.sh
+ *   comments.
  * - If the kernel crashes or hangs in very early boot, it could be linker
  *   stubs at the start of the main text.
  */
@@ -63,11 +64,29 @@
	. = 0x0;						\
start_##sname:
 
+/*
+ * .linker_stub_catch section is used to catch linker stubs from being
+ * inserted in our .text section, above the start_text label (which breaks
+ * the ABS_ADDR calculation). See kernel/vmlinux.lds.S and tools/head_check.sh
+ * for more details. We would prefer to just keep a cacheline (0x80), but
+ * 0x100 seems to be how the linker aligns branch stub groups.
+ */
+#ifdef CONFIG_LD_HEAD_STUB_CATCH
+#define OPEN_TEXT_SECTION(start)				\
+	.section ".linker_stub_catch","ax",@progbits;		\
+linker_stub_catch:						\
+	. = 0x4;						\
+	text_start = (start) + 0x100;				\
+	.section ".text","ax",@progbits;			\
+	.balign 0x100;						\
+start_text:
+#else
 #define OPEN_TEXT_SECTION(start)				\
	text_start = (start);					\
	.section ".text","ax",@progbits;			\
	. = 0x0;						\
start_text:
+#endif
 
 #define ZERO_FIXED_SECTION(sname, start, end)			\
	sname##_start = (start);				\
@@ -397,4 +416,6 @@ name:
	EXC_COMMON_BEGIN(name);					\
	STD_EXCEPTION_COMMON(realvec + 0x2, name, hdlr);	\
 
+#endif /* __ASSEMBLY__ */
+
 #endif /* _ASM_POWERPC_HEAD_64_H */
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index eba60416536e..c1dd1929342d 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -129,6 +129,10 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
 }
 
 extern bool prep_irq_for_idle(void);
+extern bool prep_irq_for_idle_irqsoff(void);
+extern void irq_set_pending_from_srr1(unsigned long srr1);
+
+#define fini_irq_for_idle_irqsoff() trace_hardirqs_off();
 
 extern void force_external_irq_replay(void);
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index f90b22c722e1..cd2fc1cc1cc7 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -226,6 +226,7 @@ struct machdep_calls {
 extern void e500_idle(void);
 extern void power4_idle(void);
 extern void power7_idle(void);
+extern void power9_idle(void);
 extern void ppc6xx_idle(void);
 extern void book3e_idle(void);
diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h
index 81eff8631434..190d69a7f701 100644
--- a/arch/powerpc/include/asm/mce.h
+++ b/arch/powerpc/include/asm/mce.h
@@ -90,13 +90,14 @@ enum MCE_UserErrorType {
 enum MCE_RaErrorType {
 	MCE_RA_ERROR_INDETERMINATE = 0,
 	MCE_RA_ERROR_IFETCH = 1,
-	MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH = 2,
-	MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN = 3,
-	MCE_RA_ERROR_LOAD = 4,
-	MCE_RA_ERROR_STORE = 5,
-	MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 6,
-	MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN = 7,
-	MCE_RA_ERROR_LOAD_STORE_FOREIGN = 8,
+	MCE_RA_ERROR_IFETCH_FOREIGN = 2,
+	MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH = 3,
+	MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN = 4,
+	MCE_RA_ERROR_LOAD = 5,
+	MCE_RA_ERROR_STORE = 6,
+	MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 7,
+	MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN = 8,
+	MCE_RA_ERROR_LOAD_STORE_FOREIGN = 9,
 };
 
 enum MCE_LinkErrorType {
diff --git a/arch/powerpc/include/asm/nohash/32/pgalloc.h b/arch/powerpc/include/asm/nohash/32/pgalloc.h
index 633139291a48..cc369a70f2bb 100644
--- a/arch/powerpc/include/asm/nohash/32/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/32/pgalloc.h
@@ -31,7 +31,8 @@ extern struct kmem_cache *pgtable_cache[];
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
+	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
+			pgtable_gfp_flags(mm, GFP_KERNEL));
 }
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 5134ade2e850..91314268f04f 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -340,6 +340,8 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm,
 extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
 		      pmd_t **pmdp);
 
+int map_kernel_page(unsigned long va, phys_addr_t pa, int flags);
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_POWERPC_NOHASH_32_PGTABLE_H */
diff --git a/arch/powerpc/include/asm/nohash/64/pgalloc.h b/arch/powerpc/include/asm/nohash/64/pgalloc.h
index 897d2e1c8a9b..9721c7867b9c 100644
--- a/arch/powerpc/include/asm/nohash/64/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/64/pgalloc.h
@@ -43,7 +43,8 @@ extern struct kmem_cache *pgtable_cache[];
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
+	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
+		pgtable_gfp_flags(mm, GFP_KERNEL));
 }
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
@@ -57,7 +58,8 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
+	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
+			pgtable_gfp_flags(mm, GFP_KERNEL));
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -96,7 +98,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 	struct page *page;
 	pte_t *pte;
 
-	pte = pte_alloc_one_kernel(mm, address);
+	pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT);
 	if (!pte)
 		return NULL;
 	page = virt_to_page(pte);
@@ -189,7 +191,8 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL);
+	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
+			pgtable_gfp_flags(mm, GFP_KERNEL));
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h
index cb3e6242a78c..ef930ba500f9 100644
--- a/arch/powerpc/include/asm/opal-api.h
+++ b/arch/powerpc/include/asm/opal-api.h
@@ -667,12 +667,14 @@ enum {
 
 enum {
	OPAL_PHB_ERROR_DATA_TYPE_P7IOC = 1,
-	OPAL_PHB_ERROR_DATA_TYPE_PHB3 = 2
+	OPAL_PHB_ERROR_DATA_TYPE_PHB3 = 2,
+	OPAL_PHB_ERROR_DATA_TYPE_PHB4 = 3
 };
 
 enum {
	OPAL_P7IOC_NUM_PEST_REGS = 128,
-	OPAL_PHB3_NUM_PEST_REGS = 256
+	OPAL_PHB3_NUM_PEST_REGS = 256,
+	OPAL_PHB4_NUM_PEST_REGS = 512
 };
 
 struct OpalIoPhbErrorCommon {
@@ -802,6 +804,75 @@ struct OpalIoPhb3ErrorData {
	__be64 pestB[OPAL_PHB3_NUM_PEST_REGS];
 };
 
+struct OpalIoPhb4ErrorData {
+	struct OpalIoPhbErrorCommon common;
+
+	__be32 brdgCtl;
+
+	/* PHB4 cfg regs */
+	__be32 deviceStatus;
+	__be32 slotStatus;
+	__be32 linkStatus;
+	__be32 devCmdStatus;
+	__be32 devSecStatus;
+
+	/* cfg AER regs */
+	__be32 rootErrorStatus;
+	__be32 uncorrErrorStatus;
+	__be32 corrErrorStatus;
+	__be32 tlpHdr1;
+	__be32 tlpHdr2;
+	__be32 tlpHdr3;
+	__be32 tlpHdr4;
+	__be32 sourceId;
+
+	/* PHB4 ETU Error Regs */
+	__be64 nFir;				/* 000 */
+	__be64 nFirMask;			/* 003 */
+	__be64 nFirWOF;				/* 008 */
+	__be64 phbPlssr;			/* 120 */
+	__be64 phbCsr;				/* 110 */
+	__be64 lemFir;				/* C00 */
+	__be64 lemErrorMask;			/* C18 */
+	__be64 lemWOF;				/* C40 */
+	__be64 phbErrorStatus;			/* C80 */
+	__be64 phbFirstErrorStatus;		/* C88 */
+	__be64 phbErrorLog0;			/* CC0 */
+	__be64 phbErrorLog1;			/* CC8 */
+	__be64 phbTxeErrorStatus;		/* D00 */
+	__be64 phbTxeFirstErrorStatus;		/* D08 */
+	__be64 phbTxeErrorLog0;			/* D40 */
+	__be64 phbTxeErrorLog1;			/* D48 */
+	__be64 phbRxeArbErrorStatus;		/* D80 */
+	__be64 phbRxeArbFirstErrorStatus;	/* D88 */
+	__be64 phbRxeArbErrorLog0;		/* DC0 */
+	__be64 phbRxeArbErrorLog1;		/* DC8 */
+	__be64 phbRxeMrgErrorStatus;		/* E00 */
+	__be64 phbRxeMrgFirstErrorStatus;	/* E08 */
+	__be64 phbRxeMrgErrorLog0;		/* E40 */
+	__be64 phbRxeMrgErrorLog1;		/* E48 */
+	__be64 phbRxeTceErrorStatus;		/* E80 */
+	__be64 phbRxeTceFirstErrorStatus;	/* E88 */
+	__be64 phbRxeTceErrorLog0;		/* EC0 */
+	__be64 phbRxeTceErrorLog1;		/* EC8 */
+
+	/* PHB4 REGB Error Regs */
+	__be64 phbPblErrorStatus;		/* 1900 */
+	__be64 phbPblFirstErrorStatus;		/* 1908 */
+	__be64 phbPblErrorLog0;			/* 1940 */
+	__be64 phbPblErrorLog1;			/* 1948 */
+	__be64 phbPcieDlpErrorLog1;		/* 1AA0 */
+	__be64 phbPcieDlpErrorLog2;		/* 1AA8 */
+	__be64 phbPcieDlpErrorStatus;		/* 1AB0 */
+	__be64 phbRegbErrorStatus;		/* 1C00 */
+	__be64 phbRegbFirstErrorStatus;		/* 1C08 */
+	__be64 phbRegbErrorLog0;		/* 1C40 */
+	__be64 phbRegbErrorLog1;		/* 1C48 */
+
+	__be64 pestA[OPAL_PHB4_NUM_PEST_REGS];
+	__be64 pestB[OPAL_PHB4_NUM_PEST_REGS];
+};
+
 enum {
	OPAL_REINIT_CPUS_HILE_BE	= (1 << 0),
	OPAL_REINIT_CPUS_HILE_LE	= (1 << 1),
@@ -877,6 +948,7 @@ enum {
	OPAL_PHB_CAPI_MODE_SNOOP_OFF	= 2,
	OPAL_PHB_CAPI_MODE_SNOOP_ON	= 3,
	OPAL_PHB_CAPI_MODE_DMA		= 4,
+	OPAL_PHB_CAPI_MODE_DMA_TVT1	= 5,
 };
 
 /* OPAL I2C request */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 1c09f8fe2ee8..dc88a31cc79a 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -21,7 +21,11 @@
 #include <asm/lppaca.h>
 #include <asm/mmu.h>
 #include <asm/page.h>
+#ifdef CONFIG_PPC_BOOK3E
 #include <asm/exception-64e.h>
+#else
+#include <asm/exception-64s.h>
+#endif
 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
 #include <asm/kvm_book3s_asm.h>
 #endif
@@ -98,8 +102,8 @@ struct paca_struct {
	 * Now, starting in cacheline 2, the exception save areas
	 */
	/* used for most interrupts/exceptions */
-	u64 exgen[13] __attribute__((aligned(0x80)));
-	u64 exslb[13];		/* used for SLB/segment table misses
+	u64 exgen[EX_SIZE] __attribute__((aligned(0x80)));
+	u64 exslb[EX_SIZE];	/* used for SLB/segment table misses
				 * on the linear mapping */
	/* SLB related definitions */
	u16 vmalloc_sllp;
@@ -177,12 +181,14 @@ struct paca_struct {
	 * to the sibling threads' paca.
	 */
	struct paca_struct **thread_sibling_pacas;
+	/* The PSSCR value that the kernel requested before going to stop */
+	u64 requested_psscr;
 #endif
 
 #ifdef CONFIG_PPC_STD_MMU_64
	/* Non-maskable exceptions that are not performance critical */
-	u64 exnmi[13];		/* used for system reset (nmi) */
-	u64 exmc[13];		/* used for machine checks */
+	u64 exnmi[EX_SIZE];	/* used for system reset (nmi) */
+	u64 exmc[EX_SIZE];	/* used for machine checks */
 #endif
 #ifdef CONFIG_PPC_BOOK3S_64
	/* Exclusive stacks for system reset and machine check exception.
	 */
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index 0413457ba11d..d795c5d5789c 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -3,6 +3,20 @@
 
 #include <linux/mm.h>
 
+#ifndef MODULE
+static inline gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t gfp)
+{
+	if (unlikely(mm == &init_mm))
+		return gfp;
+	return gfp | __GFP_ACCOUNT;
+}
+#else /* !MODULE */
+static inline gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t gfp)
+{
+	return gfp | __GFP_ACCOUNT;
+}
+#endif /* MODULE */
+
 #ifdef CONFIG_PPC_BOOK3S
 #include <asm/book3s/pgalloc.h>
 #else
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 1a9b45198c06..fa9ebaead91e 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -191,8 +191,7 @@
 /* sorted alphabetically */
 #define PPC_INST_BHRBE			0x7c00025c
 #define PPC_INST_CLRBHRB		0x7c00035c
-#define PPC_INST_COPY			0x7c00060c
-#define PPC_INST_COPY_FIRST		0x7c20060c
+#define PPC_INST_COPY			0x7c20060c
 #define PPC_INST_CP_ABORT		0x7c00068c
 #define PPC_INST_DCBA			0x7c0005ec
 #define PPC_INST_DCBA_MASK		0xfc0007fe
@@ -223,10 +222,10 @@
 #define PPC_INST_MSGCLR			0x7c0001dc
 #define PPC_INST_MSGSYNC		0x7c0006ec
 #define PPC_INST_MSGSNDP		0x7c00011c
+#define PPC_INST_MSGCLRP		0x7c00015c
 #define PPC_INST_MTTMR			0x7c0003dc
 #define PPC_INST_NOP			0x60000000
-#define PPC_INST_PASTE			0x7c00070c
-#define PPC_INST_PASTE_LAST		0x7c20070d
+#define PPC_INST_PASTE			0x7c20070d
 #define PPC_INST_POPCNTB		0x7c0000f4
 #define PPC_INST_POPCNTB_MASK		0xfc0007fe
 #define PPC_INST_POPCNTD		0x7c0003f4
@@ -394,6 +393,8 @@
 
 /* Deal with instructions that older assemblers aren't aware of */
 #define	PPC_CP_ABORT		stringify_in_c(.long PPC_INST_CP_ABORT)
+#define	PPC_COPY(a, b)		stringify_in_c(.long PPC_INST_COPY | \
					___PPC_RA(a) | ___PPC_RB(b))
 #define	PPC_DCBAL(a, b)		stringify_in_c(.long PPC_INST_DCBAL | \
					__PPC_RA(a) | __PPC_RB(b))
 #define	PPC_DCBZL(a, b)		stringify_in_c(.long PPC_INST_DCBZL | \
@@ -411,6 +412,8 @@
					___PPC_RB(b))
 #define PPC_MSGSNDP(b)		stringify_in_c(.long PPC_INST_MSGSNDP | \
					___PPC_RB(b))
+#define PPC_MSGCLRP(b)		stringify_in_c(.long PPC_INST_MSGCLRP | \
					___PPC_RB(b))
 #define PPC_POPCNTB(a, s)	stringify_in_c(.long PPC_INST_POPCNTB | \
					__PPC_RA(a) | __PPC_RS(s))
 #define PPC_POPCNTD(a, s)	stringify_in_c(.long PPC_INST_POPCNTD | \
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 359c44341761..6baeeb9acd0d 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -770,15 +770,18 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
 #else
 #define FIXUP_ENDIAN						   \
	tdi   0,0,0x48;	  /* Reverse endian of b . + 8 */	   \
-	b     $+36;	  /* Skip trampoline if endian is good */  \
-	.long 0x05009f42; /* bcl 20,31,$+4 */			   \
-	.long 0xa602487d; /* mflr r10 */			   \
-	.long 0x1c004a39; /* addi r10,r10,28 */			   \
+	b     $+44;	  /* Skip trampoline if endian is good */  \
	.long 0xa600607d; /* mfmsr r11 */			   \
	.long 0x01006b69; /* xori r11,r11,1 */			   \
+	.long 0x00004039; /* li r10,0 */			   \
+	.long 0x6401417d; /* mtmsrd r10,1 */			   \
+	.long 0x05009f42; /* bcl 20,31,$+4 */			   \
+	.long 0xa602487d; /* mflr r10 */			   \
+	.long 0x14004a39; /* addi r10,r10,20 */			   \
	.long 0xa6035a7d; /* mtsrr0 r10 */			   \
	.long 0xa6037b7d; /* mtsrr1 r11 */			   \
	.long 0x2400004c  /* rfid */
+
 #endif /* !CONFIG_PPC_BOOK3E */
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 1189d04f3bd1..fab7ff877304 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -421,6 +421,26 @@ static inline unsigned long __pack_fe01(unsigned int fpmode)
 
 #ifdef CONFIG_PPC64
 #define cpu_relax()	do { HMT_low(); HMT_medium(); barrier(); } while (0)
+
+#define spin_begin()	HMT_low()
+
+#define spin_cpu_relax()	barrier()
+
+#define spin_cpu_yield()	spin_cpu_relax()
+
+#define spin_end()	HMT_medium()
+
+#define spin_until_cond(cond)					\
+do {								\
+	if (unlikely(!(cond))) {				\
+		spin_begin();					\
+		do {						\
+			spin_cpu_relax();			\
+		} while (!(cond));				\
+		spin_end();					\
+	}							\
+} while (0)
+
 #else
 #define cpu_relax()	barrier()
 #endif
@@ -474,11 +494,11 @@ extern unsigned long cpuidle_disable;
 enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
 
 extern int powersave_nap;	/* set if nap mode can be used in idle loop */
-extern unsigned long power7_nap(int check_irq);
-extern unsigned long power7_sleep(void);
-extern unsigned long power7_winkle(void);
-extern unsigned long power9_idle_stop(unsigned long stop_psscr_val,
-				      unsigned long stop_psscr_mask);
+extern unsigned long power7_idle_insn(unsigned long type); /* PNV_THREAD_NAP/etc*/
+extern void power7_idle_type(unsigned long type);
+extern unsigned long power9_idle_stop(unsigned long psscr_val);
+extern void power9_idle_type(unsigned long stop_psscr_val,
+			     unsigned long stop_psscr_mask);
 
 extern void flush_instruction_cache(void);
 extern void hard_reset_now(void);
diff --git a/arch/powerpc/include/asm/trace.h b/arch/powerpc/include/asm/trace.h
index c05cef6ee06c..18f168aebae3 100644
--- a/arch/powerpc/include/asm/trace.h
+++ b/arch/powerpc/include/asm/trace.h
@@ -168,6 +168,39 @@ TRACE_EVENT(hash_fault,
		      __entry->addr, __entry->access, __entry->trap)
 );
 
+
+TRACE_EVENT(tlbie,
+
+	TP_PROTO(unsigned long lpid, unsigned long local, unsigned long rb,
+		unsigned long rs, unsigned long ric, unsigned long prs,
+		unsigned long r),
+	TP_ARGS(lpid, local, rb, rs, ric, prs, r),
+	TP_STRUCT__entry(
+		__field(unsigned long, lpid)
+		__field(unsigned long, local)
+		__field(unsigned long, rb)
+		__field(unsigned long, rs)
+		__field(unsigned long, ric)
+		__field(unsigned long, prs)
+		__field(unsigned long, r)
+		),
+
+	TP_fast_assign(
+		__entry->lpid = lpid;
+		__entry->local = local;
+		__entry->rb = rb;
+		__entry->rs = rs;
+		__entry->ric = ric;
+		__entry->prs = prs;
+		__entry->r = r;
+		),
+
+	TP_printk("lpid=%ld, local=%ld, rb=0x%lx, rs=0x%lx, ric=0x%lx, "
+		"prs=0x%lx, r=0x%lx", __entry->lpid, __entry->local,
+		__entry->rb, __entry->rs, __entry->ric, __entry->prs,
+		__entry->r)
+);
+
 #endif /* _TRACE_POWERPC_H */
 
 #undef TRACE_INCLUDE_PATH
diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild
index b15bf6bc0e94..0d960ef78a9a 100644
--- a/arch/powerpc/include/uapi/asm/Kbuild
+++ b/arch/powerpc/include/uapi/asm/Kbuild
@@ -1,2 +1,8 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
+
+generic-y += param.h
+generic-y += poll.h
+generic-y += resource.h
+generic-y += sockios.h
+generic-y += statfs.h
diff --git a/arch/powerpc/include/uapi/asm/param.h b/arch/powerpc/include/uapi/asm/param.h
deleted file mode 100644
index 965d45427975..000000000000
--- a/arch/powerpc/include/uapi/asm/param.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/param.h>
diff --git a/arch/powerpc/include/uapi/asm/poll.h b/arch/powerpc/include/uapi/asm/poll.h
deleted file mode 100644
index c98509d3149e..000000000000
--- a/arch/powerpc/include/uapi/asm/poll.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/poll.h>
diff --git a/arch/powerpc/include/uapi/asm/resource.h b/arch/powerpc/include/uapi/asm/resource.h
deleted file mode 100644
index 04bc4db8921b..000000000000
--- a/arch/powerpc/include/uapi/asm/resource.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/resource.h>
diff --git a/arch/powerpc/include/uapi/asm/sockios.h b/arch/powerpc/include/uapi/asm/sockios.h
deleted file mode 100644
index 55cef7675a31..000000000000
--- a/arch/powerpc/include/uapi/asm/sockios.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef _ASM_POWERPC_SOCKIOS_H
-#define _ASM_POWERPC_SOCKIOS_H
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-/* Socket-level I/O control calls. */
-#define FIOSETOWN	0x8901
-#define SIOCSPGRP	0x8902
-#define FIOGETOWN	0x8903
-#define SIOCGPGRP	0x8904
-#define SIOCATMARK	0x8905
-#define SIOCGSTAMP	0x8906		/* Get stamp (timeval) */
-#define SIOCGSTAMPNS	0x8907		/* Get stamp (timespec) */
-
-#endif	/* _ASM_POWERPC_SOCKIOS_H */
diff --git a/arch/powerpc/include/uapi/asm/statfs.h b/arch/powerpc/include/uapi/asm/statfs.h
deleted file mode 100644
index 5244834583a4..000000000000
--- a/arch/powerpc/include/uapi/asm/statfs.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_POWERPC_STATFS_H
-#define _ASM_POWERPC_STATFS_H
-
-#include <asm-generic/statfs.h>
-
-#endif