Diffstat (limited to 'arch/powerpc/include/asm')
53 files changed, 698 insertions, 210 deletions
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild index 5c4fbc80dc6c..2542ea15d338 100644 --- a/arch/powerpc/include/asm/Kbuild +++ b/arch/powerpc/include/asm/Kbuild @@ -8,3 +8,4 @@ generic-y += mcs_spinlock.h generic-y += preempt.h generic-y += rwsem.h generic-y += vtime.h +generic-y += msi.h diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h index cee3aa087653..7f2a7702596c 100644 --- a/arch/powerpc/include/asm/asm-compat.h +++ b/arch/powerpc/include/asm/asm-compat.h @@ -25,7 +25,7 @@ #define PPC_LCMPI stringify_in_c(cmpdi) #define PPC_LCMPLI stringify_in_c(cmpldi) #define PPC_LCMP stringify_in_c(cmpd) -#define PPC_LONG stringify_in_c(.llong) +#define PPC_LONG stringify_in_c(.8byte) #define PPC_LONG_ALIGN stringify_in_c(.balign 8) #define PPC_TLNEI stringify_in_c(tdnei) #define PPC_LLARX(t, a, b, eh) PPC_LDARX(t, a, b, eh) diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index 7fb755880409..4d453f979553 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -294,13 +294,11 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm, #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 }) -extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, - pmd_t **pmdp); - int map_kernel_page(unsigned long va, phys_addr_t pa, int flags); /* Generic accessors to PTE bits */ static inline int pte_write(pte_t pte) { return !!(pte_val(pte) & _PAGE_RW);} +static inline int pte_read(pte_t pte) { return 1; } static inline int pte_dirty(pte_t pte) { return !!(pte_val(pte) & _PAGE_DIRTY); } static inline int pte_young(pte_t pte) { return !!(pte_val(pte) & _PAGE_ACCESSED); } static inline int pte_special(pte_t pte) { return !!(pte_val(pte) & _PAGE_SPECIAL); } diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h index 36fc7bfe9e11..f88452019114 100644 --- a/arch/powerpc/include/asm/book3s/64/hash.h +++ b/arch/powerpc/include/asm/book3s/64/hash.h @@ -40,7 +40,7 @@ * Define the address range of the kernel non-linear virtual area */ #define H_KERN_VIRT_START ASM_CONST(0xD000000000000000) -#define H_KERN_VIRT_SIZE ASM_CONST(0x0000100000000000) +#define H_KERN_VIRT_SIZE ASM_CONST(0x0000400000000000) /* 64T */ /* * The vmalloc space starts at the beginning of that region, and @@ -48,9 +48,11 @@ * (we keep a quarter for the virtual memmap) */ #define H_VMALLOC_START H_KERN_VIRT_START -#define H_VMALLOC_SIZE (H_KERN_VIRT_SIZE >> 1) +#define H_VMALLOC_SIZE ASM_CONST(0x380000000000) /* 56T */ #define H_VMALLOC_END (H_VMALLOC_START + H_VMALLOC_SIZE) +#define H_KERN_IO_START H_VMALLOC_END + /* * Region IDs */ diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h index 5c28bd6f2ae1..2d1ca488ca44 100644 --- a/arch/powerpc/include/asm/book3s/64/hugetlb.h +++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h @@ -54,9 +54,7 @@ static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma, #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE static inline bool gigantic_page_supported(void) { - if (radix_enabled()) - return true; - return false; + return true; } #endif diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h index 6981a52b3887..f28d21c69f79 100644 --- 
a/arch/powerpc/include/asm/book3s/64/mmu-hash.h +++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h @@ -468,7 +468,7 @@ extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend, int psize, int ssize); int htab_remove_mapping(unsigned long vstart, unsigned long vend, int psize, int ssize); -extern void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages); +extern void pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages); extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr); #ifdef CONFIG_PPC_PSERIES diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index 5b4023c616f7..c3b00e8ff791 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -83,6 +83,9 @@ typedef struct { mm_context_id_t id; u16 user_psize; /* page size index */ + /* Number of bits in the mm_cpumask */ + atomic_t active_cpus; + /* NPU NMMU context */ struct npu_context *npu_context; @@ -97,11 +100,6 @@ typedef struct { #ifdef CONFIG_PPC_SUBPAGE_PROT struct subpage_prot_table spt; #endif /* CONFIG_PPC_SUBPAGE_PROT */ -#ifdef CONFIG_PPC_ICSWX - struct spinlock *cop_lockp; /* guard acop and cop_pid */ - unsigned long acop; /* mask of enabled coprocessor types */ - unsigned int cop_pid; /* pid value used with coprocessors */ -#endif /* CONFIG_PPC_ICSWX */ #ifdef CONFIG_PPC_64K_PAGES /* for 4K PTE fragment support */ void *pte_frag; diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h index e2329db9d6f4..1fcfa425cefa 100644 --- a/arch/powerpc/include/asm/book3s/64/pgalloc.h +++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h @@ -41,8 +41,6 @@ extern struct kmem_cache *pgtable_cache[]; pgtable_cache[(shift) - 1]; \ }) -#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO - extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int); extern void pte_fragment_free(unsigned long *, int); extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift); diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 818a58fc3f4f..b9aff515b4de 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -272,8 +272,10 @@ extern unsigned long __vmalloc_end; extern unsigned long __kernel_virt_start; extern unsigned long __kernel_virt_size; +extern unsigned long __kernel_io_start; #define KERN_VIRT_START __kernel_virt_start #define KERN_VIRT_SIZE __kernel_virt_size +#define KERN_IO_START __kernel_io_start extern struct page *vmemmap; extern unsigned long ioremap_bot; extern unsigned long pci_io_base; @@ -298,7 +300,6 @@ extern unsigned long pci_io_base; * PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces * IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE */ -#define KERN_IO_START (KERN_VIRT_START + (KERN_VIRT_SIZE >> 1)) #define FULL_IO_SIZE 0x80000000ul #define ISA_IO_BASE (KERN_IO_START) #define ISA_IO_END (KERN_IO_START + 0x10000ul) @@ -409,6 +410,11 @@ static inline int pte_write(pte_t pte) return __pte_write(pte) || pte_savedwrite(pte); } +static inline int pte_read(pte_t pte) +{ + return !!(pte_raw(pte) & cpu_to_be64(_PAGE_READ)); +} + #define __HAVE_ARCH_PTEP_SET_WRPROTECT static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) @@ -1167,6 +1173,7 @@ static inline bool arch_needs_pgtable_deposit(void) return false; return true; } 
+extern void serialize_against_pte_lookup(struct mm_struct *mm); static inline pmd_t pmd_mkdevmap(pmd_t pmd) diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h index 544440b5aff3..1e5ba94e62ef 100644 --- a/arch/powerpc/include/asm/book3s/64/radix.h +++ b/arch/powerpc/include/asm/book3s/64/radix.h @@ -110,6 +110,8 @@ */ #define RADIX_VMEMMAP_BASE (RADIX_VMALLOC_END) +#define RADIX_KERN_IO_START (RADIX_KERN_VIRT_START + (RADIX_KERN_VIRT_SIZE >> 1)) + #ifndef __ASSEMBLY__ #define RADIX_PTE_TABLE_SIZE (sizeof(pte_t) << RADIX_PTE_INDEX_SIZE) #define RADIX_PMD_TABLE_SIZE (sizeof(pmd_t) << RADIX_PMD_INDEX_SIZE) diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h index cc7fbde4f53c..9b433a624bf3 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h @@ -22,22 +22,21 @@ extern void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end extern void radix__local_flush_tlb_mm(struct mm_struct *mm); extern void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); -extern void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr); extern void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr, int psize); extern void radix__tlb_flush(struct mmu_gather *tlb); #ifdef CONFIG_SMP extern void radix__flush_tlb_mm(struct mm_struct *mm); extern void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); -extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr); extern void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr, int psize); #else #define radix__flush_tlb_mm(mm) radix__local_flush_tlb_mm(mm) #define radix__flush_tlb_page(vma,addr) radix__local_flush_tlb_page(vma,addr) #define radix__flush_tlb_page_psize(mm,addr,p) radix__local_flush_tlb_page_psize(mm,addr,p) -#define radix__flush_tlb_pwc(tlb, addr) radix__local_flush_tlb_pwc(tlb, addr) #endif +extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr); +extern void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr); extern void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa, unsigned long page_size); extern void radix__flush_tlb_lpid(unsigned long lpid); diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h index 87fcc1948817..7ee763d3bea9 100644 --- a/arch/powerpc/include/asm/bug.h +++ b/arch/powerpc/include/asm/bug.h @@ -133,6 +133,7 @@ extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long); extern void bad_page_fault(struct pt_regs *, unsigned long, int); extern void _exception(int, struct pt_regs *, int, unsigned long); extern void die(const char *, struct pt_regs *, long); +extern bool die_will_crash(void); #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h index 5a90292afbad..d122f7f957ce 100644 --- a/arch/powerpc/include/asm/cache.h +++ b/arch/powerpc/include/asm/cache.h @@ -5,7 +5,7 @@ /* bytes per L1 cache line */ -#if defined(CONFIG_8xx) || defined(CONFIG_403GCX) +#if defined(CONFIG_PPC_8xx) || defined(CONFIG_403GCX) #define L1_CACHE_SHIFT 4 #define MAX_COPY_PREFETCH 1 #elif defined(CONFIG_PPC_E500MC) diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h index 52586f9956bb..eb43b5c3a7b5 100644 --- a/arch/powerpc/include/asm/cpuidle.h +++ 
b/arch/powerpc/include/asm/cpuidle.h @@ -67,6 +67,17 @@ #define ERR_DEEP_STATE_ESL_MISMATCH -2 #ifndef __ASSEMBLY__ +/* Additional SPRs that need to be saved/restored during stop */ +struct stop_sprs { + u64 pid; + u64 ldbar; + u64 fscr; + u64 hfscr; + u64 mmcr1; + u64 mmcr2; + u64 mmcra; +}; + extern u32 pnv_fastsleep_workaround_at_entry[]; extern u32 pnv_fastsleep_workaround_at_exit[]; @@ -90,20 +101,4 @@ static inline void report_invalid_psscr_val(u64 psscr_val, int err) #endif -/* Idle state entry routines */ -#ifdef CONFIG_PPC_P7_NAP -#define IDLE_STATE_ENTER_SEQ(IDLE_INST) \ - /* Magic NAP/SLEEP/WINKLE mode enter sequence */ \ - std r0,0(r1); \ - ptesync; \ - ld r0,0(r1); \ -236: cmpd cr0,r0,r0; \ - bne 236b; \ - IDLE_INST; \ - -#define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST) \ - IDLE_STATE_ENTER_SEQ(IDLE_INST) \ - b . -#endif /* CONFIG_PPC_P7_NAP */ - #endif diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index d02ad93bf708..a9bf921f4efc 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -513,7 +513,7 @@ enum { #else CPU_FTRS_GENERIC_32 | #endif -#ifdef CONFIG_8xx +#ifdef CONFIG_PPC_8xx CPU_FTRS_8XX | #endif #ifdef CONFIG_40x @@ -565,7 +565,7 @@ enum { #else CPU_FTRS_GENERIC_32 & #endif -#ifdef CONFIG_8xx +#ifdef CONFIG_PPC_8xx CPU_FTRS_8XX & #endif #ifdef CONFIG_40x diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index 8e37b71674f4..9847ae3a12d1 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -131,7 +131,6 @@ static inline bool eeh_pe_passed(struct eeh_pe *pe) struct eeh_dev { int mode; /* EEH mode */ int class_code; /* Class code of the device */ - int config_addr; /* Config address */ int pe_config_addr; /* PE config address */ u32 config_space[16]; /* Saved PCI config space */ int pcix_cap; /* Saved PCIx capability */ @@ -141,7 +140,6 @@ struct eeh_dev { struct eeh_pe *pe; /* Associated PE */ struct list_head list; /* Form link list in the PE */ struct list_head rmv_list; /* Record the removed edevs */ - struct pci_controller *phb; /* Associated PHB */ struct pci_dn *pdn; /* Associated PCI device node */ struct pci_dev *pdev; /* Associated PCI device */ bool in_error; /* Error flag for edev */ @@ -262,7 +260,8 @@ typedef void *(*eeh_traverse_func)(void *data, void *flag); void eeh_set_pe_aux_size(int size); int eeh_phb_pe_create(struct pci_controller *phb); struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb); -struct eeh_pe *eeh_pe_get(struct eeh_dev *edev); +struct eeh_pe *eeh_pe_get(struct pci_controller *phb, + int pe_no, int config_addr); int eeh_add_to_parent_pe(struct eeh_dev *edev); int eeh_rmv_from_parent_pe(struct eeh_dev *edev); void eeh_pe_update_time_stamp(struct eeh_pe *pe); diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h index ce88bbe1d809..5a23010af600 100644 --- a/arch/powerpc/include/asm/fadump.h +++ b/arch/powerpc/include/asm/fadump.h @@ -209,11 +209,13 @@ extern int early_init_dt_scan_fw_dump(unsigned long node, extern int fadump_reserve_mem(void); extern int setup_fadump(void); extern int is_fadump_active(void); +extern int should_fadump_crash(void); extern void crash_fadump(struct pt_regs *, const char *); extern void fadump_cleanup(void); #else /* CONFIG_FA_DUMP */ static inline int is_fadump_active(void) { return 0; } +static inline int should_fadump_crash(void) { return 0; } static inline void crash_fadump(struct pt_regs *regs, const char *str) { } #endif #endif diff --git 
a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h index 2de2319b99e2..8f88f771cc55 100644 --- a/arch/powerpc/include/asm/feature-fixups.h +++ b/arch/powerpc/include/asm/feature-fixups.h @@ -19,11 +19,11 @@ */ #if defined(CONFIG_PPC64) && !defined(__powerpc64__) /* 64 bits kernel, 32 bits code (ie. vdso32) */ -#define FTR_ENTRY_LONG .llong +#define FTR_ENTRY_LONG .8byte #define FTR_ENTRY_OFFSET .long 0xffffffff; .long #elif defined(CONFIG_PPC64) -#define FTR_ENTRY_LONG .llong -#define FTR_ENTRY_OFFSET .llong +#define FTR_ENTRY_LONG .8byte +#define FTR_ENTRY_OFFSET .8byte #else #define FTR_ENTRY_LONG .long #define FTR_ENTRY_OFFSET .long diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h index 4508b322f2cd..6c40dfda5912 100644 --- a/arch/powerpc/include/asm/fixmap.h +++ b/arch/powerpc/include/asm/fixmap.h @@ -17,6 +17,7 @@ #ifndef __ASSEMBLY__ #include <linux/kernel.h> #include <asm/page.h> +#include <asm/pgtable.h> #ifdef CONFIG_HIGHMEM #include <linux/threads.h> #include <asm/kmap_types.h> @@ -62,9 +63,6 @@ enum fixed_addresses { __end_of_fixed_addresses }; -extern void __set_fixmap (enum fixed_addresses idx, - phys_addr_t phys, pgprot_t flags); - #define __FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) #define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE) @@ -72,5 +70,11 @@ extern void __set_fixmap (enum fixed_addresses idx, #include <asm-generic/fixmap.h> +static inline void __set_fixmap(enum fixed_addresses idx, + phys_addr_t phys, pgprot_t flags) +{ + map_kernel_page(fix_to_virt(idx), phys, pgprot_val(flags)); +} + #endif /* !__ASSEMBLY__ */ #endif diff --git a/arch/powerpc/include/asm/fs_pd.h b/arch/powerpc/include/asm/fs_pd.h index f79d6c74eb2a..8def56ec05c6 100644 --- a/arch/powerpc/include/asm/fs_pd.h +++ b/arch/powerpc/include/asm/fs_pd.h @@ -26,7 +26,7 @@ #define cpm2_unmap(addr) do {} while(0) #endif -#ifdef CONFIG_8xx +#ifdef CONFIG_PPC_8xx #include <asm/8xx_immap.h> extern immap_t __iomem *mpc8xx_immr; diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h index 8add8b861e8d..c97603d617e3 100644 --- a/arch/powerpc/include/asm/hardirq.h +++ b/arch/powerpc/include/asm/hardirq.h @@ -12,6 +12,10 @@ typedef struct { unsigned int mce_exceptions; unsigned int spurious_irqs; unsigned int hmi_exceptions; + unsigned int sreset_irqs; +#ifdef CONFIG_PPC_WATCHDOG + unsigned int soft_nmi_irqs; +#endif #ifdef CONFIG_PPC_DOORBELL unsigned int doorbell_irqs; #endif diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index 7f4025a6c69e..b8a0fb442c64 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -218,18 +218,4 @@ static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr, } #endif /* CONFIG_HUGETLB_PAGE */ -/* - * FSL Book3E platforms require special gpage handling - the gpages - * are reserved early in the boot process by memblock instead of via - * the .dts as on IBM platforms. 
- */ -#if defined(CONFIG_HUGETLB_PAGE) && (defined(CONFIG_PPC_FSL_BOOK3E) || \ - defined(CONFIG_PPC_8xx)) -extern void __init reserve_hugetlb_gpages(void); -#else -static inline void reserve_hugetlb_gpages(void) -{ -} -#endif - #endif /* _ASM_POWERPC_HUGETLB_H */ diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index 57d38b504ff7..3d34dc0869f6 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -280,7 +280,18 @@ #define H_RESIZE_HPT_COMMIT 0x370 #define H_REGISTER_PROC_TBL 0x37C #define H_SIGNAL_SYS_RESET 0x380 -#define MAX_HCALL_OPCODE H_SIGNAL_SYS_RESET +#define H_INT_GET_SOURCE_INFO 0x3A8 +#define H_INT_SET_SOURCE_CONFIG 0x3AC +#define H_INT_GET_SOURCE_CONFIG 0x3B0 +#define H_INT_GET_QUEUE_INFO 0x3B4 +#define H_INT_SET_QUEUE_CONFIG 0x3B8 +#define H_INT_GET_QUEUE_CONFIG 0x3BC +#define H_INT_SET_OS_REPORTING_LINE 0x3C0 +#define H_INT_GET_OS_REPORTING_LINE 0x3C4 +#define H_INT_ESB 0x3C8 +#define H_INT_SYNC 0x3CC +#define H_INT_RESET 0x3D0 +#define MAX_HCALL_OPCODE H_INT_RESET /* H_VIOCTL functions */ #define H_GET_VIOA_DUMP_SIZE 0x01 diff --git a/arch/powerpc/include/asm/icswx.h b/arch/powerpc/include/asm/icswx.h index 27e588f6c72e..6a2c87577541 100644 --- a/arch/powerpc/include/asm/icswx.h +++ b/arch/powerpc/include/asm/icswx.h @@ -69,7 +69,10 @@ struct coprocessor_completion_block { #define CSB_CC_WR_PROTECTION (16) #define CSB_CC_UNKNOWN_CODE (17) #define CSB_CC_ABORT (18) +#define CSB_CC_EXCEED_BYTE_COUNT (19) /* P9 or later */ #define CSB_CC_TRANSPORT (20) +#define CSB_CC_INVALID_CRB (21) /* P9 or later */ +#define CSB_CC_INVALID_DDE (30) /* P9 or later */ #define CSB_CC_SEGMENTED_DDL (31) #define CSB_CC_PROGRESS_POINT (32) #define CSB_CC_DDE_OVERFLOW (33) diff --git a/arch/powerpc/include/asm/imc-pmu.h b/arch/powerpc/include/asm/imc-pmu.h new file mode 100644 index 000000000000..7f74c282710f --- /dev/null +++ b/arch/powerpc/include/asm/imc-pmu.h @@ -0,0 +1,128 @@ +#ifndef __ASM_POWERPC_IMC_PMU_H +#define __ASM_POWERPC_IMC_PMU_H + +/* + * IMC Nest Performance Monitor counter support. + * + * Copyright (C) 2017 Madhavan Srinivasan, IBM Corporation. + * (C) 2017 Anju T Sudhakar, IBM Corporation. + * (C) 2017 Hemant K Shaw, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or later version. + */ + +#include <linux/perf_event.h> +#include <linux/slab.h> +#include <linux/of.h> +#include <linux/io.h> +#include <asm/opal.h> + +/* + * For static allocation of some of the structures. + */ +#define IMC_MAX_PMUS 32 + +/* + * Compatibility macros for IMC devices + */ +#define IMC_DTB_COMPAT "ibm,opal-in-memory-counters" +#define IMC_DTB_UNIT_COMPAT "ibm,imc-counters" + + +/* + * LDBAR: Counter address and Enable/Disable macro. + * perf/imc-pmu.c has the LDBAR layout information. + */ +#define THREAD_IMC_LDBAR_MASK 0x0003ffffffffe000ULL +#define THREAD_IMC_ENABLE 0x8000000000000000ULL + +/* + * Structure to hold memory address information for imc units. + */ +struct imc_mem_info { + u64 *vbase; + u32 id; +}; + +/* + * Place holder for nest pmu events and values. 
+ */ +struct imc_events { + u32 value; + char *name; + char *unit; + char *scale; +}; + +/* Event attribute array index */ +#define IMC_FORMAT_ATTR 0 +#define IMC_EVENT_ATTR 1 +#define IMC_CPUMASK_ATTR 2 +#define IMC_NULL_ATTR 3 + +/* PMU Format attribute macros */ +#define IMC_EVENT_OFFSET_MASK 0xffffffffULL + +/* + * Device tree parser code detects IMC pmu support and + * registers new IMC pmus. This structure will hold the + * pmu functions, events, counter memory information + * and attrs for each imc pmu and will be referenced at + * the time of pmu registration. + */ +struct imc_pmu { + struct pmu pmu; + struct imc_mem_info *mem_info; + struct imc_events **events; + /* + * Attribute groups for the PMU. Slot 0 used for + * format attribute, slot 1 used for cpusmask attribute, + * slot 2 used for event attribute. Slot 3 keep as + * NULL. + */ + const struct attribute_group *attr_groups[4]; + u32 counter_mem_size; + int domain; + /* + * flag to notify whether the memory is mmaped + * or allocated by kernel. + */ + bool imc_counter_mmaped; +}; + +/* + * Structure to hold id, lock and reference count for the imc events which + * are inited. + */ +struct imc_pmu_ref { + struct mutex lock; + unsigned int id; + int refc; +}; + +/* + * In-Memory Collection Counters type. + * Data comes from Device tree. + * Three device type are supported. + */ + +enum { + IMC_TYPE_THREAD = 0x1, + IMC_TYPE_CORE = 0x4, + IMC_TYPE_CHIP = 0x10, +}; + +/* + * Domains for IMC PMUs + */ +#define IMC_DOMAIN_NEST 1 +#define IMC_DOMAIN_CORE 2 +#define IMC_DOMAIN_THREAD 3 + +extern int init_imc_pmu(struct device_node *parent, + struct imc_pmu *pmu_ptr, int pmu_id); +extern void thread_imc_disable(void); +#endif /* __ASM_POWERPC_IMC_PMU_H */ diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h index 7cea76f11c26..83596f32f50b 100644 --- a/arch/powerpc/include/asm/kvm_book3s_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h @@ -104,6 +104,10 @@ struct kvmppc_host_state { u8 napping; #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE + /* + * hwthread_req/hwthread_state pair is used to pull sibling threads + * out of guest on pre-ISAv3.0B CPUs where threads share MMU. 
+ */ u8 hwthread_req; u8 hwthread_state; u8 host_ipi; diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index cd2fc1cc1cc7..73b92017b6d7 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -76,7 +76,6 @@ struct machdep_calls { void __noreturn (*restart)(char *cmd); void __noreturn (*halt)(void); - void (*panic)(char *str); void (*cpu_die)(void); long (*time_init)(void); /* Optional, may be NULL */ diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index 35bec1c5bd5a..309592589e30 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -77,76 +77,8 @@ extern void switch_cop(struct mm_struct *next); extern int use_cop(unsigned long acop, struct mm_struct *mm); extern void drop_cop(unsigned long acop, struct mm_struct *mm); -/* - * switch_mm is the entry point called from the architecture independent - * code in kernel/sched/core.c - */ -static inline void switch_mm_irqs_off(struct mm_struct *prev, - struct mm_struct *next, - struct task_struct *tsk) -{ - bool new_on_cpu = false; - - /* Mark this context has been used on the new CPU */ - if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) { - cpumask_set_cpu(smp_processor_id(), mm_cpumask(next)); - - /* - * This full barrier orders the store to the cpumask above vs - * a subsequent operation which allows this CPU to begin loading - * translations for next. - * - * When using the radix MMU that operation is the load of the - * MMU context id, which is then moved to SPRN_PID. - * - * For the hash MMU it is either the first load from slb_cache - * in switch_slb(), and/or the store of paca->mm_ctx_id in - * copy_mm_to_paca(). - * - * On the read side the barrier is in pte_xchg(), which orders - * the store to the PTE vs the load of mm_cpumask. - */ - smp_mb(); - - new_on_cpu = true; - } - - /* 32-bit keeps track of the current PGDIR in the thread struct */ -#ifdef CONFIG_PPC32 - tsk->thread.pgdir = next->pgd; -#endif /* CONFIG_PPC32 */ - - /* 64-bit Book3E keeps track of current PGD in the PACA */ -#ifdef CONFIG_PPC_BOOK3E_64 - get_paca()->pgd = next->pgd; -#endif - /* Nothing else to do if we aren't actually switching */ - if (prev == next) - return; - -#ifdef CONFIG_PPC_ICSWX - /* Switch coprocessor context only if prev or next uses a coprocessor */ - if (prev->context.acop || next->context.acop) - switch_cop(next); -#endif /* CONFIG_PPC_ICSWX */ - - /* We must stop all altivec streams before changing the HW - * context - */ -#ifdef CONFIG_ALTIVEC - if (cpu_has_feature(CPU_FTR_ALTIVEC)) - asm volatile ("dssall"); -#endif /* CONFIG_ALTIVEC */ - - if (new_on_cpu) - radix_kvm_prefetch_workaround(next); - - /* - * The actual HW switching method differs between the various - * sub architectures. 
Out of line for now - */ - switch_mmu_context(prev, next, tsk); -} +extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk); static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) @@ -168,11 +100,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, */ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) { - unsigned long flags; - - local_irq_save(flags); switch_mm(prev, next, current); - local_irq_restore(flags); } /* We don't currently use enter_lazy_tlb() for anything */ diff --git a/arch/powerpc/include/asm/nmi.h b/arch/powerpc/include/asm/nmi.h index 6f8e79cd35d8..3760150a0ff0 100644 --- a/arch/powerpc/include/asm/nmi.h +++ b/arch/powerpc/include/asm/nmi.h @@ -1,9 +1,8 @@ #ifndef _ASM_NMI_H #define _ASM_NMI_H -#ifdef CONFIG_HARDLOCKUP_DETECTOR +#ifdef CONFIG_PPC_WATCHDOG extern void arch_touch_nmi_watchdog(void); - extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self); #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index 91314268f04f..185c6a47f9ba 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -121,7 +121,7 @@ extern int icache_44x_need_flush; #include <asm/nohash/pte-book3e.h> #elif defined(CONFIG_FSL_BOOKE) #include <asm/nohash/32/pte-fsl-booke.h> -#elif defined(CONFIG_8xx) +#elif defined(CONFIG_PPC_8xx) #include <asm/nohash/32/pte-8xx.h> #endif @@ -337,9 +337,6 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm, #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 }) -extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, - pmd_t **pmdp); - int map_kernel_page(unsigned long va, phys_addr_t pa, int flags); #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h index e5805ad78e12..17989c3d9a24 100644 --- a/arch/powerpc/include/asm/nohash/pgtable.h +++ b/arch/powerpc/include/asm/nohash/pgtable.h @@ -14,6 +14,7 @@ static inline int pte_write(pte_t pte) { return (pte_val(pte) & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO; } +static inline int pte_read(pte_t pte) { return 1; } static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; } diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h index 3130a73652c7..450a60b81d2a 100644 --- a/arch/powerpc/include/asm/opal-api.h +++ b/arch/powerpc/include/asm/opal-api.h @@ -42,6 +42,7 @@ #define OPAL_I2C_STOP_ERR -24 #define OPAL_XIVE_PROVISIONING -31 #define OPAL_XIVE_FREE_ACTIVE -32 +#define OPAL_TIMEOUT -33 /* API Tokens (in r0) */ #define OPAL_INVALID_CALL -1 @@ -190,7 +191,16 @@ #define OPAL_NPU_INIT_CONTEXT 146 #define OPAL_NPU_DESTROY_CONTEXT 147 #define OPAL_NPU_MAP_LPAR 148 -#define OPAL_LAST 148 +#define OPAL_IMC_COUNTERS_INIT 149 +#define OPAL_IMC_COUNTERS_START 150 +#define OPAL_IMC_COUNTERS_STOP 151 +#define OPAL_GET_POWERCAP 152 +#define OPAL_SET_POWERCAP 153 +#define OPAL_GET_POWER_SHIFT_RATIO 154 +#define OPAL_SET_POWER_SHIFT_RATIO 155 +#define OPAL_SENSOR_GROUP_CLEAR 156 +#define 
OPAL_PCI_SET_P2P 157 +#define OPAL_LAST 157 /* Device tree flags */ @@ -1084,6 +1094,18 @@ enum { XIVE_DUMP_EMU_STATE = 5, }; +/* "type" argument options for OPAL_IMC_COUNTERS_* calls */ +enum { + OPAL_IMC_COUNTERS_NEST = 1, + OPAL_IMC_COUNTERS_CORE = 2, +}; + + +/* PCI p2p descriptor */ +#define OPAL_PCI_P2P_ENABLE 0x1 +#define OPAL_PCI_P2P_LOAD 0x2 +#define OPAL_PCI_P2P_STORE 0x4 + #endif /* __ASSEMBLY__ */ #endif /* __OPAL_API_H */ diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index 588fb1c23af9..726c23304a57 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h @@ -50,7 +50,7 @@ int64_t opal_tpo_write(uint64_t token, uint32_t year_mon_day, uint32_t hour_min); int64_t opal_cec_power_down(uint64_t request); int64_t opal_cec_reboot(void); -int64_t opal_cec_reboot2(uint32_t reboot_type, char *diag); +int64_t opal_cec_reboot2(uint32_t reboot_type, const char *diag); int64_t opal_read_nvram(uint64_t buffer, uint64_t size, uint64_t offset); int64_t opal_write_nvram(uint64_t buffer, uint64_t size, uint64_t offset); int64_t opal_handle_interrupt(uint64_t isn, __be64 *outstanding_event_mask); @@ -267,6 +267,19 @@ int64_t opal_xive_allocate_irq(uint32_t chip_id); int64_t opal_xive_free_irq(uint32_t girq); int64_t opal_xive_sync(uint32_t type, uint32_t id); int64_t opal_xive_dump(uint32_t type, uint32_t id); +int64_t opal_pci_set_p2p(uint64_t phb_init, uint64_t phb_target, + uint64_t desc, uint16_t pe_number); + +int64_t opal_imc_counters_init(uint32_t type, uint64_t address, + uint64_t cpu_pir); +int64_t opal_imc_counters_start(uint32_t type, uint64_t cpu_pir); +int64_t opal_imc_counters_stop(uint32_t type, uint64_t cpu_pir); + +int opal_get_powercap(u32 handle, int token, u32 *pcap); +int opal_set_powercap(u32 handle, int token, u32 pcap); +int opal_get_power_shift_ratio(u32 handle, int token, u32 *psr); +int opal_set_power_shift_ratio(u32 handle, int token, u32 psr); +int opal_sensor_group_clear(u32 group_hndl, int token); /* Internal functions */ extern int early_init_dt_scan_opal(unsigned long node, const char *uname, @@ -345,6 +358,10 @@ static inline int opal_get_async_rc(struct opal_msg msg) void opal_wake_poller(void); +void opal_powercap_init(void); +void opal_psr_init(void); +void opal_sensor_groups_init(void); + #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_OPAL_H */ diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index dc88a31cc79a..04b60af027ae 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -31,6 +31,7 @@ #endif #include <asm/accounting.h> #include <asm/hmi.h> +#include <asm/cpuidle.h> register struct paca_struct *local_paca asm("r13"); @@ -183,6 +184,12 @@ struct paca_struct { struct paca_struct **thread_sibling_pacas; /* The PSSCR value that the kernel requested before going to stop */ u64 requested_psscr; + + /* + * Save area for additional SPRs that need to be + * saved/restored during cpuidle stop. 
+ */ + struct stop_sprs stop_sprs; #endif #ifdef CONFIG_PPC_STD_MMU_64 diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index 56c67d3f0108..0b8aa1fe2d5f 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h @@ -195,7 +195,6 @@ struct pci_dn { struct pci_dn *parent; struct pci_controller *phb; /* for pci devices */ struct iommu_table_group *table_group; /* for phb's or bridges */ - struct device_node *node; /* back-pointer to the device_node */ int pci_ext_config_space; /* for pci devices */ diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h index d795c5d5789c..45ae1212ab8a 100644 --- a/arch/powerpc/include/asm/pgalloc.h +++ b/arch/powerpc/include/asm/pgalloc.h @@ -17,6 +17,8 @@ static inline gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t gfp) } #endif /* MODULE */ +#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO) + #ifdef CONFIG_PPC_BOOK3S #include <asm/book3s/pgalloc.h> #else diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index afae9a336136..7d0d38f58243 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -66,22 +66,14 @@ extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, #ifndef CONFIG_TRANSPARENT_HUGEPAGE #define pmd_large(pmd) 0 #endif -pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, - bool *is_thp, unsigned *shift); -static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, - bool *is_thp, unsigned *shift) -{ - VM_WARN(!arch_irqs_disabled(), - "%s called with irq enabled\n", __func__); - return __find_linux_pte_or_hugepte(pgdir, ea, is_thp, shift); -} +/* can we use this in kvm */ unsigned long vmalloc_to_phys(void *vmalloc_addr); void pgtable_cache_add(unsigned shift, void (*ctor)(void *)); void pgtable_cache_init(void); -#ifdef CONFIG_STRICT_KERNEL_RWX +#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32) void mark_initmem_nx(void); #else static inline void mark_initmem_nx(void) { } diff --git a/arch/powerpc/include/asm/pnv-pci.h b/arch/powerpc/include/asm/pnv-pci.h index de9681034353..3e5cf251ad9a 100644 --- a/arch/powerpc/include/asm/pnv-pci.h +++ b/arch/powerpc/include/asm/pnv-pci.h @@ -26,6 +26,8 @@ extern int pnv_pci_get_presence_state(uint64_t id, uint8_t *state); extern int pnv_pci_get_power_state(uint64_t id, uint8_t *state); extern int pnv_pci_set_power_state(uint64_t id, uint8_t state, struct opal_msg *msg); +extern int pnv_pci_set_p2p(struct pci_dev *initiator, struct pci_dev *target, + u64 desc); int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode); int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq, diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index fa9ebaead91e..ce0930d68857 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -193,6 +193,7 @@ #define PPC_INST_CLRBHRB 0x7c00035c #define PPC_INST_COPY 0x7c20060c #define PPC_INST_CP_ABORT 0x7c00068c +#define PPC_INST_DARN 0x7c0005e6 #define PPC_INST_DCBA 0x7c0005ec #define PPC_INST_DCBA_MASK 0xfc0007fe #define PPC_INST_DCBAL 0x7c2005ec @@ -204,6 +205,8 @@ #define PPC_INST_ISEL_MASK 0xfc00003e #define PPC_INST_LDARX 0x7c0000a8 #define PPC_INST_STDCX 0x7c0001ad +#define PPC_INST_LQARX 0x7c000228 +#define PPC_INST_STQCX 0x7c00016d #define PPC_INST_LSWI 0x7c0004aa #define PPC_INST_LSWX 0x7c00042a #define PPC_INST_LWARX 0x7c000028 @@ 
-261,7 +264,7 @@ #define PPC_INST_TLBSRX_DOT 0x7c0006a5 #define PPC_INST_VPMSUMW 0x10000488 #define PPC_INST_VPMSUMD 0x100004c8 -#define PPC_INST_XXLOR 0xf0000510 +#define PPC_INST_XXLOR 0xf0000490 #define PPC_INST_XXSWAPD 0xf0000250 #define PPC_INST_XVCPSGNDP 0xf0000780 #define PPC_INST_TRECHKPT 0x7c0007dd @@ -395,16 +398,25 @@ #define PPC_CP_ABORT stringify_in_c(.long PPC_INST_CP_ABORT) #define PPC_COPY(a, b) stringify_in_c(.long PPC_INST_COPY | \ ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_DARN(t, l) stringify_in_c(.long PPC_INST_DARN | \ + ___PPC_RT(t) | \ + (((l) & 0x3) << 16)) #define PPC_DCBAL(a, b) stringify_in_c(.long PPC_INST_DCBAL | \ __PPC_RA(a) | __PPC_RB(b)) #define PPC_DCBZL(a, b) stringify_in_c(.long PPC_INST_DCBZL | \ __PPC_RA(a) | __PPC_RB(b)) +#define PPC_LQARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LQARX | \ + ___PPC_RT(t) | ___PPC_RA(a) | \ + ___PPC_RB(b) | __PPC_EH(eh)) #define PPC_LDARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LDARX | \ ___PPC_RT(t) | ___PPC_RA(a) | \ ___PPC_RB(b) | __PPC_EH(eh)) #define PPC_LWARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LWARX | \ ___PPC_RT(t) | ___PPC_RA(a) | \ ___PPC_RB(b) | __PPC_EH(eh)) +#define PPC_STQCX(t, a, b) stringify_in_c(.long PPC_INST_STQCX | \ + ___PPC_RT(t) | ___PPC_RA(a) | \ + ___PPC_RB(b)) #define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \ ___PPC_RB(b)) #define PPC_MSGSYNC stringify_in_c(.long PPC_INST_MSGSYNC) @@ -414,6 +426,8 @@ ___PPC_RB(b)) #define PPC_MSGCLRP(b) stringify_in_c(.long PPC_INST_MSGCLRP | \ ___PPC_RB(b)) +#define PPC_PASTE(a, b) stringify_in_c(.long PPC_INST_PASTE | \ + ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_INST_POPCNTB | \ __PPC_RA(a) | __PPC_RS(s)) #define PPC_POPCNTD(a, s) stringify_in_c(.long PPC_INST_POPCNTD | \ diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 6baeeb9acd0d..36f3e41c9fbe 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -378,10 +378,16 @@ BEGIN_FTR_SECTION_NESTED(96); \ cmpwi dest,0; \ beq- 90b; \ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96) -#elif defined(CONFIG_8xx) -#define MFTB(dest) mftb dest #else -#define MFTB(dest) mfspr dest, SPRN_TBRL +#define MFTB(dest) MFTBL(dest) +#endif + +#ifdef CONFIG_PPC_8xx +#define MFTBL(dest) mftb dest +#define MFTBU(dest) mftbu dest +#else +#define MFTBL(dest) mfspr dest, SPRN_TBRL +#define MFTBU(dest) mfspr dest, SPRN_TBRU #endif #ifndef CONFIG_SMP @@ -411,7 +417,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601) * and they must be used. 
*/ -#if !defined(CONFIG_4xx) && !defined(CONFIG_8xx) +#if !defined(CONFIG_4xx) && !defined(CONFIG_PPC_8xx) #define tlbia \ li r4,1024; \ mtctr r4; \ @@ -439,7 +445,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601) .machine push ; \ .machine "power4" ; \ lis scratch,0x60000000@h; \ - dcbt r0,scratch,0b01010; \ + dcbt 0,scratch,0b01010; \ .machine pop /* diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h index 35c00d7a0cf8..825bd5998701 100644 --- a/arch/powerpc/include/asm/prom.h +++ b/arch/powerpc/include/asm/prom.h @@ -159,7 +159,10 @@ struct of_drconf_cell { #define OV5_PFO_HW_842 0x1140 /* PFO Compression Accelerator */ #define OV5_PFO_HW_ENCR 0x1120 /* PFO Encryption Accelerator */ #define OV5_SUB_PROCESSORS 0x1501 /* 1,2,or 4 Sub-Processors supported */ -#define OV5_XIVE_EXPLOIT 0x1701 /* XIVE exploitation supported */ +#define OV5_XIVE_SUPPORT 0x17C0 /* XIVE Exploitation Support Mask */ +#define OV5_XIVE_LEGACY 0x1700 /* XIVE legacy mode Only */ +#define OV5_XIVE_EXPLOIT 0x1740 /* XIVE exploitation mode Only */ +#define OV5_XIVE_EITHER 0x1780 /* XIVE legacy or exploitation mode */ /* MMU Base Architecture */ #define OV5_MMU_SUPPORT 0x18C0 /* MMU Mode Support Mask */ #define OV5_MMU_HASH 0x1800 /* Hash MMU Only */ diff --git a/arch/powerpc/include/asm/pte-walk.h b/arch/powerpc/include/asm/pte-walk.h new file mode 100644 index 000000000000..2d633e9d686c --- /dev/null +++ b/arch/powerpc/include/asm/pte-walk.h @@ -0,0 +1,35 @@ +#ifndef _ASM_POWERPC_PTE_WALK_H +#define _ASM_POWERPC_PTE_WALK_H + +#include <linux/sched.h> + +/* Don't use this directly */ +extern pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea, + bool *is_thp, unsigned *hshift); + +static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea, + bool *is_thp, unsigned *hshift) +{ + VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__); + return __find_linux_pte(pgdir, ea, is_thp, hshift); +} + +static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift) +{ + pgd_t *pgdir = init_mm.pgd; + return __find_linux_pte(pgdir, ea, NULL, hshift); +} +/* + * This is what we should always use. Any other lockless page table lookup needs + * careful audit against THP split. + */ +static inline pte_t *find_current_mm_pte(pgd_t *pgdir, unsigned long ea, + bool *is_thp, unsigned *hshift) +{ + VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__); + VM_WARN(pgdir != current->mm->pgd, + "%s lock less page table lookup called on wrong mm\n", __func__); + return __find_linux_pte(pgdir, ea, is_thp, hshift); +} + +#endif /* _ASM_POWERPC_PTE_WALK_H */ diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index a3b6575c7842..f92eaf7a4c0d 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -22,9 +22,9 @@ #include <asm/reg_fsl_emb.h> #endif -#ifdef CONFIG_8xx +#ifdef CONFIG_PPC_8xx #include <asm/reg_8xx.h> -#endif /* CONFIG_8xx */ +#endif /* CONFIG_PPC_8xx */ #define MSR_SF_LG 63 /* Enable 64 bit mode */ #define MSR_ISF_LG 61 /* Interrupt 64b mode valid on 630 */ @@ -135,7 +135,7 @@ #define MSR_KERNEL (MSR_ | MSR_64BIT) #define MSR_USER32 (MSR_ | MSR_PR | MSR_EE) #define MSR_USER64 (MSR_USER32 | MSR_64BIT) -#elif defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_8xx) +#elif defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_8xx) /* Default MSR for kernel mode. 
*/ #define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR) #define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE) @@ -272,16 +272,65 @@ #define SPRN_DAR 0x013 /* Data Address Register */ #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */ #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */ -#define DSISR_NOHPTE 0x40000000 /* no translation found */ -#define DSISR_PROTFAULT 0x08000000 /* protection fault */ -#define DSISR_BADACCESS 0x04000000 /* bad access to CI or G */ -#define DSISR_ISSTORE 0x02000000 /* access was a store */ -#define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */ -#define DSISR_NOSEGMENT 0x00200000 /* SLB miss */ -#define DSISR_KEYFAULT 0x00200000 /* Key fault */ -#define DSISR_UNSUPP_MMU 0x00080000 /* Unsupported MMU config */ -#define DSISR_SET_RC 0x00040000 /* Failed setting of R/C bits */ -#define DSISR_PGDIRFAULT 0x00020000 /* Fault on page directory */ +#define DSISR_BAD_DIRECT_ST 0x80000000 /* Obsolete: Direct store error */ +#define DSISR_NOHPTE 0x40000000 /* no translation found */ +#define DSISR_ATTR_CONFLICT 0x20000000 /* P9: Process vs. Partition attr */ +#define DSISR_NOEXEC_OR_G 0x10000000 /* Alias of SRR1 bit, see below */ +#define DSISR_PROTFAULT 0x08000000 /* protection fault */ +#define DSISR_BADACCESS 0x04000000 /* bad access to CI or G */ +#define DSISR_ISSTORE 0x02000000 /* access was a store */ +#define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */ +#define DSISR_NOSEGMENT 0x00200000 /* STAB miss (unsupported) */ +#define DSISR_KEYFAULT 0x00200000 /* Storage Key fault */ +#define DSISR_BAD_EXT_CTRL 0x00100000 /* Obsolete: External ctrl error */ +#define DSISR_UNSUPP_MMU 0x00080000 /* P9: Unsupported MMU config */ +#define DSISR_SET_RC 0x00040000 /* P9: Failed setting of R/C bits */ +#define DSISR_PRTABLE_FAULT 0x00020000 /* P9: Fault on process table */ +#define DSISR_ICSWX_NO_CT 0x00004000 /* P7: icswx unavailable cp type */ +#define DSISR_BAD_COPYPASTE 0x00000008 /* P9: Copy/Paste on wrong memtype */ +#define DSISR_BAD_AMO 0x00000004 /* P9: Incorrect AMO opcode */ +#define DSISR_BAD_CI_LDST 0x00000002 /* P8: Bad HV CI load/store */ + +/* + * DSISR_NOEXEC_OR_G doesn't actually exist. This bit is always + * 0 on DSIs. However, on ISIs, the corresponding bit in SRR1 + * indicates an attempt at executing from a no-execute PTE + * or segment or from a guarded page. + * + * We add a definition here for completeness as we alias + * DSISR and SRR1 in do_page_fault. + */ + +/* + * DSISR bits that are treated as a fault. 
Any bit set + * here will skip hash_page, and cause do_page_fault to + * trigger a SIGBUS or SIGSEGV: + */ +#define DSISR_BAD_FAULT_32S (DSISR_BAD_DIRECT_ST | \ + DSISR_BADACCESS | \ + DSISR_BAD_EXT_CTRL) +#define DSISR_BAD_FAULT_64S (DSISR_BAD_FAULT_32S | \ + DSISR_ATTR_CONFLICT | \ + DSISR_KEYFAULT | \ + DSISR_UNSUPP_MMU | \ + DSISR_PRTABLE_FAULT | \ + DSISR_ICSWX_NO_CT | \ + DSISR_BAD_COPYPASTE | \ + DSISR_BAD_AMO | \ + DSISR_BAD_CI_LDST) +/* + * These bits are equivalent in SRR1 and DSISR for 0x400 + * instruction access interrupts on Book3S + */ +#define DSISR_SRR1_MATCH_32S (DSISR_NOHPTE | \ + DSISR_NOEXEC_OR_G | \ + DSISR_PROTFAULT) +#define DSISR_SRR1_MATCH_64S (DSISR_SRR1_MATCH_32S | \ + DSISR_KEYFAULT | \ + DSISR_UNSUPP_MMU | \ + DSISR_SET_RC | \ + DSISR_PRTABLE_FAULT) + #define SPRN_TBRL 0x10C /* Time Base Read Lower Register (user, R/O) */ #define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */ #define SPRN_CIR 0x11B /* Chip Information Register (hyper, R/0) */ @@ -307,6 +356,7 @@ #define SPRN_PMSR 0x355 /* Power Management Status Reg */ #define SPRN_PMMAR 0x356 /* Power Management Memory Activity Register */ #define SPRN_PSSCR 0x357 /* Processor Stop Status and Control Register (ISA 3.0) */ +#define SPRN_PSSCR_PR 0x337 /* PSSCR ISA 3.0, privileged mode access */ #define SPRN_PMCR 0x374 /* Power Management Control Register */ /* HFSCR and FSCR bit numbers are the same */ @@ -675,6 +725,7 @@ * may not be recoverable */ #define SRR1_WS_DEEPER 0x00020000 /* Some resources not maintained */ #define SRR1_WS_DEEP 0x00010000 /* All resources maintained */ +#define SRR1_PROGTM 0x00200000 /* TM Bad Thing */ #define SRR1_PROGFPE 0x00100000 /* Floating Point Enabled */ #define SRR1_PROGILL 0x00080000 /* Illegal instruction */ #define SRR1_PROGPRIV 0x00040000 /* Privileged instruction */ @@ -1114,7 +1165,7 @@ #endif #endif -#ifdef CONFIG_8xx +#ifdef CONFIG_PPC_8xx #define SPRN_SPRG_SCRATCH0 SPRN_SPRG0 #define SPRN_SPRG_SCRATCH1 SPRN_SPRG1 #define SPRN_SPRG_SCRATCH2 SPRN_SPRG2 @@ -1197,10 +1248,8 @@ * differentiated by the version number in the Communication Processor * Module (CPM). 
*/ -#define PVR_821 0x00500000 -#define PVR_823 PVR_821 -#define PVR_850 PVR_821 -#define PVR_860 PVR_821 +#define PVR_8xx 0x00500000 + #define PVR_8240 0x00810100 #define PVR_8245 0x80811014 #define PVR_8260 PVR_8240 @@ -1295,12 +1344,12 @@ static inline void msr_check_and_clear(unsigned long bits) ".section __ftr_fixup,\"a\"\n" \ ".align 3\n" \ "98:\n" \ - " .llong %1\n" \ - " .llong %1\n" \ - " .llong 97b-98b\n" \ - " .llong 99b-98b\n" \ - " .llong 0\n" \ - " .llong 0\n" \ + " .8byte %1\n" \ + " .8byte %1\n" \ + " .8byte 97b-98b\n" \ + " .8byte 99b-98b\n" \ + " .8byte 0\n" \ + " .8byte 0\n" \ ".previous" \ : "=r" (rval) \ : "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL) : "cr0"); \ @@ -1313,7 +1362,7 @@ static inline void msr_check_and_clear(unsigned long bits) #else /* __powerpc64__ */ -#if defined(CONFIG_8xx) +#if defined(CONFIG_PPC_8xx) #define mftbl() ({unsigned long rval; \ asm volatile("mftbl %0" : "=r" (rval)); rval;}) #define mftbu() ({unsigned long rval; \ diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h index 737e012ef56e..eb2a33d5df26 100644 --- a/arch/powerpc/include/asm/reg_booke.h +++ b/arch/powerpc/include/asm/reg_booke.h @@ -221,10 +221,7 @@ #define SPRN_CSRR0 SPRN_SRR2 /* Critical Save and Restore Register 0 */ #define SPRN_CSRR1 SPRN_SRR3 /* Critical Save and Restore Register 1 */ #endif - -#ifdef CONFIG_PPC_ICSWX #define SPRN_HACOP 0x15F /* Hypervisor Available Coprocessor Register */ -#endif /* Bit definitions for CCR1. */ #define CCR1_DPC 0x00000100 /* Disable L1 I-Cache/D-Cache parity checking */ diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index 654d64c9f3ac..3a3fb0ca68f5 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -23,7 +23,6 @@ extern void reloc_got2(unsigned long); void check_for_initrd(void); void initmem_init(void); -void setup_panic(void); #define ARCH_PANIC_TIMEOUT 180 #ifdef CONFIG_PPC_PSERIES diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index 8ea98504f900..fac963e10d39 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -97,6 +97,7 @@ static inline void set_hard_smp_processor_id(int cpu, int phys) #endif DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map); +DECLARE_PER_CPU(cpumask_var_t, cpu_l2_cache_map); DECLARE_PER_CPU(cpumask_var_t, cpu_core_map); static inline struct cpumask *cpu_sibling_mask(int cpu) @@ -109,6 +110,11 @@ static inline struct cpumask *cpu_core_mask(int cpu) return per_cpu(cpu_core_map, cpu); } +static inline struct cpumask *cpu_l2_cache_mask(int cpu) +{ + return per_cpu(cpu_l2_cache_map, cpu); +} + extern int cpu_to_core_id(int cpu); /* Since OpenPIC has only 4 IPIs, we use slightly different message numbers. diff --git a/arch/powerpc/include/asm/sstep.h b/arch/powerpc/include/asm/sstep.h index d3a42cc45a82..ab9d849644d0 100644 --- a/arch/powerpc/include/asm/sstep.h +++ b/arch/powerpc/include/asm/sstep.h @@ -23,12 +23,9 @@ struct pt_regs; #define IS_RFID(instr) (((instr) & 0xfc0007fe) == 0x4c000024) #define IS_RFI(instr) (((instr) & 0xfc0007fe) == 0x4c000064) -/* Emulate instructions that cause a transfer of control. */ -extern int emulate_step(struct pt_regs *regs, unsigned int instr); - enum instruction_type { COMPUTE, /* arith/logical/CR op, etc. 
*/ - LOAD, + LOAD, /* load and store types need to be contiguous */ LOAD_MULTI, LOAD_FP, LOAD_VMX, @@ -55,10 +52,31 @@ enum instruction_type { #define INSTR_TYPE_MASK 0x1f +#define OP_IS_LOAD_STORE(type) (LOAD <= (type) && (type) <= STCX) + +/* Compute flags, ORed in with type */ +#define SETREG 0x20 +#define SETCC 0x40 +#define SETXER 0x80 + +/* Branch flags, ORed in with type */ +#define SETLK 0x20 +#define BRTAKEN 0x40 +#define DECCTR 0x80 + /* Load/store flags, ORed in with type */ #define SIGNEXT 0x20 #define UPDATE 0x40 /* matches bit in opcode 31 instructions */ #define BYTEREV 0x80 +#define FPCONV 0x100 + +/* Barrier type field, ORed in with type */ +#define BARRIER_MASK 0xe0 +#define BARRIER_SYNC 0x00 +#define BARRIER_ISYNC 0x20 +#define BARRIER_EIEIO 0x40 +#define BARRIER_LWSYNC 0x60 +#define BARRIER_PTESYNC 0x80 /* Cacheop values, ORed in with type */ #define CACHEOP_MASK 0x700 @@ -67,10 +85,17 @@ enum instruction_type { #define DCBTST 0x200 #define DCBT 0x300 #define ICBI 0x400 +#define DCBZ 0x500 + +/* VSX flags values */ +#define VSX_FPCONV 1 /* do floating point SP/DP conversion */ +#define VSX_SPLAT 2 /* store loaded value into all elements */ +#define VSX_LDLEFT 4 /* load VSX register from left */ +#define VSX_CHECK_VEC 8 /* check MSR_VEC not MSR_VSX for reg >= 32 */ /* Size field in type word */ -#define SIZE(n) ((n) << 8) -#define GETSIZE(w) ((w) >> 8) +#define SIZE(n) ((n) << 12) +#define GETSIZE(w) ((w) >> 12) #define MKOP(t, f, s) ((t) | (f) | SIZE(s)) @@ -83,7 +108,63 @@ struct instruction_op { int update_reg; /* For MFSPR */ int spr; + u32 ccval; + u32 xerval; + u8 element_size; /* for VSX/VMX loads/stores */ + u8 vsx_flags; +}; + +union vsx_reg { + u8 b[16]; + u16 h[8]; + u32 w[4]; + unsigned long d[2]; + float fp[4]; + double dp[2]; + __vector128 v; }; -extern int analyse_instr(struct instruction_op *op, struct pt_regs *regs, +/* + * Decode an instruction, and return information about it in *op + * without changing *regs. + * + * Return value is 1 if the instruction can be emulated just by + * updating *regs with the information in *op, -1 if we need the + * GPRs but *regs doesn't contain the full register set, or 0 + * otherwise. + */ +extern int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, unsigned int instr); + +/* + * Emulate an instruction that can be executed just by updating + * fields in *regs. + */ +void emulate_update_regs(struct pt_regs *reg, struct instruction_op *op); + +/* + * Emulate instructions that cause a transfer of control, + * arithmetic/logical instructions, loads and stores, + * cache operations and barriers. + * + * Returns 1 if the instruction was emulated successfully, + * 0 if it could not be emulated, or -1 for an instruction that + * should not be emulated (rfid, mtmsrd clearing MSR_RI, etc.). + */ +extern int emulate_step(struct pt_regs *regs, unsigned int instr); + +/* + * Emulate a load or store instruction by reading/writing the + * memory of the current process. FP/VMX/VSX registers are assumed + * to hold live values if the appropriate enable bit in regs->msr is + * set; otherwise this will use the saved values in the thread struct + * for user-mode accesses. 
+ */ +extern int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op); + +extern void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg, + const void *mem, bool cross_endian); +extern void emulate_vsx_store(struct instruction_op *op, + const union vsx_reg *reg, void *mem, + bool cross_endian); +extern int emulate_dcbz(unsigned long ea, struct pt_regs *regs); diff --git a/arch/powerpc/include/asm/string.h b/arch/powerpc/include/asm/string.h index da3cdffca440..cc9addefb51c 100644 --- a/arch/powerpc/include/asm/string.h +++ b/arch/powerpc/include/asm/string.h @@ -10,6 +10,7 @@ #define __HAVE_ARCH_MEMMOVE #define __HAVE_ARCH_MEMCMP #define __HAVE_ARCH_MEMCHR +#define __HAVE_ARCH_MEMSET16 extern char * strcpy(char *,const char *); extern char * strncpy(char *,const char *, __kernel_size_t); @@ -23,6 +24,31 @@ extern void * memmove(void *,const void *,__kernel_size_t); extern int memcmp(const void *,const void *,__kernel_size_t); extern void * memchr(const void *,int,__kernel_size_t); +#ifdef CONFIG_PPC64 +#define __HAVE_ARCH_MEMSET32 +#define __HAVE_ARCH_MEMSET64 + +extern void *__memset16(uint16_t *, uint16_t v, __kernel_size_t); +extern void *__memset32(uint32_t *, uint32_t v, __kernel_size_t); +extern void *__memset64(uint64_t *, uint64_t v, __kernel_size_t); + +static inline void *memset16(uint16_t *p, uint16_t v, __kernel_size_t n) +{ + return __memset16(p, v, n * 2); +} + +static inline void *memset32(uint32_t *p, uint32_t v, __kernel_size_t n) +{ + return __memset32(p, v, n * 4); +} + +static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n) +{ + return __memset64(p, v, n * 8); +} +#else +extern void *memset16(uint16_t *, uint16_t, __kernel_size_t); +#endif #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_STRING_H */ diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h index 2cf846edb3fc..cb61eae5b7ed 100644 --- a/arch/powerpc/include/asm/timex.h +++ b/arch/powerpc/include/asm/timex.h @@ -29,7 +29,7 @@ static inline cycles_t get_cycles(void) ret = 0; __asm__ __volatile__( -#ifdef CONFIG_8xx +#ifdef CONFIG_PPC_8xx "97: mftb %0\n" #else "97: mfspr %0, %2\n" @@ -45,11 +45,7 @@ static inline cycles_t get_cycles(void) " .long 0\n" " .long 0\n" ".previous" -#ifdef CONFIG_8xx - : "=r" (ret) : "i" (CPU_FTR_601)); -#else : "=r" (ret) : "i" (CPU_FTR_601), "i" (SPRN_TBRL)); -#endif return ret; #endif } diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h index 609557569f65..a7eabff27a0f 100644 --- a/arch/powerpc/include/asm/tlb.h +++ b/arch/powerpc/include/asm/tlb.h @@ -69,13 +69,22 @@ static inline int mm_is_core_local(struct mm_struct *mm) topology_sibling_cpumask(smp_processor_id())); } +#ifdef CONFIG_PPC_BOOK3S_64 +static inline int mm_is_thread_local(struct mm_struct *mm) +{ + if (atomic_read(&mm->context.active_cpus) > 1) + return false; + return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)); +} +#else /* CONFIG_PPC_BOOK3S_64 */ static inline int mm_is_thread_local(struct mm_struct *mm) { return cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())); } +#endif /* !CONFIG_PPC_BOOK3S_64 */ -#else +#else /* CONFIG_SMP */ static inline int mm_is_core_local(struct mm_struct *mm) { return 1; diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h index dc4e15937ccf..2d84bca8d053 100644 --- a/arch/powerpc/include/asm/topology.h +++ b/arch/powerpc/include/asm/topology.h @@ -16,8 +16,6 @@ struct device_node; #include <asm/mmzone.h> -#define parent_node(node) (node) 
- #define cpumask_of_node(node) ((node) == -1 ? \ cpu_all_mask : \ node_to_cpumask_map[node]) diff --git a/arch/powerpc/include/asm/vas.h b/arch/powerpc/include/asm/vas.h new file mode 100644 index 000000000000..fd5963acd658 --- /dev/null +++ b/arch/powerpc/include/asm/vas.h @@ -0,0 +1,159 @@ +/* + * Copyright 2016-17 IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef _ASM_POWERPC_VAS_H +#define _ASM_POWERPC_VAS_H + +/* + * Min and max FIFO sizes are based on Version 1.05 Section 3.1.4.25 + * (Local FIFO Size Register) of the VAS workbook. + */ +#define VAS_RX_FIFO_SIZE_MIN (1 << 10) /* 1KB */ +#define VAS_RX_FIFO_SIZE_MAX (8 << 20) /* 8MB */ + +/* + * Threshold Control Mode: Have paste operation fail if the number of + * requests in receive FIFO exceeds a threshold. + * + * NOTE: No special error code yet if paste is rejected because of these + * limits. So users can't distinguish between this and other errors. + */ +#define VAS_THRESH_DISABLED 0 +#define VAS_THRESH_FIFO_GT_HALF_FULL 1 +#define VAS_THRESH_FIFO_GT_QTR_FULL 2 +#define VAS_THRESH_FIFO_GT_EIGHTH_FULL 3 + +/* + * Get/Set bit fields + */ +#define GET_FIELD(m, v) (((v) & (m)) >> MASK_LSH(m)) +#define MASK_LSH(m) (__builtin_ffsl(m) - 1) +#define SET_FIELD(m, v, val) \ + (((v) & ~(m)) | ((((typeof(v))(val)) << MASK_LSH(m)) & (m))) + +/* + * Co-processor Engine type. + */ +enum vas_cop_type { + VAS_COP_TYPE_FAULT, + VAS_COP_TYPE_842, + VAS_COP_TYPE_842_HIPRI, + VAS_COP_TYPE_GZIP, + VAS_COP_TYPE_GZIP_HIPRI, + VAS_COP_TYPE_FTW, + VAS_COP_TYPE_MAX, +}; + +/* + * Receive window attributes specified by the (in-kernel) owner of window. + */ +struct vas_rx_win_attr { + void *rx_fifo; + int rx_fifo_size; + int wcreds_max; + + bool pin_win; + bool rej_no_credit; + bool tx_wcred_mode; + bool rx_wcred_mode; + bool tx_win_ord_mode; + bool rx_win_ord_mode; + bool data_stamp; + bool nx_win; + bool fault_win; + bool user_win; + bool notify_disable; + bool intr_disable; + bool notify_early; + + int lnotify_lpid; + int lnotify_pid; + int lnotify_tid; + u32 pswid; + + int tc_mode; +}; + +/* + * Window attributes specified by the in-kernel owner of a send window. + */ +struct vas_tx_win_attr { + enum vas_cop_type cop; + int wcreds_max; + int lpid; + int pidr; /* hardware PID (from SPRN_PID) */ + int pid; /* linux process id */ + int pswid; + int rsvd_txbuf_count; + int tc_mode; + + bool user_win; + bool pin_win; + bool rej_no_credit; + bool rsvd_txbuf_enable; + bool tx_wcred_mode; + bool rx_wcred_mode; + bool tx_win_ord_mode; + bool rx_win_ord_mode; +}; + +/* + * Helper to initialize receive window attributes to defaults for an + * NX window. + */ +void vas_init_rx_win_attr(struct vas_rx_win_attr *rxattr, enum vas_cop_type cop); + +/* + * Open a VAS receive window for the instance of VAS identified by @vasid + * Use @attr to initialize the attributes of the window. + * + * Return a handle to the window or ERR_PTR() on error. + */ +struct vas_window *vas_rx_win_open(int vasid, enum vas_cop_type cop, + struct vas_rx_win_attr *attr); + +/* + * Helper to initialize send window attributes to defaults for an NX window. + */ +extern void vas_init_tx_win_attr(struct vas_tx_win_attr *txattr, + enum vas_cop_type cop); + +/* + * Open a VAS send window for the instance of VAS identified by @vasid + * and the co-processor type @cop. 
Use @attr to initialize attributes + * of the window. + * + * Note: The instance of VAS must already have an open receive window for + * the coprocessor type @cop. + * + * Return a handle to the send window or ERR_PTR() on error. + */ +struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop, + struct vas_tx_win_attr *attr); + +/* + * Close the send or receive window identified by @win. For receive windows + * return -EAGAIN if there are active send windows attached to this receive + * window. + */ +int vas_win_close(struct vas_window *win); + +/* + * Copy the co-processor request block (CRB) @crb into the local L2 cache. + */ +int vas_copy_crb(void *crb, int offset); + +/* + * Paste a previously copied CRB (see vas_copy_crb()) from the L2 cache to + * the hardware address associated with the window @win. @re is expected/ + * assumed to be true for NX windows. + */ +int vas_paste_crb(struct vas_window *win, int offset, bool re); + +#endif /* __ASM_POWERPC_VAS_H */ diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h index c23ff4389ca2..371fbebf1ec9 100644 --- a/arch/powerpc/include/asm/xive.h +++ b/arch/powerpc/include/asm/xive.h @@ -45,6 +45,7 @@ struct xive_irq_data { void __iomem *trig_mmio; u32 esb_shift; int src_chip; + u32 hw_irq; /* Setup/used by frontend */ int target; @@ -55,6 +56,7 @@ struct xive_irq_data { #define XIVE_IRQ_FLAG_SHIFT_BUG 0x04 #define XIVE_IRQ_FLAG_MASK_FW 0x08 #define XIVE_IRQ_FLAG_EOI_FW 0x10 +#define XIVE_IRQ_FLAG_H_INT_ESB 0x20 #define XIVE_INVALID_CHIP_ID -1 @@ -110,11 +112,13 @@ extern bool __xive_enabled; static inline bool xive_enabled(void) { return __xive_enabled; } +extern bool xive_spapr_init(void); extern bool xive_native_init(void); extern void xive_smp_probe(void); extern int xive_smp_prepare_cpu(unsigned int cpu); extern void xive_smp_setup_cpu(void); extern void xive_smp_disable_cpu(void); +extern void xive_teardown_cpu(void); extern void xive_kexec_teardown_cpu(int secondary); extern void xive_shutdown(void); extern void xive_flush_interrupt(void); @@ -147,6 +151,7 @@ extern int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id) static inline bool xive_enabled(void) { return false; } +static inline bool xive_spapr_init(void) { return false; } static inline bool xive_native_init(void) { return false; } static inline void xive_smp_probe(void) { } extern inline int xive_smp_prepare_cpu(unsigned int cpu) { return -EINVAL; } |
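A few usage sketches for the new interfaces introduced above follow; they are illustrative only and not part of the diff. The first covers the new asm/pte-walk.h, which replaces find_linux_pte_or_hugepte(); its comment says find_current_mm_pte() is the variant callers should normally use, and the VM_WARNs enforce disabled interrupts and the current mm's pgd. A minimal sketch of a hypothetical caller obeying those rules (the helper name, READ_ONCE snapshot and includes are assumptions, not taken from the diff):

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/irqflags.h>
#include <asm/pte-walk.h>

/* Hypothetical caller: snapshot the PTE mapping @ea in the current mm. */
static pte_t snapshot_current_pte(unsigned long ea)
{
	unsigned long flags;
	unsigned int shift;
	pte_t *ptep, pte = __pte(0);

	local_irq_save(flags);	/* keep the page tables from being freed under us */
	ptep = find_current_mm_pte(current->mm->pgd, ea, NULL, &shift);
	if (ptep)
		pte = READ_ONCE(*ptep);	/* copy out while still protected */
	local_irq_restore(flags);

	return pte;
}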
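The reworked asm/sstep.h documents a two-level API: analyse_instr() decodes an instruction without touching *regs, emulate_update_regs() applies operations whose effect is confined to the register image, and emulate_step() handles the rest (loads, stores, cache ops, barriers). A sketch of a hypothetical caller wired up to that return-value contract, illustrative only:

#include <asm/ptrace.h>
#include <asm/sstep.h>

static int try_emulate(struct pt_regs *regs, unsigned int instr)
{
	struct instruction_op op;

	switch (analyse_instr(&op, regs, instr)) {
	case 1:		/* effect is confined to *regs */
		emulate_update_regs(regs, &op);
		return 1;
	case -1:	/* needs GPRs that *regs does not carry */
		return 0;
	default:
		break;
	}

	/* Loads, stores, cache ops and barriers go through the full path. */
	return emulate_step(regs, instr);
}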
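The asm/string.h hunk adds element-wise fill helpers: memset16/32/64() take a count of elements and, on PPC64, forward to the assembly __memset16/32/64() routines with the length scaled to bytes. A short sketch (the scanline helper and names are hypothetical):

#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical helper: fill one scanline of @width 32-bit pixels. */
static void fill_scanline(u32 *line, u32 pixel, size_t width)
{
	/* On PPC64 this expands to __memset32(line, pixel, width * 4). */
	memset32(line, pixel, width);
}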
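Finally, the new asm/vas.h describes the window lifecycle: initialize attributes, open a send window for a coprocessor type, then submit requests by copying a CRB into the L2 cache and pasting it to the window. A hedged sketch of that flow under the declared signatures (error-code conventions beyond what the header states are assumptions):

#include <asm/vas.h>

/* Hypothetical 842-compression setup on the VAS instance @vasid. */
static struct vas_window *open_842_send_window(int vasid)
{
	struct vas_tx_win_attr attr;

	vas_init_tx_win_attr(&attr, VAS_COP_TYPE_842);
	return vas_tx_win_open(vasid, VAS_COP_TYPE_842, &attr);
}

/* Submit one request: copy the CRB into L2, then paste it to the window. */
static int submit_crb(struct vas_window *win, void *crb)
{
	int rc;

	rc = vas_copy_crb(crb, 0);	/* assuming 0 means success */
	if (rc)
		return rc;
	return vas_paste_crb(win, 0, true);	/* re is true for NX windows */
}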