| author | David S. Miller <davem@davemloft.net> | 2013-05-05 05:34:13 +0400 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2013-05-05 05:34:13 +0400 |
| commit | 048c9acca90ca7da42b92745445fe008a48add88 (patch) | |
| tree | e2e551a565a7dcdca0fc398aa659231745e5a901 /arch/powerpc/include | |
| parent | 07df841877195765d958df146f614fc7bdedd5e3 (diff) | |
| parent | ad348cc5349be4ef4abe08819afbb63386585413 (diff) | |
| download | linux-048c9acca90ca7da42b92745445fe008a48add88.tar.xz | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc
Merge sparc bug fixes that didn't make it into v3.9 into
sparc-next.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/powerpc/include')
42 files changed, 494 insertions, 153 deletions
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h index 08bd299c75b1..910194e9a1e2 100644 --- a/arch/powerpc/include/asm/bitops.h +++ b/arch/powerpc/include/asm/bitops.h @@ -53,7 +53,7 @@ #define smp_mb__after_clear_bit() smp_mb() /* Macro for generating the ***_bits() functions */ -#define DEFINE_BITOP(fn, op, prefix, postfix) \ +#define DEFINE_BITOP(fn, op, prefix) \ static __inline__ void fn(unsigned long mask, \ volatile unsigned long *_p) \ { \ @@ -66,16 +66,15 @@ static __inline__ void fn(unsigned long mask, \ PPC405_ERR77(0,%3) \ PPC_STLCX "%0,0,%3\n" \ "bne- 1b\n" \ - postfix \ : "=&r" (old), "+m" (*p) \ : "r" (mask), "r" (p) \ : "cc", "memory"); \ } -DEFINE_BITOP(set_bits, or, "", "") -DEFINE_BITOP(clear_bits, andc, "", "") -DEFINE_BITOP(clear_bits_unlock, andc, PPC_RELEASE_BARRIER, "") -DEFINE_BITOP(change_bits, xor, "", "") +DEFINE_BITOP(set_bits, or, "") +DEFINE_BITOP(clear_bits, andc, "") +DEFINE_BITOP(clear_bits_unlock, andc, PPC_RELEASE_BARRIER) +DEFINE_BITOP(change_bits, xor, "") static __inline__ void set_bit(int nr, volatile unsigned long *addr) { diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index fb3245e928ea..fcc54ad159ba 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -52,6 +52,7 @@ struct cpu_spec { char *cpu_name; unsigned long cpu_features; /* Kernel features */ unsigned int cpu_user_features; /* Userland features */ + unsigned int cpu_user_features2; /* Userland features v2 */ unsigned int mmu_features; /* MMU features */ /* cache line sizes */ @@ -151,7 +152,7 @@ extern const char *powerpc_base_platform; #define CPU_FTR_HVMODE LONG_ASM_CONST(0x0000000100000000) #define CPU_FTR_ARCH_201 LONG_ASM_CONST(0x0000000200000000) #define CPU_FTR_ARCH_206 LONG_ASM_CONST(0x0000000400000000) -#define CPU_FTR_CFAR LONG_ASM_CONST(0x0000000800000000) +#define CPU_FTR_ARCH_207S LONG_ASM_CONST(0x0000000800000000) #define CPU_FTR_IABR LONG_ASM_CONST(0x0000001000000000) #define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000002000000000) #define CPU_FTR_CTRL LONG_ASM_CONST(0x0000004000000000) @@ -172,7 +173,7 @@ extern const char *powerpc_base_platform; #define CPU_FTR_ICSWX LONG_ASM_CONST(0x0020000000000000) #define CPU_FTR_VMX_COPY LONG_ASM_CONST(0x0040000000000000) #define CPU_FTR_TM LONG_ASM_CONST(0x0080000000000000) -#define CPU_FTR_BCTAR LONG_ASM_CONST(0x0100000000000000) +#define CPU_FTR_CFAR LONG_ASM_CONST(0x0100000000000000) #define CPU_FTR_HAS_PPR LONG_ASM_CONST(0x0200000000000000) #define CPU_FTR_DAWR LONG_ASM_CONST(0x0400000000000000) @@ -374,7 +375,7 @@ extern const char *powerpc_base_platform; #define CPU_FTRS_E6500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \ CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ - CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV) + CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_ALTIVEC_COMP) #define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN) /* 64-bit CPUs */ @@ -421,8 +422,8 @@ extern const char *powerpc_base_platform; CPU_FTR_DSCR | CPU_FTR_SAO | \ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ - CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | CPU_FTR_BCTAR | \ - CPU_FTR_TM_COMP) + CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \ + CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP) #define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 
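The DEFINE_BITOP() rework above only drops the unused postfix argument; the generated helpers remain atomic read-modify-write loops built on lwarx/stwcx. As a rough stand-in only (the kernel uses PowerPC inline assembly, not compiler builtins), the semantics look like this; the _sketch names are invented for illustration:

```c
/* Illustrative model of what DEFINE_BITOP(set_bits, or, "") and
 * DEFINE_BITOP(clear_bits_unlock, andc, PPC_RELEASE_BARRIER) generate;
 * the real code is a lwarx/stwcx. retry loop in inline asm. */
static inline void set_bits_sketch(unsigned long mask,
				   volatile unsigned long *p)
{
	__atomic_fetch_or((unsigned long *)p, mask, __ATOMIC_RELAXED);
}

static inline void clear_bits_unlock_sketch(unsigned long mask,
					    volatile unsigned long *p)
{
	/* the _unlock variant keeps PPC_RELEASE_BARRIER, i.e. release ordering */
	__atomic_fetch_and((unsigned long *)p, ~mask, __ATOMIC_RELEASE);
}
```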
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ diff --git a/arch/powerpc/include/asm/dma.h b/arch/powerpc/include/asm/dma.h index f6813e919bb2..a5c6d83b5f60 100644 --- a/arch/powerpc/include/asm/dma.h +++ b/arch/powerpc/include/asm/dma.h @@ -16,10 +16,6 @@ * * None of this really applies for Power Macintoshes. There is * basically just enough here to get kernel/dma.c to compile. - * - * There may be some comments or restrictions made here which are - * not valid for the PReP platform. Take what you read - * with a grain of salt. */ #include <asm/io.h> @@ -57,7 +53,6 @@ * - page registers for 5-7 don't use data bit 0, represent 128K pages * - page registers for 0-3 use bit 0, represent 64K pages * - * On PReP, DMA transfers are limited to the lower 16MB of _physical_ memory. * On CHRP, the W83C553F (and VLSI Tollgate?) support full 32 bit addressing. * Note that addresses loaded into registers must be _physical_ addresses, * not logical addresses (which may differ if paging is active). diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h index ac9790fc3836..cc0655a702a7 100644 --- a/arch/powerpc/include/asm/elf.h +++ b/arch/powerpc/include/asm/elf.h @@ -61,6 +61,7 @@ typedef elf_vrregset_t elf_fpxregset_t; instruction set this cpu supports. This could be done in userspace, but it's not easy, and we've already done it here. */ # define ELF_HWCAP (cur_cpu_spec->cpu_user_features) +# define ELF_HWCAP2 (cur_cpu_spec->cpu_user_features2) /* This yields a string that ld.so will use to load implementation specific libraries for optimization. This is more specific in diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 05e6d2ee1db9..8e5fae8beaf6 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -414,7 +414,6 @@ label##_relon_hv: \ #define SOFTEN_NOTEST_HV(vec) _SOFTEN_TEST(EXC_HV, vec) #define __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) \ - HMT_MEDIUM_PPR_DISCARD; \ SET_SCRATCH0(r13); /* save r13 */ \ EXCEPTION_PROLOG_0(PACA_EXGEN); \ __EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec); \ @@ -427,6 +426,7 @@ label##_relon_hv: \ . 
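The new cpu_user_features2 word surfaces in user space through ELF_HWCAP2, i.e. the AT_HWCAP2 auxiliary-vector entry added in the elf.h hunk above. A minimal consumer, assuming a libc that provides getauxval() and defines AT_HWCAP2:

```c
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	/* second hardware-capability word, filled from cpu_user_features2 */
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	printf("AT_HWCAP2 = 0x%lx\n", hwcap2);
	return 0;
}
```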
= loc; \ .globl label##_pSeries; \ label##_pSeries: \ + HMT_MEDIUM_PPR_DISCARD; \ _MASKABLE_EXCEPTION_PSERIES(vec, label, \ EXC_STD, SOFTEN_TEST_PR) diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h index 097dee57a7a9..0df54646f968 100644 --- a/arch/powerpc/include/asm/firmware.h +++ b/arch/powerpc/include/asm/firmware.h @@ -18,7 +18,6 @@ #include <asm/feature-fixups.h> /* firmware feature bitmask values */ -#define FIRMWARE_MAX_FEATURES 63 #define FW_FEATURE_PFT ASM_CONST(0x0000000000000001) #define FW_FEATURE_TCE ASM_CONST(0x0000000000000002) @@ -51,6 +50,8 @@ #define FW_FEATURE_OPALv2 ASM_CONST(0x0000000020000000) #define FW_FEATURE_SET_MODE ASM_CONST(0x0000000040000000) #define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000) +#define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000) +#define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000) #ifndef __ASSEMBLY__ @@ -65,7 +66,8 @@ enum { FW_FEATURE_BULK_REMOVE | FW_FEATURE_XDABR | FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR | FW_FEATURE_CMO | FW_FEATURE_VPHN | FW_FEATURE_XCMO | - FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY, + FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY | + FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN, FW_FEATURE_PSERIES_ALWAYS = 0, FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2, FW_FEATURE_POWERNV_ALWAYS = 0, diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h index 3147a2970125..3bdcfce2c42a 100644 --- a/arch/powerpc/include/asm/hardirq.h +++ b/arch/powerpc/include/asm/hardirq.h @@ -10,6 +10,9 @@ typedef struct { unsigned int pmu_irqs; unsigned int mce_exceptions; unsigned int spurious_irqs; +#ifdef CONFIG_PPC_DOORBELL + unsigned int doorbell_irqs; +#endif } ____cacheline_aligned irq_cpustat_t; DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index 62e11a32c4c2..f2498c8e595d 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -3,9 +3,37 @@ #ifdef CONFIG_HUGETLB_PAGE #include <asm/page.h> +#include <asm-generic/hugetlb.h> extern struct kmem_cache *hugepte_cache; +#ifdef CONFIG_PPC_BOOK3S_64 +/* + * This should work for other subarchs too. 
But right now we use the + * new format only for 64bit book3s + */ +static inline pte_t *hugepd_page(hugepd_t hpd) +{ + BUG_ON(!hugepd_ok(hpd)); + /* + * We have only four bits to encode, MMU page size + */ + BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf); + return (pte_t *)(hpd.pd & ~HUGEPD_SHIFT_MASK); +} + +static inline unsigned int hugepd_mmu_psize(hugepd_t hpd) +{ + return (hpd.pd & HUGEPD_SHIFT_MASK) >> 2; +} + +static inline unsigned int hugepd_shift(hugepd_t hpd) +{ + return mmu_psize_to_shift(hugepd_mmu_psize(hpd)); +} + +#else + static inline pte_t *hugepd_page(hugepd_t hpd) { BUG_ON(!hugepd_ok(hpd)); @@ -17,6 +45,9 @@ static inline unsigned int hugepd_shift(hugepd_t hpd) return hpd.pd & HUGEPD_SHIFT_MASK; } +#endif /* CONFIG_PPC_BOOK3S_64 */ + + static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr, unsigned pdshift) { diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index f94ef4213e9d..dd15e5e37d6d 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h @@ -15,10 +15,6 @@ extern int check_legacy_ioport(unsigned long base_port); #define I8042_DATA_REG 0x60 #define FDC_BASE 0x3f0 -/* only relevant for PReP */ -#define _PIDXR 0x279 -#define _PNPWRP 0xa79 -#define PNPBIOS_BASE 0xf000 #if defined(CONFIG_PPC64) && defined(CONFIG_PCI) extern struct pci_dev *isa_bridge_pcidev; diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h index aabcdba8f6b0..b9dd382cb349 100644 --- a/arch/powerpc/include/asm/kvm_asm.h +++ b/arch/powerpc/include/asm/kvm_asm.h @@ -67,6 +67,10 @@ #define BOOKE_INTERRUPT_HV_SYSCALL 40 #define BOOKE_INTERRUPT_HV_PRIV 41 +/* altivec */ +#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL 42 +#define BOOKE_INTERRUPT_ALTIVEC_ASSIST 43 + /* book3s */ #define BOOK3S_INTERRUPT_SYSTEM_RESET 0x100 diff --git a/arch/powerpc/include/asm/linkage.h b/arch/powerpc/include/asm/linkage.h new file mode 100644 index 000000000000..b36f650a13ff --- /dev/null +++ b/arch/powerpc/include/asm/linkage.h @@ -0,0 +1,13 @@ +#ifndef _ASM_POWERPC_LINKAGE_H +#define _ASM_POWERPC_LINKAGE_H + +#ifdef CONFIG_PPC64 +#define cond_syscall(x) \ + asm ("\t.weak " #x "\n\t.set " #x ", sys_ni_syscall\n" \ + "\t.weak ." #x "\n\t.set ." #x ", .sys_ni_syscall\n") +#define SYSCALL_ALIAS(alias, name) \ + asm ("\t.globl " #alias "\n\t.set " #alias ", " #name "\n" \ + "\t.globl ." #alias "\n\t.set ." #alias ", ." 
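For 64-bit book3s the hugepd_page()/hugepd_mmu_psize() pair above treats hpd.pd as a tagged pointer: the PTE-page address lives in the upper bits and a 4-bit MMU page-size index in bits 2-5, below HUGEPD_SHIFT_MASK. A standalone sketch of that pack/unpack, with invented names and the mask value assumed to be 0x3f:

```c
#define HPD_SHIFT_MASK	0x3f	/* assumed value of HUGEPD_SHIFT_MASK */

/* pte_page must be aligned so its low six bits are zero */
static inline unsigned long hugepd_pack(void *pte_page, unsigned int psize)
{
	return (unsigned long)pte_page | (psize << 2);
}

static inline unsigned int hugepd_unpack_psize(unsigned long pd)
{
	return (pd & HPD_SHIFT_MASK) >> 2;	/* mirrors hugepd_mmu_psize() */
}

static inline void *hugepd_unpack_page(unsigned long pd)
{
	return (void *)(pd & ~(unsigned long)HPD_SHIFT_MASK);	/* mirrors hugepd_page() */
}
```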
#name) +#endif + +#endif /* _ASM_POWERPC_LINKAGE_H */ diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index 3d6b4100dac1..3f3f691be2e7 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -50,7 +50,8 @@ struct machdep_calls { unsigned long prpn, unsigned long rflags, unsigned long vflags, - int psize, int ssize); + int psize, int apsize, + int ssize); long (*hpte_remove)(unsigned long hpte_group); void (*hpte_removebolted)(unsigned long ea, int psize, int ssize); diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h index 99d43e0c1e4a..936db360790a 100644 --- a/arch/powerpc/include/asm/mmu-book3e.h +++ b/arch/powerpc/include/asm/mmu-book3e.h @@ -215,6 +215,7 @@ #define TLBILX_T_CLASS3 7 #ifndef __ASSEMBLY__ +#include <asm/bug.h> extern unsigned int tlbcam_index; @@ -231,6 +232,10 @@ typedef struct { u64 high_slices_psize; /* 4 bits per slice for now */ u16 user_psize; /* page size index */ #endif +#ifdef CONFIG_PPC_64K_PAGES + /* for 4K PTE fragment support */ + void *pte_frag; +#endif } mm_context_t; /* Page size definitions, common between 32 and 64-bit @@ -250,6 +255,23 @@ struct mmu_psize_def }; extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT]; +static inline int shift_to_mmu_psize(unsigned int shift) +{ + int psize; + + for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) + if (mmu_psize_defs[psize].shift == shift) + return psize; + return -1; +} + +static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize) +{ + if (mmu_psize_defs[mmu_psize].shift) + return mmu_psize_defs[mmu_psize].shift; + BUG(); +} + /* The page sizes use the same names as 64-bit hash but are * constants */ diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h index b59e06f507ea..2accc9611248 100644 --- a/arch/powerpc/include/asm/mmu-hash64.h +++ b/arch/powerpc/include/asm/mmu-hash64.h @@ -21,6 +21,7 @@ * complete pgtable.h but only a portion of it. 
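The powerpc-specific asm/linkage.h added above exists because the 64-bit ELFv1 ABI carries both a function-descriptor symbol and a dot-prefixed entry-point symbol, and both need the weak alias. A usage sketch (sys_spu_run is just an example of a conditionally built syscall):

```c
#include <linux/linkage.h>	/* pulls in the new asm/linkage.h */

cond_syscall(sys_spu_run);
/* On CONFIG_PPC64 this emits, roughly:
 *	.weak sys_spu_run
 *	.set  sys_spu_run,  sys_ni_syscall
 *	.weak .sys_spu_run
 *	.set  .sys_spu_run, .sys_ni_syscall
 * so a kernel built without the feature resolves both the descriptor
 * and the dot entry symbol to sys_ni_syscall.
 */
```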
*/ #include <asm/pgtable-ppc64.h> +#include <asm/bug.h> /* * Segment table @@ -154,11 +155,29 @@ extern unsigned long htab_hash_mask; struct mmu_psize_def { unsigned int shift; /* number of bits */ - unsigned int penc; /* HPTE encoding */ + int penc[MMU_PAGE_COUNT]; /* HPTE encoding */ unsigned int tlbiel; /* tlbiel supported for that page size */ unsigned long avpnm; /* bits to mask out in AVPN in the HPTE */ unsigned long sllp; /* SLB L||LP (exact mask to use in slbmte) */ }; +extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT]; + +static inline int shift_to_mmu_psize(unsigned int shift) +{ + int psize; + + for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) + if (mmu_psize_defs[psize].shift == shift) + return psize; + return -1; +} + +static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize) +{ + if (mmu_psize_defs[mmu_psize].shift) + return mmu_psize_defs[mmu_psize].shift; + BUG(); +} #endif /* __ASSEMBLY__ */ @@ -181,6 +200,13 @@ struct mmu_psize_def */ #define VPN_SHIFT 12 +/* + * HPTE Large Page (LP) details + */ +#define LP_SHIFT 12 +#define LP_BITS 8 +#define LP_MASK(i) ((0xFF >> (i)) << LP_SHIFT) + #ifndef __ASSEMBLY__ static inline int segment_shift(int ssize) @@ -193,7 +219,6 @@ static inline int segment_shift(int ssize) /* * The current system page and segment sizes */ -extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT]; extern int mmu_linear_psize; extern int mmu_virtual_psize; extern int mmu_vmalloc_psize; @@ -237,14 +262,14 @@ static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize, /* * This function sets the AVPN and L fields of the HPTE appropriately - * for the page size + * using the base page size and actual page size. */ -static inline unsigned long hpte_encode_v(unsigned long vpn, - int psize, int ssize) +static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize, + int actual_psize, int ssize) { unsigned long v; - v = hpte_encode_avpn(vpn, psize, ssize); - if (psize != MMU_PAGE_4K) + v = hpte_encode_avpn(vpn, base_psize, ssize); + if (actual_psize != MMU_PAGE_4K) v |= HPTE_V_LARGE; return v; } @@ -254,19 +279,17 @@ static inline unsigned long hpte_encode_v(unsigned long vpn, * for the page size. 
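The new LP_* constants locate the HPTE large-page "LP" encoding: an 8-bit field starting at bit 12 of the second doubleword, of which LP_MASK(i) keeps only the top (8 - i) bits. Two worked values, computed directly from the definitions above:

```c
#define LP_SHIFT	12
#define LP_BITS		8
#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)

_Static_assert(LP_MASK(0) == 0xff000, "full 8-bit LP field, bits 12..19");
_Static_assert(LP_MASK(4) == 0x0f000, "top four LP bits only");
```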
We assume the pa is already "clean" that is properly * aligned for the requested page size */ -static inline unsigned long hpte_encode_r(unsigned long pa, int psize) +static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize, + int actual_psize) { - unsigned long r; - /* A 4K page needs no special encoding */ - if (psize == MMU_PAGE_4K) + if (actual_psize == MMU_PAGE_4K) return pa & HPTE_R_RPN; else { - unsigned int penc = mmu_psize_defs[psize].penc; - unsigned int shift = mmu_psize_defs[psize].shift; - return (pa & ~((1ul << shift) - 1)) | (penc << 12); + unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize]; + unsigned int shift = mmu_psize_defs[actual_psize].shift; + return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT); } - return r; } /* @@ -319,7 +342,8 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, unsigned int shift, unsigned int mmu_psize); extern void hash_failure_debug(unsigned long ea, unsigned long access, unsigned long vsid, unsigned long trap, - int ssize, int psize, unsigned long pte); + int ssize, int psize, int lpsize, + unsigned long pte); extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend, unsigned long pstart, unsigned long prot, int psize, int ssize); @@ -498,6 +522,10 @@ typedef struct { unsigned long acop; /* mask of enabled coprocessor types */ unsigned int cop_pid; /* pid value used with coprocessors */ #endif /* CONFIG_PPC_ICSWX */ +#ifdef CONFIG_PPC_64K_PAGES + /* for 4K PTE fragment support */ + void *pte_frag; +#endif } mm_context_t; diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index a4b28f165b6c..b6c8b58b1d76 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h @@ -117,6 +117,7 @@ extern int opal_enter_rtas(struct rtas_args *args, #define OPAL_SET_SLOT_LED_STATUS 55 #define OPAL_GET_EPOW_STATUS 56 #define OPAL_SET_SYSTEM_ATTENTION_LED 57 +#define OPAL_PCI_MSI_EOI 63 #ifndef __ASSEMBLY__ @@ -506,6 +507,7 @@ int64_t opal_pci_get_xive_reissue(uint64_t phb_id, uint32_t xive_number, uint8_t *p_bit, uint8_t *q_bit); int64_t opal_pci_set_xive_reissue(uint64_t phb_id, uint32_t xive_number, uint8_t p_bit, uint8_t q_bit); +int64_t opal_pci_msi_eoi(uint64_t phb_id, uint32_t hw_irq); int64_t opal_pci_set_xive_pe(uint64_t phb_id, uint32_t pe_number, uint32_t xive_num); int64_t opal_get_xive_source(uint64_t phb_id, uint32_t xive_num, diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index f072e974f8a2..988c812aab5b 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -249,6 +249,7 @@ extern long long virt_phys_offset; #define is_kernel_addr(x) ((x) >= PAGE_OFFSET) #endif +#ifndef CONFIG_PPC_BOOK3S_64 /* * Use the top bit of the higher-level page table entries to indicate whether * the entries we point to contain hugepages. 
This works because we know that @@ -260,6 +261,7 @@ extern long long virt_phys_offset; #else #define PD_HUGE 0x80000000 #endif +#endif /* CONFIG_PPC_BOOK3S_64 */ /* * Some number of bits at the level of the page table that points to @@ -354,14 +356,27 @@ typedef unsigned long pgprot_t; typedef struct { signed long pd; } hugepd_t; #ifdef CONFIG_HUGETLB_PAGE +#ifdef CONFIG_PPC_BOOK3S_64 +static inline int hugepd_ok(hugepd_t hpd) +{ + /* + * hugepd pointer, bottom two bits == 00 and next 4 bits + * indicate size of table + */ + return (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0)); +} +#else static inline int hugepd_ok(hugepd_t hpd) { return (hpd.pd > 0); } +#endif #define is_hugepd(pdep) (hugepd_ok(*((hugepd_t *)(pdep)))) +int pgd_huge(pgd_t pgd); #else /* CONFIG_HUGETLB_PAGE */ #define is_hugepd(pdep) 0 +#define pgd_huge(pgd) 0 #endif /* CONFIG_HUGETLB_PAGE */ struct page; @@ -378,7 +393,11 @@ void arch_free_page(struct page *page, int order); struct vm_area_struct; +#ifdef CONFIG_PPC_64K_PAGES +typedef pte_t *pgtable_t; +#else typedef struct page *pgtable_t; +#endif #include <asm-generic/memory_model.h> #endif /* __ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h index cd915d6b093d..88693cef4f3d 100644 --- a/arch/powerpc/include/asm/page_64.h +++ b/arch/powerpc/include/asm/page_64.h @@ -99,8 +99,7 @@ extern unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, unsigned long flags, unsigned int psize, - int topdown, - int use_cache); + int topdown); extern unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr); diff --git a/arch/powerpc/include/asm/parport.h b/arch/powerpc/include/asm/parport.h index 6dc2577932b1..a452968b29ea 100644 --- a/arch/powerpc/include/asm/parport.h +++ b/arch/powerpc/include/asm/parport.h @@ -21,9 +21,7 @@ static int parport_pc_find_nonpci_ports (int autoirq, int autodma) int count = 0; int virq; - for (np = NULL; (np = of_find_compatible_node(np, - "parallel", - "pnpPNP,400")) != NULL;) { + for_each_compatible_node(np, "parallel", "pnpPNP,400") { prop = of_get_property(np, "reg", &propsize); if (!prop || propsize > 6*sizeof(u32)) continue; diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index 025a130729bc..ffbc5fd549ac 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h @@ -70,6 +70,8 @@ struct pci_controller { * BIG_ENDIAN - cfg_addr is a big endian register * BROKEN_MRM - the 440EPx/GRx chips have an errata that causes hangs on * the PLB4. Effectively disable MRM commands by setting this. 
+ * FSL_CFG_REG_LINK - Freescale controller version in which the PCIe + * link status is in a RC PCIe cfg register (vs being a SoC register) */ #define PPC_INDIRECT_TYPE_SET_CFG_TYPE 0x00000001 #define PPC_INDIRECT_TYPE_EXT_REG 0x00000002 @@ -77,6 +79,7 @@ struct pci_controller { #define PPC_INDIRECT_TYPE_NO_PCIE_LINK 0x00000008 #define PPC_INDIRECT_TYPE_BIG_ENDIAN 0x00000010 #define PPC_INDIRECT_TYPE_BROKEN_MRM 0x00000020 +#define PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK 0x00000040 u32 indirect_type; /* Currently, we limit ourselves to 1 IO range and 3 mem * ranges since the common pci_bus structure can't handle more @@ -90,9 +93,9 @@ struct pci_controller { #ifdef CONFIG_PPC64 unsigned long buid; +#endif /* CONFIG_PPC64 */ void *private_data; -#endif /* CONFIG_PPC64 */ }; /* These are used for config access before all the PCI probing @@ -117,6 +120,12 @@ extern void setup_indirect_pci(struct pci_controller* hose, resource_size_t cfg_addr, resource_size_t cfg_data, u32 flags); +extern int indirect_read_config(struct pci_bus *bus, unsigned int devfn, + int offset, int len, u32 *val); + +extern int indirect_write_config(struct pci_bus *bus, unsigned int devfn, + int offset, int len, u32 val); + static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus) { return bus->sysdata; diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h index d0aec72722e9..f265049dd7d6 100644 --- a/arch/powerpc/include/asm/perf_event_server.h +++ b/arch/powerpc/include/asm/perf_event_server.h @@ -33,6 +33,8 @@ struct power_pmu { unsigned long *valp); int (*get_alternatives)(u64 event_id, unsigned int flags, u64 alt[]); + u64 (*bhrb_filter_map)(u64 branch_sample_type); + void (*config_bhrb)(u64 pmu_bhrb_filter); void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]); int (*limited_pmc_event)(u64 event_id); u32 flags; @@ -42,6 +44,9 @@ struct power_pmu { int (*cache_events)[PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX]; + + /* BHRB entries in the PMU */ + int bhrb_nr; }; /* @@ -52,6 +57,9 @@ struct power_pmu { #define PPMU_NO_SIPR 0x00000004 /* no SIPR/HV in MMCRA at all */ #define PPMU_NO_CONT_SAMPLING 0x00000008 /* no continuous sampling */ #define PPMU_SIAR_VALID 0x00000010 /* Processor has SIAR Valid bit */ +#define PPMU_HAS_SSLOT 0x00000020 /* Has sampled slot in MMCRA */ +#define PPMU_HAS_SIER 0x00000040 /* Has SIER */ +#define PPMU_BHRB 0x00000080 /* has BHRB feature enabled */ /* * Values for flags to get_alternatives() @@ -65,6 +73,7 @@ extern int register_power_pmu(struct power_pmu *); struct pt_regs; extern unsigned long perf_misc_flags(struct pt_regs *regs); extern unsigned long perf_instruction_pointer(struct pt_regs *regs); +extern unsigned long int read_bhrb(int n); /* * Only override the default definitions in include/linux/perf_event.h diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h index 580cf73b96e8..27b2386f738a 100644 --- a/arch/powerpc/include/asm/pgalloc-32.h +++ b/arch/powerpc/include/asm/pgalloc-32.h @@ -37,6 +37,17 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr); extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr); +static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) +{ + free_page((unsigned long)pte); +} + +static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) +{ + pgtable_page_dtor(ptepage); + 
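struct power_pmu grows two Branch History Rolling Buffer hooks plus a bhrb_nr entry count, which a CPU-specific driver fills in when it sets PPMU_BHRB. A hypothetical registration might look like the sketch below; the power8_* names, the entry count and the omitted mandatory fields are placeholders, not taken from this patch:

```c
/* Hypothetical wiring of the new BHRB callbacks into a PMU description. */
static u64  power8_bhrb_filter_map(u64 branch_sample_type);
static void power8_config_bhrb(u64 pmu_bhrb_filter);

static struct power_pmu power8_pmu = {
	.name			= "POWER8",
	.bhrb_filter_map	= power8_bhrb_filter_map,	/* translate perf branch flags */
	.config_bhrb		= power8_config_bhrb,		/* program the filter into the PMU */
	.bhrb_nr		= 32,				/* placeholder entry count */
	.flags			= PPMU_HAS_SIER | PPMU_BHRB,
	/* ... n_counter, compute_mmcr, etc. omitted ... */
};
```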
__free_page(ptepage); +} + static inline void pgtable_free(void *table, unsigned index_size) { BUG_ON(index_size); /* 32-bit doesn't use this */ @@ -45,4 +56,38 @@ static inline void pgtable_free(void *table, unsigned index_size) #define check_pgt_cache() do { } while (0) +#ifdef CONFIG_SMP +static inline void pgtable_free_tlb(struct mmu_gather *tlb, + void *table, int shift) +{ + unsigned long pgf = (unsigned long)table; + BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE); + pgf |= shift; + tlb_remove_table(tlb, (void *)pgf); +} + +static inline void __tlb_remove_table(void *_table) +{ + void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE); + unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE; + + pgtable_free(table, shift); +} +#else +static inline void pgtable_free_tlb(struct mmu_gather *tlb, + void *table, int shift) +{ + pgtable_free(table, shift); +} +#endif + +static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, + unsigned long address) +{ + struct page *page = page_address(table); + + tlb_flush_pgtable(tlb, address); + pgtable_page_dtor(page); + pgtable_free_tlb(tlb, page, 0); +} #endif /* _ASM_POWERPC_PGALLOC_32_H */ diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h index 292725cec2e3..91acb12bac92 100644 --- a/arch/powerpc/include/asm/pgalloc-64.h +++ b/arch/powerpc/include/asm/pgalloc-64.h @@ -35,7 +35,10 @@ struct vmemmap_backing { #define MAX_PGTABLE_INDEX_SIZE 0xf extern struct kmem_cache *pgtable_cache[]; -#define PGT_CACHE(shift) (pgtable_cache[(shift)-1]) +#define PGT_CACHE(shift) ({ \ + BUG_ON(!(shift)); \ + pgtable_cache[(shift) - 1]; \ + }) static inline pgd_t *pgd_alloc(struct mm_struct *mm) { @@ -72,8 +75,100 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte)) #define pmd_pgtable(pmd) pmd_page(pmd) +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, + unsigned long address) +{ + return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO); +} + +static inline pgtable_t pte_alloc_one(struct mm_struct *mm, + unsigned long address) +{ + struct page *page; + pte_t *pte; + + pte = pte_alloc_one_kernel(mm, address); + if (!pte) + return NULL; + page = virt_to_page(pte); + pgtable_page_ctor(page); + return page; +} + +static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) +{ + free_page((unsigned long)pte); +} + +static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) +{ + pgtable_page_dtor(ptepage); + __free_page(ptepage); +} + +static inline void pgtable_free(void *table, unsigned index_size) +{ + if (!index_size) + free_page((unsigned long)table); + else { + BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE); + kmem_cache_free(PGT_CACHE(index_size), table); + } +} + +#ifdef CONFIG_SMP +static inline void pgtable_free_tlb(struct mmu_gather *tlb, + void *table, int shift) +{ + unsigned long pgf = (unsigned long)table; + BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE); + pgf |= shift; + tlb_remove_table(tlb, (void *)pgf); +} + +static inline void __tlb_remove_table(void *_table) +{ + void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE); + unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE; + + pgtable_free(table, shift); +} +#else /* !CONFIG_SMP */ +static inline void pgtable_free_tlb(struct mmu_gather *tlb, + void *table, int shift) +{ + pgtable_free(table, shift); +} +#endif /* CONFIG_SMP */ + +static inline void 
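On SMP both pgalloc variants now defer page-table frees through tlb_remove_table(), smuggling the PGT_CACHE() index size in the low bits of the table pointer; tables are assumed aligned enough that MAX_PGTABLE_INDEX_SIZE (0xf) fits there. The round trip, reduced to its essentials:

```c
#define MAX_PGTABLE_INDEX_SIZE	0xf

/* pack: table pointer plus index size -> one cookie for tlb_remove_table() */
static inline void *pack_table(void *table, unsigned int shift)
{
	/* shift == 0 means "a full page", otherwise a PGT_CACHE() index */
	return (void *)((unsigned long)table | shift);
}

/* unpack in __tlb_remove_table(), after the TLB flush has completed */
static inline void unpack_table(void *cookie, void **table, unsigned int *shift)
{
	*shift = (unsigned long)cookie & MAX_PGTABLE_INDEX_SIZE;
	*table = (void *)((unsigned long)cookie & ~(unsigned long)MAX_PGTABLE_INDEX_SIZE);
}
```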
__pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, + unsigned long address) +{ + struct page *page = page_address(table); + + tlb_flush_pgtable(tlb, address); + pgtable_page_dtor(page); + pgtable_free_tlb(tlb, page, 0); +} + +#else /* if CONFIG_PPC_64K_PAGES */ +/* + * we support 16 fragments per PTE page. + */ +#define PTE_FRAG_NR 16 +/* + * We use a 2K PTE page fragment and another 2K for storing + * real_pte_t hash index + */ +#define PTE_FRAG_SIZE_SHIFT 12 +#define PTE_FRAG_SIZE (2 * PTRS_PER_PTE * sizeof(pte_t)) -#else /* CONFIG_PPC_64K_PAGES */ +extern pte_t *page_table_alloc(struct mm_struct *, unsigned long, int); +extern void page_table_free(struct mm_struct *, unsigned long *, int); +extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift); +#ifdef CONFIG_SMP +extern void __tlb_remove_table(void *_table); +#endif #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd) @@ -83,51 +178,56 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pmd_set(pmd, (unsigned long)pte); } -#define pmd_populate(mm, pmd, pte_page) \ - pmd_populate_kernel(mm, pmd, page_address(pte_page)) -#define pmd_pgtable(pmd) pmd_page(pmd) - -#endif /* CONFIG_PPC_64K_PAGES */ - -static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) +static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, + pgtable_t pte_page) { - return kmem_cache_alloc(PGT_CACHE(PMD_INDEX_SIZE), - GFP_KERNEL|__GFP_REPEAT); + pmd_set(pmd, (unsigned long)pte_page); } -static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) +static inline pgtable_t pmd_pgtable(pmd_t pmd) { - kmem_cache_free(PGT_CACHE(PMD_INDEX_SIZE), pmd); + return (pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE); } static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) { - return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO); + return (pte_t *)page_table_alloc(mm, address, 1); } static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) { - struct page *page; - pte_t *pte; + return (pgtable_t)page_table_alloc(mm, address, 0); +} - pte = pte_alloc_one_kernel(mm, address); - if (!pte) - return NULL; - page = virt_to_page(pte); - pgtable_page_ctor(page); - return page; +static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) +{ + page_table_free(mm, (unsigned long *)pte, 1); } -static inline void pgtable_free(void *table, unsigned index_size) +static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) { - if (!index_size) - free_page((unsigned long)table); - else { - BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE); - kmem_cache_free(PGT_CACHE(index_size), table); - } + page_table_free(mm, (unsigned long *)ptepage, 0); +} + +static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, + unsigned long address) +{ + tlb_flush_pgtable(tlb, address); + pgtable_free_tlb(tlb, table, 0); +} +#endif /* CONFIG_PPC_64K_PAGES */ + +static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) +{ + return kmem_cache_alloc(PGT_CACHE(PMD_INDEX_SIZE), + GFP_KERNEL|__GFP_REPEAT); +} + +static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) +{ + kmem_cache_free(PGT_CACHE(PMD_INDEX_SIZE), pmd); } #define __pmd_free_tlb(tlb, pmd, addr) \ diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h index bf301ac62f35..e9a9f60e596d 100644 --- a/arch/powerpc/include/asm/pgalloc.h +++ b/arch/powerpc/include/asm/pgalloc.h @@ -3,6 +3,7 @@ #ifdef 
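The 64K-page PTE fragment numbers are self-consistent: with a PTE_INDEX_SIZE of 8 (see the pgtable-ppc64-64k.h hunk below) PTRS_PER_PTE is 256, so one fragment is 2 * 256 * 8 = 4096 bytes (2K of PTEs plus 2K of real_pte_t hash-slot data), and a 64K page splits into 16 of them, matching PTE_FRAG_NR. A quick check, modelling pte_t as unsigned long:

```c
#define PTE_INDEX_SIZE	8
#define PTRS_PER_PTE	(1UL << PTE_INDEX_SIZE)		/* 256 */
#define PTE_FRAG_SIZE	(2 * PTRS_PER_PTE * sizeof(unsigned long))
#define PTE_FRAG_NR	16
#define PAGE_SIZE_64K	(64UL * 1024)

_Static_assert(PTE_FRAG_SIZE == 4096, "2K of PTEs + 2K of hash-slot info");
_Static_assert(PAGE_SIZE_64K / PTE_FRAG_SIZE == PTE_FRAG_NR,
	       "sixteen fragments per 64K page");
```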
__KERNEL__ #include <linux/mm.h> +#include <asm-generic/tlb.h> #ifdef CONFIG_PPC_BOOK3E extern void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address); @@ -13,56 +14,11 @@ static inline void tlb_flush_pgtable(struct mmu_gather *tlb, } #endif /* !CONFIG_PPC_BOOK3E */ -static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) -{ - free_page((unsigned long)pte); -} - -static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) -{ - pgtable_page_dtor(ptepage); - __free_page(ptepage); -} - #ifdef CONFIG_PPC64 #include <asm/pgalloc-64.h> #else #include <asm/pgalloc-32.h> #endif -#ifdef CONFIG_SMP -struct mmu_gather; -extern void tlb_remove_table(struct mmu_gather *, void *); - -static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift) -{ - unsigned long pgf = (unsigned long)table; - BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE); - pgf |= shift; - tlb_remove_table(tlb, (void *)pgf); -} - -static inline void __tlb_remove_table(void *_table) -{ - void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE); - unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE; - - pgtable_free(table, shift); -} -#else /* CONFIG_SMP */ -static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift) -{ - pgtable_free(table, shift); -} -#endif /* !CONFIG_SMP */ - -static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage, - unsigned long address) -{ - tlb_flush_pgtable(tlb, address); - pgtable_page_dtor(ptepage); - pgtable_free_tlb(tlb, page_address(ptepage), 0); -} - #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_PGALLOC_H */ diff --git a/arch/powerpc/include/asm/pgtable-ppc64-64k.h b/arch/powerpc/include/asm/pgtable-ppc64-64k.h index be4e2878fbc0..45142d640720 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64-64k.h +++ b/arch/powerpc/include/asm/pgtable-ppc64-64k.h @@ -4,10 +4,10 @@ #include <asm-generic/pgtable-nopud.h> -#define PTE_INDEX_SIZE 12 -#define PMD_INDEX_SIZE 12 +#define PTE_INDEX_SIZE 8 +#define PMD_INDEX_SIZE 10 #define PUD_INDEX_SIZE 0 -#define PGD_INDEX_SIZE 6 +#define PGD_INDEX_SIZE 12 #ifndef __ASSEMBLY__ #define PTE_TABLE_SIZE (sizeof(real_pte_t) << PTE_INDEX_SIZE) diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h index 0182c203e411..e3d55f6f24fe 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64.h +++ b/arch/powerpc/include/asm/pgtable-ppc64.h @@ -167,8 +167,7 @@ * Find an entry in a page-table-directory. We combine the address region * (the high order N bits) and the pgd portion of the address. */ -/* to avoid overflow in free_pgtables we don't use PTRS_PER_PGD here */ -#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x1ff) +#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1)) #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index a9cbd3ba5c33..7aeb9555f6ea 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -17,6 +17,12 @@ struct mm_struct; # include <asm/pgtable-ppc32.h> #endif +/* + * We save the slot number & secondary bit in the second half of the + * PTE page. We use the 8 bytes per each pte entry. 
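The new index sizes for the 64K-page configuration (PTE 8, PMD 10, PUD 0, PGD 12) together with the simplified pgd_index() describe a three-level table over a 46-bit (64TB) virtual address space: 16-bit page offset + 8 + 10 + 12 = 46. A derivation sketch using the usual shift definitions:

```c
#define PAGE_SHIFT	16	/* 64K pages */
#define PTE_INDEX_SIZE	8
#define PMD_INDEX_SIZE	10
#define PGD_INDEX_SIZE	12

#define PMD_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)	/* 24 */
#define PGDIR_SHIFT	(PMD_SHIFT + PMD_INDEX_SIZE)	/* 34; PUD is folded */
#define PTRS_PER_PGD	(1UL << PGD_INDEX_SIZE)

/* pgd_index() from the hunk above, now a plain mask with PTRS_PER_PGD - 1 */
#define pgd_index(addr)	(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

_Static_assert(PGDIR_SHIFT + PGD_INDEX_SIZE == 46,
	       "46 bits of virtual address, i.e. 64TB");
```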
+ */ +#define PTE_PAGE_HIDX_OFFSET (PTRS_PER_PTE * 8) + #ifndef __ASSEMBLY__ #include <asm/tlbflush.h> @@ -212,6 +218,8 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *); extern int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr); +extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, + unsigned long end, int write, struct page **pages, int *nr); #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 8752bc8e34a3..0c34e4803499 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -82,6 +82,8 @@ #define __REGA0_R31 31 /* sorted alphabetically */ +#define PPC_INST_BHRBE 0x7c00025c +#define PPC_INST_CLRBHRB 0x7c00035c #define PPC_INST_DCBA 0x7c0005ec #define PPC_INST_DCBA_MASK 0xfc0007fe #define PPC_INST_DCBAL 0x7c2005ec @@ -297,6 +299,12 @@ #define PPC_NAP stringify_in_c(.long PPC_INST_NAP) #define PPC_SLEEP stringify_in_c(.long PPC_INST_SLEEP) +/* BHRB instructions */ +#define PPC_CLRBHRB stringify_in_c(.long PPC_INST_CLRBHRB) +#define PPC_MFBHRBE(r, n) stringify_in_c(.long PPC_INST_BHRBE | \ + __PPC_RT(r) | \ + (((n) & 0x3ff) << 11)) + /* Transactional memory instructions */ #define TRECHKPT stringify_in_c(.long PPC_INST_TRECHKPT) #define TRECLAIM(r) stringify_in_c(.long PPC_INST_TRECLAIM \ diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 7ff9eaa3ea6c..d7e67ca8b4a6 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -40,7 +40,7 @@ * -- BenH. */ -/* PREP sub-platform types see residual.h for these */ +/* PREP sub-platform types. Unused */ #define _PREP_Motorola 0x01 /* motorola prep */ #define _PREP_Firm 0x02 /* firmworks prep */ #define _PREP_IBM 0x00 /* ibm prep */ @@ -56,13 +56,6 @@ extern int _chrp_type; -#ifdef CONFIG_PPC_PREP - -/* what kind of prep workstation we are */ -extern int _prep_type; - -#endif /* CONFIG_PPC_PREP */ - #endif /* defined(__KERNEL__) && defined(CONFIG_PPC32) */ /* @@ -288,6 +281,9 @@ struct thread_struct { #endif #ifdef CONFIG_PPC_BOOK3S_64 unsigned long tar; + unsigned long ebbrr; + unsigned long ebbhr; + unsigned long bescr; #endif }; diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h index 99c92d5363e4..bc2da154f68b 100644 --- a/arch/powerpc/include/asm/prom.h +++ b/arch/powerpc/include/asm/prom.h @@ -74,6 +74,75 @@ struct of_drconf_cell { #define DRCONF_MEM_AI_INVALID 0x00000040 #define DRCONF_MEM_RESERVED 0x00000080 +/* + * There are two methods for telling firmware what our capabilities are. + * Newer machines have an "ibm,client-architecture-support" method on the + * root node. For older machines, we have to call the "process-elf-header" + * method in the /packages/elf-loader node, passing it a fake 32-bit + * ELF header containing a couple of PT_NOTE sections that contain + * structures that contain various information. + */ + +/* New method - extensible architecture description vector. 
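PPC_MFBHRBE() hand-assembles the mfbhrbe instruction (the binutils of the day did not know it): the base opcode 0x7c00025c is OR-ed with the destination register in the RT field and the BHRB entry number at bit 11. A worked example, with the RT field layout written out as an assumption rather than taken from ppc-opcode.h:

```c
#define PPC_INST_BHRBE		0x7c00025c
#define PPC_RT_SKETCH(r)	(((r) & 0x1f) << 21)	/* assumed RT encoding */

#define PPC_MFBHRBE_SKETCH(r, n) \
	(PPC_INST_BHRBE | PPC_RT_SKETCH(r) | (((n) & 0x3ff) << 11))

/* "mfbhrbe r3, 0" assembles to 0x7c60025c */
_Static_assert(PPC_MFBHRBE_SKETCH(3, 0) == 0x7c60025c, "entry 0 into r3");
```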
*/ + +/* Option vector bits - generic bits in byte 1 */ +#define OV_IGNORE 0x80 /* ignore this vector */ +#define OV_CESSATION_POLICY 0x40 /* halt if unsupported option present*/ + +/* Option vector 1: processor architectures supported */ +#define OV1_PPC_2_00 0x80 /* set if we support PowerPC 2.00 */ +#define OV1_PPC_2_01 0x40 /* set if we support PowerPC 2.01 */ +#define OV1_PPC_2_02 0x20 /* set if we support PowerPC 2.02 */ +#define OV1_PPC_2_03 0x10 /* set if we support PowerPC 2.03 */ +#define OV1_PPC_2_04 0x08 /* set if we support PowerPC 2.04 */ +#define OV1_PPC_2_05 0x04 /* set if we support PowerPC 2.05 */ +#define OV1_PPC_2_06 0x02 /* set if we support PowerPC 2.06 */ +#define OV1_PPC_2_07 0x01 /* set if we support PowerPC 2.07 */ + +/* Option vector 2: Open Firmware options supported */ +#define OV2_REAL_MODE 0x20 /* set if we want OF in real mode */ + +/* Option vector 3: processor options supported */ +#define OV3_FP 0x80 /* floating point */ +#define OV3_VMX 0x40 /* VMX/Altivec */ +#define OV3_DFP 0x20 /* decimal FP */ + +/* Option vector 4: IBM PAPR implementation */ +#define OV4_MIN_ENT_CAP 0x01 /* minimum VP entitled capacity */ + +/* Option vector 5: PAPR/OF options supported + * These bits are also used in firmware_has_feature() to validate + * the capabilities reported for vector 5 in the device tree so we + * encode the vector index in the define and use the OV5_FEAT() + * and OV5_INDX() macros to extract the desired information. + */ +#define OV5_FEAT(x) ((x) & 0xff) +#define OV5_INDX(x) ((x) >> 8) +#define OV5_LPAR 0x0280 /* logical partitioning supported */ +#define OV5_SPLPAR 0x0240 /* shared-processor LPAR supported */ +/* ibm,dynamic-reconfiguration-memory property supported */ +#define OV5_DRCONF_MEMORY 0x0220 +#define OV5_LARGE_PAGES 0x0210 /* large pages supported */ +#define OV5_DONATE_DEDICATE_CPU 0x0202 /* donate dedicated CPU support */ +#define OV5_MSI 0x0201 /* PCIe/MSI support */ +#define OV5_CMO 0x0480 /* Cooperative Memory Overcommitment */ +#define OV5_XCMO 0x0440 /* Page Coalescing */ +#define OV5_TYPE1_AFFINITY 0x0580 /* Type 1 NUMA affinity */ +#define OV5_PRRN 0x0540 /* Platform Resource Reassignment */ +#define OV5_PFO_HW_RNG 0x0E80 /* PFO Random Number Generator */ +#define OV5_PFO_HW_842 0x0E40 /* PFO Compression Accelerator */ +#define OV5_PFO_HW_ENCR 0x0E20 /* PFO Encryption Accelerator */ +#define OV5_SUB_PROCESSORS 0x0F01 /* 1,2,or 4 Sub-Processors supported */ + +/* Option Vector 6: IBM PAPR hints */ +#define OV6_LINUX 0x02 /* Linux is our OS */ + +/* + * The architecture vector has an array of PVR mask/value pairs, + * followed by # option vectors - 1, followed by the option vectors. + */ +extern unsigned char ibm_architecture_vec[]; + /* These includes are put at the bottom because they may contain things * that are overridden by this file. 
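Each OV5_* value now encodes both the option-vector-5 byte index (upper byte) and the feature bit within that byte (lower byte), so firmware_has_feature() can later check exactly what was negotiated. For example OV5_PRRN, 0x0540, decodes to byte 5, bit 0x40:

```c
#define OV5_FEAT(x)	((x) & 0xff)
#define OV5_INDX(x)	((x) >> 8)
#define OV5_PRRN	0x0540	/* Platform Resource Reassignment */

_Static_assert(OV5_INDX(OV5_PRRN) == 5,    "vector-5 byte index");
_Static_assert(OV5_FEAT(OV5_PRRN) == 0x40, "feature bit inside that byte");
```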
Ideally they shouldn't be included * by this file, but there are a bunch of .c files that currently depend diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index 5f995681bc1d..becc08e6a65c 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h @@ -92,7 +92,8 @@ static inline long regs_return_value(struct pt_regs *regs) } while(0) struct task_struct; -extern unsigned long ptrace_get_reg(struct task_struct *task, int regno); +extern int ptrace_get_reg(struct task_struct *task, int regno, + unsigned long *data); extern int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data); diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index c9c67fc888c9..3d17427e4fd7 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -267,7 +267,17 @@ #define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */ #define SPRN_FSCR 0x099 /* Facility Status & Control Register */ #define FSCR_TAR (1 << (63-55)) /* Enable Target Address Register */ +#define FSCR_EBB (1 << (63-56)) /* Enable Event Based Branching */ #define FSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */ +#define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */ +#define HFSCR_TAR (1 << (63-55)) /* Enable Target Address Register */ +#define HFSCR_EBB (1 << (63-56)) /* Enable Event Based Branching */ +#define HFSCR_TM (1 << (63-58)) /* Enable Transactional Memory */ +#define HFSCR_PM (1 << (63-60)) /* Enable prob/priv access to PMU SPRs */ +#define HFSCR_BHRB (1 << (63-59)) /* Enable Branch History Rolling Buffer*/ +#define HFSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */ +#define HFSCR_VECVSX (1 << (63-62)) /* Enable VMX/VSX */ +#define HFSCR_FP (1 << (63-63)) /* Enable Floating Point */ #define SPRN_TAR 0x32f /* Target Address Register */ #define SPRN_LPCR 0x13E /* LPAR Control Register */ #define LPCR_VPM0 (1ul << (63-0)) @@ -631,6 +641,7 @@ #define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */ #define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */ #define SPRN_MMCR1 798 +#define SPRN_MMCR2 769 #define SPRN_MMCRA 0x312 #define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */ #define MMCRA_SDAR_DCACHE_MISS 0x40000000UL @@ -649,6 +660,13 @@ #define POWER7P_MMCRA_SIAR_VALID 0x10000000 /* P7+ SIAR contents valid */ #define POWER7P_MMCRA_SDAR_VALID 0x08000000 /* P7+ SDAR contents valid */ +#define SPRN_MMCRH 316 /* Hypervisor monitor mode control register */ +#define SPRN_MMCRS 894 /* Supervisor monitor mode control register */ +#define SPRN_MMCRC 851 /* Core monitor mode control register */ +#define SPRN_EBBHR 804 /* Event based branch handler register */ +#define SPRN_EBBRR 805 /* Event based branch return register */ +#define SPRN_BESCR 806 /* Branch event status and control register */ + #define SPRN_PMC1 787 #define SPRN_PMC2 788 #define SPRN_PMC3 789 @@ -659,6 +677,11 @@ #define SPRN_PMC8 794 #define SPRN_SIAR 780 #define SPRN_SDAR 781 +#define SPRN_SIER 784 +#define SIER_SIPR 0x2000000 /* Sampled MSR_PR */ +#define SIER_SIHV 0x1000000 /* Sampled MSR_HV */ +#define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */ +#define SIER_SDAR_VALID 0x0200000 /* SDAR contents valid */ #define SPRN_PA6T_MMCR0 795 #define PA6T_MMCR0_EN0 0x0000000000000001UL diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index aef00c675905..a8bc2bb4adc9 100644 --- a/arch/powerpc/include/asm/rtas.h +++ 
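The FSCR/HFSCR masks follow the ISA's big-endian bit numbering, so 1 << (63 - n) converts architected bit n of the 64-bit register into an ordinary mask; FSCR_EBB (bit 56) therefore ends up as 0x80. A small sketch of testing such a bit:

```c
#define FSCR_TAR	(1UL << (63 - 55))	/* 0x100 */
#define FSCR_EBB	(1UL << (63 - 56))	/* 0x080 */
#define FSCR_DSCR	(1UL << (63 - 61))	/* 0x004 */

/* Is Event Based Branching enabled in this FSCR image? */
static inline int fscr_ebb_enabled(unsigned long fscr)
{
	return (fscr & FSCR_EBB) != 0;
}

_Static_assert(FSCR_EBB == 0x80, "ISA bit 56 == plain bit 7");
```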
b/arch/powerpc/include/asm/rtas.h @@ -143,6 +143,8 @@ struct rtas_suspend_me_data { #define RTAS_TYPE_PMGM_TIME_ALARM 0x6f #define RTAS_TYPE_PMGM_CONFIG_CHANGE 0x70 #define RTAS_TYPE_PMGM_SERVICE_PROC 0x71 +/* Platform Resource Reassignment Notification */ +#define RTAS_TYPE_PRRN 0xA0 /* RTAS check-exception vector offset */ #define RTAS_VECTOR_EXTERNAL_INTERRUPT 0x500 @@ -277,6 +279,10 @@ extern int early_init_dt_scan_rtas(unsigned long node, extern void pSeries_log_error(char *buf, unsigned int err_type, int fatal); +#ifdef CONFIG_PPC_PSERIES +extern int pseries_devicetree_update(s32 scope); +#endif + #ifdef CONFIG_PPC_RTAS_DAEMON extern void rtas_cancel_event_scan(void); #else diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index 195ce2ac5691..ffbaabebcdca 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -143,6 +143,8 @@ extern void __cpu_die(unsigned int cpu); /* for UP */ #define hard_smp_processor_id() get_hard_smp_processor_id(0) #define smp_setup_cpu_maps() +static inline void inhibit_secondary_onlining(void) {} +static inline void uninhibit_secondary_onlining(void) {} #endif /* CONFIG_SMP */ diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index ebbec52d21bd..43523fe0d8b4 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h @@ -190,7 +190,7 @@ SYSCALL_SPU(getcwd) SYSCALL_SPU(capget) SYSCALL_SPU(capset) COMPAT_SYS(sigaltstack) -SYSX_SPU(sys_sendfile,compat_sys_sendfile_wrapper,sys_sendfile) +COMPAT_SYS_SPU(sendfile) SYSCALL(ni_syscall) SYSCALL(ni_syscall) PPC_SYS(vfork) @@ -230,7 +230,7 @@ COMPAT_SYS_SPU(sched_setaffinity) COMPAT_SYS_SPU(sched_getaffinity) SYSCALL(ni_syscall) SYSCALL(ni_syscall) -SYSX(sys_ni_syscall,compat_sys_sendfile64_wrapper,sys_sendfile64) +SYS32ONLY(sendfile64) COMPAT_SYS_SPU(io_setup) SYSCALL_SPU(io_destroy) COMPAT_SYS_SPU(io_getevents) @@ -239,7 +239,7 @@ SYSCALL_SPU(io_cancel) SYSCALL(set_tid_address) SYSX_SPU(sys_fadvise64,ppc32_fadvise64,sys_fadvise64) SYSCALL(exit_group) -SYSX(sys_lookup_dcookie,ppc32_lookup_dcookie,sys_lookup_dcookie) +COMPAT_SYS(lookup_dcookie) SYSCALL_SPU(epoll_create) SYSCALL_SPU(epoll_ctl) SYSCALL_SPU(epoll_wait) @@ -273,8 +273,8 @@ COMPAT_SYS(mq_timedreceive) COMPAT_SYS(mq_notify) COMPAT_SYS(mq_getsetattr) COMPAT_SYS(kexec_load) -COMPAT_SYS(add_key) -COMPAT_SYS(request_key) +SYSCALL(add_key) +SYSCALL(request_key) COMPAT_SYS(keyctl) COMPAT_SYS(waitid) SYSCALL(ioprio_set) diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 406b7b9a1341..8ceea14d6fe4 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -182,8 +182,6 @@ static inline bool test_thread_local_flags(unsigned int flags) #define is_32bit_task() (1) #endif -#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) - #endif /* !__ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h index 852ed1b384f6..161ab662843b 100644 --- a/arch/powerpc/include/asm/topology.h +++ b/arch/powerpc/include/asm/topology.h @@ -71,6 +71,7 @@ static inline void sysfs_remove_device_from_node(struct device *dev, #if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR) extern int start_topology_update(void); extern int stop_topology_update(void); +extern int prrn_is_enabled(void); #else static inline int start_topology_update(void) { @@ -80,6 +81,10 @@ static inline int stop_topology_update(void) { 
return 0; } +static inline int prrn_is_enabled(void) +{ + return 0; +} #endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */ #include <asm-generic/topology.h> diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index 1487f0f12293..3ca819f541bf 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -56,11 +56,5 @@ #define __ARCH_WANT_SYS_VFORK #define __ARCH_WANT_SYS_CLONE -/* - * "Conditional" syscalls - */ -#define cond_syscall(x) \ - asmlinkage long x (void) __attribute__((weak,alias("sys_ni_syscall"))) - #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_UNISTD_H_ */ diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h index b532060d0916..23016020915e 100644 --- a/arch/powerpc/include/asm/uprobes.h +++ b/arch/powerpc/include/asm/uprobes.h @@ -51,4 +51,5 @@ extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs); extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk); extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data); extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs); +extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs); #endif /* _ASM_UPROBES_H */ diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h index 4ae9a09c3b89..282d43a0c855 100644 --- a/arch/powerpc/include/asm/xics.h +++ b/arch/powerpc/include/asm/xics.h @@ -150,6 +150,7 @@ extern void xics_register_ics(struct ics *ics); extern void xics_teardown_cpu(void); extern void xics_kexec_teardown_cpu(int secondary); extern void xics_migrate_irqs_away(void); +extern void icp_native_eoi(struct irq_data *d); #ifdef CONFIG_SMP extern int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask, unsigned int strict_check); diff --git a/arch/powerpc/include/uapi/asm/linkage.h b/arch/powerpc/include/uapi/asm/linkage.h deleted file mode 100644 index e1c4ac1cc4ba..000000000000 --- a/arch/powerpc/include/uapi/asm/linkage.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_POWERPC_LINKAGE_H -#define _ASM_POWERPC_LINKAGE_H - -/* Nothing to see here... */ - -#endif /* _ASM_POWERPC_LINKAGE_H */ diff --git a/arch/powerpc/include/uapi/asm/ptrace.h b/arch/powerpc/include/uapi/asm/ptrace.h index 66b9ca4ee94a..77d2ed35b111 100644 --- a/arch/powerpc/include/uapi/asm/ptrace.h +++ b/arch/powerpc/include/uapi/asm/ptrace.h @@ -211,6 +211,7 @@ struct ppc_debug_info { #define PPC_DEBUG_FEATURE_INSN_BP_MASK 0x0000000000000002 #define PPC_DEBUG_FEATURE_DATA_BP_RANGE 0x0000000000000004 #define PPC_DEBUG_FEATURE_DATA_BP_MASK 0x0000000000000008 +#define PPC_DEBUG_FEATURE_DATA_BP_DAWR 0x0000000000000010 #ifndef __ASSEMBLY__ diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h index a26dcaece509..a36daf3c6f9a 100644 --- a/arch/powerpc/include/uapi/asm/socket.h +++ b/arch/powerpc/include/uapi/asm/socket.h @@ -79,4 +79,6 @@ #define SO_LOCK_FILTER 44 +#define SO_SELECT_ERR_QUEUE 45 + #endif /* _ASM_POWERPC_SOCKET_H */ |
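The uapi socket.h hunk adds SO_SELECT_ERR_QUEUE (45), an option that lets queued socket errors wake select()/poll() callers. A hedged user-space sketch; the constant needs a recent enough kernel and libc, hence the fallback define with the powerpc value from the hunk above:

```c
#include <sys/socket.h>

#ifndef SO_SELECT_ERR_QUEUE
#define SO_SELECT_ERR_QUEUE 45	/* powerpc value from the hunk above */
#endif

/* Opt an existing socket fd into error-queue wakeups. */
static int enable_errqueue_wakeup(int fd)
{
	int one = 1;

	return setsockopt(fd, SOL_SOCKET, SO_SELECT_ERR_QUEUE, &one, sizeof(one));
}
```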