Diffstat (limited to 'arch/powerpc/include/asm')
-rw-r--r--  arch/powerpc/include/asm/atomic.h | 159
-rw-r--r--  arch/powerpc/include/asm/book3s/32/mmu-hash.h (renamed from arch/powerpc/include/asm/mmu-hash32.h) | 0
-rw-r--r--  arch/powerpc/include/asm/book3s/64/hash-4k.h | 36
-rw-r--r--  arch/powerpc/include/asm/book3s/64/hash-64k.h | 57
-rw-r--r--  arch/powerpc/include/asm/book3s/64/hash.h | 68
-rw-r--r--  arch/powerpc/include/asm/book3s/64/mmu-hash.h (renamed from arch/powerpc/include/asm/mmu-hash64.h) | 4
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable.h | 36
-rw-r--r--  arch/powerpc/include/asm/book3s/64/tlbflush-hash.h | 94
-rw-r--r--  arch/powerpc/include/asm/cache.h | 19
-rw-r--r--  arch/powerpc/include/asm/cacheflush.h | 54
-rw-r--r--  arch/powerpc/include/asm/checksum.h | 141
-rw-r--r--  arch/powerpc/include/asm/cmpxchg.h | 237
-rw-r--r--  arch/powerpc/include/asm/code-patching.h | 21
-rw-r--r--  arch/powerpc/include/asm/cputable.h | 28
-rw-r--r--  arch/powerpc/include/asm/cputhreads.h | 15
-rw-r--r--  arch/powerpc/include/asm/eeh.h | 5
-rw-r--r--  arch/powerpc/include/asm/fsl_pm.h | 51
-rw-r--r--  arch/powerpc/include/asm/ftrace.h | 5
-rw-r--r--  arch/powerpc/include/asm/hugetlb.h | 2
-rw-r--r--  arch/powerpc/include/asm/hvcall.h | 1
-rw-r--r--  arch/powerpc/include/asm/hydra.h | 2
-rw-r--r--  arch/powerpc/include/asm/io.h | 2
-rw-r--r--  arch/powerpc/include/asm/machdep.h | 6
-rw-r--r--  arch/powerpc/include/asm/mman.h | 5
-rw-r--r--  arch/powerpc/include/asm/mmu-8xx.h | 4
-rw-r--r--  arch/powerpc/include/asm/mmu.h | 5
-rw-r--r--  arch/powerpc/include/asm/mmu_context.h | 12
-rw-r--r--  arch/powerpc/include/asm/module.h | 14
-rw-r--r--  arch/powerpc/include/asm/nohash/32/pgtable.h | 5
-rw-r--r--  arch/powerpc/include/asm/nohash/64/pgtable.h | 3
-rw-r--r--  arch/powerpc/include/asm/opal.h | 3
-rw-r--r--  arch/powerpc/include/asm/page.h | 111
-rw-r--r--  arch/powerpc/include/asm/page_32.h | 17
-rw-r--r--  arch/powerpc/include/asm/pci-bridge.h | 9
-rw-r--r--  arch/powerpc/include/asm/perf_event_server.h | 10
-rw-r--r--  arch/powerpc/include/asm/pgalloc-64.h | 42
-rw-r--r--  arch/powerpc/include/asm/pgtable-types.h | 103
-rw-r--r--  arch/powerpc/include/asm/pmac_feature.h | 2
-rw-r--r--  arch/powerpc/include/asm/processor.h | 4
-rw-r--r--  arch/powerpc/include/asm/reg.h | 18
-rw-r--r--  arch/powerpc/include/asm/reg_8xx.h | 93
-rw-r--r--  arch/powerpc/include/asm/reg_booke.h | 2
-rw-r--r--  arch/powerpc/include/asm/sections.h | 12
-rw-r--r--  arch/powerpc/include/asm/smp.h | 4
-rw-r--r--  arch/powerpc/include/asm/smu.h | 2
-rw-r--r--  arch/powerpc/include/asm/switch_to.h | 13
-rw-r--r--  arch/powerpc/include/asm/time.h | 6
-rw-r--r--  arch/powerpc/include/asm/tlbflush.h | 92
-rw-r--r--  arch/powerpc/include/asm/uninorth.h | 2
-rw-r--r--  arch/powerpc/include/asm/xics.h | 2
50 files changed, 1129 insertions(+), 509 deletions(-)
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 55f106ed12bf..ae0751ef8788 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -12,6 +12,24 @@
#define ATOMIC_INIT(i) { (i) }
+/*
+ * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with a
+ * "bne-" instruction at the end, an isync is enough as an acquire
+ * barrier on platforms without lwsync.
+ */
+#define __atomic_op_acquire(op, args...) \
+({ \
+ typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
+ __asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory"); \
+ __ret; \
+})
+
+#define __atomic_op_release(op, args...) \
+({ \
+ __asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory"); \
+ op##_relaxed(args); \
+})
+
static __inline__ int atomic_read(const atomic_t *v)
{
int t;
@@ -42,27 +60,27 @@ static __inline__ void atomic_##op(int a, atomic_t *v) \
: "cc"); \
} \
-#define ATOMIC_OP_RETURN(op, asm_op) \
-static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
+#define ATOMIC_OP_RETURN_RELAXED(op, asm_op) \
+static inline int atomic_##op##_return_relaxed(int a, atomic_t *v) \
{ \
int t; \
\
__asm__ __volatile__( \
- PPC_ATOMIC_ENTRY_BARRIER \
-"1: lwarx %0,0,%2 # atomic_" #op "_return\n" \
- #asm_op " %0,%1,%0\n" \
- PPC405_ERR77(0,%2) \
-" stwcx. %0,0,%2 \n" \
+"1: lwarx %0,0,%3 # atomic_" #op "_return_relaxed\n" \
+ #asm_op " %0,%2,%0\n" \
+ PPC405_ERR77(0, %3) \
+" stwcx. %0,0,%3\n" \
" bne- 1b\n" \
- PPC_ATOMIC_EXIT_BARRIER \
- : "=&r" (t) \
+ : "=&r" (t), "+m" (v->counter) \
: "r" (a), "r" (&v->counter) \
- : "cc", "memory"); \
+ : "cc"); \
\
return t; \
}
-#define ATOMIC_OPS(op, asm_op) ATOMIC_OP(op, asm_op) ATOMIC_OP_RETURN(op, asm_op)
+#define ATOMIC_OPS(op, asm_op) \
+ ATOMIC_OP(op, asm_op) \
+ ATOMIC_OP_RETURN_RELAXED(op, asm_op)
ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)
@@ -71,8 +89,11 @@ ATOMIC_OP(and, and)
ATOMIC_OP(or, or)
ATOMIC_OP(xor, xor)
+#define atomic_add_return_relaxed atomic_add_return_relaxed
+#define atomic_sub_return_relaxed atomic_sub_return_relaxed
+
#undef ATOMIC_OPS
-#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
@@ -92,21 +113,19 @@ static __inline__ void atomic_inc(atomic_t *v)
: "cc", "xer");
}
-static __inline__ int atomic_inc_return(atomic_t *v)
+static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
int t;
__asm__ __volatile__(
- PPC_ATOMIC_ENTRY_BARRIER
-"1: lwarx %0,0,%1 # atomic_inc_return\n\
- addic %0,%0,1\n"
- PPC405_ERR77(0,%1)
-" stwcx. %0,0,%1 \n\
- bne- 1b"
- PPC_ATOMIC_EXIT_BARRIER
- : "=&r" (t)
+"1: lwarx %0,0,%2 # atomic_inc_return_relaxed\n"
+" addic %0,%0,1\n"
+ PPC405_ERR77(0, %2)
+" stwcx. %0,0,%2\n"
+" bne- 1b"
+ : "=&r" (t), "+m" (v->counter)
: "r" (&v->counter)
- : "cc", "xer", "memory");
+ : "cc", "xer");
return t;
}
@@ -136,27 +155,34 @@ static __inline__ void atomic_dec(atomic_t *v)
: "cc", "xer");
}
-static __inline__ int atomic_dec_return(atomic_t *v)
+static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
int t;
__asm__ __volatile__(
- PPC_ATOMIC_ENTRY_BARRIER
-"1: lwarx %0,0,%1 # atomic_dec_return\n\
- addic %0,%0,-1\n"
- PPC405_ERR77(0,%1)
-" stwcx. %0,0,%1\n\
- bne- 1b"
- PPC_ATOMIC_EXIT_BARRIER
- : "=&r" (t)
+"1: lwarx %0,0,%2 # atomic_dec_return_relaxed\n"
+" addic %0,%0,-1\n"
+ PPC405_ERR77(0, %2)
+" stwcx. %0,0,%2\n"
+" bne- 1b"
+ : "=&r" (t), "+m" (v->counter)
: "r" (&v->counter)
- : "cc", "xer", "memory");
+ : "cc", "xer");
return t;
}
+#define atomic_inc_return_relaxed atomic_inc_return_relaxed
+#define atomic_dec_return_relaxed atomic_dec_return_relaxed
+
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_cmpxchg_relaxed(v, o, n) \
+ cmpxchg_relaxed(&((v)->counter), (o), (n))
+#define atomic_cmpxchg_acquire(v, o, n) \
+ cmpxchg_acquire(&((v)->counter), (o), (n))
+
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
/**
* __atomic_add_unless - add unless the number is a given value
@@ -285,26 +311,27 @@ static __inline__ void atomic64_##op(long a, atomic64_t *v) \
: "cc"); \
}
-#define ATOMIC64_OP_RETURN(op, asm_op) \
-static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
+#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op) \
+static inline long \
+atomic64_##op##_return_relaxed(long a, atomic64_t *v) \
{ \
long t; \
\
__asm__ __volatile__( \
- PPC_ATOMIC_ENTRY_BARRIER \
-"1: ldarx %0,0,%2 # atomic64_" #op "_return\n" \
- #asm_op " %0,%1,%0\n" \
-" stdcx. %0,0,%2 \n" \
+"1: ldarx %0,0,%3 # atomic64_" #op "_return_relaxed\n" \
+ #asm_op " %0,%2,%0\n" \
+" stdcx. %0,0,%3\n" \
" bne- 1b\n" \
- PPC_ATOMIC_EXIT_BARRIER \
- : "=&r" (t) \
+ : "=&r" (t), "+m" (v->counter) \
: "r" (a), "r" (&v->counter) \
- : "cc", "memory"); \
+ : "cc"); \
\
return t; \
}
-#define ATOMIC64_OPS(op, asm_op) ATOMIC64_OP(op, asm_op) ATOMIC64_OP_RETURN(op, asm_op)
+#define ATOMIC64_OPS(op, asm_op) \
+ ATOMIC64_OP(op, asm_op) \
+ ATOMIC64_OP_RETURN_RELAXED(op, asm_op)
ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)
@@ -312,8 +339,11 @@ ATOMIC64_OP(and, and)
ATOMIC64_OP(or, or)
ATOMIC64_OP(xor, xor)
-#undef ATOMIC64_OPS
-#undef ATOMIC64_OP_RETURN
+#define atomic64_add_return_relaxed atomic64_add_return_relaxed
+#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
@@ -332,20 +362,18 @@ static __inline__ void atomic64_inc(atomic64_t *v)
: "cc", "xer");
}
-static __inline__ long atomic64_inc_return(atomic64_t *v)
+static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
{
long t;
__asm__ __volatile__(
- PPC_ATOMIC_ENTRY_BARRIER
-"1: ldarx %0,0,%1 # atomic64_inc_return\n\
- addic %0,%0,1\n\
- stdcx. %0,0,%1 \n\
- bne- 1b"
- PPC_ATOMIC_EXIT_BARRIER
- : "=&r" (t)
+"1: ldarx %0,0,%2 # atomic64_inc_return_relaxed\n"
+" addic %0,%0,1\n"
+" stdcx. %0,0,%2\n"
+" bne- 1b"
+ : "=&r" (t), "+m" (v->counter)
: "r" (&v->counter)
- : "cc", "xer", "memory");
+ : "cc", "xer");
return t;
}
@@ -374,24 +402,25 @@ static __inline__ void atomic64_dec(atomic64_t *v)
: "cc", "xer");
}
-static __inline__ long atomic64_dec_return(atomic64_t *v)
+static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
{
long t;
__asm__ __volatile__(
- PPC_ATOMIC_ENTRY_BARRIER
-"1: ldarx %0,0,%1 # atomic64_dec_return\n\
- addic %0,%0,-1\n\
- stdcx. %0,0,%1\n\
- bne- 1b"
- PPC_ATOMIC_EXIT_BARRIER
- : "=&r" (t)
+"1: ldarx %0,0,%2 # atomic64_dec_return_relaxed\n"
+" addic %0,%0,-1\n"
+" stdcx. %0,0,%2\n"
+" bne- 1b"
+ : "=&r" (t), "+m" (v->counter)
: "r" (&v->counter)
- : "cc", "xer", "memory");
+ : "cc", "xer");
return t;
}
+#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
+#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
+
#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
@@ -420,7 +449,13 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
}
#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+#define atomic64_cmpxchg_relaxed(v, o, n) \
+ cmpxchg_relaxed(&((v)->counter), (o), (n))
+#define atomic64_cmpxchg_acquire(v, o, n) \
+ cmpxchg_acquire(&((v)->counter), (o), (n))
+
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
/**
* atomic64_add_unless - add unless the number is a given value
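
For context, the generic atomics layer is expected to combine the two wrappers added at the top of this file with the relaxed primitives to build the acquire/release variants. A minimal sketch of that composition, assuming the usual <linux/atomic.h> conventions (not part of this patch):

/* Hypothetical composition, mirroring the generic <linux/atomic.h> pattern */
#ifndef atomic_add_return_acquire
#define atomic_add_return_acquire(...)				\
	__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return_release
#define atomic_add_return_release(...)				\
	__atomic_op_release(atomic_add_return, __VA_ARGS__)
#endif

Here __atomic_op_acquire(atomic_add_return, ...) expands to atomic_add_return_relaxed(...) followed by PPC_ACQUIRE_BARRIER, which is exactly the property the comment above relies on.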
diff --git a/arch/powerpc/include/asm/mmu-hash32.h b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
index 16f513e5cbd7..16f513e5cbd7 100644
--- a/arch/powerpc/include/asm/mmu-hash32.h
+++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index ea0414d6659e..5f08a0832238 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -52,44 +52,14 @@
_PAGE_F_SECOND | _PAGE_F_GIX)
/* shift to put page number into pte */
-#define PTE_RPN_SHIFT (18)
+#define PTE_RPN_SHIFT (12)
+#define PTE_RPN_SIZE (45) /* gives 57-bit real addresses */
#define _PAGE_4K_PFN 0
#ifndef __ASSEMBLY__
/*
- * 4-level page tables related bits
+ * On all 4K setups, remap_4k_pfn() equates to remap_pfn_range()
*/
-
-#define pgd_none(pgd) (!pgd_val(pgd))
-#define pgd_bad(pgd) (pgd_val(pgd) == 0)
-#define pgd_present(pgd) (pgd_val(pgd) != 0)
-#define pgd_page_vaddr(pgd) (pgd_val(pgd) & ~PGD_MASKED_BITS)
-
-static inline void pgd_clear(pgd_t *pgdp)
-{
- *pgdp = __pgd(0);
-}
-
-static inline pte_t pgd_pte(pgd_t pgd)
-{
- return __pte(pgd_val(pgd));
-}
-
-static inline pgd_t pte_pgd(pte_t pte)
-{
- return __pgd(pte_val(pte));
-}
-extern struct page *pgd_page(pgd_t pgd);
-
-#define pud_offset(pgdp, addr) \
- (((pud_t *) pgd_page_vaddr(*(pgdp))) + \
- (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
-
-#define pud_ERROR(e) \
- pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
-
-/*
- * On all 4K setups, remap_4k_pfn() equates to remap_pfn_range() */
#define remap_4k_pfn(vma, addr, pfn, prot) \
remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
index 849bbec80f7b..0a7956a80a08 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -1,15 +1,14 @@
#ifndef _ASM_POWERPC_BOOK3S_64_HASH_64K_H
#define _ASM_POWERPC_BOOK3S_64_HASH_64K_H
-#include <asm-generic/pgtable-nopud.h>
-
#define PTE_INDEX_SIZE 8
-#define PMD_INDEX_SIZE 10
-#define PUD_INDEX_SIZE 0
+#define PMD_INDEX_SIZE 5
+#define PUD_INDEX_SIZE 5
#define PGD_INDEX_SIZE 12
#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE)
+#define PTRS_PER_PUD (1 << PUD_INDEX_SIZE)
#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
/* With 4k base page size, hugepage PTEs go at the PMD level */
@@ -20,13 +19,18 @@
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define PGDIR_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE)
+/* PUD_SHIFT determines what a third-level page table entry can map */
+#define PUD_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE)
+#define PUD_SIZE (1UL << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE-1))
+
+/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
+#define PGDIR_SHIFT (PUD_SHIFT + PUD_INDEX_SIZE)
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
-#define _PAGE_COMBO 0x00040000 /* this is a combo 4k page */
-#define _PAGE_4K_PFN 0x00080000 /* PFN is for a single 4k page */
+#define _PAGE_COMBO 0x00001000 /* this is a combo 4k page */
+#define _PAGE_4K_PFN 0x00002000 /* PFN is for a single 4k page */
/*
* Used to track subpage group valid if _PAGE_COMBO is set
* This overloads _PAGE_F_GIX and _PAGE_F_SECOND
@@ -39,10 +43,12 @@
/* Shift to put page number into pte.
*
- * That gives us a max RPN of 34 bits, which means a max of 50 bits
- * of addressable physical space, or 46 bits for the special 4k PFNs.
+ * That gives us a max RPN of 41 bits, which means a max of 57 bits
+ * of addressable physical space, or 53 bits for the special 4k PFNs.
*/
-#define PTE_RPN_SHIFT (30)
+#define PTE_RPN_SHIFT (16)
+#define PTE_RPN_SIZE (41)
+
/*
* we support 16 fragments per PTE page of 64K size.
*/
@@ -54,13 +60,12 @@
#define PTE_FRAG_SIZE_SHIFT 12
#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT)
-/*
- * Bits to mask out from a PMD to get to the PTE page
- * PMDs point to PTE table fragments which are PTE_FRAG_SIZE aligned.
- */
-#define PMD_MASKED_BITS (PTE_FRAG_SIZE - 1)
-/* Bits to mask out from a PGD/PUD to get to the PMD page */
-#define PUD_MASKED_BITS 0x1ff
+/* Bits to mask out from a PMD to get to the PTE page */
+#define PMD_MASKED_BITS 0xc0000000000000ffUL
+/* Bits to mask out from a PUD to get to the PMD page */
+#define PUD_MASKED_BITS 0xc0000000000000ffUL
+/* Bits to mask out from a PGD to get to the PUD page */
+#define PGD_MASKED_BITS 0xc0000000000000ffUL
#ifndef __ASSEMBLY__
@@ -120,7 +125,7 @@ extern bool __rpte_sub_valid(real_pte_t rpte, unsigned long index);
(((pte) & _PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K)
#define remap_4k_pfn(vma, addr, pfn, prot) \
- (WARN_ON(((pfn) >= (1UL << (64 - PTE_RPN_SHIFT)))) ? -EINVAL : \
+ (WARN_ON(((pfn) >= (1UL << PTE_RPN_SIZE))) ? -EINVAL : \
remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, \
__pgprot(pgprot_val((prot)) | _PAGE_4K_PFN)))
@@ -130,11 +135,9 @@ extern bool __rpte_sub_valid(real_pte_t rpte, unsigned long index);
#else
#define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE)
#endif
+#define PUD_TABLE_SIZE (sizeof(pud_t) << PUD_INDEX_SIZE)
#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
-#define pgd_pte(pgd) (pud_pte(((pud_t){ pgd })))
-#define pte_pgd(pte) ((pgd_t)pte_pud(pte))
-
#ifdef CONFIG_HUGETLB_PAGE
/*
* We have PGD_INDEX_SIZE = 12 and PTE_INDEX_SIZE = 8, so that we can have
@@ -208,30 +211,30 @@ static inline char *get_hpte_slot_array(pmd_t *pmdp)
/*
* The linux hugepage PMD now include the pmd entries followed by the address
* to the stashed pgtable_t. The stashed pgtable_t contains the hpte bits.
- * [ 1 bit secondary | 3 bit hidx | 1 bit valid | 000]. We use one byte per
+ * [ 000 | 1 bit secondary | 3 bit hidx | 1 bit valid]. We use one byte per
* each HPTE entry. With 16MB hugepage and 64K HPTE we need 256 entries and
* with 4K HPTE we need 4096 entries. Both will fit in a 4K pgtable_t.
*
- * The last three bits are intentionally left to zero. This memory location
+ * The top three bits are intentionally left as zero. These memory locations
* are also used as normal page PTE pointers. So if we have any pointers
* left around while we collapse a hugepage, we need to make sure
* _PAGE_PRESENT bit of that is zero when we look at them
*/
static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index)
{
- return (hpte_slot_array[index] >> 3) & 0x1;
+ return hpte_slot_array[index] & 0x1;
}
static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array,
int index)
{
- return hpte_slot_array[index] >> 4;
+ return hpte_slot_array[index] >> 1;
}
static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
unsigned int index, unsigned int hidx)
{
- hpte_slot_array[index] = hidx << 4 | 0x1 << 3;
+ hpte_slot_array[index] = (hidx << 1) | 0x1;
}
/*
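
To make the new per-byte layout concrete, here is a small standalone sketch (illustrative only; the helper names are not from the kernel) mirroring the encode/decode done by mark_hpte_slot_valid(), hpte_valid() and hpte_hash_index() above:

#include <stdio.h>

/* New layout: [ 000 | 1 bit secondary | 3 bit hidx | 1 bit valid ] per byte */
static unsigned char encode_slot(unsigned int hidx)  { return (hidx << 1) | 0x1; }
static unsigned int  slot_valid(unsigned char b)     { return b & 0x1; }
static unsigned int  slot_hidx(unsigned char b)      { return b >> 1; }

int main(void)
{
	unsigned char b = encode_slot(0xd);	/* secondary hash, group index 5 */

	printf("valid=%u hidx=%#x\n", slot_valid(b), slot_hidx(b));
	return 0;
}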
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index 8d1c8162f0c1..d0ee6fcef823 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -4,8 +4,7 @@
/*
* Common bits between 4K and 64K pages in a linux-style PTE.
- * These match the bits in the (hardware-defined) PowerPC PTE as closely
- * as possible. Additional bits may be defined in pgtable-hash64-*.h
+ * Additional bits may be defined in pgtable-hash64-*.h
*
* Note: We only support user read/write permissions. Supervisor always
* have full read/write to pages above PAGE_OFFSET (pages below that
@@ -14,32 +13,35 @@
* We could create separate kernel read-only if we used the 3 PP bits
* combinations that newer processors provide but we currently don't.
*/
-#define _PAGE_PTE 0x00001
-#define _PAGE_PRESENT 0x00002 /* software: pte contains a translation */
-#define _PAGE_BIT_SWAP_TYPE 2
-#define _PAGE_USER 0x00004 /* matches one of the PP bits */
-#define _PAGE_EXEC 0x00008 /* No execute on POWER4 and newer (we invert) */
-#define _PAGE_GUARDED 0x00010
-/* We can derive Memory coherence from _PAGE_NO_CACHE */
+#define _PAGE_BIT_SWAP_TYPE 0
+
+#define _PAGE_EXEC 0x00001 /* execute permission */
+#define _PAGE_RW 0x00002 /* read & write access allowed */
+#define _PAGE_READ 0x00004 /* read access allowed */
+#define _PAGE_USER 0x00008 /* page may be accessed by userspace */
+#define _PAGE_GUARDED 0x00010 /* G: guarded (side-effect) page */
+/* M (memory coherence) is always set in the HPTE, so we don't need it here */
#define _PAGE_COHERENT 0x0
#define _PAGE_NO_CACHE 0x00020 /* I: cache inhibit */
#define _PAGE_WRITETHRU 0x00040 /* W: cache write-through */
#define _PAGE_DIRTY 0x00080 /* C: page changed */
#define _PAGE_ACCESSED 0x00100 /* R: page referenced */
-#define _PAGE_RW 0x00200 /* software: user write access allowed */
-#define _PAGE_HASHPTE 0x00400 /* software: pte has an associated HPTE */
+#define _PAGE_SPECIAL 0x00400 /* software: special page */
#define _PAGE_BUSY 0x00800 /* software: PTE & hash are busy */
-#define _PAGE_F_GIX 0x07000 /* full page: hidx bits */
-#define _PAGE_F_GIX_SHIFT 12
-#define _PAGE_F_SECOND 0x08000 /* Whether to use secondary hash or not */
-#define _PAGE_SPECIAL 0x10000 /* software: special page */
#ifdef CONFIG_MEM_SOFT_DIRTY
-#define _PAGE_SOFT_DIRTY 0x20000 /* software: software dirty tracking */
+#define _PAGE_SOFT_DIRTY 0x200 /* software: software dirty tracking */
#else
-#define _PAGE_SOFT_DIRTY 0x00000
+#define _PAGE_SOFT_DIRTY 0x000
#endif
+#define _PAGE_F_GIX_SHIFT 57
+#define _PAGE_F_GIX (7ul << 57) /* HPTE index within HPTEG */
+#define _PAGE_F_SECOND (1ul << 60) /* HPTE is in 2ndary HPTEG */
+#define _PAGE_HASHPTE (1ul << 61) /* PTE has associated HPTE */
+#define _PAGE_PTE (1ul << 62) /* distinguishes PTEs from pointers */
+#define _PAGE_PRESENT (1ul << 63) /* pte contains a translation */
+
/*
* We need to differentiate between explicit huge page and THP huge
* page, since THP huge page also need to track real subpage details
@@ -132,7 +134,7 @@
* The mask covered by the RPN must be a ULL on 32-bit platforms with
* 64-bit PTEs
*/
-#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
+#define PTE_RPN_MASK (((1UL << PTE_RPN_SIZE) - 1) << PTE_RPN_SHIFT)
/*
* _PAGE_CHG_MASK masks of bits that are to be preserved across
* pgprot changes
@@ -223,15 +225,17 @@
#define PUD_BAD_BITS (PMD_TABLE_SIZE-1)
#ifndef __ASSEMBLY__
-#define pmd_bad(pmd) (!is_kernel_addr(pmd_val(pmd)) \
- || (pmd_val(pmd) & PMD_BAD_BITS))
-#define pmd_page_vaddr(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS)
+#define pmd_bad(pmd) (pmd_val(pmd) & PMD_BAD_BITS)
+#define pmd_page_vaddr(pmd) __va(pmd_val(pmd) & ~PMD_MASKED_BITS)
+
+#define pud_bad(pud) (pud_val(pud) & PUD_BAD_BITS)
+#define pud_page_vaddr(pud) __va(pud_val(pud) & ~PUD_MASKED_BITS)
-#define pud_bad(pud) (!is_kernel_addr(pud_val(pud)) \
- || (pud_val(pud) & PUD_BAD_BITS))
-#define pud_page_vaddr(pud) (pud_val(pud) & ~PUD_MASKED_BITS)
+/* Pointers in the page table tree are physical addresses */
+#define __pgtable_ptr_val(ptr) __pa(ptr)
#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1))
+#define pud_index(address) (((address) >> (PUD_SHIFT)) & (PTRS_PER_PUD - 1))
#define pmd_index(address) (((address) >> (PMD_SHIFT)) & (PTRS_PER_PMD - 1))
#define pte_index(address) (((address) >> (PAGE_SHIFT)) & (PTRS_PER_PTE - 1))
@@ -360,8 +364,18 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
:"cc");
}
+static inline int pgd_bad(pgd_t pgd)
+{
+ return (pgd_val(pgd) == 0);
+}
+
#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
+static inline unsigned long pgd_page_vaddr(pgd_t pgd)
+{
+ return (unsigned long)__va(pgd_val(pgd) & ~PGD_MASKED_BITS);
+}
+
/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte) { return !!(pte_val(pte) & _PAGE_RW);}
@@ -402,7 +416,7 @@ static inline int pte_protnone(pte_t pte)
static inline int pte_present(pte_t pte)
{
- return pte_val(pte) & _PAGE_PRESENT;
+ return !!(pte_val(pte) & _PAGE_PRESENT);
}
/* Conversion functions: convert a page and protection to a page entry,
@@ -413,13 +427,13 @@ static inline int pte_present(pte_t pte)
*/
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
- return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
+ return __pte((((pte_basic_t)(pfn) << PTE_RPN_SHIFT) & PTE_RPN_MASK) |
pgprot_val(pgprot));
}
static inline unsigned long pte_pfn(pte_t pte)
{
- return pte_val(pte) >> PTE_RPN_SHIFT;
+ return (pte_val(pte) & PTE_RPN_MASK) >> PTE_RPN_SHIFT;
}
/* Generic modifiers for PTE bits */
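
The PTE_RPN_MASK change above makes the RPN a contiguous field of PTE_RPN_SIZE bits starting at PTE_RPN_SHIFT, so pfn_pte()/pte_pfn() can round-trip by masking rather than by assuming the RPN occupies all high bits. A standalone sketch using the 4K hash values from this series (values assumed purely for illustration):

#include <stdio.h>

#define PTE_RPN_SHIFT	12UL			/* 4K hash value from this series */
#define PTE_RPN_SIZE	45UL			/* gives 57-bit real addresses */
#define PTE_RPN_MASK	(((1UL << PTE_RPN_SIZE) - 1) << PTE_RPN_SHIFT)

int main(void)
{
	unsigned long pfn = 0x123456789UL;	/* arbitrary 33-bit PFN */
	unsigned long pte = ((pfn << PTE_RPN_SHIFT) & PTE_RPN_MASK) | 0x3UL;
	unsigned long back = (pte & PTE_RPN_MASK) >> PTE_RPN_SHIFT;

	printf("mask=%#lx round-trip ok=%d\n", PTE_RPN_MASK, back == pfn);
	return 0;
}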
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 7352d3f212df..0cea4807e26f 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -114,6 +114,7 @@
#define POWER7_TLB_SETS 128 /* # sets in POWER7 TLB */
#define POWER8_TLB_SETS 512 /* # sets in POWER8 TLB */
+#define POWER9_TLB_SETS_HASH 256 /* # sets in POWER9 TLB Hash mode */
#ifndef __ASSEMBLY__
@@ -607,6 +608,9 @@ static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1;
return get_vsid(context, ea, ssize);
}
+
+unsigned htab_shift_for_mem_size(unsigned long mem_size);
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_MMU_HASH64_H_ */
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index ac07a30a7934..77d3ce05798e 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -43,13 +43,8 @@
*/
#ifndef __real_pte
-#ifdef CONFIG_STRICT_MM_TYPECHECKS
#define __real_pte(e,p) ((real_pte_t){(e)})
#define __rpte_to_pte(r) ((r).pte)
-#else
-#define __real_pte(e,p) (e)
-#define __rpte_to_pte(r) (__pte(r))
-#endif
#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >>_PAGE_F_GIX_SHIFT)
#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \
@@ -111,6 +106,26 @@ static inline void pgd_set(pgd_t *pgdp, unsigned long val)
*pgdp = __pgd(val);
}
+static inline void pgd_clear(pgd_t *pgdp)
+{
+ *pgdp = __pgd(0);
+}
+
+#define pgd_none(pgd) (!pgd_val(pgd))
+#define pgd_present(pgd) (!pgd_none(pgd))
+
+static inline pte_t pgd_pte(pgd_t pgd)
+{
+ return __pte(pgd_val(pgd));
+}
+
+static inline pgd_t pte_pgd(pte_t pte)
+{
+ return __pgd(pte_val(pte));
+}
+
+extern struct page *pgd_page(pgd_t pgd);
+
/*
* Find an entry in a page-table-directory. We combine the address region
* (the high order N bits) and the pgd portion of the address.
@@ -118,9 +133,10 @@ static inline void pgd_set(pgd_t *pgdp, unsigned long val)
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
+#define pud_offset(pgdp, addr) \
+ (((pud_t *) pgd_page_vaddr(*(pgdp))) + pud_index(addr))
#define pmd_offset(pudp,addr) \
(((pmd_t *) pud_page_vaddr(*(pudp))) + pmd_index(addr))
-
#define pte_offset_kernel(dir,addr) \
(((pte_t *) pmd_page_vaddr(*(dir))) + pte_index(addr))
@@ -135,6 +151,8 @@ static inline void pgd_set(pgd_t *pgdp, unsigned long val)
pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pud_ERROR(e) \
+ pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
#define pgd_ERROR(e) \
pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
@@ -154,10 +172,10 @@ static inline void pgd_set(pgd_t *pgdp, unsigned long val)
#define SWP_TYPE_BITS 5
#define __swp_type(x) (((x).val >> _PAGE_BIT_SWAP_TYPE) \
& ((1UL << SWP_TYPE_BITS) - 1))
-#define __swp_offset(x) ((x).val >> PTE_RPN_SHIFT)
+#define __swp_offset(x) (((x).val & PTE_RPN_MASK) >> PTE_RPN_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) { \
- ((type) << _PAGE_BIT_SWAP_TYPE) \
- | ((offset) << PTE_RPN_SHIFT) })
+ ((type) << _PAGE_BIT_SWAP_TYPE) \
+ | (((offset) << PTE_RPN_SHIFT) & PTE_RPN_MASK)})
/*
* swp_entry_t must be independent of pte bits. We build a swp_entry_t from
* swap type and offset we get from swap and convert that to pte to find a
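
The swap-entry encoding keeps the type in the low bits (below PTE_RPN_SHIFT) and the offset inside the RPN field, so both survive the masking that __swp_entry()/__swp_offset() now apply. A small standalone round-trip sketch, using the 64K hash values as an assumed example:

#include <stdio.h>

#define _PAGE_BIT_SWAP_TYPE	0
#define SWP_TYPE_BITS		5
#define PTE_RPN_SHIFT		16UL	/* 64K hash value, assumed for illustration */
#define PTE_RPN_SIZE		41UL
#define PTE_RPN_MASK		(((1UL << PTE_RPN_SIZE) - 1) << PTE_RPN_SHIFT)

int main(void)
{
	unsigned long type = 7, offset = 0x12345;
	unsigned long val = (type << _PAGE_BIT_SWAP_TYPE) |
			    ((offset << PTE_RPN_SHIFT) & PTE_RPN_MASK);

	printf("type=%lu offset=%#lx\n",
	       (val >> _PAGE_BIT_SWAP_TYPE) & ((1UL << SWP_TYPE_BITS) - 1),
	       (val & PTE_RPN_MASK) >> PTE_RPN_SHIFT);
	return 0;
}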
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
new file mode 100644
index 000000000000..1b753f96b374
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
@@ -0,0 +1,94 @@
+#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H
+#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H
+
+#define MMU_NO_CONTEXT 0
+
+/*
+ * TLB flushing for 64-bit hash-MMU CPUs
+ */
+
+#include <linux/percpu.h>
+#include <asm/page.h>
+
+#define PPC64_TLB_BATCH_NR 192
+
+struct ppc64_tlb_batch {
+ int active;
+ unsigned long index;
+ struct mm_struct *mm;
+ real_pte_t pte[PPC64_TLB_BATCH_NR];
+ unsigned long vpn[PPC64_TLB_BATCH_NR];
+ unsigned int psize;
+ int ssize;
+};
+DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
+
+extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
+
+#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+
+static inline void arch_enter_lazy_mmu_mode(void)
+{
+ struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
+
+ batch->active = 1;
+}
+
+static inline void arch_leave_lazy_mmu_mode(void)
+{
+ struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
+
+ if (batch->index)
+ __flush_tlb_pending(batch);
+ batch->active = 0;
+}
+
+#define arch_flush_lazy_mmu_mode() do {} while (0)
+
+
+extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
+ int ssize, unsigned long flags);
+extern void flush_hash_range(unsigned long number, int local);
+extern void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
+ pmd_t *pmdp, unsigned int psize, int ssize,
+ unsigned long flags);
+
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+}
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+}
+
+static inline void local_flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long vmaddr)
+{
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long vmaddr)
+{
+}
+
+static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
+ unsigned long vmaddr)
+{
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+ unsigned long end)
+{
+}
+
+/* Private function for use by PCI IO mapping code */
+extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end);
+extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long addr);
+#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H */
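
The batching hooks in this new header are driven by the generic page-table code; roughly, the expected flow looks like the following sketch (illustrative pseudocode against assumed callers, not part of this patch):

/* Illustrative flow only -- the generic mm code supplies the actual callers */
arch_enter_lazy_mmu_mode();		/* batch->active = 1 */

/*
 * Subsequent hash-PTE invalidations queue into the per-CPU
 * ppc64_tlb_batch (up to PPC64_TLB_BATCH_NR entries) instead of
 * being flushed one at a time.
 */

arch_leave_lazy_mmu_mode();		/* __flush_tlb_pending() drains the batch */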
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index 5f8229e24fe6..ffbafbf76b19 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -69,6 +69,25 @@ extern void _set_L3CR(unsigned long);
#define _set_L3CR(val) do { } while(0)
#endif
+static inline void dcbz(void *addr)
+{
+ __asm__ __volatile__ ("dcbz 0, %0" : : "r"(addr) : "memory");
+}
+
+static inline void dcbi(void *addr)
+{
+ __asm__ __volatile__ ("dcbi 0, %0" : : "r"(addr) : "memory");
+}
+
+static inline void dcbf(void *addr)
+{
+ __asm__ __volatile__ ("dcbf 0, %0" : : "r"(addr) : "memory");
+}
+
+static inline void dcbst(void *addr)
+{
+ __asm__ __volatile__ ("dcbst 0, %0" : : "r"(addr) : "memory");
+}
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CACHE_H */
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index 6229e6b6037b..69fb16d7a811 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -30,8 +30,6 @@ extern void flush_dcache_page(struct page *page);
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-extern void __flush_disable_L1(void);
-
extern void flush_icache_range(unsigned long, unsigned long);
extern void flush_icache_user_range(struct vm_area_struct *vma,
struct page *page, unsigned long addr,
@@ -47,12 +45,58 @@ static inline void __flush_dcache_icache_phys(unsigned long physaddr)
}
#endif
-extern void flush_dcache_range(unsigned long start, unsigned long stop);
#ifdef CONFIG_PPC32
-extern void clean_dcache_range(unsigned long start, unsigned long stop);
-extern void invalidate_dcache_range(unsigned long start, unsigned long stop);
+/*
+ * Write any modified data cache blocks out to memory and invalidate them.
+ * Does not invalidate the corresponding instruction cache blocks.
+ */
+static inline void flush_dcache_range(unsigned long start, unsigned long stop)
+{
+ void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
+ unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
+ unsigned long i;
+
+ for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
+ dcbf(addr);
+ mb(); /* sync */
+}
+
+/*
+ * Write any modified data cache blocks out to memory.
+ * Does not invalidate the corresponding cache lines (especially for
+ * any corresponding instruction cache).
+ */
+static inline void clean_dcache_range(unsigned long start, unsigned long stop)
+{
+ void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
+ unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
+ unsigned long i;
+
+ for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
+ dcbst(addr);
+ mb(); /* sync */
+}
+
+/*
+ * Like above, but invalidate the D-cache. This is used by the 8xx
+ * to invalidate the cache so the PPC core doesn't get stale data
+ * from the CPM (no cache snooping here :-).
+ */
+static inline void invalidate_dcache_range(unsigned long start,
+ unsigned long stop)
+{
+ void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
+ unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
+ unsigned long i;
+
+ for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
+ dcbi(addr);
+ mb(); /* sync */
+}
+
#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC64
+extern void flush_dcache_range(unsigned long start, unsigned long stop);
extern void flush_inval_dcache_range(unsigned long start, unsigned long stop);
extern void flush_dcache_phys_range(unsigned long start, unsigned long stop);
#endif
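
The three inline loops above all round the start address down to a cache-line boundary and round the length up, so a range that straddles lines is fully covered. A standalone sketch of that arithmetic (the L1_CACHE_SHIFT value is assumed here for illustration):

#include <stdio.h>

#define L1_CACHE_SHIFT	6			/* assumed 64-byte lines */
#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)

static unsigned long lines_touched(unsigned long start, unsigned long stop)
{
	unsigned long addr = start & ~(L1_CACHE_BYTES - 1);
	unsigned long size = stop - addr + (L1_CACHE_BYTES - 1);

	return size >> L1_CACHE_SHIFT;
}

int main(void)
{
	/* 0x1005..0x1043 straddles two 64-byte lines */
	printf("%lu\n", lines_touched(0x1005, 0x1043));
	return 0;
}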
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index e8d9ef4755a4..ee655ed1ff1b 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -9,30 +9,9 @@
* 2 of the License, or (at your option) any later version.
*/
-/*
- * This is a version of ip_compute_csum() optimized for IP headers,
- * which always checksum on 4 octet boundaries. ihl is the number
- * of 32-bit words and is always >= 5.
- */
#ifdef CONFIG_GENERIC_CSUM
#include <asm-generic/checksum.h>
#else
-extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
-
-/*
- * computes the checksum of a memory block at buff, length len,
- * and adds in "sum" (32-bit)
- *
- * returns a 32-bit number suitable for feeding into itself
- * or csum_tcpudp_magic
- *
- * this function must be called with even lengths, except
- * for the last fragment, which may be odd
- *
- * it's best to have buff aligned on a 32-bit boundary
- */
-extern __wsum csum_partial(const void *buff, int len, __wsum sum);
-
/*
* Computes the checksum of a memory block at src, length len,
* and adds in "sum" (32-bit), while copying the block to dst.
@@ -47,21 +26,12 @@ extern __wsum csum_partial_copy_generic(const void *src, void *dst,
int len, __wsum sum,
int *src_err, int *dst_err);
-#ifdef __powerpc64__
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
extern __wsum csum_and_copy_from_user(const void __user *src, void *dst,
int len, __wsum sum, int *err_ptr);
#define HAVE_CSUM_COPY_USER
extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
int len, __wsum sum, int *err_ptr);
-#else
-/*
- * the same as csum_partial, but copies from src to dst while it
- * checksums.
- */
-#define csum_partial_copy_from_user(src, dst, len, sum, errp) \
- csum_partial_copy_generic((__force const void *)(src), (dst), (len), (sum), (errp), NULL)
-#endif
#define csum_partial_copy_nocheck(src, dst, len, sum) \
csum_partial_copy_generic((src), (dst), (len), (sum), NULL, NULL)
@@ -83,15 +53,6 @@ static inline __sum16 csum_fold(__wsum sum)
return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
}
-/*
- * this routine is used for miscellaneous IP-like checksums, mainly
- * in icmp.c
- */
-static inline __sum16 ip_compute_csum(const void *buff, int len)
-{
- return csum_fold(csum_partial(buff, len, 0));
-}
-
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
@@ -135,17 +96,117 @@ static inline __wsum csum_add(__wsum csum, __wsum addend)
{
#ifdef __powerpc64__
u64 res = (__force u64)csum;
+#endif
+ if (__builtin_constant_p(csum) && csum == 0)
+ return addend;
+ if (__builtin_constant_p(addend) && addend == 0)
+ return csum;
+#ifdef __powerpc64__
res += (__force u64)addend;
return (__force __wsum)((u32)res + (res >> 32));
#else
asm("addc %0,%0,%1;"
"addze %0,%0;"
- : "+r" (csum) : "r" (addend));
+ : "+r" (csum) : "r" (addend) : "xer");
return csum;
#endif
}
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries. ihl is the number
+ * of 32-bit words and is always >= 5.
+ */
+static inline __wsum ip_fast_csum_nofold(const void *iph, unsigned int ihl)
+{
+ const u32 *ptr = (const u32 *)iph + 1;
+#ifdef __powerpc64__
+ unsigned int i;
+ u64 s = *(const u32 *)iph;
+
+ for (i = 0; i < ihl - 1; i++, ptr++)
+ s += *ptr;
+ s += (s >> 32);
+ return (__force __wsum)s;
+#else
+ __wsum sum, tmp;
+
+ asm("mtctr %3;"
+ "addc %0,%4,%5;"
+ "1: lwzu %1, 4(%2);"
+ "adde %0,%0,%1;"
+ "bdnz 1b;"
+ "addze %0,%0;"
+ : "=r" (sum), "=r" (tmp), "+b" (ptr)
+ : "r" (ihl - 2), "r" (*(const u32 *)iph), "r" (*ptr)
+ : "ctr", "xer", "memory");
+
+ return sum;
+#endif
+}
+
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+ return csum_fold(ip_fast_csum_nofold(iph, ihl));
+}
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+__wsum __csum_partial(const void *buff, int len, __wsum sum);
+
+static inline __wsum csum_partial(const void *buff, int len, __wsum sum)
+{
+ if (__builtin_constant_p(len) && len <= 16 && (len & 1) == 0) {
+ if (len == 2)
+ sum = csum_add(sum, (__force __wsum)*(const u16 *)buff);
+ if (len >= 4)
+ sum = csum_add(sum, (__force __wsum)*(const u32 *)buff);
+ if (len == 6)
+ sum = csum_add(sum, (__force __wsum)
+ *(const u16 *)(buff + 4));
+ if (len >= 8)
+ sum = csum_add(sum, (__force __wsum)
+ *(const u32 *)(buff + 4));
+ if (len == 10)
+ sum = csum_add(sum, (__force __wsum)
+ *(const u16 *)(buff + 8));
+ if (len >= 12)
+ sum = csum_add(sum, (__force __wsum)
+ *(const u32 *)(buff + 8));
+ if (len == 14)
+ sum = csum_add(sum, (__force __wsum)
+ *(const u16 *)(buff + 12));
+ if (len >= 16)
+ sum = csum_add(sum, (__force __wsum)
+ *(const u32 *)(buff + 12));
+ } else if (__builtin_constant_p(len) && (len & 3) == 0) {
+ sum = csum_add(sum, ip_fast_csum_nofold(buff, len >> 2));
+ } else {
+ sum = __csum_partial(buff, len, sum);
+ }
+ return sum;
+}
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+static inline __sum16 ip_compute_csum(const void *buff, int len)
+{
+ return csum_fold(csum_partial(buff, len, 0));
+}
+
#endif
#endif /* __KERNEL__ */
#endif
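
csum_add() and ip_fast_csum_nofold() both rely on one's-complement addition, where any carry out of bit 31 is folded back into the low word. A userspace sketch of that fold (illustrative only, not the kernel implementation):

#include <stdio.h>
#include <stdint.h>

/* One's-complement 32-bit add with end-around carry, as csum_add() does */
static uint32_t csum_add32(uint32_t csum, uint32_t addend)
{
	uint64_t res = (uint64_t)csum + addend;

	return (uint32_t)res + (uint32_t)(res >> 32);	/* fold the carry back in */
}

int main(void)
{
	/* 0xffffffff + 1 wraps, and the carry is re-added: result 0x1 */
	printf("%#x\n", csum_add32(0xffffffffu, 1u));
	return 0;
}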
diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
index d1a8d93cccfd..44efe739b6b9 100644
--- a/arch/powerpc/include/asm/cmpxchg.h
+++ b/arch/powerpc/include/asm/cmpxchg.h
@@ -5,25 +5,25 @@
#include <linux/compiler.h>
#include <asm/synch.h>
#include <asm/asm-compat.h>
+#include <linux/bug.h>
/*
* Atomic exchange
*
- * Changes the memory location '*ptr' to be val and returns
+ * Changes the memory location '*p' to be val and returns
* the previous value stored there.
*/
+
static __always_inline unsigned long
-__xchg_u32(volatile void *p, unsigned long val)
+__xchg_u32_local(volatile void *p, unsigned long val)
{
unsigned long prev;
__asm__ __volatile__(
- PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%2 \n"
PPC405_ERR77(0,%2)
" stwcx. %3,0,%2 \n\
bne- 1b"
- PPC_ATOMIC_EXIT_BARRIER
: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
: "r" (p), "r" (val)
: "cc", "memory");
@@ -31,42 +31,34 @@ __xchg_u32(volatile void *p, unsigned long val)
return prev;
}
-/*
- * Atomic exchange
- *
- * Changes the memory location '*ptr' to be val and returns
- * the previous value stored there.
- */
static __always_inline unsigned long
-__xchg_u32_local(volatile void *p, unsigned long val)
+__xchg_u32_relaxed(u32 *p, unsigned long val)
{
unsigned long prev;
__asm__ __volatile__(
-"1: lwarx %0,0,%2 \n"
- PPC405_ERR77(0,%2)
-" stwcx. %3,0,%2 \n\
- bne- 1b"
- : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
+"1: lwarx %0,0,%2\n"
+ PPC405_ERR77(0, %2)
+" stwcx. %3,0,%2\n"
+" bne- 1b"
+ : "=&r" (prev), "+m" (*p)
: "r" (p), "r" (val)
- : "cc", "memory");
+ : "cc");
return prev;
}
#ifdef CONFIG_PPC64
static __always_inline unsigned long
-__xchg_u64(volatile void *p, unsigned long val)
+__xchg_u64_local(volatile void *p, unsigned long val)
{
unsigned long prev;
__asm__ __volatile__(
- PPC_ATOMIC_ENTRY_BARRIER
"1: ldarx %0,0,%2 \n"
PPC405_ERR77(0,%2)
" stdcx. %3,0,%2 \n\
bne- 1b"
- PPC_ATOMIC_EXIT_BARRIER
: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
: "r" (p), "r" (val)
: "cc", "memory");
@@ -75,64 +67,52 @@ __xchg_u64(volatile void *p, unsigned long val)
}
static __always_inline unsigned long
-__xchg_u64_local(volatile void *p, unsigned long val)
+__xchg_u64_relaxed(u64 *p, unsigned long val)
{
unsigned long prev;
__asm__ __volatile__(
-"1: ldarx %0,0,%2 \n"
- PPC405_ERR77(0,%2)
-" stdcx. %3,0,%2 \n\
- bne- 1b"
- : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
+"1: ldarx %0,0,%2\n"
+ PPC405_ERR77(0, %2)
+" stdcx. %3,0,%2\n"
+" bne- 1b"
+ : "=&r" (prev), "+m" (*p)
: "r" (p), "r" (val)
- : "cc", "memory");
+ : "cc");
return prev;
}
#endif
-/*
- * This function doesn't exist, so you'll get a linker error
- * if something tries to do an invalid xchg().
- */
-extern void __xchg_called_with_bad_pointer(void);
-
static __always_inline unsigned long
-__xchg(volatile void *ptr, unsigned long x, unsigned int size)
+__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
{
switch (size) {
case 4:
- return __xchg_u32(ptr, x);
+ return __xchg_u32_local(ptr, x);
#ifdef CONFIG_PPC64
case 8:
- return __xchg_u64(ptr, x);
+ return __xchg_u64_local(ptr, x);
#endif
}
- __xchg_called_with_bad_pointer();
+ BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg");
return x;
}
static __always_inline unsigned long
-__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
+__xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
{
switch (size) {
case 4:
- return __xchg_u32_local(ptr, x);
+ return __xchg_u32_relaxed(ptr, x);
#ifdef CONFIG_PPC64
case 8:
- return __xchg_u64_local(ptr, x);
+ return __xchg_u64_relaxed(ptr, x);
#endif
}
- __xchg_called_with_bad_pointer();
+ BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_local");
return x;
}
-#define xchg(ptr,x) \
- ({ \
- __typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
- })
-
#define xchg_local(ptr,x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
@@ -140,6 +120,12 @@ __xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
(unsigned long)_x_, sizeof(*(ptr))); \
})
+#define xchg_relaxed(ptr, x) \
+({ \
+ __typeof__(*(ptr)) _x_ = (x); \
+ (__typeof__(*(ptr))) __xchg_relaxed((ptr), \
+ (unsigned long)_x_, sizeof(*(ptr))); \
+})
/*
* Compare and exchange - if *p == old, set it to new,
* and return the old value of *p.
@@ -190,6 +176,56 @@ __cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
return prev;
}
+static __always_inline unsigned long
+__cmpxchg_u32_relaxed(u32 *p, unsigned long old, unsigned long new)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__ (
+"1: lwarx %0,0,%2 # __cmpxchg_u32_relaxed\n"
+" cmpw 0,%0,%3\n"
+" bne- 2f\n"
+ PPC405_ERR77(0, %2)
+" stwcx. %4,0,%2\n"
+" bne- 1b\n"
+"2:"
+ : "=&r" (prev), "+m" (*p)
+ : "r" (p), "r" (old), "r" (new)
+ : "cc");
+
+ return prev;
+}
+
+/*
+ * The cmpxchg family provides no ordering guarantee when the compare
+ * fails, so we can avoid superfluous barriers by implementing cmpxchg()
+ * and cmpxchg_acquire() in assembly. We don't do the same for
+ * cmpxchg_release(), because that would put a barrier in the middle of
+ * an ll/sc loop, which is probably a bad idea; for example, it might
+ * make the conditional store more likely to fail.
+ */
+static __always_inline unsigned long
+__cmpxchg_u32_acquire(u32 *p, unsigned long old, unsigned long new)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__ (
+"1: lwarx %0,0,%2 # __cmpxchg_u32_acquire\n"
+" cmpw 0,%0,%3\n"
+" bne- 2f\n"
+ PPC405_ERR77(0, %2)
+" stwcx. %4,0,%2\n"
+" bne- 1b\n"
+ PPC_ACQUIRE_BARRIER
+ "\n"
+"2:"
+ : "=&r" (prev), "+m" (*p)
+ : "r" (p), "r" (old), "r" (new)
+ : "cc", "memory");
+
+ return prev;
+}
+
#ifdef CONFIG_PPC64
static __always_inline unsigned long
__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
@@ -233,11 +269,47 @@ __cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
return prev;
}
-#endif
-/* This function doesn't exist, so you'll get a linker error
- if something tries to do an invalid cmpxchg(). */
-extern void __cmpxchg_called_with_bad_pointer(void);
+static __always_inline unsigned long
+__cmpxchg_u64_relaxed(u64 *p, unsigned long old, unsigned long new)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__ (
+"1: ldarx %0,0,%2 # __cmpxchg_u64_relaxed\n"
+" cmpd 0,%0,%3\n"
+" bne- 2f\n"
+" stdcx. %4,0,%2\n"
+" bne- 1b\n"
+"2:"
+ : "=&r" (prev), "+m" (*p)
+ : "r" (p), "r" (old), "r" (new)
+ : "cc");
+
+ return prev;
+}
+
+static __always_inline unsigned long
+__cmpxchg_u64_acquire(u64 *p, unsigned long old, unsigned long new)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__ (
+"1: ldarx %0,0,%2 # __cmpxchg_u64_acquire\n"
+" cmpd 0,%0,%3\n"
+" bne- 2f\n"
+" stdcx. %4,0,%2\n"
+" bne- 1b\n"
+ PPC_ACQUIRE_BARRIER
+ "\n"
+"2:"
+ : "=&r" (prev), "+m" (*p)
+ : "r" (p), "r" (old), "r" (new)
+ : "cc", "memory");
+
+ return prev;
+}
+#endif
static __always_inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
@@ -251,7 +323,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
return __cmpxchg_u64(ptr, old, new);
#endif
}
- __cmpxchg_called_with_bad_pointer();
+ BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg");
return old;
}
@@ -267,10 +339,41 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
return __cmpxchg_u64_local(ptr, old, new);
#endif
}
- __cmpxchg_called_with_bad_pointer();
+ BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_local");
+ return old;
+}
+
+static __always_inline unsigned long
+__cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
+ unsigned int size)
+{
+ switch (size) {
+ case 4:
+ return __cmpxchg_u32_relaxed(ptr, old, new);
+#ifdef CONFIG_PPC64
+ case 8:
+ return __cmpxchg_u64_relaxed(ptr, old, new);
+#endif
+ }
+ BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_relaxed");
return old;
}
+static __always_inline unsigned long
+__cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
+ unsigned int size)
+{
+ switch (size) {
+ case 4:
+ return __cmpxchg_u32_acquire(ptr, old, new);
+#ifdef CONFIG_PPC64
+ case 8:
+ return __cmpxchg_u64_acquire(ptr, old, new);
+#endif
+ }
+ BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_acquire");
+ return old;
+}
#define cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
@@ -288,6 +391,23 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
(unsigned long)_n_, sizeof(*(ptr))); \
})
+#define cmpxchg_relaxed(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \
+ (unsigned long)_o_, (unsigned long)_n_, \
+ sizeof(*(ptr))); \
+})
+
+#define cmpxchg_acquire(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg_acquire((ptr), \
+ (unsigned long)_o_, (unsigned long)_n_, \
+ sizeof(*(ptr))); \
+})
#ifdef CONFIG_PPC64
#define cmpxchg64(ptr, o, n) \
({ \
@@ -299,7 +419,16 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \
})
-#define cmpxchg64_relaxed cmpxchg64_local
+#define cmpxchg64_relaxed(ptr, o, n) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ cmpxchg_relaxed((ptr), (o), (n)); \
+})
+#define cmpxchg64_acquire(ptr, o, n) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ cmpxchg_acquire((ptr), (o), (n)); \
+})
#else
#include <asm-generic/cmpxchg-local.h>
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
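
As the comment above notes, the acquire variants only need ordering on the success path. A userspace analogue of that contract, using a compiler builtin instead of the lwarx/stwcx. sequences (illustrative only):

#include <stdbool.h>
#include <stdio.h>

/* Acquire ordering on success, relaxed on failure -- like cmpxchg_acquire() */
static bool trylock_acquire(unsigned int *lock)
{
	unsigned int expected = 0;

	return __atomic_compare_exchange_n(lock, &expected, 1, false,
					   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

int main(void)
{
	unsigned int lock = 0;

	printf("first=%d second=%d\n", trylock_acquire(&lock),
	       trylock_acquire(&lock));	/* first=1 second=0 */
	return 0;
}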
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index 840a5509b3f1..994c60a857ce 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -99,4 +99,25 @@ static inline unsigned long ppc_global_function_entry(void *func)
#endif
}
+#ifdef CONFIG_PPC64
+/*
+ * Some instruction encodings commonly used in dynamic ftracing
+ * and function live patching.
+ */
+
+/* This must match the definition of STK_GOT in <asm/ppc_asm.h> */
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define R2_STACK_OFFSET 24
+#else
+#define R2_STACK_OFFSET 40
+#endif
+
+#define PPC_INST_LD_TOC (PPC_INST_LD | ___PPC_RT(__REG_R2) | \
+ ___PPC_RA(__REG_R1) | R2_STACK_OFFSET)
+
+/* usually preceded by a mflr r0 */
+#define PPC_INST_STD_LR (PPC_INST_STD | ___PPC_RS(__REG_R0) | \
+ ___PPC_RA(__REG_R1) | PPC_LR_STKOFF)
+#endif /* CONFIG_PPC64 */
+
#endif /* _ASM_POWERPC_CODE_PATCHING_H */
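
PPC_INST_LD_TOC above is just the fixed encoding of the TOC-restore load that follows a call (ld r2,24(r1) on ELFv2). A standalone sketch of how such an encoding is assembled; the opcode values are assumptions chosen to mirror <asm/ppc-opcode.h>:

#include <stdio.h>

/* Assumed values, mirroring <asm/ppc-opcode.h> */
#define PPC_INST_LD	0xe8000000u	/* ld rt,ds(ra), primary opcode 58 */
#define PPC_RT(r)	((unsigned int)(r) << 21)
#define PPC_RA(r)	((unsigned int)(r) << 16)
#define R2_STACK_OFF	24		/* ELFv2 STK_GOT */

int main(void)
{
	unsigned int ld_toc = PPC_INST_LD | PPC_RT(2) | PPC_RA(1) | R2_STACK_OFF;

	printf("ld r2,%d(r1) = %#x\n", R2_STACK_OFF, ld_toc);	/* 0xe8410018 */
	return 0;
}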
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index b118072670fb..df4fb5faba43 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -43,6 +43,11 @@ extern int machine_check_e500(struct pt_regs *regs);
extern int machine_check_e200(struct pt_regs *regs);
extern int machine_check_47x(struct pt_regs *regs);
+extern void cpu_down_flush_e500v2(void);
+extern void cpu_down_flush_e500mc(void);
+extern void cpu_down_flush_e5500(void);
+extern void cpu_down_flush_e6500(void);
+
/* NOTE WELL: Update identify_cpu() if fields are added or removed! */
struct cpu_spec {
/* CPU is matched via (PVR & pvr_mask) == pvr_value */
@@ -59,6 +64,9 @@ struct cpu_spec {
unsigned int icache_bsize;
unsigned int dcache_bsize;
+ /* flush caches inside the current cpu */
+ void (*cpu_down_flush)(void);
+
/* number of performance monitor counters */
unsigned int num_pmcs;
enum powerpc_pmc_type pmc_type;
@@ -171,7 +179,7 @@ enum {
#define CPU_FTR_ARCH_201 LONG_ASM_CONST(0x0000000200000000)
#define CPU_FTR_ARCH_206 LONG_ASM_CONST(0x0000000400000000)
#define CPU_FTR_ARCH_207S LONG_ASM_CONST(0x0000000800000000)
-/* Free LONG_ASM_CONST(0x0000001000000000) */
+#define CPU_FTR_ARCH_300 LONG_ASM_CONST(0x0000001000000000)
#define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000002000000000)
#define CPU_FTR_CTRL LONG_ASM_CONST(0x0000004000000000)
#define CPU_FTR_SMT LONG_ASM_CONST(0x0000008000000000)
@@ -196,6 +204,7 @@ enum {
#define CPU_FTR_DAWR LONG_ASM_CONST(0x0400000000000000)
#define CPU_FTR_DABRX LONG_ASM_CONST(0x0800000000000000)
#define CPU_FTR_PMAO_BUG LONG_ASM_CONST(0x1000000000000000)
+#define CPU_FTR_SUBCORE LONG_ASM_CONST(0x2000000000000000)
#ifndef __ASSEMBLY__
@@ -443,9 +452,19 @@ enum {
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
- CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
+ CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_SUBCORE)
#define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
#define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
+#define CPU_FTRS_POWER9 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
+ CPU_FTR_MMCRA | CPU_FTR_SMT | \
+ CPU_FTR_COHERENT_ICACHE | \
+ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
+ CPU_FTR_DSCR | CPU_FTR_SAO | \
+ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
+ CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
+ CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
+ CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_ARCH_300)
#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -464,7 +483,7 @@ enum {
(CPU_FTRS_POWER4 | CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | \
CPU_FTRS_POWER6 | CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | \
CPU_FTRS_POWER8 | CPU_FTRS_POWER8_DD1 | CPU_FTRS_CELL | \
- CPU_FTRS_PA6T | CPU_FTR_VSX)
+ CPU_FTRS_PA6T | CPU_FTR_VSX | CPU_FTRS_POWER9)
#endif
#else
enum {
@@ -515,7 +534,8 @@ enum {
(CPU_FTRS_POWER4 & CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & \
CPU_FTRS_POWER6 & CPU_FTRS_POWER7 & CPU_FTRS_CELL & \
CPU_FTRS_PA6T & CPU_FTRS_POWER8 & CPU_FTRS_POWER8E & \
- CPU_FTRS_POWER8_DD1 & ~CPU_FTR_HVMODE & CPU_FTRS_POSSIBLE)
+ CPU_FTRS_POWER8_DD1 & ~CPU_FTR_HVMODE & CPU_FTRS_POSSIBLE & \
+ CPU_FTRS_POWER9)
#endif
#else
enum {
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index ba42e46ea58e..666bef4ebfae 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -1,6 +1,7 @@
#ifndef _ASM_POWERPC_CPUTHREADS_H
#define _ASM_POWERPC_CPUTHREADS_H
+#ifndef __ASSEMBLY__
#include <linux/cpumask.h>
/*
@@ -94,7 +95,21 @@ static inline int cpu_last_thread_sibling(int cpu)
return cpu | (threads_per_core - 1);
}
+static inline u32 get_tensr(void)
+{
+#ifdef CONFIG_BOOKE
+ if (cpu_has_feature(CPU_FTR_SMT))
+ return mfspr(SPRN_TENSR);
+#endif
+ return 1;
+}
+
+void book3e_start_thread(int thread, unsigned long addr);
+void book3e_stop_thread(int thread);
+
+#endif /* __ASSEMBLY__ */
+#define INVALID_THREAD_HWID 0x0fff
#endif /* _ASM_POWERPC_CPUTHREADS_H */
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index 867c39b45df6..fb9f376ae27b 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -72,6 +72,7 @@ struct pci_dn;
#define EEH_PE_PHB (1 << 1) /* PHB PE */
#define EEH_PE_DEVICE (1 << 2) /* Device PE */
#define EEH_PE_BUS (1 << 3) /* Bus PE */
+#define EEH_PE_VF (1 << 4) /* VF PE */
#define EEH_PE_ISOLATED (1 << 0) /* Isolated PE */
#define EEH_PE_RECOVERING (1 << 1) /* Recovering PE */
@@ -136,11 +137,15 @@ struct eeh_dev {
int pcix_cap; /* Saved PCIx capability */
int pcie_cap; /* Saved PCIe capability */
int aer_cap; /* Saved AER capability */
+ int af_cap; /* Saved AF capability */
struct eeh_pe *pe; /* Associated PE */
struct list_head list; /* Form link list in the PE */
+ struct list_head rmv_list; /* Record the removed edevs */
struct pci_controller *phb; /* Associated PHB */
struct pci_dn *pdn; /* Associated PCI device node */
struct pci_dev *pdev; /* Associated PCI device */
+ bool in_error; /* Error flag for edev */
+ struct pci_dev *physfn; /* Associated SRIOV PF */
struct pci_bus *bus; /* PCI bus for partial hotplug */
};
diff --git a/arch/powerpc/include/asm/fsl_pm.h b/arch/powerpc/include/asm/fsl_pm.h
new file mode 100644
index 000000000000..47df55e36d4f
--- /dev/null
+++ b/arch/powerpc/include/asm/fsl_pm.h
@@ -0,0 +1,51 @@
+/*
+ * Support Power Management
+ *
+ * Copyright 2014-2015 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#ifndef __PPC_FSL_PM_H
+#define __PPC_FSL_PM_H
+
+#define E500_PM_PH10 1
+#define E500_PM_PH15 2
+#define E500_PM_PH20 3
+#define E500_PM_PH30 4
+#define E500_PM_DOZE E500_PM_PH10
+#define E500_PM_NAP E500_PM_PH15
+
+#define PLAT_PM_SLEEP 20
+#define PLAT_PM_LPM20 30
+
+#define FSL_PM_SLEEP (1 << 0)
+#define FSL_PM_DEEP_SLEEP (1 << 1)
+
+struct fsl_pm_ops {
+ /* mask pending interrupts to the RCPM from MPIC */
+ void (*irq_mask)(int cpu);
+
+ /* unmask pending interrupts to the RCPM from MPIC */
+ void (*irq_unmask)(int cpu);
+ void (*cpu_enter_state)(int cpu, int state);
+ void (*cpu_exit_state)(int cpu, int state);
+ void (*cpu_up_prepare)(int cpu);
+ void (*cpu_die)(int cpu);
+ int (*plat_enter_sleep)(void);
+ void (*freeze_time_base)(bool freeze);
+
+ /* keep the power of IP blocks during sleep/deep sleep */
+ void (*set_ip_power)(bool enable, u32 mask);
+
+ /* get platform supported power management modes */
+ unsigned int (*get_pm_modes)(void);
+};
+
+extern const struct fsl_pm_ops *qoriq_pm_ops;
+
+int __init fsl_rcpm_init(void);
+
+#endif /* __PPC_FSL_PM_H */
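
A platform backend is expected to fill in one of these ops structures and publish it through qoriq_pm_ops. A hypothetical sketch of that glue (all backend function names are placeholders, not from this patch):

/* Hypothetical RCPM backend; the rcpm_* callbacks are placeholders */
static const struct fsl_pm_ops rcpm_pm_ops = {
	.irq_mask	  = rcpm_irq_mask,
	.irq_unmask	  = rcpm_irq_unmask,
	.cpu_enter_state  = rcpm_cpu_enter_state,
	.cpu_exit_state	  = rcpm_cpu_exit_state,
	.plat_enter_sleep = rcpm_plat_enter_sleep,
	.get_pm_modes	  = rcpm_get_pm_modes,
};

int __init fsl_rcpm_init(void)
{
	qoriq_pm_ops = &rcpm_pm_ops;	/* consumers check for a non-NULL ops */
	return 0;
}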
diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
index ef89b1465573..50ca7585abe2 100644
--- a/arch/powerpc/include/asm/ftrace.h
+++ b/arch/powerpc/include/asm/ftrace.h
@@ -46,6 +46,8 @@
extern void _mcount(void);
#ifdef CONFIG_DYNAMIC_FTRACE
+# define FTRACE_ADDR ((unsigned long)ftrace_caller)
+# define FTRACE_REGS_ADDR FTRACE_ADDR
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
/* relocation of mcount call site is the same as the address */
@@ -58,6 +60,9 @@ struct dyn_arch_ftrace {
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* __ASSEMBLY__ */
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+#endif
#endif
#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64) && !defined(__ASSEMBLY__)
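
With ARCH_SUPPORTS_FTRACE_OPS defined, an ftrace callback receives its ftrace_ops and, when FTRACE_OPS_FL_SAVE_REGS is set, the saved pt_regs. A hedged sketch of how a consumer of that interface might look around this kernel version (callback and ops names are illustrative):

#include <linux/ftrace.h>

/* Illustrative only: a callback that sees the saved register state */
static void my_trace_func(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *regs)
{
	/* inspect or modify regs here, e.g. for live patching */
}

static struct ftrace_ops my_ops = {
	.func	= my_trace_func,
	.flags	= FTRACE_OPS_FL_SAVE_REGS,
};

/* register_ftrace_function(&my_ops) would arm it on the selected functions */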
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 7eac89b9f02e..42814f0567cc 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -19,7 +19,7 @@ static inline pte_t *hugepd_page(hugepd_t hpd)
* We have only four bits to encode, MMU page size
*/
BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
- return (pte_t *)(hpd.pd & ~HUGEPD_SHIFT_MASK);
+ return __va(hpd.pd & HUGEPD_ADDR_MASK);
}
static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index e3b54dd4f730..0bc9c284aa10 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -94,6 +94,7 @@
#define H_SG_LIST -72
#define H_OP_MODE -73
#define H_COP_HW -74
+#define H_STATE -75
#define H_UNSUPPORTED_FLAG_START -256
#define H_UNSUPPORTED_FLAG_END -511
#define H_MULTI_THREADS_ACTIVE -9005
diff --git a/arch/powerpc/include/asm/hydra.h b/arch/powerpc/include/asm/hydra.h
index 1cb39c96d155..b3b0f2d020f0 100644
--- a/arch/powerpc/include/asm/hydra.h
+++ b/arch/powerpc/include/asm/hydra.h
@@ -89,7 +89,7 @@ extern volatile struct Hydra __iomem *Hydra;
#define HYDRA_INT_EXT2 13 /* PCI IRQX */
#define HYDRA_INT_EXT3 14 /* PCI IRQY */
#define HYDRA_INT_EXT4 15 /* PCI IRQZ */
-#define HYDRA_INT_EXT5 16 /* IDE Primay/Secondary */
+#define HYDRA_INT_EXT5 16 /* IDE Primary/Secondary */
#define HYDRA_INT_EXT6 17 /* IDE Secondary */
#define HYDRA_INT_EXT7 18 /* Power Off Request */
#define HYDRA_INT_SPARE 19
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 6c1297ec374c..2fd1690b79d2 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -300,7 +300,7 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
* When CONFIG_PPC_INDIRECT_MMIO is set, the platform can provide hooks
* on all MMIOs. (Note that this is all 64 bits only for now)
*
- * To help platforms who may need to differenciate MMIO addresses in
+ * To help platforms who may need to differentiate MMIO addresses in
* their hooks, a bitfield is reserved for use by the platform near the
* top of MMIO addresses (not PIO, those have to cope the hard way).
*
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 3f191f573d4f..fd22442d30a9 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -54,7 +54,7 @@ struct machdep_calls {
int psize, int apsize,
int ssize);
long (*hpte_remove)(unsigned long hpte_group);
- void (*hpte_removebolted)(unsigned long ea,
+ int (*hpte_removebolted)(unsigned long ea,
int psize, int ssize);
void (*flush_hash_range)(unsigned long number, int local);
void (*hugepage_invalidate)(unsigned long vsid,
@@ -174,11 +174,11 @@ struct machdep_calls {
platform, called once per cpu. */
void (*enable_pmcs)(void);
- /* Set DABR for this platform, leave empty for default implemenation */
+ /* Set DABR for this platform, leave empty for default implementation */
int (*set_dabr)(unsigned long dabr,
unsigned long dabrx);
- /* Set DAWR for this platform, leave empty for default implemenation */
+ /* Set DAWR for this platform, leave empty for default implementation */
int (*set_dawr)(unsigned long dawr,
unsigned long dawrx);
diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
index 8565c254151a..2563c435a4b1 100644
--- a/arch/powerpc/include/asm/mman.h
+++ b/arch/powerpc/include/asm/mman.h
@@ -18,11 +18,12 @@
* This file is included by linux/mman.h, so we can't use cacl_vm_prot_bits()
* here. How important is the optimization?
*/
-static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
+static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
+ unsigned long pkey)
{
return (prot & PROT_SAO) ? VM_SAO : 0;
}
-#define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
+#define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
{
diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h
index f05500a29a60..0a566f15f985 100644
--- a/arch/powerpc/include/asm/mmu-8xx.h
+++ b/arch/powerpc/include/asm/mmu-8xx.h
@@ -171,9 +171,9 @@ typedef struct {
} mm_context_t;
#endif /* !__ASSEMBLY__ */
-#if (PAGE_SHIFT == 12)
+#if defined(CONFIG_PPC_4K_PAGES)
#define mmu_virtual_psize MMU_PAGE_4K
-#elif (PAGE_SHIFT == 14)
+#elif defined(CONFIG_PPC_16K_PAGES)
#define mmu_virtual_psize MMU_PAGE_16K
#else
#error "Unsupported PAGE_SIZE"
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 3d5abfe6ba67..8ca1c983bf6c 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -97,6 +97,7 @@
#define MMU_FTRS_POWER6 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
#define MMU_FTRS_POWER7 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
#define MMU_FTRS_POWER8 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
+#define MMU_FTRS_POWER9 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
#define MMU_FTRS_CELL MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
MMU_FTR_CI_LARGE_PAGE
#define MMU_FTRS_PA6T MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
@@ -182,10 +183,10 @@ static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
#if defined(CONFIG_PPC_STD_MMU_64)
/* 64-bit classic hash table MMU */
-# include <asm/mmu-hash64.h>
+#include <asm/book3s/64/mmu-hash.h>
#elif defined(CONFIG_PPC_STD_MMU_32)
/* 32-bit classic hash table MMU */
-# include <asm/mmu-hash32.h>
+#include <asm/book3s/32/mmu-hash.h>
#elif defined(CONFIG_40x)
/* 40x-style software loaded TLB */
# include <asm/mmu-40x.h>
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 878c27771717..4eaab40e3ade 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -148,5 +148,17 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
{
}
+static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
+ bool write, bool execute, bool foreign)
+{
+ /* by default, allow everything */
+ return true;
+}
+
+static inline bool arch_pte_access_permitted(pte_t pte, bool write)
+{
+ /* by default, allow everything */
+ return true;
+}
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index dcfcad139bcc..cd4ffd86765f 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -19,7 +19,7 @@
* Thanks to Paul M for explaining this.
*
* PPC can only do rel jumps += 32MB, and often the kernel and other
- * modules are furthur away than this. So, we jump to a table of
+ * modules are further away than this. So, we jump to a table of
* trampolines attached to the module (the Procedure Linkage Table)
* whenever that happens.
*/
@@ -78,10 +78,18 @@ struct mod_arch_specific {
# endif /* MODULE */
#endif
-bool is_module_trampoline(u32 *insns);
-int module_trampoline_target(struct module *mod, u32 *trampoline,
+int module_trampoline_target(struct module *mod, unsigned long trampoline,
unsigned long *target);
+#ifdef CONFIG_DYNAMIC_FTRACE
+int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs);
+#else
+static inline int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs)
+{
+ return 0;
+}
+#endif
+
struct exception_table_entry;
void sort_ex_table(struct exception_table_entry *start,
struct exception_table_entry *finish);
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index c82cbf52d19e..780847597514 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -86,7 +86,7 @@ extern int icache_44x_need_flush;
* We no longer map larger than phys RAM with the BATs so we don't have
* to worry about the VMALLOC_OFFSET causing problems. We do have to worry
* about clashes between our early calls to ioremap() that start growing down
- * from ioremap_base being run into the VM area allocations (growing upwards
+ * from IOREMAP_TOP being run into the VM area allocations (growing upwards
* from VMALLOC_START). For this reason we have ioremap_bot to check when
* we actually run into our mappings setup in the early boot with the VM
* system. This really does become a problem for machines with good amounts
@@ -309,7 +309,8 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
#define pte_index(address) \
(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr) \
- ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
+ (pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
+ pte_index(addr))
#define pte_offset_map(dir, addr) \
((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
#define pte_unmap(pte) kunmap_atomic(pte)
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index b9f734dd5b81..10debb93c4a4 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -108,6 +108,9 @@
#ifndef __ASSEMBLY__
/* pte_clear moved to later in this file */
+/* Pointers in the page table tree are virtual addresses */
+#define __pgtable_ptr_val(ptr) ((unsigned long)(ptr))
+
#define PMD_BAD_BITS (PTE_TABLE_SIZE-1)
#define PUD_BAD_BITS (PMD_TABLE_SIZE-1)
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 07a99e638449..9d86c6651716 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -248,6 +248,7 @@ extern int opal_elog_init(void);
extern void opal_platform_dump_init(void);
extern void opal_sys_param_init(void);
extern void opal_msglog_init(void);
+extern void opal_msglog_sysfs_init(void);
extern int opal_async_comp_init(void);
extern int opal_sensor_init(void);
extern int opal_hmi_handler_init(void);
@@ -273,6 +274,8 @@ void opal_free_sg_list(struct opal_sg_list *sg);
extern int opal_error_code(int rc);
+ssize_t opal_msglog_copy(char *to, loff_t pos, size_t count);
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_OPAL_H */
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index e34124f6fbf2..ab3d8977bacd 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -271,6 +271,13 @@ extern long long virt_phys_offset;
#else
#define PD_HUGE 0x80000000
#endif
+
+#else /* CONFIG_PPC_BOOK3S_64 */
+/*
+ * Book3S 64 stores real addresses in the hugepd entries to
+ * avoid overlaps with _PAGE_PRESENT and _PAGE_PTE.
+ */
+#define HUGEPD_ADDR_MASK (0x0ffffffffffffffful & ~HUGEPD_SHIFT_MASK)
#endif /* CONFIG_PPC_BOOK3S_64 */
/*
@@ -281,109 +288,7 @@ extern long long virt_phys_offset;
#ifndef __ASSEMBLY__
-#ifdef CONFIG_STRICT_MM_TYPECHECKS
-/* These are used to make use of C type-checking. */
-
-/* PTE level */
-typedef struct { pte_basic_t pte; } pte_t;
-#define __pte(x) ((pte_t) { (x) })
-static inline pte_basic_t pte_val(pte_t x)
-{
- return x.pte;
-}
-
-/* 64k pages additionally define a bigger "real PTE" type that gathers
- * the "second half" part of the PTE for pseudo 64k pages
- */
-#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64)
-typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
-#else
-typedef struct { pte_t pte; } real_pte_t;
-#endif
-
-/* PMD level */
-#ifdef CONFIG_PPC64
-typedef struct { unsigned long pmd; } pmd_t;
-#define __pmd(x) ((pmd_t) { (x) })
-static inline unsigned long pmd_val(pmd_t x)
-{
- return x.pmd;
-}
-
-/* PUD level exusts only on 4k pages */
-#ifndef CONFIG_PPC_64K_PAGES
-typedef struct { unsigned long pud; } pud_t;
-#define __pud(x) ((pud_t) { (x) })
-static inline unsigned long pud_val(pud_t x)
-{
- return x.pud;
-}
-#endif /* !CONFIG_PPC_64K_PAGES */
-#endif /* CONFIG_PPC64 */
-
-/* PGD level */
-typedef struct { unsigned long pgd; } pgd_t;
-#define __pgd(x) ((pgd_t) { (x) })
-static inline unsigned long pgd_val(pgd_t x)
-{
- return x.pgd;
-}
-
-/* Page protection bits */
-typedef struct { unsigned long pgprot; } pgprot_t;
-#define pgprot_val(x) ((x).pgprot)
-#define __pgprot(x) ((pgprot_t) { (x) })
-
-#else
-
-/*
- * .. while these make it easier on the compiler
- */
-
-typedef pte_basic_t pte_t;
-#define __pte(x) (x)
-static inline pte_basic_t pte_val(pte_t pte)
-{
- return pte;
-}
-
-#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64)
-typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
-#else
-typedef pte_t real_pte_t;
-#endif
-
-
-#ifdef CONFIG_PPC64
-typedef unsigned long pmd_t;
-#define __pmd(x) (x)
-static inline unsigned long pmd_val(pmd_t pmd)
-{
- return pmd;
-}
-
-#ifndef CONFIG_PPC_64K_PAGES
-typedef unsigned long pud_t;
-#define __pud(x) (x)
-static inline unsigned long pud_val(pud_t pud)
-{
- return pud;
-}
-#endif /* !CONFIG_PPC_64K_PAGES */
-#endif /* CONFIG_PPC64 */
-
-typedef unsigned long pgd_t;
-#define __pgd(x) (x)
-static inline unsigned long pgd_val(pgd_t pgd)
-{
- return pgd;
-}
-
-typedef unsigned long pgprot_t;
-#define pgprot_val(x) (x)
-#define __pgprot(x) (x)
-
-#endif
+#include <asm/pgtable-types.h>
typedef struct { signed long pd; } hugepd_t;
diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h
index 68d73b2a7bfc..6a8e1797f223 100644
--- a/arch/powerpc/include/asm/page_32.h
+++ b/arch/powerpc/include/asm/page_32.h
@@ -1,6 +1,8 @@
#ifndef _ASM_POWERPC_PAGE_32_H
#define _ASM_POWERPC_PAGE_32_H
+#include <asm/cache.h>
+
#if defined(CONFIG_PHYSICAL_ALIGN) && (CONFIG_PHYSICAL_START != 0)
#if (CONFIG_PHYSICAL_START % CONFIG_PHYSICAL_ALIGN) != 0
#error "CONFIG_PHYSICAL_START must be a multiple of CONFIG_PHYSICAL_ALIGN"
@@ -36,9 +38,18 @@ typedef unsigned long long pte_basic_t;
typedef unsigned long pte_basic_t;
#endif
-struct page;
-extern void clear_pages(void *page, int order);
-static inline void clear_page(void *page) { clear_pages(page, 0); }
+/*
+ * Clear page using the dcbz instruction, which doesn't cause any
+ * memory traffic (except to write out any cache lines which get
+ * displaced). This only works on cacheable memory.
+ */
+static inline void clear_page(void *addr)
+{
+ unsigned int i;
+
+ for (i = 0; i < PAGE_SIZE / L1_CACHE_BYTES; i++, addr += L1_CACHE_BYTES)
+ dcbz(addr);
+}
extern void copy_page(void *to, void *from);
#include <asm-generic/getorder.h>
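The new clear_page() above depends on a dcbz() helper now pulled in via asm/cache.h. Presumably it is a thin wrapper around the dcbz instruction, which zeroes a whole L1 cache line without first fetching it from memory; roughly:

/* assumed shape of the helper, one cache line zeroed per call */
static inline void dcbz(void *addr)
{
	__asm__ __volatile__ ("dcbz 0, %0" : : "r" (addr) : "memory");
}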
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 78968c1ff931..f5056e3394b4 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -211,15 +211,16 @@ struct pci_dn {
#define IODA_INVALID_PE (-1)
#ifdef CONFIG_PPC_POWERNV
int pe_number;
+ int vf_index; /* VF index in the PF */
#ifdef CONFIG_PCI_IOV
u16 vfs_expanded; /* number of VFs IOV BAR expanded */
u16 num_vfs; /* number of VFs enabled*/
- int offset; /* PE# for the first VF PE */
-#define M64_PER_IOV 4
- int m64_per_iov;
+ int *pe_num_map; /* PE# for the first VF PE or array */
+ bool m64_single_mode; /* Use M64 BAR in Single Mode */
#define IODA_INVALID_M64 (-1)
- int m64_wins[PCI_SRIOV_NUM_BARS][M64_PER_IOV];
+ int (*m64_map)[PCI_SRIOV_NUM_BARS];
#endif /* CONFIG_PCI_IOV */
+ int mps; /* Maximum Payload Size */
#endif
struct list_head child_list;
struct list_head list;
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 814622146d5a..e157489ee7a1 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -136,16 +136,24 @@ extern ssize_t power_events_sysfs_show(struct device *dev,
* event 'cpu-cycles' can have two entries in sysfs: 'cpu-cycles' and
* 'PM_CYC' where the latter is the name by which the event is known in
* POWER CPU specification.
+ *
+ * Similarly, some hardware and cache events use the same event code. E.g.
+ * on POWER8, both "cache-references" and "L1-dcache-loads" events refer
+ * to the same event, PM_LD_REF_L1. The suffix allows us to have two
+ * sysfs objects for the same event and thus two entries/aliases in sysfs.
*/
#define EVENT_VAR(_id, _suffix) event_attr_##_id##_suffix
#define EVENT_PTR(_id, _suffix) &EVENT_VAR(_id, _suffix).attr.attr
#define EVENT_ATTR(_name, _id, _suffix) \
- PMU_EVENT_ATTR(_name, EVENT_VAR(_id, _suffix), PME_##_id, \
+ PMU_EVENT_ATTR(_name, EVENT_VAR(_id, _suffix), _id, \
power_events_sysfs_show)
#define GENERIC_EVENT_ATTR(_name, _id) EVENT_ATTR(_name, _id, _g)
#define GENERIC_EVENT_PTR(_id) EVENT_PTR(_id, _g)
+#define CACHE_EVENT_ATTR(_name, _id) EVENT_ATTR(_name, _id, _c)
+#define CACHE_EVENT_PTR(_id) EVENT_PTR(_id, _c)
+
#define POWER_EVENT_ATTR(_name, _id) EVENT_ATTR(_name, _id, _p)
#define POWER_EVENT_PTR(_id) EVENT_PTR(_id, _p)
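A hedged sketch of how a PMU driver could use the new CACHE_EVENT_ATTR alongside GENERIC_EVENT_ATTR, following the POWER8 example given in the comment above; PM_LD_REF_L1 is assumed to be a raw event code #defined by that driver, and the attribute array name is hypothetical.

/* two sysfs names (aliases) for the same raw event code */
GENERIC_EVENT_ATTR(cache-references,	PM_LD_REF_L1);
CACHE_EVENT_ATTR(L1-dcache-loads,	PM_LD_REF_L1);

static struct attribute *example_events_attr[] = {
	GENERIC_EVENT_PTR(PM_LD_REF_L1),
	CACHE_EVENT_PTR(PM_LD_REF_L1),
	NULL,
};

The differing suffixes (_g vs _c) keep the two generated attribute variables distinct even though they carry the same event id.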
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index 69ef28a81733..8d5fc3ac43da 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -53,7 +53,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
#ifndef CONFIG_PPC_64K_PAGES
-#define pgd_populate(MM, PGD, PUD) pgd_set(PGD, (unsigned long)PUD)
+#define pgd_populate(MM, PGD, PUD) pgd_set(PGD, __pgtable_ptr_val(PUD))
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
@@ -68,19 +68,19 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
- pud_set(pud, (unsigned long)pmd);
+ pud_set(pud, __pgtable_ptr_val(pmd));
}
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
pte_t *pte)
{
- pmd_set(pmd, (unsigned long)pte);
+ pmd_set(pmd, __pgtable_ptr_val(pte));
}
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
pgtable_t pte_page)
{
- pmd_set(pmd, (unsigned long)page_address(pte_page));
+ pmd_set(pmd, __pgtable_ptr_val(page_address(pte_page)));
}
#define pmd_pgtable(pmd) pmd_page(pmd)
@@ -171,23 +171,45 @@ extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
extern void __tlb_remove_table(void *_table);
#endif
-#define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
+#ifndef __PAGETABLE_PUD_FOLDED
+/* book3s 64 uses a 4-level page table */
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+{
+ pgd_set(pgd, __pgtable_ptr_val(pud));
+}
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
+ GFP_KERNEL|__GFP_REPEAT);
+}
+
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+ kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
+}
+#endif
+
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+ pud_set(pud, __pgtable_ptr_val(pmd));
+}
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
pte_t *pte)
{
- pmd_set(pmd, (unsigned long)pte);
+ pmd_set(pmd, __pgtable_ptr_val(pte));
}
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
pgtable_t pte_page)
{
- pmd_set(pmd, (unsigned long)pte_page);
+ pmd_set(pmd, __pgtable_ptr_val(pte_page));
}
static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
- return (pgtable_t)(pmd_val(pmd) & ~PMD_MASKED_BITS);
+ return (pgtable_t)pmd_page_vaddr(pmd);
}
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
@@ -233,11 +255,11 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
#define __pmd_free_tlb(tlb, pmd, addr) \
pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX)
-#ifndef CONFIG_PPC_64K_PAGES
+#ifndef __PAGETABLE_PUD_FOLDED
#define __pud_free_tlb(tlb, pud, addr) \
pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE)
-#endif /* CONFIG_PPC_64K_PAGES */
+#endif /* __PAGETABLE_PUD_FOLDED */
#define check_pgt_cache() do { } while (0)
diff --git a/arch/powerpc/include/asm/pgtable-types.h b/arch/powerpc/include/asm/pgtable-types.h
new file mode 100644
index 000000000000..43140f8b0592
--- /dev/null
+++ b/arch/powerpc/include/asm/pgtable-types.h
@@ -0,0 +1,103 @@
+#ifndef _ASM_POWERPC_PGTABLE_TYPES_H
+#define _ASM_POWERPC_PGTABLE_TYPES_H
+
+#ifdef CONFIG_STRICT_MM_TYPECHECKS
+/* These are used to make use of C type-checking. */
+
+/* PTE level */
+typedef struct { pte_basic_t pte; } pte_t;
+#define __pte(x) ((pte_t) { (x) })
+static inline pte_basic_t pte_val(pte_t x)
+{
+ return x.pte;
+}
+
+/* PMD level */
+#ifdef CONFIG_PPC64
+typedef struct { unsigned long pmd; } pmd_t;
+#define __pmd(x) ((pmd_t) { (x) })
+static inline unsigned long pmd_val(pmd_t x)
+{
+ return x.pmd;
+}
+
+/*
+ * 64-bit hash always uses a 4-level table. Everybody else uses 4 levels
+ * only for 4K page size.
+ */
+#if defined(CONFIG_PPC_BOOK3S_64) || !defined(CONFIG_PPC_64K_PAGES)
+typedef struct { unsigned long pud; } pud_t;
+#define __pud(x) ((pud_t) { (x) })
+static inline unsigned long pud_val(pud_t x)
+{
+ return x.pud;
+}
+#endif /* CONFIG_PPC_BOOK3S_64 || !CONFIG_PPC_64K_PAGES */
+#endif /* CONFIG_PPC64 */
+
+/* PGD level */
+typedef struct { unsigned long pgd; } pgd_t;
+#define __pgd(x) ((pgd_t) { (x) })
+static inline unsigned long pgd_val(pgd_t x)
+{
+ return x.pgd;
+}
+
+/* Page protection bits */
+typedef struct { unsigned long pgprot; } pgprot_t;
+#define pgprot_val(x) ((x).pgprot)
+#define __pgprot(x) ((pgprot_t) { (x) })
+
+#else
+
+/*
+ * .. while these make it easier on the compiler
+ */
+
+typedef pte_basic_t pte_t;
+#define __pte(x) (x)
+static inline pte_basic_t pte_val(pte_t pte)
+{
+ return pte;
+}
+
+#ifdef CONFIG_PPC64
+typedef unsigned long pmd_t;
+#define __pmd(x) (x)
+static inline unsigned long pmd_val(pmd_t pmd)
+{
+ return pmd;
+}
+
+#if defined(CONFIG_PPC_BOOK3S_64) || !defined(CONFIG_PPC_64K_PAGES)
+typedef unsigned long pud_t;
+#define __pud(x) (x)
+static inline unsigned long pud_val(pud_t pud)
+{
+ return pud;
+}
+#endif /* CONFIG_PPC_BOOK3S_64 || !CONFIG_PPC_64K_PAGES */
+#endif /* CONFIG_PPC64 */
+
+typedef unsigned long pgd_t;
+#define __pgd(x) (x)
+static inline unsigned long pgd_val(pgd_t pgd)
+{
+ return pgd;
+}
+
+typedef unsigned long pgprot_t;
+#define pgprot_val(x) (x)
+#define __pgprot(x) (x)
+
+#endif /* CONFIG_STRICT_MM_TYPECHECKS */
+/*
+ * With hash config 64k pages additionally define a bigger "real PTE" type that
+ * gathers the "second half" part of the PTE for pseudo 64k pages
+ */
+#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64)
+typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
+#else
+typedef struct { pte_t pte; } real_pte_t;
+#endif
+#endif /* _ASM_POWERPC_PGTABLE_TYPES_H */
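The struct wrappers under CONFIG_STRICT_MM_TYPECHECKS exist so that mixing page table levels becomes a compile-time error instead of a silent integer conversion. A small hypothetical illustration (pgtable-types.h is reached through asm/page.h, which defines pte_basic_t first):

#include <asm/page.h>

static void pgtable_typecheck_demo(void)
{
	pgd_t pgd = __pgd(0);
	pte_t pte;

	/*
	 * pte = pgd;  <- rejected by the compiler when the levels are
	 * distinct structs, silently accepted when both are bare integers.
	 */
	pte = __pte(pgd_val(pgd));	/* explicit conversion, works in
					   both configurations */
	(void)pte;
}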
diff --git a/arch/powerpc/include/asm/pmac_feature.h b/arch/powerpc/include/asm/pmac_feature.h
index 10902c9375d0..925697968946 100644
--- a/arch/powerpc/include/asm/pmac_feature.h
+++ b/arch/powerpc/include/asm/pmac_feature.h
@@ -46,7 +46,7 @@
/* PowerSurge are the first generation of PCI Pmacs. This include
* all of the Grand-Central based machines. We currently don't
- * differenciate most of them.
+ * differentiate most of them.
*/
#define PMAC_TYPE_PSURGE 0x10 /* PowerSurge */
#define PMAC_TYPE_ANS 0x11 /* Apple Network Server */
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index ac2330820b9a..009fab130cd8 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -236,7 +236,9 @@ struct thread_struct {
#endif
struct arch_hw_breakpoint hw_brk; /* info on the hardware breakpoint */
unsigned long trap_nr; /* last trap # on this thread */
+ u8 load_fp;
#ifdef CONFIG_ALTIVEC
+ u8 load_vec;
struct thread_vr_state vr_state;
struct thread_vr_state *vr_save_area;
unsigned long vrsave;
@@ -244,7 +246,7 @@ struct thread_struct {
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
/* VSR status */
- int used_vsr; /* set if process has used altivec */
+ int used_vsr; /* set if process has used VSX */
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
unsigned long evr[32]; /* upper 32-bits of SPE regs */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index c4cb2ffc624e..f5f4c66bbbc9 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -75,6 +75,14 @@
#define MSR_HV 0
#endif
+/*
+ * To be used in shared book E/book S, this avoids needing to worry about
+ * book S/book E in shared code
+ */
+#ifndef MSR_SPE
+#define MSR_SPE 0
+#endif
+
#define MSR_VEC __MASK(MSR_VEC_LG) /* Enable AltiVec */
#define MSR_VSX __MASK(MSR_VSX_LG) /* Enable VSX */
#define MSR_POW __MASK(MSR_POW_LG) /* Enable Power Management */
@@ -376,7 +384,7 @@
#define SPRN_TSCR 0x399 /* Thread Switch Control Register */
#define SPRN_DEC 0x016 /* Decrement Register */
-#define SPRN_DER 0x095 /* Debug Enable Regsiter */
+#define SPRN_DER 0x095 /* Debug Enable Register */
#define DER_RSTE 0x40000000 /* Reset Interrupt */
#define DER_CHSTPE 0x20000000 /* Check Stop */
#define DER_MCIE 0x10000000 /* Machine Check Interrupt */
@@ -401,7 +409,7 @@
#define SPRN_DPDES 0x0B0 /* Directed Priv. Doorbell Exc. State */
#define SPRN_EAR 0x11A /* External Address Register */
#define SPRN_HASH1 0x3D2 /* Primary Hash Address Register */
-#define SPRN_HASH2 0x3D3 /* Secondary Hash Address Resgister */
+#define SPRN_HASH2 0x3D3 /* Secondary Hash Address Register */
#define SPRN_HID0 0x3F0 /* Hardware Implementation Register 0 */
#define HID0_HDICE_SH (63 - 23) /* 970 HDEC interrupt enable */
#define HID0_EMCP (1<<31) /* Enable Machine Check pin */
@@ -514,7 +522,7 @@
#define ICTRL_EICP 0x00000100 /* enable icache par. check */
#define SPRN_IMISS 0x3D4 /* Instruction TLB Miss Register */
#define SPRN_IMMR 0x27E /* Internal Memory Map Register */
-#define SPRN_L2CR 0x3F9 /* Level 2 Cache Control Regsiter */
+#define SPRN_L2CR 0x3F9 /* Level 2 Cache Control Register */
#define SPRN_L2CR2 0x3f8
#define L2CR_L2E 0x80000000 /* L2 enable */
#define L2CR_L2PE 0x40000000 /* L2 parity enable */
@@ -549,7 +557,7 @@
#define L2CR_L2DO_745x 0x00010000 /* L2 data only (745x) */
#define L2CR_L2REP_745x 0x00001000 /* L2 repl. algorithm (745x) */
#define L2CR_L2HWF_745x 0x00000800 /* L2 hardware flush (745x) */
-#define SPRN_L3CR 0x3FA /* Level 3 Cache Control Regsiter */
+#define SPRN_L3CR 0x3FA /* Level 3 Cache Control Register */
#define L3CR_L3E 0x80000000 /* L3 enable */
#define L3CR_L3PE 0x40000000 /* L3 data parity enable */
#define L3CR_L3APE 0x20000000 /* L3 addr parity enable */
@@ -1211,9 +1219,11 @@ static inline void mtmsr_isync(unsigned long val)
#define mfspr(rn) ({unsigned long rval; \
asm volatile("mfspr %0," __stringify(rn) \
: "=r" (rval)); rval;})
+#ifndef mtspr
#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : \
: "r" ((unsigned long)(v)) \
: "memory")
+#endif
extern void msr_check_and_set(unsigned long bits);
extern bool strict_msr_control;
diff --git a/arch/powerpc/include/asm/reg_8xx.h b/arch/powerpc/include/asm/reg_8xx.h
index e8ea346b21d3..94d01f81e668 100644
--- a/arch/powerpc/include/asm/reg_8xx.h
+++ b/arch/powerpc/include/asm/reg_8xx.h
@@ -4,6 +4,8 @@
#ifndef _ASM_POWERPC_REG_8xx_H
#define _ASM_POWERPC_REG_8xx_H
+#include <asm/mmu-8xx.h>
+
/* Cache control on the MPC8xx is provided through some additional
* special purpose registers.
*/
@@ -14,6 +16,15 @@
#define SPRN_DC_ADR 569 /* Address needed for some commands */
#define SPRN_DC_DAT 570 /* Read-only data register */
+/* Misc Debug */
+#define SPRN_DPDR 630
+#define SPRN_MI_CAM 816
+#define SPRN_MI_RAM0 817
+#define SPRN_MI_RAM1 818
+#define SPRN_MD_CAM 824
+#define SPRN_MD_RAM0 825
+#define SPRN_MD_RAM1 826
+
/* Commands. Only the first few are available to the instruction cache.
*/
#define IDC_ENABLE 0x02000000 /* Cache enable */
@@ -39,4 +50,86 @@
#define DC_DFWT 0x40000000 /* Data cache is forced write through */
#define DC_LES 0x20000000 /* Caches are little endian mode */
+#ifdef CONFIG_8xx_CPU6
+#define do_mtspr_cpu6(rn, rn_addr, v) \
+ do { \
+ int _reg_cpu6 = rn_addr, _tmp_cpu6; \
+ asm volatile("stw %0, %1;" \
+ "lwz %0, %1;" \
+ "mtspr " __stringify(rn) ",%2" : \
+ : "r" (_reg_cpu6), "m"(_tmp_cpu6), \
+ "r" ((unsigned long)(v)) \
+ : "memory"); \
+ } while (0)
+
+#define do_mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : \
+ : "r" ((unsigned long)(v)) \
+ : "memory")
+#define mtspr(rn, v) \
+ do { \
+ if (rn == SPRN_IMMR) \
+ do_mtspr_cpu6(rn, 0x3d30, v); \
+ else if (rn == SPRN_IC_CST) \
+ do_mtspr_cpu6(rn, 0x2110, v); \
+ else if (rn == SPRN_IC_ADR) \
+ do_mtspr_cpu6(rn, 0x2310, v); \
+ else if (rn == SPRN_IC_DAT) \
+ do_mtspr_cpu6(rn, 0x2510, v); \
+ else if (rn == SPRN_DC_CST) \
+ do_mtspr_cpu6(rn, 0x3110, v); \
+ else if (rn == SPRN_DC_ADR) \
+ do_mtspr_cpu6(rn, 0x3310, v); \
+ else if (rn == SPRN_DC_DAT) \
+ do_mtspr_cpu6(rn, 0x3510, v); \
+ else if (rn == SPRN_MI_CTR) \
+ do_mtspr_cpu6(rn, 0x2180, v); \
+ else if (rn == SPRN_MI_AP) \
+ do_mtspr_cpu6(rn, 0x2580, v); \
+ else if (rn == SPRN_MI_EPN) \
+ do_mtspr_cpu6(rn, 0x2780, v); \
+ else if (rn == SPRN_MI_TWC) \
+ do_mtspr_cpu6(rn, 0x2b80, v); \
+ else if (rn == SPRN_MI_RPN) \
+ do_mtspr_cpu6(rn, 0x2d80, v); \
+ else if (rn == SPRN_MI_CAM) \
+ do_mtspr_cpu6(rn, 0x2190, v); \
+ else if (rn == SPRN_MI_RAM0) \
+ do_mtspr_cpu6(rn, 0x2390, v); \
+ else if (rn == SPRN_MI_RAM1) \
+ do_mtspr_cpu6(rn, 0x2590, v); \
+ else if (rn == SPRN_MD_CTR) \
+ do_mtspr_cpu6(rn, 0x3180, v); \
+ else if (rn == SPRN_M_CASID) \
+ do_mtspr_cpu6(rn, 0x3380, v); \
+ else if (rn == SPRN_MD_AP) \
+ do_mtspr_cpu6(rn, 0x3580, v); \
+ else if (rn == SPRN_MD_EPN) \
+ do_mtspr_cpu6(rn, 0x3780, v); \
+ else if (rn == SPRN_M_TWB) \
+ do_mtspr_cpu6(rn, 0x3980, v); \
+ else if (rn == SPRN_MD_TWC) \
+ do_mtspr_cpu6(rn, 0x3b80, v); \
+ else if (rn == SPRN_MD_RPN) \
+ do_mtspr_cpu6(rn, 0x3d80, v); \
+ else if (rn == SPRN_M_TW) \
+ do_mtspr_cpu6(rn, 0x3f80, v); \
+ else if (rn == SPRN_MD_CAM) \
+ do_mtspr_cpu6(rn, 0x3190, v); \
+ else if (rn == SPRN_MD_RAM0) \
+ do_mtspr_cpu6(rn, 0x3390, v); \
+ else if (rn == SPRN_MD_RAM1) \
+ do_mtspr_cpu6(rn, 0x3590, v); \
+ else if (rn == SPRN_DEC) \
+ do_mtspr_cpu6(rn, 0x2c00, v); \
+ else if (rn == SPRN_TBWL) \
+ do_mtspr_cpu6(rn, 0x3880, v); \
+ else if (rn == SPRN_TBWU) \
+ do_mtspr_cpu6(rn, 0x3a80, v); \
+ else if (rn == SPRN_DPDR) \
+ do_mtspr_cpu6(rn, 0x2d30, v); \
+ else \
+ do_mtspr(rn, v); \
+ } while (0)
+#endif
+
#endif /* _ASM_POWERPC_REG_8xx_H */
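The point of the table above is that callers keep using the generic mtspr() accessor while the CPU6 erratum workaround (a dummy store/load to a per-SPR "special" address before the real mtspr) is applied transparently; the #ifndef mtspr guard added to reg.h is what makes this override possible. A hypothetical caller:

static inline void example_set_md_rpn(unsigned long rpn)
{
	/*
	 * With CONFIG_8xx_CPU6 this expands to
	 * do_mtspr_cpu6(SPRN_MD_RPN, 0x3d80, rpn); without it,
	 * it is the plain mtspr() from reg.h.
	 */
	mtspr(SPRN_MD_RPN, rpn);
}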
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 2fef74b474f0..737e012ef56e 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -681,7 +681,7 @@
#define SPRN_CDBCR 0x3D7 /* Cache Debug Control Register */
#define SPRN_TBHI 0x3DC /* Time Base High */
#define SPRN_TBLO 0x3DD /* Time Base Low */
-#define SPRN_DBCR 0x3F2 /* Debug Control Regsiter */
+#define SPRN_DBCR 0x3F2 /* Debug Control Register */
#define SPRN_PBL1 0x3FC /* Protection Bound Lower 1 */
#define SPRN_PBL2 0x3FE /* Protection Bound Lower 2 */
#define SPRN_PBU1 0x3FD /* Protection Bound Upper 1 */
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index a5e930aca804..abf5866e08c6 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -22,6 +22,18 @@ static inline int in_kernel_text(unsigned long addr)
return 0;
}
+static inline unsigned long kernel_toc_addr(void)
+{
+ /* Defined by the linker, see vmlinux.lds.S */
+ extern unsigned long __toc_start;
+
+ /*
+ * The TOC register (r2) points 32kB into the TOC, so that 64kB of
+ * the TOC can be addressed using a single machine instruction.
+ */
+ return (unsigned long)(&__toc_start) + 0x8000UL;
+}
+
static inline int overlaps_interrupt_vector_text(unsigned long start,
unsigned long end)
{
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 78083ed20792..e1afd4c4f695 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -67,6 +67,9 @@ void generic_cpu_die(unsigned int cpu);
void generic_set_cpu_dead(unsigned int cpu);
void generic_set_cpu_up(unsigned int cpu);
int generic_check_cpu_restart(unsigned int cpu);
+int is_cpu_dead(unsigned int cpu);
+#else
+#define generic_set_cpu_up(i) do { } while (0)
#endif
#ifdef CONFIG_PPC64
@@ -201,6 +204,7 @@ extern void generic_secondary_thread_init(void);
extern unsigned long __secondary_hold_spinloop;
extern unsigned long __secondary_hold_acknowledge;
extern char __secondary_hold;
+extern unsigned int booting_thread_hwid;
extern void __early_start(void);
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/smu.h b/arch/powerpc/include/asm/smu.h
index 37d2da6feabf..f280dd11243f 100644
--- a/arch/powerpc/include/asm/smu.h
+++ b/arch/powerpc/include/asm/smu.h
@@ -154,7 +154,7 @@
*
* The Darwin I2C driver is less subtle though. On any non-success status
* from the response command, it waits 5ms and tries again up to 20 times,
- * it doesn't differenciate between fatal errors or "busy" status.
+ * it doesn't differentiate between fatal errors or "busy" status.
*
* This driver provides an asynchronous paramblock based i2c command
* interface to be used either directly by low level code or by a higher
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 5b268b6be74c..17c8380673a6 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -28,12 +28,14 @@ extern void giveup_all(struct task_struct *);
extern void enable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void giveup_fpu(struct task_struct *);
-extern void __giveup_fpu(struct task_struct *);
+extern void save_fpu(struct task_struct *);
static inline void disable_kernel_fp(void)
{
msr_check_and_clear(MSR_FP);
}
#else
+static inline void __giveup_fpu(struct task_struct *t) { }
+static inline void save_fpu(struct task_struct *t) { }
static inline void flush_fp_to_thread(struct task_struct *t) { }
#endif
@@ -41,18 +43,19 @@ static inline void flush_fp_to_thread(struct task_struct *t) { }
extern void enable_kernel_altivec(void);
extern void flush_altivec_to_thread(struct task_struct *);
extern void giveup_altivec(struct task_struct *);
-extern void __giveup_altivec(struct task_struct *);
+extern void save_altivec(struct task_struct *);
static inline void disable_kernel_altivec(void)
{
msr_check_and_clear(MSR_VEC);
}
+#else
+static inline void save_altivec(struct task_struct *t) { }
+static inline void __giveup_altivec(struct task_struct *t) { }
#endif
#ifdef CONFIG_VSX
extern void enable_kernel_vsx(void);
extern void flush_vsx_to_thread(struct task_struct *);
-extern void giveup_vsx(struct task_struct *);
-extern void __giveup_vsx(struct task_struct *);
static inline void disable_kernel_vsx(void)
{
msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
@@ -68,6 +71,8 @@ static inline void disable_kernel_spe(void)
{
msr_check_and_clear(MSR_SPE);
}
+#else
+static inline void __giveup_spe(struct task_struct *t) { }
#endif
static inline void clear_task_ebb(struct task_struct *t)
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 2d7109a8d296..1092fdd7e737 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -31,8 +31,6 @@ extern void tick_broadcast_ipi_handler(void);
extern void generic_calibrate_decr(void);
-extern void set_dec_cpu6(unsigned int val);
-
/* Some sane defaults: 125 MHz timebase, 1GHz processor */
extern unsigned long ppc_proc_freq;
#define DEFAULT_PROC_FREQ (DEFAULT_TB_FREQ * 8)
@@ -166,14 +164,12 @@ static inline void set_dec(int val)
{
#if defined(CONFIG_40x)
mtspr(SPRN_PIT, val);
-#elif defined(CONFIG_8xx_CPU6)
- set_dec_cpu6(val - 1);
#else
#ifndef CONFIG_BOOKE
--val;
#endif
mtspr(SPRN_DEC, val);
-#endif /* not 40x or 8xx_CPU6 */
+#endif /* not 40x */
}
static inline unsigned long tb_ticks_since(unsigned long tstamp)
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index 23d351ca0303..9f77f85e3e99 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -78,97 +78,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
}
#elif defined(CONFIG_PPC_STD_MMU_64)
-
-#define MMU_NO_CONTEXT 0
-
-/*
- * TLB flushing for 64-bit hash-MMU CPUs
- */
-
-#include <linux/percpu.h>
-#include <asm/page.h>
-
-#define PPC64_TLB_BATCH_NR 192
-
-struct ppc64_tlb_batch {
- int active;
- unsigned long index;
- struct mm_struct *mm;
- real_pte_t pte[PPC64_TLB_BATCH_NR];
- unsigned long vpn[PPC64_TLB_BATCH_NR];
- unsigned int psize;
- int ssize;
-};
-DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
-
-extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
-
-#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
-
-static inline void arch_enter_lazy_mmu_mode(void)
-{
- struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
-
- batch->active = 1;
-}
-
-static inline void arch_leave_lazy_mmu_mode(void)
-{
- struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
-
- if (batch->index)
- __flush_tlb_pending(batch);
- batch->active = 0;
-}
-
-#define arch_flush_lazy_mmu_mode() do {} while (0)
-
-
-extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
- int ssize, unsigned long flags);
-extern void flush_hash_range(unsigned long number, int local);
-extern void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
- pmd_t *pmdp, unsigned int psize, int ssize,
- unsigned long flags);
-
-static inline void local_flush_tlb_mm(struct mm_struct *mm)
-{
-}
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
-}
-
-static inline void local_flush_tlb_page(struct vm_area_struct *vma,
- unsigned long vmaddr)
-{
-}
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
- unsigned long vmaddr)
-{
-}
-
-static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
- unsigned long vmaddr)
-{
-}
-
-static inline void flush_tlb_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
-{
-}
-
-static inline void flush_tlb_kernel_range(unsigned long start,
- unsigned long end)
-{
-}
-
-/* Private function for use by PCI IO mapping code */
-extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
- unsigned long end);
-extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd,
- unsigned long addr);
+#include <asm/book3s/64/tlbflush-hash.h>
#else
#error Unsupported MMU type
#endif
diff --git a/arch/powerpc/include/asm/uninorth.h b/arch/powerpc/include/asm/uninorth.h
index d12b11d7641e..a1d112979fd2 100644
--- a/arch/powerpc/include/asm/uninorth.h
+++ b/arch/powerpc/include/asm/uninorth.h
@@ -132,7 +132,7 @@
/* This one _might_ return the CPU number of the CPU reading it;
* the bootROM decides whether to boot or to sleep/spinloop depending
- * on this register beeing 0 or not
+ * on this register being 0 or not
*/
#define UNI_N_CPU_NUMBER 0x0050
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index 254604856e69..04ef3ae511da 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -1,5 +1,5 @@
/*
- * Common definitions accross all variants of ICP and ICS interrupt
+ * Common definitions across all variants of ICP and ICS interrupt
* controllers.
*/