Diffstat (limited to 'arch/powerpc/mm/hash_native_64.c')
-rw-r--r-- | arch/powerpc/mm/hash_native_64.c | 194
1 file changed, 101 insertions(+), 93 deletions(-)
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 90039bc64119..ffc1e00f7a22 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -14,10 +14,10 @@
 
 #include <linux/spinlock.h>
 #include <linux/bitops.h>
+#include <linux/of.h>
 #include <linux/threads.h>
 #include <linux/smp.h>
 
-#include <asm/abs_addr.h>
 #include <asm/machdep.h>
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
@@ -39,22 +39,35 @@
 
 DEFINE_RAW_SPINLOCK(native_tlbie_lock);
 
-static inline void __tlbie(unsigned long va, int psize, int ssize)
+static inline void __tlbie(unsigned long vpn, int psize, int ssize)
 {
+        unsigned long va;
         unsigned int penc;
 
-        /* clear top 16 bits, non SLS segment */
+        /*
+         * We need bits 14 to 65 of the va for a tlbie of a 4K page.
+         * With vpn we ignore the lower VPN_SHIFT bits already.
+         * And the top two bits are already ignored, because we can
+         * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT
+         * of 12.
+         */
+        va = vpn << VPN_SHIFT;
+        /*
+         * clear top 16 bits of 64-bit va, non-SLS segment
+         * Older versions of the architecture (2.02 and earlier) require the
+         * masking of the top 16 bits.
+         */
         va &= ~(0xffffULL << 48);
 
         switch (psize) {
         case MMU_PAGE_4K:
-                va &= ~0xffful;
                 va |= ssize << 8;
                 asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
                              : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
                              : "memory");
                 break;
         default:
+                /* We need 14 to 14 + i bits of va */
                 penc = mmu_psize_defs[psize].penc;
                 va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
                 va |= penc << 12;
@@ -67,21 +80,28 @@ static inline void __tlbie(unsigned long va, int psize, int ssize)
         }
 }
 
-static inline void __tlbiel(unsigned long va, int psize, int ssize)
+static inline void __tlbiel(unsigned long vpn, int psize, int ssize)
 {
+        unsigned long va;
         unsigned int penc;
 
-        /* clear top 16 bits, non SLS segment */
+        /* VPN_SHIFT can be at most 12 */
+        va = vpn << VPN_SHIFT;
+        /*
+         * clear top 16 bits of 64-bit va, non-SLS segment
+         * Older versions of the architecture (2.02 and earlier) require the
+         * masking of the top 16 bits.
+         */
         va &= ~(0xffffULL << 48);
 
         switch (psize) {
         case MMU_PAGE_4K:
-                va &= ~0xffful;
                 va |= ssize << 8;
                 asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
                              : : "r"(va) : "memory");
                 break;
         default:
+                /* We need 14 to 14 + i bits of va */
                 penc = mmu_psize_defs[psize].penc;
                 va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
                 va |= penc << 12;
@@ -94,7 +114,7 @@ static inline void __tlbiel(unsigned long va, int psize, int ssize)
 
 }
 
-static inline void tlbie(unsigned long va, int psize, int ssize, int local)
+static inline void tlbie(unsigned long vpn, int psize, int ssize, int local)
 {
         unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
         int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
@@ -105,10 +125,10 @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local)
                 raw_spin_lock(&native_tlbie_lock);
         asm volatile("ptesync": : :"memory");
         if (use_local) {
-                __tlbiel(va, psize, ssize);
+                __tlbiel(vpn, psize, ssize);
                 asm volatile("ptesync": : :"memory");
         } else {
-                __tlbie(va, psize, ssize);
+                __tlbie(vpn, psize, ssize);
                 asm volatile("eieio; tlbsync; ptesync": : :"memory");
         }
         if (lock_tlbie && !use_local)
@@ -134,7 +154,7 @@ static inline void native_unlock_hpte(struct hash_pte *hptep)
         clear_bit_unlock(HPTE_LOCK_BIT, word);
 }
 
-static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
+static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
                         unsigned long pa, unsigned long rflags,
                         unsigned long vflags, int psize, int ssize)
 {
@@ -143,9 +163,9 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
         int i;
 
         if (!(vflags & HPTE_V_BOLTED)) {
-                DBG_LOW("    insert(group=%lx, va=%016lx, pa=%016lx,"
+                DBG_LOW("    insert(group=%lx, vpn=%016lx, pa=%016lx,"
                         " rflags=%lx, vflags=%lx, psize=%d)\n",
-                        hpte_group, va, pa, rflags, vflags, psize);
+                        hpte_group, vpn, pa, rflags, vflags, psize);
         }
 
         for (i = 0; i < HPTES_PER_GROUP; i++) {
@@ -163,7 +183,7 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
         if (i == HPTES_PER_GROUP)
                 return -1;
 
-        hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID;
+        hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
         hpte_r = hpte_encode_r(pa, psize) | rflags;
 
         if (!(vflags & HPTE_V_BOLTED)) {
@@ -225,17 +245,17 @@ static long native_hpte_remove(unsigned long hpte_group)
 }
 
 static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
-                                 unsigned long va, int psize, int ssize,
+                                 unsigned long vpn, int psize, int ssize,
                                  int local)
 {
         struct hash_pte *hptep = htab_address + slot;
         unsigned long hpte_v, want_v;
         int ret = 0;
 
-        want_v = hpte_encode_v(va, psize, ssize);
+        want_v = hpte_encode_v(vpn, psize, ssize);
 
-        DBG_LOW("    update(va=%016lx, avpnv=%016lx, hash=%016lx, newpp=%x)",
-                va, want_v & HPTE_V_AVPN, slot, newpp);
+        DBG_LOW("    update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
+                vpn, want_v & HPTE_V_AVPN, slot, newpp);
 
         native_lock_hpte(hptep);
@@ -254,12 +274,12 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
         native_unlock_hpte(hptep);
 
         /* Ensure it is out of the tlb too. */
-        tlbie(va, psize, ssize, local);
+        tlbie(vpn, psize, ssize, local);
 
         return ret;
 }
 
-static long native_hpte_find(unsigned long va, int psize, int ssize)
+static long native_hpte_find(unsigned long vpn, int psize, int ssize)
 {
         struct hash_pte *hptep;
         unsigned long hash;
@@ -267,8 +287,8 @@ static long native_hpte_find(unsigned long va, int psize, int ssize)
         long slot;
         unsigned long want_v, hpte_v;
 
-        hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize);
-        want_v = hpte_encode_v(va, psize, ssize);
+        hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
+        want_v = hpte_encode_v(vpn, psize, ssize);
 
         /* Bolted mappings are only ever in the primary group */
         slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
@@ -295,14 +315,15 @@ static long native_hpte_find(unsigned long va, int psize, int ssize)
 static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
                                        int psize, int ssize)
 {
-        unsigned long vsid, va;
+        unsigned long vpn;
+        unsigned long vsid;
         long slot;
         struct hash_pte *hptep;
 
         vsid = get_kernel_vsid(ea, ssize);
-        va = hpt_va(ea, vsid, ssize);
+        vpn = hpt_vpn(ea, vsid, ssize);
 
-        slot = native_hpte_find(va, psize, ssize);
+        slot = native_hpte_find(vpn, psize, ssize);
         if (slot == -1)
                 panic("could not find page to bolt\n");
         hptep = htab_address + slot;
@@ -312,10 +333,10 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
                 (newpp & (HPTE_R_PP | HPTE_R_N));
 
         /* Ensure it is out of the tlb too. */
-        tlbie(va, psize, ssize, 0);
+        tlbie(vpn, psize, ssize, 0);
 }
 
-static void native_hpte_invalidate(unsigned long slot, unsigned long va,
+static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
                                    int psize, int ssize, int local)
 {
         struct hash_pte *hptep = htab_address + slot;
@@ -325,9 +346,9 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
 
         local_irq_save(flags);
 
-        DBG_LOW("    invalidate(va=%016lx, hash: %x)\n", va, slot);
+        DBG_LOW("    invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);
 
-        want_v = hpte_encode_v(va, psize, ssize);
+        want_v = hpte_encode_v(vpn, psize, ssize);
         native_lock_hpte(hptep);
         hpte_v = hptep->v;
@@ -339,7 +360,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
                 hptep->v = 0;
 
         /* Invalidate the TLB */
-        tlbie(va, psize, ssize, local);
+        tlbie(vpn, psize, ssize, local);
 
         local_irq_restore(flags);
 }
@@ -349,11 +370,12 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
 #define LP_MASK(i)      ((0xFF >> (i)) << LP_SHIFT)
 
 static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
-                        int *psize, int *ssize, unsigned long *va)
+                        int *psize, int *ssize, unsigned long *vpn)
 {
+        unsigned long avpn, pteg, vpi;
         unsigned long hpte_r = hpte->r;
         unsigned long hpte_v = hpte->v;
-        unsigned long avpn;
+        unsigned long vsid, seg_off;
         int i, size, shift, penc;
 
         if (!(hpte_v & HPTE_V_LARGE))
@@ -380,32 +402,40 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
         }
 
         /* This works for all page sizes, and for 256M and 1T segments */
+        *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
         shift = mmu_psize_defs[size].shift;
-        avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm) << 23;
-
-        if (shift < 23) {
-                unsigned long vpi, vsid, pteg;
-                pteg = slot / HPTES_PER_GROUP;
-                if (hpte_v & HPTE_V_SECONDARY)
-                        pteg = ~pteg;
-                switch (hpte_v >> HPTE_V_SSIZE_SHIFT) {
-                case MMU_SEGSIZE_256M:
-                        vpi = ((avpn >> 28) ^ pteg) & htab_hash_mask;
-                        break;
-                case MMU_SEGSIZE_1T:
-                        vsid = avpn >> 40;
+        avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
+        pteg = slot / HPTES_PER_GROUP;
+        if (hpte_v & HPTE_V_SECONDARY)
+                pteg = ~pteg;
+
+        switch (*ssize) {
+        case MMU_SEGSIZE_256M:
+                /* We only have 28 - 23 bits of seg_off in avpn */
+                seg_off = (avpn & 0x1f) << 23;
+                vsid = avpn >> 5;
+                /* We can find more bits from the pteg value */
+                if (shift < 23) {
+                        vpi = (vsid ^ pteg) & htab_hash_mask;
+                        seg_off |= vpi << shift;
+                }
+                *vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
+                break;
+        case MMU_SEGSIZE_1T:
+                /* We only have 40 - 23 bits of seg_off in avpn */
+                seg_off = (avpn & 0x1ffff) << 23;
+                vsid = avpn >> 17;
+                if (shift < 23) {
                         vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
-                        break;
-                default:
-                        avpn = vpi = size = 0;
+                        seg_off |= vpi << shift;
                 }
-                avpn |= (vpi << mmu_psize_defs[size].shift);
+                *vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
+                break;
+        default:
+                *vpn = size = 0;
         }
-
-        *va = avpn;
         *psize = size;
-        *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
 }
 
 /*
@@ -418,9 +446,10 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
  */
 static void native_hpte_clear(void)
 {
+        unsigned long vpn = 0;
         unsigned long slot, slots, flags;
         struct hash_pte *hptep = htab_address;
-        unsigned long hpte_v, va;
+        unsigned long hpte_v;
         unsigned long pteg_count;
         int psize, ssize;
@@ -448,9 +477,9 @@ static void native_hpte_clear(void)
                  * already hold the native_tlbie_lock.
                  */
                 if (hpte_v & HPTE_V_VALID) {
-                        hpte_decode(hptep, slot, &psize, &ssize, &va);
+                        hpte_decode(hptep, slot, &psize, &ssize, &vpn);
                         hptep->v = 0;
-                        __tlbie(va, psize, ssize);
+                        __tlbie(vpn, psize, ssize);
                 }
         }
@@ -465,7 +494,8 @@
  */
 static void native_flush_hash_range(unsigned long number, int local)
 {
-        unsigned long va, hash, index, hidx, shift, slot;
+        unsigned long vpn;
+        unsigned long hash, index, hidx, shift, slot;
         struct hash_pte *hptep;
         unsigned long hpte_v;
         unsigned long want_v;
@@ -479,18 +509,18 @@ static void native_flush_hash_range(unsigned long number, int local)
         local_irq_save(flags);
 
         for (i = 0; i < number; i++) {
-                va = batch->vaddr[i];
+                vpn = batch->vpn[i];
                 pte = batch->pte[i];
 
-                pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
-                        hash = hpt_hash(va, shift, ssize);
+                pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
+                        hash = hpt_hash(vpn, shift, ssize);
                         hidx = __rpte_to_hidx(pte, index);
                         if (hidx & _PTEIDX_SECONDARY)
                                 hash = ~hash;
                         slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                         slot += hidx & _PTEIDX_GROUP_IX;
                         hptep = htab_address + slot;
-                        want_v = hpte_encode_v(va, psize, ssize);
+                        want_v = hpte_encode_v(vpn, psize, ssize);
                         native_lock_hpte(hptep);
                         hpte_v = hptep->v;
                         if (!HPTE_V_COMPARE(hpte_v, want_v) ||
@@ -505,12 +535,12 @@ static void native_flush_hash_range(unsigned long number, int local)
             mmu_psize_defs[psize].tlbiel && local) {
                 asm volatile("ptesync":::"memory");
                 for (i = 0; i < number; i++) {
-                        va = batch->vaddr[i];
+                        vpn = batch->vpn[i];
                         pte = batch->pte[i];
 
-                        pte_iterate_hashed_subpages(pte, psize, va, index,
-                                                    shift) {
-                                __tlbiel(va, psize, ssize);
+                        pte_iterate_hashed_subpages(pte, psize,
+                                                    vpn, index, shift) {
+                                __tlbiel(vpn, psize, ssize);
                         } pte_iterate_hashed_end();
                 }
                 asm volatile("ptesync":::"memory");
@@ -522,12 +552,12 @@ static void native_flush_hash_range(unsigned long number, int local)
                 asm volatile("ptesync":::"memory");
 
                 for (i = 0; i < number; i++) {
-                        va = batch->vaddr[i];
+                        vpn = batch->vpn[i];
                         pte = batch->pte[i];
 
-                        pte_iterate_hashed_subpages(pte, psize, va, index,
-                                                    shift) {
-                                __tlbie(va, psize, ssize);
+                        pte_iterate_hashed_subpages(pte, psize,
+                                                    vpn, index, shift) {
+                                __tlbie(vpn, psize, ssize);
                         } pte_iterate_hashed_end();
                 }
                 asm volatile("eieio; tlbsync; ptesync":::"memory");
@@ -539,29 +569,6 @@ static void native_flush_hash_range(unsigned long number, int local)
 
         local_irq_restore(flags);
 }
 
-#ifdef CONFIG_PPC_PSERIES
-/* Disable TLB batching on nighthawk */
-static inline int tlb_batching_enabled(void)
-{
-        struct device_node *root = of_find_node_by_path("/");
-        int enabled = 1;
-
-        if (root) {
-                const char *model = of_get_property(root, "model", NULL);
-                if (model && !strcmp(model, "IBM,9076-N81"))
-                        enabled = 0;
-                of_node_put(root);
-        }
-
-        return enabled;
-}
-#else
-static inline int tlb_batching_enabled(void)
-{
-        return 1;
-}
-#endif
-
 void __init hpte_init_native(void)
 {
         ppc_md.hpte_invalidate  = native_hpte_invalidate;
@@ -570,6 +577,5 @@ void __init hpte_init_native(void)
         ppc_md.hpte_insert      = native_hpte_insert;
         ppc_md.hpte_remove      = native_hpte_remove;
         ppc_md.hpte_clear_all   = native_hpte_clear;
-        if (tlb_batching_enabled())
-                ppc_md.flush_hash_range = native_flush_hash_range;
+        ppc_md.flush_hash_range = native_flush_hash_range;
 }
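
The heart of the patch is that the flush paths now carry a virtual page number (vpn) instead of a raw virtual address, and __tlbie()/__tlbiel() recover the address bits they need with va = vpn << VPN_SHIFT. A minimal standalone sketch of that round trip, assuming the kernel's VPN_SHIFT of 12 and the 256M-segment SID_SHIFT of 28; example_vpn() is a hypothetical stand-in for the kernel's hpt_vpn():

#include <stdint.h>
#include <stdio.h>

#define VPN_SHIFT 12  /* matches the kernel's VPN_SHIFT */
#define SID_SHIFT 28  /* 256M segments */

/* Hypothetical stand-in for hpt_vpn(): build a vpn from a vsid and the
 * byte offset within the segment. Everything below VPN_SHIFT is dropped,
 * which is why a 64-bit vpn covers 64 + 12 = 76 bits of virtual address,
 * as the new comment in __tlbie() notes. */
static uint64_t example_vpn(uint64_t vsid, uint64_t seg_off)
{
        return (vsid << (SID_SHIFT - VPN_SHIFT)) | (seg_off >> VPN_SHIFT);
}

int main(void)
{
        uint64_t vpn = example_vpn(0x123456, 0xabc000);
        uint64_t va  = vpn << VPN_SHIFT;  /* what __tlbie() reconstructs */

        printf("vpn=%#jx va=%#jx\n", (uintmax_t)vpn, (uintmax_t)va);
        return 0;
}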
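
The deleted va &= ~0xffful in the MMU_PAGE_4K cases follows directly from that: the old code had to clear the low 12 bits before ORing the segment size into bits 8-9, but a va rebuilt from vpn << VPN_SHIFT already has its low VPN_SHIFT bits clear. A small check of the invariant, again assuming VPN_SHIFT = 12 and the usual MMU_SEGSIZE_1T = 1 encoding:

#include <assert.h>
#include <stdint.h>

#define VPN_SHIFT 12

int main(void)
{
        uint64_t vpn = 0x1234560abcULL;  /* arbitrary example vpn */
        uint64_t va  = vpn << VPN_SHIFT;
        int ssize = 1;                   /* MMU_SEGSIZE_1T */

        /* The low 12 bits are zero by construction ... */
        assert((va & 0xfffULL) == 0);

        /* ... so the ssize field can be ORed into bits 8-9 directly,
         * without the old "va &= ~0xffful" masking step. */
        va |= (uint64_t)ssize << 8;
        assert((va & 0xfffULL) == 0x100ULL);
        return 0;
}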
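
The hpte_decode() rewrite is the subtle part: the AVPN field of a hash PTE only stores virtual-address bits above bit 23, so for page sizes below 8M (shift < 23) the missing low bits of the segment offset have to be recomputed from the PTEG index, using the fact that the primary hash is essentially vsid XOR the page index within the segment. A hedged sketch of just the 256M-segment arm, with an illustrative htab_hash_mask (the real mask depends on the hash table size):

#include <stdint.h>

#define VPN_SHIFT 12
#define SID_SHIFT 28

/* Illustrative only; the kernel derives this from the hash table size. */
static const uint64_t htab_hash_mask = (1ULL << 27) - 1;

/* Mirrors the MMU_SEGSIZE_256M case above: avpn is the AVPN field with
 * the page-size bits already masked off, pteg the (possibly complemented)
 * PTEG index, shift the page-size shift. */
uint64_t decode_vpn_256m(uint64_t avpn, uint64_t pteg, int shift)
{
        uint64_t seg_off = (avpn & 0x1f) << 23;  /* top 5 bits of seg_off */
        uint64_t vsid    = avpn >> 5;            /* the rest is the vsid */

        if (shift < 23) {
                /* hash = vsid ^ (seg_off >> shift), so XORing the PTEG
                 * index with the vsid recovers the page-index bits that
                 * the AVPN could not keep. */
                uint64_t vpi = (vsid ^ pteg) & htab_hash_mask;
                seg_off |= vpi << shift;
        }
        return (vsid << (SID_SHIFT - VPN_SHIFT)) | (seg_off >> VPN_SHIFT);
}

The 1T arm is the same pattern with a 17-bit seg_off split (avpn & 0x1ffff, vsid = avpn >> 17) and the extra vsid << 25 term folded into the hash.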