author    | Dave Kleikamp <shaggy@linux.vnet.ibm.com> | 2010-03-05 13:43:12 +0300
committer | Josh Boyer <jwboyer@linux.vnet.ibm.com>   | 2010-05-05 17:11:10 +0400
commit    | e7f75ad01d590243904c2d95ab47e6b2e9ef6dad (patch)
tree      | 454cf065417973e9c2fcd75542351c2534b9a4b9 /arch/powerpc/mm/tlb_nohash_low.S
parent    | 795033c344d88dc6aa5106d0cc358656f29bd722 (diff)
download  | linux-e7f75ad01d590243904c2d95ab47e6b2e9ef6dad.tar.xz
powerpc/47x: Base ppc476 support
This patch adds the base support for the 476 processor. The code was
primarily written by Ben Herrenschmidt and Torez Smith, but I've been
maintaining it for a while.
The goal is to have a single binary that will run on both 44x and 47x, but
we still have some details to work out. The biggest is the L1 cache line
size, which differs between the two platforms but is currently fixed by a
compile-time option.
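[Editor's note] As a rough illustration (not part of this patch), the compile-time choice looks like the usual kernel L1_CACHE_SHIFT selection; the values below are assumptions based on the 476's 128-byte and the 440's 32-byte cache lines:

```c
/*
 * Illustrative sketch only -- not code from this patch. The 476 has
 * 128-byte L1 cache lines while the 440 has 32-byte lines, so a fixed
 * compile-time constant cannot serve a single 44x+47x binary.
 */
#ifdef CONFIG_PPC_47x
#define L1_CACHE_SHIFT	7	/* 128-byte lines on 476 */
#else
#define L1_CACHE_SHIFT	5	/* 32-byte lines on 440 */
#endif
#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
```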
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Torez Smith <lnxtorez@linux.vnet.ibm.com>
Signed-off-by: Dave Kleikamp <shaggy@linux.vnet.ibm.com>
Signed-off-by: Josh Boyer <jwboyer@linux.vnet.ibm.com>
Diffstat (limited to 'arch/powerpc/mm/tlb_nohash_low.S')
-rw-r--r-- | arch/powerpc/mm/tlb_nohash_low.S | 118
1 file changed, 106 insertions(+), 12 deletions(-)
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
index bbdc5b577b85..e925cb58afd9 100644
--- a/arch/powerpc/mm/tlb_nohash_low.S
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -10,7 +10,7 @@
  *	- tlbil_va
  *	- tlbil_pid
  *	- tlbil_all
- *	- tlbivax_bcast (not yet)
+ *	- tlbivax_bcast
  *
  * Code mostly moved over from misc_32.S
  *
@@ -33,6 +33,7 @@
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/processor.h>
+#include <asm/bug.h>
 
 #if defined(CONFIG_40x)
 
@@ -65,7 +66,7 @@ _GLOBAL(__tlbil_va)
  * Nothing to do for 8xx, everything is inline
  */
 
-#elif defined(CONFIG_44x)
+#elif defined(CONFIG_44x) /* Includes 47x */
 
 /*
  * 440 implementation uses tlbsx/we for tlbil_va and a full sweep
@@ -73,7 +74,13 @@ _GLOBAL(__tlbil_va)
  */
 _GLOBAL(__tlbil_va)
 	mfspr	r5,SPRN_MMUCR
-	rlwimi	r5,r4,0,24,31			/* Set TID */
+	mfmsr	r10
+
+	/*
+	 * We write 16 bits of STID since 47x supports that much, we
+	 * will never be passed out of bounds values on 440 (hopefully)
+	 */
+	rlwimi	r5,r4,0,16,31
 
 	/* We have to run the search with interrupts disabled, otherwise
 	 * an interrupt which causes a TLB miss can clobber the MMUCR
@@ -83,24 +90,41 @@ _GLOBAL(__tlbil_va)
 	 * and restoring MMUCR, so only normal interrupts have to be
 	 * taken care of.
 	 */
-	mfmsr	r4
 	wrteei	0
 	mtspr	SPRN_MMUCR,r5
-	tlbsx.	r3, 0, r3
-	wrtee	r4
-	bne	1f
+	tlbsx.	r6,0,r3
+	bne	10f
 	sync
-	/* There are only 64 TLB entries, so r3 < 64,
-	 * which means bit 22, is clear.  Since 22 is
-	 * the V bit in the TLB_PAGEID, loading this
+BEGIN_MMU_FTR_SECTION
+	b	2f
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
+	/* On 440 There are only 64 TLB entries, so r3 < 64, which means bit
+	 * 22, is clear.  Since 22 is the V bit in the TLB_PAGEID, loading this
 	 * value will invalidate the TLB entry.
 	 */
-	tlbwe	r3, r3, PPC44x_TLB_PAGEID
+	tlbwe	r6,r6,PPC44x_TLB_PAGEID
 	isync
-1:	blr
+10:	wrtee	r10
+	blr
+2:
+#ifdef CONFIG_PPC_47x
+	oris	r7,r6,0x8000	/* specify way explicitely */
+	clrrwi	r4,r3,12	/* get an EPN for the hashing with V = 0 */
+	ori	r4,r4,PPC47x_TLBE_SIZE
+	tlbwe	r4,r7,0		/* write it */
+	isync
+	wrtee	r10
+	blr
+#else /* CONFIG_PPC_47x */
+1:	trap
+	EMIT_BUG_ENTRY	1b,__FILE__,__LINE__,0;
+#endif /* !CONFIG_PPC_47x */
 
 _GLOBAL(_tlbil_all)
 _GLOBAL(_tlbil_pid)
+BEGIN_MMU_FTR_SECTION
+	b	2f
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
 	li	r3,0
 	sync
 
@@ -115,6 +139,76 @@ _GLOBAL(_tlbil_pid)
 	isync
 	blr
 
+2:
+#ifdef CONFIG_PPC_47x
+	/* 476 variant. There's not simple way to do this, hopefully we'll
+	 * try to limit the amount of such full invalidates
+	 */
+	mfmsr	r11		/* Interrupts off */
+	wrteei	0
+	li	r3,-1		/* Current set */
+	lis	r10,tlb_47x_boltmap@h
+	ori	r10,r10,tlb_47x_boltmap@l
+	lis	r7,0x8000	/* Specify way explicitely */
+
+	b	9f		/* For each set */
+
+1:	li	r9,4		/* Number of ways */
+	li	r4,0		/* Current way */
+	li	r6,0		/* Default entry value 0 */
+	andi.	r0,r8,1		/* Check if way 0 is bolted */
+	mtctr	r9		/* Load way counter */
+	bne-	3f		/* Bolted, skip loading it */
+
+2:	/* For each way */
+	or	r5,r3,r4	/* Make way|index for tlbre */
+	rlwimi	r5,r5,16,8,15	/* Copy index into position */
+	tlbre	r6,r5,0		/* Read entry */
+3:	addis	r4,r4,0x2000	/* Next way */
+	andi.	r0,r6,PPC47x_TLB0_VALID /* Valid entry ? */
+	beq	4f		/* Nope, skip it */
+	rlwimi	r7,r5,0,1,2	/* Insert way number */
+	rlwinm	r6,r6,0,21,19	/* Clear V */
+	tlbwe	r6,r7,0		/* Write it */
+4:	bdnz	2b		/* Loop for each way */
+	srwi	r8,r8,1		/* Next boltmap bit */
+9:	cmpwi	cr1,r3,255	/* Last set done ? */
+	addi	r3,r3,1		/* Next set */
+	beq	cr1,1f		/* End of loop */
+	andi.	r0,r3,0x1f	/* Need to load a new boltmap word ? */
+	bne	1b		/* No, loop */
+	lwz	r8,0(r10)	/* Load boltmap entry */
+	addi	r10,r10,4	/* Next word */
+	b	1b		/* Then loop */
+1:	isync			/* Sync shadows */
+	wrtee	r11
+#else /* CONFIG_PPC_47x */
+1:	trap
+	EMIT_BUG_ENTRY	1b,__FILE__,__LINE__,0;
+#endif /* !CONFIG_PPC_47x */
+	blr
+
+#ifdef CONFIG_PPC_47x
+/*
+ * _tlbivax_bcast is only on 47x. We don't bother doing a runtime
+ * check though, it will blow up soon enough if we mistakenly try
+ * to use it on a 440.
+ */
+_GLOBAL(_tlbivax_bcast)
+	mfspr	r5,SPRN_MMUCR
+	mfmsr	r10
+	rlwimi	r5,r4,0,16,31
+	wrteei	0
+	mtspr	SPRN_MMUCR,r5
+/*	tlbivax	0,r3 - use .long to avoid binutils deps */
+	.long 0x7c000624 | (r3 << 11)
+	isync
+	eieio
+	tlbsync
+	sync
+	wrtee	r10
+	blr
+#endif /* CONFIG_PPC_47x */
 
 #elif defined(CONFIG_FSL_BOOKE)
 
 /*
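[Editor's note] The 47x branch of _tlbil_all/_tlbil_pid above sweeps all four ways of all 256 TLB sets, skips way 0 of any set whose tlb_47x_boltmap bit marks a bolted entry, and clears the valid bit on everything else. The C rendering below is a reading aid only, not kernel code: tlbre_47x()/tlbwe_47x() are hypothetical stand-ins for the privileged tlbre/tlbwe instructions, and the 0x800 valid bit is inferred from the `rlwinm r6,r6,0,21,19` clear-V mask.

```c
#include <stdint.h>

#define TLB_SETS   256
#define TLB_WAYS   4
#define TLB0_VALID 0x00000800u	/* V bit; inferred from the rlwinm mask */

/* Hypothetical stand-ins for the privileged tlbre/tlbwe instructions. */
extern uint32_t tlbre_47x(unsigned int set, unsigned int way);
extern void tlbwe_47x(unsigned int set, unsigned int way, uint32_t word0);

/* One bit per set: whether that set's way 0 holds a bolted entry. */
extern uint32_t tlb_47x_boltmap[TLB_SETS / 32];

static void tlbil_all_47x(void)
{
	/* The assembly runs this whole sweep with interrupts disabled. */
	for (unsigned int set = 0; set < TLB_SETS; set++) {
		int bolted = (tlb_47x_boltmap[set / 32] >> (set % 32)) & 1;

		for (unsigned int way = 0; way < TLB_WAYS; way++) {
			uint32_t word0;

			if (way == 0 && bolted)
				continue;	/* keep bolted (kernel) entries */

			word0 = tlbre_47x(set, way);
			if (word0 & TLB0_VALID)
				tlbwe_47x(set, way, word0 & ~TLB0_VALID);
		}
	}
	/* The assembly then isyncs to resynchronize the shadow TLBs. */
}
```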
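[Editor's note] _tlbivax_bcast hand-assembles the instruction with a .long because older binutils may not know the tlbivax mnemonic. The constant follows the Power ISA X-form layout: primary opcode 31 in the top six bits, extended opcode 786 in bits 21-30, and RB in bits 16-20, i.e. shifted left by 11 (r3 expands to the GPR number 3 via ppc_asm.h). A small self-checking sketch; the helper name is mine:

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Sketch: rebuild the hand-assembled tlbivax word from the patch.
 * X-form fields: opcode 31 (bits 0-5), RA (11-15), RB (16-20),
 * XO 786 (21-30) in IBM bit numbering, i.e. shifts of 26, 16, 11
 * and 1 from the least-significant bit.
 */
static uint32_t tlbivax_insn(unsigned int ra, unsigned int rb)
{
	return (31u << 26) | (ra << 16) | (rb << 11) | (786u << 1);
}

int main(void)
{
	/* tlbivax 0,r3: 0x7c000624 | (3 << 11) == 0x7c001e24 */
	printf("0x%08x\n", tlbivax_insn(0, 3));
	return 0;
}
```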