author     Anshuman Khandual <khandual@linux.vnet.ibm.com>  2015-07-29 10:09:59 +0300
committer  Michael Ellerman <mpe@ellerman.id.au>            2015-08-12 07:50:12 +0300
commit     2be682af48e8236558da702fe67e178cfe7524a1 (patch)
tree       e5e057b31f0a48e0f1f460d89bb0c09a6d38e01e /arch
parent     752b8adec4a776b4fdf01cf9443921bb3ba38779 (diff)
powerpc/slb: Rename all the 'slot' occurrences to 'entry'
The SLB code uses 'slot' and 'entry' interchangeably, change it to always use 'entry'.

Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
[mpe: Rewrite change log]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
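For context, here is a minimal standalone sketch (plain C, not kernel code) of what mk_esid_data() in the diff below composes: the ESID bits of an effective address, the SLB valid bit, and the renamed 'entry' argument, which is the SLB entry index OR-ed into the low-order bits of the value later handed to slbmte. The mask and valid-bit constants mirror the kernel's ESID_MASK, ESID_MASK_1T and SLB_ESID_V definitions, and the is_1T_segment flag stands in for the kernel's ssize/MMU_SEGSIZE_256M check; treat both as illustrative assumptions rather than the kernel's exact interface.

/*
 * Minimal standalone sketch, not kernel code: shows what mk_esid_data()
 * packs together -- the ESID bits of the effective address, the SLB
 * valid bit, and the SLB entry index in the low-order bits.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define ESID_MASK	0xfffffffff0000000UL	/* 256MB segments */
#define ESID_MASK_1T	0xffff000000000000UL	/* 1TB segments */
#define SLB_ESID_V	0x0000000008000000UL	/* entry is valid */

/* 'is_1T_segment' stands in for the kernel's ssize check (assumption). */
static uint64_t mk_esid_data(uint64_t ea, int is_1T_segment, uint64_t entry)
{
	uint64_t mask = is_1T_segment ? ESID_MASK_1T : ESID_MASK;

	/* 'entry' is the index of the SLB entry being loaded, hence the rename. */
	return (ea & mask) | SLB_ESID_V | entry;
}

int main(void)
{
	/* Hypothetical kernel linear-map address, loaded into SLB entry 0. */
	printf("RB = 0x%016" PRIx64 "\n",
	       mk_esid_data(0xc000000000000000UL, 0, 0));
	return 0;
}

Run on any host, this prints RB = 0xc000000008000000: the ESID of the address, the valid bit, and entry index 0.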
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/mm/slb.c | 7
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 62fafb300c2c..faf9f0c4e823 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -41,9 +41,9 @@ static void slb_allocate(unsigned long ea)
 	(((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T)
 
 static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
-					 unsigned long slot)
+					 unsigned long entry)
 {
-	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
+	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | entry;
 }
 
 static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
@@ -308,12 +308,11 @@ void slb_initialize(void)
 	lflags = SLB_VSID_KERNEL | linear_llp;
 	vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
-	/* Invalidate the entire SLB (even slot 0) & all the ERATS */
+	/* Invalidate the entire SLB (even entry 0) & all the ERATS */
 	asm volatile("isync":::"memory");
 	asm volatile("slbmte %0,%0"::"r" (0) : "memory");
 	asm volatile("isync; slbia; isync":::"memory");
 	create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);
-
 	create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
 
 	/* For the boot cpu, we're running on the stack in init_thread_union,