author	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>	2018-03-26 13:04:48 +0300
committer	Michael Ellerman <mpe@ellerman.id.au>	2018-03-30 16:10:38 +0300
commit	f384796c40dc55b3dba25e0ee9c1afd98c6d24d1 (patch)
tree	400e1127554c05abff89a9d09bac2b40f844eba7 /arch/powerpc/mm
parent	0dea04b288c06654b0de3563c5b8cdfe6130e450 (diff)
powerpc/mm: Add support for handling > 512TB address in SLB miss
For addresses above 512TB we allocate additional mmu contexts. To make
it all easy, addresses above 512TB are handled with IR/DR=1 and with
stack frame setup.

The mmu_context_t is also updated to track the new extended_ids. To
support up to 4PB we need a total of 8 contexts.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
[mpe: Minor formatting tweaks and comment wording, switch BUG to WARN
      in get_ea_context().]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
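The helpers this commit relies on, get_ea_context() and alloc_extended_context(), live in headers outside arch/powerpc/mm and therefore do not appear in the diffstat below. As a hedged illustration of the idea described above (one context id per 512TB chunk of user effective address space, eight slots to cover 4PB), a simplified sketch of such a lookup might look like the following; names are prefixed example_ and the constants are assumed, this is not the kernel source:

/* Hypothetical sketch, not the kernel source. */
#define EXAMPLE_EA_BITS_PER_CONTEXT	49	/* assumed: 2^49 bytes = 512TB per context */
#define EXAMPLE_NR_CONTEXTS		8	/* assumed: 8 x 512TB = 4PB */

struct example_mm_context {
	/* slot 0 is the primary context id; slots 1..7 cover 512TB..4PB */
	unsigned int extended_id[EXAMPLE_NR_CONTEXTS];
};

/* Return the context id covering 'ea', or 0 if none has been allocated yet. */
static unsigned int example_get_ea_context(struct example_mm_context *ctx,
					   unsigned long ea)
{
	unsigned long index = ea >> EXAMPLE_EA_BITS_PER_CONTEXT;

	if (index >= EXAMPLE_NR_CONTEXTS)
		return 0;
	return ctx->extended_id[index];
}

With that model in mind, the diff reads naturally: slb_miss_large_addr() looks the context up at SLB miss time, and slice_get_unmapped_area() makes sure it exists before handing out a large address.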
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--	arch/powerpc/mm/copro_fault.c	2
-rw-r--r--	arch/powerpc/mm/hash_utils_64.c	4
-rw-r--r--	arch/powerpc/mm/mmu_context_book3s64.c	15
-rw-r--r--	arch/powerpc/mm/pgtable-hash64.c	2
-rw-r--r--	arch/powerpc/mm/slb.c	108
-rw-r--r--	arch/powerpc/mm/slb_low.S	11
-rw-r--r--	arch/powerpc/mm/slice.c	15
-rw-r--r--	arch/powerpc/mm/tlb_hash64.c	2
8 files changed, 149 insertions, 10 deletions
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index 697b70ad1195..7d0945bd3a61 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -112,7 +112,7 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
return 1;
psize = get_slice_psize(mm, ea);
ssize = user_segment_size(ea);
- vsid = get_vsid(mm->context.id, ea, ssize);
+ vsid = get_user_vsid(&mm->context, ea, ssize);
vsidkey = SLB_VSID_USER;
break;
case VMALLOC_REGION_ID:
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 17fc13cab8dc..4180b89b8922 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1267,7 +1267,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
}
psize = get_slice_psize(mm, ea);
ssize = user_segment_size(ea);
- vsid = get_vsid(mm->context.id, ea, ssize);
+ vsid = get_user_vsid(&mm->context, ea, ssize);
break;
case VMALLOC_REGION_ID:
vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
@@ -1532,7 +1532,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
/* Get VSID */
ssize = user_segment_size(ea);
- vsid = get_vsid(mm->context.id, ea, ssize);
+ vsid = get_user_vsid(&mm->context, ea, ssize);
if (!vsid)
return;
/*
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index 422be81bf69f..b75194dff64c 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -179,6 +179,19 @@ void __destroy_context(int context_id)
}
EXPORT_SYMBOL_GPL(__destroy_context);
+static void destroy_contexts(mm_context_t *ctx)
+{
+ int index, context_id;
+
+ spin_lock(&mmu_context_lock);
+ for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
+ context_id = ctx->extended_id[index];
+ if (context_id)
+ ida_remove(&mmu_context_ida, context_id);
+ }
+ spin_unlock(&mmu_context_lock);
+}
+
#ifdef CONFIG_PPC_64K_PAGES
static void destroy_pagetable_page(struct mm_struct *mm)
{
@@ -217,7 +230,7 @@ void destroy_context(struct mm_struct *mm)
else
subpage_prot_free(mm);
destroy_pagetable_page(mm);
- __destroy_context(mm->context.id);
+ destroy_contexts(&mm->context);
mm->context.id = MMU_NO_CONTEXT;
}
diff --git a/arch/powerpc/mm/pgtable-hash64.c b/arch/powerpc/mm/pgtable-hash64.c
index 469808e77e58..a87b18cf6749 100644
--- a/arch/powerpc/mm/pgtable-hash64.c
+++ b/arch/powerpc/mm/pgtable-hash64.c
@@ -320,7 +320,7 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
if (!is_kernel_addr(addr)) {
ssize = user_segment_size(addr);
- vsid = get_vsid(mm->context.id, addr, ssize);
+ vsid = get_user_vsid(&mm->context, addr, ssize);
WARN_ON(vsid == 0);
} else {
vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 13cfe413b40d..66577cc66dc9 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -22,6 +22,7 @@
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <linux/compiler.h>
+#include <linux/context_tracking.h>
#include <linux/mm_types.h>
#include <asm/udbg.h>
@@ -340,3 +341,110 @@ void slb_initialize(void)
asm volatile("isync":::"memory");
}
+
+static void insert_slb_entry(unsigned long vsid, unsigned long ea,
+ int bpsize, int ssize)
+{
+ unsigned long flags, vsid_data, esid_data;
+ enum slb_index index;
+ int slb_cache_index;
+
+ /*
+ * We are irq disabled, so it is safe to access the PACA.
+ */
+ index = get_paca()->stab_rr;
+
+ /*
+ * Simple round-robin replacement of SLB entries, starting at SLB_NUM_BOLTED.
+ */
+ if (index < (mmu_slb_size - 1))
+ index++;
+ else
+ index = SLB_NUM_BOLTED;
+
+ get_paca()->stab_rr = index;
+
+ flags = SLB_VSID_USER | mmu_psize_defs[bpsize].sllp;
+ vsid_data = (vsid << slb_vsid_shift(ssize)) | flags |
+ ((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
+ esid_data = mk_esid_data(ea, ssize, index);
+
+ asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data)
+ : "memory");
+
+ /*
+ * Now update slb cache entries
+ */
+ slb_cache_index = get_paca()->slb_cache_ptr;
+ if (slb_cache_index < SLB_CACHE_ENTRIES) {
+ /*
+ * We have space in the slb cache for the optimized switch_slb().
+ * Cache the top 36 bits of esid_data, as the ISA specifies.
+ */
+ get_paca()->slb_cache[slb_cache_index++] = esid_data >> 28;
+ get_paca()->slb_cache_ptr++;
+ } else {
+ /*
+ * Our cache is full and its current content no longer reflects
+ * the active SLB contents. Bump the ptr so that switch_slb()
+ * will ignore the cache.
+ */
+ get_paca()->slb_cache_ptr = SLB_CACHE_ENTRIES + 1;
+ }
+}
+
+static void handle_multi_context_slb_miss(int context_id, unsigned long ea)
+{
+ struct mm_struct *mm = current->mm;
+ unsigned long vsid;
+ int bpsize;
+
+ /*
+ * We are always above 1TB, hence use high user segment size.
+ */
+ vsid = get_vsid(context_id, ea, mmu_highuser_ssize);
+ bpsize = get_slice_psize(mm, ea);
+ insert_slb_entry(vsid, ea, bpsize, mmu_highuser_ssize);
+}
+
+void slb_miss_large_addr(struct pt_regs *regs)
+{
+ enum ctx_state prev_state = exception_enter();
+ unsigned long ea = regs->dar;
+ int context;
+
+ if (REGION_ID(ea) != USER_REGION_ID)
+ goto slb_bad_addr;
+
+ /*
+ * Are we beyond what the page table layout supports?
+ */
+ if ((ea & ~REGION_MASK) >= H_PGTABLE_RANGE)
+ goto slb_bad_addr;
+
+ /* Lower address should have been handled by asm code */
+ if (ea < (1UL << MAX_EA_BITS_PER_CONTEXT))
+ goto slb_bad_addr;
+
+ /*
+ * Consider this a bad access if we take an SLB miss on an
+ * address above the addr limit.
+ */
+ if (ea >= current->mm->context.slb_addr_limit)
+ goto slb_bad_addr;
+
+ context = get_ea_context(&current->mm->context, ea);
+ if (!context)
+ goto slb_bad_addr;
+
+ handle_multi_context_slb_miss(context, ea);
+ exception_exit(prev_state);
+ return;
+
+slb_bad_addr:
+ if (user_mode(regs))
+ _exception(SIGSEGV, regs, SEGV_BNDERR, ea);
+ else
+ bad_page_fault(regs, ea, SIGSEGV);
+ exception_exit(prev_state);
+}
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 2c7c717fd2ea..a83fbd2a4a24 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -75,10 +75,15 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_68_BIT_VA)
*/
_GLOBAL(slb_allocate)
/*
- * check for bad kernel/user address
- * (ea & ~REGION_MASK) >= PGTABLE_RANGE
+ * Check if the address falls within the range of the first context, or
+ * if we may need to handle multiple contexts. For the first context we
+ * allocate the slb entry via the fast path below. For large addresses we
+ * branch out to C code and see if additional contexts have been
+ * allocated.
+ * The test here is:
+ * (ea & ~REGION_MASK) >= (1ull << MAX_EA_BITS_PER_CONTEXT)
*/
- rldicr. r9,r3,4,(63 - H_PGTABLE_EADDR_SIZE - 4)
+ rldicr. r9,r3,4,(63 - MAX_EA_BITS_PER_CONTEXT - 4)
bne- 8f
srdi r9,r3,60 /* get region */
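For readers less familiar with the rldicr form, the test described in the comment above can be restated in C. This is a hypothetical restatement for illustration only, not code from this commit; the constant values are assumed (a REGION_MASK covering the top 4 region bits, a 512TB first context):

/* Hypothetical C restatement of the rldicr range test above. */
#define EXAMPLE_REGION_MASK		(0xfUL << 60)	/* assumed: top 4 region bits */
#define EXAMPLE_EA_BITS_PER_CONTEXT	49		/* assumed: 512TB first context */

static int example_needs_slow_path(unsigned long ea)
{
	/* Strip the region bits, keep the offset within the region. */
	unsigned long offset = ea & ~EXAMPLE_REGION_MASK;

	/* Non-zero when the address lies beyond the first 512TB context,
	 * i.e. the asm fast path cannot handle it and we branch to C. */
	return offset >= (1UL << EXAMPLE_EA_BITS_PER_CONTEXT);
}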
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 09ac1a709d0c..9cd87d11fe4e 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -648,6 +648,15 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
slice_print_mask(" mask", &potential_mask);
convert:
+ /*
+ * Try to allocate the context before we do slice convert
+ * so that we handle the context allocation failure gracefully.
+ */
+ if (need_extra_context(mm, newaddr)) {
+ if (alloc_extended_context(mm, newaddr) < 0)
+ return -ENOMEM;
+ }
+
slice_andnot_mask(&potential_mask, &potential_mask, &good_mask);
if (compat_maskp && !fixed)
slice_andnot_mask(&potential_mask, &potential_mask, compat_maskp);
@@ -658,10 +667,14 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
if (psize > MMU_PAGE_BASE)
on_each_cpu(slice_flush_segments, mm, 1);
}
+ return newaddr;
return_addr:
+ if (need_extra_context(mm, newaddr)) {
+ if (alloc_extended_context(mm, newaddr) < 0)
+ return -ENOMEM;
+ }
return newaddr;
-
}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);
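The need_extra_context() and alloc_extended_context() helpers called in the hunk above are likewise defined outside arch/powerpc/mm and are not part of this diffstat. A hypothetical sketch of the allocation step, using the same example layout as the earlier sketch and a plain counter in place of the kernel's real id allocator, could be:

/* Hypothetical sketch, not the kernel source. */
#define EXAMPLE_EA_BITS_PER_CONTEXT	49	/* assumed: 512TB per context */
#define EXAMPLE_NR_CONTEXTS		8	/* assumed: 8 x 512TB = 4PB */

struct example_mm_context {
	unsigned int extended_id[EXAMPLE_NR_CONTEXTS];
};

static unsigned int example_next_id = 2;	/* stand-in id allocator */

/* Ensure the 512TB chunk containing 'ea' has a context id.
 * Returns 0 on success, -1 if 'ea' is outside the supported range. */
static int example_alloc_extended_context(struct example_mm_context *ctx,
					  unsigned long ea)
{
	unsigned long index = ea >> EXAMPLE_EA_BITS_PER_CONTEXT;

	if (index >= EXAMPLE_NR_CONTEXTS)
		return -1;
	if (!ctx->extended_id[index])
		ctx->extended_id[index] = example_next_id++;
	return 0;
}

Allocating before the slice conversion, as the comment in the hunk notes, lets a context allocation failure surface as -ENOMEM from slice_get_unmapped_area() rather than being discovered later at SLB miss time.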
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 9b23f12e863c..87d71dd25441 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -89,7 +89,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
/* Build full vaddr */
if (!is_kernel_addr(addr)) {
ssize = user_segment_size(addr);
- vsid = get_vsid(mm->context.id, addr, ssize);
+ vsid = get_user_vsid(&mm->context, addr, ssize);
} else {
vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
ssize = mmu_kernel_ssize;