author     Benjamin Herrenschmidt <benh@kernel.crashing.org>  2008-12-18 22:13:24 +0300
committer  Paul Mackerras <paulus@samba.org>                  2008-12-21 06:21:15 +0300
commit     5e696617c425eb97bd943d781f3941fb1e8f0e5b (patch)
tree       82138fbda2e28fbe8d0e5821f218cb160230ce27 /arch/powerpc/mm/mmu_context_hash64.c
parent     6d2170be4561293a6aa821c773687bd3f18e8206 (diff)
download   linux-5e696617c425eb97bd943d781f3941fb1e8f0e5b.tar.xz
powerpc/mm: Split mmu_context handling
This splits the mmu_context handling between 32-bit hash based processors, 64-bit hash based processors and everybody else. This is preliminary work for adding SMP support for BookE processors.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
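Each of the split-out variants ends up providing the same two per-mm hooks that the generic fork/exit paths call; the 64-bit hash versions are the two functions added by this patch. A minimal sketch of the shared prototypes is shown below; treating them as the declarations in asm/mmu_context.h is an assumption here, but the signatures themselves are taken from the new file.

    /* Per-MMU context hooks each variant implements (sketch, not the verbatim header) */
    struct task_struct;
    struct mm_struct;

    /* Set up MMU context state for a newly created address space. */
    int init_new_context(struct task_struct *tsk, struct mm_struct *mm);

    /* Tear that state down when the address space is destroyed. */
    void destroy_context(struct mm_struct *mm);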
Diffstat (limited to 'arch/powerpc/mm/mmu_context_hash64.c')
-rw-r--r--  arch/powerpc/mm/mmu_context_hash64.c  78
1 file changed, 78 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
new file mode 100644
index 000000000000..dbeb86ac90cd
--- /dev/null
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -0,0 +1,78 @@
+/*
+ * MMU context allocation for 64-bit kernels.
+ *
+ * Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/idr.h>
+
+#include <asm/mmu_context.h>
+
+static DEFINE_SPINLOCK(mmu_context_lock);
+static DEFINE_IDR(mmu_context_idr);
+
+/*
+ * The proto-VSID space has 2^35 - 1 segments available for user mappings.
+ * Each segment contains 2^28 bytes. Each context maps 2^44 bytes,
+ * so we can support 2^19-1 contexts (19 == 35 + 28 - 44).
+ */
+#define NO_CONTEXT 0
+#define MAX_CONTEXT ((1UL << 19) - 1)
+
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ int index;
+ int err;
+
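+	/*
+	 * Old-style IDR allocation: pre-allocate memory outside the lock,
+	 * then ask for an id of at least 1 (0 is NO_CONTEXT).
+	 * idr_get_new_above() returns -EAGAIN when the preallocated memory
+	 * has been consumed (e.g. by a racing allocation), so we retry.
+	 */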
+again:
+ if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
+ return -ENOMEM;
+
+ spin_lock(&mmu_context_lock);
+ err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index);
+ spin_unlock(&mmu_context_lock);
+
+ if (err == -EAGAIN)
+ goto again;
+ else if (err)
+ return err;
+
+ if (index > MAX_CONTEXT) {
+ spin_lock(&mmu_context_lock);
+ idr_remove(&mmu_context_idr, index);
+ spin_unlock(&mmu_context_lock);
+ return -ENOMEM;
+ }
+
+	/* The old code would re-promote on fork, but we don't do that
+	 * when using slices, as it could cause problems promoting slices
+	 * that have been forced down to 4K.
+	 */
+ if (slice_mm_new_context(mm))
+ slice_set_user_psize(mm, mmu_virtual_psize);
+ mm->context.id = index;
+
+ return 0;
+}
+
+void destroy_context(struct mm_struct *mm)
+{
+ spin_lock(&mmu_context_lock);
+ idr_remove(&mmu_context_idr, mm->context.id);
+ spin_unlock(&mmu_context_lock);
+
+ mm->context.id = NO_CONTEXT;
+}