Diffstat (limited to 'arch/um')
-rw-r--r--  arch/um/include/asm/Kbuild            1
-rw-r--r--  arch/um/include/asm/mmu_context.h    29
-rw-r--r--  arch/um/include/shared/as-layout.h    3
-rw-r--r--  arch/um/kernel/exec.c                 4
-rw-r--r--  arch/um/kernel/skas/mmu.c            87
-rw-r--r--  arch/um/kernel/tlb.c                 15
-rw-r--r--  arch/um/kernel/um_arch.c              5
-rw-r--r--  arch/um/os-Linux/skas/process.c       4
8 files changed, 10 insertions(+), 138 deletions(-)
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index 314979467db1..a58811dc054c 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -25,3 +25,4 @@ generic-y += topology.h
generic-y += trace_clock.h
generic-y += word-at-a-time.h
generic-y += kprobes.h
+generic-y += mm_hooks.h
diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
index f8a100770691..68e2eb9cfb47 100644
--- a/arch/um/include/asm/mmu_context.h
+++ b/arch/um/include/asm/mmu_context.h
@@ -10,33 +10,9 @@
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>
+#include <asm/mm_hooks.h>
#include <asm/mmu.h>
-extern void uml_setup_stubs(struct mm_struct *mm);
-/*
- * Needed since we do not use the asm-generic/mm_hooks.h:
- */
-static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
-{
- uml_setup_stubs(mm);
- return 0;
-}
-extern void arch_exit_mmap(struct mm_struct *mm);
-static inline void arch_unmap(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
-}
-static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
- bool write, bool execute, bool foreign)
-{
- /* by default, allow everything */
- return true;
-}
-
-/*
- * end asm-generic/mm_hooks.h functions
- */
-
extern void force_flush_all(void);
#define activate_mm activate_mm
@@ -47,9 +23,6 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
* when the new ->mm is used for the first time.
*/
__switch_mm(&new->context.id);
- mmap_write_lock_nested(new, SINGLE_DEPTH_NESTING);
- uml_setup_stubs(new);
- mmap_write_unlock(new);
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
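Aside: apart from the uml_setup_stubs() calls, the hooks deleted above were the same no-op defaults that asm-generic/mm_hooks.h already provides, which is why swapping in the generic header (plus the Kbuild line) is enough. The sketch below paraphrases what that header supplies; it is illustrative, not a verbatim copy of the generic file.

/*
 * Rough paraphrase of asm-generic/mm_hooks.h (illustrative only):
 * every hook is an empty default, matching what the removed UML
 * versions did once the uml_setup_stubs() calls are gone.
 */
static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	return 0;
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
}

static inline void arch_unmap(struct mm_struct *mm,
			      unsigned long start, unsigned long end)
{
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
					     bool write, bool execute,
					     bool foreign)
{
	/* by default, allow everything */
	return true;
}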
diff --git a/arch/um/include/shared/as-layout.h b/arch/um/include/shared/as-layout.h
index 56408bf3480d..9a0bd648d872 100644
--- a/arch/um/include/shared/as-layout.h
+++ b/arch/um/include/shared/as-layout.h
@@ -20,7 +20,7 @@
* 'UL' and other type specifiers unilaterally. We
* use the following macros to deal with this.
*/
-#define STUB_START 0x100000UL
+#define STUB_START stub_start
#define STUB_CODE STUB_START
#define STUB_DATA (STUB_CODE + UM_KERN_PAGE_SIZE)
#define STUB_END (STUB_DATA + UM_KERN_PAGE_SIZE)
@@ -46,6 +46,7 @@ extern unsigned long long highmem;
extern unsigned long brk_start;
extern unsigned long host_task_size;
+extern unsigned long stub_start;
extern int linux_main(int argc, char **argv);
extern void uml_finishsetup(void);
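Aside: STUB_START now expands to a variable instead of the compile-time constant 0x100000UL, but the window keeps its fixed two-page shape: one page of stub code immediately followed by one page of stub data. A stand-alone sketch of that arithmetic, using a hypothetical page size and a made-up stub_start value:

/* Stand-alone demo of the stub window layout; UM_KERN_PAGE_SIZE and the
 * stub_start value here are hypothetical stand-ins, not values taken
 * from a running kernel. */
#include <assert.h>

#define UM_KERN_PAGE_SIZE 4096UL

int main(void)
{
	unsigned long stub_start = 0x7ffff0000000UL;	/* made-up runtime value */
	unsigned long stub_code = stub_start;			  /* STUB_CODE */
	unsigned long stub_data = stub_code + UM_KERN_PAGE_SIZE;  /* STUB_DATA */
	unsigned long stub_end  = stub_data + UM_KERN_PAGE_SIZE;  /* STUB_END  */

	/* one code page plus one data page, back to back */
	assert(stub_data - stub_code == UM_KERN_PAGE_SIZE);
	assert(stub_end - stub_start == 2 * UM_KERN_PAGE_SIZE);
	return 0;
}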
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c
index e8fd5d540b05..4d8498100341 100644
--- a/arch/um/kernel/exec.c
+++ b/arch/um/kernel/exec.c
@@ -26,9 +26,7 @@ void flush_thread(void)
arch_flush_thread(&current->thread.arch);
- ret = unmap(&current->mm->context.id, 0, STUB_START, 0, &data);
- ret = ret || unmap(&current->mm->context.id, STUB_END,
- host_task_size - STUB_END, 1, &data);
+ ret = unmap(&current->mm->context.id, 0, TASK_SIZE, 1, &data);
if (ret) {
printk(KERN_ERR "flush_thread - clearing address space failed, "
"err = %d\n", ret);
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index d9961163da66..125df465e8ea 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -14,47 +14,6 @@
#include <os.h>
#include <skas.h>
-static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
- unsigned long kernel)
-{
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
-
- pgd = pgd_offset(mm, proc);
-
- p4d = p4d_alloc(mm, pgd, proc);
- if (!p4d)
- goto out;
-
- pud = pud_alloc(mm, p4d, proc);
- if (!pud)
- goto out_pud;
-
- pmd = pmd_alloc(mm, pud, proc);
- if (!pmd)
- goto out_pmd;
-
- pte = pte_alloc_map(mm, pmd, proc);
- if (!pte)
- goto out_pte;
-
- *pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
- *pte = pte_mkread(*pte);
- return 0;
-
- out_pte:
- pmd_free(mm, pmd);
- out_pmd:
- pud_free(mm, pud);
- out_pud:
- p4d_free(mm, p4d);
- out:
- return -ENOMEM;
-}
-
int init_new_context(struct task_struct *task, struct mm_struct *mm)
{
struct mm_context *from_mm = NULL;
@@ -98,52 +57,6 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
return ret;
}
-void uml_setup_stubs(struct mm_struct *mm)
-{
- int err, ret;
-
- ret = init_stub_pte(mm, STUB_CODE,
- (unsigned long) __syscall_stub_start);
- if (ret)
- goto out;
-
- ret = init_stub_pte(mm, STUB_DATA, mm->context.id.stack);
- if (ret)
- goto out;
-
- mm->context.stub_pages[0] = virt_to_page(__syscall_stub_start);
- mm->context.stub_pages[1] = virt_to_page(mm->context.id.stack);
-
- /* dup_mmap already holds mmap_lock */
- err = install_special_mapping(mm, STUB_START, STUB_END - STUB_START,
- VM_READ | VM_MAYREAD | VM_EXEC |
- VM_MAYEXEC | VM_DONTCOPY | VM_PFNMAP,
- mm->context.stub_pages);
- if (err) {
- printk(KERN_ERR "install_special_mapping returned %d\n", err);
- goto out;
- }
- return;
-
-out:
- force_sigsegv(SIGSEGV);
-}
-
-void arch_exit_mmap(struct mm_struct *mm)
-{
- pte_t *pte;
-
- pte = virt_to_pte(mm, STUB_CODE);
- if (pte != NULL)
- pte_clear(mm, STUB_CODE, pte);
-
- pte = virt_to_pte(mm, STUB_DATA);
- if (pte == NULL)
- return;
-
- pte_clear(mm, STUB_DATA, pte);
-}
-
void destroy_context(struct mm_struct *mm)
{
struct mm_context *mmu = &mm->context;
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 5be1b0da9f3b..bc38f79ca3a3 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -125,9 +125,6 @@ static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
struct host_vm_op *last;
int fd = -1, ret = 0;
- if (virt + len > STUB_START && virt < STUB_END)
- return -EINVAL;
-
if (hvc->userspace)
fd = phys_mapping(phys, &offset);
else
@@ -165,9 +162,6 @@ static int add_munmap(unsigned long addr, unsigned long len,
struct host_vm_op *last;
int ret = 0;
- if (addr + len > STUB_START && addr < STUB_END)
- return -EINVAL;
-
if (hvc->index != 0) {
last = &hvc->ops[hvc->index - 1];
if ((last->type == MUNMAP) &&
@@ -195,9 +189,6 @@ static int add_mprotect(unsigned long addr, unsigned long len,
struct host_vm_op *last;
int ret = 0;
- if (addr + len > STUB_START && addr < STUB_END)
- return -EINVAL;
-
if (hvc->index != 0) {
last = &hvc->ops[hvc->index - 1];
if ((last->type == MPROTECT) &&
@@ -232,9 +223,6 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
pte = pte_offset_kernel(pmd, addr);
do {
- if ((addr >= STUB_START) && (addr < STUB_END))
- continue;
-
r = pte_read(*pte);
w = pte_write(*pte);
x = pte_exec(*pte);
@@ -478,9 +466,6 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
address &= PAGE_MASK;
- if (address >= STUB_START && address < STUB_END)
- goto kill;
-
pgd = pgd_offset(mm, address);
if (!pgd_present(*pgd))
goto kill;
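Aside: the guards removed from add_mmap(), add_munmap() and add_mprotect() above were all the same interval-overlap test against the stub window (the update_pte_range() and flush_tlb_page() removals checked a single address against it). Written out as a hypothetical helper, purely to make the old logic explicit; with the stub pages moved above TASK_SIZE, generic mm code never issues an operation in that range, so the test becomes dead weight:

/* Hypothetical helper restating the removed checks; it does not exist
 * in the kernel sources. */
static inline bool range_touches_stub(unsigned long addr, unsigned long len,
				      unsigned long stub_start,
				      unsigned long stub_end)
{
	/* true when [addr, addr + len) overlaps [stub_start, stub_end) */
	return addr + len > stub_start && addr < stub_end;
}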
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 80e2660782a0..74e07e748a9b 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -249,6 +249,7 @@ void uml_finishsetup(void)
}
/* Set during early boot */
+unsigned long stub_start;
unsigned long task_size;
EXPORT_SYMBOL(task_size);
@@ -283,6 +284,10 @@ int __init linux_main(int argc, char **argv)
add_arg(DEFAULT_COMMAND_LINE_CONSOLE);
host_task_size = os_get_top_address();
+ /* reserve two pages for the stubs */
+ host_task_size -= 2 * PAGE_SIZE;
+ stub_start = host_task_size;
+
/*
* TASK_SIZE needs to be PGDIR_SIZE aligned or else exit_mmap craps
* out
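Aside: the lines added above carve the top two pages out of the host address space and start the stub window there, instead of at the old fixed low address. A minimal user-space sketch of the same reservation, with an invented os_get_top_address() result:

/* Minimal sketch of the reservation done in linux_main() above; the
 * top-of-address-space value is made up for the demo, not what
 * os_get_top_address() returns on any particular host. */
#include <stdio.h>

#define PAGE_SIZE 4096UL	/* hypothetical host page size */

int main(void)
{
	unsigned long host_task_size = 0x7ffff0000000UL; /* pretend os_get_top_address() */
	unsigned long stub_start;

	/* reserve two pages for the stubs at the very top of the space */
	host_task_size -= 2 * PAGE_SIZE;
	stub_start = host_task_size;

	printf("host_task_size: 0x%lx\nstub_start:     0x%lx\n",
	       host_task_size, stub_start);
	return 0;
}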
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index 623b0aeadf4c..fba674fac8b7 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -251,10 +251,6 @@ static int userspace_tramp(void *stack)
signal(SIGTERM, SIG_DFL);
signal(SIGWINCH, SIG_IGN);
- /*
- * This has a pte, but it can't be mapped in with the usual
- * tlb_flush mechanism because this is part of that mechanism
- */
fd = phys_mapping(to_phys(__syscall_stub_start), &offset);
addr = mmap64((void *) STUB_CODE, UM_KERN_PAGE_SIZE,
PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset);