Diffstat (limited to 'arch/xtensa/mm')
 arch/xtensa/mm/fault.c      | 14 +++-----------
 arch/xtensa/mm/kasan_init.c |  2 --
 arch/xtensa/mm/misc.S       |  5 +++++
 arch/xtensa/mm/tlb.c        |  5 ++++-
 4 files changed, 12 insertions(+), 14 deletions(-)
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index faf7cf35a0ee..d1eb8d6c5b82 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -130,23 +130,14 @@ void do_page_fault(struct pt_regs *regs)
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
 retry:
-	mmap_read_lock(mm);
-	vma = find_vma(mm, address);
-
+	vma = lock_mm_and_find_vma(mm, address, regs);
 	if (!vma)
-		goto bad_area;
-	if (vma->vm_start <= address)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-	if (expand_stack(vma, address))
-		goto bad_area;
+		goto bad_area_nosemaphore;
 
 	/* Ok, we have a good vm_area for this memory access, so
 	 * we can handle it..
 	 */
 
-good_area:
 	code = SEGV_ACCERR;
 
 	if (is_write) {
@@ -205,6 +196,7 @@ good_area:
  */
 bad_area:
 	mmap_read_unlock(mm);
+bad_area_nosemaphore:
 	if (user_mode(regs)) {
 		force_sig_fault(SIGSEGV, code, (void *) address);
 		return;
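
The locking contract is the heart of this conversion: lock_mm_and_find_vma() takes the mmap read lock itself, returns with the lock still held when a suitable VMA is found (expanding the stack if necessary), and drops the lock before returning NULL. That is why the failure path now jumps to bad_area_nosemaphore, past the mmap_read_unlock() that bad_area still performs. A condensed sketch of that contract, built from the helpers visible in the removed lines (the real implementation in mm/memory.c performs the stack expansion under stricter locking):

struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
					    unsigned long addr,
					    struct pt_regs *regs)
{
	struct vm_area_struct *vma;

	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	if (vma && vma->vm_start <= addr)
		return vma;			/* success: lock still held */
	if (vma && (vma->vm_flags & VM_GROWSDOWN) &&
	    !expand_stack(vma, addr))
		return vma;			/* stack grown: lock still held */
	mmap_read_unlock(mm);
	return NULL;				/* failure: lock dropped */
}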
diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c
index 1fef24db2ff6..f00d122aa806 100644
--- a/arch/xtensa/mm/kasan_init.c
+++ b/arch/xtensa/mm/kasan_init.c
@@ -14,7 +14,6 @@
 #include <linux/kernel.h>
 #include <asm/initialize_mmu.h>
 #include <asm/tlbflush.h>
-#include <asm/traps.h>
 
 void __init kasan_early_init(void)
 {
@@ -31,7 +30,6 @@ void __init kasan_early_init(void)
 		BUG_ON(!pmd_none(*pmd));
 		set_pmd(pmd, __pmd((unsigned long)kasan_early_shadow_pte));
 	}
-	early_trap_init();
 }
 
 static void __init populate(void *start, void *end)
diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S
index 0527bf6e3211..ec36f73c4765 100644
--- a/arch/xtensa/mm/misc.S
+++ b/arch/xtensa/mm/misc.S
@@ -47,6 +47,7 @@ ENTRY(clear_page)
 	abi_ret_default
 
 ENDPROC(clear_page)
+EXPORT_SYMBOL(clear_page)
 
 /*
  * copy_page and copy_user_page are the same for non-cache-aliased configs.
@@ -89,6 +90,7 @@ ENTRY(copy_page)
 	abi_ret_default
 
 ENDPROC(copy_page)
+EXPORT_SYMBOL(copy_page)
 
 #ifdef CONFIG_MMU
 /*
@@ -367,6 +369,7 @@ ENTRY(__invalidate_icache_range)
 	abi_ret_default
 
 ENDPROC(__invalidate_icache_range)
+EXPORT_SYMBOL(__invalidate_icache_range)
 
 /*
  * void __flush_invalidate_dcache_range(ulong start, ulong size)
@@ -397,6 +400,7 @@ ENTRY(__flush_dcache_range)
 	abi_ret_default
 
 ENDPROC(__flush_dcache_range)
+EXPORT_SYMBOL(__flush_dcache_range)
 
 /*
  * void _invalidate_dcache_range(ulong start, ulong size)
@@ -411,6 +415,7 @@ ENTRY(__invalidate_dcache_range)
 	abi_ret_default
 
 ENDPROC(__invalidate_dcache_range)
+EXPORT_SYMBOL(__invalidate_dcache_range)
 
 /*
  * void _invalidate_icache_all(void)
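
EXPORT_SYMBOL on an assembly entry point behaves exactly as it does on a C function: the symbol lands in the kernel's export table and becomes linkable from loadable modules. With the exports above, a module that rewrites executable memory can flush its changes through these routines. A hypothetical module sketch (the buffer handling is invented for illustration; the prototypes are the unsigned long start/size pairs declared in xtensa's asm/cacheflush.h):

#include <linux/module.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>

static int __init cacheflush_demo_init(void)
{
	size_t len = 64;
	void *buf = kmalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	/* ... generate or patch code in buf here ... */
	/* write the new bytes back from the D-cache ... */
	__flush_dcache_range((unsigned long)buf, len);
	/* ... and drop any stale lines from the I-cache */
	__invalidate_icache_range((unsigned long)buf, len);
	kfree(buf);
	return 0;
}

static void __exit cacheflush_demo_exit(void)
{
}

module_init(cacheflush_demo_init);
module_exit(cacheflush_demo_exit);
MODULE_LICENSE("GPL");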
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
index 27a477dae232..0a11fc5f185b 100644
--- a/arch/xtensa/mm/tlb.c
+++ b/arch/xtensa/mm/tlb.c
@@ -179,6 +179,7 @@ static unsigned get_pte_for_vaddr(unsigned vaddr)
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
+	unsigned int pteval;
 
 	if (!mm)
 		mm = task->active_mm;
@@ -197,7 +198,9 @@ static unsigned get_pte_for_vaddr(unsigned vaddr)
 	pte = pte_offset_map(pmd, vaddr);
 	if (!pte)
 		return 0;
-	return pte_val(*pte);
+	pteval = pte_val(*pte);
+	pte_unmap(pte);
+	return pteval;
 }
 
 enum {
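
The tlb.c change fixes a leak rather than a wrong value: pte_offset_map() may create a temporary mapping of the page-table page (a kmap on highmem configurations, with RCU protection in recent kernels), so every successful call must be balanced by pte_unmap(). Returning pte_val(*pte) directly skipped that release. A minimal sketch of the required pairing, using a hypothetical wrapper name:

/* hypothetical helper: read a PTE's raw value without leaking the mapping */
static unsigned int read_pte_value(pmd_t *pmd, unsigned long vaddr)
{
	unsigned int pteval;
	pte_t *pte = pte_offset_map(pmd, vaddr);	/* may map the PTE page */

	if (!pte)
		return 0;
	pteval = pte_val(*pte);		/* copy the value out first... */
	pte_unmap(pte);			/* ...then drop the temporary mapping */
	return pteval;
}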