author		Linus Torvalds <torvalds@linux-foundation.org>	2023-06-16 03:11:44 +0300
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2023-07-01 14:16:24 +0300
commit		b92cd80e5f0b14760a49ff68da23959a38452cda (patch)
tree		8a01664fc522fa989e7fac594e10b50880a68502
parent		755aa1bc6aaf9961aa4bdb54f32faaba06c08792 (diff)
download	linux-b92cd80e5f0b14760a49ff68da23959a38452cda.tar.xz
arm64/mm: Convert to using lock_mm_and_find_vma()

commit ae870a68b5d13d67cf4f18d47bb01ee3fee40acb upstream.

This converts arm64 to use the new page fault helper. It was very
straightforward, but still needed a fix for the "obvious" conversion I
initially did. Thanks to Suren for the fix and testing.

Fixed-and-tested-by: Suren Baghdasaryan <surenb@google.com>
Unnecessary-code-removal-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
[6.1: Ignore CONFIG_PER_VMA_LOCK context]
Signed-off-by: Samuel Mendoza-Jonas <samjonas@amazon.com>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--	arch/arm64/Kconfig	1
-rw-r--r--	arch/arm64/mm/fault.c	46
2 files changed, 10 insertions(+), 37 deletions(-)
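
For context before the diff: below is a minimal, illustrative sketch of what
the new generic helper consolidates, reconstructed from the arm64-specific
locking and VMA-lookup code this patch removes. The real
lock_mm_and_find_vma() lives in mm/memory.c and differs in detail; the
function name prefix and exact structure here are assumptions for
illustration only, not the upstream source.

	/*
	 * Illustrative sketch, not mm/memory.c: roughly the logic that
	 * lock_mm_and_find_vma() takes over from the arch fault handlers,
	 * inferred from the arm64 code deleted in this patch.
	 */
	static struct vm_area_struct *
	sketch_lock_mm_and_find_vma(struct mm_struct *mm, unsigned long addr,
				    struct pt_regs *regs)
	{
		struct vm_area_struct *vma;

		if (!mmap_read_trylock(mm)) {
			/*
			 * Kernel fault with no exception-table fixup:
			 * sleeping on the lock could deadlock, so fail
			 * immediately (the old "goto no_context" path).
			 */
			if (!user_mode(regs) && !search_exception_tables(regs->pc))
				return NULL;
			mmap_read_lock(mm);
		}

		vma = find_vma(mm, addr);
		if (likely(vma && vma->vm_start <= addr))
			return vma;

		/* Fault below the VMA: only valid for a downward-growing stack. */
		if (!vma || !(vma->vm_flags & VM_GROWSDOWN) ||
		    expand_stack(vma, addr)) {
			mmap_read_unlock(mm);
			return NULL;	/* caller reports BADMAP; lock already dropped */
		}
		return vma;
	}

The payoff visible in the diff: the trylock/deadlock-avoidance dance, the
find_vma() call, and the VM_GROWSDOWN stack-expansion check all move out of
arch/arm64/mm/fault.c into one shared helper, so the arch code shrinks to a
single call that either returns a usable VMA or fails with the lock released.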
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 43ff7c7a3ac9..20ee745c118a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -211,6 +211,7 @@ config ARM64
select IRQ_DOMAIN
select IRQ_FORCED_THREADING
select KASAN_VMALLOC if KASAN
+ select LOCK_MM_AND_FIND_VMA
select MODULES_USE_ELF_RELA
select NEED_DMA_MAP_STATE
select NEED_SG_DMA_LENGTH
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 2fef1fa93e7b..6b6b8a82f294 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -483,27 +483,14 @@ static void do_bad_area(unsigned long far, unsigned long esr,
#define VM_FAULT_BADMAP ((__force vm_fault_t)0x010000)
#define VM_FAULT_BADACCESS ((__force vm_fault_t)0x020000)
-static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
+static vm_fault_t __do_page_fault(struct mm_struct *mm,
+ struct vm_area_struct *vma, unsigned long addr,
unsigned int mm_flags, unsigned long vm_flags,
struct pt_regs *regs)
{
- struct vm_area_struct *vma = find_vma(mm, addr);
-
- if (unlikely(!vma))
- return VM_FAULT_BADMAP;
-
/*
* Ok, we have a good vm_area for this memory access, so we can handle
* it.
- */
- if (unlikely(vma->vm_start > addr)) {
- if (!(vma->vm_flags & VM_GROWSDOWN))
- return VM_FAULT_BADMAP;
- if (expand_stack(vma, addr))
- return VM_FAULT_BADMAP;
- }
-
- /*
* Check that the permissions on the VMA allow for the fault which
* occurred.
*/
@@ -535,6 +522,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
unsigned long vm_flags;
unsigned int mm_flags = FAULT_FLAG_DEFAULT;
unsigned long addr = untagged_addr(far);
+ struct vm_area_struct *vma;
if (kprobe_page_fault(regs, esr))
return 0;
@@ -585,31 +573,14 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
- /*
- * As per x86, we may deadlock here. However, since the kernel only
- * validly references user space from well defined areas of the code,
- * we can bug out early if this is from code which shouldn't.
- */
- if (!mmap_read_trylock(mm)) {
- if (!user_mode(regs) && !search_exception_tables(regs->pc))
- goto no_context;
retry:
- mmap_read_lock(mm);
- } else {
- /*
- * The above mmap_read_trylock() might have succeeded in which
- * case, we'll have missed the might_sleep() from down_read().
- */
- might_sleep();
-#ifdef CONFIG_DEBUG_VM
- if (!user_mode(regs) && !search_exception_tables(regs->pc)) {
- mmap_read_unlock(mm);
- goto no_context;
- }
-#endif
+ vma = lock_mm_and_find_vma(mm, addr, regs);
+ if (unlikely(!vma)) {
+ fault = VM_FAULT_BADMAP;
+ goto done;
}
- fault = __do_page_fault(mm, addr, mm_flags, vm_flags, regs);
+ fault = __do_page_fault(mm, vma, addr, mm_flags, vm_flags, regs);
/* Quick path to respond to signals */
if (fault_signal_pending(fault, regs)) {
@@ -628,6 +599,7 @@ retry:
}
mmap_read_unlock(mm);
+done:
/*
* Handle the "normal" (no error) case first.
*/
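
One detail worth noting in the error path above: on failure,
lock_mm_and_find_vma() returns NULL having already released the mmap lock,
which is why the VM_FAULT_BADMAP case jumps to the new done: label past
mmap_read_unlock() rather than falling through to it.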