| field | value | date |
|---|---|---|
| author | Kautuk Consul <consul.kautuk@gmail.com> | 2012-03-31 16:05:17 +0400 |
| committer | Guan Xuetao <gxt@mprc.pku.edu.cn> | 2012-11-09 13:30:08 +0400 |
| commit | f3f09d5a4462929609342460d756fab2e8151421 (patch) | |
| tree | 43ce4d51ebca8aaca25514ca57e9b61e764ff51b /arch/unicore32/mm | |
| parent | 0e4a43ed08e2f44aa7b96aa95d0a540d675483e1 (diff) | |
| download | linux-f3f09d5a4462929609342460d756fab2e8151421.tar.xz | |
unicore32/mm/fault.c: Port OOM changes to do_pf
Commit d065bd810b6deb67d4897a14bfe21f8eb526ba99
(mm: retry page fault when blocking on disk transfer) and
commit 37b23e0525d393d48a7d59f870b3bc061a30ccdb
(x86,mm: make pagefault killable) changed the x86 page fault
handler to make the fault retryable as well as killable.
These changes reduce mmap_sem hold time, which is crucial
during OOM killer invocation.
Port these changes to unicore32.
Signed-off-by: Kautuk Consul <consul.kautuk@gmail.com>
Acked-by: Guan Xuetao <gxt@mprc.pku.edu.cn>
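
For readers unfamiliar with the pattern the message describes, here is a condensed sketch of the retryable, killable fault handling being ported. fault_with_retry() is a hypothetical helper written for this page, not code from the patch or the kernel; it uses the 2012-era handle_mm_fault() signature and the flag names that appear in the diff below.

```c
/*
 * Condensed sketch of the retryable/killable fault pattern being ported.
 * fault_with_retry() is hypothetical; error handling is simplified (the
 * real arch code reports bad addresses via its own fault codes).
 */
#include <linux/mm.h>
#include <linux/sched.h>

static int fault_with_retry(struct mm_struct *mm, unsigned long addr, int write)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
			     (write ? FAULT_FLAG_WRITE : 0);
	struct vm_area_struct *vma;
	int fault;

retry:
	down_read(&mm->mmap_sem);

	/* Look the vma up again on every pass: mmap_sem was dropped
	 * before a retry, so an old pointer may be stale. */
	vma = find_vma(mm, addr);
	if (!vma || vma->vm_start > addr) {
		up_read(&mm->mmap_sem);
		return VM_FAULT_SIGBUS;		/* simplified error path */
	}

	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);

	/* When VM_FAULT_RETRY is returned, __lock_page_or_retry() has
	 * already dropped mmap_sem, so a pending fatal signal can be
	 * handled without touching the semaphore again. */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return 0;

	if ((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY)) {
		/* Retry at most once to avoid starvation. */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		goto retry;
	}

	up_read(&mm->mmap_sem);
	return fault;
}
```

The two properties the referenced x86 commits added are visible here: mmap_sem is not held while the fault waits on I/O before a retry, and a task hit by a fatal signal (for example from the OOM killer) can leave the handler immediately instead of completing the fault.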
Diffstat (limited to 'arch/unicore32/mm')
-rw-r--r-- | arch/unicore32/mm/fault.c | 37 |
1 file changed, 27 insertions, 10 deletions
diff --git a/arch/unicore32/mm/fault.c b/arch/unicore32/mm/fault.c
index 2eeb9c04cab0..f9b5c10bccee 100644
--- a/arch/unicore32/mm/fault.c
+++ b/arch/unicore32/mm/fault.c
@@ -168,7 +168,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
 }
 
 static int __do_pf(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
-                struct task_struct *tsk)
+                unsigned int flags, struct task_struct *tsk)
 {
         struct vm_area_struct *vma;
         int fault;
@@ -194,14 +194,7 @@ good_area:
          * If for any reason at all we couldn't handle the fault, make
          * sure we exit gracefully rather than endlessly redo the fault.
          */
-        fault = handle_mm_fault(mm, vma, addr & PAGE_MASK,
-                        (!(fsr ^ 0x12)) ? FAULT_FLAG_WRITE : 0);
-        if (unlikely(fault & VM_FAULT_ERROR))
-                return fault;
-        if (fault & VM_FAULT_MAJOR)
-                tsk->maj_flt++;
-        else
-                tsk->min_flt++;
+        fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
         return fault;
 
 check_stack:
@@ -216,6 +209,8 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
         struct task_struct *tsk;
         struct mm_struct *mm;
         int fault, sig, code;
+        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+                        ((!(fsr ^ 0x12)) ? FAULT_FLAG_WRITE : 0);
 
         tsk = current;
         mm = tsk->mm;
@@ -236,6 +231,7 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
                 if (!user_mode(regs)
                     && !search_exception_tables(regs->UCreg_pc))
                         goto no_context;
+retry:
                 down_read(&mm->mmap_sem);
         } else {
                 /*
@@ -251,7 +247,28 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 #endif
         }
 
-        fault = __do_pf(mm, addr, fsr, tsk);
+        fault = __do_pf(mm, addr, fsr, flags, tsk);
+
+        /* If we need to retry but a fatal signal is pending, handle the
+         * signal first. We do not need to release the mmap_sem because
+         * it would already be released in __lock_page_or_retry in
+         * mm/filemap.c. */
+        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+                return 0;
+
+        if (!(fault & VM_FAULT_ERROR) && (flags & FAULT_FLAG_ALLOW_RETRY)) {
+                if (fault & VM_FAULT_MAJOR)
+                        tsk->maj_flt++;
+                else
+                        tsk->min_flt++;
+                if (fault & VM_FAULT_RETRY) {
+                        /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+                         * of starvation. */
+                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
+                        goto retry;
+                }
+        }
+
         up_read(&mm->mmap_sem);
 
         /*
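
A note on the flag computation introduced in do_pf(): !(fsr ^ 0x12) is just fsr == 0x12, the fault-status value the old code already mapped to FAULT_FLAG_WRITE when it called handle_mm_fault() directly. The helper below is a hypothetical, more explicit spelling of the same initializer; build_pf_flags() does not exist in the patch.

```c
/*
 * Hypothetical rewrite of the flags initializer in do_pf(), for
 * illustration only. FAULT_FLAG_* are the standard definitions
 * from <linux/mm.h>.
 */
#include <linux/mm.h>

static unsigned int build_pf_flags(unsigned int fsr)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	if (fsr == 0x12)	/* same test as !(fsr ^ 0x12): a write access */
		flags |= FAULT_FLAG_WRITE;

	return flags;
}
```

With the flags computed once in do_pf(), __do_pf() shrinks to a thin wrapper around handle_mm_fault(), while the maj_flt/min_flt accounting and the single retry move up into do_pf(), where mmap_sem is taken and retaken.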