author     Thomas Hellstrom <thellstrom@vmware.com>    2018-10-12 17:58:12 +0300
committer  Thomas Hellstrom <thellstrom@vmware.com>    2019-06-18 16:19:33 +0300
commit     c9e5f41f732057097cd66ab7b01e7f7a2ff6c795 (patch)
tree       9ed5853df4a1ce6985981bf23fedb55d9a0145de /mm
parent     4251fa5fc3bbe3e1df99f60a738d367373ee335f (diff)
download   linux-c9e5f41f732057097cd66ab7b01e7f7a2ff6c795.tar.xz
mm: Allow the [page|pfn]_mkwrite callbacks to drop the mmap_sem
Driver fault callbacks are allowed to drop the mmap_sem when expecting
long hardware waits to avoid blocking other mm users. Allow the mkwrite
callbacks to do the same by returning early on VM_FAULT_RETRY.
In particular we want to be able to drop the mmap_sem when waiting for
a reservation object lock on a GPU buffer object. These locks may be
held while waiting for the GPU.
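
For illustration, a driver mkwrite handler might use this roughly as follows.
This is only a sketch: struct my_bo and the my_bo_from_vma()/my_bo_get()/
my_bo_put() helpers are hypothetical, and only the VM_FAULT_RETRY handling is
the point; the reservation_object API names are those current at the time of
this patch.

	#include <linux/mm.h>
	#include <linux/reservation.h>

	/* Hypothetical driver buffer object and helpers, for illustration only. */
	struct my_bo {
		struct reservation_object *resv;
	};
	extern struct my_bo *my_bo_from_vma(struct vm_area_struct *vma);
	extern void my_bo_get(struct my_bo *bo);
	extern void my_bo_put(struct my_bo *bo);

	/*
	 * Sketch of a .pfn_mkwrite handler that drops the mmap_sem while
	 * waiting for the buffer object's reservation lock, relying on the
	 * core mm now honouring VM_FAULT_RETRY from mkwrite callbacks.
	 */
	static vm_fault_t my_bo_pfn_mkwrite(struct vm_fault *vmf)
	{
		struct my_bo *bo = my_bo_from_vma(vmf->vma);

		if (!reservation_object_trylock(bo->resv)) {
			if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
				if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
					my_bo_get(bo);	/* keep bo alive across the unlock */
					up_read(&vmf->vma->vm_mm->mmap_sem);
					if (!reservation_object_lock_interruptible(bo->resv, NULL))
						reservation_object_unlock(bo->resv);
					my_bo_put(bo);
				}
				return VM_FAULT_RETRY;
			}
			if (reservation_object_lock_interruptible(bo->resv, NULL))
				return VM_FAULT_NOPAGE;
		}

		/* ... prepare the bo for CPU writes under the reservation lock ... */

		reservation_object_unlock(bo->resv);
		return 0;	/* let the core proceed with finish_mkwrite_fault() */
	}

With the change below, do_page_mkwrite(), wp_pfn_shared(), wp_page_shared()
and do_shared_fault() return such a VM_FAULT_RETRY to the fault handler
instead of continuing with the mmap_sem released.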
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Souptick Joarder <jrdr.linux@gmail.com>
Cc: "Jérôme Glisse" <jglisse@redhat.com>
Cc: linux-mm@kvack.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Diffstat (limited to 'mm')
-rw-r--r--  mm/memory.c | 10
1 file changed, 6 insertions, 4 deletions
diff --git a/mm/memory.c b/mm/memory.c
index ddf20bd0c317..168f546af1ad 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2238,7 +2238,7 @@ static vm_fault_t do_page_mkwrite(struct vm_fault *vmf)
 	ret = vmf->vma->vm_ops->page_mkwrite(vmf);
 	/* Restore original flags so that caller is not surprised */
 	vmf->flags = old_flags;
-	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
+	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
 	if (unlikely(!(ret & VM_FAULT_LOCKED))) {
 		lock_page(page);
@@ -2515,7 +2515,7 @@ static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
 		vmf->flags |= FAULT_FLAG_MKWRITE;
 		ret = vma->vm_ops->pfn_mkwrite(vmf);
-		if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
+		if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))
 			return ret;
 		return finish_mkwrite_fault(vmf);
 	}
@@ -2536,7 +2536,8 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
 		tmp = do_page_mkwrite(vmf);
 		if (unlikely(!tmp || (tmp &
-				      (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
+				      (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
+				       VM_FAULT_RETRY)))) {
 			put_page(vmf->page);
 			return tmp;
 		}
@@ -3601,7 +3602,8 @@ static vm_fault_t do_shared_fault(struct vm_fault *vmf)
 		unlock_page(vmf->page);
 		tmp = do_page_mkwrite(vmf);
 		if (unlikely(!tmp ||
-			     (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
+			     (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
+				     VM_FAULT_RETRY)))) {
 			put_page(vmf->page);
 			return tmp;
 		}