author     Lorenzo Stoakes <lstoakes@gmail.com>       2023-03-18 00:58:25 +0300
committer  Andrew Morton <akpm@linux-foundation.org>  2023-04-06 05:42:51 +0300
commit     9042599e81c295f0b12d940248d6608e87e7b6b6 (patch)
tree       4a0ea1f330bf97fede0dbb73ca8638d3528fde31 /mm/memory.c
parent     1c06b6a599b5b7be74a6baffafa00b0f70cbe523 (diff)
download   linux-9042599e81c295f0b12d940248d6608e87e7b6b6.tar.xz
mm: refactor do_fault_around()
Patch series "Refactor do_fault_around()"
Refactor do_fault_around() to avoid bitwise tricks and rather
difficult-to-follow logic. Additionally, prefer fault_around_pages to
fault_around_bytes, as the operations are performed at base page
granularity.
This patch (of 2):
The existing logic is confusing and fails to abstract a number of bitwise
tricks.
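For instance, the open-coded masking this patch removes, next to the clearer
equivalent it introduces (both excerpted from the diff below):

	/* Before: align the address by masking out the low bits by hand. */
	mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
	address = max(address & mask, vmf->vma->vm_start);

	/* After: say what is meant -- align the PTE index down to nr_pages. */
	from_pte = max(ALIGN_DOWN(pte_off, nr_pages),
		       pte_off - min(pte_off, vma_off));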
Use ALIGN_DOWN() to perform alignment and pte_index() to obtain a PTE index,
and represent the address range as PTE offsets. This naturally makes it clear
that the operation is intended to occur within a single page table, and
prevents the range from spanning more than one.
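To make the arithmetic concrete, here is a rough standalone sketch of the new
computation. It is a userspace illustration, not kernel code: PAGE_SHIFT,
PTRS_PER_PTE and the sample fault are made-up values, and pte_index(),
ALIGN_DOWN() and min3() are re-expressed in plain C.

	#include <stdio.h>

	#define PAGE_SHIFT    12
	#define PTRS_PER_PTE  512UL

	/* a must be a power of two, as fault_around_bytes_set() guarantees. */
	#define ALIGN_DOWN(x, a)  ((x) & ~((a) - 1))
	#define MIN(a, b)         ((a) < (b) ? (a) : (b))
	#define MAX(a, b)         ((a) > (b) ? (a) : (b))
	#define MIN3(a, b, c)     MIN(MIN(a, b), c)

	/* Which of the PTRS_PER_PTE slots in a page table maps addr. */
	static unsigned long pte_index(unsigned long addr)
	{
		return (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	}

	int main(void)
	{
		unsigned long nr_pages = 16;              /* fault_around_bytes >> PAGE_SHIFT */
		unsigned long address = 0x7f0000005000UL; /* hypothetical faulting address */
		unsigned long vma_off = 2;                /* page offset of address in its VMA */
		unsigned long vma_pages = 1024;           /* hypothetical VMA length in pages */

		unsigned long pte_off = pte_index(address);

		/* The PTE offset of the start address, clamped to the VMA. */
		unsigned long from_pte = MAX(ALIGN_DOWN(pte_off, nr_pages),
					     pte_off - MIN(pte_off, vma_off));

		/* The PTE offset of the end address, clamped to the VMA and page table. */
		unsigned long to_pte = MIN3(from_pte + nr_pages, PTRS_PER_PTE,
					    pte_off + vma_pages - vma_off) - 1;

		printf("pte_off=%lu from_pte=%lu to_pte=%lu\n", pte_off, from_pte, to_pte);
		return 0;
	}

With these inputs the faulting address sits at PTE index 5 but only two pages
into its VMA, so the naively aligned start (index 0) is clamped up to the
VMA's first page (from_pte=3), and the 16-page window ends at to_pte=18,
comfortably within one page table.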
We rely on the fact that fault_around_bytes will always be page-aligned, at
least one page in size, a power of two, and no larger than
PAGE_SIZE * PTRS_PER_PTE (i.e. the address space mapped by a single page
table). These are all guaranteed by fault_around_bytes_set().
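For reference, this is roughly how fault_around_bytes_set() enforces those
guarantees; the body below is a paraphrase of the mm/memory.c debugfs setter,
not a verbatim quote:

	static int fault_around_bytes_set(void *data, u64 val)
	{
		/* Never allow a window larger than one page table maps. */
		if (val / PAGE_SIZE > PTRS_PER_PTE)
			return -EINVAL;

		/*
		 * Round down to a power of two, flooring at one page:
		 * rounddown_pow_of_two(0) is undefined, so zero and
		 * sub-page values all become PAGE_SIZE.
		 */
		if (val > PAGE_SIZE)
			fault_around_bytes = rounddown_pow_of_two(val);
		else
			fault_around_bytes = PAGE_SIZE;
		return 0;
	}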
Link: https://lkml.kernel.org/r/cover.1679089214.git.lstoakes@gmail.com
Link: https://lkml.kernel.org/r/d125db1c3665a63b80cea29d56407825482e2262.1679089214.git.lstoakes@gmail.com
Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c  38
1 file changed, 16 insertions(+), 22 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index a890b2951b53..9e6608b50261 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4449,8 +4449,8 @@ late_initcall(fault_around_debugfs);
  * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
  * not ready to be mapped: not up-to-date, locked, etc.
  *
- * This function doesn't cross the VMA boundaries, in order to call map_pages()
- * only once.
+ * This function doesn't cross VMA or page table boundaries, in order to call
+ * map_pages() and acquire a PTE lock only once.
  *
  * fault_around_bytes defines how many bytes we'll try to map.
  * do_fault_around() expects it to be set to a power of two less than or equal
@@ -4463,27 +4463,19 @@ late_initcall(fault_around_debugfs);
  */
 static vm_fault_t do_fault_around(struct vm_fault *vmf)
 {
-	unsigned long address = vmf->address, nr_pages, mask;
-	pgoff_t start_pgoff = vmf->pgoff;
-	pgoff_t end_pgoff;
-	int off;
+	pgoff_t nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
+	pgoff_t pte_off = pte_index(vmf->address);
+	/* The page offset of vmf->address within the VMA. */
+	pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff;
+	pgoff_t from_pte, to_pte;
 
-	nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
-	mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
+	/* The PTE offset of the start address, clamped to the VMA. */
+	from_pte = max(ALIGN_DOWN(pte_off, nr_pages),
+		       pte_off - min(pte_off, vma_off));
 
-	address = max(address & mask, vmf->vma->vm_start);
-	off = ((vmf->address - address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
-	start_pgoff -= off;
-
-	/*
-	 * end_pgoff is either the end of the page table, the end of
-	 * the vma or nr_pages from start_pgoff, depending what is nearest.
-	 */
-	end_pgoff = start_pgoff -
-		((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
-		PTRS_PER_PTE - 1;
-	end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1,
-			start_pgoff + nr_pages - 1);
+	/* The PTE offset of the end address, clamped to the VMA and PTE. */
+	to_pte = min3(from_pte + nr_pages, (pgoff_t)PTRS_PER_PTE,
+		      pte_off + vma_pages(vmf->vma) - vma_off) - 1;
 
 	if (pmd_none(*vmf->pmd)) {
 		vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
@@ -4491,7 +4483,9 @@ static vm_fault_t do_fault_around(struct vm_fault *vmf)
 			return VM_FAULT_OOM;
 	}
 
-	return vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff);
+	return vmf->vma->vm_ops->map_pages(vmf,
+			vmf->pgoff + from_pte - pte_off,
+			vmf->pgoff + to_pte - pte_off);
 }
 
 /* Return true if we should do read fault-around, false otherwise */