author     Matthew Wilcox (Oracle) <willy@infradead.org>  2022-09-06 22:49:00 +0300
committer  Andrew Morton <akpm@linux-foundation.org>      2022-09-27 05:46:23 +0300
commit     685405020b9f24ec979d41e6c27207be97c000cf (patch)
tree       aa980aad7396a4cc047c6f8fd0c43bedc8a3db43
parent     c4d1a92d0d3ada8a4073b8af8eff462d689d64c5 (diff)
download   linux-685405020b9f24ec979d41e6c27207be97c000cf.tar.xz
mm/khugepaged: stop using vma linked list
Use vma iterator & find_vma() instead of vma linked list.

Link: https://lkml.kernel.org/r/20220906194824.2110408-53-Liam.Howlett@oracle.com
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Reviewed-by: Davidlohr Bueso <dave@stgolabs.net>
Tested-by: Yu Zhao <yuzhao@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r--  mm/huge_memory.c  4
-rw-r--r--  mm/khugepaged.c   9
2 files changed, 8 insertions(+), 5 deletions(-)
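
The series replaces the mm's singly linked VMA list with walks of the maple tree. As a rough sketch of the two access patterns, before and after this kind of conversion (the helper names below are made up for illustration, and both assume the caller already holds the mmap read lock):

	/*
	 * Illustrative sketch only (not from the patch): count the VMAs at or
	 * above @addr, first via the old vm_next list, then via the VMA iterator.
	 */
	static unsigned long count_vmas_list(struct mm_struct *mm, unsigned long addr)
	{
		struct vm_area_struct *vma;
		unsigned long n = 0;

		/* Old style: find the first VMA ending above @addr, follow vm_next. */
		for (vma = find_vma(mm, addr); vma; vma = vma->vm_next)
			n++;
		return n;
	}

	static unsigned long count_vmas_iter(struct mm_struct *mm, unsigned long addr)
	{
		struct vma_iterator vmi;
		struct vm_area_struct *vma;
		unsigned long n = 0;

		/* New style: seed a maple-tree iterator at @addr and walk forward. */
		vma_iter_init(&vmi, mm, addr);
		for_each_vma(vmi, vma)
			n++;
		return n;
	}

Both helpers start at the same place, since find_vma() returns the first VMA whose vm_end lies above the given address; only the way the successor is obtained changes.
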
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 534d30cff9d7..63b4d8ff4b55 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2341,11 +2341,11 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
 	split_huge_pmd_if_needed(vma, end);
 
 	/*
-	 * If we're also updating the vma->vm_next->vm_start,
+	 * If we're also updating the next vma vm_start,
 	 * check if we need to split it.
 	 */
 	if (adjust_next > 0) {
-		struct vm_area_struct *next = vma->vm_next;
+		struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end);
 		unsigned long nstart = next->vm_start;
 		nstart += adjust_next;
 		split_huge_pmd_if_needed(next, nstart);
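
With vm_next on its way out, the following VMA is obtained by an address lookup instead: find_vma() returns the lowest VMA whose vm_end lies above the given address, so passing vma->vm_end yields the VMA immediately after vma, or NULL if there is none. A minimal sketch of that idiom (the helper name is hypothetical; the mmap lock must be held):

	/* Hypothetical helper: successor of @vma by address lookup. */
	static struct vm_area_struct *vma_after(struct vm_area_struct *vma)
	{
		/* First VMA with vm_end > vma->vm_end, i.e. the one following @vma. */
		return find_vma(vma->vm_mm, vma->vm_end);
	}
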
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 9ff3d39b286f..7c13d65aeb14 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -2050,6 +2050,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
 	__releases(&khugepaged_mm_lock)
 	__acquires(&khugepaged_mm_lock)
 {
+	struct vma_iterator vmi;
 	struct mm_slot *mm_slot;
 	struct mm_struct *mm;
 	struct vm_area_struct *vma;
@@ -2078,11 +2079,13 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
 	vma = NULL;
 	if (unlikely(!mmap_read_trylock(mm)))
 		goto breakouterloop_mmap_lock;
-	if (likely(!hpage_collapse_test_exit(mm)))
-		vma = find_vma(mm, khugepaged_scan.address);
 
 	progress++;
-	for (; vma; vma = vma->vm_next) {
+	if (unlikely(hpage_collapse_test_exit(mm)))
+		goto breakouterloop;
+
+	vma_iter_init(&vmi, mm, khugepaged_scan.address);
+	for_each_vma(vmi, vma) {
 		unsigned long hstart, hend;
 
 		cond_resched();
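
khugepaged resumes each scan at khugepaged_scan.address, so the iterator is seeded with that address rather than always walking from the first VMA. A simplified sketch of the resulting loop shape, assuming the locking shown in the hunk above (function and variable names outside the iterator API are placeholders):

	static void khugepaged_scan_sketch(struct mm_struct *mm, unsigned long resume_addr)
	{
		struct vma_iterator vmi;
		struct vm_area_struct *vma;

		if (unlikely(!mmap_read_trylock(mm)))
			return;

		/* Seed the maple-tree walk at the saved resume address. */
		vma_iter_init(&vmi, mm, resume_addr);
		for_each_vma(vmi, vma) {
			cond_resched();
			/* ... attempt collapse within this vma, advance resume_addr ... */
		}

		/*
		 * for_each_vma() leaves vma == NULL once the tree is exhausted,
		 * matching the exit condition of the old "for (; vma; vma = vma->vm_next)".
		 */
		mmap_read_unlock(mm);
	}

Where the start address is known at declaration time, the VMA_ITERATOR(vmi, mm, addr) macro can declare and seed the iterator in one step instead of a separate vma_iter_init() call.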