author		Suren Baghdasaryan <surenb@google.com>	2023-02-27 20:36:14 +0300
committer	Andrew Morton <akpm@linux-foundation.org>	2023-04-06 06:02:58 +0300
commit		55fd6fccad3172c0feaaa817f0a1283629ff183e (patch)
tree		55be1a6fc8ff3a939bb3ff2692977234c06c9484 /include
parent		ccf1d78d8b86e28502fa1b575a459a402177def4 (diff)
mm/khugepaged: write-lock VMA while collapsing a huge page
Protect VMA from concurrent page fault handler while collapsing a huge
page. Page fault handler needs a stable PMD to use PTL and relies on
per-VMA lock to prevent concurrent PMD changes. pmdp_collapse_flush(),
set_huge_pmd() and collapse_and_free_pmd() can modify a PMD, which will
not be detected by a page fault handler without proper locking.

Before this patch, page tables can be walked under any one of the
mmap_lock, the mapping lock, and the anon_vma lock; so when khugepaged
unlinks and frees page tables, it must ensure that all of those either
are locked or don't exist. This patch adds a fourth lock under which
page tables can be traversed, and so khugepaged must also lock out that
one.

[surenb@google.com: vm_lock/i_mmap_rwsem inversion in retract_page_tables]
  Link: https://lkml.kernel.org/r/20230303213250.3555716-1-surenb@google.com
[surenb@google.com: build fix]
  Link: https://lkml.kernel.org/r/CAJuCfpFjWhtzRE1X=J+_JjgJzNKhq-=JT8yTBSTHthwp0pqWZw@mail.gmail.com
Link: https://lkml.kernel.org/r/20230227173632.3292573-16-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
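A minimal sketch of the pattern the patch establishes on the collapse side; the function name, parameters and simplified flow here are hypothetical, not the patch's actual khugepaged code. The point is that the VMA must be write-locked before any PMD-modifying operation runs, so a page fault handler serialized on the per-VMA lock can never observe a half-collapsed PMD.

#include <linux/mm.h>

/*
 * Hypothetical sketch, not the actual khugepaged call site: write-lock
 * the VMA before rewriting its PMD so that a concurrent page fault
 * handler, which takes the per-VMA lock, sees either the old or the new
 * PMD but never an intermediate state.
 */
static void collapse_pmd_sketch(struct vm_area_struct *vma,
				unsigned long haddr, pmd_t *pmd)
{
	/* khugepaged already holds mmap_lock for write at this point */
	mmap_assert_write_locked(vma->vm_mm);

	/* lock out page fault handlers relying on the per-VMA lock */
	vma_start_write(vma);

	/* the PMD may now be modified safely, e.g.: */
	/* pmdp_collapse_flush(vma, haddr, pmd); */
}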
Diffstat (limited to 'include')
-rw-r--r--	include/linux/mm.h	41
1 file changed, 30 insertions(+), 11 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d0a6c99aba09..d6a2abc51e3d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -665,18 +665,23 @@ static inline void vma_end_read(struct vm_area_struct *vma)
rcu_read_unlock();
}
-static inline void vma_start_write(struct vm_area_struct *vma)
+static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq)
{
- int mm_lock_seq;
-
mmap_assert_write_locked(vma->vm_mm);
/*
* current task is holding mmap_write_lock, both vma->vm_lock_seq and
* mm->mm_lock_seq can't be concurrently modified.
*/
- mm_lock_seq = READ_ONCE(vma->vm_mm->mm_lock_seq);
- if (vma->vm_lock_seq == mm_lock_seq)
+ *mm_lock_seq = READ_ONCE(vma->vm_mm->mm_lock_seq);
+ return (vma->vm_lock_seq == *mm_lock_seq);
+}
+
+static inline void vma_start_write(struct vm_area_struct *vma)
+{
+ int mm_lock_seq;
+
+ if (__is_vma_write_locked(vma, &mm_lock_seq))
return;
down_write(&vma->lock);
@@ -684,14 +689,26 @@ static inline void vma_start_write(struct vm_area_struct *vma)
vma->vm_lock_seq = mm_lock_seq;
up_write(&vma->lock);
}
+static inline bool vma_try_start_write(struct vm_area_struct *vma)
+{
+ int mm_lock_seq;
+
+ if (__is_vma_write_locked(vma, &mm_lock_seq))
+ return true;
+
+ if (!down_write_trylock(&vma->lock))
+ return false;
+
+ vma->vm_lock_seq = mm_lock_seq;
+ up_write(&vma->lock);
+ return true;
+}
+
static inline void vma_assert_write_locked(struct vm_area_struct *vma)
{
- mmap_assert_write_locked(vma->vm_mm);
- /*
- * current task is holding mmap_write_lock, both vma->vm_lock_seq and
- * mm->mm_lock_seq can't be concurrently modified.
- */
- VM_BUG_ON_VMA(vma->vm_lock_seq != READ_ONCE(vma->vm_mm->mm_lock_seq), vma);
+ int mm_lock_seq;
+
+ VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma);
}
#else /* CONFIG_PER_VMA_LOCK */
@@ -701,6 +718,8 @@ static inline bool vma_start_read(struct vm_area_struct *vma)
{ return false; }
static inline void vma_end_read(struct vm_area_struct *vma) {}
static inline void vma_start_write(struct vm_area_struct *vma) {}
+static inline bool vma_try_start_write(struct vm_area_struct *vma)
+ { return true; }
static inline void vma_assert_write_locked(struct vm_area_struct *vma) {}
#endif /* CONFIG_PER_VMA_LOCK */
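The trylock variant exists because of the vm_lock/i_mmap_rwsem inversion noted in the changelog: retract_page_tables() already holds the mapping's i_mmap lock, and the per-VMA lock is ordered ahead of i_mmap_rwsem on other paths, so sleeping on the VMA lock there would invert the established lock order. A sketch of that usage pattern; the function name and simplified flow are illustrative, not the actual retract_page_tables() code.

#include <linux/mm.h>

/*
 * Illustrative sketch of the trylock pattern, simplified from the
 * retract_page_tables() situation described in the changelog: the
 * caller holds the mapping's i_mmap lock and therefore must not sleep
 * on either the mmap_lock or the per-VMA lock.
 */
static void retract_sketch(struct mm_struct *mm, struct vm_area_struct *vma)
{
	if (!mmap_write_trylock(mm))
		return;			/* mmap_lock contended: skip this mm */

	if (vma_try_start_write(vma)) {
		/* VMA write-locked: safe to unlink and free page tables */
		/* collapse_and_free_pmd(mm, vma, addr, pmd); */
	}

	mmap_write_unlock(mm);
}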