author | Suren Baghdasaryan <surenb@google.com> | 2023-08-04 18:27:23 +0300
committer | Andrew Morton <akpm@linux-foundation.org> | 2023-08-21 23:37:46 +0300
commit | ad9f006351c3368171458ae7ab14d72f031b239f (patch)
tree | 654bdfd191b866fcf7b1ec3335f9e8c69cf6d5ec /mm/mmap.c
parent | 60081bf19b0ec8fa40c589bd361fa2bc763f1050 (diff)
download | linux-ad9f006351c3368171458ae7ab14d72f031b239f.tar.xz
mm: always lock new vma before inserting into vma tree
While it's not strictly necessary to lock a newly created vma before
adding it into the vma tree (as long as no further changes are performed
to it), it seems like a good policy to lock it and prevent accidental
changes after it becomes visible to page faults. Lock the vma before
adding it into the vma tree.
[akpm@linux-foundation.org: fix reject fixing in vma_link(), per Jann]
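As a rough illustration of the resulting ordering, below is a condensed,
paraphrased sketch of vma_link() with this patch applied. Only the lines
visible in the first diff hunk are taken from the patch; the iterator
setup (the address passed to VMA_ITERATOR), the file-mapping linkage and
the accounting tail are approximations or elided, so read it as an outline
of the locking order rather than the exact mm/mmap.c code:

static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
{
        VMA_ITERATOR(vmi, mm, vma->vm_start);   /* address choice is approximate */

        if (vma_iter_prealloc(&vmi, vma))
                return -ENOMEM;

        /*
         * Write-lock the new vma before vma_iter_store() makes it visible
         * in the vma tree, so page faults that find it under the per-VMA
         * lock cannot race with any later setup of the vma.
         */
        vma_start_write(vma);

        vma_iter_store(&vmi, vma);

        if (vma->vm_file) {
                /* ... file/i_mmap linkage elided ... */
        }

        /* ... map_count accounting, validate_mm() etc. elided ... */
        return 0;
}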
Link: https://lkml.kernel.org/r/20230804152724.3090321-6-surenb@google.com
Suggested-by: Jann Horn <jannh@google.com>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Jann Horn <jannh@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/mmap.c')
-rw-r--r-- | mm/mmap.c | 7
1 file changed, 5 insertions, 2 deletions
diff --git a/mm/mmap.c b/mm/mmap.c
index 35b6bc9c7c95..ef584aca1cd3 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -401,6 +401,8 @@ static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
 	if (vma_iter_prealloc(&vmi, vma))
 		return -ENOMEM;
 
+	vma_start_write(vma);
+
 	vma_iter_store(&vmi, vma);
 
 	if (vma->vm_file) {
@@ -463,7 +465,8 @@ static inline void vma_prepare(struct vma_prepare *vp)
 	vma_start_write(vp->vma);
 	if (vp->adj_next)
 		vma_start_write(vp->adj_next);
-	/* vp->insert is always a newly created VMA, no need for locking */
+	if (vp->insert)
+		vma_start_write(vp->insert);
 	if (vp->remove)
 		vma_start_write(vp->remove);
 	if (vp->remove2)
@@ -3093,6 +3096,7 @@ static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	vma->vm_pgoff = addr >> PAGE_SHIFT;
 	vm_flags_init(vma, flags);
 	vma->vm_page_prot = vm_get_page_prot(flags);
+	vma_start_write(vma);
 	if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
 		goto mas_store_fail;
 
@@ -3341,7 +3345,6 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 		get_file(new_vma->vm_file);
 		if (new_vma->vm_ops && new_vma->vm_ops->open)
 			new_vma->vm_ops->open(new_vma);
-		vma_start_write(new_vma);
 		if (vma_link(mm, new_vma))
 			goto out_vma_link;
 		*need_rmap_locks = false;