author	Mike Kravetz <mike.kravetz@oracle.com>	2022-09-15 01:18:06 +0300
committer	Andrew Morton <akpm@linux-foundation.org>	2022-10-04 00:03:16 +0300
commit	12710fd696343a0d6c318bdad22fa7809af7859b (patch)
tree	6e4a3a28696dde88c32f55bce7b9eb1f8105581a /mm
parent	c86272287bc65cb3d698a95c19651265e9f287cd (diff)
hugetlb: rename vma_shareable() and refactor code
Rename the routine vma_shareable to vma_addr_pmd_shareable as it is
checking a specific address within the vma.  Refactor code to check if
an aligned range is shareable as this will be needed in a subsequent
patch.

Link: https://lkml.kernel.org/r/20220914221810.95771-6-mike.kravetz@oracle.com
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: James Houghton <jthoughton@google.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Naoya Horiguchi <naoya.horiguchi@linux.dev>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Prakash Sangappa <prakash.sangappa@oracle.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
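The point of the split is that shareability can now be asked about any
PUD-aligned range, not only the single range containing a faulting
address.  As a rough sketch of the kind of caller the subsequent patch
enables (hypothetical, not part of this patch; only
__vma_aligned_range_pmd_shareable and the PUD_SIZE granularity come
from the diff below):

	/*
	 * Hypothetical illustration only -- not from this patch.
	 * Walk a VMA one PUD-sized region at a time and ask the new
	 * range helper whether any region is a PMD-sharing candidate.
	 */
	static bool vma_any_pmd_shareable(struct vm_area_struct *vma)
	{
		unsigned long start = ALIGN(vma->vm_start, PUD_SIZE);
		unsigned long end;

		for (; start < vma->vm_end; start += PUD_SIZE) {
			end = start + PUD_SIZE;
			if (__vma_aligned_range_pmd_shareable(vma, start, end))
				return true;
		}
		return false;
	}

Because range_in_vma() is checked inside the helper, the loop does not
need to clamp end against vm_end itself.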
Diffstat (limited to 'mm')
-rw-r--r--	mm/hugetlb.c	19
1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index accb166791c7..482f7f357f75 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6640,26 +6640,33 @@ static unsigned long page_table_shareable(struct vm_area_struct *svma,
 	return saddr;
 }
 
-static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
+static bool __vma_aligned_range_pmd_shareable(struct vm_area_struct *vma,
+				unsigned long start, unsigned long end)
 {
-	unsigned long base = addr & PUD_MASK;
-	unsigned long end = base + PUD_SIZE;
-
 	/*
 	 * check on proper vm_flags and page table alignment
 	 */
-	if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
+	if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, start, end))
 		return true;
 	return false;
 }
 
+static bool vma_addr_pmd_shareable(struct vm_area_struct *vma,
+				   unsigned long addr)
+{
+	unsigned long start = addr & PUD_MASK;
+	unsigned long end = start + PUD_SIZE;
+
+	return __vma_aligned_range_pmd_shareable(vma, start, end);
+}
+
 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
 {
 #ifdef CONFIG_USERFAULTFD
 	if (uffd_disable_huge_pmd_share(vma))
 		return false;
 #endif
-	return vma_shareable(vma, addr);
+	return vma_addr_pmd_shareable(vma, addr);
 }
 
 /*