 mm/gup.c         | 104 ++++++++++++++++++++++++++++++++++++++++++++++++---
 mm/huge_memory.c |  86 +------------------------------------------
 mm/internal.h    |   5 ++---
 3 files changed, 102 insertions(+), 93 deletions(-)
diff --git a/mm/gup.c b/mm/gup.c
index 2b06d59f2fa3..f7028c698c26 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -580,6 +580,90 @@ static struct page *follow_huge_pud(struct vm_area_struct *vma,
return page;
}
+
+/* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */
+static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
+ struct vm_area_struct *vma,
+ unsigned int flags)
+{
+ /* If the pmd is writable, we can write to the page. */
+ if (pmd_write(pmd))
+ return true;
+
+ /* Maybe FOLL_FORCE is set to override it? */
+ if (!(flags & FOLL_FORCE))
+ return false;
+
+ /* But FOLL_FORCE has no effect on shared mappings */
+ if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
+ return false;
+
+ /* ... or read-only private ones */
+ if (!(vma->vm_flags & VM_MAYWRITE))
+ return false;
+
+ /* ... or already writable ones that just need to take a write fault */
+ if (vma->vm_flags & VM_WRITE)
+ return false;
+
+ /*
+ * See can_change_pte_writable(): we broke COW and could map the page
+ * writable if we have an exclusive anonymous page ...
+ */
+ if (!page || !PageAnon(page) || !PageAnonExclusive(page))
+ return false;
+
+ /* ... and a write-fault isn't required for other reasons. */
+ if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
+ return false;
+ return !userfaultfd_huge_pmd_wp(vma, pmd);
+}
+
+static struct page *follow_huge_pmd(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmd,
+ unsigned int flags,
+ struct follow_page_context *ctx)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ pmd_t pmdval = *pmd;
+ struct page *page;
+ int ret;
+
+ assert_spin_locked(pmd_lockptr(mm, pmd));
+
+ page = pmd_page(pmdval);
+ if ((flags & FOLL_WRITE) &&
+ !can_follow_write_pmd(pmdval, page, vma, flags))
+ return NULL;
+
+ /* Avoid dumping huge zero page */
+ if ((flags & FOLL_DUMP) && is_huge_zero_pmd(pmdval))
+ return ERR_PTR(-EFAULT);
+
+ if (pmd_protnone(pmdval) && !gup_can_follow_protnone(vma, flags))
+ return NULL;
+
+ if (!pmd_write(pmdval) && gup_must_unshare(vma, flags, page))
+ return ERR_PTR(-EMLINK);
+
+ VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
+ !PageAnonExclusive(page), page);
+
+ ret = try_grab_page(page, flags);
+ if (ret)
+ return ERR_PTR(ret);
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ if (pmd_trans_huge(pmdval) && (flags & FOLL_TOUCH))
+ touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+ page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
+ ctx->page_mask = HPAGE_PMD_NR - 1;
+
+ return page;
+}
+
#else /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */
static struct page *follow_huge_pud(struct vm_area_struct *vma,
unsigned long addr, pud_t *pudp,
@@ -587,6 +671,14 @@ static struct page *follow_huge_pud(struct vm_area_struct *vma,
{
return NULL;
}
+
+static struct page *follow_huge_pmd(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmd,
+ unsigned int flags,
+ struct follow_page_context *ctx)
+{
+ return NULL;
+}
#endif /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */
static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
@@ -784,31 +876,31 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
return page;
return no_page_table(vma, flags, address);
}
- if (likely(!pmd_trans_huge(pmdval)))
+ if (likely(!pmd_leaf(pmdval)))
return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
if (pmd_protnone(pmdval) && !gup_can_follow_protnone(vma, flags))
return no_page_table(vma, flags, address);
ptl = pmd_lock(mm, pmd);
- if (unlikely(!pmd_present(*pmd))) {
+ pmdval = *pmd;
+ if (unlikely(!pmd_present(pmdval))) {
spin_unlock(ptl);
return no_page_table(vma, flags, address);
}
- if (unlikely(!pmd_trans_huge(*pmd))) {
+ if (unlikely(!pmd_leaf(pmdval))) {
spin_unlock(ptl);
return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
}
- if (flags & FOLL_SPLIT_PMD) {
+ if (pmd_trans_huge(pmdval) && (flags & FOLL_SPLIT_PMD)) {
spin_unlock(ptl);
split_huge_pmd(vma, pmd, address);
/* If pmd was left empty, stuff a page table in there quickly */
return pte_alloc(mm, pmd) ? ERR_PTR(-ENOMEM) :
follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
}
- page = follow_trans_huge_pmd(vma, address, pmd, flags);
+ page = follow_huge_pmd(vma, address, pmd, flags, ctx);
spin_unlock(ptl);
- ctx->page_mask = HPAGE_PMD_NR - 1;
return page;
}
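
The FOLL_FORCE semantics that the relocated can_follow_write_pmd() encodes are observable from userspace: /proc/<pid>/mem is one of the few remaining FOLL_FORCE users, and a write through it breaks COW on a read-only private mapping while failing on a shared one. A minimal sketch of that behavior, illustrative only and not part of this patch, with error handling trimmed:

/*
 * Illustrative userspace sketch, not part of the patch: /proc/<pid>/mem
 * writes go through GUP with FOLL_FORCE, so this write succeeds on a
 * read-only MAP_PRIVATE mapping (COW is broken), while it would fail on
 * a read-only MAP_SHARED one. Error handling trimmed for brevity.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* A direct store to this mapping would SIGSEGV. */
	volatile char *p = mmap(NULL, 4096, PROT_READ,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	int fd = open("/proc/self/mem", O_RDWR);

	/* Kernel-side this is a FOLL_FORCE | FOLL_WRITE GUP. */
	pwrite(fd, "x", 1, (off_t)(uintptr_t)p);
	printf("after forced write: %c\n", *p);	/* prints 'x' */

	close(fd);
	return 0;
}

Repeating this with MAP_SHARED makes the pwrite() fail instead, matching the "FOLL_FORCE has no effect on shared mappings" check above.
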
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 249318f367e4..157cee64850c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1221,8 +1221,8 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
-static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
- pmd_t *pmd, bool write)
+void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t *pmd, bool write)
{
pmd_t _pmd;
@@ -1577,88 +1577,6 @@ static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
return pmd_dirty(pmd);
}
-/* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */
-static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
- struct vm_area_struct *vma,
- unsigned int flags)
-{
- /* If the pmd is writable, we can write to the page. */
- if (pmd_write(pmd))
- return true;
-
- /* Maybe FOLL_FORCE is set to override it? */
- if (!(flags & FOLL_FORCE))
- return false;
-
- /* But FOLL_FORCE has no effect on shared mappings */
- if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
- return false;
-
- /* ... or read-only private ones */
- if (!(vma->vm_flags & VM_MAYWRITE))
- return false;
-
- /* ... or already writable ones that just need to take a write fault */
- if (vma->vm_flags & VM_WRITE)
- return false;
-
- /*
- * See can_change_pte_writable(): we broke COW and could map the page
- * writable if we have an exclusive anonymous page ...
- */
- if (!page || !PageAnon(page) || !PageAnonExclusive(page))
- return false;
-
- /* ... and a write-fault isn't required for other reasons. */
- if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
- return false;
- return !userfaultfd_huge_pmd_wp(vma, pmd);
-}
-
-struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
- unsigned long addr,
- pmd_t *pmd,
- unsigned int flags)
-{
- struct mm_struct *mm = vma->vm_mm;
- struct page *page;
- int ret;
-
- assert_spin_locked(pmd_lockptr(mm, pmd));
-
- page = pmd_page(*pmd);
- VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
-
- if ((flags & FOLL_WRITE) &&
- !can_follow_write_pmd(*pmd, page, vma, flags))
- return NULL;
-
- /* Avoid dumping huge zero page */
- if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
- return ERR_PTR(-EFAULT);
-
- if (pmd_protnone(*pmd) && !gup_can_follow_protnone(vma, flags))
- return NULL;
-
- if (!pmd_write(*pmd) && gup_must_unshare(vma, flags, page))
- return ERR_PTR(-EMLINK);
-
- VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
- !PageAnonExclusive(page), page);
-
- ret = try_grab_page(page, flags);
- if (ret)
- return ERR_PTR(ret);
-
- if (flags & FOLL_TOUCH)
- touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
-
- page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
- VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
-
- return page;
-}
-
/* NUMA hinting page fault entry point for trans huge pmds */
vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
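
The huge-PMD case that the new mm/gup.c follow_huge_pmd() serves can be provoked from userspace with a PMD-aligned, madvise()d anonymous region. A hedged sketch, assuming a 2MiB PMD size and THP enabled ("always" or "madvise" mode); illustrative only, not part of the change:

/*
 * Illustrative only: map a PMD-aligned anonymous region and ask for THP,
 * so that a later GUP on it (e.g. via /proc/self/mem or
 * process_vm_readv()) can hit the huge-PMD path. Assumes a 2MiB PMD
 * size and THP enabled; error handling trimmed.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define PMD_SIZE	(2UL << 20)

int main(void)
{
	/* Over-allocate so a naturally aligned 2MiB chunk exists inside. */
	char *raw = mmap(NULL, 2 * PMD_SIZE, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	char *huge = (char *)(((unsigned long)raw + PMD_SIZE - 1) &
			      ~(PMD_SIZE - 1));

	madvise(huge, PMD_SIZE, MADV_HUGEPAGE);
	memset(huge, 1, PMD_SIZE);	/* fault in, ideally as one THP */

	/* Check AnonHugePages for this range in /proc/self/smaps. */
	printf("THP candidate at %p\n", huge);
	return 0;
}
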
diff --git a/mm/internal.h b/mm/internal.h
index d06072f06a8d..5933a8e7d2dd 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1114,9 +1114,8 @@ int __must_check try_grab_page(struct page *page, unsigned int flags);
*/
void touch_pud(struct vm_area_struct *vma, unsigned long addr,
pud_t *pud, bool write);
-struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
- unsigned long addr, pmd_t *pmd,
- unsigned int flags);
+void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t *pmd, bool write);
/*
* mm/mmap.c
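
The reworked follow_pmd_mask() path is also reachable without ptrace: process_vm_readv() pins pages through GUP (pin_user_pages_remote()), so pointing it at a THP-backed range like the one above walks the huge-PMD lookup. A self-contained sketch, again illustrative only, that reads the caller's own memory:

/*
 * Illustrative only: process_vm_readv() reaches GUP, so pointing the
 * remote iovec at a THP-backed range (see the previous sketch) would
 * exercise the huge-PMD lookup; a stack buffer keeps this demo minimal.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char src[64] = "hello via GUP";
	char dst[64] = { 0 };
	struct iovec local  = { .iov_base = dst, .iov_len = sizeof(dst) };
	struct iovec remote = { .iov_base = src, .iov_len = sizeof(src) };

	/* Reading our own address space keeps the example self-contained. */
	ssize_t n = process_vm_readv(getpid(), &local, 1, &remote, 1, 0);
	printf("copied %zd bytes: \"%s\"\n", n, dst);
	return 0;
}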