author     Matthew Wilcox (Oracle) <willy@infradead.org>    2024-10-05 23:01:14 +0300
committer  Andrew Morton <akpm@linux-foundation.org>        2024-11-08 01:38:07 +0300
commit     713da0b33b3e9d16272b57f4c44dee5c052be9b7 (patch)
tree       f14ecf143015a993a9bdecad7d5e434d962b9c84
parent     7d3e93eca3ca28bb5927b09b9b603c0c995bcd24 (diff)
download   linux-713da0b33b3e9d16272b57f4c44dee5c052be9b7.tar.xz
mm: renovate page_address_in_vma()
This function doesn't modify any of its arguments, so if we make a few
other functions take const pointers, we can make page_address_in_vma()
take const pointers too.  All of its callers have the containing folio
already, so pass that in as an argument instead of recalculating it.
Also add kernel-doc.

Link: https://lkml.kernel.org/r/20241005200121.3231142-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
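For context on how existing call sites adapt, here is a minimal before/after
sketch of a caller.  The function my_page_addr() is hypothetical and only
illustrates the signature change; it is not part of this patch.

    #include <linux/mm.h>
    #include <linux/rmap.h>

    /* Hypothetical caller, for illustration only. */
    static unsigned long my_page_addr(struct page *page,
                                      struct vm_area_struct *vma)
    {
            /* Old call: the callee derived the folio internally.     */
            /*      return page_address_in_vma(page, vma);            */

            /* New call: the caller passes the folio it already has.  */
            struct folio *folio = page_folio(page);

            return page_address_in_vma(folio, page, vma);
    }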
-rw-r--r--   include/linux/rmap.h     7
-rw-r--r--   mm/internal.h            4
-rw-r--r--   mm/ksm.c                 7
-rw-r--r--   mm/memory-failure.c      2
-rw-r--r--   mm/mempolicy.c           2
-rw-r--r--   mm/rmap.c               27
-rw-r--r--   mm/util.c                2
7 files changed, 30 insertions, 21 deletions
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index d5e93e44322e..78923015a2e8 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -728,11 +728,8 @@ page_vma_mapped_walk_restart(struct page_vma_mapped_walk *pvmw)
}
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);
-
-/*
- * Used by swapoff to help locate where page is expected in vma.
- */
-unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
+unsigned long page_address_in_vma(const struct folio *folio,
+ const struct page *, const struct vm_area_struct *);
/*
* Cleans the PTEs of shared mappings.
diff --git a/mm/internal.h b/mm/internal.h
index cd96848be245..8674f677304a 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -841,7 +841,7 @@ static inline bool free_area_empty(struct free_area *area, int migratetype)
}
/* mm/util.c */
-struct anon_vma *folio_anon_vma(struct folio *folio);
+struct anon_vma *folio_anon_vma(const struct folio *folio);
#ifdef CONFIG_MMU
void unmap_mapping_folio(struct folio *folio);
@@ -959,7 +959,7 @@ extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
* If any page in this range is mapped by this VMA, return the first address
* where any of these pages appear. Otherwise, return -EFAULT.
*/
-static inline unsigned long vma_address(struct vm_area_struct *vma,
+static inline unsigned long vma_address(const struct vm_area_struct *vma,
pgoff_t pgoff, unsigned long nr_pages)
{
unsigned long address;
diff --git a/mm/ksm.c b/mm/ksm.c
index e596bc1b5fa7..b813225a806d 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1256,7 +1256,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct folio *folio,
if (WARN_ON_ONCE(folio_test_large(folio)))
return err;
- pvmw.address = page_address_in_vma(&folio->page, vma);
+ pvmw.address = page_address_in_vma(folio, folio_page(folio, 0), vma);
if (pvmw.address == -EFAULT)
goto out;
@@ -1340,7 +1340,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
{
struct folio *kfolio = page_folio(kpage);
struct mm_struct *mm = vma->vm_mm;
- struct folio *folio;
+ struct folio *folio = page_folio(page);
pmd_t *pmd;
pmd_t pmde;
pte_t *ptep;
@@ -1350,7 +1350,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
int err = -EFAULT;
struct mmu_notifier_range range;
- addr = page_address_in_vma(page, vma);
+ addr = page_address_in_vma(folio, page, vma);
if (addr == -EFAULT)
goto out;
@@ -1416,7 +1416,6 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
ptep_clear_flush(vma, addr, ptep);
set_pte_at(mm, addr, ptep, newpte);
- folio = page_folio(page);
folio_remove_rmap_pte(folio, page, vma);
if (!folio_mapped(folio))
folio_free_swap(folio);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 58a3d80961a4..ea9d883c01c1 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -671,7 +671,7 @@ static void collect_procs_file(struct folio *folio, struct page *page,
*/
if (vma->vm_mm != t->mm)
continue;
- addr = page_address_in_vma(page, vma);
+ addr = page_address_in_vma(folio, page, vma);
add_to_kill_anon_file(t, page, vma, to_kill, addr);
}
}
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index a29eff5d0585..bb37cd1a51d8 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1367,7 +1367,7 @@ static long do_mbind(unsigned long start, unsigned long len,
if (!list_entry_is_head(folio, &pagelist, lru)) {
vma_iter_init(&vmi, mm, start);
for_each_vma_range(vmi, vma, end) {
- addr = page_address_in_vma(
+ addr = page_address_in_vma(folio,
folio_page(folio, 0), vma);
if (addr != -EFAULT)
break;
diff --git a/mm/rmap.c b/mm/rmap.c
index e5ec8304a193..d4e5fe94fa92 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -767,14 +767,27 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
-/*
- * At what user virtual address is page expected in vma?
- * Caller should check the page is actually part of the vma.
+/**
+ * page_address_in_vma - The virtual address of a page in this VMA.
+ * @folio: The folio containing the page.
+ * @page: The page within the folio.
+ * @vma: The VMA we need to know the address in.
+ *
+ * Calculates the user virtual address of this page in the specified VMA.
+ * It is the caller's responsibility to check the page is actually
+ * within the VMA. There may not currently be a PTE pointing at this
+ * page, but if a page fault occurs at this address, this is the page
+ * which will be accessed.
+ *
+ * Context: Caller should hold a reference to the folio. Caller should
+ * hold a lock (e.g. the i_mmap_lock or the mmap_lock) which keeps the
+ * VMA from being altered.
+ *
+ * Return: The virtual address corresponding to this page in the VMA.
*/
-unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
+unsigned long page_address_in_vma(const struct folio *folio,
+ const struct page *page, const struct vm_area_struct *vma)
{
- struct folio *folio = page_folio(page);
-
if (folio_test_anon(folio)) {
struct anon_vma *page__anon_vma = folio_anon_vma(folio);
/*
@@ -790,7 +803,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
return -EFAULT;
}
- /* The !page__anon_vma above handles KSM folios */
+ /* KSM folios don't reach here because of the !page__anon_vma check */
return vma_address(vma, page_pgoff(folio, page), 1);
}
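As an aside on the new kernel-doc above, a hedged usage sketch of the
documented calling context follows.  The function my_lookup_addr() and its
choice of mmap_lock are illustrative assumptions, not taken from this patch.

    #include <linux/mm.h>
    #include <linux/mmap_lock.h>
    #include <linux/rmap.h>

    /* Illustration only: the caller holds a folio reference and a lock
     * (here the mmap_lock) that keeps the VMA from being altered.
     */
    static unsigned long my_lookup_addr(struct folio *folio,
                                        struct page *page,
                                        struct vm_area_struct *vma)
    {
            unsigned long addr;

            folio_get(folio);               /* reference per the kernel-doc */
            mmap_read_lock(vma->vm_mm);     /* keep the VMA stable */

            addr = page_address_in_vma(folio, page, vma);

            mmap_read_unlock(vma->vm_mm);
            folio_put(folio);

            /* Callers check for -EFAULT, as ksm.c and mempolicy.c do. */
            return addr;
    }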
diff --git a/mm/util.c b/mm/util.c
index 4f1275023eb7..60017d2a9e48 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -820,7 +820,7 @@ void *vcalloc_noprof(size_t n, size_t size)
}
EXPORT_SYMBOL(vcalloc_noprof);
-struct anon_vma *folio_anon_vma(struct folio *folio)
+struct anon_vma *folio_anon_vma(const struct folio *folio)
{
unsigned long mapping = (unsigned long)folio->mapping;