author     David Hildenbrand <david@redhat.com>          2025-09-01 18:03:55 +0300
committer  Andrew Morton <akpm@linux-foundation.org>     2025-09-22 00:22:09 +0300
commit     b5ba761a7f5612759770117657577925fcb2e668 (patch)
tree       0bb56560c2df095da0f80d7234e11d754c6b53bb
parent     ce00897b94bc5c62fab962625efcf1ab824d3688 (diff)
download   linux-b5ba761a7f5612759770117657577925fcb2e668.tar.xz
mm/gup: drop nth_page() usage in unpin_user_page_range_dirty_lock()
There is the concern that unpin_user_page_range_dirty_lock() might do some
weird merging of PFN ranges -- either now or in the future -- such that the
PFN range is contiguous but the page range might not be. Let's sanity-check
for that and drop the nth_page() usage.

Link: https://lkml.kernel.org/r/20250901150359.867252-35-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
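For context, the helper used by this patch, page_range_contiguous(), is not
part of this diff; it is introduced elsewhere in the series. A minimal sketch
of the kind of check the sanity-check implies could look like the following
(the function name with the _sketch suffix is hypothetical, assumes
<linux/mm.h>, and the in-tree helper may be implemented differently, e.g. by
only probing memory-section boundaries):

	/*
	 * Illustrative only: verify that "page + i" addresses the same
	 * struct page as pfn_to_page(pfn_of(page) + i) for the whole range,
	 * i.e. the PFN range and the struct page range are both contiguous.
	 */
	static inline bool page_range_contiguous_sketch(const struct page *page,
			unsigned long nr_pages)
	{
		const unsigned long start_pfn = page_to_pfn(page);
		unsigned long i;

		for (i = 1; i < nr_pages; i++)
			if (pfn_to_page(start_pfn + i) != page + i)
				return false;
		return true;
	}

With such a check in place, gup_folio_range_next() can safely use plain
pointer arithmetic ("start + i") instead of nth_page().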
-rw-r--r--  mm/gup.c | 8
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/mm/gup.c b/mm/gup.c
index b8aee964421c..83438bbbf2f6 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -237,7 +237,7 @@ void folio_add_pin(struct folio *folio)
static inline struct folio *gup_folio_range_next(struct page *start,
unsigned long npages, unsigned long i, unsigned int *ntails)
{
- struct page *next = nth_page(start, i);
+ struct page *next = start + i;
struct folio *folio = page_folio(next);
unsigned int nr = 1;
@@ -342,6 +342,10 @@ EXPORT_SYMBOL(unpin_user_pages_dirty_lock);
* "gup-pinned page range" refers to a range of pages that has had one of the
* pin_user_pages() variants called on that page.
*
+ * The page range must be truly physically contiguous: the page range
+ * corresponds to a contiguous PFN range and all pages can be iterated
+ * naturally.
+ *
* For the page ranges defined by [page .. page+npages], make that range (or
* its head pages, if a compound page) dirty, if @make_dirty is true, and if the
* page range was previously listed as clean.
@@ -359,6 +363,8 @@ void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
struct folio *folio;
unsigned int nr;
+ VM_WARN_ON_ONCE(!page_range_contiguous(page, npages));
+
for (i = 0; i < npages; i += nr) {
folio = gup_folio_range_next(page, npages, i, &nr);
if (make_dirty && !folio_test_dirty(folio)) {