author    Matthew Wilcox (Oracle) <willy@infradead.org>  2022-02-09 23:21:28 +0300
committer Matthew Wilcox (Oracle) <willy@infradead.org>  2022-03-15 15:23:25 +0300
commit    5ad6b2bdaaea712486145fa5a78ec24d25289071 (patch)
tree      5955982c75ab125ace0c4d14190579918eca89b5
parent    2e7e80f7e7e9dbbb3c2a85ee923ca32826052816 (diff)
fs: Turn do_invalidatepage() into folio_invalidate()
Take a folio instead of a page, fix the types of the offset & length, and
export it to filesystems.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Tested-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Acked-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Tested-by: Mike Marshall <hubcap@omnibond.com> # orangefs
Tested-by: David Howells <dhowells@redhat.com> # afs
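
For illustration only (not part of this patch): a minimal sketch of how a
filesystem helper might call the newly exported folio_invalidate() where it
previously could not reach do_invalidatepage(). The helper name
example_fs_discard_tail and its use of offset_in_folio() are assumptions for
the sketch, not code from this commit.

	/* Hypothetical caller: drop private data in a folio beyond a new EOF. */
	static void example_fs_discard_tail(struct folio *folio, loff_t new_size)
	{
		size_t offset = offset_in_folio(folio, new_size);

		/* Only folios carrying private (e.g. buffer) data need invalidating. */
		if (folio_has_private(folio))
			folio_invalidate(folio, offset, folio_size(folio) - offset);
	}
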
-rw-r--r--  include/linux/mm.h       |  3
-rw-r--r--  include/linux/pagemap.h  |  1
-rw-r--r--  mm/readahead.c           |  2
-rw-r--r--  mm/truncate.c            | 20
4 files changed, 12 insertions(+), 14 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 213cc569b192..7808a7959066 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1939,9 +1939,6 @@ int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
struct page **pages);
struct page *get_dump_page(unsigned long addr);
-extern void do_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length);
-
bool folio_mark_dirty(struct folio *folio);
bool set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 55a80d8f0e9c..4503d5baa252 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -893,6 +893,7 @@ static inline void cancel_dirty_page(struct page *page)
}
bool folio_clear_dirty_for_io(struct folio *folio);
bool clear_page_dirty_for_io(struct page *page);
+void folio_invalidate(struct folio *folio, size_t offset, size_t length);
int __must_check folio_write_one(struct folio *folio);
static inline int __must_check write_one_page(struct page *page)
{
diff --git a/mm/readahead.c b/mm/readahead.c
index cf0dcf89eb69..c3c4c30fc121 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -51,7 +51,7 @@ static void read_cache_pages_invalidate_page(struct address_space *mapping,
if (!trylock_page(page))
BUG();
page->mapping = mapping;
- do_invalidatepage(page, 0, PAGE_SIZE);
+ folio_invalidate(page_folio(page), 0, PAGE_SIZE);
page->mapping = NULL;
unlock_page(page);
}
diff --git a/mm/truncate.c b/mm/truncate.c
index 9dbf0b75da5d..aa0ed373789d 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -138,33 +138,33 @@ static int invalidate_exceptional_entry2(struct address_space *mapping,
}
/**
- * do_invalidatepage - invalidate part or all of a page
- * @page: the page which is affected
+ * folio_invalidate - Invalidate part or all of a folio.
+ * @folio: The folio which is affected.
* @offset: start of the range to invalidate
* @length: length of the range to invalidate
*
- * do_invalidatepage() is called when all or part of the page has become
+ * folio_invalidate() is called when all or part of the folio has become
* invalidated by a truncate operation.
*
- * do_invalidatepage() does not have to release all buffers, but it must
+ * folio_invalidate() does not have to release all buffers, but it must
* ensure that no dirty buffer is left outside @offset and that no I/O
* is underway against any of the blocks which are outside the truncation
* point. Because the caller is about to free (and possibly reuse) those
* blocks on-disk.
*/
-void do_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length)
+void folio_invalidate(struct folio *folio, size_t offset, size_t length)
{
void (*invalidatepage)(struct page *, unsigned int, unsigned int);
- invalidatepage = page->mapping->a_ops->invalidatepage;
+ invalidatepage = folio->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
if (!invalidatepage)
invalidatepage = block_invalidatepage;
#endif
if (invalidatepage)
- (*invalidatepage)(page, offset, length);
+ (*invalidatepage)(&folio->page, offset, length);
}
+EXPORT_SYMBOL_GPL(folio_invalidate);
/*
* If truncate cannot remove the fs-private metadata from the page, the page
@@ -182,7 +182,7 @@ static void truncate_cleanup_folio(struct folio *folio)
unmap_mapping_folio(folio);
if (folio_has_private(folio))
- do_invalidatepage(&folio->page, 0, folio_size(folio));
+ folio_invalidate(folio, 0, folio_size(folio));
/*
* Some filesystems seem to re-dirty the page even after
@@ -264,7 +264,7 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
folio_zero_range(folio, offset, length);
if (folio_has_private(folio))
- do_invalidatepage(&folio->page, offset, length);
+ folio_invalidate(folio, offset, length);
if (!folio_test_large(folio))
return true;
if (split_huge_page(&folio->page) == 0)
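
As the mm/truncate.c hunk above shows, folio_invalidate() still dispatches to
the page-based ->invalidatepage address_space operation at this point in the
series, falling back to block_invalidatepage() under CONFIG_BLOCK. A
filesystem therefore keeps wiring up that callback as before; the names
example_invalidatepage and example_aops below are illustrative only, not part
of this commit.

	/* Hypothetical ->invalidatepage implementation for a block-backed fs. */
	static void example_invalidatepage(struct page *page, unsigned int offset,
					   unsigned int length)
	{
		/* Tear down private per-block state over [offset, offset + length). */
		block_invalidatepage(page, offset, length);
	}

	static const struct address_space_operations example_aops = {
		.invalidatepage	= example_invalidatepage,
		/* other operations elided */
	};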