Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c        | 99
-rw-r--r--  mm/folio-compat.c   |  4
-rw-r--r--  mm/memory.c         |  4
-rw-r--r--  mm/migrate.c        |  2
-rw-r--r--  mm/page-writeback.c | 10
-rw-r--r--  mm/page_io.c        |  2
-rw-r--r--  mm/readahead.c      | 37
-rw-r--r--  mm/secretmem.c      |  8
-rw-r--r--  mm/shmem.c          |  4
-rw-r--r--  mm/swapfile.c       |  2
-rw-r--r--  mm/vmscan.c         | 12
11 files changed, 80 insertions, 104 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 9a1eef6c5d35..fa0ca674450f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -225,12 +225,12 @@ void __filemap_remove_folio(struct folio *folio, void *shadow)
 void filemap_free_folio(struct address_space *mapping, struct folio *folio)
 {
-	void (*freepage)(struct page *);
+	void (*free_folio)(struct folio *);
 	int refs = 1;
-	freepage = mapping->a_ops->freepage;
-	if (freepage)
-		freepage(&folio->page);
+	free_folio = mapping->a_ops->free_folio;
+	if (free_folio)
+		free_folio(folio);
 	if (folio_test_large(folio) && !folio_test_hugetlb(folio))
 		refs = folio_nr_pages(folio);
@@ -807,7 +807,7 @@ void replace_page_cache_page(struct page *old, struct page *new)
 	struct folio *fold = page_folio(old);
 	struct folio *fnew = page_folio(new);
 	struct address_space *mapping = old->mapping;
-	void (*freepage)(struct page *) = mapping->a_ops->freepage;
+	void (*free_folio)(struct folio *) = mapping->a_ops->free_folio;
 	pgoff_t offset = old->index;
 	XA_STATE(xas, &mapping->i_pages, offset);
@@ -835,9 +835,9 @@ void replace_page_cache_page(struct page *old, struct page *new)
 	if (PageSwapBacked(new))
 		__inc_lruvec_page_state(new, NR_SHMEM);
 	xas_unlock_irq(&xas);
-	if (freepage)
-		freepage(old);
-	put_page(old);
+	if (free_folio)
+		free_folio(fold);
+	folio_put(fold);
 }
 EXPORT_SYMBOL_GPL(replace_page_cache_page);
@@ -2414,12 +2414,12 @@ static int filemap_read_folio(struct file *file, struct address_space *mapping,
 	/*
 	 * A previous I/O error may have been due to temporary failures,
-	 * eg. multipath errors. PG_error will be set again if readpage
+	 * eg. multipath errors. PG_error will be set again if read_folio
 	 * fails.
 	 */
 	folio_clear_error(folio);
 	/* Start the actual read. The read will unlock the page. */
-	error = mapping->a_ops->readpage(file, &folio->page);
+	error = mapping->a_ops->read_folio(file, folio);
 	if (error)
 		return error;
@@ -2636,7 +2636,7 @@ err:
 * @already_read: Number of bytes already read by the caller.
 *
 * Copies data from the page cache. If the data is not currently present,
- * uses the readahead and readpage address_space operations to fetch it.
+ * uses the readahead and read_folio address_space operations to fetch it.
 *
 * Return: Total number of bytes copied, including those already read by
 * the caller.
 * If an error happens before any bytes are copied, returns
@@ -3447,7 +3447,7 @@ int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct address_space *mapping = file->f_mapping;
-	if (!mapping->a_ops->readpage)
+	if (!mapping->a_ops->read_folio)
 		return -ENOEXEC;
 	file_accessed(file);
 	vma->vm_ops = &generic_file_vm_ops;
@@ -3483,10 +3483,13 @@ EXPORT_SYMBOL(generic_file_mmap);
 EXPORT_SYMBOL(generic_file_readonly_mmap);
 static struct folio *do_read_cache_folio(struct address_space *mapping,
-		pgoff_t index, filler_t filler, void *data, gfp_t gfp)
+		pgoff_t index, filler_t filler, struct file *file, gfp_t gfp)
 {
 	struct folio *folio;
 	int err;
+
+	if (!filler)
+		filler = mapping->a_ops->read_folio;
 repeat:
 	folio = filemap_get_folio(mapping, index);
 	if (!folio) {
@@ -3503,11 +3506,7 @@ repeat:
 	}
 filler:
-	if (filler)
-		err = filler(data, &folio->page);
-	else
-		err = mapping->a_ops->readpage(data, &folio->page);
-
+	err = filler(file, folio);
 	if (err < 0) {
 		folio_put(folio);
 		return ERR_PTR(err);
@@ -3557,44 +3556,44 @@ out:
 }
 /**
- * read_cache_folio - read into page cache, fill it if needed
- * @mapping: the page's address_space
- * @index: the page index
- * @filler: function to perform the read
- * @data: first arg to filler(data, page) function, often left as NULL
+ * read_cache_folio - Read into page cache, fill it if needed.
+ * @mapping: The address_space to read from.
+ * @index: The index to read.
+ * @filler: Function to perform the read, or NULL to use aops->read_folio().
+ * @file: Passed to filler function, may be NULL if not required.
 *
- * Read into the page cache. If a page already exists, and PageUptodate() is
- * not set, try to fill the page and wait for it to become unlocked.
+ * Read one page into the page cache. If it succeeds, the folio returned
+ * will contain @index, but it may not be the first page of the folio.
 *
- * If the page does not get brought uptodate, return -EIO.
- *
- * The function expects mapping->invalidate_lock to be already held.
+ * If the filler function returns an error, it will be returned to the
+ * caller.
 *
- * Return: up to date page on success, ERR_PTR() on failure.
+ * Context: May sleep. Expects mapping->invalidate_lock to be held.
+ * Return: An uptodate folio on success, ERR_PTR() on failure.
 */
 struct folio *read_cache_folio(struct address_space *mapping, pgoff_t index,
-		filler_t filler, void *data)
+		filler_t filler, struct file *file)
 {
-	return do_read_cache_folio(mapping, index, filler, data,
+	return do_read_cache_folio(mapping, index, filler, file,
 			mapping_gfp_mask(mapping));
 }
 EXPORT_SYMBOL(read_cache_folio);
 static struct page *do_read_cache_page(struct address_space *mapping,
-		pgoff_t index, filler_t *filler, void *data, gfp_t gfp)
+		pgoff_t index, filler_t *filler, struct file *file, gfp_t gfp)
 {
 	struct folio *folio;
-	folio = do_read_cache_folio(mapping, index, filler, data, gfp);
+	folio = do_read_cache_folio(mapping, index, filler, file, gfp);
 	if (IS_ERR(folio))
 		return &folio->page;
 	return folio_file_page(folio, index);
 }
 struct page *read_cache_page(struct address_space *mapping,
-		pgoff_t index, filler_t *filler, void *data)
+		pgoff_t index, filler_t *filler, struct file *file)
 {
-	return do_read_cache_page(mapping, index, filler, data,
+	return do_read_cache_page(mapping, index, filler, file,
 			mapping_gfp_mask(mapping));
 }
 EXPORT_SYMBOL(read_cache_page);
@@ -3622,27 +3621,6 @@ struct page *read_cache_page_gfp(struct address_space *mapping,
 }
 EXPORT_SYMBOL(read_cache_page_gfp);
-int pagecache_write_begin(struct file *file, struct address_space *mapping,
-		loff_t pos, unsigned len, unsigned flags,
-		struct page **pagep, void **fsdata)
-{
-	const struct address_space_operations *aops = mapping->a_ops;
-
-	return aops->write_begin(file, mapping, pos, len, flags,
-			pagep, fsdata);
-}
-EXPORT_SYMBOL(pagecache_write_begin);
-
-int pagecache_write_end(struct file *file, struct address_space *mapping,
-		loff_t pos, unsigned len, unsigned copied,
-		struct page *page, void *fsdata)
-{
-	const struct address_space_operations *aops = mapping->a_ops;
-
-	return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
-}
-EXPORT_SYMBOL(pagecache_write_end);
-
 /*
  * Warn about a page cache invalidation failure during a direct I/O write.
 */
@@ -3754,7 +3732,6 @@ ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i)
 	const struct address_space_operations *a_ops = mapping->a_ops;
 	long status = 0;
 	ssize_t written = 0;
-	unsigned int flags = 0;
 	do {
 		struct page *page;
@@ -3784,7 +3761,7 @@ again:
 			break;
 		}
-		status = a_ops->write_begin(file, mapping, pos, bytes, flags,
+		status = a_ops->write_begin(file, mapping, pos, bytes,
 						&page, &fsdata);
 		if (unlikely(status < 0))
 			break;
@@ -3978,8 +3955,8 @@ bool filemap_release_folio(struct folio *folio, gfp_t gfp)
 	if (folio_test_writeback(folio))
 		return false;
-	if (mapping && mapping->a_ops->releasepage)
-		return mapping->a_ops->releasepage(&folio->page, gfp);
-	return try_to_free_buffers(&folio->page);
+	if (mapping && mapping->a_ops->release_folio)
+		return mapping->a_ops->release_folio(folio, gfp);
+	return try_to_free_buffers(folio);
 }
 EXPORT_SYMBOL(filemap_release_folio);
diff --git a/mm/folio-compat.c b/mm/folio-compat.c
index 46fa179e32fb..20bc15b57d93 100644
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -131,12 +131,10 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
 EXPORT_SYMBOL(pagecache_get_page);
 struct page *grab_cache_page_write_begin(struct address_space *mapping,
-		pgoff_t index, unsigned flags)
+		pgoff_t index)
 {
 	unsigned fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
-	if (flags & AOP_FLAG_NOFS)
-		fgp_flags |= FGP_NOFS;
 	return pagecache_get_page(mapping, index, fgp_flags,
 			mapping_gfp_mask(mapping));
 }
diff --git a/mm/memory.c b/mm/memory.c
index 76e3af9639d9..2a12028a3749 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -555,11 +555,11 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
 	dump_page(page, "bad pte");
 	pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
 		 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
-	pr_alert("file:%pD fault:%ps mmap:%ps readpage:%ps\n",
+	pr_alert("file:%pD fault:%ps mmap:%ps read_folio:%ps\n",
 		 vma->vm_file,
 		 vma->vm_ops ? vma->vm_ops->fault : NULL,
 		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
-		 mapping ? mapping->a_ops->readpage : NULL);
+		 mapping ? mapping->a_ops->read_folio : NULL);
 	dump_stack();
 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 }
diff --git a/mm/migrate.c b/mm/migrate.c
index 6c31ee1e1c9b..21d82636c291 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1013,7 +1013,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 	if (!page->mapping) {
 		VM_BUG_ON_PAGE(PageAnon(page), page);
 		if (page_has_private(page)) {
-			try_to_free_buffers(page);
+			try_to_free_buffers(folio);
 			goto out_unlock_both;
 		}
 	} else if (page_mapped(page)) {
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 7e2da284e427..fa1117db4610 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2602,10 +2602,12 @@ EXPORT_SYMBOL(folio_redirty_for_writepage);
 * folio_mark_dirty - Mark a folio as being modified.
 * @folio: The folio.
 *
- * For folios with a mapping this should be done with the folio lock held
- * for the benefit of asynchronous memory errors who prefer a consistent
- * dirty state. This rule can be broken in some special cases,
- * but should be better not to.
+ * The folio may not be truncated while this function is running.
+ * Holding the folio lock is sufficient to prevent truncation, but some
+ * callers cannot acquire a sleeping lock. These callers instead hold
+ * the page table lock for a page table which contains at least one page
+ * in this folio.
+ * Truncation will block on the page table lock as it
+ * unmaps pages before removing the folio from its mapping.
 *
 * Return: True if the folio was newly dirtied, false if it was already dirty.
 */
diff --git a/mm/page_io.c b/mm/page_io.c
index 3fbdab6a940e..a9444e67ec20 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -336,7 +336,7 @@ int swap_readpage(struct page *page, bool synchronous)
 		struct file *swap_file = sis->swap_file;
 		struct address_space *mapping = swap_file->f_mapping;
-		ret = mapping->a_ops->readpage(swap_file, page);
+		ret = mapping->a_ops->read_folio(swap_file, page_folio(page));
 		if (!ret)
 			count_vm_event(PSWPIN);
 		goto out;
diff --git a/mm/readahead.c b/mm/readahead.c
index 26bf74a6b2fe..b78921b54754 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -15,7 +15,7 @@
 * explicitly requested by the application. Readahead only ever
 * attempts to read folios that are not yet in the page cache. If a
 * folio is present but not up-to-date, readahead will not try to read
- * it. In that case a simple ->readpage() will be requested.
+ * it. In that case a simple ->read_folio() will be requested.
 *
 * Readahead is triggered when an application read request (whether a
 * system call or a page fault) finds that the requested folio is not in
@@ -78,7 +78,7 @@
 * address space operation, for which mpage_readahead() is a canonical
 * implementation. ->readahead() should normally initiate reads on all
 * folios, but may fail to read any or all folios without causing an I/O
- * error. The page cache reading code will issue a ->readpage() request
+ * error. The page cache reading code will issue a ->read_folio() request
 * for any folio which ->readahead() did not read, and only an error
 * from this will be final.
 *
@@ -110,7 +110,7 @@
 * were not fetched with readahead_folio(). This will allow a
 * subsequent synchronous readahead request to try them again. If they
 * are left in the page cache, then they will be read individually using
- * ->readpage() which may be less efficient.
+ * ->read_folio() which may be less efficient.
 */
 #include <linux/blkdev.h>
@@ -146,7 +146,7 @@ EXPORT_SYMBOL_GPL(file_ra_state_init);
 static void read_pages(struct readahead_control *rac)
 {
 	const struct address_space_operations *aops = rac->mapping->a_ops;
-	struct page *page;
+	struct folio *folio;
 	struct blk_plug plug;
 	if (!readahead_count(rac))
@@ -157,24 +157,23 @@ static void read_pages(struct readahead_control *rac)
 	if (aops->readahead) {
 		aops->readahead(rac);
 		/*
-		 * Clean up the remaining pages. The sizes in ->ra
+		 * Clean up the remaining folios. The sizes in ->ra
 		 * may be used to size the next readahead, so make sure
 		 * they accurately reflect what happened.
 		 */
-		while ((page = readahead_page(rac))) {
-			rac->ra->size -= 1;
-			if (rac->ra->async_size > 0) {
-				rac->ra->async_size -= 1;
-				delete_from_page_cache(page);
+		while ((folio = readahead_folio(rac)) != NULL) {
+			unsigned long nr = folio_nr_pages(folio);
+
+			rac->ra->size -= nr;
+			if (rac->ra->async_size >= nr) {
+				rac->ra->async_size -= nr;
+				filemap_remove_folio(folio);
 			}
-			unlock_page(page);
-			put_page(page);
+			folio_unlock(folio);
 		}
 	} else {
-		while ((page = readahead_page(rac))) {
-			aops->readpage(rac->file, page);
-			put_page(page);
-		}
+		while ((folio = readahead_folio(rac)) != NULL)
+			aops->read_folio(rac->file, folio);
 	}
 	blk_finish_plug(&plug);
@@ -255,8 +254,8 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
 	}
 	/*
-	 * Now start the IO. We ignore I/O errors - if the page is not
-	 * uptodate then the caller will launch readpage again, and
+	 * Now start the IO. We ignore I/O errors - if the folio is not
+	 * uptodate then the caller will launch read_folio again, and
 	 * will then handle the error.
 	 */
 	read_pages(ractl);
@@ -304,7 +303,7 @@ void force_page_cache_ra(struct readahead_control *ractl,
 	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
 	unsigned long max_pages, index;
-	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readahead))
+	if (unlikely(!mapping->a_ops->read_folio && !mapping->a_ops->readahead))
 		return;
 	/*
diff --git a/mm/secretmem.c b/mm/secretmem.c
index 3b3cf2892b6a..206ed6b40c1d 100644
--- a/mm/secretmem.c
+++ b/mm/secretmem.c
@@ -145,15 +145,15 @@ static int secretmem_migratepage(struct address_space *mapping,
 	return -EBUSY;
 }
-static void secretmem_freepage(struct page *page)
+static void secretmem_free_folio(struct folio *folio)
 {
-	set_direct_map_default_noflush(page);
-	clear_highpage(page);
+	set_direct_map_default_noflush(&folio->page);
+	folio_zero_segment(folio, 0, folio_size(folio));
 }
 const struct address_space_operations secretmem_aops = {
 	.dirty_folio	= noop_dirty_folio,
-	.freepage	= secretmem_freepage,
+	.free_folio	= secretmem_free_folio,
 	.migratepage	= secretmem_migratepage,
 	.isolate_page	= secretmem_isolate_page,
 };
diff --git a/mm/shmem.c b/mm/shmem.c
index 4b2fea33158e..f3e8de8ff75c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2426,7 +2426,7 @@ static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
 static int shmem_write_begin(struct file *file, struct address_space *mapping,
-			loff_t pos, unsigned len, unsigned flags,
+			loff_t pos, unsigned len,
 			struct page **pagep, void **fsdata)
 {
 	struct inode *inode = mapping->host;
@@ -4162,7 +4162,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
 *
 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
 * with any new page allocations done using the specified allocation flags.
- * But read_cache_page_gfp() uses the ->readpage() method: which does not
+ * But read_cache_page_gfp() uses the ->read_folio() method: which does not
 * suit tmpfs, since it may have pages in swapcache, and needs to find those
 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
 *
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 981a6e85c88e..6aec1b24f440 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3028,7 +3028,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 	/*
 	 * Read the swap header.
 	 */
-	if (!mapping->a_ops->readpage) {
+	if (!mapping->a_ops->read_folio) {
 		error = -EINVAL;
 		goto bad_swap_unlock_inode;
 	}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1678802e03e7..edc89f26b738 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1181,7 +1181,7 @@ static pageout_t pageout(struct folio *folio, struct address_space *mapping)
 	 * folio->mapping == NULL while being dirty with clean buffers.
 	 */
 	if (folio_test_private(folio)) {
-		if (try_to_free_buffers(&folio->page)) {
+		if (try_to_free_buffers(folio)) {
 			folio_clear_dirty(folio);
 			pr_info("%s: orphaned folio\n", __func__);
 			return PAGE_CLEAN;
@@ -1282,9 +1282,9 @@ static int __remove_mapping(struct address_space *mapping, struct folio *folio,
 		xa_unlock_irq(&mapping->i_pages);
 		put_swap_page(&folio->page, swap);
 	} else {
-		void (*freepage)(struct page *);
+		void (*free_folio)(struct folio *);
-		freepage = mapping->a_ops->freepage;
+		free_folio = mapping->a_ops->free_folio;
 		/*
 		 * Remember a shadow entry for reclaimed file cache in
 		 * order to detect refaults, thus thrashing, later on.
@@ -1310,8 +1310,8 @@ static int __remove_mapping(struct address_space *mapping, struct folio *folio,
 			inode_add_lru(mapping->host);
 		spin_unlock(&mapping->host->i_lock);
-		if (freepage != NULL)
-			freepage(&folio->page);
+		if (free_folio)
+			free_folio(folio);
 	}
 	return 1;
@@ -1451,7 +1451,7 @@ static void folio_check_dirty_writeback(struct folio *folio,
 	mapping = folio_mapping(folio);
 	if (mapping && mapping->a_ops->is_dirty_writeback)
-		mapping->a_ops->is_dirty_writeback(&folio->page, dirty, writeback);
+		mapping->a_ops->is_dirty_writeback(folio, dirty, writeback);
 }
 static struct page *alloc_demote_page(struct page *page, unsigned long node)
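
The central change above is replacing ->readpage(struct file *, struct page *) with ->read_folio(struct file *, struct folio *) throughout mm/. A minimal sketch of what a filesystem-side implementation of the new hook can look like follows; "examplefs" and its zero-filling read are invented for illustration and are not part of this patch.

/*
 * Hypothetical sketch (not part of this patch): a filesystem-side
 * ->read_folio() with the new signature. "examplefs" is an invented name
 * and the zero-fill body stands in for real I/O from a backing store.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

static int examplefs_read_folio(struct file *file, struct folio *folio)
{
	/* Fill the whole folio (possibly larger than one page). */
	folio_zero_range(folio, 0, folio_size(folio));
	folio_mark_uptodate(folio);

	/* Like ->readpage() before it, ->read_folio() must unlock the folio. */
	folio_unlock(folio);
	return 0;
}

static const struct address_space_operations examplefs_aops = {
	.read_folio	= examplefs_read_folio,
	.dirty_folio	= filemap_dirty_folio,
};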
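
The updated read_cache_folio() kernel-doc in the mm/filemap.c hunk states that a NULL @filler now falls back to ->read_folio(), that the returned folio contains @index but may not begin at it, and that mapping->invalidate_lock is expected to be held. A hedged usage sketch under those assumptions; the surrounding function and the inode parameter are hypothetical.

/*
 * Hypothetical usage sketch for the new read_cache_folio() convention:
 * a NULL filler falls back to mapping->a_ops->read_folio(). The inode
 * parameter and the function itself are invented for illustration.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/err.h>

static int example_read_first_byte(struct inode *inode, char *out)
{
	struct address_space *mapping = inode->i_mapping;
	struct folio *folio;
	char *kaddr;

	/* The kernel-doc above expects mapping->invalidate_lock to be held. */
	filemap_invalidate_lock_shared(mapping);
	folio = read_cache_folio(mapping, 0, NULL, NULL);
	filemap_invalidate_unlock_shared(mapping);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/* The folio contains index 0; map it and copy out one byte. */
	kaddr = kmap_local_folio(folio, 0);
	*out = *kaddr;
	kunmap_local(kaddr);

	folio_put(folio);
	return 0;
}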
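
->freepage() becomes ->free_folio() and ->releasepage() becomes ->release_folio(), as seen in the mm/secretmem.c, mm/filemap.c and mm/vmscan.c hunks. The sketch below only demonstrates the new folio-based signatures; the examplefs_* names are invented, and the buffer_head-based release body is one plausible implementation, not something this patch prescribes.

/*
 * Hypothetical callbacks showing only the new folio-based signatures.
 * The examplefs_* names are invented; the buffer_head-based release is
 * one plausible body, not something this patch prescribes.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>

/* ->free_folio(): called once the folio is gone from the page cache. */
static void examplefs_free_folio(struct folio *folio)
{
	/* Release per-folio private accounting here; nothing in this sketch. */
}

/* ->release_folio(): try to drop private state so the folio can be freed. */
static bool examplefs_release_folio(struct folio *folio, gfp_t gfp)
{
	return try_to_free_buffers(folio);
}

static const struct address_space_operations examplefs_file_aops = {
	.release_folio	= examplefs_release_folio,
	.free_folio	= examplefs_free_folio,
};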
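
The write path also loses its flags argument: generic_perform_write() stops passing flags to ->write_begin(), grab_cache_page_write_begin() drops its flags parameter, and the AOP_FLAG_NOFS handling in mm/folio-compat.c goes away. A sketch of the resulting ->write_begin() shape follows; the examplefs name is hypothetical, a real filesystem would pair this with ->write_end(), and the remark about memalloc_nofs_save() describes the usual replacement for AOP_FLAG_NOFS rather than anything contained in this diff.

/*
 * Hypothetical ->write_begin() with the post-patch signature (no flags
 * argument). Callers that relied on AOP_FLAG_NOFS are generally expected
 * to set up a NOFS allocation scope themselves, e.g. via
 * memalloc_nofs_save().
 */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>

static int examplefs_write_begin(struct file *file,
		struct address_space *mapping, loff_t pos, unsigned len,
		struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;

	/* grab_cache_page_write_begin() also lost its flags argument. */
	page = grab_cache_page_write_begin(mapping, index);
	if (!page)
		return -ENOMEM;

	*pagep = page;
	return 0;
}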