summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKairui Song <kasong@tencent.com>2026-02-01 20:47:32 +0300
committerAndrew Morton <akpm@linux-foundation.org>2026-04-05 23:53:02 +0300
commitae1a645def136e23b81330763edb76c554ce6e23 (patch)
treee30a2d92ca350d42091f5a69a468c458df175c10
parent1c7b8d8a51cc1022bcf6604adf3f1963f8162f3f (diff)
downloadlinux-ae1a645def136e23b81330763edb76c554ce6e23.tar.xz
mm/zswap: remove SWP_SYNCHRONOUS_IO swapcache bypass workaround
Since commit f1879e8a0c60 ("mm, swap: never bypass the swap cache even for SWP_SYNCHRONOUS_IO"), all swap-in operations go through the swap cache, including those from SWP_SYNCHRONOUS_IO devices like zram. This means the workaround for swap cache bypassing introduced by commit 25cd241408a2 ("mm: zswap: fix data loss on SWP_SYNCHRONOUS_IO devices") is no longer needed. Remove it, but keep the comments that are still helpful. Link: https://lkml.kernel.org/r/20260202-zswap-syncio-cleanup-v1-1-86bb24a64521@tencent.com Signed-off-by: Kairui Song <kasong@tencent.com> Suggested-by: Yosry Ahmed <yosry.ahmed@linux.dev> Reviewed-by: Barry Song <baohua@kernel.org> Acked-by: Chris Li <chrisl@kernel.org> Acked-by: Yosry Ahmed <yosry.ahmed@linux.dev> Acked-by: Nhat Pham <nphamcs@gmail.com> Reviewed-by: Chengming Zhou <chengming.zhou@linux.dev> Cc: Baoquan He <bhe@redhat.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Kairui Song <kasong@tencent.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r--mm/zswap.c19
1 file changed, 6 insertions, 13 deletions
diff --git a/mm/zswap.c b/mm/zswap.c
index 16b2ef7223e1..0823cadd02b6 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1595,11 +1595,11 @@ int zswap_load(struct folio *folio)
{
swp_entry_t swp = folio->swap;
pgoff_t offset = swp_offset(swp);
- bool swapcache = folio_test_swapcache(folio);
struct xarray *tree = swap_zswap_tree(swp);
struct zswap_entry *entry;
VM_WARN_ON_ONCE(!folio_test_locked(folio));
+ VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
if (zswap_never_enabled())
return -ENOENT;
@@ -1630,22 +1630,15 @@ int zswap_load(struct folio *folio)
count_objcg_events(entry->objcg, ZSWPIN, 1);
/*
- * When reading into the swapcache, invalidate our entry. The
- * swapcache can be the authoritative owner of the page and
+ * We are reading into the swapcache, invalidate zswap entry.
+ * The swapcache is the authoritative owner of the page and
* its mappings, and the pressure that results from having two
* in-memory copies outweighs any benefits of caching the
* compression work.
- *
- * (Most swapins go through the swapcache. The notable
- * exception is the singleton fault on SWP_SYNCHRONOUS_IO
- * files, which reads into a private page and may free it if
- * the fault fails. We remain the primary owner of the entry.)
*/
- if (swapcache) {
- folio_mark_dirty(folio);
- xa_erase(tree, offset);
- zswap_entry_free(entry);
- }
+ folio_mark_dirty(folio);
+ xa_erase(tree, offset);
+ zswap_entry_free(entry);
folio_unlock(folio);
return 0;