git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
mm/zswap: remove SWP_SYNCHRONOUS_IO swapcache bypass workaround
author: Kairui Song <kasong@tencent.com>
Sun, 1 Feb 2026 17:47:32 +0000 (01:47 +0800)
committer: Andrew Morton <akpm@linux-foundation.org>
Sun, 5 Apr 2026 20:53:02 +0000 (13:53 -0700)
Since commit f1879e8a0c60 ("mm, swap: never bypass the swap cache even for
SWP_SYNCHRONOUS_IO"), all swap-in operations go through the swap cache,
including those from SWP_SYNCHRONOUS_IO devices like zram, which means
the workaround for swap cache bypassing introduced by commit 25cd241408a2
("mm: zswap: fix data loss on SWP_SYNCHRONOUS_IO devices") is no longer
needed.  Remove it, but keep the comments that are still helpful.

Link: https://lkml.kernel.org/r/20260202-zswap-syncio-cleanup-v1-1-86bb24a64521@tencent.com
Signed-off-by: Kairui Song <kasong@tencent.com>
Suggested-by: Yosry Ahmed <yosry.ahmed@linux.dev>
Reviewed-by: Barry Song <baohua@kernel.org>
Acked-by: Chris Li <chrisl@kernel.org>
Acked-by: Yosry Ahmed <yosry.ahmed@linux.dev>
Acked-by: Nhat Pham <nphamcs@gmail.com>
Reviewed-by: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Baoquan He <bhe@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kairui Song <kasong@tencent.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/zswap.c

index 16b2ef7223e12a580cde128d1c66bb673ea5248e..0823cadd02b6311c41b140c9c634cd2de23ddca2 100644 (file)
@@ -1595,11 +1595,11 @@ int zswap_load(struct folio *folio)
 {
        swp_entry_t swp = folio->swap;
        pgoff_t offset = swp_offset(swp);
-       bool swapcache = folio_test_swapcache(folio);
        struct xarray *tree = swap_zswap_tree(swp);
        struct zswap_entry *entry;
 
        VM_WARN_ON_ONCE(!folio_test_locked(folio));
+       VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
 
        if (zswap_never_enabled())
                return -ENOENT;
@@ -1630,22 +1630,15 @@ int zswap_load(struct folio *folio)
                count_objcg_events(entry->objcg, ZSWPIN, 1);
 
        /*
-        * When reading into the swapcache, invalidate our entry. The
-        * swapcache can be the authoritative owner of the page and
+        * We are reading into the swapcache, invalidate zswap entry.
+        * The swapcache is the authoritative owner of the page and
         * its mappings, and the pressure that results from having two
         * in-memory copies outweighs any benefits of caching the
         * compression work.
-        *
-        * (Most swapins go through the swapcache. The notable
-        * exception is the singleton fault on SWP_SYNCHRONOUS_IO
-        * files, which reads into a private page and may free it if
-        * the fault fails. We remain the primary owner of the entry.)
         */
-       if (swapcache) {
-               folio_mark_dirty(folio);
-               xa_erase(tree, offset);
-               zswap_entry_free(entry);
-       }
+       folio_mark_dirty(folio);
+       xa_erase(tree, offset);
+       zswap_entry_free(entry);
 
        folio_unlock(folio);
        return 0;