mm, swap: simplify the code and reduce indention
author Kairui Song <kasong@tencent.com>
Fri, 19 Dec 2025 19:43:34 +0000 (03:43 +0800)
committer Andrew Morton <akpm@linux-foundation.org>
Sat, 31 Jan 2026 22:22:54 +0000 (14:22 -0800)
Now that the swap cache is always used, multiple swap cache checks are no
longer useful; remove them and reduce the code indentation.

No behavior change.

Link: https://lkml.kernel.org/r/20251220-swap-table-p2-v5-5-8862a265a033@tencent.com
Signed-off-by: Kairui Song <kasong@tencent.com>
Reviewed-by: Baoquan He <bhe@redhat.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Chris Li <chrisl@kernel.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Rafael J. Wysocki (Intel) <rafael@kernel.org>
Cc: Yosry Ahmed <yosry.ahmed@linux.dev>
Cc: Deepanshu Kartikey <kartikey406@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kairui Song <ryncsn@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
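
For orientation, the snippet below is a minimal, self-contained sketch of the refactoring pattern applied in the hunks that follow: a wrapper "if (swapcache)" whose condition is now always true is dropped, its body loses one indentation level, and "folio != swapcache && swapcache" collapses to "folio != swapcache". The toy struct folio and the handle_before()/handle_after() helpers are hypothetical stand-ins for illustration, not the kernel code itself.

/* Hypothetical stand-ins for illustration only; not the kernel types. */
#include <stdio.h>
#include <stdbool.h>

struct folio { bool hwpoison; };

/* Before: the folio may or may not be in the swap cache, so every check
 * sits inside "if (swapcache)" at one extra indentation level. */
static int handle_before(struct folio *folio, struct folio *swapcache)
{
	if (swapcache) {
		if (folio->hwpoison)
			return -1;
		/* ... more checks, all one level deeper ... */
		if (folio != swapcache && swapcache)
			return 1;	/* e.g. release the swapcache folio */
	}
	return 0;
}

/* After: the swap cache is always used, so the wrapper goes away, the body
 * loses a level of indentation, and the redundant "&& swapcache" is dropped. */
static int handle_after(struct folio *folio, struct folio *swapcache)
{
	if (folio->hwpoison)
		return -1;
	/* ... same checks, one level shallower ... */
	if (folio != swapcache)
		return 1;		/* e.g. release the swapcache folio */
	return 0;
}

int main(void)
{
	struct folio f = { .hwpoison = false };

	/* Both paths behave identically; only structure and indentation differ. */
	printf("before=%d after=%d\n",
	       handle_before(&f, &f), handle_after(&f, &f));
	return 0;
}

With both arguments pointing at the same folio, both helpers return 0; the difference is purely structural, matching the "No behavior change" note above.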
mm/memory.c

index 6cbee2838ef7dbad7c923205d434b168bf279cfa..8e38d3d934339b30e1242535c39ed3830add74cc 100644
@@ -4767,55 +4767,52 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
                goto out_release;
 
        page = folio_file_page(folio, swp_offset(entry));
-       if (swapcache) {
-               /*
-                * Make sure folio_free_swap() or swapoff did not release the
-                * swapcache from under us.  The page pin, and pte_same test
-                * below, are not enough to exclude that.  Even if it is still
-                * swapcache, we need to check that the page's swap has not
-                * changed.
-                */
-               if (unlikely(!folio_matches_swap_entry(folio, entry)))
-                       goto out_page;
-
-               if (unlikely(PageHWPoison(page))) {
-                       /*
-                        * hwpoisoned dirty swapcache pages are kept for killing
-                        * owner processes (which may be unknown at hwpoison time)
-                        */
-                       ret = VM_FAULT_HWPOISON;
-                       goto out_page;
-               }
-
-               /*
-                * KSM sometimes has to copy on read faults, for example, if
-                * folio->index of non-ksm folios would be nonlinear inside the
-                * anon VMA -- the ksm flag is lost on actual swapout.
-                */
-               folio = ksm_might_need_to_copy(folio, vma, vmf->address);
-               if (unlikely(!folio)) {
-                       ret = VM_FAULT_OOM;
-                       folio = swapcache;
-                       goto out_page;
-               } else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
-                       ret = VM_FAULT_HWPOISON;
-                       folio = swapcache;
-                       goto out_page;
-               }
-               if (folio != swapcache)
-                       page = folio_page(folio, 0);
+       /*
+        * Make sure folio_free_swap() or swapoff did not release the
+        * swapcache from under us.  The page pin, and pte_same test
+        * below, are not enough to exclude that.  Even if it is still
+        * swapcache, we need to check that the page's swap has not
+        * changed.
+        */
+       if (unlikely(!folio_matches_swap_entry(folio, entry)))
+               goto out_page;
 
+       if (unlikely(PageHWPoison(page))) {
                /*
-                * If we want to map a page that's in the swapcache writable, we
-                * have to detect via the refcount if we're really the exclusive
-                * owner. Try removing the extra reference from the local LRU
-                * caches if required.
+                * hwpoisoned dirty swapcache pages are kept for killing
+                * owner processes (which may be unknown at hwpoison time)
                 */
-               if ((vmf->flags & FAULT_FLAG_WRITE) && folio == swapcache &&
-                   !folio_test_ksm(folio) && !folio_test_lru(folio))
-                       lru_add_drain();
+               ret = VM_FAULT_HWPOISON;
+               goto out_page;
        }
 
+       /*
+        * KSM sometimes has to copy on read faults, for example, if
+        * folio->index of non-ksm folios would be nonlinear inside the
+        * anon VMA -- the ksm flag is lost on actual swapout.
+        */
+       folio = ksm_might_need_to_copy(folio, vma, vmf->address);
+       if (unlikely(!folio)) {
+               ret = VM_FAULT_OOM;
+               folio = swapcache;
+               goto out_page;
+       } else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
+               ret = VM_FAULT_HWPOISON;
+               folio = swapcache;
+               goto out_page;
+       } else if (folio != swapcache)
+               page = folio_page(folio, 0);
+
+       /*
+        * If we want to map a page that's in the swapcache writable, we
+        * have to detect via the refcount if we're really the exclusive
+        * owner. Try removing the extra reference from the local LRU
+        * caches if required.
+        */
+       if ((vmf->flags & FAULT_FLAG_WRITE) &&
+           !folio_test_ksm(folio) && !folio_test_lru(folio))
+               lru_add_drain();
+
        folio_throttle_swaprate(folio, GFP_KERNEL);
 
        /*
@@ -5005,7 +5002,7 @@ check_folio:
                        pte, pte, nr_pages);
 
        folio_unlock(folio);
-       if (folio != swapcache && swapcache) {
+       if (unlikely(folio != swapcache)) {
                /*
                 * Hold the lock to avoid the swap entry to be reused
                 * until we take the PT lock for the pte_same() check
@@ -5043,7 +5040,7 @@ out_page:
        folio_unlock(folio);
 out_release:
        folio_put(folio);
-       if (folio != swapcache && swapcache) {
+       if (folio != swapcache) {
                folio_unlock(swapcache);
                folio_put(swapcache);
        }