git.ipfire.org Git - thirdparty/linux.git/commitdiff
mm: replace remaining pte_to_swp_entry() with softleaf_from_pte()
author: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Mon, 10 Nov 2025 22:21:34 +0000 (22:21 +0000)
committer: Andrew Morton <akpm@linux-foundation.org>
Mon, 24 Nov 2025 23:08:52 +0000 (15:08 -0800)
There are straggler invocations of pte_to_swp_entry() lying around,
replace all of these with the software leaf entry equivalent -
softleaf_from_pte().

With those removed, eliminate pte_to_swp_entry() altogether.

No functional change intended.

Link: https://lkml.kernel.org/r/d8ee5ccefe4c42d7c4fe1a2e46f285ac40421cd3.1762812360.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Chris Li <chrisl@kernel.org>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Janosch Frank <frankja@linux.ibm.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Kairui Song <kasong@tencent.com>
Cc: Kemeng Shi <shikemeng@huaweicloud.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Leon Romanovsky <leon@kernel.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wei Xu <weixugc@google.com>
Cc: xu xin <xu.xin16@zte.com.cn>
Cc: Yuanchu Xie <yuanchu@google.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/leafops.h
include/linux/swapops.h
mm/debug_vm_pgtable.c
mm/internal.h
mm/memory-failure.c
mm/memory.c
mm/migrate.c
mm/mincore.c
mm/rmap.c
mm/swapfile.c

index d282fab866a1dd1d9eb124963a0a1c1ee44a11b0..cfafe7a5e7b13e9aff0f33e886f691c265862519 100644 (file)
@@ -54,11 +54,16 @@ static inline softleaf_t softleaf_mk_none(void)
  */
 static inline softleaf_t softleaf_from_pte(pte_t pte)
 {
+       softleaf_t arch_entry;
+
        if (pte_present(pte) || pte_none(pte))
                return softleaf_mk_none();
 
+       pte = pte_swp_clear_flags(pte);
+       arch_entry = __pte_to_swp_entry(pte);
+
        /* Temporary until swp_entry_t eliminated. */
-       return pte_to_swp_entry(pte);
+       return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
 }
 
 /**
index 3d02b288c15e94fb0a56db6f5ff418361ba018ed..8cfc966eae48eb9e1b0fd7e254bb500651571bc2 100644 (file)
@@ -107,19 +107,6 @@ static inline pgoff_t swp_offset(swp_entry_t entry)
        return entry.val & SWP_OFFSET_MASK;
 }
 
-/*
- * Convert the arch-dependent pte representation of a swp_entry_t into an
- * arch-independent swp_entry_t.
- */
-static inline swp_entry_t pte_to_swp_entry(pte_t pte)
-{
-       swp_entry_t arch_entry;
-
-       pte = pte_swp_clear_flags(pte);
-       arch_entry = __pte_to_swp_entry(pte);
-       return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
-}
-
 /*
  * Convert the arch-independent representation of a swp_entry_t into the
  * arch-dependent pte representation.
index 64db85a8055864e15877538567eb87fd2439dc81..1eae87dbef732639dba2bf96127fea47e2707546 100644 (file)
@@ -1229,7 +1229,7 @@ static int __init init_args(struct pgtable_debug_args *args)
        init_fixed_pfns(args);
 
        /* See generic_max_swapfile_size(): probe the maximum offset */
-       max_swap_offset = swp_offset(pte_to_swp_entry(swp_entry_to_pte(swp_entry(0, ~0UL))));
+       max_swap_offset = swp_offset(softleaf_from_pte(softleaf_to_pte(swp_entry(0, ~0UL))));
        /* Create a swp entry with all possible bits set while still being swap. */
        args->swp_entry = swp_entry(MAX_SWAPFILES - 1, max_swap_offset);
        /* Create a non-present migration entry. */
index 2ed041e6ebc3f1d61fd27c33d4af06ed5e70d650..929bc4a5dd9859767db1d674e3fdc2104a9f7631 100644 (file)
@@ -334,7 +334,7 @@ unsigned int folio_pte_batch(struct folio *folio, pte_t *ptep, pte_t pte,
  */
 static inline pte_t pte_move_swp_offset(pte_t pte, long delta)
 {
-       swp_entry_t entry = pte_to_swp_entry(pte);
+       const softleaf_t entry = softleaf_from_pte(pte);
        pte_t new = __swp_entry_to_pte(__swp_entry(swp_type(entry),
                                                   (swp_offset(entry) + delta)));
 
@@ -389,11 +389,14 @@ static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte)
 
        cgroup_id = lookup_swap_cgroup_id(entry);
        while (ptep < end_ptep) {
+               softleaf_t entry;
+
                pte = ptep_get(ptep);
 
                if (!pte_same(pte, expected_pte))
                        break;
-               if (lookup_swap_cgroup_id(pte_to_swp_entry(pte)) != cgroup_id)
+               entry = softleaf_from_pte(pte);
+               if (lookup_swap_cgroup_id(entry) != cgroup_id)
                        break;
                expected_pte = pte_next_swp_offset(expected_pte);
                ptep++;
index 71652cfedcdf28221dd69093bba99d87b8719e30..7f908ad795ad35e0b2322451a774f37538d79717 100644 (file)
@@ -51,7 +51,7 @@
 #include <linux/backing-dev.h>
 #include <linux/migrate.h>
 #include <linux/slab.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
 #include <linux/hugetlb.h>
 #include <linux/memory_hotplug.h>
 #include <linux/mm_inline.h>
index 525da447922829c872a387b3b9e8fbf33dc56d03..50b93b45b174005ce82e752ef24b00522f8e0aa7 100644 (file)
@@ -1218,7 +1218,7 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
        spinlock_t *src_ptl, *dst_ptl;
        int progress, max_nr, ret = 0;
        int rss[NR_MM_COUNTERS];
-       swp_entry_t entry = (swp_entry_t){0};
+       softleaf_t entry = softleaf_mk_none();
        struct folio *prealloc = NULL;
        int nr;
 
@@ -1282,7 +1282,7 @@ again:
                                                  dst_vma, src_vma,
                                                  addr, rss);
                        if (ret == -EIO) {
-                               entry = pte_to_swp_entry(ptep_get(src_pte));
+                               entry = softleaf_from_pte(ptep_get(src_pte));
                                break;
                        } else if (ret == -EBUSY) {
                                break;
@@ -4446,13 +4446,13 @@ static struct folio *__alloc_swap_folio(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct folio *folio;
-       swp_entry_t entry;
+       softleaf_t entry;
 
        folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vmf->address);
        if (!folio)
                return NULL;
 
-       entry = pte_to_swp_entry(vmf->orig_pte);
+       entry = softleaf_from_pte(vmf->orig_pte);
        if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
                                           GFP_KERNEL, entry)) {
                folio_put(folio);
@@ -4470,7 +4470,7 @@ static struct folio *__alloc_swap_folio(struct vm_fault *vmf)
 static bool can_swapin_thp(struct vm_fault *vmf, pte_t *ptep, int nr_pages)
 {
        unsigned long addr;
-       swp_entry_t entry;
+       softleaf_t entry;
        int idx;
        pte_t pte;
 
@@ -4480,7 +4480,7 @@ static bool can_swapin_thp(struct vm_fault *vmf, pte_t *ptep, int nr_pages)
 
        if (!pte_same(pte, pte_move_swp_offset(vmf->orig_pte, -idx)))
                return false;
-       entry = pte_to_swp_entry(pte);
+       entry = softleaf_from_pte(pte);
        if (swap_pte_batch(ptep, nr_pages, pte) != nr_pages)
                return false;
 
@@ -4526,7 +4526,7 @@ static struct folio *alloc_swap_folio(struct vm_fault *vmf)
        unsigned long orders;
        struct folio *folio;
        unsigned long addr;
-       swp_entry_t entry;
+       softleaf_t entry;
        spinlock_t *ptl;
        pte_t *pte;
        gfp_t gfp;
@@ -4547,7 +4547,7 @@ static struct folio *alloc_swap_folio(struct vm_fault *vmf)
        if (!zswap_never_enabled())
                goto fallback;
 
-       entry = pte_to_swp_entry(vmf->orig_pte);
+       entry = softleaf_from_pte(vmf->orig_pte);
        /*
         * Get a list of all the (large) orders below PMD_ORDER that are enabled
         * and suitable for swapping THP.
index c39dfea1a925c9dae2b5cdad9e9ab2872ca85b6d..b2ad78bf85d5b1e70c9d888e6dd8c5559c754873 100644 (file)
@@ -534,7 +534,7 @@ void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, p
                 * lock release in migration_entry_wait_on_locked().
                 */
                hugetlb_vma_unlock_read(vma);
-               migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
+               migration_entry_wait_on_locked(entry, ptl);
                return;
        }
 
index 9a908d8bb70694c3c337a8970c38e13d9b261f88..e5d13eea92347d3cad8cbf6cf0e4a4f3edc92e88 100644 (file)
@@ -202,7 +202,9 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                        for (i = 0; i < step; i++)
                                vec[i] = 1;
                } else { /* pte is a swap entry */
-                       *vec = mincore_swap(pte_to_swp_entry(pte), false);
+                       const softleaf_t entry = softleaf_from_pte(pte);
+
+                       *vec = mincore_swap(entry, false);
                }
                vec += step;
        }
index 345466ad396bf96de916e359b918d11565e3d97a..d871f2eb821c746af846322bd8c76b22a6d61d12 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1969,7 +1969,9 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                if (likely(pte_present(pteval))) {
                        pfn = pte_pfn(pteval);
                } else {
-                       pfn = softleaf_to_pfn(pte_to_swp_entry(pteval));
+                       const softleaf_t entry = softleaf_from_pte(pteval);
+
+                       pfn = softleaf_to_pfn(entry);
                        VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
                }
 
@@ -2368,7 +2370,9 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                if (likely(pte_present(pteval))) {
                        pfn = pte_pfn(pteval);
                } else {
-                       pfn = softleaf_to_pfn(pte_to_swp_entry(pteval));
+                       const softleaf_t entry = softleaf_from_pte(pteval);
+
+                       pfn = softleaf_to_pfn(entry);
                        VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
                }
 
index 8c7f14061f5bbf97a8c8364e1981755ae3ca86e7..94e0f0c54168759d75bc2756e7c09f35413e6c78 100644 (file)
@@ -3202,8 +3202,17 @@ static int claim_swapfile(struct swap_info_struct *si, struct inode *inode)
  */
 unsigned long generic_max_swapfile_size(void)
 {
-       return swp_offset(pte_to_swp_entry(
-                       swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
+       swp_entry_t entry = swp_entry(0, ~0UL);
+       const pte_t pte = softleaf_to_pte(entry);
+
+       /*
+        * Since the PTE can be an invalid softleaf entry (e.g. the none PTE),
+        * we need to do this manually.
+        */
+       entry = __pte_to_swp_entry(pte);
+       entry = swp_entry(__swp_type(entry), __swp_offset(entry));
+
+       return swp_offset(entry) + 1;
 }
 
 /* Can be overridden by an architecture for additional checks. */