git.ipfire.org Git - thirdparty/linux.git/commitdiff
mm: eliminate further swapops predicates
author: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Mon, 10 Nov 2025 22:21:33 +0000 (22:21 +0000)
committer: Andrew Morton <akpm@linux-foundation.org>
Mon, 24 Nov 2025 23:08:52 +0000 (15:08 -0800)
Having converted so much of the code base to software leaf entries, we can
mop up some remaining cases.

We replace is_pfn_swap_entry(), pfn_swap_entry_to_page(),
is_writable_device_private_entry(), is_device_exclusive_entry(),
is_migration_entry(), is_writable_migration_entry(),
is_readable_migration_entry(), swp_offset_pfn() and pfn_swap_entry_folio()
with softleaf equivalents.

No functional change intended.

Link: https://lkml.kernel.org/r/956bc9c031604811c0070d2f4bf2f1373f230213.1762812360.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Chris Li <chrisl@kernel.org>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Janosch Frank <frankja@linux.ibm.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Kairui Song <kasong@tencent.com>
Cc: Kemeng Shi <shikemeng@huaweicloud.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Leon Romanovsky <leon@kernel.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wei Xu <weixugc@google.com>
Cc: xu xin <xu.xin16@zte.com.cn>
Cc: Yuanchu Xie <yuanchu@google.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
16 files changed:
fs/proc/task_mmu.c
include/linux/leafops.h
include/linux/swapops.h
mm/debug_vm_pgtable.c
mm/hmm.c
mm/hugetlb.c
mm/ksm.c
mm/memory-failure.c
mm/memory.c
mm/mempolicy.c
mm/migrate.c
mm/migrate_device.c
mm/mprotect.c
mm/page_vma_mapped.c
mm/pagewalk.c
mm/rmap.c

index 92ada14eabc09cf437dfc11dca4b334ea3dcf7a9..41b062ce6ad8ed646fed1c774011a0f9488fc078 100644 (file)
@@ -1941,13 +1941,13 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
                if (pte_uffd_wp(pte))
                        flags |= PM_UFFD_WP;
        } else {
-               swp_entry_t entry;
+               softleaf_t entry;
 
                if (pte_swp_soft_dirty(pte))
                        flags |= PM_SOFT_DIRTY;
                if (pte_swp_uffd_wp(pte))
                        flags |= PM_UFFD_WP;
-               entry = pte_to_swp_entry(pte);
+               entry = softleaf_from_pte(pte);
                if (pm->show_pfn) {
                        pgoff_t offset;
 
@@ -1955,16 +1955,16 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
                         * For PFN swap offsets, keeping the offset field
                         * to be PFN only to be compatible with old smaps.
                         */
-                       if (is_pfn_swap_entry(entry))
-                               offset = swp_offset_pfn(entry);
+                       if (softleaf_has_pfn(entry))
+                               offset = softleaf_to_pfn(entry);
                        else
                                offset = swp_offset(entry);
                        frame = swp_type(entry) |
                            (offset << MAX_SWAPFILES_SHIFT);
                }
                flags |= PM_SWAP;
-               if (is_pfn_swap_entry(entry))
-                       page = pfn_swap_entry_to_page(entry);
+               if (softleaf_has_pfn(entry))
+                       page = softleaf_to_page(entry);
                if (softleaf_is_uffd_wp_marker(entry))
                        flags |= PM_UFFD_WP;
                if (softleaf_is_guard_marker(entry))
@@ -2033,7 +2033,7 @@ static int pagemap_pmd_range_thp(pmd_t *pmdp, unsigned long addr,
                if (pmd_swp_uffd_wp(pmd))
                        flags |= PM_UFFD_WP;
                VM_WARN_ON_ONCE(!pmd_is_migration_entry(pmd));
-               page = pfn_swap_entry_to_page(entry);
+               page = softleaf_to_page(entry);
        }
 
        if (page) {
index f5ea9b0385ff1c7fb110211fd1430c735286a5cb..d282fab866a1dd1d9eb124963a0a1c1ee44a11b0 100644 (file)
@@ -355,7 +355,7 @@ static inline unsigned long softleaf_to_pfn(softleaf_t entry)
        VM_WARN_ON_ONCE(!softleaf_has_pfn(entry));
 
        /* Temporary until swp_entry_t eliminated. */
-       return swp_offset_pfn(entry);
+       return swp_offset(entry) & SWP_PFN_MASK;
 }
 
 /**
@@ -366,10 +366,16 @@ static inline unsigned long softleaf_to_pfn(softleaf_t entry)
  */
 static inline struct page *softleaf_to_page(softleaf_t entry)
 {
+       struct page *page = pfn_to_page(softleaf_to_pfn(entry));
+
        VM_WARN_ON_ONCE(!softleaf_has_pfn(entry));
+       /*
+        * Any use of migration entries may only occur while the
+        * corresponding page is locked
+        */
+       VM_WARN_ON_ONCE(softleaf_is_migration(entry) && !PageLocked(page));
 
-       /* Temporary until swp_entry_t eliminated. */
-       return pfn_swap_entry_to_page(entry);
+       return page;
 }
 
 /**
@@ -380,10 +386,17 @@ static inline struct page *softleaf_to_page(softleaf_t entry)
  */
 static inline struct folio *softleaf_to_folio(softleaf_t entry)
 {
-       VM_WARN_ON_ONCE(!softleaf_has_pfn(entry));
+       struct folio *folio = pfn_folio(softleaf_to_pfn(entry));
 
-       /* Temporary until swp_entry_t eliminated. */
-       return pfn_swap_entry_folio(entry);
+       VM_WARN_ON_ONCE(!softleaf_has_pfn(entry));
+       /*
+        * Any use of migration entries may only occur while the
+        * corresponding folio is locked.
+        */
+       VM_WARN_ON_ONCE(softleaf_is_migration(entry) &&
+                       !folio_test_locked(folio));
+
+       return folio;
 }
 
 /**
index c8e6f927da48eb064543d74300bd80143df5438c..3d02b288c15e94fb0a56db6f5ff418361ba018ed 100644 (file)
@@ -28,7 +28,7 @@
 #define SWP_OFFSET_MASK        ((1UL << SWP_TYPE_SHIFT) - 1)
 
 /*
- * Definitions only for PFN swap entries (see is_pfn_swap_entry()).  To
+ * Definitions only for PFN swap entries (see softleaf_has_pfn()).  To
  * store PFN, we only need SWP_PFN_BITS bits.  Each of the pfn swap entries
  * can use the extra bits to store other information besides PFN.
  */
@@ -66,8 +66,6 @@
 #define SWP_MIG_YOUNG                  BIT(SWP_MIG_YOUNG_BIT)
 #define SWP_MIG_DIRTY                  BIT(SWP_MIG_DIRTY_BIT)
 
-static inline bool is_pfn_swap_entry(swp_entry_t entry);
-
 /* Clear all flags but only keep swp_entry_t related information */
 static inline pte_t pte_swp_clear_flags(pte_t pte)
 {
@@ -109,17 +107,6 @@ static inline pgoff_t swp_offset(swp_entry_t entry)
        return entry.val & SWP_OFFSET_MASK;
 }
 
-/*
- * This should only be called upon a pfn swap entry to get the PFN stored
- * in the swap entry.  Please refers to is_pfn_swap_entry() for definition
- * of pfn swap entry.
- */
-static inline unsigned long swp_offset_pfn(swp_entry_t entry)
-{
-       VM_BUG_ON(!is_pfn_swap_entry(entry));
-       return swp_offset(entry) & SWP_PFN_MASK;
-}
-
 /*
  * Convert the arch-dependent pte representation of a swp_entry_t into an
  * arch-independent swp_entry_t.
@@ -169,27 +156,11 @@ static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
        return swp_entry(SWP_DEVICE_WRITE, offset);
 }
 
-static inline bool is_device_private_entry(swp_entry_t entry)
-{
-       int type = swp_type(entry);
-       return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
-}
-
-static inline bool is_writable_device_private_entry(swp_entry_t entry)
-{
-       return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
-}
-
 static inline swp_entry_t make_device_exclusive_entry(pgoff_t offset)
 {
        return swp_entry(SWP_DEVICE_EXCLUSIVE, offset);
 }
 
-static inline bool is_device_exclusive_entry(swp_entry_t entry)
-{
-       return swp_type(entry) == SWP_DEVICE_EXCLUSIVE;
-}
-
 #else /* CONFIG_DEVICE_PRIVATE */
 static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
 {
@@ -201,50 +172,14 @@ static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
        return swp_entry(0, 0);
 }
 
-static inline bool is_device_private_entry(swp_entry_t entry)
-{
-       return false;
-}
-
-static inline bool is_writable_device_private_entry(swp_entry_t entry)
-{
-       return false;
-}
-
 static inline swp_entry_t make_device_exclusive_entry(pgoff_t offset)
 {
        return swp_entry(0, 0);
 }
 
-static inline bool is_device_exclusive_entry(swp_entry_t entry)
-{
-       return false;
-}
-
 #endif /* CONFIG_DEVICE_PRIVATE */
 
 #ifdef CONFIG_MIGRATION
-static inline int is_migration_entry(swp_entry_t entry)
-{
-       return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
-                       swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE ||
-                       swp_type(entry) == SWP_MIGRATION_WRITE);
-}
-
-static inline int is_writable_migration_entry(swp_entry_t entry)
-{
-       return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
-}
-
-static inline int is_readable_migration_entry(swp_entry_t entry)
-{
-       return unlikely(swp_type(entry) == SWP_MIGRATION_READ);
-}
-
-static inline int is_readable_exclusive_migration_entry(swp_entry_t entry)
-{
-       return unlikely(swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE);
-}
 
 static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
 {
@@ -310,23 +245,10 @@ static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
        return swp_entry(0, 0);
 }
 
-static inline int is_migration_entry(swp_entry_t swp)
-{
-       return 0;
-}
-
 static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
                                        unsigned long address) { }
 static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
                                             unsigned long addr, pte_t *pte) { }
-static inline int is_writable_migration_entry(swp_entry_t entry)
-{
-       return 0;
-}
-static inline int is_readable_migration_entry(swp_entry_t entry)
-{
-       return 0;
-}
 
 static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
 {
@@ -410,47 +332,6 @@ static inline swp_entry_t make_guard_swp_entry(void)
        return make_pte_marker_entry(PTE_MARKER_GUARD);
 }
 
-static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
-{
-       struct page *p = pfn_to_page(swp_offset_pfn(entry));
-
-       /*
-        * Any use of migration entries may only occur while the
-        * corresponding page is locked
-        */
-       BUG_ON(is_migration_entry(entry) && !PageLocked(p));
-
-       return p;
-}
-
-static inline struct folio *pfn_swap_entry_folio(swp_entry_t entry)
-{
-       struct folio *folio = pfn_folio(swp_offset_pfn(entry));
-
-       /*
-        * Any use of migration entries may only occur while the
-        * corresponding folio is locked
-        */
-       BUG_ON(is_migration_entry(entry) && !folio_test_locked(folio));
-
-       return folio;
-}
-
-/*
- * A pfn swap entry is a special type of swap entry that always has a pfn stored
- * in the swap offset. They can either be used to represent unaddressable device
- * memory, to restrict access to a page undergoing migration or to represent a
- * pfn which has been hwpoisoned and unmapped.
- */
-static inline bool is_pfn_swap_entry(swp_entry_t entry)
-{
-       /* Make sure the swp offset can always store the needed fields */
-       BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS);
-
-       return is_migration_entry(entry) || is_device_private_entry(entry) ||
-              is_device_exclusive_entry(entry) || is_hwpoison_entry(entry);
-}
-
 struct page_vma_mapped_walk;
 
 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
index 608d1011ce036b5536af8b5b72b92b39c84d9fb7..64db85a8055864e15877538567eb87fd2439dc81 100644 (file)
@@ -844,7 +844,7 @@ static void __init pmd_softleaf_tests(struct pgtable_debug_args *args) { }
 static void __init swap_migration_tests(struct pgtable_debug_args *args)
 {
        struct page *page;
-       swp_entry_t swp;
+       softleaf_t entry;
 
        if (!IS_ENABLED(CONFIG_MIGRATION))
                return;
@@ -867,17 +867,17 @@ static void __init swap_migration_tests(struct pgtable_debug_args *args)
         * be locked, otherwise it stumbles upon a BUG_ON().
         */
        __SetPageLocked(page);
-       swp = make_writable_migration_entry(page_to_pfn(page));
-       WARN_ON(!is_migration_entry(swp));
-       WARN_ON(!is_writable_migration_entry(swp));
+       entry = make_writable_migration_entry(page_to_pfn(page));
+       WARN_ON(!softleaf_is_migration(entry));
+       WARN_ON(!softleaf_is_migration_write(entry));
 
-       swp = make_readable_migration_entry(swp_offset(swp));
-       WARN_ON(!is_migration_entry(swp));
-       WARN_ON(is_writable_migration_entry(swp));
+       entry = make_readable_migration_entry(swp_offset(entry));
+       WARN_ON(!softleaf_is_migration(entry));
+       WARN_ON(softleaf_is_migration_write(entry));
 
-       swp = make_readable_migration_entry(page_to_pfn(page));
-       WARN_ON(!is_migration_entry(swp));
-       WARN_ON(is_writable_migration_entry(swp));
+       entry = make_readable_migration_entry(page_to_pfn(page));
+       WARN_ON(!softleaf_is_migration(entry));
+       WARN_ON(softleaf_is_migration_write(entry));
        __ClearPageLocked(page);
 }
 
index 0158f2d1e027fdbb6135f44082b3d8058c98e3a9..3912d92a2b9ab7aa49a4ffc9ae23ebd23a0ba18a 100644 (file)
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -270,7 +270,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
                        cpu_flags = HMM_PFN_VALID;
                        if (softleaf_is_device_private_write(entry))
                                cpu_flags |= HMM_PFN_WRITE;
-                       new_pfn_flags = swp_offset_pfn(entry) | cpu_flags;
+                       new_pfn_flags = softleaf_to_pfn(entry) | cpu_flags;
                        goto out;
                }
 
index 311c5d60131049a8c85ade0f7702b3ac3a969702..9e7815b4f0583f44527b371e2d57e7931d0bb564 100644 (file)
@@ -4934,7 +4934,7 @@ again:
                } else if (unlikely(softleaf_is_migration(softleaf))) {
                        bool uffd_wp = pte_swp_uffd_wp(entry);
 
-                       if (!is_readable_migration_entry(softleaf) && cow) {
+                       if (!softleaf_is_migration_read(softleaf) && cow) {
                                /*
                                 * COW mappings require pages in both
                                 * parent and child to be set to read.
index f9a1a3658eaddac785f58420b7234371a1c82dea..cfc182255c7bae8ea4d3040096b40c25d7c3363d 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -632,14 +632,14 @@ static int break_ksm_pmd_entry(pmd_t *pmdp, unsigned long addr, unsigned long en
                if (pte_present(pte)) {
                        folio = vm_normal_folio(walk->vma, addr, pte);
                } else if (!pte_none(pte)) {
-                       swp_entry_t entry = pte_to_swp_entry(pte);
+                       const softleaf_t entry = softleaf_from_pte(pte);
 
                        /*
                         * As KSM pages remain KSM pages until freed, no need to wait
                         * here for migration to end.
                         */
-                       if (is_migration_entry(entry))
-                               folio = pfn_swap_entry_folio(entry);
+                       if (softleaf_is_migration(entry))
+                               folio = softleaf_to_folio(entry);
                }
                /* return 1 if the page is an normal ksm page or KSM-placed zero page */
                found = (folio && folio_test_ksm(folio)) ||
index 1f7fb9bf287a8e3f3dd17b7ce9310a232a487b38..71652cfedcdf28221dd69093bba99d87b8719e30 100644 (file)
@@ -693,10 +693,10 @@ static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift,
        if (pte_present(pte)) {
                pfn = pte_pfn(pte);
        } else {
-               swp_entry_t swp = pte_to_swp_entry(pte);
+               const softleaf_t entry = softleaf_from_pte(pte);
 
-               if (is_hwpoison_entry(swp))
-                       pfn = swp_offset_pfn(swp);
+               if (softleaf_is_hwpoison(entry))
+                       pfn = softleaf_to_pfn(entry);
        }
 
        if (!pfn || pfn != poisoned_pfn)
index a3f001a47ecfb6cfd82b29fc3c6adee277a428b6..525da447922829c872a387b3b9e8fbf33dc56d03 100644 (file)
@@ -902,7 +902,8 @@ static void restore_exclusive_pte(struct vm_area_struct *vma,
 static int try_restore_exclusive_pte(struct vm_area_struct *vma,
                unsigned long addr, pte_t *ptep, pte_t orig_pte)
 {
-       struct page *page = pfn_swap_entry_to_page(pte_to_swp_entry(orig_pte));
+       const softleaf_t entry = softleaf_from_pte(orig_pte);
+       struct page *page = softleaf_to_page(entry);
        struct folio *folio = page_folio(page);
 
        if (folio_trylock(folio)) {
index dee95d5ecfd4e8f6c2adda90db73127e8cd98fac..acb9bf89f6190311c69298138f942425610ae10d 100644 (file)
@@ -705,7 +705,9 @@ static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
                if (pte_none(ptent))
                        continue;
                if (!pte_present(ptent)) {
-                       if (is_migration_entry(pte_to_swp_entry(ptent)))
+                       const softleaf_t entry = softleaf_from_pte(ptent);
+
+                       if (softleaf_is_migration(entry))
                                qp->nr_failed++;
                        continue;
                }
index 5edfd0b2f63d311fb2641eada54e7a87de0ce029..c39dfea1a925c9dae2b5cdad9e9ab2872ca85b6d 100644 (file)
@@ -483,7 +483,7 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
        spinlock_t *ptl;
        pte_t *ptep;
        pte_t pte;
-       swp_entry_t entry;
+       softleaf_t entry;
 
        ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
        if (!ptep)
@@ -495,8 +495,8 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
        if (pte_none(pte) || pte_present(pte))
                goto out;
 
-       entry = pte_to_swp_entry(pte);
-       if (!is_migration_entry(entry))
+       entry = softleaf_from_pte(pte);
+       if (!softleaf_is_migration(entry))
                goto out;
 
        migration_entry_wait_on_locked(entry, ptl);
index 592b4561507ce4f89dc271b53bd132b1b681636a..b1ce6e3478d603df3faa0bafd2b8764856d5a850 100644 (file)
@@ -279,7 +279,7 @@ again:
                unsigned long mpfn = 0, pfn;
                struct folio *folio;
                struct page *page;
-               swp_entry_t entry;
+               softleaf_t entry;
                pte_t pte;
 
                pte = ptep_get(ptep);
@@ -298,11 +298,11 @@ again:
                         * page table entry. Other special swap entries are not
                         * migratable, and we ignore regular swapped page.
                         */
-                       entry = pte_to_swp_entry(pte);
-                       if (!is_device_private_entry(entry))
+                       entry = softleaf_from_pte(pte);
+                       if (!softleaf_is_device_private(entry))
                                goto next;
 
-                       page = pfn_swap_entry_to_page(entry);
+                       page = softleaf_to_page(entry);
                        pgmap = page_pgmap(page);
                        if (!(migrate->flags &
                                MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
@@ -330,7 +330,7 @@ again:
 
                        mpfn = migrate_pfn(page_to_pfn(page)) |
                                        MIGRATE_PFN_MIGRATE;
-                       if (is_writable_device_private_entry(entry))
+                       if (softleaf_is_device_private_write(entry))
                                mpfn |= MIGRATE_PFN_WRITE;
                } else {
                        pfn = pte_pfn(pte);
index f910cbf41442f4f98035ff80bcf17883d6c8e395..283889e4f1cec73983deff11e60c97b6564a4a6d 100644 (file)
@@ -317,11 +317,11 @@ static long change_pte_range(struct mmu_gather *tlb,
                                pages++;
                        }
                } else  {
-                       swp_entry_t entry = pte_to_swp_entry(oldpte);
+                       softleaf_t entry = softleaf_from_pte(oldpte);
                        pte_t newpte;
 
-                       if (is_writable_migration_entry(entry)) {
-                               struct folio *folio = pfn_swap_entry_folio(entry);
+                       if (softleaf_is_migration_write(entry)) {
+                               const struct folio *folio = softleaf_to_folio(entry);
 
                                /*
                                 * A protection check is difficult so
@@ -335,7 +335,7 @@ static long change_pte_range(struct mmu_gather *tlb,
                                newpte = swp_entry_to_pte(entry);
                                if (pte_swp_soft_dirty(oldpte))
                                        newpte = pte_swp_mksoft_dirty(newpte);
-                       } else if (is_writable_device_private_entry(entry)) {
+                       } else if (softleaf_is_device_private_write(entry)) {
                                /*
                                 * We do not preserve soft-dirtiness. See
                                 * copy_nonpresent_pte() for explanation.
index 8137d23667222dd2ea6feb4055cb8082e8070ef6..b38a1d00c971b161383ffd8078f41a42ebe81eb8 100644 (file)
@@ -49,7 +49,7 @@ again:
                if (is_migration)
                        return false;
        } else if (!is_migration) {
-               swp_entry_t entry;
+               softleaf_t entry;
 
                /*
                 * Handle un-addressable ZONE_DEVICE memory.
@@ -67,9 +67,9 @@ again:
                 * For more details on device private memory see HMM
                 * (include/linux/hmm.h or mm/hmm.c).
                 */
-               entry = pte_to_swp_entry(ptent);
-               if (!is_device_private_entry(entry) &&
-                   !is_device_exclusive_entry(entry))
+               entry = softleaf_from_pte(ptent);
+               if (!softleaf_is_device_private(entry) &&
+                   !softleaf_is_device_exclusive(entry))
                        return false;
        }
        spin_lock(*ptlp);
index 378c774795fcbcb82e9996072bfb18ae5fbdc077..90cc346a6ecf162d12f3de47a996563b619fdb56 100644 (file)
@@ -1007,11 +1007,10 @@ pte_table:
                        goto found;
                }
        } else if (!pte_none(pte)) {
-               swp_entry_t entry = pte_to_swp_entry(pte);
+               const softleaf_t entry = softleaf_from_pte(pte);
 
-               if ((flags & FW_MIGRATION) &&
-                   is_migration_entry(entry)) {
-                       page = pfn_swap_entry_to_page(entry);
+               if ((flags & FW_MIGRATION) && softleaf_is_migration(entry)) {
+                       page = softleaf_to_page(entry);
                        expose_page = false;
                        goto found;
                }
index 775710115a41eeec201b0d284e8305ff7b31aa8f..345466ad396bf96de916e359b918d11565e3d97a 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1969,7 +1969,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                if (likely(pte_present(pteval))) {
                        pfn = pte_pfn(pteval);
                } else {
-                       pfn = swp_offset_pfn(pte_to_swp_entry(pteval));
+                       pfn = softleaf_to_pfn(pte_to_swp_entry(pteval));
                        VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
                }
 
@@ -2368,7 +2368,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                if (likely(pte_present(pteval))) {
                        pfn = pte_pfn(pteval);
                } else {
-                       pfn = swp_offset_pfn(pte_to_swp_entry(pteval));
+                       pfn = softleaf_to_pfn(pte_to_swp_entry(pteval));
                        VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
                }
 
@@ -2453,8 +2453,11 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                                folio_mark_dirty(folio);
                        writable = pte_write(pteval);
                } else {
+                       const softleaf_t entry = softleaf_from_pte(pteval);
+
                        pte_clear(mm, address, pvmw.pte);
-                       writable = is_writable_device_private_entry(pte_to_swp_entry(pteval));
+
+                       writable = softleaf_is_device_private_write(entry);
                }
 
                VM_WARN_ON_FOLIO(writable && folio_test_anon(folio) &&