git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
mm: rename PAGE_MAPPING_* to FOLIO_MAPPING_*
author David Hildenbrand <david@redhat.com>
Fri, 4 Jul 2025 10:25:20 +0000 (12:25 +0200)
committer Andrew Morton <akpm@linux-foundation.org>
Sun, 13 Jul 2025 23:38:31 +0000 (16:38 -0700)
Now that the mapping flags are only used for folios, let's rename the
defines.

Link: https://lkml.kernel.org/r/20250704102524.326966-27-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Brendan Jackman <jackmanb@google.com>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Eugenio Pérez <eperezma@redhat.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Gregory Price <gourry@gourry.net>
Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Jerrin Shaji George <jerrin.shaji-george@broadcom.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Xu <peterx@redhat.com>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Cc: xu xin <xu.xin16@zte.com.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
fs/proc/page.c
include/linux/fs.h
include/linux/mm_types.h
include/linux/page-flags.h
include/linux/pagemap.h
mm/gup.c
mm/internal.h
mm/ksm.c
mm/rmap.c
mm/util.c

index 999af26c7298552c84d53689400909219f5f1648..0cdc78c0d23fab6ab2e03c48258bf12a6305de26 100644 (file)
@@ -149,7 +149,7 @@ u64 stable_page_flags(const struct page *page)
 
        k = folio->flags;
        mapping = (unsigned long)folio->mapping;
-       is_anon = mapping & PAGE_MAPPING_ANON;
+       is_anon = mapping & FOLIO_MAPPING_ANON;
 
        /*
         * pseudo flags for the well known (anonymous) memory mapped pages
@@ -158,7 +158,7 @@ u64 stable_page_flags(const struct page *page)
                u |= 1 << KPF_MMAP;
        if (is_anon) {
                u |= 1 << KPF_ANON;
-               if (mapping & PAGE_MAPPING_KSM)
+               if (mapping & FOLIO_MAPPING_KSM)
                        u |= 1 << KPF_KSM;
        }
 
index e14e9d11ca0f3ad6270b38470c5953d097f305ea..d3e7ad6941a8b4c82194691c3d6412f87ad2eaba 100644 (file)
@@ -526,7 +526,7 @@ struct address_space {
        /*
         * On most architectures that alignment is already the case; but
         * must be enforced here for CRIS, to let the least significant bit
-        * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON.
+        * of struct folio's "mapping" pointer be used for FOLIO_MAPPING_ANON.
         */
 
 /* XArray tags, for tagging dirty and writeback pages in the pagecache. */
index 804d269a4f5e8717277c4ec85dd60c71232497d4..1ec273b066915a8e4fd190eeee5a5adb23ddc102 100644 (file)
@@ -105,7 +105,6 @@ struct page {
                                        unsigned int order;
                                };
                        };
-                       /* See page-flags.h for PAGE_MAPPING_FLAGS */
                        struct address_space *mapping;
                        union {
                                pgoff_t __folio_index;          /* Our offset within mapping. */
index ae2b80fcea6aa9737b598c5fff180c9df8eabda1..8e4d6eda8a8d628d994860c7b94806c440db5f01 100644 (file)
@@ -695,10 +695,10 @@ PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
 /*
  * On an anonymous folio mapped into a user virtual memory area,
  * folio->mapping points to its anon_vma, not to a struct address_space;
- * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
+ * with the FOLIO_MAPPING_ANON bit set to distinguish it.  See rmap.h.
  *
  * On an anonymous folio in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
- * the PAGE_MAPPING_ANON_KSM bit may be set along with the PAGE_MAPPING_ANON
+ * the FOLIO_MAPPING_ANON_KSM bit may be set along with the FOLIO_MAPPING_ANON
  * bit; and then folio->mapping points, not to an anon_vma, but to a private
  * structure which KSM associates with that merged folio.  See ksm.h.
  *
@@ -713,21 +713,21 @@ PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
  * false before calling the following functions (e.g., folio_test_anon).
  * See mm/slab.h.
  */
-#define PAGE_MAPPING_ANON      0x1
-#define PAGE_MAPPING_ANON_KSM  0x2
-#define PAGE_MAPPING_KSM       (PAGE_MAPPING_ANON | PAGE_MAPPING_ANON_KSM)
-#define PAGE_MAPPING_FLAGS     (PAGE_MAPPING_ANON | PAGE_MAPPING_ANON_KSM)
+#define FOLIO_MAPPING_ANON     0x1
+#define FOLIO_MAPPING_ANON_KSM 0x2
+#define FOLIO_MAPPING_KSM      (FOLIO_MAPPING_ANON | FOLIO_MAPPING_ANON_KSM)
+#define FOLIO_MAPPING_FLAGS    (FOLIO_MAPPING_ANON | FOLIO_MAPPING_ANON_KSM)
 
 static __always_inline bool folio_test_anon(const struct folio *folio)
 {
-       return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
+       return ((unsigned long)folio->mapping & FOLIO_MAPPING_ANON) != 0;
 }
 
 static __always_inline bool PageAnonNotKsm(const struct page *page)
 {
        unsigned long flags = (unsigned long)page_folio(page)->mapping;
 
-       return (flags & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_ANON;
+       return (flags & FOLIO_MAPPING_FLAGS) == FOLIO_MAPPING_ANON;
 }
 
 static __always_inline bool PageAnon(const struct page *page)
@@ -743,8 +743,8 @@ static __always_inline bool PageAnon(const struct page *page)
  */
 static __always_inline bool folio_test_ksm(const struct folio *folio)
 {
-       return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
-                               PAGE_MAPPING_KSM;
+       return ((unsigned long)folio->mapping & FOLIO_MAPPING_FLAGS) ==
+                               FOLIO_MAPPING_KSM;
 }
 #else
 FOLIO_TEST_FLAG_FALSE(ksm)
index e63fbfbd5b0f3270ad8bce2b92becbb0b1220e5c..10a222e68b851e20995d76edf7e424c5ebacd757 100644 (file)
@@ -502,7 +502,7 @@ static inline pgoff_t mapping_align_index(struct address_space *mapping,
 static inline bool mapping_large_folio_support(struct address_space *mapping)
 {
        /* AS_FOLIO_ORDER is only reasonable for pagecache folios */
-       VM_WARN_ONCE((unsigned long)mapping & PAGE_MAPPING_ANON,
+       VM_WARN_ONCE((unsigned long)mapping & FOLIO_MAPPING_ANON,
                        "Anonymous mapping always supports large folio");
 
        return mapping_max_folio_order(mapping) > 0;
index 30d320719fa23bd0ca2afb08136fd6204b471026..adffe663594dc61441ad64cc7754d5f5376d5478 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2804,9 +2804,9 @@ static bool gup_fast_folio_allowed(struct folio *folio, unsigned int flags)
                return false;
 
        /* Anonymous folios pose no problem. */
-       mapping_flags = (unsigned long)mapping & PAGE_MAPPING_FLAGS;
+       mapping_flags = (unsigned long)mapping & FOLIO_MAPPING_FLAGS;
        if (mapping_flags)
-               return mapping_flags & PAGE_MAPPING_ANON;
+               return mapping_flags & FOLIO_MAPPING_ANON;
 
        /*
         * At this point, we know the mapping is non-null and points to an
index 22a95a2b7fa19e9857e089fff2dbfa96fe2d2d0f..2e235740128ad84561dc334295e77b812d04e72d 100644 (file)
@@ -149,7 +149,7 @@ static inline void *folio_raw_mapping(const struct folio *folio)
 {
        unsigned long mapping = (unsigned long)folio->mapping;
 
-       return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
+       return (void *)(mapping & ~FOLIO_MAPPING_FLAGS);
 }
 
 /*
index ef73b25fd65a6c6b9db4010abbc88d70692896c2..2b0210d41c553174a4d064ed2ad7eecd5a8ad672 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -893,7 +893,7 @@ static struct folio *ksm_get_folio(struct ksm_stable_node *stable_node,
        unsigned long kpfn;
 
        expected_mapping = (void *)((unsigned long)stable_node |
-                                       PAGE_MAPPING_KSM);
+                                       FOLIO_MAPPING_KSM);
 again:
        kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */
        folio = pfn_folio(kpfn);
@@ -1070,7 +1070,7 @@ static inline void folio_set_stable_node(struct folio *folio,
                                         struct ksm_stable_node *stable_node)
 {
        VM_WARN_ON_FOLIO(folio_test_anon(folio) && PageAnonExclusive(&folio->page), folio);
-       folio->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
+       folio->mapping = (void *)((unsigned long)stable_node | FOLIO_MAPPING_KSM);
 }
 
 #ifdef CONFIG_SYSFS
index bd83724d14b6c9f7900c31e137fdd39aa686eb39..4b1a2a33e39f0b1de02527b7c25c24024019dcbe 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -503,12 +503,12 @@ struct anon_vma *folio_get_anon_vma(const struct folio *folio)
 
        rcu_read_lock();
        anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
-       if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
+       if ((anon_mapping & FOLIO_MAPPING_FLAGS) != FOLIO_MAPPING_ANON)
                goto out;
        if (!folio_mapped(folio))
                goto out;
 
-       anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
+       anon_vma = (struct anon_vma *) (anon_mapping - FOLIO_MAPPING_ANON);
        if (!atomic_inc_not_zero(&anon_vma->refcount)) {
                anon_vma = NULL;
                goto out;
@@ -550,12 +550,12 @@ struct anon_vma *folio_lock_anon_vma_read(const struct folio *folio,
 retry:
        rcu_read_lock();
        anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
-       if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
+       if ((anon_mapping & FOLIO_MAPPING_FLAGS) != FOLIO_MAPPING_ANON)
                goto out;
        if (!folio_mapped(folio))
                goto out;
 
-       anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
+       anon_vma = (struct anon_vma *) (anon_mapping - FOLIO_MAPPING_ANON);
        root_anon_vma = READ_ONCE(anon_vma->root);
        if (down_read_trylock(&root_anon_vma->rwsem)) {
                /*
@@ -1334,9 +1334,9 @@ void folio_move_anon_rmap(struct folio *folio, struct vm_area_struct *vma)
        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        VM_BUG_ON_VMA(!anon_vma, vma);
 
-       anon_vma += PAGE_MAPPING_ANON;
+       anon_vma += FOLIO_MAPPING_ANON;
        /*
-        * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
+        * Ensure that anon_vma and the FOLIO_MAPPING_ANON bit are written
         * simultaneously, so a concurrent reader (eg folio_referenced()'s
         * folio_test_anon()) will not see one without the other.
         */
@@ -1367,10 +1367,10 @@ static void __folio_set_anon(struct folio *folio, struct vm_area_struct *vma,
        /*
         * page_idle does a lockless/optimistic rmap scan on folio->mapping.
         * Make sure the compiler doesn't split the stores of anon_vma and
-        * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code
+        * the FOLIO_MAPPING_ANON type identifier, otherwise the rmap code
         * could mistake the mapping for a struct address_space and crash.
         */
-       anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+       anon_vma = (void *) anon_vma + FOLIO_MAPPING_ANON;
        WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma);
        folio->index = linear_page_index(vma, address);
 }
index 0b270c43d7d128feba5219500f6a3f59f1f6a69d..20bbfe4ce1b8b1155201d34722f8f27f784c9afd 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -670,9 +670,9 @@ struct anon_vma *folio_anon_vma(const struct folio *folio)
 {
        unsigned long mapping = (unsigned long)folio->mapping;
 
-       if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
+       if ((mapping & FOLIO_MAPPING_FLAGS) != FOLIO_MAPPING_ANON)
                return NULL;
-       return (void *)(mapping - PAGE_MAPPING_ANON);
+       return (void *)(mapping - FOLIO_MAPPING_ANON);
 }
 
 /**
@@ -699,7 +699,7 @@ struct address_space *folio_mapping(struct folio *folio)
                return swap_address_space(folio->swap);
 
        mapping = folio->mapping;
-       if ((unsigned long)mapping & PAGE_MAPPING_FLAGS)
+       if ((unsigned long)mapping & FOLIO_MAPPING_FLAGS)
                return NULL;
 
        return mapping;