git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
migrate: replace RMP_ flags with TTU_ flags
authorMatthew Wilcox (Oracle) <willy@infradead.org>
Fri, 9 Jan 2026 04:13:43 +0000 (04:13 +0000)
committerAndrew Morton <akpm@linux-foundation.org>
Tue, 27 Jan 2026 04:02:33 +0000 (20:02 -0800)
Instead of translating between RMP_ and TTU_ flags, remove the RMP_ flags
and just use the TTU_ flag space; there's plenty available.

Possibly we should rename these to RMAP_ flags, and maybe even pass them
in through rmap_walk_arg, but that can be done later.

Link: https://lkml.kernel.org/r/20260109041345.3863089-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: David Hildenbrand (Red Hat) <david@kernel.org>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: Jann Horn <jannh@google.com>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Ying Huang <ying.huang@linux.alibaba.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/rmap.h
mm/huge_memory.c
mm/migrate.c

index dd764951b03d72f716689d7a774167111c181521..8dc0871e5f0011ad2a8a224229f6494bebb947ca 100644 (file)
@@ -92,6 +92,7 @@ struct anon_vma_chain {
 };
 
 enum ttu_flags {
+       TTU_USE_SHARED_ZEROPAGE = 0x2,  /* for unused pages of large folios */
        TTU_SPLIT_HUGE_PMD      = 0x4,  /* split huge PMD if any */
        TTU_IGNORE_MLOCK        = 0x8,  /* ignore mlock */
        TTU_SYNC                = 0x10, /* avoid racy checks with PVMW_SYNC */
@@ -933,12 +934,8 @@ int mapping_wrprotect_range(struct address_space *mapping, pgoff_t pgoff,
 int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
                      struct vm_area_struct *vma);
 
-enum rmp_flags {
-       RMP_LOCKED              = 1 << 0,
-       RMP_USE_SHARED_ZEROPAGE = 1 << 1,
-};
-
-void remove_migration_ptes(struct folio *src, struct folio *dst, int flags);
+void remove_migration_ptes(struct folio *src, struct folio *dst,
+               enum ttu_flags flags);
 
 /*
  * rmap_walk_control: To control rmap traversing for specific needs
index 40cf59301c21aa315f96e2aa732152fdf7e1feac..44ff8a648afd5a9d62f648dea2b89b5ac576df1a 100644 (file)
@@ -3431,7 +3431,7 @@ static void remap_page(struct folio *folio, unsigned long nr, int flags)
        if (!folio_test_anon(folio))
                return;
        for (;;) {
-               remove_migration_ptes(folio, folio, RMP_LOCKED | flags);
+               remove_migration_ptes(folio, folio, TTU_RMAP_LOCKED | flags);
                i += folio_nr_pages(folio);
                if (i >= nr)
                        break;
@@ -3944,7 +3944,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
        int old_order = folio_order(folio);
        struct folio *new_folio, *next;
        int nr_shmem_dropped = 0;
-       int remap_flags = 0;
+       enum ttu_flags ttu_flags = 0;
        int ret;
        pgoff_t end = 0;
 
@@ -4064,9 +4064,9 @@ fail:
                shmem_uncharge(mapping->host, nr_shmem_dropped);
 
        if (!ret && is_anon && !folio_is_device_private(folio))
-               remap_flags = RMP_USE_SHARED_ZEROPAGE;
+               ttu_flags = TTU_USE_SHARED_ZEROPAGE;
 
-       remap_page(folio, 1 << old_order, remap_flags);
+       remap_page(folio, 1 << old_order, ttu_flags);
 
        /*
         * Unlock all after-split folios except the one containing
index 4688b9e38cd2fdb11ed0f4a6665dd0465c64eda4..4750a2ba15fef7e204862bf2125dea79e7fd317b 100644 (file)
@@ -452,11 +452,12 @@ static bool remove_migration_pte(struct folio *folio,
  * Get rid of all migration entries and replace them by
  * references to the indicated page.
  */
-void remove_migration_ptes(struct folio *src, struct folio *dst, int flags)
+void remove_migration_ptes(struct folio *src, struct folio *dst,
+               enum ttu_flags flags)
 {
        struct rmap_walk_arg rmap_walk_arg = {
                .folio = src,
-               .map_unused_to_zeropage = flags & RMP_USE_SHARED_ZEROPAGE,
+               .map_unused_to_zeropage = flags & TTU_USE_SHARED_ZEROPAGE,
        };
 
        struct rmap_walk_control rwc = {
@@ -464,9 +465,9 @@ void remove_migration_ptes(struct folio *src, struct folio *dst, int flags)
                .arg = &rmap_walk_arg,
        };
 
-       VM_BUG_ON_FOLIO((flags & RMP_USE_SHARED_ZEROPAGE) && (src != dst), src);
+       VM_BUG_ON_FOLIO((flags & TTU_USE_SHARED_ZEROPAGE) && (src != dst), src);
 
-       if (flags & RMP_LOCKED)
+       if (flags & TTU_RMAP_LOCKED)
                rmap_walk_locked(dst, &rwc);
        else
                rmap_walk(dst, &rwc);
@@ -1521,8 +1522,7 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio,
                rc = move_to_new_folio(dst, src, mode);
 
        if (page_was_mapped)
-               remove_migration_ptes(src, !rc ? dst : src,
-                               ttu ? RMP_LOCKED : 0);
+               remove_migration_ptes(src, !rc ? dst : src, ttu);
 
        if (ttu & TTU_RMAP_LOCKED)
                i_mmap_unlock_write(mapping);