Instead of translating between RMP_ and TTU_ flags, remove the RMP_ flags
and just use the TTU_ flag space; there's plenty available.
Possibly we should rename these to RMAP_ flags, and maybe even pass them
in through rmap_walk_arg, but that can be done later.
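As a sketch of the resulting calling convention (all names below appear in the
diff): TTU_USE_SHARED_ZEROPAGE takes over the role of RMP_USE_SHARED_ZEROPAGE
and TTU_RMAP_LOCKED the role of RMP_LOCKED, so a caller such as remap_page()
now passes the TTU_ flags it already holds straight through, e.g.

	remove_migration_ptes(folio, folio, TTU_RMAP_LOCKED | flags);

rather than translating them into RMP_ equivalents first.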
Link: https://lkml.kernel.org/r/20260109041345.3863089-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: David Hildenbrand (Red Hat) <david@kernel.org>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: Jann Horn <jannh@google.com>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Ying Huang <ying.huang@linux.alibaba.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
 };
 enum ttu_flags {
+	TTU_USE_SHARED_ZEROPAGE = 0x2, /* for unused pages of large folios */
 	TTU_SPLIT_HUGE_PMD = 0x4, /* split huge PMD if any */
 	TTU_IGNORE_MLOCK = 0x8, /* ignore mlock */
 	TTU_SYNC = 0x10, /* avoid racy checks with PVMW_SYNC */
 int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
 		      struct vm_area_struct *vma);
-enum rmp_flags {
-	RMP_LOCKED = 1 << 0,
-	RMP_USE_SHARED_ZEROPAGE = 1 << 1,
-};
-
-void remove_migration_ptes(struct folio *src, struct folio *dst, int flags);
+void remove_migration_ptes(struct folio *src, struct folio *dst,
+		enum ttu_flags flags);
 /*
  * rmap_walk_control: To control rmap traversing for specific needs
 	if (!folio_test_anon(folio))
 		return;
 	for (;;) {
-		remove_migration_ptes(folio, folio, RMP_LOCKED | flags);
+		remove_migration_ptes(folio, folio, TTU_RMAP_LOCKED | flags);
 		i += folio_nr_pages(folio);
 		if (i >= nr)
 			break;
 	int old_order = folio_order(folio);
 	struct folio *new_folio, *next;
 	int nr_shmem_dropped = 0;
-	int remap_flags = 0;
+	enum ttu_flags ttu_flags = 0;
 	int ret;
 	pgoff_t end = 0;
 		shmem_uncharge(mapping->host, nr_shmem_dropped);
 	if (!ret && is_anon && !folio_is_device_private(folio))
-		remap_flags = RMP_USE_SHARED_ZEROPAGE;
+		ttu_flags = TTU_USE_SHARED_ZEROPAGE;
-	remap_page(folio, 1 << old_order, remap_flags);
+	remap_page(folio, 1 << old_order, ttu_flags);
 /*
  * Get rid of all migration entries and replace them by
  * references to the indicated page.
  */
-void remove_migration_ptes(struct folio *src, struct folio *dst, int flags)
+void remove_migration_ptes(struct folio *src, struct folio *dst,
+		enum ttu_flags flags)
 {
 	struct rmap_walk_arg rmap_walk_arg = {
 		.folio = src,
-		.map_unused_to_zeropage = flags & RMP_USE_SHARED_ZEROPAGE,
+		.map_unused_to_zeropage = flags & TTU_USE_SHARED_ZEROPAGE,
 	};
 	struct rmap_walk_control rwc = {
 		.arg = &rmap_walk_arg,
 	};
-	VM_BUG_ON_FOLIO((flags & RMP_USE_SHARED_ZEROPAGE) && (src != dst), src);
+	VM_BUG_ON_FOLIO((flags & TTU_USE_SHARED_ZEROPAGE) && (src != dst), src);
-	if (flags & RMP_LOCKED)
+	if (flags & TTU_RMAP_LOCKED)
 		rmap_walk_locked(dst, &rwc);
 	else
 		rmap_walk(dst, &rwc);
 	rc = move_to_new_folio(dst, src, mode);
 	if (page_was_mapped)
-		remove_migration_ptes(src, !rc ? dst : src,
-				ttu ? RMP_LOCKED : 0);
+		remove_migration_ptes(src, !rc ? dst : src, ttu);
 	if (ttu & TTU_RMAP_LOCKED)
 		i_mmap_unlock_write(mapping);