git.ipfire.org Git - thirdparty/kernel/stable.git/blobdiff - mm/migrate.c
mm: migrate: record the mlocked page status to remove unnecessary lru drain
[thirdparty/kernel/stable.git] / mm / migrate.c
index 125194f5af0f73abbb74d7247e15dd7553b30812..35a88334bb3c2ffa9b641f7fc8b9abe0ee53f04d 100644 (file)
@@ -1027,22 +1027,28 @@ union migration_ptr {
        struct anon_vma *anon_vma;
        struct address_space *mapping;
 };
+
+enum {
+       PAGE_WAS_MAPPED = BIT(0),
+       PAGE_WAS_MLOCKED = BIT(1),
+};
+
 static void __migrate_folio_record(struct folio *dst,
-                                  unsigned long page_was_mapped,
+                                  unsigned long old_page_state,
                                   struct anon_vma *anon_vma)
 {
        union migration_ptr ptr = { .anon_vma = anon_vma };
        dst->mapping = ptr.mapping;
-       dst->private = (void *)page_was_mapped;
+       dst->private = (void *)old_page_state;
 }
 
 static void __migrate_folio_extract(struct folio *dst,
-                                  int *page_was_mappedp,
+                                  int *old_page_state,
                                   struct anon_vma **anon_vmap)
 {
        union migration_ptr ptr = { .mapping = dst->mapping };
        *anon_vmap = ptr.anon_vma;
-       *page_was_mappedp = (unsigned long)dst->private;
+       *old_page_state = (unsigned long)dst->private;
        dst->mapping = NULL;
        dst->private = NULL;
 }
@@ -1103,7 +1109,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
 {
        struct folio *dst;
        int rc = -EAGAIN;
-       int page_was_mapped = 0;
+       int old_page_state = 0;
        struct anon_vma *anon_vma = NULL;
        bool is_lru = !__folio_test_movable(src);
        bool locked = false;
@@ -1157,6 +1163,8 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
                folio_lock(src);
        }
        locked = true;
+       if (folio_test_mlocked(src))
+               old_page_state |= PAGE_WAS_MLOCKED;
 
        if (folio_test_writeback(src)) {
                /*
@@ -1206,7 +1214,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
        dst_locked = true;
 
        if (unlikely(!is_lru)) {
-               __migrate_folio_record(dst, page_was_mapped, anon_vma);
+               __migrate_folio_record(dst, old_page_state, anon_vma);
                return MIGRATEPAGE_UNMAP;
        }
 
@@ -1232,11 +1240,11 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
                VM_BUG_ON_FOLIO(folio_test_anon(src) &&
                               !folio_test_ksm(src) && !anon_vma, src);
                try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
-               page_was_mapped = 1;
+               old_page_state |= PAGE_WAS_MAPPED;
        }
 
        if (!folio_mapped(src)) {
-               __migrate_folio_record(dst, page_was_mapped, anon_vma);
+               __migrate_folio_record(dst, old_page_state, anon_vma);
                return MIGRATEPAGE_UNMAP;
        }
 
@@ -1248,7 +1256,8 @@ out:
        if (rc == -EAGAIN)
                ret = NULL;
 
-       migrate_folio_undo_src(src, page_was_mapped, anon_vma, locked, ret);
+       migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
+                              anon_vma, locked, ret);
        migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);
 
        return rc;
@@ -1261,12 +1270,12 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
                              struct list_head *ret)
 {
        int rc;
-       int page_was_mapped = 0;
+       int old_page_state = 0;
        struct anon_vma *anon_vma = NULL;
        bool is_lru = !__folio_test_movable(src);
        struct list_head *prev;
 
-       __migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
+       __migrate_folio_extract(dst, &old_page_state, &anon_vma);
        prev = dst->lru.prev;
        list_del(&dst->lru);
 
@@ -1287,10 +1296,10 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
         * isolated from the unevictable LRU: but this case is the easiest.
         */
        folio_add_lru(dst);
-       if (page_was_mapped)
+       if (old_page_state & PAGE_WAS_MLOCKED)
                lru_add_drain();
 
-       if (page_was_mapped)
+       if (old_page_state & PAGE_WAS_MAPPED)
                remove_migration_ptes(src, dst, false);
 
 out_unlock_both:
@@ -1322,11 +1331,12 @@ out:
         */
        if (rc == -EAGAIN) {
                list_add(&dst->lru, prev);
-               __migrate_folio_record(dst, page_was_mapped, anon_vma);
+               __migrate_folio_record(dst, old_page_state, anon_vma);
                return rc;
        }
 
-       migrate_folio_undo_src(src, page_was_mapped, anon_vma, true, ret);
+       migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
+                              anon_vma, true, ret);
        migrate_folio_undo_dst(dst, true, put_new_folio, private);
 
        return rc;
@@ -1799,12 +1809,12 @@ out:
        dst = list_first_entry(&dst_folios, struct folio, lru);
        dst2 = list_next_entry(dst, lru);
        list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
-               int page_was_mapped = 0;
+               int old_page_state = 0;
                struct anon_vma *anon_vma = NULL;
 
-               __migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
-               migrate_folio_undo_src(folio, page_was_mapped, anon_vma,
-                                      true, ret_folios);
+               __migrate_folio_extract(dst, &old_page_state, &anon_vma);
+               migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
+                                      anon_vma, true, ret_folios);
                list_del(&dst->lru);
                migrate_folio_undo_dst(dst, true, put_new_folio, private);
                dst = dst2;