git.ipfire.org Git - thirdparty/linux.git/commitdiff
mm/migrate: remove slab checks in isolate_movable_page()
Author: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Tue, 10 Dec 2024 12:48:07 +0000 (21:48 +0900)
Committer: Andrew Morton <akpm@linux-foundation.org>
Tue, 14 Jan 2025 06:40:58 +0000 (22:40 -0800)
Commit 8b8817630ae8 ("mm/migrate: make isolate_movable_page() skip slab
pages") introduced slab checks to prevent mis-identification of slab pages
as movable kernel pages.

However, after Matthew's frozen folio series, these slab checks became
unnecessary as the migration logic fails to increase the reference count
for frozen slab folios.  Remove these redundant slab checks and associated
memory barriers.

Link: https://lkml.kernel.org/r/20241210124807.8584-1-42.hyeyoo@gmail.com
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/migrate.c
mm/slub.c

index e9e00d1d1d19b19ba131b993e765153744c056c8..32cc8e0b1ccef495277369a58299e788685a5c7a 100644 (file)
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -68,10 +68,6 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode)
        if (!folio)
                goto out;
 
-       if (unlikely(folio_test_slab(folio)))
-               goto out_putfolio;
-       /* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
-       smp_rmb();
        /*
         * Check movable flag before taking the page lock because
         * we use non-atomic bitops on newly allocated page flags so
@@ -79,10 +75,6 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode)
         */
        if (unlikely(!__folio_test_movable(folio)))
                goto out_putfolio;
-       /* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
-       smp_rmb();
-       if (unlikely(folio_test_slab(folio)))
-               goto out_putfolio;
 
        /*
         * As movable pages are not isolated from LRU lists, concurrent
index a8e9b5106f4c100b73f95c33bf282d5f896c2af8..996691c137eba12e71584eb03a8993cfbd4fc0b3 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2429,8 +2429,6 @@ static inline struct slab *alloc_slab_page(gfp_t flags, int node,
 
        slab = folio_slab(folio);
        __folio_set_slab(folio);
-       /* Make the flag visible before any changes to folio->mapping */
-       smp_wmb();
        if (folio_is_pfmemalloc(folio))
                slab_set_pfmemalloc(slab);
 
@@ -2651,8 +2649,6 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab)
 
        __slab_clear_pfmemalloc(slab);
        folio->mapping = NULL;
-       /* Make the mapping reset visible before clearing the flag */
-       smp_wmb();
        __folio_clear_slab(folio);
        mm_account_reclaimed_pages(pages);
        unaccount_slab(slab, order, s);