mm: convert partially_mapped set/clear operations to be atomic
author    Usama Arif <usamaarif642@gmail.com>
          Thu, 12 Dec 2024 18:33:51 +0000 (18:33 +0000)
committer Andrew Morton <akpm@linux-foundation.org>
          Thu, 19 Dec 2024 03:04:45 +0000 (19:04 -0800)
Other page flags in the 2nd page, like PG_hwpoison and PG_anon_exclusive,
can get modified concurrently.  Changes to those flags might be lost if
they happen at the same time as a non-atomic partially_mapped operation.
Hence, make the partially_mapped operations atomic.
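
To see why non-atomic bitops are unsafe here, consider the classic
lost-update race. The following is a minimal userspace C sketch, not
kernel code; the bit positions, variable names, and simplified ops are
illustrative only:

#include <stdatomic.h>

#define PARTIALLY_MAPPED (1UL << 0)
#define HWPOISON         (1UL << 1)

static _Atomic unsigned long flags_word;

/* Non-atomic set: a plain read-modify-write, as __folio_set_partially_mapped()
 * effectively performed before this patch. */
static void nonatomic_set_partially_mapped(void)
{
	unsigned long old = atomic_load(&flags_word);           /* read  */
	atomic_store(&flags_word, old | PARTIALLY_MAPPED);      /* write */
}

/* Atomic set: a single indivisible RMW, as folio_set_partially_mapped()
 * performs after this patch. */
static void atomic_set_partially_mapped(void)
{
	atomic_fetch_or(&flags_word, PARTIALLY_MAPPED);
}

/* The race: if another CPU sets HWPOISON between the read and the write in
 * nonatomic_set_partially_mapped(), the store is based on the stale 'old'
 * value and the HWPOISON bit is silently dropped. The atomic variant cannot
 * lose concurrent updates to other bits in the same word. */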

Link: https://lkml.kernel.org/r/20241212183351.1345389-1-usamaarif642@gmail.com
Fixes: 8422acdc97ed ("mm: introduce a pageflag for partially mapped folios")
Reported-by: David Hildenbrand <david@redhat.com>
Link: https://lore.kernel.org/all/e53b04ad-1827-43a2-a1ab-864c7efecf6e@redhat.com/
Signed-off-by: Usama Arif <usamaarif642@gmail.com>
Acked-by: David Hildenbrand <david@redhat.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Barry Song <baohua@kernel.org>
Cc: Domenico Cerasuolo <cerasuolodomenico@gmail.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Rapoport (Microsoft) <rppt@kernel.org>
Cc: Nico Pache <npache@redhat.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Yu Zhao <yuzhao@google.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/page-flags.h
mm/huge_memory.c

diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index cf46ac7208029a112aabd951d9c195601d101768..691506bdf2c5a82ba0308b0f68e5a150a7eebdb0 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -862,18 +862,10 @@ static inline void ClearPageCompound(struct page *page)
        ClearPageHead(page);
 }
 FOLIO_FLAG(large_rmappable, FOLIO_SECOND_PAGE)
-FOLIO_TEST_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
-/*
- * PG_partially_mapped is protected by deferred_split split_queue_lock,
- * so its safe to use non-atomic set/clear.
- */
-__FOLIO_SET_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
-__FOLIO_CLEAR_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
+FOLIO_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
 #else
 FOLIO_FLAG_FALSE(large_rmappable)
-FOLIO_TEST_FLAG_FALSE(partially_mapped)
-__FOLIO_SET_FLAG_NOOP(partially_mapped)
-__FOLIO_CLEAR_FLAG_NOOP(partially_mapped)
+FOLIO_FLAG_FALSE(partially_mapped)
 #endif
 
 #define PG_head_mask ((1UL << PG_head))
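
For context, this is roughly what the macros involved generate (a
simplified sketch of the definitions in include/linux/page-flags.h;
attributes and const handling elided):

/* FOLIO_FLAG(partially_mapped, FOLIO_SECOND_PAGE) now emits an atomic setter: */
static __always_inline void folio_set_partially_mapped(struct folio *folio)
{
	/* set_bit() is an atomic read-modify-write of the flags word */
	set_bit(PG_partially_mapped, folio_flags(folio, FOLIO_SECOND_PAGE));
}

/* The removed __FOLIO_SET_FLAG() emitted a non-atomic one: */
static __always_inline void __folio_set_partially_mapped(struct folio *folio)
{
	/* __set_bit() is a plain load/or/store and can lose concurrent
	 * updates to other bits in the same word */
	__set_bit(PG_partially_mapped, folio_flags(folio, FOLIO_SECOND_PAGE));
}

(FOLIO_FLAG() also generates the matching folio_test_*() and atomic
folio_clear_*() helpers used in the mm/huge_memory.c hunks below.)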
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9bb351caa61902869fb3897f3a10e3f8a0b73751..df0c4988dd88b1502c1d0121f112d327d2288c43 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3577,7 +3577,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
                    !list_empty(&folio->_deferred_list)) {
                        ds_queue->split_queue_len--;
                        if (folio_test_partially_mapped(folio)) {
-                               __folio_clear_partially_mapped(folio);
+                               folio_clear_partially_mapped(folio);
                                mod_mthp_stat(folio_order(folio),
                                              MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
                        }
@@ -3689,7 +3689,7 @@ bool __folio_unqueue_deferred_split(struct folio *folio)
        if (!list_empty(&folio->_deferred_list)) {
                ds_queue->split_queue_len--;
                if (folio_test_partially_mapped(folio)) {
-                       __folio_clear_partially_mapped(folio);
+                       folio_clear_partially_mapped(folio);
                        mod_mthp_stat(folio_order(folio),
                                      MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
                }
@@ -3733,7 +3733,7 @@ void deferred_split_folio(struct folio *folio, bool partially_mapped)
        spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
        if (partially_mapped) {
                if (!folio_test_partially_mapped(folio)) {
-                       __folio_set_partially_mapped(folio);
+                       folio_set_partially_mapped(folio);
                        if (folio_test_pmd_mappable(folio))
                                count_vm_event(THP_DEFERRED_SPLIT_PAGE);
                        count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED);
@@ -3826,7 +3826,7 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
                } else {
                        /* We lost race with folio_put() */
                        if (folio_test_partially_mapped(folio)) {
-                               __folio_clear_partially_mapped(folio);
+                               folio_clear_partially_mapped(folio);
                                mod_mthp_stat(folio_order(folio),
                                              MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
                        }
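
Note that every call site touched above still runs under
ds_queue->split_queue_lock, which continues to serialize the
partially_mapped transitions themselves and keeps the MTHP counter
updates paired with them; the atomic bitops guard the other flags
sharing the second page's flags word. A hypothetical condensed view of
deferred_split_folio() after the change (queueing and stats elided;
not the full function):

void deferred_split_folio(struct folio *folio, bool partially_mapped)
{
	struct deferred_split *ds_queue = get_deferred_split_queue(folio);
	unsigned long flags;

	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
	if (partially_mapped && !folio_test_partially_mapped(folio))
		folio_set_partially_mapped(folio);	/* atomic set_bit() */
	/* queueing of the folio on ds_queue elided */
	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
}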