From a3871560ffc5755e561b75e257d2b15b19395608 Mon Sep 17 00:00:00 2001
From: Zi Yan
Date: Fri, 18 Jul 2025 14:37:19 -0400
Subject: [PATCH] mm/huge_memory: get frozen folio refcount with folio_expected_ref_count()

Instead of open coding the refcount calculation, use
folio_expected_ref_count() to calculate the frozen folio refcount.
Because:

1. __folio_split() does not split a folio with PG_private, so there is
   no elevated refcount from PG_private;
2. a frozen folio in __folio_split() is fully unmapped, so
   folio_mapcount() in folio_expected_ref_count() is always 0;
3. (mapping || swap_cache) ? folio_nr_pages(folio) : 0 is taken care of
   by folio_expected_ref_count() too.

Link: https://lkml.kernel.org/r/20250718023000.4044406-6-ziy@nvidia.com
Link: https://lkml.kernel.org/r/20250718183720.4054515-6-ziy@nvidia.com
Signed-off-by: Zi Yan
Suggested-by: David Hildenbrand
Acked-by: Balbir Singh
Acked-by: David Hildenbrand
Reviewed-by: Lorenzo Stoakes
Cc: Antonio Quartulli
Cc: Baolin Wang
Cc: Barry Song
Cc: Dan Carpenter
Cc: Dev Jain
Cc: Hugh Dickins
Cc: Kirill A. Shutemov
Cc: Liam Howlett
Cc: Mariano Pache
Cc: Matthew Brost
Cc: Ryan Roberts
Signed-off-by: Andrew Morton
---
 mm/huge_memory.c | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d98283164edab..19e69704fcff2 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3731,6 +3731,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 	if (folio_ref_freeze(folio, 1 + extra_pins)) {
 		struct address_space *swap_cache = NULL;
 		struct lruvec *lruvec;
+		int expected_refs;
 
 		if (folio_order(folio) > 1 &&
 		    !list_empty(&folio->_deferred_list)) {
@@ -3794,11 +3795,8 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 		     new_folio = next) {
 			next = folio_next(new_folio);
 
-			folio_ref_unfreeze(
-				new_folio,
-				1 + ((mapping || swap_cache) ?
-					     folio_nr_pages(new_folio) :
-					     0));
+			expected_refs = folio_expected_ref_count(new_folio) + 1;
+			folio_ref_unfreeze(new_folio, expected_refs);
 
 			lru_add_split_folio(folio, new_folio, lruvec, list);
 
@@ -3828,8 +3826,8 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 		 * Otherwise, a parallel folio_try_get() can grab @folio
 		 * and its caller can see stale page cache entries.
 		 */
-		folio_ref_unfreeze(folio, 1 +
-			((mapping || swap_cache) ? folio_nr_pages(folio) : 0));
+		expected_refs = folio_expected_ref_count(folio) + 1;
+		folio_ref_unfreeze(folio, expected_refs);
 
 		unlock_page_lruvec(lruvec);
-- 
2.47.2
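
[Editor's note] As a sanity check on the reasoning in the commit message, the following userspace C model mirrors the arithmetic at the two unfreeze sites. It is a minimal sketch, not kernel code: struct folio_model, expected_ref_count(), and old_open_coded() are hypothetical stand-ins that assume the conditions the commit message states (folio fully unmapped, no PG_private), and only model how folio_expected_ref_count()'s result reduces to the old open-coded value under those conditions.

	#include <assert.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Simplified stand-in for struct folio: only the state that feeds
	 * the refcount calculation at the __folio_split() unfreeze sites. */
	struct folio_model {
		bool has_mapping;   /* folio->mapping set (page cache)   */
		bool in_swapcache;  /* anonymous folio in the swap cache */
		bool has_private;   /* PG_private; never set in this path */
		int  mapcount;      /* page table mappings; 0 when frozen */
		int  nr_pages;      /* folio_nr_pages()                   */
	};

	/* Sketch of folio_expected_ref_count()'s logic (not the kernel
	 * code): one reference per page from the page cache or swap
	 * cache, one from PG_private, plus one per page table mapping. */
	static int expected_ref_count(const struct folio_model *f)
	{
		int refs = 0;

		if (f->has_mapping || f->in_swapcache)
			refs += f->nr_pages;
		refs += f->has_private;
		refs += f->mapcount;
		return refs;
	}

	/* The open-coded value the patch removes. */
	static int old_open_coded(const struct folio_model *f)
	{
		return (f->has_mapping || f->in_swapcache) ? f->nr_pages : 0;
	}

	int main(void)
	{
		/* Enumerate the states reachable at the unfreeze sites:
		 * fully unmapped, no PG_private, any cache membership. */
		for (int cache = 0; cache < 3; cache++) {
			struct folio_model f = {
				.has_mapping  = (cache == 1),
				.in_swapcache = (cache == 2),
				.has_private  = false, /* point 1 of the message */
				.mapcount     = 0,     /* point 2 of the message */
				.nr_pages     = 16,
			};
			assert(expected_ref_count(&f) == old_open_coded(&f));
		}
		printf("expected_ref_count matches the old open-coded value\n");
		return 0;
	}

The "+ 1" added at both call sites in the patch corresponds to the caller's own reference, mirroring the "1 + extra_pins" used when the folio was frozen.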