mm/huge_memory: get frozen folio refcount with folio_expected_ref_count()
author Zi Yan <ziy@nvidia.com>
Fri, 18 Jul 2025 18:37:19 +0000 (14:37 -0400)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 25 Jul 2025 02:12:39 +0000 (19:12 -0700)
Instead of open-coding the refcount calculation, use
folio_expected_ref_count() to calculate the frozen folio refcount.  This
is equivalent (a sketch of the accounting follows the list) because:

1. __folio_split() does not split a folio with PG_private, so no elevated
   refcount from PG_private;
2. a frozen folio in __folio_split() is fully unmapped, so folio_mapcount()
   in folio_expected_ref_count() is always 0;
3. the (mapping || swap_cache) ? folio_nr_pages(folio) : 0 term is taken
   care of by folio_expected_ref_count() too.
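
A simplified sketch of the accounting done by folio_expected_ref_count()
(the real implementation lives in include/linux/mm.h; the helper name
sketch_expected_ref_count below is illustrative only) shows why the
substitution holds under the three conditions above:

	/* Illustrative sketch, not the exact mm.h implementation. */
	static int sketch_expected_ref_count(const struct folio *folio)
	{
		int ref_count = 0;

		/* One reference per page from the page cache or swap cache. */
		if (folio_test_anon(folio))
			ref_count += folio_test_swapcache(folio) ?
					folio_nr_pages(folio) : 0;
		else
			ref_count += folio->mapping ? folio_nr_pages(folio) : 0;

		/* One reference from PG_private; never set in __folio_split(). */
		ref_count += folio_test_private(folio);

		/* One per page table mapping; 0 for a frozen, unmapped folio. */
		return ref_count + folio_mapcount(folio);
	}

With PG_private clear and folio_mapcount() == 0, this reduces to exactly
the open-coded (mapping || swap_cache) ? folio_nr_pages(folio) : 0 term,
so folio_expected_ref_count(folio) + 1 matches the old unfreeze value.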

Link: https://lkml.kernel.org/r/20250718023000.4044406-6-ziy@nvidia.com
Link: https://lkml.kernel.org/r/20250718183720.4054515-6-ziy@nvidia.com
Signed-off-by: Zi Yan <ziy@nvidia.com>
Suggested-by: David Hildenbrand <david@redhat.com>
Acked-by: Balbir Singh <balbirs@nvidia.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Antonio Quartulli <antonio@mandelbit.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Dan Carpenter <dan.carpenter@linaro.org>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Kirill A. Shutemov <k.shutemov@gmail.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Mariano Pache <npache@redhat.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/huge_memory.c

index d98283164edabd9156eeaa46f072a425d98bc3e9..19e69704fcff2d180ef23cd4845bba2d395acd08 100644
@@ -3731,6 +3731,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
        if (folio_ref_freeze(folio, 1 + extra_pins)) {
                struct address_space *swap_cache = NULL;
                struct lruvec *lruvec;
+               int expected_refs;
 
                if (folio_order(folio) > 1 &&
                    !list_empty(&folio->_deferred_list)) {
@@ -3794,11 +3795,8 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
                     new_folio = next) {
                        next = folio_next(new_folio);
 
-                       folio_ref_unfreeze(
-                               new_folio,
-                               1 + ((mapping || swap_cache) ?
-                                            folio_nr_pages(new_folio) :
-                                            0));
+                       expected_refs = folio_expected_ref_count(new_folio) + 1;
+                       folio_ref_unfreeze(new_folio, expected_refs);
 
                        lru_add_split_folio(folio, new_folio, lruvec, list);
 
@@ -3828,8 +3826,8 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
                 * Otherwise, a parallel folio_try_get() can grab @folio
                 * and its caller can see stale page cache entries.
                 */
-               folio_ref_unfreeze(folio, 1 +
-                       ((mapping || swap_cache) ? folio_nr_pages(folio) : 0));
+               expected_refs = folio_expected_ref_count(folio) + 1;
+               folio_ref_unfreeze(folio, expected_refs);
 
                unlock_page_lruvec(lruvec);