mm/huge_memory: deduplicate code in __folio_split()
Author:     Zi Yan <ziy@nvidia.com>
AuthorDate: Fri, 18 Jul 2025 18:37:17 +0000 (14:37 -0400)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Fri, 25 Jul 2025 02:12:39 +0000 (19:12 -0700)
xas_unlock(), remap_page(), and local_irq_enable() are moved out of the
if branches to deduplicate the code.  While at it, add a remap_flags
variable to clean up the remap_page() call site.  nr_dropped is renamed
to nr_shmem_dropped, as it becomes a variable at __folio_split() scope.

Link: https://lkml.kernel.org/r/20250718183720.4054515-4-ziy@nvidia.com
Signed-off-by: Zi Yan <ziy@nvidia.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Antonio Quartulli <antonio@mandelbit.com>
Cc: Balbir Singh <balbirs@nvidia.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Dan Carpenter <dan.carpenter@linaro.org>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Kirill A. Shutemov <k.shutemov@gmail.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
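
The change funnels every exit through one cleanup path instead of
repeating the unlock/remap calls in each branch.  A minimal standalone
sketch of that pattern, with hypothetical helpers standing in for the
kernel APIs (this is illustrative, not the kernel code itself):

/*
 * Minimal sketch of the pattern this commit applies (hypothetical
 * helpers, not kernel APIs): every exit, success or failure, falls
 * through one labeled cleanup block, and the flags for the final
 * remap call are computed once before its single call site.
 */
#include <stdio.h>
#include <stdbool.h>

static void lock_state(void)   { puts("lock"); }
static void unlock_state(void) { puts("unlock"); }

static int do_split(bool valid)
{
	int ret = 0;
	int remap_flags = 0;		/* mirrors the new remap_flags variable */

	lock_state();
	if (!valid) {
		ret = -1;		/* stands in for ret = -EAGAIN */
		goto fail;
	}
	puts("split work");
fail:
	unlock_state();			/* cleanup now appears exactly once */
	if (!ret)
		remap_flags = 1;	/* e.g. RMP_USE_SHARED_ZEROPAGE */
	printf("remap(flags=%d)\n", remap_flags);
	return ret;
}

int main(void)
{
	printf("ret=%d\n", do_split(true));
	printf("ret=%d\n", do_split(false));
	return 0;
}

With a single labeled exit, the cleanup calls cannot drift out of sync
between the success and failure branches, which is exactly what the
hunks below achieve with the new placement of the fail: label.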
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e01359008b1350ecfc1cbe231c40a3a4687c6fc1..d36f7bdaeb38383781713f37eeb9c4b33c309969 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3595,6 +3595,8 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
        struct anon_vma *anon_vma = NULL;
        int order = folio_order(folio);
        struct folio *new_folio, *next;
+       int nr_shmem_dropped = 0;
+       int remap_flags = 0;
        int extra_pins, ret;
        pgoff_t end;
        bool is_hzp;
@@ -3718,15 +3720,16 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
                 */
                xas_lock(&xas);
                xas_reset(&xas);
-               if (xas_load(&xas) != folio)
+               if (xas_load(&xas) != folio) {
+                       ret = -EAGAIN;
                        goto fail;
+               }
        }
 
        /* Prevent deferred_split_scan() touching ->_refcount */
        spin_lock(&ds_queue->split_queue_lock);
        if (folio_ref_freeze(folio, 1 + extra_pins)) {
                struct address_space *swap_cache = NULL;
-               int nr_dropped = 0;
                struct lruvec *lruvec;
 
                if (folio_order(folio) > 1 &&
@@ -3798,7 +3801,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
                        /* Some pages can be beyond EOF: drop them from cache */
                        if (new_folio->index >= end) {
                                if (shmem_mapping(mapping))
-                                       nr_dropped += folio_nr_pages(new_folio);
+                                       nr_shmem_dropped += folio_nr_pages(new_folio);
                                else if (folio_test_clear_dirty(new_folio))
                                        folio_account_cleaned(
                                                new_folio,
@@ -3828,47 +3831,41 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 
                if (swap_cache)
                        xa_unlock(&swap_cache->i_pages);
-               if (mapping)
-                       xas_unlock(&xas);
+       } else {
+               spin_unlock(&ds_queue->split_queue_lock);
+               ret = -EAGAIN;
+       }
+fail:
+       if (mapping)
+               xas_unlock(&xas);
+
+       local_irq_enable();
 
-               local_irq_enable();
+       if (nr_shmem_dropped)
+               shmem_uncharge(mapping->host, nr_shmem_dropped);
 
-               if (nr_dropped)
-                       shmem_uncharge(mapping->host, nr_dropped);
+       if (!ret && is_anon)
+               remap_flags = RMP_USE_SHARED_ZEROPAGE;
+       remap_page(folio, 1 << order, remap_flags);
 
-               remap_page(folio, 1 << order,
-                          !ret && folio_test_anon(folio) ?
-                                  RMP_USE_SHARED_ZEROPAGE :
-                                  0);
+       /*
+        * Unlock all after-split folios except the one containing
+        * @lock_at page. If @folio is not split, it will be kept locked.
+        */
+       for (new_folio = folio; new_folio != end_folio; new_folio = next) {
+               next = folio_next(new_folio);
+               if (new_folio == page_folio(lock_at))
+                       continue;
 
+               folio_unlock(new_folio);
                /*
-                * Unlock all after-split folios except the one containing
-                * @lock_at page. If @folio is not split, it will be kept locked.
+                * Subpages may be freed if there wasn't any mapping
+                * like if add_to_swap() is running on a lru page that
+                * had its mapping zapped. And freeing these pages
+                * requires taking the lru_lock so we do the put_page
+                * of the tail pages after the split is complete.
                 */
-               for (new_folio = folio; new_folio != end_folio;
-                    new_folio = next) {
-                       next = folio_next(new_folio);
-                       if (new_folio == page_folio(lock_at))
-                               continue;
-
-                       folio_unlock(new_folio);
-                       /*
-                        * Subpages may be freed if there wasn't any mapping
-                        * like if add_to_swap() is running on a lru page that
-                        * had its mapping zapped. And freeing these pages
-                        * requires taking the lru_lock so we do the put_page
-                        * of the tail pages after the split is complete.
-                        */
-                       free_folio_and_swap_cache(new_folio);
-               }
-       } else {
-               spin_unlock(&ds_queue->split_queue_lock);
-fail:
-               if (mapping)
-                       xas_unlock(&xas);
-               local_irq_enable();
-               remap_page(folio, folio_nr_pages(folio), 0);
-               ret = -EAGAIN;
+               free_folio_and_swap_cache(new_folio);
        }
 
 out_unlock: