]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
mm/huge: avoid big else branch in zap_huge_pmd()
author: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Fri, 20 Mar 2026 18:07:19 +0000 (18:07 +0000)
committer: Andrew Morton <akpm@linux-foundation.org>
Sun, 5 Apr 2026 20:53:45 +0000 (13:53 -0700)
We don't need to have an extra level of indentation; we can simply exit
early in the first two branches.

No functional change intended.

Link: https://lkml.kernel.org/r/6b4d5efdbf5554b8fe788f677d0b50f355eec999.1774029655.git.ljs@kernel.org
Signed-off-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Acked-by: Qi Zheng <zhengqi.arch@bytedance.com>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Cc: Barry Song <baohua@kernel.org>
Cc: David Hildenbrand <david@kernel.org>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nico Pache <npache@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/huge_memory.c

index db390b0098d9e2ae4471db0fea16e4a0b5706cf2..4dfffd6a1bbe1d25723286bd6b238247cd023a26 100644 (file)
@@ -2405,8 +2405,10 @@ static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                 pmd_t *pmd, unsigned long addr)
 {
-       pmd_t orig_pmd;
+       struct folio *folio = NULL;
+       int flush_needed = 1;
        spinlock_t *ptl;
+       pmd_t orig_pmd;
 
        tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
 
@@ -2427,59 +2429,60 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                if (arch_needs_pgtable_deposit())
                        zap_deposited_table(tlb->mm, pmd);
                spin_unlock(ptl);
-       } else if (is_huge_zero_pmd(orig_pmd)) {
+               return 1;
+       }
+       if (is_huge_zero_pmd(orig_pmd)) {
                if (!vma_is_dax(vma) || arch_needs_pgtable_deposit())
                        zap_deposited_table(tlb->mm, pmd);
                spin_unlock(ptl);
-       } else {
-               struct folio *folio = NULL;
-               int flush_needed = 1;
+               return 1;
+       }
 
-               if (pmd_present(orig_pmd)) {
-                       struct page *page = pmd_page(orig_pmd);
+       if (pmd_present(orig_pmd)) {
+               struct page *page = pmd_page(orig_pmd);
 
-                       folio = page_folio(page);
-                       folio_remove_rmap_pmd(folio, page, vma);
-                       WARN_ON_ONCE(folio_mapcount(folio) < 0);
-                       VM_BUG_ON_PAGE(!PageHead(page), page);
-               } else if (pmd_is_valid_softleaf(orig_pmd)) {
-                       const softleaf_t entry = softleaf_from_pmd(orig_pmd);
+               folio = page_folio(page);
+               folio_remove_rmap_pmd(folio, page, vma);
+               WARN_ON_ONCE(folio_mapcount(folio) < 0);
+               VM_BUG_ON_PAGE(!PageHead(page), page);
+       } else if (pmd_is_valid_softleaf(orig_pmd)) {
+               const softleaf_t entry = softleaf_from_pmd(orig_pmd);
 
-                       folio = softleaf_to_folio(entry);
-                       flush_needed = 0;
+               folio = softleaf_to_folio(entry);
+               flush_needed = 0;
 
-                       if (!thp_migration_supported())
-                               WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
-               }
+               if (!thp_migration_supported())
+                       WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
+       }
 
-               if (folio_test_anon(folio)) {
+       if (folio_test_anon(folio)) {
+               zap_deposited_table(tlb->mm, pmd);
+               add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
+       } else {
+               if (arch_needs_pgtable_deposit())
                        zap_deposited_table(tlb->mm, pmd);
-                       add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
-               } else {
-                       if (arch_needs_pgtable_deposit())
-                               zap_deposited_table(tlb->mm, pmd);
-                       add_mm_counter(tlb->mm, mm_counter_file(folio),
-                                      -HPAGE_PMD_NR);
-
-                       /*
-                        * Use flush_needed to indicate whether the PMD entry
-                        * is present, instead of checking pmd_present() again.
-                        */
-                       if (flush_needed && pmd_young(orig_pmd) &&
-                           likely(vma_has_recency(vma)))
-                               folio_mark_accessed(folio);
-               }
+               add_mm_counter(tlb->mm, mm_counter_file(folio),
+                              -HPAGE_PMD_NR);
 
-               if (folio_is_device_private(folio)) {
-                       folio_remove_rmap_pmd(folio, &folio->page, vma);
-                       WARN_ON_ONCE(folio_mapcount(folio) < 0);
-                       folio_put(folio);
-               }
+               /*
+                * Use flush_needed to indicate whether the PMD entry
+                * is present, instead of checking pmd_present() again.
+                */
+               if (flush_needed && pmd_young(orig_pmd) &&
+                   likely(vma_has_recency(vma)))
+                       folio_mark_accessed(folio);
+       }
 
-               spin_unlock(ptl);
-               if (flush_needed)
-                       tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE);
+       if (folio_is_device_private(folio)) {
+               folio_remove_rmap_pmd(folio, &folio->page, vma);
+               WARN_ON_ONCE(folio_mapcount(folio) < 0);
+               folio_put(folio);
        }
+
+       spin_unlock(ptl);
+       if (flush_needed)
+               tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE);
+
        return 1;
 }