mm: remove call to hugetlb_free_pgd_range()
author    Anthony Yznaga <anthony.yznaga@oracle.com>
Wed, 16 Jul 2025 01:26:10 +0000 (18:26 -0700)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 25 Jul 2025 02:12:32 +0000 (19:12 -0700)
With the removal of the last arch-specific implementation of
hugetlb_free_pgd_range(), hugetlb VMAs no longer need special handling
when freeing page tables.
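For context, a minimal sketch of the generic fallback, assuming the definition
in include/asm-generic/hugetlb.h is the one in effect (sketched, not quoted
verbatim): once no architecture overrides it, the hugetlb path is just a thin
wrapper around free_pgd_range(), so routing hugetlb VMAs through the regular
path is equivalent.

    /*
     * Assumed shape of the generic fallback: with every arch-specific
     * override gone, hugetlb_free_pgd_range() reduces to a plain
     * free_pgd_range() call, so free_pgtables() can treat hugetlb VMAs
     * like any other VMA.
     */
    static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                    unsigned long addr, unsigned long end,
                    unsigned long floor, unsigned long ceiling)
    {
            free_pgd_range(tlb, addr, end, floor, ceiling);
    }

A side effect visible in the diff below: hugetlb VMAs no longer terminate the
batching loop (the !is_vm_hugetlb_page(next) check is gone), so they can be
gathered into the same free_pgd_range() call as neighbouring VMAs.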

Link: https://lkml.kernel.org/r/20250716012611.10369-3-anthony.yznaga@oracle.com
Signed-off-by: Anthony Yznaga <anthony.yznaga@oracle.com>
Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Acked-by: Oscar Salvador <osalvador@suse.de>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alexandre Ghiti <alexghiti@rivosinc.com>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: David Hildenbrand <david@redhat.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/memory.c

index bc27b1990fcba34a49f8ae9b556e86878cc9a06b..b4fb559dd0c6223d9baff87be57e36eff3592c81 100644
@@ -379,32 +379,26 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
                        vma_start_write(vma);
                unlink_anon_vmas(vma);
 
-               if (is_vm_hugetlb_page(vma)) {
-                       unlink_file_vma(vma);
-                       hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
-                               floor, next ? next->vm_start : ceiling);
-               } else {
-                       unlink_file_vma_batch_init(&vb);
-                       unlink_file_vma_batch_add(&vb, vma);
+               unlink_file_vma_batch_init(&vb);
+               unlink_file_vma_batch_add(&vb, vma);
 
-                       /*
-                        * Optimization: gather nearby vmas into one call down
-                        */
-                       while (next && next->vm_start <= vma->vm_end + PMD_SIZE
-                              && !is_vm_hugetlb_page(next)) {
-                               vma = next;
-                               next = mas_find(mas, ceiling - 1);
-                               if (unlikely(xa_is_zero(next)))
-                                       next = NULL;
-                               if (mm_wr_locked)
-                                       vma_start_write(vma);
-                               unlink_anon_vmas(vma);
-                               unlink_file_vma_batch_add(&vb, vma);
-                       }
-                       unlink_file_vma_batch_final(&vb);
-                       free_pgd_range(tlb, addr, vma->vm_end,
-                               floor, next ? next->vm_start : ceiling);
+               /*
+                * Optimization: gather nearby vmas into one call down
+                */
+               while (next && next->vm_start <= vma->vm_end + PMD_SIZE) {
+                       vma = next;
+                       next = mas_find(mas, ceiling - 1);
+                       if (unlikely(xa_is_zero(next)))
+                               next = NULL;
+                       if (mm_wr_locked)
+                               vma_start_write(vma);
+                       unlink_anon_vmas(vma);
+                       unlink_file_vma_batch_add(&vb, vma);
                }
+               unlink_file_vma_batch_final(&vb);
+
+               free_pgd_range(tlb, addr, vma->vm_end,
+                       floor, next ? next->vm_start : ceiling);
                vma = next;
        } while (vma);
 }