git.ipfire.org Git - thirdparty/linux.git/commitdiff
mm: khugepaged: decouple SHMEM and file folios' collapse
author: Baolin Wang <baolin.wang@linux.alibaba.com>
Tue, 13 May 2025 06:56:35 +0000 (14:56 +0800)
committer: Andrew Morton <akpm@linux-foundation.org>
Thu, 22 May 2025 21:55:38 +0000 (14:55 -0700)
Originally, the file pages collapse was intended for tmpfs/shmem to merge
into THP in the background.  However, now not only tmpfs/shmem can support
large folios, but some other file systems (such as XFS, erofs ...) also
support large folios.  Therefore, it is time to decouple the support of
file folios collapse from SHMEM.

Link: https://lkml.kernel.org/r/ce5c2314e0368cf34bda26f9bacf01c982d4da17.1747119309.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Acked-by: David Hildenbrand <david@redhat.com>
Acked-by: Zi Yan <ziy@nvidia.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Mariano Pache <npache@redhat.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/khugepaged.h
mm/Kconfig
mm/khugepaged.c

index 1f46046080f515efb79098e17340db96a55223a9..b8d69cfbb58bca065c89574b7cd2e3fe0701c78f 100644 (file)
@@ -15,16 +15,8 @@ extern void khugepaged_enter_vma(struct vm_area_struct *vma,
                                 unsigned long vm_flags);
 extern void khugepaged_min_free_kbytes_update(void);
 extern bool current_is_khugepaged(void);
-#ifdef CONFIG_SHMEM
 extern int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
                                   bool install_pmd);
-#else
-static inline int collapse_pte_mapped_thp(struct mm_struct *mm,
-                                         unsigned long addr, bool install_pmd)
-{
-       return 0;
-}
-#endif
 
 static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
 {
index 60ea9eba48140a9bb7ef6fb30ee647cddb1574d4..bd08e151fa1bc51b6535bbc9c793ec55639a6f58 100644 (file)
@@ -886,7 +886,7 @@ config THP_SWAP
 
 config READ_ONLY_THP_FOR_FS
        bool "Read-only THP for filesystems (EXPERIMENTAL)"
-       depends on TRANSPARENT_HUGEPAGE && SHMEM
+       depends on TRANSPARENT_HUGEPAGE
 
        help
          Allow khugepaged to put read-only file-backed pages in THP.
index ebcd7c8a4b445046c3d79558a317dfdd16b9a075..cdf5a581368bf4f7f9241f4e301401a08d15543f 100644 (file)
@@ -1464,7 +1464,6 @@ static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
        }
 }
 
-#ifdef CONFIG_SHMEM
 /* folio must be locked, and mmap_lock must be held */
 static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
                        pmd_t *pmdp, struct folio *folio, struct page *page)
@@ -2353,14 +2352,6 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
        trace_mm_khugepaged_scan_file(mm, folio, file, present, swap, result);
        return result;
 }
-#else
-static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
-                                   struct file *file, pgoff_t start,
-                                   struct collapse_control *cc)
-{
-       BUILD_BUG();
-}
-#endif
 
 static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
                                            struct collapse_control *cc)
@@ -2436,7 +2427,7 @@ skip:
                        VM_BUG_ON(khugepaged_scan.address < hstart ||
                                  khugepaged_scan.address + HPAGE_PMD_SIZE >
                                  hend);
-                       if (IS_ENABLED(CONFIG_SHMEM) && !vma_is_anonymous(vma)) {
+                       if (!vma_is_anonymous(vma)) {
                                struct file *file = get_file(vma->vm_file);
                                pgoff_t pgoff = linear_page_index(vma,
                                                khugepaged_scan.address);
@@ -2782,7 +2773,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
                mmap_assert_locked(mm);
                memset(cc->node_load, 0, sizeof(cc->node_load));
                nodes_clear(cc->alloc_nmask);
-               if (IS_ENABLED(CONFIG_SHMEM) && !vma_is_anonymous(vma)) {
+               if (!vma_is_anonymous(vma)) {
                        struct file *file = get_file(vma->vm_file);
                        pgoff_t pgoff = linear_page_index(vma, addr);