mm: shmem: allow fallback to smaller large orders for tmpfs mmap() access
author     Baolin Wang <baolin.wang@linux.alibaba.com>
           Fri, 14 Nov 2025 00:46:32 +0000 (08:46 +0800)
committer  Andrew Morton <akpm@linux-foundation.org>
           Mon, 24 Nov 2025 23:08:53 +0000 (15:08 -0800)
Commit 69e0a3b49003 ("mm: shmem: fix the strategy for the tmpfs 'huge='
options") fixed the large order allocation strategy for tmpfs: always try
PMD-sized large folios first and, if that fails, fall back to smaller
large orders.  Large folio allocation for tmpfs via mmap() should follow
the same strategy.  Let's unify the large order allocation strategy for
tmpfs.

There is no functional change for large folio allocation of anonymous shmem.
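
For illustration only, a minimal userspace C sketch of the fallback idea
described above; try_alloc_order() and pick_order() are hypothetical
stand-ins, not the shmem allocation path:

  #include <stdbool.h>
  #include <stdio.h>

  #define PMD_ORDER 9   /* assumption: 4 KiB pages, 2 MiB PMD folio */

  /* Hypothetical allocator stub: pretend only order <= 4 succeeds. */
  static bool try_alloc_order(int order)
  {
          return order <= 4;
  }

  /* Try the largest candidate order first, then step down. */
  static int pick_order(unsigned long orders)
  {
          for (int order = PMD_ORDER; order >= 0; order--) {
                  if (!(orders & (1UL << order)))
                          continue;
                  if (try_alloc_order(order))
                          return order;
          }
          return -1;      /* nothing fits; caller falls back to order-0 */
  }

  int main(void)
  {
          /* All orders 0..PMD_ORDER as candidates. */
          unsigned long orders = (1UL << (PMD_ORDER + 1)) - 1;

          printf("picked order %d\n", pick_order(orders));
          return 0;
  }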

Link: https://lkml.kernel.org/r/283a0bdfd6ac7aa334a491422bcae70919c572bd.1763008453.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/shmem.c

index 395ca58ac4a5578cee5d5a21117fb9e57fe5ef43..fc835b3e4914a76f181342ab87cc37c0171d9661 100644 (file)
@@ -645,34 +645,23 @@ static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index
         * the mTHP interface, so we still use PMD-sized huge order to
         * check whether global control is enabled.
         *
-        * For tmpfs mmap()'s huge order, we still use PMD-sized order to
-        * allocate huge pages due to lack of a write size hint.
-        *
         * For tmpfs with 'huge=always' or 'huge=within_size' mount option,
         * we will always try PMD-sized order first. If that failed, it will
         * fall back to small large folios.
         */
        switch (SHMEM_SB(inode->i_sb)->huge) {
        case SHMEM_HUGE_ALWAYS:
-               if (vma)
-                       return maybe_pmd_order;
-
                return THP_ORDERS_ALL_FILE_DEFAULT;
        case SHMEM_HUGE_WITHIN_SIZE:
-               if (vma)
-                       within_size_orders = maybe_pmd_order;
-               else
-                       within_size_orders = THP_ORDERS_ALL_FILE_DEFAULT;
-
-               within_size_orders = shmem_get_orders_within_size(inode, within_size_orders,
-                                                                 index, write_end);
+               within_size_orders = shmem_get_orders_within_size(inode,
+                               THP_ORDERS_ALL_FILE_DEFAULT, index, write_end);
                if (within_size_orders > 0)
                        return within_size_orders;
 
                fallthrough;
        case SHMEM_HUGE_ADVISE:
                if (vm_flags & VM_HUGEPAGE)
-                       return maybe_pmd_order;
+                       return THP_ORDERS_ALL_FILE_DEFAULT;
                fallthrough;
        default:
                return 0;
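
As a rough userspace sketch of the 'within_size' filtering the hunk now
relies on for all callers (all names hypothetical, not
shmem_get_orders_within_size() itself): starting from the highest
candidate order, drop any order whose aligned folio around the faulting
index would extend past the file size.

  #include <stdio.h>

  #define PAGE_SHIFT 12   /* assumption: 4 KiB pages */

  /* Keep only orders whose aligned folio still fits in the file size.
   * Assumes a 64-bit unsigned long. */
  static unsigned long orders_within_size(unsigned long orders,
                                          unsigned long index,
                                          unsigned long long size_bytes)
  {
          unsigned long long size_pages =
                  (size_bytes + (1ULL << PAGE_SHIFT) - 1) >> PAGE_SHIFT;

          while (orders) {
                  int order = 63 - __builtin_clzl(orders); /* highest set bit */
                  /* End page index (exclusive) of the aligned folio
                   * that would cover 'index' at this order. */
                  unsigned long end = ((index >> order) + 1) << order;

                  if (end <= size_pages)
                          return orders;          /* this order fits */

                  orders &= ~(1UL << order);      /* too big, step down */
          }
          return 0;
  }

  int main(void)
  {
          /* Orders 0..9 as candidates, a 64 KiB file, fault at page 0:
           * everything above order 4 gets filtered out. */
          unsigned long mask = orders_within_size((1UL << 10) - 1, 0, 64 * 1024);

          printf("surviving order mask: 0x%lx\n", mask);
          return 0;
  }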