mm/shmem: fix THP allocation and fallback loop
author    Kairui Song <kasong@tencent.com>
          Wed, 22 Oct 2025 10:57:19 +0000 (18:57 +0800)
committer Andrew Morton <akpm@linux-foundation.org>
          Mon, 10 Nov 2025 05:19:42 +0000 (21:19 -0800)
The order check and fallback loop updates the index value on every
iteration.  This leaves the index wrongly aligned to a larger order while
the loop shrinks the order.

This may result in inserting and returning a folio at the wrong index,
causing data corruption with some userspace workloads [1].
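
To make the mis-alignment concrete, here is a minimal userspace sketch of
the buggy and fixed patterns (illustrative only, not kernel code: the index
value 63, the order-9-to-order-4 fallback, and the local round_down()
definition are all assumptions for the example):

	#include <stdio.h>

	/* Userspace stand-in for the kernel's round_down() on power-of-two sizes. */
	#define round_down(x, y) ((x) & ~((unsigned long)(y) - 1))

	int main(void)
	{
		unsigned long index = 63;	/* hypothetical faulting page index */

		/* Buggy pattern: the failed order-9 attempt clobbers index itself. */
		unsigned long bad = round_down(index, 1UL << 9);	/* 63 -> 0 */
		bad = round_down(bad, 1UL << 4);	/* order-4 fallback rounds 0, not 63 */

		/* Fixed pattern: round into a temporary, leave index untouched. */
		unsigned long aligned_index = round_down(index, 1UL << 4);	/* 63 -> 48 */

		printf("buggy index: %lu, fixed index: %lu\n", bad, aligned_index);
		return 0;
	}

With the old code, the order-4 folio would be inserted at index 0 rather
than 48, i.e. at a position aligned for the larger order that was never
actually allocated.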

[kasong@tencent.com: introduce a temporary variable to improve code]
Link: https://lkml.kernel.org/r/20251023065913.36925-1-ryncsn@gmail.com
Link: https://lore.kernel.org/linux-mm/CAMgjq7DqgAmj25nDUwwu1U2cSGSn8n4-Hqpgottedy0S6YYeUw@mail.gmail.com/ [1]
Link: https://lkml.kernel.org/r/20251022105719.18321-1-ryncsn@gmail.com
Fixes: e7a2ab7b3bb5 ("mm: shmem: add mTHP support for anonymous shmem")
Closes: https://lore.kernel.org/linux-mm/CAMgjq7DqgAmj25nDUwwu1U2cSGSn8n4-Hqpgottedy0S6YYeUw@mail.gmail.com/
Signed-off-by: Kairui Song <kasong@tencent.com>
Acked-by: David Hildenbrand <david@redhat.com>
Acked-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reviewed-by: Barry Song <baohua@kernel.org>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Nico Pache <npache@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/shmem.c

diff --git a/mm/shmem.c b/mm/shmem.c
index b9081b817d28f3db1fbdd90ed3f04b6904d6ff18..58701d14dd96cd4aaf17f817c6a8eaa05086bfc9 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1882,6 +1882,7 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
        struct shmem_inode_info *info = SHMEM_I(inode);
        unsigned long suitable_orders = 0;
        struct folio *folio = NULL;
+       pgoff_t aligned_index;
        long pages;
        int error, order;
 
@@ -1895,10 +1896,12 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
                order = highest_order(suitable_orders);
                while (suitable_orders) {
                        pages = 1UL << order;
-                       index = round_down(index, pages);
-                       folio = shmem_alloc_folio(gfp, order, info, index);
-                       if (folio)
+                       aligned_index = round_down(index, pages);
+                       folio = shmem_alloc_folio(gfp, order, info, aligned_index);
+                       if (folio) {
+                               index = aligned_index;
                                goto allocated;
+                       }
 
                        if (pages == HPAGE_PMD_NR)
                                count_vm_event(THP_FILE_FALLBACK);