mm: shmem: fix incorrect index alignment for within_size policy
author    Baolin Wang <baolin.wang@linux.alibaba.com>
          Thu, 19 Dec 2024 07:30:08 +0000 (15:30 +0800)
committer Andrew Morton <akpm@linux-foundation.org>
          Tue, 31 Dec 2024 01:59:09 +0000 (17:59 -0800)
When enabling the shmem per-size within_size policy, using an incorrect
'order' size to round_up() the index can lead to incorrect i_size checks,
resulting in inappropriately large orders being returned.

Change to use '1 << order' to round_up() the index to fix this issue.
Additionally, add an 'aligned_index' variable to avoid affecting the
index checks.

Link: https://lkml.kernel.org/r/77d8ef76a7d3d646e9225e9af88a76549a68aab1.1734593154.git.baolin.wang@linux.alibaba.com
Fixes: e7a2ab7b3bb5 ("mm: shmem: add mTHP support for anonymous shmem")
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/shmem.c

index f6fb053ac50dc4f873c8f002239daaf5eb0ff81e..dec659e84562e56645afb55552d8adb1756b3b30 100644
@@ -1689,6 +1689,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
        unsigned long mask = READ_ONCE(huge_shmem_orders_always);
        unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
        unsigned long vm_flags = vma ? vma->vm_flags : 0;
+       pgoff_t aligned_index;
        bool global_huge;
        loff_t i_size;
        int order;
@@ -1723,9 +1724,9 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
        /* Allow mTHP that will be fully within i_size. */
        order = highest_order(within_size_orders);
        while (within_size_orders) {
-               index = round_up(index + 1, order);
+               aligned_index = round_up(index + 1, 1 << order);
                i_size = round_up(i_size_read(inode), PAGE_SIZE);
-               if (i_size >> PAGE_SHIFT >= index) {
+               if (i_size >> PAGE_SHIFT >= aligned_index) {
                        mask |= within_size_orders;
                        break;
                }
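
For illustration, a minimal userspace sketch (not part of the patch) of why
the alignment argument must be '1 << order' rather than 'order'. The
round_up() below is a simplified power-of-two equivalent of the kernel
macro, and the index/order values are made-up examples: with index 100 and
order 4 (a 16-page folio), the buggy call only aligns to a 4-page boundary
(104), while the fixed call aligns to the 16-page folio boundary (112),
which is what the following i_size comparison needs.

	#include <stdio.h>

	/* Simplified power-of-two round up, mirroring the kernel's round_up(). */
	#define round_up(x, y)	(((x) + (y) - 1) & ~((y) - 1))

	int main(void)
	{
		unsigned long index = 100;	/* hypothetical page index in the file */
		int order = 4;			/* candidate mTHP order: 16 pages per folio */

		/* Buggy: aligns index + 1 to 'order' pages (4), not to the folio size. */
		unsigned long buggy = round_up(index + 1, (unsigned long)order);

		/* Fixed: aligns index + 1 to the folio size in pages, i.e. 1 << order. */
		unsigned long fixed = round_up(index + 1, 1UL << order);

		printf("buggy aligned index: %lu\n", buggy);	/* prints 104 */
		printf("fixed aligned index: %lu\n", fixed);	/* prints 112 */
		return 0;
	}

Under that reading, the buggy boundary (104) can fall below i_size even when
the full 16-page folio (ending at 112) does not, which is how inappropriately
large orders could slip through the i_size check.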