mm: use aligned address in copy_user_gigantic_page()
author     Kefeng Wang <wangkefeng.wang@huawei.com>
           Mon, 28 Oct 2024 14:56:56 +0000 (22:56 +0800)
committer  Andrew Morton <akpm@linux-foundation.org>
           Thu, 19 Dec 2024 03:04:42 +0000 (19:04 -0800)
In the current kernel, hugetlb_wp() calls copy_user_large_folio() with
the fault address, which may not be aligned with the huge page size.
copy_user_large_folio() may then call copy_user_gigantic_page() with
that unaligned address, while copy_user_gigantic_page() requires the
address to be huge page size aligned.  This can cause memory corruption
or an information leak.  Additionally, use the more obvious name
'addr_hint' instead of 'addr' for copy_user_gigantic_page().
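
To illustrate the alignment requirement, here is a minimal userspace
sketch (not kernel code: the 2 MB huge page size is assumed, and
ALIGN_DOWN_POW2 is a hypothetical stand-in for the kernel's
ALIGN_DOWN() on power-of-two sizes).  A fault address inside a huge
page must be rounded down to the folio base before the per-page copy;
starting at the unaligned address would walk past the end of the folio.

	#include <stdio.h>

	/* Stand-in for the kernel's ALIGN_DOWN() with a power-of-two size. */
	#define ALIGN_DOWN_POW2(x, a)	((x) & ~((unsigned long)(a) - 1))

	int main(void)
	{
		unsigned long sz = 2UL << 20;		/* assumed 2 MB huge page */
		unsigned long fault = 0x7f0000212345UL;	/* unaligned fault address */

		/* Copying sz bytes starting at 'fault' would overrun the folio
		 * that begins at the aligned base; the copy must start there. */
		printf("fault %#lx -> folio base %#lx\n",
		       fault, ALIGN_DOWN_POW2(fault, sz));
		return 0;
	}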

Link: https://lkml.kernel.org/r/20241028145656.932941-2-wangkefeng.wang@huawei.com
Fixes: 530dd9926dc1 ("mm: memory: improve copy_user_large_folio()")
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/hugetlb.c
mm/memory.c

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ea2ed8e301ef2c22dec0b0a32f9a1ff747a7c3b4..cec4b121193fc4d481352b5de70949de50cf1fca 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5340,7 +5340,7 @@ again:
                                        break;
                                }
                                ret = copy_user_large_folio(new_folio, pte_folio,
-                                               ALIGN_DOWN(addr, sz), dst_vma);
+                                                           addr, dst_vma);
                                folio_put(pte_folio);
                                if (ret) {
                                        folio_put(new_folio);
@@ -6643,8 +6643,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
                        *foliop = NULL;
                        goto out;
                }
-               ret = copy_user_large_folio(folio, *foliop,
-                                           ALIGN_DOWN(dst_addr, size), dst_vma);
+               ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
                folio_put(*foliop);
                *foliop = NULL;
                if (ret) {
diff --git a/mm/memory.c b/mm/memory.c
index 84864387f9659fed39f2c5705cbc1ea8769228b4..209885a4134f72f29f3360420532e5406bf5ca03 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -6852,13 +6852,14 @@ void folio_zero_user(struct folio *folio, unsigned long addr_hint)
 }
 
 static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
-                                  unsigned long addr,
+                                  unsigned long addr_hint,
                                   struct vm_area_struct *vma,
                                   unsigned int nr_pages)
 {
-       int i;
+       unsigned long addr = ALIGN_DOWN(addr_hint, folio_size(dst));
        struct page *dst_page;
        struct page *src_page;
+       int i;
 
        for (i = 0; i < nr_pages; i++) {
                dst_page = folio_page(dst, i);
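
Beyond the end of the hunk above, a hedged userspace model of the
per-page loop may help (MODEL_PAGE_SIZE and model_copy_gigantic are
hypothetical names, and memcpy() stands in for the kernel's per-page
copy helper): once addr is the folio-aligned base, page i of both
folios lives at offset i * PAGE_SIZE from their bases, which is what
the loop relies on.

	#include <string.h>

	#define MODEL_PAGE_SIZE	4096UL	/* assumed 4 KB base pages */

	/* Model of the gigantic-folio copy: both folios are treated as
	 * arrays of nr_pages base pages, copied one page at a time (the
	 * kernel loop also reschedules between pages). */
	static void model_copy_gigantic(unsigned char *dst,
					const unsigned char *src,
					unsigned int nr_pages)
	{
		for (unsigned int i = 0; i < nr_pages; i++)
			memcpy(dst + i * MODEL_PAGE_SIZE,
			       src + i * MODEL_PAGE_SIZE,
			       MODEL_PAGE_SIZE);
	}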