From: Greg Kroah-Hartman
Date: Mon, 17 Oct 2022 09:50:35 +0000 (+0200)
Subject: 5.10-stable patches
X-Git-Tag: v5.4.219~27
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=f56cdc229bc9b3c66f7da33411c677a498d3c553;p=thirdparty%2Fkernel%2Fstable-queue.git

5.10-stable patches

added patches:
	mm-hugetlb-fix-uaf-in-hugetlb_handle_userfault.patch
---

diff --git a/queue-5.10/mm-hugetlb-fix-uaf-in-hugetlb_handle_userfault.patch b/queue-5.10/mm-hugetlb-fix-uaf-in-hugetlb_handle_userfault.patch
new file mode 100644
index 00000000000..499f493df55
--- /dev/null
+++ b/queue-5.10/mm-hugetlb-fix-uaf-in-hugetlb_handle_userfault.patch
@@ -0,0 +1,127 @@
+From 958f32ce832ba781ac20e11bb2d12a9352ea28fc Mon Sep 17 00:00:00 2001
+From: Liu Shixin
+Date: Fri, 23 Sep 2022 12:21:13 +0800
+Subject: mm: hugetlb: fix UAF in hugetlb_handle_userfault
+
+From: Liu Shixin
+
+commit 958f32ce832ba781ac20e11bb2d12a9352ea28fc upstream.
+
+The vma_lock and hugetlb_fault_mutex are dropped before handling userfault
+and reacquired after handle_userfault(), but reacquiring the vma_lock can
+lead to a UAF[1,2] due to the following race:
+
+hugetlb_fault
+  hugetlb_no_page
+    /*unlock vma_lock */
+    hugetlb_handle_userfault
+      handle_userfault
+        /* unlock mm->mmap_lock*/
+                                   vm_mmap_pgoff
+                                     do_mmap
+                                       mmap_region
+                                         munmap_vma_range
+                                           /* clean old vma */
+        /* lock vma_lock again  <--- UAF */
+      /* unlock vma_lock */
+
+Since the vma_lock will be unlocked immediately after
+hugetlb_handle_userfault(), let's drop the unneeded lock and unlock in
+hugetlb_handle_userfault() to fix the issue.
+
+[1] https://lore.kernel.org/linux-mm/000000000000d5e00a05e834962e@google.com/
+[2] https://lore.kernel.org/linux-mm/20220921014457.1668-1-liuzixian4@huawei.com/
+Link: https://lkml.kernel.org/r/20220923042113.137273-1-liushixin2@huawei.com
+Fixes: 1a1aad8a9b7b ("userfaultfd: hugetlbfs: add userfaultfd hugetlb hook")
+Signed-off-by: Liu Shixin
+Signed-off-by: Kefeng Wang
+Reported-by: syzbot+193f9cee8638750b23cf@syzkaller.appspotmail.com
+Reported-by: Liu Zixian
+Reviewed-by: Mike Kravetz
+Cc: David Hildenbrand
+Cc: John Hubbard
+Cc: Muchun Song
+Cc: Sidhartha Kumar
+Cc: [4.14+]
+Signed-off-by: Andrew Morton
+Signed-off-by: Greg Kroah-Hartman
+---
+ mm/hugetlb.c | 29 +++++++++++++++--------------
+ 1 file changed, 15 insertions(+), 14 deletions(-)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -4337,6 +4337,7 @@ static vm_fault_t hugetlb_no_page(struct
+ 	spinlock_t *ptl;
+ 	unsigned long haddr = address & huge_page_mask(h);
+ 	bool new_page = false;
++	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
+ 
+ 	/*
+ 	 * Currently, we are forced to kill the process in the event the
+@@ -4346,7 +4347,7 @@ static vm_fault_t hugetlb_no_page(struct
+ 	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
+ 		pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
+ 			   current->pid);
+-		return ret;
++		goto out;
+ 	}
+ 
+ 	/*
+@@ -4365,7 +4366,6 @@ retry:
+ 	 * Check for page in userfault range
+ 	 */
+ 	if (userfaultfd_missing(vma)) {
+-		u32 hash;
+ 		struct vm_fault vmf = {
+ 			.vma = vma,
+ 			.address = haddr,
+@@ -4380,17 +4380,14 @@ retry:
+ 		};
+ 
+ 		/*
+-		 * hugetlb_fault_mutex and i_mmap_rwsem must be
+-		 * dropped before handling userfault.  Reacquire
+-		 * after handling fault to make calling code simpler.
++		 * vma_lock and hugetlb_fault_mutex must be dropped
++		 * before handling userfault. Also mmap_lock will
++		 * be dropped during handling userfault, any vma
++		 * operation should be careful from here.
+ 		 */
+-		hash = hugetlb_fault_mutex_hash(mapping, idx);
+ 		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+ 		i_mmap_unlock_read(mapping);
+-		ret = handle_userfault(&vmf, VM_UFFD_MISSING);
+-		i_mmap_lock_read(mapping);
+-		mutex_lock(&hugetlb_fault_mutex_table[hash]);
+-		goto out;
++		return handle_userfault(&vmf, VM_UFFD_MISSING);
+ 	}
+ 
+ 	page = alloc_huge_page(vma, haddr, 0);
+@@ -4497,6 +4494,8 @@ retry:
+ 
+ 	unlock_page(page);
+ out:
++	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
++	i_mmap_unlock_read(mapping);
+ 	return ret;
+ 
+ backout:
+@@ -4592,10 +4591,12 @@ vm_fault_t hugetlb_fault(struct mm_struc
+ 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
+ 
+ 	entry = huge_ptep_get(ptep);
+-	if (huge_pte_none(entry)) {
+-		ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
+-		goto out_mutex;
+-	}
++	if (huge_pte_none(entry))
++		/*
++		 * hugetlb_no_page will drop vma lock and hugetlb fault
++		 * mutex internally, which make us return immediately.
++		 */
++		return hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
+ 
+ 	ret = 0;
+ 
diff --git a/queue-5.10/series b/queue-5.10/series
index 6d1d7ce3814..30fb1d5a002 100644
--- a/queue-5.10/series
+++ b/queue-5.10/series
@@ -458,3 +458,4 @@ perf-intel-pt-fix-segfault-in-intel_pt_print_info-with-uclibc.patch
 arm64-topology-fix-possible-overflow-in-amu_fie_setup.patch
 io_uring-correct-pinned_vm-accounting.patch
 io_uring-af_unix-defer-registered-files-gc-to-io_uring-release.patch
+mm-hugetlb-fix-uaf-in-hugetlb_handle_userfault.patch
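
For readers of this queue entry, a minimal sketch of the locking pattern the fix
switches to may help: instead of the caller reacquiring locks after
handle_userfault() (locks whose backing vma may have been torn down while
mmap_lock was dropped), the callee now owns dropping the fault mutex and
i_mmap_rwsem on every exit path, and the caller never touches them again after
the call. The sketch below reduces that idea to plain user-space pthreads; the
names (struct state, do_fault_slow_path) are illustrative only and do not
appear in the kernel.

#include <pthread.h>
#include <stdio.h>

struct state {
	pthread_mutex_t lock;
	int value;
};

/* Called with st->lock held; releases it on every exit path. */
static int do_fault_slow_path(struct state *st)
{
	if (st->value < 0) {
		pthread_mutex_unlock(&st->lock);
		return -1;			/* error path: lock already dropped */
	}
	st->value++;
	pthread_mutex_unlock(&st->lock);	/* normal path: drop before returning */
	return 0;
}

int main(void)
{
	struct state st = { PTHREAD_MUTEX_INITIALIZER, 0 };

	pthread_mutex_lock(&st.lock);
	/* After this call the caller must not unlock or re-lock st.lock. */
	int ret = do_fault_slow_path(&st);
	printf("ret=%d value=%d\n", ret, st.value);
	return 0;
}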