From: Greg Kroah-Hartman
Date: Mon, 17 Oct 2022 09:50:41 +0000 (+0200)
Subject: 5.15-stable patches
X-Git-Tag: v5.4.219~26
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=cefdaa9fce6b7abe1448c0be9e0166630fb1e866;p=thirdparty%2Fkernel%2Fstable-queue.git

5.15-stable patches

added patches:
	mm-hugetlb-fix-uaf-in-hugetlb_handle_userfault.patch
---

diff --git a/queue-5.15/mm-hugetlb-fix-uaf-in-hugetlb_handle_userfault.patch b/queue-5.15/mm-hugetlb-fix-uaf-in-hugetlb_handle_userfault.patch
new file mode 100644
index 00000000000..e661be22c5e
--- /dev/null
+++ b/queue-5.15/mm-hugetlb-fix-uaf-in-hugetlb_handle_userfault.patch
@@ -0,0 +1,154 @@
+From 958f32ce832ba781ac20e11bb2d12a9352ea28fc Mon Sep 17 00:00:00 2001
+From: Liu Shixin
+Date: Fri, 23 Sep 2022 12:21:13 +0800
+Subject: mm: hugetlb: fix UAF in hugetlb_handle_userfault
+
+From: Liu Shixin
+
+commit 958f32ce832ba781ac20e11bb2d12a9352ea28fc upstream.
+
+The vma_lock and hugetlb_fault_mutex are dropped before handling userfault
+and reacquired after handle_userfault(), but reacquiring the vma_lock
+can lead to a UAF [1,2] due to the following race:
+
+hugetlb_fault
+  hugetlb_no_page
+    /* unlock vma_lock */
+    hugetlb_handle_userfault
+      handle_userfault
+        /* unlock mm->mmap_lock */
+                                           vm_mmap_pgoff
+                                             do_mmap
+                                               mmap_region
+                                                 munmap_vma_range
+                                                   /* clean old vma */
+        /* lock vma_lock again  <--- UAF */
+      /* unlock vma_lock */
+
+Since the vma_lock is unlocked immediately after
+hugetlb_handle_userfault() returns, drop the unneeded lock and unlock in
+hugetlb_handle_userfault() to fix the issue.
+
+[1] https://lore.kernel.org/linux-mm/000000000000d5e00a05e834962e@google.com/
+[2] https://lore.kernel.org/linux-mm/20220921014457.1668-1-liuzixian4@huawei.com/
+Link: https://lkml.kernel.org/r/20220923042113.137273-1-liushixin2@huawei.com
+Fixes: 1a1aad8a9b7b ("userfaultfd: hugetlbfs: add userfaultfd hugetlb hook")
+Signed-off-by: Liu Shixin
+Signed-off-by: Kefeng Wang
+Reported-by: syzbot+193f9cee8638750b23cf@syzkaller.appspotmail.com
+Reported-by: Liu Zixian
+Reviewed-by: Mike Kravetz
+Cc: David Hildenbrand
+Cc: John Hubbard
+Cc: Muchun Song
+Cc: Sidhartha Kumar
+Cc: [4.14+]
+Signed-off-by: Andrew Morton
+Signed-off-by: Greg Kroah-Hartman
+---
+ mm/hugetlb.c |   37 +++++++++++++++++--------------------
+ 1 file changed, 17 insertions(+), 20 deletions(-)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -4844,7 +4844,6 @@ static inline vm_fault_t hugetlb_handle_
+ 					unsigned long haddr,
+ 					unsigned long reason)
+ {
+-	vm_fault_t ret;
+ 	u32 hash;
+ 	struct vm_fault vmf = {
+ 		.vma = vma,
+@@ -4861,18 +4860,14 @@ static inline vm_fault_t hugetlb_handle_
+ 	};
+ 
+ 	/*
+-	 * hugetlb_fault_mutex and i_mmap_rwsem must be
+-	 * dropped before handling userfault.  Reacquire
+-	 * after handling fault to make calling code simpler.
++	 * vma_lock and hugetlb_fault_mutex must be dropped before handling
++	 * userfault. Also mmap_lock will be dropped during handling
++	 * userfault, any vma operation should be careful from here.
+ 	 */
+ 	hash = hugetlb_fault_mutex_hash(mapping, idx);
+ 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+ 	i_mmap_unlock_read(mapping);
+-	ret = handle_userfault(&vmf, reason);
+-	i_mmap_lock_read(mapping);
+-	mutex_lock(&hugetlb_fault_mutex_table[hash]);
+-
+-	return ret;
++	return handle_userfault(&vmf, reason);
+ }
+ 
+ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
+@@ -4889,6 +4884,7 @@ static vm_fault_t hugetlb_no_page(struct
+ 	spinlock_t *ptl;
+ 	unsigned long haddr = address & huge_page_mask(h);
+ 	bool new_page, new_pagecache_page = false;
++	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
+ 
+ 	/*
+ 	 * Currently, we are forced to kill the process in the event the
+@@ -4898,7 +4894,7 @@ static vm_fault_t hugetlb_no_page(struct
+ 	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
+ 		pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
+ 			   current->pid);
+-		return ret;
++		goto out;
+ 	}
+ 
+ 	/*
+@@ -4915,12 +4911,10 @@ retry:
+ 	page = find_lock_page(mapping, idx);
+ 	if (!page) {
+ 		/* Check for page in userfault range */
+-		if (userfaultfd_missing(vma)) {
+-			ret = hugetlb_handle_userfault(vma, mapping, idx,
++		if (userfaultfd_missing(vma))
++			return hugetlb_handle_userfault(vma, mapping, idx,
+ 						       flags, haddr,
+ 						       VM_UFFD_MISSING);
+-			goto out;
+-		}
+ 
+ 		page = alloc_huge_page(vma, haddr, 0);
+ 		if (IS_ERR(page)) {
+@@ -4980,10 +4974,9 @@ retry:
+ 		if (userfaultfd_minor(vma)) {
+ 			unlock_page(page);
+ 			put_page(page);
+-			ret = hugetlb_handle_userfault(vma, mapping, idx,
++			return hugetlb_handle_userfault(vma, mapping, idx,
+ 						       flags, haddr,
+ 						       VM_UFFD_MINOR);
+-			goto out;
+ 		}
+ 	}
+ 
+@@ -5034,6 +5027,8 @@ retry:
+ 
+ 	unlock_page(page);
+ out:
++	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
++	i_mmap_unlock_read(mapping);
+ 	return ret;
+ 
+ backout:
+@@ -5131,10 +5126,12 @@ vm_fault_t hugetlb_fault(struct mm_struc
+ 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
+ 
+ 	entry = huge_ptep_get(ptep);
+-	if (huge_pte_none(entry)) {
+-		ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
+-		goto out_mutex;
+-	}
++	if (huge_pte_none(entry))
++		/*
++		 * hugetlb_no_page will drop vma lock and hugetlb fault
++		 * mutex internally, which makes us return immediately.
++		 */
++		return hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
+ 
+ 	ret = 0;
+ 
diff --git a/queue-5.15/series b/queue-5.15/series
index 98a79d95563..1fd44d2bc85 100644
--- a/queue-5.15/series
+++ b/queue-5.15/series
@@ -616,3 +616,4 @@ io_uring-correct-pinned_vm-accounting.patch
 io_uring-rw-fix-short-rw-error-handling.patch
 io_uring-rw-fix-error-ed-retry-return-values.patch
 io_uring-rw-fix-unexpected-link-breakage.patch
+mm-hugetlb-fix-uaf-in-hugetlb_handle_userfault.patch
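
For readers who want to experiment with the locking pattern outside the kernel, below is a minimal user-space C sketch of the same idea: once a helper has dropped the lock that protects an object and called out to code that may free that object, it must return straight to its caller rather than retake the lock. All names in the sketch (struct region, handle_fault_buggy(), handle_fault_fixed(), fault_cb_t) are hypothetical and exist only for illustration; they are not kernel APIs, and the authoritative change remains the mm/hugetlb.c diff above.

/*
 * Illustrative user-space analogue only -- not kernel code.
 * Build with: cc -Wall -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct region {
	pthread_mutex_t lock;	/* stands in for the hugetlb vma_lock */
	int data;
};

/* Callback that, like handle_userfault(), runs with the lock dropped and
 * may allow another thread to unmap and free the region. */
typedef int (*fault_cb_t)(struct region *r);

/* Buggy shape: drop the lock, call out, then retake the lock on a region
 * that a concurrent thread may already have freed (the use-after-free). */
static int handle_fault_buggy(struct region *r, fault_cb_t cb)
{
	int ret;

	pthread_mutex_unlock(&r->lock);
	ret = cb(r);			/* region may be freed in here */
	pthread_mutex_lock(&r->lock);	/* <-- UAF if it was freed */
	return ret;
}

/* Fixed shape, mirroring the patch: after the lock is dropped, return the
 * callback's result directly and never touch the region again. */
static int handle_fault_fixed(struct region *r, fault_cb_t cb)
{
	pthread_mutex_unlock(&r->lock);
	return cb(r);
}

static int dummy_cb(struct region *r)
{
	(void)r;
	return 0;
}

int main(void)
{
	struct region *r = calloc(1, sizeof(*r));

	if (!r)
		return 1;
	pthread_mutex_init(&r->lock, NULL);

	/* Single-threaded demo: nothing frees the region concurrently, so
	 * the buggy shape does not actually crash here; the comments mark
	 * where the race window lies. */
	pthread_mutex_lock(&r->lock);
	printf("buggy path returned %d\n", handle_fault_buggy(r, dummy_cb));
	pthread_mutex_unlock(&r->lock);

	pthread_mutex_lock(&r->lock);
	printf("fixed path returned %d\n", handle_fault_fixed(r, dummy_cb));

	pthread_mutex_destroy(&r->lock);
	free(r);
	return 0;
}

The kernel fix follows the same shape: hugetlb_handle_userfault() now simply returns handle_userfault()'s result after dropping hugetlb_fault_mutex and the i_mmap lock, and hugetlb_no_page()/hugetlb_fault() are restructured so those locks are released exactly once on every exit path instead of being retaken on a possibly stale vma.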