git.ipfire.org Git - thirdparty/linux.git/commitdiff
mm/userfaultfd: fix hugetlb fault mutex hash calculation
author Jianhui Zhou <jianhuizzzzz@gmail.com>
Tue, 10 Mar 2026 11:05:26 +0000 (19:05 +0800)
committer Andrew Morton <akpm@linux-foundation.org>
Sun, 5 Apr 2026 20:53:29 +0000 (13:53 -0700)
In mfill_atomic_hugetlb(), linear_page_index() is used to calculate the
page index for hugetlb_fault_mutex_hash().  However, linear_page_index()
returns the index in PAGE_SIZE units, while hugetlb_fault_mutex_hash()
expects the index in huge page units.  This mismatch means that different
addresses within the same huge page can produce different hash values,
leading to the use of different mutexes for the same huge page.  This can
cause races between faulting threads, which can corrupt the reservation
map and trigger the BUG_ON in resv_map_release().

Fix this by introducing hugetlb_linear_page_index(), which returns the
page index in huge page granularity, and using it in place of
linear_page_index().

Link: https://lkml.kernel.org/r/20260310110526.335749-1-jianhuizzzzz@gmail.com
Fixes: a08c7193e4f1 ("mm/filemap: remove hugetlb special casing in filemap.c")
Signed-off-by: Jianhui Zhou <jianhuizzzzz@gmail.com>
Reported-by: syzbot+f525fd79634858f478e7@syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=f525fd79634858f478e7
Acked-by: SeongJae Park <sj@kernel.org>
Reviewed-by: David Hildenbrand (Arm) <david@kernel.org>
Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Cc: Jane Chu <jane.chu@oracle.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: JonasZhou <JonasZhou@zhaoxin.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Xu <peterx@redhat.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/hugetlb.h
mm/userfaultfd.c

index aaf3d472e6b5c516df7b4f6448f3727e02017f34..9c098a02a09e147b2038a4c40dbe3ed80de1f217 100644 (file)
@@ -792,6 +792,23 @@ static inline unsigned huge_page_shift(struct hstate *h)
        return h->order + PAGE_SHIFT;
 }
 
+/**
+ * hugetlb_linear_page_index() - linear_page_index() but in hugetlb
+ *                              page size granularity.
+ * @vma: the hugetlb VMA
+ * @address: the virtual address within the VMA
+ *
+ * Return: the page offset within the mapping in huge page units.
+ */
+static inline pgoff_t hugetlb_linear_page_index(struct vm_area_struct *vma,
+               unsigned long address)
+{
+       struct hstate *h = hstate_vma(vma);
+
+       return ((address - vma->vm_start) >> huge_page_shift(h)) +
+               (vma->vm_pgoff >> huge_page_order(h));
+}
+
 static inline bool order_is_gigantic(unsigned int order)
 {
        return order > MAX_PAGE_ORDER;
index e19872e5187850dcb4eba4b7d3ae33731d74e1db..2c565c7134b69f121ac30d82e9ff9562ce7be3d0 100644 (file)
@@ -573,7 +573,7 @@ retry:
                 * in the case of shared pmds.  fault mutex prevents
                 * races with other faulting threads.
                 */
-               idx = linear_page_index(dst_vma, dst_addr);
+               idx = hugetlb_linear_page_index(dst_vma, dst_addr);
                mapping = dst_vma->vm_file->f_mapping;
                hash = hugetlb_fault_mutex_hash(mapping, idx);
                mutex_lock(&hugetlb_fault_mutex_table[hash]);