5.10-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 12 Dec 2022 07:39:57 +0000 (08:39 +0100)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 12 Dec 2022 07:39:57 +0000 (08:39 +0100)
added patches:
mm-hugetlb-fix-races-when-looking-up-a-cont-pte-pmd-size-hugetlb-page.patch

queue-5.10/mm-hugetlb-fix-races-when-looking-up-a-cont-pte-pmd-size-hugetlb-page.patch [new file with mode: 0644]
queue-5.10/series

diff --git a/queue-5.10/mm-hugetlb-fix-races-when-looking-up-a-cont-pte-pmd-size-hugetlb-page.patch b/queue-5.10/mm-hugetlb-fix-races-when-looking-up-a-cont-pte-pmd-size-hugetlb-page.patch
new file mode 100644
index 0000000..000adf3
--- /dev/null
+++ b/queue-5.10/mm-hugetlb-fix-races-when-looking-up-a-cont-pte-pmd-size-hugetlb-page.patch
@@ -0,0 +1,172 @@
+From fac35ba763ed07ba93154c95ffc0c4a55023707f Mon Sep 17 00:00:00 2001
+From: Baolin Wang <baolin.wang@linux.alibaba.com>
+Date: Thu, 1 Sep 2022 18:41:31 +0800
+Subject: mm/hugetlb: fix races when looking up a CONT-PTE/PMD size hugetlb page
+
+From: Baolin Wang <baolin.wang@linux.alibaba.com>
+
+commit fac35ba763ed07ba93154c95ffc0c4a55023707f upstream.
+
+Some architectures (like ARM64) support CONT-PTE/PMD size hugetlb, which
+means they can support not only PMD/PUD size hugetlb pages (2M and 1G), but
+also CONT-PTE/PMD size pages (64K and 32M) if a 4K base page size is used.
+
+So when looking up a CONT-PTE size hugetlb page by follow_page(), it will
+use pte_offset_map_lock() to get the pte entry lock for the CONT-PTE size
+hugetlb in follow_page_pte().  However, this pte entry lock is incorrect
+for the CONT-PTE size hugetlb, since we should use huge_pte_lock() to get
+the correct lock, which is mm->page_table_lock.
+
+That means the pte entry of the CONT-PTE size hugetlb is unstable under
+the current pte lock in follow_page_pte(): other paths can still migrate
+or poison that pte entry, which can cause potential race issues even
+though follow_page_pte() holds the 'pte lock'.
+
+For example, suppose thread A is trying to look up a CONT-PTE size hugetlb
+page by the move_pages() syscall under the lock, while another thread B
+migrates the CONT-PTE hugetlb page at the same time. This will cause
+thread A to get an incorrect page; if thread A also wants to do page
+migration, a data inconsistency error occurs.
+
+Moreover we have the same issue for CONT-PMD size hugetlb in
+follow_huge_pmd().
+
+To fix the above issues, rename follow_huge_pmd() to follow_huge_pmd_pte()
+to handle both PMD and PTE level hugetlb, and use huge_pte_lock() to take
+the correct pte entry lock so that the pte entry stays stable.
+
+Mike said:
+
+Support for CONT_PMD/_PTE was added with bb9dd3df8ee9 ("arm64: hugetlb:
+refactor find_num_contig()"), from the patch series "Support for
+contiguous pte hugepages", v4.  However, I do not believe these code paths
+were executed until migration support was added with 5480280d3f2d
+("arm64/mm: enable HugeTLB migration for contiguous bit HugeTLB pages").
+I would go with 5480280d3f2d for the Fixes: target.
+
+Link: https://lkml.kernel.org/r/635f43bdd85ac2615a58405da82b4d33c6e5eb05.1662017562.git.baolin.wang@linux.alibaba.com
+Fixes: 5480280d3f2d ("arm64/mm: enable HugeTLB migration for contiguous bit HugeTLB pages")
+Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
+Suggested-by: Mike Kravetz <mike.kravetz@oracle.com>
+Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Muchun Song <songmuchun@bytedance.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Samuel Mendoza-Jonas <samjonas@amazon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/hugetlb.h |    8 ++++----
+ mm/gup.c                |   14 +++++++++++++-
+ mm/hugetlb.c            |   27 +++++++++++++--------------
+ 3 files changed, 30 insertions(+), 19 deletions(-)
+
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -174,8 +174,8 @@ struct page *follow_huge_addr(struct mm_
+ struct page *follow_huge_pd(struct vm_area_struct *vma,
+                           unsigned long address, hugepd_t hpd,
+                           int flags, int pdshift);
+-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+-                              pmd_t *pmd, int flags);
++struct page *follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address,
++                               int flags);
+ struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
+                               pud_t *pud, int flags);
+ struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
+@@ -261,8 +261,8 @@ static inline struct page *follow_huge_p
+       return NULL;
+ }
+-static inline struct page *follow_huge_pmd(struct mm_struct *mm,
+-                              unsigned long address, pmd_t *pmd, int flags)
++static inline struct page *follow_huge_pmd_pte(struct vm_area_struct *vma,
++                              unsigned long address, int flags)
+ {
+       return NULL;
+ }
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -405,6 +405,18 @@ static struct page *follow_page_pte(stru
+       if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
+                        (FOLL_PIN | FOLL_GET)))
+               return ERR_PTR(-EINVAL);
++
++      /*
++       * Considering PTE level hugetlb, like continuous-PTE hugetlb on
++       * ARM64 architecture.
++       */
++      if (is_vm_hugetlb_page(vma)) {
++              page = follow_huge_pmd_pte(vma, address, flags);
++              if (page)
++                      return page;
++              return no_page_table(vma, flags);
++      }
++
+ retry:
+       if (unlikely(pmd_bad(*pmd)))
+               return no_page_table(vma, flags);
+@@ -560,7 +572,7 @@ static struct page *follow_pmd_mask(stru
+       if (pmd_none(pmdval))
+               return no_page_table(vma, flags);
+       if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) {
+-              page = follow_huge_pmd(mm, address, pmd, flags);
++              page = follow_huge_pmd_pte(vma, address, flags);
+               if (page)
+                       return page;
+               return no_page_table(vma, flags);
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -5585,12 +5585,13 @@ follow_huge_pd(struct vm_area_struct *vm
+ }
+ struct page * __weak
+-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+-              pmd_t *pmd, int flags)
++follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address, int flags)
+ {
++      struct hstate *h = hstate_vma(vma);
++      struct mm_struct *mm = vma->vm_mm;
+       struct page *page = NULL;
+       spinlock_t *ptl;
+-      pte_t pte;
++      pte_t *ptep, pte;
+       /* FOLL_GET and FOLL_PIN are mutually exclusive. */
+       if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
+@@ -5598,17 +5599,15 @@ follow_huge_pmd(struct mm_struct *mm, un
+               return NULL;
+ retry:
+-      ptl = pmd_lockptr(mm, pmd);
+-      spin_lock(ptl);
+-      /*
+-       * make sure that the address range covered by this pmd is not
+-       * unmapped from other threads.
+-       */
+-      if (!pmd_huge(*pmd))
+-              goto out;
+-      pte = huge_ptep_get((pte_t *)pmd);
++      ptep = huge_pte_offset(mm, address, huge_page_size(h));
++      if (!ptep)
++              return NULL;
++
++      ptl = huge_pte_lock(h, mm, ptep);
++      pte = huge_ptep_get(ptep);
+       if (pte_present(pte)) {
+-              page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
++              page = pte_page(pte) +
++                      ((address & ~huge_page_mask(h)) >> PAGE_SHIFT);
+               /*
+                * try_grab_page() should always succeed here, because: a) we
+                * hold the pmd (ptl) lock, and b) we've just checked that the
+@@ -5624,7 +5623,7 @@ retry:
+       } else {
+               if (is_hugetlb_entry_migration(pte)) {
+                       spin_unlock(ptl);
+-                      __migration_entry_wait(mm, (pte_t *)pmd, ptl);
++                      __migration_entry_wait(mm, ptep, ptl);
+                       goto retry;
+               }
+               /*
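The race described in the commit message is reachable from userspace via the
move_pages(2) syscall, which looks pages up through follow_page().  As an
illustration only (not part of the queued patch), a minimal userspace sketch
that exercises this lookup path on a hugetlb mapping might look as follows; it
assumes libnuma is installed (link with -lnuma) and that hugetlb pages of the
default size are reserved (vm.nr_hugepages):

/*
 * Query the NUMA node of one hugetlb page with move_pages(2).  With
 * nodes == NULL the kernel only reports where each page currently
 * resides, looking it up via follow_page() -- the path fixed above.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <numaif.h>

int main(void)
{
	size_t len = 2UL * 1024 * 1024;	/* assumes a 2M default hugepage size */
	void *pages[1];
	int status[1] = { -1 };

	/* Map and touch one anonymous hugetlb page. */
	void *addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (addr == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");
		return EXIT_FAILURE;
	}
	memset(addr, 0, len);

	pages[0] = addr;
	if (move_pages(0 /* self */, 1, pages, NULL, status, 0) < 0) {
		perror("move_pages");
		return EXIT_FAILURE;
	}

	/* A negative status value is a per-page -errno (e.g. -ENOENT). */
	printf("hugetlb page at %p: node/status %d\n", addr, status[0]);

	munmap(addr, len);
	return EXIT_SUCCESS;
}
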
diff --git a/queue-5.10/series b/queue-5.10/series
index 69251c539c1a7336d9430b8e293aca468648be7c..7e239fafac8006a5b48ab0f122c360a13eb50aea 100644
--- a/queue-5.10/series
+++ b/queue-5.10/series
@@ -57,6 +57,7 @@ hid-usbhid-add-always_poll-quirk-for-some-mice.patch
 hid-hid-lg4ff-add-check-for-empty-lbuf.patch
 hid-core-fix-shift-out-of-bounds-in-hid_report_raw_event.patch
 can-af_can-fix-null-pointer-dereference-in-can_rcv_filter.patch
+mm-hugetlb-fix-races-when-looking-up-a-cont-pte-pmd-size-hugetlb-page.patch
 rtc-cmos-disable-irq-around-direct-invocation-of-cmo.patch
 rtc-mc146818-lib-fix-locking-in-mc146818_set_time.patch
 rtc-mc146818-lib-fix-signedness-bug-in-mc146818_get_.patch