git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.14-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 8 Feb 2021 11:30:22 +0000 (12:30 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 8 Feb 2021 11:30:22 +0000 (12:30 +0100)
added patches:
arm-footbridge-fix-dc21285-pci-configuration-accessors.patch
mm-hugetlb-fix-a-race-between-isolating-and-freeing-page.patch
mm-hugetlb-remove-vm_bug_on_page-from-page_huge_active.patch
mm-hugetlbfs-fix-cannot-migrate-the-fallocated-hugetlb-page.patch
mm-thp-fix-madv_remove-deadlock-on-shmem-thp.patch

queue-4.14/arm-footbridge-fix-dc21285-pci-configuration-accessors.patch [new file with mode: 0644]
queue-4.14/mm-hugetlb-fix-a-race-between-isolating-and-freeing-page.patch [new file with mode: 0644]
queue-4.14/mm-hugetlb-remove-vm_bug_on_page-from-page_huge_active.patch [new file with mode: 0644]
queue-4.14/mm-hugetlbfs-fix-cannot-migrate-the-fallocated-hugetlb-page.patch [new file with mode: 0644]
queue-4.14/mm-thp-fix-madv_remove-deadlock-on-shmem-thp.patch [new file with mode: 0644]
queue-4.14/series

diff --git a/queue-4.14/arm-footbridge-fix-dc21285-pci-configuration-accessors.patch b/queue-4.14/arm-footbridge-fix-dc21285-pci-configuration-accessors.patch
new file mode 100644
index 0000000..44d37aa
--- /dev/null
+++ b/queue-4.14/arm-footbridge-fix-dc21285-pci-configuration-accessors.patch
@@ -0,0 +1,62 @@
+From 39d3454c3513840eb123b3913fda6903e45ce671 Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@armlinux.org.uk>
+Date: Sun, 18 Oct 2020 09:39:21 +0100
+Subject: ARM: footbridge: fix dc21285 PCI configuration accessors
+
+From: Russell King <rmk+kernel@armlinux.org.uk>
+
+commit 39d3454c3513840eb123b3913fda6903e45ce671 upstream.
+
+Building with gcc 4.9.2 reveals a latent bug in the PCI accessors
+for Footbridge platforms, which causes a fatal alignment fault
+while accessing IO memory. Fix this by making the assembly volatile.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/mach-footbridge/dc21285.c |   12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/arch/arm/mach-footbridge/dc21285.c
++++ b/arch/arm/mach-footbridge/dc21285.c
+@@ -69,15 +69,15 @@ dc21285_read_config(struct pci_bus *bus,
+       if (addr)
+               switch (size) {
+               case 1:
+-                      asm("ldrb       %0, [%1, %2]"
++                      asm volatile("ldrb      %0, [%1, %2]"
+                               : "=r" (v) : "r" (addr), "r" (where) : "cc");
+                       break;
+               case 2:
+-                      asm("ldrh       %0, [%1, %2]"
++                      asm volatile("ldrh      %0, [%1, %2]"
+                               : "=r" (v) : "r" (addr), "r" (where) : "cc");
+                       break;
+               case 4:
+-                      asm("ldr        %0, [%1, %2]"
++                      asm volatile("ldr       %0, [%1, %2]"
+                               : "=r" (v) : "r" (addr), "r" (where) : "cc");
+                       break;
+               }
+@@ -103,17 +103,17 @@ dc21285_write_config(struct pci_bus *bus
+       if (addr)
+               switch (size) {
+               case 1:
+-                      asm("strb       %0, [%1, %2]"
++                      asm volatile("strb      %0, [%1, %2]"
+                               : : "r" (value), "r" (addr), "r" (where)
+                               : "cc");
+                       break;
+               case 2:
+-                      asm("strh       %0, [%1, %2]"
++                      asm volatile("strh      %0, [%1, %2]"
+                               : : "r" (value), "r" (addr), "r" (where)
+                               : "cc");
+                       break;
+               case 4:
+-                      asm("str        %0, [%1, %2]"
++                      asm volatile("str       %0, [%1, %2]"
+                               : : "r" (value), "r" (addr), "r" (where)
+                               : "cc");
+                       break;
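
A note on why "volatile" matters here: GCC treats an asm statement that has
output operands and no "volatile" as a pure computation, so it is free to
delete it when the result looks unused, merge repeated instances, or move it
relative to surrounding accesses -- any of which changes how the I/O location
is actually touched. A minimal sketch of the pattern the patch enforces
(mmio_read32 is a made-up name for illustration, not a kernel API):

    static inline unsigned int mmio_read32(const volatile void *addr)
    {
            unsigned int v;

            /* "volatile" forces the compiler to emit this load exactly
             * where, and as often as, it appears in the source. */
            asm volatile("ldr %0, [%1]" : "=r" (v) : "r" (addr));
            return v;
    }
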
diff --git a/queue-4.14/mm-hugetlb-fix-a-race-between-isolating-and-freeing-page.patch b/queue-4.14/mm-hugetlb-fix-a-race-between-isolating-and-freeing-page.patch
new file mode 100644
index 0000000..d204dae
--- /dev/null
+++ b/queue-4.14/mm-hugetlb-fix-a-race-between-isolating-and-freeing-page.patch
@@ -0,0 +1,64 @@
+From 0eb2df2b5629794020f75e94655e1994af63f0d4 Mon Sep 17 00:00:00 2001
+From: Muchun Song <songmuchun@bytedance.com>
+Date: Thu, 4 Feb 2021 18:32:10 -0800
+Subject: mm: hugetlb: fix a race between isolating and freeing page
+
+From: Muchun Song <songmuchun@bytedance.com>
+
+commit 0eb2df2b5629794020f75e94655e1994af63f0d4 upstream.
+
+There is a race between isolate_huge_page() and __free_huge_page().
+
+  CPU0:                                     CPU1:
+
+  if (PageHuge(page))
+                                            put_page(page)
+                                              __free_huge_page(page)
+                                                  spin_lock(&hugetlb_lock)
+                                                  update_and_free_page(page)
+                                                    set_compound_page_dtor(page,
+                                                      NULL_COMPOUND_DTOR)
+                                                  spin_unlock(&hugetlb_lock)
+    isolate_huge_page(page)
+      // trigger BUG_ON
+      VM_BUG_ON_PAGE(!PageHead(page), page)
+      spin_lock(&hugetlb_lock)
+      page_huge_active(page)
+        // trigger BUG_ON
+        VM_BUG_ON_PAGE(!PageHuge(page), page)
+      spin_unlock(&hugetlb_lock)
+
+When we isolate a HugeTLB page on CPU0 while CPU1 concurrently frees it
+to the buddy allocator, CPU0 can trigger the BUG_ON because the page has
+already been freed to the buddy allocator.
+
+Link: https://lkml.kernel.org/r/20210115124942.46403-5-songmuchun@bytedance.com
+Fixes: c8721bbbdd36 ("mm: memory-hotplug: enable memory hotplug to handle hugepage")
+Signed-off-by: Muchun Song <songmuchun@bytedance.com>
+Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Reviewed-by: Oscar Salvador <osalvador@suse.de>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Yang Shi <shy828301@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/hugetlb.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -4865,9 +4865,9 @@ bool isolate_huge_page(struct page *page
+ {
+       bool ret = true;
+-      VM_BUG_ON_PAGE(!PageHead(page), page);
+       spin_lock(&hugetlb_lock);
+-      if (!page_huge_active(page) || !get_page_unless_zero(page)) {
++      if (!PageHeadHuge(page) || !page_huge_active(page) ||
++          !get_page_unless_zero(page)) {
+               ret = false;
+               goto unlock;
+       }
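
What makes the fixed version safe, as a simplified sketch (not the literal
kernel code; "list" stands for the caller's isolation list): the checks of
the page's hugetlb state and the reference grab now all happen under
hugetlb_lock, the same lock update_and_free_page() runs under, so the page
cannot be handed back to the buddy allocator in between:

    spin_lock(&hugetlb_lock);
    if (PageHeadHuge(page) && page_huge_active(page) &&
        get_page_unless_zero(page)) {
            /* Still a live, active hugetlb head page: safe to isolate. */
            clear_page_huge_active(page);
            list_move_tail(&page->lru, list);
    }
    spin_unlock(&hugetlb_lock);
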
diff --git a/queue-4.14/mm-hugetlb-remove-vm_bug_on_page-from-page_huge_active.patch b/queue-4.14/mm-hugetlb-remove-vm_bug_on_page-from-page_huge_active.patch
new file mode 100644
index 0000000..e46172b
--- /dev/null
+++ b/queue-4.14/mm-hugetlb-remove-vm_bug_on_page-from-page_huge_active.patch
@@ -0,0 +1,44 @@
+From ecbf4724e6061b4b01be20f6d797d64d462b2bc8 Mon Sep 17 00:00:00 2001
+From: Muchun Song <songmuchun@bytedance.com>
+Date: Thu, 4 Feb 2021 18:32:13 -0800
+Subject: mm: hugetlb: remove VM_BUG_ON_PAGE from page_huge_active
+
+From: Muchun Song <songmuchun@bytedance.com>
+
+commit ecbf4724e6061b4b01be20f6d797d64d462b2bc8 upstream.
+
+page_huge_active() can be called from scan_movable_pages(), which does
+not hold a reference count on the HugeTLB page.  So when we call
+page_huge_active() from scan_movable_pages(), the HugeTLB page can be
+freed in parallel.  Then we will trigger the BUG_ON in
+page_huge_active() when CONFIG_DEBUG_VM is enabled.  Just remove the
+VM_BUG_ON_PAGE.
+
+Link: https://lkml.kernel.org/r/20210115124942.46403-6-songmuchun@bytedance.com
+Fixes: 7e1f049efb86 ("mm: hugetlb: cleanup using paeg_huge_active()")
+Signed-off-by: Muchun Song <songmuchun@bytedance.com>
+Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Reviewed-by: Oscar Salvador <osalvador@suse.de>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Yang Shi <shy828301@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/hugetlb.c |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1233,8 +1233,7 @@ struct hstate *size_to_hstate(unsigned l
+  */
+ bool page_huge_active(struct page *page)
+ {
+-      VM_BUG_ON_PAGE(!PageHuge(page), page);
+-      return PageHead(page) && PagePrivate(&page[1]);
++      return PageHeadHuge(page) && PagePrivate(&page[1]);
+ }
+ /* never called for tail page */
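
The caller that motivates this, sketched roughly (simplified, not the
literal kernel code): scan_movable_pages() in the memory-hotplug offlining
path probes pages by PFN without taking a reference, so whatever it looks at
may be freed underneath it, and the probe has to fail quietly rather than BUG:

    for (pfn = start; pfn < end; pfn++) {
            struct page *page = pfn_to_page(pfn);

            /* No reference held: the page may be concurrently freed, so
             * page_huge_active() must simply return false in that case. */
            if (PageHuge(page) && page_huge_active(page))
                    return pfn;
    }
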
diff --git a/queue-4.14/mm-hugetlbfs-fix-cannot-migrate-the-fallocated-hugetlb-page.patch b/queue-4.14/mm-hugetlbfs-fix-cannot-migrate-the-fallocated-hugetlb-page.patch
new file mode 100644
index 0000000..51a5454
--- /dev/null
+++ b/queue-4.14/mm-hugetlbfs-fix-cannot-migrate-the-fallocated-hugetlb-page.patch
@@ -0,0 +1,71 @@
+From 585fc0d2871c9318c949fbf45b1f081edd489e96 Mon Sep 17 00:00:00 2001
+From: Muchun Song <songmuchun@bytedance.com>
+Date: Thu, 4 Feb 2021 18:32:03 -0800
+Subject: mm: hugetlbfs: fix cannot migrate the fallocated HugeTLB page
+
+From: Muchun Song <songmuchun@bytedance.com>
+
+commit 585fc0d2871c9318c949fbf45b1f081edd489e96 upstream.
+
+If a new hugetlb page is allocated during fallocate, it will not be
+marked as active (set_page_huge_active), which will result in a later
+isolate_huge_page() failure when the page migration code would like to
+move that page.  Such a failure would be unexpected and wrong.
+
+Only export set_page_huge_active and leave clear_page_huge_active
+static, since it has no external users.
+
+Link: https://lkml.kernel.org/r/20210115124942.46403-3-songmuchun@bytedance.com
+Fixes: 70c3547e36f5 ("hugetlbfs: add hugetlbfs_fallocate()")
+Signed-off-by: Muchun Song <songmuchun@bytedance.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
+Reviewed-by: Oscar Salvador <osalvador@suse.de>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Yang Shi <shy828301@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/hugetlbfs/inode.c    |    3 ++-
+ include/linux/hugetlb.h |    3 +++
+ mm/hugetlb.c            |    2 +-
+ 3 files changed, 6 insertions(+), 2 deletions(-)
+
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -649,8 +649,9 @@ static long hugetlbfs_fallocate(struct f
+               mutex_unlock(&hugetlb_fault_mutex_table[hash]);
++              set_page_huge_active(page);
+               /*
+-               * page_put due to reference from alloc_huge_page()
++               * put_page() due to reference from alloc_huge_page()
+                * unlock_page because locked by add_to_page_cache()
+                */
+               put_page(page);
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -531,6 +531,9 @@ static inline void set_huge_swap_pte_at(
+       set_huge_pte_at(mm, addr, ptep, pte);
+ }
+ #endif
++
++void set_page_huge_active(struct page *page);
++
+ #else /* CONFIG_HUGETLB_PAGE */
+ struct hstate {};
+ #define alloc_huge_page(v, a, r) NULL
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1238,7 +1238,7 @@ bool page_huge_active(struct page *page)
+ }
+ /* never called for tail page */
+-static void set_page_huge_active(struct page *page)
++void set_page_huge_active(struct page *page)
+ {
+       VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
+       SetPagePrivate(&page[1]);
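
The ordering the fix establishes in hugetlbfs_fallocate(), roughly (a
simplified sketch; the page-cache insertion and error handling are elided):

    page = alloc_huge_page(vma, addr, 0);   /* new page, not yet "active" */
    /* ... add the page to the page cache under the fault mutex ... */
    set_page_huge_active(page);   /* must precede dropping the reference, */
    put_page(page);               /* or isolate_huge_page() rejects it    */
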
diff --git a/queue-4.14/mm-thp-fix-madv_remove-deadlock-on-shmem-thp.patch b/queue-4.14/mm-thp-fix-madv_remove-deadlock-on-shmem-thp.patch
new file mode 100644
index 0000000..d664c93
--- /dev/null
+++ b/queue-4.14/mm-thp-fix-madv_remove-deadlock-on-shmem-thp.patch
@@ -0,0 +1,111 @@
+From 1c2f67308af4c102b4e1e6cd6f69819ae59408e0 Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Thu, 4 Feb 2021 18:32:31 -0800
+Subject: mm: thp: fix MADV_REMOVE deadlock on shmem THP
+
+From: Hugh Dickins <hughd@google.com>
+
+commit 1c2f67308af4c102b4e1e6cd6f69819ae59408e0 upstream.
+
+Sergey reported deadlock between kswapd correctly doing its usual
+lock_page(page) followed by down_read(page->mapping->i_mmap_rwsem), and
+madvise(MADV_REMOVE) on an madvise(MADV_HUGEPAGE) area doing
+down_write(page->mapping->i_mmap_rwsem) followed by lock_page(page).
+
+This happened when shmem_fallocate(punch hole)'s unmap_mapping_range()
+reaches zap_pmd_range()'s call to __split_huge_pmd().  The same deadlock
+could occur when partially truncating a mapped huge tmpfs file, or using
+fallocate(FALLOC_FL_PUNCH_HOLE) on it.
+
+__split_huge_pmd()'s page lock was added in 5.8, to make sure that any
+concurrent use of reuse_swap_page() (holding page lock) could not catch
+the anon THP's mapcounts and swapcounts while they were being split.
+
+Fortunately, reuse_swap_page() is never applied to a shmem or file THP
+(not even by khugepaged, which checks PageSwapCache before calling), and
+anonymous THPs are never created in shmem or file areas: so that
+__split_huge_pmd()'s page lock can only be necessary for anonymous THPs,
+on which there is no risk of deadlock with i_mmap_rwsem.
+
+Link: https://lkml.kernel.org/r/alpine.LSU.2.11.2101161409470.2022@eggly.anvils
+Fixes: c444eb564fb1 ("mm: thp: make the THP mapcount atomic against __split_huge_pmd_locked()")
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Reported-by: Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com>
+Reviewed-by: Andrea Arcangeli <aarcange@redhat.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/huge_memory.c |   37 +++++++++++++++++++++++--------------
+ 1 file changed, 23 insertions(+), 14 deletions(-)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2198,7 +2198,7 @@ void __split_huge_pmd(struct vm_area_str
+       spinlock_t *ptl;
+       struct mm_struct *mm = vma->vm_mm;
+       unsigned long haddr = address & HPAGE_PMD_MASK;
+-      bool was_locked = false;
++      bool do_unlock_page = false;
+       pmd_t _pmd;
+       mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
+@@ -2211,7 +2211,6 @@ void __split_huge_pmd(struct vm_area_str
+       VM_BUG_ON(freeze && !page);
+       if (page) {
+               VM_WARN_ON_ONCE(!PageLocked(page));
+-              was_locked = true;
+               if (page != pmd_page(*pmd))
+                       goto out;
+       }
+@@ -2220,19 +2219,29 @@ repeat:
+       if (pmd_trans_huge(*pmd)) {
+               if (!page) {
+                       page = pmd_page(*pmd);
+-                      if (unlikely(!trylock_page(page))) {
+-                              get_page(page);
+-                              _pmd = *pmd;
+-                              spin_unlock(ptl);
+-                              lock_page(page);
+-                              spin_lock(ptl);
+-                              if (unlikely(!pmd_same(*pmd, _pmd))) {
+-                                      unlock_page(page);
++                      /*
++                       * An anonymous page must be locked, to ensure that a
++                       * concurrent reuse_swap_page() sees stable mapcount;
++                       * but reuse_swap_page() is not used on shmem or file,
++                       * and page lock must not be taken when zap_pmd_range()
++                       * calls __split_huge_pmd() while i_mmap_lock is held.
++                       */
++                      if (PageAnon(page)) {
++                              if (unlikely(!trylock_page(page))) {
++                                      get_page(page);
++                                      _pmd = *pmd;
++                                      spin_unlock(ptl);
++                                      lock_page(page);
++                                      spin_lock(ptl);
++                                      if (unlikely(!pmd_same(*pmd, _pmd))) {
++                                              unlock_page(page);
++                                              put_page(page);
++                                              page = NULL;
++                                              goto repeat;
++                                      }
+                                       put_page(page);
+-                                      page = NULL;
+-                                      goto repeat;
+                               }
+-                              put_page(page);
++                              do_unlock_page = true;
+                       }
+               }
+               if (PageMlocked(page))
+@@ -2242,7 +2251,7 @@ repeat:
+       __split_huge_pmd_locked(vma, pmd, haddr, freeze);
+ out:
+       spin_unlock(ptl);
+-      if (!was_locked && page)
++      if (do_unlock_page)
+               unlock_page(page);
+       mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PMD_SIZE);
+ }
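
The lock ordering behind the deadlock, laid out side by side (a sketch of the
scenario described in the commit message above, not kernel code):

    kswapd (reclaim)                      madvise(MADV_REMOVE) / hole punch
    ----------------                      ----------------------------------
    lock_page(page)                       down_write(&mapping->i_mmap_rwsem)
    down_read(&mapping->i_mmap_rwsem)       unmap_mapping_range()
      ... waits for the writer ...            zap_pmd_range()
                                                __split_huge_pmd()
                                                  lock_page(page)
                                                    ... waits for kswapd ...

Taking the page lock in __split_huge_pmd() only for PageAnon() pages removes
the second leg of the cycle for shmem and file THPs, which is safe because
reuse_swap_page() is never used on them.
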
diff --git a/queue-4.14/series b/queue-4.14/series
index 48765303ac097d85ab3cb1836b9e634368e9e255..5dba389309b4ab6b861ebff8f6886241de94a3dd 100644
--- a/queue-4.14/series
+++ b/queue-4.14/series
@@ -18,3 +18,8 @@ cifs-report-error-instead-of-invalid-when-revalidating-a-dentry-fails.patch
 smb3-fix-out-of-bounds-bug-in-smb2_negotiate.patch
 mmc-core-limit-retries-when-analyse-of-sdio-tuples-fails.patch
 nvme-pci-avoid-the-deepest-sleep-state-on-kingston-a2000-ssds.patch
+arm-footbridge-fix-dc21285-pci-configuration-accessors.patch
+mm-hugetlbfs-fix-cannot-migrate-the-fallocated-hugetlb-page.patch
+mm-hugetlb-fix-a-race-between-isolating-and-freeing-page.patch
+mm-hugetlb-remove-vm_bug_on_page-from-page_huge_active.patch
+mm-thp-fix-madv_remove-deadlock-on-shmem-thp.patch