git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 4.19
author Sasha Levin <sashal@kernel.org>
Thu, 24 Sep 2020 23:38:16 +0000 (19:38 -0400)
committer Sasha Levin <sashal@kernel.org>
Thu, 24 Sep 2020 23:38:16 +0000 (19:38 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-4.19/kprobes-fix-kill-kprobe-which-has-been-marked-as-gon.patch [new file with mode: 0644]
queue-4.19/kvm-fix-memory-leak-in-kvm_io_bus_unregister_dev.patch [new file with mode: 0644]
queue-4.19/mm-thp-fix-__split_huge_pmd_locked-for-migration-pmd.patch [new file with mode: 0644]
queue-4.19/series

diff --git a/queue-4.19/kprobes-fix-kill-kprobe-which-has-been-marked-as-gon.patch b/queue-4.19/kprobes-fix-kill-kprobe-which-has-been-marked-as-gon.patch
new file mode 100644 (file)
index 0000000..b297615
--- /dev/null
+++ b/queue-4.19/kprobes-fix-kill-kprobe-which-has-been-marked-as-gon.patch
@@ -0,0 +1,70 @@
+From 07acd8e6b03311e8c965f1331fe7e87d88d35a15 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Sep 2020 21:20:21 -0700
+Subject: kprobes: fix kill kprobe which has been marked as gone
+
+From: Muchun Song <songmuchun@bytedance.com>
+
+[ Upstream commit b0399092ccebd9feef68d4ceb8d6219a8c0caa05 ]
+
+If a kprobe is marked as gone, we should not kill it again.  Otherwise, we
+can disarm the kprobe more than once.  In that case, the
+kprobe_ftrace_enabled count can become unbalanced, which can cause the
+kprobe to stop working.
+
+Fixes: e8386a0cb22f ("kprobes: support probing module __exit function")
+Co-developed-by: Chengming Zhou <zhouchengming@bytedance.com>
+Signed-off-by: Muchun Song <songmuchun@bytedance.com>
+Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: "Naveen N . Rao" <naveen.n.rao@linux.ibm.com>
+Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+Cc: David S. Miller <davem@davemloft.net>
+Cc: Song Liu <songliubraving@fb.com>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: <stable@vger.kernel.org>
+Link: https://lkml.kernel.org/r/20200822030055.32383-1-songmuchun@bytedance.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/kprobes.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index eb4bffe6d764d..230d9d599b5aa 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -2061,6 +2061,9 @@ static void kill_kprobe(struct kprobe *p)
+ {
+       struct kprobe *kp;
+
++      if (WARN_ON_ONCE(kprobe_gone(p)))
++              return;
++
+       p->flags |= KPROBE_FLAG_GONE;
+       if (kprobe_aggrprobe(p)) {
+               /*
+@@ -2243,7 +2246,10 @@ static int kprobes_module_callback(struct notifier_block *nb,
+       mutex_lock(&kprobe_mutex);
+       for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+               head = &kprobe_table[i];
+-              hlist_for_each_entry_rcu(p, head, hlist)
++              hlist_for_each_entry_rcu(p, head, hlist) {
++                      if (kprobe_gone(p))
++                              continue;
++
+                       if (within_module_init((unsigned long)p->addr, mod) ||
+                           (checkcore &&
+                            within_module_core((unsigned long)p->addr, mod))) {
+@@ -2260,6 +2266,7 @@ static int kprobes_module_callback(struct notifier_block *nb,
+                                */
+                               kill_kprobe(p);
+                       }
++              }
+       }
+       mutex_unlock(&kprobe_mutex);
+       return NOTIFY_DONE;
+-- 
+2.25.1
+
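
The two hunks above enforce one rule: a kprobe already marked gone must never be killed (and therefore disarmed) again, because each extra disarm unbalances the shared kprobe_ftrace_enabled count. A minimal userspace C sketch of that rule, as a toy model only (probe, kill_probe and ftrace_enabled_count are invented names for illustration, not the kernel's):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static int ftrace_enabled_count = 1;    /* one armed probe */

struct probe { bool gone; };

static void kill_probe(struct probe *p)
{
	if (p->gone)                    /* the fix: killing is idempotent */
		return;
	p->gone = true;
	ftrace_enabled_count--;         /* disarm exactly once */
}

int main(void)
{
	struct probe p = { .gone = false };

	kill_probe(&p);                 /* e.g. module-unload notifier */
	kill_probe(&p);                 /* a second path hits the same probe */
	assert(ftrace_enabled_count == 0); /* balanced, not -1 */
	printf("count = %d\n", ftrace_enabled_count);
	return 0;
}

Without the early return, the second call would drive the count to -1, which is exactly the imbalance the commit message describes; the kernel patch additionally skips gone probes in the module notifier, so the WARN_ON_ONCE() guard is normally never reached.
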
diff --git a/queue-4.19/kvm-fix-memory-leak-in-kvm_io_bus_unregister_dev.patch b/queue-4.19/kvm-fix-memory-leak-in-kvm_io_bus_unregister_dev.patch
new file mode 100644 (file)
index 0000000..57f83ff
--- /dev/null
+++ b/queue-4.19/kvm-fix-memory-leak-in-kvm_io_bus_unregister_dev.patch
@@ -0,0 +1,71 @@
+From 83b34a9879ec079fbd00aeb2c01557d51151bc8a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Sep 2020 11:55:35 -0700
+Subject: KVM: fix memory leak in kvm_io_bus_unregister_dev()
+
+From: Rustam Kovhaev <rkovhaev@gmail.com>
+
+[ Upstream commit f65886606c2d3b562716de030706dfe1bea4ed5e ]
+
+When kmalloc() fails in kvm_io_bus_unregister_dev(), before removing
+the bus, we should iterate over all other devices linked to it and call
+kvm_iodevice_destructor() for them.
+
+Fixes: 90db10434b16 ("KVM: kvm_io_bus_unregister_dev() should never fail")
+Cc: stable@vger.kernel.org
+Reported-and-tested-by: syzbot+f196caa45793d6374707@syzkaller.appspotmail.com
+Link: https://syzkaller.appspot.com/bug?extid=f196caa45793d6374707
+Signed-off-by: Rustam Kovhaev <rkovhaev@gmail.com>
+Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Message-Id: <20200907185535.233114-1-rkovhaev@gmail.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ virt/kvm/kvm_main.c | 21 ++++++++++++---------
+ 1 file changed, 12 insertions(+), 9 deletions(-)
+
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 2155b52b17eca..6bd01d12df2ec 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -3844,7 +3844,7 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+                              struct kvm_io_device *dev)
+ {
+-      int i;
++      int i, j;
+       struct kvm_io_bus *new_bus, *bus;
+
+       bus = kvm_get_bus(kvm, bus_idx);
+@@ -3861,17 +3861,20 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+
+       new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) *
+                         sizeof(struct kvm_io_range)), GFP_KERNEL);
+-      if (!new_bus)  {
++      if (new_bus) {
++              memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
++              new_bus->dev_count--;
++              memcpy(new_bus->range + i, bus->range + i + 1,
++                     (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
++      } else {
+               pr_err("kvm: failed to shrink bus, removing it completely\n");
+-              goto broken;
++              for (j = 0; j < bus->dev_count; j++) {
++                      if (j == i)
++                              continue;
++                      kvm_iodevice_destructor(bus->range[j].dev);
++              }
+       }
+
+-      memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
+-      new_bus->dev_count--;
+-      memcpy(new_bus->range + i, bus->range + i + 1,
+-             (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
+-
+-broken:
+       rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
+       synchronize_srcu_expedited(&kvm->srcu);
+       kfree(bus);
+-- 
+2.25.1
+
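
The fix above is all about the allocation-failure path: the old code jumped to the broken label and dropped the whole bus without destroying the devices still linked to it. A toy userspace sketch of the corrected pattern (not the KVM API; struct dev, destroy() and unregister() are invented for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct dev { const char *name; };

static void destroy(struct dev *d) { printf("destroy %s\n", d->name); }

/* Remove slot i from devs[count]; returns the shrunken array, or NULL
 * when allocation fails, in which case the surviving devices are
 * destroyed first (slot i itself is skipped, matching the patch). */
static struct dev **unregister(struct dev **devs, int count, int i)
{
	struct dev **new_devs = malloc((count - 1) * sizeof(*new_devs));

	if (new_devs) {
		memcpy(new_devs, devs, i * sizeof(*new_devs));
		memcpy(new_devs + i, devs + i + 1,
		       (count - 1 - i) * sizeof(*new_devs));
	} else {
		for (int j = 0; j < count; j++) /* the fix: no leak */
			if (j != i)
				destroy(devs[j]);
	}
	free(devs);
	return new_devs;
}

int main(void)
{
	struct dev a = { "a" }, b = { "b" }, c = { "c" };
	struct dev **devs = malloc(3 * sizeof(*devs));

	devs[0] = &a; devs[1] = &b; devs[2] = &c;
	devs = unregister(devs, 3, 1);  /* drop "b", keep "a" and "c" */
	free(devs);
	return 0;
}

The plain pointer swap and free() here stand in for rcu_assign_pointer() and synchronize_srcu_expedited() in the real function; the essential change is that the else branch now walks the old bus and destroys every surviving device before the bus is freed.
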
diff --git a/queue-4.19/mm-thp-fix-__split_huge_pmd_locked-for-migration-pmd.patch b/queue-4.19/mm-thp-fix-__split_huge_pmd_locked-for-migration-pmd.patch
new file mode 100644 (file)
index 0000000..360f444
--- /dev/null
+++ b/queue-4.19/mm-thp-fix-__split_huge_pmd_locked-for-migration-pmd.patch
@@ -0,0 +1,110 @@
+From e62a9e2f8f4964663b95f27b27f2a5fde710782f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Sep 2020 21:20:24 -0700
+Subject: mm/thp: fix __split_huge_pmd_locked() for migration PMD
+
+From: Ralph Campbell <rcampbell@nvidia.com>
+
+[ Upstream commit ec0abae6dcdf7ef88607c869bf35a4b63ce1b370 ]
+
+A migrating transparent huge page has to already be unmapped.  Otherwise,
+the page could be modified while it is being copied to a new page and data
+could be lost.  The function __split_huge_pmd() checks for a PMD migration
+entry before calling __split_huge_pmd_locked(), leading one to think that
+__split_huge_pmd_locked() can handle splitting a migrating PMD.
+
+However, the code always increments the page->_mapcount and adjusts the
+memory control group accounting assuming the page is mapped.
+
+Also, if the PMD entry is a migration PMD entry, the call to
+is_huge_zero_pmd(*pmd) is incorrect because it calls pmd_pfn(pmd) instead
+of migration_entry_to_pfn(pmd_to_swp_entry(pmd)).  Fix these problems by
+checking for a PMD migration entry.
+
+Fixes: 84c3fc4e9c56 ("mm: thp: check pmd migration entry in common path")
+Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Yang Shi <shy828301@gmail.com>
+Reviewed-by: Zi Yan <ziy@nvidia.com>
+Cc: Jerome Glisse <jglisse@redhat.com>
+Cc: John Hubbard <jhubbard@nvidia.com>
+Cc: Alistair Popple <apopple@nvidia.com>
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: Jason Gunthorpe <jgg@nvidia.com>
+Cc: Bharata B Rao <bharata@linux.ibm.com>
+Cc: Ben Skeggs <bskeggs@redhat.com>
+Cc: Shuah Khan <shuah@kernel.org>
+Cc: <stable@vger.kernel.org>   [4.14+]
+Link: https://lkml.kernel.org/r/20200903183140.19055-1-rcampbell@nvidia.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/huge_memory.c | 40 +++++++++++++++++++++++-----------------
+ 1 file changed, 23 insertions(+), 17 deletions(-)
+
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 1443ae6fee9bd..8b137248b146d 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2145,7 +2145,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
+               put_page(page);
+               add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
+               return;
+-      } else if (is_huge_zero_pmd(*pmd)) {
++      } else if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
+               /*
+                * FIXME: Do we want to invalidate secondary mmu by calling
+                * mmu_notifier_invalidate_range() see comments below inside
+@@ -2233,27 +2233,33 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
+               pte = pte_offset_map(&_pmd, addr);
+               BUG_ON(!pte_none(*pte));
+               set_pte_at(mm, addr, pte, entry);
+-              atomic_inc(&page[i]._mapcount);
+-              pte_unmap(pte);
+-      }
+-
+-      /*
+-       * Set PG_double_map before dropping compound_mapcount to avoid
+-       * false-negative page_mapped().
+-       */
+-      if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) {
+-              for (i = 0; i < HPAGE_PMD_NR; i++)
++              if (!pmd_migration)
+                       atomic_inc(&page[i]._mapcount);
++              pte_unmap(pte);
+       }
+
+-      if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
+-              /* Last compound_mapcount is gone. */
+-              __dec_node_page_state(page, NR_ANON_THPS);
+-              if (TestClearPageDoubleMap(page)) {
+-                      /* No need in mapcount reference anymore */
++      if (!pmd_migration) {
++              /*
++               * Set PG_double_map before dropping compound_mapcount to avoid
++               * false-negative page_mapped().
++               */
++              if (compound_mapcount(page) > 1 &&
++                  !TestSetPageDoubleMap(page)) {
+                       for (i = 0; i < HPAGE_PMD_NR; i++)
+-                              atomic_dec(&page[i]._mapcount);
++                              atomic_inc(&page[i]._mapcount);
++              }
++
++              lock_page_memcg(page);
++              if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
++                      /* Last compound_mapcount is gone. */
++                      __dec_lruvec_page_state(page, NR_ANON_THPS);
++                      if (TestClearPageDoubleMap(page)) {
++                              /* No need in mapcount reference anymore */
++                              for (i = 0; i < HPAGE_PMD_NR; i++)
++                                      atomic_dec(&page[i]._mapcount);
++                      }
+               }
++              unlock_page_memcg(page);
+       }
+
+       smp_wmb(); /* make pte visible before pmd */
+-- 
+2.25.1
+
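
The core of the fix is an accounting rule: when __split_huge_pmd_locked() splits a PMD migration entry, the page is already unmapped, so neither the per-subpage _mapcount nor the memcg counters may be touched. A toy userspace C sketch of that rule (NR_SUBPAGES, mapcount and split_huge_mapping() are invented stand-ins, not kernel code):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_SUBPAGES 4                   /* stand-in for HPAGE_PMD_NR */

static int mapcount[NR_SUBPAGES];       /* stand-in for page[i]._mapcount */

static void split_huge_mapping(bool pmd_migration)
{
	for (int i = 0; i < NR_SUBPAGES; i++) {
		/* ...install one PTE (or migration entry) per subpage... */
		if (!pmd_migration)     /* the fix: skip for migration PMDs */
			mapcount[i]++;
	}
}

int main(void)
{
	split_huge_mapping(true);       /* splitting a migration PMD */
	for (int i = 0; i < NR_SUBPAGES; i++)
		assert(mapcount[i] == 0); /* unmapped page stays unmapped */
	printf("mapcounts consistent\n");
	return 0;
}

The is_huge_zero_pmd() half of the fix is the same idea one level up: pmd_pfn() is only meaningful for a present PMD, so the check is now gated on pmd_trans_huge(*pmd).
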
diff --git a/queue-4.19/series b/queue-4.19/series
index 421138f841edf000cf0d4c04db18c4f1d97824b6..0387cc8025b1592bda12d718ef06129cef516e65 100644 (file)
--- a/queue-4.19/series
+++ b/queue-4.19/series
@@ -1 +1,4 @@
 af_key-pfkey_dump-needs-parameter-validation.patch
+kvm-fix-memory-leak-in-kvm_io_bus_unregister_dev.patch
+kprobes-fix-kill-kprobe-which-has-been-marked-as-gon.patch
+mm-thp-fix-__split_huge_pmd_locked-for-migration-pmd.patch