--- /dev/null
+From 840f6a27c7a9f0ccef5b4f8f9a310f52ab29e963 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 4 Sep 2020 21:07:49 -0700
+Subject: ibmvnic: add missing parenthesis in do_reset()
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 8ae4dff882eb879c17bf46574201bd37fc6bc8b5 ]
+
+Indentation and logic clearly show that this code is missing
+parentheses.
+
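+To illustrate the failure mode, here is a minimal userspace sketch
+(not the driver code; names are hypothetical): without the braces,
+only the debug print is governed by the if, and the goto runs
+unconditionally, so even the success path bails out early:
+
+  #include <stdio.h>
+
+  static int reset_pools(int fail) { return fail ? -1 : 0; }
+
+  static int do_reset_sketch(int fail)
+  {
+          int rc = reset_pools(fail);
+
+          if (rc)
+                  printf("reset tx pools failed (%d)\n", rc);
+                  goto out;    /* BUG: not under the if; always runs */
+          printf("rx pools reset\n");    /* never reached */
+  out:
+          return rc;
+  }
+
+  int main(void)
+  {
+          return do_reset_sketch(0);    /* success still jumps to out */
+  }
+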
+Fixes: 9f1345737790 ("ibmvnic fix NULL tx_pools and rx_tools issue at do_reset")
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/ibm/ibmvnic.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index de45b3709c14e..5329af2337a91 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -1939,16 +1939,18 @@ static int do_reset(struct ibmvnic_adapter *adapter,
+
+ } else {
+ rc = reset_tx_pools(adapter);
+- if (rc)
++ if (rc) {
+ netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
+ rc);
+ goto out;
++ }
+
+ rc = reset_rx_pools(adapter);
+- if (rc)
++ if (rc) {
+ netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
+ rc);
+ goto out;
++ }
+ }
+ ibmvnic_disable_irqs(adapter);
+ }
+--
+2.25.1
+
--- /dev/null
+From dd0875946a01df00cb2c5d31f317ab68b6df26b0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Aug 2020 13:26:41 -0400
+Subject: ibmvnic fix NULL tx_pools and rx_tools issue at do_reset
+
+From: Mingming Cao <mmc@linux.vnet.ibm.com>
+
+[ Upstream commit 9f13457377907fa253aef560e1a37e1ca4197f9b ]
+
+During do_reset, ibmvnic tries to re-initialize the tx_pools
+and rx_pools to avoid re-allocating the long term buffer. However,
+there is a window inside do_reset where the tx_pools and
+rx_pools have been freed but not yet re-initialized, making it
+possible to dereference NULL pointers.
+
+Fix this by always checking that tx_pool and rx_pool are not NULL
+after ibmvnic_login and, if either is, re-allocating the pools.
+This avoids calling reset_tx_pools/reset_rx_pools with a NULL
+adapter tx_pools/rx_pools pointer. Also add NULL pointer checks in
+reset_tx_pools and reset_rx_pools to safely handle the NULL case.
+
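+As a simplified illustration (hypothetical types, not the driver
+structures), the guard pattern added to the reset paths looks like
+this: bail out before dereferencing a pool that do_reset may have
+freed and not yet re-allocated:
+
+  struct pool { int size; };
+  struct adapter { struct pool *tx_pool; };
+
+  static int reset_tx_pools_sketch(struct adapter *a)
+  {
+          if (!a->tx_pool)        /* freed, not yet re-allocated */
+                  return -1;
+          a->tx_pool->size = 0;   /* safe to dereference from here */
+          return 0;
+  }
+
+  int main(void)
+  {
+          struct adapter a = { 0 };       /* tx_pool is NULL */
+
+          return reset_tx_pools_sketch(&a) ? 0 : 1; /* no crash */
+  }
+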
+Signed-off-by: Mingming Cao <mmc@linux.vnet.ibm.com>
+Signed-off-by: Dany Madden <drt@linux.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/ibm/ibmvnic.c | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 2d20a48f0ba0a..de45b3709c14e 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -416,6 +416,9 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
+ int i, j, rc;
+ u64 *size_array;
+
++ if (!adapter->rx_pool)
++ return -1;
++
+ size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
+ be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
+
+@@ -586,6 +589,9 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter)
+ int tx_scrqs;
+ int i, rc;
+
++ if (!adapter->tx_pool)
++ return -1;
++
+ tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+ for (i = 0; i < tx_scrqs; i++) {
+ rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
+@@ -1918,7 +1924,10 @@ static int do_reset(struct ibmvnic_adapter *adapter,
+ adapter->req_rx_add_entries_per_subcrq !=
+ old_num_rx_slots ||
+ adapter->req_tx_entries_per_subcrq !=
+- old_num_tx_slots) {
++ old_num_tx_slots ||
++ !adapter->rx_pool ||
++ !adapter->tso_pool ||
++ !adapter->tx_pool) {
+ release_rx_pools(adapter);
+ release_tx_pools(adapter);
+ release_napi(adapter);
+@@ -1931,10 +1940,14 @@ static int do_reset(struct ibmvnic_adapter *adapter,
+ } else {
+ rc = reset_tx_pools(adapter);
+ if (rc)
++ netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
++ rc);
+ goto out;
+
+ rc = reset_rx_pools(adapter);
+ if (rc)
++ netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
++ rc);
+ goto out;
+ }
+ ibmvnic_disable_irqs(adapter);
+--
+2.25.1
+
--- /dev/null
+From ce3643e9454ae2519e1d83db57bde3010a33ad12 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Sep 2020 21:20:21 -0700
+Subject: kprobes: fix kill kprobe which has been marked as gone
+
+From: Muchun Song <songmuchun@bytedance.com>
+
+[ Upstream commit b0399092ccebd9feef68d4ceb8d6219a8c0caa05 ]
+
+If a kprobe is marked as gone, we should not kill it again. Otherwise,
+we can disarm the kprobe more than once. In that case, the
+kprobe_ftrace_enabled count can become unbalanced, which can cause the
+kprobe to stop working.
+
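+As a simplified sketch (hypothetical names, not the kprobes API),
+the fix makes the kill operation idempotent so the ftrace refcount
+is decremented exactly once per probe, however often the module
+notifier encounters it:
+
+  struct probe { unsigned int flags; };
+  #define PROBE_FLAG_GONE 0x1
+
+  static int kprobe_ftrace_refcount;
+
+  static void kill_probe_sketch(struct probe *p)
+  {
+          if (p->flags & PROBE_FLAG_GONE)
+                  return;         /* already gone: don't disarm again */
+          p->flags |= PROBE_FLAG_GONE;
+          kprobe_ftrace_refcount--;
+  }
+
+  int main(void)
+  {
+          struct probe p = { 0 };
+
+          kill_probe_sketch(&p);
+          kill_probe_sketch(&p);  /* second call is now a no-op */
+          return kprobe_ftrace_refcount == -1 ? 0 : 1;
+  }
+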
+Fixes: e8386a0cb22f ("kprobes: support probing module __exit function")
+Co-developed-by: Chengming Zhou <zhouchengming@bytedance.com>
+Signed-off-by: Muchun Song <songmuchun@bytedance.com>
+Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: "Naveen N . Rao" <naveen.n.rao@linux.ibm.com>
+Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+Cc: David S. Miller <davem@davemloft.net>
+Cc: Song Liu <songliubraving@fb.com>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: <stable@vger.kernel.org>
+Link: https://lkml.kernel.org/r/20200822030055.32383-1-songmuchun@bytedance.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/kprobes.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index bbff4bccb885d..5646f291eb705 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -2088,6 +2088,9 @@ static void kill_kprobe(struct kprobe *p)
+ {
+ struct kprobe *kp;
+
++ if (WARN_ON_ONCE(kprobe_gone(p)))
++ return;
++
+ p->flags |= KPROBE_FLAG_GONE;
+ if (kprobe_aggrprobe(p)) {
+ /*
+@@ -2270,7 +2273,10 @@ static int kprobes_module_callback(struct notifier_block *nb,
+ mutex_lock(&kprobe_mutex);
+ for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+ head = &kprobe_table[i];
+- hlist_for_each_entry_rcu(p, head, hlist)
++ hlist_for_each_entry_rcu(p, head, hlist) {
++ if (kprobe_gone(p))
++ continue;
++
+ if (within_module_init((unsigned long)p->addr, mod) ||
+ (checkcore &&
+ within_module_core((unsigned long)p->addr, mod))) {
+@@ -2287,6 +2293,7 @@ static int kprobes_module_callback(struct notifier_block *nb,
+ */
+ kill_kprobe(p);
+ }
++ }
+ }
+ mutex_unlock(&kprobe_mutex);
+ return NOTIFY_DONE;
+--
+2.25.1
+
--- /dev/null
+From fa69719f1dab682641d6d682cd4df5404fce0c00 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Sep 2020 21:20:24 -0700
+Subject: mm/thp: fix __split_huge_pmd_locked() for migration PMD
+
+From: Ralph Campbell <rcampbell@nvidia.com>
+
+[ Upstream commit ec0abae6dcdf7ef88607c869bf35a4b63ce1b370 ]
+
+A migrating transparent huge page has to already be unmapped. Otherwise,
+the page could be modified while it is being copied to a new page and data
+could be lost. The function __split_huge_pmd() checks for a PMD migration
+entry before calling __split_huge_pmd_locked() leading one to think that
+__split_huge_pmd_locked() can handle splitting a migrating PMD.
+
+However, the code always increments the page->_mapcount and adjusts the
+memory control group accounting assuming the page is mapped.
+
+Also, if the PMD entry is a migration PMD entry, the call to
+is_huge_zero_pmd(*pmd) is incorrect because it calls pmd_pfn(pmd) instead
+of migration_entry_to_pfn(pmd_to_swp_entry(pmd)). Fix these problems by
+checking for a PMD migration entry.
+
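+As a simplified sketch (hypothetical types, not the mm structures),
+the shape of the fix: a migration PMD maps nothing, so the split
+path must skip the per-subpage _mapcount and memcg accounting it
+performs for a normally mapped THP:
+
+  #include <stdbool.h>
+
+  struct subpage { int mapcount; };
+
+  static void split_pmd_sketch(struct subpage *pages, int nr,
+                               bool pmd_migration)
+  {
+          for (int i = 0; i < nr; i++) {
+                  /* ... install the new PTE for pages[i] ... */
+                  if (!pmd_migration)
+                          pages[i].mapcount++;
+          }
+          /* compound mapcount and PG_double_map handling are
+           * likewise skipped for the migration case */
+  }
+
+  int main(void)
+  {
+          struct subpage pages[4] = { { 0 } };
+
+          split_pmd_sketch(pages, 4, true);  /* mapcounts stay 0 */
+          return pages[0].mapcount;
+  }
+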
+Fixes: 84c3fc4e9c56 ("mm: thp: check pmd migration entry in common path")
+Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Yang Shi <shy828301@gmail.com>
+Reviewed-by: Zi Yan <ziy@nvidia.com>
+Cc: Jerome Glisse <jglisse@redhat.com>
+Cc: John Hubbard <jhubbard@nvidia.com>
+Cc: Alistair Popple <apopple@nvidia.com>
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: Jason Gunthorpe <jgg@nvidia.com>
+Cc: Bharata B Rao <bharata@linux.ibm.com>
+Cc: Ben Skeggs <bskeggs@redhat.com>
+Cc: Shuah Khan <shuah@kernel.org>
+Cc: <stable@vger.kernel.org> [4.14+]
+Link: https://lkml.kernel.org/r/20200903183140.19055-1-rcampbell@nvidia.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/huge_memory.c | 40 +++++++++++++++++++++++-----------------
+ 1 file changed, 23 insertions(+), 17 deletions(-)
+
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index da9040a6838f8..873de55d93fb2 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2174,7 +2174,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
+ put_page(page);
+ add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
+ return;
+- } else if (is_huge_zero_pmd(*pmd)) {
++ } else if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
+ /*
+ * FIXME: Do we want to invalidate secondary mmu by calling
+ * mmu_notifier_invalidate_range() see comments below inside
+@@ -2262,27 +2262,33 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
+ pte = pte_offset_map(&_pmd, addr);
+ BUG_ON(!pte_none(*pte));
+ set_pte_at(mm, addr, pte, entry);
+- atomic_inc(&page[i]._mapcount);
+- pte_unmap(pte);
+- }
+-
+- /*
+- * Set PG_double_map before dropping compound_mapcount to avoid
+- * false-negative page_mapped().
+- */
+- if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) {
+- for (i = 0; i < HPAGE_PMD_NR; i++)
++ if (!pmd_migration)
+ atomic_inc(&page[i]._mapcount);
++ pte_unmap(pte);
+ }
+
+- if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
+- /* Last compound_mapcount is gone. */
+- __dec_node_page_state(page, NR_ANON_THPS);
+- if (TestClearPageDoubleMap(page)) {
+- /* No need in mapcount reference anymore */
++ if (!pmd_migration) {
++ /*
++ * Set PG_double_map before dropping compound_mapcount to avoid
++ * false-negative page_mapped().
++ */
++ if (compound_mapcount(page) > 1 &&
++ !TestSetPageDoubleMap(page)) {
+ for (i = 0; i < HPAGE_PMD_NR; i++)
+- atomic_dec(&page[i]._mapcount);
++ atomic_inc(&page[i]._mapcount);
++ }
++
++ lock_page_memcg(page);
++ if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
++ /* Last compound_mapcount is gone. */
++ __dec_lruvec_page_state(page, NR_ANON_THPS);
++ if (TestClearPageDoubleMap(page)) {
++ /* No need in mapcount reference anymore */
++ for (i = 0; i < HPAGE_PMD_NR; i++)
++ atomic_dec(&page[i]._mapcount);
++ }
+ }
++ unlock_page_memcg(page);
+ }
+
+ smp_wmb(); /* make pte visible before pmd */
+--
+2.25.1
+
af_key-pfkey_dump-needs-parameter-validation.patch
+ibmvnic-fix-null-tx_pools-and-rx_tools-issue-at-do_r.patch
+ibmvnic-add-missing-parenthesis-in-do_reset.patch
+kprobes-fix-kill-kprobe-which-has-been-marked-as-gon.patch
+mm-thp-fix-__split_huge_pmd_locked-for-migration-pmd.patch