--- /dev/null
+From b3bc5f16a17e087822ea3c23b014f90deafb5f1e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Sep 2020 21:20:21 -0700
+Subject: kprobes: fix kill kprobe which has been marked as gone
+
+From: Muchun Song <songmuchun@bytedance.com>
+
+[ Upstream commit b0399092ccebd9feef68d4ceb8d6219a8c0caa05 ]
+
+If a kprobe is marked as gone, we should not kill it again; otherwise the
+kprobe can be disarmed more than once. In that case the
+kprobe_ftrace_enabled count becomes unbalanced, which can cause kprobes to
+stop working.
+
+Fixes: e8386a0cb22f ("kprobes: support probing module __exit function")
+Co-developed-by: Chengming Zhou <zhouchengming@bytedance.com>
+Signed-off-by: Muchun Song <songmuchun@bytedance.com>
+Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: "Naveen N . Rao" <naveen.n.rao@linux.ibm.com>
+Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+Cc: David S. Miller <davem@davemloft.net>
+Cc: Song Liu <songliubraving@fb.com>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: <stable@vger.kernel.org>
+Link: https://lkml.kernel.org/r/20200822030055.32383-1-songmuchun@bytedance.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/kprobes.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
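+The imbalance is easiest to see with a toy model of the arm/disarm
+accounting. The sketch below is plain user-space C with invented names
+(toy_probe, toy_disarm), not kernel code: a single shared enable count
+stands in for kprobe_ftrace_enabled, and the early return plays the role
+of the new kprobe_gone() guard, showing why killing the same probe twice
+must not decrement the count twice.
+
+#include <assert.h>
+#include <stdbool.h>
+#include <stdio.h>
+
+/* Toy stand-ins for kprobe_ftrace_enabled and KPROBE_FLAG_GONE. */
+static int ftrace_enabled_count;
+
+struct toy_probe { bool gone; bool armed; };
+
+static void toy_disarm(struct toy_probe *p)
+{
+    if (p->gone)            /* mirrors the new WARN_ON_ONCE(kprobe_gone(p)) guard */
+        return;
+    p->gone = true;
+    p->armed = false;
+    ftrace_enabled_count--; /* shared count: must only drop once per probe */
+}
+
+int main(void)
+{
+    struct toy_probe a = { .armed = true };
+
+    ftrace_enabled_count = 1;   /* probe a is armed */
+    toy_disarm(&a);
+    toy_disarm(&a);             /* second kill: harmless with the guard */
+    assert(ftrace_enabled_count == 0);
+    printf("count=%d\n", ftrace_enabled_count);
+    return 0;
+}
+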
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 7b3a5c35904a0..836a2e0226269 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -2117,6 +2117,9 @@ static void kill_kprobe(struct kprobe *p)
+ {
+ struct kprobe *kp;
+
++ if (WARN_ON_ONCE(kprobe_gone(p)))
++ return;
++
+ p->flags |= KPROBE_FLAG_GONE;
+ if (kprobe_aggrprobe(p)) {
+ /*
+@@ -2259,7 +2262,10 @@ static int kprobes_module_callback(struct notifier_block *nb,
+ mutex_lock(&kprobe_mutex);
+ for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+ head = &kprobe_table[i];
+- hlist_for_each_entry_rcu(p, head, hlist)
++ hlist_for_each_entry_rcu(p, head, hlist) {
++ if (kprobe_gone(p))
++ continue;
++
+ if (within_module_init((unsigned long)p->addr, mod) ||
+ (checkcore &&
+ within_module_core((unsigned long)p->addr, mod))) {
+@@ -2276,6 +2282,7 @@ static int kprobes_module_callback(struct notifier_block *nb,
+ */
+ kill_kprobe(p);
+ }
++ }
+ }
+ mutex_unlock(&kprobe_mutex);
+ return NOTIFY_DONE;
+--
+2.25.1
+
--- /dev/null
+From f88b41a313cefd6b2a569682c5fe871689c5a606 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Sep 2020 11:55:35 -0700
+Subject: KVM: fix memory leak in kvm_io_bus_unregister_dev()
+
+From: Rustam Kovhaev <rkovhaev@gmail.com>
+
+[ Upstream commit f65886606c2d3b562716de030706dfe1bea4ed5e ]
+
+When kmalloc() fails in kvm_io_bus_unregister_dev(), before removing
+the bus we should iterate over all other devices linked to it and call
+kvm_iodevice_destructor() for them.
+
+Fixes: 90db10434b16 ("KVM: kvm_io_bus_unregister_dev() should never fail")
+Cc: stable@vger.kernel.org
+Reported-and-tested-by: syzbot+f196caa45793d6374707@syzkaller.appspotmail.com
+Link: https://syzkaller.appspot.com/bug?extid=f196caa45793d6374707
+Signed-off-by: Rustam Kovhaev <rkovhaev@gmail.com>
+Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Message-Id: <20200907185535.233114-1-rkovhaev@gmail.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ virt/kvm/kvm_main.c | 21 ++++++++++++---------
+ 1 file changed, 12 insertions(+), 9 deletions(-)
+
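+A rough user-space model of the new error path (invented names, not the
+KVM API): when the shrunk copy of the array cannot be allocated, every
+device except the one being removed still has to be destroyed before the
+old array is dropped, otherwise those devices leak.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+struct dev { int id; };
+static void destroy_dev(struct dev *d) { printf("destroy %d\n", d->id); }
+
+/* Remove slot i from an array of n devices; returns the new array or NULL. */
+static struct dev **remove_dev(struct dev **devs, int n, int i)
+{
+    struct dev **copy = malloc(sizeof(*copy) * (n - 1));
+
+    if (copy) {
+        memcpy(copy, devs, sizeof(*copy) * i);
+        memcpy(copy + i, devs + i + 1, sizeof(*copy) * (n - 1 - i));
+    } else {
+        /* Cannot shrink: tear down everything except the removed slot,
+         * mirroring the kvm_iodevice_destructor() loop in the fix. */
+        for (int j = 0; j < n; j++)
+            if (j != i)
+                destroy_dev(devs[j]);
+    }
+    free(devs);
+    return copy;
+}
+
+int main(void)
+{
+    struct dev a = {1}, b = {2}, c = {3};
+    struct dev **devs = malloc(3 * sizeof(*devs));
+
+    devs[0] = &a; devs[1] = &b; devs[2] = &c;
+    devs = remove_dev(devs, 3, 1);  /* drop b; on alloc failure a and c are destroyed */
+    free(devs);
+    return 0;
+}
+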
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 71f77ae6c2a66..1e30f8706349e 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -3688,7 +3688,7 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ struct kvm_io_device *dev)
+ {
+- int i;
++ int i, j;
+ struct kvm_io_bus *new_bus, *bus;
+
+ bus = kvm_get_bus(kvm, bus_idx);
+@@ -3705,17 +3705,20 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+
+ new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) *
+ sizeof(struct kvm_io_range)), GFP_KERNEL);
+- if (!new_bus) {
++ if (new_bus) {
++ memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
++ new_bus->dev_count--;
++ memcpy(new_bus->range + i, bus->range + i + 1,
++ (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
++ } else {
+ pr_err("kvm: failed to shrink bus, removing it completely\n");
+- goto broken;
++ for (j = 0; j < bus->dev_count; j++) {
++ if (j == i)
++ continue;
++ kvm_iodevice_destructor(bus->range[j].dev);
++ }
+ }
+
+- memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
+- new_bus->dev_count--;
+- memcpy(new_bus->range + i, bus->range + i + 1,
+- (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
+-
+-broken:
+ rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
+ synchronize_srcu_expedited(&kvm->srcu);
+ kfree(bus);
+--
+2.25.1
+
--- /dev/null
+From f37b3c639d951de6bb059e6881d92a384318ecab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Sep 2020 21:20:24 -0700
+Subject: mm/thp: fix __split_huge_pmd_locked() for migration PMD
+
+From: Ralph Campbell <rcampbell@nvidia.com>
+
+[ Upstream commit ec0abae6dcdf7ef88607c869bf35a4b63ce1b370 ]
+
+A migrating transparent huge page has to already be unmapped. Otherwise,
+the page could be modified while it is being copied to a new page and data
+could be lost. The function __split_huge_pmd() checks for a PMD migration
+entry before calling __split_huge_pmd_locked() leading one to think that
+__split_huge_pmd_locked() can handle splitting a migrating PMD.
+
+However, the code always increments the page->_mapcount and adjusts the
+memory control group accounting assuming the page is mapped.
+
+Also, if the PMD entry is a migration PMD entry, the call to
+is_huge_zero_pmd(*pmd) is incorrect because it calls pmd_pfn(pmd) instead
+of migration_entry_to_pfn(pmd_to_swp_entry(pmd)). Fix these problems by
+checking for a PMD migration entry.
+
+Fixes: 84c3fc4e9c56 ("mm: thp: check pmd migration entry in common path")
+Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Yang Shi <shy828301@gmail.com>
+Reviewed-by: Zi Yan <ziy@nvidia.com>
+Cc: Jerome Glisse <jglisse@redhat.com>
+Cc: John Hubbard <jhubbard@nvidia.com>
+Cc: Alistair Popple <apopple@nvidia.com>
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: Jason Gunthorpe <jgg@nvidia.com>
+Cc: Bharata B Rao <bharata@linux.ibm.com>
+Cc: Ben Skeggs <bskeggs@redhat.com>
+Cc: Shuah Khan <shuah@kernel.org>
+Cc: <stable@vger.kernel.org> [4.14+]
+Link: https://lkml.kernel.org/r/20200903183140.19055-1-rcampbell@nvidia.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/huge_memory.c | 40 +++++++++++++++++++++++-----------------
+ 1 file changed, 23 insertions(+), 17 deletions(-)
+
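+The core distinction can be modelled in plain C (invented toy_entry type,
+not the real pmd/page API): a huge entry may be either a present mapping
+or a migration placeholder, and mapcount/memcg-style accounting only
+makes sense for entries that are actually mapped, so the split path has
+to skip it in the placeholder case.
+
+#include <stdbool.h>
+#include <stdio.h>
+
+struct toy_entry {
+    bool migration;             /* placeholder left behind while the page moves */
+    int subpage_mapcount[4];    /* stand-in for page[i]._mapcount */
+};
+
+static void toy_split(struct toy_entry *e)
+{
+    for (int i = 0; i < 4; i++) {
+        /* install the per-"pte" entry here ... */
+        if (!e->migration)      /* only count real mappings */
+            e->subpage_mapcount[i]++;
+    }
+    /* compound mapcount / memcg bookkeeping is likewise skipped when
+     * e->migration is set, matching the !pmd_migration block in the fix. */
+}
+
+int main(void)
+{
+    struct toy_entry migrating = { .migration = true };
+
+    toy_split(&migrating);
+    printf("mapcount[0]=%d\n", migrating.subpage_mapcount[0]); /* stays 0 */
+    return 0;
+}
+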
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 9f3d4f84032bc..51068ef1dff5a 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2078,7 +2078,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
+ put_page(page);
+ add_mm_counter(mm, MM_FILEPAGES, -HPAGE_PMD_NR);
+ return;
+- } else if (is_huge_zero_pmd(*pmd)) {
++ } else if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
+ return __split_huge_zero_page_pmd(vma, haddr, pmd);
+ }
+
+@@ -2131,27 +2131,33 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
+ pte = pte_offset_map(&_pmd, addr);
+ BUG_ON(!pte_none(*pte));
+ set_pte_at(mm, addr, pte, entry);
+- atomic_inc(&page[i]._mapcount);
+- pte_unmap(pte);
+- }
+-
+- /*
+- * Set PG_double_map before dropping compound_mapcount to avoid
+- * false-negative page_mapped().
+- */
+- if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) {
+- for (i = 0; i < HPAGE_PMD_NR; i++)
++ if (!pmd_migration)
+ atomic_inc(&page[i]._mapcount);
++ pte_unmap(pte);
+ }
+
+- if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
+- /* Last compound_mapcount is gone. */
+- __dec_node_page_state(page, NR_ANON_THPS);
+- if (TestClearPageDoubleMap(page)) {
+- /* No need in mapcount reference anymore */
++ if (!pmd_migration) {
++ /*
++ * Set PG_double_map before dropping compound_mapcount to avoid
++ * false-negative page_mapped().
++ */
++ if (compound_mapcount(page) > 1 &&
++ !TestSetPageDoubleMap(page)) {
+ for (i = 0; i < HPAGE_PMD_NR; i++)
+- atomic_dec(&page[i]._mapcount);
++ atomic_inc(&page[i]._mapcount);
++ }
++
++ lock_page_memcg(page);
++ if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
++ /* Last compound_mapcount is gone. */
++ __dec_lruvec_page_state(page, NR_ANON_THPS);
++ if (TestClearPageDoubleMap(page)) {
++ /* No need in mapcount reference anymore */
++ for (i = 0; i < HPAGE_PMD_NR; i++)
++ atomic_dec(&page[i]._mapcount);
++ }
+ }
++ unlock_page_memcg(page);
+ }
+
+ smp_wmb(); /* make pte visible before pmd */
+--
+2.25.1
+
--- /dev/null
+From ad02ac4f6abc1eac14b3074e9fb81da979c70a25 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 Jul 2020 21:00:03 +0530
+Subject: phy: qcom-qmp: Use correct values for ipq8074 PCIe Gen2 PHY init
+
+From: Sivaprakash Murugesan <sivaprak@codeaurora.org>
+
+[ Upstream commit afd55e6d1bd35b4b36847869011447a83a81c8e0 ]
+
+There were several problems in the ipq8074 Gen2 PCIe PHY init sequence:
+
+1. A few register values were wrongly programmed in the PHY init
+   sequence.
+2. The QSERDES_RX_SIGDET_CNTRL register is an RX tuning parameter
+   register, but it was placed in the serdes table, so the wrong
+   register was getting updated.
+3. Clocks and resets were not added in the PHY init.
+
+Fix these so that the Gen2 PCIe port on ipq8074 devices works.
+
+Fixes: eef243d04b2b6 ("phy: qcom-qmp: Add support for IPQ8074")
+Cc: stable@vger.kernel.org
+Co-developed-by: Selvam Sathappan Periakaruppan <speriaka@codeaurora.org>
+Signed-off-by: Selvam Sathappan Periakaruppan <speriaka@codeaurora.org>
+Signed-off-by: Sivaprakash Murugesan <sivaprak@codeaurora.org>
+Link: https://lore.kernel.org/r/1596036607-11877-4-git-send-email-sivaprak@codeaurora.org
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/phy/qualcomm/phy-qcom-qmp.c | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
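+The clock half of the fix uses the usual "name list plus ARRAY_SIZE"
+idiom so the list and its count cannot drift apart. A minimal
+stand-alone sketch of that idiom (generic example_* names, not the
+driver's structures):
+
+#include <stdio.h>
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+/* Clock names the PHY needs; the count is derived, never hard-coded. */
+static const char * const example_clk_l[] = {
+    "aux", "cfg_ahb",
+};
+
+struct example_cfg {
+    const char * const *clk_list;
+    int num_clks;
+};
+
+static const struct example_cfg cfg = {
+    .clk_list = example_clk_l,
+    .num_clks = ARRAY_SIZE(example_clk_l),
+};
+
+int main(void)
+{
+    for (int i = 0; i < cfg.num_clks; i++)
+        printf("clk[%d] = %s\n", i, cfg.clk_list[i]);
+    return 0;
+}
+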
+diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
+index 2526971f99299..3eeaf57e6d939 100644
+--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
++++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
+@@ -102,6 +102,8 @@
+ #define QSERDES_COM_CORECLK_DIV_MODE1 0x1bc
+
+ /* QMP PHY TX registers */
++#define QSERDES_TX_EMP_POST1_LVL 0x018
++#define QSERDES_TX_SLEW_CNTL 0x040
+ #define QSERDES_TX_RES_CODE_LANE_OFFSET 0x054
+ #define QSERDES_TX_DEBUG_BUS_SEL 0x064
+ #define QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN 0x068
+@@ -394,8 +396,8 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0xf),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x0),
+- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0x1f),
+- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f),
++ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
++ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x6),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0xf),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x0),
+@@ -421,7 +423,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CTRL_BY_PSM, 0x1),
+- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_CTRL, 0xa),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x1),
+@@ -430,7 +431,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_EP_DIV, 0x19),
+- QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x7),
+ };
+
+ static const struct qmp_phy_init_tbl ipq8074_pcie_tx_tbl[] = {
+@@ -438,6 +438,8 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x6),
+ QMP_PHY_INIT_CFG(QSERDES_TX_RES_CODE_LANE_OFFSET, 0x2),
+ QMP_PHY_INIT_CFG(QSERDES_TX_RCV_DETECT_LVL_2, 0x12),
++ QMP_PHY_INIT_CFG(QSERDES_TX_EMP_POST1_LVL, 0x36),
++ QMP_PHY_INIT_CFG(QSERDES_TX_SLEW_CNTL, 0x0a),
+ };
+
+ static const struct qmp_phy_init_tbl ipq8074_pcie_rx_tbl[] = {
+@@ -448,7 +450,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xdb),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x4),
+- QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN_HALF, 0x4),
+ };
+
+ static const struct qmp_phy_init_tbl ipq8074_pcie_pcs_tbl[] = {
+@@ -665,6 +666,9 @@ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
+ .mask_pcs_ready = PHYSTATUS,
+ };
+
++static const char * const ipq8074_pciephy_clk_l[] = {
++ "aux", "cfg_ahb",
++};
+ /* list of resets */
+ static const char * const ipq8074_pciephy_reset_l[] = {
+ "phy", "common",
+@@ -682,8 +686,8 @@ static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
+ .rx_tbl_num = ARRAY_SIZE(ipq8074_pcie_rx_tbl),
+ .pcs_tbl = ipq8074_pcie_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(ipq8074_pcie_pcs_tbl),
+- .clk_list = NULL,
+- .num_clks = 0,
++ .clk_list = ipq8074_pciephy_clk_l,
++ .num_clks = ARRAY_SIZE(ipq8074_pciephy_clk_l),
+ .reset_list = ipq8074_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
+ .vreg_list = NULL,
+--
+2.25.1
+
--- /dev/null
+From bec7e54c0d5d12acf8fd632c153e625183e8bbb1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Sep 2020 18:24:49 +0900
+Subject: RDMA/ucma: ucma_context reference leak in error path
+
+From: Shamir Rabinovitch <shamir.rabinovitch@oracle.com>
+
+commit ef95a90ae6f4f21990e1f7ced6719784a409e811 upstream.
+
+Validating input parameters should be done before getting the cm_id,
+otherwise a cm_id reference can be leaked.
+
+Fixes: 6a21dfc0d0db ("RDMA/ucma: Limit possible option size")
+Signed-off-by: Shamir Rabinovitch <shamir.rabinovitch@oracle.com>
+Reviewed-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+[iwamatsu: Backported to 4.4, 4.9 and 4.14: adjust context]
+Signed-off-by: Nobuhiro Iwamatsu (CIP) <nobuhiro1.iwamatsu@toshiba.co.jp>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/core/ucma.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
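+The pattern behind the fix, as a small user-space sketch (invented
+get_ctx()/put_ctx() helpers, not the ucma API): validation that needs
+nothing from the context should run before the reference is taken, so
+no error path has to remember to drop it again.
+
+#include <errno.h>
+#include <stddef.h>
+#include <stdio.h>
+
+struct ctx { int refcount; };
+static struct ctx global_ctx = { .refcount = 1 };
+
+static struct ctx *get_ctx(void)   { global_ctx.refcount++; return &global_ctx; }
+static void put_ctx(struct ctx *c) { c->refcount--; }
+
+static int set_option(size_t optlen, size_t max)
+{
+    struct ctx *c;
+
+    if (optlen > max)   /* validate first: nothing acquired, nothing to undo */
+        return -EINVAL;
+
+    c = get_ctx();
+    /* ... use c ... */
+    put_ctx(c);         /* every later exit path must pair with get_ctx() */
+    return 0;
+}
+
+int main(void)
+{
+    set_option(64, 32); /* rejected before a reference is ever taken */
+    printf("refcount=%d\n", global_ctx.refcount);   /* still 1 */
+    return 0;
+}
+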
+diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
+index c3e5f921da12e..4002a8ddf6d0a 100644
+--- a/drivers/infiniband/core/ucma.c
++++ b/drivers/infiniband/core/ucma.c
+@@ -1315,13 +1315,13 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
++ if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
++ return -EINVAL;
++
+ ctx = ucma_get_ctx(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+- if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
+- return -EINVAL;
+-
+ optval = memdup_user((void __user *) (unsigned long) cmd.optval,
+ cmd.optlen);
+ if (IS_ERR(optval)) {
+--
+2.25.1
+
af_key-pfkey_dump-needs-parameter-validation.patch
+phy-qcom-qmp-use-correct-values-for-ipq8074-pcie-gen.patch
+kvm-fix-memory-leak-in-kvm_io_bus_unregister_dev.patch
+kprobes-fix-kill-kprobe-which-has-been-marked-as-gon.patch
+mm-thp-fix-__split_huge_pmd_locked-for-migration-pmd.patch
+rdma-ucma-ucma_context-reference-leak-in-error-path.patch