5.3-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 27 Oct 2019 20:14:07 +0000 (21:14 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 27 Oct 2019 20:14:07 +0000 (21:14 +0100)
added patches:
rdma-cxgb4-do-not-dma-memory-off-of-the-stack.patch

queue-5.3/rdma-cxgb4-do-not-dma-memory-off-of-the-stack.patch [new file with mode: 0644]
queue-5.3/series

diff --git a/queue-5.3/rdma-cxgb4-do-not-dma-memory-off-of-the-stack.patch b/queue-5.3/rdma-cxgb4-do-not-dma-memory-off-of-the-stack.patch
new file mode 100644
index 0000000..8d56d22
--- /dev/null
+++ b/queue-5.3/rdma-cxgb4-do-not-dma-memory-off-of-the-stack.patch
@@ -0,0 +1,104 @@
+From 3840c5b78803b2b6cc1ff820100a74a092c40cbb Mon Sep 17 00:00:00 2001
+From: Greg KH <gregkh@linuxfoundation.org>
+Date: Tue, 1 Oct 2019 18:56:11 +0200
+Subject: RDMA/cxgb4: Do not dma memory off of the stack
+
+From: Greg KH <gregkh@linuxfoundation.org>
+
+commit 3840c5b78803b2b6cc1ff820100a74a092c40cbb upstream.
+
+Nicolas pointed out that the cxgb4 driver is doing dma off of the stack,
+which is generally considered a very bad thing.  On some architectures it
+could be a security problem, but odds are none of them actually run this
+driver, so it's just a "normal" bug.
+
+Resolve this by allocating the memory for a message off of the heap
+instead of the stack.  kmalloc() always will give us a proper memory
+location that DMA will work correctly from.
+
+Link: https://lore.kernel.org/r/20191001165611.GA3542072@kroah.com
+Reported-by: Nicolas Waisman <nico@semmle.com>
+Tested-by: Potnuri Bharat Teja <bharat@chelsio.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/cxgb4/mem.c |   28 +++++++++++++++++-----------
+ 1 file changed, 17 insertions(+), 11 deletions(-)
+
+--- a/drivers/infiniband/hw/cxgb4/mem.c
++++ b/drivers/infiniband/hw/cxgb4/mem.c
+@@ -275,13 +275,17 @@ static int write_tpt_entry(struct c4iw_r
+                          struct sk_buff *skb, struct c4iw_wr_wait *wr_waitp)
+ {
+       int err;
+-      struct fw_ri_tpte tpt;
++      struct fw_ri_tpte *tpt;
+       u32 stag_idx;
+       static atomic_t key;
+       if (c4iw_fatal_error(rdev))
+               return -EIO;
++      tpt = kmalloc(sizeof(*tpt), GFP_KERNEL);
++      if (!tpt)
++              return -ENOMEM;
++
+       stag_state = stag_state > 0;
+       stag_idx = (*stag) >> 8;
+@@ -291,6 +295,7 @@ static int write_tpt_entry(struct c4iw_r
+                       mutex_lock(&rdev->stats.lock);
+                       rdev->stats.stag.fail++;
+                       mutex_unlock(&rdev->stats.lock);
++                      kfree(tpt);
+                       return -ENOMEM;
+               }
+               mutex_lock(&rdev->stats.lock);
+@@ -305,28 +310,28 @@ static int write_tpt_entry(struct c4iw_r
+       /* write TPT entry */
+       if (reset_tpt_entry)
+-              memset(&tpt, 0, sizeof(tpt));
++              memset(tpt, 0, sizeof(*tpt));
+       else {
+-              tpt.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
++              tpt->valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
+                       FW_RI_TPTE_STAGKEY_V((*stag & FW_RI_TPTE_STAGKEY_M)) |
+                       FW_RI_TPTE_STAGSTATE_V(stag_state) |
+                       FW_RI_TPTE_STAGTYPE_V(type) | FW_RI_TPTE_PDID_V(pdid));
+-              tpt.locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
++              tpt->locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
+                       (bind_enabled ? FW_RI_TPTE_MWBINDEN_F : 0) |
+                       FW_RI_TPTE_ADDRTYPE_V((zbva ? FW_RI_ZERO_BASED_TO :
+                                                     FW_RI_VA_BASED_TO))|
+                       FW_RI_TPTE_PS_V(page_size));
+-              tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
++              tpt->nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
+                       FW_RI_TPTE_PBLADDR_V(PBL_OFF(rdev, pbl_addr)>>3));
+-              tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
+-              tpt.va_hi = cpu_to_be32((u32)(to >> 32));
+-              tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
+-              tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
+-              tpt.len_hi = cpu_to_be32((u32)(len >> 32));
++              tpt->len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
++              tpt->va_hi = cpu_to_be32((u32)(to >> 32));
++              tpt->va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
++              tpt->dca_mwbcnt_pstag = cpu_to_be32(0);
++              tpt->len_hi = cpu_to_be32((u32)(len >> 32));
+       }
+       err = write_adapter_mem(rdev, stag_idx +
+                               (rdev->lldi.vr->stag.start >> 5),
+-                              sizeof(tpt), &tpt, skb, wr_waitp);
++                              sizeof(*tpt), tpt, skb, wr_waitp);
+       if (reset_tpt_entry) {
+               c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
+@@ -334,6 +339,7 @@ static int write_tpt_entry(struct c4iw_r
+               rdev->stats.stag.cur -= 32;
+               mutex_unlock(&rdev->stats.lock);
+       }
++      kfree(tpt);
+       return err;
+ }
diff --git a/queue-5.3/series b/queue-5.3/series
index 872a058a6827655bcdba198349823e5e848fa7bd..7fcb3610856f5f83d3f7267b23274f8cebf4dee0 100644
--- a/queue-5.3/series
+++ b/queue-5.3/series
@@ -194,3 +194,4 @@ pci-pm-fix-pci_power_up.patch
 opp-of-drop-incorrect-lockdep_assert_held.patch
 of-reserved_mem-add-missing-of_node_put-for-proper-ref-counting.patch
 blk-rq-qos-fix-first-node-deletion-of-rq_qos_del.patch
+rdma-cxgb4-do-not-dma-memory-off-of-the-stack.patch
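---

The upstream commit message in the patch above explains the fix: a buffer handed to a DMA path must not live on the stack, so the driver now kmalloc()s the TPT entry and kfree()s it on every exit path. Below is a minimal illustrative sketch of that same pattern, not part of the patch itself; struct demo_dev, demo_dma_write() and demo_send_message() are hypothetical stand-ins for the driver's real device handle and transport routine (write_adapter_mem() and struct fw_ri_tpte in drivers/infiniband/hw/cxgb4/mem.c).

/*
 * Illustrative sketch only: heap-allocate a message that will be handed
 * to a DMA/work-request path instead of declaring it on the stack.
 * All identifiers prefixed with demo_ are hypothetical.
 */
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/byteorder.h>

struct demo_dev;				/* hypothetical device handle */
int demo_dma_write(struct demo_dev *dev, void *buf, size_t len);

struct demo_msg {
	__be32 hdr;
	__be32 payload;
};

static int demo_send_message(struct demo_dev *dev)
{
	struct demo_msg *msg;
	int err;

	/*
	 * Allocate from the heap rather than writing "struct demo_msg msg;"
	 * on the stack: as the commit message notes, kmalloc() memory is a
	 * proper location for DMA, while stack memory may not be.
	 */
	msg = kmalloc(sizeof(*msg), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->hdr = cpu_to_be32(1);
	msg->payload = cpu_to_be32(2);

	err = demo_dma_write(dev, msg, sizeof(*msg));

	/* Free on every exit path, mirroring the kfree(tpt) calls above. */
	kfree(msg);
	return err;
}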