git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.15-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 22 Mar 2018 21:21:34 +0000 (22:21 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 22 Mar 2018 21:21:34 +0000 (22:21 +0100)
added patches:
rdma-vmw_pvrdma-fix-usage-of-user-response-structures-in-abi-file.patch

queue-4.15/rdma-vmw_pvrdma-fix-usage-of-user-response-structures-in-abi-file.patch [new file with mode: 0644]
queue-4.15/series

diff --git a/queue-4.15/rdma-vmw_pvrdma-fix-usage-of-user-response-structures-in-abi-file.patch b/queue-4.15/rdma-vmw_pvrdma-fix-usage-of-user-response-structures-in-abi-file.patch
new file mode 100644 (file)
index 0000000..dec6cc8
--- /dev/null
@@ -0,0 +1,103 @@
+From 1f5a6c47aabc4606f91ad2e6ef71a1ff1924101c Mon Sep 17 00:00:00 2001
+From: Adit Ranadive <aditr@vmware.com>
+Date: Thu, 15 Feb 2018 12:36:46 -0800
+Subject: RDMA/vmw_pvrdma: Fix usage of user response structures in ABI file
+
+From: Adit Ranadive <aditr@vmware.com>
+
+commit 1f5a6c47aabc4606f91ad2e6ef71a1ff1924101c upstream.
+
+This ensures that we return the right structures back to userspace.
+Otherwise, it looks like the reserved fields in the response structures
+in userspace might have uninitialized data in them.
+
+Fixes: 8b10ba783c9d ("RDMA/vmw_pvrdma: Add shared receive queue support")
+Fixes: 29c8d9eba550 ("IB: Add vmw_pvrdma driver")
+Suggested-by: Jason Gunthorpe <jgg@mellanox.com>
+Reviewed-by: Bryan Tan <bryantan@vmware.com>
+Reviewed-by: Aditya Sarwade <asarwade@vmware.com>
+Reviewed-by: Jorgen Hansen <jhansen@vmware.com>
+Signed-off-by: Adit Ranadive <aditr@vmware.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c    |    4 +++-
+ drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c   |    4 +++-
+ drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c |    4 +++-
+ 3 files changed, 9 insertions(+), 3 deletions(-)
+
+--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
++++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+@@ -114,6 +114,7 @@ struct ib_cq *pvrdma_create_cq(struct ib
+       union pvrdma_cmd_resp rsp;
+       struct pvrdma_cmd_create_cq *cmd = &req.create_cq;
+       struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp;
++      struct pvrdma_create_cq_resp cq_resp = {0};
+       struct pvrdma_create_cq ucmd;
+       BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64);
+@@ -198,6 +199,7 @@ struct ib_cq *pvrdma_create_cq(struct ib
+       cq->ibcq.cqe = resp->cqe;
+       cq->cq_handle = resp->cq_handle;
++      cq_resp.cqn = resp->cq_handle;
+       spin_lock_irqsave(&dev->cq_tbl_lock, flags);
+       dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq;
+       spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
+@@ -206,7 +208,7 @@ struct ib_cq *pvrdma_create_cq(struct ib
+               cq->uar = &(to_vucontext(context)->uar);
+               /* Copy udata back. */
+-              if (ib_copy_to_udata(udata, &cq->cq_handle, sizeof(__u32))) {
++              if (ib_copy_to_udata(udata, &cq_resp, sizeof(cq_resp))) {
+                       dev_warn(&dev->pdev->dev,
+                                "failed to copy back udata\n");
+                       pvrdma_destroy_cq(&cq->ibcq);
+--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
++++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
+@@ -113,6 +113,7 @@ struct ib_srq *pvrdma_create_srq(struct
+       union pvrdma_cmd_resp rsp;
+       struct pvrdma_cmd_create_srq *cmd = &req.create_srq;
+       struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp;
++      struct pvrdma_create_srq_resp srq_resp = {0};
+       struct pvrdma_create_srq ucmd;
+       unsigned long flags;
+       int ret;
+@@ -204,12 +205,13 @@ struct ib_srq *pvrdma_create_srq(struct
+       }
+       srq->srq_handle = resp->srqn;
++      srq_resp.srqn = resp->srqn;
+       spin_lock_irqsave(&dev->srq_tbl_lock, flags);
+       dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = srq;
+       spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);
+       /* Copy udata back. */
+-      if (ib_copy_to_udata(udata, &srq->srq_handle, sizeof(__u32))) {
++      if (ib_copy_to_udata(udata, &srq_resp, sizeof(srq_resp))) {
+               dev_warn(&dev->pdev->dev, "failed to copy back udata\n");
+               pvrdma_destroy_srq(&srq->ibsrq);
+               return ERR_PTR(-EINVAL);
+--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
++++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
+@@ -447,6 +447,7 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_
+       union pvrdma_cmd_resp rsp;
+       struct pvrdma_cmd_create_pd *cmd = &req.create_pd;
+       struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp;
++      struct pvrdma_alloc_pd_resp pd_resp = {0};
+       int ret;
+       void *ptr;
+@@ -475,9 +476,10 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_
+       pd->privileged = !context;
+       pd->pd_handle = resp->pd_handle;
+       pd->pdn = resp->pd_handle;
++      pd_resp.pdn = resp->pd_handle;
+       if (context) {
+-              if (ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
++              if (ib_copy_to_udata(udata, &pd_resp, sizeof(pd_resp))) {
+                       dev_warn(&dev->pdev->dev,
+                                "failed to copy back protection domain\n");
+                       pvrdma_dealloc_pd(&pd->ibpd);
index ed3610e25e032172e6c59ce45dc4498aa173bfa6..b1d992790568d4b27ab194a097e782d44f58784f 100644 (file)
@@ -76,6 +76,7 @@ hwrng-core-clean-up-rng-list-when-last-hwrng-is-unregistered.patch
 dmaengine-ti-dma-crossbar-fix-event-mapping-for-tpcc_evt_mux_60_63.patch
 ib-mlx5-fix-integer-overflows-in-mlx5_ib_create_srq.patch
 ib-mlx5-fix-out-of-bounds-read-in-create_raw_packet_qp_rq.patch
+rdma-vmw_pvrdma-fix-usage-of-user-response-structures-in-abi-file.patch
 serial-8250_pci-don-t-fail-on-multiport-card-class.patch
 rdma-core-do-not-use-invalid-destination-in-determining-port-reuse.patch
 clk-migrate-the-count-of-orphaned-clocks-at-init.patch