git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.20-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 29 Jan 2019 09:52:50 +0000 (10:52 +0100)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 29 Jan 2019 09:52:50 +0000 (10:52 +0100)
added patches:
nvmet-rdma-add-unlikely-for-response-allocated-check.patch
nvmet-rdma-fix-null-dereference-under-heavy-load.patch

queue-4.20/nvmet-rdma-add-unlikely-for-response-allocated-check.patch [new file with mode: 0644]
queue-4.20/nvmet-rdma-fix-null-dereference-under-heavy-load.patch [new file with mode: 0644]
queue-4.20/series

diff --git a/queue-4.20/nvmet-rdma-add-unlikely-for-response-allocated-check.patch b/queue-4.20/nvmet-rdma-add-unlikely-for-response-allocated-check.patch
new file mode 100644 (file)
index 0000000..a2b06a8
--- /dev/null
@@ -0,0 +1,32 @@
+From ad1f824948e4ed886529219cf7cd717d078c630d Mon Sep 17 00:00:00 2001
+From: Israel Rukshin <israelr@mellanox.com>
+Date: Mon, 19 Nov 2018 10:58:51 +0000
+Subject: nvmet-rdma: Add unlikely for response allocated check
+
+From: Israel Rukshin <israelr@mellanox.com>
+
+commit ad1f824948e4ed886529219cf7cd717d078c630d upstream.
+
+Signed-off-by: Israel Rukshin <israelr@mellanox.com>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Cc: Raju Rangoju <rajur@chelsio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/nvme/target/rdma.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/nvme/target/rdma.c
++++ b/drivers/nvme/target/rdma.c
+@@ -196,7 +196,7 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp
+ {
+       unsigned long flags;
+-      if (rsp->allocated) {
++      if (unlikely(rsp->allocated)) {
+               kfree(rsp);
+               return;
+       }
diff --git a/queue-4.20/nvmet-rdma-fix-null-dereference-under-heavy-load.patch b/queue-4.20/nvmet-rdma-fix-null-dereference-under-heavy-load.patch
new file mode 100644 (file)
index 0000000..daa9de8
--- /dev/null
@@ -0,0 +1,71 @@
+From 5cbab6303b4791a3e6713dfe2c5fda6a867f9adc Mon Sep 17 00:00:00 2001
+From: Raju Rangoju <rajur@chelsio.com>
+Date: Thu, 3 Jan 2019 23:05:31 +0530
+Subject: nvmet-rdma: fix null dereference under heavy load
+
+From: Raju Rangoju <rajur@chelsio.com>
+
+commit 5cbab6303b4791a3e6713dfe2c5fda6a867f9adc upstream.
+
+Under heavy load if we don't have any pre-allocated rsps left, we
+dynamically allocate a rsp, but we are not actually allocating memory
+for nvme_completion (rsp->req.rsp). In such a case, accessing pointer
+fields (req->rsp->status) in nvmet_req_init() will result in crash.
+
+To fix this, allocate the memory for nvme_completion by calling
+nvmet_rdma_alloc_rsp()
+
+Fixes: 8407879c2615 ("nvmet-rdma: fix possible bogus dereference under heavy load")
+
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Raju Rangoju <rajur@chelsio.com>
+Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/nvme/target/rdma.c |   15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+--- a/drivers/nvme/target/rdma.c
++++ b/drivers/nvme/target/rdma.c
+@@ -139,6 +139,10 @@ static void nvmet_rdma_recv_done(struct
+ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
+ static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
+ static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
++static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
++                              struct nvmet_rdma_rsp *r);
++static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
++                              struct nvmet_rdma_rsp *r);
+ static const struct nvmet_fabrics_ops nvmet_rdma_ops;
+@@ -182,9 +186,17 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_que
+       spin_unlock_irqrestore(&queue->rsps_lock, flags);
+       if (unlikely(!rsp)) {
+-              rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
++              int ret;
++
++              rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
+               if (unlikely(!rsp))
+                       return NULL;
++              ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
++              if (unlikely(ret)) {
++                      kfree(rsp);
++                      return NULL;
++              }
++
+               rsp->allocated = true;
+       }
+@@ -197,6 +209,7 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp
+       unsigned long flags;
+       if (unlikely(rsp->allocated)) {
++              nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
+               kfree(rsp);
+               return;
+       }
index 91d430be240c4b1fc6e1600da9a85f31ee4bab62..c01a712def7b8315ae46f766e38be3c3d9c8a890 100644 (file)
@@ -93,6 +93,8 @@ vt-invoke-notifier-on-screen-size-change.patch
 drm-meson-fix-atomic-mode-switching-regression.patch
 bpf-move-prev_-insn_idx-into-verifier-env.patch
 bpf-move-tmp-variable-into-ax-register-in-interprete.patch
+nvmet-rdma-add-unlikely-for-response-allocated-check.patch
+nvmet-rdma-fix-null-dereference-under-heavy-load.patch
 bpf-enable-access-to-ax-register-also-from-verifier-.patch
 bpf-restrict-map-value-pointer-arithmetic-for-unpriv.patch
 bpf-restrict-stack-pointer-arithmetic-for-unprivileg.patch