git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.19-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 29 Jan 2019 09:52:30 +0000 (10:52 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 29 Jan 2019 09:52:30 +0000 (10:52 +0100)
added patches:
nvmet-rdma-add-unlikely-for-response-allocated-check.patch
nvmet-rdma-fix-null-dereference-under-heavy-load.patch
s390-smp-fix-calling-smp_call_ipl_cpu-from-ipl-cpu.patch

queue-4.19/bpf-fix-inner-map-masking-to-prevent-oob-under-specu.patch
queue-4.19/nvmet-rdma-add-unlikely-for-response-allocated-check.patch [new file with mode: 0644]
queue-4.19/nvmet-rdma-fix-null-dereference-under-heavy-load.patch [new file with mode: 0644]
queue-4.19/s390-smp-fix-calling-smp_call_ipl_cpu-from-ipl-cpu.patch [new file with mode: 0644]
queue-4.19/series

diff --git a/queue-4.19/bpf-fix-inner-map-masking-to-prevent-oob-under-specu.patch b/queue-4.19/bpf-fix-inner-map-masking-to-prevent-oob-under-specu.patch
index 3c0174e120309b45a97d124dacb4a2e03b9394f9..d3185754257a9b279be46a934e93d344102c9e7b 100644
@@ -87,11 +87,9 @@ Signed-off-by: Alexei Starovoitov <ast@kernel.org>
 Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
 Signed-off-by: Sasha Levin <sashal@kernel.org>
 ---
- kernel/bpf/map_in_map.c | 17 +++++++++++++++--
+ kernel/bpf/map_in_map.c |   17 +++++++++++++++--
  1 file changed, 15 insertions(+), 2 deletions(-)
 
-diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c
-index 3bfbf4464416..9670ee5ee74e 100644
 --- a/kernel/bpf/map_in_map.c
 +++ b/kernel/bpf/map_in_map.c
 @@ -12,6 +12,7 @@
@@ -102,7 +100,7 @@ index 3bfbf4464416..9670ee5ee74e 100644
        struct fd f;
  
        f = fdget(inner_map_ufd);
-@@ -35,7 +36,12 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
+@@ -35,7 +36,12 @@ struct bpf_map *bpf_map_meta_alloc(int i
                return ERR_PTR(-EINVAL);
        }
  
@@ -116,7 +114,7 @@ index 3bfbf4464416..9670ee5ee74e 100644
        if (!inner_map_meta) {
                fdput(f);
                return ERR_PTR(-ENOMEM);
-@@ -45,9 +51,16 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
+@@ -45,9 +51,16 @@ struct bpf_map *bpf_map_meta_alloc(int i
        inner_map_meta->key_size = inner_map->key_size;
        inner_map_meta->value_size = inner_map->value_size;
        inner_map_meta->map_flags = inner_map->map_flags;
@@ -134,6 +132,3 @@ index 3bfbf4464416..9670ee5ee74e 100644
        fdput(f);
        return inner_map_meta;
  }
--- 
-2.19.1
-
diff --git a/queue-4.19/nvmet-rdma-add-unlikely-for-response-allocated-check.patch b/queue-4.19/nvmet-rdma-add-unlikely-for-response-allocated-check.patch
new file mode 100644
index 0000000..a2b06a8
--- /dev/null
@@ -0,0 +1,32 @@
+From ad1f824948e4ed886529219cf7cd717d078c630d Mon Sep 17 00:00:00 2001
+From: Israel Rukshin <israelr@mellanox.com>
+Date: Mon, 19 Nov 2018 10:58:51 +0000
+Subject: nvmet-rdma: Add unlikely for response allocated check
+
+From: Israel Rukshin <israelr@mellanox.com>
+
+commit ad1f824948e4ed886529219cf7cd717d078c630d upstream.
+
+Signed-off-by: Israel Rukshin <israelr@mellanox.com>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Cc: Raju Rangoju <rajur@chelsio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/nvme/target/rdma.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/nvme/target/rdma.c
++++ b/drivers/nvme/target/rdma.c
+@@ -196,7 +196,7 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp
+ {
+       unsigned long flags;
+-      if (rsp->allocated) {
++      if (unlikely(rsp->allocated)) {
+               kfree(rsp);
+               return;
+       }
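The change above is purely a branch-predictor hint: in the kernel, unlikely() expands to __builtin_expect(!!(x), 0), telling the compiler that the fallback-allocated case is the cold path so the pooled (common) case can be laid out as straight-line code. A minimal standalone illustration of the idiom, not taken from the driver (the function and its argument are hypothetical):

#include <linux/compiler.h>	/* likely()/unlikely() */

/* Hypothetical example: the argument is rarely non-zero under normal load. */
static int handle_rsp(int came_from_fallback)
{
	if (unlikely(came_from_fallback)) {
		/* cold path: compiler keeps this out of the hot instruction stream */
		return 1;
	}
	/* hot path: pooled response, falls through without a taken branch */
	return 0;
}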
diff --git a/queue-4.19/nvmet-rdma-fix-null-dereference-under-heavy-load.patch b/queue-4.19/nvmet-rdma-fix-null-dereference-under-heavy-load.patch
new file mode 100644
index 0000000..daa9de8
--- /dev/null
@@ -0,0 +1,71 @@
+From 5cbab6303b4791a3e6713dfe2c5fda6a867f9adc Mon Sep 17 00:00:00 2001
+From: Raju Rangoju <rajur@chelsio.com>
+Date: Thu, 3 Jan 2019 23:05:31 +0530
+Subject: nvmet-rdma: fix null dereference under heavy load
+
+From: Raju Rangoju <rajur@chelsio.com>
+
+commit 5cbab6303b4791a3e6713dfe2c5fda6a867f9adc upstream.
+
+Under heavy load, if we don't have any pre-allocated rsps left, we
+dynamically allocate a rsp, but we are not actually allocating memory
+for the nvme_completion (rsp->req.rsp). In such a case, accessing pointer
+fields (req->rsp->status) in nvmet_req_init() will result in a crash.
+
+To fix this, allocate the memory for nvme_completion by calling
+nvmet_rdma_alloc_rsp().
+
+Fixes: 8407879c ("nvmet-rdma: fix possible bogus dereference under heavy load")
+
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Raju Rangoju <rajur@chelsio.com>
+Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/nvme/target/rdma.c |   15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+--- a/drivers/nvme/target/rdma.c
++++ b/drivers/nvme/target/rdma.c
+@@ -139,6 +139,10 @@ static void nvmet_rdma_recv_done(struct
+ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
+ static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
+ static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
++static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
++                              struct nvmet_rdma_rsp *r);
++static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
++                              struct nvmet_rdma_rsp *r);
+ static const struct nvmet_fabrics_ops nvmet_rdma_ops;
+@@ -182,9 +186,17 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_que
+       spin_unlock_irqrestore(&queue->rsps_lock, flags);
+       if (unlikely(!rsp)) {
+-              rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
++              int ret;
++
++              rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
+               if (unlikely(!rsp))
+                       return NULL;
++              ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
++              if (unlikely(ret)) {
++                      kfree(rsp);
++                      return NULL;
++              }
++
+               rsp->allocated = true;
+       }
+@@ -197,6 +209,7 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp
+       unsigned long flags;
+       if (unlikely(rsp->allocated)) {
++              nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
+               kfree(rsp);
+               return;
+       }
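Taken together, the two nvmet-rdma patches above restore a symmetric get/put pair for the rare fallback allocation. A rough sketch of that pattern, not taken from the driver: the struct is reduced to the fields discussed in the changelog, and pool_try_get(), pool_put(), alloc_completion() and free_completion() are hypothetical stand-ins for the queue's rsp list and for nvmet_rdma_alloc_rsp()/nvmet_rdma_free_rsp(); kzalloc(), kfree() and unlikely() are the real kernel primitives the patch uses.

#include <linux/compiler.h>	/* unlikely() */
#include <linux/slab.h>		/* kzalloc(), kfree(), GFP_KERNEL */
#include <linux/types.h>	/* bool */

struct sketch_rsp {
	void *cqe;		/* stands in for the nvme_completion the bug left unallocated */
	bool allocated;		/* true only for fallback (kzalloc'd) entries */
};

/* Hypothetical helpers standing in for the pre-allocated pool and for the
 * nvmet_rdma_alloc_rsp()/nvmet_rdma_free_rsp() setup and teardown. */
extern struct sketch_rsp *pool_try_get(void);
extern void pool_put(struct sketch_rsp *rsp);
extern int alloc_completion(struct sketch_rsp *rsp);
extern void free_completion(struct sketch_rsp *rsp);

static struct sketch_rsp *sketch_get_rsp(void)
{
	struct sketch_rsp *rsp = pool_try_get();	/* fast path: pre-allocated entry */

	if (unlikely(!rsp)) {
		rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
		if (unlikely(!rsp))
			return NULL;
		/* The fix: give the fallback entry the same completion buffer
		 * a pooled entry already has, or callers dereference NULL. */
		if (alloc_completion(rsp)) {
			kfree(rsp);
			return NULL;
		}
		rsp->allocated = true;
	}
	return rsp;
}

static void sketch_put_rsp(struct sketch_rsp *rsp)
{
	if (unlikely(rsp->allocated)) {
		free_completion(rsp);	/* tear down what the fallback path set up */
		kfree(rsp);
		return;
	}
	pool_put(rsp);			/* pooled entry goes back on the list */
}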
diff --git a/queue-4.19/s390-smp-fix-calling-smp_call_ipl_cpu-from-ipl-cpu.patch b/queue-4.19/s390-smp-fix-calling-smp_call_ipl_cpu-from-ipl-cpu.patch
new file mode 100644
index 0000000..a6d0c8a
--- /dev/null
@@ -0,0 +1,79 @@
+From 60f1bf29c0b2519989927cae640cd1f50f59dc7f Mon Sep 17 00:00:00 2001
+From: David Hildenbrand <david@redhat.com>
+Date: Fri, 11 Jan 2019 15:18:22 +0100
+Subject: s390/smp: Fix calling smp_call_ipl_cpu() from ipl CPU
+
+From: David Hildenbrand <david@redhat.com>
+
+commit 60f1bf29c0b2519989927cae640cd1f50f59dc7f upstream.
+
+When calling smp_call_ipl_cpu() from the IPL CPU, we will try to read
+from pcpu_devices->lowcore. However, due to prefixing, that will result
+in reading from absolute address 0 on that CPU. We have to go via the
+actual lowcore instead.
+
+This means that right now, we will read lc->nodat_stack == 0 and
+therefore work on a very wrong stack.
+
+This BUG essentially broke rebooting under QEMU TCG (which will report
+a low address protection exception). It is also broken under KVM, where
+it can easily be triggered with 1 VCPU.
+
+:/# echo 1 > /proc/sys/kernel/sysrq
+:/# echo b > /proc/sysrq-trigger
+[   28.476745] sysrq: SysRq : Resetting
+[   28.476793] Kernel stack overflow.
+[   28.476817] CPU: 0 PID: 424 Comm: sh Not tainted 5.0.0-rc1+ #13
+[   28.476820] Hardware name: IBM 2964 NE1 716 (KVM/Linux)
+[   28.476826] Krnl PSW : 0400c00180000000 0000000000115c0c (pcpu_delegate+0x12c/0x140)
+[   28.476861]            R:0 T:1 IO:0 EX:0 Key:0 M:0 W:0 P:0 AS:3 CC:0 PM:0 RI:0 EA:3
+[   28.476863] Krnl GPRS: ffffffffffffffff 0000000000000000 000000000010dff8 0000000000000000
+[   28.476864]            0000000000000000 0000000000000000 0000000000ab7090 000003e0006efbf0
+[   28.476864]            000000000010dff8 0000000000000000 0000000000000000 0000000000000000
+[   28.476865]            000000007fffc000 0000000000730408 000003e0006efc58 0000000000000000
+[   28.476887] Krnl Code: 0000000000115bfe: 4170f000            la      %r7,0(%r15)
+[   28.476887]            0000000000115c02: 41f0a000            la      %r15,0(%r10)
+[   28.476887]           #0000000000115c06: e370f0980024        stg     %r7,152(%r15)
+[   28.476887]           >0000000000115c0c: c0e5fffff86e        brasl   %r14,114ce8
+[   28.476887]            0000000000115c12: 41f07000            la      %r15,0(%r7)
+[   28.476887]            0000000000115c16: a7f4ffa8            brc     15,115b66
+[   28.476887]            0000000000115c1a: 0707                bcr     0,%r7
+[   28.476887]            0000000000115c1c: 0707                bcr     0,%r7
+[   28.476901] Call Trace:
+[   28.476902] Last Breaking-Event-Address:
+[   28.476920]  [<0000000000a01c4a>] arch_call_rest_init+0x22/0x80
+[   28.476927] Kernel panic - not syncing: Corrupt kernel stack, can't continue.
+[   28.476930] CPU: 0 PID: 424 Comm: sh Not tainted 5.0.0-rc1+ #13
+[   28.476932] Hardware name: IBM 2964 NE1 716 (KVM/Linux)
+[   28.476932] Call Trace:
+
+Fixes: 2f859d0dad81 ("s390/smp: reduce size of struct pcpu")
+Cc: stable@vger.kernel.org # 4.0+
+Reported-by: Cornelia Huck <cohuck@redhat.com>
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ arch/s390/kernel/smp.c |    8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/arch/s390/kernel/smp.c
++++ b/arch/s390/kernel/smp.c
+@@ -371,9 +371,13 @@ void smp_call_online_cpu(void (*func)(vo
+  */
+ void smp_call_ipl_cpu(void (*func)(void *), void *data)
+ {
++      struct lowcore *lc = pcpu_devices->lowcore;
++
++      if (pcpu_devices[0].address == stap())
++              lc = &S390_lowcore;
++
+       pcpu_delegate(&pcpu_devices[0], func, data,
+-                    pcpu_devices->lowcore->panic_stack -
+-                    PANIC_FRAME_OFFSET + PAGE_SIZE);
++                    lc->panic_stack - PANIC_FRAME_OFFSET + PAGE_SIZE);
+ }
+ int smp_find_processor_id(u16 address)
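For background on the stap() check in the hunk above: as the changelog explains, when the IPL CPU reads through pcpu_devices->lowcore, prefixing turns that into a read from absolute address 0, so the code must use the CPU's actual (prefixed) lowcore, &S390_lowcore, instead. A minimal sketch of that selection logic as it would sit inside arch/s390/kernel/smp.c; the helper name ipl_cpu_lowcore() is hypothetical, while stap(), S390_lowcore, struct lowcore and pcpu_devices are the real symbols the patch relies on.

#include <asm/lowcore.h>	/* struct lowcore, S390_lowcore */
#include <asm/processor.h>	/* stap(): CPU address of the executing CPU */

/*
 * Hypothetical helper mirroring the fix: pick the lowcore of the IPL CPU
 * (pcpu_devices[0]) in a way that is also safe when called on the IPL CPU.
 */
static struct lowcore *ipl_cpu_lowcore(void)
{
	if (pcpu_devices[0].address == stap())
		/* We are running on the IPL CPU: go via the prefixed lowcore,
		 * not via the pointer that prefixing folds to absolute 0. */
		return &S390_lowcore;
	return pcpu_devices[0].lowcore;	/* remote view of the IPL CPU's lowcore */
}

The selected lowcore is then used exactly as in the hunk above: pcpu_delegate() is handed lc->panic_stack - PANIC_FRAME_OFFSET + PAGE_SIZE as its stack.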
diff --git a/queue-4.19/series b/queue-4.19/series
index a0fe7124c95288b8db5415147929d2b3a735d905..945d1558e3de28235c8a385c7c8df49b93cc06a2 100644
@@ -95,3 +95,6 @@ bpf-fix-check_map_access-smin_value-test-when-pointe.patch
 bpf-prevent-out-of-bounds-speculation-on-pointer-ari.patch
 bpf-fix-sanitation-of-alu-op-with-pointer-scalar-typ.patch
 bpf-fix-inner-map-masking-to-prevent-oob-under-specu.patch
+s390-smp-fix-calling-smp_call_ipl_cpu-from-ipl-cpu.patch
+nvmet-rdma-add-unlikely-for-response-allocated-check.patch
+nvmet-rdma-fix-null-dereference-under-heavy-load.patch