git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.14-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 20 Apr 2018 16:28:53 +0000 (18:28 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 20 Apr 2018 16:28:53 +0000 (18:28 +0200)
added patches:
alsa-pcm-fix-uaf-at-pcm-release-via-pcm-timer-access.patch
cxl-fix-possible-deadlock-when-processing-page-faults-from-cxllib.patch
dm-crypt-limit-the-number-of-allocated-pages.patch
dmaengine-at_xdmac-fix-rare-residue-corruption.patch
ib-srp-fix-completion-vector-assignment-algorithm.patch
ib-srp-fix-srp_abort.patch
libnvdimm-dimm-fix-dpa-reservation-vs-uninitialized-label-area.patch
libnvdimm-namespace-use-a-safe-lookup-for-dimm-device-name.patch
rdma-mlx5-protect-from-null-pointer-derefence.patch
rdma-rxe-fix-an-out-of-bounds-read.patch
rdma-ucma-don-t-allow-setting-rdma_option_ib_path-without-an-rdma-device.patch
tpm-self-test-failure-should-not-cause-suspend-to-fail.patch

13 files changed:
queue-4.14/alsa-pcm-fix-uaf-at-pcm-release-via-pcm-timer-access.patch [new file with mode: 0644]
queue-4.14/cxl-fix-possible-deadlock-when-processing-page-faults-from-cxllib.patch [new file with mode: 0644]
queue-4.14/dm-crypt-limit-the-number-of-allocated-pages.patch [new file with mode: 0644]
queue-4.14/dmaengine-at_xdmac-fix-rare-residue-corruption.patch [new file with mode: 0644]
queue-4.14/ib-srp-fix-completion-vector-assignment-algorithm.patch [new file with mode: 0644]
queue-4.14/ib-srp-fix-srp_abort.patch [new file with mode: 0644]
queue-4.14/libnvdimm-dimm-fix-dpa-reservation-vs-uninitialized-label-area.patch [new file with mode: 0644]
queue-4.14/libnvdimm-namespace-use-a-safe-lookup-for-dimm-device-name.patch [new file with mode: 0644]
queue-4.14/rdma-mlx5-protect-from-null-pointer-derefence.patch [new file with mode: 0644]
queue-4.14/rdma-rxe-fix-an-out-of-bounds-read.patch [new file with mode: 0644]
queue-4.14/rdma-ucma-don-t-allow-setting-rdma_option_ib_path-without-an-rdma-device.patch [new file with mode: 0644]
queue-4.14/series
queue-4.14/tpm-self-test-failure-should-not-cause-suspend-to-fail.patch [new file with mode: 0644]

diff --git a/queue-4.14/alsa-pcm-fix-uaf-at-pcm-release-via-pcm-timer-access.patch b/queue-4.14/alsa-pcm-fix-uaf-at-pcm-release-via-pcm-timer-access.patch
new file mode 100644 (file)
index 0000000..6d64bdf
--- /dev/null
@@ -0,0 +1,59 @@
+From a820ccbe21e8ce8e86c39cd1d3bc8c7d1cbb949b Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Mon, 2 Apr 2018 22:41:43 +0200
+Subject: ALSA: pcm: Fix UAF at PCM release via PCM timer access
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit a820ccbe21e8ce8e86c39cd1d3bc8c7d1cbb949b upstream.
+
+The PCM runtime object is created and freed dynamically at PCM stream
+open / close time.  This is tracked via substream->runtime, and it's
+cleared at snd_pcm_detach_substream().
+
+The runtime object assignment is protected by PCM open_mutex, so for
+all PCM operations, it's safely handled.  However, each PCM substream
+also provides an ALSA timer interface, and user-space can access it
+while a PCM substream is being closed.  This may eventually lead to a
+UAF, as snd_pcm_timer_resolution() tries to access the runtime while
+it is being cleared on the other side.
+
+Fortunately, this is the only concurrent access from the PCM timer, and
+it merely reads the runtime->timer_resolution field.  So, we can avoid
+the race by reordering kfree() and wrapping the clearing of
+substream->runtime with the corresponding timer lock.
+
+Reported-by: syzbot+8e62ff4e07aa2ce87826@syzkaller.appspotmail.com
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/core/pcm.c |    8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/sound/core/pcm.c
++++ b/sound/core/pcm.c
+@@ -28,6 +28,7 @@
+ #include <sound/core.h>
+ #include <sound/minors.h>
+ #include <sound/pcm.h>
++#include <sound/timer.h>
+ #include <sound/control.h>
+ #include <sound/info.h>
+@@ -1050,8 +1051,13 @@ void snd_pcm_detach_substream(struct snd
+       snd_free_pages((void*)runtime->control,
+                      PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control)));
+       kfree(runtime->hw_constraints.rules);
+-      kfree(runtime);
++      /* Avoid concurrent access to runtime via PCM timer interface */
++      if (substream->timer)
++              spin_lock_irq(&substream->timer->lock);
+       substream->runtime = NULL;
++      if (substream->timer)
++              spin_unlock_irq(&substream->timer->lock);
++      kfree(runtime);
+       put_pid(substream->pid);
+       substream->pid = NULL;
+       substream->pstr->substream_opened--;
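
For background, a reconstructed sketch (from memory, not part of the patch) of
the reader that races with the close path.  It runs with the timer lock held
and only reads the runtime pointer, which is why clearing substream->runtime
under that same lock and calling kfree() only afterwards closes the race:

/* Reconstructed sketch, cf. snd_pcm_timer_resolution() in
 * sound/core/pcm_timer.c; simplified, not a verbatim excerpt. */
static unsigned long snd_pcm_timer_resolution(struct snd_timer *timer)
{
	struct snd_pcm_substream *substream = timer->private_data;

	/* Runs under timer->lock: after the fix it sees either a valid
	 * runtime or NULL, never an already-freed pointer. */
	return substream->runtime ? substream->runtime->timer_resolution : 0;
}
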
diff --git a/queue-4.14/cxl-fix-possible-deadlock-when-processing-page-faults-from-cxllib.patch b/queue-4.14/cxl-fix-possible-deadlock-when-processing-page-faults-from-cxllib.patch
new file mode 100644 (file)
index 0000000..6a8f5e3
--- /dev/null
@@ -0,0 +1,154 @@
+From ad7b4e8022b9864c075fe71e1328b1d25cad82f6 Mon Sep 17 00:00:00 2001
+From: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
+Date: Tue, 3 Apr 2018 15:54:02 +0200
+Subject: cxl: Fix possible deadlock when processing page faults from cxllib
+
+From: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
+
+commit ad7b4e8022b9864c075fe71e1328b1d25cad82f6 upstream.
+
+cxllib_handle_fault() is called by an external driver when it needs to
+have the host resolve page faults for a buffer. The buffer can cover
+several pages and VMAs. The function iterates over all the pages used
+by the buffer, based on the page size of the VMA.
+
+To ensure some stability while processing the faults, the thread T1
+grabs the mm->mmap_sem semaphore with read access (R1). However, when
+processing a page fault for a single page, one of the underlying
+functions, copro_handle_mm_fault(), also grabs the same semaphore with
+read access (R2). So the thread T1 takes the semaphore twice.
+
+If another thread T2 tries to access the semaphore in write mode W1
+(say, because it wants to allocate memory and calls 'brk'), then that
+thread T2 will have to wait because there's a reader (R1). If the
+thread T1 is processing a new page at that time, it won't get an
+automatic grant at R2, because there's now a writer thread
+waiting (T2). And we have a deadlock.
+
+The timeline is:
+1. thread T1 owns the semaphore with read access R1
+2. thread T2 requests write access W1 and waits
+3. thread T1 requests read access R2 and waits
+
+The fix is for the thread T1 to release the semaphore R1 once it has
+the information it needs from the current VMA. The address space/VMAs
+could evolve while T1 iterates over the full buffer, but in the
+unlikely case where T1 misses a page, the external driver will raise a
+new page fault when retrying the memory access.
+
+Fixes: 3ced8d730063 ("cxl: Export library to support IBM XSL")
+Cc: stable@vger.kernel.org # 4.13+
+Signed-off-by: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/misc/cxl/cxllib.c |   83 +++++++++++++++++++++++++++++-----------------
+ 1 file changed, 54 insertions(+), 29 deletions(-)
+
+--- a/drivers/misc/cxl/cxllib.c
++++ b/drivers/misc/cxl/cxllib.c
+@@ -207,49 +207,74 @@ int cxllib_get_PE_attributes(struct task
+ }
+ EXPORT_SYMBOL_GPL(cxllib_get_PE_attributes);
+-int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags)
++static int get_vma_info(struct mm_struct *mm, u64 addr,
++                      u64 *vma_start, u64 *vma_end,
++                      unsigned long *page_size)
+ {
+-      int rc;
+-      u64 dar;
+       struct vm_area_struct *vma = NULL;
+-      unsigned long page_size;
+-
+-      if (mm == NULL)
+-              return -EFAULT;
++      int rc = 0;
+       down_read(&mm->mmap_sem);
+       vma = find_vma(mm, addr);
+       if (!vma) {
+-              pr_err("Can't find vma for addr %016llx\n", addr);
+               rc = -EFAULT;
+               goto out;
+       }
+-      /* get the size of the pages allocated */
+-      page_size = vma_kernel_pagesize(vma);
++      *page_size = vma_kernel_pagesize(vma);
++      *vma_start = vma->vm_start;
++      *vma_end = vma->vm_end;
++out:
++      up_read(&mm->mmap_sem);
++      return rc;
++}
++
++int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags)
++{
++      int rc;
++      u64 dar, vma_start, vma_end;
++      unsigned long page_size;
+-      for (dar = (addr & ~(page_size - 1)); dar < (addr + size); dar += page_size) {
+-              if (dar < vma->vm_start || dar >= vma->vm_end) {
+-                      vma = find_vma(mm, addr);
+-                      if (!vma) {
+-                              pr_err("Can't find vma for addr %016llx\n", addr);
+-                              rc = -EFAULT;
+-                              goto out;
+-                      }
+-                      /* get the size of the pages allocated */
+-                      page_size = vma_kernel_pagesize(vma);
++      if (mm == NULL)
++              return -EFAULT;
++
++      /*
++       * The buffer we have to process can extend over several pages
++       * and may also cover several VMAs.
++       * We iterate over all the pages. The page size could vary
++       * between VMAs.
++       */
++      rc = get_vma_info(mm, addr, &vma_start, &vma_end, &page_size);
++      if (rc)
++              return rc;
++
++      for (dar = (addr & ~(page_size - 1)); dar < (addr + size);
++           dar += page_size) {
++              if (dar < vma_start || dar >= vma_end) {
++                      /*
++                       * We don't hold the mm->mmap_sem semaphore
++                       * while iterating, since the semaphore is
++                       * required by one of the lower-level page
++                       * fault processing functions and it could
++                       * create a deadlock.
++                       *
++                       * It means the VMAs can be altered between 2
++                       * loop iterations and we could theoretically
++                       * miss a page (however unlikely). But that's
++                       * not really a problem, as the driver will
++                       * retry access, get another page fault on the
++                       * missing page and call us again.
++                       */
++                      rc = get_vma_info(mm, dar, &vma_start, &vma_end,
++                                      &page_size);
++                      if (rc)
++                              return rc;
+               }
+               rc = cxl_handle_mm_fault(mm, flags, dar);
+-              if (rc) {
+-                      pr_err("cxl_handle_mm_fault failed %d", rc);
+-                      rc = -EFAULT;
+-                      goto out;
+-              }
++              if (rc)
++                      return -EFAULT;
+       }
+-      rc = 0;
+-out:
+-      up_read(&mm->mmap_sem);
+-      return rc;
++      return 0;
+ }
+ EXPORT_SYMBOL_GPL(cxllib_handle_fault);
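
A minimal illustration of the rwsem pattern described above (hypothetical
sketch, not code from the patch): because a queued writer blocks new readers,
a thread that already holds mmap_sem for read must not take it for read again:

/* Hypothetical sketch of the deadlock; names assumed, not from the patch. */
static void thread_t1(struct mm_struct *mm)
{
	down_read(&mm->mmap_sem);	/* R1: held across the whole buffer      */
	/* thread T2 now calls down_write(&mm->mmap_sem) and waits (W1)        */
	down_read(&mm->mmap_sem);	/* R2: queued behind the waiting writer  */
					/* -> T1 and T2 deadlock                 */
	up_read(&mm->mmap_sem);
	up_read(&mm->mmap_sem);
}
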
diff --git a/queue-4.14/dm-crypt-limit-the-number-of-allocated-pages.patch b/queue-4.14/dm-crypt-limit-the-number-of-allocated-pages.patch
new file mode 100644 (file)
index 0000000..2835bc9
--- /dev/null
@@ -0,0 +1,151 @@
+From 5059353df86e2573ccd9d43fd9d9396dcec47ca2 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Sun, 13 Aug 2017 22:45:08 -0400
+Subject: dm crypt: limit the number of allocated pages
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 5059353df86e2573ccd9d43fd9d9396dcec47ca2 upstream.
+
+dm-crypt consumes an excessive amount of memory when the user attempts
+to zero a dm-crypt device with "blkdiscard -z". The command
+"blkdiscard -z" calls the BLKZEROOUT ioctl, which goes to the function
+__blkdev_issue_zeroout; __blkdev_issue_zeroout sends a large number of
+write bios that contain the zero page as their payload.
+
+For each incoming page, dm-crypt allocates another page that holds the
+encrypted data, so when processing "blkdiscard -z", dm-crypt tries to
+allocate the amount of memory that is equal to the size of the device.
+This can trigger the OOM killer or cause a system crash.
+
+Fix this by limiting the amount of memory that dm-crypt allocates to 2%
+of total system memory. This limit is system-wide and is divided by the
+number of active dm-crypt devices and each device receives an equal
+share.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-crypt.c |   66 +++++++++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 65 insertions(+), 1 deletion(-)
+
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -148,6 +148,8 @@ struct crypt_config {
+       mempool_t *tag_pool;
+       unsigned tag_pool_max_sectors;
++      struct percpu_counter n_allocated_pages;
++
+       struct bio_set *bs;
+       struct mutex bio_alloc_lock;
+@@ -219,6 +221,12 @@ struct crypt_config {
+ #define MAX_TAG_SIZE  480
+ #define POOL_ENTRY_SIZE       512
++static DEFINE_SPINLOCK(dm_crypt_clients_lock);
++static unsigned dm_crypt_clients_n = 0;
++static volatile unsigned long dm_crypt_pages_per_client;
++#define DM_CRYPT_MEMORY_PERCENT                       2
++#define DM_CRYPT_MIN_PAGES_PER_CLIENT         (BIO_MAX_PAGES * 16)
++
+ static void clone_init(struct dm_crypt_io *, struct bio *);
+ static void kcryptd_queue_crypt(struct dm_crypt_io *io);
+ static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
+@@ -2156,6 +2164,43 @@ static int crypt_wipe_key(struct crypt_c
+       return r;
+ }
++static void crypt_calculate_pages_per_client(void)
++{
++      unsigned long pages = (totalram_pages - totalhigh_pages) * DM_CRYPT_MEMORY_PERCENT / 100;
++
++      if (!dm_crypt_clients_n)
++              return;
++
++      pages /= dm_crypt_clients_n;
++      if (pages < DM_CRYPT_MIN_PAGES_PER_CLIENT)
++              pages = DM_CRYPT_MIN_PAGES_PER_CLIENT;
++      dm_crypt_pages_per_client = pages;
++}
++
++static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data)
++{
++      struct crypt_config *cc = pool_data;
++      struct page *page;
++
++      if (unlikely(percpu_counter_compare(&cc->n_allocated_pages, dm_crypt_pages_per_client) >= 0) &&
++          likely(gfp_mask & __GFP_NORETRY))
++              return NULL;
++
++      page = alloc_page(gfp_mask);
++      if (likely(page != NULL))
++              percpu_counter_add(&cc->n_allocated_pages, 1);
++
++      return page;
++}
++
++static void crypt_page_free(void *page, void *pool_data)
++{
++      struct crypt_config *cc = pool_data;
++
++      __free_page(page);
++      percpu_counter_sub(&cc->n_allocated_pages, 1);
++}
++
+ static void crypt_dtr(struct dm_target *ti)
+ {
+       struct crypt_config *cc = ti->private;
+@@ -2182,6 +2227,10 @@ static void crypt_dtr(struct dm_target *
+       mempool_destroy(cc->req_pool);
+       mempool_destroy(cc->tag_pool);
++      if (cc->page_pool)
++              WARN_ON(percpu_counter_sum(&cc->n_allocated_pages) != 0);
++      percpu_counter_destroy(&cc->n_allocated_pages);
++
+       if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
+               cc->iv_gen_ops->dtr(cc);
+@@ -2196,6 +2245,12 @@ static void crypt_dtr(struct dm_target *
+       /* Must zero key material before freeing */
+       kzfree(cc);
++
++      spin_lock(&dm_crypt_clients_lock);
++      WARN_ON(!dm_crypt_clients_n);
++      dm_crypt_clients_n--;
++      crypt_calculate_pages_per_client();
++      spin_unlock(&dm_crypt_clients_lock);
+ }
+ static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
+@@ -2643,6 +2698,15 @@ static int crypt_ctr(struct dm_target *t
+       ti->private = cc;
++      spin_lock(&dm_crypt_clients_lock);
++      dm_crypt_clients_n++;
++      crypt_calculate_pages_per_client();
++      spin_unlock(&dm_crypt_clients_lock);
++
++      ret = percpu_counter_init(&cc->n_allocated_pages, 0, GFP_KERNEL);
++      if (ret < 0)
++              goto bad;
++
+       /* Optional parameters need to be read before cipher constructor */
+       if (argc > 5) {
+               ret = crypt_ctr_optional(ti, argc - 5, &argv[5]);
+@@ -2697,7 +2761,7 @@ static int crypt_ctr(struct dm_target *t
+               ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size,
+                     ARCH_KMALLOC_MINALIGN);
+-      cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
++      cc->page_pool = mempool_create(BIO_MAX_PAGES, crypt_page_alloc, crypt_page_free, cc);
+       if (!cc->page_pool) {
+               ti->error = "Cannot allocate page mempool";
+               goto bad;
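
As a rough worked example of the new limit (illustrative numbers, assuming
4 KiB pages and a BIO_MAX_PAGES of 256): on a machine with 16 GiB of
non-highmem RAM, 2% is about 83,000 pages (~320 MiB) system-wide; with four
active dm-crypt devices each one may hold roughly 20,700 pages (~80 MiB) at a
time, and a device's share never drops below DM_CRYPT_MIN_PAGES_PER_CLIENT =
256 * 16 = 4096 pages (16 MiB), keeping a flood of zero-page writes from
exhausting memory.
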
diff --git a/queue-4.14/dmaengine-at_xdmac-fix-rare-residue-corruption.patch b/queue-4.14/dmaengine-at_xdmac-fix-rare-residue-corruption.patch
new file mode 100644 (file)
index 0000000..ad4f608
--- /dev/null
@@ -0,0 +1,71 @@
+From c5637476bbf9bb86c7f0413b8f4822a73d8d2d07 Mon Sep 17 00:00:00 2001
+From: Maxime Jayat <maxime.jayat@mobile-devices.fr>
+Date: Thu, 22 Feb 2018 12:39:55 +0100
+Subject: dmaengine: at_xdmac: fix rare residue corruption
+
+From: Maxime Jayat <maxime.jayat@mobile-devices.fr>
+
+commit c5637476bbf9bb86c7f0413b8f4822a73d8d2d07 upstream.
+
+Despite the efforts made to correctly read the NDA and CUBC registers,
+the order in which the registers are read could sometimes lead to an
+inconsistent state.
+
+Re-using the timeline from the comments, this following timing of
+registers reads could lead to reading NDA with value "@desc2" and
+CUBC with value "MAX desc1":
+
+ INITD --------                    ------------
+              |____________________|
+       _______________________  _______________
+ NDA       @desc2             \/   @desc3
+       _______________________/\_______________
+       __________  ___________  _______________
+ CUBC       0    \/ MAX desc1 \/  MAX desc2
+       __________/\___________/\_______________
+        |  |          |  |
+Events:(1)(2)        (3)(4)
+
+(1) check_nda = @desc2
+(2) initd = 1
+(3) cur_ubc = MAX desc1
+(4) cur_nda = @desc2
+
+This is allowed by the condition ((check_nda == cur_nda) && initd),
+despite cur_ubc and cur_nda being in the precise state we don't want.
+
+This error leads to incorrect residue computation.
+
+Fix it by reversing the order in which CUBC and INITD are read. This
+makes sure that NDA and CUBC are always read together either _before_
+INITD goes to 0 or _after_ it is back at 1.
+The case where NDA is read before INITD is at 0 and CUBC is read after
+INITD is back at 1 will be rejected by check_nda and cur_nda being
+different.
+
+Fixes: 53398f488821 ("dmaengine: at_xdmac: fix residue corruption")
+Cc: stable@vger.kernel.org
+Signed-off-by: Maxime Jayat <maxime.jayat@mobile-devices.fr>
+Acked-by: Ludovic Desroches <ludovic.desroches@microchip.com>
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/at_xdmac.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/dma/at_xdmac.c
++++ b/drivers/dma/at_xdmac.c
+@@ -1471,10 +1471,10 @@ at_xdmac_tx_status(struct dma_chan *chan
+       for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
+               check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+               rmb();
+-              initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
+-              rmb();
+               cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
+               rmb();
++              initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
++              rmb();
+               cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+               rmb();
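
For reference, a condensed sketch of the snapshot loop after the fix
(reconstructed around the lines above; not a verbatim excerpt).  The snapshot
is only trusted when both NDA reads match and INITD is back at 1, per the
condition quoted in the commit message:

/* Condensed, reconstructed sketch of at_xdmac_tx_status()'s retry loop. */
for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
	check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
	rmb();
	cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
	rmb();
	initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
	rmb();
	cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
	rmb();

	/* NDA and CUBC were read together either before INITD dropped to 0
	 * or after it returned to 1; otherwise take another snapshot. */
	if ((check_nda == cur_nda) && initd)
		break;
}
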
diff --git a/queue-4.14/ib-srp-fix-completion-vector-assignment-algorithm.patch b/queue-4.14/ib-srp-fix-completion-vector-assignment-algorithm.patch
new file mode 100644 (file)
index 0000000..c715be5
--- /dev/null
@@ -0,0 +1,49 @@
+From 3a148896b24adf8688dc0c59af54531931677a40 Mon Sep 17 00:00:00 2001
+From: Bart Van Assche <bart.vanassche@wdc.com>
+Date: Mon, 12 Feb 2018 09:50:25 -0800
+Subject: IB/srp: Fix completion vector assignment algorithm
+
+From: Bart Van Assche <bart.vanassche@wdc.com>
+
+commit 3a148896b24adf8688dc0c59af54531931677a40 upstream.
+
+Ensure that cv_end is equal to ibdev->num_comp_vectors for the
+NUMA node with the highest index. This patch improves spreading
+of RDMA channels over completion vectors and thereby improves
+performance, especially on systems with only a single NUMA node.
+This patch drops support for the comp_vector login parameter by
+ignoring the value of that parameter since I have not found a
+good way to combine support for that parameter and automatic
+spreading of RDMA channels over completion vectors.
+
+Fixes: d92c0da71a35 ("IB/srp: Add multichannel support")
+Reported-by: Alexander Schmid <alex@modula-shop-systems.de>
+Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
+Cc: Alexander Schmid <alex@modula-shop-systems.de>
+Cc: stable@vger.kernel.org
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/ulp/srp/ib_srp.c |   10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -3430,12 +3430,10 @@ static ssize_t srp_create_target(struct
+                                     num_online_nodes());
+               const int ch_end = ((node_idx + 1) * target->ch_count /
+                                   num_online_nodes());
+-              const int cv_start = (node_idx * ibdev->num_comp_vectors /
+-                                    num_online_nodes() + target->comp_vector)
+-                                   % ibdev->num_comp_vectors;
+-              const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
+-                                  num_online_nodes() + target->comp_vector)
+-                                 % ibdev->num_comp_vectors;
++              const int cv_start = node_idx * ibdev->num_comp_vectors /
++                                   num_online_nodes();
++              const int cv_end = (node_idx + 1) * ibdev->num_comp_vectors /
++                                 num_online_nodes();
+               int cpu_idx = 0;
+               for_each_online_cpu(cpu) {
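
A small worked example of the new assignment (illustrative numbers): with two
online NUMA nodes and an HCA exposing 8 completion vectors, node 0 now gets
cv_start = 0, cv_end = 4 and node 1 gets cv_start = 4, cv_end = 8, so cv_end
for the highest-indexed node equals ibdev->num_comp_vectors and every vector
is used.  With the old formula and, say, comp_vector = 5, node 0 would get
cv_start = 5 and cv_end = 1, a range that wraps around and no longer ends at
num_comp_vectors for the highest node.
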
diff --git a/queue-4.14/ib-srp-fix-srp_abort.patch b/queue-4.14/ib-srp-fix-srp_abort.patch
new file mode 100644 (file)
index 0000000..7b5d995
--- /dev/null
@@ -0,0 +1,41 @@
+From e68088e78d82920632eba112b968e49d588d02a2 Mon Sep 17 00:00:00 2001
+From: Bart Van Assche <bart.vanassche@wdc.com>
+Date: Fri, 23 Feb 2018 14:09:24 -0800
+Subject: IB/srp: Fix srp_abort()
+
+From: Bart Van Assche <bart.vanassche@wdc.com>
+
+commit e68088e78d82920632eba112b968e49d588d02a2 upstream.
+
+Before commit e494f6a72839 ("[SCSI] improved eh timeout handler") it
+did not really matter whether or not abort handlers like srp_abort()
+called .scsi_done() when returning a value other than SUCCESS. Since
+that commit, however, it does matter. Hence only call .scsi_done() when
+returning SUCCESS.
+
+Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/ulp/srp/ib_srp.c |    8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -2656,9 +2656,11 @@ static int srp_abort(struct scsi_cmnd *s
+               ret = FAST_IO_FAIL;
+       else
+               ret = FAILED;
+-      srp_free_req(ch, req, scmnd, 0);
+-      scmnd->result = DID_ABORT << 16;
+-      scmnd->scsi_done(scmnd);
++      if (ret == SUCCESS) {
++              srp_free_req(ch, req, scmnd, 0);
++              scmnd->result = DID_ABORT << 16;
++              scmnd->scsi_done(scmnd);
++      }
+       return ret;
+ }
diff --git a/queue-4.14/libnvdimm-dimm-fix-dpa-reservation-vs-uninitialized-label-area.patch b/queue-4.14/libnvdimm-dimm-fix-dpa-reservation-vs-uninitialized-label-area.patch
new file mode 100644 (file)
index 0000000..159b8bb
--- /dev/null
@@ -0,0 +1,51 @@
+From c31898c8c711f2bbbcaebe802a55827e288d875a Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Fri, 6 Apr 2018 11:25:38 -0700
+Subject: libnvdimm, dimm: fix dpa reservation vs uninitialized label area
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit c31898c8c711f2bbbcaebe802a55827e288d875a upstream.
+
+At initialization time the 'dimm' driver caches a copy of the memory
+device's label area and reserves address space for each of the
+namespaces defined.
+
+However, as can be seen below, the reservation occurs even when the
+index blocks are invalid:
+
+ nvdimm nmem0: nvdimm_init_config_data: len: 131072 rc: 0
+ nvdimm nmem0: config data size: 131072
+ nvdimm nmem0: __nd_label_validate: nsindex0 labelsize 1 invalid
+ nvdimm nmem0: __nd_label_validate: nsindex1 labelsize 1 invalid
+ nvdimm nmem0: : pmem-6025e505: 0x1000000000 @ 0xf50000000 reserve <-- bad
+
+Gate dpa reservation on the presence of valid index blocks.
+
+Cc: <stable@vger.kernel.org>
+Fixes: 4a826c83db4e ("libnvdimm: namespace indices: read and validate")
+Reported-by: Krzysztof Rusocki <krzysztof.rusocki@intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/nvdimm/dimm.c |    8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/drivers/nvdimm/dimm.c
++++ b/drivers/nvdimm/dimm.c
+@@ -65,9 +65,11 @@ static int nvdimm_probe(struct device *d
+       ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);
+       nd_label_copy(ndd, to_next_namespace_index(ndd),
+                       to_current_namespace_index(ndd));
+-      rc = nd_label_reserve_dpa(ndd);
+-      if (ndd->ns_current >= 0)
+-              nvdimm_set_aliasing(dev);
++      if (ndd->ns_current >= 0) {
++              rc = nd_label_reserve_dpa(ndd);
++              if (rc == 0)
++                      nvdimm_set_aliasing(dev);
++      }
+       nvdimm_clear_locked(dev);
+       nvdimm_bus_unlock(dev);
diff --git a/queue-4.14/libnvdimm-namespace-use-a-safe-lookup-for-dimm-device-name.patch b/queue-4.14/libnvdimm-namespace-use-a-safe-lookup-for-dimm-device-name.patch
new file mode 100644 (file)
index 0000000..c439f29
--- /dev/null
@@ -0,0 +1,70 @@
+From 4f8672201b7e7ed4f5f6c3cf6dcd080648580582 Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Fri, 6 Apr 2018 16:37:21 -0700
+Subject: libnvdimm, namespace: use a safe lookup for dimm device name
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit 4f8672201b7e7ed4f5f6c3cf6dcd080648580582 upstream.
+
+The following NULL dereference results from incorrectly assuming that
+ndd is valid in this print:
+
+  struct nvdimm_drvdata *ndd = to_ndd(&nd_region->mapping[i]);
+
+  /*
+   * Give up if we don't find an instance of a uuid at each
+   * position (from 0 to nd_region->ndr_mappings - 1), or if we
+   * find a dimm with two instances of the same uuid.
+   */
+  dev_err(&nd_region->dev, "%s missing label for %pUb\n",
+                  dev_name(ndd->dev), nd_label->uuid);
+
+ BUG: unable to handle kernel NULL pointer dereference at 0000000000000000
+ IP: nd_region_register_namespaces+0xd67/0x13c0 [libnvdimm]
+ PGD 0 P4D 0
+ Oops: 0000 [#1] SMP PTI
+ CPU: 43 PID: 673 Comm: kworker/u609:10 Not tainted 4.16.0-rc4+ #1
+ [..]
+ RIP: 0010:nd_region_register_namespaces+0xd67/0x13c0 [libnvdimm]
+ [..]
+ Call Trace:
+  ? devres_add+0x2f/0x40
+  ? devm_kmalloc+0x52/0x60
+  ? nd_region_activate+0x9c/0x320 [libnvdimm]
+  nd_region_probe+0x94/0x260 [libnvdimm]
+  ? kernfs_add_one+0xe4/0x130
+  nvdimm_bus_probe+0x63/0x100 [libnvdimm]
+
+Switch to using the nvdimm device directly.
+
+Fixes: 0e3b0d123c8f ("libnvdimm, namespace: allow multiple pmem...")
+Cc: <stable@vger.kernel.org>
+Reported-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/nvdimm/namespace_devs.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/nvdimm/namespace_devs.c
++++ b/drivers/nvdimm/namespace_devs.c
+@@ -1926,7 +1926,7 @@ struct device *create_namespace_pmem(str
+       }
+       if (i < nd_region->ndr_mappings) {
+-              struct nvdimm_drvdata *ndd = to_ndd(&nd_region->mapping[i]);
++              struct nvdimm *nvdimm = nd_region->mapping[i].nvdimm;
+               /*
+                * Give up if we don't find an instance of a uuid at each
+@@ -1934,7 +1934,7 @@ struct device *create_namespace_pmem(str
+                * find a dimm with two instances of the same uuid.
+                */
+               dev_err(&nd_region->dev, "%s missing label for %pUb\n",
+-                              dev_name(ndd->dev), nd_label->uuid);
++                              nvdimm_name(nvdimm), nd_label->uuid);
+               rc = -EINVAL;
+               goto err;
+       }
diff --git a/queue-4.14/rdma-mlx5-protect-from-null-pointer-derefence.patch b/queue-4.14/rdma-mlx5-protect-from-null-pointer-derefence.patch
new file mode 100644 (file)
index 0000000..ed6091f
--- /dev/null
@@ -0,0 +1,35 @@
+From 4289861d88d6c7b5e4c8cc7fe2ad6cdf0cdfc366 Mon Sep 17 00:00:00 2001
+From: Leon Romanovsky <leonro@mellanox.com>
+Date: Tue, 13 Mar 2018 15:29:24 +0200
+Subject: RDMA/mlx5: Protect from NULL pointer derefence
+
+From: Leon Romanovsky <leonro@mellanox.com>
+
+commit 4289861d88d6c7b5e4c8cc7fe2ad6cdf0cdfc366 upstream.
+
+mlx5_ib_alloc_implicit_mr() can fail to acquire pages, in which
+case the returned mr pointer won't be valid. Ensure that it is not
+an error pointer before accessing it.
+
+Cc: <stable@vger.kernel.org> # 4.10
+Fixes: 81713d3788d2 ("IB/mlx5: Add implicit MR support")
+Reported-by: Noa Osherovich <noaos@mellanox.com>
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/mlx5/mr.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -1220,6 +1220,8 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct
+                       return ERR_PTR(-EINVAL);
+               mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
++              if (IS_ERR(mr))
++                      return ERR_CAST(mr);
+               return &mr->ibmr;
+       }
+ #endif
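
For readers unfamiliar with the error-pointer convention involved here (a
brief illustrative note, not part of the patch): mlx5_ib_alloc_implicit_mr()
reports failure by encoding an errno in the returned pointer, so the caller
must test it with IS_ERR() before dereferencing and may forward the error
with ERR_CAST():

	/* Annotated restatement of the fixed path, for illustration only. */
	mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
	if (IS_ERR(mr))               /* e.g. ERR_PTR(-ENOMEM), not a valid MR */
		return ERR_CAST(mr);  /* propagate the errno as struct ib_mr * */
	return &mr->ibmr;             /* only dereference a verified pointer   */
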
diff --git a/queue-4.14/rdma-rxe-fix-an-out-of-bounds-read.patch b/queue-4.14/rdma-rxe-fix-an-out-of-bounds-read.patch
new file mode 100644 (file)
index 0000000..27e2702
--- /dev/null
@@ -0,0 +1,88 @@
+From a6544a624c3ff92a64e4aca3931fa064607bd3da Mon Sep 17 00:00:00 2001
+From: Bart Van Assche <bart.vanassche@wdc.com>
+Date: Thu, 1 Mar 2018 14:00:29 -0800
+Subject: RDMA/rxe: Fix an out-of-bounds read
+
+From: Bart Van Assche <bart.vanassche@wdc.com>
+
+commit a6544a624c3ff92a64e4aca3931fa064607bd3da upstream.
+
+This patch avoids that KASAN reports the following when the SRP initiator
+calls srp_post_send():
+
+==================================================================
+BUG: KASAN: stack-out-of-bounds in rxe_post_send+0x5c4/0x980 [rdma_rxe]
+Read of size 8 at addr ffff880066606e30 by task 02-mq/1074
+
+CPU: 2 PID: 1074 Comm: 02-mq Not tainted 4.16.0-rc3-dbg+ #1
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.0.0-prebuilt.qemu-project.org 04/01/2014
+Call Trace:
+dump_stack+0x85/0xc7
+print_address_description+0x65/0x270
+kasan_report+0x231/0x350
+rxe_post_send+0x5c4/0x980 [rdma_rxe]
+srp_post_send.isra.16+0x149/0x190 [ib_srp]
+srp_queuecommand+0x94d/0x1670 [ib_srp]
+scsi_dispatch_cmd+0x1c2/0x550 [scsi_mod]
+scsi_queue_rq+0x843/0xa70 [scsi_mod]
+blk_mq_dispatch_rq_list+0x143/0xac0
+blk_mq_do_dispatch_ctx+0x1c5/0x260
+blk_mq_sched_dispatch_requests+0x2bf/0x2f0
+__blk_mq_run_hw_queue+0xdb/0x160
+__blk_mq_delay_run_hw_queue+0xba/0x100
+blk_mq_run_hw_queue+0xf2/0x190
+blk_mq_sched_insert_request+0x163/0x2f0
+blk_execute_rq+0xb0/0x130
+scsi_execute+0x14e/0x260 [scsi_mod]
+scsi_probe_and_add_lun+0x366/0x13d0 [scsi_mod]
+__scsi_scan_target+0x18a/0x810 [scsi_mod]
+scsi_scan_target+0x11e/0x130 [scsi_mod]
+srp_create_target+0x1522/0x19e0 [ib_srp]
+kernfs_fop_write+0x180/0x210
+__vfs_write+0xb1/0x2e0
+vfs_write+0xf6/0x250
+SyS_write+0x99/0x110
+do_syscall_64+0xee/0x2b0
+entry_SYSCALL_64_after_hwframe+0x42/0xb7
+
+The buggy address belongs to the page:
+page:ffffea0001998180 count:0 mapcount:0 mapping:0000000000000000 index:0x0
+flags: 0x4000000000000000()
+raw: 4000000000000000 0000000000000000 0000000000000000 00000000ffffffff
+raw: dead000000000100 dead000000000200 0000000000000000 0000000000000000
+page dumped because: kasan: bad access detected
+
+Memory state around the buggy address:
+ffff880066606d00: 00 00 00 00 00 00 00 00 00 00 00 00 00 f1 f1 f1
+ffff880066606d80: f1 00 f2 f2 f2 f2 f2 f2 f2 00 00 f2 f2 f2 f2 f2
+>ffff880066606e00: f2 00 00 00 00 00 f2 f2 f2 f3 f3 f3 f3 00 00 00
+                                    ^
+ffff880066606e80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ffff880066606f00: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+==================================================================
+
+Fixes: 8700e3e7c485 ("Soft RoCE driver")
+Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
+Cc: Moni Shoua <monis@mellanox.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/sw/rxe/rxe_verbs.c |    5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
+@@ -712,9 +712,8 @@ static int init_send_wqe(struct rxe_qp *
+               memcpy(wqe->dma.sge, ibwr->sg_list,
+                      num_sge * sizeof(struct ib_sge));
+-      wqe->iova               = (mask & WR_ATOMIC_MASK) ?
+-                                      atomic_wr(ibwr)->remote_addr :
+-                                      rdma_wr(ibwr)->remote_addr;
++      wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
++              mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
+       wqe->mask               = mask;
+       wqe->dma.length         = length;
+       wqe->dma.resid          = length;
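
As background, a reconstructed sketch of why the old expression over-read the
stack (cf. include/rdma/ib_verbs.h; details from memory, not a verbatim
excerpt).  rdma_wr() and atomic_wr() are plain container_of() wrappers with no
opcode check, so evaluating rdma_wr(ibwr)->remote_addr for a plain SEND work
request, which the caller posts as a bare struct ib_send_wr on its stack,
reads past the end of that structure; the fix only consults the containing
structure when the opcode mask says one exists:

/* Reconstructed sketch of the accessor and the containing structure. */
struct ib_rdma_wr {
	struct ib_send_wr	wr;
	u64			remote_addr;
	u32			rkey;
};

static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
{
	/* No check that wr is really embedded in an ib_rdma_wr. */
	return container_of(wr, struct ib_rdma_wr, wr);
}
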
diff --git a/queue-4.14/rdma-ucma-don-t-allow-setting-rdma_option_ib_path-without-an-rdma-device.patch b/queue-4.14/rdma-ucma-don-t-allow-setting-rdma_option_ib_path-without-an-rdma-device.patch
new file mode 100644 (file)
index 0000000..16df36d
--- /dev/null
@@ -0,0 +1,35 @@
+From 8435168d50e66fa5eae01852769d20a36f9e5e83 Mon Sep 17 00:00:00 2001
+From: Roland Dreier <roland@purestorage.com>
+Date: Tue, 3 Apr 2018 15:33:01 -0700
+Subject: RDMA/ucma: Don't allow setting RDMA_OPTION_IB_PATH without an RDMA device
+
+From: Roland Dreier <roland@purestorage.com>
+
+commit 8435168d50e66fa5eae01852769d20a36f9e5e83 upstream.
+
+Check to make sure that ctx->cm_id->device is set before we use it.
+Otherwise userspace can trigger a NULL dereference by doing
+RDMA_USER_CM_CMD_SET_OPTION on an ID that is not bound to a device.
+
+Cc: <stable@vger.kernel.org>
+Reported-by: <syzbot+a67bc93e14682d92fc2f@syzkaller.appspotmail.com>
+Signed-off-by: Roland Dreier <roland@purestorage.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/core/ucma.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/infiniband/core/ucma.c
++++ b/drivers/infiniband/core/ucma.c
+@@ -1241,6 +1241,9 @@ static int ucma_set_ib_path(struct ucma_
+       if (!optlen)
+               return -EINVAL;
++      if (!ctx->cm_id->device)
++              return -EINVAL;
++
+       memset(&sa_path, 0, sizeof(sa_path));
+       sa_path.rec_type = SA_PATH_REC_TYPE_IB;
diff --git a/queue-4.14/series b/queue-4.14/series
index f9ed4d220bcd14d0e4904d31b2390543c7e0e977..55494f5d52d4b1bae5927423c41efebbb3999acc 100644 (file)
@@ -82,3 +82,15 @@ ext4-don-t-allow-r-w-mounts-if-metadata-blocks-overlap-the-superblock.patch
 ext4-move-call-to-ext4_error-into-ext4_xattr_check_block.patch
 ext4-add-bounds-checking-to-ext4_xattr_find_entry.patch
 ext4-add-extra-checks-to-ext4_xattr_block_get.patch
+dm-crypt-limit-the-number-of-allocated-pages.patch
+rdma-ucma-don-t-allow-setting-rdma_option_ib_path-without-an-rdma-device.patch
+rdma-mlx5-protect-from-null-pointer-derefence.patch
+rdma-rxe-fix-an-out-of-bounds-read.patch
+alsa-pcm-fix-uaf-at-pcm-release-via-pcm-timer-access.patch
+ib-srp-fix-srp_abort.patch
+ib-srp-fix-completion-vector-assignment-algorithm.patch
+dmaengine-at_xdmac-fix-rare-residue-corruption.patch
+cxl-fix-possible-deadlock-when-processing-page-faults-from-cxllib.patch
+tpm-self-test-failure-should-not-cause-suspend-to-fail.patch
+libnvdimm-dimm-fix-dpa-reservation-vs-uninitialized-label-area.patch
+libnvdimm-namespace-use-a-safe-lookup-for-dimm-device-name.patch
diff --git a/queue-4.14/tpm-self-test-failure-should-not-cause-suspend-to-fail.patch b/queue-4.14/tpm-self-test-failure-should-not-cause-suspend-to-fail.patch
new file mode 100644 (file)
index 0000000..e851c01
--- /dev/null
@@ -0,0 +1,60 @@
+From 0803d7befa15cab5717d667a97a66214d2a4c083 Mon Sep 17 00:00:00 2001
+From: Chris Chiu <chiu@endlessm.com>
+Date: Tue, 20 Mar 2018 15:36:40 +0800
+Subject: tpm: self test failure should not cause suspend to fail
+
+From: Chris Chiu <chiu@endlessm.com>
+
+commit 0803d7befa15cab5717d667a97a66214d2a4c083 upstream.
+
+The Acer Veriton X4110G has a TPM device detected as:
+  tpm_tis 00:0b: 1.2 TPM (device-id 0xFE, rev-id 71)
+
+After the first S3 suspend, the following error appears during resume:
+  tpm tpm0: A TPM error(38) occurred continue selftest
+
+Any following S3 suspend attempts will now fail with this error:
+  tpm tpm0: Error (38) sending savestate before suspend
+  PM: Device 00:0b failed to suspend: error 38
+
+Error 38 is TPM_ERR_INVALID_POSTINIT which means the TPM is
+not in the correct state. This indicates that the platform BIOS
+is not sending the usual TPM_Startup command during S3 resume.
+From this point onwards, all TPM commands will fail.
+
+The same issue was previously reported on Foxconn 6150BK8MC and
+Sony Vaio TX3.
+
+The platform behaviour seems broken here, but we should not break
+suspend/resume because of this.
+
+When the unexpected TPM state is encountered, set a flag to skip the
+affected TPM_SaveState command on later suspends.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Chris Chiu <chiu@endlessm.com>
+Signed-off-by: Daniel Drake <drake@endlessm.com>
+Link: http://lkml.kernel.org/r/CAB4CAwfSCvj1cudi+MWaB5g2Z67d9DwY1o475YOZD64ma23UiQ@mail.gmail.com
+Link: https://lkml.org/lkml/2011/3/28/192
+Link: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=591031
+Reviewed-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/tpm/tpm-interface.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/char/tpm/tpm-interface.c
++++ b/drivers/char/tpm/tpm-interface.c
+@@ -971,6 +971,10 @@ int tpm_do_selftest(struct tpm_chip *chi
+       loops = jiffies_to_msecs(duration) / delay_msec;
+       rc = tpm_continue_selftest(chip);
++      if (rc == TPM_ERR_INVALID_POSTINIT) {
++              chip->flags |= TPM_CHIP_FLAG_ALWAYS_POWERED;
++              dev_info(&chip->dev, "TPM not ready (%d)\n", rc);
++      }
+       /* This may fail if there was no TPM driver during a suspend/resume
+        * cycle; some may return 10 (BAD_ORDINAL), others 28 (FAILEDSELFTEST)
+        */
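
For context, a reconstructed sketch of where the flag takes effect (cf.
tpm_pm_suspend() in drivers/char/tpm/tpm-interface.c; simplified and from
memory, not a verbatim excerpt).  A chip flagged TPM_CHIP_FLAG_ALWAYS_POWERED
is not sent TPM_SaveState on suspend, which is why setting the flag above
prevents the later "Error (38) sending savestate before suspend" failure:

/* Reconstructed, simplified sketch of the consumer of the flag. */
int tpm_pm_suspend(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);

	if (chip == NULL)
		return -ENODEV;

	/* Chips that must stay powered (or that lost their post-init
	 * state, as above) skip TPM_SaveState entirely. */
	if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED)
		return 0;

	/* ... otherwise issue TPM_SaveState (TPM 1.2) or tpm2_shutdown() ... */
	return 0;
}
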