]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.5-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 7 Feb 2020 10:41:23 +0000 (11:41 +0100)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 7 Feb 2020 10:41:23 +0000 (11:41 +0100)
added patches:
arm-dma-api-fix-max_pfn-off-by-one-error-in-__dma_supported.patch
dm-crypt-fix-benbi-iv-constructor-crash-if-used-in-authenticated-mode.patch
dm-crypt-fix-gfp-flags-passed-to-skcipher_request_alloc.patch
dm-fix-potential-for-q-make_request_fn-null-pointer.patch
dm-space-map-common-fix-to-ensure-new-block-isn-t-already-in-use.patch
dm-thin-fix-use-after-free-in-metadata_pre_commit_callback.patch
dm-thin-metadata-use-pool-locking-at-end-of-dm_pool_metadata_close.patch
dm-writecache-fix-incorrect-flush-sequence-when-doing-ssd-mode-commit.patch
dm-zoned-support-zone-sizes-smaller-than-128mib.patch
of-add-of_dma_default_coherent-select-it-on-powerpc.patch

queue-5.5/arm-dma-api-fix-max_pfn-off-by-one-error-in-__dma_supported.patch [new file with mode: 0644]
queue-5.5/dm-crypt-fix-benbi-iv-constructor-crash-if-used-in-authenticated-mode.patch [new file with mode: 0644]
queue-5.5/dm-crypt-fix-gfp-flags-passed-to-skcipher_request_alloc.patch [new file with mode: 0644]
queue-5.5/dm-fix-potential-for-q-make_request_fn-null-pointer.patch [new file with mode: 0644]
queue-5.5/dm-space-map-common-fix-to-ensure-new-block-isn-t-already-in-use.patch [new file with mode: 0644]
queue-5.5/dm-thin-fix-use-after-free-in-metadata_pre_commit_callback.patch [new file with mode: 0644]
queue-5.5/dm-thin-metadata-use-pool-locking-at-end-of-dm_pool_metadata_close.patch [new file with mode: 0644]
queue-5.5/dm-writecache-fix-incorrect-flush-sequence-when-doing-ssd-mode-commit.patch [new file with mode: 0644]
queue-5.5/dm-zoned-support-zone-sizes-smaller-than-128mib.patch [new file with mode: 0644]
queue-5.5/of-add-of_dma_default_coherent-select-it-on-powerpc.patch [new file with mode: 0644]
queue-5.5/series

diff --git a/queue-5.5/arm-dma-api-fix-max_pfn-off-by-one-error-in-__dma_supported.patch b/queue-5.5/arm-dma-api-fix-max_pfn-off-by-one-error-in-__dma_supported.patch
new file mode 100644 (file)
index 0000000..686d758
--- /dev/null
@@ -0,0 +1,72 @@
+From f3cc4e1d44a813a0685f2e558b78ace3db559722 Mon Sep 17 00:00:00 2001
+From: Chen-Yu Tsai <wens@csie.org>
+Date: Mon, 3 Feb 2020 17:37:48 -0800
+Subject: ARM: dma-api: fix max_pfn off-by-one error in __dma_supported()
+
+From: Chen-Yu Tsai <wens@csie.org>
+
+commit f3cc4e1d44a813a0685f2e558b78ace3db559722 upstream.
+
+max_pfn, as set in arch/arm/mm/init.c:
+
+    static void __init find_limits(unsigned long *min,
+                                  unsigned long *max_low,
+                                  unsigned long *max_high)
+    {
+           *max_low = PFN_DOWN(memblock_get_current_limit());
+           *min = PFN_UP(memblock_start_of_DRAM());
+           *max_high = PFN_DOWN(memblock_end_of_DRAM());
+    }
+
+with memblock_end_of_DRAM() pointing to the next byte after DRAM.  As
+such, max_pfn points to the PFN after the end of DRAM.
+
+Thus when using max_pfn to check DMA masks, we should subtract one when
+checking DMA ranges against it.
+
+Commit 8bf1268f48ad ("ARM: dma-api: fix off-by-one error in
+__dma_supported()") fixed the same issue, but missed this spot.
+
+This issue was found while working on the sun4i-csi v4l2 driver on the
+Allwinner R40 SoC.  On Allwinner SoCs, DRAM is offset at 0x40000000, and
+we are starting to use of_dma_configure() with the "dma-ranges" property
+in the device tree to have the DMA API handle the offset.
+
+In this particular instance, dma-ranges was set to the same range as the
+actual available (2 GiB) DRAM.  The following error appeared when the
+driver attempted to allocate a buffer:
+
+    sun4i-csi 1c09000.csi: Coherent DMA mask 0x7fffffff (pfn 0x40000-0xc0000)
+    covers a smaller range of system memory than the DMA zone pfn 0x0-0xc0001
+    sun4i-csi 1c09000.csi: dma_alloc_coherent of size 307200 failed
+
+Fixing the off-by-one error makes things work.
+
+Link: http://lkml.kernel.org/r/20191224030239.5656-1-wens@kernel.org
+Fixes: 11a5aa32562e ("ARM: dma-mapping: check DMA mask against available memory")
+Fixes: 9f28cde0bc64 ("ARM: another fix for the DMA mapping checks")
+Fixes: ab746573c405 ("ARM: dma-mapping: allow larger DMA mask than supported")
+Signed-off-by: Chen-Yu Tsai <wens@csie.org>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Cc: Russell King <linux@armlinux.org.uk>
+Cc: Robin Murphy <robin.murphy@arm.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/mm/dma-mapping.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -221,7 +221,7 @@ EXPORT_SYMBOL(arm_coherent_dma_ops);
+ static int __dma_supported(struct device *dev, u64 mask, bool warn)
+ {
+-      unsigned long max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
++      unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);
+       /*
+        * Translate the device's DMA mask to a PFN limit.  This
diff --git a/queue-5.5/dm-crypt-fix-benbi-iv-constructor-crash-if-used-in-authenticated-mode.patch b/queue-5.5/dm-crypt-fix-benbi-iv-constructor-crash-if-used-in-authenticated-mode.patch
new file mode 100644 (file)
index 0000000..607ce9d
--- /dev/null
@@ -0,0 +1,54 @@
+From 4ea9471fbd1addb25a4d269991dc724e200ca5b5 Mon Sep 17 00:00:00 2001
+From: Milan Broz <gmazyland@gmail.com>
+Date: Mon, 6 Jan 2020 10:11:47 +0100
+Subject: dm crypt: fix benbi IV constructor crash if used in authenticated mode
+
+From: Milan Broz <gmazyland@gmail.com>
+
+commit 4ea9471fbd1addb25a4d269991dc724e200ca5b5 upstream.
+
+If benbi IV is used in AEAD construction, for example:
+  cryptsetup luksFormat <device> --cipher twofish-xts-benbi --key-size 512 --integrity=hmac-sha256
+the constructor uses wrong skcipher function and crashes:
+
+ BUG: kernel NULL pointer dereference, address: 00000014
+ ...
+ EIP: crypt_iv_benbi_ctr+0x15/0x70 [dm_crypt]
+ Call Trace:
+  ? crypt_subkey_size+0x20/0x20 [dm_crypt]
+  crypt_ctr+0x567/0xfc0 [dm_crypt]
+  dm_table_add_target+0x15f/0x340 [dm_mod]
+
+Fix this by properly using crypt_aead_blocksize() in this case.
+
+Fixes: ef43aa38063a6 ("dm crypt: add cryptographic data integrity protection (authenticated encryption)")
+Cc: stable@vger.kernel.org # v4.12+
+Link: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=941051
+Reported-by: Jerad Simpson <jbsimpson@gmail.com>
+Signed-off-by: Milan Broz <gmazyland@gmail.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-crypt.c |   10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -331,8 +331,14 @@ static int crypt_iv_essiv_gen(struct cry
+ static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
+                             const char *opts)
+ {
+-      unsigned bs = crypto_skcipher_blocksize(any_tfm(cc));
+-      int log = ilog2(bs);
++      unsigned bs;
++      int log;
++
++      if (test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags))
++              bs = crypto_aead_blocksize(any_tfm_aead(cc));
++      else
++              bs = crypto_skcipher_blocksize(any_tfm(cc));
++      log = ilog2(bs);
+       /* we need to calculate how far we must shift the sector count
+        * to get the cipher block count, we use this shift in _gen */
diff --git a/queue-5.5/dm-crypt-fix-gfp-flags-passed-to-skcipher_request_alloc.patch b/queue-5.5/dm-crypt-fix-gfp-flags-passed-to-skcipher_request_alloc.patch
new file mode 100644 (file)
index 0000000..ac30e04
--- /dev/null
@@ -0,0 +1,35 @@
+From 9402e959014a18b4ebf7558733076875808dd66c Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Thu, 2 Jan 2020 08:23:32 -0500
+Subject: dm crypt: fix GFP flags passed to skcipher_request_alloc()
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 9402e959014a18b4ebf7558733076875808dd66c upstream.
+
+GFP_KERNEL is not supposed to be or'd with GFP_NOFS (the result is
+equivalent to GFP_KERNEL). Also, we use GFP_NOIO instead of GFP_NOFS
+because we don't want any I/O being submitted in the direct reclaim
+path.
+
+Fixes: 39d13a1ac41d ("dm crypt: reuse eboiv skcipher for IV generation")
+Cc: stable@vger.kernel.org # v5.4+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-crypt.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -717,7 +717,7 @@ static int crypt_iv_eboiv_gen(struct cry
+       struct crypto_wait wait;
+       int err;
+-      req = skcipher_request_alloc(any_tfm(cc), GFP_KERNEL | GFP_NOFS);
++      req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO);
+       if (!req)
+               return -ENOMEM;
diff --git a/queue-5.5/dm-fix-potential-for-q-make_request_fn-null-pointer.patch b/queue-5.5/dm-fix-potential-for-q-make_request_fn-null-pointer.patch
new file mode 100644 (file)
index 0000000..de12684
--- /dev/null
@@ -0,0 +1,64 @@
+From 47ace7e012b9f7ad71d43ac9063d335ea3d6820b Mon Sep 17 00:00:00 2001
+From: Mike Snitzer <snitzer@redhat.com>
+Date: Mon, 27 Jan 2020 14:07:23 -0500
+Subject: dm: fix potential for q->make_request_fn NULL pointer
+
+From: Mike Snitzer <snitzer@redhat.com>
+
+commit 47ace7e012b9f7ad71d43ac9063d335ea3d6820b upstream.
+
+Move blk_queue_make_request() to dm.c:alloc_dev() so that
+q->make_request_fn is never NULL during the lifetime of a DM device
+(even one that is created without a DM table).
+
+Otherwise generic_make_request() will crash simply by doing:
+  dmsetup create -n test
+  mount /dev/dm-N /mnt
+
+While at it, move ->congested_data initialization out of
+dm.c:alloc_dev() and into the bio-based specific init method.
+
+Reported-by: Stefan Bader <stefan.bader@canonical.com>
+BugLink: https://bugs.launchpad.net/bugs/1860231
+Fixes: ff36ab34583a ("dm: remove request-based logic from make_request_fn wrapper")
+Depends-on: c12c9a3c3860c ("dm: various cleanups to md->queue initialization code")
+Cc: stable@vger.kernel.org
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm.c |    9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1859,6 +1859,7 @@ static void dm_init_normal_md_queue(stru
+       /*
+        * Initialize aspects of queue that aren't relevant for blk-mq
+        */
++      md->queue->backing_dev_info->congested_data = md;
+       md->queue->backing_dev_info->congested_fn = dm_any_congested;
+ }
+@@ -1949,7 +1950,12 @@ static struct mapped_device *alloc_dev(i
+       if (!md->queue)
+               goto bad;
+       md->queue->queuedata = md;
+-      md->queue->backing_dev_info->congested_data = md;
++      /*
++       * default to bio-based required ->make_request_fn until DM
++       * table is loaded and md->type established. If request-based
++       * table is loaded: blk-mq will override accordingly.
++       */
++      blk_queue_make_request(md->queue, dm_make_request);
+       md->disk = alloc_disk_node(1, md->numa_node_id);
+       if (!md->disk)
+@@ -2264,7 +2270,6 @@ int dm_setup_md_queue(struct mapped_devi
+       case DM_TYPE_DAX_BIO_BASED:
+       case DM_TYPE_NVME_BIO_BASED:
+               dm_init_normal_md_queue(md);
+-              blk_queue_make_request(md->queue, dm_make_request);
+               break;
+       case DM_TYPE_NONE:
+               WARN_ON_ONCE(true);
diff --git a/queue-5.5/dm-space-map-common-fix-to-ensure-new-block-isn-t-already-in-use.patch b/queue-5.5/dm-space-map-common-fix-to-ensure-new-block-isn-t-already-in-use.patch
new file mode 100644 (file)
index 0000000..2c33202
--- /dev/null
@@ -0,0 +1,122 @@
+From 4feaef830de7ffdd8352e1fe14ad3bf13c9688f8 Mon Sep 17 00:00:00 2001
+From: Joe Thornber <ejt@redhat.com>
+Date: Tue, 7 Jan 2020 11:58:42 +0000
+Subject: dm space map common: fix to ensure new block isn't already in use
+
+From: Joe Thornber <ejt@redhat.com>
+
+commit 4feaef830de7ffdd8352e1fe14ad3bf13c9688f8 upstream.
+
+The space-maps track the reference counts for disk blocks allocated by
+both the thin-provisioning and cache targets.  There are variants for
+tracking metadata blocks and data blocks.
+
+Transactionality is implemented by never touching blocks from the
+previous transaction, so we can rollback in the event of a crash.
+
+When allocating a new block we need to ensure the block is free (has
+reference count of 0) in both the current and previous transaction.
+Prior to this fix we were doing this by searching for a free block in
+the previous transaction, and relying on a 'begin' counter to track
+where the last allocation in the current transaction was.  This
+'begin' field was not being updated in all code paths (eg, increment
+of a data block reference count due to breaking sharing of a neighbour
+block in the same btree leaf).
+
+This fix keeps the 'begin' field, but now it's just a hint to speed up
+the search.  Instead the current transaction is searched for a free
+block, and then the old transaction is double checked to ensure it's
+free.  Much simpler.
+
+This fixes reports of sm_disk_new_block()'s BUG_ON() triggering when
+DM thin-provisioning's snapshots are heavily used.
+
+Reported-by: Eric Wheeler <dm-devel@lists.ewheeler.net>
+Cc: stable@vger.kernel.org
+Signed-off-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/persistent-data/dm-space-map-common.c   |   27 +++++++++++++++++++++
+ drivers/md/persistent-data/dm-space-map-common.h   |    2 +
+ drivers/md/persistent-data/dm-space-map-disk.c     |    6 +++-
+ drivers/md/persistent-data/dm-space-map-metadata.c |    5 +++
+ 4 files changed, 37 insertions(+), 3 deletions(-)
+
+--- a/drivers/md/persistent-data/dm-space-map-common.c
++++ b/drivers/md/persistent-data/dm-space-map-common.c
+@@ -380,6 +380,33 @@ int sm_ll_find_free_block(struct ll_disk
+       return -ENOSPC;
+ }
++int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll,
++                               dm_block_t begin, dm_block_t end, dm_block_t *b)
++{
++      int r;
++      uint32_t count;
++
++      do {
++              r = sm_ll_find_free_block(new_ll, begin, new_ll->nr_blocks, b);
++              if (r)
++                      break;
++
++              /* double check this block wasn't used in the old transaction */
++              if (*b >= old_ll->nr_blocks)
++                      count = 0;
++              else {
++                      r = sm_ll_lookup(old_ll, *b, &count);
++                      if (r)
++                              break;
++
++                      if (count)
++                              begin = *b + 1;
++              }
++      } while (count);
++
++      return r;
++}
++
+ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
+                       int (*mutator)(void *context, uint32_t old, uint32_t *new),
+                       void *context, enum allocation_event *ev)
+--- a/drivers/md/persistent-data/dm-space-map-common.h
++++ b/drivers/md/persistent-data/dm-space-map-common.h
+@@ -109,6 +109,8 @@ int sm_ll_lookup_bitmap(struct ll_disk *
+ int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result);
+ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
+                         dm_block_t end, dm_block_t *result);
++int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll,
++                               dm_block_t begin, dm_block_t end, dm_block_t *result);
+ int sm_ll_insert(struct ll_disk *ll, dm_block_t b, uint32_t ref_count, enum allocation_event *ev);
+ int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev);
+ int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev);
+--- a/drivers/md/persistent-data/dm-space-map-disk.c
++++ b/drivers/md/persistent-data/dm-space-map-disk.c
+@@ -167,8 +167,10 @@ static int sm_disk_new_block(struct dm_s
+       enum allocation_event ev;
+       struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+-      /* FIXME: we should loop round a couple of times */
+-      r = sm_ll_find_free_block(&smd->old_ll, smd->begin, smd->old_ll.nr_blocks, b);
++      /*
++       * Any block we allocate has to be free in both the old and current ll.
++       */
++      r = sm_ll_find_common_free_block(&smd->old_ll, &smd->ll, smd->begin, smd->ll.nr_blocks, b);
+       if (r)
+               return r;
+--- a/drivers/md/persistent-data/dm-space-map-metadata.c
++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
+@@ -448,7 +448,10 @@ static int sm_metadata_new_block_(struct
+       enum allocation_event ev;
+       struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+-      r = sm_ll_find_free_block(&smm->old_ll, smm->begin, smm->old_ll.nr_blocks, b);
++      /*
++       * Any block we allocate has to be free in both the old and current ll.
++       */
++      r = sm_ll_find_common_free_block(&smm->old_ll, &smm->ll, smm->begin, smm->ll.nr_blocks, b);
+       if (r)
+               return r;
diff --git a/queue-5.5/dm-thin-fix-use-after-free-in-metadata_pre_commit_callback.patch b/queue-5.5/dm-thin-fix-use-after-free-in-metadata_pre_commit_callback.patch
new file mode 100644 (file)
index 0000000..e0bc570
--- /dev/null
@@ -0,0 +1,66 @@
+From a4a8d286586d4b28c8517a51db8d86954aadc74b Mon Sep 17 00:00:00 2001
+From: Mike Snitzer <snitzer@redhat.com>
+Date: Mon, 13 Jan 2020 12:29:04 -0500
+Subject: dm thin: fix use-after-free in metadata_pre_commit_callback
+
+From: Mike Snitzer <snitzer@redhat.com>
+
+commit a4a8d286586d4b28c8517a51db8d86954aadc74b upstream.
+
+dm-thin uses struct pool to hold the state of the pool. There may be
+multiple pool_c's pointing to a given pool, each pool_c represents a
+loaded target. pool_c's may be created and destroyed arbitrarily and the
+pool contains a reference count of pool_c's pointing to it.
+
+Since commit 694cfe7f31db3 ("dm thin: Flush data device before
+committing metadata") a pointer to pool_c is passed to
+dm_pool_register_pre_commit_callback and this function stores it in
+pmd->pre_commit_context. If this pool_c is freed, but pool is not
+(because there is another pool_c referencing it), we end up in a
+situation where pmd->pre_commit_context structure points to freed
+pool_c. It causes a crash in metadata_pre_commit_callback.
+
+Fix this by moving the dm_pool_register_pre_commit_callback() from
+pool_ctr() to pool_preresume(). This way the in-core thin-pool metadata
+is only ever armed with callback data whose lifetime matches the
+active thin-pool target.
+
+It should be noted that this fix preserves the ability to load a
+thin-pool table that uses a different data block device (that contains
+the same data) -- though it is unclear if that capability is still
+useful and/or needed.
+
+Fixes: 694cfe7f31db3 ("dm thin: Flush data device before committing metadata")
+Cc: stable@vger.kernel.org
+Reported-by: Zdenek Kabelac <zkabelac@redhat.com>
+Reported-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-thin.c |    7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -3408,10 +3408,6 @@ static int pool_ctr(struct dm_target *ti
+       if (r)
+               goto out_flags_changed;
+-      dm_pool_register_pre_commit_callback(pt->pool->pmd,
+-                                           metadata_pre_commit_callback,
+-                                           pt);
+-
+       pt->callbacks.congested_fn = pool_is_congested;
+       dm_table_add_target_callbacks(ti->table, &pt->callbacks);
+@@ -3574,6 +3570,9 @@ static int pool_preresume(struct dm_targ
+       if (r)
+               return r;
++      dm_pool_register_pre_commit_callback(pool->pmd,
++                                           metadata_pre_commit_callback, pt);
++
+       r = maybe_resize_data_dev(ti, &need_commit1);
+       if (r)
+               return r;
diff --git a/queue-5.5/dm-thin-metadata-use-pool-locking-at-end-of-dm_pool_metadata_close.patch b/queue-5.5/dm-thin-metadata-use-pool-locking-at-end-of-dm_pool_metadata_close.patch
new file mode 100644 (file)
index 0000000..66e266c
--- /dev/null
@@ -0,0 +1,83 @@
+From 44d8ebf436399a40fcd10dd31b29d37823d62fcc Mon Sep 17 00:00:00 2001
+From: Mike Snitzer <snitzer@redhat.com>
+Date: Mon, 13 Jan 2020 11:18:51 -0500
+Subject: dm thin metadata: use pool locking at end of dm_pool_metadata_close
+
+From: Mike Snitzer <snitzer@redhat.com>
+
+commit 44d8ebf436399a40fcd10dd31b29d37823d62fcc upstream.
+
+Ensure that the pool is locked during calls to __commit_transaction and
+__destroy_persistent_data_objects.  Just being consistent with locking,
+but reality is dm_pool_metadata_close is called once pool is being
+destroyed so access to pool shouldn't be contended.
+
+Also, use pmd_write_lock_in_core rather than __pmd_write_lock in
+dm_pool_commit_metadata and rename __pmd_write_lock to
+pmd_write_lock_in_core -- there was no need for the alias.
+
+In addition, verify that the pool is locked in __commit_transaction().
+
+Fixes: 873f258becca ("dm thin metadata: do not write metadata if no changes occurred")
+Cc: stable@vger.kernel.org
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-thin-metadata.c |   10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -387,16 +387,15 @@ static int subtree_equal(void *context,
+  * Variant that is used for in-core only changes or code that
+  * shouldn't put the pool in service on its own (e.g. commit).
+  */
+-static inline void __pmd_write_lock(struct dm_pool_metadata *pmd)
++static inline void pmd_write_lock_in_core(struct dm_pool_metadata *pmd)
+       __acquires(pmd->root_lock)
+ {
+       down_write(&pmd->root_lock);
+ }
+-#define pmd_write_lock_in_core(pmd) __pmd_write_lock((pmd))
+ static inline void pmd_write_lock(struct dm_pool_metadata *pmd)
+ {
+-      __pmd_write_lock(pmd);
++      pmd_write_lock_in_core(pmd);
+       if (unlikely(!pmd->in_service))
+               pmd->in_service = true;
+ }
+@@ -831,6 +830,7 @@ static int __commit_transaction(struct d
+        * We need to know if the thin_disk_superblock exceeds a 512-byte sector.
+        */
+       BUILD_BUG_ON(sizeof(struct thin_disk_superblock) > 512);
++      BUG_ON(!rwsem_is_locked(&pmd->root_lock));
+       if (unlikely(!pmd->in_service))
+               return 0;
+@@ -953,6 +953,7 @@ int dm_pool_metadata_close(struct dm_poo
+               return -EBUSY;
+       }
++      pmd_write_lock_in_core(pmd);
+       if (!dm_bm_is_read_only(pmd->bm) && !pmd->fail_io) {
+               r = __commit_transaction(pmd);
+               if (r < 0)
+@@ -961,6 +962,7 @@ int dm_pool_metadata_close(struct dm_poo
+       }
+       if (!pmd->fail_io)
+               __destroy_persistent_data_objects(pmd);
++      pmd_write_unlock(pmd);
+       kfree(pmd);
+       return 0;
+@@ -1841,7 +1843,7 @@ int dm_pool_commit_metadata(struct dm_po
+        * Care is taken to not have commit be what
+        * triggers putting the thin-pool in-service.
+        */
+-      __pmd_write_lock(pmd);
++      pmd_write_lock_in_core(pmd);
+       if (pmd->fail_io)
+               goto out;
diff --git a/queue-5.5/dm-writecache-fix-incorrect-flush-sequence-when-doing-ssd-mode-commit.patch b/queue-5.5/dm-writecache-fix-incorrect-flush-sequence-when-doing-ssd-mode-commit.patch
new file mode 100644 (file)
index 0000000..856d05d
--- /dev/null
@@ -0,0 +1,167 @@
+From aa9509209c5ac2f0b35d01a922bf9ae072d0c2fc Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Wed, 8 Jan 2020 10:46:05 -0500
+Subject: dm writecache: fix incorrect flush sequence when doing SSD mode commit
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit aa9509209c5ac2f0b35d01a922bf9ae072d0c2fc upstream.
+
+When committing state, the function writecache_flush does the following:
+1. write metadata (writecache_commit_flushed)
+2. flush disk cache (writecache_commit_flushed)
+3. wait for data writes to complete (writecache_wait_for_ios)
+4. increase superblock seq_count
+5. write the superblock
+6. flush disk cache
+
+It may happen that at step 3, when we wait for some write to finish, the
+disk may report the write as finished, but the write only hit the disk
+cache and it is not yet stored in persistent storage. At step 5 we write
+the superblock - it may happen that the superblock is written before the
+write that we waited for in step 3. If the machine crashes, it may result
+in incorrect data being returned after reboot.
+
+In order to fix the bug, we must swap steps 2 and 3 in the above sequence,
+so that we first wait for writes to complete and then flush the disk
+cache.
+
+Fixes: 48debafe4f2f ("dm: add writecache target")
+Cc: stable@vger.kernel.org # 4.18+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-writecache.c |   42 +++++++++++++++++++++---------------------
+ 1 file changed, 21 insertions(+), 21 deletions(-)
+
+--- a/drivers/md/dm-writecache.c
++++ b/drivers/md/dm-writecache.c
+@@ -442,7 +442,13 @@ static void writecache_notify_io(unsigne
+               complete(&endio->c);
+ }
+-static void ssd_commit_flushed(struct dm_writecache *wc)
++static void writecache_wait_for_ios(struct dm_writecache *wc, int direction)
++{
++      wait_event(wc->bio_in_progress_wait[direction],
++                 !atomic_read(&wc->bio_in_progress[direction]));
++}
++
++static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
+ {
+       struct dm_io_region region;
+       struct dm_io_request req;
+@@ -488,17 +494,20 @@ static void ssd_commit_flushed(struct dm
+       writecache_notify_io(0, &endio);
+       wait_for_completion_io(&endio.c);
++      if (wait_for_ios)
++              writecache_wait_for_ios(wc, WRITE);
++
+       writecache_disk_flush(wc, wc->ssd_dev);
+       memset(wc->dirty_bitmap, 0, wc->dirty_bitmap_size);
+ }
+-static void writecache_commit_flushed(struct dm_writecache *wc)
++static void writecache_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
+ {
+       if (WC_MODE_PMEM(wc))
+               wmb();
+       else
+-              ssd_commit_flushed(wc);
++              ssd_commit_flushed(wc, wait_for_ios);
+ }
+ static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
+@@ -522,12 +531,6 @@ static void writecache_disk_flush(struct
+               writecache_error(wc, r, "error flushing metadata: %d", r);
+ }
+-static void writecache_wait_for_ios(struct dm_writecache *wc, int direction)
+-{
+-      wait_event(wc->bio_in_progress_wait[direction],
+-                 !atomic_read(&wc->bio_in_progress[direction]));
+-}
+-
+ #define WFE_RETURN_FOLLOWING  1
+ #define WFE_LOWEST_SEQ                2
+@@ -724,15 +727,12 @@ static void writecache_flush(struct dm_w
+               e = e2;
+               cond_resched();
+       }
+-      writecache_commit_flushed(wc);
+-
+-      if (!WC_MODE_PMEM(wc))
+-              writecache_wait_for_ios(wc, WRITE);
++      writecache_commit_flushed(wc, true);
+       wc->seq_count++;
+       pmem_assign(sb(wc)->seq_count, cpu_to_le64(wc->seq_count));
+       writecache_flush_region(wc, &sb(wc)->seq_count, sizeof sb(wc)->seq_count);
+-      writecache_commit_flushed(wc);
++      writecache_commit_flushed(wc, false);
+       wc->overwrote_committed = false;
+@@ -756,7 +756,7 @@ static void writecache_flush(struct dm_w
+       }
+       if (need_flush_after_free)
+-              writecache_commit_flushed(wc);
++              writecache_commit_flushed(wc, false);
+ }
+ static void writecache_flush_work(struct work_struct *work)
+@@ -809,7 +809,7 @@ static void writecache_discard(struct dm
+       }
+       if (discarded_something)
+-              writecache_commit_flushed(wc);
++              writecache_commit_flushed(wc, false);
+ }
+ static bool writecache_wait_for_writeback(struct dm_writecache *wc)
+@@ -958,7 +958,7 @@ erase_this:
+       if (need_flush) {
+               writecache_flush_all_metadata(wc);
+-              writecache_commit_flushed(wc);
++              writecache_commit_flushed(wc, false);
+       }
+       wc_unlock(wc);
+@@ -1342,7 +1342,7 @@ static void __writecache_endio_pmem(stru
+                       wc->writeback_size--;
+                       n_walked++;
+                       if (unlikely(n_walked >= ENDIO_LATENCY)) {
+-                              writecache_commit_flushed(wc);
++                              writecache_commit_flushed(wc, false);
+                               wc_unlock(wc);
+                               wc_lock(wc);
+                               n_walked = 0;
+@@ -1423,7 +1423,7 @@ pop_from_list:
+                       writecache_wait_for_ios(wc, READ);
+               }
+-              writecache_commit_flushed(wc);
++              writecache_commit_flushed(wc, false);
+               wc_unlock(wc);
+       }
+@@ -1766,10 +1766,10 @@ static int init_memory(struct dm_writeca
+               write_original_sector_seq_count(wc, &wc->entries[b], -1, -1);
+       writecache_flush_all_metadata(wc);
+-      writecache_commit_flushed(wc);
++      writecache_commit_flushed(wc, false);
+       pmem_assign(sb(wc)->magic, cpu_to_le32(MEMORY_SUPERBLOCK_MAGIC));
+       writecache_flush_region(wc, &sb(wc)->magic, sizeof sb(wc)->magic);
+-      writecache_commit_flushed(wc);
++      writecache_commit_flushed(wc, false);
+       return 0;
+ }
diff --git a/queue-5.5/dm-zoned-support-zone-sizes-smaller-than-128mib.patch b/queue-5.5/dm-zoned-support-zone-sizes-smaller-than-128mib.patch
new file mode 100644 (file)
index 0000000..428f02e
--- /dev/null
@@ -0,0 +1,117 @@
+From b39962950339912978484cdac50069258545d753 Mon Sep 17 00:00:00 2001
+From: Dmitry Fomichev <dmitry.fomichev@wdc.com>
+Date: Mon, 23 Dec 2019 17:05:46 -0800
+Subject: dm zoned: support zone sizes smaller than 128MiB
+
+From: Dmitry Fomichev <dmitry.fomichev@wdc.com>
+
+commit b39962950339912978484cdac50069258545d753 upstream.
+
+dm-zoned is observed to log failed kernel assertions and not work
+correctly when operating against a device with a zone size smaller
+than 128MiB (e.g. 32768 bits per 4K block). The reason is that the
+bitmap size per zone is calculated as zero with such a small zone
+size. Fix this problem and also make the code related to zone bitmap
+management be able to handle per zone bitmaps smaller than a single
+block.
+
+A dm-zoned-tools patch is required to properly format dm-zoned devices
+with zone sizes smaller than 128MiB.
+
+Fixes: 3b1a94c88b79 ("dm zoned: drive-managed zoned block device target")
+Cc: stable@vger.kernel.org
+Signed-off-by: Dmitry Fomichev <dmitry.fomichev@wdc.com>
+Reviewed-by: Damien Le Moal <damien.lemoal@wdc.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-zoned-metadata.c |   23 ++++++++++++++---------
+ 1 file changed, 14 insertions(+), 9 deletions(-)
+
+--- a/drivers/md/dm-zoned-metadata.c
++++ b/drivers/md/dm-zoned-metadata.c
+@@ -134,6 +134,7 @@ struct dmz_metadata {
+       sector_t                zone_bitmap_size;
+       unsigned int            zone_nr_bitmap_blocks;
++      unsigned int            zone_bits_per_mblk;
+       unsigned int            nr_bitmap_blocks;
+       unsigned int            nr_map_blocks;
+@@ -1161,7 +1162,10 @@ static int dmz_init_zones(struct dmz_met
+       /* Init */
+       zmd->zone_bitmap_size = dev->zone_nr_blocks >> 3;
+-      zmd->zone_nr_bitmap_blocks = zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT;
++      zmd->zone_nr_bitmap_blocks =
++              max_t(sector_t, 1, zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT);
++      zmd->zone_bits_per_mblk = min_t(sector_t, dev->zone_nr_blocks,
++                                      DMZ_BLOCK_SIZE_BITS);
+       /* Allocate zone array */
+       zmd->zones = kcalloc(dev->nr_zones, sizeof(struct dm_zone), GFP_KERNEL);
+@@ -1956,7 +1960,7 @@ int dmz_copy_valid_blocks(struct dmz_met
+               dmz_release_mblock(zmd, to_mblk);
+               dmz_release_mblock(zmd, from_mblk);
+-              chunk_block += DMZ_BLOCK_SIZE_BITS;
++              chunk_block += zmd->zone_bits_per_mblk;
+       }
+       to_zone->weight = from_zone->weight;
+@@ -2017,7 +2021,7 @@ int dmz_validate_blocks(struct dmz_metad
+               /* Set bits */
+               bit = chunk_block & DMZ_BLOCK_MASK_BITS;
+-              nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);
++              nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
+               count = dmz_set_bits((unsigned long *)mblk->data, bit, nr_bits);
+               if (count) {
+@@ -2096,7 +2100,7 @@ int dmz_invalidate_blocks(struct dmz_met
+               /* Clear bits */
+               bit = chunk_block & DMZ_BLOCK_MASK_BITS;
+-              nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);
++              nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
+               count = dmz_clear_bits((unsigned long *)mblk->data,
+                                      bit, nr_bits);
+@@ -2156,6 +2160,7 @@ static int dmz_to_next_set_block(struct
+ {
+       struct dmz_mblock *mblk;
+       unsigned int bit, set_bit, nr_bits;
++      unsigned int zone_bits = zmd->zone_bits_per_mblk;
+       unsigned long *bitmap;
+       int n = 0;
+@@ -2170,15 +2175,15 @@ static int dmz_to_next_set_block(struct
+               /* Get offset */
+               bitmap = (unsigned long *) mblk->data;
+               bit = chunk_block & DMZ_BLOCK_MASK_BITS;
+-              nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);
++              nr_bits = min(nr_blocks, zone_bits - bit);
+               if (set)
+-                      set_bit = find_next_bit(bitmap, DMZ_BLOCK_SIZE_BITS, bit);
++                      set_bit = find_next_bit(bitmap, zone_bits, bit);
+               else
+-                      set_bit = find_next_zero_bit(bitmap, DMZ_BLOCK_SIZE_BITS, bit);
++                      set_bit = find_next_zero_bit(bitmap, zone_bits, bit);
+               dmz_release_mblock(zmd, mblk);
+               n += set_bit - bit;
+-              if (set_bit < DMZ_BLOCK_SIZE_BITS)
++              if (set_bit < zone_bits)
+                       break;
+               nr_blocks -= nr_bits;
+@@ -2281,7 +2286,7 @@ static void dmz_get_zone_weight(struct d
+               /* Count bits in this block */
+               bitmap = mblk->data;
+               bit = chunk_block & DMZ_BLOCK_MASK_BITS;
+-              nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);
++              nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
+               n += dmz_count_bits(bitmap, bit, nr_bits);
+               dmz_release_mblock(zmd, mblk);
diff --git a/queue-5.5/of-add-of_dma_default_coherent-select-it-on-powerpc.patch b/queue-5.5/of-add-of_dma_default_coherent-select-it-on-powerpc.patch
new file mode 100644 (file)
index 0000000..e72cf57
--- /dev/null
@@ -0,0 +1,80 @@
+From dabf6b36b83a18d57e3d4b9d50544ed040d86255 Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sun, 26 Jan 2020 22:52:47 +1100
+Subject: of: Add OF_DMA_DEFAULT_COHERENT & select it on powerpc
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit dabf6b36b83a18d57e3d4b9d50544ed040d86255 upstream.
+
+There's an OF helper called of_dma_is_coherent(), which checks if a
+device has a "dma-coherent" property to see if the device is coherent
+for DMA.
+
+But on some platforms devices are coherent by default, and on some
+platforms it's not possible to update existing device trees to add the
+"dma-coherent" property.
+
+So add a Kconfig symbol to allow arch code to tell
+of_dma_is_coherent() that devices are coherent by default, regardless
+of the presence of the property.
+
+Select that symbol on powerpc when NOT_COHERENT_CACHE is not set, ie.
+when the system has a coherent cache.
+
+Fixes: 92ea637edea3 ("of: introduce of_dma_is_coherent() helper")
+Cc: stable@vger.kernel.org # v3.16+
+Reported-by: Christian Zigotzky <chzigotzky@xenosoft.de>
+Tested-by: Christian Zigotzky <chzigotzky@xenosoft.de>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Rob Herring <robh@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/Kconfig |    1 +
+ drivers/of/Kconfig   |    4 ++++
+ drivers/of/address.c |    6 +++++-
+ 3 files changed, 10 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -238,6 +238,7 @@ config PPC
+       select NEED_DMA_MAP_STATE               if PPC64 || NOT_COHERENT_CACHE
+       select NEED_SG_DMA_LENGTH
+       select OF
++      select OF_DMA_DEFAULT_COHERENT          if !NOT_COHERENT_CACHE
+       select OF_EARLY_FLATTREE
+       select OLD_SIGACTION                    if PPC32
+       select OLD_SIGSUSPEND
+--- a/drivers/of/Kconfig
++++ b/drivers/of/Kconfig
+@@ -103,4 +103,8 @@ config OF_OVERLAY
+ config OF_NUMA
+       bool
++config OF_DMA_DEFAULT_COHERENT
++      # arches should select this if DMA is coherent by default for OF devices
++      bool
++
+ endif # OF
+--- a/drivers/of/address.c
++++ b/drivers/of/address.c
+@@ -995,12 +995,16 @@ out:
+  * @np:       device node
+  *
+  * It returns true if "dma-coherent" property was found
+- * for this device in DT.
++ * for this device in the DT, or if DMA is coherent by
++ * default for OF devices on the current platform.
+  */
+ bool of_dma_is_coherent(struct device_node *np)
+ {
+       struct device_node *node = of_node_get(np);
++      if (IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT))
++              return true;
++
+       while (node) {
+               if (of_property_read_bool(node, "dma-coherent")) {
+                       of_node_put(node);
index 2bbc60c7f5dfb7bba1ed6c8daeb54eaf265960ae..fd5678017aeb7844bd6b62f278b2513ce222c9e3 100644 (file)
@@ -135,3 +135,13 @@ f2fs-fix-dcache-lookup-of-casefolded-directories.patch
 f2fs-fix-race-conditions-in-d_compare-and-d_hash.patch
 pm-core-fix-handling-of-devices-deleted-during-system-wide-resume.patch
 cpufreq-avoid-creating-excessively-large-stack-frames.patch
+of-add-of_dma_default_coherent-select-it-on-powerpc.patch
+arm-dma-api-fix-max_pfn-off-by-one-error-in-__dma_supported.patch
+dm-zoned-support-zone-sizes-smaller-than-128mib.patch
+dm-space-map-common-fix-to-ensure-new-block-isn-t-already-in-use.patch
+dm-writecache-fix-incorrect-flush-sequence-when-doing-ssd-mode-commit.patch
+dm-crypt-fix-gfp-flags-passed-to-skcipher_request_alloc.patch
+dm-crypt-fix-benbi-iv-constructor-crash-if-used-in-authenticated-mode.patch
+dm-thin-metadata-use-pool-locking-at-end-of-dm_pool_metadata_close.patch
+dm-thin-fix-use-after-free-in-metadata_pre_commit_callback.patch
+dm-fix-potential-for-q-make_request_fn-null-pointer.patch