git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.18-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 1 Jul 2018 14:47:23 +0000 (16:47 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 1 Jul 2018 14:47:23 +0000 (16:47 +0200)
added patches:
block-fix-transfer-when-chunk-sectors-exceeds-max.patch
dm-thin-handle-running-out-of-data-space-vs-concurrent-discard.patch

queue-3.18/block-fix-transfer-when-chunk-sectors-exceeds-max.patch [new file with mode: 0644]
queue-3.18/dm-thin-handle-running-out-of-data-space-vs-concurrent-discard.patch [new file with mode: 0644]
queue-3.18/series

diff --git a/queue-3.18/block-fix-transfer-when-chunk-sectors-exceeds-max.patch b/queue-3.18/block-fix-transfer-when-chunk-sectors-exceeds-max.patch
new file mode 100644 (file)
index 0000000..2a4c241
--- /dev/null
@@ -0,0 +1,38 @@
+From 15bfd21fbc5d35834b9ea383dc458a1f0c9e3434 Mon Sep 17 00:00:00 2001
+From: Keith Busch <keith.busch@intel.com>
+Date: Tue, 26 Jun 2018 09:14:58 -0600
+Subject: block: Fix transfer when chunk sectors exceeds max
+
+From: Keith Busch <keith.busch@intel.com>
+
+commit 15bfd21fbc5d35834b9ea383dc458a1f0c9e3434 upstream.
+
+A device may have boundary restrictions where the number of sectors
+between boundaries exceeds its max transfer size. In this case, we need
+to cap the max size to the smaller of the two limits.
+
+Reported-by: Jitendra Bhivare <jitendra.bhivare@broadcom.com>
+Tested-by: Jitendra Bhivare <jitendra.bhivare@broadcom.com>
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Keith Busch <keith.busch@intel.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/blkdev.h |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -919,8 +919,8 @@ static inline unsigned int blk_max_size_
+       if (!q->limits.chunk_sectors)
+               return q->limits.max_sectors;
+-      return q->limits.chunk_sectors -
+-                      (offset & (q->limits.chunk_sectors - 1));
++      return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors -
++                      (offset & (q->limits.chunk_sectors - 1))));
+ }
+ static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
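To make the effect of the cap concrete, here is a minimal stand-alone model of the calculation the patch changes, written as plain user-space C rather than kernel code; the helper name max_size_at_offset() and the example limits (a 1024-sector chunk boundary with a 256-sector max transfer size) are made up for illustration.

/*
 * Minimal user-space model of the calculation changed above; not kernel
 * code.  The helper name and the example limits are invented.
 */
#include <stdio.h>

static unsigned int max_size_at_offset(unsigned int chunk_sectors,
				       unsigned int max_sectors,
				       unsigned long long offset)
{
	if (!chunk_sectors)
		return max_sectors;

	/* Sectors left until the next chunk boundary (chunk size is a power of two). */
	unsigned int to_boundary = chunk_sectors - (offset & (chunk_sectors - 1));

	/* The fix: never report more than the device's max transfer size. */
	return to_boundary < max_sectors ? to_boundary : max_sectors;
}

int main(void)
{
	/* Boundary every 1024 sectors, but a max transfer size of only 256 sectors. */
	unsigned int chunk = 1024, max = 256;

	/* Unpatched code would return 1024 here, exceeding the 256-sector limit. */
	printf("offset 0:   %u sectors\n", max_size_at_offset(chunk, max, 0));
	/* Near a boundary the remaining chunk (64 sectors) is the smaller limit. */
	printf("offset 960: %u sectors\n", max_size_at_offset(chunk, max, 960));
	return 0;
}

With the unpatched formula a request starting at offset 0 would be told it may transfer 1024 sectors, more than the device's 256-sector limit; the min() keeps it at 256, while near a boundary the smaller remaining chunk still wins.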
diff --git a/queue-3.18/dm-thin-handle-running-out-of-data-space-vs-concurrent-discard.patch b/queue-3.18/dm-thin-handle-running-out-of-data-space-vs-concurrent-discard.patch
new file mode 100644 (file)
index 0000000..36b0655
--- /dev/null
@@ -0,0 +1,92 @@
+From a685557fbbc3122ed11e8ad3fa63a11ebc5de8c3 Mon Sep 17 00:00:00 2001
+From: Mike Snitzer <snitzer@redhat.com>
+Date: Tue, 26 Jun 2018 12:04:23 -0400
+Subject: dm thin: handle running out of data space vs concurrent discard
+
+From: Mike Snitzer <snitzer@redhat.com>
+
+commit a685557fbbc3122ed11e8ad3fa63a11ebc5de8c3 upstream.
+
+Discards issued to a DM thin device can complete to userspace (via
+fstrim) _before_ the metadata changes associated with the discards are
+reflected in the thinp superblock (e.g. free blocks).  As such, if a
+user constructs a test that loops repeatedly over these steps, block
+allocation can fail due to discards not having completed yet:
+1) fill thin device via filesystem file
+2) remove file
+3) fstrim
+
+From the initial report, here:
+https://www.redhat.com/archives/dm-devel/2018-April/msg00022.html
+
+"The root cause of this issue is that dm-thin will first remove
+mapping and increase corresponding blocks' reference count to prevent
+them from being reused before DISCARD bios get processed by the
+underlying layers. However, increasing blocks' reference count could
+also increase the nr_allocated_this_transaction in struct sm_disk
+which makes smd->old_ll.nr_allocated +
+smd->nr_allocated_this_transaction bigger than smd->old_ll.nr_blocks.
+In this case, alloc_data_block() will never commit metadata to reset
+the begin pointer of struct sm_disk, because sm_disk_get_nr_free()
+always returns an underflow value."
+
+While there is room for improvement in the space-map accounting that
+thinp makes use of, the reality is that this test is inherently racy:
+the previous iteration's fstrim discard(s) will still be completing
+while concurrent block allocation, via dd, proceeds in the next
+iteration of the loop.
+
+No amount of space-map accounting improvement can allow users to use
+a block before a discard of that block has completed.
+
+So the best we can really do is allow DM thinp to gracefully handle such
+aggressive use of all the pool's data by degrading the pool into
+out-of-data-space (OODS) mode.  We _should_ get that behaviour already
+(if space-map accounting didn't falsely cause alloc_data_block() to
+believe free space was available), but short of that we handle the
+current reality that dm_pool_alloc_data_block() can return -ENOSPC.
+
+Reported-by: Dennis Yang <dennisyang@qnap.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-thin.c |   11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -992,6 +992,8 @@ static void schedule_external_copy(struc
+ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
++static void requeue_bios(struct pool *pool);
++
+ static void check_for_space(struct pool *pool)
+ {
+       int r;
+@@ -1004,8 +1006,10 @@ static void check_for_space(struct pool
+       if (r)
+               return;
+-      if (nr_free)
++      if (nr_free) {
+               set_pool_mode(pool, PM_WRITE);
++              requeue_bios(pool);
++      }
+ }
+ /*
+@@ -1082,7 +1086,10 @@ static int alloc_data_block(struct thin_
+       r = dm_pool_alloc_data_block(pool->pmd, result);
+       if (r) {
+-              metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
++              if (r == -ENOSPC)
++                      set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
++              else
++                      metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
+               return r;
+       }
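The core of the change is that -ENOSPC from the data space map now degrades the pool rather than being reported as a metadata failure. The following stand-alone sketch models just that decision in plain user-space C; the handle_alloc_error() helper is invented for illustration, and in the real driver metadata_operation_failed() and set_pool_mode() do considerably more than return a mode.

#include <errno.h>
#include <stdio.h>

/* Pool modes, mirroring (a subset of) the names used in the patch. */
enum pool_mode { PM_WRITE, PM_OUT_OF_DATA_SPACE, PM_FAIL };

/*
 * Model of the error handling added to alloc_data_block(): running out of
 * data blocks (-ENOSPC) is an expected, recoverable condition and degrades
 * the pool to out-of-data-space mode; any other error is still treated as
 * a metadata failure.
 */
static enum pool_mode handle_alloc_error(int r)
{
	if (r == -ENOSPC)
		return PM_OUT_OF_DATA_SPACE;
	return PM_FAIL;
}

int main(void)
{
	printf("-ENOSPC -> %s\n",
	       handle_alloc_error(-ENOSPC) == PM_OUT_OF_DATA_SPACE ?
	       "degrade to out-of-data-space mode" : "unexpected");
	printf("-EIO    -> %s\n",
	       handle_alloc_error(-EIO) == PM_FAIL ?
	       "metadata failure handling" : "unexpected");
	return 0;
}

The companion hunk in check_for_space() requeues held bios via requeue_bios() once free blocks are seen again, so I/O queued while the pool was out of data space can make progress after space is reclaimed.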
diff --git a/queue-3.18/series b/queue-3.18/series
index 212a6888b1c9cbfa2591a6aa640b72616b2e07f0..94b1080c3b29afd38245dfb97ccdaeddcd30c4d9 100644 (file)
--- a/queue-3.18/series
@@ -81,3 +81,5 @@ nfsd-restrict-rd_maxcount-to-svc_max_payload-in-nfsd_encode_readdir.patch
 video-uvesafb-fix-integer-overflow-in-allocation.patch
 xen-remove-unnecessary-bug_on-from-__unbind_from_irq.patch
 udf-detect-incorrect-directory-size.patch
+block-fix-transfer-when-chunk-sectors-exceeds-max.patch
+dm-thin-handle-running-out-of-data-space-vs-concurrent-discard.patch