4.14-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 6 Apr 2020 08:49:45 +0000 (10:49 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 6 Apr 2020 08:49:45 +0000 (10:49 +0200)
added patches:
blk-mq-allow-blocking-queue-tag-iter-callbacks.patch

queue-4.14/blk-mq-allow-blocking-queue-tag-iter-callbacks.patch [new file with mode: 0644]
queue-4.14/series

diff --git a/queue-4.14/blk-mq-allow-blocking-queue-tag-iter-callbacks.patch b/queue-4.14/blk-mq-allow-blocking-queue-tag-iter-callbacks.patch
new file mode 100644
index 0000000..641cf0d
--- /dev/null
+++ b/queue-4.14/blk-mq-allow-blocking-queue-tag-iter-callbacks.patch
@@ -0,0 +1,58 @@
+From 530ca2c9bd6949c72c9b5cfc330cb3dbccaa3f5b Mon Sep 17 00:00:00 2001
+From: Keith Busch <keith.busch@intel.com>
+Date: Tue, 25 Sep 2018 10:36:20 -0600
+Subject: blk-mq: Allow blocking queue tag iter callbacks
+
+From: Keith Busch <keith.busch@intel.com>
+
+commit 530ca2c9bd6949c72c9b5cfc330cb3dbccaa3f5b upstream.
+
+A recent commit runs tag iterator callbacks under the rcu read lock,
+but existing callbacks do not satisfy the non-blocking requirement.
+The commit intended to prevent an iterator from accessing a queue that's
+being modified. This patch fixes the original issue by taking a queue
+reference instead of reading it, which allows callbacks to make blocking
+calls.
+
+Fixes: f5bbbbe4d6357 ("blk-mq: sync the update nr_hw_queues with blk_mq_queue_tag_busy_iter")
+Acked-by: Jianchao Wang <jianchao.w.wang@oracle.com>
+Signed-off-by: Keith Busch <keith.busch@intel.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Giuliano Procida <gprocida@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-mq-tag.c |   13 ++++---------
+ 1 file changed, 4 insertions(+), 9 deletions(-)
+
+--- a/block/blk-mq-tag.c
++++ b/block/blk-mq-tag.c
+@@ -336,16 +336,11 @@ void blk_mq_queue_tag_busy_iter(struct r
+       /*
+        * __blk_mq_update_nr_hw_queues will update the nr_hw_queues and
+-       * queue_hw_ctx after freeze the queue. So we could use q_usage_counter
+-       * to avoid race with it. __blk_mq_update_nr_hw_queues will users
+-       * synchronize_rcu to ensure all of the users go out of the critical
+-       * section below and see zeroed q_usage_counter.
++       * queue_hw_ctx after freeze the queue, so we use q_usage_counter
++       * to avoid race with it.
+        */
+-      rcu_read_lock();
+-      if (percpu_ref_is_zero(&q->q_usage_counter)) {
+-              rcu_read_unlock();
++      if (!percpu_ref_tryget(&q->q_usage_counter))
+               return;
+-      }
+       queue_for_each_hw_ctx(q, hctx, i) {
+               struct blk_mq_tags *tags = hctx->tags;
+@@ -361,7 +356,7 @@ void blk_mq_queue_tag_busy_iter(struct r
+                       bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
+               bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
+       }
+-      rcu_read_unlock();
++      blk_queue_exit(q);
+ }
+ static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
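
In short, the fix trades an RCU read-side critical section for a real reference on the queue. A minimal sketch of the resulting guard pattern in blk_mq_queue_tag_busy_iter(), paraphrased from the hunks above (the signature is reconstructed from the truncated hunk header and the loop body is elided; this is not verbatim 4.14 source):

void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
				void *priv)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	/*
	 * Take a reference on the queue; bail out if it is frozen or
	 * being torn down, i.e. q_usage_counter has already dropped
	 * to zero under __blk_mq_update_nr_hw_queues().
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	queue_for_each_hw_ctx(q, hctx, i) {
		/*
		 * ... walk the tag bitmaps, invoking fn() for each busy
		 * request; fn() may block here, since we hold a queue
		 * reference rather than rcu_read_lock() ...
		 */
	}

	/* Drop the reference taken above. */
	blk_queue_exit(q);
}

Holding q_usage_counter gives the same protection against __blk_mq_update_nr_hw_queues() reshaping queue_hw_ctx underneath the iterator, without forbidding the callbacks from sleeping.
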
diff --git a/queue-4.14/series b/queue-4.14/series
index 0c6b2f5d17c5967e6c956b8a86f9c69b37913f8d..3029bdba01bf6095c635930e50447167787beb9c 100644
--- a/queue-4.14/series
+++ b/queue-4.14/series
@@ -7,3 +7,4 @@ initramfs-restore-default-compression-behavior.patch
 tools-power-turbostat-fix-gcc-build-warnings.patch
 drm-etnaviv-replace-mmu-flush-marker-with-flush-sequence.patch
 blk-mq-sync-the-update-nr_hw_queues-with-blk_mq_queue_tag_busy_iter.patch
+blk-mq-allow-blocking-queue-tag-iter-callbacks.patch