4.9-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 6 Apr 2020 08:49:24 +0000 (10:49 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 6 Apr 2020 08:49:24 +0000 (10:49 +0200)
added patches:
blk-mq-allow-blocking-queue-tag-iter-callbacks.patch

queue-4.9/blk-mq-allow-blocking-queue-tag-iter-callbacks.patch [new file with mode: 0644]
queue-4.9/series

diff --git a/queue-4.9/blk-mq-allow-blocking-queue-tag-iter-callbacks.patch b/queue-4.9/blk-mq-allow-blocking-queue-tag-iter-callbacks.patch
new file mode 100644
index 0000000..e29b43e
--- /dev/null
+++ b/queue-4.9/blk-mq-allow-blocking-queue-tag-iter-callbacks.patch
@@ -0,0 +1,58 @@
+From 530ca2c9bd6949c72c9b5cfc330cb3dbccaa3f5b Mon Sep 17 00:00:00 2001
+From: Keith Busch <keith.busch@intel.com>
+Date: Tue, 25 Sep 2018 10:36:20 -0600
+Subject: blk-mq: Allow blocking queue tag iter callbacks
+
+From: Keith Busch <keith.busch@intel.com>
+
+commit 530ca2c9bd6949c72c9b5cfc330cb3dbccaa3f5b upstream.
+
+A recent commit runs tag iterator callbacks under the rcu read lock,
+but existing callbacks do not satisfy the non-blocking requirement.
+The commit intended to prevent an iterator from accessing a queue that's
+being modified. This patch fixes the original issue by taking a queue
+reference instead of reading it, which allows callbacks to make blocking
+calls.
+
+Fixes: f5bbbbe4d6357 ("blk-mq: sync the update nr_hw_queues with blk_mq_queue_tag_busy_iter")
+Acked-by: Jianchao Wang <jianchao.w.wang@oracle.com>
+Signed-off-by: Keith Busch <keith.busch@intel.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Giuliano Procida <gprocida@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-mq-tag.c |   13 ++++---------
+ 1 file changed, 4 insertions(+), 9 deletions(-)
+
+--- a/block/blk-mq-tag.c
++++ b/block/blk-mq-tag.c
+@@ -338,16 +338,11 @@ void blk_mq_queue_tag_busy_iter(struct r
+       /*
+        * __blk_mq_update_nr_hw_queues will update the nr_hw_queues and
+-       * queue_hw_ctx after freeze the queue. So we could use q_usage_counter
+-       * to avoid race with it. __blk_mq_update_nr_hw_queues will users
+-       * synchronize_rcu to ensure all of the users go out of the critical
+-       * section below and see zeroed q_usage_counter.
++       * queue_hw_ctx after freeze the queue, so we use q_usage_counter
++       * to avoid race with it.
+        */
+-      rcu_read_lock();
+-      if (percpu_ref_is_zero(&q->q_usage_counter)) {
+-              rcu_read_unlock();
++      if (!percpu_ref_tryget(&q->q_usage_counter))
+               return;
+-      }
+
+       queue_for_each_hw_ctx(q, hctx, i) {
+               struct blk_mq_tags *tags = hctx->tags;
+
+@@ -363,7 +358,7 @@ void blk_mq_queue_tag_busy_iter(struct r
+                       bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
+               bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
+       }
+-      rcu_read_unlock();
++      blk_queue_exit(q);
+ }
+
+ static unsigned int bt_unused_tags(const struct sbitmap_queue *bt)
diff --git a/queue-4.9/series b/queue-4.9/series
index 3b93a345d379ed94eb3624efb72a0d0c462dc80c..932fa24848ad288b21b67d3d5d68e525025379e2 100644
--- a/queue-4.9/series
+++ b/queue-4.9/series
@@ -7,3 +7,4 @@ sctp-fix-refcount-bug-in-sctp_wfree.patch
 sctp-fix-possibly-using-a-bad-saddr-with-a-given-dst.patch
 drm-etnaviv-replace-mmu-flush-marker-with-flush-sequence.patch
 blk-mq-sync-the-update-nr_hw_queues-with-blk_mq_queue_tag_busy_iter.patch
+blk-mq-allow-blocking-queue-tag-iter-callbacks.patch
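
The change in the patch above boils down to one pattern: replace an RCU read-side critical section, under which callbacks must not sleep, with a reference count whose tryget fails once the queue is frozen. The userspace C sketch below models that pattern with a plain atomic standing in for the kernel's percpu_ref q_usage_counter; queue_tryget(), queue_exit(), struct queue, and the tag walk are illustrative stand-ins, not the kernel API.

    /*
     * Minimal sketch of the pattern used by this patch: guard an
     * iteration with a reference count instead of rcu_read_lock().
     * Unlike an RCU read-side critical section, holding a reference
     * lets iterator callbacks block (sleep, allocate, take mutexes).
     */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct queue {
            atomic_int usage;       /* 0 means frozen/being torn down */
            int tags[4];            /* stand-in for hw-context tag sets */
    };

    /* Mirrors percpu_ref_tryget(): fail once the count has hit zero. */
    static bool queue_tryget(struct queue *q)
    {
            int old = atomic_load(&q->usage);

            while (old > 0) {
                    /* on failure, old is reloaded and we retry */
                    if (atomic_compare_exchange_weak(&q->usage, &old, old + 1))
                            return true;
            }
            return false;           /* queue is going away; do not touch it */
    }

    /* Mirrors blk_queue_exit(): drop the reference taken above. */
    static void queue_exit(struct queue *q)
    {
            atomic_fetch_sub(&q->usage, 1);
    }

    typedef void (*busy_iter_fn)(int tag, void *priv);

    static void queue_tag_busy_iter(struct queue *q, busy_iter_fn fn, void *priv)
    {
            if (!queue_tryget(q))
                    return;         /* raced with teardown; bail out safely */
            for (size_t i = 0; i < sizeof(q->tags) / sizeof(q->tags[0]); i++)
                    fn(q->tags[i], priv);   /* fn may block here */
            queue_exit(q);
    }

    static void print_tag(int tag, void *priv)
    {
            (void)priv;
            printf("tag %d\n", tag);
    }

    int main(void)
    {
            struct queue q = { .usage = 1, .tags = { 3, 1, 4, 1 } };

            queue_tag_busy_iter(&q, print_tag, NULL);
            return 0;
    }

The single shared atomic above would bounce a cacheline on every iteration in a hot path, which is one reason blk-mq uses a percpu_ref for q_usage_counter rather than a plain refcount.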