From: Greg Kroah-Hartman
Date: Mon, 6 Apr 2020 08:49:24 +0000 (+0200)
Subject: 4.9-stable patches
X-Git-Tag: v5.4.31~38
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=88d29d44d9752493b89d291dddc0ec3bbfb5caf2;p=thirdparty%2Fkernel%2Fstable-queue.git

4.9-stable patches

added patches:
	blk-mq-allow-blocking-queue-tag-iter-callbacks.patch
---

diff --git a/queue-4.9/blk-mq-allow-blocking-queue-tag-iter-callbacks.patch b/queue-4.9/blk-mq-allow-blocking-queue-tag-iter-callbacks.patch
new file mode 100644
index 00000000000..e29b43e1be4
--- /dev/null
+++ b/queue-4.9/blk-mq-allow-blocking-queue-tag-iter-callbacks.patch
@@ -0,0 +1,58 @@
+From 530ca2c9bd6949c72c9b5cfc330cb3dbccaa3f5b Mon Sep 17 00:00:00 2001
+From: Keith Busch
+Date: Tue, 25 Sep 2018 10:36:20 -0600
+Subject: blk-mq: Allow blocking queue tag iter callbacks
+
+From: Keith Busch
+
+commit 530ca2c9bd6949c72c9b5cfc330cb3dbccaa3f5b upstream.
+
+A recent commit runs tag iterator callbacks under the rcu read lock,
+but existing callbacks do not satisfy the non-blocking requirement.
+The commit intended to prevent an iterator from accessing a queue that's
+being modified. This patch fixes the original issue by taking a queue
+reference instead of reading it, which allows callbacks to make blocking
+calls.
+
+Fixes: f5bbbbe4d6357 ("blk-mq: sync the update nr_hw_queues with blk_mq_queue_tag_busy_iter")
+Acked-by: Jianchao Wang
+Signed-off-by: Keith Busch
+Signed-off-by: Jens Axboe
+Signed-off-by: Giuliano Procida
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ block/blk-mq-tag.c |   13 ++++---------
+ 1 file changed, 4 insertions(+), 9 deletions(-)
+
+--- a/block/blk-mq-tag.c
++++ b/block/blk-mq-tag.c
+@@ -338,16 +338,11 @@ void blk_mq_queue_tag_busy_iter(struct r
+ 
+ 	/*
+ 	 * __blk_mq_update_nr_hw_queues will update the nr_hw_queues and
+-	 * queue_hw_ctx after freeze the queue. So we could use q_usage_counter
+-	 * to avoid race with it. __blk_mq_update_nr_hw_queues will users
+-	 * synchronize_rcu to ensure all of the users go out of the critical
+-	 * section below and see zeroed q_usage_counter.
++	 * queue_hw_ctx after freeze the queue, so we use q_usage_counter
++	 * to avoid race with it.
+ 	 */
+-	rcu_read_lock();
+-	if (percpu_ref_is_zero(&q->q_usage_counter)) {
+-		rcu_read_unlock();
++	if (!percpu_ref_tryget(&q->q_usage_counter))
+ 		return;
+-	}
+ 
+ 	queue_for_each_hw_ctx(q, hctx, i) {
+ 		struct blk_mq_tags *tags = hctx->tags;
+@@ -363,7 +358,7 @@ void blk_mq_queue_tag_busy_iter(struct r
+ 		bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
+ 		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
+ 	}
+-	rcu_read_unlock();
++	blk_queue_exit(q);
+ }
+ 
+ static unsigned int bt_unused_tags(const struct sbitmap_queue *bt)
diff --git a/queue-4.9/series b/queue-4.9/series
index 3b93a345d37..932fa24848a 100644
--- a/queue-4.9/series
+++ b/queue-4.9/series
@@ -7,3 +7,4 @@ sctp-fix-refcount-bug-in-sctp_wfree.patch
 sctp-fix-possibly-using-a-bad-saddr-with-a-given-dst.patch
 drm-etnaviv-replace-mmu-flush-marker-with-flush-sequence.patch
 blk-mq-sync-the-update-nr_hw_queues-with-blk_mq_queue_tag_busy_iter.patch
+blk-mq-allow-blocking-queue-tag-iter-callbacks.patch
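
For readers unfamiliar with the pattern the fix relies on: the old code peeked
at q_usage_counter under rcu_read_lock(), which forced the tag iterator
callbacks to be non-blocking; the new code takes an actual reference with
percpu_ref_tryget() and drops it with blk_queue_exit(), so callbacks are free
to sleep. The standalone userspace C sketch below models that
tryget-then-release idiom only; the names (queue_tryget, queue_exit,
busy_iter) are invented for illustration and are not kernel APIs, and the
plain atomic counter glosses over the real percpu_ref machinery (in
particular, the dying check and the increment are not atomic as a pair here,
which percpu_ref_tryget handles properly).

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct queue {
	atomic_int usage;	/* stands in for q->q_usage_counter */
	bool dying;		/* set when the queue is being torn down */
};

/*
 * Take a reference only while the queue is live; models
 * percpu_ref_tryget(). Simplified: check-then-increment is not one
 * atomic step here, unlike the real percpu_ref implementation.
 */
static bool queue_tryget(struct queue *q)
{
	if (q->dying)
		return false;
	atomic_fetch_add(&q->usage, 1);
	return true;
}

/* Drop the reference; models blk_queue_exit(). */
static void queue_exit(struct queue *q)
{
	atomic_fetch_sub(&q->usage, 1);
}

/*
 * Iterate over tags while holding a reference. Because no RCU
 * read-side lock is held, fn() is allowed to block.
 */
static void busy_iter(struct queue *q, void (*fn)(int tag))
{
	if (!queue_tryget(q))	/* queue going away: bail out */
		return;
	for (int tag = 0; tag < 4; tag++)
		fn(tag);
	queue_exit(q);
}

static void print_tag(int tag)
{
	printf("busy tag %d\n", tag);
}

int main(void)
{
	struct queue q = { .usage = 0, .dying = false };
	busy_iter(&q, print_tag);
	return 0;
}

The point of tryget over is_zero is ownership: a successful queue_tryget()
guarantees the queue cannot finish teardown until the matching queue_exit(),
whereas merely observing a non-zero counter inside an RCU read-side section
guarantees nothing once that section has to end so the callbacks can sleep.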