From 43c9bb7d352ab72c3a0e2fe1872904d41acf8f02 Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Mon, 6 Apr 2020 10:49:45 +0200
Subject: [PATCH] 4.14-stable patches

added patches:
	blk-mq-allow-blocking-queue-tag-iter-callbacks.patch
---
 ...ow-blocking-queue-tag-iter-callbacks.patch | 58 +++++++++++++++++++
 queue-4.14/series                             |  1 +
 2 files changed, 59 insertions(+)
 create mode 100644 queue-4.14/blk-mq-allow-blocking-queue-tag-iter-callbacks.patch

diff --git a/queue-4.14/blk-mq-allow-blocking-queue-tag-iter-callbacks.patch b/queue-4.14/blk-mq-allow-blocking-queue-tag-iter-callbacks.patch
new file mode 100644
index 00000000000..641cf0de1c2
--- /dev/null
+++ b/queue-4.14/blk-mq-allow-blocking-queue-tag-iter-callbacks.patch
@@ -0,0 +1,58 @@
+From 530ca2c9bd6949c72c9b5cfc330cb3dbccaa3f5b Mon Sep 17 00:00:00 2001
+From: Keith Busch
+Date: Tue, 25 Sep 2018 10:36:20 -0600
+Subject: blk-mq: Allow blocking queue tag iter callbacks
+
+From: Keith Busch
+
+commit 530ca2c9bd6949c72c9b5cfc330cb3dbccaa3f5b upstream.
+
+A recent commit runs tag iterator callbacks under the rcu read lock,
+but existing callbacks do not satisfy the non-blocking requirement.
+The commit intended to prevent an iterator from accessing a queue that's
+being modified. This patch fixes the original issue by taking a queue
+reference instead of reading it, which allows callbacks to make blocking
+calls.
+
+Fixes: f5bbbbe4d6357 ("blk-mq: sync the update nr_hw_queues with blk_mq_queue_tag_busy_iter")
+Acked-by: Jianchao Wang
+Signed-off-by: Keith Busch
+Signed-off-by: Jens Axboe
+Signed-off-by: Giuliano Procida
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ block/blk-mq-tag.c | 13 ++++---------
+ 1 file changed, 4 insertions(+), 9 deletions(-)
+
+--- a/block/blk-mq-tag.c
++++ b/block/blk-mq-tag.c
+@@ -336,16 +336,11 @@ void blk_mq_queue_tag_busy_iter(struct r
+ 
+ 	/*
+ 	 * __blk_mq_update_nr_hw_queues will update the nr_hw_queues and
+-	 * queue_hw_ctx after freeze the queue. So we could use q_usage_counter
+-	 * to avoid race with it. __blk_mq_update_nr_hw_queues will users
+-	 * synchronize_rcu to ensure all of the users go out of the critical
+-	 * section below and see zeroed q_usage_counter.
++	 * queue_hw_ctx after freeze the queue, so we use q_usage_counter
++	 * to avoid race with it.
+ 	 */
+-	rcu_read_lock();
+-	if (percpu_ref_is_zero(&q->q_usage_counter)) {
+-		rcu_read_unlock();
++	if (!percpu_ref_tryget(&q->q_usage_counter))
+ 		return;
+-	}
+ 
+ 	queue_for_each_hw_ctx(q, hctx, i) {
+ 		struct blk_mq_tags *tags = hctx->tags;
+@@ -361,7 +356,7 @@ void blk_mq_queue_tag_busy_iter(struct r
+ 		bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
+ 		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
+ 	}
+-	rcu_read_unlock();
++	blk_queue_exit(q);
+ }
+ 
+ static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
diff --git a/queue-4.14/series b/queue-4.14/series
index 0c6b2f5d17c..3029bdba01b 100644
--- a/queue-4.14/series
+++ b/queue-4.14/series
@@ -7,3 +7,4 @@ initramfs-restore-default-compression-behavior.patch
 tools-power-turbostat-fix-gcc-build-warnings.patch
 drm-etnaviv-replace-mmu-flush-marker-with-flush-sequence.patch
 blk-mq-sync-the-update-nr_hw_queues-with-blk_mq_queue_tag_busy_iter.patch
+blk-mq-allow-blocking-queue-tag-iter-callbacks.patch
-- 
2.47.3
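
Background note: the fix replaces an RCU read-side critical section (inside
which callbacks must not sleep) with a reference count on the queue.
percpu_ref_tryget() fails once the queue's q_usage_counter has been killed,
and a successful tryget pins the queue until the matching blk_queue_exit(),
so the per-tag callbacks are free to block. The sketch below is a minimal,
self-contained userspace analog of that pattern, compilable with any C11
compiler; the names queue_tryget(), queue_exit(), busy_iter() and the plain
atomic counter are illustrative stand-ins, not kernel API.

	/*
	 * Userspace analog of the percpu_ref_tryget()/blk_queue_exit()
	 * pattern above. All names are hypothetical; this is not kernel code.
	 */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct queue {
		atomic_int usage;	/* stands in for q->q_usage_counter */
		atomic_bool dying;	/* set when teardown begins */
	};

	/* Analog of percpu_ref_tryget(): refuse new users once dying. */
	static bool queue_tryget(struct queue *q)
	{
		if (atomic_load(&q->dying))
			return false;
		atomic_fetch_add(&q->usage, 1);
		return true;
	}

	/* Analog of blk_queue_exit(): drop the reference taken above. */
	static void queue_exit(struct queue *q)
	{
		atomic_fetch_sub(&q->usage, 1);
	}

	/*
	 * Analog of blk_mq_queue_tag_busy_iter(): because we hold a
	 * reference instead of the RCU read lock, fn() may block.
	 */
	static void busy_iter(struct queue *q, void (*fn)(int tag))
	{
		if (!queue_tryget(q))
			return;		/* queue is going away; skip it */
		for (int tag = 0; tag < 4; tag++)
			fn(tag);	/* free to sleep here */
		queue_exit(q);
	}

	static void print_tag(int tag)
	{
		printf("busy tag %d\n", tag);
	}

	int main(void)
	{
		struct queue q = { .usage = 0, .dying = false };

		busy_iter(&q, print_tag);	/* visits tags 0..3 */
		atomic_store(&q.dying, true);
		busy_iter(&q, print_tag);	/* no-op: tryget fails */
		return 0;
	}

Unlike this simplified analog, the kernel's percpu_ref makes the dying check
and the increment atomic with respect to teardown, so a successful tryget is
a hard guarantee that the queue outlives the iteration; the analog's separate
flag check only illustrates the shape of the pattern, not its exact
synchronization.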