From e861857545567adec8da3bdff728efdf7db12285 Mon Sep 17 00:00:00 2001
From: Jens Axboe <axboe@kernel.dk>
Date: Mon, 25 Mar 2019 12:34:10 -0600
Subject: blk-mq: fix sbitmap ws_active for shared tags

From: Jens Axboe <axboe@kernel.dk>

commit e861857545567adec8da3bdff728efdf7db12285 upstream.

We now wrap sbitmap waitqueues in an active counter, so we can avoid
iterating wakeups unless we have waiters there. This works as long as
everyone that's manipulating the waitqueues uses the proper helpers. For
the tag wait case for shared tags, however, we add ourselves to the
waitqueue without incrementing/decrementing the ->ws_active count. This
means that wakeups can take a long time to happen.

Fix this by manually doing the inc/dec as needed for the wait queue
handling.

Reported-by: Michael Leun <kbug@newton.leun.net>
Tested-by: Michael Leun <kbug@newton.leun.net>
Cc: stable@vger.kernel.org
Reviewed-by: Omar Sandoval <osandov@fb.com>
Fixes: 5d2ee7122c73 ("sbitmap: optimize wakeup check")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 block/blk-mq.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1076,7 +1076,13 @@ static int blk_mq_dispatch_wake(wait_que
 	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
 
 	spin_lock(&hctx->dispatch_wait_lock);
-	list_del_init(&wait->entry);
+	if (!list_empty(&wait->entry)) {
+		struct sbitmap_queue *sbq;
+
+		list_del_init(&wait->entry);
+		sbq = &hctx->tags->bitmap_tags;
+		atomic_dec(&sbq->ws_active);
+	}
 	spin_unlock(&hctx->dispatch_wait_lock);
 
 	blk_mq_run_hw_queue(hctx, true);
@@ -1092,6 +1098,7 @@ static int blk_mq_dispatch_wake(wait_que
 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
 				 struct request *rq)
 {
+	struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
 	struct wait_queue_head *wq;
 	wait_queue_entry_t *wait;
 	bool ret;
@@ -1115,7 +1122,7 @@ static bool blk_mq_mark_tag_wait(struct
 	if (!list_empty_careful(&wait->entry))
 		return false;
 
-	wq = &bt_wait_ptr(&hctx->tags->bitmap_tags, hctx)->wait;
+	wq = &bt_wait_ptr(sbq, hctx)->wait;
 
 	spin_lock_irq(&wq->lock);
 	spin_lock(&hctx->dispatch_wait_lock);
@@ -1125,6 +1132,7 @@ static bool blk_mq_mark_tag_wait(struct
 		return false;
 	}
 
+	atomic_inc(&sbq->ws_active);
 	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
 	__add_wait_queue(wq, wait);
 
@@ -1145,6 +1153,7 @@ static bool blk_mq_mark_tag_wait(struct
 	 * someone else gets the wakeup.
 	 */
 	list_del_init(&wait->entry);
+	atomic_dec(&sbq->ws_active);
 	spin_unlock(&hctx->dispatch_wait_lock);
 	spin_unlock_irq(&wq->lock);
 