1 From stable-bounces@linux.kernel.org Tue Oct 30 03:21:13 2007
2 From: Jens Axboe <jens.axboe@oracle.com>
3 Date: Tue, 30 Oct 2007 11:18:15 +0100
4 Subject: BLOCK: Fix bad sharing of tag busy list on queues with shared tag maps
6 Message-ID: <20071030101815.GO4993@kernel.dk>
7 Content-Disposition: inline
9 From: Jens Axboe <jens.axboe@oracle.com>
11 patch 6eca9004dfcb274a502438a591df5b197690afb1 in mainline.
13 For the locking to work, only the tag map and tag bit map may be shared
14 (incidentally, I was just explaining this to Nick yesterday, but I
15 apparently didn't review the code well enough myself). But we also share
16 the busy list! The busy_list must be queue private, or we need a
17 block_queue_tag covering lock as well.
19 So we have to move the busy_list to the queue. This'll work fine, and
20 it'll actually also fix a problem with blk_queue_invalidate_tags() which
21 will invalidate tags across all shared queues. This is a bit confusing,
22 the low level driver should call it for each queue separately since
23 otherwise you cannot kill tags on just a single queue for eg a hard
24 drive that stops responding. Since the function has no callers
25 currently, it's not an issue.
27 This is fixed with commit 6eca9004dfcb274a502438a591df5b197690afb1 in mainline.
30 Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
31 Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
34 block/ll_rw_blk.c | 8 +++-----
35 include/linux/blkdev.h | 2 +-
36 2 files changed, 4 insertions(+), 6 deletions(-)
38 --- a/block/ll_rw_blk.c
39 +++ b/block/ll_rw_blk.c
40 @@ -819,7 +819,6 @@ static int __blk_free_tags(struct blk_qu
41 retval = atomic_dec_and_test(&bqt->refcnt);
44 - BUG_ON(!list_empty(&bqt->busy_list));
46 kfree(bqt->tag_index);
47 bqt->tag_index = NULL;
48 @@ -931,7 +930,6 @@ static struct blk_queue_tag *__blk_queue
49 if (init_tag_map(q, tags, depth))
52 - INIT_LIST_HEAD(&tags->busy_list);
54 atomic_set(&tags->refcnt, 1);
56 @@ -982,6 +980,7 @@ int blk_queue_init_tags(struct request_q
59 q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
60 + INIT_LIST_HEAD(&q->tag_busy_list);
64 @@ -1152,7 +1151,7 @@ int blk_queue_start_tag(struct request_q
66 bqt->tag_index[tag] = rq;
67 blkdev_dequeue_request(rq);
68 - list_add(&rq->queuelist, &bqt->busy_list);
69 + list_add(&rq->queuelist, &q->tag_busy_list);
73 @@ -1173,11 +1172,10 @@ EXPORT_SYMBOL(blk_queue_start_tag);
75 void blk_queue_invalidate_tags(struct request_queue *q)
77 - struct blk_queue_tag *bqt = q->queue_tags;
78 struct list_head *tmp, *n;
81 - list_for_each_safe(tmp, n, &bqt->busy_list) {
82 + list_for_each_safe(tmp, n, &q->tag_busy_list) {
83 rq = list_entry_rq(tmp);
86 --- a/include/linux/blkdev.h
87 +++ b/include/linux/blkdev.h
88 @@ -356,7 +356,6 @@ enum blk_queue_state {
89 struct blk_queue_tag {
90 struct request **tag_index; /* map of busy tags */
91 unsigned long *tag_map; /* bit map of free/busy tags */
92 - struct list_head busy_list; /* fifo list of busy tags */
93 int busy; /* current depth */
94 int max_depth; /* what we will send to device */
95 int real_max_depth; /* what the array can hold */
96 @@ -451,6 +450,7 @@ struct request_queue
97 unsigned int dma_alignment;
99 struct blk_queue_tag *queue_tags;
100 + struct list_head tag_busy_list;
102 unsigned int nr_sorted;
103 unsigned int in_flight;