Commit | Line | Data |
---|---|---|
1 | Subject: block: only call ->request_fn when the queue is not stopped | |
2 | From: Jens Axboe <jens.axboe@oracle.com> | |
3 | Date: Fri Oct 17 08:46:57 2008 +0200: | |
4 | Git: 80a4b58e36b63d7b0b592beb1bd6410aadeeb63c | |
5 | References: bnc#457041 | |
6 | ||
7 | Callers should use either blk_run_queue/__blk_run_queue, or | |
8 | blk_start_queueing() to invoke request handling instead of calling | |
9 | ->request_fn() directly as that does not take the queue stopped | |
10 | flag into account. | |
11 | ||
12 | Also add appropriate comments on the above functions to detail | |
13 | their usage. | |
14 | ||
15 | Signed-off-by: Jens Axboe <jens.axboe@oracle.com> | |
16 | Signed-off-by: Hannes Reinecke <hare@suse.de> | |
17 | ||
18 | --- | |
19 | block/blk-core.c | 19 +++++++++++++++++-- | |
20 | block/elevator.c | 7 +++---- | |
21 | 2 files changed, 20 insertions(+), 6 deletions(-) | |
22 | ||
23 | --- a/block/blk-core.c | |
24 | +++ b/block/blk-core.c | |
25 | @@ -324,6 +324,9 @@ EXPORT_SYMBOL(blk_unplug); | |
26 | ||
27 | static void blk_invoke_request_fn(struct request_queue *q) | |
28 | { | |
29 | + if (unlikely(blk_queue_stopped(q))) | |
30 | + return; | |
31 | + | |
32 | /* | |
33 | * one level of recursion is ok and is much faster than kicking | |
34 | * the unplug handling | |
35 | @@ -399,8 +402,13 @@ void blk_sync_queue(struct request_queue | |
36 | EXPORT_SYMBOL(blk_sync_queue); | |
37 | ||
38 | /** | |
39 | - * blk_run_queue - run a single device queue | |
40 | + * __blk_run_queue - run a single device queue | |
41 | * @q: The queue to run | |
42 | + * | |
43 | + * Description: | |
44 | + * See @blk_run_queue. This variant must be called with the queue lock | |
45 | + * held and interrupts disabled. | |
46 | + * | |
47 | */ | |
48 | void __blk_run_queue(struct request_queue *q) | |
49 | { | |
50 | @@ -418,6 +426,12 @@ EXPORT_SYMBOL(__blk_run_queue); | |
51 | /** | |
52 | * blk_run_queue - run a single device queue | |
53 | * @q: The queue to run | |
54 | + * | |
55 | + * Description: | |
56 | + * Invoke request handling on this queue, if it has pending work to do. | |
57 | + * May be used to restart queueing when a request has completed. Also | |
58 | + * See @blk_start_queueing. | |
59 | + * | |
60 | */ | |
61 | void blk_run_queue(struct request_queue *q) | |
62 | { | |
63 | @@ -883,7 +897,8 @@ EXPORT_SYMBOL(blk_get_request); | |
64 | * | |
65 | * This is basically a helper to remove the need to know whether a queue | |
66 | * is plugged or not if someone just wants to initiate dispatch of requests | |
67 | - * for this queue. | |
68 | + * for this queue. Should be used to start queueing on a device outside | |
69 | + * of ->request_fn() context. Also see @blk_run_queue. | |
70 | * | |
71 | * The queue lock must be held with interrupts disabled. | |
72 | */ | |
73 | --- a/block/elevator.c | |
74 | +++ b/block/elevator.c | |
75 | @@ -620,7 +620,7 @@ void elv_insert(struct request_queue *q, | |
76 | * processing. | |
77 | */ | |
78 | blk_remove_plug(q); | |
79 | - q->request_fn(q); | |
80 | + blk_start_queueing(q); | |
81 | break; | |
82 | ||
83 | case ELEVATOR_INSERT_SORT: | |
84 | @@ -951,7 +951,7 @@ void elv_completed_request(struct reques | |
85 | blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN && | |
86 | blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) { | |
87 | blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0); | |
88 | - q->request_fn(q); | |
89 | + blk_start_queueing(q); | |
90 | } | |
91 | } | |
92 | } | |
93 | @@ -1110,8 +1110,7 @@ static int elevator_switch(struct reques | |
94 | elv_drain_elevator(q); | |
95 | ||
96 | while (q->rq.elvpriv) { | |
97 | - blk_remove_plug(q); | |
98 | - q->request_fn(q); | |
99 | + blk_start_queueing(q); | |
100 | spin_unlock_irq(q->queue_lock); | |
101 | msleep(10); | |
102 | spin_lock_irq(q->queue_lock); |