]>
Commit | Line | Data |
---|---|---|
2cb7cef9 BS |
1 | From: Hannes Reinecke <hare@suse.de> |
2 | Subject: Handle I/O on stopped queues correctly | |
3 | References: bnc#458393 | |
4 | ||
5 | The current multipath infrastructure has several issues when I/O is | |
6 | submitted during a table reload. | |
7 | The make_request_fn must not fail if no table is present, as I/O | |
8 | will be queued properly in the request queue. But on the other hand | |
9 | we should not submit queued I/Os if the queue of the underlying | |
10 | device is stopped; that queue is undergoing reconfiguration | |
11 | at that time and may be in all sorts of states. | |
12 | ||
13 | And while we're at it, we should detach any hardware handler | |
14 | if the multipath table doesn't specify one. | |
15 | ||
16 | Signed-off-by: Hannes Reinecke <hare@suse.de> | |
17 | ||
18 | --- | |
19 | drivers/md/dm-mpath.c | 45 ++++++++++++++++++++++++++++----------------- | |
20 | drivers/md/dm.c | 6 +++++- | |
21 | 2 files changed, 33 insertions(+), 18 deletions(-) | |
22 | ||
23 | --- a/drivers/md/dm-mpath.c | |
24 | +++ b/drivers/md/dm-mpath.c | |
25 | @@ -159,9 +159,7 @@ static struct priority_group *alloc_prio | |
26 | ||
27 | static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti) | |
28 | { | |
29 | - unsigned long flags; | |
30 | struct pgpath *pgpath, *tmp; | |
31 | - struct multipath *m = ti->private; | |
32 | ||
33 | list_for_each_entry_safe(pgpath, tmp, pgpaths, list) { | |
34 | list_del(&pgpath->list); | |
35 | @@ -436,8 +434,8 @@ static void process_queued_ios(struct wo | |
36 | { | |
37 | struct multipath *m = | |
38 | container_of(work, struct multipath, process_queued_ios); | |
39 | - struct pgpath *pgpath = NULL, *tmp; | |
40 | - unsigned must_queue = 1; | |
41 | + struct pgpath *pgpath = NULL, *tmp; | |
42 | + unsigned must_queue = 1; | |
43 | unsigned long flags; | |
44 | ||
45 | spin_lock_irqsave(&m->lock, flags); | |
46 | @@ -450,6 +448,12 @@ static void process_queued_ios(struct wo | |
47 | ||
48 | pgpath = m->current_pgpath; | |
49 | ||
50 | + if (pgpath) { | |
51 | + struct block_device *bdev = pgpath->path.dev->bdev; | |
52 | + if (unlikely(blk_queue_stopped(bdev_get_queue(bdev)))) | |
53 | + goto out; | |
54 | + } | |
55 | + | |
56 | if ((pgpath && !m->queue_io) || | |
57 | (!pgpath && !m->queue_if_no_path)) | |
58 | must_queue = 0; | |
59 | @@ -625,22 +629,24 @@ static struct pgpath *parse_path(struct | |
60 | memcpy(p->path.pdev, p->path.dev->name, 16); | |
61 | } | |
62 | ||
63 | - if (m->hw_handler_name && p->path.dev) { | |
64 | + if (p->path.dev) { | |
65 | struct request_queue *q = bdev_get_queue(p->path.dev->bdev); | |
66 | ||
67 | - r = scsi_dh_attach(q, m->hw_handler_name); | |
68 | - if (r == -EBUSY) { | |
69 | - /* | |
70 | - * Already attached to different hw_handler, | |
71 | - * try to reattach with correct one. | |
72 | - */ | |
73 | - scsi_dh_detach(q); | |
74 | + if (m->hw_handler_name) { | |
75 | r = scsi_dh_attach(q, m->hw_handler_name); | |
76 | - } | |
77 | - if (r < 0) { | |
78 | - ti->error = "error attaching hardware handler"; | |
79 | - dm_put_device(ti, p->path.dev); | |
80 | - goto bad; | |
81 | + if (r == -EBUSY) { | |
82 | + /* | |
83 | + * Already attached to different hw_handler, | |
84 | + * try to reattach with correct one. | |
85 | + */ | |
86 | + scsi_dh_detach(q); | |
87 | + r = scsi_dh_attach(q, m->hw_handler_name); | |
88 | + } | |
89 | + if (r < 0) { | |
90 | + ti->error = "error attaching hardware handler"; | |
91 | + dm_put_device(ti, p->path.dev); | |
92 | + goto bad; | |
93 | + } | |
94 | } | |
95 | } | |
96 | ||
97 | @@ -650,6 +656,11 @@ static struct pgpath *parse_path(struct | |
98 | goto bad; | |
99 | } | |
100 | ||
101 | + if (!p->is_active) { | |
102 | + ps->type->fail_path(ps, &p->path); | |
103 | + p->fail_count++; | |
104 | + m->nr_valid_paths--; | |
105 | + } | |
106 | return p; | |
107 | ||
108 | bad: | |
109 | --- a/drivers/md/dm.c | |
110 | +++ b/drivers/md/dm.c | |
111 | @@ -1304,7 +1304,11 @@ static int dm_make_request(struct reques | |
112 | return 0; | |
113 | } | |
114 | ||
115 | - if (unlikely(!md->map)) { | |
116 | + /* | |
117 | + * Submitting to a stopped queue with no map is okay; | |
118 | + * might happen during reconfiguration. | |
119 | + */ | |
120 | + if (unlikely(!md->map) && !blk_queue_stopped(q)) { | |
121 | bio_endio(bio, -EIO); | |
122 | return 0; | |
123 | } |