Commit 00e5a55c
1 | From: Hannes Reinecke <hare@suse.de> |
2 | Subject: Accept failed paths for multipath maps | |
3 | References: bnc#458037, bnc#458393 | |
4 | ||
5 | The multipath kernel module is rejecting any map with an invalid | |
6 | device. However, as the multipathd is processing the events serially | |
7 | it will try to push a map with invalid devices if more than one | |
8 | device failed at the same time. | |
9 | So we might as well accept those maps and make sure to mark the | |
10 | paths as down. | |
11 | ||
12 | Signed-off-by: Hannes Reinecke <hare@suse.de> | |
13 | ||
14 | diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c | |
15 | index 510fd26..95feacd 100644 | |
16 | --- a/drivers/md/dm-mpath.c | |
17 | +++ b/drivers/md/dm-mpath.c | |
18 | @@ -141,7 +141,8 @@ static void deactivate_path(struct work_struct *work) | |
19 | struct pgpath *pgpath = | |
20 | container_of(work, struct pgpath, deactivate_path); | |
21 | ||
22 | - blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue); | |
23 | + if (pgpath->path.dev) | |
24 | + blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue); | |
25 | } | |
26 | ||
27 | static struct priority_group *alloc_priority_group(void) | |
28 | @@ -253,6 +254,11 @@ static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg, | |
29 | ||
30 | m->current_pgpath = path_to_pgpath(path); | |
31 | ||
32 | + if (!m->current_pgpath->path.dev) { | |
33 | + m->current_pgpath = NULL; | |
34 | + return -ENODEV; | |
35 | + } | |
36 | + | |
37 | if (m->current_pg != pg) | |
38 | __switch_pg(m, m->current_pgpath); | |
39 | ||
40 | @@ -570,6 +576,7 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps, | |
41 | { | |
42 | int r; | |
43 | struct pgpath *p; | |
44 | + char *path; | |
45 | struct multipath *m = ti->private; | |
46 | ||
47 | /* we need at least a path arg */ | |
48 | @@ -582,14 +589,37 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps, | |
49 | if (!p) | |
50 | return NULL; | |
51 | ||
52 | - r = dm_get_device(ti, shift(as), ti->begin, ti->len, | |
53 | + path = shift(as); | |
54 | + r = dm_get_device(ti, path, ti->begin, ti->len, | |
55 | dm_table_get_mode(ti->table), &p->path.dev); | |
56 | if (r) { | |
57 | - ti->error = "error getting device"; | |
58 | - goto bad; | |
59 | + unsigned major, minor; | |
60 | + | |
61 | + /* Try to add a failed device */ | |
62 | + if (r == -ENXIO && sscanf(path, "%u:%u", &major, &minor) == 2) { | |
63 | + dev_t dev; | |
64 | + | |
65 | + /* Extract the major/minor numbers */ | |
66 | + dev = MKDEV(major, minor); | |
67 | + if (MAJOR(dev) != major || MINOR(dev) != minor) { | |
68 | + /* Nice try, didn't work */ | |
69 | + DMWARN("Invalid device path %s", path); | |
70 | + ti->error = "error converting devnum"; | |
71 | + goto bad; | |
72 | + } | |
73 | + DMWARN("adding disabled device %d:%d", major, minor); | |
74 | + p->path.dev = NULL; | |
75 | + format_dev_t(p->path.pdev, dev); | |
76 | + p->is_active = 0; | |
77 | + } else { | |
78 | + ti->error = "error getting device"; | |
79 | + goto bad; | |
80 | + } | |
81 | + } else { | |
82 | + memcpy(p->path.pdev, p->path.dev->name, 16); | |
83 | } | |
84 | ||
85 | - if (m->hw_handler_name) { | |
86 | + if (m->hw_handler_name && p->path.dev) { | |
87 | struct request_queue *q = bdev_get_queue(p->path.dev->bdev); | |
88 | ||
89 | r = scsi_dh_attach(q, m->hw_handler_name); | |
90 | @@ -880,7 +910,7 @@ static int fail_path(struct pgpath *pgpath) | |
91 | if (!pgpath->is_active) | |
92 | goto out; | |
93 | ||
94 | - DMWARN("Failing path %s.", pgpath->path.dev->name); | |
95 | + DMWARN("Failing path %s.", pgpath->path.pdev); | |
96 | ||
97 | pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path); | |
98 | pgpath->is_active = 0; | |
99 | @@ -892,7 +922,7 @@ static int fail_path(struct pgpath *pgpath) | |
100 | m->current_pgpath = NULL; | |
101 | ||
102 | dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti, | |
103 | - pgpath->path.dev->name, m->nr_valid_paths); | |
104 | + pgpath->path.pdev, m->nr_valid_paths); | |
105 | ||
106 | queue_work(kmultipathd, &m->trigger_event); | |
107 | queue_work(kmultipathd, &pgpath->deactivate_path); | |
108 | @@ -917,6 +947,12 @@ static int reinstate_path(struct pgpath *pgpath) | |
109 | if (pgpath->is_active) | |
110 | goto out; | |
111 | ||
112 | + if (!pgpath->path.dev) { | |
113 | + DMWARN("Cannot reinstate disabled path %s", pgpath->path.pdev); | |
114 | + r = -ENODEV; | |
115 | + goto out; | |
116 | + } | |
117 | + | |
118 | if (!pgpath->pg->ps.type->reinstate_path) { | |
119 | DMWARN("Reinstate path not supported by path selector %s", | |
120 | pgpath->pg->ps.type->name); | |
121 | @@ -935,7 +971,7 @@ static int reinstate_path(struct pgpath *pgpath) | |
122 | queue_work(kmultipathd, &m->process_queued_ios); | |
123 | ||
124 | dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti, | |
125 | - pgpath->path.dev->name, m->nr_valid_paths); | |
126 | + pgpath->path.pdev, m->nr_valid_paths); | |
127 | ||
128 | queue_work(kmultipathd, &m->trigger_event); | |
129 | ||
130 | @@ -955,6 +991,9 @@ static int action_dev(struct multipath *m, struct dm_dev *dev, | |
131 | struct pgpath *pgpath; | |
132 | struct priority_group *pg; | |
133 | ||
134 | + if (!dev) | |
135 | + return 0; | |
136 | + | |
137 | list_for_each_entry(pg, &m->priority_groups, list) { | |
138 | list_for_each_entry(pgpath, &pg->pgpaths, list) { | |
139 | if (pgpath->path.dev == dev) | |
140 | @@ -1124,11 +1163,12 @@ static void pg_init_done(struct dm_path *path, int errors) | |
141 | ||
142 | static void activate_path(struct work_struct *work) | |
143 | { | |
144 | - int ret; | |
145 | + int ret = SCSI_DH_DEV_OFFLINED; | |
146 | struct pgpath *pgpath = | |
147 | container_of(work, struct pgpath, activate_path); | |
148 | ||
149 | - ret = scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev)); | |
150 | + if (pgpath->path.dev) | |
151 | + ret = scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev)); | |
152 | pg_init_done(&pgpath->path, ret); | |
153 | } | |
154 | ||
155 | @@ -1295,7 +1335,7 @@ static int multipath_status(struct dm_target *ti, status_type_t type, | |
156 | pg->ps.type->info_args); | |
157 | ||
158 | list_for_each_entry(p, &pg->pgpaths, list) { | |
159 | - DMEMIT("%s %s %u ", p->path.dev->name, | |
160 | + DMEMIT("%s %s %u ", p->path.pdev, | |
161 | p->is_active ? "A" : "F", | |
162 | p->fail_count); | |
163 | if (pg->ps.type->status) | |
164 | @@ -1321,7 +1361,7 @@ static int multipath_status(struct dm_target *ti, status_type_t type, | |
165 | pg->ps.type->table_args); | |
166 | ||
167 | list_for_each_entry(p, &pg->pgpaths, list) { | |
168 | - DMEMIT("%s ", p->path.dev->name); | |
169 | + DMEMIT("%s ", p->path.pdev); | |
170 | if (pg->ps.type->status) | |
171 | sz += pg->ps.type->status(&pg->ps, | |
172 | &p->path, type, result + sz, | |
173 | @@ -1403,7 +1443,7 @@ static int multipath_ioctl(struct dm_target *ti, struct inode *inode, | |
174 | if (!m->current_pgpath) | |
175 | __choose_pgpath(m, 1 << 19); /* Assume 512KB */ | |
176 | ||
177 | - if (m->current_pgpath) { | |
178 | + if (m->current_pgpath && m->current_pgpath->path.dev) { | |
179 | bdev = m->current_pgpath->path.dev->bdev; | |
180 | fake_dentry.d_inode = bdev->bd_inode; | |
181 | fake_file.f_mode = m->current_pgpath->path.dev->mode; | |
182 | diff --git a/drivers/md/dm-mpath.h b/drivers/md/dm-mpath.h | |
183 | index e230f71..f97388d 100644 | |
184 | --- a/drivers/md/dm-mpath.h | |
185 | +++ b/drivers/md/dm-mpath.h | |
186 | @@ -12,6 +12,7 @@ | |
187 | struct dm_dev; | |
188 | ||
189 | struct dm_path { | |
190 | + char pdev[16]; /* Requested physical device */ | |
191 | struct dm_dev *dev; /* Read-only */ | |
192 | void *pscontext; /* For path-selector use */ | |
193 | }; | |
194 | diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c | |
195 | index 1514e84..ee61b82 100644 | |
196 | --- a/drivers/md/dm-table.c | |
197 | +++ b/drivers/md/dm-table.c | |
198 | @@ -566,6 +566,9 @@ int dm_get_device(struct dm_target *ti, const char *path, sector_t start, | |
199 | */ | |
200 | void dm_put_device(struct dm_target *ti, struct dm_dev *dd) | |
201 | { | |
202 | + if (!dd) | |
203 | + return; | |
204 | + | |
205 | if (atomic_dec_and_test(&dd->count)) { | |
206 | close_dev(dd, ti->table->md); | |
207 | list_del(&dd->list); | |
208 | diff --git a/drivers/md/dm.c b/drivers/md/dm.c | |
209 | index 6783f91..134995e 100644 | |
210 | --- a/drivers/md/dm.c | |
211 | +++ b/drivers/md/dm.c | |
212 | @@ -1503,6 +1503,9 @@ static void map_request(struct dm_target *ti, struct request *rq, | |
213 | tio->ti = ti; | |
214 | atomic_inc(&md->pending); | |
215 | ||
216 | +#if 0 | |
217 | + /* This might trigger accidentally */ | |
218 | + | |
219 | /* | |
220 | * Although submitted requests to the md->queue are checked against | |
221 | * the table/queue limitations at the submission time, the limitations | |
222 | @@ -1525,6 +1528,7 @@ static void map_request(struct dm_target *ti, struct request *rq, | |
223 | dm_kill_request(clone, r); | |
224 | return; | |
225 | } | |
226 | +#endif | |
227 | ||
228 | r = ti->type->map_rq(ti, clone, &tio->info); | |
229 | switch (r) { |