From: Hannes Reinecke <hare@suse.de>
Subject: Accept failed paths for multipath maps
References: bnc#458037,bnc#458393

The multipath kernel module rejects any map that references an invalid
device. However, because multipathd processes events serially, it will
try to push a map containing invalid devices whenever more than one
device fails at the same time. So we might as well accept such maps and
make sure the affected paths are marked as down.

Signed-off-by: Hannes Reinecke <hare@suse.de>

---
 drivers/md/dm-mpath.c |   66 ++++++++++++++++++++++++++++++++++++++++----------
 drivers/md/dm-mpath.h |    1 
 drivers/md/dm-table.c |    3 ++
 drivers/md/dm.c       |    4 +++
 4 files changed, 61 insertions(+), 13 deletions(-)
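
The core of the change is how parse_path() decides that a failed path may
still be added to the map: when dm_get_device() returns -ENXIO, the path
argument is accepted only if it parses as a "major:minor" pair that
survives a MKDEV()/MAJOR()/MINOR() round trip. Below is a minimal
userspace sketch of that check; the macros are stand-ins for the kernel's
<linux/kdev_t.h> definitions (32-bit dev_t with a 12:20 major/minor
split), and the helper name is made up for illustration and is not part
of the patch.

#include <stdio.h>

/* Userspace stand-ins for the kernel's dev_t helpers (<linux/kdev_t.h>). */
#define MINORBITS	20
#define MINORMASK	((1U << MINORBITS) - 1)
#define MKDEV(ma, mi)	(((ma) << MINORBITS) | (mi))
#define MAJOR(dev)	((unsigned int)((dev) >> MINORBITS))
#define MINOR(dev)	((unsigned int)((dev) & MINORMASK))

/* Hypothetical helper mirroring the acceptance test in parse_path(). */
static int accept_failed_path(const char *path)
{
	unsigned major, minor;
	unsigned int dev;

	/* Only strings of the form "major:minor" are considered. */
	if (sscanf(path, "%u:%u", &major, &minor) != 2)
		return -1;

	/*
	 * Pack and unpack the numbers; if either one overflows its bit
	 * field the round trip does not reproduce it and the map is
	 * rejected, just as it was before this patch.
	 */
	dev = MKDEV(major, minor);
	if (MAJOR(dev) != major || MINOR(dev) != minor)
		return -1;

	printf("adding disabled device %u:%u\n", major, minor);
	return 0;
}

int main(void)
{
	accept_failed_path("8:16");	/* accepted: fits in 12:20 bits */
	accept_failed_path("4096:0");	/* rejected: major out of range */
	accept_failed_path("/dev/sdb");	/* rejected: not a major:minor string */
	return 0;
}

A path that passes this test is stored by format_dev_t() as a
"major:minor" string in the new path.pdev field and starts out with
is_active = 0, so the map loads while reinstate_path() explicitly
refuses such a disabled path.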

--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -141,7 +141,8 @@ static void deactivate_path(struct work_
 	struct pgpath *pgpath =
 		container_of(work, struct pgpath, deactivate_path);

-	blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue);
+	if (pgpath->path.dev)
+		blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue);
 }

 static struct priority_group *alloc_priority_group(void)
@@ -253,6 +254,11 @@ static int __choose_path_in_pg(struct mu

 	m->current_pgpath = path_to_pgpath(path);

+	if (!m->current_pgpath->path.dev) {
+		m->current_pgpath = NULL;
+		return -ENODEV;
+	}
+
 	if (m->current_pg != pg)
 		__switch_pg(m, m->current_pgpath);

@@ -576,6 +582,7 @@ static struct pgpath *parse_path(struct
 {
 	int r;
 	struct pgpath *p;
+	char *path;
 	struct multipath *m = ti->private;

 	/* we need at least a path arg */
@@ -588,14 +595,37 @@ static struct pgpath *parse_path(struct
 	if (!p)
 		return NULL;

-	r = dm_get_device(ti, shift(as), ti->begin, ti->len,
+	path = shift(as);
+	r = dm_get_device(ti, path, ti->begin, ti->len,
 			  dm_table_get_mode(ti->table), &p->path.dev);
 	if (r) {
-		ti->error = "error getting device";
-		goto bad;
+		unsigned major, minor;
+
+		/* Try to add a failed device */
+		if (r == -ENXIO && sscanf(path, "%u:%u", &major, &minor) == 2) {
+			dev_t dev;
+
+			/* Extract the major/minor numbers */
+			dev = MKDEV(major, minor);
+			if (MAJOR(dev) != major || MINOR(dev) != minor) {
+				/* Nice try, didn't work */
+				DMWARN("Invalid device path %s", path);
+				ti->error = "error converting devnum";
+				goto bad;
+			}
+			DMWARN("adding disabled device %d:%d", major, minor);
+			p->path.dev = NULL;
+			format_dev_t(p->path.pdev, dev);
+			p->is_active = 0;
+		} else {
+			ti->error = "error getting device";
+			goto bad;
+		}
+	} else {
+		memcpy(p->path.pdev, p->path.dev->name, 16);
 	}

-	if (m->hw_handler_name) {
+	if (m->hw_handler_name && p->path.dev) {
 		struct request_queue *q = bdev_get_queue(p->path.dev->bdev);

 		r = scsi_dh_attach(q, m->hw_handler_name);
@@ -891,7 +921,7 @@ static int fail_path(struct pgpath *pgpa
 	if (!pgpath->is_active)
		goto out;

-	DMWARN("Failing path %s.", pgpath->path.dev->name);
+	DMWARN("Failing path %s.", pgpath->path.pdev);

 	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
 	pgpath->is_active = 0;
@@ -903,7 +933,7 @@ static int fail_path(struct pgpath *pgpa
 	m->current_pgpath = NULL;

 	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
-		       pgpath->path.dev->name, m->nr_valid_paths);
+		       pgpath->path.pdev, m->nr_valid_paths);

 	queue_work(kmultipathd, &m->trigger_event);
 	queue_work(kmultipathd, &pgpath->deactivate_path);
@@ -928,6 +958,12 @@ static int reinstate_path(struct pgpath
 	if (pgpath->is_active)
 		goto out;

+	if (!pgpath->path.dev) {
+		DMWARN("Cannot reinstate disabled path %s", pgpath->path.pdev);
+		r = -ENODEV;
+		goto out;
+	}
+
 	if (!pgpath->pg->ps.type->reinstate_path) {
 		DMWARN("Reinstate path not supported by path selector %s",
 		       pgpath->pg->ps.type->name);
@@ -946,7 +982,7 @@ static int reinstate_path(struct pgpath
 		queue_work(kmultipathd, &m->process_queued_ios);

 	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
-		       pgpath->path.dev->name, m->nr_valid_paths);
+		       pgpath->path.pdev, m->nr_valid_paths);

 	queue_work(kmultipathd, &m->trigger_event);

@@ -966,6 +1002,9 @@ static int action_dev(struct multipath *
 	struct pgpath *pgpath;
 	struct priority_group *pg;

+	if (!dev)
+		return 0;
+
 	list_for_each_entry(pg, &m->priority_groups, list) {
 		list_for_each_entry(pgpath, &pg->pgpaths, list) {
 			if (pgpath->path.dev == dev)
@@ -1135,11 +1174,12 @@ static void pg_init_done(struct dm_path

 static void activate_path(struct work_struct *work)
 {
-	int ret;
+	int ret = SCSI_DH_DEV_OFFLINED;
 	struct pgpath *pgpath =
 		container_of(work, struct pgpath, activate_path);

-	ret = scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev));
+	if (pgpath->path.dev)
+		ret = scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev));
 	pg_init_done(&pgpath->path, ret);
 }

@@ -1306,7 +1346,7 @@ static int multipath_status(struct dm_ta
 			       pg->ps.type->info_args);

 		list_for_each_entry(p, &pg->pgpaths, list) {
-			DMEMIT("%s %s %u ", p->path.dev->name,
+			DMEMIT("%s %s %u ", p->path.pdev,
 			       p->is_active ? "A" : "F",
 			       p->fail_count);
 			if (pg->ps.type->status)
@@ -1332,7 +1372,7 @@ static int multipath_status(struct dm_ta
 			       pg->ps.type->table_args);

 		list_for_each_entry(p, &pg->pgpaths, list) {
-			DMEMIT("%s ", p->path.dev->name);
+			DMEMIT("%s ", p->path.pdev);
 			if (pg->ps.type->status)
 				sz += pg->ps.type->status(&pg->ps,
 					  &p->path, type, result + sz,
@@ -1414,7 +1454,7 @@ static int multipath_ioctl(struct dm_tar
 	if (!m->current_pgpath)
 		__choose_pgpath(m, 1 << 19); /* Assume 512KB */

-	if (m->current_pgpath) {
+	if (m->current_pgpath && m->current_pgpath->path.dev) {
 		bdev = m->current_pgpath->path.dev->bdev;
 		fake_dentry.d_inode = bdev->bd_inode;
 		fake_file.f_mode = m->current_pgpath->path.dev->mode;
--- a/drivers/md/dm-mpath.h
+++ b/drivers/md/dm-mpath.h
@@ -12,6 +12,7 @@
 struct dm_dev;

 struct dm_path {
+	char pdev[16];		/* Requested physical device */
 	struct dm_dev *dev;	/* Read-only */
 	void *pscontext;	/* For path-selector use */
 };
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -566,6 +566,9 @@ int dm_get_device(struct dm_target *ti,
  */
 void dm_put_device(struct dm_target *ti, struct dm_dev *dd)
 {
+	if (!dd)
+		return;
+
 	if (atomic_dec_and_test(&dd->count)) {
 		close_dev(dd, ti->table->md);
 		list_del(&dd->list);
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1503,6 +1503,9 @@ static void map_request(struct dm_target
 	tio->ti = ti;
 	atomic_inc(&md->pending);

+#if 0
+	/* This might trigger accidentally */
+
 	/*
 	 * Although submitted requests to the md->queue are checked against
 	 * the table/queue limitations at the submission time, the limitations
@@ -1525,6 +1528,7 @@ static void map_request(struct dm_target
 		dm_kill_request(clone, r);
 		return;
 	}
+#endif

 	r = ti->type->map_rq(ti, clone, &tio->info);
 	switch (r) {