From 67a6e30aa338e2e4b8ec3629df247bd8f653988b Mon Sep 17 00:00:00 2001
From: Hannes Reinecke <hare@suse.de>
Date: Tue, 16 Dec 2008 09:25:25 +0100
Subject: [PATCH] Handle multiple paths in a path group properly during pg_init

All of the hardware handlers now have a state, which is set to active
or (some form of) inactive. All of them have a prep_fn, which uses this
"state" to fail the I/O without it ever being sent to the device.

As Babu noted in his email, pg_init/activate is sent on only one path,
and the "state" of that path is changed appropriately to "active", while
the other paths in the same path group are never changed because they
never receive an "activate".

This patch changes the dm-multipath layer to send an "activate" to each
path in the path group.

Signed-off-by: Chandra Seetharaman <sekharan@us.ibm.com>
Acked-by: "Moger, Babu" <Babu.Moger@lsi.com>
Signed-off-by: Hannes Reinecke <hare@suse.de>
---
 drivers/md/dm-mpath.c |   48 ++++++++++++++++--------------------------------
 1 files changed, 16 insertions(+), 32 deletions(-)

diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index fa4a362..510fd26 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -33,6 +33,7 @@ struct pgpath {

 	struct dm_path path;
 	struct work_struct deactivate_path;
+	struct work_struct activate_path;
 };

 #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
@@ -62,8 +63,6 @@ struct multipath {
 	spinlock_t lock;

 	const char *hw_handler_name;
-	struct work_struct activate_path;
-	struct pgpath *pgpath_to_activate;
 	unsigned nr_priority_groups;
 	struct list_head priority_groups;
 	unsigned pg_init_required;	/* pg_init needs calling? */
@@ -126,6 +125,7 @@ static struct pgpath *alloc_pgpath(void)
 	if (pgpath) {
 		pgpath->is_active = 1;
 		INIT_WORK(&pgpath->deactivate_path, deactivate_path);
+		INIT_WORK(&pgpath->activate_path, activate_path);
 	}

 	return pgpath;
@@ -165,10 +165,6 @@ static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
 	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
 		list_del(&pgpath->list);
 		dm_put_device(ti, pgpath->path.dev);
-		spin_lock_irqsave(&m->lock, flags);
-		if (m->pgpath_to_activate == pgpath)
-			m->pgpath_to_activate = NULL;
-		spin_unlock_irqrestore(&m->lock, flags);
 		free_pgpath(pgpath);
 	}
 }
@@ -199,7 +195,6 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
 		m->queue_io = 1;
 		INIT_WORK(&m->process_queued_ios, process_queued_ios);
 		INIT_WORK(&m->trigger_event, trigger_event);
-		INIT_WORK(&m->activate_path, activate_path);
 		m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
 		if (!m->mpio_pool) {
 			kfree(m);
@@ -435,8 +430,8 @@ static void process_queued_ios(struct work_struct *work)
 {
 	struct multipath *m =
 		container_of(work, struct multipath, process_queued_ios);
-	struct pgpath *pgpath = NULL;
-	unsigned init_required = 0, must_queue = 1;
+	struct pgpath *pgpath = NULL, *tmp;
+	unsigned must_queue = 1;
 	unsigned long flags;

 	spin_lock_irqsave(&m->lock, flags);
@@ -454,19 +449,15 @@ static void process_queued_ios(struct work_struct *work)
 		must_queue = 0;

 	if (m->pg_init_required && !m->pg_init_in_progress && pgpath) {
-		m->pgpath_to_activate = pgpath;
 		m->pg_init_count++;
 		m->pg_init_required = 0;
-		m->pg_init_in_progress = 1;
-		init_required = 1;
+		list_for_each_entry(tmp, &pgpath->pg->pgpaths, list) {
+			queue_work(kmpath_handlerd, &tmp->activate_path);
+			m->pg_init_in_progress++;
+		}
 	}
-
 out:
 	spin_unlock_irqrestore(&m->lock, flags);
-
-	if (init_required)
-		queue_work(kmpath_handlerd, &m->activate_path);
-
 	if (!must_queue)
 		dispatch_queued_ios(m);
 }
@@ -1125,27 +1116,20 @@ static void pg_init_done(struct dm_path *path, int errors)
 		pg->bypassed = 0;
 	}

-	m->pg_init_in_progress = 0;
-	queue_work(kmultipathd, &m->process_queued_ios);
-	spin_unlock_irqrestore(&m->lock, flags);
+	m->pg_init_in_progress--;
+	if (!m->pg_init_in_progress)
+		queue_work(kmultipathd, &m->process_queued_ios);
+	spin_unlock_irqrestore(&m->lock, flags);
 }

 static void activate_path(struct work_struct *work)
 {
 	int ret;
-	struct multipath *m =
-		container_of(work, struct multipath, activate_path);
-	struct dm_path *path;
-	unsigned long flags;
+	struct pgpath *pgpath =
+		container_of(work, struct pgpath, activate_path);

-	spin_lock_irqsave(&m->lock, flags);
-	path = &m->pgpath_to_activate->path;
-	m->pgpath_to_activate = NULL;
-	spin_unlock_irqrestore(&m->lock, flags);
-	if (!path)
-		return;
-	ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));
-	pg_init_done(path, ret);
+	ret = scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev));
+	pg_init_done(&pgpath->path, ret);
 }

 /*
--
1.5.3.2
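
Editor's note: the sketch below is a minimal userspace model of the idea in
this patch, not the kernel code itself. The names and the synchronous calls
are hypothetical stand-ins: there is no real workqueue, locking, or SCSI
device handler here. It only illustrates the pattern the patch introduces --
every path in the selected priority group gets its own activation, an
outstanding-activation counter (pg_init_in_progress) is bumped per path, and
queued I/O is released only once that counter drops back to zero.

/* Illustrative model of "send activate to every path in the group". */
#include <stdio.h>

#define NR_PATHS 3

struct pgpath {
	const char *name;
	int activated;                  /* set once "activate" has run */
};

struct multipath {
	struct pgpath paths[NR_PATHS];
	unsigned pg_init_in_progress;   /* outstanding activations */
};

/* Stand-in for the per-path activate_path work: in the real code this
 * calls scsi_dh_activate() and then pg_init_done() for that path. */
static void activate_path(struct multipath *m, struct pgpath *p)
{
	p->activated = 1;
	printf("activated %s\n", p->name);

	/* pg_init_done(): only when the last activation finishes does the
	 * queued I/O get dispatched again. */
	if (--m->pg_init_in_progress == 0)
		printf("pg_init complete, queued I/O can be dispatched\n");
}

/* Stand-in for the fixed process_queued_ios(): queue an activation for
 * *every* path in the group, not just the one chosen path. */
static void start_pg_init(struct multipath *m)
{
	for (int i = 0; i < NR_PATHS; i++)
		m->pg_init_in_progress++;

	for (int i = 0; i < NR_PATHS; i++)
		activate_path(m, &m->paths[i]);
}

int main(void)
{
	struct multipath m = {
		.paths = { { "sda", 0 }, { "sdb", 0 }, { "sdc", 0 } },
		.pg_init_in_progress = 0,
	};

	start_pg_init(&m);
	return 0;
}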