1 From 67a6e30aa338e2e4b8ec3629df247bd8f653988b Mon Sep 17 00:00:00 2001
2 From: Hannes Reinecke <hare@suse.de>
3 Date: Tue, 16 Dec 2008 09:25:25 +0100
4 Subject: [PATCH] Handle multiple paths in a path group properly during pg_init
6 All of the hardware handlers do have a state now, and they are set to
7 active and (some form of) inactive. All of them have prep_fn, which use
8 this "state" to fail the I/O without it ever being sent to the device.
10 As Babu has noted in his email, the pg_init/activate is sent on only one
11 path and the "state" of that path is changed appropriately to "active"
12 while other paths in the same path group are never changed as they never
13 got an "activate" notification.
15 This patch makes changes in the dm-multipath layer to send an "activate"
16 on each path in the path group.
18 Signed-off-by: Chandra Seetharaman <sekharan@us.ibm.com>
19 Acked-by: "Moger, Babu" <Babu.Moger@lsi.com>
20 Signed-off-by: Hannes Reinecke <hare@suse.de>
22 drivers/md/dm-mpath.c | 48 ++++++++++++++++--------------------------------
23 1 file changed, 16 insertions(+), 32 deletions(-)
25 --- a/drivers/md/dm-mpath.c
26 +++ b/drivers/md/dm-mpath.c
27 @@ -33,6 +33,7 @@ struct pgpath {
30 struct work_struct deactivate_path;
31 + struct work_struct activate_path;
34 #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
35 @@ -62,8 +63,6 @@ struct multipath {
38 const char *hw_handler_name;
39 - struct work_struct activate_path;
40 - struct pgpath *pgpath_to_activate;
41 unsigned nr_priority_groups;
42 struct list_head priority_groups;
43 unsigned pg_init_required; /* pg_init needs calling? */
44 @@ -126,6 +125,7 @@ static struct pgpath *alloc_pgpath(void)
46 pgpath->is_active = 1;
47 INIT_WORK(&pgpath->deactivate_path, deactivate_path);
48 + INIT_WORK(&pgpath->activate_path, activate_path);
52 @@ -165,10 +165,6 @@ static void free_pgpaths(struct list_hea
53 list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
54 list_del(&pgpath->list);
55 dm_put_device(ti, pgpath->path.dev);
56 - spin_lock_irqsave(&m->lock, flags);
57 - if (m->pgpath_to_activate == pgpath)
58 - m->pgpath_to_activate = NULL;
59 - spin_unlock_irqrestore(&m->lock, flags);
63 @@ -199,7 +195,6 @@ static struct multipath *alloc_multipath
65 INIT_WORK(&m->process_queued_ios, process_queued_ios);
66 INIT_WORK(&m->trigger_event, trigger_event);
67 - INIT_WORK(&m->activate_path, activate_path);
68 m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
71 @@ -435,8 +430,8 @@ static void process_queued_ios(struct wo
74 container_of(work, struct multipath, process_queued_ios);
75 - struct pgpath *pgpath = NULL;
76 - unsigned init_required = 0, must_queue = 1;
77 + struct pgpath *pgpath = NULL, *tmp;
78 + unsigned must_queue = 1;
81 spin_lock_irqsave(&m->lock, flags);
82 @@ -454,19 +449,15 @@ static void process_queued_ios(struct wo
85 if (m->pg_init_required && !m->pg_init_in_progress && pgpath) {
86 - m->pgpath_to_activate = pgpath;
88 m->pg_init_required = 0;
89 - m->pg_init_in_progress = 1;
91 + list_for_each_entry(tmp, &pgpath->pg->pgpaths, list) {
92 + queue_work(kmpath_handlerd, &tmp->activate_path);
93 + m->pg_init_in_progress++;
98 spin_unlock_irqrestore(&m->lock, flags);
101 - queue_work(kmpath_handlerd, &m->activate_path);
104 dispatch_queued_ios(m);
106 @@ -1136,27 +1127,20 @@ static void pg_init_done(struct dm_path
110 - m->pg_init_in_progress = 0;
111 - queue_work(kmultipathd, &m->process_queued_ios);
112 - spin_unlock_irqrestore(&m->lock, flags);
113 + m->pg_init_in_progress--;
114 + if (!m->pg_init_in_progress)
115 + queue_work(kmultipathd, &m->process_queued_ios);
116 + spin_unlock_irqrestore(&m->lock, flags);
119 static void activate_path(struct work_struct *work)
122 - struct multipath *m =
123 - container_of(work, struct multipath, activate_path);
124 - struct dm_path *path;
125 - unsigned long flags;
126 + struct pgpath *pgpath =
127 + container_of(work, struct pgpath, activate_path);
129 - spin_lock_irqsave(&m->lock, flags);
130 - path = &m->pgpath_to_activate->path;
131 - m->pgpath_to_activate = NULL;
132 - spin_unlock_irqrestore(&m->lock, flags);
135 - ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));
136 - pg_init_done(path, ret);
137 + ret = scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev));
138 + pg_init_done(&pgpath->path, ret);