From: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Subject: dm: Fix lock dependency warning for request based dm
References: bnc#477927

> > From: Christof Schmitt <christof.schmitt@de.ibm.com>
> >
> > Testing with request based dm multipathing and lock dependency checking
> > revealed this problem. Fix this by disabling interrupts when acquiring the
> > map_lock from the ioctl call in __bind and __unbind.
> >
> > It seems that the problem has been introduced with this patch:
> > http://lkml.indiana.edu/hypermail/linux/kernel/0810.0/1067.html

Thank you for testing request-based dm-multipath and for the patch.

Attached is a patch to fix it.
Since request-based dm takes map_lock after taking queue_lock with
interrupts disabled, we have to use the save/restore variants.
(By the way, although lockdep warns of the deadlock possibility, there is
 currently no code path in request-based dm where request_fn is called
 from interrupt context.)
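
For illustration only (not part of this patch), the lock ordering that
lockdep complains about looks roughly like the sketch below; the names
example_map_lock, example_request_fn and example_ioctl_path_* are
hypothetical stand-ins for md->map_lock, the request path and the ioctl
path:

/* Sketch of the reported lock ordering, with hypothetical names. */
#include <linux/spinlock.h>

static DEFINE_RWLOCK(example_map_lock);

/* Request path: map_lock is taken nested inside queue_lock, which is
 * already held with interrupts disabled. */
static void example_request_fn(spinlock_t *queue_lock)
{
	spin_lock_irq(queue_lock);
	read_lock(&example_map_lock);
	read_unlock(&example_map_lock);
	spin_unlock_irq(queue_lock);
}

/* ioctl path before the fix: map_lock taken with interrupts enabled,
 * which is what lockdep flags as a possible deadlock. */
static void example_ioctl_path_old(void)
{
	read_lock(&example_map_lock);
	read_unlock(&example_map_lock);
}

/* ioctl path after the fix: the _irqsave variant closes the window. */
static void example_ioctl_path_new(void)
{
	unsigned long flags;

	read_lock_irqsave(&example_map_lock, flags);
	read_unlock_irqrestore(&example_map_lock, flags);
}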

I have done simple build and boot testing, but haven't done other
testing (e.g. stress testing) yet.
I will include this patch in the next update after such testing.

Thanks,
Kiyoshi Ueda

Signed-off-by: Nikanth karthikesan <knikanth@suse.de>

Index: linux-2.6.27-SLE11_BRANCH/drivers/md/dm.c
===================================================================
--- linux-2.6.27-SLE11_BRANCH.orig/drivers/md/dm.c
+++ linux-2.6.27-SLE11_BRANCH/drivers/md/dm.c
@@ -525,12 +525,13 @@ static int queue_io(struct mapped_device
 struct dm_table *dm_get_table(struct mapped_device *md)
 {
 	struct dm_table *t;
+	unsigned long flags;
 
-	read_lock(&md->map_lock);
+	read_lock_irqsave(&md->map_lock, flags);
 	t = md->map;
 	if (t)
 		dm_table_get(t);
-	read_unlock(&md->map_lock);
+	read_unlock_irqrestore(&md->map_lock, flags);
 
 	return t;
 }
@@ -1913,6 +1914,7 @@ static int __bind(struct mapped_device *
 {
 	struct request_queue *q = md->queue;
 	sector_t size;
+	unsigned long flags;
 
 	size = dm_table_get_size(t);
 
@@ -1942,7 +1944,7 @@ static int __bind(struct mapped_device *
 	if (dm_table_request_based(t) && !blk_queue_stopped(q))
 		stop_queue(q);
 
-	write_lock(&md->map_lock);
+	write_lock_irqsave(&md->map_lock, flags);
 	md->map = t;
 	dm_table_set_restrictions(t, q);
 	dm_table_set_integrity(t, md);
@@ -1951,7 +1953,7 @@ static int __bind(struct mapped_device *
 	} else {
 		set_disk_ro(md->disk, 0);
 	}
-	write_unlock(&md->map_lock);
+	write_unlock_irqrestore(&md->map_lock, flags);
 
 	return 0;
 }
@@ -1959,14 +1961,15 @@ static int __bind(struct mapped_device *
 static void __unbind(struct mapped_device *md)
 {
 	struct dm_table *map = md->map;
+	unsigned long flags;
 
 	if (!map)
 		return;
 
 	dm_table_event_callback(map, NULL, NULL);
-	write_lock(&md->map_lock);
+	write_lock_irqsave(&md->map_lock, flags);
 	md->map = NULL;
-	write_unlock(&md->map_lock);
+	write_unlock_irqrestore(&md->map_lock, flags);
 	dm_table_destroy(map);
 }
 
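
For context (not part of the patch): callers of dm_get_table() are
expected to drop the table reference with dm_table_put() once they are
done with it. A minimal sketch, assuming the 2.6.27-era device-mapper
interfaces, with a hypothetical caller name (example_caller):

#include <linux/device-mapper.h>

/* Hypothetical caller showing the dm_get_table()/dm_table_put() pairing. */
static void example_caller(struct mapped_device *md)
{
	struct dm_table *map = dm_get_table(md);	/* may return NULL */

	if (!map)
		return;

	/* ... use the table ... */

	dm_table_put(map);	/* drop the reference taken by dm_get_table() */
}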