From: Chandra Seetharaman <sekharan@us.ibm.com>
Subject: dm: avoid destroying table in dm_any_congested
References: bnc#457205
Patch-mainline: 2.6.28-rc4
Signed-off-by: Nikanth Karthikesan <knikanth@suse.de>

[PATCH 1/3] for bnc 457205. Even though this fix is almost
reverted by the 3rd patch in this series, it is kept because it
fixes a subtle bug, by calling dm_table_put() only when
dm_get_table() succeeds. It also brings the code more in line
with mainline.
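
Reduced to its core shape (a condensed sketch of the before/after
forms in the hunk below, with the request-based branch left out),
the change is:

	/* before: dm_table_put() is reached even when dm_get_table()
	 * returned NULL */
	int r;
	struct dm_table *map = dm_get_table(md);

	if (!map || test_bit(DMF_BLOCK_IO, &md->flags))
		r = bdi_bits;
	else
		r = dm_table_any_congested(map, bdi_bits);

	dm_table_put(map);

	/* after: a default is set up front and the reference is
	 * dropped only when one was actually taken */
	int r = bdi_bits;
	struct dm_table *map;

	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
		map = dm_get_table(md);
		if (map) {
			r = dm_table_any_congested(map, bdi_bits);
			dm_table_put(map);
		}
	}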

mainline commit 8a57dfc6f943c92b861c9a19b0c86ddcb2aba768

    dm_any_congested() just checks for the DMF_BLOCK_IO and has no
    code to make sure that suspend waits for dm_any_congested() to
    complete.  This patch adds such a check.

    Without it, a race can occur with dm_table_put() attempting to
    destroy the table in the wrong thread, the one running
    dm_any_congested(), which is meant to be quick and return
    immediately.

    Two examples of problems:
    1. Sleeping functions called from congested code, the caller
       of which holds a spin lock.
    2. An ABBA deadlock between pdflush and multipathd. The two locks
       in contention are inode lock and kernel lock.

    Signed-off-by: Chandra Seetharaman <sekharan@us.ibm.com>
    Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
    Signed-off-by: Alasdair G Kergon <agk@redhat.com>

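The md->pending counter and md->wait queue touched by the hunk below
are the ones the suspend path already uses to wait for in-flight I/O.
A simplified sketch of that waiting pattern (not the verbatim dm.c
code in this tree) looks roughly like:

	/* suspend side, simplified: sleep on md->wait until md->pending
	 * drains; dm_any_congested() below now bumps md->pending for its
	 * whole run and issues the matching wake_up() when it drops it */
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&md->wait, &wait);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!atomic_read(&md->pending))
			break;
		io_schedule();
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&md->wait, &wait);

With that bracket in place, a congestion query still in flight holds
off suspend until it returns.
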
Index: linux-2.6.27/drivers/md/dm.c
===================================================================
--- linux-2.6.27.orig/drivers/md/dm.c
+++ linux-2.6.27/drivers/md/dm.c
@@ -1640,22 +1640,32 @@ static void dm_unplug_all(struct request
 
 static int dm_any_congested(void *congested_data, int bdi_bits)
 {
-	int r;
+	int r = bdi_bits;
 	struct mapped_device *md = (struct mapped_device *) congested_data;
-	struct dm_table *map = dm_get_table(md);
+	struct dm_table *map;
 
-	if (!map || test_bit(DMF_BLOCK_IO, &md->flags))
-		r = bdi_bits;
-	else if (dm_request_based(md))
-		/*
-		 * Request-based dm cares about only own queue for
-		 * the query about congestion status of request_queue
-		 */
-		r = md->queue->backing_dev_info.state & bdi_bits;
-	else
-		r = dm_table_any_congested(map, bdi_bits);
+	atomic_inc(&md->pending);
+
+	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
+		map = dm_get_table(md);
+		if (map) {
+			if (dm_request_based(md))
+				/*
+				 * Request-based dm cares about only own queue for
+				 * the query about congestion status of request_queue
+				 */
+				r = md->queue->backing_dev_info.state & bdi_bits;
+			else
+				r = dm_table_any_congested(map, bdi_bits);
+			dm_table_put(map);
+		}
+	}
+
+
+	if (!atomic_dec_return(&md->pending))
+		/* nudge anyone waiting on suspend queue */
+		wake_up(&md->wait);
 
-	dm_table_put(map);
 	return r;
 }
 