git.ipfire.org Git - thirdparty/kernel/stable-queue.git/blob
1af0d7abdd974d17474bfbca0810f839ac593734
[thirdparty/kernel/stable-queue.git] /
1 From 66cb1910df17b38334153462ec8166e48058035f Mon Sep 17 00:00:00 2001
2 From: Joe Thornber <ejt@redhat.com>
3 Date: Wed, 30 Oct 2013 17:11:58 +0000
4 Subject: dm cache: fix a race condition between queuing new migrations and quiescing for a shutdown
5
6 From: Joe Thornber <ejt@redhat.com>
7
8 commit 66cb1910df17b38334153462ec8166e48058035f upstream.
9
10 The code that was trying to do this was inadequate. The postsuspend
11 method (in ioctl context), needs to wait for the worker thread to
12 acknowledge the request to quiesce. Otherwise the migration count may
13 drop to zero temporarily before the worker thread realises we're
14 quiescing. In this case the target will be taken down, but the worker
15 thread may have issued a new migration, which will cause an oops when
16 it completes.
17
18 Signed-off-by: Joe Thornber <ejt@redhat.com>
19 Signed-off-by: Mike Snitzer <snitzer@redhat.com>
20 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
21
22 ---
23 drivers/md/dm-cache-target.c | 54 +++++++++++++++++++++++++++++++------------
24 1 file changed, 40 insertions(+), 14 deletions(-)
25
26 --- a/drivers/md/dm-cache-target.c
27 +++ b/drivers/md/dm-cache-target.c
28 @@ -151,6 +151,9 @@ struct cache {
29 atomic_t nr_migrations;
30 wait_queue_head_t migration_wait;
31
32 + wait_queue_head_t quiescing_wait;
33 + atomic_t quiescing_ack;
34 +
35 /*
36 * cache_size entries, dirty if set
37 */
38 @@ -742,8 +745,9 @@ static void cell_defer(struct cache *cac
39
40 static void cleanup_migration(struct dm_cache_migration *mg)
41 {
42 - dec_nr_migrations(mg->cache);
43 + struct cache *cache = mg->cache;
44 free_migration(mg);
45 + dec_nr_migrations(cache);
46 }
47
48 static void migration_failure(struct dm_cache_migration *mg)
49 @@ -1340,34 +1344,51 @@ static void writeback_some_dirty_blocks(
50 /*----------------------------------------------------------------
51 * Main worker loop
52 *--------------------------------------------------------------*/
53 -static void start_quiescing(struct cache *cache)
54 +static bool is_quiescing(struct cache *cache)
55 {
56 + int r;
57 unsigned long flags;
58
59 spin_lock_irqsave(&cache->lock, flags);
60 - cache->quiescing = 1;
61 + r = cache->quiescing;
62 spin_unlock_irqrestore(&cache->lock, flags);
63 +
64 + return r;
65 }
66
67 -static void stop_quiescing(struct cache *cache)
68 +static void ack_quiescing(struct cache *cache)
69 +{
70 + if (is_quiescing(cache)) {
71 + atomic_inc(&cache->quiescing_ack);
72 + wake_up(&cache->quiescing_wait);
73 + }
74 +}
75 +
76 +static void wait_for_quiescing_ack(struct cache *cache)
77 +{
78 + wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack));
79 +}
80 +
81 +static void start_quiescing(struct cache *cache)
82 {
83 unsigned long flags;
84
85 spin_lock_irqsave(&cache->lock, flags);
86 - cache->quiescing = 0;
87 + cache->quiescing = true;
88 spin_unlock_irqrestore(&cache->lock, flags);
89 +
90 + wait_for_quiescing_ack(cache);
91 }
92
93 -static bool is_quiescing(struct cache *cache)
94 +static void stop_quiescing(struct cache *cache)
95 {
96 - int r;
97 unsigned long flags;
98
99 spin_lock_irqsave(&cache->lock, flags);
100 - r = cache->quiescing;
101 + cache->quiescing = false;
102 spin_unlock_irqrestore(&cache->lock, flags);
103
104 - return r;
105 + atomic_set(&cache->quiescing_ack, 0);
106 }
107
108 static void wait_for_migrations(struct cache *cache)
109 @@ -1414,16 +1435,15 @@ static void do_worker(struct work_struct
110 struct cache *cache = container_of(ws, struct cache, worker);
111
112 do {
113 - if (!is_quiescing(cache))
114 + if (!is_quiescing(cache)) {
115 + writeback_some_dirty_blocks(cache);
116 + process_deferred_writethrough_bios(cache);
117 process_deferred_bios(cache);
118 + }
119
120 process_migrations(cache, &cache->quiesced_migrations, issue_copy);
121 process_migrations(cache, &cache->completed_migrations, complete_migration);
122
123 - writeback_some_dirty_blocks(cache);
124 -
125 - process_deferred_writethrough_bios(cache);
126 -
127 if (commit_if_needed(cache)) {
128 process_deferred_flush_bios(cache, false);
129
130 @@ -1436,6 +1456,9 @@ static void do_worker(struct work_struct
131 process_migrations(cache, &cache->need_commit_migrations,
132 migration_success_post_commit);
133 }
134 +
135 + ack_quiescing(cache);
136 +
137 } while (more_work(cache));
138 }
139
140 @@ -1998,6 +2021,9 @@ static int cache_create(struct cache_arg
141 atomic_set(&cache->nr_migrations, 0);
142 init_waitqueue_head(&cache->migration_wait);
143
144 + init_waitqueue_head(&cache->quiescing_wait);
145 + atomic_set(&cache->quiescing_ack, 0);
146 +
147 r = -ENOMEM;
148 cache->nr_dirty = 0;
149 cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));