From: Sunil Mushran <sunil.mushran@oracle.com>
Date: Thu, 26 Feb 2009 15:00:39 -0800
Subject: ocfs2/dlm: Refactor dlm_clean_master_list()
Patch-mainline: 2.6.30
References: bnc#408304

This patch refactors dlm_clean_master_list() so as to make it
easier to convert the mle list to a hash.

Signed-off-by: Sunil Mushran <sunil.mushran@oracle.com>
Signed-off-by: Mark Fasheh <mfasheh@suse.com>
---
 fs/ocfs2/dlm/dlmmaster.c | 148 ++++++++++++++++++++++++++-------------------
 1 files changed, 85 insertions(+), 63 deletions(-)

Index: linux-2.6.27-sle11_ocfs2_update/fs/ocfs2/dlm/dlmmaster.c
===================================================================
--- linux-2.6.27-sle11_ocfs2_update.orig/fs/ocfs2/dlm/dlmmaster.c
+++ linux-2.6.27-sle11_ocfs2_update/fs/ocfs2/dlm/dlmmaster.c
@@ -3185,12 +3185,87 @@ static int dlm_add_migration_mle(struct
 	return ret;
 }

+/*
+ * Sets the owner of the lockres, associated to the mle, to UNKNOWN
+ */
+static struct dlm_lock_resource *dlm_reset_mleres_owner(struct dlm_ctxt *dlm,
+					struct dlm_master_list_entry *mle)
+{
+	struct dlm_lock_resource *res;
+	unsigned int hash;
+
+	/* Find the lockres associated to the mle and set its owner to UNK */
+	hash = dlm_lockid_hash(mle->u.mlename.name, mle->u.mlename.len);
+	res = __dlm_lookup_lockres(dlm, mle->u.mlename.name, mle->u.mlename.len,
+				   hash);
+	if (res) {
+		spin_unlock(&dlm->master_lock);
+
+		/* move lockres onto recovery list */
+		spin_lock(&res->spinlock);
+		dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
+		dlm_move_lockres_to_recovery_list(dlm, res);
+		spin_unlock(&res->spinlock);
+		dlm_lockres_put(res);
+
+		/* about to get rid of mle, detach from heartbeat */
+		__dlm_mle_detach_hb_events(dlm, mle);
+
+		/* dump the mle */
+		spin_lock(&dlm->master_lock);
+		__dlm_put_mle(mle);
+		spin_unlock(&dlm->master_lock);
+	}
+
+	return res;
+}
+
+static void dlm_clean_migration_mle(struct dlm_ctxt *dlm,
+				    struct dlm_master_list_entry *mle)
+{
+	__dlm_mle_detach_hb_events(dlm, mle);
+
+	spin_lock(&mle->spinlock);
+	__dlm_unlink_mle(dlm, mle);
+	atomic_set(&mle->woken, 1);
+	spin_unlock(&mle->spinlock);
+
+	wake_up(&mle->wq);
+}
+
+static void dlm_clean_block_mle(struct dlm_ctxt *dlm,
+				struct dlm_master_list_entry *mle, u8 dead_node)
+{
+	int bit;
+
+	BUG_ON(mle->type != DLM_MLE_BLOCK);
+
+	spin_lock(&mle->spinlock);
+	bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
+	if (bit != dead_node) {
+		mlog(0, "mle found, but dead node %u would not have been "
+		     "master\n", dead_node);
+		spin_unlock(&mle->spinlock);
+	} else {
+		/* Must drop the refcount by one since the assert_master will
+		 * never arrive. This may result in the mle being unlinked and
+		 * freed, but there may still be a process waiting in the
+		 * dlmlock path which is fine. */
+		mlog(0, "node %u was expected master\n", dead_node);
+		atomic_set(&mle->woken, 1);
+		spin_unlock(&mle->spinlock);
+		wake_up(&mle->wq);
+
+		/* Do not need events any longer, so detach from heartbeat */
+		__dlm_mle_detach_hb_events(dlm, mle);
+		__dlm_put_mle(mle);
+	}
+}

 void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
 {
 	struct dlm_master_list_entry *mle, *next;
 	struct dlm_lock_resource *res;
-	unsigned int hash;

 	mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node);
 top:
@@ -3214,30 +3289,7 @@ top:
 		 * need to clean up if the dead node would have
 		 * been the master. */
 		if (mle->type == DLM_MLE_BLOCK) {
-			int bit;
-
-			spin_lock(&mle->spinlock);
-			bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
-			if (bit != dead_node) {
-				mlog(0, "mle found, but dead node %u would "
-				     "not have been master\n", dead_node);
-				spin_unlock(&mle->spinlock);
-			} else {
-				/* must drop the refcount by one since the
-				 * assert_master will never arrive. this
-				 * may result in the mle being unlinked and
-				 * freed, but there may still be a process
-				 * waiting in the dlmlock path which is fine. */
-				mlog(0, "node %u was expected master\n",
-				     dead_node);
-				atomic_set(&mle->woken, 1);
-				spin_unlock(&mle->spinlock);
-				wake_up(&mle->wq);
-				/* do not need events any longer, so detach
-				 * from heartbeat */
-				__dlm_mle_detach_hb_events(dlm, mle);
-				__dlm_put_mle(mle);
-			}
+			dlm_clean_block_mle(dlm, mle, dead_node);
 			continue;
 		}

@@ -3258,51 +3310,21 @@ top:

 		/* if we have reached this point, this mle needs to
 		 * be removed from the list and freed. */
-
-		/* remove from the list early. NOTE: unlinking
-		 * list_head while in list_for_each_safe */
-		__dlm_mle_detach_hb_events(dlm, mle);
-		spin_lock(&mle->spinlock);
-		__dlm_unlink_mle(dlm, mle);
-		atomic_set(&mle->woken, 1);
-		spin_unlock(&mle->spinlock);
-		wake_up(&mle->wq);
+		dlm_clean_migration_mle(dlm, mle);

 		mlog(0, "%s: node %u died during migration from "
 		     "%u to %u!\n", dlm->name, dead_node,
 		     mle->master, mle->new_master);
-		/* if there is a lockres associated with this
-		 * mle, find it and set its owner to UNKNOWN */
-		hash = dlm_lockid_hash(mle->u.mlename.name, mle->u.mlename.len);
-		res = __dlm_lookup_lockres(dlm, mle->u.mlename.name,
-					   mle->u.mlename.len, hash);
-		if (res) {
-			/* unfortunately if we hit this rare case, our
-			 * lock ordering is messed. we need to drop
-			 * the master lock so that we can take the
-			 * lockres lock, meaning that we will have to
-			 * restart from the head of list. */
-			spin_unlock(&dlm->master_lock);
-
-			/* move lockres onto recovery list */
-			spin_lock(&res->spinlock);
-			dlm_set_lockres_owner(dlm, res,
-					      DLM_LOCK_RES_OWNER_UNKNOWN);
-			dlm_move_lockres_to_recovery_list(dlm, res);
-			spin_unlock(&res->spinlock);
-			dlm_lockres_put(res);
-
-			/* about to get rid of mle, detach from heartbeat */
-			__dlm_mle_detach_hb_events(dlm, mle);
-
-			/* dump the mle */
-			spin_lock(&dlm->master_lock);
-			__dlm_put_mle(mle);
-			spin_unlock(&dlm->master_lock);

+		/* If we find a lockres associated with the mle, we've
+		 * hit this rare case that messes up our lock ordering.
+		 * If so, we need to drop the master lock so that we can
+		 * take the lockres lock, meaning that we will have to
+		 * restart from the head of list. */
+		res = dlm_reset_mleres_owner(dlm, mle);
+		if (res)
 			/* restart */
 			goto top;
-		}

 		/* this may be the last reference */
 		__dlm_put_mle(mle);