From: David Teigland <teigland@redhat.com>
commit c7be761a8163d2f1ac0b606c21e4316b7abc5af7
Author: David Teigland <teigland@redhat.com>
Date:   Wed Jan 7 16:50:41 2009 -0600
Subject: dlm: change rsbtbl rwlock to spinlock

The rwlock is almost always used in write mode, so there's no reason
not to use a spinlock instead.

Signed-off-by: David Teigland <teigland@redhat.com>
Signed-off-by: Coly Li <coly.li@suse.de>

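The conversion is mechanical: the bucket lock in struct dlm_rsbtable
changes type from rwlock_t to spinlock_t, rwlock_init() becomes
spin_lock_init(), and every read_lock()/write_lock() pair on that lock
collapses into a plain spin_lock()/spin_unlock(). A minimal before/after
sketch of the pattern, modeled on the search_rsb() hunk below
(illustrative only, not part of the applied diff):

    /* before: readers and writers are distinguished, but the read
     * side (debug_fs walks, recovery scans) is rare and short */
    write_lock(&ls->ls_rsbtbl[b].lock);
    error = _search_rsb(ls, name, len, b, flags, r_ret);
    write_unlock(&ls->ls_rsbtbl[b].lock);

    /* after: one exclusive spinlock on every path; cheaper to
     * acquire and release than an rwlock when readers are this rare */
    spin_lock(&ls->ls_rsbtbl[b].lock);
    error = _search_rsb(ls, name, len, b, flags, r_ret);
    spin_unlock(&ls->ls_rsbtbl[b].lock);
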
diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
index bc4af3e..1d1d274 100644
--- a/fs/dlm/debug_fs.c
+++ b/fs/dlm/debug_fs.c
@@ -416,7 +416,7 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos)
         if (seq->op == &format3_seq_ops)
                 ri->format = 3;

-        read_lock(&ls->ls_rsbtbl[bucket].lock);
+        spin_lock(&ls->ls_rsbtbl[bucket].lock);
         if (!list_empty(&ls->ls_rsbtbl[bucket].list)) {
                 list_for_each_entry(r, &ls->ls_rsbtbl[bucket].list,
                                     res_hashchain) {
@@ -424,12 +424,12 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos)
                                 dlm_hold_rsb(r);
                                 ri->rsb = r;
                                 ri->bucket = bucket;
-                                read_unlock(&ls->ls_rsbtbl[bucket].lock);
+                                spin_unlock(&ls->ls_rsbtbl[bucket].lock);
                                 return ri;
                         }
                 }
         }
-        read_unlock(&ls->ls_rsbtbl[bucket].lock);
+        spin_unlock(&ls->ls_rsbtbl[bucket].lock);

         /*
          * move to the first rsb in the next non-empty bucket
@@ -447,18 +447,18 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos)
                         return NULL;
                 }

-                read_lock(&ls->ls_rsbtbl[bucket].lock);
+                spin_lock(&ls->ls_rsbtbl[bucket].lock);
                 if (!list_empty(&ls->ls_rsbtbl[bucket].list)) {
                         r = list_first_entry(&ls->ls_rsbtbl[bucket].list,
                                              struct dlm_rsb, res_hashchain);
                         dlm_hold_rsb(r);
                         ri->rsb = r;
                         ri->bucket = bucket;
-                        read_unlock(&ls->ls_rsbtbl[bucket].lock);
+                        spin_unlock(&ls->ls_rsbtbl[bucket].lock);
                         *pos = n;
                         return ri;
                 }
-                read_unlock(&ls->ls_rsbtbl[bucket].lock);
+                spin_unlock(&ls->ls_rsbtbl[bucket].lock);
         }
 }

@@ -477,7 +477,7 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
          * move to the next rsb in the same bucket
          */

-        read_lock(&ls->ls_rsbtbl[bucket].lock);
+        spin_lock(&ls->ls_rsbtbl[bucket].lock);
         rp = ri->rsb;
         next = rp->res_hashchain.next;

@@ -485,12 +485,12 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
                 r = list_entry(next, struct dlm_rsb, res_hashchain);
                 dlm_hold_rsb(r);
                 ri->rsb = r;
-                read_unlock(&ls->ls_rsbtbl[bucket].lock);
+                spin_unlock(&ls->ls_rsbtbl[bucket].lock);
                 dlm_put_rsb(rp);
                 ++*pos;
                 return ri;
         }
-        read_unlock(&ls->ls_rsbtbl[bucket].lock);
+        spin_unlock(&ls->ls_rsbtbl[bucket].lock);
         dlm_put_rsb(rp);

         /*
@@ -509,18 +509,18 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
                         return NULL;
                 }

-                read_lock(&ls->ls_rsbtbl[bucket].lock);
+                spin_lock(&ls->ls_rsbtbl[bucket].lock);
                 if (!list_empty(&ls->ls_rsbtbl[bucket].list)) {
                         r = list_first_entry(&ls->ls_rsbtbl[bucket].list,
                                              struct dlm_rsb, res_hashchain);
                         dlm_hold_rsb(r);
                         ri->rsb = r;
                         ri->bucket = bucket;
-                        read_unlock(&ls->ls_rsbtbl[bucket].lock);
+                        spin_unlock(&ls->ls_rsbtbl[bucket].lock);
                         *pos = n;
                         return ri;
                 }
-                read_unlock(&ls->ls_rsbtbl[bucket].lock);
+                spin_unlock(&ls->ls_rsbtbl[bucket].lock);
         }
 }

diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index ef2f1e3..076e86f 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -105,7 +105,7 @@ struct dlm_dirtable {
 struct dlm_rsbtable {
         struct list_head list;
         struct list_head toss;
-        rwlock_t lock;
+        spinlock_t lock;
 };

 struct dlm_lkbtable {
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 6cfe65b..01e7d39 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -412,9 +412,9 @@ static int search_rsb(struct dlm_ls *ls, char *name, int len, int b,
                       unsigned int flags, struct dlm_rsb **r_ret)
 {
         int error;
-        write_lock(&ls->ls_rsbtbl[b].lock);
+        spin_lock(&ls->ls_rsbtbl[b].lock);
         error = _search_rsb(ls, name, len, b, flags, r_ret);
-        write_unlock(&ls->ls_rsbtbl[b].lock);
+        spin_unlock(&ls->ls_rsbtbl[b].lock);
         return error;
 }

@@ -478,16 +478,16 @@ static int find_rsb(struct dlm_ls *ls, char *name, int namelen,
                 r->res_nodeid = nodeid;
         }

-        write_lock(&ls->ls_rsbtbl[bucket].lock);
+        spin_lock(&ls->ls_rsbtbl[bucket].lock);
         error = _search_rsb(ls, name, namelen, bucket, 0, &tmp);
         if (!error) {
-                write_unlock(&ls->ls_rsbtbl[bucket].lock);
+                spin_unlock(&ls->ls_rsbtbl[bucket].lock);
                 dlm_free_rsb(r);
                 r = tmp;
                 goto out;
         }
         list_add(&r->res_hashchain, &ls->ls_rsbtbl[bucket].list);
-        write_unlock(&ls->ls_rsbtbl[bucket].lock);
+        spin_unlock(&ls->ls_rsbtbl[bucket].lock);
         error = 0;
  out:
         *r_ret = r;
@@ -530,9 +530,9 @@ static void put_rsb(struct dlm_rsb *r)
         struct dlm_ls *ls = r->res_ls;
         uint32_t bucket = r->res_bucket;

-        write_lock(&ls->ls_rsbtbl[bucket].lock);
+        spin_lock(&ls->ls_rsbtbl[bucket].lock);
         kref_put(&r->res_ref, toss_rsb);
-        write_unlock(&ls->ls_rsbtbl[bucket].lock);
+        spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 }

 void dlm_put_rsb(struct dlm_rsb *r)
@@ -967,7 +967,7 @@ static int shrink_bucket(struct dlm_ls *ls, int b)

         for (;;) {
                 found = 0;
-                write_lock(&ls->ls_rsbtbl[b].lock);
+                spin_lock(&ls->ls_rsbtbl[b].lock);
                 list_for_each_entry_reverse(r, &ls->ls_rsbtbl[b].toss,
                                             res_hashchain) {
                         if (!time_after_eq(jiffies, r->res_toss_time +
@@ -978,20 +978,20 @@ static int shrink_bucket(struct dlm_ls *ls, int b)
                 }

                 if (!found) {
-                        write_unlock(&ls->ls_rsbtbl[b].lock);
+                        spin_unlock(&ls->ls_rsbtbl[b].lock);
                         break;
                 }

                 if (kref_put(&r->res_ref, kill_rsb)) {
                         list_del(&r->res_hashchain);
-                        write_unlock(&ls->ls_rsbtbl[b].lock);
+                        spin_unlock(&ls->ls_rsbtbl[b].lock);

                         if (is_master(r))
                                 dir_remove(r);
                         dlm_free_rsb(r);
                         count++;
                 } else {
-                        write_unlock(&ls->ls_rsbtbl[b].lock);
+                        spin_unlock(&ls->ls_rsbtbl[b].lock);
                         log_error(ls, "tossed rsb in use %s", r->res_name);
                 }
         }
@@ -4224,7 +4224,7 @@ static struct dlm_rsb *find_purged_rsb(struct dlm_ls *ls, int bucket)
 {
         struct dlm_rsb *r, *r_ret = NULL;

-        read_lock(&ls->ls_rsbtbl[bucket].lock);
+        spin_lock(&ls->ls_rsbtbl[bucket].lock);
         list_for_each_entry(r, &ls->ls_rsbtbl[bucket].list, res_hashchain) {
                 if (!rsb_flag(r, RSB_LOCKS_PURGED))
                         continue;
@@ -4233,7 +4233,7 @@ static struct dlm_rsb *find_purged_rsb(struct dlm_ls *ls, int bucket)
                 r_ret = r;
                 break;
         }
-        read_unlock(&ls->ls_rsbtbl[bucket].lock);
+        spin_unlock(&ls->ls_rsbtbl[bucket].lock);
         return r_ret;
 }

diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 8d86b79..aa32e5f 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -464,7 +464,7 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
         for (i = 0; i < size; i++) {
                 INIT_LIST_HEAD(&ls->ls_rsbtbl[i].list);
                 INIT_LIST_HEAD(&ls->ls_rsbtbl[i].toss);
-                rwlock_init(&ls->ls_rsbtbl[i].lock);
+                spin_lock_init(&ls->ls_rsbtbl[i].lock);
         }

         size = dlm_config.ci_lkbtbl_size;
diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
index 80aba5b..eda43f3 100644
--- a/fs/dlm/recover.c
+++ b/fs/dlm/recover.c
@@ -726,7 +726,7 @@ int dlm_create_root_list(struct dlm_ls *ls)
         }

         for (i = 0; i < ls->ls_rsbtbl_size; i++) {
-                read_lock(&ls->ls_rsbtbl[i].lock);
+                spin_lock(&ls->ls_rsbtbl[i].lock);
                 list_for_each_entry(r, &ls->ls_rsbtbl[i].list, res_hashchain) {
                         list_add(&r->res_root_list, &ls->ls_root_list);
                         dlm_hold_rsb(r);
@@ -737,7 +737,7 @@ int dlm_create_root_list(struct dlm_ls *ls)
                    but no other recovery steps should do anything with them. */

                 if (dlm_no_directory(ls)) {
-                        read_unlock(&ls->ls_rsbtbl[i].lock);
+                        spin_unlock(&ls->ls_rsbtbl[i].lock);
                         continue;
                 }

@@ -745,7 +745,7 @@ int dlm_create_root_list(struct dlm_ls *ls)
                         list_add(&r->res_root_list, &ls->ls_root_list);
                         dlm_hold_rsb(r);
                 }
-                read_unlock(&ls->ls_rsbtbl[i].lock);
+                spin_unlock(&ls->ls_rsbtbl[i].lock);
         }
  out:
         up_write(&ls->ls_root_sem);
@@ -775,7 +775,7 @@ void dlm_clear_toss_list(struct dlm_ls *ls)
         int i;

         for (i = 0; i < ls->ls_rsbtbl_size; i++) {
-                write_lock(&ls->ls_rsbtbl[i].lock);
+                spin_lock(&ls->ls_rsbtbl[i].lock);
                 list_for_each_entry_safe(r, safe, &ls->ls_rsbtbl[i].toss,
                                          res_hashchain) {
                         if (dlm_no_directory(ls) || !is_master(r)) {
@@ -783,7 +783,7 @@ void dlm_clear_toss_list(struct dlm_ls *ls)
                                 dlm_free_rsb(r);
                         }
                 }
-                write_unlock(&ls->ls_rsbtbl[i].lock);
+                spin_unlock(&ls->ls_rsbtbl[i].lock);
         }
 }
