1 From: David Teigland <teigland@redhat.com>
2 commit c7be761a8163d2f1ac0b606c21e4316b7abc5af7
3 Author: David Teigland <teigland@redhat.com>
4 Date: Wed Jan 7 16:50:41 2009 -0600
5 Subject: dlm: change rsbtbl rwlock to spinlock
7 The rwlock is almost always used in write mode, so there's no reason
8 not to use a spinlock instead.
10 Signed-off-by: David Teigland <teigland@redhat.com>
11 Signed-off-by: Coly Li <coly.li@suse.de>
13 diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
14 index bc4af3e..1d1d274 100644
15 --- a/fs/dlm/debug_fs.c
16 +++ b/fs/dlm/debug_fs.c
17 @@ -416,7 +416,7 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos)
18 if (seq->op == &format3_seq_ops)
21 - read_lock(&ls->ls_rsbtbl[bucket].lock);
22 + spin_lock(&ls->ls_rsbtbl[bucket].lock);
23 if (!list_empty(&ls->ls_rsbtbl[bucket].list)) {
24 list_for_each_entry(r, &ls->ls_rsbtbl[bucket].list,
26 @@ -424,12 +424,12 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos)
30 - read_unlock(&ls->ls_rsbtbl[bucket].lock);
31 + spin_unlock(&ls->ls_rsbtbl[bucket].lock);
36 - read_unlock(&ls->ls_rsbtbl[bucket].lock);
37 + spin_unlock(&ls->ls_rsbtbl[bucket].lock);
40 * move to the first rsb in the next non-empty bucket
41 @@ -447,18 +447,18 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos)
45 - read_lock(&ls->ls_rsbtbl[bucket].lock);
46 + spin_lock(&ls->ls_rsbtbl[bucket].lock);
47 if (!list_empty(&ls->ls_rsbtbl[bucket].list)) {
48 r = list_first_entry(&ls->ls_rsbtbl[bucket].list,
49 struct dlm_rsb, res_hashchain);
53 - read_unlock(&ls->ls_rsbtbl[bucket].lock);
54 + spin_unlock(&ls->ls_rsbtbl[bucket].lock);
58 - read_unlock(&ls->ls_rsbtbl[bucket].lock);
59 + spin_unlock(&ls->ls_rsbtbl[bucket].lock);
63 @@ -477,7 +477,7 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
64 * move to the next rsb in the same bucket
67 - read_lock(&ls->ls_rsbtbl[bucket].lock);
68 + spin_lock(&ls->ls_rsbtbl[bucket].lock);
70 next = rp->res_hashchain.next;
72 @@ -485,12 +485,12 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
73 r = list_entry(next, struct dlm_rsb, res_hashchain);
76 - read_unlock(&ls->ls_rsbtbl[bucket].lock);
77 + spin_unlock(&ls->ls_rsbtbl[bucket].lock);
82 - read_unlock(&ls->ls_rsbtbl[bucket].lock);
83 + spin_unlock(&ls->ls_rsbtbl[bucket].lock);
87 @@ -509,18 +509,18 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
91 - read_lock(&ls->ls_rsbtbl[bucket].lock);
92 + spin_lock(&ls->ls_rsbtbl[bucket].lock);
93 if (!list_empty(&ls->ls_rsbtbl[bucket].list)) {
94 r = list_first_entry(&ls->ls_rsbtbl[bucket].list,
95 struct dlm_rsb, res_hashchain);
99 - read_unlock(&ls->ls_rsbtbl[bucket].lock);
100 + spin_unlock(&ls->ls_rsbtbl[bucket].lock);
104 - read_unlock(&ls->ls_rsbtbl[bucket].lock);
105 + spin_unlock(&ls->ls_rsbtbl[bucket].lock);
109 diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
110 index ef2f1e3..076e86f 100644
111 --- a/fs/dlm/dlm_internal.h
112 +++ b/fs/dlm/dlm_internal.h
113 @@ -105,7 +105,7 @@ struct dlm_dirtable {
114 struct dlm_rsbtable {
115 struct list_head list;
116 struct list_head toss;
121 struct dlm_lkbtable {
122 diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
123 index 6cfe65b..01e7d39 100644
126 @@ -412,9 +412,9 @@ static int search_rsb(struct dlm_ls *ls, char *name, int len, int b,
127 unsigned int flags, struct dlm_rsb **r_ret)
130 - write_lock(&ls->ls_rsbtbl[b].lock);
131 + spin_lock(&ls->ls_rsbtbl[b].lock);
132 error = _search_rsb(ls, name, len, b, flags, r_ret);
133 - write_unlock(&ls->ls_rsbtbl[b].lock);
134 + spin_unlock(&ls->ls_rsbtbl[b].lock);
138 @@ -478,16 +478,16 @@ static int find_rsb(struct dlm_ls *ls, char *name, int namelen,
139 r->res_nodeid = nodeid;
142 - write_lock(&ls->ls_rsbtbl[bucket].lock);
143 + spin_lock(&ls->ls_rsbtbl[bucket].lock);
144 error = _search_rsb(ls, name, namelen, bucket, 0, &tmp);
146 - write_unlock(&ls->ls_rsbtbl[bucket].lock);
147 + spin_unlock(&ls->ls_rsbtbl[bucket].lock);
152 list_add(&r->res_hashchain, &ls->ls_rsbtbl[bucket].list);
153 - write_unlock(&ls->ls_rsbtbl[bucket].lock);
154 + spin_unlock(&ls->ls_rsbtbl[bucket].lock);
158 @@ -530,9 +530,9 @@ static void put_rsb(struct dlm_rsb *r)
159 struct dlm_ls *ls = r->res_ls;
160 uint32_t bucket = r->res_bucket;
162 - write_lock(&ls->ls_rsbtbl[bucket].lock);
163 + spin_lock(&ls->ls_rsbtbl[bucket].lock);
164 kref_put(&r->res_ref, toss_rsb);
165 - write_unlock(&ls->ls_rsbtbl[bucket].lock);
166 + spin_unlock(&ls->ls_rsbtbl[bucket].lock);
169 void dlm_put_rsb(struct dlm_rsb *r)
170 @@ -967,7 +967,7 @@ static int shrink_bucket(struct dlm_ls *ls, int b)
174 - write_lock(&ls->ls_rsbtbl[b].lock);
175 + spin_lock(&ls->ls_rsbtbl[b].lock);
176 list_for_each_entry_reverse(r, &ls->ls_rsbtbl[b].toss,
178 if (!time_after_eq(jiffies, r->res_toss_time +
179 @@ -978,20 +978,20 @@ static int shrink_bucket(struct dlm_ls *ls, int b)
183 - write_unlock(&ls->ls_rsbtbl[b].lock);
184 + spin_unlock(&ls->ls_rsbtbl[b].lock);
188 if (kref_put(&r->res_ref, kill_rsb)) {
189 list_del(&r->res_hashchain);
190 - write_unlock(&ls->ls_rsbtbl[b].lock);
191 + spin_unlock(&ls->ls_rsbtbl[b].lock);
198 - write_unlock(&ls->ls_rsbtbl[b].lock);
199 + spin_unlock(&ls->ls_rsbtbl[b].lock);
200 log_error(ls, "tossed rsb in use %s", r->res_name);
203 @@ -4224,7 +4224,7 @@ static struct dlm_rsb *find_purged_rsb(struct dlm_ls *ls, int bucket)
205 struct dlm_rsb *r, *r_ret = NULL;
207 - read_lock(&ls->ls_rsbtbl[bucket].lock);
208 + spin_lock(&ls->ls_rsbtbl[bucket].lock);
209 list_for_each_entry(r, &ls->ls_rsbtbl[bucket].list, res_hashchain) {
210 if (!rsb_flag(r, RSB_LOCKS_PURGED))
212 @@ -4233,7 +4233,7 @@ static struct dlm_rsb *find_purged_rsb(struct dlm_ls *ls, int bucket)
216 - read_unlock(&ls->ls_rsbtbl[bucket].lock);
217 + spin_unlock(&ls->ls_rsbtbl[bucket].lock);
221 diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
222 index 8d86b79..aa32e5f 100644
223 --- a/fs/dlm/lockspace.c
224 +++ b/fs/dlm/lockspace.c
225 @@ -464,7 +464,7 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
226 for (i = 0; i < size; i++) {
227 INIT_LIST_HEAD(&ls->ls_rsbtbl[i].list);
228 INIT_LIST_HEAD(&ls->ls_rsbtbl[i].toss);
229 - rwlock_init(&ls->ls_rsbtbl[i].lock);
230 + spin_lock_init(&ls->ls_rsbtbl[i].lock);
233 size = dlm_config.ci_lkbtbl_size;
234 diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
235 index 80aba5b..eda43f3 100644
236 --- a/fs/dlm/recover.c
237 +++ b/fs/dlm/recover.c
238 @@ -726,7 +726,7 @@ int dlm_create_root_list(struct dlm_ls *ls)
241 for (i = 0; i < ls->ls_rsbtbl_size; i++) {
242 - read_lock(&ls->ls_rsbtbl[i].lock);
243 + spin_lock(&ls->ls_rsbtbl[i].lock);
244 list_for_each_entry(r, &ls->ls_rsbtbl[i].list, res_hashchain) {
245 list_add(&r->res_root_list, &ls->ls_root_list);
247 @@ -737,7 +737,7 @@ int dlm_create_root_list(struct dlm_ls *ls)
248 but no other recovery steps should do anything with them. */
250 if (dlm_no_directory(ls)) {
251 - read_unlock(&ls->ls_rsbtbl[i].lock);
252 + spin_unlock(&ls->ls_rsbtbl[i].lock);
256 @@ -745,7 +745,7 @@ int dlm_create_root_list(struct dlm_ls *ls)
257 list_add(&r->res_root_list, &ls->ls_root_list);
260 - read_unlock(&ls->ls_rsbtbl[i].lock);
261 + spin_unlock(&ls->ls_rsbtbl[i].lock);
264 up_write(&ls->ls_root_sem);
265 @@ -775,7 +775,7 @@ void dlm_clear_toss_list(struct dlm_ls *ls)
268 for (i = 0; i < ls->ls_rsbtbl_size; i++) {
269 - write_lock(&ls->ls_rsbtbl[i].lock);
270 + spin_lock(&ls->ls_rsbtbl[i].lock);
271 list_for_each_entry_safe(r, safe, &ls->ls_rsbtbl[i].toss,
273 if (dlm_no_directory(ls) || !is_master(r)) {
274 @@ -783,7 +783,7 @@ void dlm_clear_toss_list(struct dlm_ls *ls)
278 - write_unlock(&ls->ls_rsbtbl[i].lock);
279 + spin_unlock(&ls->ls_rsbtbl[i].lock);