]> git.ipfire.org Git - ipfire-2.x.git/blob - src/patches/suse-2.6.27.39/patches.suse/ocfs2-Implement-quota-recovery.patch
Add a patch to fix Intel E100 wake-on-lan problems.
[ipfire-2.x.git] / src / patches / suse-2.6.27.39 / patches.suse / ocfs2-Implement-quota-recovery.patch
1 From: Jan Kara <jack@suse.cz>
2 References: fate#302681
3 Subject: [PATCH 28/28] ocfs2: Implement quota recovery
4 Patch-mainline: 2.6.29?
5
6 Implement functions for recovery after a crash. Functions just
7 read local quota file and sync info to global quota file.
8
9 Signed-off-by: Jan Kara <jack@suse.cz>
10 ---
11 fs/ocfs2/journal.c | 103 +++++++++--
12 fs/ocfs2/journal.h | 1
13 fs/ocfs2/ocfs2.h | 4
14 fs/ocfs2/quota.h | 21 ++
15 fs/ocfs2/quota_local.c | 428 ++++++++++++++++++++++++++++++++++++++++++++++++-
16 fs/ocfs2/super.c | 3
17 6 files changed, 530 insertions(+), 30 deletions(-)
18
19 --- a/fs/ocfs2/journal.c
20 +++ b/fs/ocfs2/journal.c
21 @@ -45,6 +45,7 @@
22 #include "slot_map.h"
23 #include "super.h"
24 #include "sysfile.h"
25 +#include "quota.h"
26
27 #include "buffer_head_io.h"
28
29 @@ -52,7 +53,7 @@ DEFINE_SPINLOCK(trans_inc_lock);
30
31 static int ocfs2_force_read_journal(struct inode *inode);
32 static int ocfs2_recover_node(struct ocfs2_super *osb,
33 - int node_num);
34 + int node_num, int slot_num);
35 static int __ocfs2_recovery_thread(void *arg);
36 static int ocfs2_commit_cache(struct ocfs2_super *osb);
37 static int __ocfs2_wait_on_mount(struct ocfs2_super *osb, int quota);
38 @@ -889,6 +890,7 @@ struct ocfs2_la_recovery_item {
39 int lri_slot;
40 struct ocfs2_dinode *lri_la_dinode;
41 struct ocfs2_dinode *lri_tl_dinode;
42 + struct ocfs2_quota_recovery *lri_qrec;
43 };
44
45 /* Does the second half of the recovery process. By this point, the
46 @@ -909,6 +911,7 @@ void ocfs2_complete_recovery(struct work
47 struct ocfs2_super *osb = journal->j_osb;
48 struct ocfs2_dinode *la_dinode, *tl_dinode;
49 struct ocfs2_la_recovery_item *item, *n;
50 + struct ocfs2_quota_recovery *qrec;
51 LIST_HEAD(tmp_la_list);
52
53 mlog_entry_void();
54 @@ -956,6 +959,16 @@ void ocfs2_complete_recovery(struct work
55 if (ret < 0)
56 mlog_errno(ret);
57
58 + qrec = item->lri_qrec;
59 + if (qrec) {
60 + mlog(0, "Recovering quota files");
61 + ret = ocfs2_finish_quota_recovery(osb, qrec,
62 + item->lri_slot);
63 + if (ret < 0)
64 + mlog_errno(ret);
65 + /* Recovery info is already freed now */
66 + }
67 +
68 kfree(item);
69 }
70
71 @@ -969,7 +982,8 @@ void ocfs2_complete_recovery(struct work
72 static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal,
73 int slot_num,
74 struct ocfs2_dinode *la_dinode,
75 - struct ocfs2_dinode *tl_dinode)
76 + struct ocfs2_dinode *tl_dinode,
77 + struct ocfs2_quota_recovery *qrec)
78 {
79 struct ocfs2_la_recovery_item *item;
80
81 @@ -984,6 +998,9 @@ static void ocfs2_queue_recovery_complet
82 if (tl_dinode)
83 kfree(tl_dinode);
84
85 + if (qrec)
86 + ocfs2_free_quota_recovery(qrec);
87 +
88 mlog_errno(-ENOMEM);
89 return;
90 }
91 @@ -992,6 +1009,7 @@ static void ocfs2_queue_recovery_complet
92 item->lri_la_dinode = la_dinode;
93 item->lri_slot = slot_num;
94 item->lri_tl_dinode = tl_dinode;
95 + item->lri_qrec = qrec;
96
97 spin_lock(&journal->j_lock);
98 list_add_tail(&item->lri_list, &journal->j_la_cleanups);
99 @@ -1011,6 +1029,7 @@ void ocfs2_complete_mount_recovery(struc
100 ocfs2_queue_recovery_completion(journal,
101 osb->slot_num,
102 osb->local_alloc_copy,
103 + NULL,
104 NULL);
105 ocfs2_schedule_truncate_log_flush(osb, 0);
106
107 @@ -1019,11 +1038,26 @@ void ocfs2_complete_mount_recovery(struc
108 }
109 }
110
111 +void ocfs2_complete_quota_recovery(struct ocfs2_super *osb)
112 +{
113 + if (osb->quota_rec) {
114 + ocfs2_queue_recovery_completion(osb->journal,
115 + osb->slot_num,
116 + NULL,
117 + NULL,
118 + osb->quota_rec);
119 + osb->quota_rec = NULL;
120 + }
121 +}
122 +
123 static int __ocfs2_recovery_thread(void *arg)
124 {
125 - int status, node_num;
126 + int status, node_num, slot_num;
127 struct ocfs2_super *osb = arg;
128 struct ocfs2_recovery_map *rm = osb->recovery_map;
129 + int *rm_quota = NULL;
130 + int rm_quota_used = 0, i;
131 + struct ocfs2_quota_recovery *qrec;
132
133 mlog_entry_void();
134
135 @@ -1032,6 +1066,11 @@ static int __ocfs2_recovery_thread(void
136 goto bail;
137 }
138
139 + rm_quota = kzalloc(osb->max_slots * sizeof(int), GFP_NOFS);
140 + if (!rm_quota) {
141 + status = -ENOMEM;
142 + goto bail;
143 + }
144 restart:
145 status = ocfs2_super_lock(osb, 1);
146 if (status < 0) {
147 @@ -1045,8 +1084,28 @@ restart:
148 * clear it until ocfs2_recover_node() has succeeded. */
149 node_num = rm->rm_entries[0];
150 spin_unlock(&osb->osb_lock);
151 + mlog(0, "checking node %d\n", node_num);
152 + slot_num = ocfs2_node_num_to_slot(osb, node_num);
153 + if (slot_num == -ENOENT) {
154 + status = 0;
155 + mlog(0, "no slot for this node, so no recovery "
156 "required.\n");
157 + goto skip_recovery;
158 + }
159 + mlog(0, "node %d was using slot %d\n", node_num, slot_num);
160
161 - status = ocfs2_recover_node(osb, node_num);
162 + /* It is a bit subtle with quota recovery. We cannot do it
163 + * immediately because we have to obtain cluster locks from
164 + * quota files and we also don't want to just skip it because
165 + * then quota usage would be out of sync until some node takes
166 + * the slot. So we remember which nodes need quota recovery
167 + * and when everything else is done, we recover quotas. */
168 + for (i = 0; i < rm_quota_used && rm_quota[i] != slot_num; i++);
169 + if (i == rm_quota_used)
170 + rm_quota[rm_quota_used++] = slot_num;
171 +
172 + status = ocfs2_recover_node(osb, node_num, slot_num);
173 +skip_recovery:
174 if (!status) {
175 ocfs2_recovery_map_clear(osb, node_num);
176 } else {
177 @@ -1070,11 +1129,22 @@ restart:
178
179 ocfs2_super_unlock(osb, 1);
180
181 + /* Now it is right time to recover quotas... */
182 + for (i = 0; i < rm_quota_used; i++) {
183 + qrec = ocfs2_begin_quota_recovery(osb, rm_quota[i]);
184 + if (IS_ERR(qrec)) {
185 + status = PTR_ERR(qrec);
186 + mlog_errno(status);
187 + }
188 + ocfs2_queue_recovery_completion(osb->journal, rm_quota[i],
189 + NULL, NULL, qrec);
190 + }
191 +
192 /* We always run recovery on our own orphan dir - the dead
193 * node(s) may have disallowd a previos inode delete. Re-processing
194 * is therefore required. */
195 ocfs2_queue_recovery_completion(osb->journal, osb->slot_num, NULL,
196 - NULL);
197 + NULL, NULL);
198
199 bail:
200 mutex_lock(&osb->recovery_lock);
201 @@ -1089,6 +1159,9 @@ bail:
202
203 mutex_unlock(&osb->recovery_lock);
204
205 + if (rm_quota)
206 + kfree(rm_quota);
207 +
208 mlog_exit(status);
209 /* no one is callint kthread_stop() for us so the kthread() api
210 * requires that we call do_exit(). And it isn't exported, but
211 @@ -1317,31 +1390,19 @@ done:
212 * far less concerning.
213 */
214 static int ocfs2_recover_node(struct ocfs2_super *osb,
215 - int node_num)
216 + int node_num, int slot_num)
217 {
218 int status = 0;
219 - int slot_num;
220 struct ocfs2_dinode *la_copy = NULL;
221 struct ocfs2_dinode *tl_copy = NULL;
222
223 - mlog_entry("(node_num=%d, osb->node_num = %d)\n",
224 - node_num, osb->node_num);
225 -
226 - mlog(0, "checking node %d\n", node_num);
227 + mlog_entry("(node_num=%d, slot_num=%d, osb->node_num = %d)\n",
228 + node_num, slot_num, osb->node_num);
229
230 /* Should not ever be called to recover ourselves -- in that
231 * case we should've called ocfs2_journal_load instead. */
232 BUG_ON(osb->node_num == node_num);
233
234 - slot_num = ocfs2_node_num_to_slot(osb, node_num);
235 - if (slot_num == -ENOENT) {
236 - status = 0;
237 - mlog(0, "no slot for this node, so no recovery required.\n");
238 - goto done;
239 - }
240 -
241 - mlog(0, "node %d was using slot %d\n", node_num, slot_num);
242 -
243 status = ocfs2_replay_journal(osb, node_num, slot_num);
244 if (status < 0) {
245 if (status == -EBUSY) {
246 @@ -1377,7 +1438,7 @@ static int ocfs2_recover_node(struct ocf
247
248 /* This will kfree the memory pointed to by la_copy and tl_copy */
249 ocfs2_queue_recovery_completion(osb->journal, slot_num, la_copy,
250 - tl_copy);
251 + tl_copy, NULL);
252
253 status = 0;
254 done:
255 --- a/fs/ocfs2/journal.h
256 +++ b/fs/ocfs2/journal.h
257 @@ -173,6 +173,7 @@ void ocfs2_recovery_thread(struct ocfs
258 int node_num);
259 int ocfs2_mark_dead_nodes(struct ocfs2_super *osb);
260 void ocfs2_complete_mount_recovery(struct ocfs2_super *osb);
261 +void ocfs2_complete_quota_recovery(struct ocfs2_super *osb);
262
263 static inline void ocfs2_start_checkpoint(struct ocfs2_super *osb)
264 {
265 --- a/fs/ocfs2/ocfs2.h
266 +++ b/fs/ocfs2/ocfs2.h
267 @@ -209,6 +209,7 @@ enum ocfs2_mount_options
268 struct ocfs2_journal;
269 struct ocfs2_slot_info;
270 struct ocfs2_recovery_map;
271 +struct ocfs2_quota_recovery;
272 struct ocfs2_super
273 {
274 struct task_struct *commit_task;
275 @@ -290,10 +291,11 @@ struct ocfs2_super
276 char *local_alloc_debug_buf;
277 #endif
278
279 - /* Next two fields are for local node slot recovery during
280 + /* Next three fields are for local node slot recovery during
281 * mount. */
282 int dirty;
283 struct ocfs2_dinode *local_alloc_copy;
284 + struct ocfs2_quota_recovery *quota_rec;
285
286 struct ocfs2_alloc_stats alloc_stats;
287 char dev_str[20]; /* "major,minor" of the device */
288 --- a/fs/ocfs2/quota.h
289 +++ b/fs/ocfs2/quota.h
290 @@ -38,6 +38,17 @@ struct ocfs2_dquot {
291 s64 dq_originodes; /* Last globally synced inode usage */
292 };
293
294 +/* Description of one chunk to recover in memory */
295 +struct ocfs2_recovery_chunk {
296 + struct list_head rc_list; /* List of chunks */
297 + int rc_chunk; /* Chunk number */
298 + unsigned long *rc_bitmap; /* Bitmap of entries to recover */
299 +};
300 +
301 +struct ocfs2_quota_recovery {
302 + struct list_head r_list[MAXQUOTAS]; /* List of chunks to recover */
303 +};
304 +
305 /* In-memory structure with quota header information */
306 struct ocfs2_mem_dqinfo {
307 unsigned int dqi_type; /* Quota type this structure describes */
308 @@ -54,6 +65,10 @@ struct ocfs2_mem_dqinfo {
309 struct buffer_head *dqi_ibh; /* Buffer with information header */
310 struct qtree_mem_dqinfo dqi_gi; /* Info about global file */
311 struct timer_list dqi_sync_timer; /* Timer for syncing dquots */
312 + struct ocfs2_quota_recovery *dqi_rec; /* Pointer to recovery
313 + * information, in case we
314 + * enable quotas on file
315 + * needing it */
316 };
317
318 static inline struct ocfs2_dquot *OCFS2_DQUOT(struct dquot *dquot)
319 @@ -72,6 +87,12 @@ extern struct kmem_cache *ocfs2_qf_chunk
320
321 extern struct qtree_fmt_operations ocfs2_global_ops;
322
323 +struct ocfs2_quota_recovery *ocfs2_begin_quota_recovery(
324 + struct ocfs2_super *osb, int slot_num);
325 +int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
326 + struct ocfs2_quota_recovery *rec,
327 + int slot_num);
328 +void ocfs2_free_quota_recovery(struct ocfs2_quota_recovery *rec);
329 ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data,
330 size_t len, loff_t off);
331 ssize_t ocfs2_quota_write(struct super_block *sb, int type,
332 --- a/fs/ocfs2/quota_local.c
333 +++ b/fs/ocfs2/quota_local.c
334 @@ -49,14 +49,25 @@ static unsigned int ol_quota_chunk_block
335 return 1 + (ol_chunk_blocks(sb) + 1) * c;
336 }
337
338 -/* Offset of the dquot structure in the quota file */
339 -static loff_t ol_dqblk_off(struct super_block *sb, int c, int off)
340 +static unsigned int ol_dqblk_block(struct super_block *sb, int c, int off)
341 +{
342 + int epb = ol_quota_entries_per_block(sb);
343 +
344 + return ol_quota_chunk_block(sb, c) + 1 + off / epb;
345 +}
346 +
347 +static unsigned int ol_dqblk_block_off(struct super_block *sb, int c, int off)
348 {
349 int epb = ol_quota_entries_per_block(sb);
350
351 - return ((ol_quota_chunk_block(sb, c) + 1 + off / epb)
352 - << sb->s_blocksize_bits) +
353 - (off % epb) * sizeof(struct ocfs2_local_disk_dqblk);
354 + return (off % epb) * sizeof(struct ocfs2_local_disk_dqblk);
355 +}
356 +
357 +/* Offset of the dquot structure in the quota file */
358 +static loff_t ol_dqblk_off(struct super_block *sb, int c, int off)
359 +{
360 + return (ol_dqblk_block(sb, c, off) << sb->s_blocksize_bits) +
361 + ol_dqblk_block_off(sb, c, off);
362 }
363
364 /* Compute block number from given offset */
365 @@ -253,6 +264,382 @@ static void olq_update_info(struct buffe
366 spin_unlock(&dq_data_lock);
367 }
368
369 +static int ocfs2_add_recovery_chunk(struct super_block *sb,
370 + struct ocfs2_local_disk_chunk *dchunk,
371 + int chunk,
372 + struct list_head *head)
373 +{
374 + struct ocfs2_recovery_chunk *rc;
375 +
376 + rc = kmalloc(sizeof(struct ocfs2_recovery_chunk), GFP_NOFS);
377 + if (!rc)
378 + return -ENOMEM;
379 + rc->rc_chunk = chunk;
380 + rc->rc_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS);
381 + if (!rc->rc_bitmap) {
382 + kfree(rc);
383 + return -ENOMEM;
384 + }
385 + memcpy(rc->rc_bitmap, dchunk->dqc_bitmap,
386 + (ol_chunk_entries(sb) + 7) >> 3);
387 + list_add_tail(&rc->rc_list, head);
388 + return 0;
389 +}
390 +
391 +static void free_recovery_list(struct list_head *head)
392 +{
393 + struct ocfs2_recovery_chunk *next;
394 + struct ocfs2_recovery_chunk *rchunk;
395 +
396 + list_for_each_entry_safe(rchunk, next, head, rc_list) {
397 + list_del(&rchunk->rc_list);
398 + kfree(rchunk->rc_bitmap);
399 + kfree(rchunk);
400 + }
401 +}
402 +
403 +void ocfs2_free_quota_recovery(struct ocfs2_quota_recovery *rec)
404 +{
405 + int type;
406 +
407 + for (type = 0; type < MAXQUOTAS; type++)
408 + free_recovery_list(&(rec->r_list[type]));
409 + kfree(rec);
410 +}
411 +
412 +/* Load entries in our quota file we have to recover */
413 +static int ocfs2_recovery_load_quota(struct inode *lqinode,
414 + struct ocfs2_local_disk_dqinfo *ldinfo,
415 + int type,
416 + struct list_head *head)
417 +{
418 + struct super_block *sb = lqinode->i_sb;
419 + struct buffer_head *hbh;
420 + struct ocfs2_local_disk_chunk *dchunk;
421 + int i, chunks = le32_to_cpu(ldinfo->dqi_chunks);
422 + int status = 0;
423 +
424 + for (i = 0; i < chunks; i++) {
425 + hbh = ocfs2_bread(lqinode, ol_quota_chunk_block(sb, i),
426 + &status, 0);
427 + if (!hbh) {
428 + mlog_errno(status);
429 + break;
430 + }
431 + dchunk = (struct ocfs2_local_disk_chunk *)hbh->b_data;
432 + if (le32_to_cpu(dchunk->dqc_free) < ol_chunk_entries(sb))
433 + status = ocfs2_add_recovery_chunk(sb, dchunk, i, head);
434 + brelse(hbh);
435 + if (status < 0)
436 + break;
437 + }
438 + if (status < 0)
439 + free_recovery_list(head);
440 + return status;
441 +}
442 +
443 +static struct ocfs2_quota_recovery *ocfs2_alloc_quota_recovery(void)
444 +{
445 + int type;
446 + struct ocfs2_quota_recovery *rec;
447 +
448 + rec = kmalloc(sizeof(struct ocfs2_quota_recovery), GFP_NOFS);
449 + if (!rec)
450 + return NULL;
451 + for (type = 0; type < MAXQUOTAS; type++)
452 + INIT_LIST_HEAD(&(rec->r_list[type]));
453 + return rec;
454 +}
455 +
456 +/* Load information we need for quota recovery into memory */
457 +struct ocfs2_quota_recovery *ocfs2_begin_quota_recovery(
458 + struct ocfs2_super *osb,
459 + int slot_num)
460 +{
461 + unsigned int feature[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
462 + OCFS2_FEATURE_RO_COMPAT_GRPQUOTA};
463 + unsigned int ino[MAXQUOTAS] = { LOCAL_USER_QUOTA_SYSTEM_INODE,
464 + LOCAL_GROUP_QUOTA_SYSTEM_INODE };
465 + struct super_block *sb = osb->sb;
466 + struct ocfs2_local_disk_dqinfo *ldinfo;
467 + struct inode *lqinode;
468 + struct buffer_head *bh;
469 + int type;
470 + int status;
471 + struct ocfs2_quota_recovery *rec;
472 +
473 + mlog(ML_NOTICE, "Beginning quota recovery in slot %u\n", slot_num);
474 + rec = ocfs2_alloc_quota_recovery();
475 + if (!rec)
476 + return ERR_PTR(-ENOMEM);
477 + /* First init... */
478 +
479 + for (type = 0; type < MAXQUOTAS; type++) {
480 + if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, feature[type]))
481 + continue;
482 + lqinode = ocfs2_get_system_file_inode(osb, ino[type], slot_num);
483 + if (!lqinode) {
484 + status = -ENOENT;
485 + goto out;
486 + }
487 + status = ocfs2_inode_lock_full(lqinode, NULL, 1,
488 + OCFS2_META_LOCK_NOQUEUE);
489 + /* Someone else is holding the lock? Then he must be
490 + * doing the recovery. Just skip the file... */
491 + if (status == -EAGAIN) {
492 + mlog(ML_NOTICE, "skipping quota recovery for slot %d "
493 + "because quota file is locked.\n", slot_num);
494 + status = 0;
495 + goto out_put;
496 + } else if (status < 0) {
497 + mlog_errno(status);
498 + goto out_put;
499 + }
500 + /* Now read local header */
501 + bh = ocfs2_bread(lqinode, 0, &status, 0);
502 + if (!bh) {
503 + mlog_errno(status);
504 + mlog(ML_ERROR, "failed to read quota file info header "
505 + "(slot=%d type=%d)\n", slot_num, type);
506 + goto out_lock;
507 + }
508 + ldinfo = (struct ocfs2_local_disk_dqinfo *)(bh->b_data +
509 + OCFS2_LOCAL_INFO_OFF);
510 + status = ocfs2_recovery_load_quota(lqinode, ldinfo, type,
511 + &rec->r_list[type]);
512 + brelse(bh);
513 +out_lock:
514 + ocfs2_inode_unlock(lqinode, 1);
515 +out_put:
516 + iput(lqinode);
517 + if (status < 0)
518 + break;
519 + }
520 +out:
521 + if (status < 0) {
522 + ocfs2_free_quota_recovery(rec);
523 + rec = ERR_PTR(status);
524 + }
525 + return rec;
526 +}
527 +
528 +/* Sync changes in local quota file into global quota file and
529 + * reinitialize local quota file.
530 + * The function expects local quota file to be already locked and
531 + * dqonoff_mutex locked. */
532 +static int ocfs2_recover_local_quota_file(struct inode *lqinode,
533 + int type,
534 + struct ocfs2_quota_recovery *rec)
535 +{
536 + struct super_block *sb = lqinode->i_sb;
537 + struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
538 + struct ocfs2_local_disk_chunk *dchunk;
539 + struct ocfs2_local_disk_dqblk *dqblk;
540 + struct dquot *dquot;
541 + handle_t *handle;
542 + struct buffer_head *hbh = NULL, *qbh = NULL;
543 + int status = 0;
544 + int bit, chunk;
545 + struct ocfs2_recovery_chunk *rchunk, *next;
546 + qsize_t spacechange, inodechange;
547 +
548 + mlog_entry("ino=%lu type=%u", (unsigned long)lqinode->i_ino, type);
549 +
550 + status = ocfs2_lock_global_qf(oinfo, 1);
551 + if (status < 0)
552 + goto out;
553 +
554 + list_for_each_entry_safe(rchunk, next, &(rec->r_list[type]), rc_list) {
555 + chunk = rchunk->rc_chunk;
556 + hbh = ocfs2_bread(lqinode, ol_quota_chunk_block(sb, chunk),
557 + &status, 0);
558 + if (!hbh) {
559 + mlog_errno(status);
560 + break;
561 + }
562 + dchunk = (struct ocfs2_local_disk_chunk *)hbh->b_data;
563 + for_each_bit(bit, rchunk->rc_bitmap, ol_chunk_entries(sb)) {
564 + qbh = ocfs2_bread(lqinode,
565 + ol_dqblk_block(sb, chunk, bit),
566 + &status, 0);
567 + if (!qbh) {
568 + mlog_errno(status);
569 + break;
570 + }
571 + dqblk = (struct ocfs2_local_disk_dqblk *)(qbh->b_data +
572 + ol_dqblk_block_off(sb, chunk, bit));
573 + dquot = dqget(sb, le64_to_cpu(dqblk->dqb_id), type);
574 + if (!dquot) {
575 + status = -EIO;
576 + mlog(ML_ERROR, "Failed to get quota structure "
577 + "for id %u, type %d. Cannot finish quota "
578 + "file recovery.\n",
579 + (unsigned)le64_to_cpu(dqblk->dqb_id),
580 + type);
581 + goto out_put_bh;
582 + }
583 + handle = ocfs2_start_trans(OCFS2_SB(sb),
584 + OCFS2_QSYNC_CREDITS);
585 + if (IS_ERR(handle)) {
586 + status = PTR_ERR(handle);
587 + mlog_errno(status);
588 + goto out_put_dquot;
589 + }
590 + mutex_lock(&sb_dqopt(sb)->dqio_mutex);
591 + spin_lock(&dq_data_lock);
592 + /* Add usage from quota entry into quota changes
593 + * of our node. Auxiliary variables are important
594 + * due to signedness */
595 + spacechange = le64_to_cpu(dqblk->dqb_spacemod);
596 + inodechange = le64_to_cpu(dqblk->dqb_inodemod);
597 + dquot->dq_dqb.dqb_curspace += spacechange;
598 + dquot->dq_dqb.dqb_curinodes += inodechange;
599 + spin_unlock(&dq_data_lock);
600 + /* We want to drop reference held by the crashed
601 + * node. Since we have our own reference we know
602 + * global structure actually won't be freed. */
603 + status = ocfs2_global_release_dquot(dquot);
604 + if (status < 0) {
605 + mlog_errno(status);
606 + goto out_commit;
607 + }
608 + /* Release local quota file entry */
609 + status = ocfs2_journal_access(handle, lqinode,
610 + qbh, OCFS2_JOURNAL_ACCESS_WRITE);
611 + if (status < 0) {
612 + mlog_errno(status);
613 + goto out_commit;
614 + }
615 + lock_buffer(qbh);
616 + WARN_ON(!ocfs2_test_bit(bit, dchunk->dqc_bitmap));
617 + ocfs2_clear_bit(bit, dchunk->dqc_bitmap);
618 + le32_add_cpu(&dchunk->dqc_free, 1);
619 + unlock_buffer(qbh);
620 + status = ocfs2_journal_dirty(handle, qbh);
621 + if (status < 0)
622 + mlog_errno(status);
623 +out_commit:
624 + mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
625 + ocfs2_commit_trans(OCFS2_SB(sb), handle);
626 +out_put_dquot:
627 + dqput(dquot);
628 +out_put_bh:
629 + brelse(qbh);
630 + if (status < 0)
631 + break;
632 + }
633 + brelse(hbh);
634 + list_del(&rchunk->rc_list);
635 + kfree(rchunk->rc_bitmap);
636 + kfree(rchunk);
637 + if (status < 0)
638 + break;
639 + }
640 + ocfs2_unlock_global_qf(oinfo, 1);
641 +out:
642 + if (status < 0)
643 + free_recovery_list(&(rec->r_list[type]));
644 + mlog_exit(status);
645 + return status;
646 +}
647 +
648 +/* Recover local quota files for given node different from us */
649 +int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
650 + struct ocfs2_quota_recovery *rec,
651 + int slot_num)
652 +{
653 + unsigned int ino[MAXQUOTAS] = { LOCAL_USER_QUOTA_SYSTEM_INODE,
654 + LOCAL_GROUP_QUOTA_SYSTEM_INODE };
655 + struct super_block *sb = osb->sb;
656 + struct ocfs2_local_disk_dqinfo *ldinfo;
657 + struct buffer_head *bh;
658 + handle_t *handle;
659 + int type;
660 + int status = 0;
661 + struct inode *lqinode;
662 + unsigned int flags;
663 +
664 + mlog(ML_NOTICE, "Finishing quota recovery in slot %u\n", slot_num);
665 + mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
666 + for (type = 0; type < MAXQUOTAS; type++) {
667 + if (list_empty(&(rec->r_list[type])))
668 + continue;
669 + mlog(0, "Recovering quota in slot %d\n", slot_num);
670 + lqinode = ocfs2_get_system_file_inode(osb, ino[type], slot_num);
671 + if (!lqinode) {
672 + status = -ENOENT;
673 + goto out;
674 + }
675 + status = ocfs2_inode_lock_full(lqinode, NULL, 1,
676 + OCFS2_META_LOCK_NOQUEUE);
677 + /* Someone else is holding the lock? Then he must be
678 + * doing the recovery. Just skip the file... */
679 + if (status == -EAGAIN) {
680 + mlog(ML_NOTICE, "skipping quota recovery for slot %d "
681 + "because quota file is locked.\n", slot_num);
682 + status = 0;
683 + goto out_put;
684 + } else if (status < 0) {
685 + mlog_errno(status);
686 + goto out_put;
687 + }
688 + /* Now read local header */
689 + bh = ocfs2_bread(lqinode, 0, &status, 0);
690 + if (!bh) {
691 + mlog_errno(status);
692 + mlog(ML_ERROR, "failed to read quota file info header "
693 + "(slot=%d type=%d)\n", slot_num, type);
694 + goto out_lock;
695 + }
696 + ldinfo = (struct ocfs2_local_disk_dqinfo *)(bh->b_data +
697 + OCFS2_LOCAL_INFO_OFF);
698 + /* Is recovery still needed? */
699 + flags = le32_to_cpu(ldinfo->dqi_flags);
700 + if (!(flags & OLQF_CLEAN))
701 + status = ocfs2_recover_local_quota_file(lqinode,
702 + type,
703 + rec);
704 + /* We don't want to mark file as clean when it is actually
705 + * active */
706 + if (slot_num == osb->slot_num)
707 + goto out_bh;
708 + /* Mark quota file as clean if we are recovering quota file of
709 + * some other node. */
710 + handle = ocfs2_start_trans(osb, 1);
711 + if (IS_ERR(handle)) {
712 + status = PTR_ERR(handle);
713 + mlog_errno(status);
714 + goto out_bh;
715 + }
716 + status = ocfs2_journal_access(handle, lqinode, bh,
717 + OCFS2_JOURNAL_ACCESS_WRITE);
718 + if (status < 0) {
719 + mlog_errno(status);
720 + goto out_trans;
721 + }
722 + lock_buffer(bh);
723 + ldinfo->dqi_flags = cpu_to_le32(flags | OLQF_CLEAN);
724 + unlock_buffer(bh);
725 + status = ocfs2_journal_dirty(handle, bh);
726 + if (status < 0)
727 + mlog_errno(status);
728 +out_trans:
729 + ocfs2_commit_trans(osb, handle);
730 +out_bh:
731 + brelse(bh);
732 +out_lock:
733 + ocfs2_inode_unlock(lqinode, 1);
734 +out_put:
735 + iput(lqinode);
736 + if (status < 0)
737 + break;
738 + }
739 +out:
740 + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
741 + kfree(rec);
742 + return status;
743 +}
744 +
745 /* Read information header from quota file */
746 static int ocfs2_local_read_info(struct super_block *sb, int type)
747 {
748 @@ -262,6 +649,7 @@ static int ocfs2_local_read_info(struct
749 struct inode *lqinode = sb_dqopt(sb)->files[type];
750 int status;
751 struct buffer_head *bh = NULL;
752 + struct ocfs2_quota_recovery *rec;
753 int locked = 0;
754
755 info->dqi_maxblimit = 0x7fffffffffffffffLL;
756 @@ -275,6 +663,7 @@ static int ocfs2_local_read_info(struct
757 info->dqi_priv = oinfo;
758 oinfo->dqi_type = type;
759 INIT_LIST_HEAD(&oinfo->dqi_chunk);
760 + oinfo->dqi_rec = NULL;
761 oinfo->dqi_lqi_bh = NULL;
762 oinfo->dqi_ibh = NULL;
763
764 @@ -305,10 +694,27 @@ static int ocfs2_local_read_info(struct
765 oinfo->dqi_ibh = bh;
766
767 /* We crashed when using local quota file? */
768 - if (!(info->dqi_flags & OLQF_CLEAN))
769 - goto out_err; /* So far we just bail out. Later we should resync here */
770 + if (!(info->dqi_flags & OLQF_CLEAN)) {
771 + rec = OCFS2_SB(sb)->quota_rec;
772 + if (!rec) {
773 + rec = ocfs2_alloc_quota_recovery();
774 + if (!rec) {
775 + status = -ENOMEM;
776 + mlog_errno(status);
777 + goto out_err;
778 + }
779 + OCFS2_SB(sb)->quota_rec = rec;
780 + }
781
782 - status = ocfs2_load_local_quota_bitmaps(sb_dqopt(sb)->files[type],
783 + status = ocfs2_recovery_load_quota(lqinode, ldinfo, type,
784 + &rec->r_list[type]);
785 + if (status < 0) {
786 + mlog_errno(status);
787 + goto out_err;
788 + }
789 + }
790 +
791 + status = ocfs2_load_local_quota_bitmaps(lqinode,
792 ldinfo,
793 &oinfo->dqi_chunk);
794 if (status < 0) {
795 @@ -394,6 +800,12 @@ static int ocfs2_local_free_info(struct
796 }
797 ocfs2_release_local_quota_bitmaps(&oinfo->dqi_chunk);
798
799 + /* dqonoff_mutex protects us against racing with recovery thread... */
800 + if (oinfo->dqi_rec) {
801 + ocfs2_free_quota_recovery(oinfo->dqi_rec);
802 + mark_clean = 0;
803 + }
804 +
805 if (!mark_clean)
806 goto out;
807
808 --- a/fs/ocfs2/super.c
809 +++ b/fs/ocfs2/super.c
810 @@ -973,6 +973,9 @@ static int ocfs2_fill_super(struct super
811 goto read_super_error;
812 }
813 }
814 +
815 + ocfs2_complete_quota_recovery(osb);
816 +
817 /* Now we wake up again for processes waiting for quotas */
818 atomic_set(&osb->vol_state, VOLUME_MOUNTED_QUOTAS);
819 wake_up(&osb->osb_mount_event);