e2fsck/recovery.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * linux/fs/jbd2/recovery.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
 *
 * Copyright 1999-2000 Red Hat Software --- All Rights Reserved
 *
 * Journal recovery routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */

#ifndef __KERNEL__
#include "jfs_user.h"
#else
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/crc32.h>
#include <linux/blkdev.h>
#endif

/*
 * Maintain information about the progress of the recovery job, so that
 * the different passes can carry information between them.
 */
struct recovery_info
{
	tid_t		start_transaction;
	tid_t		end_transaction;

	int		nr_replays;
	int		nr_revokes;
	int		nr_revoke_hits;
};

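/* The three recovery passes, in the order in which they are run. */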
enum passtype {PASS_SCAN, PASS_REVOKE, PASS_REPLAY};
static int do_one_pass(journal_t *journal,
				struct recovery_info *info, enum passtype pass);
static int scan_revoke_records(journal_t *, struct buffer_head *,
				tid_t, struct recovery_info *);

#ifdef __KERNEL__

/* Release readahead buffers after use */
static void journal_brelse_array(struct buffer_head *b[], int n)
{
	while (--n >= 0)
		brelse (b[n]);
}


/*
 * When reading from the journal, we are going through the block device
 * layer directly and so there is no readahead being done for us.  We
 * need to implement any readahead ourselves if we want it to happen at
 * all.  Recovery is basically one long sequential read, so make sure we
 * do the IO in reasonably large chunks.
 *
 * This is not so critical that we need to be enormously clever about
 * the readahead size, though.  128K is a purely arbitrary, good-enough
 * fixed value.
 */

#define MAXBUF 8
static int do_readahead(journal_t *journal, unsigned int start)
{
	int err;
	unsigned int max, nbufs, next;
	unsigned long long blocknr;
	struct buffer_head *bh;

	struct buffer_head * bufs[MAXBUF];

	/* Do up to 128K of readahead */
	max = start + (128 * 1024 / journal->j_blocksize);
	if (max > journal->j_maxlen)
		max = journal->j_maxlen;

	/* Do the readahead itself.  We'll submit MAXBUF buffer_heads at
	 * a time to the block device IO layer. */

	nbufs = 0;

	for (next = start; next < max; next++) {
		err = jbd2_journal_bmap(journal, next, &blocknr);

		if (err) {
			printk(KERN_ERR "JBD2: bad block at offset %u\n",
				next);
			goto failed;
		}

		bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
		if (!bh) {
			err = -ENOMEM;
			goto failed;
		}

		if (!buffer_uptodate(bh) && !buffer_locked(bh)) {
			bufs[nbufs++] = bh;
			if (nbufs == MAXBUF) {
				ll_rw_block(REQ_OP_READ, 0, nbufs, bufs);
				journal_brelse_array(bufs, nbufs);
				nbufs = 0;
			}
		} else
			brelse(bh);
	}

	if (nbufs)
		ll_rw_block(REQ_OP_READ, 0, nbufs, bufs);
	err = 0;

failed:
	if (nbufs)
		journal_brelse_array(bufs, nbufs);
	return err;
}

#endif /* __KERNEL__ */

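/*
 * Read big-endian on-disk fields one byte at a time so the accesses
 * work regardless of host endianness and alignment.
 */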
static inline __u32 get_be32(__be32 *p)
{
	unsigned char *cp = (unsigned char *) p;
	__u32 ret;

	ret = *cp++;
	ret = (ret << 8) + *cp++;
	ret = (ret << 8) + *cp++;
	ret = (ret << 8) + *cp++;
	return ret;
}

static inline __u16 get_be16(__be16 *p)
{
	unsigned char *cp = (unsigned char *) p;
	__u16 ret;

	ret = *cp++;
	ret = (ret << 8) + *cp++;
	return ret;
}

/*
 * Read a block from the journal
 */

static int jread(struct buffer_head **bhp, journal_t *journal,
		 unsigned int offset)
{
	int err;
	unsigned long long blocknr;
	struct buffer_head *bh;

	*bhp = NULL;

	if (offset >= journal->j_maxlen) {
		printk(KERN_ERR "JBD2: corrupted journal superblock\n");
		return -EFSCORRUPTED;
	}

	err = jbd2_journal_bmap(journal, offset, &blocknr);

	if (err) {
		printk(KERN_ERR "JBD2: bad block at offset %u\n",
			offset);
		return err;
	}

	bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
	if (!bh)
		return -ENOMEM;

	if (!buffer_uptodate(bh)) {
		/* If this is a brand new buffer, start readahead.
		   Otherwise, we assume we are already reading it. */
		if (!buffer_req(bh))
			do_readahead(journal, offset);
		wait_on_buffer(bh);
	}

	if (!buffer_uptodate(bh)) {
		printk(KERN_ERR "JBD2: Failed to read block at offset %u\n",
			offset);
		brelse(bh);
		return -EIO;
	}

	*bhp = bh;
	return 0;
}

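/*
 * Verify the checksum stored in the tail of a descriptor-style block
 * (descriptor or revoke block).  Returns 1 if the checksum matches, or
 * if the v2/v3 checksum feature is not in use.
 */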
static int jbd2_descriptor_block_csum_verify(journal_t *j, void *buf)
{
	struct jbd2_journal_block_tail *tail;
	__be32 provided;
	__u32 calculated;

	if (!jbd2_journal_has_csum_v2or3(j))
		return 1;

	tail = (struct jbd2_journal_block_tail *)((char *)buf + j->j_blocksize -
			sizeof(struct jbd2_journal_block_tail));
	provided = tail->t_checksum;
	tail->t_checksum = 0;
	calculated = jbd2_chksum(j, j->j_csum_seed, buf, j->j_blocksize);
	tail->t_checksum = provided;

	return provided == cpu_to_be32(calculated);
}

/*
 * Count the number of in-use tags in a journal descriptor block.
 */

static int count_tags(journal_t *journal, struct buffer_head *bh)
{
	char *			tagp;
	journal_block_tag_t *	tag;
	int			nr = 0, size = journal->j_blocksize;
	int			tag_bytes = journal_tag_bytes(journal);

	if (jbd2_journal_has_csum_v2or3(journal))
		size -= sizeof(struct jbd2_journal_block_tail);

	tagp = &bh->b_data[sizeof(journal_header_t)];

	while ((tagp - bh->b_data + tag_bytes) <= size) {
		tag = (journal_block_tag_t *) tagp;

		nr++;
		tagp += tag_bytes;
		if (!(get_be16(&tag->t_flags) & JBD2_FLAG_SAME_UUID))
			tagp += 16;

		if (get_be16(&tag->t_flags) & JBD2_FLAG_LAST_TAG)
			break;
	}

	return nr;
}


/* Make sure we wrap around the log correctly! */
#define wrap(journal, var) \
do { \
	if (var >= (journal)->j_last) \
		var -= ((journal)->j_last - (journal)->j_first); \
} while (0)

/**
 * jbd2_journal_recover - recovers an on-disk journal
 * @journal: the journal to recover
 *
 * The primary function for recovering the log contents when mounting a
 * journaled device.
 *
 * Recovery is done in three passes.  In the first pass, we look for the
 * end of the log.  In the second, we assemble the list of revoke
 * blocks.  In the third and final pass, we replay any un-revoked blocks
 * in the log.
 */
int jbd2_journal_recover(journal_t *journal)
{
	int			err, err2;
	journal_superblock_t *	sb;

	struct recovery_info	info;

	memset(&info, 0, sizeof(info));
	sb = journal->j_superblock;

	/*
	 * The journal superblock's s_start field (the current log head)
	 * is always zero if, and only if, the journal was cleanly
	 * unmounted.
	 */

	if (!sb->s_start) {
		jbd_debug(1, "No recovery required, last transaction %d\n",
			  be32_to_cpu(sb->s_sequence));
		journal->j_transaction_sequence = be32_to_cpu(sb->s_sequence) + 1;
		return 0;
	}

	err = do_one_pass(journal, &info, PASS_SCAN);
	if (!err)
		err = do_one_pass(journal, &info, PASS_REVOKE);
	if (!err)
		err = do_one_pass(journal, &info, PASS_REPLAY);

	jbd_debug(1, "JBD2: recovery, exit status %d, "
		  "recovered transactions %u to %u\n",
		  err, info.start_transaction, info.end_transaction);
	jbd_debug(1, "JBD2: Replayed %d and revoked %d/%d blocks\n",
		  info.nr_replays, info.nr_revoke_hits, info.nr_revokes);

	/* Restart the log at the next transaction ID, thus invalidating
	 * any existing commit records in the log. */
	journal->j_transaction_sequence = ++info.end_transaction;

	jbd2_journal_clear_revoke(journal);
	err2 = sync_blockdev(journal->j_fs_dev);
	if (!err)
		err = err2;
	/* Make sure all replayed data is on permanent storage */
	if (journal->j_flags & JBD2_BARRIER) {
		err2 = blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
		if (!err)
			err = err2;
	}
	return err;
}

/**
 * jbd2_journal_skip_recovery - Start journal and wipe existing records
 * @journal: journal to startup
 *
 * Locate any valid recovery information from the journal and set up the
 * journal structures in memory to ignore it (presumably because the
 * caller has evidence that it is out of date).
 * This function doesn't appear to be exported.
 *
 * We perform one pass over the journal to allow us to tell the user how
 * much recovery information is being erased, and to let us initialise
 * the journal transaction sequence numbers to the next unused ID.
 */
int jbd2_journal_skip_recovery(journal_t *journal)
{
	int			err;

	struct recovery_info	info;

	memset (&info, 0, sizeof(info));

	err = do_one_pass(journal, &info, PASS_SCAN);

	if (err) {
		printk(KERN_ERR "JBD2: error %d scanning journal\n", err);
		++journal->j_transaction_sequence;
	} else {
#ifdef CONFIG_JBD2_DEBUG
		int dropped = info.end_transaction -
			be32_to_cpu(journal->j_superblock->s_sequence);
		jbd_debug(1,
			  "JBD2: ignoring %d transaction%s from the journal.\n",
			  dropped, (dropped == 1) ? "" : "s");
#endif
		journal->j_transaction_sequence = ++info.end_transaction;
	}

	journal->j_tail = 0;
	return err;
}

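/*
 * Extract the on-disk block number from a descriptor tag, including the
 * high 32 bits when the 64-bit journal feature is enabled.
 */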
static inline unsigned long long read_tag_block(journal_t *journal,
						journal_block_tag_t *tag)
{
	unsigned long long block = get_be32(&tag->t_blocknr);
	if (jbd2_has_feature_64bit(journal))
		block |= (u64)get_be32(&tag->t_blocknr_high) << 32;
	return block;
}

/*
 * calc_chksums calculates the checksums for the blocks described in the
 * descriptor block.
 */
static int calc_chksums(journal_t *journal, struct buffer_head *bh,
			unsigned long *next_log_block, __u32 *crc32_sum)
{
	int i, num_blks, err;
	unsigned long io_block;
	struct buffer_head *obh;

	num_blks = count_tags(journal, bh);
	/* Calculate checksum of the descriptor block. */
	*crc32_sum = crc32_be(*crc32_sum, (void *)bh->b_data, bh->b_size);

	for (i = 0; i < num_blks; i++) {
		io_block = (*next_log_block)++;
		wrap(journal, *next_log_block);
		err = jread(&obh, journal, io_block);
		if (err) {
			printk(KERN_ERR "JBD2: IO error %d recovering block "
				"%lu in log\n", err, io_block);
			return 1;
		} else {
			*crc32_sum = crc32_be(*crc32_sum, (void *)obh->b_data,
					obh->b_size);
		}
		put_bh(obh);
	}
	return 0;
}

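/*
 * Verify the checksum carried in a commit block's header.  Returns 1 if
 * it matches, or if the v2/v3 checksum feature is not in use.
 */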
static int jbd2_commit_block_csum_verify(journal_t *j, void *buf)
{
	struct commit_header *h;
	__be32 provided;
	__u32 calculated;

	if (!jbd2_journal_has_csum_v2or3(j))
		return 1;

	h = buf;
	provided = h->h_chksum[0];
	h->h_chksum[0] = 0;
	calculated = jbd2_chksum(j, j->j_csum_seed, buf, j->j_blocksize);
	h->h_chksum[0] = provided;

	return provided == cpu_to_be32(calculated);
}

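/*
 * Verify the per-block checksum stored in a descriptor tag.  The
 * checksum covers the commit sequence number followed by the data
 * block; csum3 tags carry the full 32-bit value, older tags only the
 * low 16 bits.
 */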
static int jbd2_block_tag_csum_verify(journal_t *j, journal_block_tag_t *tag,
				      void *buf, __u32 sequence)
{
	journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
	__u32 csum32;
	__be32 seq;

	if (!jbd2_journal_has_csum_v2or3(j))
		return 1;

	seq = cpu_to_be32(sequence);
	csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
	csum32 = jbd2_chksum(j, csum32, buf, j->j_blocksize);

	if (jbd2_has_feature_csum3(j))
		return get_be32(&tag3->t_checksum) == csum32;

	return get_be16(&tag->t_checksum) == (csum32 & 0xFFFF);
}

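/*
 * Perform a single recovery pass (PASS_SCAN, PASS_REVOKE or
 * PASS_REPLAY) over the journal, walking it transaction by transaction
 * starting from the superblock's s_start/s_sequence.
 */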
static int do_one_pass(journal_t *journal,
			struct recovery_info *info, enum passtype pass)
{
	unsigned int		first_commit_ID, next_commit_ID;
	unsigned long		next_log_block;
	int			err, success = 0;
	journal_superblock_t *	sb;
	journal_header_t *	tmp;
	struct buffer_head *	bh;
	unsigned int		sequence;
	int			blocktype;
	int			tag_bytes = journal_tag_bytes(journal);
	__u32			crc32_sum = ~0; /* Transactional Checksums */
	int			descr_csum_size = 0;
	int			block_error = 0;

	/*
	 * First thing is to establish what we expect to find in the log
	 * (in terms of transaction IDs), and where (in terms of log
	 * block offsets): query the superblock.
	 */

	sb = journal->j_superblock;
	next_commit_ID = be32_to_cpu(sb->s_sequence);
	next_log_block = be32_to_cpu(sb->s_start);

	first_commit_ID = next_commit_ID;
	if (pass == PASS_SCAN)
		info->start_transaction = first_commit_ID;

	jbd_debug(1, "Starting recovery pass %d\n", pass);

	/*
	 * Now we walk through the log, transaction by transaction,
	 * making sure that each transaction has a commit block in the
	 * expected place.  Each complete transaction gets replayed back
	 * into the main filesystem.
	 */

	while (1) {
		int			flags;
		char *			tagp;
		journal_block_tag_t *	tag;
		struct buffer_head *	obh;
		struct buffer_head *	nbh;

		cond_resched();

		/* If we already know where to stop the log traversal,
		 * check right now that we haven't gone past the end of
		 * the log. */

		if (pass != PASS_SCAN)
			if (tid_geq(next_commit_ID, info->end_transaction))
				break;

		jbd_debug(2, "Scanning for sequence ID %u at %lu/%lu\n",
			  next_commit_ID, next_log_block, journal->j_last);

		/* Skip over each chunk of the transaction looking for
		 * either the next descriptor block or the final commit
		 * record. */

		jbd_debug(3, "JBD2: checking block %ld\n", next_log_block);
		err = jread(&bh, journal, next_log_block);
		if (err)
			goto failed;

		next_log_block++;
		wrap(journal, next_log_block);

		/* What kind of buffer is it?
		 *
		 * If it is a descriptor block, check that it has the
		 * expected sequence number.  Otherwise, we're all done
		 * here. */

		tmp = (journal_header_t *)bh->b_data;

		if (tmp->h_magic != cpu_to_be32(JBD2_MAGIC_NUMBER)) {
			brelse(bh);
			break;
		}

		blocktype = be32_to_cpu(tmp->h_blocktype);
		sequence = be32_to_cpu(tmp->h_sequence);
		jbd_debug(3, "Found magic %d, sequence %d\n",
			  blocktype, sequence);

		if (sequence != next_commit_ID) {
			brelse(bh);
			break;
		}

		/* OK, we have a valid descriptor block which matches
		 * all of the sequence number checks.  What are we going
		 * to do with it?  That depends on the pass... */

		switch(blocktype) {
		case JBD2_DESCRIPTOR_BLOCK:
			/* Verify checksum first */
			if (jbd2_journal_has_csum_v2or3(journal))
				descr_csum_size =
					sizeof(struct jbd2_journal_block_tail);
			if (descr_csum_size > 0 &&
			    !jbd2_descriptor_block_csum_verify(journal,
							       bh->b_data)) {
				err = -EFSBADCRC;
				brelse(bh);
				goto failed;
			}

			/* If it is a valid descriptor block, replay it
			 * in pass REPLAY; if journal_checksums is enabled,
			 * calculate checksums in PASS_SCAN; otherwise,
			 * just skip over the blocks it describes. */
			if (pass != PASS_REPLAY) {
				if (pass == PASS_SCAN &&
				    jbd2_has_feature_checksum(journal) &&
				    !info->end_transaction) {
					if (calc_chksums(journal, bh,
							&next_log_block,
							&crc32_sum)) {
						put_bh(bh);
						break;
					}
					put_bh(bh);
					continue;
				}
				next_log_block += count_tags(journal, bh);
				wrap(journal, next_log_block);
				put_bh(bh);
				continue;
			}

			/* A descriptor block: we can now write all of
			 * the data blocks.  Yay, useful work is finally
			 * getting done here! */

			tagp = &bh->b_data[sizeof(journal_header_t)];
			while ((tagp - bh->b_data + tag_bytes)
			       <= journal->j_blocksize - descr_csum_size) {
				unsigned long io_block;

				tag = (journal_block_tag_t *) tagp;
				flags = get_be16(&tag->t_flags);

				io_block = next_log_block++;
				wrap(journal, next_log_block);
				err = jread(&obh, journal, io_block);
				if (err) {
					/* Recover what we can, but
					 * report failure at the end. */
					success = err;
					printk(KERN_ERR
						"JBD2: IO error %d recovering "
						"block %ld in log\n",
						err, io_block);
				} else {
					unsigned long long blocknr;

					J_ASSERT(obh != NULL);
					blocknr = read_tag_block(journal,
								 tag);

					/* If the block has been
					 * revoked, then we're all done
					 * here. */
					if (jbd2_journal_test_revoke
					    (journal, blocknr,
					     next_commit_ID)) {
						brelse(obh);
						++info->nr_revoke_hits;
						goto skip_write;
					}

					/* Look for block corruption */
					if (!jbd2_block_tag_csum_verify(
						journal, tag, obh->b_data,
						be32_to_cpu(tmp->h_sequence))) {
						brelse(obh);
						success = -EFSBADCRC;
						printk(KERN_ERR "JBD2: Invalid "
						       "checksum recovering "
						       "data block %llu in "
						       "log\n", blocknr);
						block_error = 1;
						goto skip_write;
					}

					/* Find a buffer for the new
					 * data being restored */
					nbh = __getblk(journal->j_fs_dev,
						       blocknr,
						       journal->j_blocksize);
					if (nbh == NULL) {
						printk(KERN_ERR
						       "JBD2: Out of memory "
						       "during recovery.\n");
						err = -ENOMEM;
						brelse(bh);
						brelse(obh);
						goto failed;
					}

					lock_buffer(nbh);
					memcpy(nbh->b_data, obh->b_data,
					       journal->j_blocksize);
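					/* An ESCAPE tag means the block's
					 * first four bytes matched the
					 * journal magic and were escaped
					 * when written to the journal;
					 * restore the magic number before
					 * the block goes back to the
					 * filesystem. */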
					if (flags & JBD2_FLAG_ESCAPE) {
						__be32 magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
						memcpy(nbh->b_data, &magic,
						       sizeof(magic));
					}

					BUFFER_TRACE(nbh, "marking dirty");
					set_buffer_uptodate(nbh);
					mark_buffer_dirty(nbh);
					BUFFER_TRACE(nbh, "marking uptodate");
					++info->nr_replays;
					/* ll_rw_block(WRITE, 1, &nbh); */
					unlock_buffer(nbh);
					brelse(obh);
					brelse(nbh);
				}

			skip_write:
				tagp += tag_bytes;
				if (!(flags & JBD2_FLAG_SAME_UUID))
					tagp += 16;

				if (flags & JBD2_FLAG_LAST_TAG)
					break;
			}

			brelse(bh);
			continue;

		case JBD2_COMMIT_BLOCK:
			/* How to differentiate between interrupted commit
			 * and journal corruption ?
			 *
			 * {nth transaction}
			 *     Checksum Verification Failed
			 *             |
			 *      ____________________
			 *     |                    |
			 * async_commit        sync_commit
			 *     |                    |
			 *     | GO TO NEXT    "Journal Corruption"
			 *     | TRANSACTION
			 *     |
			 * {(n+1)th transaction}
			 *     |
			 *  ___|__________________
			 * |                      |
			 * Commit block found   Commit block not found
			 *     |                    |
			 * "Journal Corruption"     |
			 *            ______________|_________
			 *           |                        |
			 *    nth trans corrupt      OR   nth trans
			 *    and (n+1)th interrupted    interrupted
			 *    before commit block
			 *    could reach the disk.
			 *    (Cannot find the difference in above
			 *     mentioned conditions. Hence assume
			 *     "Interrupted Commit".)
			 */

			/* Found an expected commit block: if checksums
			 * are present verify them in PASS_SCAN; else not
			 * much to do other than move on to the next sequence
			 * number. */
			if (pass == PASS_SCAN &&
			    jbd2_has_feature_checksum(journal)) {
				int chksum_err, chksum_seen;
				struct commit_header *cbh =
					(struct commit_header *)bh->b_data;
				unsigned found_chksum =
					be32_to_cpu(cbh->h_chksum[0]);

				chksum_err = chksum_seen = 0;

				if (info->end_transaction) {
					journal->j_failed_commit =
						info->end_transaction;
					brelse(bh);
					break;
				}

				if (crc32_sum == found_chksum &&
				    cbh->h_chksum_type == JBD2_CRC32_CHKSUM &&
				    cbh->h_chksum_size ==
						JBD2_CRC32_CHKSUM_SIZE)
					chksum_seen = 1;
				else if (!(cbh->h_chksum_type == 0 &&
					   cbh->h_chksum_size == 0 &&
					   found_chksum == 0 &&
					   !chksum_seen))
					/*
					 * If the fs was mounted with an old
					 * kernel and later with a kernel
					 * that has journal_chksum, the
					 * journal can have the checksum
					 * feature flag set while the
					 * individual commit blocks carry no
					 * checksum (i.e. chksum == 0).
					 * This extra check avoids spurious
					 * checksum failures in that case.
					 */
					chksum_err = 1;

				if (chksum_err) {
					info->end_transaction = next_commit_ID;

					if (!jbd2_has_feature_async_commit(journal)) {
						journal->j_failed_commit =
							next_commit_ID;
						brelse(bh);
						break;
					}
				}
				crc32_sum = ~0;
			}
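			/* With v2/v3 checksums enabled, also verify the
			 * commit block's own checksum while scanning. */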
			if (pass == PASS_SCAN &&
			    !jbd2_commit_block_csum_verify(journal,
							   bh->b_data)) {
				info->end_transaction = next_commit_ID;

				if (!jbd2_has_feature_async_commit(journal)) {
					journal->j_failed_commit =
						next_commit_ID;
					brelse(bh);
					break;
				}
			}
			brelse(bh);
			next_commit_ID++;
			continue;

		case JBD2_REVOKE_BLOCK:
			/* If we aren't in the REVOKE pass, then we can
			 * just skip over this block. */
			if (pass != PASS_REVOKE) {
				brelse(bh);
				continue;
			}

			err = scan_revoke_records(journal, bh,
						  next_commit_ID, info);
			brelse(bh);
			if (err)
				goto failed;
			continue;

		default:
			jbd_debug(3, "Unrecognised magic %d, end of scan.\n",
				  blocktype);
			brelse(bh);
			goto done;
		}
	}

 done:
	/*
	 * We broke out of the log scan loop: either we came to the
	 * known end of the log or we found an unexpected block in the
	 * log.  If the latter happened, then we know that the "current"
	 * transaction marks the end of the valid log.
	 */

	if (pass == PASS_SCAN) {
		if (!info->end_transaction)
			info->end_transaction = next_commit_ID;
	} else {
		/* It's really bad news if different passes end up at
		 * different places (but possible due to IO errors). */
		if (info->end_transaction != next_commit_ID) {
			printk(KERN_ERR "JBD2: recovery pass %d ended at "
				"transaction %u, expected %u\n",
				pass, next_commit_ID, info->end_transaction);
			if (!success)
				success = -EIO;
		}
	}
	if (block_error && success == 0)
		success = -EIO;
	return success;

 failed:
	return err;
}

/* Scan a revoke record, marking all blocks mentioned as revoked. */

static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
			       tid_t sequence, struct recovery_info *info)
{
	jbd2_journal_revoke_header_t *header;
	int offset, max;
	unsigned csum_size = 0;
	__u32 rcount;
	int record_len = 4;

	header = (jbd2_journal_revoke_header_t *) bh->b_data;
	offset = sizeof(jbd2_journal_revoke_header_t);
	rcount = be32_to_cpu(header->r_count);

	if (!jbd2_descriptor_block_csum_verify(journal, header))
		return -EFSBADCRC;

	if (jbd2_journal_has_csum_v2or3(journal))
		csum_size = sizeof(struct jbd2_journal_block_tail);
	if (rcount > journal->j_blocksize - csum_size)
		return -EINVAL;
	max = rcount;

	if (jbd2_has_feature_64bit(journal))
		record_len = 8;

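	/* Each revoke record is simply a big-endian block number: 4 bytes,
	 * or 8 bytes when the 64-bit journal feature is enabled. */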
	while (offset + record_len <= max) {
		unsigned long long blocknr;
		int err;

		if (record_len == 4)
			blocknr = be32_to_cpu(* ((__be32 *) (bh->b_data+offset)));
		else
			blocknr = be64_to_cpu(* ((__be64 *) (bh->b_data+offset)));
		offset += record_len;
		err = jbd2_journal_set_revoke(journal, blocknr, sequence);
		if (err)
			return err;
		++info->nr_revokes;
	}
	return 0;
}