db/metadump.c
1 /*
2 * Copyright (c) 2007, 2011 SGI
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18
19 #include "libxfs.h"
20 #include "libxlog.h"
21 #include "bmap.h"
22 #include "command.h"
23 #include "metadump.h"
24 #include "io.h"
25 #include "output.h"
26 #include "type.h"
27 #include "init.h"
28 #include "sig.h"
29 #include "xfs_metadump.h"
30 #include "fprint.h"
31 #include "faddr.h"
32 #include "field.h"
33 #include "dir2.h"
34
35 #define DEFAULT_MAX_EXT_SIZE 1000
36
37 /*
38 * It's possible that multiple files in a directory (or attributes
39 * in a file) produce the same obfuscated name. If that happens, we
40 * try to create another one. After several rounds of this though,
41 * we just give up and leave the original name as-is.
42 */
43 #define DUP_MAX 5 /* Max duplicates before we give up */
44
45 /* copy all metadata structures to/from a file */
46
47 static int metadump_f(int argc, char **argv);
48 static void metadump_help(void);
49
50 /*
51 * metadump commands issue info/warnings/errors to standard error as
52 * metadump supports stdout as a destination.
53 *
54 * All static functions return zero on failure, while the public functions
55 * return zero on success.
56 */
57
58 static const cmdinfo_t metadump_cmd =
59 { "metadump", NULL, metadump_f, 0, -1, 0,
60 N_("[-a] [-e] [-g] [-m max_extent] [-w] [-o] filename"),
61 N_("dump metadata to a file"), metadump_help };
62
63 static FILE *outf; /* metadump file */
64
65 static xfs_metablock_t *metablock; /* header + index + buffers */
66 static __be64 *block_index;
67 static char *block_buffer;
68
69 static int num_indices;
70 static int cur_index;
71
72 static xfs_ino_t cur_ino;
73
74 static int show_progress = 0;
75 static int stop_on_read_error = 0;
76 static int max_extent_size = DEFAULT_MAX_EXT_SIZE;
77 static int obfuscate = 1;
78 static int zero_stale_data = 1;
79 static int show_warnings = 0;
80 static int progress_since_warning = 0;
81
82 void
83 metadump_init(void)
84 {
85 add_command(&metadump_cmd);
86 }
87
88 static void
89 metadump_help(void)
90 {
91 dbprintf(_(
92 "\n"
93 " The 'metadump' command dumps the known metadata to a compact file suitable\n"
94 " for compressing and sending to an XFS maintainer for corruption analysis \n"
95 " or xfs_repair failures.\n\n"
96 " Options:\n"
97 " -a -- Copy full metadata blocks without zeroing unused space\n"
98 " -e -- Ignore read errors and keep going\n"
99 " -g -- Display dump progress\n"
100 " -m -- Specify max extent size in blocks to copy (default = %d blocks)\n"
101 " -o -- Don't obfuscate names and extended attributes\n"
102 " -w -- Show warnings of bad metadata information\n"
103 "\n"), DEFAULT_MAX_EXT_SIZE);
104 }
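
/*
 * Example invocation (illustrative only; the usual entry point is the
 * xfs_metadump(8) wrapper, and the device path below is a placeholder):
 *
 *	xfs_db -r -c "metadump -g -w /tmp/fs.metadump" /dev/sdXN
 *
 * -r opens the filesystem read-only; -g and -w request progress output and
 * metadata warnings while the dump is written to /tmp/fs.metadump.
 */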
105
106 static void
107 print_warning(const char *fmt, ...)
108 {
109 char buf[200];
110 va_list ap;
111
112 if (seenint())
113 return;
114
115 va_start(ap, fmt);
116 vsnprintf(buf, sizeof(buf), fmt, ap);
117 va_end(ap);
118 buf[sizeof(buf)-1] = '\0';
119
120 fprintf(stderr, "%s%s: %s\n", progress_since_warning ? "\n" : "",
121 progname, buf);
122 progress_since_warning = 0;
123 }
124
125 static void
126 print_progress(const char *fmt, ...)
127 {
128 char buf[60];
129 va_list ap;
130 FILE *f;
131
132 if (seenint())
133 return;
134
135 va_start(ap, fmt);
136 vsnprintf(buf, sizeof(buf), fmt, ap);
137 va_end(ap);
138 buf[sizeof(buf)-1] = '\0';
139
140 f = (outf == stdout) ? stderr : stdout;
141 fprintf(f, "\r%-59s", buf);
142 fflush(f);
143 progress_since_warning = 1;
144 }
145
146 /*
147 * A complete dump file will have a "zero" entry in the last index block;
148 * even if the dump is exactly aligned, the last index will be full of
149 * zeros. If the last index entry is non-zero, the dump is incomplete.
150 * Correspondingly, the last chunk will have a count < num_indices.
151 *
152 * Return 0 for success, -errno for failure.
153 */
154
155 static int
156 write_index(void)
157 {
158 /*
159 * write index block and following data blocks (streaming)
160 */
161 metablock->mb_count = cpu_to_be16(cur_index);
162 if (fwrite(metablock, (cur_index + 1) << BBSHIFT, 1, outf) != 1) {
163 print_warning("error writing to file: %s", strerror(errno));
164 return -errno;
165 }
166
167 memset(block_index, 0, num_indices * sizeof(__be64));
168 cur_index = 0;
169 return 0;
170 }
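
/*
 * Rough sketch of one dump "chunk" as emitted above (derived from the
 * fwrite() call; xfs_metadump.h holds the authoritative format):
 *
 *	[ xfs_metablock_t header | __be64 index[] ][ data sector 1 ] ... [ data sector mb_count ]
 *
 * Each index entry records the 512-byte daddr its data sector was read
 * from.  A chunk with mb_count < num_indices can only be the last one,
 * which is how the end of a complete dump is recognised.
 */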
171
172 /*
173 * Return 0 for success, -errno for failure.
174 */
175 static int
176 write_buf_segment(
177 char *data,
178 __int64_t off,
179 int len)
180 {
181 int i;
182 int ret;
183
184 for (i = 0; i < len; i++, off++, data += BBSIZE) {
185 block_index[cur_index] = cpu_to_be64(off);
186 memcpy(&block_buffer[cur_index << BBSHIFT], data, BBSIZE);
187 if (++cur_index == num_indices) {
188 ret = write_index();
189 if (ret)
190 return -EIO;
191 }
192 }
193 return 0;
194 }
195
196 /*
197 * we want to preserve the state of the metadata in the dump - whether it is
198 * intact or corrupt, so even if the buffer has a verifier attached to it we
199 * don't want to run it prior to writing the buffer to the metadump image.
200 *
201 * The only reason for running the verifier is to recalculate the CRCs on a
202 * buffer that has been obfuscated, i.e. a buffer that metadump modified itself.
203 * In this case, we only run the verifier if the buffer was not corrupt to begin
204 * with, so that we don't accidentally correct buffers with CRC or other
205 * errors in them when we are obfuscating them.
206 */
207 static int
208 write_buf(
209 iocur_t *buf)
210 {
211 struct xfs_buf *bp = buf->bp;
212 int i;
213 int ret;
214
215 /*
216 * Run the write verifier to recalculate the buffer CRCs and check
217 * metadump didn't introduce a new corruption. Warn if the verifier
218 * failed, but still continue to dump it into the output file.
219 */
220 if (buf->need_crc && bp && bp->b_ops && !bp->b_error) {
221 bp->b_ops->verify_write(bp);
222 if (bp->b_error) {
223 print_warning(
224 "obfuscation corrupted block at %s bno 0x%llx/0x%x",
225 bp->b_ops->name,
226 (long long)bp->b_bn, bp->b_bcount);
227 }
228 }
229
230 /* handle discontiguous buffers */
231 if (!buf->bbmap) {
232 ret = write_buf_segment(buf->data, buf->bb, buf->blen);
233 if (ret)
234 return ret;
235 } else {
236 int len = 0;
237 for (i = 0; i < buf->bbmap->nmaps; i++) {
238 ret = write_buf_segment(buf->data + BBTOB(len),
239 buf->bbmap->b[i].bm_bn,
240 buf->bbmap->b[i].bm_len);
241 if (ret)
242 return ret;
243 len += buf->bbmap->b[i].bm_len;
244 }
245 }
246 return seenint() ? -EINTR : 0;
247 }
248
249 /*
250 * We could be processing a corrupt block, so we can't trust any of
251 * the offsets or lengths to be within the buffer range. Hence check
252 * carefully!
253 */
254 static void
255 zero_btree_node(
256 struct xfs_btree_block *block,
257 typnm_t btype)
258 {
259 int nrecs;
260 xfs_bmbt_ptr_t *bpp;
261 xfs_bmbt_key_t *bkp;
262 xfs_inobt_ptr_t *ipp;
263 xfs_inobt_key_t *ikp;
264 xfs_alloc_ptr_t *app;
265 xfs_alloc_key_t *akp;
266 char *zp1, *zp2;
267 char *key_end;
268
269 nrecs = be16_to_cpu(block->bb_numrecs);
270 if (nrecs < 0)
271 return;
272
273 switch (btype) {
274 case TYP_BMAPBTA:
275 case TYP_BMAPBTD:
276 if (nrecs > mp->m_bmap_dmxr[1])
277 return;
278
279 bkp = XFS_BMBT_KEY_ADDR(mp, block, 1);
280 bpp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
281 zp1 = (char *)&bkp[nrecs];
282 zp2 = (char *)&bpp[nrecs];
283 key_end = (char *)bpp;
284 break;
285 case TYP_INOBT:
286 case TYP_FINOBT:
287 if (nrecs > mp->m_inobt_mxr[1])
288 return;
289
290 ikp = XFS_INOBT_KEY_ADDR(mp, block, 1);
291 ipp = XFS_INOBT_PTR_ADDR(mp, block, 1, mp->m_inobt_mxr[1]);
292 zp1 = (char *)&ikp[nrecs];
293 zp2 = (char *)&ipp[nrecs];
294 key_end = (char *)ipp;
295 break;
296 case TYP_BNOBT:
297 case TYP_CNTBT:
298 if (nrecs > mp->m_alloc_mxr[1])
299 return;
300
301 akp = XFS_ALLOC_KEY_ADDR(mp, block, 1);
302 app = XFS_ALLOC_PTR_ADDR(mp, block, 1, mp->m_alloc_mxr[1]);
303 zp1 = (char *)&akp[nrecs];
304 zp2 = (char *)&app[nrecs];
305 key_end = (char *)app;
306 break;
307 default:
308 return;
309 }
310
311
312 /* Zero from end of keys to beginning of pointers */
313 memset(zp1, 0, key_end - zp1);
314
315 /* Zero from end of pointers to end of block */
316 memset(zp2, 0, (char *)block + mp->m_sb.sb_blocksize - zp2);
317 }
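
/*
 * Rough layout of the node blocks handled above (a sketch; both arrays are
 * sized for the per-level maxrecs value):
 *
 *	[ header ][ keys 1..nrecs | stale keys ][ ptrs 1..nrecs | stale tail ]
 *
 * The first memset() above zeroes from the end of the live keys (zp1) to
 * the start of the pointer array (key_end); the second zeroes from the end
 * of the live pointers (zp2) to the end of the block.
 */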
318
319 /*
320 * We could be processing a corrupt block, so we can't trust any of
321 * the offsets or lengths to be within the buffer range. Hence check
322 * carefully!
323 */
324 static void
325 zero_btree_leaf(
326 struct xfs_btree_block *block,
327 typnm_t btype)
328 {
329 int nrecs;
330 struct xfs_bmbt_rec *brp;
331 struct xfs_inobt_rec *irp;
332 struct xfs_alloc_rec *arp;
333 char *zp;
334
335 nrecs = be16_to_cpu(block->bb_numrecs);
336 if (nrecs < 0)
337 return;
338
339 switch (btype) {
340 case TYP_BMAPBTA:
341 case TYP_BMAPBTD:
342 if (nrecs > mp->m_bmap_dmxr[0])
343 return;
344
345 brp = XFS_BMBT_REC_ADDR(mp, block, 1);
346 zp = (char *)&brp[nrecs];
347 break;
348 case TYP_INOBT:
349 case TYP_FINOBT:
350 if (nrecs > mp->m_inobt_mxr[0])
351 return;
352
353 irp = XFS_INOBT_REC_ADDR(mp, block, 1);
354 zp = (char *)&irp[nrecs];
355 break;
356 case TYP_BNOBT:
357 case TYP_CNTBT:
358 if (nrecs > mp->m_alloc_mxr[0])
359 return;
360
361 arp = XFS_ALLOC_REC_ADDR(mp, block, 1);
362 zp = (char *)&arp[nrecs];
363 break;
364 default:
365 return;
366 }
367
368 /* Zero from end of records to end of block */
369 memset(zp, 0, (char *)block + mp->m_sb.sb_blocksize - zp);
370 }
371
372 static void
373 zero_btree_block(
374 struct xfs_btree_block *block,
375 typnm_t btype)
376 {
377 int level;
378
379 level = be16_to_cpu(block->bb_level);
380
381 if (level > 0)
382 zero_btree_node(block, btype);
383 else
384 zero_btree_leaf(block, btype);
385 }
386
387 static int
388 scan_btree(
389 xfs_agnumber_t agno,
390 xfs_agblock_t agbno,
391 int level,
392 typnm_t btype,
393 void *arg,
394 int (*func)(struct xfs_btree_block *block,
395 xfs_agnumber_t agno,
396 xfs_agblock_t agbno,
397 int level,
398 typnm_t btype,
399 void *arg))
400 {
401 int rval = 0;
402
403 push_cur();
404 set_cur(&typtab[btype], XFS_AGB_TO_DADDR(mp, agno, agbno), blkbb,
405 DB_RING_IGN, NULL);
406 if (iocur_top->data == NULL) {
407 print_warning("cannot read %s block %u/%u", typtab[btype].name,
408 agno, agbno);
409 rval = !stop_on_read_error;
410 goto pop_out;
411 }
412
413 if (zero_stale_data) {
414 zero_btree_block(iocur_top->data, btype);
415 iocur_top->need_crc = 1;
416 }
417
418 if (write_buf(iocur_top))
419 goto pop_out;
420
421 if (!(*func)(iocur_top->data, agno, agbno, level - 1, btype, arg))
422 goto pop_out;
423 rval = 1;
424 pop_out:
425 pop_cur();
426 return rval;
427 }
428
429 /* free space tree copy routines */
430
431 static int
432 valid_bno(
433 xfs_agnumber_t agno,
434 xfs_agblock_t agbno)
435 {
436 if (agno < (mp->m_sb.sb_agcount - 1) && agbno > 0 &&
437 agbno <= mp->m_sb.sb_agblocks)
438 return 1;
439 if (agno == (mp->m_sb.sb_agcount - 1) && agbno > 0 &&
440 agbno <= (mp->m_sb.sb_dblocks -
441 (xfs_rfsblock_t)(mp->m_sb.sb_agcount - 1) *
442 mp->m_sb.sb_agblocks))
443 return 1;
444
445 return 0;
446 }
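
/*
 * Example of the check above (numbers made up for illustration): with
 * sb_agcount = 4, sb_agblocks = 100 and sb_dblocks = 350, AGs 0-2 accept
 * agbno 1..100, while AG 3 only accepts agbno 1..50 because the last AG
 * holds whatever is left over (350 - 3 * 100 = 50 blocks).
 */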
447
448
449 static int
450 scanfunc_freesp(
451 struct xfs_btree_block *block,
452 xfs_agnumber_t agno,
453 xfs_agblock_t agbno,
454 int level,
455 typnm_t btype,
456 void *arg)
457 {
458 xfs_alloc_ptr_t *pp;
459 int i;
460 int numrecs;
461
462 if (level == 0)
463 return 1;
464
465 numrecs = be16_to_cpu(block->bb_numrecs);
466 if (numrecs > mp->m_alloc_mxr[1]) {
467 if (show_warnings)
468 print_warning("invalid numrecs (%u) in %s block %u/%u",
469 numrecs, typtab[btype].name, agno, agbno);
470 return 1;
471 }
472
473 pp = XFS_ALLOC_PTR_ADDR(mp, block, 1, mp->m_alloc_mxr[1]);
474 for (i = 0; i < numrecs; i++) {
475 if (!valid_bno(agno, be32_to_cpu(pp[i]))) {
476 if (show_warnings)
477 print_warning("invalid block number (%u/%u) "
478 "in %s block %u/%u",
479 agno, be32_to_cpu(pp[i]),
480 typtab[btype].name, agno, agbno);
481 continue;
482 }
483 if (!scan_btree(agno, be32_to_cpu(pp[i]), level, btype, arg,
484 scanfunc_freesp))
485 return 0;
486 }
487 return 1;
488 }
489
490 static int
491 copy_free_bno_btree(
492 xfs_agnumber_t agno,
493 xfs_agf_t *agf)
494 {
495 xfs_agblock_t root;
496 int levels;
497
498 root = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]);
499 levels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
500
501 /* validate root and levels before processing the tree */
502 if (root == 0 || root > mp->m_sb.sb_agblocks) {
503 if (show_warnings)
504 print_warning("invalid block number (%u) in bnobt "
505 "root in agf %u", root, agno);
506 return 1;
507 }
508 if (levels >= XFS_BTREE_MAXLEVELS) {
509 if (show_warnings)
510 print_warning("invalid level (%u) in bnobt root "
511 "in agf %u", levels, agno);
512 return 1;
513 }
514
515 return scan_btree(agno, root, levels, TYP_BNOBT, agf, scanfunc_freesp);
516 }
517
518 static int
519 copy_free_cnt_btree(
520 xfs_agnumber_t agno,
521 xfs_agf_t *agf)
522 {
523 xfs_agblock_t root;
524 int levels;
525
526 root = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]);
527 levels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
528
529 /* validate root and levels before processing the tree */
530 if (root == 0 || root > mp->m_sb.sb_agblocks) {
531 if (show_warnings)
532 print_warning("invalid block number (%u) in cntbt "
533 "root in agf %u", root, agno);
534 return 1;
535 }
536 if (levels >= XFS_BTREE_MAXLEVELS) {
537 if (show_warnings)
538 print_warning("invalid level (%u) in cntbt root "
539 "in agf %u", levels, agno);
540 return 1;
541 }
542
543 return scan_btree(agno, root, levels, TYP_CNTBT, agf, scanfunc_freesp);
544 }
545
546 static int
547 scanfunc_rmapbt(
548 struct xfs_btree_block *block,
549 xfs_agnumber_t agno,
550 xfs_agblock_t agbno,
551 int level,
552 typnm_t btype,
553 void *arg)
554 {
555 xfs_rmap_ptr_t *pp;
556 int i;
557 int numrecs;
558
559 if (level == 0)
560 return 1;
561
562 numrecs = be16_to_cpu(block->bb_numrecs);
563 if (numrecs > mp->m_rmap_mxr[1]) {
564 if (show_warnings)
565 print_warning("invalid numrecs (%u) in %s block %u/%u",
566 numrecs, typtab[btype].name, agno, agbno);
567 return 1;
568 }
569
570 pp = XFS_RMAP_PTR_ADDR(block, 1, mp->m_rmap_mxr[1]);
571 for (i = 0; i < numrecs; i++) {
572 if (!valid_bno(agno, be32_to_cpu(pp[i]))) {
573 if (show_warnings)
574 print_warning("invalid block number (%u/%u) "
575 "in %s block %u/%u",
576 agno, be32_to_cpu(pp[i]),
577 typtab[btype].name, agno, agbno);
578 continue;
579 }
580 if (!scan_btree(agno, be32_to_cpu(pp[i]), level, btype, arg,
581 scanfunc_rmapbt))
582 return 0;
583 }
584 return 1;
585 }
586
587 static int
588 copy_rmap_btree(
589 xfs_agnumber_t agno,
590 struct xfs_agf *agf)
591 {
592 xfs_agblock_t root;
593 int levels;
594
595 if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
596 return 1;
597
598 root = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]);
599 levels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
600
601 /* validate root and levels before processing the tree */
602 if (root == 0 || root > mp->m_sb.sb_agblocks) {
603 if (show_warnings)
604 print_warning("invalid block number (%u) in rmapbt "
605 "root in agf %u", root, agno);
606 return 1;
607 }
608 if (levels >= XFS_BTREE_MAXLEVELS) {
609 if (show_warnings)
610 print_warning("invalid level (%u) in rmapbt root "
611 "in agf %u", levels, agno);
612 return 1;
613 }
614
615 return scan_btree(agno, root, levels, TYP_RMAPBT, agf, scanfunc_rmapbt);
616 }
617
618 /* filename and extended attribute obfuscation routines */
619
620 struct name_ent {
621 struct name_ent *next;
622 xfs_dahash_t hash;
623 int namelen;
624 unsigned char name[1];
625 };
626
627 #define NAME_TABLE_SIZE 4096
628
629 static struct name_ent *nametable[NAME_TABLE_SIZE];
630
631 static void
632 nametable_clear(void)
633 {
634 int i;
635 struct name_ent *ent;
636
637 for (i = 0; i < NAME_TABLE_SIZE; i++) {
638 while ((ent = nametable[i])) {
639 nametable[i] = ent->next;
640 free(ent);
641 }
642 }
643 }
644
645 /*
646 * See if the given name is already in the name table. If so,
647 * return a pointer to its entry, otherwise return a null pointer.
648 */
649 static struct name_ent *
650 nametable_find(xfs_dahash_t hash, int namelen, unsigned char *name)
651 {
652 struct name_ent *ent;
653
654 for (ent = nametable[hash % NAME_TABLE_SIZE]; ent; ent = ent->next) {
655 if (ent->hash == hash && ent->namelen == namelen &&
656 !memcmp(ent->name, name, namelen))
657 return ent;
658 }
659 return NULL;
660 }
661
662 /*
663 * Add the given name to the name table. Returns a pointer to the
664 * name's new entry, or a null pointer if an error occurs.
665 */
666 static struct name_ent *
667 nametable_add(xfs_dahash_t hash, int namelen, unsigned char *name)
668 {
669 struct name_ent *ent;
670
671 ent = malloc(sizeof *ent + namelen);
672 if (!ent)
673 return NULL;
674
675 ent->namelen = namelen;
676 memcpy(ent->name, name, namelen);
677 ent->hash = hash;
678 ent->next = nametable[hash % NAME_TABLE_SIZE];
679
680 nametable[hash % NAME_TABLE_SIZE] = ent;
681
682 return ent;
683 }
684
685 #define is_invalid_char(c) ((c) == '/' || (c) == '\0')
686 #define rol32(x,y) (((x) << (y)) | ((x) >> (32 - (y))))
687
688 static inline unsigned char
689 random_filename_char(void)
690 {
691 static unsigned char filename_alphabet[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
692 "abcdefghijklmnopqrstuvwxyz"
693 "0123456789-_";
694
695 return filename_alphabet[random() % (sizeof filename_alphabet - 1)];
696 }
697
698 #define ORPHANAGE "lost+found"
699 #define ORPHANAGE_LEN (sizeof (ORPHANAGE) - 1)
700
701 static inline int
702 is_orphanage_dir(
703 struct xfs_mount *mp,
704 xfs_ino_t dir_ino,
705 size_t name_len,
706 unsigned char *name)
707 {
708 return dir_ino == mp->m_sb.sb_rootino &&
709 name_len == ORPHANAGE_LEN &&
710 !memcmp(name, ORPHANAGE, ORPHANAGE_LEN);
711 }
712
713 /*
714 * Determine whether a name is one we shouldn't obfuscate because
715 * it's an orphan (or the "lost+found" directory itself). Note
716 * "cur_ino" is the inode for the directory currently being
717 * processed.
718 *
719 * Returns 1 if the name should NOT be obfuscated or 0 otherwise.
720 */
721 static int
722 in_lost_found(
723 xfs_ino_t ino,
724 int namelen,
725 unsigned char *name)
726 {
727 static xfs_ino_t orphanage_ino = 0;
728 char s[24]; /* 21 is enough (64 bits in decimal) */
729 int slen;
730
731 /* Record the "lost+found" inode if we haven't done so already */
732
733 ASSERT(ino != 0);
734 if (!orphanage_ino && is_orphanage_dir(mp, cur_ino, namelen, name))
735 orphanage_ino = ino;
736
737 /* We don't obfuscate the "lost+found" directory itself */
738
739 if (ino == orphanage_ino)
740 return 1;
741
742 /* Most files aren't in "lost+found" at all */
743
744 if (cur_ino != orphanage_ino)
745 return 0;
746
747 /*
748 * Within "lost+found", we don't obfuscate any file whose
749 * name is the same as its inode number. Any others are
750 * stray files and can be obfuscated.
751 */
752 slen = snprintf(s, sizeof (s), "%llu", (unsigned long long) ino);
753
754 return slen == namelen && !memcmp(name, s, namelen);
755 }
756
757 /*
758 * Given a name and its hash value, massage the name in such a way
759 * that the result is another name of equal length which shares the
760 * same hash value.
761 */
762 static void
763 obfuscate_name(
764 xfs_dahash_t hash,
765 size_t name_len,
766 unsigned char *name)
767 {
768 unsigned char *newp = name;
769 int i;
770 xfs_dahash_t new_hash = 0;
771 unsigned char *first;
772 unsigned char high_bit;
773 int shift;
774
775 /*
776 * Our obfuscation algorithm requires at least 5-character
777 * names, so don't bother if the name is too short. We
778 * work backward from a hash value to determine the last
779 * five bytes in a name required to produce a new name
780 * with the same hash.
781 */
782 if (name_len < 5)
783 return;
784
785 /*
786 * The beginning of the obfuscated name can be pretty much
787 * anything, so fill it in with random characters.
788 * Accumulate its new hash value as we go.
789 */
790 for (i = 0; i < name_len - 5; i++) {
791 *newp = random_filename_char();
792 new_hash = *newp ^ rol32(new_hash, 7);
793 newp++;
794 }
795
796 /*
797 * Compute which five bytes need to be used at the end of
798 * the name so the hash of the obfuscated name is the same
799 * as the hash of the original. If any result in an invalid
800 * character, flip a bit and arrange for a corresponding bit
801 * in a neighboring byte to be flipped as well. For the
802 * last byte, the "neighbor" to change is the first byte
803 * we're computing here.
804 */
805 new_hash = rol32(new_hash, 3) ^ hash;
806
807 first = newp;
808 high_bit = 0;
809 for (shift = 28; shift >= 0; shift -= 7) {
810 *newp = (new_hash >> shift & 0x7f) ^ high_bit;
811 if (is_invalid_char(*newp)) {
812 *newp ^= 1;
813 high_bit = 0x80;
814 } else
815 high_bit = 0;
816 ASSERT(!is_invalid_char(*newp));
817 newp++;
818 }
819
820 /*
821 * If we flipped a bit on the last byte, we need to fix up
822 * the matching bit in the first byte. The result will
823 * be a valid character, because we know that first byte
824 * has 0's in its upper four bits (it was produced by a
825 * 28-bit right-shift of a 32-bit unsigned value).
826 */
827 if (high_bit) {
828 *first ^= 0x10;
829 ASSERT(!is_invalid_char(*first));
830 }
831 ASSERT(libxfs_da_hashname(name, name_len) == hash);
832 }
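
/*
 * For reference, the dahash used above folds the name into 32 bits and is
 * equivalent to the per-character recurrence below (a sketch of what
 * libxfs_da_hashname() computes):
 *
 *	hash = 0;
 *	for (i = 0; i < namelen; i++)
 *		hash = name[i] ^ rol32(hash, 7);
 *
 * The rotation advances 7 bits per character, so the last five characters
 * span the whole 32-bit hash (5 * 7 = 35 >= 32).  That is why the function
 * above randomizes everything except the final five bytes and then solves
 * for those.
 */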
833
834 /*
835 * Flip a bit in each of two bytes at the end of the given name.
836 * This is used in generating a series of alternate names to be used
837 * in the event a duplicate is found.
838 *
839 * The bits flipped are selected such that they both affect the same
840 * bit in the name's computed hash value, so flipping them both will
841 * preserve the hash.
842 *
843 * The following diagram aims to show the portion of a computed
844 * hash that a given byte of a name affects.
845 *
846 * 31 28 24 21 14 8 7 3 0
847 * +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
848 * hash: | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
849 * +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
850 * last-4 ->| |<-- last-2 --->| |<--- last ---->|
851 * |<-- last-3 --->| |<-- last-1 --->| |<- last-4
852 * |<-- last-7 --->| |<-- last-5 --->|
853 * |<-- last-8 --->| |<-- last-6 --->|
854 * . . . and so on
855 *
856 * The last byte of the name directly affects the low-order byte of
857 * the hash. The next-to-last affects bits 7-14, the next one back
858 * affects bits 14-21, and so on. The effect wraps around when it
859 * goes beyond the top of the hash (as happens for byte last-4).
860 *
861 * Bits that are flipped together "overlap" on the hash value. As
862 * an example of overlap, the last two bytes both affect bit 7 in
863 * the hash. That pair of bytes (and their overlapping bits) can be
864 * used for this "flip bit" operation (it's the first pair tried,
865 * actually).
866 *
867 * A table defines overlapping pairs--the bytes involved and bits
868 * within them--that can be used this way. The byte offset is
869 * relative to a starting point within the name, which will be set
870 * to affect the bytes at the end of the name. The function is
871 * called with a "bitseq" value which indicates which bit flip is
872 * desired, and this translates directly into selecting which entry
873 * in the bit_to_flip[] table to apply.
874 *
875 * The function returns 1 if the operation was successful. It
876 * returns 0 if the result produced a character that's not valid in
877 * a name (either '/' or a '\0'). Finally, it returns -1 if the bit
878 * sequence number is beyond what is supported for a name of this
879 * length.
880 *
881 * Discussion
882 * ----------
883 * (Also see the discussion above find_alternate(), below.)
884 *
885 * In order to make this function work for any length name, the
886 * table is ordered by increasing byte offset, so that the earliest
887 * entries can apply to the shortest strings. This way all names
888 * are done consistently.
889 *
890 * When bit flips occur, they can convert printable characters
891 * into non-printable ones. In an effort to reduce the impact of
892 * this, the first bit flips are chosen to affect bytes at the end of
893 * the name (and furthermore, toward the low bits of a byte). Those
894 * bytes are often non-printable anyway because of the way they are
895 * initially selected by obfuscate_name(). This is accomplished by
896 * using later table entries first.
897 *
898 * Each row in the table doubles the number of alternates that
899 * can be generated. A two-byte name is limited to using only
900 * the first row, so it's possible to generate two alternates
901 * (the original name, plus the alternate produced by flipping
902 * the one pair of bits). In a 5-byte name, the effect of the
903 * first byte overlaps the last by 4 bits, and there are 8 bits
904 * to flip, allowing for 256 possible alternates.
905 *
906 * Short names (less than 5 bytes) are never even obfuscated, so for
907 * such names the relatively small number of alternates should never
908 * really be a problem.
909 *
910 * Long names (more than 6 bytes, say) are not likely to exhaust
911 * the number of available alternates. In fact, the table could
912 * probably have stopped at 8 entries, on the assumption that 256
913 * alternates should be enough for most any situation. The entries
914 * beyond those are present mostly for demonstration of how it could
915 * be populated with more entries, should it ever be necessary to do
916 * so.
917 */
918 static int
919 flip_bit(
920 size_t name_len,
921 unsigned char *name,
922 uint32_t bitseq)
923 {
924 int index;
925 size_t offset;
926 unsigned char *p0, *p1;
927 unsigned char m0, m1;
928 struct {
929 int byte; /* Offset from start within name */
930 unsigned char bit; /* Bit within that byte */
931 } bit_to_flip[][2] = { /* Sorted by second entry's byte */
932 { { 0, 0 }, { 1, 7 } }, /* Each row defines a pair */
933 { { 1, 0 }, { 2, 7 } }, /* of bytes and a bit within */
934 { { 2, 0 }, { 3, 7 } }, /* each byte. Each bit in */
935 { { 0, 4 }, { 4, 0 } }, /* a pair affects the same */
936 { { 0, 5 }, { 4, 1 } }, /* bit in the hash, so flipping */
937 { { 0, 6 }, { 4, 2 } }, /* both will change the name */
938 { { 0, 7 }, { 4, 3 } }, /* while preserving the hash. */
939 { { 3, 0 }, { 4, 7 } },
940 { { 0, 0 }, { 5, 3 } }, /* The first entry's byte offset */
941 { { 0, 1 }, { 5, 4 } }, /* must be less than the second. */
942 { { 0, 2 }, { 5, 5 } },
943 { { 0, 3 }, { 5, 6 } }, /* The table can be extended to */
944 { { 0, 4 }, { 5, 7 } }, /* an arbitrary number of entries */
945 { { 4, 0 }, { 5, 7 } }, /* but there's not much point. */
946 /* . . . */
947 };
948
949 /* Find the first entry *not* usable for name of this length */
950
951 for (index = 0; index < ARRAY_SIZE(bit_to_flip); index++)
952 if (bit_to_flip[index][1].byte >= name_len)
953 break;
954
955 /*
956 * Back up to the last usable entry. If that number is
957 * smaller than the bit sequence number, inform the caller
958 * that nothing this large (or larger) will work.
959 */
960 if (bitseq > --index)
961 return -1;
962
963 /*
964 * We will be switching bits at the end of name, with a
965 * preference for affecting the last bytes first. Compute
966 * where in the name we'll start applying the changes.
967 */
968 offset = name_len - (bit_to_flip[index][1].byte + 1);
969 index -= bitseq; /* Use later table entries first */
970
971 p0 = name + offset + bit_to_flip[index][0].byte;
972 p1 = name + offset + bit_to_flip[index][1].byte;
973 m0 = 1 << bit_to_flip[index][0].bit;
974 m1 = 1 << bit_to_flip[index][1].bit;
975
976 /* Only change the bytes if it produces valid characters */
977
978 if (is_invalid_char(*p0 ^ m0) || is_invalid_char(*p1 ^ m1))
979 return 0;
980
981 *p0 ^= m0;
982 *p1 ^= m1;
983
984 return 1;
985 }
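
/*
 * Worked example, obtained by tracing the table above (illustration only):
 * for a 5-byte name the scan stops at the first row whose second byte
 * offset is 5, leaving 8 usable rows, i.e. bitseq 0-7 and 2^8 = 256
 * alternates.  bitseq 0 maps to the latest usable row, { { 3, 0 }, { 4, 7 } }
 * (bit 0 of the next-to-last byte and bit 7 of the last byte), while
 * bitseq 7 maps to the earliest row, { { 0, 0 }, { 1, 7 } }.
 */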
986
987 /*
988 * This function generates a well-defined sequence of "alternate"
989 * names for a given name. An alternate is a name having the same
990 * length and same hash value as the original name. This is needed
991 * because the algorithm produces only one obfuscated name to use
992 * for a given original name, and it's possible that result matches
993 * a name already seen. This function checks for this, and if it
994 * occurs, finds another suitable obfuscated name to use.
995 *
996 * Each bit in the binary representation of the sequence number is
997 * used to select one possible "bit flip" operation to perform on
998 * the name. So for example:
999 * seq = 0: selects no bits to flip
1000 * seq = 1: selects the 0th bit to flip
1001 * seq = 2: selects the 1st bit to flip
1002 * seq = 3: selects the 0th and 1st bit to flip
1003 * ... and so on.
1004 *
1005 * The flip_bit() function takes care of the details of the bit
1006 * flipping within the name. Note that the "1st bit" in this
1007 * context is a bit sequence number; i.e. it doesn't necessarily
1008 * mean bit 0x02 will be changed.
1009 *
1010 * If a valid name (one that contains no '/' or '\0' characters) is
1011 * produced by this process for the given sequence number, this
1012 * function returns 1. If the result is not valid, it returns 0.
1013 * Returns -1 if the sequence number is beyond the maximum for
1014 * names of the given length.
1015 *
1016 *
1017 * Discussion
1018 * ----------
1019 * The number of alternates available for a given name is dependent
1020 * on its length. A "bit flip" involves inverting two bits in
1021 * a name--the two bits being selected such that their values
1022 * affect the name's hash value in the same way. Alternates are
1023 * thus generated by inverting the value of pairs of such
1024 * "overlapping" bits in the original name. Each byte after the
1025 * first in a name adds at least one bit of overlap to work with.
1026 * (See comments above flip_bit() for more discussion on this.)
1027 *
1028 * So the number of alternates is dependent on the number of such
1029 * overlapping bits in a name. If there are N bit overlaps, there
1030 * are 2^N alternates for that hash value.
1031 *
1032 * Here are the number of overlapping bits available for generating
1033 * alternates for names of specific lengths:
1034 * 1 0 (must have 2 bytes to have any overlap)
1035 * 2 1 One bit overlaps--so 2 possible alternates
1036 * 3 2 Two bits overlap--so 4 possible alternates
1037 * 4 3 Three bits overlap, so 2^3 alternates
1038 * 5 8 8 bits overlap (due to wrapping), 256 alternates
1039 * 6 18 2^18 alternates
1040 * 7 28 2^28 alternates
1041 * ...
1042 * It's clear that the number of alternates grows very quickly with
1043 * the length of the name. But note that the set of alternates
1044 * includes invalid names. And for certain (contrived) names, the
1045 * number of valid names is a fairly small fraction of the total
1046 * number of alternates.
1047 *
1048 * The main driver for this infrastructure for coming up with
1049 * alternate names is really related to names 5 (or possibly 6)
1050 * bytes in length. 5-byte obfuscated names contain no randomly-
1051 * generated bytes in them, and the chance of an obfuscated name
1052 * matching an already-seen name is too high to just ignore. This
1053 * methodical selection of alternates ensures we don't produce
1054 * duplicate names unless we have exhausted our options.
1055 */
1056 static int
1057 find_alternate(
1058 size_t name_len,
1059 unsigned char *name,
1060 uint32_t seq)
1061 {
1062 uint32_t bitseq = 0;
1063 uint32_t bits = seq;
1064
1065 if (!seq)
1066 return 1; /* alternate 0 is the original name */
1067 if (name_len < 2) /* Must have 2 bytes to flip */
1068 return -1;
1069
1070 for (bitseq = 0; bits; bitseq++) {
1071 uint32_t mask = 1 << bitseq;
1072 int fb;
1073
1074 if (!(bits & mask))
1075 continue;
1076
1077 fb = flip_bit(name_len, name, bitseq);
1078 if (fb < 1)
1079 return fb ? -1 : 0;
1080 bits ^= mask;
1081 }
1082
1083 return 1;
1084 }
1085
1086 /*
1087 * Look up the given name in the name table. If it is already
1088 * present, iterate through a well-defined sequence of alternate
1089 * names and attempt to use an alternate name instead.
1090 *
1091 * Returns 1 if the (possibly modified) name is not present in the
1092 * name table. Returns 0 if the name and all possible alternates
1093 * are already in the table.
1094 */
1095 static int
1096 handle_duplicate_name(xfs_dahash_t hash, size_t name_len, unsigned char *name)
1097 {
1098 unsigned char new_name[name_len + 1];
1099 uint32_t seq = 1;
1100
1101 if (!nametable_find(hash, name_len, name))
1102 return 1; /* No duplicate */
1103
1104 /* Name is already in use. Need to find an alternate. */
1105
1106 do {
1107 int found;
1108
1109 /* Only change incoming name if we find an alternate */
1110 do {
1111 memcpy(new_name, name, name_len);
1112 found = find_alternate(name_len, new_name, seq++);
1113 if (found < 0)
1114 return 0; /* No more to check */
1115 } while (!found);
1116 } while (nametable_find(hash, name_len, new_name));
1117
1118 /*
1119 * The alternate wasn't in the table already. Pass it back
1120 * to the caller.
1121 */
1122 memcpy(name, new_name, name_len);
1123
1124 return 1;
1125 }
1126
1127 static void
1128 generate_obfuscated_name(
1129 xfs_ino_t ino,
1130 int namelen,
1131 unsigned char *name)
1132 {
1133 xfs_dahash_t hash;
1134
1135 /*
1136 * We don't obfuscate "lost+found" or any orphan files
1137 * therein. When the name table is used for extended
1138 * attributes, the inode number provided is 0, in which
1139 * case we don't need to make this check.
1140 */
1141 if (ino && in_lost_found(ino, namelen, name))
1142 return;
1143
1144 /*
1145 * If the name starts with a slash, just skip over it. It
1146 * isn't included in the hash and we don't record it in the
1147 * name table. Note that the namelen value passed in does
1148 * not count the leading slash (if one is present).
1149 */
1150 if (*name == '/')
1151 name++;
1152
1153 /* Obfuscate the name (if possible) */
1154
1155 hash = libxfs_da_hashname(name, namelen);
1156 obfuscate_name(hash, namelen, name);
1157
1158 /*
1159 * Make sure the name is not something already seen. If we
1160 * fail to find a suitable alternate, we're dealing with a
1161 * very pathological situation, and we may end up creating
1162 * a duplicate name in the metadump, so issue a warning.
1163 */
1164 if (!handle_duplicate_name(hash, namelen, name)) {
1165 print_warning("duplicate name for inode %llu "
1166 "in dir inode %llu\n",
1167 (unsigned long long) ino,
1168 (unsigned long long) cur_ino);
1169 return;
1170 }
1171
1172 /* Create an entry for the new name in the name table. */
1173
1174 if (!nametable_add(hash, namelen, name))
1175 print_warning("unable to record name for inode %llu "
1176 "in dir inode %llu\n",
1177 (unsigned long long) ino,
1178 (unsigned long long) cur_ino);
1179 }
1180
1181 static void
1182 process_sf_dir(
1183 xfs_dinode_t *dip)
1184 {
1185 struct xfs_dir2_sf_hdr *sfp;
1186 xfs_dir2_sf_entry_t *sfep;
1187 __uint64_t ino_dir_size;
1188 int i;
1189
1190 sfp = (struct xfs_dir2_sf_hdr *)XFS_DFORK_DPTR(dip);
1191 ino_dir_size = be64_to_cpu(dip->di_size);
1192 if (ino_dir_size > XFS_DFORK_DSIZE(dip, mp)) {
1193 ino_dir_size = XFS_DFORK_DSIZE(dip, mp);
1194 if (show_warnings)
1195 print_warning("invalid size in dir inode %llu",
1196 (long long)cur_ino);
1197 }
1198
1199 sfep = xfs_dir2_sf_firstentry(sfp);
1200 for (i = 0; (i < sfp->count) &&
1201 ((char *)sfep - (char *)sfp < ino_dir_size); i++) {
1202
1203 /*
1204 * first check for bad name lengths. If they are bad, we
1205 * have limits on how much can be obfuscated.
1206 */
1207 int namelen = sfep->namelen;
1208
1209 if (namelen == 0) {
1210 if (show_warnings)
1211 print_warning("zero length entry in dir inode "
1212 "%llu", (long long)cur_ino);
1213 if (i != sfp->count - 1)
1214 break;
1215 namelen = ino_dir_size - ((char *)&sfep->name[0] -
1216 (char *)sfp);
1217 } else if ((char *)sfep - (char *)sfp +
1218 M_DIROPS(mp)->sf_entsize(sfp, sfep->namelen) >
1219 ino_dir_size) {
1220 if (show_warnings)
1221 print_warning("entry length in dir inode %llu "
1222 "overflows space", (long long)cur_ino);
1223 if (i != sfp->count - 1)
1224 break;
1225 namelen = ino_dir_size - ((char *)&sfep->name[0] -
1226 (char *)sfp);
1227 }
1228
1229 if (obfuscate)
1230 generate_obfuscated_name(
1231 M_DIROPS(mp)->sf_get_ino(sfp, sfep),
1232 namelen, &sfep->name[0]);
1233
1234 sfep = (xfs_dir2_sf_entry_t *)((char *)sfep +
1235 M_DIROPS(mp)->sf_entsize(sfp, namelen));
1236 }
1237
1238 /* zero stale data in rest of space in data fork, if any */
1239 if (zero_stale_data && (ino_dir_size < XFS_DFORK_DSIZE(dip, mp)))
1240 memset(sfep, 0, XFS_DFORK_DSIZE(dip, mp) - ino_dir_size);
1241 }
1242
1243 /*
1244 * The pathname may not be null terminated. It may be terminated by the end of
1245 * a buffer or inode literal area, and the start of the next region contains
1246 * unknown data. Therefore, when we get to the last component of the symlink, we
1247 * cannot assume that strlen() will give us the right result. Hence we need to
1248 * track the remaining pathname length and use that instead.
1249 */
1250 static void
1251 obfuscate_path_components(
1252 char *buf,
1253 __uint64_t len)
1254 {
1255 unsigned char *comp = (unsigned char *)buf;
1256 unsigned char *end = comp + len;
1257 xfs_dahash_t hash;
1258
1259 while (comp < end) {
1260 char *slash;
1261 int namelen;
1262
1263 /* find slash at end of this component */
1264 slash = strchr((char *)comp, '/');
1265 if (!slash) {
1266 /* last (or single) component */
1267 namelen = strnlen((char *)comp, len);
1268 hash = libxfs_da_hashname(comp, namelen);
1269 obfuscate_name(hash, namelen, comp);
1270 break;
1271 }
1272 namelen = slash - (char *)comp;
1273 /* handle leading or consecutive slashes */
1274 if (!namelen) {
1275 comp++;
1276 len--;
1277 continue;
1278 }
1279 hash = libxfs_da_hashname(comp, namelen);
1280 obfuscate_name(hash, namelen, comp);
1281 comp += namelen + 1;
1282 len -= namelen + 1;
1283 }
1284 }
1285
1286 static void
1287 process_sf_symlink(
1288 xfs_dinode_t *dip)
1289 {
1290 __uint64_t len;
1291 char *buf;
1292
1293 len = be64_to_cpu(dip->di_size);
1294 if (len > XFS_DFORK_DSIZE(dip, mp)) {
1295 if (show_warnings)
1296 print_warning("invalid size (%d) in symlink inode %llu",
1297 len, (long long)cur_ino);
1298 len = XFS_DFORK_DSIZE(dip, mp);
1299 }
1300
1301 buf = (char *)XFS_DFORK_DPTR(dip);
1302 if (obfuscate)
1303 obfuscate_path_components(buf, len);
1304
1305 /* zero stale data in rest of space in data fork, if any */
1306 if (zero_stale_data && len < XFS_DFORK_DSIZE(dip, mp))
1307 memset(&buf[len], 0, XFS_DFORK_DSIZE(dip, mp) - len);
1308 }
1309
1310 static void
1311 process_sf_attr(
1312 xfs_dinode_t *dip)
1313 {
1314 /*
1315 * with extended attributes, obfuscate the names and fill the actual
1316 * values with 'v' (to see a valid string length, as opposed to NULLs)
1317 */
1318
1319 xfs_attr_shortform_t *asfp;
1320 xfs_attr_sf_entry_t *asfep;
1321 int ino_attr_size;
1322 int i;
1323
1324 asfp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
1325 if (asfp->hdr.count == 0)
1326 return;
1327
1328 ino_attr_size = be16_to_cpu(asfp->hdr.totsize);
1329 if (ino_attr_size > XFS_DFORK_ASIZE(dip, mp)) {
1330 ino_attr_size = XFS_DFORK_ASIZE(dip, mp);
1331 if (show_warnings)
1332 print_warning("invalid attr size in inode %llu",
1333 (long long)cur_ino);
1334 }
1335
1336 asfep = &asfp->list[0];
1337 for (i = 0; (i < asfp->hdr.count) &&
1338 ((char *)asfep - (char *)asfp < ino_attr_size); i++) {
1339
1340 int namelen = asfep->namelen;
1341
1342 if (namelen == 0) {
1343 if (show_warnings)
1344 print_warning("zero length attr entry in inode "
1345 "%llu", (long long)cur_ino);
1346 break;
1347 } else if ((char *)asfep - (char *)asfp +
1348 XFS_ATTR_SF_ENTSIZE(asfep) > ino_attr_size) {
1349 if (show_warnings)
1350 print_warning("attr entry length in inode %llu "
1351 "overflows space", (long long)cur_ino);
1352 break;
1353 }
1354
1355 if (obfuscate) {
1356 generate_obfuscated_name(0, asfep->namelen,
1357 &asfep->nameval[0]);
1358 memset(&asfep->nameval[asfep->namelen], 'v',
1359 asfep->valuelen);
1360 }
1361
1362 asfep = (xfs_attr_sf_entry_t *)((char *)asfep +
1363 XFS_ATTR_SF_ENTSIZE(asfep));
1364 }
1365
1366 /* zero stale data in rest of space in attr fork, if any */
1367 if (zero_stale_data && (ino_attr_size < XFS_DFORK_ASIZE(dip, mp)))
1368 memset(asfep, 0, XFS_DFORK_ASIZE(dip, mp) - ino_attr_size);
1369 }
1370
1371 static void
1372 process_dir_data_block(
1373 char *block,
1374 xfs_fileoff_t offset,
1375 int is_block_format)
1376 {
1377 /*
1378 * We have to rely on the file offset and signature of the block to
1379 * handle its contents. If it's invalid, leave it alone.
1380 * For multi-fsblock dir blocks, if a name crosses an extent boundary,
1381 * ignore it and continue.
1382 */
1383 int dir_offset;
1384 char *ptr;
1385 char *endptr;
1386 int end_of_data;
1387 int wantmagic;
1388 struct xfs_dir2_data_hdr *datahdr;
1389
1390 datahdr = (struct xfs_dir2_data_hdr *)block;
1391
1392 if (is_block_format) {
1393 xfs_dir2_leaf_entry_t *blp;
1394 xfs_dir2_block_tail_t *btp;
1395
1396 btp = xfs_dir2_block_tail_p(mp->m_dir_geo, datahdr);
1397 blp = xfs_dir2_block_leaf_p(btp);
1398 if ((char *)blp > (char *)btp)
1399 blp = (xfs_dir2_leaf_entry_t *)btp;
1400
1401 end_of_data = (char *)blp - block;
1402 if (xfs_sb_version_hascrc(&mp->m_sb))
1403 wantmagic = XFS_DIR3_BLOCK_MAGIC;
1404 else
1405 wantmagic = XFS_DIR2_BLOCK_MAGIC;
1406 } else { /* leaf/node format */
1407 end_of_data = mp->m_dir_geo->fsbcount << mp->m_sb.sb_blocklog;
1408 if (xfs_sb_version_hascrc(&mp->m_sb))
1409 wantmagic = XFS_DIR3_DATA_MAGIC;
1410 else
1411 wantmagic = XFS_DIR2_DATA_MAGIC;
1412 }
1413
1414 if (be32_to_cpu(datahdr->magic) != wantmagic) {
1415 if (show_warnings)
1416 print_warning(
1417 "invalid magic in dir inode %llu block %ld",
1418 (long long)cur_ino, (long)offset);
1419 return;
1420 }
1421
1422 dir_offset = M_DIROPS(mp)->data_entry_offset;
1423 ptr = block + dir_offset;
1424 endptr = block + mp->m_dir_geo->blksize;
1425
1426 while (ptr < endptr && dir_offset < end_of_data) {
1427 xfs_dir2_data_entry_t *dep;
1428 xfs_dir2_data_unused_t *dup;
1429 int length;
1430
1431 dup = (xfs_dir2_data_unused_t *)ptr;
1432
1433 if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
1434 int length = be16_to_cpu(dup->length);
1435 if (dir_offset + length > end_of_data ||
1436 !length || (length & (XFS_DIR2_DATA_ALIGN - 1))) {
1437 if (show_warnings)
1438 print_warning(
1439 "invalid length for dir free space in inode %llu",
1440 (long long)cur_ino);
1441 return;
1442 }
1443 if (be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)) !=
1444 dir_offset)
1445 return;
1446 dir_offset += length;
1447 ptr += length;
1448 /*
1449 * Zero the unused space up to the tag - the tag is
1450 * actually at a variable offset, so zeroing &dup->tag
1451 * is zeroing the free space in between
1452 */
1453 if (zero_stale_data) {
1454 int zlen = length -
1455 sizeof(xfs_dir2_data_unused_t);
1456
1457 if (zlen > 0) {
1458 memset(&dup->tag, 0, zlen);
1459 iocur_top->need_crc = 1;
1460 }
1461 }
1462 if (dir_offset >= end_of_data || ptr >= endptr)
1463 return;
1464 }
1465
1466 dep = (xfs_dir2_data_entry_t *)ptr;
1467 length = M_DIROPS(mp)->data_entsize(dep->namelen);
1468
1469 if (dir_offset + length > end_of_data ||
1470 ptr + length > endptr) {
1471 if (show_warnings)
1472 print_warning(
1473 "invalid length for dir entry name in inode %llu",
1474 (long long)cur_ino);
1475 return;
1476 }
1477 if (be16_to_cpu(*M_DIROPS(mp)->data_entry_tag_p(dep)) !=
1478 dir_offset)
1479 return;
1480
1481 if (obfuscate)
1482 generate_obfuscated_name(be64_to_cpu(dep->inumber),
1483 dep->namelen, &dep->name[0]);
1484 dir_offset += length;
1485 ptr += length;
1486 /* Zero the unused space after name, up to the tag */
1487 if (zero_stale_data) {
1488 /* 1 byte for ftype; don't bother with conditional */
1489 int zlen =
1490 (char *)M_DIROPS(mp)->data_entry_tag_p(dep) -
1491 (char *)&dep->name[dep->namelen] - 1;
1492 if (zlen > 0) {
1493 memset(&dep->name[dep->namelen] + 1, 0, zlen);
1494 iocur_top->need_crc = 1;
1495 }
1496 }
1497 }
1498 }
1499
1500 static void
1501 process_symlink_block(
1502 char *block)
1503 {
1504 char *link = block;
1505
1506 if (xfs_sb_version_hascrc(&(mp)->m_sb))
1507 link += sizeof(struct xfs_dsymlink_hdr);
1508
1509 if (obfuscate)
1510 obfuscate_path_components(link, XFS_SYMLINK_BUF_SPACE(mp,
1511 mp->m_sb.sb_blocksize));
1512 if (zero_stale_data) {
1513 size_t linklen, zlen;
1514
1515 linklen = strlen(link);
1516 zlen = mp->m_sb.sb_blocksize - linklen;
1517 if (xfs_sb_version_hascrc(&mp->m_sb))
1518 zlen -= sizeof(struct xfs_dsymlink_hdr);
1519 if (zlen < mp->m_sb.sb_blocksize)
1520 memset(link + linklen, 0, zlen);
1521 }
1522 }
1523
1524 #define MAX_REMOTE_VALS 4095
1525
1526 static struct attr_data_s {
1527 int remote_val_count;
1528 xfs_dablk_t remote_vals[MAX_REMOTE_VALS];
1529 } attr_data;
1530
1531 static inline void
1532 add_remote_vals(
1533 xfs_dablk_t blockidx,
1534 int length)
1535 {
1536 while (length > 0 && attr_data.remote_val_count < MAX_REMOTE_VALS) {
1537 attr_data.remote_vals[attr_data.remote_val_count] = blockidx;
1538 attr_data.remote_val_count++;
1539 blockidx++;
1540 length -= mp->m_sb.sb_blocksize;
1541 }
1542
1543 if (attr_data.remote_val_count >= MAX_REMOTE_VALS) {
1544 print_warning(
1545 "Overflowed attr obfuscation array. No longer obfuscating remote attrs.");
1546 }
1547 }
1548
1549 /* Handle remote and leaf attributes */
1550 static void
1551 process_attr_block(
1552 char *block,
1553 xfs_fileoff_t offset)
1554 {
1555 struct xfs_attr_leafblock *leaf;
1556 struct xfs_attr3_icleaf_hdr hdr;
1557 int i;
1558 int nentries;
1559 xfs_attr_leaf_entry_t *entry;
1560 xfs_attr_leaf_name_local_t *local;
1561 xfs_attr_leaf_name_remote_t *remote;
1562 __uint32_t bs = mp->m_sb.sb_blocksize;
1563 char *first_name;
1564
1565
1566 leaf = (xfs_attr_leafblock_t *)block;
1567
1568 /* Remote attributes - attr3 has XFS_ATTR3_RMT_MAGIC, attr has none */
1569 if ((be16_to_cpu(leaf->hdr.info.magic) != XFS_ATTR_LEAF_MAGIC) &&
1570 (be16_to_cpu(leaf->hdr.info.magic) != XFS_ATTR3_LEAF_MAGIC)) {
1571 for (i = 0; i < attr_data.remote_val_count; i++) {
1572 if (obfuscate && attr_data.remote_vals[i] == offset)
1573 /* Macros to handle both attr and attr3 */
1574 memset(block +
1575 (bs - XFS_ATTR3_RMT_BUF_SPACE(mp, bs)),
1576 'v', XFS_ATTR3_RMT_BUF_SPACE(mp, bs));
1577 }
1578 return;
1579 }
1580
1581 /* Ok, it's a leaf - get header; accounts for crc & non-crc */
1582 xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &hdr, leaf);
1583
1584 nentries = hdr.count;
1585 if (nentries * sizeof(xfs_attr_leaf_entry_t) +
1586 xfs_attr3_leaf_hdr_size(leaf) >
1587 XFS_ATTR3_RMT_BUF_SPACE(mp, bs)) {
1588 if (show_warnings)
1589 print_warning("invalid attr count in inode %llu",
1590 (long long)cur_ino);
1591 return;
1592 }
1593
1594 entry = xfs_attr3_leaf_entryp(leaf);
1595 /* We will move this as we parse */
1596 first_name = NULL;
1597 for (i = 0; i < nentries; i++, entry++) {
1598 int nlen, vlen, zlen;
1599
1600 /* Grows up; if this name is topmost, move first_name */
1601 if (!first_name || xfs_attr3_leaf_name(leaf, i) < first_name)
1602 first_name = xfs_attr3_leaf_name(leaf, i);
1603
1604 if (be16_to_cpu(entry->nameidx) > mp->m_sb.sb_blocksize) {
1605 if (show_warnings)
1606 print_warning(
1607 "invalid attr nameidx in inode %llu",
1608 (long long)cur_ino);
1609 break;
1610 }
1611 if (entry->flags & XFS_ATTR_LOCAL) {
1612 local = xfs_attr3_leaf_name_local(leaf, i);
1613 if (local->namelen == 0) {
1614 if (show_warnings)
1615 print_warning(
1616 "zero length for attr name in inode %llu",
1617 (long long)cur_ino);
1618 break;
1619 }
1620 if (obfuscate) {
1621 generate_obfuscated_name(0, local->namelen,
1622 &local->nameval[0]);
1623 memset(&local->nameval[local->namelen], 'v',
1624 be16_to_cpu(local->valuelen));
1625 }
1626 /* zero from end of nameval[] to next name start */
1627 nlen = local->namelen;
1628 vlen = be16_to_cpu(local->valuelen);
1629 zlen = xfs_attr_leaf_entsize_local(nlen, vlen) -
1630 (sizeof(xfs_attr_leaf_name_local_t) - 1 +
1631 nlen + vlen);
1632 if (zero_stale_data)
1633 memset(&local->nameval[nlen + vlen], 0, zlen);
1634 } else {
1635 remote = xfs_attr3_leaf_name_remote(leaf, i);
1636 if (remote->namelen == 0 || remote->valueblk == 0) {
1637 if (show_warnings)
1638 print_warning(
1639 "invalid attr entry in inode %llu",
1640 (long long)cur_ino);
1641 break;
1642 }
1643 if (obfuscate) {
1644 generate_obfuscated_name(0, remote->namelen,
1645 &remote->name[0]);
1646 add_remote_vals(be32_to_cpu(remote->valueblk),
1647 be32_to_cpu(remote->valuelen));
1648 }
1649 /* zero from end of name[] to next name start */
1650 nlen = remote->namelen;
1651 zlen = xfs_attr_leaf_entsize_remote(nlen) -
1652 (sizeof(xfs_attr_leaf_name_remote_t) - 1 +
1653 nlen);
1654 if (zero_stale_data)
1655 memset(&remote->name[nlen], 0, zlen);
1656 }
1657 }
1658
1659 /* Zero from end of entries array to the first name/val */
1660 if (zero_stale_data) {
1661 struct xfs_attr_leaf_entry *entries;
1662
1663 entries = xfs_attr3_leaf_entryp(leaf);
1664 memset(&entries[nentries], 0,
1665 first_name - (char *)&entries[nentries]);
1666 }
1667 }
1668
1669 /* Processes symlinks, attrs, directories ... */
1670 static int
1671 process_single_fsb_objects(
1672 xfs_fileoff_t o,
1673 xfs_fsblock_t s,
1674 xfs_filblks_t c,
1675 typnm_t btype,
1676 xfs_fileoff_t last)
1677 {
1678 char *dp;
1679 int ret = 0;
1680 int i;
1681
1682 for (i = 0; i < c; i++) {
1683 push_cur();
1684 set_cur(&typtab[btype], XFS_FSB_TO_DADDR(mp, s), blkbb,
1685 DB_RING_IGN, NULL);
1686
1687 if (!iocur_top->data) {
1688 xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, s);
1689 xfs_agblock_t agbno = XFS_FSB_TO_AGBNO(mp, s);
1690
1691 print_warning("cannot read %s block %u/%u (%llu)",
1692 typtab[btype].name, agno, agbno, s);
1693 if (stop_on_read_error)
1694 ret = -EIO;
1695 goto out_pop;
1696
1697 }
1698
1699 if (!obfuscate && !zero_stale_data)
1700 goto write;
1701
1702 /* Zero unused part of interior nodes */
1703 if (zero_stale_data) {
1704 xfs_da_intnode_t *node = iocur_top->data;
1705 int magic = be16_to_cpu(node->hdr.info.magic);
1706
1707 if (magic == XFS_DA_NODE_MAGIC ||
1708 magic == XFS_DA3_NODE_MAGIC) {
1709 struct xfs_da3_icnode_hdr hdr;
1710 int used;
1711
1712 M_DIROPS(mp)->node_hdr_from_disk(&hdr, node);
1713 used = M_DIROPS(mp)->node_hdr_size;
1714
1715 used += hdr.count
1716 * sizeof(struct xfs_da_node_entry);
1717
1718 if (used < mp->m_sb.sb_blocksize) {
1719 memset((char *)node + used, 0,
1720 mp->m_sb.sb_blocksize - used);
1721 iocur_top->need_crc = 1;
1722 }
1723 }
1724 }
1725
1726 /* Handle leaf nodes */
1727 dp = iocur_top->data;
1728 switch (btype) {
1729 case TYP_DIR2:
1730 if (o >= mp->m_dir_geo->leafblk)
1731 break;
1732
1733 process_dir_data_block(dp, o,
1734 last == mp->m_dir_geo->fsbcount);
1735 iocur_top->need_crc = 1;
1736 break;
1737 case TYP_SYMLINK:
1738 process_symlink_block(dp);
1739 iocur_top->need_crc = 1;
1740 break;
1741 case TYP_ATTR:
1742 process_attr_block(dp, o);
1743 iocur_top->need_crc = 1;
1744 break;
1745 default:
1746 break;
1747 }
1748
1749 write:
1750 ret = write_buf(iocur_top);
1751 out_pop:
1752 pop_cur();
1753 if (ret)
1754 break;
1755 o++;
1756 s++;
1757 }
1758
1759 return ret;
1760 }
1761
1762 /*
1763 * Static map to aggregate multiple extents into a single directory block.
1764 */
1765 static struct bbmap mfsb_map;
1766 static int mfsb_length;
1767
1768 static int
1769 process_multi_fsb_objects(
1770 xfs_fileoff_t o,
1771 xfs_fsblock_t s,
1772 xfs_filblks_t c,
1773 typnm_t btype,
1774 xfs_fileoff_t last)
1775 {
1776 int ret = 0;
1777
1778 switch (btype) {
1779 case TYP_DIR2:
1780 break;
1781 default:
1782 print_warning("bad type for multi-fsb object %d", btype);
1783 return -EINVAL;
1784 }
1785
1786 while (c > 0) {
1787 unsigned int bm_len;
1788
1789 if (mfsb_length + c >= mp->m_dir_geo->fsbcount) {
1790 bm_len = mp->m_dir_geo->fsbcount - mfsb_length;
1791 mfsb_length = 0;
1792 } else {
1793 mfsb_length += c;
1794 bm_len = c;
1795 }
1796
1797 mfsb_map.b[mfsb_map.nmaps].bm_bn = XFS_FSB_TO_DADDR(mp, s);
1798 mfsb_map.b[mfsb_map.nmaps].bm_len = XFS_FSB_TO_BB(mp, bm_len);
1799 mfsb_map.nmaps++;
1800
1801 if (mfsb_length == 0) {
1802 push_cur();
1803 set_cur(&typtab[btype], 0, 0, DB_RING_IGN, &mfsb_map);
1804 if (!iocur_top->data) {
1805 xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, s);
1806 xfs_agblock_t agbno = XFS_FSB_TO_AGBNO(mp, s);
1807
1808 print_warning("cannot read %s block %u/%u (%llu)",
1809 typtab[btype].name, agno, agbno, s);
1810 if (stop_on_read_error)
1811 ret = -1;
1812 goto out_pop;
1813
1814 }
1815
1816 if ((!obfuscate && !zero_stale_data) ||
1817 o >= mp->m_dir_geo->leafblk) {
1818 ret = write_buf(iocur_top);
1819 goto out_pop;
1820 }
1821
1822 process_dir_data_block(iocur_top->data, o,
1823 last == mp->m_dir_geo->fsbcount);
1824 iocur_top->need_crc = 1;
1825 ret = write_buf(iocur_top);
1826 out_pop:
1827 pop_cur();
1828 mfsb_map.nmaps = 0;
1829 if (ret)
1830 break;
1831 }
1832 c -= bm_len;
1833 s += bm_len;
1834 }
1835
1836 return ret;
1837 }
1838
1839 /* inode copy routines */
1840 static int
1841 process_bmbt_reclist(
1842 xfs_bmbt_rec_t *rp,
1843 int numrecs,
1844 typnm_t btype)
1845 {
1846 int i;
1847 xfs_fileoff_t o, op = NULLFILEOFF;
1848 xfs_fsblock_t s;
1849 xfs_filblks_t c, cp = NULLFILEOFF;
1850 int f;
1851 xfs_fileoff_t last;
1852 xfs_agnumber_t agno;
1853 xfs_agblock_t agbno;
1854 int error;
1855
1856 if (btype == TYP_DATA)
1857 return 1;
1858
1859 convert_extent(&rp[numrecs - 1], &o, &s, &c, &f);
1860 last = o + c;
1861
1862 for (i = 0; i < numrecs; i++, rp++) {
1863 convert_extent(rp, &o, &s, &c, &f);
1864
1865 /*
1866 * ignore extents that are clearly bogus, and if a bogus
1867 * one is found, stop processing remaining extents
1868 */
1869 if (i > 0 && op + cp > o) {
1870 if (show_warnings)
1871 print_warning("bmap extent %d in %s ino %llu "
1872 "starts at %llu, previous extent "
1873 "ended at %llu", i,
1874 typtab[btype].name, (long long)cur_ino,
1875 o, op + cp - 1);
1876 break;
1877 }
1878
1879 if (c > max_extent_size) {
1880 /*
1881 * since we are only processing non-data extents,
1882 * large numbers of blocks in a metadata extent are
1883 * extremely rare and more than likely to be corrupt.
1884 */
1885 if (show_warnings)
1886 print_warning("suspicious count %u in bmap "
1887 "extent %d in %s ino %llu", c, i,
1888 typtab[btype].name, (long long)cur_ino);
1889 break;
1890 }
1891
1892 op = o;
1893 cp = c;
1894
1895 agno = XFS_FSB_TO_AGNO(mp, s);
1896 agbno = XFS_FSB_TO_AGBNO(mp, s);
1897
1898 if (!valid_bno(agno, agbno)) {
1899 if (show_warnings)
1900 print_warning("invalid block number %u/%u "
1901 "(%llu) in bmap extent %d in %s ino "
1902 "%llu", agno, agbno, s, i,
1903 typtab[btype].name, (long long)cur_ino);
1904 break;
1905 }
1906
1907 if (!valid_bno(agno, agbno + c - 1)) {
1908 if (show_warnings)
1909 print_warning("bmap extent %i in %s inode %llu "
1910 "overflows AG (end is %u/%u)", i,
1911 typtab[btype].name, (long long)cur_ino,
1912 agno, agbno + c - 1);
1913 break;
1914 }
1915
1916 /* multi-extent blocks require special handling */
1917 if (btype != TYP_DIR2 || mp->m_dir_geo->fsbcount == 1) {
1918 error = process_single_fsb_objects(o, s, c, btype, last);
1919 } else {
1920 error = process_multi_fsb_objects(o, s, c, btype, last);
1921 }
1922 if (error)
1923 return 0;
1924 }
1925
1926 return 1;
1927 }
1928
1929 static int
1930 scanfunc_bmap(
1931 struct xfs_btree_block *block,
1932 xfs_agnumber_t agno,
1933 xfs_agblock_t agbno,
1934 int level,
1935 typnm_t btype,
1936 void *arg) /* ptr to itype */
1937 {
1938 int i;
1939 xfs_bmbt_ptr_t *pp;
1940 int nrecs;
1941
1942 nrecs = be16_to_cpu(block->bb_numrecs);
1943
1944 if (level == 0) {
1945 if (nrecs > mp->m_bmap_dmxr[0]) {
1946 if (show_warnings)
1947 print_warning("invalid numrecs (%u) in %s "
1948 "block %u/%u", nrecs,
1949 typtab[btype].name, agno, agbno);
1950 return 1;
1951 }
1952 return process_bmbt_reclist(XFS_BMBT_REC_ADDR(mp, block, 1),
1953 nrecs, *(typnm_t*)arg);
1954 }
1955
1956 if (nrecs > mp->m_bmap_dmxr[1]) {
1957 if (show_warnings)
1958 print_warning("invalid numrecs (%u) in %s block %u/%u",
1959 nrecs, typtab[btype].name, agno, agbno);
1960 return 1;
1961 }
1962 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
1963 for (i = 0; i < nrecs; i++) {
1964 xfs_agnumber_t ag;
1965 xfs_agblock_t bno;
1966
1967 ag = XFS_FSB_TO_AGNO(mp, get_unaligned_be64(&pp[i]));
1968 bno = XFS_FSB_TO_AGBNO(mp, get_unaligned_be64(&pp[i]));
1969
1970 if (bno == 0 || bno > mp->m_sb.sb_agblocks ||
1971 ag > mp->m_sb.sb_agcount) {
1972 if (show_warnings)
1973 print_warning("invalid block number (%u/%u) "
1974 "in %s block %u/%u", ag, bno,
1975 typtab[btype].name, agno, agbno);
1976 continue;
1977 }
1978
1979 if (!scan_btree(ag, bno, level, btype, arg, scanfunc_bmap))
1980 return 0;
1981 }
1982 return 1;
1983 }
1984
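/*
 * Process a btree-format inode fork: validate the bmbt root stored in the
 * inode literal area, then either process its records directly (level 0)
 * or walk the child blocks with scanfunc_bmap().
 */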
1985 static int
1986 process_btinode(
1987 xfs_dinode_t *dip,
1988 typnm_t itype)
1989 {
1990 xfs_bmdr_block_t *dib;
1991 int i;
1992 xfs_bmbt_ptr_t *pp;
1993 int level;
1994 int nrecs;
1995 int maxrecs;
1996 int whichfork;
1997 typnm_t btype;
1998
1999 whichfork = (itype == TYP_ATTR) ? XFS_ATTR_FORK : XFS_DATA_FORK;
2000 btype = (itype == TYP_ATTR) ? TYP_BMAPBTA : TYP_BMAPBTD;
2001
2002 dib = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
2003 level = be16_to_cpu(dib->bb_level);
2004 nrecs = be16_to_cpu(dib->bb_numrecs);
2005
2006 if (level > XFS_BM_MAXLEVELS(mp, whichfork)) {
2007 if (show_warnings)
2008 print_warning("invalid level (%u) in inode %lld %s "
2009 "root", level, (long long)cur_ino,
2010 typtab[btype].name);
2011 return 1;
2012 }
2013
2014 if (level == 0) {
2015 return process_bmbt_reclist(XFS_BMDR_REC_ADDR(dib, 1),
2016 nrecs, itype);
2017 }
2018
2019 maxrecs = xfs_bmdr_maxrecs(XFS_DFORK_SIZE(dip, mp, whichfork), 0);
2020 if (nrecs > maxrecs) {
2021 if (show_warnings)
2022 print_warning("invalid numrecs (%u) in inode %lld %s "
2023 "root", nrecs, (long long)cur_ino,
2024 typtab[btype].name);
2025 return 1;
2026 }
2027
2028 pp = XFS_BMDR_PTR_ADDR(dib, 1, maxrecs);
2029 for (i = 0; i < nrecs; i++) {
2030 xfs_agnumber_t ag;
2031 xfs_agblock_t bno;
2032
2033 ag = XFS_FSB_TO_AGNO(mp, get_unaligned_be64(&pp[i]));
2034 bno = XFS_FSB_TO_AGBNO(mp, get_unaligned_be64(&pp[i]));
2035
2036 if (bno == 0 || bno > mp->m_sb.sb_agblocks ||
2037 ag > mp->m_sb.sb_agcount) {
2038 if (show_warnings)
2039 print_warning("invalid block number (%u/%u) "
2040 "in inode %llu %s root", ag,
2041 bno, (long long)cur_ino,
2042 typtab[btype].name);
2043 continue;
2044 }
2045
2046 if (!scan_btree(ag, bno, level, btype, &itype, scanfunc_bmap))
2047 return 0;
2048 }
2049 return 1;
2050 }
2051
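/*
 * Process an extent-format inode fork: optionally zero the unused space
 * past the in-core extent list, then copy the blocks the extents reference.
 */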
2052 static int
2053 process_exinode(
2054 xfs_dinode_t *dip,
2055 typnm_t itype)
2056 {
2057 int whichfork;
2058 int used;
2059 xfs_extnum_t nex;
2060
2061 whichfork = (itype == TYP_ATTR) ? XFS_ATTR_FORK : XFS_DATA_FORK;
2062
2063 nex = XFS_DFORK_NEXTENTS(dip, whichfork);
2064 used = nex * sizeof(xfs_bmbt_rec_t);
2065 if (nex < 0 || used > XFS_DFORK_SIZE(dip, mp, whichfork)) {
2066 if (show_warnings)
2067 print_warning("bad number of extents %d in inode %lld",
2068 nex, (long long)cur_ino);
2069 return 1;
2070 }
2071
2072 /* Zero unused data fork past used extents */
2073 if (zero_stale_data && (used < XFS_DFORK_SIZE(dip, mp, whichfork)))
2074 memset(XFS_DFORK_PTR(dip, whichfork) + used, 0,
2075 XFS_DFORK_SIZE(dip, mp, whichfork) - used);
2076
2077
2078 return process_bmbt_reclist((xfs_bmbt_rec_t *)XFS_DFORK_PTR(dip,
2079 whichfork), nex, itype);
2080 }
2081
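/*
 * Dispatch on the fork format: obfuscate short-form directories and
 * symlinks in place, and walk extent- or btree-format forks to copy the
 * metadata blocks they reference.
 */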
2082 static int
2083 process_inode_data(
2084 xfs_dinode_t *dip,
2085 typnm_t itype)
2086 {
2087 switch (dip->di_format) {
2088 case XFS_DINODE_FMT_LOCAL:
2089 if (obfuscate || zero_stale_data)
2090 switch (itype) {
2091 case TYP_DIR2:
2092 process_sf_dir(dip);
2093 break;
2094
2095 case TYP_SYMLINK:
2096 process_sf_symlink(dip);
2097 break;
2098
2099 default: ;
2100 }
2101 break;
2102
2103 case XFS_DINODE_FMT_EXTENTS:
2104 return process_exinode(dip, itype);
2105
2106 case XFS_DINODE_FMT_BTREE:
2107 return process_btinode(dip, itype);
2108 }
2109 return 1;
2110 }
2111
2112 /*
2113 * When we process the inode, we may change the data in the data and/or
2114 * attribute fork if they are in short form and we are obfuscating names.
2115 * In that case we need to recalculate the CRC of the inode, but we should
2116 * only do so if the CRC in the inode was good to begin with. If the CRC
2117 * is not OK, we just leave it alone.
2118 */
2119 static int
2120 process_inode(
2121 xfs_agnumber_t agno,
2122 xfs_agino_t agino,
2123 xfs_dinode_t *dip,
2124 bool free_inode)
2125 {
2126 int success;
2127 bool crc_was_ok = false; /* no recalc by default */
2128 bool need_new_crc = false;
2129
2130 success = 1;
2131 cur_ino = XFS_AGINO_TO_INO(mp, agno, agino);
2132
2133 /* we only care about crc recalculation if we will modify the inode. */
2134 if (obfuscate || zero_stale_data) {
2135 crc_was_ok = xfs_verify_cksum((char *)dip,
2136 mp->m_sb.sb_inodesize,
2137 offsetof(struct xfs_dinode, di_crc));
2138 }
2139
2140 if (free_inode) {
2141 if (zero_stale_data) {
2142 /* Zero all of the inode literal area */
2143 memset(XFS_DFORK_DPTR(dip), 0,
2144 XFS_LITINO(mp, dip->di_version));
2145 }
2146 goto done;
2147 }
2148
2149 /* copy appropriate data fork metadata */
2150 switch (be16_to_cpu(dip->di_mode) & S_IFMT) {
2151 case S_IFDIR:
2152 success = process_inode_data(dip, TYP_DIR2);
2153 if (dip->di_format == XFS_DINODE_FMT_LOCAL)
2154 need_new_crc = 1;
2155 break;
2156 case S_IFLNK:
2157 success = process_inode_data(dip, TYP_SYMLINK);
2158 if (dip->di_format == XFS_DINODE_FMT_LOCAL)
2159 need_new_crc = 1;
2160 break;
2161 case S_IFREG:
2162 success = process_inode_data(dip, TYP_DATA);
2163 break;
2164 default: ;
2165 }
2166 nametable_clear();
2167
2168 /* copy extended attributes if they exist and forkoff is valid */
2169 if (success &&
2170 XFS_DFORK_DSIZE(dip, mp) < XFS_LITINO(mp, dip->di_version)) {
2171 attr_data.remote_val_count = 0;
2172 switch (dip->di_aformat) {
2173 case XFS_DINODE_FMT_LOCAL:
2174 need_new_crc = 1;
2175 if (obfuscate || zero_stale_data)
2176 process_sf_attr(dip);
2177 break;
2178
2179 case XFS_DINODE_FMT_EXTENTS:
2180 success = process_exinode(dip, TYP_ATTR);
2181 break;
2182
2183 case XFS_DINODE_FMT_BTREE:
2184 success = process_btinode(dip, TYP_ATTR);
2185 break;
2186 }
2187 nametable_clear();
2188 }
2189
2190 done:
2191 /* Heavy-handed but low cost; just do it as a catch-all. */
2192 if (zero_stale_data)
2193 need_new_crc = 1;
2194
2195 if (crc_was_ok && need_new_crc)
2196 libxfs_dinode_calc_crc(mp, dip);
2197 return success;
2198 }
2199
2200 static __uint32_t inodes_copied = 0;
2201
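/*
 * Copy the inode buffers backing a single inobt record, processing each
 * allocated inode and skipping sparse clusters and free inodes' payloads.
 */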
2202 static int
2203 copy_inode_chunk(
2204 xfs_agnumber_t agno,
2205 xfs_inobt_rec_t *rp)
2206 {
2207 xfs_agino_t agino;
2208 int off;
2209 xfs_agblock_t agbno;
2210 xfs_agblock_t end_agbno;
2211 int i;
2212 int rval = 0;
2213 int blks_per_buf;
2214 int inodes_per_buf;
2215 int ioff;
2216
2217 agino = be32_to_cpu(rp->ir_startino);
2218 agbno = XFS_AGINO_TO_AGBNO(mp, agino);
2219 end_agbno = agbno + mp->m_ialloc_blks;
2220 off = XFS_INO_TO_OFFSET(mp, agino);
2221
2222 /*
2223 * If the fs supports sparse inode records, we must process inodes a
2224 * cluster at a time because that is the sparse allocation granularity.
2225 * Otherwise, we risk CRC corruption errors on reads of inode chunks.
2226 *
2227 * Also make sure that we don't process more than the single record
2228 * we've been passed (large block sizes can hold multiple inode chunks).
2229 */
2230 if (xfs_sb_version_hassparseinodes(&mp->m_sb))
2231 blks_per_buf = xfs_icluster_size_fsb(mp);
2232 else
2233 blks_per_buf = mp->m_ialloc_blks;
2234 inodes_per_buf = min(blks_per_buf << mp->m_sb.sb_inopblog,
2235 XFS_INODES_PER_CHUNK);
2236
2237 /*
2238 * Sanity check that we only process a single buffer if ir_startino has
2239 * a buffer offset. A non-zero offset implies that the entire chunk lies
2240 * within a block.
2241 */
2242 if (off && inodes_per_buf != XFS_INODES_PER_CHUNK) {
2243 print_warning("bad starting inode offset %d", off);
2244 return 0;
2245 }
2246
2247 if (agino == 0 || agino == NULLAGINO || !valid_bno(agno, agbno) ||
2248 !valid_bno(agno, XFS_AGINO_TO_AGBNO(mp,
2249 agino + XFS_INODES_PER_CHUNK - 1))) {
2250 if (show_warnings)
2251 print_warning("bad inode number %llu (%u/%u)",
2252 XFS_AGINO_TO_INO(mp, agno, agino), agno, agino);
2253 return 1;
2254 }
2255
2256 /*
2257 * check for basic assumptions about inode chunks, and if any
2258 * assumptions fail, don't process the inode chunk.
2259 */
2260 if ((mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK && off != 0) ||
2261 (mp->m_sb.sb_inopblock > XFS_INODES_PER_CHUNK &&
2262 off % XFS_INODES_PER_CHUNK != 0) ||
2263 (xfs_sb_version_hasalign(&mp->m_sb) &&
2264 mp->m_sb.sb_inoalignmt != 0 &&
2265 agbno % mp->m_sb.sb_inoalignmt != 0)) {
2266 if (show_warnings)
2267 print_warning("badly aligned inode (start = %llu)",
2268 XFS_AGINO_TO_INO(mp, agno, agino));
2269 return 1;
2270 }
2271
2272 push_cur();
2273 ioff = 0;
2274 while (agbno < end_agbno && ioff < XFS_INODES_PER_CHUNK) {
2275 if (xfs_inobt_is_sparse_disk(rp, ioff))
2276 goto next_bp;
2277
2278 set_cur(&typtab[TYP_INODE], XFS_AGB_TO_DADDR(mp, agno, agbno),
2279 XFS_FSB_TO_BB(mp, blks_per_buf), DB_RING_IGN, NULL);
2280 if (iocur_top->data == NULL) {
2281 print_warning("cannot read inode block %u/%u",
2282 agno, agbno);
2283 rval = !stop_on_read_error;
2284 goto pop_out;
2285 }
2286
2287 for (i = 0; i < inodes_per_buf; i++) {
2288 xfs_dinode_t *dip;
2289
2290 dip = (xfs_dinode_t *)((char *)iocur_top->data +
2291 ((off + i) << mp->m_sb.sb_inodelog));
2292
2293 /* process_inode handles free inodes, too */
2294 if (!process_inode(agno, agino + ioff + i, dip,
2295 XFS_INOBT_IS_FREE_DISK(rp, i)))
2296 goto pop_out;
2297
2298 inodes_copied++;
2299 }
2300
2301 if (write_buf(iocur_top))
2302 goto pop_out;
2303
2304 next_bp:
2305 agbno += blks_per_buf;
2306 ioff += inodes_per_buf;
2307 }
2308
2309 if (show_progress)
2310 print_progress("Copied %u of %u inodes (%u of %u AGs)",
2311 inodes_copied, mp->m_sb.sb_icount, agno,
2312 mp->m_sb.sb_agcount);
2313 rval = 1;
2314 pop_out:
2315 pop_cur();
2316 return rval;
2317 }
2318
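/*
 * Scan one level of an inode btree. For the inobt, leaf records are handed
 * to copy_inode_chunk(); for the finobt only the btree blocks themselves
 * are copied, since the inobt scan already copies the inode chunks.
 */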
2319 static int
2320 scanfunc_ino(
2321 struct xfs_btree_block *block,
2322 xfs_agnumber_t agno,
2323 xfs_agblock_t agbno,
2324 int level,
2325 typnm_t btype,
2326 void *arg)
2327 {
2328 xfs_inobt_rec_t *rp;
2329 xfs_inobt_ptr_t *pp;
2330 int i;
2331 int numrecs;
2332 int finobt = *(int *) arg;
2333
2334 numrecs = be16_to_cpu(block->bb_numrecs);
2335
2336 if (level == 0) {
2337 if (numrecs > mp->m_inobt_mxr[0]) {
2338 if (show_warnings)
2339 print_warning("invalid numrecs %d in %s "
2340 "block %u/%u", numrecs,
2341 typtab[btype].name, agno, agbno);
2342 numrecs = mp->m_inobt_mxr[0];
2343 }
2344
2345 /*
2346 * Only copy the btree blocks for the finobt. The inobt scan
2347 * copies the inode chunks.
2348 */
2349 if (finobt)
2350 return 1;
2351
2352 rp = XFS_INOBT_REC_ADDR(mp, block, 1);
2353 for (i = 0; i < numrecs; i++, rp++) {
2354 if (!copy_inode_chunk(agno, rp))
2355 return 0;
2356 }
2357 return 1;
2358 }
2359
2360 if (numrecs > mp->m_inobt_mxr[1]) {
2361 if (show_warnings)
2362 print_warning("invalid numrecs %d in %s block %u/%u",
2363 numrecs, typtab[btype].name, agno, agbno);
2364 numrecs = mp->m_inobt_mxr[1];
2365 }
2366
2367 pp = XFS_INOBT_PTR_ADDR(mp, block, 1, mp->m_inobt_mxr[1]);
2368 for (i = 0; i < numrecs; i++) {
2369 if (!valid_bno(agno, be32_to_cpu(pp[i]))) {
2370 if (show_warnings)
2371 print_warning("invalid block number (%u/%u) "
2372 "in %s block %u/%u",
2373 agno, be32_to_cpu(pp[i]),
2374 typtab[btype].name, agno, agbno);
2375 continue;
2376 }
2377 if (!scan_btree(agno, be32_to_cpu(pp[i]), level,
2378 btype, arg, scanfunc_ino))
2379 return 0;
2380 }
2381 return 1;
2382 }
2383
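/*
 * Copy the inode btrees of an AG and the inode chunks they reference,
 * scanning the finobt as well if the filesystem has one.
 */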
2384 static int
2385 copy_inodes(
2386 xfs_agnumber_t agno,
2387 xfs_agi_t *agi)
2388 {
2389 xfs_agblock_t root;
2390 int levels;
2391 int finobt = 0;
2392
2393 root = be32_to_cpu(agi->agi_root);
2394 levels = be32_to_cpu(agi->agi_level);
2395
2396 /* validate root and levels before processing the tree */
2397 if (root == 0 || root > mp->m_sb.sb_agblocks) {
2398 if (show_warnings)
2399 print_warning("invalid block number (%u) in inobt "
2400 "root in agi %u", root, agno);
2401 return 1;
2402 }
2403 if (levels >= XFS_BTREE_MAXLEVELS) {
2404 if (show_warnings)
2405 print_warning("invalid level (%u) in inobt root "
2406 "in agi %u", levels, agno);
2407 return 1;
2408 }
2409
2410 if (!scan_btree(agno, root, levels, TYP_INOBT, &finobt, scanfunc_ino))
2411 return 0;
2412
2413 if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
2414 root = be32_to_cpu(agi->agi_free_root);
2415 levels = be32_to_cpu(agi->agi_free_level);
2416
2417 finobt = 1;
2418 if (!scan_btree(agno, root, levels, TYP_INOBT, &finobt,
2419 scanfunc_ino))
2420 return 0;
2421 }
2422
2423 return 1;
2424 }
2425
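/*
 * Copy the per-AG headers (SB, AGF, AGI, AGFL), the free space and rmap
 * btrees, and then the inode btrees along with the inodes themselves.
 */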
2426 static int
2427 scan_ag(
2428 xfs_agnumber_t agno)
2429 {
2430 xfs_agf_t *agf;
2431 xfs_agi_t *agi;
2432 int stack_count = 0;
2433 int rval = 0;
2434
2435 /* copy the superblock of the AG */
2436 push_cur();
2437 stack_count++;
2438 set_cur(&typtab[TYP_SB], XFS_AG_DADDR(mp, agno, XFS_SB_DADDR),
2439 XFS_FSS_TO_BB(mp, 1), DB_RING_IGN, NULL);
2440 if (!iocur_top->data) {
2441 print_warning("cannot read superblock for ag %u", agno);
2442 if (stop_on_read_error)
2443 goto pop_out;
2444 } else {
2445 /* Replace any filesystem label with "L's" */
2446 if (obfuscate) {
2447 struct xfs_sb *sb = iocur_top->data;
2448 memset(sb->sb_fname, 'L',
2449 min(strlen(sb->sb_fname), sizeof(sb->sb_fname)));
2450 iocur_top->need_crc = 1;
2451 }
2452 if (write_buf(iocur_top))
2453 goto pop_out;
2454 }
2455
2456 /* copy the AG free space btree root */
2457 push_cur();
2458 stack_count++;
2459 set_cur(&typtab[TYP_AGF], XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
2460 XFS_FSS_TO_BB(mp, 1), DB_RING_IGN, NULL);
2461 agf = iocur_top->data;
2462 if (iocur_top->data == NULL) {
2463 print_warning("cannot read agf block for ag %u", agno);
2464 if (stop_on_read_error)
2465 goto pop_out;
2466 } else {
2467 if (write_buf(iocur_top))
2468 goto pop_out;
2469 }
2470
2471 /* copy the AG inode btree root */
2472 push_cur();
2473 stack_count++;
2474 set_cur(&typtab[TYP_AGI], XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
2475 XFS_FSS_TO_BB(mp, 1), DB_RING_IGN, NULL);
2476 agi = iocur_top->data;
2477 if (iocur_top->data == NULL) {
2478 print_warning("cannot read agi block for ag %u", agno);
2479 if (stop_on_read_error)
2480 goto pop_out;
2481 } else {
2482 if (write_buf(iocur_top))
2483 goto pop_out;
2484 }
2485
2486 /* copy the AG free list header */
2487 push_cur();
2488 stack_count++;
2489 set_cur(&typtab[TYP_AGFL], XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
2490 XFS_FSS_TO_BB(mp, 1), DB_RING_IGN, NULL);
2491 if (iocur_top->data == NULL) {
2492 print_warning("cannot read agfl block for ag %u", agno);
2493 if (stop_on_read_error)
2494 goto pop_out;
2495 } else {
2496 if (agf && zero_stale_data) {
2497 /* Zero out unused slots of the AGFL */
2498 int i;
2499 __be32 *agfl_bno;
2500
2501 agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, iocur_top->bp);
2502 i = be32_to_cpu(agf->agf_fllast);
2503
2504 for (;;) {
2505 if (++i == XFS_AGFL_SIZE(mp))
2506 i = 0;
2507 if (i == be32_to_cpu(agf->agf_flfirst))
2508 break;
2509 agfl_bno[i] = cpu_to_be32(NULLAGBLOCK);
2510 }
2511 iocur_top->need_crc = 1;
2512 }
2513 if (write_buf(iocur_top))
2514 goto pop_out;
2515 }
2516
2517 /* copy AG free space btrees */
2518 if (agf) {
2519 if (show_progress)
2520 print_progress("Copying free space trees of AG %u",
2521 agno);
2522 if (!copy_free_bno_btree(agno, agf))
2523 goto pop_out;
2524 if (!copy_free_cnt_btree(agno, agf))
2525 goto pop_out;
2526 if (!copy_rmap_btree(agno, agf))
2527 goto pop_out;
2528 }
2529
2530 /* copy inode btrees and the inodes and their associated metadata */
2531 if (agi) {
2532 if (!copy_inodes(agno, agi))
2533 goto pop_out;
2534 }
2535 rval = 1;
2536 pop_out:
2537 while (stack_count--)
2538 pop_cur();
2539 return rval;
2540 }
2541
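/*
 * Read a single inode by number and copy the metadata blocks referenced by
 * its data fork; used for the realtime and quota inodes named in the
 * superblock.
 */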
2542 static int
2543 copy_ino(
2544 xfs_ino_t ino,
2545 typnm_t itype)
2546 {
2547 xfs_agnumber_t agno;
2548 xfs_agblock_t agbno;
2549 xfs_agino_t agino;
2550 int offset;
2551 int rval = 0;
2552
2553 if (ino == 0 || ino == NULLFSINO)
2554 return 1;
2555
2556 agno = XFS_INO_TO_AGNO(mp, ino);
2557 agino = XFS_INO_TO_AGINO(mp, ino);
2558 agbno = XFS_AGINO_TO_AGBNO(mp, agino);
2559 offset = XFS_AGINO_TO_OFFSET(mp, agino);
2560
2561 if (agno >= mp->m_sb.sb_agcount || agbno >= mp->m_sb.sb_agblocks ||
2562 offset >= mp->m_sb.sb_inopblock) {
2563 if (show_warnings)
2564 print_warning("invalid %s inode number (%lld)",
2565 typtab[itype].name, (long long)ino);
2566 return 1;
2567 }
2568
2569 push_cur();
2570 set_cur(&typtab[TYP_INODE], XFS_AGB_TO_DADDR(mp, agno, agbno),
2571 blkbb, DB_RING_IGN, NULL);
2572 if (iocur_top->data == NULL) {
2573 print_warning("cannot read %s inode %lld",
2574 typtab[itype].name, (long long)ino);
2575 rval = !stop_on_read_error;
2576 goto pop_out;
2577 }
2578 off_cur(offset << mp->m_sb.sb_inodelog, mp->m_sb.sb_inodesize);
2579
2580 cur_ino = ino;
2581 rval = process_inode_data(iocur_top->data, itype);
2582 pop_out:
2583 pop_cur();
2584 return rval;
2585 }
2586
2587
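/* Copy the realtime bitmap/summary and quota inodes named in the superblock. */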
2588 static int
2589 copy_sb_inodes(void)
2590 {
2591 if (!copy_ino(mp->m_sb.sb_rbmino, TYP_RTBITMAP))
2592 return 0;
2593
2594 if (!copy_ino(mp->m_sb.sb_rsumino, TYP_RTSUMMARY))
2595 return 0;
2596
2597 if (!copy_ino(mp->m_sb.sb_uquotino, TYP_DQBLK))
2598 return 0;
2599
2600 if (!copy_ino(mp->m_sb.sb_gquotino, TYP_DQBLK))
2601 return 0;
2602
2603 return copy_ino(mp->m_sb.sb_pquotino, TYP_DQBLK);
2604 }
2605
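/*
 * Copy the internal log. When obfuscating or zeroing stale data, a clean
 * log is cleared so no stale contents leak into the dump; a dirty (or
 * undecipherable) log is copied as-is with a warning, since it may contain
 * unobfuscated metadata.
 */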
2606 static int
2607 copy_log(void)
2608 {
2609 struct xlog log;
2610 int dirty;
2611 xfs_daddr_t logstart;
2612 int logblocks;
2613 int logversion;
2614 int cycle = XLOG_INIT_CYCLE;
2615
2616 if (show_progress)
2617 print_progress("Copying log");
2618
2619 push_cur();
2620 set_cur(&typtab[TYP_LOG], XFS_FSB_TO_DADDR(mp, mp->m_sb.sb_logstart),
2621 mp->m_sb.sb_logblocks * blkbb, DB_RING_IGN, NULL);
2622 if (iocur_top->data == NULL) {
2623 pop_cur();
2624 print_warning("cannot read log");
2625 return !stop_on_read_error;
2626 }
2627
2628 /* If not obfuscating or zeroing, just copy the log as it is */
2629 if (!obfuscate && !zero_stale_data)
2630 goto done;
2631
2632 dirty = xlog_is_dirty(mp, &log, &x, 0);
2633
2634 switch (dirty) {
2635 case 0:
2636 /* clear out a clean log */
2637 if (show_progress)
2638 print_progress("Zeroing clean log");
2639
2640 logstart = XFS_FSB_TO_DADDR(mp, mp->m_sb.sb_logstart);
2641 logblocks = XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
2642 logversion = xfs_sb_version_haslogv2(&mp->m_sb) ? 2 : 1;
2643 if (xfs_sb_version_hascrc(&mp->m_sb))
2644 cycle = log.l_curr_cycle + 1;
2645
2646 libxfs_log_clear(NULL, iocur_top->data, logstart, logblocks,
2647 &mp->m_sb.sb_uuid, logversion,
2648 mp->m_sb.sb_logsunit, XLOG_FMT, cycle, true);
2649 break;
2650 case 1:
2651 /* keep the dirty log */
2652 print_warning(
2653 _("Filesystem log is dirty; image will contain unobfuscated metadata in log."));
2654 break;
2655 case -1:
2656 /* log detection error */
2657 print_warning(
2658 _("Could not discern log; image will contain unobfuscated metadata in log."));
2659 break;
2660 }
2661
2662 done:
2663 return !write_buf(iocur_top);
2664 }
2665
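/*
 * The 'metadump' command: parse options, set up the metablock buffer and
 * output file, then walk every AG, the superblock-referenced inodes and the
 * internal log, writing the dump via write_buf()/write_index().
 */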
2666 static int
2667 metadump_f(
2668 int argc,
2669 char **argv)
2670 {
2671 xfs_agnumber_t agno;
2672 int c;
2673 int start_iocur_sp;
2674 char *p;
2675
2676 exitcode = 1;
2677 show_progress = 0;
2678 show_warnings = 0;
2679 stop_on_read_error = 0;
2680
2681 if (mp->m_sb.sb_magicnum != XFS_SB_MAGIC) {
2682 print_warning("bad superblock magic number %x, giving up",
2683 mp->m_sb.sb_magicnum);
2684 return 0;
2685 }
2686
2687 while ((c = getopt(argc, argv, "aegm:ow")) != EOF) {
2688 switch (c) {
2689 case 'a':
2690 zero_stale_data = 0;
2691 break;
2692 case 'e':
2693 stop_on_read_error = 1;
2694 break;
2695 case 'g':
2696 show_progress = 1;
2697 break;
2698 case 'm':
2699 max_extent_size = (int)strtol(optarg, &p, 0);
2700 if (*p != '\0' || max_extent_size <= 0) {
2701 print_warning("bad max extent size %s",
2702 optarg);
2703 return 0;
2704 }
2705 break;
2706 case 'o':
2707 obfuscate = 0;
2708 break;
2709 case 'w':
2710 show_warnings = 1;
2711 break;
2712 default:
2713 print_warning("bad option for metadump command");
2714 return 0;
2715 }
2716 }
2717
2718 if (optind != argc - 1) {
2719 print_warning("too few options for metadump (no filename given)");
2720 return 0;
2721 }
2722
2723 metablock = (xfs_metablock_t *)calloc(BBSIZE + 1, BBSIZE);
2724 if (metablock == NULL) {
2725 print_warning("memory allocation failure");
2726 return 0;
2727 }
2728 metablock->mb_blocklog = BBSHIFT;
2729 metablock->mb_magic = cpu_to_be32(XFS_MD_MAGIC);
2730
2731 block_index = (__be64 *)((char *)metablock + sizeof(xfs_metablock_t));
2732 block_buffer = (char *)metablock + BBSIZE;
2733 num_indices = (BBSIZE - sizeof(xfs_metablock_t)) / sizeof(__be64);
2734
2735 /*
2736 * A metadump block can hold at most num_indices of BBSIZE sectors;
2737 * do not try to dump a filesystem with a sector size which does not
2738 * fit within num_indices (i.e. within a single metablock).
2739 */
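/*
 * (Rough example, assuming an 8-byte metablock header: with 512-byte basic
 * blocks there are 63 index slots, so sector sizes up to 63 * 512 bytes can
 * be dumped; the exact limit depends on sizeof(xfs_metablock_t).)
 */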
2740 if (mp->m_sb.sb_sectsize > num_indices * BBSIZE) {
2741 print_warning("Cannot dump filesystem with sector size %u",
2742 mp->m_sb.sb_sectsize);
2743 free(metablock);
2744 return 0;
2745 }
2746
2747 cur_index = 0;
2748 start_iocur_sp = iocur_sp;
2749
2750 if (strcmp(argv[optind], "-") == 0) {
2751 if (isatty(fileno(stdout))) {
2752 print_warning("cannot write to a terminal");
2753 free(metablock);
2754 return 0;
2755 }
2756 outf = stdout;
2757 } else {
2758 outf = fopen(argv[optind], "wb");
2759 if (outf == NULL) {
2760 print_warning("cannot create dump file");
2761 free(metablock);
2762 return 0;
2763 }
2764 }
2765
2766 exitcode = 0;
2767
2768 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
2769 if (!scan_ag(agno)) {
2770 exitcode = 1;
2771 break;
2772 }
2773 }
2774
2775 /* copy realtime and quota inode contents */
2776 if (!exitcode)
2777 exitcode = !copy_sb_inodes();
2778
2779 /* copy log if it's internal */
2780 if ((mp->m_sb.sb_logstart != 0) && !exitcode)
2781 exitcode = !copy_log();
2782
2783 /* write the remaining index */
2784 if (!exitcode)
2785 exitcode = write_index() < 0;
2786
2787 if (progress_since_warning)
2788 fputc('\n', (outf == stdout) ? stderr : stdout);
2789
2790 if (outf != stdout)
2791 fclose(outf);
2792
2793 /* cleanup iocur stack */
2794 while (iocur_sp > start_iocur_sp)
2795 pop_cur();
2796
2797 free(metablock);
2798
2799 return 0;
2800 }