2 * Copyright (c) 2007, 2011 SGI
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
29 #include "xfs_metadump.h"
/* Default cap, in filesystem blocks, on extent data copied (-m overrides). */
35 #define DEFAULT_MAX_EXT_SIZE 1000
38 * It's possible that multiple files in a directory (or attributes
39 * in a file) produce the same obfuscated name. If that happens, we
40 * try to create another one. After several rounds of this though,
41 * we just give up and leave the original name as-is.
43 #define DUP_MAX 5 /* Max duplicates before we give up */
45 /* copy all metadata structures to/from a file */
47 static int metadump_f(int argc
, char **argv
);
48 static void metadump_help(void);
51 * metadump commands issue info/warnings/errors to standard error as
52 * metadump supports stdout as a destination.
54 * All static functions return zero on failure, while the public functions
55 * return zero on success.
58 static const cmdinfo_t metadump_cmd
=
59 { "metadump", NULL
, metadump_f
, 0, -1, 0,
60 N_("[-a] [-e] [-g] [-m max_extent] [-w] [-o] filename"),
61 N_("dump metadata to a file"), metadump_help
};
63 static FILE *outf
; /* metadump file */
65 static xfs_metablock_t
*metablock
; /* header + index + buffers */
66 static __be64
*block_index
;
67 static char *block_buffer
;
69 static int num_indicies
;
72 static xfs_ino_t cur_ino
;
74 static int show_progress
= 0;
75 static int stop_on_read_error
= 0;
76 static int max_extent_size
= DEFAULT_MAX_EXT_SIZE
;
77 static int obfuscate
= 1;
78 static int zero_stale_data
= 1;
79 static int show_warnings
= 0;
80 static int progress_since_warning
= 0;
85 add_command(&metadump_cmd
);
93 " The 'metadump' command dumps the known metadata to a compact file suitable\n"
94 " for compressing and sending to an XFS maintainer for corruption analysis \n"
95 " or xfs_repair failures.\n\n"
97 " -a -- Copy full metadata blocks without zeroing unused space\n"
98 " -e -- Ignore read errors and keep going\n"
99 " -g -- Display dump progress\n"
100 " -m -- Specify max extent size in blocks to copy (default = %d blocks)\n"
101 " -o -- Don't obfuscate names and extended attributes\n"
102 " -w -- Show warnings of bad metadata information\n"
103 "\n"), DEFAULT_MAX_EXT_SIZE
);
107 print_warning(const char *fmt
, ...)
116 vsnprintf(buf
, sizeof(buf
), fmt
, ap
);
118 buf
[sizeof(buf
)-1] = '\0';
120 fprintf(stderr
, "%s%s: %s\n", progress_since_warning
? "\n" : "",
122 progress_since_warning
= 0;
126 print_progress(const char *fmt
, ...)
136 vsnprintf(buf
, sizeof(buf
), fmt
, ap
);
138 buf
[sizeof(buf
)-1] = '\0';
140 f
= (outf
== stdout
) ? stderr
: stdout
;
141 fprintf(f
, "\r%-59s", buf
);
143 progress_since_warning
= 1;
147 * A complete dump file will have a "zero" entry in the last index block,
148 * even if the dump is exactly aligned, the last index will be full of
149 * zeros. If the last index entry is non-zero, the dump is incomplete.
150 * Correspondingly, the last chunk will have a count < num_indicies.
152 * Return 0 for success, -1 for failure.
159 * write index block and following data blocks (streaming)
161 metablock
->mb_count
= cpu_to_be16(cur_index
);
162 if (fwrite(metablock
, (cur_index
+ 1) << BBSHIFT
, 1, outf
) != 1) {
163 print_warning("error writing to file: %s", strerror(errno
));
167 memset(block_index
, 0, num_indicies
* sizeof(__be64
));
173 * Return 0 for success, -errno for failure.
184 for (i
= 0; i
< len
; i
++, off
++, data
+= BBSIZE
) {
185 block_index
[cur_index
] = cpu_to_be64(off
);
186 memcpy(&block_buffer
[cur_index
<< BBSHIFT
], data
, BBSIZE
);
187 if (++cur_index
== num_indicies
) {
197 * we want to preserve the state of the metadata in the dump - whether it is
198 * intact or corrupt, so even if the buffer has a verifier attached to it we
199 * don't want to run it prior to writing the buffer to the metadump image.
201 * The only reason for running the verifier is to recalculate the CRCs on a
202 * buffer that has been obfuscated. i.e. a buffer than metadump modified itself.
203 * In this case, we only run the verifier if the buffer was not corrupt to begin
204 * with so that we don't accidentally correct buffers with CRC or errors in them
205 * when we are obfuscating them.
211 struct xfs_buf
*bp
= buf
->bp
;
216 * Run the write verifier to recalculate the buffer CRCs and check
217 * metadump didn't introduce a new corruption. Warn if the verifier
218 * failed, but still continue to dump it into the output file.
220 if (buf
->need_crc
&& bp
&& bp
->b_ops
&& !bp
->b_error
) {
221 bp
->b_ops
->verify_write(bp
);
224 "obfuscation corrupted block at %s bno 0x%llx/0x%x",
226 (long long)bp
->b_bn
, bp
->b_bcount
);
230 /* handle discontiguous buffers */
232 ret
= write_buf_segment(buf
->data
, buf
->bb
, buf
->blen
);
237 for (i
= 0; i
< buf
->bbmap
->nmaps
; i
++) {
238 ret
= write_buf_segment(buf
->data
+ BBTOB(len
),
239 buf
->bbmap
->b
[i
].bm_bn
,
240 buf
->bbmap
->b
[i
].bm_len
);
243 len
+= buf
->bbmap
->b
[i
].bm_len
;
246 return seenint() ? -EINTR
: 0;
250 * We could be processing a corrupt block, so we can't trust any of
251 * the offsets or lengths to be within the buffer range. Hence check
256 struct xfs_btree_block
*block
,
262 xfs_inobt_ptr_t
*ipp
;
263 xfs_inobt_key_t
*ikp
;
264 xfs_alloc_ptr_t
*app
;
265 xfs_alloc_key_t
*akp
;
269 nrecs
= be16_to_cpu(block
->bb_numrecs
);
276 if (nrecs
> mp
->m_bmap_dmxr
[1])
279 bkp
= XFS_BMBT_KEY_ADDR(mp
, block
, 1);
280 bpp
= XFS_BMBT_PTR_ADDR(mp
, block
, 1, mp
->m_bmap_dmxr
[1]);
281 zp1
= (char *)&bkp
[nrecs
];
282 zp2
= (char *)&bpp
[nrecs
];
283 key_end
= (char *)bpp
;
287 if (nrecs
> mp
->m_inobt_mxr
[1])
290 ikp
= XFS_INOBT_KEY_ADDR(mp
, block
, 1);
291 ipp
= XFS_INOBT_PTR_ADDR(mp
, block
, 1, mp
->m_inobt_mxr
[1]);
292 zp1
= (char *)&ikp
[nrecs
];
293 zp2
= (char *)&ipp
[nrecs
];
294 key_end
= (char *)ipp
;
298 if (nrecs
> mp
->m_alloc_mxr
[1])
301 akp
= XFS_ALLOC_KEY_ADDR(mp
, block
, 1);
302 app
= XFS_ALLOC_PTR_ADDR(mp
, block
, 1, mp
->m_alloc_mxr
[1]);
303 zp1
= (char *)&akp
[nrecs
];
304 zp2
= (char *)&app
[nrecs
];
305 key_end
= (char *)app
;
312 /* Zero from end of keys to beginning of pointers */
313 memset(zp1
, 0, key_end
- zp1
);
315 /* Zero from end of pointers to end of block */
316 memset(zp2
, 0, (char *)block
+ mp
->m_sb
.sb_blocksize
- zp2
);
320 * We could be processing a corrupt block, so we can't trust any of
321 * the offsets or lengths to be within the buffer range. Hence check
326 struct xfs_btree_block
*block
,
330 struct xfs_bmbt_rec
*brp
;
331 struct xfs_inobt_rec
*irp
;
332 struct xfs_alloc_rec
*arp
;
335 nrecs
= be16_to_cpu(block
->bb_numrecs
);
342 if (nrecs
> mp
->m_bmap_dmxr
[0])
345 brp
= XFS_BMBT_REC_ADDR(mp
, block
, 1);
346 zp
= (char *)&brp
[nrecs
];
350 if (nrecs
> mp
->m_inobt_mxr
[0])
353 irp
= XFS_INOBT_REC_ADDR(mp
, block
, 1);
354 zp
= (char *)&irp
[nrecs
];
358 if (nrecs
> mp
->m_alloc_mxr
[0])
361 arp
= XFS_ALLOC_REC_ADDR(mp
, block
, 1);
362 zp
= (char *)&arp
[nrecs
];
368 /* Zero from end of records to end of block */
369 memset(zp
, 0, (char *)block
+ mp
->m_sb
.sb_blocksize
- zp
);
374 struct xfs_btree_block
*block
,
379 level
= be16_to_cpu(block
->bb_level
);
382 zero_btree_node(block
, btype
);
384 zero_btree_leaf(block
, btype
);
394 int (*func
)(struct xfs_btree_block
*block
,
404 set_cur(&typtab
[btype
], XFS_AGB_TO_DADDR(mp
, agno
, agbno
), blkbb
,
406 if (iocur_top
->data
== NULL
) {
407 print_warning("cannot read %s block %u/%u", typtab
[btype
].name
,
409 rval
= !stop_on_read_error
;
413 if (zero_stale_data
) {
414 zero_btree_block(iocur_top
->data
, btype
);
415 iocur_top
->need_crc
= 1;
418 if (write_buf(iocur_top
))
421 if (!(*func
)(iocur_top
->data
, agno
, agbno
, level
- 1, btype
, arg
))
429 /* free space tree copy routines */
436 if (agno
< (mp
->m_sb
.sb_agcount
- 1) && agbno
> 0 &&
437 agbno
<= mp
->m_sb
.sb_agblocks
)
439 if (agno
== (mp
->m_sb
.sb_agcount
- 1) && agbno
> 0 &&
440 agbno
<= (mp
->m_sb
.sb_dblocks
-
441 (xfs_rfsblock_t
)(mp
->m_sb
.sb_agcount
- 1) *
442 mp
->m_sb
.sb_agblocks
))
451 struct xfs_btree_block
*block
,
465 numrecs
= be16_to_cpu(block
->bb_numrecs
);
466 if (numrecs
> mp
->m_alloc_mxr
[1]) {
468 print_warning("invalid numrecs (%u) in %s block %u/%u",
469 numrecs
, typtab
[btype
].name
, agno
, agbno
);
473 pp
= XFS_ALLOC_PTR_ADDR(mp
, block
, 1, mp
->m_alloc_mxr
[1]);
474 for (i
= 0; i
< numrecs
; i
++) {
475 if (!valid_bno(agno
, be32_to_cpu(pp
[i
]))) {
477 print_warning("invalid block number (%u/%u) "
479 agno
, be32_to_cpu(pp
[i
]),
480 typtab
[btype
].name
, agno
, agbno
);
483 if (!scan_btree(agno
, be32_to_cpu(pp
[i
]), level
, btype
, arg
,
498 root
= be32_to_cpu(agf
->agf_roots
[XFS_BTNUM_BNO
]);
499 levels
= be32_to_cpu(agf
->agf_levels
[XFS_BTNUM_BNO
]);
501 /* validate root and levels before processing the tree */
502 if (root
== 0 || root
> mp
->m_sb
.sb_agblocks
) {
504 print_warning("invalid block number (%u) in bnobt "
505 "root in agf %u", root
, agno
);
508 if (levels
>= XFS_BTREE_MAXLEVELS
) {
510 print_warning("invalid level (%u) in bnobt root "
511 "in agf %u", levels
, agno
);
515 return scan_btree(agno
, root
, levels
, TYP_BNOBT
, agf
, scanfunc_freesp
);
526 root
= be32_to_cpu(agf
->agf_roots
[XFS_BTNUM_CNT
]);
527 levels
= be32_to_cpu(agf
->agf_levels
[XFS_BTNUM_CNT
]);
529 /* validate root and levels before processing the tree */
530 if (root
== 0 || root
> mp
->m_sb
.sb_agblocks
) {
532 print_warning("invalid block number (%u) in cntbt "
533 "root in agf %u", root
, agno
);
536 if (levels
>= XFS_BTREE_MAXLEVELS
) {
538 print_warning("invalid level (%u) in cntbt root "
539 "in agf %u", levels
, agno
);
543 return scan_btree(agno
, root
, levels
, TYP_CNTBT
, agf
, scanfunc_freesp
);
546 /* filename and extended attribute obfuscation routines */
549 struct name_ent
*next
;
552 unsigned char name
[1];
/* Number of hash buckets in nametable[]; entries are chained on hash % NAME_TABLE_SIZE. */
555 #define NAME_TABLE_SIZE 4096
557 static struct name_ent
*nametable
[NAME_TABLE_SIZE
];
560 nametable_clear(void)
563 struct name_ent
*ent
;
565 for (i
= 0; i
< NAME_TABLE_SIZE
; i
++) {
566 while ((ent
= nametable
[i
])) {
567 nametable
[i
] = ent
->next
;
574 * See if the given name is already in the name table. If so,
575 * return a pointer to its entry, otherwise return a null pointer.
577 static struct name_ent
*
578 nametable_find(xfs_dahash_t hash
, int namelen
, unsigned char *name
)
580 struct name_ent
*ent
;
582 for (ent
= nametable
[hash
% NAME_TABLE_SIZE
]; ent
; ent
= ent
->next
) {
583 if (ent
->hash
== hash
&& ent
->namelen
== namelen
&&
584 !memcmp(ent
->name
, name
, namelen
))
591 * Add the given name to the name table. Returns a pointer to the
592 * name's new entry, or a null pointer if an error occurs.
594 static struct name_ent
*
595 nametable_add(xfs_dahash_t hash
, int namelen
, unsigned char *name
)
597 struct name_ent
*ent
;
599 ent
= malloc(sizeof *ent
+ namelen
);
603 ent
->namelen
= namelen
;
604 memcpy(ent
->name
, name
, namelen
);
606 ent
->next
= nametable
[hash
% NAME_TABLE_SIZE
];
608 nametable
[hash
% NAME_TABLE_SIZE
] = ent
;
/* True for bytes that may not appear in a name: the path separator and NUL. */
613 #define is_invalid_char(c) ((c) == '/' || (c) == '\0')
/* Rotate a 32-bit value left by y bits; callers here use y of 3 and 7 only. */
614 #define rol32(x,y) (((x) << (y)) | ((x) >> (32 - (y))))
616 static inline unsigned char
617 random_filename_char(void)
619 static unsigned char filename_alphabet
[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
620 "abcdefghijklmnopqrstuvwxyz"
623 return filename_alphabet
[random() % (sizeof filename_alphabet
- 1)];
/* Name of the orphan directory whose entries are never obfuscated. */
626 #define ORPHANAGE "lost+found"
/* Length of ORPHANAGE excluding the terminating NUL. */
627 #define ORPHANAGE_LEN (sizeof (ORPHANAGE) - 1)
631 struct xfs_mount
*mp
,
636 return dir_ino
== mp
->m_sb
.sb_rootino
&&
637 name_len
== ORPHANAGE_LEN
&&
638 !memcmp(name
, ORPHANAGE
, ORPHANAGE_LEN
);
642 * Determine whether a name is one we shouldn't obfuscate because
643 * it's an orphan (or the "lost+found" directory itself). Note
644 * "cur_ino" is the inode for the directory currently being
647 * Returns 1 if the name should NOT be obfuscated or 0 otherwise.
655 static xfs_ino_t orphanage_ino
= 0;
656 char s
[24]; /* 21 is enough (64 bits in decimal) */
659 /* Record the "lost+found" inode if we haven't done so already */
662 if (!orphanage_ino
&& is_orphanage_dir(mp
, cur_ino
, namelen
, name
))
665 /* We don't obfuscate the "lost+found" directory itself */
667 if (ino
== orphanage_ino
)
670 /* Most files aren't in "lost+found" at all */
672 if (cur_ino
!= orphanage_ino
)
676 * Within "lost+found", we don't obfuscate any file whose
677 * name is the same as its inode number. Any others are
678 * stray files and can be obfuscated.
680 slen
= snprintf(s
, sizeof (s
), "%llu", (unsigned long long) ino
);
682 return slen
== namelen
&& !memcmp(name
, s
, namelen
);
686 * Given a name and its hash value, massage the name in such a way
687 * that the result is another name of equal length which shares the
696 unsigned char *newp
= name
;
698 xfs_dahash_t new_hash
= 0;
699 unsigned char *first
;
700 unsigned char high_bit
;
704 * Our obfuscation algorithm requires at least 5-character
705 * names, so don't bother if the name is too short. We
706 * work backward from a hash value to determine the last
707 * five bytes in a name required to produce a new name
708 * with the same hash.
714 * The beginning of the obfuscated name can be pretty much
715 * anything, so fill it in with random characters.
716 * Accumulate its new hash value as we go.
718 for (i
= 0; i
< name_len
- 5; i
++) {
719 *newp
= random_filename_char();
720 new_hash
= *newp
^ rol32(new_hash
, 7);
725 * Compute which five bytes need to be used at the end of
726 * the name so the hash of the obfuscated name is the same
727 * as the hash of the original. If any result in an invalid
728 * character, flip a bit and arrange for a corresponding bit
729 * in a neighboring byte to be flipped as well. For the
730 * last byte, the "neighbor" to change is the first byte
731 * we're computing here.
733 new_hash
= rol32(new_hash
, 3) ^ hash
;
737 for (shift
= 28; shift
>= 0; shift
-= 7) {
738 *newp
= (new_hash
>> shift
& 0x7f) ^ high_bit
;
739 if (is_invalid_char(*newp
)) {
744 ASSERT(!is_invalid_char(*newp
));
749 * If we flipped a bit on the last byte, we need to fix up
750 * the matching bit in the first byte. The result will
751 * be a valid character, because we know that first byte
752 * has 0's in its upper four bits (it was produced by a
753 * 28-bit right-shift of a 32-bit unsigned value).
757 ASSERT(!is_invalid_char(*first
));
759 ASSERT(libxfs_da_hashname(name
, name_len
) == hash
);
763 * Flip a bit in each of two bytes at the end of the given name.
764 * This is used in generating a series of alternate names to be used
765 * in the event a duplicate is found.
767 * The bits flipped are selected such that they both affect the same
768 * bit in the name's computed hash value, so flipping them both will
771 * The following diagram aims to show the portion of a computed
772 * hash that a given byte of a name affects.
774 * 31 28 24 21 14 8 7 3 0
775 * +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
776 * hash: | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
777 * +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
778 * last-4 ->| |<-- last-2 --->| |<--- last ---->|
779 * |<-- last-3 --->| |<-- last-1 --->| |<- last-4
780 * |<-- last-7 --->| |<-- last-5 --->|
781 * |<-- last-8 --->| |<-- last-6 --->|
784 * The last byte of the name directly affects the low-order byte of
785 * the hash. The next-to-last affects bits 7-14, the next one back
786 * affects bits 14-21, and so on. The effect wraps around when it
787 * goes beyond the top of the hash (as happens for byte last-4).
789 * Bits that are flipped together "overlap" on the hash value. As
790 * an example of overlap, the last two bytes both affect bit 7 in
791 * the hash. That pair of bytes (and their overlapping bits) can be
792 * used for this "flip bit" operation (it's the first pair tried,
795 * A table defines overlapping pairs--the bytes involved and bits
796 * within them--that can be used this way. The byte offset is
797 * relative to a starting point within the name, which will be set
798 * to affect the bytes at the end of the name. The function is
799 * called with a "bitseq" value which indicates which bit flip is
800 * desired, and this translates directly into selecting which entry
801 * in the bit_to_flip[] table to apply.
803 * The function returns 1 if the operation was successful. It
804 * returns 0 if the result produced a character that's not valid in
805 * a name (either '/' or a '\0'). Finally, it returns -1 if the bit
806 * sequence number is beyond what is supported for a name of this
811 * (Also see the discussion above find_alternate(), below.)
813 * In order to make this function work for any length name, the
814 * table is ordered by increasing byte offset, so that the earliest
815 * entries can apply to the shortest strings. This way all names
816 * are done consistently.
818 * When bit flips occur, they can convert printable characters
819 * into non-printable ones. In an effort to reduce the impact of
820 * this, the first bit flips are chosen to affect bytes the end of
821 * the name (and furthermore, toward the low bits of a byte). Those
822 * bytes are often non-printable anyway because of the way they are
823 * initially selected by obfuscate_name()). This is accomplished,
824 * using later table entries first.
826 * Each row in the table doubles the number of alternates that
827 * can be generated. A two-byte name is limited to using only
828 * the first row, so it's possible to generate two alternates
829 * (the original name, plus the alternate produced by flipping
830 * the one pair of bits). In a 5-byte name, the effect of the
831 * first byte overlaps the last by 4 bits, and there are 8 bits
832 * to flip, allowing for 256 possible alternates.
834 * Short names (less than 5 bytes) are never even obfuscated, so for
835 * such names the relatively small number of alternates should never
836 * really be a problem.
838 * Long names (more than 6 bytes, say) are not likely to exhaust
839 * the number of available alternates. In fact, the table could
840 * probably have stopped at 8 entries, on the assumption that 256
841 * alternates should be enough for most any situation. The entries
842 * beyond those are present mostly for demonstration of how it could
843 * be populated with more entries, should it ever be necessary to do
854 unsigned char *p0
, *p1
;
855 unsigned char m0
, m1
;
857 int byte
; /* Offset from start within name */
858 unsigned char bit
; /* Bit within that byte */
859 } bit_to_flip
[][2] = { /* Sorted by second entry's byte */
860 { { 0, 0 }, { 1, 7 } }, /* Each row defines a pair */
861 { { 1, 0 }, { 2, 7 } }, /* of bytes and a bit within */
862 { { 2, 0 }, { 3, 7 } }, /* each byte. Each bit in */
863 { { 0, 4 }, { 4, 0 } }, /* a pair affects the same */
864 { { 0, 5 }, { 4, 1 } }, /* bit in the hash, so flipping */
865 { { 0, 6 }, { 4, 2 } }, /* both will change the name */
866 { { 0, 7 }, { 4, 3 } }, /* while preserving the hash. */
867 { { 3, 0 }, { 4, 7 } },
868 { { 0, 0 }, { 5, 3 } }, /* The first entry's byte offset */
869 { { 0, 1 }, { 5, 4 } }, /* must be less than the second. */
870 { { 0, 2 }, { 5, 5 } },
871 { { 0, 3 }, { 5, 6 } }, /* The table can be extended to */
872 { { 0, 4 }, { 5, 7 } }, /* an arbitrary number of entries */
873 { { 4, 0 }, { 5, 7 } }, /* but there's not much point. */
877 /* Find the first entry *not* usable for name of this length */
879 for (index
= 0; index
< ARRAY_SIZE(bit_to_flip
); index
++)
880 if (bit_to_flip
[index
][1].byte
>= name_len
)
884 * Back up to the last usable entry. If that number is
885 * smaller than the bit sequence number, inform the caller
886 * that nothing this large (or larger) will work.
888 if (bitseq
> --index
)
892 * We will be switching bits at the end of name, with a
893 * preference for affecting the last bytes first. Compute
894 * where in the name we'll start applying the changes.
896 offset
= name_len
- (bit_to_flip
[index
][1].byte
+ 1);
897 index
-= bitseq
; /* Use later table entries first */
899 p0
= name
+ offset
+ bit_to_flip
[index
][0].byte
;
900 p1
= name
+ offset
+ bit_to_flip
[index
][1].byte
;
901 m0
= 1 << bit_to_flip
[index
][0].bit
;
902 m1
= 1 << bit_to_flip
[index
][1].bit
;
904 /* Only change the bytes if it produces valid characters */
906 if (is_invalid_char(*p0
^ m0
) || is_invalid_char(*p1
^ m1
))
916 * This function generates a well-defined sequence of "alternate"
917 * names for a given name. An alternate is a name having the same
918 * length and same hash value as the original name. This is needed
919 * because the algorithm produces only one obfuscated name to use
920 * for a given original name, and it's possible that result matches
921 * a name already seen. This function checks for this, and if it
922 * occurs, finds another suitable obfuscated name to use.
924 * Each bit in the binary representation of the sequence number is
925 * used to select one possible "bit flip" operation to perform on
926 * the name. So for example:
927 * seq = 0: selects no bits to flip
928 * seq = 1: selects the 0th bit to flip
929 * seq = 2: selects the 1st bit to flip
930 * seq = 3: selects the 0th and 1st bit to flip
933 * The flip_bit() function takes care of the details of the bit
934 * flipping within the name. Note that the "1st bit" in this
935 * context is a bit sequence number; i.e. it doesn't necessarily
936 * mean bit 0x02 will be changed.
938 * If a valid name (one that contains no '/' or '\0' characters) is
939 * produced by this process for the given sequence number, this
940 * function returns 1. If the result is not valid, it returns 0.
941 * Returns -1 if the sequence number is beyond the maximum for
942 * names of the given length.
947 * The number of alternates available for a given name is dependent
948 * on its length. A "bit flip" involves inverting two bits in
949 * a name--the two bits being selected such that their values
950 * affect the name's hash value in the same way. Alternates are
951 * thus generated by inverting the value of pairs of such
952 * "overlapping" bits in the original name. Each byte after the
953 * first in a name adds at least one bit of overlap to work with.
954 * (See comments above flip_bit() for more discussion on this.)
956 * So the number of alternates is dependent on the number of such
957 * overlapping bits in a name. If there are N bit overlaps, there
958 * 2^N alternates for that hash value.
960 * Here are the number of overlapping bits available for generating
961 * alternates for names of specific lengths:
962 * 1 0 (must have 2 bytes to have any overlap)
963 * 2 1 One bit overlaps--so 2 possible alternates
964 * 3 2 Two bits overlap--so 4 possible alternates
965 * 4 4 Three bits overlap, so 2^3 alternates
966 * 5 8 8 bits overlap (due to wrapping), 256 alternates
967 * 6 18 2^18 alternates
968 * 7 28 2^28 alternates
970 * It's clear that the number of alternates grows very quickly with
971 * the length of the name. But note that the set of alternates
972 * includes invalid names. And for certain (contrived) names, the
973 * number of valid names is a fairly small fraction of the total
974 * number of alternates.
976 * The main driver for this infrastructure for coming up with
977 * alternate names is really related to names 5 (or possibly 6)
978 * bytes in length. 5-byte obfuscated names contain no randomly-
979 * generated bytes in them, and the chance of an obfuscated name
980 * matching an already-seen name is too high to just ignore. This
981 * methodical selection of alternates ensures we don't produce
982 * duplicate names unless we have exhausted our options.
994 return 1; /* alternate 0 is the original name */
995 if (name_len
< 2) /* Must have 2 bytes to flip */
998 for (bitseq
= 0; bits
; bitseq
++) {
999 uint32_t mask
= 1 << bitseq
;
1005 fb
= flip_bit(name_len
, name
, bitseq
);
1015 * Look up the given name in the name table. If it is already
1016 * present, iterate through a well-defined sequence of alternate
1017 * names and attempt to use an alternate name instead.
1019 * Returns 1 if the (possibly modified) name is not present in the
1020 * name table. Returns 0 if the name and all possible alternates
1021 * are already in the table.
1024 handle_duplicate_name(xfs_dahash_t hash
, size_t name_len
, unsigned char *name
)
1026 unsigned char new_name
[name_len
+ 1];
1029 if (!nametable_find(hash
, name_len
, name
))
1030 return 1; /* No duplicate */
1032 /* Name is already in use. Need to find an alternate. */
1037 /* Only change incoming name if we find an alternate */
1039 memcpy(new_name
, name
, name_len
);
1040 found
= find_alternate(name_len
, new_name
, seq
++);
1042 return 0; /* No more to check */
1044 } while (nametable_find(hash
, name_len
, new_name
));
1047 * The alternate wasn't in the table already. Pass it back
1050 memcpy(name
, new_name
, name_len
);
1056 generate_obfuscated_name(
1059 unsigned char *name
)
1064 * We don't obfuscate "lost+found" or any orphan files
1065 * therein. When the name table is used for extended
1066 * attributes, the inode number provided is 0, in which
1067 * case we don't need to make this check.
1069 if (ino
&& in_lost_found(ino
, namelen
, name
))
1073 * If the name starts with a slash, just skip over it. It
1074 * isn't included in the hash and we don't record it in the
1075 * name table. Note that the namelen value passed in does
1076 * not count the leading slash (if one is present).
1081 /* Obfuscate the name (if possible) */
1083 hash
= libxfs_da_hashname(name
, namelen
);
1084 obfuscate_name(hash
, namelen
, name
);
1087 * Make sure the name is not something already seen. If we
1088 * fail to find a suitable alternate, we're dealing with a
1089 * very pathological situation, and we may end up creating
1090 * a duplicate name in the metadump, so issue a warning.
1092 if (!handle_duplicate_name(hash
, namelen
, name
)) {
1093 print_warning("duplicate name for inode %llu "
1094 "in dir inode %llu\n",
1095 (unsigned long long) ino
,
1096 (unsigned long long) cur_ino
);
1100 /* Create an entry for the new name in the name table. */
1102 if (!nametable_add(hash
, namelen
, name
))
1103 print_warning("unable to record name for inode %llu "
1104 "in dir inode %llu\n",
1105 (unsigned long long) ino
,
1106 (unsigned long long) cur_ino
);
1113 struct xfs_dir2_sf_hdr
*sfp
;
1114 xfs_dir2_sf_entry_t
*sfep
;
1115 __uint64_t ino_dir_size
;
1118 sfp
= (struct xfs_dir2_sf_hdr
*)XFS_DFORK_DPTR(dip
);
1119 ino_dir_size
= be64_to_cpu(dip
->di_size
);
1120 if (ino_dir_size
> XFS_DFORK_DSIZE(dip
, mp
)) {
1121 ino_dir_size
= XFS_DFORK_DSIZE(dip
, mp
);
1123 print_warning("invalid size in dir inode %llu",
1124 (long long)cur_ino
);
1127 sfep
= xfs_dir2_sf_firstentry(sfp
);
1128 for (i
= 0; (i
< sfp
->count
) &&
1129 ((char *)sfep
- (char *)sfp
< ino_dir_size
); i
++) {
1132 * first check for bad name lengths. If they are bad, we
1133 * have limitations to how much can be obfuscated.
1135 int namelen
= sfep
->namelen
;
1139 print_warning("zero length entry in dir inode "
1140 "%llu", (long long)cur_ino
);
1141 if (i
!= sfp
->count
- 1)
1143 namelen
= ino_dir_size
- ((char *)&sfep
->name
[0] -
1145 } else if ((char *)sfep
- (char *)sfp
+
1146 M_DIROPS(mp
)->sf_entsize(sfp
, sfep
->namelen
) >
1149 print_warning("entry length in dir inode %llu "
1150 "overflows space", (long long)cur_ino
);
1151 if (i
!= sfp
->count
- 1)
1153 namelen
= ino_dir_size
- ((char *)&sfep
->name
[0] -
1158 generate_obfuscated_name(
1159 M_DIROPS(mp
)->sf_get_ino(sfp
, sfep
),
1160 namelen
, &sfep
->name
[0]);
1162 sfep
= (xfs_dir2_sf_entry_t
*)((char *)sfep
+
1163 M_DIROPS(mp
)->sf_entsize(sfp
, namelen
));
1166 /* zero stale data in rest of space in data fork, if any */
1167 if (zero_stale_data
&& (ino_dir_size
< XFS_DFORK_DSIZE(dip
, mp
)))
1168 memset(sfep
, 0, XFS_DFORK_DSIZE(dip
, mp
) - ino_dir_size
);
1172 * The pathname may not be null terminated. It may be terminated by the end of
1173 * a buffer or inode literal area, and the start of the next region contains
1174 * unknown data. Therefore, when we get to the last component of the symlink, we
1175 * cannot assume that strlen() will give us the right result. Hence we need to
1176 * track the remaining pathname length and use that instead.
1179 obfuscate_path_components(
1183 unsigned char *comp
= (unsigned char *)buf
;
1184 unsigned char *end
= comp
+ len
;
1187 while (comp
< end
) {
1191 /* find slash at end of this component */
1192 slash
= strchr((char *)comp
, '/');
1194 /* last (or single) component */
1195 namelen
= strnlen((char *)comp
, len
);
1196 hash
= libxfs_da_hashname(comp
, namelen
);
1197 obfuscate_name(hash
, namelen
, comp
);
1200 namelen
= slash
- (char *)comp
;
1201 /* handle leading or consecutive slashes */
1207 hash
= libxfs_da_hashname(comp
, namelen
);
1208 obfuscate_name(hash
, namelen
, comp
);
1209 comp
+= namelen
+ 1;
1221 len
= be64_to_cpu(dip
->di_size
);
1222 if (len
> XFS_DFORK_DSIZE(dip
, mp
)) {
1224 print_warning("invalid size (%d) in symlink inode %llu",
1225 len
, (long long)cur_ino
);
1226 len
= XFS_DFORK_DSIZE(dip
, mp
);
1229 buf
= (char *)XFS_DFORK_DPTR(dip
);
1231 obfuscate_path_components(buf
, len
);
1233 /* zero stale data in rest of space in data fork, if any */
1234 if (zero_stale_data
&& len
< XFS_DFORK_DSIZE(dip
, mp
))
1235 memset(&buf
[len
], 0, XFS_DFORK_DSIZE(dip
, mp
) - len
);
1243 * with extended attributes, obfuscate the names and fill the actual
1244 * values with 'v' (to see a valid string length, as opposed to NULLs)
1247 xfs_attr_shortform_t
*asfp
;
1248 xfs_attr_sf_entry_t
*asfep
;
1252 asfp
= (xfs_attr_shortform_t
*)XFS_DFORK_APTR(dip
);
1253 if (asfp
->hdr
.count
== 0)
1256 ino_attr_size
= be16_to_cpu(asfp
->hdr
.totsize
);
1257 if (ino_attr_size
> XFS_DFORK_ASIZE(dip
, mp
)) {
1258 ino_attr_size
= XFS_DFORK_ASIZE(dip
, mp
);
1260 print_warning("invalid attr size in inode %llu",
1261 (long long)cur_ino
);
1264 asfep
= &asfp
->list
[0];
1265 for (i
= 0; (i
< asfp
->hdr
.count
) &&
1266 ((char *)asfep
- (char *)asfp
< ino_attr_size
); i
++) {
1268 int namelen
= asfep
->namelen
;
1272 print_warning("zero length attr entry in inode "
1273 "%llu", (long long)cur_ino
);
1275 } else if ((char *)asfep
- (char *)asfp
+
1276 XFS_ATTR_SF_ENTSIZE(asfep
) > ino_attr_size
) {
1278 print_warning("attr entry length in inode %llu "
1279 "overflows space", (long long)cur_ino
);
1284 generate_obfuscated_name(0, asfep
->namelen
,
1285 &asfep
->nameval
[0]);
1286 memset(&asfep
->nameval
[asfep
->namelen
], 'v',
1290 asfep
= (xfs_attr_sf_entry_t
*)((char *)asfep
+
1291 XFS_ATTR_SF_ENTSIZE(asfep
));
1294 /* zero stale data in rest of space in attr fork, if any */
1295 if (zero_stale_data
&& (ino_attr_size
< XFS_DFORK_ASIZE(dip
, mp
)))
1296 memset(asfep
, 0, XFS_DFORK_ASIZE(dip
, mp
) - ino_attr_size
);
1300 process_dir_data_block(
1302 xfs_fileoff_t offset
,
1303 int is_block_format
)
1306 * we have to rely on the fileoffset and signature of the block to
1307 * handle its contents. If it's invalid, leave it alone.
1308 * for multi-fsblock dir blocks, if a name crosses an extent boundary,
1309 * ignore it and continue.
1316 struct xfs_dir2_data_hdr
*datahdr
;
1318 datahdr
= (struct xfs_dir2_data_hdr
*)block
;
1320 if (is_block_format
) {
1321 xfs_dir2_leaf_entry_t
*blp
;
1322 xfs_dir2_block_tail_t
*btp
;
1324 btp
= xfs_dir2_block_tail_p(mp
->m_dir_geo
, datahdr
);
1325 blp
= xfs_dir2_block_leaf_p(btp
);
1326 if ((char *)blp
> (char *)btp
)
1327 blp
= (xfs_dir2_leaf_entry_t
*)btp
;
1329 end_of_data
= (char *)blp
- block
;
1330 if (xfs_sb_version_hascrc(&mp
->m_sb
))
1331 wantmagic
= XFS_DIR3_BLOCK_MAGIC
;
1333 wantmagic
= XFS_DIR2_BLOCK_MAGIC
;
1334 } else { /* leaf/node format */
1335 end_of_data
= mp
->m_dir_geo
->fsbcount
<< mp
->m_sb
.sb_blocklog
;
1336 if (xfs_sb_version_hascrc(&mp
->m_sb
))
1337 wantmagic
= XFS_DIR3_DATA_MAGIC
;
1339 wantmagic
= XFS_DIR2_DATA_MAGIC
;
1342 if (be32_to_cpu(datahdr
->magic
) != wantmagic
) {
1345 "invalid magic in dir inode %llu block %ld",
1346 (long long)cur_ino
, (long)offset
);
1350 dir_offset
= M_DIROPS(mp
)->data_entry_offset
;
1351 ptr
= block
+ dir_offset
;
1352 endptr
= block
+ mp
->m_dir_geo
->blksize
;
1354 while (ptr
< endptr
&& dir_offset
< end_of_data
) {
1355 xfs_dir2_data_entry_t
*dep
;
1356 xfs_dir2_data_unused_t
*dup
;
1359 dup
= (xfs_dir2_data_unused_t
*)ptr
;
1361 if (be16_to_cpu(dup
->freetag
) == XFS_DIR2_DATA_FREE_TAG
) {
1362 int length
= be16_to_cpu(dup
->length
);
1363 if (dir_offset
+ length
> end_of_data
||
1364 !length
|| (length
& (XFS_DIR2_DATA_ALIGN
- 1))) {
1367 "invalid length for dir free space in inode %llu",
1368 (long long)cur_ino
);
1371 if (be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup
)) !=
1374 dir_offset
+= length
;
1377 * Zero the unused space up to the tag - the tag is
1378 * actually at a variable offset, so zeroing &dup->tag
1379 * is zeroing the free space in between
1381 if (zero_stale_data
) {
1383 sizeof(xfs_dir2_data_unused_t
);
1386 memset(&dup
->tag
, 0, zlen
);
1387 iocur_top
->need_crc
= 1;
1390 if (dir_offset
>= end_of_data
|| ptr
>= endptr
)
1394 dep
= (xfs_dir2_data_entry_t
*)ptr
;
1395 length
= M_DIROPS(mp
)->data_entsize(dep
->namelen
);
1397 if (dir_offset
+ length
> end_of_data
||
1398 ptr
+ length
> endptr
) {
1401 "invalid length for dir entry name in inode %llu",
1402 (long long)cur_ino
);
1405 if (be16_to_cpu(*M_DIROPS(mp
)->data_entry_tag_p(dep
)) !=
1410 generate_obfuscated_name(be64_to_cpu(dep
->inumber
),
1411 dep
->namelen
, &dep
->name
[0]);
1412 dir_offset
+= length
;
1414 /* Zero the unused space after name, up to the tag */
1415 if (zero_stale_data
) {
1416 /* 1 byte for ftype; don't bother with conditional */
1418 (char *)M_DIROPS(mp
)->data_entry_tag_p(dep
) -
1419 (char *)&dep
->name
[dep
->namelen
] - 1;
1421 memset(&dep
->name
[dep
->namelen
] + 1, 0, zlen
);
1422 iocur_top
->need_crc
= 1;
1429 process_symlink_block(
1434 if (xfs_sb_version_hascrc(&(mp
)->m_sb
))
1435 link
+= sizeof(struct xfs_dsymlink_hdr
);
1438 obfuscate_path_components(link
, XFS_SYMLINK_BUF_SPACE(mp
,
1439 mp
->m_sb
.sb_blocksize
));
1440 if (zero_stale_data
) {
1441 size_t linklen
, zlen
;
1443 linklen
= strlen(link
);
1444 zlen
= mp
->m_sb
.sb_blocksize
- linklen
;
1445 if (xfs_sb_version_hascrc(&mp
->m_sb
))
1446 zlen
-= sizeof(struct xfs_dsymlink_hdr
);
1447 if (zlen
< mp
->m_sb
.sb_blocksize
)
1448 memset(link
+ linklen
, 0, zlen
);
1452 #define MAX_REMOTE_VALS 4095
1454 static struct attr_data_s
{
1455 int remote_val_count
;
1456 xfs_dablk_t remote_vals
[MAX_REMOTE_VALS
];
1461 xfs_dablk_t blockidx
,
1464 while (length
> 0 && attr_data
.remote_val_count
< MAX_REMOTE_VALS
) {
1465 attr_data
.remote_vals
[attr_data
.remote_val_count
] = blockidx
;
1466 attr_data
.remote_val_count
++;
1468 length
-= mp
->m_sb
.sb_blocksize
;
1471 if (attr_data
.remote_val_count
>= MAX_REMOTE_VALS
) {
1473 "Overflowed attr obfuscation array. No longer obfuscating remote attrs.");
1477 /* Handle remote and leaf attributes */
1481 xfs_fileoff_t offset
)
1483 struct xfs_attr_leafblock
*leaf
;
1484 struct xfs_attr3_icleaf_hdr hdr
;
1487 xfs_attr_leaf_entry_t
*entry
;
1488 xfs_attr_leaf_name_local_t
*local
;
1489 xfs_attr_leaf_name_remote_t
*remote
;
1490 __uint32_t bs
= mp
->m_sb
.sb_blocksize
;
1494 leaf
= (xfs_attr_leafblock_t
*)block
;
1496 /* Remote attributes - attr3 has XFS_ATTR3_RMT_MAGIC, attr has none */
1497 if ((be16_to_cpu(leaf
->hdr
.info
.magic
) != XFS_ATTR_LEAF_MAGIC
) &&
1498 (be16_to_cpu(leaf
->hdr
.info
.magic
) != XFS_ATTR3_LEAF_MAGIC
)) {
1499 for (i
= 0; i
< attr_data
.remote_val_count
; i
++) {
1500 if (obfuscate
&& attr_data
.remote_vals
[i
] == offset
)
1501 /* Macros to handle both attr and attr3 */
1503 (bs
- XFS_ATTR3_RMT_BUF_SPACE(mp
, bs
)),
1504 'v', XFS_ATTR3_RMT_BUF_SPACE(mp
, bs
));
1509 /* Ok, it's a leaf - get header; accounts for crc & non-crc */
1510 xfs_attr3_leaf_hdr_from_disk(mp
->m_attr_geo
, &hdr
, leaf
);
1512 nentries
= hdr
.count
;
1513 if (nentries
* sizeof(xfs_attr_leaf_entry_t
) +
1514 xfs_attr3_leaf_hdr_size(leaf
) >
1515 XFS_ATTR3_RMT_BUF_SPACE(mp
, bs
)) {
1517 print_warning("invalid attr count in inode %llu",
1518 (long long)cur_ino
);
1522 entry
= xfs_attr3_leaf_entryp(leaf
);
1523 /* We will move this as we parse */
1525 for (i
= 0; i
< nentries
; i
++, entry
++) {
1526 int nlen
, vlen
, zlen
;
1528 /* Grows up; if this name is topmost, move first_name */
1529 if (!first_name
|| xfs_attr3_leaf_name(leaf
, i
) < first_name
)
1530 first_name
= xfs_attr3_leaf_name(leaf
, i
);
1532 if (be16_to_cpu(entry
->nameidx
) > mp
->m_sb
.sb_blocksize
) {
1535 "invalid attr nameidx in inode %llu",
1536 (long long)cur_ino
);
1539 if (entry
->flags
& XFS_ATTR_LOCAL
) {
1540 local
= xfs_attr3_leaf_name_local(leaf
, i
);
1541 if (local
->namelen
== 0) {
1544 "zero length for attr name in inode %llu",
1545 (long long)cur_ino
);
1549 generate_obfuscated_name(0, local
->namelen
,
1550 &local
->nameval
[0]);
1551 memset(&local
->nameval
[local
->namelen
], 'v',
1552 be16_to_cpu(local
->valuelen
));
1554 /* zero from end of nameval[] to next name start */
1555 nlen
= local
->namelen
;
1556 vlen
= be16_to_cpu(local
->valuelen
);
1557 zlen
= xfs_attr_leaf_entsize_local(nlen
, vlen
) -
1558 (sizeof(xfs_attr_leaf_name_local_t
) - 1 +
1560 if (zero_stale_data
)
1561 memset(&local
->nameval
[nlen
+ vlen
], 0, zlen
);
1563 remote
= xfs_attr3_leaf_name_remote(leaf
, i
);
1564 if (remote
->namelen
== 0 || remote
->valueblk
== 0) {
1567 "invalid attr entry in inode %llu",
1568 (long long)cur_ino
);
1572 generate_obfuscated_name(0, remote
->namelen
,
1574 add_remote_vals(be32_to_cpu(remote
->valueblk
),
1575 be32_to_cpu(remote
->valuelen
));
1577 /* zero from end of name[] to next name start */
1578 nlen
= remote
->namelen
;
1579 zlen
= xfs_attr_leaf_entsize_remote(nlen
) -
1580 (sizeof(xfs_attr_leaf_name_remote_t
) - 1 +
1582 if (zero_stale_data
)
1583 memset(&remote
->name
[nlen
], 0, zlen
);
1587 /* Zero from end of entries array to the first name/val */
1588 if (zero_stale_data
) {
1589 struct xfs_attr_leaf_entry
*entries
;
1591 entries
= xfs_attr3_leaf_entryp(leaf
);
1592 memset(&entries
[nentries
], 0,
1593 first_name
- (char *)&entries
[nentries
]);
1597 /* Processes symlinks, attrs, directories ... */
1599 process_single_fsb_objects(
1610 for (i
= 0; i
< c
; i
++) {
1612 set_cur(&typtab
[btype
], XFS_FSB_TO_DADDR(mp
, s
), blkbb
,
1615 if (!iocur_top
->data
) {
1616 xfs_agnumber_t agno
= XFS_FSB_TO_AGNO(mp
, s
);
1617 xfs_agblock_t agbno
= XFS_FSB_TO_AGBNO(mp
, s
);
1619 print_warning("cannot read %s block %u/%u (%llu)",
1620 typtab
[btype
].name
, agno
, agbno
, s
);
1621 if (stop_on_read_error
)
1627 if (!obfuscate
&& !zero_stale_data
)
1630 /* Zero unused part of interior nodes */
1631 if (zero_stale_data
) {
1632 xfs_da_intnode_t
*node
= iocur_top
->data
;
1633 int magic
= be16_to_cpu(node
->hdr
.info
.magic
);
1635 if (magic
== XFS_DA_NODE_MAGIC
||
1636 magic
== XFS_DA3_NODE_MAGIC
) {
1637 struct xfs_da3_icnode_hdr hdr
;
1640 M_DIROPS(mp
)->node_hdr_from_disk(&hdr
, node
);
1641 used
= M_DIROPS(mp
)->node_hdr_size
;
1644 * sizeof(struct xfs_da_node_entry
);
1646 if (used
< mp
->m_sb
.sb_blocksize
) {
1647 memset((char *)node
+ used
, 0,
1648 mp
->m_sb
.sb_blocksize
- used
);
1649 iocur_top
->need_crc
= 1;
1654 /* Handle leaf nodes */
1655 dp
= iocur_top
->data
;
1658 if (o
>= mp
->m_dir_geo
->leafblk
)
1661 process_dir_data_block(dp
, o
,
1662 last
== mp
->m_dir_geo
->fsbcount
);
1663 iocur_top
->need_crc
= 1;
1666 process_symlink_block(dp
);
1667 iocur_top
->need_crc
= 1;
1670 process_attr_block(dp
, o
);
1671 iocur_top
->need_crc
= 1;
1678 ret
= write_buf(iocur_top
);
1691 * Static map to aggregate multiple extents into a single directory block.
1693 static struct bbmap mfsb_map
;
1694 static int mfsb_length
;
1697 process_multi_fsb_objects(
1710 print_warning("bad type for multi-fsb object %d", btype
);
1715 unsigned int bm_len
;
1717 if (mfsb_length
+ c
>= mp
->m_dir_geo
->fsbcount
) {
1718 bm_len
= mp
->m_dir_geo
->fsbcount
- mfsb_length
;
1725 mfsb_map
.b
[mfsb_map
.nmaps
].bm_bn
= XFS_FSB_TO_DADDR(mp
, s
);
1726 mfsb_map
.b
[mfsb_map
.nmaps
].bm_len
= XFS_FSB_TO_BB(mp
, bm_len
);
1729 if (mfsb_length
== 0) {
1731 set_cur(&typtab
[btype
], 0, 0, DB_RING_IGN
, &mfsb_map
);
1732 if (!iocur_top
->data
) {
1733 xfs_agnumber_t agno
= XFS_FSB_TO_AGNO(mp
, s
);
1734 xfs_agblock_t agbno
= XFS_FSB_TO_AGBNO(mp
, s
);
1736 print_warning("cannot read %s block %u/%u (%llu)",
1737 typtab
[btype
].name
, agno
, agbno
, s
);
1738 if (stop_on_read_error
)
1744 if ((!obfuscate
&& !zero_stale_data
) ||
1745 o
>= mp
->m_dir_geo
->leafblk
) {
1746 ret
= write_buf(iocur_top
);
1750 process_dir_data_block(iocur_top
->data
, o
,
1751 last
== mp
->m_dir_geo
->fsbcount
);
1752 iocur_top
->need_crc
= 1;
1753 ret
= write_buf(iocur_top
);
1767 /* inode copy routines */
1769 process_bmbt_reclist(
1775 xfs_fileoff_t o
, op
= NULLFILEOFF
;
1777 xfs_filblks_t c
, cp
= NULLFILEOFF
;
1780 xfs_agnumber_t agno
;
1781 xfs_agblock_t agbno
;
1784 if (btype
== TYP_DATA
)
1787 convert_extent(&rp
[numrecs
- 1], &o
, &s
, &c
, &f
);
1790 for (i
= 0; i
< numrecs
; i
++, rp
++) {
1791 convert_extent(rp
, &o
, &s
, &c
, &f
);
1794 * ignore extents that are clearly bogus, and if a bogus
1795 * one is found, stop processing remaining extents
1797 if (i
> 0 && op
+ cp
> o
) {
1799 print_warning("bmap extent %d in %s ino %llu "
1800 "starts at %llu, previous extent "
1802 typtab
[btype
].name
, (long long)cur_ino
,
1807 if (c
> max_extent_size
) {
1809 * since we are only processing non-data extents,
1810 * large numbers of blocks in a metadata extent is
1811 * extremely rare and more than likely to be corrupt.
1814 print_warning("suspicious count %u in bmap "
1815 "extent %d in %s ino %llu", c
, i
,
1816 typtab
[btype
].name
, (long long)cur_ino
);
1823 agno
= XFS_FSB_TO_AGNO(mp
, s
);
1824 agbno
= XFS_FSB_TO_AGBNO(mp
, s
);
1826 if (!valid_bno(agno
, agbno
)) {
1828 print_warning("invalid block number %u/%u "
1829 "(%llu) in bmap extent %d in %s ino "
1830 "%llu", agno
, agbno
, s
, i
,
1831 typtab
[btype
].name
, (long long)cur_ino
);
1835 if (!valid_bno(agno
, agbno
+ c
- 1)) {
1837 print_warning("bmap extent %i in %s inode %llu "
1838 "overflows AG (end is %u/%u)", i
,
1839 typtab
[btype
].name
, (long long)cur_ino
,
1840 agno
, agbno
+ c
- 1);
1844 /* multi-extent blocks require special handling */
1845 if (btype
!= TYP_DIR2
|| mp
->m_dir_geo
->fsbcount
== 1) {
1846 error
= process_single_fsb_objects(o
, s
, c
, btype
, last
);
1848 error
= process_multi_fsb_objects(o
, s
, c
, btype
, last
);
1859 struct xfs_btree_block
*block
,
1860 xfs_agnumber_t agno
,
1861 xfs_agblock_t agbno
,
1864 void *arg
) /* ptr to itype */
1870 nrecs
= be16_to_cpu(block
->bb_numrecs
);
1873 if (nrecs
> mp
->m_bmap_dmxr
[0]) {
1875 print_warning("invalid numrecs (%u) in %s "
1876 "block %u/%u", nrecs
,
1877 typtab
[btype
].name
, agno
, agbno
);
1880 return process_bmbt_reclist(XFS_BMBT_REC_ADDR(mp
, block
, 1),
1881 nrecs
, *(typnm_t
*)arg
);
1884 if (nrecs
> mp
->m_bmap_dmxr
[1]) {
1886 print_warning("invalid numrecs (%u) in %s block %u/%u",
1887 nrecs
, typtab
[btype
].name
, agno
, agbno
);
1890 pp
= XFS_BMBT_PTR_ADDR(mp
, block
, 1, mp
->m_bmap_dmxr
[1]);
1891 for (i
= 0; i
< nrecs
; i
++) {
1895 ag
= XFS_FSB_TO_AGNO(mp
, get_unaligned_be64(&pp
[i
]));
1896 bno
= XFS_FSB_TO_AGBNO(mp
, get_unaligned_be64(&pp
[i
]));
1898 if (bno
== 0 || bno
> mp
->m_sb
.sb_agblocks
||
1899 ag
> mp
->m_sb
.sb_agcount
) {
1901 print_warning("invalid block number (%u/%u) "
1902 "in %s block %u/%u", ag
, bno
,
1903 typtab
[btype
].name
, agno
, agbno
);
1907 if (!scan_btree(ag
, bno
, level
, btype
, arg
, scanfunc_bmap
))
1918 xfs_bmdr_block_t
*dib
;
1927 whichfork
= (itype
== TYP_ATTR
) ? XFS_ATTR_FORK
: XFS_DATA_FORK
;
1928 btype
= (itype
== TYP_ATTR
) ? TYP_BMAPBTA
: TYP_BMAPBTD
;
1930 dib
= (xfs_bmdr_block_t
*)XFS_DFORK_PTR(dip
, whichfork
);
1931 level
= be16_to_cpu(dib
->bb_level
);
1932 nrecs
= be16_to_cpu(dib
->bb_numrecs
);
1934 if (level
> XFS_BM_MAXLEVELS(mp
, whichfork
)) {
1936 print_warning("invalid level (%u) in inode %lld %s "
1937 "root", level
, (long long)cur_ino
,
1938 typtab
[btype
].name
);
1943 return process_bmbt_reclist(XFS_BMDR_REC_ADDR(dib
, 1),
1947 maxrecs
= xfs_bmdr_maxrecs(XFS_DFORK_SIZE(dip
, mp
, whichfork
), 0);
1948 if (nrecs
> maxrecs
) {
1950 print_warning("invalid numrecs (%u) in inode %lld %s "
1951 "root", nrecs
, (long long)cur_ino
,
1952 typtab
[btype
].name
);
1956 pp
= XFS_BMDR_PTR_ADDR(dib
, 1, maxrecs
);
1957 for (i
= 0; i
< nrecs
; i
++) {
1961 ag
= XFS_FSB_TO_AGNO(mp
, get_unaligned_be64(&pp
[i
]));
1962 bno
= XFS_FSB_TO_AGBNO(mp
, get_unaligned_be64(&pp
[i
]));
1964 if (bno
== 0 || bno
> mp
->m_sb
.sb_agblocks
||
1965 ag
> mp
->m_sb
.sb_agcount
) {
1967 print_warning("invalid block number (%u/%u) "
1968 "in inode %llu %s root", ag
,
1969 bno
, (long long)cur_ino
,
1970 typtab
[btype
].name
);
1974 if (!scan_btree(ag
, bno
, level
, btype
, &itype
, scanfunc_bmap
))
1989 whichfork
= (itype
== TYP_ATTR
) ? XFS_ATTR_FORK
: XFS_DATA_FORK
;
1991 nex
= XFS_DFORK_NEXTENTS(dip
, whichfork
);
1992 used
= nex
* sizeof(xfs_bmbt_rec_t
);
1993 if (nex
< 0 || used
> XFS_DFORK_SIZE(dip
, mp
, whichfork
)) {
1995 print_warning("bad number of extents %d in inode %lld",
1996 nex
, (long long)cur_ino
);
2000 /* Zero unused data fork past used extents */
2001 if (zero_stale_data
&& (used
< XFS_DFORK_SIZE(dip
, mp
, whichfork
)))
2002 memset(XFS_DFORK_PTR(dip
, whichfork
) + used
, 0,
2003 XFS_DFORK_SIZE(dip
, mp
, whichfork
) - used
);
2006 return process_bmbt_reclist((xfs_bmbt_rec_t
*)XFS_DFORK_PTR(dip
,
2007 whichfork
), nex
, itype
);
2015 switch (dip
->di_format
) {
2016 case XFS_DINODE_FMT_LOCAL
:
2017 if (obfuscate
|| zero_stale_data
)
2020 process_sf_dir(dip
);
2024 process_sf_symlink(dip
);
2031 case XFS_DINODE_FMT_EXTENTS
:
2032 return process_exinode(dip
, itype
);
2034 case XFS_DINODE_FMT_BTREE
:
2035 return process_btinode(dip
, itype
);
2041 * when we process the inode, we may change the data in the data and/or
2042 * attribute fork if they are in short form and we are obfuscating names.
2043 * In this case we need to recalculate the CRC of the inode, but we should
2044 * only do that if the CRC in the inode is good to begin with. If the crc
2045 * is not ok, we just leave it alone.
2049 xfs_agnumber_t agno
,
2055 bool crc_was_ok
= false; /* no recalc by default */
2056 bool need_new_crc
= false;
2059 cur_ino
= XFS_AGINO_TO_INO(mp
, agno
, agino
);
2061 /* we only care about crc recalculation if we will modify the inode. */
2062 if (obfuscate
|| zero_stale_data
) {
2063 crc_was_ok
= xfs_verify_cksum((char *)dip
,
2064 mp
->m_sb
.sb_inodesize
,
2065 offsetof(struct xfs_dinode
, di_crc
));
2069 if (zero_stale_data
) {
2070 /* Zero all of the inode literal area */
2071 memset(XFS_DFORK_DPTR(dip
), 0,
2072 XFS_LITINO(mp
, dip
->di_version
));
2077 /* copy appropriate data fork metadata */
2078 switch (be16_to_cpu(dip
->di_mode
) & S_IFMT
) {
2080 success
= process_inode_data(dip
, TYP_DIR2
);
2081 if (dip
->di_format
== XFS_DINODE_FMT_LOCAL
)
2085 success
= process_inode_data(dip
, TYP_SYMLINK
);
2086 if (dip
->di_format
== XFS_DINODE_FMT_LOCAL
)
2090 success
= process_inode_data(dip
, TYP_DATA
);
2096 /* copy extended attributes if they exist and forkoff is valid */
2098 XFS_DFORK_DSIZE(dip
, mp
) < XFS_LITINO(mp
, dip
->di_version
)) {
2099 attr_data
.remote_val_count
= 0;
2100 switch (dip
->di_aformat
) {
2101 case XFS_DINODE_FMT_LOCAL
:
2103 if (obfuscate
|| zero_stale_data
)
2104 process_sf_attr(dip
);
2107 case XFS_DINODE_FMT_EXTENTS
:
2108 success
= process_exinode(dip
, TYP_ATTR
);
2111 case XFS_DINODE_FMT_BTREE
:
2112 success
= process_btinode(dip
, TYP_ATTR
);
2119 /* Heavy handed but low cost; just do it as a catch-all. */
2120 if (zero_stale_data
)
2123 if (crc_was_ok
&& need_new_crc
)
2124 libxfs_dinode_calc_crc(mp
, dip
);
2128 static __uint32_t inodes_copied
= 0;
2132 xfs_agnumber_t agno
,
2133 xfs_inobt_rec_t
*rp
)
2137 xfs_agblock_t agbno
;
2138 xfs_agblock_t end_agbno
;
2145 agino
= be32_to_cpu(rp
->ir_startino
);
2146 agbno
= XFS_AGINO_TO_AGBNO(mp
, agino
);
2147 end_agbno
= agbno
+ mp
->m_ialloc_blks
;
2148 off
= XFS_INO_TO_OFFSET(mp
, agino
);
2151 * If the fs supports sparse inode records, we must process inodes a
2152 * cluster at a time because that is the sparse allocation granularity.
2153 * Otherwise, we risk CRC corruption errors on reads of inode chunks.
2155 * Also make sure that that we don't process more than the single record
2156 * we've been passed (large block sizes can hold multiple inode chunks).
2158 if (xfs_sb_version_hassparseinodes(&mp
->m_sb
))
2159 blks_per_buf
= xfs_icluster_size_fsb(mp
);
2161 blks_per_buf
= mp
->m_ialloc_blks
;
2162 inodes_per_buf
= min(blks_per_buf
<< mp
->m_sb
.sb_inopblog
,
2163 XFS_INODES_PER_CHUNK
);
2166 * Sanity check that we only process a single buffer if ir_startino has
2167 * a buffer offset. A non-zero offset implies that the entire chunk lies
2170 if (off
&& inodes_per_buf
!= XFS_INODES_PER_CHUNK
) {
2171 print_warning("bad starting inode offset %d", off
);
2175 if (agino
== 0 || agino
== NULLAGINO
|| !valid_bno(agno
, agbno
) ||
2176 !valid_bno(agno
, XFS_AGINO_TO_AGBNO(mp
,
2177 agino
+ XFS_INODES_PER_CHUNK
- 1))) {
2179 print_warning("bad inode number %llu (%u/%u)",
2180 XFS_AGINO_TO_INO(mp
, agno
, agino
), agno
, agino
);
2185 * check for basic assumptions about inode chunks, and if any
2186 * assumptions fail, don't process the inode chunk.
2188 if ((mp
->m_sb
.sb_inopblock
<= XFS_INODES_PER_CHUNK
&& off
!= 0) ||
2189 (mp
->m_sb
.sb_inopblock
> XFS_INODES_PER_CHUNK
&&
2190 off
% XFS_INODES_PER_CHUNK
!= 0) ||
2191 (xfs_sb_version_hasalign(&mp
->m_sb
) &&
2192 mp
->m_sb
.sb_inoalignmt
!= 0 &&
2193 agbno
% mp
->m_sb
.sb_inoalignmt
!= 0)) {
2195 print_warning("badly aligned inode (start = %llu)",
2196 XFS_AGINO_TO_INO(mp
, agno
, agino
));
2202 while (agbno
< end_agbno
&& ioff
< XFS_INODES_PER_CHUNK
) {
2203 if (xfs_inobt_is_sparse_disk(rp
, ioff
))
2206 set_cur(&typtab
[TYP_INODE
], XFS_AGB_TO_DADDR(mp
, agno
, agbno
),
2207 XFS_FSB_TO_BB(mp
, blks_per_buf
), DB_RING_IGN
, NULL
);
2208 if (iocur_top
->data
== NULL
) {
2209 print_warning("cannot read inode block %u/%u",
2211 rval
= !stop_on_read_error
;
2215 for (i
= 0; i
< inodes_per_buf
; i
++) {
2218 dip
= (xfs_dinode_t
*)((char *)iocur_top
->data
+
2219 ((off
+ i
) << mp
->m_sb
.sb_inodelog
));
2221 /* process_inode handles free inodes, too */
2222 if (!process_inode(agno
, agino
+ ioff
+ i
, dip
,
2223 XFS_INOBT_IS_FREE_DISK(rp
, i
)))
2229 if (write_buf(iocur_top
))
2233 agbno
+= blks_per_buf
;
2234 ioff
+= inodes_per_buf
;
2238 print_progress("Copied %u of %u inodes (%u of %u AGs)",
2239 inodes_copied
, mp
->m_sb
.sb_icount
, agno
,
2240 mp
->m_sb
.sb_agcount
);
2249 struct xfs_btree_block
*block
,
2250 xfs_agnumber_t agno
,
2251 xfs_agblock_t agbno
,
2256 xfs_inobt_rec_t
*rp
;
2257 xfs_inobt_ptr_t
*pp
;
2260 int finobt
= *(int *) arg
;
2262 numrecs
= be16_to_cpu(block
->bb_numrecs
);
2265 if (numrecs
> mp
->m_inobt_mxr
[0]) {
2267 print_warning("invalid numrecs %d in %s "
2268 "block %u/%u", numrecs
,
2269 typtab
[btype
].name
, agno
, agbno
);
2270 numrecs
= mp
->m_inobt_mxr
[0];
2274 * Only copy the btree blocks for the finobt. The inobt scan
2275 * copies the inode chunks.
2280 rp
= XFS_INOBT_REC_ADDR(mp
, block
, 1);
2281 for (i
= 0; i
< numrecs
; i
++, rp
++) {
2282 if (!copy_inode_chunk(agno
, rp
))
2288 if (numrecs
> mp
->m_inobt_mxr
[1]) {
2290 print_warning("invalid numrecs %d in %s block %u/%u",
2291 numrecs
, typtab
[btype
].name
, agno
, agbno
);
2292 numrecs
= mp
->m_inobt_mxr
[1];
2295 pp
= XFS_INOBT_PTR_ADDR(mp
, block
, 1, mp
->m_inobt_mxr
[1]);
2296 for (i
= 0; i
< numrecs
; i
++) {
2297 if (!valid_bno(agno
, be32_to_cpu(pp
[i
]))) {
2299 print_warning("invalid block number (%u/%u) "
2300 "in %s block %u/%u",
2301 agno
, be32_to_cpu(pp
[i
]),
2302 typtab
[btype
].name
, agno
, agbno
);
2305 if (!scan_btree(agno
, be32_to_cpu(pp
[i
]), level
,
2306 btype
, arg
, scanfunc_ino
))
2314 xfs_agnumber_t agno
,
2321 root
= be32_to_cpu(agi
->agi_root
);
2322 levels
= be32_to_cpu(agi
->agi_level
);
2324 /* validate root and levels before processing the tree */
2325 if (root
== 0 || root
> mp
->m_sb
.sb_agblocks
) {
2327 print_warning("invalid block number (%u) in inobt "
2328 "root in agi %u", root
, agno
);
2331 if (levels
>= XFS_BTREE_MAXLEVELS
) {
2333 print_warning("invalid level (%u) in inobt root "
2334 "in agi %u", levels
, agno
);
2338 if (!scan_btree(agno
, root
, levels
, TYP_INOBT
, &finobt
, scanfunc_ino
))
2341 if (xfs_sb_version_hasfinobt(&mp
->m_sb
)) {
2342 root
= be32_to_cpu(agi
->agi_free_root
);
2343 levels
= be32_to_cpu(agi
->agi_free_level
);
2346 if (!scan_btree(agno
, root
, levels
, TYP_INOBT
, &finobt
,
2356 xfs_agnumber_t agno
)
2360 int stack_count
= 0;
2363 /* copy the superblock of the AG */
2366 set_cur(&typtab
[TYP_SB
], XFS_AG_DADDR(mp
, agno
, XFS_SB_DADDR
),
2367 XFS_FSS_TO_BB(mp
, 1), DB_RING_IGN
, NULL
);
2368 if (!iocur_top
->data
) {
2369 print_warning("cannot read superblock for ag %u", agno
);
2370 if (stop_on_read_error
)
2373 /* Replace any filesystem label with "L's" */
2375 struct xfs_sb
*sb
= iocur_top
->data
;
2376 memset(sb
->sb_fname
, 'L',
2377 min(strlen(sb
->sb_fname
), sizeof(sb
->sb_fname
)));
2378 iocur_top
->need_crc
= 1;
2380 if (write_buf(iocur_top
))
2384 /* copy the AG free space btree root */
2387 set_cur(&typtab
[TYP_AGF
], XFS_AG_DADDR(mp
, agno
, XFS_AGF_DADDR(mp
)),
2388 XFS_FSS_TO_BB(mp
, 1), DB_RING_IGN
, NULL
);
2389 agf
= iocur_top
->data
;
2390 if (iocur_top
->data
== NULL
) {
2391 print_warning("cannot read agf block for ag %u", agno
);
2392 if (stop_on_read_error
)
2395 if (write_buf(iocur_top
))
2399 /* copy the AG inode btree root */
2402 set_cur(&typtab
[TYP_AGI
], XFS_AG_DADDR(mp
, agno
, XFS_AGI_DADDR(mp
)),
2403 XFS_FSS_TO_BB(mp
, 1), DB_RING_IGN
, NULL
);
2404 agi
= iocur_top
->data
;
2405 if (iocur_top
->data
== NULL
) {
2406 print_warning("cannot read agi block for ag %u", agno
);
2407 if (stop_on_read_error
)
2410 if (write_buf(iocur_top
))
2414 /* copy the AG free list header */
2417 set_cur(&typtab
[TYP_AGFL
], XFS_AG_DADDR(mp
, agno
, XFS_AGFL_DADDR(mp
)),
2418 XFS_FSS_TO_BB(mp
, 1), DB_RING_IGN
, NULL
);
2419 if (iocur_top
->data
== NULL
) {
2420 print_warning("cannot read agfl block for ag %u", agno
);
2421 if (stop_on_read_error
)
2424 if (agf
&& zero_stale_data
) {
2425 /* Zero out unused bits of agfl */
2429 agfl_bno
= XFS_BUF_TO_AGFL_BNO(mp
, iocur_top
->bp
);
2430 i
= be32_to_cpu(agf
->agf_fllast
);
2433 if (++i
== XFS_AGFL_SIZE(mp
))
2435 if (i
== be32_to_cpu(agf
->agf_flfirst
))
2437 agfl_bno
[i
] = cpu_to_be32(NULLAGBLOCK
);
2439 iocur_top
->need_crc
= 1;
2441 if (write_buf(iocur_top
))
2445 /* copy AG free space btrees */
2448 print_progress("Copying free space trees of AG %u",
2450 if (!copy_free_bno_btree(agno
, agf
))
2452 if (!copy_free_cnt_btree(agno
, agf
))
2456 /* copy inode btrees and the inodes and their associated metadata */
2458 if (!copy_inodes(agno
, agi
))
2463 while (stack_count
--)
2473 xfs_agnumber_t agno
;
2474 xfs_agblock_t agbno
;
2479 if (ino
== 0 || ino
== NULLFSINO
)
2482 agno
= XFS_INO_TO_AGNO(mp
, ino
);
2483 agino
= XFS_INO_TO_AGINO(mp
, ino
);
2484 agbno
= XFS_AGINO_TO_AGBNO(mp
, agino
);
2485 offset
= XFS_AGINO_TO_OFFSET(mp
, agino
);
2487 if (agno
>= mp
->m_sb
.sb_agcount
|| agbno
>= mp
->m_sb
.sb_agblocks
||
2488 offset
>= mp
->m_sb
.sb_inopblock
) {
2490 print_warning("invalid %s inode number (%lld)",
2491 typtab
[itype
].name
, (long long)ino
);
2496 set_cur(&typtab
[TYP_INODE
], XFS_AGB_TO_DADDR(mp
, agno
, agbno
),
2497 blkbb
, DB_RING_IGN
, NULL
);
2498 if (iocur_top
->data
== NULL
) {
2499 print_warning("cannot read %s inode %lld",
2500 typtab
[itype
].name
, (long long)ino
);
2501 rval
= !stop_on_read_error
;
2504 off_cur(offset
<< mp
->m_sb
.sb_inodelog
, mp
->m_sb
.sb_inodesize
);
2507 rval
= process_inode_data(iocur_top
->data
, itype
);
2515 copy_sb_inodes(void)
2517 if (!copy_ino(mp
->m_sb
.sb_rbmino
, TYP_RTBITMAP
))
2520 if (!copy_ino(mp
->m_sb
.sb_rsumino
, TYP_RTSUMMARY
))
2523 if (!copy_ino(mp
->m_sb
.sb_uquotino
, TYP_DQBLK
))
2526 if (!copy_ino(mp
->m_sb
.sb_gquotino
, TYP_DQBLK
))
2529 return copy_ino(mp
->m_sb
.sb_pquotino
, TYP_DQBLK
);
2537 xfs_daddr_t logstart
;
2540 int cycle
= XLOG_INIT_CYCLE
;
2543 print_progress("Copying log");
2546 set_cur(&typtab
[TYP_LOG
], XFS_FSB_TO_DADDR(mp
, mp
->m_sb
.sb_logstart
),
2547 mp
->m_sb
.sb_logblocks
* blkbb
, DB_RING_IGN
, NULL
);
2548 if (iocur_top
->data
== NULL
) {
2550 print_warning("cannot read log");
2551 return !stop_on_read_error
;
2554 /* If not obfuscating or zeroing, just copy the log as it is */
2555 if (!obfuscate
&& !zero_stale_data
)
2558 dirty
= xlog_is_dirty(mp
, &log
, &x
, 0);
2562 /* clear out a clean log */
2564 print_progress("Zeroing clean log");
2566 logstart
= XFS_FSB_TO_DADDR(mp
, mp
->m_sb
.sb_logstart
);
2567 logblocks
= XFS_FSB_TO_BB(mp
, mp
->m_sb
.sb_logblocks
);
2568 logversion
= xfs_sb_version_haslogv2(&mp
->m_sb
) ? 2 : 1;
2569 if (xfs_sb_version_hascrc(&mp
->m_sb
))
2570 cycle
= log
.l_curr_cycle
+ 1;
2572 libxfs_log_clear(NULL
, iocur_top
->data
, logstart
, logblocks
,
2573 &mp
->m_sb
.sb_uuid
, logversion
,
2574 mp
->m_sb
.sb_logsunit
, XLOG_FMT
, cycle
, true);
2577 /* keep the dirty log */
2579 _("Filesystem log is dirty; image will contain unobfuscated metadata in log."));
2582 /* log detection error */
2584 _("Could not discern log; image will contain unobfuscated metadata in log."));
2589 return !write_buf(iocur_top
);
2597 xfs_agnumber_t agno
;
2605 stop_on_read_error
= 0;
2607 if (mp
->m_sb
.sb_magicnum
!= XFS_SB_MAGIC
) {
2608 print_warning("bad superblock magic number %x, giving up",
2609 mp
->m_sb
.sb_magicnum
);
2613 while ((c
= getopt(argc
, argv
, "aegm:ow")) != EOF
) {
2616 zero_stale_data
= 0;
2619 stop_on_read_error
= 1;
2625 max_extent_size
= (int)strtol(optarg
, &p
, 0);
2626 if (*p
!= '\0' || max_extent_size
<= 0) {
2627 print_warning("bad max extent size %s",
2639 print_warning("bad option for metadump command");
2644 if (optind
!= argc
- 1) {
2645 print_warning("too few options for metadump (no filename given)");
2649 metablock
= (xfs_metablock_t
*)calloc(BBSIZE
+ 1, BBSIZE
);
2650 if (metablock
== NULL
) {
2651 print_warning("memory allocation failure");
2654 metablock
->mb_blocklog
= BBSHIFT
;
2655 metablock
->mb_magic
= cpu_to_be32(XFS_MD_MAGIC
);
2657 block_index
= (__be64
*)((char *)metablock
+ sizeof(xfs_metablock_t
));
2658 block_buffer
= (char *)metablock
+ BBSIZE
;
2659 num_indicies
= (BBSIZE
- sizeof(xfs_metablock_t
)) / sizeof(__be64
);
2661 start_iocur_sp
= iocur_sp
;
2663 if (strcmp(argv
[optind
], "-") == 0) {
2664 if (isatty(fileno(stdout
))) {
2665 print_warning("cannot write to a terminal");
2671 outf
= fopen(argv
[optind
], "wb");
2673 print_warning("cannot create dump file");
2681 for (agno
= 0; agno
< mp
->m_sb
.sb_agcount
; agno
++) {
2682 if (!scan_ag(agno
)) {
2688 /* copy realtime and quota inode contents */
2690 exitcode
= !copy_sb_inodes();
2692 /* copy log if it's internal */
2693 if ((mp
->m_sb
.sb_logstart
!= 0) && !exitcode
)
2694 exitcode
= !copy_log();
2696 /* write the remaining index */
2698 exitcode
= write_index() < 0;
2700 if (progress_since_warning
)
2701 fputc('\n', (outf
== stdout
) ? stderr
: stdout
);
2706 /* cleanup iocur stack */
2707 while (iocur_sp
> start_iocur_sp
)