2 * Copyright (c) 2007, 2011 SGI
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
29 #include "xfs_metadump.h"
/* Default cap, in blocks, on the size of file extents that get copied. */
#define DEFAULT_MAX_EXT_SIZE	1000
/*
 * It's possible that multiple files in a directory (or attributes
 * in a file) produce the same obfuscated name.  If that happens, we
 * try to create another one.  After several rounds of this though,
 * we just give up and leave the original name as-is.
 */
#define	DUP_MAX		5	/* Max duplicates before we give up */
/* copy all metadata structures to/from a file */
static int	metadump_f(int argc, char **argv);
static void	metadump_help(void);
51 * metadump commands issue info/warnings/errors to standard error as
52 * metadump supports stdout as a destination.
54 * All static functions return zero on failure, while the public functions
55 * return zero on success.
58 static const cmdinfo_t metadump_cmd
=
59 { "metadump", NULL
, metadump_f
, 0, -1, 0,
60 N_("[-a] [-e] [-g] [-m max_extent] [-w] [-o] filename"),
61 N_("dump metadata to a file"), metadump_help
};
63 static FILE *outf
; /* metadump file */
65 static xfs_metablock_t
*metablock
; /* header + index + buffers */
66 static __be64
*block_index
;
67 static char *block_buffer
;
69 static int num_indices
;
72 static xfs_ino_t cur_ino
;
74 static int show_progress
= 0;
75 static int stop_on_read_error
= 0;
76 static int max_extent_size
= DEFAULT_MAX_EXT_SIZE
;
77 static int obfuscate
= 1;
78 static int zero_stale_data
= 1;
79 static int show_warnings
= 0;
80 static int progress_since_warning
= 0;
85 add_command(&metadump_cmd
);
93 " The 'metadump' command dumps the known metadata to a compact file suitable\n"
94 " for compressing and sending to an XFS maintainer for corruption analysis \n"
95 " or xfs_repair failures.\n\n"
97 " -a -- Copy full metadata blocks without zeroing unused space\n"
98 " -e -- Ignore read errors and keep going\n"
99 " -g -- Display dump progress\n"
100 " -m -- Specify max extent size in blocks to copy (default = %d blocks)\n"
101 " -o -- Don't obfuscate names and extended attributes\n"
102 " -w -- Show warnings of bad metadata information\n"
103 "\n"), DEFAULT_MAX_EXT_SIZE
);
107 print_warning(const char *fmt
, ...)
116 vsnprintf(buf
, sizeof(buf
), fmt
, ap
);
118 buf
[sizeof(buf
)-1] = '\0';
120 fprintf(stderr
, "%s%s: %s\n", progress_since_warning
? "\n" : "",
122 progress_since_warning
= 0;
126 print_progress(const char *fmt
, ...)
136 vsnprintf(buf
, sizeof(buf
), fmt
, ap
);
138 buf
[sizeof(buf
)-1] = '\0';
140 f
= (outf
== stdout
) ? stderr
: stdout
;
141 fprintf(f
, "\r%-59s", buf
);
143 progress_since_warning
= 1;
147 * A complete dump file will have a "zero" entry in the last index block,
148 * even if the dump is exactly aligned, the last index will be full of
149 * zeros. If the last index entry is non-zero, the dump is incomplete.
150 * Correspondingly, the last chunk will have a count < num_indices.
152 * Return 0 for success, -1 for failure.
159 * write index block and following data blocks (streaming)
161 metablock
->mb_count
= cpu_to_be16(cur_index
);
162 if (fwrite(metablock
, (cur_index
+ 1) << BBSHIFT
, 1, outf
) != 1) {
163 print_warning("error writing to file: %s", strerror(errno
));
167 memset(block_index
, 0, num_indices
* sizeof(__be64
));
173 * Return 0 for success, -errno for failure.
184 for (i
= 0; i
< len
; i
++, off
++, data
+= BBSIZE
) {
185 block_index
[cur_index
] = cpu_to_be64(off
);
186 memcpy(&block_buffer
[cur_index
<< BBSHIFT
], data
, BBSIZE
);
187 if (++cur_index
== num_indices
) {
197 * we want to preserve the state of the metadata in the dump - whether it is
198 * intact or corrupt, so even if the buffer has a verifier attached to it we
199 * don't want to run it prior to writing the buffer to the metadump image.
201 * The only reason for running the verifier is to recalculate the CRCs on a
202 * buffer that has been obfuscated. i.e. a buffer that metadump modified itself.
203 * In this case, we only run the verifier if the buffer was not corrupt to begin
204 * with so that we don't accidentally correct buffers with CRC or errors in them
205 * when we are obfuscating them.
211 struct xfs_buf
*bp
= buf
->bp
;
216 * Run the write verifier to recalculate the buffer CRCs and check
217 * metadump didn't introduce a new corruption. Warn if the verifier
218 * failed, but still continue to dump it into the output file.
220 if (buf
->need_crc
&& bp
&& bp
->b_ops
&& !bp
->b_error
) {
221 bp
->b_ops
->verify_write(bp
);
224 "obfuscation corrupted block at %s bno 0x%llx/0x%x",
226 (long long)bp
->b_bn
, bp
->b_bcount
);
230 /* handle discontiguous buffers */
232 ret
= write_buf_segment(buf
->data
, buf
->bb
, buf
->blen
);
237 for (i
= 0; i
< buf
->bbmap
->nmaps
; i
++) {
238 ret
= write_buf_segment(buf
->data
+ BBTOB(len
),
239 buf
->bbmap
->b
[i
].bm_bn
,
240 buf
->bbmap
->b
[i
].bm_len
);
243 len
+= buf
->bbmap
->b
[i
].bm_len
;
246 return seenint() ? -EINTR
: 0;
250 * We could be processing a corrupt block, so we can't trust any of
251 * the offsets or lengths to be within the buffer range. Hence check
256 struct xfs_btree_block
*block
,
262 xfs_inobt_ptr_t
*ipp
;
263 xfs_inobt_key_t
*ikp
;
264 xfs_alloc_ptr_t
*app
;
265 xfs_alloc_key_t
*akp
;
269 nrecs
= be16_to_cpu(block
->bb_numrecs
);
276 if (nrecs
> mp
->m_bmap_dmxr
[1])
279 bkp
= XFS_BMBT_KEY_ADDR(mp
, block
, 1);
280 bpp
= XFS_BMBT_PTR_ADDR(mp
, block
, 1, mp
->m_bmap_dmxr
[1]);
281 zp1
= (char *)&bkp
[nrecs
];
282 zp2
= (char *)&bpp
[nrecs
];
283 key_end
= (char *)bpp
;
287 if (nrecs
> mp
->m_inobt_mxr
[1])
290 ikp
= XFS_INOBT_KEY_ADDR(mp
, block
, 1);
291 ipp
= XFS_INOBT_PTR_ADDR(mp
, block
, 1, mp
->m_inobt_mxr
[1]);
292 zp1
= (char *)&ikp
[nrecs
];
293 zp2
= (char *)&ipp
[nrecs
];
294 key_end
= (char *)ipp
;
298 if (nrecs
> mp
->m_alloc_mxr
[1])
301 akp
= XFS_ALLOC_KEY_ADDR(mp
, block
, 1);
302 app
= XFS_ALLOC_PTR_ADDR(mp
, block
, 1, mp
->m_alloc_mxr
[1]);
303 zp1
= (char *)&akp
[nrecs
];
304 zp2
= (char *)&app
[nrecs
];
305 key_end
= (char *)app
;
312 /* Zero from end of keys to beginning of pointers */
313 memset(zp1
, 0, key_end
- zp1
);
315 /* Zero from end of pointers to end of block */
316 memset(zp2
, 0, (char *)block
+ mp
->m_sb
.sb_blocksize
- zp2
);
320 * We could be processing a corrupt block, so we can't trust any of
321 * the offsets or lengths to be within the buffer range. Hence check
326 struct xfs_btree_block
*block
,
330 struct xfs_bmbt_rec
*brp
;
331 struct xfs_inobt_rec
*irp
;
332 struct xfs_alloc_rec
*arp
;
335 nrecs
= be16_to_cpu(block
->bb_numrecs
);
342 if (nrecs
> mp
->m_bmap_dmxr
[0])
345 brp
= XFS_BMBT_REC_ADDR(mp
, block
, 1);
346 zp
= (char *)&brp
[nrecs
];
350 if (nrecs
> mp
->m_inobt_mxr
[0])
353 irp
= XFS_INOBT_REC_ADDR(mp
, block
, 1);
354 zp
= (char *)&irp
[nrecs
];
358 if (nrecs
> mp
->m_alloc_mxr
[0])
361 arp
= XFS_ALLOC_REC_ADDR(mp
, block
, 1);
362 zp
= (char *)&arp
[nrecs
];
368 /* Zero from end of records to end of block */
369 memset(zp
, 0, (char *)block
+ mp
->m_sb
.sb_blocksize
- zp
);
374 struct xfs_btree_block
*block
,
379 level
= be16_to_cpu(block
->bb_level
);
382 zero_btree_node(block
, btype
);
384 zero_btree_leaf(block
, btype
);
394 int (*func
)(struct xfs_btree_block
*block
,
404 set_cur(&typtab
[btype
], XFS_AGB_TO_DADDR(mp
, agno
, agbno
), blkbb
,
406 if (iocur_top
->data
== NULL
) {
407 print_warning("cannot read %s block %u/%u", typtab
[btype
].name
,
409 rval
= !stop_on_read_error
;
413 if (zero_stale_data
) {
414 zero_btree_block(iocur_top
->data
, btype
);
415 iocur_top
->need_crc
= 1;
418 if (write_buf(iocur_top
))
421 if (!(*func
)(iocur_top
->data
, agno
, agbno
, level
- 1, btype
, arg
))
429 /* free space tree copy routines */
436 if (agno
< (mp
->m_sb
.sb_agcount
- 1) && agbno
> 0 &&
437 agbno
<= mp
->m_sb
.sb_agblocks
)
439 if (agno
== (mp
->m_sb
.sb_agcount
- 1) && agbno
> 0 &&
440 agbno
<= (mp
->m_sb
.sb_dblocks
-
441 (xfs_rfsblock_t
)(mp
->m_sb
.sb_agcount
- 1) *
442 mp
->m_sb
.sb_agblocks
))
451 struct xfs_btree_block
*block
,
465 numrecs
= be16_to_cpu(block
->bb_numrecs
);
466 if (numrecs
> mp
->m_alloc_mxr
[1]) {
468 print_warning("invalid numrecs (%u) in %s block %u/%u",
469 numrecs
, typtab
[btype
].name
, agno
, agbno
);
473 pp
= XFS_ALLOC_PTR_ADDR(mp
, block
, 1, mp
->m_alloc_mxr
[1]);
474 for (i
= 0; i
< numrecs
; i
++) {
475 if (!valid_bno(agno
, be32_to_cpu(pp
[i
]))) {
477 print_warning("invalid block number (%u/%u) "
479 agno
, be32_to_cpu(pp
[i
]),
480 typtab
[btype
].name
, agno
, agbno
);
483 if (!scan_btree(agno
, be32_to_cpu(pp
[i
]), level
, btype
, arg
,
498 root
= be32_to_cpu(agf
->agf_roots
[XFS_BTNUM_BNO
]);
499 levels
= be32_to_cpu(agf
->agf_levels
[XFS_BTNUM_BNO
]);
501 /* validate root and levels before processing the tree */
502 if (root
== 0 || root
> mp
->m_sb
.sb_agblocks
) {
504 print_warning("invalid block number (%u) in bnobt "
505 "root in agf %u", root
, agno
);
508 if (levels
>= XFS_BTREE_MAXLEVELS
) {
510 print_warning("invalid level (%u) in bnobt root "
511 "in agf %u", levels
, agno
);
515 return scan_btree(agno
, root
, levels
, TYP_BNOBT
, agf
, scanfunc_freesp
);
526 root
= be32_to_cpu(agf
->agf_roots
[XFS_BTNUM_CNT
]);
527 levels
= be32_to_cpu(agf
->agf_levels
[XFS_BTNUM_CNT
]);
529 /* validate root and levels before processing the tree */
530 if (root
== 0 || root
> mp
->m_sb
.sb_agblocks
) {
532 print_warning("invalid block number (%u) in cntbt "
533 "root in agf %u", root
, agno
);
536 if (levels
>= XFS_BTREE_MAXLEVELS
) {
538 print_warning("invalid level (%u) in cntbt root "
539 "in agf %u", levels
, agno
);
543 return scan_btree(agno
, root
, levels
, TYP_CNTBT
, agf
, scanfunc_freesp
);
548 struct xfs_btree_block
*block
,
562 numrecs
= be16_to_cpu(block
->bb_numrecs
);
563 if (numrecs
> mp
->m_rmap_mxr
[1]) {
565 print_warning("invalid numrecs (%u) in %s block %u/%u",
566 numrecs
, typtab
[btype
].name
, agno
, agbno
);
570 pp
= XFS_RMAP_PTR_ADDR(block
, 1, mp
->m_rmap_mxr
[1]);
571 for (i
= 0; i
< numrecs
; i
++) {
572 if (!valid_bno(agno
, be32_to_cpu(pp
[i
]))) {
574 print_warning("invalid block number (%u/%u) "
576 agno
, be32_to_cpu(pp
[i
]),
577 typtab
[btype
].name
, agno
, agbno
);
580 if (!scan_btree(agno
, be32_to_cpu(pp
[i
]), level
, btype
, arg
,
595 if (!xfs_sb_version_hasrmapbt(&mp
->m_sb
))
598 root
= be32_to_cpu(agf
->agf_roots
[XFS_BTNUM_RMAP
]);
599 levels
= be32_to_cpu(agf
->agf_levels
[XFS_BTNUM_RMAP
]);
601 /* validate root and levels before processing the tree */
602 if (root
== 0 || root
> mp
->m_sb
.sb_agblocks
) {
604 print_warning("invalid block number (%u) in rmapbt "
605 "root in agf %u", root
, agno
);
608 if (levels
>= XFS_BTREE_MAXLEVELS
) {
610 print_warning("invalid level (%u) in rmapbt root "
611 "in agf %u", levels
, agno
);
615 return scan_btree(agno
, root
, levels
, TYP_RMAPBT
, agf
, scanfunc_rmapbt
);
620 struct xfs_btree_block
*block
,
627 xfs_refcount_ptr_t
*pp
;
634 numrecs
= be16_to_cpu(block
->bb_numrecs
);
635 if (numrecs
> mp
->m_refc_mxr
[1]) {
637 print_warning("invalid numrecs (%u) in %s block %u/%u",
638 numrecs
, typtab
[btype
].name
, agno
, agbno
);
642 pp
= XFS_REFCOUNT_PTR_ADDR(block
, 1, mp
->m_refc_mxr
[1]);
643 for (i
= 0; i
< numrecs
; i
++) {
644 if (!valid_bno(agno
, be32_to_cpu(pp
[i
]))) {
646 print_warning("invalid block number (%u/%u) "
648 agno
, be32_to_cpu(pp
[i
]),
649 typtab
[btype
].name
, agno
, agbno
);
652 if (!scan_btree(agno
, be32_to_cpu(pp
[i
]), level
, btype
, arg
,
667 if (!xfs_sb_version_hasreflink(&mp
->m_sb
))
670 root
= be32_to_cpu(agf
->agf_refcount_root
);
671 levels
= be32_to_cpu(agf
->agf_refcount_level
);
673 /* validate root and levels before processing the tree */
674 if (root
== 0 || root
> mp
->m_sb
.sb_agblocks
) {
676 print_warning("invalid block number (%u) in refcntbt "
677 "root in agf %u", root
, agno
);
680 if (levels
>= XFS_BTREE_MAXLEVELS
) {
682 print_warning("invalid level (%u) in refcntbt root "
683 "in agf %u", levels
, agno
);
687 return scan_btree(agno
, root
, levels
, TYP_REFCBT
, agf
, scanfunc_refcntbt
);
690 /* filename and extended attribute obfuscation routines */
693 struct name_ent
*next
;
696 unsigned char name
[1];
#define	NAME_TABLE_SIZE	4096

/* Hash table of obfuscated names seen so far, chained per bucket. */
static struct name_ent		*nametable[NAME_TABLE_SIZE];
704 nametable_clear(void)
707 struct name_ent
*ent
;
709 for (i
= 0; i
< NAME_TABLE_SIZE
; i
++) {
710 while ((ent
= nametable
[i
])) {
711 nametable
[i
] = ent
->next
;
718 * See if the given name is already in the name table. If so,
719 * return a pointer to its entry, otherwise return a null pointer.
721 static struct name_ent
*
722 nametable_find(xfs_dahash_t hash
, int namelen
, unsigned char *name
)
724 struct name_ent
*ent
;
726 for (ent
= nametable
[hash
% NAME_TABLE_SIZE
]; ent
; ent
= ent
->next
) {
727 if (ent
->hash
== hash
&& ent
->namelen
== namelen
&&
728 !memcmp(ent
->name
, name
, namelen
))
735 * Add the given name to the name table. Returns a pointer to the
736 * name's new entry, or a null pointer if an error occurs.
738 static struct name_ent
*
739 nametable_add(xfs_dahash_t hash
, int namelen
, unsigned char *name
)
741 struct name_ent
*ent
;
743 ent
= malloc(sizeof *ent
+ namelen
);
747 ent
->namelen
= namelen
;
748 memcpy(ent
->name
, name
, namelen
);
750 ent
->next
= nametable
[hash
% NAME_TABLE_SIZE
];
752 nametable
[hash
% NAME_TABLE_SIZE
] = ent
;
/* Characters that may never appear in a directory entry name. */
#define is_invalid_char(c)	((c) == '/' || (c) == '\0')
/* Rotate the 32-bit value x left by y bits (callers use 0 < y < 32). */
#define rol32(x,y)		(((x) << (y)) | ((x) >> (32 - (y))))
760 static inline unsigned char
761 random_filename_char(void)
763 static unsigned char filename_alphabet
[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
764 "abcdefghijklmnopqrstuvwxyz"
767 return filename_alphabet
[random() % (sizeof filename_alphabet
- 1)];
/* Name of the directory whose entries are exempted from obfuscation. */
#define ORPHANAGE	"lost+found"
#define ORPHANAGE_LEN	(sizeof (ORPHANAGE) - 1)
775 struct xfs_mount
*mp
,
780 return dir_ino
== mp
->m_sb
.sb_rootino
&&
781 name_len
== ORPHANAGE_LEN
&&
782 !memcmp(name
, ORPHANAGE
, ORPHANAGE_LEN
);
786 * Determine whether a name is one we shouldn't obfuscate because
787 * it's an orphan (or the "lost+found" directory itself). Note
788 * "cur_ino" is the inode for the directory currently being
791 * Returns 1 if the name should NOT be obfuscated or 0 otherwise.
799 static xfs_ino_t orphanage_ino
= 0;
800 char s
[24]; /* 21 is enough (64 bits in decimal) */
803 /* Record the "lost+found" inode if we haven't done so already */
806 if (!orphanage_ino
&& is_orphanage_dir(mp
, cur_ino
, namelen
, name
))
809 /* We don't obfuscate the "lost+found" directory itself */
811 if (ino
== orphanage_ino
)
814 /* Most files aren't in "lost+found" at all */
816 if (cur_ino
!= orphanage_ino
)
820 * Within "lost+found", we don't obfuscate any file whose
821 * name is the same as its inode number. Any others are
822 * stray files and can be obfuscated.
824 slen
= snprintf(s
, sizeof (s
), "%llu", (unsigned long long) ino
);
826 return slen
== namelen
&& !memcmp(name
, s
, namelen
);
830 * Given a name and its hash value, massage the name in such a way
831 * that the result is another name of equal length which shares the
840 unsigned char *newp
= name
;
842 xfs_dahash_t new_hash
= 0;
843 unsigned char *first
;
844 unsigned char high_bit
;
848 * Our obfuscation algorithm requires at least 5-character
849 * names, so don't bother if the name is too short. We
850 * work backward from a hash value to determine the last
851 * five bytes in a name required to produce a new name
852 * with the same hash.
858 * The beginning of the obfuscated name can be pretty much
859 * anything, so fill it in with random characters.
860 * Accumulate its new hash value as we go.
862 for (i
= 0; i
< name_len
- 5; i
++) {
863 *newp
= random_filename_char();
864 new_hash
= *newp
^ rol32(new_hash
, 7);
869 * Compute which five bytes need to be used at the end of
870 * the name so the hash of the obfuscated name is the same
871 * as the hash of the original. If any result in an invalid
872 * character, flip a bit and arrange for a corresponding bit
873 * in a neighboring byte to be flipped as well. For the
874 * last byte, the "neighbor" to change is the first byte
875 * we're computing here.
877 new_hash
= rol32(new_hash
, 3) ^ hash
;
881 for (shift
= 28; shift
>= 0; shift
-= 7) {
882 *newp
= (new_hash
>> shift
& 0x7f) ^ high_bit
;
883 if (is_invalid_char(*newp
)) {
888 ASSERT(!is_invalid_char(*newp
));
893 * If we flipped a bit on the last byte, we need to fix up
894 * the matching bit in the first byte. The result will
895 * be a valid character, because we know that first byte
896 * has 0's in its upper four bits (it was produced by a
897 * 28-bit right-shift of a 32-bit unsigned value).
901 ASSERT(!is_invalid_char(*first
));
903 ASSERT(libxfs_da_hashname(name
, name_len
) == hash
);
907 * Flip a bit in each of two bytes at the end of the given name.
908 * This is used in generating a series of alternate names to be used
909 * in the event a duplicate is found.
911 * The bits flipped are selected such that they both affect the same
912 * bit in the name's computed hash value, so flipping them both will
915 * The following diagram aims to show the portion of a computed
916 * hash that a given byte of a name affects.
918 * 31 28 24 21 14 8 7 3 0
919 * +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
920 * hash: | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
921 * +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+
922 * last-4 ->| |<-- last-2 --->| |<--- last ---->|
923 * |<-- last-3 --->| |<-- last-1 --->| |<- last-4
924 * |<-- last-7 --->| |<-- last-5 --->|
925 * |<-- last-8 --->| |<-- last-6 --->|
928 * The last byte of the name directly affects the low-order byte of
929 * the hash. The next-to-last affects bits 7-14, the next one back
930 * affects bits 14-21, and so on. The effect wraps around when it
931 * goes beyond the top of the hash (as happens for byte last-4).
933 * Bits that are flipped together "overlap" on the hash value. As
934 * an example of overlap, the last two bytes both affect bit 7 in
935 * the hash. That pair of bytes (and their overlapping bits) can be
936 * used for this "flip bit" operation (it's the first pair tried,
939 * A table defines overlapping pairs--the bytes involved and bits
940 * within them--that can be used this way. The byte offset is
941 * relative to a starting point within the name, which will be set
942 * to affect the bytes at the end of the name. The function is
943 * called with a "bitseq" value which indicates which bit flip is
944 * desired, and this translates directly into selecting which entry
945 * in the bit_to_flip[] table to apply.
947 * The function returns 1 if the operation was successful. It
948 * returns 0 if the result produced a character that's not valid in
949 * a name (either '/' or a '\0'). Finally, it returns -1 if the bit
950 * sequence number is beyond what is supported for a name of this
955 * (Also see the discussion above find_alternate(), below.)
957 * In order to make this function work for any length name, the
958 * table is ordered by increasing byte offset, so that the earliest
959 * entries can apply to the shortest strings. This way all names
960 * are done consistently.
962 * When bit flips occur, they can convert printable characters
963 * into non-printable ones. In an effort to reduce the impact of
964 * this, the first bit flips are chosen to affect bytes the end of
965 * the name (and furthermore, toward the low bits of a byte). Those
966 * bytes are often non-printable anyway because of the way they are
967 * initially selected by obfuscate_name()). This is accomplished,
968 * using later table entries first.
970 * Each row in the table doubles the number of alternates that
971 * can be generated. A two-byte name is limited to using only
972 * the first row, so it's possible to generate two alternates
973 * (the original name, plus the alternate produced by flipping
974 * the one pair of bits). In a 5-byte name, the effect of the
975 * first byte overlaps the last by 4 bits, and there are 8 bits
976 * to flip, allowing for 256 possible alternates.
978 * Short names (less than 5 bytes) are never even obfuscated, so for
979 * such names the relatively small number of alternates should never
980 * really be a problem.
982 * Long names (more than 6 bytes, say) are not likely to exhaust
983 * the number of available alternates. In fact, the table could
984 * probably have stopped at 8 entries, on the assumption that 256
985 * alternates should be enough for most any situation. The entries
986 * beyond those are present mostly for demonstration of how it could
987 * be populated with more entries, should it ever be necessary to do
998 unsigned char *p0
, *p1
;
999 unsigned char m0
, m1
;
1001 int byte
; /* Offset from start within name */
1002 unsigned char bit
; /* Bit within that byte */
1003 } bit_to_flip
[][2] = { /* Sorted by second entry's byte */
1004 { { 0, 0 }, { 1, 7 } }, /* Each row defines a pair */
1005 { { 1, 0 }, { 2, 7 } }, /* of bytes and a bit within */
1006 { { 2, 0 }, { 3, 7 } }, /* each byte. Each bit in */
1007 { { 0, 4 }, { 4, 0 } }, /* a pair affects the same */
1008 { { 0, 5 }, { 4, 1 } }, /* bit in the hash, so flipping */
1009 { { 0, 6 }, { 4, 2 } }, /* both will change the name */
1010 { { 0, 7 }, { 4, 3 } }, /* while preserving the hash. */
1011 { { 3, 0 }, { 4, 7 } },
1012 { { 0, 0 }, { 5, 3 } }, /* The first entry's byte offset */
1013 { { 0, 1 }, { 5, 4 } }, /* must be less than the second. */
1014 { { 0, 2 }, { 5, 5 } },
1015 { { 0, 3 }, { 5, 6 } }, /* The table can be extended to */
1016 { { 0, 4 }, { 5, 7 } }, /* an arbitrary number of entries */
1017 { { 4, 0 }, { 5, 7 } }, /* but there's not much point. */
1021 /* Find the first entry *not* usable for name of this length */
1023 for (index
= 0; index
< ARRAY_SIZE(bit_to_flip
); index
++)
1024 if (bit_to_flip
[index
][1].byte
>= name_len
)
1028 * Back up to the last usable entry. If that number is
1029 * smaller than the bit sequence number, inform the caller
1030 * that nothing this large (or larger) will work.
1032 if (bitseq
> --index
)
1036 * We will be switching bits at the end of name, with a
1037 * preference for affecting the last bytes first. Compute
1038 * where in the name we'll start applying the changes.
1040 offset
= name_len
- (bit_to_flip
[index
][1].byte
+ 1);
1041 index
-= bitseq
; /* Use later table entries first */
1043 p0
= name
+ offset
+ bit_to_flip
[index
][0].byte
;
1044 p1
= name
+ offset
+ bit_to_flip
[index
][1].byte
;
1045 m0
= 1 << bit_to_flip
[index
][0].bit
;
1046 m1
= 1 << bit_to_flip
[index
][1].bit
;
1048 /* Only change the bytes if it produces valid characters */
1050 if (is_invalid_char(*p0
^ m0
) || is_invalid_char(*p1
^ m1
))
1060 * This function generates a well-defined sequence of "alternate"
1061 * names for a given name. An alternate is a name having the same
1062 * length and same hash value as the original name. This is needed
1063 * because the algorithm produces only one obfuscated name to use
1064 * for a given original name, and it's possible that result matches
1065 * a name already seen. This function checks for this, and if it
1066 * occurs, finds another suitable obfuscated name to use.
1068 * Each bit in the binary representation of the sequence number is
1069 * used to select one possible "bit flip" operation to perform on
1070 * the name. So for example:
1071 * seq = 0: selects no bits to flip
1072 * seq = 1: selects the 0th bit to flip
1073 * seq = 2: selects the 1st bit to flip
1074 * seq = 3: selects the 0th and 1st bit to flip
1077 * The flip_bit() function takes care of the details of the bit
1078 * flipping within the name. Note that the "1st bit" in this
1079 * context is a bit sequence number; i.e. it doesn't necessarily
1080 * mean bit 0x02 will be changed.
1082 * If a valid name (one that contains no '/' or '\0' characters) is
1083 * produced by this process for the given sequence number, this
1084 * function returns 1. If the result is not valid, it returns 0.
1085 * Returns -1 if the sequence number is beyond the maximum for
1086 * names of the given length.
1091 * The number of alternates available for a given name is dependent
1092 * on its length. A "bit flip" involves inverting two bits in
1093 * a name--the two bits being selected such that their values
1094 * affect the name's hash value in the same way. Alternates are
1095 * thus generated by inverting the value of pairs of such
1096 * "overlapping" bits in the original name. Each byte after the
1097 * first in a name adds at least one bit of overlap to work with.
1098 * (See comments above flip_bit() for more discussion on this.)
1100 * So the number of alternates is dependent on the number of such
1101 * overlapping bits in a name. If there are N bit overlaps, there
1102 * 2^N alternates for that hash value.
1104 * Here are the number of overlapping bits available for generating
1105 * alternates for names of specific lengths:
1106 * 1 0 (must have 2 bytes to have any overlap)
1107 * 2 1 One bit overlaps--so 2 possible alternates
1108 * 3 2 Two bits overlap--so 4 possible alternates
1109 * 4 4 Three bits overlap, so 2^3 alternates
1110 * 5 8 8 bits overlap (due to wrapping), 256 alternates
1111 * 6 18 2^18 alternates
1112 * 7 28 2^28 alternates
1114 * It's clear that the number of alternates grows very quickly with
1115 * the length of the name. But note that the set of alternates
1116 * includes invalid names. And for certain (contrived) names, the
1117 * number of valid names is a fairly small fraction of the total
1118 * number of alternates.
1120 * The main driver for this infrastructure for coming up with
1121 * alternate names is really related to names 5 (or possibly 6)
1122 * bytes in length. 5-byte obfuscated names contain no randomly-
1123 * generated bytes in them, and the chance of an obfuscated name
1124 * matching an already-seen name is too high to just ignore. This
1125 * methodical selection of alternates ensures we don't produce
1126 * duplicate names unless we have exhausted our options.
1131 unsigned char *name
,
1134 uint32_t bitseq
= 0;
1135 uint32_t bits
= seq
;
1138 return 1; /* alternate 0 is the original name */
1139 if (name_len
< 2) /* Must have 2 bytes to flip */
1142 for (bitseq
= 0; bits
; bitseq
++) {
1143 uint32_t mask
= 1 << bitseq
;
1149 fb
= flip_bit(name_len
, name
, bitseq
);
1159 * Look up the given name in the name table. If it is already
1160 * present, iterate through a well-defined sequence of alternate
1161 * names and attempt to use an alternate name instead.
1163 * Returns 1 if the (possibly modified) name is not present in the
1164 * name table. Returns 0 if the name and all possible alternates
1165 * are already in the table.
1168 handle_duplicate_name(xfs_dahash_t hash
, size_t name_len
, unsigned char *name
)
1170 unsigned char new_name
[name_len
+ 1];
1173 if (!nametable_find(hash
, name_len
, name
))
1174 return 1; /* No duplicate */
1176 /* Name is already in use. Need to find an alternate. */
1181 /* Only change incoming name if we find an alternate */
1183 memcpy(new_name
, name
, name_len
);
1184 found
= find_alternate(name_len
, new_name
, seq
++);
1186 return 0; /* No more to check */
1188 } while (nametable_find(hash
, name_len
, new_name
));
1191 * The alternate wasn't in the table already. Pass it back
1194 memcpy(name
, new_name
, name_len
);
1200 generate_obfuscated_name(
1203 unsigned char *name
)
1208 * We don't obfuscate "lost+found" or any orphan files
1209 * therein. When the name table is used for extended
1210 * attributes, the inode number provided is 0, in which
1211 * case we don't need to make this check.
1213 if (ino
&& in_lost_found(ino
, namelen
, name
))
1217 * If the name starts with a slash, just skip over it. It
1218 * isn't included in the hash and we don't record it in the
1219 * name table. Note that the namelen value passed in does
1220 * not count the leading slash (if one is present).
1225 /* Obfuscate the name (if possible) */
1227 hash
= libxfs_da_hashname(name
, namelen
);
1228 obfuscate_name(hash
, namelen
, name
);
1231 * Make sure the name is not something already seen. If we
1232 * fail to find a suitable alternate, we're dealing with a
1233 * very pathological situation, and we may end up creating
1234 * a duplicate name in the metadump, so issue a warning.
1236 if (!handle_duplicate_name(hash
, namelen
, name
)) {
1237 print_warning("duplicate name for inode %llu "
1238 "in dir inode %llu\n",
1239 (unsigned long long) ino
,
1240 (unsigned long long) cur_ino
);
1244 /* Create an entry for the new name in the name table. */
1246 if (!nametable_add(hash
, namelen
, name
))
1247 print_warning("unable to record name for inode %llu "
1248 "in dir inode %llu\n",
1249 (unsigned long long) ino
,
1250 (unsigned long long) cur_ino
);
1257 struct xfs_dir2_sf_hdr
*sfp
;
1258 xfs_dir2_sf_entry_t
*sfep
;
1259 __uint64_t ino_dir_size
;
1262 sfp
= (struct xfs_dir2_sf_hdr
*)XFS_DFORK_DPTR(dip
);
1263 ino_dir_size
= be64_to_cpu(dip
->di_size
);
1264 if (ino_dir_size
> XFS_DFORK_DSIZE(dip
, mp
)) {
1265 ino_dir_size
= XFS_DFORK_DSIZE(dip
, mp
);
1267 print_warning("invalid size in dir inode %llu",
1268 (long long)cur_ino
);
1271 sfep
= xfs_dir2_sf_firstentry(sfp
);
1272 for (i
= 0; (i
< sfp
->count
) &&
1273 ((char *)sfep
- (char *)sfp
< ino_dir_size
); i
++) {
1276 * first check for bad name lengths. If they are bad, we
1277 * have limitations to how much can be obfuscated.
1279 int namelen
= sfep
->namelen
;
1283 print_warning("zero length entry in dir inode "
1284 "%llu", (long long)cur_ino
);
1285 if (i
!= sfp
->count
- 1)
1287 namelen
= ino_dir_size
- ((char *)&sfep
->name
[0] -
1289 } else if ((char *)sfep
- (char *)sfp
+
1290 M_DIROPS(mp
)->sf_entsize(sfp
, sfep
->namelen
) >
1293 print_warning("entry length in dir inode %llu "
1294 "overflows space", (long long)cur_ino
);
1295 if (i
!= sfp
->count
- 1)
1297 namelen
= ino_dir_size
- ((char *)&sfep
->name
[0] -
1302 generate_obfuscated_name(
1303 M_DIROPS(mp
)->sf_get_ino(sfp
, sfep
),
1304 namelen
, &sfep
->name
[0]);
1306 sfep
= (xfs_dir2_sf_entry_t
*)((char *)sfep
+
1307 M_DIROPS(mp
)->sf_entsize(sfp
, namelen
));
1310 /* zero stale data in rest of space in data fork, if any */
1311 if (zero_stale_data
&& (ino_dir_size
< XFS_DFORK_DSIZE(dip
, mp
)))
1312 memset(sfep
, 0, XFS_DFORK_DSIZE(dip
, mp
) - ino_dir_size
);
1316 * The pathname may not be null terminated. It may be terminated by the end of
1317 * a buffer or inode literal area, and the start of the next region contains
1318 * unknown data. Therefore, when we get to the last component of the symlink, we
1319 * cannot assume that strlen() will give us the right result. Hence we need to
1320 * track the remaining pathname length and use that instead.
1323 obfuscate_path_components(
1327 unsigned char *comp
= (unsigned char *)buf
;
1328 unsigned char *end
= comp
+ len
;
1331 while (comp
< end
) {
1335 /* find slash at end of this component */
1336 slash
= strchr((char *)comp
, '/');
1338 /* last (or single) component */
1339 namelen
= strnlen((char *)comp
, len
);
1340 hash
= libxfs_da_hashname(comp
, namelen
);
1341 obfuscate_name(hash
, namelen
, comp
);
1344 namelen
= slash
- (char *)comp
;
1345 /* handle leading or consecutive slashes */
1351 hash
= libxfs_da_hashname(comp
, namelen
);
1352 obfuscate_name(hash
, namelen
, comp
);
1353 comp
+= namelen
+ 1;
1365 len
= be64_to_cpu(dip
->di_size
);
1366 if (len
> XFS_DFORK_DSIZE(dip
, mp
)) {
1368 print_warning("invalid size (%d) in symlink inode %llu",
1369 len
, (long long)cur_ino
);
1370 len
= XFS_DFORK_DSIZE(dip
, mp
);
1373 buf
= (char *)XFS_DFORK_DPTR(dip
);
1375 obfuscate_path_components(buf
, len
);
1377 /* zero stale data in rest of space in data fork, if any */
1378 if (zero_stale_data
&& len
< XFS_DFORK_DSIZE(dip
, mp
))
1379 memset(&buf
[len
], 0, XFS_DFORK_DSIZE(dip
, mp
) - len
);
1387 * with extended attributes, obfuscate the names and fill the actual
1388 * values with 'v' (to see a valid string length, as opposed to NULLs)
1391 xfs_attr_shortform_t
*asfp
;
1392 xfs_attr_sf_entry_t
*asfep
;
1396 asfp
= (xfs_attr_shortform_t
*)XFS_DFORK_APTR(dip
);
1397 if (asfp
->hdr
.count
== 0)
1400 ino_attr_size
= be16_to_cpu(asfp
->hdr
.totsize
);
1401 if (ino_attr_size
> XFS_DFORK_ASIZE(dip
, mp
)) {
1402 ino_attr_size
= XFS_DFORK_ASIZE(dip
, mp
);
1404 print_warning("invalid attr size in inode %llu",
1405 (long long)cur_ino
);
1408 asfep
= &asfp
->list
[0];
1409 for (i
= 0; (i
< asfp
->hdr
.count
) &&
1410 ((char *)asfep
- (char *)asfp
< ino_attr_size
); i
++) {
1412 int namelen
= asfep
->namelen
;
1416 print_warning("zero length attr entry in inode "
1417 "%llu", (long long)cur_ino
);
1419 } else if ((char *)asfep
- (char *)asfp
+
1420 XFS_ATTR_SF_ENTSIZE(asfep
) > ino_attr_size
) {
1422 print_warning("attr entry length in inode %llu "
1423 "overflows space", (long long)cur_ino
);
1428 generate_obfuscated_name(0, asfep
->namelen
,
1429 &asfep
->nameval
[0]);
1430 memset(&asfep
->nameval
[asfep
->namelen
], 'v',
1434 asfep
= (xfs_attr_sf_entry_t
*)((char *)asfep
+
1435 XFS_ATTR_SF_ENTSIZE(asfep
));
1438 /* zero stale data in rest of space in attr fork, if any */
1439 if (zero_stale_data
&& (ino_attr_size
< XFS_DFORK_ASIZE(dip
, mp
)))
1440 memset(asfep
, 0, XFS_DFORK_ASIZE(dip
, mp
) - ino_attr_size
);
1444 process_dir_data_block(
1446 xfs_fileoff_t offset
,
1447 int is_block_format
)
1450 * we have to rely on the fileoffset and signature of the block to
1451 * handle it's contents. If it's invalid, leave it alone.
1452 * for multi-fsblock dir blocks, if a name crosses an extent boundary,
1453 * ignore it and continue.
1460 struct xfs_dir2_data_hdr
*datahdr
;
1462 datahdr
= (struct xfs_dir2_data_hdr
*)block
;
1464 if (is_block_format
) {
1465 xfs_dir2_leaf_entry_t
*blp
;
1466 xfs_dir2_block_tail_t
*btp
;
1468 btp
= xfs_dir2_block_tail_p(mp
->m_dir_geo
, datahdr
);
1469 blp
= xfs_dir2_block_leaf_p(btp
);
1470 if ((char *)blp
> (char *)btp
)
1471 blp
= (xfs_dir2_leaf_entry_t
*)btp
;
1473 end_of_data
= (char *)blp
- block
;
1474 if (xfs_sb_version_hascrc(&mp
->m_sb
))
1475 wantmagic
= XFS_DIR3_BLOCK_MAGIC
;
1477 wantmagic
= XFS_DIR2_BLOCK_MAGIC
;
1478 } else { /* leaf/node format */
1479 end_of_data
= mp
->m_dir_geo
->fsbcount
<< mp
->m_sb
.sb_blocklog
;
1480 if (xfs_sb_version_hascrc(&mp
->m_sb
))
1481 wantmagic
= XFS_DIR3_DATA_MAGIC
;
1483 wantmagic
= XFS_DIR2_DATA_MAGIC
;
1486 if (be32_to_cpu(datahdr
->magic
) != wantmagic
) {
1489 "invalid magic in dir inode %llu block %ld",
1490 (long long)cur_ino
, (long)offset
);
1494 dir_offset
= M_DIROPS(mp
)->data_entry_offset
;
1495 ptr
= block
+ dir_offset
;
1496 endptr
= block
+ mp
->m_dir_geo
->blksize
;
1498 while (ptr
< endptr
&& dir_offset
< end_of_data
) {
1499 xfs_dir2_data_entry_t
*dep
;
1500 xfs_dir2_data_unused_t
*dup
;
1503 dup
= (xfs_dir2_data_unused_t
*)ptr
;
1505 if (be16_to_cpu(dup
->freetag
) == XFS_DIR2_DATA_FREE_TAG
) {
1506 int length
= be16_to_cpu(dup
->length
);
1507 if (dir_offset
+ length
> end_of_data
||
1508 !length
|| (length
& (XFS_DIR2_DATA_ALIGN
- 1))) {
1511 "invalid length for dir free space in inode %llu",
1512 (long long)cur_ino
);
1515 if (be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup
)) !=
1518 dir_offset
+= length
;
1521 * Zero the unused space up to the tag - the tag is
1522 * actually at a variable offset, so zeroing &dup->tag
1523 * is zeroing the free space in between
1525 if (zero_stale_data
) {
1527 sizeof(xfs_dir2_data_unused_t
);
1530 memset(&dup
->tag
, 0, zlen
);
1531 iocur_top
->need_crc
= 1;
1534 if (dir_offset
>= end_of_data
|| ptr
>= endptr
)
1538 dep
= (xfs_dir2_data_entry_t
*)ptr
;
1539 length
= M_DIROPS(mp
)->data_entsize(dep
->namelen
);
1541 if (dir_offset
+ length
> end_of_data
||
1542 ptr
+ length
> endptr
) {
1545 "invalid length for dir entry name in inode %llu",
1546 (long long)cur_ino
);
1549 if (be16_to_cpu(*M_DIROPS(mp
)->data_entry_tag_p(dep
)) !=
1554 generate_obfuscated_name(be64_to_cpu(dep
->inumber
),
1555 dep
->namelen
, &dep
->name
[0]);
1556 dir_offset
+= length
;
1558 /* Zero the unused space after name, up to the tag */
1559 if (zero_stale_data
) {
1560 /* 1 byte for ftype; don't bother with conditional */
1562 (char *)M_DIROPS(mp
)->data_entry_tag_p(dep
) -
1563 (char *)&dep
->name
[dep
->namelen
] - 1;
1565 memset(&dep
->name
[dep
->namelen
] + 1, 0, zlen
);
1566 iocur_top
->need_crc
= 1;
1573 process_symlink_block(
1578 if (xfs_sb_version_hascrc(&(mp
)->m_sb
))
1579 link
+= sizeof(struct xfs_dsymlink_hdr
);
1582 obfuscate_path_components(link
, XFS_SYMLINK_BUF_SPACE(mp
,
1583 mp
->m_sb
.sb_blocksize
));
1584 if (zero_stale_data
) {
1585 size_t linklen
, zlen
;
1587 linklen
= strlen(link
);
1588 zlen
= mp
->m_sb
.sb_blocksize
- linklen
;
1589 if (xfs_sb_version_hascrc(&mp
->m_sb
))
1590 zlen
-= sizeof(struct xfs_dsymlink_hdr
);
1591 if (zlen
< mp
->m_sb
.sb_blocksize
)
1592 memset(link
+ linklen
, 0, zlen
);
1596 #define MAX_REMOTE_VALS 4095
1598 static struct attr_data_s
{
1599 int remote_val_count
;
1600 xfs_dablk_t remote_vals
[MAX_REMOTE_VALS
];
1605 xfs_dablk_t blockidx
,
1608 while (length
> 0 && attr_data
.remote_val_count
< MAX_REMOTE_VALS
) {
1609 attr_data
.remote_vals
[attr_data
.remote_val_count
] = blockidx
;
1610 attr_data
.remote_val_count
++;
1612 length
-= mp
->m_sb
.sb_blocksize
;
1615 if (attr_data
.remote_val_count
>= MAX_REMOTE_VALS
) {
1617 "Overflowed attr obfuscation array. No longer obfuscating remote attrs.");
1621 /* Handle remote and leaf attributes */
1625 xfs_fileoff_t offset
)
1627 struct xfs_attr_leafblock
*leaf
;
1628 struct xfs_attr3_icleaf_hdr hdr
;
1631 xfs_attr_leaf_entry_t
*entry
;
1632 xfs_attr_leaf_name_local_t
*local
;
1633 xfs_attr_leaf_name_remote_t
*remote
;
1634 __uint32_t bs
= mp
->m_sb
.sb_blocksize
;
1638 leaf
= (xfs_attr_leafblock_t
*)block
;
1640 /* Remote attributes - attr3 has XFS_ATTR3_RMT_MAGIC, attr has none */
1641 if ((be16_to_cpu(leaf
->hdr
.info
.magic
) != XFS_ATTR_LEAF_MAGIC
) &&
1642 (be16_to_cpu(leaf
->hdr
.info
.magic
) != XFS_ATTR3_LEAF_MAGIC
)) {
1643 for (i
= 0; i
< attr_data
.remote_val_count
; i
++) {
1644 if (obfuscate
&& attr_data
.remote_vals
[i
] == offset
)
1645 /* Macros to handle both attr and attr3 */
1647 (bs
- XFS_ATTR3_RMT_BUF_SPACE(mp
, bs
)),
1648 'v', XFS_ATTR3_RMT_BUF_SPACE(mp
, bs
));
1653 /* Ok, it's a leaf - get header; accounts for crc & non-crc */
1654 xfs_attr3_leaf_hdr_from_disk(mp
->m_attr_geo
, &hdr
, leaf
);
1656 nentries
= hdr
.count
;
1657 if (nentries
== 0 ||
1658 nentries
* sizeof(xfs_attr_leaf_entry_t
) +
1659 xfs_attr3_leaf_hdr_size(leaf
) >
1660 XFS_ATTR3_RMT_BUF_SPACE(mp
, bs
)) {
1662 print_warning("invalid attr count in inode %llu",
1663 (long long)cur_ino
);
1667 entry
= xfs_attr3_leaf_entryp(leaf
);
1668 /* We will move this as we parse */
1670 for (i
= 0; i
< nentries
; i
++, entry
++) {
1671 int nlen
, vlen
, zlen
;
1673 /* Grows up; if this name is topmost, move first_name */
1674 if (!first_name
|| xfs_attr3_leaf_name(leaf
, i
) < first_name
)
1675 first_name
= xfs_attr3_leaf_name(leaf
, i
);
1677 if (be16_to_cpu(entry
->nameidx
) > mp
->m_sb
.sb_blocksize
) {
1680 "invalid attr nameidx in inode %llu",
1681 (long long)cur_ino
);
1684 if (entry
->flags
& XFS_ATTR_LOCAL
) {
1685 local
= xfs_attr3_leaf_name_local(leaf
, i
);
1686 if (local
->namelen
== 0) {
1689 "zero length for attr name in inode %llu",
1690 (long long)cur_ino
);
1694 generate_obfuscated_name(0, local
->namelen
,
1695 &local
->nameval
[0]);
1696 memset(&local
->nameval
[local
->namelen
], 'v',
1697 be16_to_cpu(local
->valuelen
));
1699 /* zero from end of nameval[] to next name start */
1700 nlen
= local
->namelen
;
1701 vlen
= be16_to_cpu(local
->valuelen
);
1702 zlen
= xfs_attr_leaf_entsize_local(nlen
, vlen
) -
1703 (sizeof(xfs_attr_leaf_name_local_t
) - 1 +
1705 if (zero_stale_data
)
1706 memset(&local
->nameval
[nlen
+ vlen
], 0, zlen
);
1708 remote
= xfs_attr3_leaf_name_remote(leaf
, i
);
1709 if (remote
->namelen
== 0 || remote
->valueblk
== 0) {
1712 "invalid attr entry in inode %llu",
1713 (long long)cur_ino
);
1717 generate_obfuscated_name(0, remote
->namelen
,
1719 add_remote_vals(be32_to_cpu(remote
->valueblk
),
1720 be32_to_cpu(remote
->valuelen
));
1722 /* zero from end of name[] to next name start */
1723 nlen
= remote
->namelen
;
1724 zlen
= xfs_attr_leaf_entsize_remote(nlen
) -
1725 (sizeof(xfs_attr_leaf_name_remote_t
) - 1 +
1727 if (zero_stale_data
)
1728 memset(&remote
->name
[nlen
], 0, zlen
);
1732 /* Zero from end of entries array to the first name/val */
1733 if (zero_stale_data
) {
1734 struct xfs_attr_leaf_entry
*entries
;
1736 entries
= xfs_attr3_leaf_entryp(leaf
);
1737 memset(&entries
[nentries
], 0,
1738 first_name
- (char *)&entries
[nentries
]);
1742 /* Processes symlinks, attrs, directories ... */
1744 process_single_fsb_objects(
1755 for (i
= 0; i
< c
; i
++) {
1757 set_cur(&typtab
[btype
], XFS_FSB_TO_DADDR(mp
, s
), blkbb
,
1760 if (!iocur_top
->data
) {
1761 xfs_agnumber_t agno
= XFS_FSB_TO_AGNO(mp
, s
);
1762 xfs_agblock_t agbno
= XFS_FSB_TO_AGBNO(mp
, s
);
1764 print_warning("cannot read %s block %u/%u (%llu)",
1765 typtab
[btype
].name
, agno
, agbno
, s
);
1766 if (stop_on_read_error
)
1772 if (!obfuscate
&& !zero_stale_data
)
1775 /* Zero unused part of interior nodes */
1776 if (zero_stale_data
) {
1777 xfs_da_intnode_t
*node
= iocur_top
->data
;
1778 int magic
= be16_to_cpu(node
->hdr
.info
.magic
);
1780 if (magic
== XFS_DA_NODE_MAGIC
||
1781 magic
== XFS_DA3_NODE_MAGIC
) {
1782 struct xfs_da3_icnode_hdr hdr
;
1785 M_DIROPS(mp
)->node_hdr_from_disk(&hdr
, node
);
1786 used
= M_DIROPS(mp
)->node_hdr_size
;
1789 * sizeof(struct xfs_da_node_entry
);
1791 if (used
< mp
->m_sb
.sb_blocksize
) {
1792 memset((char *)node
+ used
, 0,
1793 mp
->m_sb
.sb_blocksize
- used
);
1794 iocur_top
->need_crc
= 1;
1799 /* Handle leaf nodes */
1800 dp
= iocur_top
->data
;
1803 if (o
>= mp
->m_dir_geo
->leafblk
)
1806 process_dir_data_block(dp
, o
,
1807 last
== mp
->m_dir_geo
->fsbcount
);
1808 iocur_top
->need_crc
= 1;
1811 process_symlink_block(dp
);
1812 iocur_top
->need_crc
= 1;
1815 process_attr_block(dp
, o
);
1816 iocur_top
->need_crc
= 1;
1823 ret
= write_buf(iocur_top
);
1836 * Static map to aggregate multiple extents into a single directory block.
1838 static struct bbmap mfsb_map
;
1839 static int mfsb_length
;
1842 process_multi_fsb_objects(
1855 print_warning("bad type for multi-fsb object %d", btype
);
1860 unsigned int bm_len
;
1862 if (mfsb_length
+ c
>= mp
->m_dir_geo
->fsbcount
) {
1863 bm_len
= mp
->m_dir_geo
->fsbcount
- mfsb_length
;
1870 mfsb_map
.b
[mfsb_map
.nmaps
].bm_bn
= XFS_FSB_TO_DADDR(mp
, s
);
1871 mfsb_map
.b
[mfsb_map
.nmaps
].bm_len
= XFS_FSB_TO_BB(mp
, bm_len
);
1874 if (mfsb_length
== 0) {
1876 set_cur(&typtab
[btype
], 0, 0, DB_RING_IGN
, &mfsb_map
);
1877 if (!iocur_top
->data
) {
1878 xfs_agnumber_t agno
= XFS_FSB_TO_AGNO(mp
, s
);
1879 xfs_agblock_t agbno
= XFS_FSB_TO_AGBNO(mp
, s
);
1881 print_warning("cannot read %s block %u/%u (%llu)",
1882 typtab
[btype
].name
, agno
, agbno
, s
);
1883 if (stop_on_read_error
)
1889 if ((!obfuscate
&& !zero_stale_data
) ||
1890 o
>= mp
->m_dir_geo
->leafblk
) {
1891 ret
= write_buf(iocur_top
);
1895 process_dir_data_block(iocur_top
->data
, o
,
1896 last
== mp
->m_dir_geo
->fsbcount
);
1897 iocur_top
->need_crc
= 1;
1898 ret
= write_buf(iocur_top
);
1912 /* inode copy routines */
1914 process_bmbt_reclist(
1920 xfs_fileoff_t o
, op
= NULLFILEOFF
;
1922 xfs_filblks_t c
, cp
= NULLFILEOFF
;
1925 xfs_agnumber_t agno
;
1926 xfs_agblock_t agbno
;
1929 if (btype
== TYP_DATA
)
1932 convert_extent(&rp
[numrecs
- 1], &o
, &s
, &c
, &f
);
1935 for (i
= 0; i
< numrecs
; i
++, rp
++) {
1936 convert_extent(rp
, &o
, &s
, &c
, &f
);
1939 * ignore extents that are clearly bogus, and if a bogus
1940 * one is found, stop processing remaining extents
1942 if (i
> 0 && op
+ cp
> o
) {
1944 print_warning("bmap extent %d in %s ino %llu "
1945 "starts at %llu, previous extent "
1947 typtab
[btype
].name
, (long long)cur_ino
,
1952 if (c
> max_extent_size
) {
1954 * since we are only processing non-data extents,
1955 * large numbers of blocks in a metadata extent is
1956 * extremely rare and more than likely to be corrupt.
1959 print_warning("suspicious count %u in bmap "
1960 "extent %d in %s ino %llu", c
, i
,
1961 typtab
[btype
].name
, (long long)cur_ino
);
1968 agno
= XFS_FSB_TO_AGNO(mp
, s
);
1969 agbno
= XFS_FSB_TO_AGBNO(mp
, s
);
1971 if (!valid_bno(agno
, agbno
)) {
1973 print_warning("invalid block number %u/%u "
1974 "(%llu) in bmap extent %d in %s ino "
1975 "%llu", agno
, agbno
, s
, i
,
1976 typtab
[btype
].name
, (long long)cur_ino
);
1980 if (!valid_bno(agno
, agbno
+ c
- 1)) {
1982 print_warning("bmap extent %i in %s inode %llu "
1983 "overflows AG (end is %u/%u)", i
,
1984 typtab
[btype
].name
, (long long)cur_ino
,
1985 agno
, agbno
+ c
- 1);
1989 /* multi-extent blocks require special handling */
1990 if (btype
!= TYP_DIR2
|| mp
->m_dir_geo
->fsbcount
== 1) {
1991 error
= process_single_fsb_objects(o
, s
, c
, btype
, last
);
1993 error
= process_multi_fsb_objects(o
, s
, c
, btype
, last
);
2004 struct xfs_btree_block
*block
,
2005 xfs_agnumber_t agno
,
2006 xfs_agblock_t agbno
,
2009 void *arg
) /* ptr to itype */
2015 nrecs
= be16_to_cpu(block
->bb_numrecs
);
2018 if (nrecs
> mp
->m_bmap_dmxr
[0]) {
2020 print_warning("invalid numrecs (%u) in %s "
2021 "block %u/%u", nrecs
,
2022 typtab
[btype
].name
, agno
, agbno
);
2025 return process_bmbt_reclist(XFS_BMBT_REC_ADDR(mp
, block
, 1),
2026 nrecs
, *(typnm_t
*)arg
);
2029 if (nrecs
> mp
->m_bmap_dmxr
[1]) {
2031 print_warning("invalid numrecs (%u) in %s block %u/%u",
2032 nrecs
, typtab
[btype
].name
, agno
, agbno
);
2035 pp
= XFS_BMBT_PTR_ADDR(mp
, block
, 1, mp
->m_bmap_dmxr
[1]);
2036 for (i
= 0; i
< nrecs
; i
++) {
2040 ag
= XFS_FSB_TO_AGNO(mp
, get_unaligned_be64(&pp
[i
]));
2041 bno
= XFS_FSB_TO_AGBNO(mp
, get_unaligned_be64(&pp
[i
]));
2043 if (bno
== 0 || bno
> mp
->m_sb
.sb_agblocks
||
2044 ag
> mp
->m_sb
.sb_agcount
) {
2046 print_warning("invalid block number (%u/%u) "
2047 "in %s block %u/%u", ag
, bno
,
2048 typtab
[btype
].name
, agno
, agbno
);
2052 if (!scan_btree(ag
, bno
, level
, btype
, arg
, scanfunc_bmap
))
2063 xfs_bmdr_block_t
*dib
;
2072 whichfork
= (itype
== TYP_ATTR
) ? XFS_ATTR_FORK
: XFS_DATA_FORK
;
2073 btype
= (itype
== TYP_ATTR
) ? TYP_BMAPBTA
: TYP_BMAPBTD
;
2075 dib
= (xfs_bmdr_block_t
*)XFS_DFORK_PTR(dip
, whichfork
);
2076 level
= be16_to_cpu(dib
->bb_level
);
2077 nrecs
= be16_to_cpu(dib
->bb_numrecs
);
2079 if (level
> XFS_BM_MAXLEVELS(mp
, whichfork
)) {
2081 print_warning("invalid level (%u) in inode %lld %s "
2082 "root", level
, (long long)cur_ino
,
2083 typtab
[btype
].name
);
2088 return process_bmbt_reclist(XFS_BMDR_REC_ADDR(dib
, 1),
2092 maxrecs
= libxfs_bmdr_maxrecs(XFS_DFORK_SIZE(dip
, mp
, whichfork
), 0);
2093 if (nrecs
> maxrecs
) {
2095 print_warning("invalid numrecs (%u) in inode %lld %s "
2096 "root", nrecs
, (long long)cur_ino
,
2097 typtab
[btype
].name
);
2101 pp
= XFS_BMDR_PTR_ADDR(dib
, 1, maxrecs
);
2102 for (i
= 0; i
< nrecs
; i
++) {
2106 ag
= XFS_FSB_TO_AGNO(mp
, get_unaligned_be64(&pp
[i
]));
2107 bno
= XFS_FSB_TO_AGBNO(mp
, get_unaligned_be64(&pp
[i
]));
2109 if (bno
== 0 || bno
> mp
->m_sb
.sb_agblocks
||
2110 ag
> mp
->m_sb
.sb_agcount
) {
2112 print_warning("invalid block number (%u/%u) "
2113 "in inode %llu %s root", ag
,
2114 bno
, (long long)cur_ino
,
2115 typtab
[btype
].name
);
2119 if (!scan_btree(ag
, bno
, level
, btype
, &itype
, scanfunc_bmap
))
2134 whichfork
= (itype
== TYP_ATTR
) ? XFS_ATTR_FORK
: XFS_DATA_FORK
;
2136 nex
= XFS_DFORK_NEXTENTS(dip
, whichfork
);
2137 used
= nex
* sizeof(xfs_bmbt_rec_t
);
2138 if (nex
< 0 || used
> XFS_DFORK_SIZE(dip
, mp
, whichfork
)) {
2140 print_warning("bad number of extents %d in inode %lld",
2141 nex
, (long long)cur_ino
);
2145 /* Zero unused data fork past used extents */
2146 if (zero_stale_data
&& (used
< XFS_DFORK_SIZE(dip
, mp
, whichfork
)))
2147 memset(XFS_DFORK_PTR(dip
, whichfork
) + used
, 0,
2148 XFS_DFORK_SIZE(dip
, mp
, whichfork
) - used
);
2151 return process_bmbt_reclist((xfs_bmbt_rec_t
*)XFS_DFORK_PTR(dip
,
2152 whichfork
), nex
, itype
);
2160 switch (dip
->di_format
) {
2161 case XFS_DINODE_FMT_LOCAL
:
2162 if (obfuscate
|| zero_stale_data
)
2165 process_sf_dir(dip
);
2169 process_sf_symlink(dip
);
2176 case XFS_DINODE_FMT_EXTENTS
:
2177 return process_exinode(dip
, itype
);
2179 case XFS_DINODE_FMT_BTREE
:
2180 return process_btinode(dip
, itype
);
2186 * when we process the inode, we may change the data in the data and/or
2187 * attribute fork if they are in short form and we are obfuscating names.
2188 * In this case we need to recalculate the CRC of the inode, but we should
2189 * only do that if the CRC in the inode is good to begin with. If the crc
2190 * is not ok, we just leave it alone.
2194 xfs_agnumber_t agno
,
2200 bool crc_was_ok
= false; /* no recalc by default */
2201 bool need_new_crc
= false;
2204 cur_ino
= XFS_AGINO_TO_INO(mp
, agno
, agino
);
2206 /* we only care about crc recalculation if we will modify the inode. */
2207 if (obfuscate
|| zero_stale_data
) {
2208 crc_was_ok
= libxfs_verify_cksum((char *)dip
,
2209 mp
->m_sb
.sb_inodesize
,
2210 offsetof(struct xfs_dinode
, di_crc
));
2214 if (zero_stale_data
) {
2215 /* Zero all of the inode literal area */
2216 memset(XFS_DFORK_DPTR(dip
), 0,
2217 XFS_LITINO(mp
, dip
->di_version
));
2222 /* copy appropriate data fork metadata */
2223 switch (be16_to_cpu(dip
->di_mode
) & S_IFMT
) {
2225 success
= process_inode_data(dip
, TYP_DIR2
);
2226 if (dip
->di_format
== XFS_DINODE_FMT_LOCAL
)
2230 success
= process_inode_data(dip
, TYP_SYMLINK
);
2231 if (dip
->di_format
== XFS_DINODE_FMT_LOCAL
)
2235 success
= process_inode_data(dip
, TYP_DATA
);
2241 /* copy extended attributes if they exist and forkoff is valid */
2243 XFS_DFORK_DSIZE(dip
, mp
) < XFS_LITINO(mp
, dip
->di_version
)) {
2244 attr_data
.remote_val_count
= 0;
2245 switch (dip
->di_aformat
) {
2246 case XFS_DINODE_FMT_LOCAL
:
2248 if (obfuscate
|| zero_stale_data
)
2249 process_sf_attr(dip
);
2252 case XFS_DINODE_FMT_EXTENTS
:
2253 success
= process_exinode(dip
, TYP_ATTR
);
2256 case XFS_DINODE_FMT_BTREE
:
2257 success
= process_btinode(dip
, TYP_ATTR
);
2264 /* Heavy handed but low cost; just do it as a catch-all. */
2265 if (zero_stale_data
)
2268 if (crc_was_ok
&& need_new_crc
)
2269 libxfs_dinode_calc_crc(mp
, dip
);
2273 static __uint32_t inodes_copied
= 0;
2277 xfs_agnumber_t agno
,
2278 xfs_inobt_rec_t
*rp
)
2282 xfs_agblock_t agbno
;
2283 xfs_agblock_t end_agbno
;
2290 agino
= be32_to_cpu(rp
->ir_startino
);
2291 agbno
= XFS_AGINO_TO_AGBNO(mp
, agino
);
2292 end_agbno
= agbno
+ mp
->m_ialloc_blks
;
2293 off
= XFS_INO_TO_OFFSET(mp
, agino
);
2296 * If the fs supports sparse inode records, we must process inodes a
2297 * cluster at a time because that is the sparse allocation granularity.
2298 * Otherwise, we risk CRC corruption errors on reads of inode chunks.
2300 * Also make sure that that we don't process more than the single record
2301 * we've been passed (large block sizes can hold multiple inode chunks).
2303 if (xfs_sb_version_hassparseinodes(&mp
->m_sb
))
2304 blks_per_buf
= xfs_icluster_size_fsb(mp
);
2306 blks_per_buf
= mp
->m_ialloc_blks
;
2307 inodes_per_buf
= min(blks_per_buf
<< mp
->m_sb
.sb_inopblog
,
2308 XFS_INODES_PER_CHUNK
);
2311 * Sanity check that we only process a single buffer if ir_startino has
2312 * a buffer offset. A non-zero offset implies that the entire chunk lies
2315 if (off
&& inodes_per_buf
!= XFS_INODES_PER_CHUNK
) {
2316 print_warning("bad starting inode offset %d", off
);
2320 if (agino
== 0 || agino
== NULLAGINO
|| !valid_bno(agno
, agbno
) ||
2321 !valid_bno(agno
, XFS_AGINO_TO_AGBNO(mp
,
2322 agino
+ XFS_INODES_PER_CHUNK
- 1))) {
2324 print_warning("bad inode number %llu (%u/%u)",
2325 XFS_AGINO_TO_INO(mp
, agno
, agino
), agno
, agino
);
2330 * check for basic assumptions about inode chunks, and if any
2331 * assumptions fail, don't process the inode chunk.
2333 if ((mp
->m_sb
.sb_inopblock
<= XFS_INODES_PER_CHUNK
&& off
!= 0) ||
2334 (mp
->m_sb
.sb_inopblock
> XFS_INODES_PER_CHUNK
&&
2335 off
% XFS_INODES_PER_CHUNK
!= 0) ||
2336 (xfs_sb_version_hasalign(&mp
->m_sb
) &&
2337 mp
->m_sb
.sb_inoalignmt
!= 0 &&
2338 agbno
% mp
->m_sb
.sb_inoalignmt
!= 0)) {
2340 print_warning("badly aligned inode (start = %llu)",
2341 XFS_AGINO_TO_INO(mp
, agno
, agino
));
2347 while (agbno
< end_agbno
&& ioff
< XFS_INODES_PER_CHUNK
) {
2348 if (xfs_inobt_is_sparse_disk(rp
, ioff
))
2351 set_cur(&typtab
[TYP_INODE
], XFS_AGB_TO_DADDR(mp
, agno
, agbno
),
2352 XFS_FSB_TO_BB(mp
, blks_per_buf
), DB_RING_IGN
, NULL
);
2353 if (iocur_top
->data
== NULL
) {
2354 print_warning("cannot read inode block %u/%u",
2356 rval
= !stop_on_read_error
;
2360 for (i
= 0; i
< inodes_per_buf
; i
++) {
2363 dip
= (xfs_dinode_t
*)((char *)iocur_top
->data
+
2364 ((off
+ i
) << mp
->m_sb
.sb_inodelog
));
2366 /* process_inode handles free inodes, too */
2367 if (!process_inode(agno
, agino
+ ioff
+ i
, dip
,
2368 XFS_INOBT_IS_FREE_DISK(rp
, ioff
+ i
)))
2374 if (write_buf(iocur_top
))
2378 agbno
+= blks_per_buf
;
2379 ioff
+= inodes_per_buf
;
2383 print_progress("Copied %u of %u inodes (%u of %u AGs)",
2384 inodes_copied
, mp
->m_sb
.sb_icount
, agno
,
2385 mp
->m_sb
.sb_agcount
);
2394 struct xfs_btree_block
*block
,
2395 xfs_agnumber_t agno
,
2396 xfs_agblock_t agbno
,
2401 xfs_inobt_rec_t
*rp
;
2402 xfs_inobt_ptr_t
*pp
;
2405 int finobt
= *(int *) arg
;
2407 numrecs
= be16_to_cpu(block
->bb_numrecs
);
2410 if (numrecs
> mp
->m_inobt_mxr
[0]) {
2412 print_warning("invalid numrecs %d in %s "
2413 "block %u/%u", numrecs
,
2414 typtab
[btype
].name
, agno
, agbno
);
2415 numrecs
= mp
->m_inobt_mxr
[0];
2419 * Only copy the btree blocks for the finobt. The inobt scan
2420 * copies the inode chunks.
2425 rp
= XFS_INOBT_REC_ADDR(mp
, block
, 1);
2426 for (i
= 0; i
< numrecs
; i
++, rp
++) {
2427 if (!copy_inode_chunk(agno
, rp
))
2433 if (numrecs
> mp
->m_inobt_mxr
[1]) {
2435 print_warning("invalid numrecs %d in %s block %u/%u",
2436 numrecs
, typtab
[btype
].name
, agno
, agbno
);
2437 numrecs
= mp
->m_inobt_mxr
[1];
2440 pp
= XFS_INOBT_PTR_ADDR(mp
, block
, 1, mp
->m_inobt_mxr
[1]);
2441 for (i
= 0; i
< numrecs
; i
++) {
2442 if (!valid_bno(agno
, be32_to_cpu(pp
[i
]))) {
2444 print_warning("invalid block number (%u/%u) "
2445 "in %s block %u/%u",
2446 agno
, be32_to_cpu(pp
[i
]),
2447 typtab
[btype
].name
, agno
, agbno
);
2450 if (!scan_btree(agno
, be32_to_cpu(pp
[i
]), level
,
2451 btype
, arg
, scanfunc_ino
))
2459 xfs_agnumber_t agno
,
2466 root
= be32_to_cpu(agi
->agi_root
);
2467 levels
= be32_to_cpu(agi
->agi_level
);
2469 /* validate root and levels before processing the tree */
2470 if (root
== 0 || root
> mp
->m_sb
.sb_agblocks
) {
2472 print_warning("invalid block number (%u) in inobt "
2473 "root in agi %u", root
, agno
);
2476 if (levels
>= XFS_BTREE_MAXLEVELS
) {
2478 print_warning("invalid level (%u) in inobt root "
2479 "in agi %u", levels
, agno
);
2483 if (!scan_btree(agno
, root
, levels
, TYP_INOBT
, &finobt
, scanfunc_ino
))
2486 if (xfs_sb_version_hasfinobt(&mp
->m_sb
)) {
2487 root
= be32_to_cpu(agi
->agi_free_root
);
2488 levels
= be32_to_cpu(agi
->agi_free_level
);
2491 if (!scan_btree(agno
, root
, levels
, TYP_INOBT
, &finobt
,
2501 xfs_agnumber_t agno
)
2505 int stack_count
= 0;
2508 /* copy the superblock of the AG */
2511 set_cur(&typtab
[TYP_SB
], XFS_AG_DADDR(mp
, agno
, XFS_SB_DADDR
),
2512 XFS_FSS_TO_BB(mp
, 1), DB_RING_IGN
, NULL
);
2513 if (!iocur_top
->data
) {
2514 print_warning("cannot read superblock for ag %u", agno
);
2515 if (stop_on_read_error
)
2518 /* Replace any filesystem label with "L's" */
2520 struct xfs_sb
*sb
= iocur_top
->data
;
2521 memset(sb
->sb_fname
, 'L',
2522 min(strlen(sb
->sb_fname
), sizeof(sb
->sb_fname
)));
2523 iocur_top
->need_crc
= 1;
2525 if (write_buf(iocur_top
))
2529 /* copy the AG free space btree root */
2532 set_cur(&typtab
[TYP_AGF
], XFS_AG_DADDR(mp
, agno
, XFS_AGF_DADDR(mp
)),
2533 XFS_FSS_TO_BB(mp
, 1), DB_RING_IGN
, NULL
);
2534 agf
= iocur_top
->data
;
2535 if (iocur_top
->data
== NULL
) {
2536 print_warning("cannot read agf block for ag %u", agno
);
2537 if (stop_on_read_error
)
2540 if (write_buf(iocur_top
))
2544 /* copy the AG inode btree root */
2547 set_cur(&typtab
[TYP_AGI
], XFS_AG_DADDR(mp
, agno
, XFS_AGI_DADDR(mp
)),
2548 XFS_FSS_TO_BB(mp
, 1), DB_RING_IGN
, NULL
);
2549 agi
= iocur_top
->data
;
2550 if (iocur_top
->data
== NULL
) {
2551 print_warning("cannot read agi block for ag %u", agno
);
2552 if (stop_on_read_error
)
2555 if (write_buf(iocur_top
))
2559 /* copy the AG free list header */
2562 set_cur(&typtab
[TYP_AGFL
], XFS_AG_DADDR(mp
, agno
, XFS_AGFL_DADDR(mp
)),
2563 XFS_FSS_TO_BB(mp
, 1), DB_RING_IGN
, NULL
);
2564 if (iocur_top
->data
== NULL
) {
2565 print_warning("cannot read agfl block for ag %u", agno
);
2566 if (stop_on_read_error
)
2569 if (agf
&& zero_stale_data
) {
2570 /* Zero out unused bits of agfl */
2574 agfl_bno
= XFS_BUF_TO_AGFL_BNO(mp
, iocur_top
->bp
);
2575 i
= be32_to_cpu(agf
->agf_fllast
);
2578 if (++i
== XFS_AGFL_SIZE(mp
))
2580 if (i
== be32_to_cpu(agf
->agf_flfirst
))
2582 agfl_bno
[i
] = cpu_to_be32(NULLAGBLOCK
);
2584 iocur_top
->need_crc
= 1;
2586 if (write_buf(iocur_top
))
2590 /* copy AG free space btrees */
2593 print_progress("Copying free space trees of AG %u",
2595 if (!copy_free_bno_btree(agno
, agf
))
2597 if (!copy_free_cnt_btree(agno
, agf
))
2599 if (!copy_rmap_btree(agno
, agf
))
2601 if (!copy_refcount_btree(agno
, agf
))
2605 /* copy inode btrees and the inodes and their associated metadata */
2607 if (!copy_inodes(agno
, agi
))
2612 while (stack_count
--)
2622 xfs_agnumber_t agno
;
2623 xfs_agblock_t agbno
;
2628 if (ino
== 0 || ino
== NULLFSINO
)
2631 agno
= XFS_INO_TO_AGNO(mp
, ino
);
2632 agino
= XFS_INO_TO_AGINO(mp
, ino
);
2633 agbno
= XFS_AGINO_TO_AGBNO(mp
, agino
);
2634 offset
= XFS_AGINO_TO_OFFSET(mp
, agino
);
2636 if (agno
>= mp
->m_sb
.sb_agcount
|| agbno
>= mp
->m_sb
.sb_agblocks
||
2637 offset
>= mp
->m_sb
.sb_inopblock
) {
2639 print_warning("invalid %s inode number (%lld)",
2640 typtab
[itype
].name
, (long long)ino
);
2645 set_cur(&typtab
[TYP_INODE
], XFS_AGB_TO_DADDR(mp
, agno
, agbno
),
2646 blkbb
, DB_RING_IGN
, NULL
);
2647 if (iocur_top
->data
== NULL
) {
2648 print_warning("cannot read %s inode %lld",
2649 typtab
[itype
].name
, (long long)ino
);
2650 rval
= !stop_on_read_error
;
2653 off_cur(offset
<< mp
->m_sb
.sb_inodelog
, mp
->m_sb
.sb_inodesize
);
2656 rval
= process_inode_data(iocur_top
->data
, itype
);
2664 copy_sb_inodes(void)
2666 if (!copy_ino(mp
->m_sb
.sb_rbmino
, TYP_RTBITMAP
))
2669 if (!copy_ino(mp
->m_sb
.sb_rsumino
, TYP_RTSUMMARY
))
2672 if (!copy_ino(mp
->m_sb
.sb_uquotino
, TYP_DQBLK
))
2675 if (!copy_ino(mp
->m_sb
.sb_gquotino
, TYP_DQBLK
))
2678 return copy_ino(mp
->m_sb
.sb_pquotino
, TYP_DQBLK
);
2686 xfs_daddr_t logstart
;
2689 int cycle
= XLOG_INIT_CYCLE
;
2692 print_progress("Copying log");
2695 set_cur(&typtab
[TYP_LOG
], XFS_FSB_TO_DADDR(mp
, mp
->m_sb
.sb_logstart
),
2696 mp
->m_sb
.sb_logblocks
* blkbb
, DB_RING_IGN
, NULL
);
2697 if (iocur_top
->data
== NULL
) {
2699 print_warning("cannot read log");
2700 return !stop_on_read_error
;
2703 /* If not obfuscating or zeroing, just copy the log as it is */
2704 if (!obfuscate
&& !zero_stale_data
)
2707 dirty
= xlog_is_dirty(mp
, &log
, &x
, 0);
2711 /* clear out a clean log */
2713 print_progress("Zeroing clean log");
2715 logstart
= XFS_FSB_TO_DADDR(mp
, mp
->m_sb
.sb_logstart
);
2716 logblocks
= XFS_FSB_TO_BB(mp
, mp
->m_sb
.sb_logblocks
);
2717 logversion
= xfs_sb_version_haslogv2(&mp
->m_sb
) ? 2 : 1;
2718 if (xfs_sb_version_hascrc(&mp
->m_sb
))
2719 cycle
= log
.l_curr_cycle
+ 1;
2721 libxfs_log_clear(NULL
, iocur_top
->data
, logstart
, logblocks
,
2722 &mp
->m_sb
.sb_uuid
, logversion
,
2723 mp
->m_sb
.sb_logsunit
, XLOG_FMT
, cycle
, true);
2726 /* keep the dirty log */
2729 _("Filesystem log is dirty; image will contain unobfuscated metadata in log."));
2732 /* log detection error */
2735 _("Could not discern log; image will contain unobfuscated metadata in log."));
2740 return !write_buf(iocur_top
);
2748 xfs_agnumber_t agno
;
2756 stop_on_read_error
= 0;
2758 if (mp
->m_sb
.sb_magicnum
!= XFS_SB_MAGIC
) {
2759 print_warning("bad superblock magic number %x, giving up",
2760 mp
->m_sb
.sb_magicnum
);
2765 * on load, we sanity-checked agcount and possibly set to 1
2766 * if it was corrupted and large.
2768 if (mp
->m_sb
.sb_agcount
== 1 &&
2769 XFS_MAX_DBLOCKS(&mp
->m_sb
) < mp
->m_sb
.sb_dblocks
) {
2770 print_warning("truncated agcount, giving up");
2774 while ((c
= getopt(argc
, argv
, "aegm:ow")) != EOF
) {
2777 zero_stale_data
= 0;
2780 stop_on_read_error
= 1;
2786 max_extent_size
= (int)strtol(optarg
, &p
, 0);
2787 if (*p
!= '\0' || max_extent_size
<= 0) {
2788 print_warning("bad max extent size %s",
2800 print_warning("bad option for metadump command");
2805 if (optind
!= argc
- 1) {
2806 print_warning("too few options for metadump (no filename given)");
2810 metablock
= (xfs_metablock_t
*)calloc(BBSIZE
+ 1, BBSIZE
);
2811 if (metablock
== NULL
) {
2812 print_warning("memory allocation failure");
2815 metablock
->mb_blocklog
= BBSHIFT
;
2816 metablock
->mb_magic
= cpu_to_be32(XFS_MD_MAGIC
);
2818 block_index
= (__be64
*)((char *)metablock
+ sizeof(xfs_metablock_t
));
2819 block_buffer
= (char *)metablock
+ BBSIZE
;
2820 num_indices
= (BBSIZE
- sizeof(xfs_metablock_t
)) / sizeof(__be64
);
2823 * A metadump block can hold at most num_indices of BBSIZE sectors;
2824 * do not try to dump a filesystem with a sector size which does not
2825 * fit within num_indices (i.e. within a single metablock).
2827 if (mp
->m_sb
.sb_sectsize
> num_indices
* BBSIZE
) {
2828 print_warning("Cannot dump filesystem with sector size %u",
2829 mp
->m_sb
.sb_sectsize
);
2835 start_iocur_sp
= iocur_sp
;
2837 if (strcmp(argv
[optind
], "-") == 0) {
2838 if (isatty(fileno(stdout
))) {
2839 print_warning("cannot write to a terminal");
2845 outf
= fopen(argv
[optind
], "wb");
2847 print_warning("cannot create dump file");
2855 for (agno
= 0; agno
< mp
->m_sb
.sb_agcount
; agno
++) {
2856 if (!scan_ag(agno
)) {
2862 /* copy realtime and quota inode contents */
2864 exitcode
= !copy_sb_inodes();
2866 /* copy log if it's internal */
2867 if ((mp
->m_sb
.sb_logstart
!= 0) && !exitcode
)
2868 exitcode
= !copy_log();
2870 /* write the remaining index */
2872 exitcode
= write_index() < 0;
2874 if (progress_since_warning
)
2875 fputc('\n', (outf
== stdout
) ? stderr
: stdout
);
2880 /* cleanup iocur stack */
2881 while (iocur_sp
> start_iocur_sp
)