 * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA 94043, or:
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
39 #include "err_protos.h"
 * validates inode block or chunk, returns # of good inodes
 * the dinodes are verified using verify_uncertain_dinode() which
 * means only the basic inode info is checked, no fork checks.
51 check_aginode_block(xfs_mount_t
*mp
,
62 * it's ok to read these possible inode blocks in one at
63 * a time because they don't belong to known inodes (if
64 * they did, we'd know about them courtesy of the incore inode
65 * tree and we wouldn't be here and we stale the buffers out
66 * so no one else will overlap them.
68 bp
= libxfs_readbuf(mp
->m_dev
, XFS_AGB_TO_DADDR(mp
, agno
, agbno
),
69 XFS_FSB_TO_BB(mp
, 1), 0);
71 do_warn(_("cannot read agbno (%u/%u), disk block %lld\n"), agno
,
72 agbno
, (xfs_daddr_t
)XFS_AGB_TO_DADDR(mp
, agno
, agbno
));
76 for (i
= 0; i
< mp
->m_sb
.sb_inopblock
; i
++) {
77 dino_p
= XFS_MAKE_IPTR(mp
, bp
, i
);
78 if (!verify_uncertain_dinode(mp
, dino_p
, agno
,
79 XFS_OFFBNO_TO_AGINO(mp
, agbno
, i
)))
88 check_inode_block(xfs_mount_t
*mp
,
91 return(check_aginode_block(mp
, XFS_INO_TO_AGNO(mp
, ino
),
92 XFS_INO_TO_AGBNO(mp
, ino
)));
 * tries to establish if the inode really exists in a valid
 * inode chunk.  returns number of new inodes if things are good
 * and 0 if bad.  start is the start of the discovered inode chunk.
 * routine assumes that ino is a legal inode number
 * (verified by verify_inum()).  If the inode chunk turns out
 * to be good, this routine will put the inode chunk into
 * the good inode chunk tree if required.
 *
 * the verify_(ag)inode* family of routines are utility
 * routines called by check_uncertain_aginodes() and
 * process_uncertain_aginodes().
109 verify_inode_chunk(xfs_mount_t
*mp
,
111 xfs_ino_t
*start_ino
)
115 xfs_agino_t start_agino
;
117 xfs_agblock_t start_agbno
= 0;
118 xfs_agblock_t end_agbno
;
119 xfs_agblock_t max_agbno
;
120 xfs_agblock_t cur_agbno
;
121 xfs_agblock_t chunk_start_agbno
;
122 xfs_agblock_t chunk_stop_agbno
;
123 ino_tree_node_t
*irec_before_p
= NULL
;
124 ino_tree_node_t
*irec_after_p
= NULL
;
125 ino_tree_node_t
*irec_p
;
126 ino_tree_node_t
*irec_next_p
;
134 agno
= XFS_INO_TO_AGNO(mp
, ino
);
135 agino
= XFS_INO_TO_AGINO(mp
, ino
);
136 agbno
= XFS_INO_TO_AGBNO(mp
, ino
);
137 *start_ino
= NULLFSINO
;
139 ASSERT(XFS_IALLOC_BLOCKS(mp
) > 0);
141 if (agno
== mp
->m_sb
.sb_agcount
- 1)
142 max_agbno
= mp
->m_sb
.sb_dblocks
-
143 (xfs_drfsbno_t
) mp
->m_sb
.sb_agblocks
* agno
;
145 max_agbno
= mp
->m_sb
.sb_agblocks
;
148 * is the inode beyond the end of the AG?
150 if (agbno
>= max_agbno
)
154 * check for the easy case, inodes per block >= XFS_INODES_PER_CHUNK
155 * (multiple chunks per block)
157 if (XFS_IALLOC_BLOCKS(mp
) == 1) {
158 if (agbno
> max_agbno
)
161 if (check_inode_block(mp
, ino
) == 0)
164 switch (state
= get_agbno_state(mp
, agno
, agbno
)) {
167 _("uncertain inode block %d/%d already known\n"),
173 set_agbno_state(mp
, agno
, agbno
, XR_E_INO
);
180 * if block is already claimed, forget it.
183 _("inode block %d/%d multiply claimed, (state %d)\n"),
185 set_agbno_state(mp
, agno
, agbno
, XR_E_MULT
);
189 _("inode block %d/%d bad state, (state %d)\n"),
191 set_agbno_state(mp
, agno
, agbno
, XR_E_INO
);
195 start_agino
= XFS_OFFBNO_TO_AGINO(mp
, agbno
, 0);
196 *start_ino
= XFS_AGINO_TO_INO(mp
, agno
, start_agino
);
199 * put new inode record(s) into inode tree
201 for (j
= 0; j
< chunks_pblock
; j
++) {
202 if ((irec_p
= find_inode_rec(agno
, start_agino
))
204 irec_p
= set_inode_free_alloc(agno
,
206 for (i
= 1; i
< XFS_INODES_PER_CHUNK
; i
++)
207 set_inode_free(irec_p
, i
);
209 if (start_agino
<= agino
&& agino
<
210 start_agino
+ XFS_INODES_PER_CHUNK
)
211 set_inode_used(irec_p
, agino
- start_agino
);
213 start_agino
+= XFS_INODES_PER_CHUNK
;
214 ino_cnt
+= XFS_INODES_PER_CHUNK
;
218 } else if (fs_aligned_inodes
) {
220 * next easy case -- aligned inode filesystem.
221 * just check out the chunk
223 start_agbno
= rounddown(XFS_INO_TO_AGBNO(mp
, ino
),
225 end_agbno
= start_agbno
+ XFS_IALLOC_BLOCKS(mp
);
228 * if this fs has aligned inodes but the end of the
229 * chunk is beyond the end of the ag, this is a bad
232 if (end_agbno
> max_agbno
)
236 * check out all blocks in chunk
239 for (cur_agbno
= start_agbno
; cur_agbno
< end_agbno
;
241 ino_cnt
+= check_aginode_block(mp
, agno
, cur_agbno
);
245 * if we lose either 2 blocks worth of inodes or >25% of
246 * the chunk, just forget it.
248 if (ino_cnt
< XFS_INODES_PER_CHUNK
- 2 * mp
->m_sb
.sb_inopblock
249 || ino_cnt
< XFS_INODES_PER_CHUNK
- 16)
253 * ok, put the record into the tree, if no conflict.
255 if (find_uncertain_inode_rec(agno
,
256 XFS_OFFBNO_TO_AGINO(mp
, start_agbno
, 0)))
259 start_agino
= XFS_OFFBNO_TO_AGINO(mp
, start_agbno
, 0);
260 *start_ino
= XFS_AGINO_TO_INO(mp
, agno
, start_agino
);
262 irec_p
= set_inode_free_alloc(agno
,
263 XFS_OFFBNO_TO_AGINO(mp
, start_agbno
, 0));
265 for (i
= 1; i
< XFS_INODES_PER_CHUNK
; i
++)
266 set_inode_free(irec_p
, i
);
268 ASSERT(start_agino
<= agino
&&
269 start_agino
+ XFS_INODES_PER_CHUNK
> agino
);
271 set_inode_used(irec_p
, agino
- start_agino
);
273 return(XFS_INODES_PER_CHUNK
);
277 * hard case -- pre-6.3 filesystem.
278 * set default start/end agbnos and ensure agbnos are legal.
279 * we're setting a range [start_agbno, end_agbno) such that
280 * a discovered inode chunk completely within that range
281 * would include the inode passed into us.
283 if (XFS_IALLOC_BLOCKS(mp
) > 1) {
284 if (agino
> XFS_IALLOC_INODES(mp
))
285 start_agbno
= agbno
- XFS_IALLOC_BLOCKS(mp
) + 1;
290 end_agbno
= agbno
+ XFS_IALLOC_BLOCKS(mp
);
292 if (end_agbno
> max_agbno
)
293 end_agbno
= max_agbno
;
296 * search tree for known inodes within +/- 1 inode chunk range
298 irec_before_p
= irec_after_p
= NULL
;
300 find_inode_rec_range(agno
, XFS_OFFBNO_TO_AGINO(mp
, start_agbno
, 0),
301 XFS_OFFBNO_TO_AGINO(mp
, end_agbno
, mp
->m_sb
.sb_inopblock
- 1),
302 &irec_before_p
, &irec_after_p
);
305 * if we have known inode chunks in our search range, establish
306 * their start and end-points to tighten our search range. range
307 * is [start, end) -- e.g. max/end agbno is one beyond the
308 * last block to be examined. the avl routines work this way.
312 * only one inode record in the range, move one boundary in
314 if (irec_before_p
== irec_after_p
) {
315 if (irec_before_p
->ino_startnum
< agino
)
316 start_agbno
= XFS_AGINO_TO_AGBNO(mp
,
317 irec_before_p
->ino_startnum
+
318 XFS_INODES_PER_CHUNK
);
320 end_agbno
= XFS_AGINO_TO_AGBNO(mp
,
321 irec_before_p
->ino_startnum
);
325 * find the start of the gap in the search range (which
326 * should contain our unknown inode). if the only irec
327 * within +/- 1 chunks starts after the inode we're
328 * looking for, skip this stuff since the end_agbno
329 * of the range has already been trimmed in to not
332 if (irec_before_p
->ino_startnum
< agino
) {
333 irec_p
= irec_before_p
;
334 irec_next_p
= next_ino_rec(irec_p
);
336 while(irec_next_p
!= NULL
&&
337 irec_p
->ino_startnum
+ XFS_INODES_PER_CHUNK
==
338 irec_next_p
->ino_startnum
) {
339 irec_p
= irec_next_p
;
340 irec_next_p
= next_ino_rec(irec_next_p
);
343 start_agbno
= XFS_AGINO_TO_AGBNO(mp
,
344 irec_p
->ino_startnum
) +
345 XFS_IALLOC_BLOCKS(mp
);
348 * we know that the inode we're trying to verify isn't
349 * in an inode chunk so the next ino_rec marks the end
350 * of the gap -- is it within the search range?
352 if (irec_next_p
!= NULL
&&
353 agino
+ XFS_IALLOC_INODES(mp
) >=
354 irec_next_p
->ino_startnum
)
355 end_agbno
= XFS_AGINO_TO_AGBNO(mp
,
356 irec_next_p
->ino_startnum
);
359 ASSERT(start_agbno
< end_agbno
);
363 * if the gap is too small to contain a chunk, we lose.
364 * this means that inode chunks known to be good surround
365 * the inode in question and that the space between them
366 * is too small for a legal inode chunk
368 if (end_agbno
- start_agbno
< XFS_IALLOC_BLOCKS(mp
))
372 * now grunge around the disk, start at the inode block and
373 * go in each direction until you hit a non-inode block or
374 * run into a range boundary. A non-inode block is block
375 * with *no* good inodes in it. Unfortunately, we can't
376 * co-opt bad blocks into inode chunks (which might take
377 * care of disk blocks that turn into zeroes) because the
378 * filesystem could very well allocate two inode chunks
379 * with a one block file in between and we'd zap the file.
380 * We're better off just losing the rest of the
381 * inode chunk instead.
383 for (cur_agbno
= agbno
; cur_agbno
>= start_agbno
; cur_agbno
--) {
385 * if the block has no inodes, it's a bad block so
386 * break out now without decrementing cur_agbno so
387 * chunk start blockno will be set to the last good block
389 if (!(irec_cnt
= check_aginode_block(mp
, agno
, cur_agbno
)))
394 chunk_start_agbno
= cur_agbno
+ 1;
396 for (cur_agbno
= agbno
+ 1; cur_agbno
< end_agbno
; cur_agbno
++) {
398 * if the block has no inodes, it's a bad block so
399 * break out now without incrementing cur_agbno so
400 * chunk start blockno will be set to the block
401 * immediately after the last good block.
403 if (!(irec_cnt
= check_aginode_block(mp
, agno
, cur_agbno
)))
408 chunk_stop_agbno
= cur_agbno
;
410 num_blks
= chunk_stop_agbno
- chunk_start_agbno
;
412 if (num_blks
< XFS_IALLOC_BLOCKS(mp
) || ino_cnt
== 0)
416 * XXX - later - if the entire range is selected and they're all
417 * good inodes, keep searching in either direction.
418 * until you the range of inodes end, then split into chunks
419 * for now, just take one chunk's worth starting at the lowest
420 * possible point and hopefully we'll pick the rest up later.
422 * XXX - if we were going to fix up an inode chunk for
423 * any good inodes in the chunk, this is where we would
424 * do it. For now, keep it simple and lose the rest of
428 if (num_blks
% XFS_IALLOC_BLOCKS(mp
) != 0) {
429 num_blks
= rounddown(num_blks
, XFS_IALLOC_BLOCKS(mp
));
430 chunk_stop_agbno
= chunk_start_agbno
+ num_blks
;
434 * ok, we've got a candidate inode chunk. now we have to
435 * verify that we aren't trying to use blocks that are already
436 * in use. If so, mark them as multiply claimed since odds
437 * are very low that we found this chunk by stumbling across
438 * user data -- we're probably here as a result of a directory
439 * entry or an iunlinked pointer
441 for (j
= 0, cur_agbno
= chunk_start_agbno
;
442 cur_agbno
< chunk_stop_agbno
; cur_agbno
++) {
443 switch (state
= get_agbno_state(mp
, agno
, cur_agbno
)) {
449 _("inode block %d/%d multiply claimed, (state %d)\n"),
450 agno
, cur_agbno
, state
);
451 set_agbno_state(mp
, agno
, cur_agbno
, XR_E_MULT
);
456 _("uncertain inode block overlap, agbno = %d, ino = %llu\n"),
468 * ok, chunk is good. put the record into the tree if required,
469 * and fill in the bitmap. All inodes will be marked as "free"
470 * except for the one that led us to discover the chunk. That's
471 * ok because we'll override the free setting later if the
472 * contents of the inode indicate it's in use.
474 start_agino
= XFS_OFFBNO_TO_AGINO(mp
, chunk_start_agbno
, 0);
475 *start_ino
= XFS_AGINO_TO_INO(mp
, agno
, start_agino
);
477 ASSERT(find_inode_rec(agno
, start_agino
) == NULL
);
479 irec_p
= set_inode_free_alloc(agno
, start_agino
);
480 for (i
= 1; i
< XFS_INODES_PER_CHUNK
; i
++)
481 set_inode_free(irec_p
, i
);
483 ASSERT(start_agino
<= agino
&&
484 start_agino
+ XFS_INODES_PER_CHUNK
> agino
);
486 set_inode_used(irec_p
, agino
- start_agino
);
488 for (cur_agbno
= chunk_start_agbno
;
489 cur_agbno
< chunk_stop_agbno
; cur_agbno
++) {
490 switch (state
= get_agbno_state(mp
, agno
, cur_agbno
)) {
493 _("uncertain inode block %llu already known\n"),
494 XFS_AGB_TO_FSB(mp
, agno
, cur_agbno
));
499 set_agbno_state(mp
, agno
, cur_agbno
, XR_E_INO
);
506 _("inode block %d/%d multiply claimed, (state %d)\n"),
507 agno
, cur_agbno
, state
);
511 _("inode block %d/%d bad state, (state %d)\n"),
512 agno
, cur_agbno
, state
);
513 set_agbno_state(mp
, agno
, cur_agbno
, XR_E_INO
);
 * same as above only for ag inode chunks
525 verify_aginode_chunk(xfs_mount_t
*mp
,
528 xfs_agino_t
*agino_start
)
533 res
= verify_inode_chunk(mp
, XFS_AGINO_TO_INO(mp
, agno
, agino
), &ino
);
536 *agino_start
= XFS_INO_TO_AGINO(mp
, ino
);
538 *agino_start
= NULLAGINO
;
 * this does the same as the two above only it returns a pointer
 * to the inode record in the good inode tree
548 verify_aginode_chunk_irec(xfs_mount_t
*mp
,
552 xfs_agino_t start_agino
;
553 ino_tree_node_t
*irec
= NULL
;
555 if (verify_aginode_chunk(mp
, agno
, agino
, &start_agino
))
556 irec
= find_inode_rec(agno
, start_agino
);
 * processes an inode allocation chunk/block, returns 1 on I/O errors.
 * *bogus is set to 1 if the entire set of inodes is bad.
571 process_inode_chunk(xfs_mount_t
*mp
, xfs_agnumber_t agno
, int num_inos
,
572 ino_tree_node_t
*first_irec
, int ino_discovery
,
573 int check_dups
, int extra_attr_check
, int *bogus
)
576 ino_tree_node_t
*ino_rec
;
593 ASSERT(first_irec
!= NULL
);
594 ASSERT(XFS_AGINO_TO_OFFSET(mp
, first_irec
->ino_startnum
) == 0);
597 ASSERT(XFS_IALLOC_BLOCKS(mp
) > 0);
600 * get all blocks required to read in this chunk (may wind up
601 * having to process more chunks in a multi-chunk per block fs)
603 agbno
= XFS_AGINO_TO_AGBNO(mp
, first_irec
->ino_startnum
);
605 bp
= libxfs_readbuf(mp
->m_dev
, XFS_AGB_TO_DADDR(mp
, agno
, agbno
),
606 XFS_FSB_TO_BB(mp
, XFS_IALLOC_BLOCKS(mp
)), 0);
608 do_warn(_("cannot read inode %llu, disk block %lld, cnt %d\n"),
609 XFS_AGINO_TO_INO(mp
, agno
, first_irec
->ino_startnum
),
610 XFS_AGB_TO_DADDR(mp
, agno
, agbno
),
611 (int)XFS_FSB_TO_BB(mp
, XFS_IALLOC_BLOCKS(mp
)));
618 ino_rec
= first_irec
;
620 * initialize counters
629 * verify inode chunk if necessary
636 dino
= XFS_MAKE_IPTR(mp
, bp
, icnt
);
637 agino
= irec_offset
+ ino_rec
->ino_startnum
;
640 * we always think that the root and realtime
641 * inodes are verified even though we may have
642 * to reset them later to keep from losing the
643 * chunk that they're in
645 if (verify_dinode(mp
, dino
, agno
, agino
) == 0 ||
647 (mp
->m_sb
.sb_rootino
== agino
||
648 mp
->m_sb
.sb_rsumino
== agino
||
649 mp
->m_sb
.sb_rbmino
== agino
)))
655 if (icnt
== XFS_IALLOC_INODES(mp
) &&
656 irec_offset
== XFS_INODES_PER_CHUNK
) {
658 * done! - finished up irec and block
664 } else if (irec_offset
== XFS_INODES_PER_CHUNK
) {
666 * get new irec (multiple chunks per block fs)
668 ino_rec
= next_ino_rec(ino_rec
);
669 ASSERT(ino_rec
->ino_startnum
== agino
+ 1);
675 * if chunk/block is bad, blow it off. the inode records
676 * will be deleted by the caller if appropriate.
680 if (!done
) /* already free'd */
686 * reset irec and counters
688 ino_rec
= first_irec
;
696 /* nathans TODO ... memory leak here?: */
701 bp
= libxfs_readbuf(mp
->m_dev
,
702 XFS_AGB_TO_DADDR(mp
, agno
, agbno
),
703 XFS_FSB_TO_BB(mp
, XFS_IALLOC_BLOCKS(mp
)), 0);
705 do_warn(_("can't read inode %llu, disk block %lld, "
706 "cnt %d\n"), XFS_AGINO_TO_INO(mp
, agno
, agino
),
707 XFS_AGB_TO_DADDR(mp
, agno
, agbno
),
708 (int)XFS_FSB_TO_BB(mp
, XFS_IALLOC_BLOCKS(mp
)));
714 * mark block as an inode block in the incore bitmap
716 switch (state
= get_agbno_state(mp
, agno
, agbno
)) {
717 case XR_E_INO
: /* already marked */
722 set_agbno_state(mp
, agno
, agbno
, XR_E_INO
);
725 do_error(_("bad state in block map %d\n"), state
);
728 set_agbno_state(mp
, agno
, agbno
, XR_E_MULT
);
729 do_warn(_("inode block %llu multiply claimed, state was %d\n"),
730 XFS_AGB_TO_FSB(mp
, agno
, agbno
), state
);
738 dino
= XFS_MAKE_IPTR(mp
, bp
, icnt
);
739 agino
= irec_offset
+ ino_rec
->ino_startnum
;
745 status
= process_dinode(mp
, dino
, agno
, agino
,
746 is_inode_free(ino_rec
, irec_offset
),
747 &ino_dirty
, &cleared
, &is_used
,
748 ino_discovery
, check_dups
,
749 extra_attr_check
, &isa_dir
, &parent
);
751 ASSERT(is_used
!= 3);
755 * XXX - if we want to try and keep
756 * track of whether we need to bang on
757 * the inode maps (instead of just
758 * blindly reconstructing them like
759 * we do now, this is where to start.
762 if (is_inode_free(ino_rec
, irec_offset
)) {
763 if (verbose
|| no_modify
||
764 XFS_AGINO_TO_INO(mp
, agno
, agino
) !=
766 do_warn(_("imap claims in-use inode "
768 XFS_AGINO_TO_INO(mp
, agno
,
772 if (verbose
|| (!no_modify
&&
773 XFS_AGINO_TO_INO(mp
, agno
, agino
) !=
775 do_warn(_("correcting imap\n"));
777 do_warn(_("would correct imap\n"));
779 set_inode_used(ino_rec
, irec_offset
);
781 set_inode_free(ino_rec
, irec_offset
);
785 * if we lose the root inode, or it turns into
786 * a non-directory, that allows us to double-check
787 * later whether or not we need to reinitialize it.
790 set_inode_isadir(ino_rec
, irec_offset
);
792 * we always set the parent but
793 * we may as well wait until
794 * phase 4 (no inode discovery)
795 * because the parent info will
798 if (!ino_discovery
) {
800 set_inode_parent(ino_rec
, irec_offset
, parent
);
802 get_inode_parent(ino_rec
, irec_offset
));
805 clear_inode_isadir(ino_rec
, irec_offset
);
809 if (mp
->m_sb
.sb_rootino
==
810 XFS_AGINO_TO_INO(mp
, agno
, agino
)) {
814 do_warn(_("cleared root inode %llu\n"),
815 XFS_AGINO_TO_INO(mp
, agno
,
818 do_warn(_("would clear root inode %llu\n"),
819 XFS_AGINO_TO_INO(mp
, agno
,
822 } else if (mp
->m_sb
.sb_rbmino
==
823 XFS_AGINO_TO_INO(mp
, agno
, agino
)) {
827 do_warn(_("cleared realtime bitmap "
829 XFS_AGINO_TO_INO(mp
, agno
,
832 do_warn(_("would clear realtime bitmap "
834 XFS_AGINO_TO_INO(mp
, agno
,
837 } else if (mp
->m_sb
.sb_rsumino
==
838 XFS_AGINO_TO_INO(mp
, agno
, agino
)) {
842 do_warn(_("cleared realtime summary "
844 XFS_AGINO_TO_INO(mp
, agno
,
847 do_warn(_("would clear realtime summary"
849 XFS_AGINO_TO_INO(mp
, agno
,
852 } else if (!no_modify
) {
853 do_warn(_("cleared inode %llu\n"),
854 XFS_AGINO_TO_INO(mp
, agno
, agino
));
856 do_warn(_("would have cleared inode %llu\n"),
857 XFS_AGINO_TO_INO(mp
, agno
, agino
));
865 if (icnt
== XFS_IALLOC_INODES(mp
) &&
866 irec_offset
== XFS_INODES_PER_CHUNK
) {
868 * done! - finished up irec and block simultaneously
870 if (dirty
&& !no_modify
)
871 libxfs_writebuf(bp
, 0);
877 } else if (ibuf_offset
== mp
->m_sb
.sb_inopblock
) {
879 * mark block as an inode block in the incore bitmap
880 * and reset inode buffer offset counter
885 switch (state
= get_agbno_state(mp
, agno
, agbno
)) {
886 case XR_E_INO
: /* already marked */
891 set_agbno_state(mp
, agno
, agbno
, XR_E_INO
);
894 do_error(_("bad state in block map %d\n"),
898 set_agbno_state(mp
, agno
, agbno
, XR_E_MULT
);
899 do_warn(_("inode block %llu multiply claimed, "
901 XFS_AGB_TO_FSB(mp
, agno
, agbno
), state
);
905 } else if (irec_offset
== XFS_INODES_PER_CHUNK
) {
907 * get new irec (multiple chunks per block fs)
909 ino_rec
= next_ino_rec(ino_rec
);
910 ASSERT(ino_rec
->ino_startnum
== agino
+ 1);
 * check all inodes mentioned in the ag's incore inode maps.
 * the map may be incomplete.  If so, we'll catch the missing
 * inodes (hopefully) when we traverse the directory tree.
 * check_dirs is set to 1 if directory inodes should be
 * processed for internal consistency, parent setting and
 * discovery of unknown inodes.  this only happens
 * in phase 3.  check_dups is set to 1 if we're looking for
 * inodes that reference duplicate blocks so we can trash
 * the inode right then and there.  this is set only in
 * phase 4 after we've run through and set the bitmap once.
930 process_aginodes(xfs_mount_t
*mp
, xfs_agnumber_t agno
,
931 int ino_discovery
, int check_dups
, int extra_attr_check
)
934 ino_tree_node_t
*ino_rec
, *first_ino_rec
, *prev_ino_rec
;
936 first_ino_rec
= ino_rec
= findfirst_inode_rec(agno
);
937 while (ino_rec
!= NULL
) {
939 * paranoia - step through inode records until we step
940 * through a full allocation of inodes. this could
941 * be an issue in big-block filesystems where a block
942 * can hold more than one inode chunk. make sure to
943 * grab the record corresponding to the beginning of
944 * the next block before we call the processing routines.
946 num_inos
= XFS_INODES_PER_CHUNK
;
947 while (num_inos
< XFS_IALLOC_INODES(mp
) && ino_rec
!= NULL
) {
948 ASSERT(ino_rec
!= NULL
);
950 * inodes chunks will always be aligned and sized
953 if ((ino_rec
= next_ino_rec(ino_rec
)) != NULL
)
954 num_inos
+= XFS_INODES_PER_CHUNK
;
957 ASSERT(num_inos
== XFS_IALLOC_INODES(mp
));
959 if (process_inode_chunk(mp
, agno
, num_inos
, first_ino_rec
,
960 ino_discovery
, check_dups
, extra_attr_check
, &bogus
)) {
961 /* XXX - i/o error, we've got a problem */
966 first_ino_rec
= ino_rec
= next_ino_rec(ino_rec
);
969 * inodes pointed to by this record are
970 * completely bogus, blow the records for
972 * the inode block(s) will get reclaimed
973 * in phase 4 when the block map is
974 * reconstructed after inodes claiming
975 * duplicate blocks are deleted.
978 ino_rec
= first_ino_rec
;
979 while (num_inos
< XFS_IALLOC_INODES(mp
) &&
981 prev_ino_rec
= ino_rec
;
983 if ((ino_rec
= next_ino_rec(ino_rec
)) != NULL
)
984 num_inos
+= XFS_INODES_PER_CHUNK
;
986 get_inode_rec(agno
, prev_ino_rec
);
987 free_inode_rec(agno
, prev_ino_rec
);
990 first_ino_rec
= ino_rec
;
 * verify the uncertain inode list for an ag.
 * Good inodes get moved into the good inode tree.
 * returns 0 if there are no uncertain inode records to
 * be processed, 1 otherwise.  This routine destroys the
 * entire uncertain inode tree for the ag as a side-effect.
1003 check_uncertain_aginodes(xfs_mount_t
*mp
, xfs_agnumber_t agno
)
1005 ino_tree_node_t
*irec
;
1006 ino_tree_node_t
*nrec
;
1015 clear_uncertain_ino_cache(agno
);
1017 if ((irec
= findfirst_uncertain_inode_rec(agno
)) == NULL
)
1021 * the trick here is to find a contiguous range
1022 * of inodes, make sure that it doesn't overlap
1023 * with a known to exist chunk, and then make
1024 * sure it is a number of entire chunks.
1025 * we check on-disk once we have an idea of what's
1026 * going on just to double-check.
1028 * process the uncertain inode record list and look
1029 * on disk to see if the referenced inodes are good
1032 do_warn(_("found inodes not in the inode allocation tree\n"));
1036 * check every confirmed (which in this case means
1037 * inode that we really suspect to be an inode) inode
1039 for (i
= 0; i
< XFS_INODES_PER_CHUNK
; i
++) {
1040 if (!is_inode_confirmed(irec
, i
))
1043 agino
= i
+ irec
->ino_startnum
;
1045 if (verify_aginum(mp
, agno
, agino
))
1048 if (nrec
!= NULL
&& nrec
->ino_startnum
<= agino
&&
1049 agino
< nrec
->ino_startnum
+
1050 XFS_INODES_PER_CHUNK
)
1053 if ((nrec
= find_inode_rec(agno
, agino
)) == NULL
)
1054 if (!verify_aginum(mp
, agno
, agino
))
1055 if (verify_aginode_chunk(mp
, agno
,
1060 get_uncertain_inode_rec(agno
, irec
);
1061 free_inode_rec(agno
, irec
);
1063 irec
= findfirst_uncertain_inode_rec(agno
);
1064 } while (irec
!= NULL
);
1067 do_warn(_("found inodes not in the inode allocation tree\n"));
 * verify and process the uncertain inodes for an ag.
 * this is different from check_ in that we can't just
 * move the good inodes into the good inode tree and let
 * process_aginodes() deal with them because this gets called
 * after process_aginodes() has been run on the ag inode tree.
 * So we have to process the inodes as well as verify since
 * we don't want to rerun process_aginodes() on a tree that has
 * mostly been processed.
 *
 * Note that if this routine does process some inodes, it can
 * add uncertain inodes to any ag which would require that
 * the routine be called again to process those newly-added
 * inodes.
 *
 * returns 0 if no inodes were processed and 1 if inodes
 * were processed (and it is possible that new uncertain
 * inodes were discovered).
 *
 * as a side-effect, this routine tears down the uncertain
 * inode tree for the ag.
1095 process_uncertain_aginodes(xfs_mount_t
*mp
, xfs_agnumber_t agno
)
1097 ino_tree_node_t
*irec
;
1098 ino_tree_node_t
*nrec
;
1105 #ifdef XR_INODE_TRACE
1106 fprintf(stderr
, "in process_uncertain_aginodes, agno = %d\n", agno
);
1111 clear_uncertain_ino_cache(agno
);
1113 if ((irec
= findfirst_uncertain_inode_rec(agno
)) == NULL
)
1120 * check every confirmed inode
1122 for (cnt
= i
= 0; i
< XFS_INODES_PER_CHUNK
; i
++) {
1123 if (!is_inode_confirmed(irec
, i
))
1126 agino
= i
+ irec
->ino_startnum
;
1127 #ifdef XR_INODE_TRACE
1128 fprintf(stderr
, "ag inode = %d (0x%x)\n", agino
, agino
);
1131 * skip over inodes already processed (in the
1132 * good tree), bad inode numbers, and inode numbers
1133 * pointing to bogus inodes
1135 if (verify_aginum(mp
, agno
, agino
))
1138 if (nrec
!= NULL
&& nrec
->ino_startnum
<= agino
&&
1139 agino
< nrec
->ino_startnum
+
1140 XFS_INODES_PER_CHUNK
)
1143 if ((nrec
= find_inode_rec(agno
, agino
)) != NULL
)
1147 * verify the chunk. if good, it will be
1148 * added to the good inode tree.
1150 if ((nrec
= verify_aginode_chunk_irec(mp
,
1151 agno
, agino
)) == NULL
)
1157 * process the inode record we just added
1158 * to the good inode tree. The inode
1159 * processing may add more records to the
1160 * uncertain inode lists.
1162 if (process_inode_chunk(mp
, agno
, XFS_IALLOC_INODES(mp
),
1163 nrec
, 1, 0, 0, &bogus
)) {
1164 /* XXX - i/o error, we've got a problem */
1171 * now return the uncertain inode record to the free pool
1172 * and pull another one off the list for processing
1174 get_uncertain_inode_rec(agno
, irec
);
1175 free_inode_rec(agno
, irec
);
1177 irec
= findfirst_uncertain_inode_rec(agno
);
1178 } while (irec
!= NULL
);
1181 do_warn(_("found inodes not in the inode allocation tree\n"));