/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "err_protos.h"
/*
 * validates inode block or chunk, returns # of good inodes
 * the dinodes are verified using verify_uncertain_dinode() which
 * means only the basic inode info is checked, no fork checks.
 */
static int
check_aginode_block(xfs_mount_t		*mp,
			xfs_agnumber_t	agno,
			xfs_agblock_t	agbno)
{
	/*
	 * it's ok to read these possible inode blocks one at
	 * a time because they don't belong to known inodes (if
	 * they did, we'd know about them courtesy of the incore inode
	 * tree and we wouldn't be here), and we stale the buffers out
	 * so no one else will overlap them.
	 */
	bp = libxfs_readbuf(mp->m_dev, XFS_AGB_TO_DADDR(mp, agno, agbno),
			XFS_FSB_TO_BB(mp, 1), 0);
	if (!bp)  {
		do_warn(_("cannot read agbno (%u/%u), disk block %lld\n"), agno,
			agbno, (xfs_daddr_t)XFS_AGB_TO_DADDR(mp, agno, agbno));
		return(0);
	}
	for (i = 0; i < mp->m_sb.sb_inopblock; i++)  {
		dino_p = XFS_MAKE_IPTR(mp, bp, i);
		if (!verify_uncertain_dinode(mp, dino_p, agno,
				XFS_OFFBNO_TO_AGINO(mp, agbno, i)))
			cnt++;
	}
static int
check_inode_block(xfs_mount_t		*mp,
			xfs_ino_t	ino)
{
	return(check_aginode_block(mp, XFS_INO_TO_AGNO(mp, ino),
				XFS_INO_TO_AGBNO(mp, ino)));
}
/*
 * tries to establish if the inode really exists in a valid
 * inode chunk.  returns number of new inodes if things are good
 * and 0 if bad.  start is the start of the discovered inode chunk.
 * routine assumes that ino is a legal inode number
 * (verified by verify_inum()).  If the inode chunk turns out
 * to be good, this routine will put the inode chunk into
 * the good inode chunk tree if required.
 *
 * the verify_(ag)inode* family of routines are utility
 * routines called by check_uncertain_aginodes() and
 * process_uncertain_aginodes().
 */
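/*
 * Overview of the cases handled below: the easy one where an entire
 * inode chunk fits inside a single block (XFS_IALLOC_BLOCKS(mp) == 1,
 * i.e. multiple chunks per block), the aligned-inode filesystem where
 * the chunk boundaries follow directly from rounding the block number
 * down to the allocation alignment, and the hard pre-6.3 case where
 * the chunk has to be located by probing the disk blocks around the
 * inode in question.
 */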
int
verify_inode_chunk(xfs_mount_t		*mp,
			xfs_ino_t	ino,
			xfs_ino_t	*start_ino)
{
	xfs_agino_t	start_agino;
	xfs_agblock_t	start_agbno = 0;
	xfs_agblock_t	end_agbno;
	xfs_agblock_t	max_agbno;
	xfs_agblock_t	cur_agbno;
	xfs_agblock_t	chunk_start_agbno;
	xfs_agblock_t	chunk_stop_agbno;
	ino_tree_node_t	*irec_before_p = NULL;
	ino_tree_node_t	*irec_after_p = NULL;
	ino_tree_node_t	*irec_p;
	ino_tree_node_t	*irec_next_p;
	agno = XFS_INO_TO_AGNO(mp, ino);
	agino = XFS_INO_TO_AGINO(mp, ino);
	agbno = XFS_INO_TO_AGBNO(mp, ino);
	*start_ino = NULLFSINO;

	ASSERT(XFS_IALLOC_BLOCKS(mp) > 0);
	if (agno == mp->m_sb.sb_agcount - 1)
		max_agbno = mp->m_sb.sb_dblocks -
			(xfs_drfsbno_t) mp->m_sb.sb_agblocks * agno;
	else
		max_agbno = mp->m_sb.sb_agblocks;
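	/*
	 * e.g. with sb_agblocks == 16384 but only 50000 blocks in total,
	 * the last of four AGs spans just 50000 - 3 * 16384 = 848 blocks,
	 * so the generic per-AG size would overshoot the end of the fs.
	 */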
	/*
	 * is the inode beyond the end of the AG?
	 */
	if (agbno >= max_agbno)
		return(0);
	/*
	 * check for the easy case, inodes per block >= XFS_INODES_PER_CHUNK
	 * (multiple chunks per block)
	 */
	if (XFS_IALLOC_BLOCKS(mp) == 1)  {
		if (agbno > max_agbno)
			return(0);
		if (check_inode_block(mp, ino) == 0)
			return(0);

		pthread_mutex_lock(&ag_locks[agno]);
		switch (state = get_agbno_state(mp, agno, agbno))  {
		case XR_E_INO:
			do_warn(
		_("uncertain inode block %d/%d already known\n"),
				agno, agbno);
			break;
		case XR_E_UNKNOWN:
		case XR_E_FREE1:
		case XR_E_FREE:
			set_agbno_state(mp, agno, agbno, XR_E_INO);
			break;
		case XR_E_MULT:
		case XR_E_INUSE:
		case XR_E_INUSE_FS:
		case XR_E_FS_MAP:
			/*
			 * if block is already claimed, forget it.
			 */
			do_warn(
		_("inode block %d/%d multiply claimed, (state %d)\n"),
				agno, agbno, state);
			set_agbno_state(mp, agno, agbno, XR_E_MULT);
			pthread_mutex_unlock(&ag_locks[agno]);
			return(0);
		default:
			do_warn(
		_("inode block %d/%d bad state, (state %d)\n"),
				agno, agbno, state);
			set_agbno_state(mp, agno, agbno, XR_E_INO);
			break;
		}

		pthread_mutex_unlock(&ag_locks[agno]);
		start_agino = XFS_OFFBNO_TO_AGINO(mp, agbno, 0);
		*start_ino = XFS_AGINO_TO_INO(mp, agno, start_agino);

		/*
		 * put new inode record(s) into inode tree
		 */
		for (j = 0; j < chunks_pblock; j++)  {
			if ((irec_p = find_inode_rec(agno, start_agino))
					== NULL)  {
				irec_p = set_inode_free_alloc(agno,
						start_agino);
				for (i = 1; i < XFS_INODES_PER_CHUNK; i++)
					set_inode_free(irec_p, i);
			}
			if (start_agino <= agino && agino <
					start_agino + XFS_INODES_PER_CHUNK)
				set_inode_used(irec_p, agino - start_agino);

			start_agino += XFS_INODES_PER_CHUNK;
			ino_cnt += XFS_INODES_PER_CHUNK;
		}

		return(ino_cnt);
	} else if (fs_aligned_inodes)  {
		/*
		 * next easy case -- aligned inode filesystem.
		 * just check out the chunk
		 */
		start_agbno = rounddown(XFS_INO_TO_AGBNO(mp, ino),
					fs_ino_alignment);
		end_agbno = start_agbno + XFS_IALLOC_BLOCKS(mp);

		/*
		 * if this fs has aligned inodes but the end of the
		 * chunk is beyond the end of the ag, this is a bad
		 * chunk.
		 */
		if (end_agbno > max_agbno)
			return(0);
		/*
		 * check out all blocks in chunk
		 */
		for (cur_agbno = start_agbno; cur_agbno < end_agbno;
				cur_agbno++)  {
			ino_cnt += check_aginode_block(mp, agno, cur_agbno);
		}
		/*
		 * if we lose either 2 blocks worth of inodes or >25% of
		 * the chunk, just forget it.
		 */
		if (ino_cnt < XFS_INODES_PER_CHUNK - 2 * mp->m_sb.sb_inopblock
				|| ino_cnt < XFS_INODES_PER_CHUNK - 16)
			return(0);
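		/*
		 * (XFS_INODES_PER_CHUNK is 64, so the second test above,
		 * which demands at least 64 - 16 = 48 surviving inodes,
		 * is the 25% cutoff mentioned in the comment.)
		 */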
		/*
		 * ok, put the record into the tree, if no conflict.
		 */
		if (find_uncertain_inode_rec(agno,
				XFS_OFFBNO_TO_AGINO(mp, start_agbno, 0)))
			return(0);

		start_agino = XFS_OFFBNO_TO_AGINO(mp, start_agbno, 0);
		*start_ino = XFS_AGINO_TO_INO(mp, agno, start_agino);
		irec_p = set_inode_free_alloc(agno,
				XFS_OFFBNO_TO_AGINO(mp, start_agbno, 0));

		for (i = 1; i < XFS_INODES_PER_CHUNK; i++)
			set_inode_free(irec_p, i);

		ASSERT(start_agino <= agino &&
				start_agino + XFS_INODES_PER_CHUNK > agino);

		set_inode_used(irec_p, agino - start_agino);

		return(XFS_INODES_PER_CHUNK);
	}
	/*
	 * hard case -- pre-6.3 filesystem.
	 * set default start/end agbnos and ensure agbnos are legal.
	 * we're setting a range [start_agbno, end_agbno) such that
	 * a discovered inode chunk completely within that range
	 * would include the inode passed into us.
	 */
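	/*
	 * For example, with XFS_IALLOC_BLOCKS(mp) == 4 and the inode
	 * sitting in agbno 100, the initial range is [97, 104): any
	 * 4-block chunk lying entirely inside that half-open range
	 * necessarily covers block 100.  (Numbers are only illustrative.)
	 */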
	if (XFS_IALLOC_BLOCKS(mp) > 1)  {
		if (agino > XFS_IALLOC_INODES(mp))
			start_agbno = agbno - XFS_IALLOC_BLOCKS(mp) + 1;
	}

	end_agbno = agbno + XFS_IALLOC_BLOCKS(mp);

	if (end_agbno > max_agbno)
		end_agbno = max_agbno;
	/*
	 * search tree for known inodes within +/- 1 inode chunk range
	 */
	irec_before_p = irec_after_p = NULL;

	find_inode_rec_range(agno, XFS_OFFBNO_TO_AGINO(mp, start_agbno, 0),
		XFS_OFFBNO_TO_AGINO(mp, end_agbno, mp->m_sb.sb_inopblock - 1),
		&irec_before_p, &irec_after_p);
	/*
	 * if we have known inode chunks in our search range, establish
	 * their start and end-points to tighten our search range.  range
	 * is [start, end) -- e.g. max/end agbno is one beyond the
	 * last block to be examined.  the avl routines work this way.
	 */
	if (irec_before_p != NULL)  {
		/*
		 * only one inode record in the range, move one boundary in
		 */
		if (irec_before_p == irec_after_p)  {
			if (irec_before_p->ino_startnum < agino)
				start_agbno = XFS_AGINO_TO_AGBNO(mp,
						irec_before_p->ino_startnum +
						XFS_INODES_PER_CHUNK);
			else
				end_agbno = XFS_AGINO_TO_AGBNO(mp,
						irec_before_p->ino_startnum);
		}
		/*
		 * find the start of the gap in the search range (which
		 * should contain our unknown inode). if the only irec
		 * within +/- 1 chunks starts after the inode we're
		 * looking for, skip this stuff since the end_agbno
		 * of the range has already been trimmed in to not
		 * include that irec.
		 */
		if (irec_before_p->ino_startnum < agino)  {
			irec_p = irec_before_p;
			irec_next_p = next_ino_rec(irec_p);

			while (irec_next_p != NULL &&
				irec_p->ino_startnum + XFS_INODES_PER_CHUNK ==
				irec_next_p->ino_startnum)  {
				irec_p = irec_next_p;
				irec_next_p = next_ino_rec(irec_next_p);
			}

			start_agbno = XFS_AGINO_TO_AGBNO(mp,
					irec_p->ino_startnum) +
					XFS_IALLOC_BLOCKS(mp);
			/*
			 * we know that the inode we're trying to verify isn't
			 * in an inode chunk so the next ino_rec marks the end
			 * of the gap -- is it within the search range?
			 */
			if (irec_next_p != NULL &&
					agino + XFS_IALLOC_INODES(mp) >=
					irec_next_p->ino_startnum)
				end_agbno = XFS_AGINO_TO_AGBNO(mp,
						irec_next_p->ino_startnum);
		}
	}
	ASSERT(start_agbno < end_agbno);
	/*
	 * if the gap is too small to contain a chunk, we lose.
	 * this means that inode chunks known to be good surround
	 * the inode in question and that the space between them
	 * is too small for a legal inode chunk
	 */
	if (end_agbno - start_agbno < XFS_IALLOC_BLOCKS(mp))
		return(0);
	/*
	 * now grunge around the disk, start at the inode block and
	 * go in each direction until you hit a non-inode block or
	 * run into a range boundary.  A non-inode block is a block
	 * with *no* good inodes in it.  Unfortunately, we can't
	 * co-opt bad blocks into inode chunks (which might take
	 * care of disk blocks that turn into zeroes) because the
	 * filesystem could very well allocate two inode chunks
	 * with a one block file in between and we'd zap the file.
	 * We're better off just losing the rest of the
	 * inode chunk instead.
	 */
	for (cur_agbno = agbno; cur_agbno >= start_agbno; cur_agbno--)  {
		/*
		 * if the block has no inodes, it's a bad block so
		 * break out now without decrementing cur_agbno so
		 * chunk start blockno will be set to the last good block
		 */
		if (!(irec_cnt = check_aginode_block(mp, agno, cur_agbno)))
			break;
		ino_cnt += irec_cnt;
	}

	chunk_start_agbno = cur_agbno + 1;
	for (cur_agbno = agbno + 1; cur_agbno < end_agbno; cur_agbno++)  {
		/*
		 * if the block has no inodes, it's a bad block so
		 * break out now without incrementing cur_agbno so
		 * chunk stop blockno will be set to the block
		 * immediately after the last good block.
		 */
		if (!(irec_cnt = check_aginode_block(mp, agno, cur_agbno)))
			break;
		ino_cnt += irec_cnt;
	}

	chunk_stop_agbno = cur_agbno;
	num_blks = chunk_stop_agbno - chunk_start_agbno;

	if (num_blks < XFS_IALLOC_BLOCKS(mp) || ino_cnt == 0)
		return(0);
	/*
	 * XXX - later - if the entire range is selected and they're all
	 * good inodes, keep searching in either direction
	 * until you hit the end of the range of inodes, then split into
	 * chunks.  for now, just take one chunk's worth starting at the
	 * lowest possible point and hopefully we'll pick the rest up later.
	 *
	 * XXX - if we were going to fix up an inode chunk for
	 * any good inodes in the chunk, this is where we would
	 * do it.  For now, keep it simple and lose the rest of
	 * the chunk.
	 */
	if (num_blks % XFS_IALLOC_BLOCKS(mp) != 0)  {
		num_blks = rounddown(num_blks, XFS_IALLOC_BLOCKS(mp));
		chunk_stop_agbno = chunk_start_agbno + num_blks;
	}
	/*
	 * ok, we've got a candidate inode chunk.  now we have to
	 * verify that we aren't trying to use blocks that are already
	 * in use.  If so, mark them as multiply claimed since odds
	 * are very low that we found this chunk by stumbling across
	 * user data -- we're probably here as a result of a directory
	 * entry or an iunlinked pointer
	 */
	pthread_mutex_lock(&ag_locks[agno]);
	for (j = 0, cur_agbno = chunk_start_agbno;
			cur_agbno < chunk_stop_agbno; cur_agbno++)  {
		switch (state = get_agbno_state(mp, agno, cur_agbno))  {
		case XR_E_MULT:
		case XR_E_INUSE:
		case XR_E_INUSE_FS:
		case XR_E_FS_MAP:
			do_warn(
		_("inode block %d/%d multiply claimed, (state %d)\n"),
				agno, cur_agbno, state);
			set_agbno_state(mp, agno, cur_agbno, XR_E_MULT);
			j = 1;
			break;
		case XR_E_INO:
			do_error(
		_("uncertain inode block overlap, agbno = %d, ino = %llu\n"),
				cur_agbno, ino);
			break;
		default:
			break;
		}

		if (j)  {
			pthread_mutex_unlock(&ag_locks[agno]);
			return(0);
		}
	}

	pthread_mutex_unlock(&ag_locks[agno]);
	/*
	 * ok, chunk is good.  put the record into the tree if required,
	 * and fill in the bitmap.  All inodes will be marked as "free"
	 * except for the one that led us to discover the chunk.  That's
	 * ok because we'll override the free setting later if the
	 * contents of the inode indicate it's in use.
	 */
	start_agino = XFS_OFFBNO_TO_AGINO(mp, chunk_start_agbno, 0);
	*start_ino = XFS_AGINO_TO_INO(mp, agno, start_agino);

	ASSERT(find_inode_rec(agno, start_agino) == NULL);
	irec_p = set_inode_free_alloc(agno, start_agino);
	for (i = 1; i < XFS_INODES_PER_CHUNK; i++)
		set_inode_free(irec_p, i);

	ASSERT(start_agino <= agino &&
			start_agino + XFS_INODES_PER_CHUNK > agino);

	set_inode_used(irec_p, agino - start_agino);
	pthread_mutex_lock(&ag_locks[agno]);

	for (cur_agbno = chunk_start_agbno;
			cur_agbno < chunk_stop_agbno; cur_agbno++)  {
		switch (state = get_agbno_state(mp, agno, cur_agbno))  {
		case XR_E_INO:
			do_error(
		_("uncertain inode block %llu already known\n"),
				XFS_AGB_TO_FSB(mp, agno, cur_agbno));
			break;
		case XR_E_UNKNOWN:
		case XR_E_FREE1:
		case XR_E_FREE:
			set_agbno_state(mp, agno, cur_agbno, XR_E_INO);
			break;
		case XR_E_MULT:
		case XR_E_INUSE:
		case XR_E_INUSE_FS:
		case XR_E_FS_MAP:
			do_error(
		_("inode block %d/%d multiply claimed, (state %d)\n"),
				agno, cur_agbno, state);
			break;
		default:
			do_warn(
		_("inode block %d/%d bad state, (state %d)\n"),
				agno, cur_agbno, state);
			set_agbno_state(mp, agno, cur_agbno, XR_E_INO);
			break;
		}
	}

	pthread_mutex_unlock(&ag_locks[agno]);

	return(ino_cnt);
}
/*
 * same as above only for ag inode chunks
 */
static int
verify_aginode_chunk(xfs_mount_t	*mp,
			xfs_agnumber_t	agno,
			xfs_agino_t	agino,
			xfs_agino_t	*agino_start)
{
	xfs_ino_t	ino;
	int		res;

	res = verify_inode_chunk(mp, XFS_AGINO_TO_INO(mp, agno, agino), &ino);

	if (res)
		*agino_start = XFS_INO_TO_AGINO(mp, ino);
	else
		*agino_start = NULLAGINO;

	return(res);
}
/*
 * this does the same as the two above only it returns a pointer
 * to the inode record in the good inode tree
 */
ino_tree_node_t *
verify_aginode_chunk_irec(xfs_mount_t	*mp,
			xfs_agnumber_t	agno,
			xfs_agino_t	agino)
{
	xfs_agino_t	start_agino;
	ino_tree_node_t	*irec = NULL;

	if (verify_aginode_chunk(mp, agno, agino, &start_agino))
		irec = find_inode_rec(agno, start_agino);

	return(irec);
}
/*
 * processes an inode allocation chunk/block, returns 1 on I/O errors,
 * 0 otherwise
 *
 * *bogus is set to 1 if the entire set of inodes is bad.
 */
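/*
 * Roughly: the chunk is read in cluster-sized buffers; a first pass
 * (the "verify inode chunk if necessary" step below) sanity-checks the
 * dinodes and bails out through *bogus if the whole set looks bad, and
 * only then does the second pass call process_dinode() on each inode
 * and write back any dirtied buffers.
 */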
static int
process_inode_chunk(xfs_mount_t		*mp,
			xfs_agnumber_t	agno,
			int		num_inos,
			ino_tree_node_t	*first_irec,
			int		ino_discovery,
			int		check_dups,
			int		extra_attr_check,
			int		*bogus)
{
	ino_tree_node_t		*ino_rec;
	int			blks_per_cluster;
	ASSERT(first_irec != NULL);
	ASSERT(XFS_AGINO_TO_OFFSET(mp, first_irec->ino_startnum) == 0);

	*bogus = 0;
	ASSERT(XFS_IALLOC_BLOCKS(mp) > 0);
	blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_blocklog;
	if (blks_per_cluster == 0)
		blks_per_cluster = 1;
	cluster_count = XFS_INODES_PER_CHUNK / inodes_per_cluster;
	ASSERT(cluster_count > 0);
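	/*
	 * e.g. with 4k blocks, 256-byte inodes and a (typical) 8k inode
	 * cluster: blks_per_cluster = 8192 >> 12 = 2, inodes_per_cluster
	 * = 32, and a 64-inode chunk is read as 64 / 32 = 2 buffers.
	 */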
	/*
	 * get all blocks required to read in this chunk (may wind up
	 * having to process more chunks in a multi-chunk per block fs)
	 */
	agbno = XFS_AGINO_TO_AGBNO(mp, first_irec->ino_startnum);
	ino_rec = first_irec;

	bplist = malloc(cluster_count * sizeof(xfs_buf_t *));
	if (bplist == NULL)
		do_error(_("failed to allocate %d bytes of memory\n"),
			cluster_count * sizeof(xfs_buf_t *));
	for (bp_index = 0; bp_index < cluster_count; bp_index++) {
		pftrace("about to read off %llu in AG %d",
			(long long)XFS_AGB_TO_DADDR(mp, agno, agbno), agno);

		bplist[bp_index] = libxfs_readbuf(mp->m_dev,
					XFS_AGB_TO_DADDR(mp, agno, agbno),
					XFS_FSB_TO_BB(mp, blks_per_cluster), 0);
		if (!bplist[bp_index]) {
			do_warn(_("cannot read inode %llu, disk block %lld, cnt %d\n"),
				XFS_AGINO_TO_INO(mp, agno, first_irec->ino_startnum),
				XFS_AGB_TO_DADDR(mp, agno, agbno),
				(int)XFS_FSB_TO_BB(mp, blks_per_cluster));
			while (bp_index > 0) {
				bp_index--;
				libxfs_putbuf(bplist[bp_index]);
			}
			free(bplist);
			return(1);
		}
		agbno += blks_per_cluster;
		pftrace("readbuf %p (%llu, %d) in AG %d", bplist[bp_index],
			(long long)XFS_BUF_ADDR(bplist[bp_index]),
			XFS_BUF_COUNT(bplist[bp_index]), agno);
	}
	agbno = XFS_AGINO_TO_AGBNO(mp, first_irec->ino_startnum);
	/*
	 * initialize counters
	 */
	/*
	 * verify inode chunk if necessary
	 */
			dino = XFS_MAKE_IPTR(mp, bplist[bp_index], cluster_offset);
			agino = irec_offset + ino_rec->ino_startnum;
			/*
			 * we always think that the root and realtime
			 * inodes are verified even though we may have
			 * to reset them later to keep from losing the
			 * chunk that they're in
			 */
			if (verify_dinode(mp, dino, agno, agino) == 0 ||
					(mp->m_sb.sb_rootino == agino ||
					mp->m_sb.sb_rsumino == agino ||
					mp->m_sb.sb_rbmino == agino))
				status++;

			irec_offset++;
			icnt++;
			cluster_offset++;
			if (icnt == XFS_IALLOC_INODES(mp) &&
					irec_offset == XFS_INODES_PER_CHUNK) {
				/*
				 * done! - finished up irec and block
				 */
				break;
			} else if (irec_offset == XFS_INODES_PER_CHUNK) {
				/*
				 * get new irec (multiple chunks per block fs)
				 */
				ino_rec = next_ino_rec(ino_rec);
				ASSERT(ino_rec->ino_startnum == agino + 1);
				irec_offset = 0;
			}
			if (cluster_offset == inodes_per_cluster) {
				bp_index++;
				cluster_offset = 0;
			}
	/*
	 * if chunk/block is bad, blow it off.  the inode records
	 * will be deleted by the caller if appropriate.
	 */
	if (!status)  {
		*bogus = 1;
		for (bp_index = 0; bp_index < cluster_count; bp_index++)
			libxfs_putbuf(bplist[bp_index]);
		free(bplist);
		return(0);
	}

	/*
	 * reset irec and counters
	 */
	ino_rec = first_irec;
	/*
	 * mark block as an inode block in the incore bitmap
	 */
	pthread_mutex_lock(&ag_locks[agno]);
	switch (state = get_agbno_state(mp, agno, agbno))  {
	case XR_E_INO:	/* already marked */
		break;
	case XR_E_UNKNOWN:
	case XR_E_FREE:
	case XR_E_FREE1:
		set_agbno_state(mp, agno, agbno, XR_E_INO);
		break;
	case XR_E_BAD_STATE:
		do_error(_("bad state in block map %d\n"), state);
		break;
	default:
		set_agbno_state(mp, agno, agbno, XR_E_MULT);
		do_warn(_("inode block %llu multiply claimed, state was %d\n"),
			XFS_AGB_TO_FSB(mp, agno, agbno), state);
		break;
	}
	pthread_mutex_unlock(&ag_locks[agno]);
		dino = XFS_MAKE_IPTR(mp, bplist[bp_index], cluster_offset);
		agino = irec_offset + ino_rec->ino_startnum;
		status = process_dinode(mp, dino, agno, agino,
				is_inode_free(ino_rec, irec_offset),
				&ino_dirty, &is_used, ino_discovery, check_dups,
				extra_attr_check, &isa_dir, &parent);

		ASSERT(is_used != 3);
		if (is_used)  {
			/*
			 * XXX - if we want to try and keep
			 * track of whether we need to bang on
			 * the inode maps (instead of just
			 * blindly reconstructing them like
			 * we do now), this is where to start.
			 */
			if (is_inode_free(ino_rec, irec_offset))  {
				if (verbose || no_modify)  {
					do_warn(_("imap claims in-use inode "
						  "%llu is free, "),
						XFS_AGINO_TO_INO(mp, agno,
							agino));
				}

				if (verbose || !no_modify)
					do_warn(_("correcting imap\n"));
				else
					do_warn(_("would correct imap\n"));
			}
			set_inode_used(ino_rec, irec_offset);
			/*
			 * store on-disk nlink count for comparing in phase 7
			 */
			set_inode_disk_nlinks(ino_rec, irec_offset,
				dino->di_core.di_version > XFS_DINODE_VERSION_1
					? be32_to_cpu(dino->di_core.di_nlink)
					: be16_to_cpu(dino->di_core.di_onlink));
		} else  {
			set_inode_free(ino_rec, irec_offset);
		}
		/*
		 * if we lose the root inode, or it turns into
		 * a non-directory, that allows us to double-check
		 * later whether or not we need to reinitialize it.
		 */
		if (isa_dir)  {
			set_inode_isadir(ino_rec, irec_offset);
			/*
			 * we always set the parent but
			 * we may as well wait until
			 * phase 4 (no inode discovery)
			 * because the parent info will
			 * be solid then.
			 */
			if (!ino_discovery)  {
				set_inode_parent(ino_rec, irec_offset, parent);
				ASSERT(parent ==
					get_inode_parent(ino_rec, irec_offset));
			}
		} else  {
			clear_inode_isadir(ino_rec, irec_offset);
		}
		if (mp->m_sb.sb_rootino ==
				XFS_AGINO_TO_INO(mp, agno, agino))  {
			if (!no_modify)  {
				do_warn(_("cleared root inode %llu\n"),
					XFS_AGINO_TO_INO(mp, agno,
						agino));
			} else  {
				do_warn(_("would clear root inode %llu\n"),
					XFS_AGINO_TO_INO(mp, agno,
						agino));
			}
		} else if (mp->m_sb.sb_rbmino ==
				XFS_AGINO_TO_INO(mp, agno, agino))  {
			if (!no_modify)  {
				do_warn(_("cleared realtime bitmap "
					  "inode %llu\n"),
					XFS_AGINO_TO_INO(mp, agno,
						agino));
			} else  {
				do_warn(_("would clear realtime bitmap "
					  "inode %llu\n"),
					XFS_AGINO_TO_INO(mp, agno,
						agino));
			}
		} else if (mp->m_sb.sb_rsumino ==
				XFS_AGINO_TO_INO(mp, agno, agino))  {
			if (!no_modify)  {
				do_warn(_("cleared realtime summary "
					  "inode %llu\n"),
					XFS_AGINO_TO_INO(mp, agno,
						agino));
			} else  {
				do_warn(_("would clear realtime summary"
					  " inode %llu\n"),
					XFS_AGINO_TO_INO(mp, agno,
						agino));
			}
		} else if (!no_modify)  {
			do_warn(_("cleared inode %llu\n"),
				XFS_AGINO_TO_INO(mp, agno, agino));
		} else  {
			do_warn(_("would have cleared inode %llu\n"),
				XFS_AGINO_TO_INO(mp, agno, agino));
		}
		if (icnt == XFS_IALLOC_INODES(mp) &&
				irec_offset == XFS_INODES_PER_CHUNK)  {
			/*
			 * done! - finished up irec and block simultaneously
			 */
			for (bp_index = 0; bp_index < cluster_count; bp_index++) {
				pftrace("put/writebuf %p (%llu) in AG %d", bplist[bp_index],
					(long long)XFS_BUF_ADDR(bplist[bp_index]), agno);

				if (dirty && !no_modify)
					libxfs_writebuf(bplist[bp_index], 0);
				else
					libxfs_putbuf(bplist[bp_index]);
			}
			free(bplist);
			break;
		} else if (ibuf_offset == mp->m_sb.sb_inopblock)  {
			/*
			 * mark block as an inode block in the incore bitmap
			 * and reset inode buffer offset counter
			 */
			pthread_mutex_lock(&ag_locks[agno]);
			switch (state = get_agbno_state(mp, agno, agbno))  {
			case XR_E_INO:	/* already marked */
				break;
			case XR_E_UNKNOWN:
			case XR_E_FREE:
			case XR_E_FREE1:
				set_agbno_state(mp, agno, agbno, XR_E_INO);
				break;
			case XR_E_BAD_STATE:
				do_error(_("bad state in block map %d\n"),
					state);
				break;
			default:
				set_agbno_state(mp, agno, agbno, XR_E_MULT);
				do_warn(_("inode block %llu multiply claimed, "
					  "state was %d\n"),
					XFS_AGB_TO_FSB(mp, agno, agbno), state);
				break;
			}
			pthread_mutex_unlock(&ag_locks[agno]);
		} else if (irec_offset == XFS_INODES_PER_CHUNK)  {
			/*
			 * get new irec (multiple chunks per block fs)
			 */
			ino_rec = next_ino_rec(ino_rec);
			ASSERT(ino_rec->ino_startnum == agino + 1);
			irec_offset = 0;
		}
		if (cluster_offset == inodes_per_cluster)  {
			bp_index++;
			cluster_offset = 0;
		}
	}

	return(0);
}
/*
 * check all inodes mentioned in the ag's incore inode maps.
 * the map may be incomplete.  If so, we'll catch the missing
 * inodes (hopefully) when we traverse the directory tree.
 * check_dirs is set to 1 if directory inodes should be
 * processed for internal consistency, parent setting and
 * discovery of unknown inodes.  this only happens
 * in phase 3.  check_dups is set to 1 if we're looking for
 * inodes that reference duplicate blocks so we can trash
 * the inode right then and there.  this is set only in
 * phase 4 after we've run through and set the bitmap once.
 */
void
process_aginodes(xfs_mount_t		*mp,
			prefetch_args_t	*pf_args,
			xfs_agnumber_t	agno,
			int		ino_discovery,
			int		check_dups,
			int		extra_attr_check)
{
	ino_tree_node_t		*ino_rec, *first_ino_rec, *prev_ino_rec;

	first_ino_rec = ino_rec = findfirst_inode_rec(agno);
	while (ino_rec != NULL)  {
		/*
		 * paranoia - step through inode records until we step
		 * through a full allocation of inodes.  this could
		 * be an issue in big-block filesystems where a block
		 * can hold more than one inode chunk.  make sure to
		 * grab the record corresponding to the beginning of
		 * the next block before we call the processing routines.
		 */
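		/*
		 * e.g. a 64k-block filesystem with 256-byte inodes has 256
		 * inodes per block but only 64 per chunk, so one block holds
		 * four chunk records and all four have to be walked before
		 * the processing routines see the whole block's inodes.
		 */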
		num_inos = XFS_INODES_PER_CHUNK;
		while (num_inos < XFS_IALLOC_INODES(mp) && ino_rec != NULL)  {
			/*
			 * inodes chunks will always be aligned and sized
			 * correctly
			 */
			if ((ino_rec = next_ino_rec(ino_rec)) != NULL)
				num_inos += XFS_INODES_PER_CHUNK;
		}

		ASSERT(num_inos == XFS_IALLOC_INODES(mp));
		if (pf_args)  {
			sem_post(&pf_args->ra_count);
#ifdef XR_PF_TRACE
			sem_getvalue(&pf_args->ra_count, &count);
			pftrace(
		"processing inode chunk %p in AG %d (sem count = %d)",
				first_ino_rec, agno, count);
#endif
		}
		if (process_inode_chunk(mp, agno, num_inos, first_ino_rec,
				ino_discovery, check_dups, extra_attr_check,
				&bogus))  {
			/* XXX - i/o error, we've got a problem */
			abort();
		}

		if (!bogus)
			first_ino_rec = ino_rec = next_ino_rec(ino_rec);
		else  {
			/*
			 * inodes pointed to by this record are
			 * completely bogus, blow the records for
			 * this chunk out.
			 * the inode block(s) will get reclaimed
			 * in phase 4 when the block map is
			 * reconstructed after inodes claiming
			 * duplicate blocks are deleted.
			 */
			num_inos = 0;
			ino_rec = first_ino_rec;
			while (num_inos < XFS_IALLOC_INODES(mp) &&
					ino_rec != NULL)  {
				prev_ino_rec = ino_rec;

				if ((ino_rec = next_ino_rec(ino_rec)) != NULL)
					num_inos += XFS_INODES_PER_CHUNK;

				get_inode_rec(agno, prev_ino_rec);
				free_inode_rec(agno, prev_ino_rec);
			}

			first_ino_rec = ino_rec;
		}
		PROG_RPT_INC(prog_rpt_done[agno], num_inos);
	}
}
/*
 * verify the uncertain inode list for an ag.
 * Good inodes get moved into the good inode tree.
 * returns 0 if there are no uncertain inode records to
 * be processed, 1 otherwise. This routine destroys the
 * entire uncertain inode tree for the ag as a side-effect.
 */
int
check_uncertain_aginodes(xfs_mount_t *mp, xfs_agnumber_t agno)
{
	ino_tree_node_t		*irec;
	ino_tree_node_t		*nrec;
	clear_uncertain_ino_cache(agno);

	if ((irec = findfirst_uncertain_inode_rec(agno)) == NULL)
		return(0);
	/*
	 * the trick here is to find a contiguous range
	 * of inodes, make sure that it doesn't overlap
	 * with a known to exist chunk, and then make
	 * sure it is a number of entire chunks.
	 * we check on-disk once we have an idea of what's
	 * going on just to double-check.
	 *
	 * process the uncertain inode record list and look
	 * on disk to see if the referenced inodes are good
	 */
	do_warn(_("found inodes not in the inode allocation tree\n"));

	do  {
		/*
		 * check every confirmed (which in this case means
		 * inode that we really suspect to be an inode) inode
		 */
		for (i = 0; i < XFS_INODES_PER_CHUNK; i++)  {
			if (!is_inode_confirmed(irec, i))
				continue;

			agino = i + irec->ino_startnum;
			if (verify_aginum(mp, agno, agino))
				continue;

			if (nrec != NULL && nrec->ino_startnum <= agino &&
					agino < nrec->ino_startnum +
					XFS_INODES_PER_CHUNK)
				continue;
			if ((nrec = find_inode_rec(agno, agino)) == NULL)
				if (!verify_aginum(mp, agno, agino))
					if (verify_aginode_chunk(mp, agno,
							agino, &start))
						got_some = 1;
		}
		get_uncertain_inode_rec(agno, irec);
		free_inode_rec(agno, irec);
		irec = findfirst_uncertain_inode_rec(agno);
	} while (irec != NULL);
	do_warn(_("found inodes not in the inode allocation tree\n"));
/*
 * verify and process the uncertain inodes for an ag.
 * this is different from check_ in that we can't just
 * move the good inodes into the good inode tree and let
 * process_aginodes() deal with them because this gets called
 * after process_aginodes() has been run on the ag inode tree.
 * So we have to process the inodes as well as verify since
 * we don't want to rerun process_aginodes() on a tree that has
 * mostly been processed.
 *
 * Note that if this routine does process some inodes, it can
 * add uncertain inodes to any ag which would require that
 * the routine be called again to process those newly-added
 * uncertain inodes.
 *
 * returns 0 if no inodes were processed and 1 if inodes
 * were processed (and it is possible that new uncertain
 * inodes were discovered).
 *
 * as a side-effect, this routine tears down the uncertain
 * inode tree for the ag.
 */
int
process_uncertain_aginodes(xfs_mount_t *mp, xfs_agnumber_t agno)
{
	ino_tree_node_t		*irec;
	ino_tree_node_t		*nrec;
#ifdef XR_INODE_TRACE
	fprintf(stderr, "in process_uncertain_aginodes, agno = %d\n", agno);
#endif
	clear_uncertain_ino_cache(agno);

	if ((irec = findfirst_uncertain_inode_rec(agno)) == NULL)
		return(0);
	do  {
		/*
		 * check every confirmed inode
		 */
		for (cnt = i = 0; i < XFS_INODES_PER_CHUNK; i++)  {
			if (!is_inode_confirmed(irec, i))
				continue;

			agino = i + irec->ino_startnum;
#ifdef XR_INODE_TRACE
			fprintf(stderr, "ag inode = %d (0x%x)\n", agino, agino);
#endif
			/*
			 * skip over inodes already processed (in the
			 * good tree), bad inode numbers, and inode numbers
			 * pointing to bogus inodes
			 */
			if (verify_aginum(mp, agno, agino))
				continue;
			if (nrec != NULL && nrec->ino_startnum <= agino &&
					agino < nrec->ino_startnum +
					XFS_INODES_PER_CHUNK)
				continue;
			if ((nrec = find_inode_rec(agno, agino)) != NULL)
				continue;
			/*
			 * verify the chunk.  if good, it will be
			 * added to the good inode tree.
			 */
			if ((nrec = verify_aginode_chunk_irec(mp,
					agno, agino)) == NULL)
				continue;
			/*
			 * process the inode record we just added
			 * to the good inode tree.  The inode
			 * processing may add more records to the
			 * uncertain inode lists.
			 */
			if (process_inode_chunk(mp, agno, XFS_IALLOC_INODES(mp),
					nrec, 1, 0, 0, &bogus))  {
				/* XXX - i/o error, we've got a problem */
				abort();
			}
		}
		/*
		 * now return the uncertain inode record to the free pool
		 * and pull another one off the list for processing
		 */
		get_uncertain_inode_rec(agno, irec);
		free_inode_rec(agno, irec);
		irec = findfirst_uncertain_inode_rec(agno);
	} while (irec != NULL);
	do_warn(_("found inodes not in the inode allocation tree\n"));