From 6ceb56931f20cdea9f400368d5382af494cf1a4f Mon Sep 17 00:00:00 2001
From: Barry Naujok
Date: Wed, 14 Oct 2009 00:29:29 +0200
Subject: [PATCH] repair: split up scanfunc_ino

Split out a helper to scan a single inode chunk for suspect inodes
from scanfunc_ino to make it more readable.

Signed-off-by: Barry Naujok
Signed-off-by: Christoph Hellwig
Reviewed-by: Eric Sandeen
---
 repair/scan.c | 328 +++++++++++++++++++++++++-------------------------
 1 file changed, 162 insertions(+), 166 deletions(-)

diff --git a/repair/scan.c b/repair/scan.c
index 2196f8152..c9322bf46 100644
--- a/repair/scan.c
+++ b/repair/scan.c
@@ -627,6 +627,166 @@ scanfunc_cnt(
         suspect, isroot, XFS_ABTC_MAGIC);
 }
 
+static int
+scan_single_ino_chunk(
+        xfs_agnumber_t          agno,
+        xfs_inobt_rec_t         *rp,
+        int                     suspect)
+{
+        xfs_ino_t               lino;
+        xfs_agino_t             ino;
+        xfs_agblock_t           agbno;
+        int                     j;
+        int                     nfree;
+        int                     off;
+        int                     state;
+        ino_tree_node_t         *ino_rec, *first_rec, *last_rec;
+
+        ino = be32_to_cpu(rp->ir_startino);
+        off = XFS_AGINO_TO_OFFSET(mp, ino);
+        agbno = XFS_AGINO_TO_AGBNO(mp, ino);
+        lino = XFS_AGINO_TO_INO(mp, agno, ino);
+
+        /*
+         * on multi-block block chunks, all chunks start
+         * at the beginning of the block. with multi-chunk
+         * blocks, all chunks must start on 64-inode boundaries
+         * since each block can hold N complete chunks. if
+         * fs has aligned inodes, all chunks must start
+         * at a fs_ino_alignment*N'th agbno. skip recs
+         * with badly aligned starting inodes.
+         */
+        if (ino == 0 ||
+            (inodes_per_block <= XFS_INODES_PER_CHUNK && off != 0) ||
+            (inodes_per_block > XFS_INODES_PER_CHUNK &&
+             off % XFS_INODES_PER_CHUNK != 0) ||
+            (fs_aligned_inodes && agbno % fs_ino_alignment != 0)) {
+                do_warn(
+        _("badly aligned inode rec (starting inode = %llu)\n"),
+                        lino);
+                suspect++;
+        }
+
+        /*
+         * verify numeric validity of inode chunk first
+         * before inserting into a tree. don't have to
+         * worry about the overflow case because the
+         * starting ino number of a chunk can only get
+         * within 255 inodes of max (NULLAGINO). if it
+         * gets closer, the agino number will be illegal
+         * as the agbno will be too large.
+         */
+        if (verify_aginum(mp, agno, ino)) {
+                do_warn(
+_("bad starting inode # (%llu (0x%x 0x%x)) in ino rec, skipping rec\n"),
+                        lino, agno, ino);
+                return ++suspect;
+        }
+
+        if (verify_aginum(mp, agno,
+                        ino + XFS_INODES_PER_CHUNK - 1)) {
+                do_warn(
+_("bad ending inode # (%llu (0x%x 0x%x)) in ino rec, skipping rec\n"),
+                        lino + XFS_INODES_PER_CHUNK - 1,
+                        agno, ino + XFS_INODES_PER_CHUNK - 1);
+                return ++suspect;
+        }
+
+        /*
+         * set state of each block containing inodes
+         */
+        if (off == 0 && !suspect) {
+                for (j = 0;
+                     j < XFS_INODES_PER_CHUNK;
+                     j += mp->m_sb.sb_inopblock) {
+                        agbno = XFS_AGINO_TO_AGBNO(mp, ino + j);
+                        state = get_agbno_state(mp, agno, agbno);
+                        if (state == XR_E_UNKNOWN) {
+                                set_agbno_state(mp, agno, agbno, XR_E_INO);
+                        } else if (state == XR_E_INUSE_FS && agno == 0 &&
+                                   ino + j >= first_prealloc_ino &&
+                                   ino + j < last_prealloc_ino) {
+                                set_agbno_state(mp, agno, agbno, XR_E_INO);
+                        } else {
+                                do_warn(
+_("inode chunk claims used block, inobt block - agno %d, bno %d, inopb %d\n"),
+                                        agno, agbno, mp->m_sb.sb_inopblock);
+                                /*
+                                 * XXX - maybe should mark
+                                 * block a duplicate
+                                 */
+                                return ++suspect;
+                        }
+                }
+        }
+
+        /*
+         * ensure only one avl entry per chunk
+         */
+        find_inode_rec_range(agno, ino, ino + XFS_INODES_PER_CHUNK,
+                             &first_rec, &last_rec);
+        if (first_rec != NULL) {
+                /*
+                 * this chunk overlaps with one (or more)
+                 * already in the tree
+                 */
+                do_warn(
+_("inode rec for ino %llu (%d/%d) overlaps existing rec (start %d/%d)\n"),
+                        lino, agno, ino, agno, first_rec->ino_startnum);
+                suspect++;
+
+                /*
+                 * if the 2 chunks start at the same place,
+                 * then we don't have to put this one
+                 * in the uncertain list. go to the next one.
+                 */
+                if (first_rec->ino_startnum == ino)
+                        return suspect;
+        }
+
+        nfree = 0;
+
+        /*
+         * now mark all the inodes as existing and free or used.
+         * if the tree is suspect, put them into the uncertain
+         * inode tree.
+         */
+        if (!suspect) {
+                if (XFS_INOBT_IS_FREE_DISK(rp, 0)) {
+                        nfree++;
+                        ino_rec = set_inode_free_alloc(agno, ino);
+                } else {
+                        ino_rec = set_inode_used_alloc(agno, ino);
+                }
+                for (j = 1; j < XFS_INODES_PER_CHUNK; j++) {
+                        if (XFS_INOBT_IS_FREE_DISK(rp, j)) {
+                                nfree++;
+                                set_inode_free(ino_rec, j);
+                        } else {
+                                set_inode_used(ino_rec, j);
+                        }
+                }
+        } else {
+                for (j = 0; j < XFS_INODES_PER_CHUNK; j++) {
+                        if (XFS_INOBT_IS_FREE_DISK(rp, j)) {
+                                nfree++;
+                                add_aginode_uncertain(agno, ino + j, 1);
+                        } else {
+                                add_aginode_uncertain(agno, ino + j, 0);
+                        }
+                }
+        }
+
+        if (nfree != be32_to_cpu(rp->ir_freecount)) {
+                do_warn(_("ir_freecount/free mismatch, inode "
+                        "chunk %d/%d, freecount %d nfree %d\n"),
+                        agno, ino, be32_to_cpu(rp->ir_freecount), nfree);
+        }
+
+        return suspect;
+}
+
+
 /*
  * this one walks the inode btrees sucking the info there into
  * the incore avl tree. We try and rescue corrupted btree records
@@ -653,18 +813,11 @@ scanfunc_ino(
         int                     isroot
         )
 {
-        xfs_ino_t               lino;
         int                     i;
-        xfs_agino_t             ino;
-        xfs_agblock_t           agbno;
-        int                     j;
-        int                     nfree;
-        int                     off;
         int                     numrecs;
         int                     state;
         xfs_inobt_ptr_t         *pp;
         xfs_inobt_rec_t         *rp;
-        ino_tree_node_t         *ino_rec, *first_rec, *last_rec;
         int                     hdr_errors;
 
         hdr_errors = 0;
@@ -739,165 +892,8 @@ _("inode btree block claimed (state %d), agno %d, bno %d, suspect %d\n"),
         * of INODES_PER_CHUNK (64) inodes. off is the offset into
         * the block. skip processing of bogus records.
         */
-        for (i = 0; i < numrecs; i++) {
-                ino = be32_to_cpu(rp[i].ir_startino);
-                off = XFS_AGINO_TO_OFFSET(mp, ino);
-                agbno = XFS_AGINO_TO_AGBNO(mp, ino);
-                lino = XFS_AGINO_TO_INO(mp, agno, ino);
-                /*
-                 * on multi-block block chunks, all chunks start
-                 * at the beginning of the block. with multi-chunk
-                 * blocks, all chunks must start on 64-inode boundaries
-                 * since each block can hold N complete chunks. if
-                 * fs has aligned inodes, all chunks must start
-                 * at a fs_ino_alignment*N'th agbno. skip recs
-                 * with badly aligned starting inodes.
-                 */
-                if (ino == 0 ||
-                    (inodes_per_block <= XFS_INODES_PER_CHUNK &&
-                     off != 0) ||
-                    (inodes_per_block > XFS_INODES_PER_CHUNK &&
-                     off % XFS_INODES_PER_CHUNK != 0) ||
-                    (fs_aligned_inodes &&
-                     agbno % fs_ino_alignment != 0)) {
-                        do_warn(
-        _("badly aligned inode rec (starting inode = %llu)\n"),
-                                lino);
-                        suspect++;
-                }
-
-                /*
-                 * verify numeric validity of inode chunk first
-                 * before inserting into a tree. don't have to
-                 * worry about the overflow case because the
-                 * starting ino number of a chunk can only get
-                 * within 255 inodes of max (NULLAGINO). if it
-                 * gets closer, the agino number will be illegal
-                 * as the agbno will be too large.
-                 */
-                if (verify_aginum(mp, agno, ino)) {
-                        do_warn(
-_("bad starting inode # (%llu (0x%x 0x%x)) in ino rec, skipping rec\n"),
-                                lino, agno, ino);
-                        suspect++;
-                        continue;
-                }
-
-                if (verify_aginum(mp, agno,
-                                ino + XFS_INODES_PER_CHUNK - 1)) {
-                        do_warn(
-_("bad ending inode # (%llu (0x%x 0x%x)) in ino rec, skipping rec\n"),
-                                lino + XFS_INODES_PER_CHUNK - 1,
-                                agno, ino + XFS_INODES_PER_CHUNK - 1);
-                        suspect++;
-                        continue;
-                }
-
-                /*
-                 * set state of each block containing inodes
-                 */
-                if (off == 0 && !suspect) {
-                        for (j = 0;
-                             j < XFS_INODES_PER_CHUNK;
-                             j += mp->m_sb.sb_inopblock) {
-                                agbno = XFS_AGINO_TO_AGBNO(mp, ino + j);
-                                state = get_agbno_state(mp,
-                                                agno, agbno);
-
-                                if (state == XR_E_UNKNOWN) {
-                                        set_agbno_state(mp, agno,
-                                                agbno, XR_E_INO);
-                                } else if (state == XR_E_INUSE_FS &&
-                                                agno == 0 &&
-                                                ino + j >= first_prealloc_ino &&
-                                                ino + j < last_prealloc_ino) {
-                                        set_agbno_state(mp, agno,
-                                                agbno, XR_E_INO);
-                                } else {
-                                        do_warn(
-_("inode chunk claims used block, inobt block - agno %d, bno %d, inopb %d\n"),
-                                                agno, bno,
-                                                mp->m_sb.sb_inopblock);
-                                        suspect++;
-                                        /*
-                                         * XXX - maybe should mark
-                                         * block a duplicate
-                                         */
-                                        continue;
-                                }
-                        }
-                }
-                /*
-                 * ensure only one avl entry per chunk
-                 */
-                find_inode_rec_range(agno, ino,
-                                ino + XFS_INODES_PER_CHUNK,
-                                &first_rec,
-                                &last_rec);
-                if (first_rec != NULL) {
-                        /*
-                         * this chunk overlaps with one (or more)
-                         * already in the tree
-                         */
-                        do_warn(
-_("inode rec for ino %llu (%d/%d) overlaps existing rec (start %d/%d)\n"),
-                                lino, agno, ino,
-                                agno, first_rec->ino_startnum);
-                        suspect++;
-
-                        /*
-                         * if the 2 chunks start at the same place,
-                         * then we don't have to put this one
-                         * in the uncertain list. go to the next one.
-                         */
-                        if (first_rec->ino_startnum == ino)
-                                continue;
-                }
-
-                nfree = 0;
-
-                /*
-                 * now mark all the inodes as existing and free or used.
-                 * if the tree is suspect, put them into the uncertain
-                 * inode tree.
-                 */
-                if (!suspect) {
-                        if (XFS_INOBT_IS_FREE_DISK(&rp[i], 0)) {
-                                nfree++;
-                                ino_rec = set_inode_free_alloc(agno,
-                                                ino);
-                        } else {
-                                ino_rec = set_inode_used_alloc(agno,
-                                                ino);
-                        }
-                        for (j = 1; j < XFS_INODES_PER_CHUNK; j++) {
-                                if (XFS_INOBT_IS_FREE_DISK(&rp[i], j)) {
-                                        nfree++;
-                                        set_inode_free(ino_rec, j);
-                                } else {
-                                        set_inode_used(ino_rec, j);
-                                }
-                        }
-                } else {
-                        for (j = 0; j < XFS_INODES_PER_CHUNK; j++) {
-                                if (XFS_INOBT_IS_FREE_DISK(&rp[i], j)) {
-                                        nfree++;
-                                        add_aginode_uncertain(agno,
-                                                        ino + j, 1);
-                                } else {
-                                        add_aginode_uncertain(agno,
-                                                        ino + j, 0);
-                                }
-                        }
-                }
-
-                if (nfree != be32_to_cpu(rp[i].ir_freecount)) {
-                        do_warn(_("ir_freecount/free mismatch, inode "
-                                "chunk %d/%d, freecount %d nfree %d\n"),
-                                agno, ino,
-                                be32_to_cpu(rp[i].ir_freecount), nfree);
-                }
-        }
+        for (i = 0; i < numrecs; i++)
+                suspect = scan_single_ino_chunk(agno, &rp[i], suspect);
 
         if (suspect)
                 bad_ino_btree = 1;
-- 
2.47.2
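
The shape of the refactor is worth spelling out: scan_single_ino_chunk() threads
the running suspect count through its parameter and return value, so every
"suspect++; continue;" path in the old loop body becomes "return ++suspect;" in
the helper, and the caller collapses to a two-line loop. Below is a minimal,
self-contained C sketch of that same pattern; the record layout, field names and
checks are hypothetical stand-ins for illustration, not the xfsprogs API:

#include <stdio.h>

/* Hypothetical stand-in for one on-disk inode-chunk record. */
struct chunk_rec {
        unsigned int    startino;       /* first inode number in the chunk */
        int             freecount;      /* free inodes claimed by the record */
};

/*
 * Mirror the shape of scan_single_ino_chunk(): take the running suspect
 * count, return the possibly-bumped count. An early "return ++suspect;"
 * skips the rest of this record only; the caller's loop simply moves on
 * to the next record.
 */
static int
scan_single_chunk(const struct chunk_rec *rp, int suspect)
{
        if (rp->startino == 0) {
                printf("badly aligned record (startino 0), skipping rec\n");
                return ++suspect;       /* was: suspect++; continue; */
        }
        if (rp->freecount < 0) {
                printf("bad freecount %d in record %u\n",
                       rp->freecount, rp->startino);
                suspect++;              /* note it, but keep scanning */
        }
        return suspect;
}

int
main(void)
{
        struct chunk_rec recs[] = { { 64, 3 }, { 0, 1 }, { 192, -2 } };
        int numrecs = sizeof(recs) / sizeof(recs[0]);
        int suspect = 0;
        int i;

        /* The caller collapses to a two-line loop, as in the patch. */
        for (i = 0; i < numrecs; i++)
                suspect = scan_single_chunk(&recs[i], suspect);

        printf("suspect records: %d\n", suspect);       /* prints 2 */
        return 0;
}

Because each early return is the equivalent of the old bump-and-continue, any
per-record state the loop used to carry in locals has to travel through the
helper's parameters and return value instead, which is exactly what the suspect
argument does in the patch above.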