+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
* All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include <libxfs.h>
+#include "libxfs.h"
#include "avl.h"
#include "globals.h"
#include "agheader.h"
if (agno == mp->m_sb.sb_agcount - 1)
max_agbno = mp->m_sb.sb_dblocks -
- (xfs_drfsbno_t) mp->m_sb.sb_agblocks * agno;
+ (xfs_rfsblock_t) mp->m_sb.sb_agblocks * agno;
else
max_agbno = mp->m_sb.sb_agblocks;
return(irec);
}
+/*
+ * Set the state of an inode block during inode chunk processing. The block is
+ * expected to be in the free or inode state. If free, it transitions to the
+ * inode state. Warn if the block is in neither expected state as this indicates
+ * multiply claimed blocks.
+ *
+ * mp    - mount point of the filesystem being repaired
+ * agno  - allocation group holding the block
+ * agbno - AG-relative block number whose in-core state is updated
+ */
+static void
+process_inode_agbno_state(
+	struct xfs_mount	*mp,
+	xfs_agnumber_t		agno,
+	xfs_agblock_t		agbno)
+{
+	int			state;
+	/* per-AG lock serializes reads/updates of the in-core block map */
+	pthread_mutex_lock(&ag_locks[agno].lock);
+	state = get_bmap(agno, agbno);
+	switch (state) {
+	case XR_E_INO:	/* already marked */
+		break;
+	case XR_E_UNKNOWN:
+	case XR_E_FREE:
+	case XR_E_FREE1:
+		/* free or not yet seen: claim the block for inodes */
+		set_bmap(agno, agbno, XR_E_INO);
+		break;
+	case XR_E_BAD_STATE:
+		/* inconsistency in the block map itself, not the fs */
+		do_error(_("bad state in block map %d\n"), state);
+		break;
+	default:
+		/* block claimed by something else too: record the conflict */
+		set_bmap(agno, agbno, XR_E_MULT);
+		do_warn(
+	_("inode block %" PRIu64 " multiply claimed, state was %d\n"),
+		XFS_AGB_TO_FSB(mp, agno, agbno), state);
+		break;
+	}
+	pthread_mutex_unlock(&ag_locks[agno].lock);
+}
/*
* processes an inode allocation chunk/block, returns 1 on I/O errors,
*
* *bogus is set to 1 if the entire set of inodes is bad.
*/
-
-/* ARGSUSED */
static int
process_inode_chunk(
xfs_mount_t *mp,
int icnt;
int status;
int is_used;
- int state;
int ino_dirty;
int irec_offset;
int ibuf_offset;
* set up first irec
*/
ino_rec = first_irec;
+ irec_offset = 0;
bplist = malloc(cluster_count * sizeof(xfs_buf_t *));
if (bplist == NULL)
cluster_count * sizeof(xfs_buf_t *));
for (bp_index = 0; bp_index < cluster_count; bp_index++) {
+		/*
+		 * Skip the cluster buffer if the first inode is sparse. The
+		 * remaining inodes in the cluster share the same state, since
+		 * sparse inodes occur at cluster granularity.
+		 */
+ if (is_inode_sparse(ino_rec, irec_offset)) {
+ pftrace("skip sparse inode, startnum 0x%x idx %d",
+ ino_rec->ino_startnum, irec_offset);
+ bplist[bp_index] = NULL;
+ goto next_readbuf;
+ }
+
pftrace("about to read off %llu in AG %d",
XFS_AGB_TO_DADDR(mp, agno, agbno), agno);
free(bplist);
return(1);
}
- agbno += blks_per_cluster;
- bplist[bp_index]->b_ops = &xfs_inode_buf_ops;
pftrace("readbuf %p (%llu, %d) in AG %d", bplist[bp_index],
(long long)XFS_BUF_ADDR(bplist[bp_index]),
- XFS_BUF_COUNT(bplist[bp_index]), agno);
+ bplist[bp_index]->b_bcount, agno);
+
+ bplist[bp_index]->b_ops = &xfs_inode_buf_ops;
+
+next_readbuf:
+ irec_offset += mp->m_sb.sb_inopblock * blks_per_cluster;
+ agbno += blks_per_cluster;
}
agbno = XFS_AGINO_TO_AGBNO(mp, first_irec->ino_startnum);
*/
if (ino_discovery) {
for (;;) {
- /*
- * make inode pointer
- */
- dino = xfs_make_iptr(mp, bplist[bp_index], cluster_offset);
agino = irec_offset + ino_rec->ino_startnum;
- /*
- * we always think that the root and realtime
- * inodes are verified even though we may have
- * to reset them later to keep from losing the
- * chunk that they're in
- */
- if (verify_dinode(mp, dino, agno, agino) == 0 ||
- (agno == 0 &&
- (mp->m_sb.sb_rootino == agino ||
- mp->m_sb.sb_rsumino == agino ||
- mp->m_sb.sb_rbmino == agino)))
- status++;
+ /* no buffers for sparse clusters */
+ if (bplist[bp_index]) {
+ /* make inode pointer */
+ dino = xfs_make_iptr(mp, bplist[bp_index],
+ cluster_offset);
+
+ /*
+ * we always think that the root and realtime
+ * inodes are verified even though we may have
+ * to reset them later to keep from losing the
+ * chunk that they're in
+ */
+ if (verify_dinode(mp, dino, agno, agino) == 0 ||
+ (agno == 0 &&
+ (mp->m_sb.sb_rootino == agino ||
+ mp->m_sb.sb_rsumino == agino ||
+ mp->m_sb.sb_rbmino == agino)))
+ status++;
+ }
irec_offset++;
icnt++;
if (!status) {
*bogus = 1;
for (bp_index = 0; bp_index < cluster_count; bp_index++)
- libxfs_putbuf(bplist[bp_index]);
+ if (bplist[bp_index])
+ libxfs_putbuf(bplist[bp_index]);
free(bplist);
return(0);
}
/*
* mark block as an inode block in the incore bitmap
*/
- pthread_mutex_lock(&ag_locks[agno].lock);
- state = get_bmap(agno, agbno);
- switch (state) {
- case XR_E_INO: /* already marked */
- break;
- case XR_E_UNKNOWN:
- case XR_E_FREE:
- case XR_E_FREE1:
- set_bmap(agno, agbno, XR_E_INO);
- break;
- case XR_E_BAD_STATE:
- do_error(_("bad state in block map %d\n"), state);
- break;
- default:
- set_bmap(agno, agbno, XR_E_MULT);
- do_warn(_("inode block %" PRIu64 " multiply claimed, state was %d\n"),
- XFS_AGB_TO_FSB(mp, agno, agbno), state);
- break;
- }
- pthread_mutex_unlock(&ag_locks[agno].lock);
+ if (!is_inode_sparse(ino_rec, irec_offset))
+ process_inode_agbno_state(mp, agno, agbno);
for (;;) {
- /*
- * make inode pointer
- */
- dino = xfs_make_iptr(mp, bplist[bp_index], cluster_offset);
agino = irec_offset + ino_rec->ino_startnum;
ino = XFS_AGINO_TO_INO(mp, agno, agino);
+ if (is_inode_sparse(ino_rec, irec_offset))
+ goto process_next;
+
+ /* make inode pointer */
+ dino = xfs_make_iptr(mp, bplist[bp_index], cluster_offset);
+
+
is_used = 3;
ino_dirty = 0;
parent = 0;
* we do now, this is where to start.
*/
if (is_used) {
- __uint16_t di_mode;
-
if (is_inode_free(ino_rec, irec_offset)) {
if (verbose || no_modify) {
do_warn(
* store the on-disk file type for comparing in
* phase 6.
*/
- di_mode = be16_to_cpu(dino->di_mode);
- di_mode = (di_mode & S_IFMT) >> S_SHIFT;
set_inode_ftype(ino_rec, irec_offset,
- xfs_mode_to_ftype[di_mode]);
+ libxfs_mode_to_ftype(be16_to_cpu(dino->di_mode)));
/*
* store on-disk nlink count for comparing in phase 7
do_warn(_("would have cleared inode %" PRIu64 "\n"),
ino);
}
+ clear_inode_was_rl(ino_rec, irec_offset);
}
+process_next:
irec_offset++;
ibuf_offset++;
icnt++;
* done! - finished up irec and block simultaneously
*/
for (bp_index = 0; bp_index < cluster_count; bp_index++) {
+ if (!bplist[bp_index])
+ continue;
+
pftrace("put/writebuf %p (%llu) in AG %d",
bplist[bp_index], (long long)
XFS_BUF_ADDR(bplist[bp_index]), agno);
ibuf_offset = 0;
agbno++;
- pthread_mutex_lock(&ag_locks[agno].lock);
- state = get_bmap(agno, agbno);
- switch (state) {
- case XR_E_INO: /* already marked */
- break;
- case XR_E_UNKNOWN:
- case XR_E_FREE:
- case XR_E_FREE1:
- set_bmap(agno, agbno, XR_E_INO);
- break;
- case XR_E_BAD_STATE:
- do_error(_("bad state in block map %d\n"),
- state);
- break;
- default:
- set_bmap(agno, agbno, XR_E_MULT);
- do_warn(
- _("inode block %" PRIu64 " multiply claimed, state was %d\n"),
- XFS_AGB_TO_FSB(mp, agno, agbno), state);
- break;
- }
- pthread_mutex_unlock(&ag_locks[agno].lock);
-
+ if (!is_inode_sparse(ino_rec, irec_offset))
+ process_inode_agbno_state(mp, agno, agbno);
} else if (irec_offset == XFS_INODES_PER_CHUNK) {
/*
* get new irec (multiple chunks per block fs)