+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
* All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include <libxfs.h>
+#include "libxfs.h"
#include "avl.h"
#include "globals.h"
#include "agheader.h"
agbno = XFS_INO_TO_AGBNO(mp, ino);
*start_ino = NULLFSINO;
- ASSERT(XFS_IALLOC_BLOCKS(mp) > 0);
+ ASSERT(mp->m_ialloc_blks > 0);
if (agno == mp->m_sb.sb_agcount - 1)
max_agbno = mp->m_sb.sb_dblocks -
- (xfs_drfsbno_t) mp->m_sb.sb_agblocks * agno;
+ (xfs_rfsblock_t) mp->m_sb.sb_agblocks * agno;
else
max_agbno = mp->m_sb.sb_agblocks;
* check for the easy case, inodes per block >= XFS_INODES_PER_CHUNK
* (multiple chunks per block)
*/
- if (XFS_IALLOC_BLOCKS(mp) == 1) {
+ if (mp->m_ialloc_blks == 1) {
if (agbno > max_agbno)
return 0;
if (check_aginode_block(mp, agno, agino) == 0)
return 0;
- pthread_mutex_lock(&ag_locks[agno]);
+ pthread_mutex_lock(&ag_locks[agno].lock);
state = get_bmap(agno, agbno);
switch (state) {
_("inode block %d/%d multiply claimed, (state %d)\n"),
agno, agbno, state);
set_bmap(agno, agbno, XR_E_MULT);
- pthread_mutex_unlock(&ag_locks[agno]);
+ pthread_mutex_unlock(&ag_locks[agno].lock);
return(0);
default:
do_warn(
break;
}
- pthread_mutex_unlock(&ag_locks[agno]);
+ pthread_mutex_unlock(&ag_locks[agno].lock);
start_agino = XFS_OFFBNO_TO_AGINO(mp, agbno, 0);
*start_ino = XFS_AGINO_TO_INO(mp, agno, start_agino);
*/
start_agbno = rounddown(XFS_INO_TO_AGBNO(mp, ino),
fs_ino_alignment);
- end_agbno = start_agbno + XFS_IALLOC_BLOCKS(mp);
+ end_agbno = start_agbno + mp->m_ialloc_blks;
/*
* if this fs has aligned inodes but the end of the
* a discovered inode chunk completely within that range
* would include the inode passed into us.
*/
- if (XFS_IALLOC_BLOCKS(mp) > 1) {
- if (agino > XFS_IALLOC_INODES(mp))
- start_agbno = agbno - XFS_IALLOC_BLOCKS(mp) + 1;
+ if (mp->m_ialloc_blks > 1) {
+ if (agino > mp->m_ialloc_inos)
+ start_agbno = agbno - mp->m_ialloc_blks + 1;
else
start_agbno = 1;
}
- end_agbno = agbno + XFS_IALLOC_BLOCKS(mp);
+ end_agbno = agbno + mp->m_ialloc_blks;
if (end_agbno > max_agbno)
end_agbno = max_agbno;
start_agbno = XFS_AGINO_TO_AGBNO(mp,
irec_p->ino_startnum) +
- XFS_IALLOC_BLOCKS(mp);
+ mp->m_ialloc_blks;
/*
* we know that the inode we're trying to verify isn't
* of the gap -- is it within the search range?
*/
if (irec_next_p != NULL &&
- agino + XFS_IALLOC_INODES(mp) >=
+ agino + mp->m_ialloc_inos >=
irec_next_p->ino_startnum)
end_agbno = XFS_AGINO_TO_AGBNO(mp,
irec_next_p->ino_startnum);
* the inode in question and that the space between them
* is too small for a legal inode chunk
*/
- if (end_agbno - start_agbno < XFS_IALLOC_BLOCKS(mp))
+ if (end_agbno - start_agbno < mp->m_ialloc_blks)
return(0);
/*
num_blks = chunk_stop_agbno - chunk_start_agbno;
- if (num_blks < XFS_IALLOC_BLOCKS(mp) || ino_cnt == 0)
- return(0);
+ if (num_blks < mp->m_ialloc_blks || ino_cnt == 0)
+ return 0;
/*
* XXX - later - if the entire range is selected and they're all
* the chunk
*/
- if (num_blks % XFS_IALLOC_BLOCKS(mp) != 0) {
- num_blks = rounddown(num_blks, XFS_IALLOC_BLOCKS(mp));
+ if (num_blks % mp->m_ialloc_blks != 0) {
+ num_blks = rounddown(num_blks, mp->m_ialloc_blks);
chunk_stop_agbno = chunk_start_agbno + num_blks;
}
* user data -- we're probably here as a result of a directory
* entry or an iunlinked pointer
*/
- pthread_mutex_lock(&ag_locks[agno]);
+ pthread_mutex_lock(&ag_locks[agno].lock);
for (cur_agbno = chunk_start_agbno;
cur_agbno < chunk_stop_agbno;
cur_agbno += blen) {
_("inode block %d/%d multiply claimed, (state %d)\n"),
agno, cur_agbno, state);
set_bmap_ext(agno, cur_agbno, blen, XR_E_MULT);
- pthread_mutex_unlock(&ag_locks[agno]);
+ pthread_mutex_unlock(&ag_locks[agno].lock);
return 0;
case XR_E_INO:
do_error(
break;
}
}
- pthread_mutex_unlock(&ag_locks[agno]);
+ pthread_mutex_unlock(&ag_locks[agno].lock);
/*
* ok, chunk is good. put the record into the tree if required,
set_inode_used(irec_p, agino - start_agino);
- pthread_mutex_lock(&ag_locks[agno]);
+ pthread_mutex_lock(&ag_locks[agno].lock);
for (cur_agbno = chunk_start_agbno;
cur_agbno < chunk_stop_agbno;
break;
}
}
- pthread_mutex_unlock(&ag_locks[agno]);
+ pthread_mutex_unlock(&ag_locks[agno].lock);
return(ino_cnt);
}
return(irec);
}
+/*
+ * Set the state of an inode block during inode chunk processing. The block is
+ * expected to be in the free or inode state. If free, it transitions to the
+ * inode state. Warn if the block is in neither expected state as this indicates
+ * multiply claimed blocks.
+ */
+static void
+process_inode_agbno_state(
+ struct xfs_mount *mp,
+ xfs_agnumber_t agno,
+ xfs_agblock_t agbno)
+{
+ int state;
+ /*
+ * The per-AG lock serializes get_bmap/set_bmap access, matching the
+ * locking done at every other block-map call site in this file.
+ */
+ pthread_mutex_lock(&ag_locks[agno].lock);
+ state = get_bmap(agno, agbno);
+ switch (state) {
+ case XR_E_INO: /* already marked */
+ break;
+ case XR_E_UNKNOWN:
+ case XR_E_FREE:
+ case XR_E_FREE1:
+ /* free (or not-yet-seen) block now known to hold inodes */
+ set_bmap(agno, agbno, XR_E_INO);
+ break;
+ case XR_E_BAD_STATE:
+ do_error(_("bad state in block map %d\n"), state);
+ break;
+ default:
+ /* any other state means the block is claimed by something else */
+ set_bmap(agno, agbno, XR_E_MULT);
+ do_warn(
+ _("inode block %" PRIu64 " multiply claimed, state was %d\n"),
+ XFS_AGB_TO_FSB(mp, agno, agbno), state);
+ break;
+ }
+ pthread_mutex_unlock(&ag_locks[agno].lock);
+}
/*
* processes an inode allocation chunk/block, returns 1 on I/O errors,
*
* *bogus is set to 1 if the entire set of inodes is bad.
*/
-
-/* ARGSUSED */
static int
process_inode_chunk(
xfs_mount_t *mp,
int icnt;
int status;
int is_used;
- int state;
int ino_dirty;
int irec_offset;
int ibuf_offset;
ASSERT(XFS_AGINO_TO_OFFSET(mp, first_irec->ino_startnum) == 0);
*bogus = 0;
- ASSERT(XFS_IALLOC_BLOCKS(mp) > 0);
+ ASSERT(mp->m_ialloc_blks > 0);
- blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_blocklog;
+ blks_per_cluster = mp->m_inode_cluster_size >> mp->m_sb.sb_blocklog;
if (blks_per_cluster == 0)
blks_per_cluster = 1;
cluster_count = XFS_INODES_PER_CHUNK / inodes_per_cluster;
* set up first irec
*/
ino_rec = first_irec;
+ irec_offset = 0;
bplist = malloc(cluster_count * sizeof(xfs_buf_t *));
if (bplist == NULL)
cluster_count * sizeof(xfs_buf_t *));
for (bp_index = 0; bp_index < cluster_count; bp_index++) {
+ /*
+ * Skip the cluster buffer if the first inode is sparse. The
+ * remaining inodes in the cluster share the same state as
+ * sparse inodes occur at cluster granularity.
+ */
+ if (is_inode_sparse(ino_rec, irec_offset)) {
+ pftrace("skip sparse inode, startnum 0x%x idx %d",
+ ino_rec->ino_startnum, irec_offset);
+ bplist[bp_index] = NULL;
+ goto next_readbuf;
+ }
+
pftrace("about to read off %llu in AG %d",
XFS_AGB_TO_DADDR(mp, agno, agbno), agno);
bplist[bp_index] = libxfs_readbuf(mp->m_dev,
XFS_AGB_TO_DADDR(mp, agno, agbno),
XFS_FSB_TO_BB(mp, blks_per_cluster), 0,
- NULL);
+ &xfs_inode_buf_ops);
if (!bplist[bp_index]) {
do_warn(_("cannot read inode %" PRIu64 ", disk block %" PRId64 ", cnt %d\n"),
XFS_AGINO_TO_INO(mp, agno, first_irec->ino_startnum),
free(bplist);
return(1);
}
- agbno += blks_per_cluster;
- bplist[bp_index]->b_ops = &xfs_inode_buf_ops;
pftrace("readbuf %p (%llu, %d) in AG %d", bplist[bp_index],
(long long)XFS_BUF_ADDR(bplist[bp_index]),
- XFS_BUF_COUNT(bplist[bp_index]), agno);
+ bplist[bp_index]->b_bcount, agno);
+
+ bplist[bp_index]->b_ops = &xfs_inode_buf_ops;
+
+next_readbuf:
+ irec_offset += mp->m_sb.sb_inopblock * blks_per_cluster;
+ agbno += blks_per_cluster;
}
agbno = XFS_AGINO_TO_AGBNO(mp, first_irec->ino_startnum);
*/
if (ino_discovery) {
for (;;) {
- /*
- * make inode pointer
- */
- dino = xfs_make_iptr(mp, bplist[bp_index], cluster_offset);
agino = irec_offset + ino_rec->ino_startnum;
- /*
- * we always think that the root and realtime
- * inodes are verified even though we may have
- * to reset them later to keep from losing the
- * chunk that they're in
- */
- if (verify_dinode(mp, dino, agno, agino) == 0 ||
- (agno == 0 &&
- (mp->m_sb.sb_rootino == agino ||
- mp->m_sb.sb_rsumino == agino ||
- mp->m_sb.sb_rbmino == agino)))
- status++;
+ /* no buffers for sparse clusters */
+ if (bplist[bp_index]) {
+ /* make inode pointer */
+ dino = xfs_make_iptr(mp, bplist[bp_index],
+ cluster_offset);
+
+ /*
+ * we always think that the root and realtime
+ * inodes are verified even though we may have
+ * to reset them later to keep from losing the
+ * chunk that they're in
+ */
+ if (verify_dinode(mp, dino, agno, agino) == 0 ||
+ (agno == 0 &&
+ (mp->m_sb.sb_rootino == agino ||
+ mp->m_sb.sb_rsumino == agino ||
+ mp->m_sb.sb_rbmino == agino)))
+ status++;
+ }
irec_offset++;
icnt++;
cluster_offset++;
- if (icnt == XFS_IALLOC_INODES(mp) &&
+ if (icnt == mp->m_ialloc_inos &&
irec_offset == XFS_INODES_PER_CHUNK) {
/*
* done! - finished up irec and block
if (!status) {
*bogus = 1;
for (bp_index = 0; bp_index < cluster_count; bp_index++)
- libxfs_putbuf(bplist[bp_index]);
+ if (bplist[bp_index])
+ libxfs_putbuf(bplist[bp_index]);
free(bplist);
return(0);
}
/*
* mark block as an inode block in the incore bitmap
*/
- pthread_mutex_lock(&ag_locks[agno]);
- state = get_bmap(agno, agbno);
- switch (state) {
- case XR_E_INO: /* already marked */
- break;
- case XR_E_UNKNOWN:
- case XR_E_FREE:
- case XR_E_FREE1:
- set_bmap(agno, agbno, XR_E_INO);
- break;
- case XR_E_BAD_STATE:
- do_error(_("bad state in block map %d\n"), state);
- break;
- default:
- set_bmap(agno, agbno, XR_E_MULT);
- do_warn(_("inode block %" PRIu64 " multiply claimed, state was %d\n"),
- XFS_AGB_TO_FSB(mp, agno, agbno), state);
- break;
- }
- pthread_mutex_unlock(&ag_locks[agno]);
+ if (!is_inode_sparse(ino_rec, irec_offset))
+ process_inode_agbno_state(mp, agno, agbno);
for (;;) {
- /*
- * make inode pointer
- */
- dino = xfs_make_iptr(mp, bplist[bp_index], cluster_offset);
agino = irec_offset + ino_rec->ino_startnum;
ino = XFS_AGINO_TO_INO(mp, agno, agino);
+ if (is_inode_sparse(ino_rec, irec_offset))
+ goto process_next;
+
+ /* make inode pointer */
+ dino = xfs_make_iptr(mp, bplist[bp_index], cluster_offset);
+
+
is_used = 3;
ino_dirty = 0;
parent = 0;
extra_attr_check, &isa_dir, &parent);
ASSERT(is_used != 3);
- if (ino_dirty)
+ if (ino_dirty) {
dirty = 1;
+ libxfs_dinode_calc_crc(mp, dino);
+ }
+
/*
* XXX - if we want to try and keep
* track of whether we need to bang on
}
set_inode_used(ino_rec, irec_offset);
+ /*
+ * store the on-disk file type for comparing in
+ * phase 6.
+ */
+ set_inode_ftype(ino_rec, irec_offset,
+ libxfs_mode_to_ftype(be16_to_cpu(dino->di_mode)));
+
/*
* store on-disk nlink count for comparing in phase 7
*/
do_warn(_("would have cleared inode %" PRIu64 "\n"),
ino);
}
+ clear_inode_was_rl(ino_rec, irec_offset);
}
+process_next:
irec_offset++;
ibuf_offset++;
icnt++;
cluster_offset++;
- if (icnt == XFS_IALLOC_INODES(mp) &&
+ if (icnt == mp->m_ialloc_inos &&
irec_offset == XFS_INODES_PER_CHUNK) {
/*
* done! - finished up irec and block simultaneously
*/
for (bp_index = 0; bp_index < cluster_count; bp_index++) {
+ if (!bplist[bp_index])
+ continue;
+
pftrace("put/writebuf %p (%llu) in AG %d",
bplist[bp_index], (long long)
XFS_BUF_ADDR(bplist[bp_index]), agno);
ibuf_offset = 0;
agbno++;
- pthread_mutex_lock(&ag_locks[agno]);
- state = get_bmap(agno, agbno);
- switch (state) {
- case XR_E_INO: /* already marked */
- break;
- case XR_E_UNKNOWN:
- case XR_E_FREE:
- case XR_E_FREE1:
- set_bmap(agno, agbno, XR_E_INO);
- break;
- case XR_E_BAD_STATE:
- do_error(_("bad state in block map %d\n"),
- state);
- break;
- default:
- set_bmap(agno, agbno, XR_E_MULT);
- do_warn(
- _("inode block %" PRIu64 " multiply claimed, state was %d\n"),
- XFS_AGB_TO_FSB(mp, agno, agbno), state);
- break;
- }
- pthread_mutex_unlock(&ag_locks[agno]);
-
+ if (!is_inode_sparse(ino_rec, irec_offset))
+ process_inode_agbno_state(mp, agno, agbno);
} else if (irec_offset == XFS_INODES_PER_CHUNK) {
/*
* get new irec (multiple chunks per block fs)
* the next block before we call the processing routines.
*/
num_inos = XFS_INODES_PER_CHUNK;
- while (num_inos < XFS_IALLOC_INODES(mp) && ino_rec != NULL) {
+ while (num_inos < mp->m_ialloc_inos && ino_rec != NULL) {
/*
* inodes chunks will always be aligned and sized
* correctly
num_inos += XFS_INODES_PER_CHUNK;
}
- ASSERT(num_inos == XFS_IALLOC_INODES(mp));
+ ASSERT(num_inos == mp->m_ialloc_inos);
if (pf_args) {
sem_post(&pf_args->ra_count);
*/
num_inos = 0;
ino_rec = first_ino_rec;
- while (num_inos < XFS_IALLOC_INODES(mp) &&
+ while (num_inos < mp->m_ialloc_inos &&
ino_rec != NULL) {
prev_ino_rec = ino_rec;
* processing may add more records to the
* uncertain inode lists.
*/
- if (process_inode_chunk(mp, agno, XFS_IALLOC_INODES(mp),
+ if (process_inode_chunk(mp, agno, mp->m_ialloc_inos,
nrec, 1, 0, 0, &bogus)) {
/* XXX - i/o error, we've got a problem */
abort();