int blks_per_buf;
int inodes_per_buf;
int ioff;
+ struct xfs_ino_geometry *igeo = M_IGEO(mp);
if (xfs_sb_version_hassparseinodes(&mp->m_sb))
- blks_per_buf = mp->m_blocks_per_cluster;
+ blks_per_buf = igeo->blocks_per_cluster;
else
- blks_per_buf = mp->m_ialloc_blks;
+ blks_per_buf = igeo->ialloc_blks;
inodes_per_buf = min(XFS_FSB_TO_INO(mp, blks_per_buf),
XFS_INODES_PER_CHUNK);
}
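/*
 * Editorial illustration (not part of the patch): with an assumed
 * geometry of 4096-byte blocks and 512-byte inodes (8 inodes per
 * block), ialloc_blks = 8 and, say, blocks_per_cluster = 4:
 *
 *	sparse enabled:  blks_per_buf = 4
 *	                 inodes_per_buf = min(4 * 8, 64) = 32
 *	sparse disabled: blks_per_buf = 8
 *	                 inodes_per_buf = min(8 * 8, 64) = 64
 *
 * i.e. sparse-inode filesystems walk inodes one cluster buffer at a
 * time, everything else walks a full chunk at a time.
 */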
set_dbmap(seqno, bno, 1, DBM_BTINO, seqno, bno);
if (level == 0) {
- if (be16_to_cpu(block->bb_numrecs) > mp->m_inobt_mxr[0] ||
- (isroot == 0 && be16_to_cpu(block->bb_numrecs) < mp->m_inobt_mnr[0])) {
+ if (be16_to_cpu(block->bb_numrecs) > igeo->inobt_mxr[0] ||
+ (isroot == 0 && be16_to_cpu(block->bb_numrecs) < igeo->inobt_mnr[0])) {
dbprintf(_("bad btree nrecs (%u, min=%u, max=%u) in "
"inobt block %u/%u\n"),
- be16_to_cpu(block->bb_numrecs), mp->m_inobt_mnr[0],
- mp->m_inobt_mxr[0], seqno, bno);
+ be16_to_cpu(block->bb_numrecs), igeo->inobt_mnr[0],
+ igeo->inobt_mxr[0], seqno, bno);
serious_error++;
return;
}
agino = be32_to_cpu(rp[i].ir_startino);
agbno = XFS_AGINO_TO_AGBNO(mp, agino);
off = XFS_AGINO_TO_OFFSET(mp, agino);
- end_agbno = agbno + mp->m_ialloc_blks;
+ end_agbno = agbno + igeo->ialloc_blks;
if (off == 0) {
if ((sbversion & XFS_SB_VERSION_ALIGNBIT) &&
mp->m_sb.sb_inoalignmt &&
}
return;
}
- if (be16_to_cpu(block->bb_numrecs) > mp->m_inobt_mxr[1] ||
- (isroot == 0 && be16_to_cpu(block->bb_numrecs) < mp->m_inobt_mnr[1])) {
+ if (be16_to_cpu(block->bb_numrecs) > igeo->inobt_mxr[1] ||
+ (isroot == 0 && be16_to_cpu(block->bb_numrecs) < igeo->inobt_mnr[1])) {
dbprintf(_("bad btree nrecs (%u, min=%u, max=%u) in inobt block "
"%u/%u\n"),
- be16_to_cpu(block->bb_numrecs), mp->m_inobt_mnr[1],
- mp->m_inobt_mxr[1], seqno, bno);
+ be16_to_cpu(block->bb_numrecs), igeo->inobt_mnr[1],
+ igeo->inobt_mxr[1], seqno, bno);
serious_error++;
return;
}
- pp = XFS_INOBT_PTR_ADDR(mp, block, 1, mp->m_inobt_mxr[1]);
+ pp = XFS_INOBT_PTR_ADDR(mp, block, 1, igeo->inobt_mxr[1]);
for (i = 0; i < be16_to_cpu(block->bb_numrecs); i++)
scan_sbtree(agf, be32_to_cpu(pp[i]), level, 0, scanfunc_ino, TYP_INOBT);
}
int blks_per_buf;
int inodes_per_buf;
int ioff;
+ struct xfs_ino_geometry *igeo = M_IGEO(mp);
if (xfs_sb_version_hassparseinodes(&mp->m_sb))
- blks_per_buf = mp->m_blocks_per_cluster;
+ blks_per_buf = igeo->blocks_per_cluster;
else
- blks_per_buf = mp->m_ialloc_blks;
+ blks_per_buf = igeo->ialloc_blks;
inodes_per_buf = min(XFS_FSB_TO_INO(mp, blks_per_buf),
XFS_INODES_PER_CHUNK);
}
set_dbmap(seqno, bno, 1, DBM_BTFINO, seqno, bno);
if (level == 0) {
- if (be16_to_cpu(block->bb_numrecs) > mp->m_inobt_mxr[0] ||
- (isroot == 0 && be16_to_cpu(block->bb_numrecs) < mp->m_inobt_mnr[0])) {
+ if (be16_to_cpu(block->bb_numrecs) > igeo->inobt_mxr[0] ||
+ (isroot == 0 && be16_to_cpu(block->bb_numrecs) < igeo->inobt_mnr[0])) {
dbprintf(_("bad btree nrecs (%u, min=%u, max=%u) in "
"finobt block %u/%u\n"),
- be16_to_cpu(block->bb_numrecs), mp->m_inobt_mnr[0],
- mp->m_inobt_mxr[0], seqno, bno);
+ be16_to_cpu(block->bb_numrecs), igeo->inobt_mnr[0],
+ igeo->inobt_mxr[0], seqno, bno);
serious_error++;
return;
}
agino = be32_to_cpu(rp[i].ir_startino);
agbno = XFS_AGINO_TO_AGBNO(mp, agino);
off = XFS_AGINO_TO_OFFSET(mp, agino);
- end_agbno = agbno + mp->m_ialloc_blks;
+ end_agbno = agbno + igeo->ialloc_blks;
if (off == 0) {
if ((sbversion & XFS_SB_VERSION_ALIGNBIT) &&
mp->m_sb.sb_inoalignmt &&
}
return;
}
- if (be16_to_cpu(block->bb_numrecs) > mp->m_inobt_mxr[1] ||
- (isroot == 0 && be16_to_cpu(block->bb_numrecs) < mp->m_inobt_mnr[1])) {
+ if (be16_to_cpu(block->bb_numrecs) > igeo->inobt_mxr[1] ||
+ (isroot == 0 && be16_to_cpu(block->bb_numrecs) < igeo->inobt_mnr[1])) {
dbprintf(_("bad btree nrecs (%u, min=%u, max=%u) in finobt block "
"%u/%u\n"),
- be16_to_cpu(block->bb_numrecs), mp->m_inobt_mnr[1],
- mp->m_inobt_mxr[1], seqno, bno);
+ be16_to_cpu(block->bb_numrecs), igeo->inobt_mnr[1],
+ igeo->inobt_mxr[1], seqno, bno);
serious_error++;
return;
}
- pp = XFS_INOBT_PTR_ADDR(mp, block, 1, mp->m_inobt_mxr[1]);
+ pp = XFS_INOBT_PTR_ADDR(mp, block, 1, igeo->inobt_mxr[1]);
for (i = 0; i < be16_to_cpu(block->bb_numrecs); i++)
scan_sbtree(agf, be32_to_cpu(pp[i]), level, 0, scanfunc_fino, TYP_FINOBT);
}
int blks_per_buf;
int inodes_per_buf;
int ioff;
+ struct xfs_ino_geometry *igeo = M_IGEO(mp);
if (xfs_sb_version_hassparseinodes(&mp->m_sb))
- blks_per_buf = mp->m_blocks_per_cluster;
+ blks_per_buf = igeo->blocks_per_cluster;
else
- blks_per_buf = mp->m_ialloc_blks;
+ blks_per_buf = igeo->ialloc_blks;
inodes_per_buf = min(XFS_FSB_TO_INO(mp, blks_per_buf),
XFS_INODES_PER_CHUNK);
agino = be32_to_cpu(rp[i].ir_startino);
agbno = XFS_AGINO_TO_AGBNO(mp, agino);
off = XFS_AGINO_TO_OFFSET(mp, agino);
- end_agbno = agbno + mp->m_ialloc_blks;
+ end_agbno = agbno + igeo->ialloc_blks;
push_cur();
ioff = 0;
}
return;
}
- pp = XFS_INOBT_PTR_ADDR(mp, block, 1, mp->m_inobt_mxr[1]);
+ pp = XFS_INOBT_PTR_ADDR(mp, block, 1, igeo->inobt_mxr[1]);
for (i = 0; i < be16_to_cpu(block->bb_numrecs); i++)
scan_sbtree(agf, be32_to_cpu(pp[i]), level, scanfunc_ino,
TYP_INOBT);
int offset;
int numblks = blkbb;
xfs_agblock_t cluster_agbno;
+ struct xfs_ino_geometry *igeo = M_IGEO(mp);
agno = XFS_INO_TO_AGNO(mp, ino);
}
cur_agno = agno;
- if (mp->m_inode_cluster_size > mp->m_sb.sb_blocksize &&
- mp->m_inoalign_mask) {
+ if (igeo->inode_cluster_size > mp->m_sb.sb_blocksize &&
+ igeo->inoalign_mask) {
xfs_agblock_t chunk_agbno;
xfs_agblock_t offset_agbno;
int blks_per_cluster;
- blks_per_cluster = mp->m_inode_cluster_size >>
+ blks_per_cluster = igeo->inode_cluster_size >>
mp->m_sb.sb_blocklog;
- offset_agbno = agbno & mp->m_inoalign_mask;
+ offset_agbno = agbno & igeo->inoalign_mask;
chunk_agbno = agbno - offset_agbno;
cluster_agbno = chunk_agbno +
((offset_agbno / blks_per_cluster) * blks_per_cluster);
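/*
 * Editorial illustration (not part of the patch): assuming
 * inoalign_mask = 7 (8-block inode alignment) and blks_per_cluster = 4,
 * an inode living at agbno 21 maps as:
 *
 *	offset_agbno  = 21 & 7           = 5
 *	chunk_agbno   = 21 - 5           = 16
 *	cluster_agbno = 16 + (5 / 4) * 4 = 20
 *
 * so the inode is read through the cluster buffer starting at agbno 20.
 */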
xfs_alloc_key_t *akp;
char *zp1, *zp2;
char *key_end;
+ struct xfs_ino_geometry *igeo = M_IGEO(mp);
nrecs = be16_to_cpu(block->bb_numrecs);
if (nrecs < 0)
break;
case TYP_INOBT:
case TYP_FINOBT:
- if (nrecs > mp->m_inobt_mxr[1])
+ if (nrecs > igeo->inobt_mxr[1])
return;
ikp = XFS_INOBT_KEY_ADDR(mp, block, 1);
- ipp = XFS_INOBT_PTR_ADDR(mp, block, 1, mp->m_inobt_mxr[1]);
+ ipp = XFS_INOBT_PTR_ADDR(mp, block, 1, igeo->inobt_mxr[1]);
zp1 = (char *)&ikp[nrecs];
zp2 = (char *)&ipp[nrecs];
key_end = (char *)ipp;
break;
case TYP_INOBT:
case TYP_FINOBT:
- if (nrecs > mp->m_inobt_mxr[0])
+ if (nrecs > M_IGEO(mp)->inobt_mxr[0])
return;
irp = XFS_INOBT_REC_ADDR(mp, block, 1);
int blks_per_buf;
int inodes_per_buf;
int ioff;
+ struct xfs_ino_geometry *igeo = M_IGEO(mp);
agino = be32_to_cpu(rp->ir_startino);
agbno = XFS_AGINO_TO_AGBNO(mp, agino);
- end_agbno = agbno + mp->m_ialloc_blks;
+ end_agbno = agbno + igeo->ialloc_blks;
off = XFS_INO_TO_OFFSET(mp, agino);
/*
* we've been passed (large block sizes can hold multiple inode chunks).
*/
if (xfs_sb_version_hassparseinodes(&mp->m_sb))
- blks_per_buf = mp->m_blocks_per_cluster;
+ blks_per_buf = igeo->blocks_per_cluster;
else
- blks_per_buf = mp->m_ialloc_blks;
+ blks_per_buf = igeo->ialloc_blks;
inodes_per_buf = min(XFS_FSB_TO_INO(mp, blks_per_buf),
XFS_INODES_PER_CHUNK);
int i;
int numrecs;
int finobt = *(int *) arg;
+ struct xfs_ino_geometry *igeo = M_IGEO(mp);
numrecs = be16_to_cpu(block->bb_numrecs);
if (level == 0) {
- if (numrecs > mp->m_inobt_mxr[0]) {
+ if (numrecs > igeo->inobt_mxr[0]) {
if (show_warnings)
print_warning("invalid numrecs %d in %s "
"block %u/%u", numrecs,
typtab[btype].name, agno, agbno);
- numrecs = mp->m_inobt_mxr[0];
+ numrecs = igeo->inobt_mxr[0];
}
/*
return 1;
}
- if (numrecs > mp->m_inobt_mxr[1]) {
+ if (numrecs > igeo->inobt_mxr[1]) {
if (show_warnings)
print_warning("invalid numrecs %d in %s block %u/%u",
numrecs, typtab[btype].name, agno, agbno);
- numrecs = mp->m_inobt_mxr[1];
+ numrecs = igeo->inobt_mxr[1];
}
- pp = XFS_INOBT_PTR_ADDR(mp, block, 1, mp->m_inobt_mxr[1]);
+ pp = XFS_INOBT_PTR_ADDR(mp, block, 1, igeo->inobt_mxr[1]);
for (i = 0; i < numrecs; i++) {
if (!valid_bno(agno, be32_to_cpu(pp[i]))) {
if (show_warnings)
xfs_agnumber_t m_agfrotor; /* last ag where space found */
xfs_agnumber_t m_agirotor; /* last ag dir inode alloced */
xfs_agnumber_t m_maxagi; /* highest inode alloc group */
+ struct xfs_ino_geometry m_ino_geo; /* inode geometry */
uint m_rsumlevels; /* rt summary levels */
uint m_rsumsize; /* size of rt summary, bytes */
/*
uint8_t m_blkbb_log; /* blocklog - BBSHIFT */
uint8_t m_sectbb_log; /* sectorlog - BBSHIFT */
uint8_t m_agno_log; /* log #ag's */
- uint8_t m_agino_log; /* #bits for agino in inum */
- uint m_inode_cluster_size;/* min inode buf size */
- unsigned int m_inodes_per_cluster;
- unsigned int m_blocks_per_cluster;
- unsigned int m_cluster_align;
- unsigned int m_cluster_align_inodes;
uint m_blockmask; /* sb_blocksize-1 */
uint m_blockwsize; /* sb_blocksize in words */
uint m_blockwmask; /* blockwsize-1 */
uint m_alloc_mnr[2]; /* XFS_ALLOC_BLOCK_MINRECS */
uint m_bmap_dmxr[2]; /* XFS_BMAP_BLOCK_DMAXRECS */
uint m_bmap_dmnr[2]; /* XFS_BMAP_BLOCK_DMINRECS */
- uint m_inobt_mxr[2]; /* XFS_INOBT_BLOCK_MAXRECS */
- uint m_inobt_mnr[2]; /* XFS_INOBT_BLOCK_MINRECS */
uint m_rmap_mxr[2]; /* max rmap btree records */
uint m_rmap_mnr[2]; /* min rmap btree records */
uint m_refc_mxr[2]; /* max refc btree records */
uint m_refc_mnr[2]; /* min refc btree records */
uint m_ag_maxlevels; /* XFS_AG_MAXLEVELS */
uint m_bm_maxlevels[2]; /* XFS_BM_MAXLEVELS */
- uint m_in_maxlevels; /* XFS_IN_MAXLEVELS */
uint m_rmap_maxlevels; /* max rmap btree levels */
uint m_refc_maxlevels; /* max refc btree levels */
xfs_extlen_t m_ag_prealloc_blocks; /* reserved ag blocks */
bool m_finobt_nores; /* no per-AG finobt resv. */
uint m_qflags; /* quota status flags */
uint m_attroffset; /* inode attribute offset */
- int m_ialloc_inos; /* inodes in inode allocation */
- int m_ialloc_blks; /* blocks in inode allocation */
- int m_ialloc_min_blks; /* min blocks in sparse inode
- * allocation */
- int m_litino; /* size of inode union area */
- int m_inoalign_mask;/* mask sb_inoalignmt if used */
struct xfs_trans_resv m_resv; /* precomputed res values */
- uint64_t m_maxicount; /* maximum inode count */
int m_dalign; /* stripe unit */
int m_swidth; /* stripe width */
- int m_sinoalign; /* stripe unit inode alignmnt */
const struct xfs_nameops *m_dirnameops; /* vector of dir name ops */
struct xfs_da_geometry *m_dir_geo; /* directory block geometry */
* allows an application to initialize and store a reference to the log
* if warranted.
*/
- struct xlog *m_log;
+ struct xlog *m_log; /* log specific stuff */
} xfs_mount_t;
+#define M_IGEO(mp) (&(mp)->m_ino_geo)
+
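/*
 * Editorial illustration (not part of the patch): callers reach the
 * inode geometry through the accessor macro, either directly or via a
 * cached local pointer when a function touches several fields:
 *
 *	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
 *
 *	blocks = igeo->ialloc_blks;
 *	maxrecs = M_IGEO(mp)->inobt_mxr[0];
 */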
/* per-AG block reservation data structures*/
enum xfs_ag_resv_type {
XFS_AG_RESV_NONE = 0,
* Calculate how much should be reserved for inodes to meet
* the max inode percentage.
*/
- if (mp->m_maxicount) {
+ if (M_IGEO(mp)->maxicount) {
uint64_t icount;
icount = sbp->sb_dblocks * sbp->sb_imax_pct;
xfs_buf_t *bp;
xfs_sb_t *sbp;
int error;
+ struct xfs_ino_geometry *igeo = M_IGEO(mp);
+
libxfs_buftarg_init(mp, dev, logdev, rtdev);
/* Make sure the maximum inode count is a multiple of the
* units we allocate inodes in.
*/
- mp->m_maxicount = (sbp->sb_dblocks * sbp->sb_imax_pct) / 100;
- mp->m_maxicount = XFS_FSB_TO_INO(mp,
- (mp->m_maxicount / mp->m_ialloc_blks) *
- mp->m_ialloc_blks);
+ igeo->maxicount = (sbp->sb_dblocks * sbp->sb_imax_pct) / 100;
+ igeo->maxicount = XFS_FSB_TO_INO(mp,
+ (igeo->maxicount / igeo->ialloc_blks) *
+ igeo->ialloc_blks);
} else
- mp->m_maxicount = 0;
+ igeo->maxicount = 0;
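/*
 * Editorial illustration (not part of the patch): assuming a filesystem
 * with sb_dblocks = 1000000, sb_imax_pct = 25, ialloc_blks = 8 and
 * 8 inodes per block:
 *
 *	maxicount = 1000000 * 25 / 100         = 250000 blocks
 *	rounded   = (250000 / 8) * 8           = 250000 blocks
 *	maxicount = XFS_FSB_TO_INO(mp, 250000) = 2000000 inodes
 */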
- mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE;
+ igeo->inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE;
if (xfs_sb_version_hascrc(&mp->m_sb)) {
- int new_size = mp->m_inode_cluster_size;
+ int new_size = igeo->inode_cluster_size;
new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE;
if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size))
- mp->m_inode_cluster_size = new_size;
+ igeo->inode_cluster_size = new_size;
}
- mp->m_blocks_per_cluster = xfs_icluster_size_fsb(mp);
- mp->m_inodes_per_cluster = XFS_FSB_TO_INO(mp, mp->m_blocks_per_cluster);
- mp->m_cluster_align = xfs_ialloc_cluster_alignment(mp);
- mp->m_cluster_align_inodes = XFS_FSB_TO_INO(mp, mp->m_cluster_align);
+ igeo->blocks_per_cluster = xfs_icluster_size_fsb(mp);
+ igeo->inodes_per_cluster = XFS_FSB_TO_INO(mp, igeo->blocks_per_cluster);
+ igeo->cluster_align = xfs_ialloc_cluster_alignment(mp);
+ igeo->cluster_align_inodes = XFS_FSB_TO_INO(mp, igeo->cluster_align);
/*
* Set whether we're using stripe alignment.
*/
if (xfs_sb_version_hasalign(&mp->m_sb) &&
mp->m_sb.sb_inoalignmt >=
- XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size))
- mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1;
+ XFS_B_TO_FSBT(mp, igeo->inode_cluster_size))
+ igeo->inoalign_mask = mp->m_sb.sb_inoalignmt - 1;
else
- mp->m_inoalign_mask = 0;
+ igeo->inoalign_mask = 0;
/*
* If we are using stripe alignment, check whether
* the stripe unit is a multiple of the inode alignment
*/
- if (mp->m_dalign && mp->m_inoalign_mask &&
- !(mp->m_dalign & mp->m_inoalign_mask))
- mp->m_sinoalign = mp->m_dalign;
+ if (mp->m_dalign && igeo->inoalign_mask &&
+ !(mp->m_dalign & igeo->inoalign_mask))
+ igeo->ialloc_align = mp->m_dalign;
else
- mp->m_sinoalign = 0;
+ igeo->ialloc_align = 0;
/*
* Check that the data (and log if separate) are an ok size.
#define XFS_INO_MASK(k) (uint32_t)((1ULL << (k)) - 1)
#define XFS_INO_OFFSET_BITS(mp) (mp)->m_sb.sb_inopblog
#define XFS_INO_AGBNO_BITS(mp) (mp)->m_sb.sb_agblklog
-#define XFS_INO_AGINO_BITS(mp) (mp)->m_agino_log
+#define XFS_INO_AGINO_BITS(mp) ((mp)->m_ino_geo.agino_log)
#define XFS_INO_AGNO_BITS(mp) (mp)->m_agno_log
#define XFS_INO_BITS(mp) \
XFS_INO_AGNO_BITS(mp) + XFS_INO_AGINO_BITS(mp)
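/*
 * Editorial illustration (not part of the patch): agino_log is set in
 * xfs_sb_mount_common() as sb_inopblog + sb_agblklog. Assuming
 * 4096-byte blocks with 512-byte inodes (inopblog = 3) and AGs of at
 * most 2^20 blocks (agblklog = 20), XFS_INO_AGINO_BITS() yields 23, so
 * an inode number splits into agno bits followed by 23 agino bits.
 */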
#define SGI_ACL_FILE_SIZE (sizeof(SGI_ACL_FILE)-1)
#define SGI_ACL_DEFAULT_SIZE (sizeof(SGI_ACL_DEFAULT)-1)
+struct xfs_ino_geometry {
+ /* Maximum inode count in this filesystem. */
+ uint64_t maxicount;
+
+ /*
+ * Desired inode cluster buffer size, in bytes. This value is not
+ * rounded up to at least one filesystem block.
+ */
+ unsigned int inode_cluster_size;
+
+ /* Inode cluster sizes, adjusted to be at least 1 fsb. */
+ unsigned int inodes_per_cluster;
+ unsigned int blocks_per_cluster;
+
+ /* Inode cluster alignment. */
+ unsigned int cluster_align;
+ unsigned int cluster_align_inodes;
+ unsigned int inoalign_mask; /* mask sb_inoalignmt if used */
+
+ unsigned int inobt_mxr[2]; /* max inobt btree records */
+ unsigned int inobt_mnr[2]; /* min inobt btree records */
+ unsigned int inobt_maxlevels; /* max inobt btree levels. */
+
+ /* Size of inode allocations under normal operation. */
+ unsigned int ialloc_inos;
+ unsigned int ialloc_blks;
+
+ /* Minimum inode blocks for a sparse allocation. */
+ unsigned int ialloc_min_blks;
+
+ /* stripe unit inode alignment */
+ unsigned int ialloc_align;
+
+ unsigned int agino_log; /* #bits for agino in inum */
+};
+
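/*
 * Editorial illustration (not part of the patch): one plausible set of
 * values, assuming a V5 filesystem with 4096-byte blocks, 512-byte
 * inodes and a 16384-byte inode cluster:
 *
 *	ialloc_inos        = max(XFS_INODES_PER_CHUNK, 8) = 64
 *	ialloc_blks        = 64 >> 3                      = 8
 *	blocks_per_cluster = 16384 >> 12                  = 4
 *	inodes_per_cluster = 4 * 8                        = 32
 */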
#endif /* __XFS_FORMAT_H__ */
* sizes, manipulate the inodes in buffers which are multiples of the
 * block size.
*/
- nbufs = length / mp->m_blocks_per_cluster;
+ nbufs = length / M_IGEO(mp)->blocks_per_cluster;
/*
* Figure out what version number to use in the inodes we create. If
* Get the block.
*/
d = XFS_AGB_TO_DADDR(mp, agno, agbno +
- (j * mp->m_blocks_per_cluster));
+ (j * M_IGEO(mp)->blocks_per_cluster));
fbuf = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
- mp->m_bsize * mp->m_blocks_per_cluster,
+ mp->m_bsize *
+ M_IGEO(mp)->blocks_per_cluster,
XBF_UNMAPPED);
if (!fbuf)
return -ENOMEM;
/* Initialize the inode buffers and log them appropriately. */
fbuf->b_ops = &xfs_inode_buf_ops;
xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length));
- for (i = 0; i < mp->m_inodes_per_cluster; i++) {
+ for (i = 0; i < M_IGEO(mp)->inodes_per_cluster; i++) {
int ioffset = i << mp->m_sb.sb_inodelog;
uint isize = xfs_dinode_size(version);
* Allocate new inodes in the allocation group specified by agbp.
* Return 0 for success, else error code.
*/
-STATIC int /* error code or 0 */
+STATIC int
xfs_ialloc_ag_alloc(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_buf_t *agbp, /* alloc group buffer */
- int *alloc)
+ struct xfs_trans *tp,
+ struct xfs_buf *agbp,
+ int *alloc)
{
- xfs_agi_t *agi; /* allocation group header */
- xfs_alloc_arg_t args; /* allocation argument structure */
- xfs_agnumber_t agno;
- int error;
- xfs_agino_t newino; /* new first inode's number */
- xfs_agino_t newlen; /* new number of inodes */
- int isaligned = 0; /* inode allocation at stripe unit */
- /* boundary */
- uint16_t allocmask = (uint16_t) -1; /* init. to full chunk */
+ struct xfs_agi *agi;
+ struct xfs_alloc_arg args;
+ xfs_agnumber_t agno;
+ int error;
+ xfs_agino_t newino; /* new first inode's number */
+ xfs_agino_t newlen; /* new number of inodes */
+ int isaligned = 0; /* inode allocation at stripe */
+ /* unit boundary */
+ /* init. to full chunk */
+ uint16_t allocmask = (uint16_t) -1;
struct xfs_inobt_rec_incore rec;
- struct xfs_perag *pag;
- int do_sparse = 0;
+ struct xfs_perag *pag;
+ struct xfs_ino_geometry *igeo = M_IGEO(tp->t_mountp);
+ int do_sparse = 0;
memset(&args, 0, sizeof(args));
args.tp = tp;
#ifdef DEBUG
/* randomly do sparse inode allocations */
if (xfs_sb_version_hassparseinodes(&tp->t_mountp->m_sb) &&
- args.mp->m_ialloc_min_blks < args.mp->m_ialloc_blks)
+ igeo->ialloc_min_blks < igeo->ialloc_blks)
do_sparse = prandom_u32() & 1;
#endif
* Locking will ensure that we don't have two callers in here
* at one time.
*/
- newlen = args.mp->m_ialloc_inos;
- if (args.mp->m_maxicount &&
+ newlen = igeo->ialloc_inos;
+ if (igeo->maxicount &&
percpu_counter_read_positive(&args.mp->m_icount) + newlen >
- args.mp->m_maxicount)
+ igeo->maxicount)
return -ENOSPC;
- args.minlen = args.maxlen = args.mp->m_ialloc_blks;
+ args.minlen = args.maxlen = igeo->ialloc_blks;
/*
* First try to allocate inodes contiguous with the last-allocated
* chunk of inodes. If the filesystem is striped, this will fill
newino = be32_to_cpu(agi->agi_newino);
agno = be32_to_cpu(agi->agi_seqno);
args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
- args.mp->m_ialloc_blks;
+ igeo->ialloc_blks;
if (do_sparse)
goto sparse_alloc;
if (likely(newino != NULLAGINO &&
* but not to use them in the actual exact allocation.
*/
args.alignment = 1;
- args.minalignslop = args.mp->m_cluster_align - 1;
+ args.minalignslop = igeo->cluster_align - 1;
/* Allow space for the inode btree to split. */
- args.minleft = args.mp->m_in_maxlevels - 1;
+ args.minleft = igeo->inobt_maxlevels - 1;
if ((error = xfs_alloc_vextent(&args)))
return error;
* pieces, so don't need alignment anyway.
*/
isaligned = 0;
- if (args.mp->m_sinoalign) {
+ if (igeo->ialloc_align) {
ASSERT(!(args.mp->m_flags & XFS_MOUNT_NOALIGN));
args.alignment = args.mp->m_dalign;
isaligned = 1;
} else
- args.alignment = args.mp->m_cluster_align;
+ args.alignment = igeo->cluster_align;
/*
* Need to figure out where to allocate the inode blocks.
* Ideally they should be spaced out through the a.g.
/*
* Allow space for the inode btree to split.
*/
- args.minleft = args.mp->m_in_maxlevels - 1;
+ args.minleft = igeo->inobt_maxlevels - 1;
if ((error = xfs_alloc_vextent(&args)))
return error;
}
args.type = XFS_ALLOCTYPE_NEAR_BNO;
args.agbno = be32_to_cpu(agi->agi_root);
args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
- args.alignment = args.mp->m_cluster_align;
+ args.alignment = igeo->cluster_align;
if ((error = xfs_alloc_vextent(&args)))
return error;
}
* the sparse allocation length is smaller than a full chunk.
*/
if (xfs_sb_version_hassparseinodes(&args.mp->m_sb) &&
- args.mp->m_ialloc_min_blks < args.mp->m_ialloc_blks &&
+ igeo->ialloc_min_blks < igeo->ialloc_blks &&
args.fsbno == NULLFSBLOCK) {
sparse_alloc:
args.type = XFS_ALLOCTYPE_NEAR_BNO;
args.alignment = args.mp->m_sb.sb_spino_align;
args.prod = 1;
- args.minlen = args.mp->m_ialloc_min_blks;
+ args.minlen = igeo->ialloc_min_blks;
args.maxlen = args.minlen;
/*
args.min_agbno = args.mp->m_sb.sb_inoalignmt;
args.max_agbno = round_down(args.mp->m_sb.sb_agblocks,
args.mp->m_sb.sb_inoalignmt) -
- args.mp->m_ialloc_blks;
+ igeo->ialloc_blks;
error = xfs_alloc_vextent(&args);
if (error)
* space needed for alignment of inode chunks when checking the
* longest contiguous free space in the AG - this prevents us
* from getting ENOSPC because we have free space larger than
- * m_ialloc_blks but alignment constraints prevent us from using
+ * ialloc_blks but alignment constraints prevent us from using
* it.
*
* If we can't find an AG with space for full alignment slack to
* if we fail allocation due to alignment issues then it is most
* likely a real ENOSPC condition.
*/
- ineed = mp->m_ialloc_min_blks;
+ ineed = M_IGEO(mp)->ialloc_min_blks;
if (flags && ineed > 1)
- ineed += mp->m_cluster_align;
+ ineed += M_IGEO(mp)->cluster_align;
longest = pag->pagf_longest;
if (!longest)
longest = pag->pagf_flcount > 0;
int noroom = 0;
xfs_agnumber_t start_agno;
struct xfs_perag *pag;
+ struct xfs_ino_geometry *igeo = M_IGEO(mp);
int okalloc = 1;
if (*IO_agbp) {
 * Read a rough value of mp->m_icount via percpu_counter_read_positive,
 * which sacrifices precision for performance.
*/
- if (mp->m_maxicount &&
- percpu_counter_read_positive(&mp->m_icount) + mp->m_ialloc_inos
- > mp->m_maxicount) {
+ if (igeo->maxicount &&
+ percpu_counter_read_positive(&mp->m_icount) + igeo->ialloc_inos
+ > igeo->maxicount) {
noroom = 1;
okalloc = 0;
}
if (!xfs_inobt_issparse(rec->ir_holemask)) {
/* not sparse, calculate extent info directly */
xfs_bmap_add_free(tp, XFS_AGB_TO_FSB(mp, agno, sagbno),
- mp->m_ialloc_blks, &XFS_RMAP_OINFO_INODES);
+ M_IGEO(mp)->ialloc_blks,
+ &XFS_RMAP_OINFO_INODES);
return;
}
/* check that the returned record contains the required inode */
if (rec.ir_startino > agino ||
- rec.ir_startino + mp->m_ialloc_inos <= agino)
+ rec.ir_startino + M_IGEO(mp)->ialloc_inos <= agino)
return -EINVAL;
/* for untrusted inodes check it is allocated first */
* If the inode cluster size is the same as the blocksize or
* smaller we get to the buffer by simple arithmetics.
*/
- if (mp->m_blocks_per_cluster == 1) {
+ if (M_IGEO(mp)->blocks_per_cluster == 1) {
offset = XFS_INO_TO_OFFSET(mp, ino);
ASSERT(offset < mp->m_sb.sb_inopblock);
* find the location. Otherwise we have to do a btree
* lookup to find the location.
*/
- if (mp->m_inoalign_mask) {
- offset_agbno = agbno & mp->m_inoalign_mask;
+ if (M_IGEO(mp)->inoalign_mask) {
+ offset_agbno = agbno & M_IGEO(mp)->inoalign_mask;
chunk_agbno = agbno - offset_agbno;
} else {
error = xfs_imap_lookup(mp, tp, agno, agino, agbno,
out_map:
ASSERT(agbno >= chunk_agbno);
cluster_agbno = chunk_agbno +
- ((offset_agbno / mp->m_blocks_per_cluster) *
- mp->m_blocks_per_cluster);
+ ((offset_agbno / M_IGEO(mp)->blocks_per_cluster) *
+ M_IGEO(mp)->blocks_per_cluster);
offset = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) +
XFS_INO_TO_OFFSET(mp, ino);
imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, cluster_agbno);
- imap->im_len = XFS_FSB_TO_BB(mp, mp->m_blocks_per_cluster);
+ imap->im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster);
imap->im_boffset = (unsigned short)(offset << mp->m_sb.sb_inodelog);
/*
}
/*
- * Compute and fill in value of m_in_maxlevels.
+ * Compute and fill in value of m_ino_geo.inobt_maxlevels.
*/
void
xfs_ialloc_compute_maxlevels(
uint inodes;
inodes = (1LL << XFS_INO_AGINO_BITS(mp)) >> XFS_INODES_PER_CHUNK_LOG;
- mp->m_in_maxlevels = xfs_btree_compute_maxlevels(mp->m_inobt_mnr,
- inodes);
+ M_IGEO(mp)->inobt_maxlevels = xfs_btree_compute_maxlevels(
+ M_IGEO(mp)->inobt_mnr, inodes);
}
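/*
 * Editorial illustration (not part of the patch): with agino_log = 23
 * an AG can hold (1 << 23) / 64 = 131072 inode chunk records; with
 * assumed minrecs of {127, 255} (4096-byte blocks, short non-CRC btree
 * headers) that needs ceil(131072/127) = 1033 leaves, then
 * ceil(1033/255) = 5 nodes, then 1 root, so inobt_maxlevels = 3.
 */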
/*
xfs_icluster_size_fsb(
struct xfs_mount *mp)
{
- if (mp->m_sb.sb_blocksize >= mp->m_inode_cluster_size)
+ if (mp->m_sb.sb_blocksize >= M_IGEO(mp)->inode_cluster_size)
return 1;
- return mp->m_inode_cluster_size >> mp->m_sb.sb_blocklog;
+ return M_IGEO(mp)->inode_cluster_size >> mp->m_sb.sb_blocklog;
}
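/*
 * Editorial illustration (not part of the patch): for 4096-byte blocks
 * and an 8192-byte inode cluster this returns 8192 >> 12 = 2 fsb; for
 * 65536-byte blocks the cluster fits inside one block and the function
 * returns 1, which is why callers can treat the result as "blocks per
 * inode cluster buffer".
 */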
/*
uint flags); /* flags for inode btree lookup */
/*
- * Compute and fill in value of m_in_maxlevels.
+ * Compute and fill in value of m_ino_geo.inobt_maxlevels.
*/
void
xfs_ialloc_compute_maxlevels(
struct xfs_btree_cur *cur,
int level)
{
- return cur->bc_mp->m_inobt_mnr[level != 0];
+ return M_IGEO(cur->bc_mp)->inobt_mnr[level != 0];
}
STATIC struct xfs_btree_cur *
struct xfs_btree_cur *cur,
int level)
{
- return cur->bc_mp->m_inobt_mxr[level != 0];
+ return M_IGEO(cur->bc_mp)->inobt_mxr[level != 0];
}
STATIC void
/* level verification */
level = be16_to_cpu(block->bb_level);
- if (level >= mp->m_in_maxlevels)
+ if (level >= M_IGEO(mp)->inobt_maxlevels)
return __this_address;
- return xfs_btree_sblock_verify(bp, mp->m_inobt_mxr[level != 0]);
+ return xfs_btree_sblock_verify(bp,
+ M_IGEO(mp)->inobt_mxr[level != 0]);
}
static void
xfs_agblock_t agblocks = xfs_ag_block_count(mp, agno);
/* Bail out if we're uninitialized, which can happen in mkfs. */
- if (mp->m_inobt_mxr[0] == 0)
+ if (M_IGEO(mp)->inobt_mxr[0] == 0)
return 0;
/*
XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == agno)
agblocks -= mp->m_sb.sb_logblocks;
- return xfs_btree_calc_size(mp->m_inobt_mnr,
+ return xfs_btree_calc_size(M_IGEO(mp)->inobt_mnr,
(uint64_t)agblocks * mp->m_sb.sb_inopblock /
XFS_INODES_PER_CHUNK);
}
struct xfs_mount *mp,
unsigned long long len)
{
- return xfs_btree_calc_size(mp->m_inobt_mnr, len);
+ return xfs_btree_calc_size(M_IGEO(mp)->inobt_mnr, len);
}
int j;
xfs_dinode_t *dip;
- j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
+ j = M_IGEO(mp)->inode_cluster_size >> mp->m_sb.sb_inodelog;
for (i = 0; i < j; i++) {
dip = xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize);
*/
void
xfs_sb_mount_common(
- struct xfs_mount *mp,
- struct xfs_sb *sbp)
+ struct xfs_mount *mp,
+ struct xfs_sb *sbp)
{
+ struct xfs_ino_geometry *igeo = M_IGEO(mp);
+
mp->m_agfrotor = mp->m_agirotor = 0;
mp->m_maxagi = mp->m_sb.sb_agcount;
mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG;
mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT;
mp->m_sectbb_log = sbp->sb_sectlog - BBSHIFT;
mp->m_agno_log = xfs_highbit32(sbp->sb_agcount - 1) + 1;
- mp->m_agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
+ igeo->agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
mp->m_blockmask = sbp->sb_blocksize - 1;
mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG;
mp->m_blockwmask = mp->m_blockwsize - 1;
mp->m_alloc_mnr[0] = mp->m_alloc_mxr[0] / 2;
mp->m_alloc_mnr[1] = mp->m_alloc_mxr[1] / 2;
- mp->m_inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1);
- mp->m_inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0);
- mp->m_inobt_mnr[0] = mp->m_inobt_mxr[0] / 2;
- mp->m_inobt_mnr[1] = mp->m_inobt_mxr[1] / 2;
+ igeo->inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1);
+ igeo->inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0);
+ igeo->inobt_mnr[0] = igeo->inobt_mxr[0] / 2;
+ igeo->inobt_mnr[1] = igeo->inobt_mxr[1] / 2;
mp->m_bmap_dmxr[0] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 1);
mp->m_bmap_dmxr[1] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 0);
mp->m_refc_mnr[1] = mp->m_refc_mxr[1] / 2;
mp->m_bsize = XFS_FSB_TO_BB(mp, 1);
- mp->m_ialloc_inos = max_t(uint16_t, XFS_INODES_PER_CHUNK,
+ igeo->ialloc_inos = max_t(uint16_t, XFS_INODES_PER_CHUNK,
sbp->sb_inopblock);
- mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog;
+ igeo->ialloc_blks = igeo->ialloc_inos >> sbp->sb_inopblog;
if (sbp->sb_spino_align)
- mp->m_ialloc_min_blks = sbp->sb_spino_align;
+ igeo->ialloc_min_blks = sbp->sb_spino_align;
else
- mp->m_ialloc_min_blks = mp->m_ialloc_blks;
+ igeo->ialloc_min_blks = igeo->ialloc_blks;
mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
mp->m_ag_max_usable = xfs_alloc_ag_max_usable(mp);
}
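/*
 * Editorial illustration (not part of the patch): assuming 4096-byte
 * blocks, no CRCs (16-byte short btree header), 16-byte inobt records
 * and 4-byte keys/ptrs:
 *
 *	inobt_mxr[0] = (4096 - 16) / 16      = 255
 *	inobt_mxr[1] = (4096 - 16) / (4 + 4) = 510
 *	inobt_mnr[0] = 255 / 2               = 127
 *	inobt_mnr[1] = 510 / 2               = 255
 */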
xfs_calc_inobt_res(
struct xfs_mount *mp)
{
- return xfs_calc_buf_res(mp->m_in_maxlevels, XFS_FSB_TO_B(mp, 1)) +
- xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
- XFS_FSB_TO_B(mp, 1));
+ return xfs_calc_buf_res(M_IGEO(mp)->inobt_maxlevels,
+ XFS_FSB_TO_B(mp, 1)) +
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
+ XFS_FSB_TO_B(mp, 1));
}
/*
* includes:
*
* the allocation btrees: 2 trees * (max depth - 1) * block size
- * the inode chunk: m_ialloc_blks * N
+ * the inode chunk: m_ino_geo.ialloc_blks * N
*
* The size N of the inode chunk reservation depends on whether it is for
* allocation or free and which type of create transaction is in use. An inode
size = XFS_FSB_TO_B(mp, 1);
}
- res += xfs_calc_buf_res(mp->m_ialloc_blks, size);
+ res += xfs_calc_buf_res(M_IGEO(mp)->ialloc_blks, size);
return res;
}
struct xfs_mount *mp)
{
return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
- 2 * max_t(uint, XFS_FSB_TO_B(mp, 1), mp->m_inode_cluster_size);
+ 2 * max_t(uint, XFS_FSB_TO_B(mp, 1),
+ M_IGEO(mp)->inode_cluster_size);
}
/*
xfs_calc_iunlink_add_reservation(xfs_mount_t *mp)
{
return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
- max_t(uint, XFS_FSB_TO_B(mp, 1), mp->m_inode_cluster_size);
+ max_t(uint, XFS_FSB_TO_B(mp, 1),
+ M_IGEO(mp)->inode_cluster_size);
}
/*
#define XFS_DIRREMOVE_SPACE_RES(mp) \
XFS_DAREMOVE_SPACE_RES(mp, XFS_DATA_FORK)
#define XFS_IALLOC_SPACE_RES(mp) \
- ((mp)->m_ialloc_blks + \
+ (M_IGEO(mp)->ialloc_blks + \
(xfs_sb_version_hasfinobt(&mp->m_sb) ? 2 : 1 * \
- ((mp)->m_in_maxlevels - 1)))
+ (M_IGEO(mp)->inobt_maxlevels - 1)))
/*
* Space reservation values for various transactions.
#define XFS_SYMLINK_SPACE_RES(mp,nl,b) \
(XFS_IALLOC_SPACE_RES(mp) + XFS_DIRENTER_SPACE_RES(mp,nl) + (b))
#define XFS_IFREE_SPACE_RES(mp) \
- (xfs_sb_version_hasfinobt(&mp->m_sb) ? (mp)->m_in_maxlevels : 0)
+ (xfs_sb_version_hasfinobt(&mp->m_sb) ? \
+ M_IGEO(mp)->inobt_maxlevels : 0)
#endif /* __XFS_TRANS_SPACE_H__ */
* Calculate the first inode, which will be in the first
* cluster-aligned block after the AGFL.
*/
- bno = round_up(XFS_AGFL_BLOCK(mp) + 1, mp->m_cluster_align);
+ bno = round_up(XFS_AGFL_BLOCK(mp) + 1, M_IGEO(mp)->cluster_align);
*first = XFS_AGB_TO_AGINO(mp, bno);
/*
* Calculate the last inode, which will be at the end of the
* last (aligned) cluster that can be allocated in the AG.
*/
- bno = round_down(eoag, mp->m_cluster_align);
+ bno = round_down(eoag, M_IGEO(mp)->cluster_align);
*last = XFS_AGB_TO_AGINO(mp, bno) - 1;
}
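/*
 * Editorial illustration (not part of the patch): assuming
 * XFS_AGFL_BLOCK() = 0 (512-byte sector headers all inside a 4096-byte
 * block 0), cluster_align = 4 and 8 inodes per block, the first inode
 * lands at bno = round_up(0 + 1, 4) = 4, i.e. agino 32; with
 * eoag = 1000000 the last aligned cluster ends at bno 1000000, so the
 * last agino is 8000000 - 1.
 */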
* (basically no fragmentation).
*/
#define MKFS_BLOCKRES_INODE \
- ((uint)(mp->m_ialloc_blks + (mp->m_in_maxlevels - 1)))
+ ((uint)(M_IGEO(mp)->ialloc_blks + (M_IGEO(mp)->inobt_maxlevels - 1)))
#define MKFS_BLOCKRES(rb) \
((uint)(MKFS_BLOCKRES_INODE + XFS_DA_NODE_MAXDEPTH + \
(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) - 1) + (rb)))
int j;
int state;
xfs_extlen_t blen;
+ struct xfs_ino_geometry *igeo = M_IGEO(mp);
agno = XFS_INO_TO_AGNO(mp, ino);
agino = XFS_INO_TO_AGINO(mp, ino);
agbno = XFS_INO_TO_AGBNO(mp, ino);
*start_ino = NULLFSINO;
- ASSERT(mp->m_ialloc_blks > 0);
+ ASSERT(igeo->ialloc_blks > 0);
if (agno == mp->m_sb.sb_agcount - 1)
max_agbno = mp->m_sb.sb_dblocks -
* check for the easy case, inodes per block >= XFS_INODES_PER_CHUNK
* (multiple chunks per block)
*/
- if (mp->m_ialloc_blks == 1) {
+ if (igeo->ialloc_blks == 1) {
if (agbno > max_agbno)
return 0;
if (check_aginode_block(mp, agno, agino) == 0)
*/
start_agbno = rounddown(XFS_INO_TO_AGBNO(mp, ino),
fs_ino_alignment);
- end_agbno = start_agbno + mp->m_ialloc_blks;
+ end_agbno = start_agbno + igeo->ialloc_blks;
/*
* if this fs has aligned inodes but the end of the
* a discovered inode chunk completely within that range
* would include the inode passed into us.
*/
- if (mp->m_ialloc_blks > 1) {
- if (agino > mp->m_ialloc_inos)
- start_agbno = agbno - mp->m_ialloc_blks + 1;
+ if (igeo->ialloc_blks > 1) {
+ if (agino > igeo->ialloc_inos)
+ start_agbno = agbno - igeo->ialloc_blks + 1;
else
start_agbno = 1;
}
- end_agbno = agbno + mp->m_ialloc_blks;
+ end_agbno = agbno + igeo->ialloc_blks;
if (end_agbno > max_agbno)
end_agbno = max_agbno;
start_agbno = XFS_AGINO_TO_AGBNO(mp,
irec_p->ino_startnum) +
- mp->m_ialloc_blks;
+ igeo->ialloc_blks;
/*
* we know that the inode we're trying to verify isn't
* of the gap -- is it within the search range?
*/
if (irec_next_p != NULL &&
- agino + mp->m_ialloc_inos >=
+ agino + igeo->ialloc_inos >=
irec_next_p->ino_startnum)
end_agbno = XFS_AGINO_TO_AGBNO(mp,
irec_next_p->ino_startnum);
* the inode in question and that the space between them
* is too small for a legal inode chunk
*/
- if (end_agbno - start_agbno < mp->m_ialloc_blks)
+ if (end_agbno - start_agbno < igeo->ialloc_blks)
return(0);
/*
num_blks = chunk_stop_agbno - chunk_start_agbno;
- if (num_blks < mp->m_ialloc_blks || ino_cnt == 0)
+ if (num_blks < igeo->ialloc_blks || ino_cnt == 0)
return 0;
/*
* the chunk
*/
- if (num_blks % mp->m_ialloc_blks != 0) {
- num_blks = rounddown(num_blks, mp->m_ialloc_blks);
+ if (num_blks % igeo->ialloc_blks != 0) {
+ num_blks = rounddown(num_blks, igeo->ialloc_blks);
chunk_stop_agbno = chunk_start_agbno + num_blks;
}
int cluster_count;
int bp_index;
int cluster_offset;
+ struct xfs_ino_geometry *igeo = M_IGEO(mp);
ASSERT(first_irec != NULL);
ASSERT(XFS_AGINO_TO_OFFSET(mp, first_irec->ino_startnum) == 0);
*bogus = 0;
- ASSERT(mp->m_ialloc_blks > 0);
+ ASSERT(igeo->ialloc_blks > 0);
- blks_per_cluster = mp->m_inode_cluster_size >> mp->m_sb.sb_blocklog;
+ blks_per_cluster = igeo->inode_cluster_size >> mp->m_sb.sb_blocklog;
if (blks_per_cluster == 0)
blks_per_cluster = 1;
cluster_count = XFS_INODES_PER_CHUNK / inodes_per_cluster;
icnt++;
cluster_offset++;
- if (icnt == mp->m_ialloc_inos &&
+ if (icnt == igeo->ialloc_inos &&
irec_offset == XFS_INODES_PER_CHUNK) {
/*
* done! - finished up irec and block
icnt++;
cluster_offset++;
- if (icnt == mp->m_ialloc_inos &&
+ if (icnt == igeo->ialloc_inos &&
irec_offset == XFS_INODES_PER_CHUNK) {
/*
* done! - finished up irec and block simultaneously
{
int num_inos, bogus;
ino_tree_node_t *ino_rec, *first_ino_rec, *prev_ino_rec;
+ struct xfs_ino_geometry *igeo = M_IGEO(mp);
#ifdef XR_PF_TRACE
int count;
#endif
* the next block before we call the processing routines.
*/
num_inos = XFS_INODES_PER_CHUNK;
- while (num_inos < mp->m_ialloc_inos && ino_rec != NULL) {
+ while (num_inos < igeo->ialloc_inos && ino_rec != NULL) {
/*
 * inode chunks will always be aligned and sized
* correctly
num_inos += XFS_INODES_PER_CHUNK;
}
- ASSERT(num_inos == mp->m_ialloc_inos);
+ ASSERT(num_inos == igeo->ialloc_inos);
if (pf_args) {
sem_post(&pf_args->ra_count);
*/
num_inos = 0;
ino_rec = first_ino_rec;
- while (num_inos < mp->m_ialloc_inos &&
+ while (num_inos < igeo->ialloc_inos &&
ino_rec != NULL) {
prev_ino_rec = ino_rec;
int bogus;
int cnt;
int got_some;
+ struct xfs_ino_geometry *igeo = M_IGEO(mp);
#ifdef XR_INODE_TRACE
fprintf(stderr, "in process_uncertain_aginodes, agno = %d\n", agno);
* processing may add more records to the
* uncertain inode lists.
*/
- if (process_inode_chunk(mp, agno, mp->m_ialloc_inos,
+ if (process_inode_chunk(mp, agno, igeo->ialloc_inos,
nrec, 1, 0, 0, &bogus)) {
/* XXX - i/o error, we've got a problem */
abort();
xfs_agino_t cluster_agino;
xfs_daddr_t cluster_daddr;
xfs_daddr_t cluster_blks;
+ struct xfs_ino_geometry *igeo = M_IGEO(mp);
/*
* Inode buffers have been read into memory in inode_cluster_size
* we must find the buffer for its cluster, add the appropriate
* offset, and return that.
*/
- cluster_size = max(mp->m_inode_cluster_size, mp->m_sb.sb_blocksize);
+ cluster_size = max(igeo->inode_cluster_size, mp->m_sb.sb_blocksize);
ino_per_cluster = cluster_size / mp->m_sb.sb_inodesize;
cluster_agino = agino & ~(ino_per_cluster - 1);
cluster_blks = XFS_FSB_TO_DADDR(mp, max(1,
- mp->m_inode_cluster_size >> mp->m_sb.sb_blocklog));
+ igeo->inode_cluster_size >> mp->m_sb.sb_blocklog));
cluster_daddr = XFS_AGB_TO_DADDR(mp, agno,
XFS_AGINO_TO_AGBNO(mp, cluster_agino));
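/*
 * Editorial illustration (not part of the patch): assuming an
 * 8192-byte inode cluster and 512-byte inodes, cluster_size = 8192 and
 * ino_per_cluster = 16, so agino 53 belongs to the cluster starting at
 * cluster_agino = 53 & ~15 = 48, and its dinode sits
 * (53 - 48) * 512 bytes into that cluster's buffer.
 */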
* also mark blocks
*/
set_bmap_ext(0, XFS_INO_TO_AGBNO(mp, mp->m_sb.sb_rootino),
- mp->m_ialloc_blks, XR_E_INO);
+ M_IGEO(mp)->ialloc_blks, XR_E_INO);
} else {
do_log(_(" - found root inode chunk\n"));
be16_to_cpu(bt_hdr->bb_numrecs));
bt_ptr = XFS_INOBT_PTR_ADDR(mp, bt_hdr,
be16_to_cpu(bt_hdr->bb_numrecs),
- mp->m_inobt_mxr[1]);
+ M_IGEO(mp)->inobt_mxr[1]);
bt_key->ir_startino = cpu_to_be32(startino);
*bt_ptr = cpu_to_be32(btree_curs->level[level-1].agbno);
int i;
int err;
uint64_t sparse;
+ struct xfs_ino_geometry *igeo = M_IGEO(mp);
- blks_per_cluster = mp->m_inode_cluster_size >> mp->m_sb.sb_blocklog;
+ blks_per_cluster = igeo->inode_cluster_size >> mp->m_sb.sb_blocklog;
if (blks_per_cluster == 0)
blks_per_cluster = 1;
cur_irec = irec;
num_inos = XFS_INODES_PER_CHUNK;
- while (num_inos < mp->m_ialloc_inos && irec != NULL) {
+ while (num_inos < igeo->ialloc_inos && irec != NULL) {
irec = next_ino_rec(irec);
num_inos += XFS_INODES_PER_CHUNK;
}
bno += blks_per_cluster;
num_inos += inodes_per_cluster;
sparse >>= inodes_per_cluster;
- } while (num_inos < mp->m_ialloc_inos);
+ } while (num_inos < igeo->ialloc_inos);
}
pthread_mutex_lock(&args->lock);
{
prefetch_args_t *args;
long max_queue;
+ struct xfs_ino_geometry *igeo = M_IGEO(mp);
if (!do_prefetch || agno >= mp->m_sb.sb_agcount)
return NULL;
*/
max_queue = libxfs_bcache->c_maxcount / thread_count / 8;
- if (mp->m_inode_cluster_size > mp->m_sb.sb_blocksize)
+ if (igeo->inode_cluster_size > mp->m_sb.sb_blocksize)
max_queue = max_queue *
- (mp->m_inode_cluster_size >> mp->m_sb.sb_blocklog) /
- mp->m_ialloc_blks;
+ (igeo->inode_cluster_size >> mp->m_sb.sb_blocklog) /
+ igeo->ialloc_blks;
sem_init(&args->ra_count, 0, max_queue);
xfs_inobt_rec_t *rp;
int hdr_errors;
int freecount;
+ struct xfs_ino_geometry *igeo = M_IGEO(mp);
hdr_errors = 0;
if (level == 0) {
/* check for trashed btree block */
- if (numrecs > mp->m_inobt_mxr[0]) {
- numrecs = mp->m_inobt_mxr[0];
+ if (numrecs > igeo->inobt_mxr[0]) {
+ numrecs = igeo->inobt_mxr[0];
hdr_errors++;
}
- if (isroot == 0 && numrecs < mp->m_inobt_mnr[0]) {
- numrecs = mp->m_inobt_mnr[0];
+ if (isroot == 0 && numrecs < igeo->inobt_mnr[0]) {
+ numrecs = igeo->inobt_mnr[0];
hdr_errors++;
}
/*
* interior record, continue on
*/
- if (numrecs > mp->m_inobt_mxr[1]) {
- numrecs = mp->m_inobt_mxr[1];
+ if (numrecs > igeo->inobt_mxr[1]) {
+ numrecs = igeo->inobt_mxr[1];
hdr_errors++;
}
- if (isroot == 0 && numrecs < mp->m_inobt_mnr[1]) {
- numrecs = mp->m_inobt_mnr[1];
+ if (isroot == 0 && numrecs < igeo->inobt_mnr[1]) {
+ numrecs = igeo->inobt_mnr[1];
hdr_errors++;
}
- pp = XFS_INOBT_PTR_ADDR(mp, block, 1, mp->m_inobt_mxr[1]);
+ pp = XFS_INOBT_PTR_ADDR(mp, block, 1, igeo->inobt_mxr[1]);
/*
* don't pass bogus tree flag down further if this block
xfs_agblock_t fino_bno;
int do_inoalign;
- do_inoalign = mp->m_sinoalign;
+ do_inoalign = M_IGEO(mp)->ialloc_align;
/*
* Pre-calculate the geometry of ag 0. We know what it looks like
first_prealloc_ino = XFS_AGB_TO_AGINO(mp, fino_bno);
}
- ASSERT(mp->m_ialloc_blks > 0);
+ ASSERT(M_IGEO(mp)->ialloc_blks > 0);
- if (mp->m_ialloc_blks > 1)
+ if (M_IGEO(mp)->ialloc_blks > 1)
last_prealloc_ino = first_prealloc_ino + XFS_INODES_PER_CHUNK;
else
last_prealloc_ino = XFS_AGB_TO_AGINO(mp, fino_bno + 1);
char *msgbuf;
struct xfs_sb psb;
int rval;
+ struct xfs_ino_geometry *igeo;
progname = basename(argv[0]);
setlocale(LC_ALL, "");
exit(1);
}
mp->m_log = &log;
+ igeo = M_IGEO(mp);
/* Spit out function & line on these corruption macros */
if (verbose > 2)
chunks_pblock = mp->m_sb.sb_inopblock / XFS_INODES_PER_CHUNK;
max_symlink_blocks = libxfs_symlink_blocks(mp, XFS_SYMLINK_MAXLEN);
inodes_per_cluster = max(mp->m_sb.sb_inopblock,
- mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog);
+ igeo->inode_cluster_size >> mp->m_sb.sb_inodelog);
/*
* Automatic striding for high agcount filesystems.
if (max_mem >= (1 << 30))
max_mem = 1 << 30;
libxfs_bhash_size = max_mem / (HASH_CACHE_RATIO *
- (mp->m_inode_cluster_size >> 10));
+ (igeo->inode_cluster_size >> 10));
if (libxfs_bhash_size < 512)
libxfs_bhash_size = 512;