M_IGEO(mp)->inoalign_mask) {
xfs_agblock_t chunk_agbno;
xfs_agblock_t offset_agbno;
- int blks_per_cluster;
- blks_per_cluster = M_IGEO(mp)->inode_cluster_size >>
- mp->m_sb.sb_blocklog;
offset_agbno = agbno & M_IGEO(mp)->inoalign_mask;
chunk_agbno = agbno - offset_agbno;
cluster_agbno = chunk_agbno +
- ((offset_agbno / blks_per_cluster) * blks_per_cluster);
+ ((offset_agbno / M_IGEO(mp)->blocks_per_cluster) *
+ M_IGEO(mp)->blocks_per_cluster);
offset += ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock);
- numblks = XFS_FSB_TO_BB(mp, blks_per_cluster);
+ numblks = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster);
} else
cluster_agbno = agbno;
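For reference, the arithmetic in this hunk rounds an AG block number down to the first block of its inode cluster: the offset inside the aligned chunk is truncated to a multiple of blocks_per_cluster. A minimal standalone sketch of the same rounding, where cluster_start and its plain-integer arguments are hypothetical stand-ins for the mount and geometry structures:

#include <assert.h>

/*
 * Hypothetical model of the cluster rounding above: chunk_start is
 * the first block of the inode chunk, agbno the block being mapped,
 * blocks_per_cluster the cluster size in filesystem blocks (always
 * >= 1 in the new precomputed geometry).
 */
static unsigned int
cluster_start(unsigned int chunk_start, unsigned int agbno,
	      unsigned int blocks_per_cluster)
{
	unsigned int offset = agbno - chunk_start;

	/* Round the offset within the chunk down to a cluster boundary. */
	return chunk_start +
		(offset / blocks_per_cluster) * blocks_per_cluster;
}

int
main(void)
{
	/* e.g. 4 blocks per cluster: blocks 64..67 share cluster 64 */
	assert(cluster_start(64, 66, 4) == 64);
	assert(cluster_start(64, 68, 4) == 68);
	return 0;
}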
xfs_buf_t *bp)
{
int i;
- int j;
xfs_dinode_t *dip;
- j = M_IGEO(mp)->inode_cluster_size >> mp->m_sb.sb_inodelog;
-
- for (i = 0; i < j; i++) {
+ for (i = 0; i < M_IGEO(mp)->inodes_per_cluster; i++) {
dip = xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize);
if (!dip->di_next_unlinked) {
xfs_alert(mp,
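The rewritten loop visits each on-disk inode in the cluster buffer; the inodes sit back to back, sb_inodesize bytes apart, so the buffer offset is i * sb_inodesize. A hedged sketch of the same stride walk over a plain byte buffer (fake_dinode and check_cluster are illustrative, not the xfs_buf API):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Illustrative stand-in for the on-disk inode, carrying only the
 * field the loop above inspects; padded to a pretend 256-byte
 * inode size.
 */
struct fake_dinode {
	uint32_t	di_next_unlinked;
	char		pad[252];
};

static void
check_cluster(char *buf, int inodes_per_cluster, size_t inodesize)
{
	int	i;

	for (i = 0; i < inodes_per_cluster; i++) {
		/* same offset math as xfs_buf_offset(bp, i * inodesize) */
		struct fake_dinode *dip =
			(struct fake_dinode *)(buf + i * inodesize);

		if (!dip->di_next_unlinked)
			printf("inode %d: bad next_unlinked\n", i);
	}
}

int
main(void)
{
	char	buf[8 * 256];

	memset(buf, 0xff, sizeof(buf));	/* all "valid" */
	memset(buf + 3 * 256, 0, 4);	/* corrupt inode 3's field */
	check_cluster(buf, 8, 256);
	return 0;
}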
xfs_ino_t ino;
int dirty = 0;
int isa_dir = 0;
- int blks_per_cluster;
int cluster_count;
int bp_index;
int cluster_offset;
*bogus = 0;
ASSERT(M_IGEO(mp)->ialloc_blks > 0);
- blks_per_cluster = M_IGEO(mp)->inode_cluster_size >> mp->m_sb.sb_blocklog;
- if (blks_per_cluster == 0)
- blks_per_cluster = 1;
- cluster_count = XFS_INODES_PER_CHUNK / inodes_per_cluster;
+ cluster_count = XFS_INODES_PER_CHUNK / M_IGEO(mp)->inodes_per_cluster;
if (cluster_count == 0)
cluster_count = 1;
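cluster_count here is the number of cluster buffers needed to cover one 64-inode chunk; when a single cluster holds a whole chunk or more, the integer division yields zero and is clamped to one. A small model of that calculation (chunk_cluster_count is a hypothetical helper):

#include <assert.h>

#define INODES_PER_CHUNK	64	/* XFS_INODES_PER_CHUNK */

/* Number of cluster buffers needed to cover one inode chunk. */
static int
chunk_cluster_count(int inodes_per_cluster)
{
	int	count = INODES_PER_CHUNK / inodes_per_cluster;

	/* A cluster can hold more than a whole chunk; still one buffer. */
	return count ? count : 1;
}

int
main(void)
{
	assert(chunk_cluster_count(16) == 4);
	assert(chunk_cluster_count(64) == 1);
	assert(chunk_cluster_count(128) == 1);	/* clamped */
	return 0;
}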
bplist[bp_index] = libxfs_readbuf(mp->m_dev,
XFS_AGB_TO_DADDR(mp, agno, agbno),
- XFS_FSB_TO_BB(mp, blks_per_cluster), 0,
+ XFS_FSB_TO_BB(mp,
+ M_IGEO(mp)->blocks_per_cluster),
+ 0,
&xfs_inode_buf_ops);
if (!bplist[bp_index]) {
do_warn(_("cannot read inode %" PRIu64 ", disk block %" PRId64 ", cnt %d\n"),
XFS_AGINO_TO_INO(mp, agno, first_irec->ino_startnum),
XFS_AGB_TO_DADDR(mp, agno, agbno),
- XFS_FSB_TO_BB(mp, blks_per_cluster));
+ XFS_FSB_TO_BB(mp,
+ M_IGEO(mp)->blocks_per_cluster));
while (bp_index > 0) {
bp_index--;
libxfs_putbuf(bplist[bp_index]);
bplist[bp_index]->b_ops = &xfs_inode_buf_ops;
next_readbuf:
- irec_offset += mp->m_sb.sb_inopblock * blks_per_cluster;
- agbno += blks_per_cluster;
+ irec_offset += mp->m_sb.sb_inopblock *
+ M_IGEO(mp)->blocks_per_cluster;
+ agbno += M_IGEO(mp)->blocks_per_cluster;
}
agbno = XFS_AGINO_TO_AGBNO(mp, first_irec->ino_startnum);
ASSERT(ino_rec->ino_startnum == agino + 1);
irec_offset = 0;
}
- if (cluster_offset == inodes_per_cluster) {
+ if (cluster_offset == M_IGEO(mp)->inodes_per_cluster) {
bp_index++;
cluster_offset = 0;
}
ASSERT(ino_rec->ino_startnum == agino + 1);
irec_offset = 0;
}
- if (cluster_offset == inodes_per_cluster) {
+ if (cluster_offset == M_IGEO(mp)->inodes_per_cluster) {
bp_index++;
cluster_offset = 0;
}
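The bookkeeping above steps through a chunk's inodes buffer by buffer: cluster_offset counts inodes within the current buffer and wraps at inodes_per_cluster, bumping bp_index to the next cluster buffer. A standalone sketch of that index walk with hypothetical geometry values:

#include <stdio.h>

/*
 * Sketch of the index bookkeeping above: walking the inodes of one
 * chunk across its cluster buffers. inodes_per_cluster inodes fit
 * in each buffer; when cluster_offset wraps, move to the next
 * buffer. Values are hypothetical, not read from a filesystem.
 */
int
main(void)
{
	int	inodes_per_chunk = 64;
	int	inodes_per_cluster = 16;
	int	bp_index = 0, cluster_offset = 0, ino;

	for (ino = 0; ino < inodes_per_chunk; ino++) {
		printf("inode %2d -> buffer %d, slot %d\n",
			ino, bp_index, cluster_offset);
		cluster_offset++;
		if (cluster_offset == inodes_per_cluster) {
			bp_index++;
			cluster_offset = 0;
		}
	}
	return 0;
}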
struct xfs_dinode **dipp)
{
struct xfs_buf *bp;
- int cluster_size;
- int ino_per_cluster;
xfs_agino_t cluster_agino;
xfs_daddr_t cluster_daddr;
xfs_daddr_t cluster_blks;
* we must find the buffer for its cluster, add the appropriate
* offset, and return that.
*/
- cluster_size = M_IGEO(mp)->inode_cluster_size;
- ino_per_cluster = cluster_size / mp->m_sb.sb_inodesize;
- cluster_agino = agino & ~(ino_per_cluster - 1);
- cluster_blks = XFS_FSB_TO_DADDR(mp, max(1,
- M_IGEO(mp)->inode_cluster_size >> mp->m_sb.sb_blocklog));
+ cluster_agino = agino & ~(M_IGEO(mp)->inodes_per_cluster - 1);
+ cluster_blks = XFS_FSB_TO_DADDR(mp, M_IGEO(mp)->blocks_per_cluster);
cluster_daddr = XFS_AGB_TO_DADDR(mp, agno,
XFS_AGINO_TO_AGBNO(mp, cluster_agino));
#ifdef XR_INODE_TRACE
printf("cluster_size %d ipc %d clusagino %d daddr %lld sectors %lld\n",
- cluster_size, ino_per_cluster, cluster_agino, cluster_daddr,
- cluster_blks);
+ M_IGEO(mp)->inode_cluster_size, M_IGEO(mp)->inodes_per_cluster,
+ cluster_agino, cluster_daddr, cluster_blks);
#endif
bp = libxfs_readbuf(mp->m_dev, cluster_daddr, cluster_blks,
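The cluster lookup relies on inodes_per_cluster being a power of two: masking off the low bits of agino yields the first inode of its cluster, from which the cluster's disk address follows. A minimal model of the mask step (cluster_first_agino is a hypothetical helper):

#include <assert.h>
#include <stdint.h>

/*
 * Clearing the low bits of agino gives the first inode of its
 * cluster, exactly as agino & ~(inodes_per_cluster - 1) above.
 * Requires a power-of-two inodes_per_cluster.
 */
static uint32_t
cluster_first_agino(uint32_t agino, uint32_t inodes_per_cluster)
{
	return agino & ~(inodes_per_cluster - 1);
}

int
main(void)
{
	assert(cluster_first_agino(70, 16) == 64);
	assert(cluster_first_agino(64, 16) == 64);
	assert(cluster_first_agino(15, 16) == 0);
	return 0;
}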
/* configuration vars -- fs geometry dependent */
int inodes_per_block;
-int inodes_per_cluster;
unsigned int glob_agcount;
int chunks_pblock; /* # of 64-ino chunks per allocation */
int max_symlink_blocks;
/* configuration vars -- fs geometry dependent */
extern int inodes_per_block;
-extern int inodes_per_cluster;
extern unsigned int glob_agcount;
extern int chunks_pblock; /* # of 64-ino chunks per allocation */
extern int max_symlink_blocks;
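The deleted globals are replaced by fields computed once in the shared inode geometry, which is also why the old "if (blks_per_cluster == 0) blks_per_cluster = 1" guards can go: the precomputed value is never zero. A hedged sketch of how the fields relate, loosely modeled on the libxfs geometry setup (fake_igeo and setup_geometry are illustrative, and plain division stands in for the byte-to-FSB conversion macros):

#include <assert.h>

/* Illustrative subset of the precomputed inode geometry fields. */
struct fake_igeo {
	unsigned int	inode_cluster_size;	/* bytes */
	unsigned int	blocks_per_cluster;
	unsigned int	inodes_per_cluster;
};

static void
setup_geometry(struct fake_igeo *igeo, unsigned int raw_cluster_bytes,
	       unsigned int blocksize, unsigned int inodesize)
{
	/* A cluster is at least one filesystem block, never zero. */
	if (raw_cluster_bytes > blocksize)
		igeo->blocks_per_cluster = raw_cluster_bytes / blocksize;
	else
		igeo->blocks_per_cluster = 1;
	igeo->inode_cluster_size = igeo->blocks_per_cluster * blocksize;
	igeo->inodes_per_cluster = igeo->inode_cluster_size / inodesize;
}

int
main(void)
{
	struct fake_igeo igeo;

	setup_geometry(&igeo, 8192, 4096, 512);
	assert(igeo.blocks_per_cluster == 2);
	assert(igeo.inodes_per_cluster == 16);

	setup_geometry(&igeo, 8192, 65536, 512);	/* big blocks */
	assert(igeo.blocks_per_cluster == 1);		/* clamped */
	return 0;
}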
int num_inos;
ino_tree_node_t *irec;
ino_tree_node_t *cur_irec;
- int blks_per_cluster;
xfs_agblock_t bno;
int i;
int err;
uint64_t sparse;
- blks_per_cluster = M_IGEO(mp)->inode_cluster_size >> mp->m_sb.sb_blocklog;
- if (blks_per_cluster == 0)
- blks_per_cluster = 1;
-
for (i = 0; i < PF_THREAD_COUNT; i++) {
err = pthread_create(&args->io_threads[i], NULL,
pf_io_worker, args);
struct xfs_buf_map map;
map.bm_bn = XFS_AGB_TO_DADDR(mp, args->agno, bno);
- map.bm_len = XFS_FSB_TO_BB(mp, blks_per_cluster);
+ map.bm_len = XFS_FSB_TO_BB(mp,
+ M_IGEO(mp)->blocks_per_cluster);
/*
* Queue I/O for each non-sparse cluster. We can check
* sparse state in cluster sized chunks as cluster size
* is the min. granularity of sparse irec regions.
*/
- if ((sparse & ((1ULL << inodes_per_cluster) - 1)) == 0)
+ if ((sparse & ((1ULL << M_IGEO(mp)->inodes_per_cluster) - 1)) == 0)
pf_queue_io(args, &map, 1,
(cur_irec->ino_isa_dir != 0) ?
B_DIR_INODE : B_INODE);
- bno += blks_per_cluster;
- num_inos += inodes_per_cluster;
- sparse >>= inodes_per_cluster;
+ bno += M_IGEO(mp)->blocks_per_cluster;
+ num_inos += M_IGEO(mp)->inodes_per_cluster;
+ sparse >>= M_IGEO(mp)->inodes_per_cluster;
} while (num_inos < M_IGEO(mp)->ialloc_inos);
}
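The sparse check consumes the 64-bit per-inode sparse mask one cluster at a time: if no bit is set in the low inodes_per_cluster bits, every inode in that cluster is allocated and the cluster can be queued for read-ahead. A standalone model of that walk with a hypothetical mask value:

#include <stdint.h>
#include <stdio.h>

/*
 * One bit per inode in the 64-inode chunk; the mask covers one
 * cluster's worth of inodes and the shift consumes them. Only
 * clusters with no sparse bits set would be queued for I/O.
 */
int
main(void)
{
	uint64_t	sparse = 0x00000000ffff0000ULL;	/* hypothetical */
	int		inodes_per_cluster = 16;
	uint64_t	mask = (1ULL << inodes_per_cluster) - 1;
	int		cluster;

	for (cluster = 0; cluster < 64 / inodes_per_cluster; cluster++) {
		if ((sparse & mask) == 0)
			printf("cluster %d: queue read\n", cluster);
		else
			printf("cluster %d: sparse, skip\n", cluster);
		sparse >>= inodes_per_cluster;
	}
	return 0;
}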
max_queue = libxfs_bcache->c_maxcount / thread_count / 8;
if (M_IGEO(mp)->inode_cluster_size > mp->m_sb.sb_blocksize)
- max_queue = max_queue *
- (M_IGEO(mp)->inode_cluster_size >> mp->m_sb.sb_blocklog) /
+ max_queue = max_queue * M_IGEO(mp)->blocks_per_cluster /
M_IGEO(mp)->ialloc_blks;
sem_init(&args->ra_count, 0, max_queue);
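The max_queue adjustment derates the prefetch queue depth when inode clusters span multiple filesystem blocks, since each queued read then covers a larger share of the buffer cache. A worked example with hypothetical numbers:

#include <stdio.h>

/*
 * Model of the readahead-queue scaling above: the queue depth is
 * scaled by blocks_per_cluster relative to the blocks in a whole
 * inode chunk (ialloc_blks). All values are hypothetical.
 */
int
main(void)
{
	int	max_queue = 1024;	/* cache slots / threads / 8 */
	int	blocks_per_cluster = 2;
	int	ialloc_blks = 16;	/* blocks per inode chunk */

	max_queue = max_queue * blocks_per_cluster / ialloc_blks;
	printf("scaled max_queue = %d\n", max_queue);	/* 128 */
	return 0;
}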
chunks_pblock = mp->m_sb.sb_inopblock / XFS_INODES_PER_CHUNK;
max_symlink_blocks = libxfs_symlink_blocks(mp, XFS_SYMLINK_MAXLEN);
- inodes_per_cluster = max(mp->m_sb.sb_inopblock,
- M_IGEO(mp)->inode_cluster_size >> mp->m_sb.sb_inodelog);
/*
* Automatic striding for high agcount filesystems.