return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
}
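+/*
+ * Compute the minimum length of the AGFL: the freelist must be long
+ * enough to satisfy a worst-case split of each freespace btree, i.e.
+ * one block per level of each tree plus one for a potential height
+ * increase, capped at the maximum possible btree height for this
+ * filesystem.
+ */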
+unsigned int
+xfs_alloc_min_freelist(
+ struct xfs_mount *mp,
+ struct xfs_perag *pag)
+{
+ unsigned int min_free;
+
+ /* space needed by-bno freespace btree */
+ min_free = min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_BNOi] + 1,
+ mp->m_ag_maxlevels);
+ /* space needed by-size freespace btree */
+ min_free += min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_CNTi] + 1,
+ mp->m_ag_maxlevels);
+
+ return min_free;
+}
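The hunks below show the intended call pattern; as a minimal sketch, the caller pins the per-AG structure with the existing xfs_perag_get()/xfs_perag_put() helpers around the calculation:

	struct xfs_perag	*pag = xfs_perag_get(mp, agno);
	unsigned int		min_free;

	min_free = xfs_alloc_min_freelist(mp, pag);	/* reads pag->pagf_levels */
	xfs_perag_put(pag);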
+
/*
* Check if the operation we are fixing up the freelist for should go ahead or
* not. If we are freeing blocks, we always allow it, otherwise the allocation
goto out_agbp_relse;
}
- need = XFS_MIN_FREELIST_PAG(pag, mp);
+ need = xfs_alloc_min_freelist(mp, pag);
if (!xfs_alloc_space_available(args, need, flags))
goto out_agbp_relse;
}
}
-
/* If there isn't enough total space or single-extent, reject it. */
- need = XFS_MIN_FREELIST_PAG(pag, mp);
+ need = xfs_alloc_min_freelist(mp, pag);
if (!xfs_alloc_space_available(args, need, flags))
goto out_agbp_relse;
xfs_extlen_t xfs_alloc_longest_free_extent(struct xfs_mount *mp,
struct xfs_perag *pag, xfs_extlen_t need);
+unsigned int xfs_alloc_min_freelist(struct xfs_mount *mp,
+ struct xfs_perag *pag);
/*
* Compute and fill in value of m_ag_maxlevels.
}
longest = xfs_alloc_longest_free_extent(mp, pag,
- XFS_MIN_FREELIST_PAG(pag, mp));
+ xfs_alloc_min_freelist(mp, pag));
if (*blen < longest)
*blen = longest;
#define XFS_AGFL_CRC_OFF offsetof(struct xfs_agfl, agfl_crc)
-
-#define XFS_AG_MAXLEVELS(mp) ((mp)->m_ag_maxlevels)
-#define XFS_MIN_FREELIST_RAW(bl,cl,mp) \
- (MIN(bl + 1, XFS_AG_MAXLEVELS(mp)) + MIN(cl + 1, XFS_AG_MAXLEVELS(mp)))
-#define XFS_MIN_FREELIST(a,mp) \
- (XFS_MIN_FREELIST_RAW( \
- be32_to_cpu((a)->agf_levels[XFS_BTNUM_BNOi]), \
- be32_to_cpu((a)->agf_levels[XFS_BTNUM_CNTi]), mp))
-#define XFS_MIN_FREELIST_PAG(pag,mp) \
- (XFS_MIN_FREELIST_RAW( \
- (unsigned int)(pag)->pagf_levels[XFS_BTNUM_BNOi], \
- (unsigned int)(pag)->pagf_levels[XFS_BTNUM_CNTi], mp))
-
#define XFS_AGB_TO_FSB(mp,agno,agbno) \
(((xfs_fsblock_t)(agno) << (mp)->m_sb.sb_agblklog) | (agbno))
#define XFS_FSB_TO_AGNO(mp,fsbno) \
* 2 trees * (2 blocks/level * max depth - 1) * block size
*/
#define XFS_ALLOCFREE_LOG_RES(mp,nx) \
- ((nx) * (2 * XFS_FSB_TO_B((mp), 2 * XFS_AG_MAXLEVELS(mp) - 1)))
+ ((nx) * (2 * XFS_FSB_TO_B((mp), 2 * (mp)->m_ag_maxlevels - 1)))
#define XFS_ALLOCFREE_LOG_COUNT(mp,nx) \
- ((nx) * (2 * (2 * XFS_AG_MAXLEVELS(mp) - 1)))
+ ((nx) * (2 * (2 * (mp)->m_ag_maxlevels - 1)))
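As a worked illustration of these reservation formulas (the numbers are assumptions for the example, not values from this patch): with m_ag_maxlevels = 5 and 4096 byte blocks, a single extent allocation or free reserves

	XFS_ALLOCFREE_LOG_RES(mp, 1)   = 2 * XFS_FSB_TO_B(mp, 2 * 5 - 1)
	                               = 2 * 9 * 4096 = 73728 bytes
	XFS_ALLOCFREE_LOG_COUNT(mp, 1) = 2 * (2 * 5 - 1) = 18 blocks

matching the "2 trees * (2 blocks/level * max depth - 1)" comment above.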
/*
* Per-directory log reservation for any directory change.
#define XFS_DIOSTRAT_SPACE_RES(mp, v) \
(XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK) + (v))
#define XFS_GROWFS_SPACE_RES(mp) \
- (2 * XFS_AG_MAXLEVELS(mp))
+ (2 * (mp)->m_ag_maxlevels)
#define XFS_GROWFSRT_SPACE_RES(mp,b) \
((b) + XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK))
#define XFS_LINK_SPACE_RES(mp,nl) \
for (agno = 0; agno < agcount; agno++) {
struct xfs_agfl *agfl;
int bucket;
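+		/* pin the perag; released at the bottom of the loop */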
+ struct xfs_perag *pag = xfs_perag_get(mp, agno);
/*
* Superblock.
agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
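+	/*
+	 * Mirror the AGF levels into the perag so that
+	 * xfs_alloc_min_freelist() sees the new tree heights.
+	 */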
+ pag->pagf_levels[XFS_BTNUM_BNOi] = 1;
+ pag->pagf_levels[XFS_BTNUM_CNTi] = 1;
agf->agf_flfirst = 0;
agf->agf_fllast = cpu_to_be32(XFS_AGFL_SIZE(mp) - 1);
agf->agf_flcount = 0;
agf->agf_longest = cpu_to_be32(agsize -
XFS_FSB_TO_AGBNO(mp, logstart) - logblocks);
}
- if (XFS_MIN_FREELIST(agf, mp) > worst_freelist)
- worst_freelist = XFS_MIN_FREELIST(agf, mp);
+ if (xfs_alloc_min_freelist(mp, pag) > worst_freelist)
+ worst_freelist = xfs_alloc_min_freelist(mp, pag);
libxfs_writebuf(buf, LIBXFS_EXIT_ON_FAILURE);
/*
/*
* Free INO btree root block
*/
- if (!finobt)
+ if (!finobt) {
+ xfs_perag_put(pag);
continue;
+ }
buf = libxfs_getbuf(mp->m_ddev_targp,
XFS_AGB_TO_DADDR(mp, agno, XFS_FIBT_BLOCK(mp)),
xfs_btree_init_block(mp, buf, XFS_FIBT_MAGIC, 0, 0,
agno, 0);
libxfs_writebuf(buf, LIBXFS_EXIT_ON_FAILURE);
+ xfs_perag_put(pag);
}
/*
args.agno = agno;
args.alignment = 1;
args.pag = xfs_perag_get(mp,agno);
- libxfs_trans_reserve(tp, &tres, XFS_MIN_FREELIST(agf, mp), 0);
+ libxfs_trans_reserve(tp, &tres,
+ xfs_alloc_min_freelist(mp, args.pag), 0);
error = libxfs_alloc_fix_freelist(&args, 0);
xfs_perag_put(args.pag);
if (error) {
* and by size), the inode allocation btree root, the free inode
* allocation btree root (if enabled) and some number of blocks to
* prefill the agfl.
+ *
+ * Because the shape of the btrees at runtime may differ from the shape
+ * at mkfs time, we open code the mkfs freelist block count here. mkfs
+ * creates single level trees, so the calculation is pretty
+ * straightforward for the two trees that use the AGFL.
*/
bnobt_root = howmany(4 * mp->m_sb.sb_sectsize, mp->m_sb.sb_blocksize);
bcntbt_root = bnobt_root + 1;
inobt_root = bnobt_root + 2;
- fino_bno = inobt_root + XFS_MIN_FREELIST_RAW(1, 1, mp) + 1;
+ fino_bno = inobt_root + (2 * min(2, mp->m_ag_maxlevels)) + 1;
if (xfs_sb_version_hasfinobt(&mp->m_sb))
fino_bno++;
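As a worked example of this layout (sector and block sizes are assumptions for illustration): with 512 byte sectors, 4096 byte blocks and m_ag_maxlevels >= 2,

	bnobt_root  = howmany(4 * 512, 4096)               = 1
	bcntbt_root = bnobt_root + 1                       = 2
	inobt_root  = bnobt_root + 2                       = 3
	fino_bno    = 3 + (2 * min(2, m_ag_maxlevels)) + 1 = 8

where the 2 * min(2, m_ag_maxlevels) term is the open-coded equivalent of xfs_alloc_min_freelist() for two single-level trees.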