xfs_alloc_compute_maxlevels(
xfs_mount_t *mp) /* file system mount structure */
{
- mp->m_ag_maxlevels = xfs_btree_compute_maxlevels(mp, mp->m_alloc_mnr,
+ mp->m_ag_maxlevels = xfs_btree_compute_maxlevels(mp->m_alloc_mnr,
(mp->m_sb.sb_agblocks + 1) / 2);
}
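/*
 * Aside: a minimal sketch, not the verbatim helper, of how a computation
 * like xfs_btree_compute_maxlevels() can work.  The (sb_agblocks + 1) / 2
 * argument above is the worst-case number of free space records in an AG:
 * free and allocated blocks alternating.  Dividing the record count by the
 * minimum records per block at each level yields the tallest possible tree.
 * The function name below is illustrative only.
 */
static uint
example_btree_maxlevels(
	uint		*limits,	/* [0] = leaf minrecs, [1] = node minrecs */
	unsigned long	len)		/* number of records to index */
{
	unsigned long	maxblocks;
	uint		level;

	maxblocks = (len + limits[0] - 1) / limits[0];
	for (level = 1; maxblocks > 1; level++)
		maxblocks = (maxblocks + limits[1] - 1) / limits[1];
	return level;
}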
*/
xfs_extlen_t
xfs_alloc_longest_free_extent(
- struct xfs_mount *mp,
struct xfs_perag *pag,
xfs_extlen_t need,
xfs_extlen_t reserved)
/* do we have enough contiguous free space for the allocation? */
alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop;
- longest = xfs_alloc_longest_free_extent(args->mp, pag, min_free,
- reservation);
+ longest = xfs_alloc_longest_free_extent(pag, min_free, reservation);
if (longest < alloc_len)
return false;
unsigned int xfs_alloc_set_aside(struct xfs_mount *mp);
unsigned int xfs_alloc_ag_max_usable(struct xfs_mount *mp);
-xfs_extlen_t xfs_alloc_longest_free_extent(struct xfs_mount *mp,
- struct xfs_perag *pag, xfs_extlen_t need,
- xfs_extlen_t reserved);
+xfs_extlen_t xfs_alloc_longest_free_extent(struct xfs_perag *pag,
+ xfs_extlen_t need, xfs_extlen_t reserved);
unsigned int xfs_alloc_min_freelist(struct xfs_mount *mp,
struct xfs_perag *pag);
}
}
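/*
 * Longest extent still available once blocks for a minimal AG free list
 * and any outstanding per-AG reservations are set aside.
 */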
- longest = xfs_alloc_longest_free_extent(mp, pag,
+ longest = xfs_alloc_longest_free_extent(pag,
xfs_alloc_min_freelist(mp, pag),
xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
if (*blen < longest)
xfs_fileoff_t *next_fsb,
xfs_fileoff_t offset_shift_fsb,
bool *done,
- xfs_fileoff_t stop_fsb,
xfs_fsblock_t *firstblock,
struct xfs_defer_ops *dfops)
{
uint xfs_default_attroffset(struct xfs_inode *ip);
int xfs_bmap_collapse_extents(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb,
- bool *done, xfs_fileoff_t stop_fsb, xfs_fsblock_t *firstblock,
+ bool *done, xfs_fsblock_t *firstblock,
struct xfs_defer_ops *dfops);
int xfs_bmap_insert_extents(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb,
*/
uint
xfs_btree_compute_maxlevels(
- struct xfs_mount *mp,
uint *limits,
unsigned long len)
{
*/
xfs_extlen_t
xfs_btree_calc_size(
- struct xfs_mount *mp,
uint *limits,
unsigned long long len)
{
xfs_failaddr_t xfs_btree_lblock_verify(struct xfs_buf *bp,
unsigned int max_recs);
-uint xfs_btree_compute_maxlevels(struct xfs_mount *mp, uint *limits,
- unsigned long len);
-xfs_extlen_t xfs_btree_calc_size(struct xfs_mount *mp, uint *limits,
- unsigned long long len);
+uint xfs_btree_compute_maxlevels(uint *limits, unsigned long len);
+xfs_extlen_t xfs_btree_calc_size(uint *limits, unsigned long long len);
/* return codes */
#define XFS_BTREE_QUERY_RANGE_CONTINUE 0 /* keep iterating */
uint inodes;
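/* Maximum number of inode chunk records (XFS_INODES_PER_CHUNK inodes each) in an AG. */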
inodes = (1LL << XFS_INO_AGINO_BITS(mp)) >> XFS_INODES_PER_CHUNK_LOG;
- mp->m_in_maxlevels = xfs_btree_compute_maxlevels(mp, mp->m_inobt_mnr,
+ mp->m_in_maxlevels = xfs_btree_compute_maxlevels(mp->m_inobt_mnr,
inodes);
}
if (mp->m_inobt_mxr[0] == 0)
return 0;
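/* Worst case: the entire AG is inodes, one inobt record per chunk. */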
- return xfs_btree_calc_size(mp, mp->m_inobt_mnr,
+ return xfs_btree_calc_size(mp->m_inobt_mnr,
(uint64_t)mp->m_sb.sb_agblocks * mp->m_sb.sb_inopblock /
XFS_INODES_PER_CHUNK);
}
struct xfs_refcount_irec *center,
struct xfs_refcount_irec *right,
unsigned long long extlen,
- xfs_agblock_t *agbno,
xfs_extlen_t *aglen)
{
int error;
struct xfs_btree_cur *cur,
struct xfs_refcount_irec *right,
struct xfs_refcount_irec *cright,
- xfs_agblock_t *agbno,
xfs_extlen_t *aglen)
{
int error;
ulen < MAXREFCEXTLEN) {
*shape_changed = true;
return xfs_refcount_merge_center_extents(cur, &left, &cleft,
- &right, ulen, agbno, aglen);
+ &right, ulen, aglen);
}
/* Try to merge left and cleft. */
ulen < MAXREFCEXTLEN) {
*shape_changed = true;
return xfs_refcount_merge_right_extent(cur, &right, &cright,
- agbno, aglen);
+ aglen);
}
return error;
struct xfs_btree_cur *cur,
xfs_agblock_t agbno,
xfs_extlen_t aglen,
- enum xfs_refc_adjust_op adj,
- struct xfs_defer_ops *dfops,
- struct xfs_owner_info *oinfo)
+ enum xfs_refc_adjust_op adj)
{
struct xfs_refcount_irec ext, tmp;
int error;
struct xfs_btree_cur *cur,
xfs_agblock_t agbno,
xfs_extlen_t aglen,
- enum xfs_refc_adjust_op adj,
- struct xfs_defer_ops *dfops)
+ enum xfs_refc_adjust_op adj)
{
bool shape_changed;
int error;
goto out_error;
/* Now that we've taken care of the ends, adjust the middle extents */
- error = xfs_refcount_adjust_cow_extents(cur, agbno, aglen, adj,
- dfops, NULL);
+ error = xfs_refcount_adjust_cow_extents(cur, agbno, aglen, adj);
if (error)
goto out_error;
/* Add refcount btree reservation */
return xfs_refcount_adjust_cow(rcur, agbno, aglen,
- XFS_REFCOUNT_ADJUST_COW_ALLOC, dfops);
+ XFS_REFCOUNT_ADJUST_COW_ALLOC);
}
/*
/* Remove refcount btree reservation */
return xfs_refcount_adjust_cow(rcur, agbno, aglen,
- XFS_REFCOUNT_ADJUST_COW_FREE, dfops);
+ XFS_REFCOUNT_ADJUST_COW_FREE);
}
/* Record a CoW staging extent in the refcount btree. */
/* Stuff an extent on the recovery list. */
STATIC int
xfs_refcount_recover_extent(
- struct xfs_btree_cur *cur,
+ struct xfs_btree_cur *cur,
union xfs_btree_rec *rec,
void *priv)
{
*/
int
xfs_refcountbt_maxrecs(
- struct xfs_mount *mp,
int blocklen,
bool leaf)
{
xfs_refcountbt_compute_maxlevels(
struct xfs_mount *mp)
{
- mp->m_refc_maxlevels = xfs_btree_compute_maxlevels(mp,
+ mp->m_refc_maxlevels = xfs_btree_compute_maxlevels(
mp->m_refc_mnr, mp->m_sb.sb_agblocks);
}
struct xfs_mount *mp,
unsigned long long len)
{
- return xfs_btree_calc_size(mp, mp->m_refc_mnr, len);
+ return xfs_btree_calc_size(mp->m_refc_mnr, len);
}
/*
extern struct xfs_btree_cur *xfs_refcountbt_init_cursor(struct xfs_mount *mp,
struct xfs_trans *tp, struct xfs_buf *agbp, xfs_agnumber_t agno,
struct xfs_defer_ops *dfops);
-extern int xfs_refcountbt_maxrecs(struct xfs_mount *mp, int blocklen,
- bool leaf);
+extern int xfs_refcountbt_maxrecs(int blocklen, bool leaf);
extern void xfs_refcountbt_compute_maxlevels(struct xfs_mount *mp);
extern xfs_extlen_t xfs_refcountbt_calc_size(struct xfs_mount *mp,
struct xfs_mount *mp,
uint64_t ltoff,
struct xfs_rmap_irec *rec,
- xfs_fsblock_t bno,
xfs_filblks_t len,
uint64_t owner,
uint64_t offset,
bno + len, out_error);
/* Check owner information. */
- error = xfs_rmap_free_check_owner(mp, ltoff, &ltrec, bno, len, owner,
+ error = xfs_rmap_free_check_owner(mp, ltoff, &ltrec, len, owner,
offset, flags);
if (error)
goto out_error;
*/
int
xfs_rmapbt_maxrecs(
- struct xfs_mount *mp,
int blocklen,
int leaf)
{
if (xfs_sb_version_hasreflink(&mp->m_sb))
mp->m_rmap_maxlevels = XFS_BTREE_MAXLEVELS;
else
- mp->m_rmap_maxlevels = xfs_btree_compute_maxlevels(mp,
+ mp->m_rmap_maxlevels = xfs_btree_compute_maxlevels(
mp->m_rmap_mnr, mp->m_sb.sb_agblocks);
}
struct xfs_mount *mp,
unsigned long long len)
{
- return xfs_btree_calc_size(mp, mp->m_rmap_mnr, len);
+ return xfs_btree_calc_size(mp->m_rmap_mnr, len);
}
/*
struct xfs_btree_cur *xfs_rmapbt_init_cursor(struct xfs_mount *mp,
struct xfs_trans *tp, struct xfs_buf *bp,
xfs_agnumber_t agno);
-int xfs_rmapbt_maxrecs(struct xfs_mount *mp, int blocklen, int leaf);
+int xfs_rmapbt_maxrecs(int blocklen, int leaf);
extern void xfs_rmapbt_compute_maxlevels(struct xfs_mount *mp);
extern xfs_extlen_t xfs_rmapbt_calc_size(struct xfs_mount *mp,
mp->m_bmap_dmnr[0] = mp->m_bmap_dmxr[0] / 2;
mp->m_bmap_dmnr[1] = mp->m_bmap_dmxr[1] / 2;
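/* Non-root btree blocks must stay at least half full, so minrecs is half of maxrecs. */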
- mp->m_rmap_mxr[0] = xfs_rmapbt_maxrecs(mp, sbp->sb_blocksize, 1);
- mp->m_rmap_mxr[1] = xfs_rmapbt_maxrecs(mp, sbp->sb_blocksize, 0);
+ mp->m_rmap_mxr[0] = xfs_rmapbt_maxrecs(sbp->sb_blocksize, 1);
+ mp->m_rmap_mxr[1] = xfs_rmapbt_maxrecs(sbp->sb_blocksize, 0);
mp->m_rmap_mnr[0] = mp->m_rmap_mxr[0] / 2;
mp->m_rmap_mnr[1] = mp->m_rmap_mxr[1] / 2;
- mp->m_refc_mxr[0] = xfs_refcountbt_maxrecs(mp, sbp->sb_blocksize,
- true);
- mp->m_refc_mxr[1] = xfs_refcountbt_maxrecs(mp, sbp->sb_blocksize,
- false);
+ mp->m_refc_mxr[0] = xfs_refcountbt_maxrecs(sbp->sb_blocksize, true);
+ mp->m_refc_mxr[1] = xfs_refcountbt_maxrecs(sbp->sb_blocksize, false);
mp->m_refc_mnr[0] = mp->m_refc_mxr[0] / 2;
mp->m_refc_mnr[1] = mp->m_refc_mxr[1] / 2;
* the xfs_disk_dquot_t: sizeof(struct xfs_disk_dquot)
*/
STATIC uint
-xfs_calc_qm_setqlim_reservation(
- struct xfs_mount *mp)
+xfs_calc_qm_setqlim_reservation(void)
{
return xfs_calc_buf_res(1, sizeof(struct xfs_disk_dquot));
}
* the xfs_qoff_logitem_t: sizeof(struct xfs_qoff_logitem) * 2
*/
STATIC uint
-xfs_calc_qm_quotaoff_end_reservation(
- struct xfs_mount *mp)
+xfs_calc_qm_quotaoff_end_reservation(void)
{
return sizeof(struct xfs_qoff_logitem) * 2;
}
* The following transactions are logged in logical format with
* a default log count.
*/
- resp->tr_qm_setqlim.tr_logres = xfs_calc_qm_setqlim_reservation(mp);
+ resp->tr_qm_setqlim.tr_logres = xfs_calc_qm_setqlim_reservation();
resp->tr_qm_setqlim.tr_logcount = XFS_DEFAULT_LOG_COUNT;
resp->tr_qm_quotaoff.tr_logres = xfs_calc_qm_quotaoff_reservation(mp);
resp->tr_qm_quotaoff.tr_logcount = XFS_DEFAULT_LOG_COUNT;
resp->tr_qm_equotaoff.tr_logres =
- xfs_calc_qm_quotaoff_end_reservation(mp);
+ xfs_calc_qm_quotaoff_end_reservation();
resp->tr_qm_equotaoff.tr_logcount = XFS_DEFAULT_LOG_COUNT;
resp->tr_sb.tr_logres = xfs_calc_sb_reservation(mp);