void *obj,
int startoff)
{
- return XFS_AGFL_SIZE(mp);
+ return libxfs_agfl_size(mp);
}
static void
i = be32_to_cpu(agf->agf_flfirst);
/* verify agf values before proceeding */
- if (be32_to_cpu(agf->agf_flfirst) >= XFS_AGFL_SIZE(mp) ||
- be32_to_cpu(agf->agf_fllast) >= XFS_AGFL_SIZE(mp)) {
+ if (be32_to_cpu(agf->agf_flfirst) >= libxfs_agfl_size(mp) ||
+ be32_to_cpu(agf->agf_fllast) >= libxfs_agfl_size(mp)) {
dbprintf(_("agf %d freelist blocks bad, skipping "
"freelist scan\n"), i);
pop_cur();
count++;
if (i == be32_to_cpu(agf->agf_fllast))
break;
- if (++i == XFS_AGFL_SIZE(mp))
+ if (++i == libxfs_agfl_size(mp))
i = 0;
}
if (count != be32_to_cpu(agf->agf_flcount)) {
: (__be32 *)agfl;
/* verify agf values before proceeding */
- if (be32_to_cpu(agf->agf_flfirst) >= XFS_AGFL_SIZE(mp) ||
- be32_to_cpu(agf->agf_fllast) >= XFS_AGFL_SIZE(mp)) {
+ if (be32_to_cpu(agf->agf_flfirst) >= libxfs_agfl_size(mp) ||
+ be32_to_cpu(agf->agf_fllast) >= libxfs_agfl_size(mp)) {
dbprintf(_("agf %d freelist blocks bad, skipping "
"freelist scan\n"), i);
pop_cur();
addtohist(seqno, bno, 1);
if (i == be32_to_cpu(agf->agf_fllast))
break;
- if (++i == XFS_AGFL_SIZE(mp))
+ if (++i == libxfs_agfl_size(mp))
i = 0;
}
pop_cur();
i = be32_to_cpu(agf->agf_fllast);
for (;;) {
- if (++i == XFS_AGFL_SIZE(mp))
+ if (++i == libxfs_agfl_size(mp))
i = 0;
if (i == be32_to_cpu(agf->agf_flfirst))
break;
#define xfs_refcount_lookup_le libxfs_refcount_lookup_le
#define xfs_refcount_get_rec libxfs_refcount_get_rec
#define xfs_rmap_lookup_le_range libxfs_rmap_lookup_le_range
+#define xfs_agfl_size libxfs_agfl_size
#define xfs_refc_block libxfs_refc_block
#define xfs_rmap_compare libxfs_rmap_compare
#define xfs_dir_get_ops libxfs_dir_get_ops
STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);
+/*
+ * Size of the AGFL. For CRC-enabled filesystems we steal a couple of slots in
+ * the beginning of the block for a proper header with the location information
+ * and CRC.
+ */
+unsigned int
+xfs_agfl_size(
+ struct xfs_mount *mp)
+{
+ unsigned int size = mp->m_sb.sb_sectsize;
+
+ if (xfs_sb_version_hascrc(&mp->m_sb))
+ size -= sizeof(struct xfs_agfl);
+
+ return size / sizeof(xfs_agblock_t);
+}
+
unsigned int
xfs_refc_block(
struct xfs_mount *mp)
if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
return __this_address;
- for (i = 0; i < XFS_AGFL_SIZE(mp); i++) {
+ for (i = 0; i < xfs_agfl_size(mp); i++) {
if (be32_to_cpu(agfl->agfl_bno[i]) != NULLAGBLOCK &&
be32_to_cpu(agfl->agfl_bno[i]) >= mp->m_sb.sb_agblocks)
return __this_address;
bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
be32_add_cpu(&agf->agf_flfirst, 1);
xfs_trans_brelse(tp, agflbp);
- if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp))
+ if (be32_to_cpu(agf->agf_flfirst) == xfs_agfl_size(mp))
agf->agf_flfirst = 0;
pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
be32_to_cpu(agf->agf_seqno), &agflbp)))
return error;
be32_add_cpu(&agf->agf_fllast, 1);
- if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp))
+ if (be32_to_cpu(agf->agf_fllast) == xfs_agfl_size(mp))
agf->agf_fllast = 0;
pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
xfs_alloc_log_agf(tp, agbp, logflags);
- ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp));
+ ASSERT(be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp));
agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
if (!(agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) &&
XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
- be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) &&
- be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) &&
- be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp)))
+ be32_to_cpu(agf->agf_flfirst) < xfs_agfl_size(mp) &&
+ be32_to_cpu(agf->agf_fllast) < xfs_agfl_size(mp) &&
+ be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp)))
return __this_address;
if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
extern struct workqueue_struct *xfs_alloc_wq;
+unsigned int xfs_agfl_size(struct xfs_mount *mp);
+
/*
* Freespace allocation types. Argument to xfs_alloc_[v]extent.
*/
&(XFS_BUF_TO_AGFL(bp)->agfl_bno[0]) : \
(__be32 *)(bp)->b_addr)
-/*
- * Size of the AGFL. For CRC-enabled filesystes we steal a couple of
- * slots in the beginning of the block for a proper header with the
- * location information and CRC.
- */
-#define XFS_AGFL_SIZE(mp) \
- (((mp)->m_sb.sb_sectsize - \
- (xfs_sb_version_hascrc(&((mp)->m_sb)) ? \
- sizeof(struct xfs_agfl) : 0)) / \
- sizeof(xfs_agblock_t))
-
typedef struct xfs_agfl {
__be32 agfl_magicnum;
__be32 agfl_seqno;
uuid_t agfl_uuid;
__be64 agfl_lsn;
__be32 agfl_crc;
- __be32 agfl_bno[]; /* actually XFS_AGFL_SIZE(mp) */
+ __be32 agfl_bno[]; /* actually xfs_agfl_size(mp) */
} __attribute__((packed)) xfs_agfl_t;
#define XFS_AGFL_CRC_OFF offsetof(struct xfs_agfl, agfl_crc)
}
agf->agf_flfirst = 0;
- agf->agf_fllast = cpu_to_be32(XFS_AGFL_SIZE(mp) - 1);
+ agf->agf_fllast = cpu_to_be32(libxfs_agfl_size(mp) - 1);
agf->agf_flcount = 0;
agblocks = (xfs_agblock_t)(agsize - libxfs_prealloc_blocks(mp));
agf->agf_freeblks = cpu_to_be32(agblocks);
agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
agfl->agfl_seqno = cpu_to_be32(agno);
platform_uuid_copy(&agfl->agfl_uuid, &sbp->sb_uuid);
- for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++)
+ for (bucket = 0; bucket < libxfs_agfl_size(mp); bucket++)
agfl->agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
}
* check first/last AGF fields. if need be, lose the free
* space in the AGFL, we'll reclaim it later.
*/
- if (be32_to_cpu(agf->agf_flfirst) >= XFS_AGFL_SIZE(mp)) {
- do_warn(_("flfirst %d in agf %d too large (max = %zu)\n"),
+ if (be32_to_cpu(agf->agf_flfirst) >= libxfs_agfl_size(mp)) {
+ do_warn(_("flfirst %d in agf %d too large (max = %u)\n"),
be32_to_cpu(agf->agf_flfirst),
- i, XFS_AGFL_SIZE(mp) - 1);
+ i, libxfs_agfl_size(mp) - 1);
if (!no_modify)
agf->agf_flfirst = cpu_to_be32(0);
}
- if (be32_to_cpu(agf->agf_fllast) >= XFS_AGFL_SIZE(mp)) {
- do_warn(_("fllast %d in agf %d too large (max = %zu)\n"),
+ if (be32_to_cpu(agf->agf_fllast) >= libxfs_agfl_size(mp)) {
+ do_warn(_("fllast %d in agf %d too large (max = %u)\n"),
be32_to_cpu(agf->agf_fllast),
- i, XFS_AGFL_SIZE(mp) - 1);
+ i, libxfs_agfl_size(mp) - 1);
if (!no_modify)
agf->agf_fllast = cpu_to_be32(0);
}
* as they will be *after* accounting for the free space
* we've used up will need fewer blocks to to represent
* than we've allocated. We can use the AGFL to hold
- * XFS_AGFL_SIZE (sector/xfs_agfl_t) blocks but that's it.
- * Thus we limit things to XFS_AGFL_SIZE/2 for each of the 2 btrees.
+ * xfs_agfl_size (sector/xfs_agfl_t) blocks but that's it.
+ * Thus we limit things to xfs_agfl_size/2 for each of the 2 btrees.
* if the number of extra blocks is more than that,
* we'll have to be called again.
*/
!= btree_curs->level[0].num_blocks) {
/*
* yes -- recalculate the cursor. If the number of
- * excess (overallocated) blocks is < XFS_AGFL_SIZE/2, we're ok.
+ * excess (overallocated) blocks is < xfs_agfl_size/2, we're ok.
* we can put those into the AGFL. we don't try
* and get things to converge exactly (reach a
* state with zero excess blocks) because there
agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
agfl->agfl_seqno = cpu_to_be32(agno);
platform_uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
- for (i = 0; i < XFS_AGFL_SIZE(mp); i++)
+ for (i = 0; i < libxfs_agfl_size(mp); i++)
agfl->agfl_bno[i] = cpu_to_be32(NULLAGBLOCK);
}
freelist = XFS_BUF_TO_AGFL_BNO(mp, agfl_buf);
* yes, now grab as many blocks as we can
*/
i = 0;
- while (bno_bt->num_free_blocks > 0 && i < XFS_AGFL_SIZE(mp)) {
+ while (bno_bt->num_free_blocks > 0 && i < libxfs_agfl_size(mp))
+ {
freelist[i] = cpu_to_be32(
get_next_blockaddr(agno, 0, bno_bt));
i++;
}
- while (bcnt_bt->num_free_blocks > 0 && i < XFS_AGFL_SIZE(mp)) {
+ while (bcnt_bt->num_free_blocks > 0 && i < libxfs_agfl_size(mp))
+ {
freelist[i] = cpu_to_be32(
get_next_blockaddr(agno, 0, bcnt_bt));
i++;
} else {
agf->agf_flfirst = 0;
- agf->agf_fllast = cpu_to_be32(XFS_AGFL_SIZE(mp) - 1);
+ agf->agf_fllast = cpu_to_be32(libxfs_agfl_size(mp) - 1);
agf->agf_flcount = 0;
}
/*
* see if we can fit all the extra blocks into the AGFL
*/
- extra_blocks = (extra_blocks - XFS_AGFL_SIZE(mp) > 0)
- ? extra_blocks - XFS_AGFL_SIZE(mp)
+ extra_blocks = (extra_blocks - libxfs_agfl_size(mp) > 0)
+ ? extra_blocks - libxfs_agfl_size(mp)
: 0;
if (extra_blocks > 0)
*/
agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
b = agfl_bno + ag_rmaps[agno].ar_flcount;
- while (*b != NULLAGBLOCK && b - agfl_bno < XFS_AGFL_SIZE(mp)) {
+ while (*b != NULLAGBLOCK && b - agfl_bno < libxfs_agfl_size(mp)) {
error = rmap_add_ag_rec(mp, agno, be32_to_cpu(*b), 1,
XFS_RMAP_OWN_AG);
if (error)
if (no_modify) {
/* agf values not fixed in verify_set_agf, so recheck */
- if (be32_to_cpu(agf->agf_flfirst) >= XFS_AGFL_SIZE(mp) ||
- be32_to_cpu(agf->agf_fllast) >= XFS_AGFL_SIZE(mp)) {
+ if (be32_to_cpu(agf->agf_flfirst) >= libxfs_agfl_size(mp) ||
+ be32_to_cpu(agf->agf_fllast) >= libxfs_agfl_size(mp)) {
do_warn(_("agf %d freelist blocks bad, skipping "
"freelist scan\n"), i);
return;
count++;
if (i == be32_to_cpu(agf->agf_fllast))
break;
- if (++i == XFS_AGFL_SIZE(mp))
+ if (++i == libxfs_agfl_size(mp))
i = 0;
}
if (count != be32_to_cpu(agf->agf_flcount)) {