xfs_da_state_blk_t *save_blk);
-kmem_zone_t *xfs_da_state_zone; /* anchor for state struct zone */
+struct kmem_cache *xfs_da_state_cache; /* anchor for dir/attr state */
/*
* Allocate a dir-state structure.
* We don't put them on the stack since they're large.
*/
-xfs_da_state_t *
-xfs_da_state_alloc(void)
+struct xfs_da_state *
+xfs_da_state_alloc(
+ struct xfs_da_args *args)
{
- return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
+ struct xfs_da_state *state;
+
+ state = kmem_cache_zalloc(xfs_da_state_cache, GFP_NOFS | __GFP_NOFAIL);
+ state->args = args;
+ state->mp = args->dp->i_mount;
+ return state;
}
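With the new signature, call sites no longer have to fill in state->args and state->mp by hand after allocating. A minimal sketch of an updated caller, assuming args points at an already initialized struct xfs_da_args (illustrative only, not part of this patch):

	struct xfs_da_state	*state;

	state = xfs_da_state_alloc(args);
	/* ... perform the lookup/split using state ... */
	xfs_da_state_free(state);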
/*
#ifdef DEBUG
memset((char *)state, 0, sizeof(*state));
#endif /* DEBUG */
- kmem_cache_free(xfs_da_state_zone, state);
+ kmem_cache_free(xfs_da_state_cache, state);
+}
+
+void
+xfs_da_state_reset(
+ struct xfs_da_state *state,
+ struct xfs_da_args *args)
+{
+ xfs_da_state_kill_altpath(state);
+ memset(state, 0, sizeof(struct xfs_da_state));
+ state->args = args;
+ state->mp = state->args->dp->i_mount;
}
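xfs_da_state_reset() lets a caller recycle one allocation across consecutive da-btree operations instead of freeing and reallocating. A hedged sketch of that reuse pattern, assuming args1 and args2 are two prepared struct xfs_da_args (illustrative only):

	state = xfs_da_state_alloc(args1);
	/* ... first operation ... */
	xfs_da_state_reset(state, args2);
	/* ... second operation reuses the same state ... */
	xfs_da_state_free(state);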
static inline int xfs_dabuf_nfsb(struct xfs_mount *mp, int whichfork)
struct xfs_da3_icnode_hdr *to,
struct xfs_da_intnode *from)
{
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
struct xfs_da3_intnode *from3 = (struct xfs_da3_intnode *)from;
to->forw = be32_to_cpu(from3->hdr.info.hdr.forw);
struct xfs_da_intnode *to,
struct xfs_da3_icnode_hdr *from)
{
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
struct xfs_da3_intnode *to3 = (struct xfs_da3_intnode *)to;
ASSERT(from->magic == XFS_DA3_NODE_MAGIC);
if (!xfs_verify_magic16(bp, hdr->magic))
return __this_address;
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_meta_uuid))
return __this_address;
- if (be64_to_cpu(hdr3->blkno) != bp->b_bn)
+ if (be64_to_cpu(hdr3->blkno) != xfs_buf_daddr(bp))
return __this_address;
if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->lsn)))
return __this_address;
return;
}
- if (!xfs_sb_version_hascrc(&mp->m_sb))
+ if (!xfs_has_crc(mp))
return;
if (bip)
__this_address);
break;
}
- /* fall through */
+ fallthrough;
case XFS_DA_NODE_MAGIC:
fa = xfs_da3_node_verify(bp);
if (fa)
return xfs_da3_node_set_type(tp, *bpp);
}
+/*
+ * Copy the src directory/attr leaf/node buffer to the dst buffer.
+ * For v5 file systems, make sure the right blkno is stamped in.
+ */
+void
+xfs_da_buf_copy(
+ struct xfs_buf *dst,
+ struct xfs_buf *src,
+ size_t size)
+{
+ struct xfs_da3_blkinfo *da3 = dst->b_addr;
+
+ memcpy(dst->b_addr, src->b_addr, size);
+ dst->b_ops = src->b_ops;
+ xfs_trans_buf_copy_type(dst, src);
+ if (xfs_has_crc(dst->b_mount))
+ da3->blkno = cpu_to_be64(xfs_buf_daddr(dst));
+}
+
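This helper centralizes the copy pattern that the hunks further down currently open-code: memcpy the block contents, carry over b_ops and the buffer log item type, and restamp blkno when CRCs are enabled. The call pattern, as it appears in the root-split hunk below:

	xfs_da_buf_copy(bp, blk1->bp, size);
	xfs_trans_log_buf(tp, bp, 0, size - 1);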
/*========================================================================
* Routines used for growing the Btree.
*========================================================================*/
xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
node = bp->b_addr;
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
memset(hdr3, 0, sizeof(struct xfs_da3_node_hdr));
ichdr.magic = XFS_DA3_NODE_MAGIC;
- hdr3->info.blkno = cpu_to_be64(bp->b_bn);
+ hdr3->info.blkno = cpu_to_be64(xfs_buf_daddr(bp));
hdr3->info.owner = cpu_to_be64(args->dp->i_ino);
uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid);
} else {
trace_xfs_da_split(state->args);
+ if (XFS_TEST_ERROR(false, state->mp, XFS_ERRTAG_DA_LEAF_SPLIT))
+ return -EIO;
+
/*
* Walk back up the tree splitting/inserting/adjusting as necessary.
* If we need to insert and there isn't room, split the node, then
node = oldblk->bp->b_addr;
if (node->hdr.info.forw) {
if (be32_to_cpu(node->hdr.info.forw) != addblk->blkno) {
- xfs_buf_corruption_error(oldblk->bp);
+ xfs_buf_mark_corrupt(oldblk->bp);
error = -EFSCORRUPTED;
goto out;
}
node = oldblk->bp->b_addr;
if (node->hdr.info.back) {
if (be32_to_cpu(node->hdr.info.back) != addblk->blkno) {
- xfs_buf_corruption_error(oldblk->bp);
+ xfs_buf_mark_corrupt(oldblk->bp);
error = -EFSCORRUPTED;
goto out;
}
btree = icnodehdr.btree;
size = (int)((char *)&btree[icnodehdr.count] - (char *)oldroot);
level = icnodehdr.level;
-
- /*
- * we are about to copy oldroot to bp, so set up the type
- * of bp while we know exactly what it will be.
- */
- xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
} else {
struct xfs_dir3_icleaf_hdr leafhdr;
size = (int)((char *)&leafhdr.ents[leafhdr.count] -
(char *)leaf);
level = 0;
-
- /*
- * we are about to copy oldroot to bp, so set up the type
- * of bp while we know exactly what it will be.
- */
- xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
}
/*
- * we can copy most of the information in the node from one block to
- * another, but for CRC enabled headers we have to make sure that the
- * block specific identifiers are kept intact. We update the buffer
- * directly for this.
+ * Copy old root to new buffer and log it.
*/
- memcpy(node, oldroot, size);
- if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
- oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
- struct xfs_da3_intnode *node3 = (struct xfs_da3_intnode *)node;
-
- node3->hdr.info.blkno = cpu_to_be64(bp->b_bn);
- }
+ xfs_da_buf_copy(bp, blk1->bp, size);
xfs_trans_log_buf(tp, bp, 0, size - 1);
- bp->b_ops = blk1->bp->b_ops;
- xfs_trans_buf_copy_type(bp, blk1->bp);
+ /*
+ * Update blk1 to point to new buffer.
+ */
blk1->bp = bp;
blk1->blkno = blkno;
{
struct xfs_da_intnode *node1;
struct xfs_da_intnode *node2;
- struct xfs_da_intnode *tmpnode;
struct xfs_da_node_entry *btree1;
struct xfs_da_node_entry *btree2;
struct xfs_da_node_entry *btree_s;
((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
(be32_to_cpu(btree2[nodehdr2.count - 1].hashval) <
be32_to_cpu(btree1[nodehdr1.count - 1].hashval)))) {
- tmpnode = node1;
- node1 = node2;
- node2 = tmpnode;
+ swap(node1, node2);
xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1);
xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2);
btree1 = nodehdr1.btree;
xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level);
/*
- * This could be copying a leaf back into the root block in the case of
- * there only being a single leaf block left in the tree. Hence we have
- * to update the b_ops pointer as well to match the buffer type change
- * that could occur. For dir3 blocks we also need to update the block
- * number in the buffer header.
+ * Copy child to root buffer and log it.
*/
- memcpy(root_blk->bp->b_addr, bp->b_addr, args->geo->blksize);
- root_blk->bp->b_ops = bp->b_ops;
- xfs_trans_buf_copy_type(root_blk->bp, bp);
- if (oldroothdr.magic == XFS_DA3_NODE_MAGIC) {
- struct xfs_da3_blkinfo *da3 = root_blk->bp->b_addr;
- da3->blkno = cpu_to_be64(root_blk->bp->b_bn);
- }
+ xfs_da_buf_copy(root_blk->bp, bp, args->geo->blksize);
xfs_trans_log_buf(args->trans, root_blk->bp, 0,
args->geo->blksize - 1);
+ /*
+ * Now we can drop the child buffer.
+ */
error = xfs_da_shrink_inode(args, child, bp);
return error;
}
}
if (magic != XFS_DA_NODE_MAGIC && magic != XFS_DA3_NODE_MAGIC) {
- xfs_buf_corruption_error(blk->bp);
+ xfs_buf_mark_corrupt(blk->bp);
return -EFSCORRUPTED;
}
/* Tree taller than we can handle; bail out! */
if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH) {
- xfs_buf_corruption_error(blk->bp);
+ xfs_buf_mark_corrupt(blk->bp);
return -EFSCORRUPTED;
}
if (blkno == args->geo->leafblk)
expected_level = nodehdr.level - 1;
else if (expected_level != nodehdr.level) {
- xfs_buf_corruption_error(blk->bp);
+ xfs_buf_mark_corrupt(blk->bp);
return -EFSCORRUPTED;
} else
expected_level--;
ASSERT(path != NULL);
ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
level = (path->active-1) - 1; /* skip bottom layer in path */
- for (blk = &path->blk[level]; level >= 0; blk--, level--) {
+ for (; level >= 0; level--) {
+ blk = &path->blk[level];
xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr,
blk->bp->b_addr);
struct xfs_trans *tp = args->trans;
struct xfs_inode *dp = args->dp;
int w = args->whichfork;
- xfs_rfsblock_t nblks = dp->i_d.di_nblocks;
+ xfs_rfsblock_t nblks = dp->i_nblocks;
struct xfs_bmbt_irec map, *mapp;
int nmap, error, got, i, mapi;
*/
mapp = kmem_alloc(sizeof(*mapp) * count, 0);
for (b = *bno, mapi = 0; b < *bno + count; ) {
- nmap = min(XFS_BMAP_MAX_NMAP, count);
c = (int)(*bno + count - b);
+ nmap = min(XFS_BMAP_MAX_NMAP, c);
error = xfs_bmapi_write(tp, dp, b, c,
xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
args->total, &mapp[mapi], &nmap);
}
/* account for newly allocated blocks in reserved blocks total */
- args->total -= dp->i_d.di_nblocks - nblks;
+ args->total -= dp->i_nblocks - nblks;
out_free_map:
if (mapp != &map)
/*
* Copy the last block into the dead buffer and log it.
*/
- memcpy(dead_buf->b_addr, last_buf->b_addr, args->geo->blksize);
+ xfs_da_buf_copy(dead_buf, last_buf, args->geo->blksize);
xfs_trans_log_buf(tp, dead_buf, 0, args->geo->blksize - 1);
dead_info = dead_buf->b_addr;
+
/*
* Get values from the moved block.
*/
*/
if (nirecs > 1) {
map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map), KM_NOFS);
- if (!map)
+ if (!map) {
+ error = -ENOMEM;
goto out_free_irecs;
+ }
*mapp = map;
}
if (error || nmap == 0)
goto out_free;
- bp = xfs_trans_get_buf_map(tp, mp->m_ddev_targp, mapp, nmap, 0);
- error = bp ? bp->b_error : -EIO;
- if (error) {
- if (bp)
- xfs_trans_brelse(tp, bp);
+ error = xfs_trans_get_buf_map(tp, mp->m_ddev_targp, mapp, nmap, 0, &bp);
+ if (error)
goto out_free;
- }
*bpp = bp;