Source kernel commit:
710d707d2fa9cf4c2aa9def129e71e99513466ea
During testing of xfs/141 on a V4 filesystem, I observed some
inconsistent behavior with regard to resources that are held (i.e.
remain locked) across a defer roll. The transaction roll always gives
the defer roll function a new transaction, even if committing the old
transaction fails. However, the defer roll function only rejoins the
held resources if the transaction commit succeeded. This means that
callers of defer roll have to figure out whether the held resources are
attached to the transaction being passed back.
Worse yet, if the defer roll was part of a defer finish call, we have a
third possibility: the defer finish could pass back a dirty transaction
with dirty held resources and an error code.
The only sane way to handle all of these scenarios is to require that
the code that held the resource either cancel the transaction before
unlocking and releasing the resources, or use functions that detach
resources from a transaction properly (e.g. xfs_trans_brelse) if they
need to drop the reference before committing or cancelling the
transaction.
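For illustration only, here is a minimal sketch of the caller-side
pattern this implies. The function name is hypothetical; the helpers
(xfs_trans_bhold, xfs_trans_bhold_release, xfs_trans_brelse,
xfs_defer_finish) are the existing ones, and the shape mirrors the
xfs_attr_set_args() change below:

    static int
    example_hold_across_defer(struct xfs_trans **tpp, struct xfs_buf *bp)
    {
            int error;

            /* Keep bp locked while the deferred ops roll *tpp. */
            xfs_trans_bhold(*tpp, bp);
            error = xfs_defer_finish(tpp);

            /* bp is rejoined to the new transaction even on failure. */
            xfs_trans_bhold_release(*tpp, bp);
            if (error) {
                    /* Detach bp so the caller can safely cancel *tpp. */
                    xfs_trans_brelse(*tpp, bp);
                    return error;
            }
            return 0;
    }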
In order to make this so, change the defer roll code to join held
resources to the new transaction unconditionally and fix all the bhold
callers to release the held buffers correctly.
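A condensed sketch (not the full function) of what the defer roll path
ends up doing after this change; the buffer loop is reassembled from
the hunks below and from context, so the exact local names (e.g.
bpcount) are an assumption here:

    error = xfs_trans_roll(tpp);
    tp = *tpp;

    /* Rejoin and re-hold held buffers whether or not the roll failed. */
    for (i = 0; i < bpcount; i++) {
            xfs_trans_bjoin(tp, bplist[i]);
            xfs_trans_bhold(tp, bplist[i]);
    }

    if (error)
            trace_xfs_defer_trans_roll_error(tp, error);
    return error;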
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Signed-off-by: Eric Sandeen <sandeen@sandeen.net>
#define trace_xfs_trans_binval(a) ((void) 0)
#define trace_xfs_trans_bjoin(a) ((void) 0)
#define trace_xfs_trans_bhold(a) ((void) 0)
+#define trace_xfs_trans_bhold_release(a) ((void) 0)
#define trace_xfs_trans_get_buf(a) ((void) 0)
#define trace_xfs_trans_getsb_recur(a) ((void) 0)
#define trace_xfs_trans_getsb(a) ((void) 0)
void libxfs_trans_binval(struct xfs_trans *, struct xfs_buf *);
void libxfs_trans_bjoin(struct xfs_trans *, struct xfs_buf *);
void libxfs_trans_bhold(struct xfs_trans *, struct xfs_buf *);
+void libxfs_trans_bhold_release(struct xfs_trans *, struct xfs_buf *);
void libxfs_trans_dirty_buf(struct xfs_trans *, struct xfs_buf *);
void libxfs_trans_log_buf(struct xfs_trans *, struct xfs_buf *,
uint, uint);
#define xfs_trans_alloc_empty libxfs_trans_alloc_empty
#define xfs_trans_add_item libxfs_trans_add_item
#define xfs_trans_bhold libxfs_trans_bhold
+#define xfs_trans_bhold_release libxfs_trans_bhold_release
#define xfs_trans_binval libxfs_trans_binval
#define xfs_trans_bjoin libxfs_trans_bjoin
#define xfs_trans_brelse libxfs_trans_brelse
trace_xfs_trans_bjoin(bp->b_log_item);
}
+/*
+ * Mark the buffer as not needing to be unlocked when the buf item's
+ * iop_unlock() routine is called. The buffer must already be locked
+ * and associated with the given transaction.
+ */
+/* ARGSUSED */
void
libxfs_trans_bhold(
xfs_trans_t *tp,
trace_xfs_trans_bhold(bip);
}
+/*
+ * Cancel the previous buffer hold request made on this buffer
+ * for this transaction.
+ */
+void
+libxfs_trans_bhold_release(
+ xfs_trans_t *tp,
+ xfs_buf_t *bp)
+{
+ struct xfs_buf_log_item *bip = bp->b_log_item;
+
+ ASSERT(bp->b_transp == tp);
+ ASSERT(bip != NULL);
+
+ bip->bli_flags &= ~XFS_BLI_HOLD;
+ trace_xfs_trans_bhold_release(bip);
+}
+
xfs_buf_t *
libxfs_trans_get_buf_map(
xfs_trans_t *tp,
*/
int
xfs_attr_set_args(
- struct xfs_da_args *args,
- struct xfs_buf **leaf_bp)
+ struct xfs_da_args *args)
{
struct xfs_inode *dp = args->dp;
+ struct xfs_buf *leaf_bp = NULL;
int error;
/*
* It won't fit in the shortform, transform to a leaf block.
* GROT: another possible req'mt for a double-split btree op.
*/
- error = xfs_attr_shortform_to_leaf(args, leaf_bp);
+ error = xfs_attr_shortform_to_leaf(args, &leaf_bp);
if (error)
return error;
* Prevent the leaf buffer from being unlocked so that a
* concurrent AIL push cannot grab the half-baked leaf
* buffer and run into problems with the write verifier.
+ * Once we're done rolling the transaction we can release
+ * the hold and add the attr to the leaf.
*/
- xfs_trans_bhold(args->trans, *leaf_bp);
-
+ xfs_trans_bhold(args->trans, leaf_bp);
error = xfs_defer_finish(&args->trans);
- if (error)
- return error;
-
- /*
- * Commit the leaf transformation. We'll need another
- * (linked) transaction to add the new attribute to the
- * leaf.
- */
- error = xfs_trans_roll_inode(&args->trans, dp);
- if (error)
+ xfs_trans_bhold_release(args->trans, leaf_bp);
+ if (error) {
+ xfs_trans_brelse(args->trans, leaf_bp);
return error;
- xfs_trans_bjoin(args->trans, *leaf_bp);
- *leaf_bp = NULL;
+ }
}
if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
int flags)
{
struct xfs_mount *mp = dp->i_mount;
- struct xfs_buf *leaf_bp = NULL;
struct xfs_da_args args;
struct xfs_trans_res tres;
int rsvd = (flags & ATTR_ROOT) != 0;
goto out_trans_cancel;
xfs_trans_ijoin(args.trans, dp, 0);
- error = xfs_attr_set_args(&args, &leaf_bp);
+ error = xfs_attr_set_args(&args);
if (error)
- goto out_release_leaf;
+ goto out_trans_cancel;
if (!args.trans) {
/* shortform attribute has already been committed */
goto out_unlock;
xfs_iunlock(dp, XFS_ILOCK_EXCL);
return error;
-out_release_leaf:
- if (leaf_bp)
- xfs_trans_brelse(args.trans, leaf_bp);
out_trans_cancel:
if (args.trans)
xfs_trans_cancel(args.trans);
unsigned char *value, int *valuelenp, int flags);
int xfs_attr_set(struct xfs_inode *dp, const unsigned char *name,
unsigned char *value, int valuelen, int flags);
-int xfs_attr_set_args(struct xfs_da_args *args, struct xfs_buf **leaf_bp);
+int xfs_attr_set_args(struct xfs_da_args *args);
int xfs_attr_remove(struct xfs_inode *dp, const unsigned char *name, int flags);
int xfs_attr_remove_args(struct xfs_da_args *args);
int xfs_attr_list(struct xfs_inode *dp, char *buffer, int bufsize,
trace_xfs_defer_trans_roll(tp, _RET_IP_);
- /* Roll the transaction. */
+ /*
+ * Roll the transaction. Rolling always gives a new transaction (even
+ * if committing the old one fails!) to hand back to the caller, so we
+ * join the held resources to the new transaction so that we always
+ * return with the held resources joined to @tpp, no matter what
+ * happened.
+ */
error = xfs_trans_roll(tpp);
tp = *tpp;
- if (error) {
- trace_xfs_defer_trans_roll_error(tp, error);
- return error;
- }
/* Rejoin the joined inodes. */
for (i = 0; i < ipcount; i++)
xfs_trans_bhold(tp, bplist[i]);
}
+ if (error)
+ trace_xfs_defer_trans_roll_error(tp, error);
return error;
}