xfs: formalize the process of holding onto resources across a defer roll
author     Darrick J. Wong <djwong@kernel.org>
           Thu, 28 Apr 2022 19:39:02 +0000 (15:39 -0400)
committer  Eric Sandeen <sandeen@sandeen.net>
           Thu, 28 Apr 2022 19:39:02 +0000 (15:39 -0400)
Source kernel commit: c5db9f937b2971c78d6c6bbaa61a6450efa8b845

Transaction users are allowed to flag up to two buffers and two inodes
for ownership preservation across a deferred transaction roll.  Hoist
the variables and code responsible for this out of xfs_defer_trans_roll
so that we can use it for the defer capture mechanism.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Allison Henderson <allison.henderson@oracle.com>
Signed-off-by: Eric Sandeen <sandeen@sandeen.net>
include/xfs_trans.h
libxfs/xfs_defer.c
libxfs/xfs_defer.h
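
For context, a rough sketch of how a transaction user flags resources for preservation: a buffer held with xfs_trans_bhold() (which sets XFS_BLI_HOLD) and an inode joined with lock_flags == 0 both stay with the caller across every roll that xfs_defer_finish() performs.  The mp/ip/bp variables and the tr_itruncate reservation below are assumptions for illustration, with error handling trimmed; this is not code from the patch.

	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);	/* lock_flags == 0: inode stays joined */
	xfs_trans_bjoin(tp, bp);	/* bp: a locked buffer the caller owns */
	xfs_trans_bhold(tp, bp);	/* sets XFS_BLI_HOLD on the buf log item */

	/* ... log changes and queue deferred ops against tp ... */

	error = xfs_defer_finish(&tp);	/* may roll tp; held items are rejoined */
	if (!error)
		error = xfs_trans_commit(tp);
	else
		xfs_trans_cancel(tp);

	xfs_buf_relse(bp);		/* buffer lock survived commit/cancel */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;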

diff --git a/include/xfs_trans.h b/include/xfs_trans.h
index 2c55bb8573699b0091aa4ecb8fa03582bb34e7c9..690759ece3af79b2599636dc50be5bf4fd882c78 100644
--- a/include/xfs_trans.h
+++ b/include/xfs_trans.h
@@ -58,9 +58,6 @@ typedef struct xfs_qoff_logitem {
        xfs_qoff_logformat_t    qql_format;     /* logged structure */
 } xfs_qoff_logitem_t;
 
-#define XFS_DEFER_OPS_NR_INODES        2       /* join up to two inodes */
-#define XFS_DEFER_OPS_NR_BUFS  2       /* join up to two buffers */
-
 typedef struct xfs_trans {
        unsigned int            t_log_res;      /* amt of log space resvd */
        unsigned int            t_log_count;    /* count for perm log res */
diff --git a/libxfs/xfs_defer.c b/libxfs/xfs_defer.c
index 1fdf6c7203577326f2c099550b5cfdaf9b70b8ec..35f51f8720f5ab4dbd7e934378a167e69bf60db1 100644
--- a/libxfs/xfs_defer.c
+++ b/libxfs/xfs_defer.c
@@ -228,23 +228,20 @@ xfs_defer_trans_abort(
        }
 }
 
-/* Roll a transaction so we can do some deferred op processing. */
-STATIC int
-xfs_defer_trans_roll(
-       struct xfs_trans                **tpp)
+/*
+ * Capture resources that the caller said not to release ("held") when the
+ * transaction commits.  Caller is responsible for zero-initializing @dres.
+ */
+static int
+xfs_defer_save_resources(
+       struct xfs_defer_resources      *dres,
+       struct xfs_trans                *tp)
 {
-       struct xfs_trans                *tp = *tpp;
        struct xfs_buf_log_item         *bli;
        struct xfs_inode_log_item       *ili;
        struct xfs_log_item             *lip;
-       struct xfs_buf                  *bplist[XFS_DEFER_OPS_NR_BUFS];
-       struct xfs_inode                *iplist[XFS_DEFER_OPS_NR_INODES];
-       unsigned int                    ordered = 0; /* bitmap */
-       int                             bpcount = 0, ipcount = 0;
-       int                             i;
-       int                             error;
 
-       BUILD_BUG_ON(NBBY * sizeof(ordered) < XFS_DEFER_OPS_NR_BUFS);
+       BUILD_BUG_ON(NBBY * sizeof(dres->dr_ordered) < XFS_DEFER_OPS_NR_BUFS);
 
        list_for_each_entry(lip, &tp->t_items, li_trans) {
                switch (lip->li_type) {
@@ -252,28 +249,29 @@ xfs_defer_trans_roll(
                        bli = container_of(lip, struct xfs_buf_log_item,
                                           bli_item);
                        if (bli->bli_flags & XFS_BLI_HOLD) {
-                               if (bpcount >= XFS_DEFER_OPS_NR_BUFS) {
+                               if (dres->dr_bufs >= XFS_DEFER_OPS_NR_BUFS) {
                                        ASSERT(0);
                                        return -EFSCORRUPTED;
                                }
                                if (bli->bli_flags & XFS_BLI_ORDERED)
-                                       ordered |= (1U << bpcount);
+                                       dres->dr_ordered |=
+                                                       (1U << dres->dr_bufs);
                                else
                                        xfs_trans_dirty_buf(tp, bli->bli_buf);
-                               bplist[bpcount++] = bli->bli_buf;
+                               dres->dr_bp[dres->dr_bufs++] = bli->bli_buf;
                        }
                        break;
                case XFS_LI_INODE:
                        ili = container_of(lip, struct xfs_inode_log_item,
                                           ili_item);
                        if (ili->ili_lock_flags == 0) {
-                               if (ipcount >= XFS_DEFER_OPS_NR_INODES) {
+                               if (dres->dr_inos >= XFS_DEFER_OPS_NR_INODES) {
                                        ASSERT(0);
                                        return -EFSCORRUPTED;
                                }
                                xfs_trans_log_inode(tp, ili->ili_inode,
                                                    XFS_ILOG_CORE);
-                               iplist[ipcount++] = ili->ili_inode;
+                               dres->dr_ip[dres->dr_inos++] = ili->ili_inode;
                        }
                        break;
                default:
@@ -281,7 +279,43 @@ xfs_defer_trans_roll(
                }
        }
 
-       trace_xfs_defer_trans_roll(tp, _RET_IP_);
+       return 0;
+}
+
+/* Attach the held resources to the transaction. */
+static void
+xfs_defer_restore_resources(
+       struct xfs_trans                *tp,
+       struct xfs_defer_resources      *dres)
+{
+       unsigned short                  i;
+
+       /* Rejoin the joined inodes. */
+       for (i = 0; i < dres->dr_inos; i++)
+               xfs_trans_ijoin(tp, dres->dr_ip[i], 0);
+
+       /* Rejoin the buffers and dirty them so the log moves forward. */
+       for (i = 0; i < dres->dr_bufs; i++) {
+               xfs_trans_bjoin(tp, dres->dr_bp[i]);
+               if (dres->dr_ordered & (1U << i))
+                       xfs_trans_ordered_buf(tp, dres->dr_bp[i]);
+               xfs_trans_bhold(tp, dres->dr_bp[i]);
+       }
+}
+
+/* Roll a transaction so we can do some deferred op processing. */
+STATIC int
+xfs_defer_trans_roll(
+       struct xfs_trans                **tpp)
+{
+       struct xfs_defer_resources      dres = { };
+       int                             error;
+
+       error = xfs_defer_save_resources(&dres, *tpp);
+       if (error)
+               return error;
+
+       trace_xfs_defer_trans_roll(*tpp, _RET_IP_);
 
        /*
         * Roll the transaction.  Rolling always given a new transaction (even
@@ -291,22 +325,11 @@ xfs_defer_trans_roll(
         * happened.
         */
        error = xfs_trans_roll(tpp);
-       tp = *tpp;
 
-       /* Rejoin the joined inodes. */
-       for (i = 0; i < ipcount; i++)
-               xfs_trans_ijoin(tp, iplist[i], 0);
-
-       /* Rejoin the buffers and dirty them so the log moves forward. */
-       for (i = 0; i < bpcount; i++) {
-               xfs_trans_bjoin(tp, bplist[i]);
-               if (ordered & (1U << i))
-                       xfs_trans_ordered_buf(tp, bplist[i]);
-               xfs_trans_bhold(tp, bplist[i]);
-       }
+       xfs_defer_restore_resources(*tpp, &dres);
 
        if (error)
-               trace_xfs_defer_trans_roll_error(tp, error);
+               trace_xfs_defer_trans_roll_error(*tpp, error);
        return error;
 }
 
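The commit message says these helpers are being hoisted so that the defer capture mechanism can reuse them.  A hypothetical sketch of the capture side, written as if it lived in libxfs/xfs_defer.c where the static helpers are visible, and assuming the capture structure gains a struct xfs_defer_resources member (called dfc_held here; not part of this patch):

/*
 * Hypothetical illustration only: stash the resources the caller held so a
 * later transaction can pick them up when the captured dfops chain resumes.
 */
static int
xfs_defer_ops_capture_held(		/* assumed name */
	struct xfs_defer_capture	*dfc,	/* assumed dfc_held member */
	struct xfs_trans		*tp)
{
	/* dfc is allocated zeroed, as xfs_defer_save_resources() requires. */
	return xfs_defer_save_resources(&dfc->dfc_held, tp);
}
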
diff --git a/libxfs/xfs_defer.h b/libxfs/xfs_defer.h
index 05472f71fffe432b20613854bf7fc6e91abacc39..e095abb96f1ab24ad0688bdfbf32edd1d0145aaa 100644
--- a/libxfs/xfs_defer.h
+++ b/libxfs/xfs_defer.h
@@ -64,6 +64,30 @@ extern const struct xfs_defer_op_type xfs_rmap_update_defer_type;
 extern const struct xfs_defer_op_type xfs_extent_free_defer_type;
 extern const struct xfs_defer_op_type xfs_agfl_free_defer_type;
 
+/*
+ * Deferred operation item relogging limits.
+ */
+#define XFS_DEFER_OPS_NR_INODES        2       /* join up to two inodes */
+#define XFS_DEFER_OPS_NR_BUFS  2       /* join up to two buffers */
+
+/* Resources that must be held across a transaction roll. */
+struct xfs_defer_resources {
+       /* held buffers */
+       struct xfs_buf          *dr_bp[XFS_DEFER_OPS_NR_BUFS];
+
+       /* inodes with no unlock flags */
+       struct xfs_inode        *dr_ip[XFS_DEFER_OPS_NR_INODES];
+
+       /* number of held buffers */
+       unsigned short          dr_bufs;
+
+       /* bitmap of ordered buffers */
+       unsigned short          dr_ordered;
+
+       /* number of held inodes */
+       unsigned short          dr_inos;
+};
+
 /*
  * This structure enables a dfops user to detach the chain of deferred
  * operations from a transaction so that they can be continued later.
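
On the other end, a continuation path would reattach the saved resources to the transaction that resumes the captured chain.  Another hypothetical sketch under the same assumptions as above (an xfs_defer_capture with a dfc_held member), relying only on the xfs_defer_restore_resources() helper added by this patch:

/*
 * Hypothetical illustration only: rejoin the captured buffers and inodes to
 * the new transaction before the deferred work continues.
 */
static void
xfs_defer_ops_continue_held(		/* assumed name */
	struct xfs_defer_capture	*dfc,	/* assumed dfc_held member */
	struct xfs_trans		*tp)
{
	xfs_defer_restore_resources(tp, &dfc->dfc_held);
}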