xfsprogs: Release v6.10.1

diff --git a/libxfs/xfs_defer.c b/libxfs/xfs_defer.c
index 77a94f58f4143a582cb5e3a34c233c1229540f9f..7cf392e2ff698bd1363ab9bfa1adab653dc93ed8 100644
--- a/libxfs/xfs_defer.c
+++ b/libxfs/xfs_defer.c
@@ -21,6 +21,7 @@
 #include "xfs_da_format.h"
 #include "xfs_da_btree.h"
 #include "xfs_attr.h"
+#include "xfs_exchmaps.h"
 
 static struct kmem_cache       *xfs_defer_pending_cache;
 
@@ -176,16 +177,89 @@ static struct kmem_cache  *xfs_defer_pending_cache;
  * Note that the continuation requested between t2 and t3 is likely to
  * reoccur.
  */
+STATIC struct xfs_log_item *
+xfs_defer_barrier_create_intent(
+       struct xfs_trans                *tp,
+       struct list_head                *items,
+       unsigned int                    count,
+       bool                            sort)
+{
+       return NULL;
+}
+
+STATIC void
+xfs_defer_barrier_abort_intent(
+       struct xfs_log_item             *intent)
+{
+       /* empty */
+}
 
-static const struct xfs_defer_op_type *defer_op_types[] = {
-       [XFS_DEFER_OPS_TYPE_BMAP]       = &xfs_bmap_update_defer_type,
-       [XFS_DEFER_OPS_TYPE_REFCOUNT]   = &xfs_refcount_update_defer_type,
-       [XFS_DEFER_OPS_TYPE_RMAP]       = &xfs_rmap_update_defer_type,
-       [XFS_DEFER_OPS_TYPE_FREE]       = &xfs_extent_free_defer_type,
-       [XFS_DEFER_OPS_TYPE_AGFL_FREE]  = &xfs_agfl_free_defer_type,
-       [XFS_DEFER_OPS_TYPE_ATTR]       = &xfs_attr_defer_type,
+STATIC struct xfs_log_item *
+xfs_defer_barrier_create_done(
+       struct xfs_trans                *tp,
+       struct xfs_log_item             *intent,
+       unsigned int                    count)
+{
+       return NULL;
+}
+
+STATIC int
+xfs_defer_barrier_finish_item(
+       struct xfs_trans                *tp,
+       struct xfs_log_item             *done,
+       struct list_head                *item,
+       struct xfs_btree_cur            **state)
+{
+       ASSERT(0);
+       return -EFSCORRUPTED;
+}
+
+STATIC void
+xfs_defer_barrier_cancel_item(
+       struct list_head                *item)
+{
+       ASSERT(0);
+}
+
+static const struct xfs_defer_op_type xfs_barrier_defer_type = {
+       .max_items      = 1,
+       .create_intent  = xfs_defer_barrier_create_intent,
+       .abort_intent   = xfs_defer_barrier_abort_intent,
+       .create_done    = xfs_defer_barrier_create_done,
+       .finish_item    = xfs_defer_barrier_finish_item,
+       .cancel_item    = xfs_defer_barrier_cancel_item,
 };
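
For reference, the shape of the ops vector that this patch threads through the
pending items can be reconstructed from the call sites in the hunks below.
This is a sketch showing only the members exercised in this file; the
authoritative definition lives in xfs_defer.h:

	struct xfs_defer_op_type {
		unsigned int		max_items;
		struct xfs_log_item *(*create_intent)(struct xfs_trans *tp,
				struct list_head *items, unsigned int count,
				bool sort);
		void (*abort_intent)(struct xfs_log_item *intent);
		struct xfs_log_item *(*create_done)(struct xfs_trans *tp,
				struct xfs_log_item *intent,
				unsigned int count);
		int (*finish_item)(struct xfs_trans *tp,
				struct xfs_log_item *done,
				struct list_head *item,
				struct xfs_btree_cur **state);
		void (*cancel_item)(struct list_head *item);
		struct xfs_log_item *(*relog_intent)(struct xfs_trans *tp,
				struct xfs_log_item *intent,
				struct xfs_log_item *done_item);
		int (*recover_work)(struct xfs_defer_pending *dfp,
				struct list_head *capture_list);
	};

The barrier type above implements only the callbacks that can legally run: it
can never absorb more work (max_items == 1), it never logs an intent
(create_intent returns NULL), and actually finishing one is treated as
corruption (ASSERT plus -EFSCORRUPTED), because a barrier exists only to keep
adjacent work items apart.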
 
+/* Create a log intent done item for a log intent item. */
+static inline void
+xfs_defer_create_done(
+       struct xfs_trans                *tp,
+       struct xfs_defer_pending        *dfp)
+{
+       struct xfs_log_item             *lip;
+
+       /* If there is no log intent item, there can be no log done item. */
+       if (!dfp->dfp_intent)
+               return;
+
+       /*
+        * Mark the transaction dirty, even on error. This ensures the
+        * transaction is aborted, which:
+        *
+        * 1.) releases the log intent item and frees the log done item
+        * 2.) shuts down the filesystem
+        */
+       tp->t_flags |= XFS_TRANS_DIRTY;
+       lip = dfp->dfp_ops->create_done(tp, dfp->dfp_intent, dfp->dfp_count);
+       if (!lip)
+               return;
+
+       tp->t_flags |= XFS_TRANS_HAS_INTENT_DONE;
+       xfs_trans_add_item(tp, lip);
+       set_bit(XFS_LI_DIRTY, &lip->li_flags);
+       dfp->dfp_done = lip;
+}
+
 /*
  * Ensure there's a log intent item associated with this deferred work item if
  * the operation must be restarted on crash.  Returns 1 if there's a log item;
@@ -197,18 +271,21 @@ xfs_defer_create_intent(
        struct xfs_defer_pending        *dfp,
        bool                            sort)
 {
-       const struct xfs_defer_op_type  *ops = defer_op_types[dfp->dfp_type];
        struct xfs_log_item             *lip;
 
        if (dfp->dfp_intent)
                return 1;
 
-       lip = ops->create_intent(tp, &dfp->dfp_work, dfp->dfp_count, sort);
+       lip = dfp->dfp_ops->create_intent(tp, &dfp->dfp_work, dfp->dfp_count,
+                       sort);
        if (!lip)
                return 0;
        if (IS_ERR(lip))
                return PTR_ERR(lip);
 
+       tp->t_flags |= XFS_TRANS_DIRTY;
+       xfs_trans_add_item(tp, lip);
+       set_bit(XFS_LI_DIRTY, &lip->li_flags);
        dfp->dfp_intent = lip;
        return 1;
 }
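
The 1/0/negative convention is consumed by xfs_defer_create_intents() (next
hunk). A sketch of the calling pattern, assuming the caller only needs to know
whether any intent at all was logged (the ret/ret2 names are illustrative):

	int	ret = 0;

	list_for_each_entry(dfp, &tp->t_dfops, dfp_list) {
		int	ret2 = xfs_defer_create_intent(tp, dfp, true);

		if (ret2 < 0)
			return ret2;	/* errno; caller shuts down */
		ret |= ret2;		/* remember that an intent exists */
	}
	return ret;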
@@ -240,26 +317,60 @@ xfs_defer_create_intents(
        return ret;
 }
 
+static inline void
+xfs_defer_pending_abort(
+       struct xfs_mount                *mp,
+       struct xfs_defer_pending        *dfp)
+{
+       trace_xfs_defer_pending_abort(mp, dfp);
+
+       if (dfp->dfp_intent && !dfp->dfp_done) {
+               dfp->dfp_ops->abort_intent(dfp->dfp_intent);
+               dfp->dfp_intent = NULL;
+       }
+}
+
+static inline void
+xfs_defer_pending_cancel_work(
+       struct xfs_mount                *mp,
+       struct xfs_defer_pending        *dfp)
+{
+       struct list_head                *pwi;
+       struct list_head                *n;
+
+       trace_xfs_defer_cancel_list(mp, dfp);
+
+       list_del(&dfp->dfp_list);
+       list_for_each_safe(pwi, n, &dfp->dfp_work) {
+               list_del(pwi);
+               dfp->dfp_count--;
+               trace_xfs_defer_cancel_item(mp, dfp, pwi);
+               dfp->dfp_ops->cancel_item(pwi);
+       }
+       ASSERT(dfp->dfp_count == 0);
+       kmem_cache_free(xfs_defer_pending_cache, dfp);
+}
+
+STATIC void
+xfs_defer_pending_abort_list(
+       struct xfs_mount                *mp,
+       struct list_head                *dop_list)
+{
+       struct xfs_defer_pending        *dfp;
+
+       /* Abort intent items that don't have a done item. */
+       list_for_each_entry(dfp, dop_list, dfp_list)
+               xfs_defer_pending_abort(mp, dfp);
+}
+
 /* Abort all the intents that were committed. */
 STATIC void
 xfs_defer_trans_abort(
        struct xfs_trans                *tp,
        struct list_head                *dop_pending)
 {
-       struct xfs_defer_pending        *dfp;
-       const struct xfs_defer_op_type  *ops;
-
        trace_xfs_defer_trans_abort(tp, _RET_IP_);
-
-       /* Abort intent items that don't have a done item. */
-       list_for_each_entry(dfp, dop_pending, dfp_list) {
-               ops = defer_op_types[dfp->dfp_type];
-               trace_xfs_defer_pending_abort(tp->t_mountp, dfp);
-               if (dfp->dfp_intent && !dfp->dfp_done) {
-                       ops->abort_intent(dfp->dfp_intent);
-                       dfp->dfp_intent = NULL;
-               }
-       }
+       xfs_defer_pending_abort_list(tp->t_mountp, dop_pending);
 }
 
 /*
@@ -377,27 +488,31 @@ xfs_defer_cancel_list(
 {
        struct xfs_defer_pending        *dfp;
        struct xfs_defer_pending        *pli;
-       struct list_head                *pwi;
-       struct list_head                *n;
-       const struct xfs_defer_op_type  *ops;
 
        /*
         * Free the pending items.  Caller should already have arranged
         * for the intent items to be released.
         */
-       list_for_each_entry_safe(dfp, pli, dop_list, dfp_list) {
-               ops = defer_op_types[dfp->dfp_type];
-               trace_xfs_defer_cancel_list(mp, dfp);
-               list_del(&dfp->dfp_list);
-               list_for_each_safe(pwi, n, &dfp->dfp_work) {
-                       list_del(pwi);
-                       dfp->dfp_count--;
-                       trace_xfs_defer_cancel_item(mp, dfp, pwi);
-                       ops->cancel_item(pwi);
-               }
-               ASSERT(dfp->dfp_count == 0);
-               kmem_cache_free(xfs_defer_pending_cache, dfp);
+       list_for_each_entry_safe(dfp, pli, dop_list, dfp_list)
+               xfs_defer_pending_cancel_work(mp, dfp);
+}
+
+static inline void
+xfs_defer_relog_intent(
+       struct xfs_trans                *tp,
+       struct xfs_defer_pending        *dfp)
+{
+       struct xfs_log_item             *lip;
+
+       xfs_defer_create_done(tp, dfp);
+
+       lip = dfp->dfp_ops->relog_intent(tp, dfp->dfp_intent, dfp->dfp_done);
+       if (lip) {
+               xfs_trans_add_item(tp, lip);
+               set_bit(XFS_LI_DIRTY, &lip->li_flags);
        }
+       dfp->dfp_done = NULL;
+       dfp->dfp_intent = lip;
 }
 
 /*
@@ -405,7 +520,7 @@ xfs_defer_cancel_list(
  * done item to release the intent item; and then log a new intent item.
  * The caller should provide a fresh transaction and roll it after we're done.
  */
-static int
+static void
 xfs_defer_relog(
        struct xfs_trans                **tpp,
        struct list_head                *dfops)
@@ -444,31 +559,28 @@ xfs_defer_relog(
 
                trace_xfs_defer_relog_intent((*tpp)->t_mountp, dfp);
                XFS_STATS_INC((*tpp)->t_mountp, defer_relog);
-               dfp->dfp_intent = xfs_trans_item_relog(dfp->dfp_intent, *tpp);
-       }
 
-       if ((*tpp)->t_flags & XFS_TRANS_DIRTY)
-               return xfs_defer_trans_roll(tpp);
-       return 0;
+               xfs_defer_relog_intent(*tpp, dfp);
+       }
 }
 
 /*
  * Log an intent-done item for the first pending intent, and finish the work
  * items.
  */
-static int
+int
 xfs_defer_finish_one(
        struct xfs_trans                *tp,
        struct xfs_defer_pending        *dfp)
 {
-       const struct xfs_defer_op_type  *ops = defer_op_types[dfp->dfp_type];
+       const struct xfs_defer_op_type  *ops = dfp->dfp_ops;
        struct xfs_btree_cur            *state = NULL;
        struct list_head                *li, *n;
        int                             error;
 
        trace_xfs_defer_pending_finish(tp->t_mountp, dfp);
 
-       dfp->dfp_done = ops->create_done(tp, dfp->dfp_intent, dfp->dfp_count);
+       xfs_defer_create_done(tp, dfp);
        list_for_each_safe(li, n, &dfp->dfp_work) {
                list_del(li);
                dfp->dfp_count--;
@@ -505,6 +617,24 @@ out:
        return error;
 }
 
+/* Move all paused deferred work from @tp to @paused_list. */
+static void
+xfs_defer_isolate_paused(
+       struct xfs_trans                *tp,
+       struct list_head                *paused_list)
+{
+       struct xfs_defer_pending        *dfp;
+       struct xfs_defer_pending        *pli;
+
+       list_for_each_entry_safe(dfp, pli, &tp->t_dfops, dfp_list) {
+               if (!(dfp->dfp_flags & XFS_DEFER_PAUSED))
+                       continue;
+
+               list_move_tail(&dfp->dfp_list, paused_list);
+               trace_xfs_defer_isolate_paused(tp->t_mountp, dfp);
+       }
+}
+
 /*
  * Finish all the pending work.  This involves logging intent items for
  * any work items that wandered in since the last transaction roll (if
@@ -520,6 +650,7 @@ xfs_defer_finish_noroll(
        struct xfs_defer_pending        *dfp = NULL;
        int                             error = 0;
        LIST_HEAD(dop_pending);
+       LIST_HEAD(dop_paused);
 
        ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
 
@@ -538,6 +669,8 @@ xfs_defer_finish_noroll(
                 */
                int has_intents = xfs_defer_create_intents(*tp);
 
+               xfs_defer_isolate_paused(*tp, &dop_paused);
+
                list_splice_init(&(*tp)->t_dfops, &dop_pending);
 
                if (has_intents < 0) {
@@ -550,22 +683,33 @@ xfs_defer_finish_noroll(
                                goto out_shutdown;
 
                        /* Relog intent items to keep the log moving. */
-                       error = xfs_defer_relog(tp, &dop_pending);
-                       if (error)
-                               goto out_shutdown;
+                       xfs_defer_relog(tp, &dop_pending);
+                       xfs_defer_relog(tp, &dop_paused);
+
+                       if ((*tp)->t_flags & XFS_TRANS_DIRTY) {
+                               error = xfs_defer_trans_roll(tp);
+                               if (error)
+                                       goto out_shutdown;
+                       }
                }
 
-               dfp = list_first_entry(&dop_pending, struct xfs_defer_pending,
-                                      dfp_list);
+               dfp = list_first_entry_or_null(&dop_pending,
+                               struct xfs_defer_pending, dfp_list);
+               if (!dfp)
+                       break;
                error = xfs_defer_finish_one(*tp, dfp);
                if (error && error != -EAGAIN)
                        goto out_shutdown;
        }
 
+       /* Requeue the paused items in the outgoing transaction. */
+       list_splice_tail_init(&dop_paused, &(*tp)->t_dfops);
+
        trace_xfs_defer_finish_done(*tp, _RET_IP_);
        return 0;
 
 out_shutdown:
+       list_splice_tail_init(&dop_paused, &dop_pending);
        xfs_defer_trans_abort(*tp, &dop_pending);
        xfs_force_shutdown((*tp)->t_mountp, SHUTDOWN_CORRUPT_INCORE);
        trace_xfs_defer_finish_error(*tp, error);
@@ -578,6 +722,9 @@ int
 xfs_defer_finish(
        struct xfs_trans        **tp)
 {
+#ifdef DEBUG
+       struct xfs_defer_pending *dfp;
+#endif
        int                     error;
 
        /*
@@ -597,7 +744,10 @@ xfs_defer_finish(
        }
 
        /* Reset LOWMODE now that we've finished all the dfops. */
-       ASSERT(list_empty(&(*tp)->t_dfops));
+#ifdef DEBUG
+       list_for_each_entry(dfp, &(*tp)->t_dfops, dfp_list)
+               ASSERT(dfp->dfp_flags & XFS_DEFER_PAUSED);
+#endif
        (*tp)->t_flags &= ~XFS_TRANS_LOWMODE;
        return 0;
 }
@@ -609,48 +759,160 @@ xfs_defer_cancel(
        struct xfs_mount        *mp = tp->t_mountp;
 
        trace_xfs_defer_cancel(tp, _RET_IP_);
+       xfs_defer_trans_abort(tp, &tp->t_dfops);
        xfs_defer_cancel_list(mp, &tp->t_dfops);
 }
 
+/*
+ * Return the last pending work item attached to this transaction if it matches
+ * the deferred op type.
+ */
+static inline struct xfs_defer_pending *
+xfs_defer_find_last(
+       struct xfs_trans                *tp,
+       const struct xfs_defer_op_type  *ops)
+{
+       struct xfs_defer_pending        *dfp = NULL;
+
+       /* No dfops at all? */
+       if (list_empty(&tp->t_dfops))
+               return NULL;
+
+       dfp = list_last_entry(&tp->t_dfops, struct xfs_defer_pending,
+                       dfp_list);
+
+       /* Wrong type? */
+       if (dfp->dfp_ops != ops)
+               return NULL;
+       return dfp;
+}
+
+/*
+ * Decide if we can add a deferred work item to the last dfops item attached
+ * to the transaction.
+ */
+static inline bool
+xfs_defer_can_append(
+       struct xfs_defer_pending        *dfp,
+       const struct xfs_defer_op_type  *ops)
+{
+       /* Already logged? */
+       if (dfp->dfp_intent)
+               return false;
+
+       /* Paused items cannot absorb more work */
+       if (dfp->dfp_flags & XFS_DEFER_PAUSED)
+               return false;
+
+       /* Already full? */
+       if (ops->max_items && dfp->dfp_count >= ops->max_items)
+               return false;
+
+       return true;
+}
+
+/* Create a new pending item at the end of the transaction list. */
+static inline struct xfs_defer_pending *
+xfs_defer_alloc(
+       struct list_head                *dfops,
+       const struct xfs_defer_op_type  *ops)
+{
+       struct xfs_defer_pending        *dfp;
+
+       dfp = kmem_cache_zalloc(xfs_defer_pending_cache,
+                       GFP_KERNEL | __GFP_NOFAIL);
+       dfp->dfp_ops = ops;
+       INIT_LIST_HEAD(&dfp->dfp_work);
+       list_add_tail(&dfp->dfp_list, dfops);
+
+       return dfp;
+}
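
xfs_defer_add() below calls xfs_defer_add_item(), whose hunk does not appear
in this excerpt. Judging from the open-coded logic it replaces (see the
removed lines in xfs_defer_add), the helper presumably amounts to this sketch:

	/* Assumed helper: queue one work item on a pending item. */
	static void
	xfs_defer_add_item(
		struct xfs_defer_pending	*dfp,
		struct list_head		*li)
	{
		list_add_tail(li, &dfp->dfp_work);
		dfp->dfp_count++;
	}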
+
 /* Add an item for later deferred processing. */
-void
+struct xfs_defer_pending *
 xfs_defer_add(
        struct xfs_trans                *tp,
-       enum xfs_defer_ops_type         type,
-       struct list_head                *li)
+       struct list_head                *li,
+       const struct xfs_defer_op_type  *ops)
 {
        struct xfs_defer_pending        *dfp = NULL;
-       const struct xfs_defer_op_type  *ops = defer_op_types[type];
 
        ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
-       BUILD_BUG_ON(ARRAY_SIZE(defer_op_types) != XFS_DEFER_OPS_TYPE_MAX);
 
-       /*
-        * Add the item to a pending item at the end of the intake list.
-        * If the last pending item has the same type, reuse it.  Else,
-        * create a new pending item at the end of the intake list.
-        */
-       if (!list_empty(&tp->t_dfops)) {
-               dfp = list_last_entry(&tp->t_dfops,
-                               struct xfs_defer_pending, dfp_list);
-               if (dfp->dfp_type != type ||
-                   (ops->max_items && dfp->dfp_count >= ops->max_items))
-                       dfp = NULL;
-       }
-       if (!dfp) {
-               dfp = kmem_cache_zalloc(xfs_defer_pending_cache,
-                               GFP_NOFS | __GFP_NOFAIL);
-               dfp->dfp_type = type;
-               dfp->dfp_intent = NULL;
-               dfp->dfp_done = NULL;
-               dfp->dfp_count = 0;
-               INIT_LIST_HEAD(&dfp->dfp_work);
-               list_add_tail(&dfp->dfp_list, &tp->t_dfops);
-       }
+       dfp = xfs_defer_find_last(tp, ops);
+       if (!dfp || !xfs_defer_can_append(dfp, ops))
+               dfp = xfs_defer_alloc(&tp->t_dfops, ops);
 
-       list_add_tail(li, &dfp->dfp_work);
+       xfs_defer_add_item(dfp, li);
        trace_xfs_defer_add_item(tp->t_mountp, dfp, li);
-       dfp->dfp_count++;
+       return dfp;
+}
+
+/*
+ * Add a defer ops barrier to force two otherwise adjacent deferred work items
+ * to be tracked separately and have separate log items.
+ */
+void
+xfs_defer_add_barrier(
+       struct xfs_trans                *tp)
+{
+       struct xfs_defer_pending        *dfp;
+
+       ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
+
+       /* If the last defer op added was a barrier, we're done. */
+       dfp = xfs_defer_find_last(tp, &xfs_barrier_defer_type);
+       if (dfp)
+               return;
+
+       dfp = xfs_defer_alloc(&tp->t_dfops, &xfs_barrier_defer_type);
+
+       trace_xfs_defer_add_item(tp->t_mountp, dfp, NULL);
+}
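
Illustrative use of the barrier (the work items here are hypothetical): two
same-type items that must be logged as separate intents can be split like so:

	xfs_defer_add(tp, &item1->xefi_list, &xfs_extent_free_defer_type);
	xfs_defer_add_barrier(tp);
	xfs_defer_add(tp, &item2->xefi_list, &xfs_extent_free_defer_type);

The second xfs_defer_add() finds the barrier as the last pending item, fails
the dfp_ops comparison in xfs_defer_find_last(), and therefore allocates a
fresh xfs_defer_pending instead of appending to the first one.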
+
+/*
+ * Create a pending deferred work item to replay the recovered intent item
+ * and add it to the list.
+ */
+void
+xfs_defer_start_recovery(
+       struct xfs_log_item             *lip,
+       struct list_head                *r_dfops,
+       const struct xfs_defer_op_type  *ops)
+{
+       struct xfs_defer_pending        *dfp = xfs_defer_alloc(r_dfops, ops);
+
+       dfp->dfp_intent = lip;
+}
+
+/*
+ * Cancel a deferred work item created to recover a log intent item.  @dfp
+ * will be freed after this function returns.
+ */
+void
+xfs_defer_cancel_recovery(
+       struct xfs_mount                *mp,
+       struct xfs_defer_pending        *dfp)
+{
+       xfs_defer_pending_abort(mp, dfp);
+       xfs_defer_pending_cancel_work(mp, dfp);
+}
+
+/* Replay the deferred work item created from a recovered log intent item. */
+int
+xfs_defer_finish_recovery(
+       struct xfs_mount                *mp,
+       struct xfs_defer_pending        *dfp,
+       struct list_head                *capture_list)
+{
+       const struct xfs_defer_op_type  *ops = dfp->dfp_ops;
+       int                             error;
+
+       /* dfp is freed by recover_work and must not be accessed afterwards */
+       error = ops->recover_work(dfp, capture_list);
+       if (error)
+               trace_xlog_intent_recovery_failed(mp, ops, error);
+       return error;
 }
 
 /*
@@ -707,7 +969,7 @@ xfs_defer_ops_capture(
                return ERR_PTR(error);
 
        /* Create an object to capture the defer ops. */
-       dfc = kmem_zalloc(sizeof(*dfc), KM_NOFS);
+       dfc = kzalloc(sizeof(*dfc), GFP_KERNEL | __GFP_NOFAIL);
        INIT_LIST_HEAD(&dfc->dfc_list);
        INIT_LIST_HEAD(&dfc->dfc_dfops);
 
@@ -739,7 +1001,7 @@ xfs_defer_ops_capture(
         * transaction.
         */
        for (i = 0; i < dfc->dfc_held.dr_inos; i++) {
-               ASSERT(xfs_isilocked(dfc->dfc_held.dr_ip[i], XFS_ILOCK_EXCL));
+               xfs_assert_ilocked(dfc->dfc_held.dr_ip[i], XFS_ILOCK_EXCL);
                ihold(VFS_I(dfc->dfc_held.dr_ip[i]));
        }
 
@@ -751,12 +1013,13 @@ xfs_defer_ops_capture(
 
 /* Release all resources that we used to capture deferred ops. */
 void
-xfs_defer_ops_capture_free(
+xfs_defer_ops_capture_abort(
        struct xfs_mount                *mp,
        struct xfs_defer_capture        *dfc)
 {
        unsigned short                  i;
 
+       xfs_defer_pending_abort_list(mp, &dfc->dfc_dfops);
        xfs_defer_cancel_list(mp, &dfc->dfc_dfops);
 
        for (i = 0; i < dfc->dfc_held.dr_bufs; i++)
@@ -765,7 +1028,7 @@ xfs_defer_ops_capture_free(
        for (i = 0; i < dfc->dfc_held.dr_inos; i++)
                xfs_irele(dfc->dfc_held.dr_ip[i]);
 
-       kmem_free(dfc);
+       kfree(dfc);
 }
 
 /*
@@ -797,7 +1060,7 @@ xfs_defer_ops_capture_and_commit(
        /* Commit the transaction and add the capture structure to the list. */
        error = xfs_trans_commit(tp);
        if (error) {
-               xfs_defer_ops_capture_free(mp, dfc);
+               xfs_defer_ops_capture_abort(mp, dfc);
                return error;
        }
 
@@ -823,7 +1086,11 @@ xfs_defer_ops_continue(
        ASSERT(!(tp->t_flags & XFS_TRANS_DIRTY));
 
        /* Lock the captured resources to the new transaction. */
-       if (dfc->dfc_held.dr_inos == 2)
+       if (dfc->dfc_held.dr_inos > 2) {
+               xfs_sort_inodes(dfc->dfc_held.dr_ip, dfc->dfc_held.dr_inos);
+               xfs_lock_inodes(dfc->dfc_held.dr_ip, dfc->dfc_held.dr_inos,
+                               XFS_ILOCK_EXCL);
+       } else if (dfc->dfc_held.dr_inos == 2)
                xfs_lock_two_inodes(dfc->dfc_held.dr_ip[0], XFS_ILOCK_EXCL,
                                    dfc->dfc_held.dr_ip[1], XFS_ILOCK_EXCL);
        else if (dfc->dfc_held.dr_inos == 1)
@@ -841,7 +1108,7 @@ xfs_defer_ops_continue(
        list_splice_init(&dfc->dfc_dfops, &tp->t_dfops);
        tp->t_flags |= dfc->dfc_tpflags;
 
-       kmem_free(dfc);
+       kfree(dfc);
 }
 
 /* Release the resources captured and continued during recovery. */
@@ -908,6 +1175,10 @@ xfs_defer_init_item_caches(void)
        error = xfs_attr_intent_init_cache();
        if (error)
                goto err;
+       error = xfs_exchmaps_intent_init_cache();
+       if (error)
+               goto err;
+
        return 0;
 err:
        xfs_defer_destroy_item_caches();
@@ -918,6 +1189,7 @@ err:
 void
 xfs_defer_destroy_item_caches(void)
 {
+       xfs_exchmaps_intent_destroy_cache();
        xfs_attr_intent_destroy_cache();
        xfs_extfree_intent_destroy_cache();
        xfs_bmap_intent_destroy_cache();
@@ -925,3 +1197,36 @@ xfs_defer_destroy_item_caches(void)
        xfs_rmap_intent_destroy_cache();
        xfs_defer_destroy_cache();
 }
+
+/*
+ * Mark a deferred work item so that it will be requeued indefinitely without
+ * being finished.  Caller must ensure there are no data dependencies on this
+ * work item in the meantime.
+ */
+void
+xfs_defer_item_pause(
+       struct xfs_trans                *tp,
+       struct xfs_defer_pending        *dfp)
+{
+       ASSERT(!(dfp->dfp_flags & XFS_DEFER_PAUSED));
+
+       dfp->dfp_flags |= XFS_DEFER_PAUSED;
+
+       trace_xfs_defer_item_pause(tp->t_mountp, dfp);
+}
+
+/*
+ * Release a paused deferred work item so that it will be finished during the
+ * next transaction roll.
+ */
+void
+xfs_defer_item_unpause(
+       struct xfs_trans                *tp,
+       struct xfs_defer_pending        *dfp)
+{
+       ASSERT(dfp->dfp_flags & XFS_DEFER_PAUSED);
+
+       dfp->dfp_flags &= ~XFS_DEFER_PAUSED;
+
+       trace_xfs_defer_item_unpause(tp->t_mountp, dfp);
+}
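
A hedged sketch of the pause machinery in use (the type and item names are
hypothetical): a paused item survives xfs_defer_finish() untouched, riding
along in t_dfops and being relogged as needed, until the caller unpauses it:

	dfp = xfs_defer_add(tp, &item->list, &some_defer_type);
	xfs_defer_item_pause(tp, dfp);

	error = xfs_defer_finish(&tp);	/* finishes everything else */
	...
	xfs_defer_item_unpause(tp, dfp);
	error = xfs_defer_finish(&tp);	/* now finishes this item too */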