1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2000-2001,2005-2006 Silicon Graphics, Inc.
4 * Copyright (C) 2010 Red Hat, Inc.
8 #include "libxfs_priv.h"
10 #include "xfs_shared.h"
11 #include "xfs_format.h"
12 #include "xfs_log_format.h"
13 #include "xfs_trans_resv.h"
14 #include "xfs_mount.h"
15 #include "xfs_inode_buf.h"
16 #include "xfs_inode_fork.h"
17 #include "xfs_inode.h"
18 #include "xfs_trans.h"
20 #include "xfs_defer.h"
/*
 * NOTE(review): this chunk is a lossy extraction of xfsprogs libxfs/trans.c.
 * Logical lines are split across physical lines and many original lines are
 * missing (the leading numbers are the original line numbers, which jump).
 * Do not compile as-is; recover the missing text from the upstream file.
 */
/* Forward declarations for the static helpers defined later in this file. */
22 static void xfs_trans_free_items(struct xfs_trans
*tp
);
23 STATIC
struct xfs_trans
*xfs_trans_dup(struct xfs_trans
*tp
);
24 static int xfs_trans_reserve(struct xfs_trans
*tp
, struct xfs_trans_res
*resp
,
25 uint blocks
, uint rtextents
);
26 static int __xfs_trans_commit(struct xfs_trans
*tp
, bool regrant
);
29 * Simple transaction interface
/*
 * Zone (slab) cache that all struct xfs_trans allocations in this file come
 * from (see the kmem_zone_zalloc/kmem_zone_free call sites below).
 */
32 kmem_zone_t
*xfs_trans_zone
;
/*
 * Fragment of the transaction-reservation init routine: precomputes the
 * per-mount reservation table into mp->m_resv.  NOTE(review): the enclosing
 * function definition (presumably libxfs_trans_init) is not visible here.
 */
35 * Initialize the precomputed transaction reservation values
36 * in the mount structure.
42 xfs_trans_resv_calc(mp
, &mp
->m_resv
);
/*
 * libxfs_trans_add_item: attach a log item to the transaction's t_items list.
 * The asserts check the item belongs to the same mount/AIL as the transaction
 * and is not already on a transaction or marked dirty.
 * NOTE(review): lossy extraction -- the function signature's tp parameter and
 * the braces are among the missing original lines.
 */
46 * Add the given log item to the transaction's list of log items.
49 libxfs_trans_add_item(
51 struct xfs_log_item
*lip
)
53 ASSERT(lip
->li_mountp
== tp
->t_mountp
);
54 ASSERT(lip
->li_ailp
== tp
->t_mountp
->m_ail
);
55 ASSERT(list_empty(&lip
->li_trans
));
56 ASSERT(!test_bit(XFS_LI_DIRTY
, &lip
->li_flags
));
/* Tail-insert keeps items in the order they joined the transaction. */
58 list_add_tail(&lip
->li_trans
, &tp
->t_items
);
/*
 * libxfs_trans_del_item: inverse of add_item -- clear the per-item dirty bit
 * and unhook the item from whatever transaction list it is on.
 */
62 * Unlink and free the given descriptor.
65 libxfs_trans_del_item(
66 struct xfs_log_item
*lip
)
68 clear_bit(XFS_LI_DIRTY
, &lip
->li_flags
);
69 list_del_init(&lip
->li_trans
);
/*
 * libxfs_trans_roll: commit the current permanent transaction and replace
 * *tpp with a duplicate that inherits its log reservation, so a long-running
 * operation can keep going without pinning the whole log.
 * Sequence: copy log res/count -> dup -> commit old (regrant=true) ->
 * re-reserve on the new transaction.
 * NOTE(review): error-return plumbing between these fragments is missing
 * from this extraction.
 */
73 * Roll from one trans in the sequence of PERMANENT transactions to
74 * the next: permanent transactions are only flushed out when
75 * committed with XFS_TRANS_RELEASE_LOG_RES, but we still want as soon
76 * as possible to let chunks of it go to the log. So we commit the
77 * chunk we've been working on and get a new transaction to continue.
81 struct xfs_trans
**tpp
)
83 struct xfs_trans
*trans
= *tpp
;
84 struct xfs_trans_res tres
;
88 * Copy the critical parameters from one trans to the next.
90 tres
.tr_logres
= trans
->t_log_res
;
91 tres
.tr_logcount
= trans
->t_log_count
;
/* Hand the caller the duplicate before committing the original. */
93 *tpp
= xfs_trans_dup(trans
);
96 * Commit the current transaction.
97 * If this commit failed, then it'd just unlock those items that
98 * are marked to be released. That also means that a filesystem shutdown
99 * is in progress. The caller takes the responsibility to cancel
100 * the duplicate transaction that gets returned.
102 error
= __xfs_trans_commit(trans
, true);
107 * Reserve space in the log for the next transaction.
108 * This also pushes items in the "AIL", the list of logged items,
109 * out to disk if they are taking up space at the tail of the log
110 * that we want to use. This requires that either nothing be locked
111 * across this call, or that anything that is locked be logged in
112 * the prior and the next transactions.
114 tres
.tr_logflags
= XFS_TRANS_PERM_LOG_RES
;
115 return xfs_trans_reserve(*tpp
, &tres
, 0, 0);
/* xfs_trans_free: return the transaction structure to its zone cache. */
119 * Free the transaction structure. If there is more clean up
120 * to do when the structure is freed, add it here.
124 struct xfs_trans
*tp
)
126 kmem_zone_free(xfs_trans_zone
, tp
);
/*
 * xfs_trans_dup: allocate a sibling transaction that takes over the
 * permanent log reservation, the unused block reservation, the writecount
 * reference and the deferred-ops list of tp.  tp keeps only the blocks it
 * has already used.  ("are no inherited" below is an upstream typo for
 * "are not inherited".)
 */
130 * This is called to create a new transaction which will share the
131 * permanent log reservation of the given transaction. The remaining
132 * unused block and rt extent reservations are also inherited. This
133 * implies that the original transaction is no longer allowed to allocate
134 * blocks. Locks and log items, however, are no inherited. They must
135 * be added to the new transaction explicitly.
137 STATIC
struct xfs_trans
*
139 struct xfs_trans
*tp
)
141 struct xfs_trans
*ntp
;
143 ntp
= kmem_zone_zalloc(xfs_trans_zone
, KM_SLEEP
);
146 * Initialize the new transaction structure.
148 ntp
->t_mountp
= tp
->t_mountp
;
149 INIT_LIST_HEAD(&ntp
->t_items
);
150 INIT_LIST_HEAD(&ntp
->t_dfops
);
151 ntp
->t_firstblock
= NULLFSBLOCK
;
/* Only permanent-log-res transactions may be duplicated. */
153 ASSERT(tp
->t_flags
& XFS_TRANS_PERM_LOG_RES
);
155 ntp
->t_flags
= XFS_TRANS_PERM_LOG_RES
|
156 (tp
->t_flags
& XFS_TRANS_RESERVE
) |
157 (tp
->t_flags
& XFS_TRANS_NO_WRITECOUNT
);
158 /* We gave our writer reference to the new transaction */
159 tp
->t_flags
|= XFS_TRANS_NO_WRITECOUNT
;
/* New trans gets the unused part of the block reservation ... */
161 ntp
->t_blk_res
= tp
->t_blk_res
- tp
->t_blk_res_used
;
/* ... old trans shrinks to exactly what it already consumed. */
162 tp
->t_blk_res
= tp
->t_blk_res_used
;
164 /* move deferred ops over to the new tp */
165 xfs_defer_move(ntp
, tp
);
/*
 * xfs_trans_reserve: claim disk blocks, log space and rt extents for tp.
 * NOTE(review): heavily truncated by the extraction -- the error-unwind
 * labels referenced by the trailing comment, the actual decrement of the
 * superblock counters, and most control-flow braces are missing.
 */
171 * This is called to reserve free disk blocks and log space for the
172 * given transaction. This must be done before allocating any resources
173 * within the transaction.
175 * This will return ENOSPC if there are not enough blocks available.
176 * It will sleep waiting for available log space.
177 * The only valid value for the flags parameter is XFS_RES_LOG_PERM, which
178 * is used by long running transactions. If any one of the reservations
179 * fails then they will all be backed out.
181 * This does not do quota reservations. That typically is done by the
186 struct xfs_trans
*tp
,
187 struct xfs_trans_res
*resp
,
194 * Attempt to reserve the needed disk blocks by decrementing
195 * the number needed from the number available. This will
196 * fail if the count would go below zero.
/* Presumably returns -ENOSPC when fdblocks < blocks -- body missing here. */
199 if (tp
->t_mountp
->m_sb
.sb_fdblocks
< blocks
)
201 tp
->t_blk_res
+= blocks
;
205 * Reserve the log space needed for this transaction.
207 if (resp
->tr_logres
> 0) {
/* A re-reservation must match the existing log res/count exactly. */
208 ASSERT(tp
->t_log_res
== 0 ||
209 tp
->t_log_res
== resp
->tr_logres
);
210 ASSERT(tp
->t_log_count
== 0 ||
211 tp
->t_log_count
== resp
->tr_logcount
);
213 if (resp
->tr_logflags
& XFS_TRANS_PERM_LOG_RES
)
214 tp
->t_flags
|= XFS_TRANS_PERM_LOG_RES
;
216 ASSERT(!(tp
->t_flags
& XFS_TRANS_PERM_LOG_RES
));
218 tp
->t_log_res
= resp
->tr_logres
;
219 tp
->t_log_count
= resp
->tr_logcount
;
223 * Attempt to reserve the needed realtime extents by decrementing
224 * the number needed from the number available. This will
225 * fail if the count would go below zero.
228 if (tp
->t_mountp
->m_sb
.sb_rextents
< rtextents
) {
237 * Error cases jump to one of these labels to undo any
238 * reservations which have already been performed.
/*
 * libxfs_trans_alloc: allocate a zeroed transaction from the zone, initialise
 * its lists, take the requested reservations, and return it via *tpp.  On
 * reservation failure the transaction is cancelled (see xfs_trans_cancel call
 * below).  NOTE(review): the signature's blocks/flags parameters and the
 * error-return lines are missing from this extraction.
 */
249 struct xfs_mount
*mp
,
250 struct xfs_trans_res
*resp
,
252 unsigned int rtextents
,
254 struct xfs_trans
**tpp
)
257 struct xfs_trans
*tp
;
260 tp
= kmem_zone_zalloc(xfs_trans_zone
,
261 (flags
& XFS_TRANS_NOFS
) ? KM_NOFS
: KM_SLEEP
);
263 INIT_LIST_HEAD(&tp
->t_items
);
264 INIT_LIST_HEAD(&tp
->t_dfops
);
265 tp
->t_firstblock
= NULLFSBLOCK
;
267 error
= xfs_trans_reserve(tp
, resp
, blocks
, rtextents
);
/* Reservation failed: undo the allocation. */
269 xfs_trans_cancel(tp
);
/* Debug trace -- presumably guarded by an #ifdef that is missing here. */
273 fprintf(stderr
, "allocated new transaction %p\n", tp
);
/*
 * libxfs_trans_alloc_empty: zero-reservation transaction used for read-only
 * metadata queries; must be cancelled, never committed dirty.
 */
280 * Create an empty transaction with no reservation. This is a defensive
281 * mechanism for routines that query metadata without actually modifying
282 * them -- if the metadata being queried is somehow cross-linked (think a
283 * btree block pointer that points higher in the tree), we risk deadlock.
284 * However, blocks grabbed as part of a transaction can be re-grabbed.
285 * The verifiers will notice the corrupt block and the operation will fail
286 * back to userspace without deadlocking.
288 * Note the zero-length reservation; this transaction MUST be cancelled
289 * without any dirty data.
292 libxfs_trans_alloc_empty(
293 struct xfs_mount
*mp
,
294 struct xfs_trans
**tpp
)
296 struct xfs_trans_res resv
= {0};
298 return xfs_trans_alloc(mp
, &resv
, 0, 0, XFS_TRANS_NO_WRITECOUNT
, tpp
);
/*
 * libxfs_trans_alloc_rollable: wrapper that borrows tr_itruncate purely to
 * get XFS_TRANS_PERM_LOG_RES set so the roll machinery's asserts pass.
 */
302 * Allocate a transaction that can be rolled. Since userspace doesn't have
303 * a need for log reservations, we really only tr_itruncate to get the
304 * permanent log reservation flag to avoid blowing asserts.
307 libxfs_trans_alloc_rollable(
308 struct xfs_mount
*mp
,
310 struct xfs_trans
**tpp
)
312 return libxfs_trans_alloc(mp
, &M_RES(mp
)->tr_itruncate
, blocks
,
/*
 * libxfs_trans_cancel: abandon a transaction -- cancel any deferred ops on a
 * permanent transaction, drop all joined items, then (presumably, lines
 * missing) free the transaction itself.  Appears NULL-tolerant upstream;
 * the guard is not visible in this extraction.
 */
318 struct xfs_trans
*tp
)
321 struct xfs_trans
*otp
= tp
;
326 if (tp
->t_flags
& XFS_TRANS_PERM_LOG_RES
)
327 xfs_defer_cancel(tp
);
329 xfs_trans_free_items(tp
);
334 fprintf(stderr
, "## cancelled transaction %p\n", otp
);
/*
 * libxfs_trans_ijoin: attach an inode to the transaction, creating its inode
 * log item on first use and recording the caller's lock flags on it.
 * NOTE(review): the signature and the "iip = ip->i_itemp" assignment are
 * among the missing lines.
 */
345 xfs_inode_log_item_t
*iip
;
347 ASSERT(ip
->i_transp
== NULL
);
348 if (ip
->i_itemp
== NULL
)
349 xfs_inode_item_init(ip
, ip
->i_mount
);
351 ASSERT(iip
->ili_flags
== 0);
352 ASSERT(iip
->ili_inode
!= NULL
);
354 ASSERT(iip
->ili_lock_flags
== 0);
355 iip
->ili_lock_flags
= lock_flags
;
357 xfs_trans_add_item(tp
, (xfs_log_item_t
*)(iip
));
361 fprintf(stderr
, "ijoin'd inode %llu, transaction %p\n", ip
->i_ino
, tp
);
/*
 * libxfs_trans_ijoin_ref: re-join an inode that is already associated with
 * this transaction (asserts it is) by delegating to xfs_trans_ijoin.
 */
366 libxfs_trans_ijoin_ref(
371 ASSERT(ip
->i_transp
== tp
);
372 ASSERT(ip
->i_itemp
!= NULL
);
374 xfs_trans_ijoin(tp
, ip
, lock_flags
);
377 fprintf(stderr
, "ijoin_ref'd inode %llu, transaction %p\n", ip
->i_ino
, tp
);
/*
 * libxfs_trans_inode_alloc_buf: mark a buffer (already joined to tp) as an
 * inode-allocation buffer and type it as an on-disk inode buffer for logging.
 */
382 libxfs_trans_inode_alloc_buf(
386 xfs_buf_log_item_t
*bip
= bp
->b_log_item
;
388 ASSERT(bp
->bp_transp
== tp
);
390 bip
->bli_flags
|= XFS_BLI_INODE_ALLOC_BUF
;
391 xfs_trans_buf_set_type(tp
, bp
, XFS_BLFT_DINO_BUF
);
/*
 * libxfs_trans_log_inode (fragments): mark inode fields dirty in the
 * transaction.  Sets XFS_TRANS_DIRTY on tp, XFS_LI_DIRTY on the inode log
 * item, then accumulates the field mask into ili_fields (OR-ing in
 * ili_last_fields to coordinate with the flush paths).
 */
395 * This is called to mark the fields indicated in fieldmask as needing
396 * to be logged when the transaction is committed. The inode must
397 * already be associated with the given transaction.
399 * The values for fieldmask are defined in xfs_log_format.h. We always
400 * log all of the core inode if any of it has changed, and we always log
401 * all of the inline data/extents/b-tree root if any of them has changed.
409 ASSERT(ip
->i_transp
== tp
);
410 ASSERT(ip
->i_itemp
!= NULL
);
412 fprintf(stderr
, "dirtied inode %llu, transaction %p\n", ip
->i_ino
, tp
);
415 tp
->t_flags
|= XFS_TRANS_DIRTY
;
416 set_bit(XFS_LI_DIRTY
, &ip
->i_itemp
->ili_item
.li_flags
);
419 * Always OR in the bits from the ili_last_fields field.
420 * This is to coordinate with the xfs_iflush() and xfs_iflush_done()
421 * routines in the eventual clearing of the ilf_fields bits.
422 * See the big comment in xfs_iflush() for an explanation of
423 * this coordination mechanism.
425 flags
|= ip
->i_itemp
->ili_last_fields
;
426 ip
->i_itemp
->ili_fields
|= flags
;
/*
 * libxfs_trans_roll_inode: log the inode core, roll the transaction, and
 * re-join the inode to the new transaction.  NOTE(review): the error check
 * between roll and re-join is missing from this extraction.
 */
430 libxfs_trans_roll_inode(
431 struct xfs_trans
**tpp
,
432 struct xfs_inode
*ip
)
436 xfs_trans_log_inode(*tpp
, ip
, XFS_ILOG_CORE
);
437 error
= xfs_trans_roll(tpp
);
439 xfs_trans_ijoin(*tpp
, ip
, 0);
/*
 * libxfs_trans_dirty_buf: flag both the transaction (XFS_TRANS_DIRTY) and
 * the buffer's log item (XFS_LI_DIRTY) as modified.
 */
445 * Mark a buffer dirty in the transaction.
448 libxfs_trans_dirty_buf(
449 struct xfs_trans
*tp
,
452 struct xfs_buf_log_item
*bip
= bp
->b_log_item
;
454 ASSERT(bp
->bp_transp
== tp
);
458 fprintf(stderr
, "dirtied buffer %p, transaction %p\n", bp
, tp
);
460 tp
->t_flags
|= XFS_TRANS_DIRTY
;
461 set_bit(XFS_LI_DIRTY
, &bip
->bli_item
.li_flags
);
/*
 * libxfs_trans_log_buf: dirty the buffer and record the logged byte range
 * [first, last] (inclusive, 0-based within the buffer) on its log item.
 */
465 * This is called to mark bytes first through last inclusive of the given
466 * buffer as needing to be logged when the transaction is committed.
467 * The buffer must already be associated with the given transaction.
469 * First and last are numbers relative to the beginning of this buffer,
470 * so the first byte in the buffer is numbered 0 regardless of the
474 libxfs_trans_log_buf(
475 struct xfs_trans
*tp
,
480 struct xfs_buf_log_item
*bip
= bp
->b_log_item
;
482 ASSERT((first
<= last
) && (last
< bp
->b_bcount
));
484 xfs_trans_dirty_buf(tp
, bp
);
485 xfs_buf_item_log(bip
, first
, last
);
/*
 * libxfs_trans_ordered_buf: in userspace an "ordered" buffer is just logged
 * whole; the pre-existing dirty bit is sampled first so the caller can see
 * the "already logged" condition.
 */
489 * For userspace, ordered buffers just need to be marked dirty so
490 * the transaction commit will write them and mark them up-to-date.
491 * In essence, they are just like any other logged buffer in userspace.
493 * If the buffer is already dirty, trigger the "already logged" return condition.
496 libxfs_trans_ordered_buf(
497 struct xfs_trans
*tp
,
500 struct xfs_buf_log_item
*bip
= bp
->b_log_item
;
503 ret
= test_bit(XFS_LI_DIRTY
, &bip
->bli_item
.li_flags
);
504 libxfs_trans_log_buf(tp
, bp
, 0, bp
->b_bcount
);
/*
 * Buf-log-item teardown helper: detach the item from its buffer and free it
 * back to xfs_buf_item_zone.  NOTE(review): the function name line is
 * missing from this extraction (likely xfs_buf_item_free or similar).
 */
510 struct xfs_buf_log_item
*bip
)
512 struct xfs_buf
*bp
= bip
->bli_buf
;
514 bp
->b_log_item
= NULL
;
515 kmem_zone_free(xfs_buf_item_zone
, bip
);
/*
 * libxfs_trans_brelse: release a buffer from the transaction.  Recursed
 * joins just drop a recursion count; dirty or stale buffers must stay until
 * commit; otherwise the item is unhooked and its reference dropped.
 * NOTE(review): the early-return bodies for the dirty/stale/recursion cases
 * are among the missing lines.
 */
523 xfs_buf_log_item_t
*bip
;
525 fprintf(stderr
, "released buffer %p, transaction %p\n", bp
, tp
);
/* No-transaction path: buffer is not joined to anything. */
529 ASSERT(bp
->bp_transp
== NULL
);
533 ASSERT(bp
->bp_transp
== tp
);
534 bip
= bp
->b_log_item
;
535 ASSERT(bip
->bli_item
.li_type
== XFS_LI_BUF
);
536 if (bip
->bli_recur
> 0) {
540 /* If dirty/stale, can't release till transaction committed */
541 if (bip
->bli_flags
& XFS_BLI_STALE
)
543 if (test_bit(XFS_LI_DIRTY
, &bip
->bli_item
.li_flags
))
545 xfs_trans_del_item(&bip
->bli_item
);
546 if (bip
->bli_flags
& XFS_BLI_HOLD
)
547 bip
->bli_flags
&= ~XFS_BLI_HOLD
;
548 xfs_buf_item_put(bip
);
/*
 * libxfs_trans_binval: invalidate a buffer -- cancel any pending write,
 * mark the log item stale/not-dirty, flip the log format flags from
 * INODE_BUF to CANCEL, and dirty both item and transaction so the
 * cancellation is committed.
 */
558 xfs_buf_log_item_t
*bip
= bp
->b_log_item
;
560 fprintf(stderr
, "binval'd buffer %p, transaction %p\n", bp
, tp
);
563 ASSERT(bp
->bp_transp
== tp
);
/* Already stale: presumably an early return -- body missing here. */
566 if (bip
->bli_flags
& XFS_BLI_STALE
)
568 XFS_BUF_UNDELAYWRITE(bp
);
570 bip
->bli_flags
|= XFS_BLI_STALE
;
571 bip
->bli_flags
&= ~XFS_BLI_DIRTY
;
572 bip
->bli_format
.blf_flags
&= ~XFS_BLF_INODE_BUF
;
573 bip
->bli_format
.blf_flags
|= XFS_BLF_CANCEL
;
574 set_bit(XFS_LI_DIRTY
, &bip
->bli_item
.li_flags
);
575 tp
->t_flags
|= XFS_TRANS_DIRTY
;
/*
 * libxfs_trans_bjoin: attach a previously unjoined buffer to tp by creating
 * its buf log item and adding it to the item list.
 */
583 xfs_buf_log_item_t
*bip
;
585 ASSERT(bp
->bp_transp
== NULL
);
587 fprintf(stderr
, "bjoin'd buffer %p, transaction %p\n", bp
, tp
);
590 xfs_buf_item_init(bp
, tp
->t_mountp
);
591 bip
= bp
->b_log_item
;
592 xfs_trans_add_item(tp
, (xfs_log_item_t
*)bip
);
/*
 * libxfs_trans_bhold: set XFS_BLI_HOLD so the buffer is retained (not
 * released) when the transaction commits or cancels.
 */
601 xfs_buf_log_item_t
*bip
= bp
->b_log_item
;
603 ASSERT(bp
->bp_transp
== tp
);
606 fprintf(stderr
, "bhold'd buffer %p, transaction %p\n", bp
, tp
);
609 bip
->bli_flags
|= XFS_BLI_HOLD
;
/*
 * libxfs_trans_get_buf_map: transactional get-buffer.  With no transaction,
 * fall straight through to libxfs_getbuf_map; otherwise first look for the
 * buffer among items already joined to tp (recursive grab), and only then
 * fetch it and bjoin it.  NOTE(review): the recursion-count increment and
 * return statements are among the missing lines.
 */
613 libxfs_trans_get_buf_map(
615 struct xfs_buftarg
*btp
,
616 struct xfs_buf_map
*map
,
621 xfs_buf_log_item_t
*bip
;
/* tp == NULL fast path (the guard itself is missing from this extraction). */
624 return libxfs_getbuf_map(btp
, map
, nmaps
, 0);
626 bp
= xfs_trans_buf_item_match(tp
, btp
, map
, nmaps
);
628 ASSERT(bp
->bp_transp
== tp
);
629 bip
= bp
->b_log_item
;
635 bp
= libxfs_getbuf_map(btp
, map
, nmaps
, 0);
639 fprintf(stderr
, "trans_get_buf buffer %p, transaction %p\n", bp
, tp
);
642 libxfs_trans_bjoin(tp
, bp
);
643 bip
= bp
->b_log_item
;
/*
 * libxfs_trans_getsb: same pattern as get_buf_map but for the superblock
 * buffer, built from a single-extent map at XFS_SB_DADDR.
 */
655 xfs_buf_log_item_t
*bip
;
656 int len
= XFS_FSS_TO_BB(mp
, 1);
657 DEFINE_SINGLE_BUF_MAP(map
, XFS_SB_DADDR
, len
);
660 return libxfs_getsb(mp
, flags
);
662 bp
= xfs_trans_buf_item_match(tp
, mp
->m_dev
, &map
, 1);
664 ASSERT(bp
->bp_transp
== tp
);
665 bip
= bp
->b_log_item
;
671 bp
= libxfs_getsb(mp
, flags
);
673 fprintf(stderr
, "trans_get_sb buffer %p, transaction %p\n", bp
, tp
);
676 libxfs_trans_bjoin(tp
, bp
);
677 bip
= bp
->b_log_item
;
/*
 * libxfs_trans_read_buf_map: transactional read.  Failure maps to -EAGAIN
 * when the caller asked for a trylock, -ENOMEM otherwise; a match against
 * items already in tp is preferred over re-reading from disk.
 */
683 libxfs_trans_read_buf_map(
686 struct xfs_buftarg
*btp
,
687 struct xfs_buf_map
*map
,
691 const struct xfs_buf_ops
*ops
)
694 xfs_buf_log_item_t
*bip
;
/* tp == NULL path: plain read, no join. */
700 bp
= libxfs_readbuf_map(btp
, map
, nmaps
, flags
, ops
);
702 return (flags
& XBF_TRYLOCK
) ? -EAGAIN
: -ENOMEM
;
709 bp
= xfs_trans_buf_item_match(tp
, btp
, map
, nmaps
);
711 ASSERT(bp
->bp_transp
== tp
);
712 ASSERT(bp
->b_log_item
!= NULL
);
713 bip
= bp
->b_log_item
;
718 bp
= libxfs_readbuf_map(btp
, map
, nmaps
, flags
, ops
);
720 return (flags
& XBF_TRYLOCK
) ? -EAGAIN
: -ENOMEM
;
726 fprintf(stderr
, "trans_read_buf buffer %p, transaction %p\n", bp
, tp
);
729 xfs_trans_bjoin(tp
, bp
);
730 bip
= bp
->b_log_item
;
/*
 * libxfs_trans_mod_sb (fragments): accumulate superblock counter deltas in
 * the transaction (applied at commit by __xfs_trans_commit below).  For
 * reserved fdblocks, a negative delta is charged against the transaction's
 * block reservation and over-use is reported.  NOTE(review): the switch
 * header, break statements and default case are among the missing lines.
 */
742 * Record the indicated change to the given field for application
743 * to the file system's superblock when the transaction commits.
744 * For now, just store the change in the transaction structure.
745 * Mark the transaction structure to indicate that the superblock
746 * needs to be updated before committing.
748 * Originally derived from xfs_trans_mod_sb().
757 case XFS_TRANS_SB_RES_FDBLOCKS
:
759 case XFS_TRANS_SB_FDBLOCKS
:
761 tp
->t_blk_res_used
+= (uint
)-delta
;
762 if (tp
->t_blk_res_used
> tp
->t_blk_res
) {
764 _("Transaction block reservation exceeded! %u > %u\n"),
765 tp
->t_blk_res_used
, tp
->t_blk_res
);
769 tp
->t_fdblocks_delta
+= delta
;
771 case XFS_TRANS_SB_ICOUNT
:
773 tp
->t_icount_delta
+= delta
;
775 case XFS_TRANS_SB_IFREE
:
776 tp
->t_ifree_delta
+= delta
;
778 case XFS_TRANS_SB_FREXTENTS
:
779 tp
->t_frextents_delta
+= delta
;
/* Any superblock modification dirties both sb and transaction. */
785 tp
->t_flags
|= (XFS_TRANS_SB_DIRTY
| XFS_TRANS_DIRTY
);
/*
 * Inode-log-item teardown helper: free the item back to xfs_ili_zone.
 * NOTE(review): the function name line and the ip->i_itemp = NULL reset are
 * missing from this extraction.
 */
790 struct xfs_inode_log_item
*iip
)
792 struct xfs_inode
*ip
= iip
->ili_inode
;
795 kmem_zone_free(xfs_ili_zone
, iip
);
/*
 * inode_item_done: commit-time handler for an inode log item.  If nothing
 * was logged, just disassociate; otherwise map the inode to its buffer,
 * flush it (libxfs_iflush_int), write the buffer, and drop the item.
 * Flush failures are reported but the disassociation still happens.
 */
800 * Transaction commital code follows (i.e. write to disk in libxfs)
802 * XXX (dgc): should failure to flush the inode (e.g. due to uncorrected
803 * corruption) result in transaction commit failure w/ EFSCORRUPTED?
807 xfs_inode_log_item_t
*iip
)
816 mp
= iip
->ili_item
.li_mountp
;
/* Nothing dirtied on this inode: detach and bail out. */
819 if (!(iip
->ili_fields
& XFS_ILOG_ALL
)) {
820 ip
->i_transp
= NULL
; /* disassociate from transaction */
821 iip
->ili_flags
= 0; /* reset all flags */
826 * Get the buffer containing the on-disk inode.
828 error
= xfs_imap_to_bp(mp
, NULL
, &ip
->i_imap
, &dip
, &bp
, 0, 0);
830 fprintf(stderr
, _("%s: warning - imap_to_bp failed (%d)\n"),
836 * Flush the inode and disassociate it from the transaction regardless
837 * of whether the flush succeed or not. If we fail the flush, make sure
838 * we still release the buffer reference we currently hold.
840 error
= libxfs_iflush_int(ip
, bp
);
841 ip
->i_transp
= NULL
; /* disassociate from transaction */
842 bp
->b_transp
= NULL
; /* remove xact ptr */
845 fprintf(stderr
, _("%s: warning - iflush_int failed (%d)\n"),
851 libxfs_writebuf(bp
, 0);
853 fprintf(stderr
, "flushing dirty inode %llu, buffer %p\n",
857 xfs_inode_item_put(iip
);
/*
 * buf_item_done: commit-time handler for a buffer log item.  Detach the
 * buffer from the transaction; if it was dirtied, write it out (honouring
 * any hold), then clear the hold and drop the item reference.
 */
862 xfs_buf_log_item_t
*bip
)
866 extern kmem_zone_t
*xfs_buf_item_zone
;
870 bp
->b_transp
= NULL
; /* remove xact ptr */
872 hold
= (bip
->bli_flags
& XFS_BLI_HOLD
);
873 if (bip
->bli_flags
& XFS_BLI_DIRTY
) {
875 fprintf(stderr
, "flushing/staling buffer %p (hold=%d)\n",
878 libxfs_writebuf_int(bp
, 0);
881 bip
->bli_flags
&= ~XFS_BLI_HOLD
;
882 xfs_buf_item_put(bip
);
/*
 * trans_committed (fragment): walk every item joined to the transaction and
 * dispatch to the commit-time handler by log item type (buffer vs inode);
 * anything else is reported as unrecognised.
 */
892 struct xfs_log_item
*lip
, *next
;
894 list_for_each_entry_safe(lip
, next
, &tp
->t_items
, li_trans
) {
895 xfs_trans_del_item(lip
);
897 if (lip
->li_type
== XFS_LI_BUF
)
898 buf_item_done((xfs_buf_log_item_t
*)lip
);
899 else if (lip
->li_type
== XFS_LI_INODE
)
900 inode_item_done((xfs_inode_log_item_t
*)lip
);
902 fprintf(stderr
, _("%s: unrecognised log item type\n"),
/*
 * buf_item_unlock: cancel-path handler for a buffer log item -- detach from
 * the transaction, clear the hold, and drop the item reference (no write,
 * unlike buf_item_done).
 */
911 xfs_buf_log_item_t
*bip
)
913 xfs_buf_t
*bp
= bip
->bli_buf
;
916 /* Clear the buffer's association with this transaction. */
917 bip
->bli_buf
->b_transp
= NULL
;
919 hold
= bip
->bli_flags
& XFS_BLI_HOLD
;
920 bip
->bli_flags
&= ~XFS_BLI_HOLD
;
921 xfs_buf_item_put(bip
);
/*
 * inode_item_unlock: cancel-path handler for an inode log item.
 * NOTE(review): the ip->i_transp = NULL statement referenced by the comment
 * below is missing from this extraction.
 */
928 xfs_inode_log_item_t
*iip
)
930 xfs_inode_t
*ip
= iip
->ili_inode
;
932 /* Clear the transaction pointer in the inode. */
936 xfs_inode_item_put(iip
);
/*
 * xfs_trans_free_items: cancel-time counterpart of trans_committed --
 * unhook every item and release it via the per-type unlock handler.
 */
939 /* Detach and unlock all of the items in a transaction */
941 xfs_trans_free_items(
942 struct xfs_trans
*tp
)
944 struct xfs_log_item
*lip
, *next
;
946 list_for_each_entry_safe(lip
, next
, &tp
->t_items
, li_trans
) {
947 xfs_trans_del_item(lip
);
948 if (lip
->li_type
== XFS_LI_BUF
)
949 buf_item_unlock((xfs_buf_log_item_t
*)lip
);
950 else if (lip
->li_type
== XFS_LI_INODE
)
951 inode_item_unlock((xfs_inode_log_item_t
*)lip
);
953 fprintf(stderr
, _("%s: unrecognised log item type\n"),
/*
 * __xfs_trans_commit: finish deferred ops on the final commit of a permanent
 * transaction, apply the accumulated superblock deltas when the sb was
 * dirtied, write out all joined items via trans_committed, and free the
 * items/transaction.  regrant=true is the roll path (defers are kept).
 * NOTE(review): the regrant parameter line, error handling after
 * xfs_defer_finish_noroll, and the final return are missing lines.
 */
961 * Commit the changes represented by this transaction
965 struct xfs_trans
*tp
,
975 * Finish deferred items on final commit. Only permanent transactions
976 * should ever have deferred ops.
978 WARN_ON_ONCE(!list_empty(&tp
->t_dfops
) &&
979 !(tp
->t_flags
& XFS_TRANS_PERM_LOG_RES
));
980 if (!regrant
&& (tp
->t_flags
& XFS_TRANS_PERM_LOG_RES
)) {
981 error
= xfs_defer_finish_noroll(&tp
);
/* Clean transaction: nothing to write back. */
986 if (!(tp
->t_flags
& XFS_TRANS_DIRTY
)) {
988 fprintf(stderr
, "committed clean transaction %p\n", tp
);
/* Fold the per-transaction counter deltas into the in-core superblock. */
993 if (tp
->t_flags
& XFS_TRANS_SB_DIRTY
) {
994 sbp
= &(tp
->t_mountp
->m_sb
);
995 if (tp
->t_icount_delta
)
996 sbp
->sb_icount
+= tp
->t_icount_delta
;
997 if (tp
->t_ifree_delta
)
998 sbp
->sb_ifree
+= tp
->t_ifree_delta
;
999 if (tp
->t_fdblocks_delta
)
1000 sbp
->sb_fdblocks
+= tp
->t_fdblocks_delta
;
1001 if (tp
->t_frextents_delta
)
1002 sbp
->sb_frextents
+= tp
->t_frextents_delta
;
1007 fprintf(stderr
, "committing dirty transaction %p\n", tp
);
1009 trans_committed(tp
);
1011 /* That's it for the transaction structure. Free it. */
1016 xfs_trans_free_items(tp
);
/*
 * libxfs_trans_commit: public commit entry point -- a plain (non-regrant)
 * commit of tp.
 */
1022 libxfs_trans_commit(
1023 struct xfs_trans
*tp
)
1025 return __xfs_trans_commit(tp
, false);