1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2000-2001,2005-2006 Silicon Graphics, Inc.
4 * Copyright (C) 2010 Red Hat, Inc.
8 #include "libxfs_priv.h"
10 #include "xfs_shared.h"
11 #include "xfs_format.h"
12 #include "xfs_log_format.h"
13 #include "xfs_trans_resv.h"
14 #include "xfs_mount.h"
15 #include "xfs_inode_buf.h"
16 #include "xfs_inode_fork.h"
17 #include "xfs_inode.h"
18 #include "xfs_trans.h"
20 #include "xfs_defer.h"
21 #include "xfs_trace.h"
22 #include "xfs_rtbitmap.h"
24 static void xfs_trans_free_items(struct xfs_trans
*tp
);
25 STATIC
struct xfs_trans
*xfs_trans_dup(struct xfs_trans
*tp
);
26 static int xfs_trans_reserve(struct xfs_trans
*tp
, struct xfs_trans_res
*resp
,
27 uint blocks
, uint rtextents
);
28 static int __xfs_trans_commit(struct xfs_trans
*tp
, bool regrant
);
31 * Simple transaction interface
34 struct kmem_cache
*xfs_trans_cache
;
37 * Initialize the precomputed transaction reservation values
38 * in the mount structure.
44 xfs_trans_resv_calc(mp
, &mp
->m_resv
);
48 * Add the given log item to the transaction's list of log items.
51 libxfs_trans_add_item(
53 struct xfs_log_item
*lip
)
55 ASSERT(lip
->li_mountp
== tp
->t_mountp
);
56 ASSERT(lip
->li_ailp
== tp
->t_mountp
->m_ail
);
57 ASSERT(list_empty(&lip
->li_trans
));
58 ASSERT(!test_bit(XFS_LI_DIRTY
, &lip
->li_flags
));
60 list_add_tail(&lip
->li_trans
, &tp
->t_items
);
64 * Unlink and free the given descriptor.
67 libxfs_trans_del_item(
68 struct xfs_log_item
*lip
)
70 clear_bit(XFS_LI_DIRTY
, &lip
->li_flags
);
71 list_del_init(&lip
->li_trans
);
75 * Roll from one trans in the sequence of PERMANENT transactions to
76 * the next: permanent transactions are only flushed out when
77 * committed with XFS_TRANS_RELEASE_LOG_RES, but we still want as soon
78 * as possible to let chunks of it go to the log. So we commit the
79 * chunk we've been working on and get a new transaction to continue.
83 struct xfs_trans
**tpp
)
85 struct xfs_trans
*trans
= *tpp
;
86 struct xfs_trans_res tres
;
90 * Copy the critical parameters from one trans to the next.
92 tres
.tr_logres
= trans
->t_log_res
;
93 tres
.tr_logcount
= trans
->t_log_count
;
95 *tpp
= xfs_trans_dup(trans
);
98 * Commit the current transaction.
99 * If this commit failed, then it'd just unlock those items that
100 * are marked to be released. That also means that a filesystem shutdown
101 * is in progress. The caller takes the responsibility to cancel
102 * the duplicate transaction that gets returned.
104 error
= __xfs_trans_commit(trans
, true);
109 * Reserve space in the log for the next transaction.
110 * This also pushes items in the "AIL", the list of logged items,
111 * out to disk if they are taking up space at the tail of the log
112 * that we want to use. This requires that either nothing be locked
113 * across this call, or that anything that is locked be logged in
114 * the prior and the next transactions.
116 tres
.tr_logflags
= XFS_TRANS_PERM_LOG_RES
;
117 return xfs_trans_reserve(*tpp
, &tres
, 0, 0);
121 * Free the transaction structure. If there is more clean up
122 * to do when the structure is freed, add it here.
126 struct xfs_trans
*tp
)
128 kmem_cache_free(xfs_trans_cache
, tp
);
132 * This is called to create a new transaction which will share the
133 * permanent log reservation of the given transaction. The remaining
134 * unused block and rt extent reservations are also inherited. This
135 * implies that the original transaction is no longer allowed to allocate
136 * blocks. Locks and log items, however, are no inherited. They must
137 * be added to the new transaction explicitly.
139 STATIC
struct xfs_trans
*
141 struct xfs_trans
*tp
)
143 struct xfs_trans
*ntp
;
145 ntp
= kmem_cache_zalloc(xfs_trans_cache
, 0);
148 * Initialize the new transaction structure.
150 ntp
->t_mountp
= tp
->t_mountp
;
151 INIT_LIST_HEAD(&ntp
->t_items
);
152 INIT_LIST_HEAD(&ntp
->t_dfops
);
153 ntp
->t_highest_agno
= NULLAGNUMBER
;
155 ASSERT(tp
->t_flags
& XFS_TRANS_PERM_LOG_RES
);
157 ntp
->t_flags
= XFS_TRANS_PERM_LOG_RES
|
158 (tp
->t_flags
& XFS_TRANS_RESERVE
) |
159 (tp
->t_flags
& XFS_TRANS_NO_WRITECOUNT
);
160 /* We gave our writer reference to the new transaction */
161 tp
->t_flags
|= XFS_TRANS_NO_WRITECOUNT
;
163 ntp
->t_blk_res
= tp
->t_blk_res
- tp
->t_blk_res_used
;
164 tp
->t_blk_res
= tp
->t_blk_res_used
;
166 /* move deferred ops over to the new tp */
167 xfs_defer_move(ntp
, tp
);
173 * This is called to reserve free disk blocks and log space for the
174 * given transaction. This must be done before allocating any resources
175 * within the transaction.
177 * This will return ENOSPC if there are not enough blocks available.
178 * It will sleep waiting for available log space.
179 * The only valid value for the flags parameter is XFS_RES_LOG_PERM, which
180 * is used by long running transactions. If any one of the reservations
181 * fails then they will all be backed out.
183 * This does not do quota reservations. That typically is done by the
188 struct xfs_trans
*tp
,
189 struct xfs_trans_res
*resp
,
196 * Attempt to reserve the needed disk blocks by decrementing
197 * the number needed from the number available. This will
198 * fail if the count would go below zero.
201 if (tp
->t_mountp
->m_sb
.sb_fdblocks
< blocks
)
203 tp
->t_blk_res
+= blocks
;
207 * Reserve the log space needed for this transaction.
209 if (resp
->tr_logres
> 0) {
210 ASSERT(tp
->t_log_res
== 0 ||
211 tp
->t_log_res
== resp
->tr_logres
);
212 ASSERT(tp
->t_log_count
== 0 ||
213 tp
->t_log_count
== resp
->tr_logcount
);
215 if (resp
->tr_logflags
& XFS_TRANS_PERM_LOG_RES
)
216 tp
->t_flags
|= XFS_TRANS_PERM_LOG_RES
;
218 ASSERT(!(tp
->t_flags
& XFS_TRANS_PERM_LOG_RES
));
220 tp
->t_log_res
= resp
->tr_logres
;
221 tp
->t_log_count
= resp
->tr_logcount
;
225 * Attempt to reserve the needed realtime extents by decrementing
226 * the number needed from the number available. This will
227 * fail if the count would go below zero.
230 if (tp
->t_mountp
->m_sb
.sb_rextents
< rtextents
) {
234 tp
->t_rtx_res
+= rtextents
;
240 * Error cases jump to one of these labels to undo any
241 * reservations which have already been performed.
252 struct xfs_mount
*mp
,
253 struct xfs_trans_res
*resp
,
255 unsigned int rtextents
,
257 struct xfs_trans
**tpp
)
260 struct xfs_trans
*tp
;
263 tp
= kmem_cache_zalloc(xfs_trans_cache
, 0);
265 INIT_LIST_HEAD(&tp
->t_items
);
266 INIT_LIST_HEAD(&tp
->t_dfops
);
267 tp
->t_highest_agno
= NULLAGNUMBER
;
269 error
= xfs_trans_reserve(tp
, resp
, blocks
, rtextents
);
271 xfs_trans_cancel(tp
);
275 trace_xfs_trans_alloc(tp
, _RET_IP_
);
282 * Create an empty transaction with no reservation. This is a defensive
283 * mechanism for routines that query metadata without actually modifying
284 * them -- if the metadata being queried is somehow cross-linked (think a
285 * btree block pointer that points higher in the tree), we risk deadlock.
286 * However, blocks grabbed as part of a transaction can be re-grabbed.
287 * The verifiers will notice the corrupt block and the operation will fail
288 * back to userspace without deadlocking.
290 * Note the zero-length reservation; this transaction MUST be cancelled
291 * without any dirty data.
294 libxfs_trans_alloc_empty(
295 struct xfs_mount
*mp
,
296 struct xfs_trans
**tpp
)
298 struct xfs_trans_res resv
= {0};
300 return xfs_trans_alloc(mp
, &resv
, 0, 0, XFS_TRANS_NO_WRITECOUNT
, tpp
);
304 * Allocate a transaction that can be rolled. Since userspace doesn't have
305 * a need for log reservations, we really only tr_itruncate to get the
306 * permanent log reservation flag to avoid blowing asserts.
309 libxfs_trans_alloc_rollable(
310 struct xfs_mount
*mp
,
312 struct xfs_trans
**tpp
)
314 return libxfs_trans_alloc(mp
, &M_RES(mp
)->tr_itruncate
, blocks
,
320 struct xfs_trans
*tp
)
324 trace_xfs_trans_cancel(tp
, _RET_IP_
);
328 dirty
= (tp
->t_flags
& XFS_TRANS_DIRTY
);
331 * It's never valid to cancel a transaction with deferred ops attached,
332 * because the transaction is effectively dirty. Complain about this
333 * loudly before freeing the in-memory defer items.
335 if (!list_empty(&tp
->t_dfops
)) {
336 ASSERT(list_empty(&tp
->t_dfops
));
337 ASSERT(tp
->t_flags
& XFS_TRANS_PERM_LOG_RES
);
339 xfs_defer_cancel(tp
);
343 fprintf(stderr
, _("Cancelling dirty transaction!\n"));
347 xfs_trans_free_items(tp
);
353 struct xfs_buf_log_item
*bip
)
355 struct xfs_buf
*bp
= bip
->bli_buf
;
357 bp
->b_log_item
= NULL
;
358 kmem_cache_free(xfs_buf_item_cache
, bip
);
361 /* from xfs_trans_buf.c */
364 * Add the locked buffer to the transaction.
366 * The buffer must be locked, and it cannot be associated with any
369 * If the buffer does not yet have a buf log item associated with it,
370 * then allocate one for it. Then add the buf item to the transaction.
374 struct xfs_trans
*tp
,
378 struct xfs_buf_log_item
*bip
;
380 ASSERT(bp
->b_transp
== NULL
);
383 * The xfs_buf_log_item pointer is stored in b_log_item. If
384 * it doesn't have one yet, then allocate one and initialize it.
385 * The checks to see if one is there are in xfs_buf_item_init().
387 xfs_buf_item_init(bp
, tp
->t_mountp
);
388 bip
= bp
->b_log_item
;
393 * Attach the item to the transaction so we can find it in
394 * xfs_trans_get_buf() and friends.
396 xfs_trans_add_item(tp
, &bip
->bli_item
);
403 struct xfs_trans
*tp
,
406 _libxfs_trans_bjoin(tp
, bp
, 0);
407 trace_xfs_trans_bjoin(bp
->b_log_item
);
411 * Cancel the previous buffer hold request made on this buffer
412 * for this transaction.
415 libxfs_trans_bhold_release(
419 struct xfs_buf_log_item
*bip
= bp
->b_log_item
;
421 ASSERT(bp
->b_transp
== tp
);
424 bip
->bli_flags
&= ~XFS_BLI_HOLD
;
425 trace_xfs_trans_bhold_release(bip
);
429 * Get and lock the buffer for the caller if it is not already
430 * locked within the given transaction. If it is already locked
431 * within the transaction, just increment its lock recursion count
432 * and return a pointer to it.
434 * If the transaction pointer is NULL, make this just a normal
438 libxfs_trans_get_buf_map(
439 struct xfs_trans
*tp
,
440 struct xfs_buftarg
*target
,
441 struct xfs_buf_map
*map
,
443 xfs_buf_flags_t flags
,
444 struct xfs_buf
**bpp
)
447 struct xfs_buf_log_item
*bip
;
452 return libxfs_buf_get_map(target
, map
, nmaps
, 0, bpp
);
455 * If we find the buffer in the cache with this transaction
456 * pointer in its b_fsprivate2 field, then we know we already
457 * have it locked. In this case we just increment the lock
458 * recursion count and return the buffer to the caller.
460 bp
= xfs_trans_buf_item_match(tp
, target
, map
, nmaps
);
462 ASSERT(bp
->b_transp
== tp
);
463 bip
= bp
->b_log_item
;
466 trace_xfs_trans_get_buf_recur(bip
);
471 error
= libxfs_buf_get_map(target
, map
, nmaps
, 0, &bp
);
475 ASSERT(!bp
->b_error
);
477 _libxfs_trans_bjoin(tp
, bp
, 1);
478 trace_xfs_trans_get_buf(bp
->b_log_item
);
485 struct xfs_trans
*tp
)
487 struct xfs_mount
*mp
= tp
->t_mountp
;
489 struct xfs_buf_log_item
*bip
;
490 int len
= XFS_FSS_TO_BB(mp
, 1);
491 DEFINE_SINGLE_BUF_MAP(map
, XFS_SB_DADDR
, len
);
494 return libxfs_getsb(mp
);
496 bp
= xfs_trans_buf_item_match(tp
, mp
->m_dev
, &map
, 1);
498 ASSERT(bp
->b_transp
== tp
);
499 bip
= bp
->b_log_item
;
502 trace_xfs_trans_getsb_recur(bip
);
506 bp
= libxfs_getsb(mp
);
510 _libxfs_trans_bjoin(tp
, bp
, 1);
511 trace_xfs_trans_getsb(bp
->b_log_item
);
516 libxfs_trans_read_buf_map(
517 struct xfs_mount
*mp
,
518 struct xfs_trans
*tp
,
519 struct xfs_buftarg
*target
,
520 struct xfs_buf_map
*map
,
522 xfs_buf_flags_t flags
,
523 struct xfs_buf
**bpp
,
524 const struct xfs_buf_ops
*ops
)
527 struct xfs_buf_log_item
*bip
;
533 return libxfs_buf_read_map(target
, map
, nmaps
, flags
, bpp
, ops
);
535 bp
= xfs_trans_buf_item_match(tp
, target
, map
, nmaps
);
537 ASSERT(bp
->b_transp
== tp
);
538 ASSERT(bp
->b_log_item
!= NULL
);
539 bip
= bp
->b_log_item
;
541 trace_xfs_trans_read_buf_recur(bip
);
545 error
= libxfs_buf_read_map(target
, map
, nmaps
, flags
, &bp
, ops
);
549 _libxfs_trans_bjoin(tp
, bp
, 1);
551 trace_xfs_trans_read_buf(bp
->b_log_item
);
557 * Release a buffer previously joined to the transaction. If the buffer is
558 * modified within this transaction, decrement the recursion count but do not
559 * release the buffer even if the count goes to 0. If the buffer is not modified
560 * within the transaction, decrement the recursion count and release the buffer
561 * if the recursion count goes to 0.
563 * If the buffer is to be released and it was not already dirty before this
564 * transaction began, then also free the buf_log_item associated with it.
566 * If the transaction pointer is NULL, this is a normal xfs_buf_relse() call.
570 struct xfs_trans
*tp
,
573 struct xfs_buf_log_item
*bip
= bp
->b_log_item
;
575 ASSERT(bp
->b_transp
== tp
);
578 libxfs_buf_relse(bp
);
582 trace_xfs_trans_brelse(bip
);
583 ASSERT(bip
->bli_item
.li_type
== XFS_LI_BUF
);
586 * If the release is for a recursive lookup, then decrement the count
589 if (bip
->bli_recur
> 0) {
595 * If the buffer is invalidated or dirty in this transaction, we can't
596 * release it until we commit.
598 if (test_bit(XFS_LI_DIRTY
, &bip
->bli_item
.li_flags
))
600 if (bip
->bli_flags
& XFS_BLI_STALE
)
604 * Unlink the log item from the transaction and clear the hold flag, if
605 * set. We wouldn't want the next user of the buffer to get confused.
607 xfs_trans_del_item(&bip
->bli_item
);
608 bip
->bli_flags
&= ~XFS_BLI_HOLD
;
610 /* drop the reference to the bli */
611 xfs_buf_item_put(bip
);
614 libxfs_buf_relse(bp
);
618 * Mark the buffer as not needing to be unlocked when the buf item's
619 * iop_unlock() routine is called. The buffer must already be locked
620 * and associated with the given transaction.
628 struct xfs_buf_log_item
*bip
= bp
->b_log_item
;
630 ASSERT(bp
->b_transp
== tp
);
633 bip
->bli_flags
|= XFS_BLI_HOLD
;
634 trace_xfs_trans_bhold(bip
);
638 * Mark a buffer dirty in the transaction.
641 libxfs_trans_dirty_buf(
642 struct xfs_trans
*tp
,
645 struct xfs_buf_log_item
*bip
= bp
->b_log_item
;
647 ASSERT(bp
->b_transp
== tp
);
650 tp
->t_flags
|= XFS_TRANS_DIRTY
;
651 set_bit(XFS_LI_DIRTY
, &bip
->bli_item
.li_flags
);
655 * This is called to mark bytes first through last inclusive of the given
656 * buffer as needing to be logged when the transaction is committed.
657 * The buffer must already be associated with the given transaction.
659 * First and last are numbers relative to the beginning of this buffer,
660 * so the first byte in the buffer is numbered 0 regardless of the
664 libxfs_trans_log_buf(
665 struct xfs_trans
*tp
,
670 struct xfs_buf_log_item
*bip
= bp
->b_log_item
;
672 ASSERT(first
<= last
&& last
< BBTOB(bp
->b_length
));
674 xfs_trans_dirty_buf(tp
, bp
);
676 trace_xfs_trans_log_buf(bip
);
677 xfs_buf_item_log(bip
, first
, last
);
685 struct xfs_buf_log_item
*bip
= bp
->b_log_item
;
687 ASSERT(bp
->b_transp
== tp
);
690 trace_xfs_trans_binval(bip
);
692 if (bip
->bli_flags
& XFS_BLI_STALE
)
694 XFS_BUF_UNDELAYWRITE(bp
);
697 bip
->bli_flags
|= XFS_BLI_STALE
;
698 bip
->bli_flags
&= ~XFS_BLI_DIRTY
;
699 bip
->__bli_format
.blf_flags
&= ~XFS_BLF_INODE_BUF
;
700 bip
->__bli_format
.blf_flags
|= XFS_BLF_CANCEL
;
701 set_bit(XFS_LI_DIRTY
, &bip
->bli_item
.li_flags
);
702 tp
->t_flags
|= XFS_TRANS_DIRTY
;
706 * Mark the buffer as being one which contains newly allocated
707 * inodes. We need to make sure that even if this buffer is
708 * relogged as an 'inode buf' we still recover all of the inode
709 * images in the face of a crash. This works in coordination with
710 * xfs_buf_item_committed() to ensure that the buffer remains in the
711 * AIL at its original location even after it has been relogged.
715 libxfs_trans_inode_alloc_buf(
719 struct xfs_buf_log_item
*bip
= bp
->b_log_item
;
721 ASSERT(bp
->b_transp
== tp
);
723 bip
->bli_flags
|= XFS_BLI_INODE_ALLOC_BUF
;
724 xfs_trans_buf_set_type(tp
, bp
, XFS_BLFT_DINO_BUF
);
728 * For userspace, ordered buffers just need to be marked dirty so
729 * the transaction commit will write them and mark them up-to-date.
730 * In essence, they are just like any other logged buffer in userspace.
732 * If the buffer is already dirty, trigger the "already logged" return condition.
735 libxfs_trans_ordered_buf(
736 struct xfs_trans
*tp
,
739 struct xfs_buf_log_item
*bip
= bp
->b_log_item
;
742 ret
= test_bit(XFS_LI_DIRTY
, &bip
->bli_item
.li_flags
);
743 libxfs_trans_log_buf(tp
, bp
, 0, BBTOB(bp
->b_length
));
747 /* end of xfs_trans_buf.c */
750 * Record the indicated change to the given field for application
751 * to the file system's superblock when the transaction commits.
752 * For now, just store the change in the transaction structure.
753 * Mark the transaction structure to indicate that the superblock
754 * needs to be updated before committing.
756 * Originally derived from xfs_trans_mod_sb().
765 case XFS_TRANS_SB_RES_FDBLOCKS
:
767 case XFS_TRANS_SB_FDBLOCKS
:
769 tp
->t_blk_res_used
+= (uint
)-delta
;
770 if (tp
->t_blk_res_used
> tp
->t_blk_res
) {
772 _("Transaction block reservation exceeded! %u > %u\n"),
773 tp
->t_blk_res_used
, tp
->t_blk_res
);
777 tp
->t_fdblocks_delta
+= delta
;
779 case XFS_TRANS_SB_ICOUNT
:
781 tp
->t_icount_delta
+= delta
;
783 case XFS_TRANS_SB_IFREE
:
784 tp
->t_ifree_delta
+= delta
;
786 case XFS_TRANS_SB_FREXTENTS
:
788 * Track the number of rt extents allocated in the transaction.
789 * Make sure it does not exceed the number reserved.
792 tp
->t_rtx_res_used
+= (uint
)-delta
;
793 if (tp
->t_rtx_res_used
> tp
->t_rtx_res
) {
795 _("Transaction rt block reservation exceeded! %u > %u\n"),
796 tp
->t_rtx_res_used
, tp
->t_rtx_res
);
800 tp
->t_frextents_delta
+= delta
;
806 tp
->t_flags
|= (XFS_TRANS_SB_DIRTY
| XFS_TRANS_DIRTY
);
811 struct xfs_inode_log_item
*iip
)
813 struct xfs_inode
*ip
= iip
->ili_inode
;
815 ASSERT(iip
->ili_item
.li_buf
== NULL
);
819 list_del_init(&iip
->ili_item
.li_bio_list
);
820 kmem_cache_free(xfs_ili_cache
, iip
);
825 * Transaction commital code follows (i.e. write to disk in libxfs)
827 * XXX (dgc): should failure to flush the inode (e.g. due to uncorrected
828 * corruption) result in transaction commit failure w/ EFSCORRUPTED?
832 struct xfs_inode_log_item
*iip
)
837 ASSERT(iip
->ili_inode
!= NULL
);
839 if (!(iip
->ili_fields
& XFS_ILOG_ALL
))
842 bp
= iip
->ili_item
.li_buf
;
843 iip
->ili_item
.li_buf
= NULL
;
846 * Flush the inode and disassociate it from the transaction regardless
847 * of whether the flush succeed or not. If we fail the flush, make sure
848 * we still release the buffer reference we currently hold.
850 error
= libxfs_iflush_int(iip
->ili_inode
, bp
);
851 bp
->b_transp
= NULL
; /* remove xact ptr */
854 fprintf(stderr
, _("%s: warning - iflush_int failed (%d)\n"),
859 libxfs_buf_mark_dirty(bp
);
861 libxfs_buf_relse(bp
);
863 xfs_inode_item_put(iip
);
868 xfs_buf_log_item_t
*bip
)
872 extern struct kmem_cache
*xfs_buf_item_cache
;
876 bp
->b_transp
= NULL
; /* remove xact ptr */
878 hold
= (bip
->bli_flags
& XFS_BLI_HOLD
);
879 if (bip
->bli_flags
& XFS_BLI_DIRTY
)
880 libxfs_buf_mark_dirty(bp
);
882 bip
->bli_flags
&= ~XFS_BLI_HOLD
;
883 xfs_buf_item_put(bip
);
886 libxfs_buf_relse(bp
);
893 struct xfs_log_item
*lip
, *next
;
895 list_for_each_entry_safe(lip
, next
, &tp
->t_items
, li_trans
) {
896 xfs_trans_del_item(lip
);
898 if (lip
->li_type
== XFS_LI_BUF
)
899 buf_item_done((xfs_buf_log_item_t
*)lip
);
900 else if (lip
->li_type
== XFS_LI_INODE
)
901 inode_item_done((struct xfs_inode_log_item
*)lip
);
903 fprintf(stderr
, _("%s: unrecognised log item type\n"),
912 xfs_buf_log_item_t
*bip
)
914 struct xfs_buf
*bp
= bip
->bli_buf
;
917 /* Clear the buffer's association with this transaction. */
918 bip
->bli_buf
->b_transp
= NULL
;
920 hold
= bip
->bli_flags
& XFS_BLI_HOLD
;
921 bip
->bli_flags
&= ~XFS_BLI_HOLD
;
922 xfs_buf_item_put(bip
);
924 libxfs_buf_relse(bp
);
/* Detach an inode log item from its transaction and free it. */
static void
inode_item_unlock(
	struct xfs_inode_log_item	*iip)
{
	xfs_inode_item_put(iip);
}
934 /* Detach and unlock all of the items in a transaction */
936 xfs_trans_free_items(
937 struct xfs_trans
*tp
)
939 struct xfs_log_item
*lip
, *next
;
941 list_for_each_entry_safe(lip
, next
, &tp
->t_items
, li_trans
) {
942 xfs_trans_del_item(lip
);
943 if (lip
->li_type
== XFS_LI_BUF
)
944 buf_item_unlock((xfs_buf_log_item_t
*)lip
);
945 else if (lip
->li_type
== XFS_LI_INODE
)
946 inode_item_unlock((struct xfs_inode_log_item
*)lip
);
948 fprintf(stderr
, _("%s: unrecognised log item type\n"),
956 * Sort transaction items prior to running precommit operations. This will
957 * attempt to order the items such that they will always be locked in the same
958 * order. Items that have no sort function are moved to the end of the list
959 * and so are locked last.
961 * This may need refinement as different types of objects add sort functions.
963 * Function is more complex than it needs to be because we are comparing 64 bit
964 * values and the function only returns 32 bit values.
967 xfs_trans_precommit_sort(
969 const struct list_head
*a
,
970 const struct list_head
*b
)
972 struct xfs_log_item
*lia
= container_of(a
,
973 struct xfs_log_item
, li_trans
);
974 struct xfs_log_item
*lib
= container_of(b
,
975 struct xfs_log_item
, li_trans
);
979 * If both items are non-sortable, leave them alone. If only one is
980 * sortable, move the non-sortable item towards the end of the list.
982 if (!lia
->li_ops
->iop_sort
&& !lib
->li_ops
->iop_sort
)
984 if (!lia
->li_ops
->iop_sort
)
986 if (!lib
->li_ops
->iop_sort
)
989 diff
= lia
->li_ops
->iop_sort(lia
) - lib
->li_ops
->iop_sort(lib
);
998 * Run transaction precommit functions.
1000 * If there is an error in any of the callouts, then stop immediately and
1001 * trigger a shutdown to abort the transaction. There is no recovery possible
1002 * from errors at this point as the transaction is dirty....
1005 xfs_trans_run_precommits(
1006 struct xfs_trans
*tp
)
1008 //struct xfs_mount *mp = tp->t_mountp;
1009 struct xfs_log_item
*lip
, *n
;
1013 * Sort the item list to avoid ABBA deadlocks with other transactions
1014 * running precommit operations that lock multiple shared items such as
1015 * inode cluster buffers.
1017 list_sort(NULL
, &tp
->t_items
, xfs_trans_precommit_sort
);
1020 * Precommit operations can remove the log item from the transaction
1021 * if the log item exists purely to delay modifications until they
1022 * can be ordered against other operations. Hence we have to use
1023 * list_for_each_entry_safe() here.
1025 list_for_each_entry_safe(lip
, n
, &tp
->t_items
, li_trans
) {
1026 if (!test_bit(XFS_LI_DIRTY
, &lip
->li_flags
))
1028 if (lip
->li_ops
->iop_precommit
) {
1029 error
= lip
->li_ops
->iop_precommit(tp
, lip
);
1035 xfs_force_shutdown(mp
, SHUTDOWN_CORRUPT_INCORE
);
1040 * Commit the changes represented by this transaction
1044 struct xfs_trans
*tp
,
1050 trace_xfs_trans_commit(tp
, _RET_IP_
);
1055 error
= xfs_trans_run_precommits(tp
);
1057 if (tp
->t_flags
& XFS_TRANS_PERM_LOG_RES
)
1058 xfs_defer_cancel(tp
);
1063 * Finish deferred items on final commit. Only permanent transactions
1064 * should ever have deferred ops.
1066 WARN_ON_ONCE(!list_empty(&tp
->t_dfops
) &&
1067 !(tp
->t_flags
& XFS_TRANS_PERM_LOG_RES
));
1068 if (!regrant
&& (tp
->t_flags
& XFS_TRANS_PERM_LOG_RES
)) {
1069 error
= xfs_defer_finish_noroll(&tp
);
1073 /* Run precommits from final tx in defer chain. */
1074 error
= xfs_trans_run_precommits(tp
);
1079 if (!(tp
->t_flags
& XFS_TRANS_DIRTY
))
1082 if (tp
->t_flags
& XFS_TRANS_SB_DIRTY
) {
1083 sbp
= &(tp
->t_mountp
->m_sb
);
1084 if (tp
->t_icount_delta
)
1085 sbp
->sb_icount
+= tp
->t_icount_delta
;
1086 if (tp
->t_ifree_delta
)
1087 sbp
->sb_ifree
+= tp
->t_ifree_delta
;
1088 if (tp
->t_fdblocks_delta
)
1089 sbp
->sb_fdblocks
+= tp
->t_fdblocks_delta
;
1090 if (tp
->t_frextents_delta
)
1091 sbp
->sb_frextents
+= tp
->t_frextents_delta
;
1095 trans_committed(tp
);
1097 /* That's it for the transaction structure. Free it. */
1102 xfs_trans_free_items(tp
);
1108 libxfs_trans_commit(
1109 struct xfs_trans
*tp
)
1111 return __xfs_trans_commit(tp
, false);
1115 * Allocate an transaction, lock and join the inode to it, and reserve quota.
1117 * The caller must ensure that the on-disk dquots attached to this inode have
1118 * already been allocated and initialized. The caller is responsible for
1119 * releasing ILOCK_EXCL if a new transaction is returned.
1122 libxfs_trans_alloc_inode(
1123 struct xfs_inode
*ip
,
1124 struct xfs_trans_res
*resv
,
1125 unsigned int dblocks
,
1126 unsigned int rblocks
,
1128 struct xfs_trans
**tpp
)
1130 struct xfs_trans
*tp
;
1131 struct xfs_mount
*mp
= ip
->i_mount
;
1134 error
= libxfs_trans_alloc(mp
, resv
, dblocks
,
1135 xfs_rtb_to_rtx(mp
, rblocks
),
1136 force
? XFS_TRANS_RESERVE
: 0, &tp
);
1140 xfs_ilock(ip
, XFS_ILOCK_EXCL
);
1141 xfs_trans_ijoin(tp
, ip
, 0);