/*
 * Provenance (scrape artifact repaired): libxfs/trans.c from the
 * thirdparty/xfsprogs-dev.git repository, release v6.7.0.
 */
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2000-2001,2005-2006 Silicon Graphics, Inc.
4 * Copyright (C) 2010 Red Hat, Inc.
5 * All Rights Reserved.
6 */
7
8 #include "libxfs_priv.h"
9 #include "xfs_fs.h"
10 #include "xfs_shared.h"
11 #include "xfs_format.h"
12 #include "xfs_log_format.h"
13 #include "xfs_trans_resv.h"
14 #include "xfs_mount.h"
15 #include "xfs_inode_buf.h"
16 #include "xfs_inode_fork.h"
17 #include "xfs_inode.h"
18 #include "xfs_trans.h"
19 #include "xfs_sb.h"
20 #include "xfs_defer.h"
21 #include "xfs_trace.h"
22
23 static void xfs_trans_free_items(struct xfs_trans *tp);
24 STATIC struct xfs_trans *xfs_trans_dup(struct xfs_trans *tp);
25 static int xfs_trans_reserve(struct xfs_trans *tp, struct xfs_trans_res *resp,
26 uint blocks, uint rtextents);
27 static int __xfs_trans_commit(struct xfs_trans *tp, bool regrant);
28
29 /*
30 * Simple transaction interface
31 */
32
33 kmem_zone_t *xfs_trans_zone;
34
35 /*
36 * Initialize the precomputed transaction reservation values
37 * in the mount structure.
38 */
/*
 * Precompute the per-transaction-type log reservation values and cache
 * them in mp->m_resv so later transaction allocations can look them up.
 */
void
libxfs_trans_init(
	struct xfs_mount	*mp)
{
	xfs_trans_resv_calc(mp, &mp->m_resv);
}
45
46 /*
47 * Add the given log item to the transaction's list of log items.
48 */
void
libxfs_trans_add_item(
	struct xfs_trans	*tp,
	struct xfs_log_item	*lip)
{
	/*
	 * The item must belong to the same mount/AIL as the transaction
	 * and must not already be joined to a transaction or be dirty.
	 */
	ASSERT(lip->li_mountp == tp->t_mountp);
	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
	ASSERT(list_empty(&lip->li_trans));
	ASSERT(!test_bit(XFS_LI_DIRTY, &lip->li_flags));

	list_add_tail(&lip->li_trans, &tp->t_items);
}
61
62 /*
63 * Unlink and free the given descriptor.
64 */
void
libxfs_trans_del_item(
	struct xfs_log_item	*lip)
{
	/* Clear the dirty state before unhooking the item from t_items. */
	clear_bit(XFS_LI_DIRTY, &lip->li_flags);
	list_del_init(&lip->li_trans);
}
72
73 /*
74 * Roll from one trans in the sequence of PERMANENT transactions to
75 * the next: permanent transactions are only flushed out when
76 * committed with XFS_TRANS_RELEASE_LOG_RES, but we still want as soon
77 * as possible to let chunks of it go to the log. So we commit the
78 * chunk we've been working on and get a new transaction to continue.
79 */
int
libxfs_trans_roll(
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*trans = *tpp;
	struct xfs_trans_res	tres;
	int			error;

	/*
	 * Copy the critical parameters from one trans to the next.
	 */
	tres.tr_logres = trans->t_log_res;
	tres.tr_logcount = trans->t_log_count;

	/*
	 * Duplicate before committing: *tpp must point at a valid
	 * transaction even on the error returns below, because the
	 * caller is responsible for cancelling it.
	 */
	*tpp = xfs_trans_dup(trans);

	/*
	 * Commit the current transaction.
	 * If this commit failed, then it'd just unlock those items that
	 * are marked to be released. That also means that a filesystem shutdown
	 * is in progress. The caller takes the responsibility to cancel
	 * the duplicate transaction that gets returned.
	 */
	error = __xfs_trans_commit(trans, true);
	if (error)
		return error;

	/*
	 * Reserve space in the log for the next transaction.
	 * This also pushes items in the "AIL", the list of logged items,
	 * out to disk if they are taking up space at the tail of the log
	 * that we want to use. This requires that either nothing be locked
	 * across this call, or that anything that is locked be logged in
	 * the prior and the next transactions.
	 */
	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
	return xfs_trans_reserve(*tpp, &tres, 0, 0);
}
118
119 /*
120 * Free the transaction structure. If there is more clean up
121 * to do when the structure is freed, add it here.
122 */
/*
 * Free the transaction structure. If there is more clean up
 * to do when the structure is freed, add it here.
 */
static void
xfs_trans_free(
	struct xfs_trans	*tp)
{
	kmem_zone_free(xfs_trans_zone, tp);
}
129
130 /*
131 * This is called to create a new transaction which will share the
132 * permanent log reservation of the given transaction. The remaining
133 * unused block and rt extent reservations are also inherited. This
134 * implies that the original transaction is no longer allowed to allocate
 * blocks. Locks and log items, however, are not inherited. They must
136 * be added to the new transaction explicitly.
137 */
STATIC struct xfs_trans *
xfs_trans_dup(
	struct xfs_trans	*tp)
{
	struct xfs_trans	*ntp;

	ntp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);

	/*
	 * Initialize the new transaction structure.
	 */
	ntp->t_mountp = tp->t_mountp;
	INIT_LIST_HEAD(&ntp->t_items);
	INIT_LIST_HEAD(&ntp->t_dfops);
	ntp->t_firstblock = NULLFSBLOCK;

	/* Only permanent (rolling) transactions may be duplicated. */
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);

	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
		       (tp->t_flags & XFS_TRANS_RESERVE) |
		       (tp->t_flags & XFS_TRANS_NO_WRITECOUNT);
	/* We gave our writer reference to the new transaction */
	tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;

	/*
	 * The new transaction inherits the old one's unused block
	 * reservation; the old one keeps only what it has already used.
	 */
	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
	tp->t_blk_res = tp->t_blk_res_used;

	/* move deferred ops over to the new tp */
	xfs_defer_move(ntp, tp);

	return ntp;
}
170
171 /*
172 * This is called to reserve free disk blocks and log space for the
173 * given transaction. This must be done before allocating any resources
174 * within the transaction.
175 *
176 * This will return ENOSPC if there are not enough blocks available.
177 * It will sleep waiting for available log space.
178 * The only valid value for the flags parameter is XFS_RES_LOG_PERM, which
179 * is used by long running transactions. If any one of the reservations
180 * fails then they will all be backed out.
181 *
182 * This does not do quota reservations. That typically is done by the
183 * caller afterwards.
184 */
static int
xfs_trans_reserve(
	struct xfs_trans	*tp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents)
{
	int			error = 0;

	/*
	 * Attempt to reserve the needed disk blocks by decrementing
	 * the number needed from the number available. This will
	 * fail if the count would go below zero.
	 *
	 * NOTE(review): only the availability check is done here; the
	 * in-core sb_fdblocks counter itself is not decremented.
	 * Presumably the actual accounting happens at commit time via
	 * t_fdblocks_delta — confirm against libxfs_trans_mod_sb().
	 */
	if (blocks > 0) {
		if (tp->t_mountp->m_sb.sb_fdblocks < blocks)
			return -ENOSPC;
		tp->t_blk_res += blocks;
	}

	/*
	 * Reserve the log space needed for this transaction.
	 */
	if (resp->tr_logres > 0) {
		/* A re-reservation (trans roll) must match the original. */
		ASSERT(tp->t_log_res == 0 ||
		       tp->t_log_res == resp->tr_logres);
		ASSERT(tp->t_log_count == 0 ||
		       tp->t_log_count == resp->tr_logcount);

		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES)
			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
		else
			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));

		tp->t_log_res = resp->tr_logres;
		tp->t_log_count = resp->tr_logcount;
	}

	/*
	 * Attempt to reserve the needed realtime extents by decrementing
	 * the number needed from the number available. This will
	 * fail if the count would go below zero.
	 */
	if (rtextents > 0) {
		if (tp->t_mountp->m_sb.sb_rextents < rtextents) {
			error = -ENOSPC;
			goto undo_blocks;
		}
	}

	return 0;

	/*
	 * Error cases jump to one of these labels to undo any
	 * reservations which have already been performed.
	 *
	 * NOTE(review): this zeroes t_blk_res rather than subtracting
	 * 'blocks'. The two are equivalent for the callers in this file
	 * (a nonzero 'blocks' is only passed for freshly allocated
	 * transactions where t_blk_res starts at 0) — verify before
	 * adding new callers that pre-load t_blk_res.
	 */
undo_blocks:
	if (blocks > 0)
		tp->t_blk_res = 0;

	return error;
}
247
/*
 * Allocate a transaction and reserve 'blocks' data blocks and
 * 'rtextents' realtime extents for it. On success *tpp holds the new
 * transaction (ownership passes to the caller, who must commit or
 * cancel it); on failure the transaction is cancelled here and an
 * error (e.g. -ENOSPC) is returned.
 */
int
libxfs_trans_alloc(
	struct xfs_mount	*mp,
	struct xfs_trans_res	*resp,
	unsigned int		blocks,
	unsigned int		rtextents,
	unsigned int		flags,
	struct xfs_trans	**tpp)

{
	struct xfs_trans	*tp;
	int			error;

	/*
	 * NOTE(review): no NULL check on the allocation — presumably
	 * kmem_zone_zalloc aborts on OOM in userspace; confirm.
	 */
	tp = kmem_zone_zalloc(xfs_trans_zone,
		(flags & XFS_TRANS_NOFS) ? KM_NOFS : KM_SLEEP);
	tp->t_mountp = mp;
	INIT_LIST_HEAD(&tp->t_items);
	INIT_LIST_HEAD(&tp->t_dfops);
	tp->t_firstblock = NULLFSBLOCK;

	error = xfs_trans_reserve(tp, resp, blocks, rtextents);
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	trace_xfs_trans_alloc(tp, _RET_IP_);

	*tpp = tp;
	return 0;
}
279
280 /*
281 * Create an empty transaction with no reservation. This is a defensive
282 * mechanism for routines that query metadata without actually modifying
283 * them -- if the metadata being queried is somehow cross-linked (think a
284 * btree block pointer that points higher in the tree), we risk deadlock.
285 * However, blocks grabbed as part of a transaction can be re-grabbed.
286 * The verifiers will notice the corrupt block and the operation will fail
287 * back to userspace without deadlocking.
288 *
289 * Note the zero-length reservation; this transaction MUST be cancelled
290 * without any dirty data.
291 */
int
libxfs_trans_alloc_empty(
	struct xfs_mount	*mp,
	struct xfs_trans	**tpp)
{
	/* Zero reservation: this transaction must stay clean. */
	struct xfs_trans_res		resv = {0};

	return xfs_trans_alloc(mp, &resv, 0, 0, XFS_TRANS_NO_WRITECOUNT, tpp);
}
301
302 /*
303 * Allocate a transaction that can be rolled. Since userspace doesn't have
 * a need for log reservations, we really only use tr_itruncate to get the
305 * permanent log reservation flag to avoid blowing asserts.
306 */
int
libxfs_trans_alloc_rollable(
	struct xfs_mount	*mp,
	unsigned int		blocks,
	struct xfs_trans	**tpp)
{
	/* tr_itruncate carries the permanent log reservation flag. */
	return libxfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, blocks,
			0, 0, tpp);
}
316
317 void
318 libxfs_trans_cancel(
319 struct xfs_trans *tp)
320 {
321 trace_xfs_trans_cancel(tp, _RET_IP_);
322
323 if (tp == NULL)
324 return;
325
326 if (tp->t_flags & XFS_TRANS_PERM_LOG_RES)
327 xfs_defer_cancel(tp);
328
329 xfs_trans_free_items(tp);
330 xfs_trans_free(tp);
331 }
332
333 static void
334 xfs_buf_item_put(
335 struct xfs_buf_log_item *bip)
336 {
337 struct xfs_buf *bp = bip->bli_buf;
338
339 bp->b_log_item = NULL;
340 kmem_zone_free(xfs_buf_item_zone, bip);
341 }
342
343 /* from xfs_trans_buf.c */
344
345 /*
346 * Add the locked buffer to the transaction.
347 *
348 * The buffer must be locked, and it cannot be associated with any
349 * transaction.
350 *
351 * If the buffer does not yet have a buf log item associated with it,
352 * then allocate one for it. Then add the buf item to the transaction.
353 */
STATIC void
_libxfs_trans_bjoin(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	int			reset_recur)
{
	struct xfs_buf_log_item	*bip;

	/* The buffer must not already belong to a transaction. */
	ASSERT(bp->b_transp == NULL);

	/*
	 * The xfs_buf_log_item pointer is stored in b_log_item.  If
	 * it doesn't have one yet, then allocate one and initialize it.
	 * The checks to see if one is there are in xfs_buf_item_init().
	 */
	xfs_buf_item_init(bp, tp->t_mountp);
	bip = bp->b_log_item;
	if (reset_recur)
		bip->bli_recur = 0;

	/*
	 * Attach the item to the transaction so we can find it in
	 * xfs_trans_get_buf() and friends.
	 */
	xfs_trans_add_item(tp, &bip->bli_item);
	bp->b_transp = tp;

}
382
/* Public wrapper: join an already-locked buffer without resetting
 * its recursion count. */
void
libxfs_trans_bjoin(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	_libxfs_trans_bjoin(tp, bp, 0);
	trace_xfs_trans_bjoin(bp->b_log_item);
}
391
392 /*
393 * Cancel the previous buffer hold request made on this buffer
394 * for this transaction.
395 */
void
libxfs_trans_bhold_release(
	xfs_trans_t		*tp,
	xfs_buf_t		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	/* The buffer must be joined to this transaction. */
	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);

	/* Drop the hold so commit/brelse may release the buffer. */
	bip->bli_flags &= ~XFS_BLI_HOLD;
	trace_xfs_trans_bhold_release(bip);
}
409
410 /*
411 * Get and lock the buffer for the caller if it is not already
412 * locked within the given transaction. If it is already locked
413 * within the transaction, just increment its lock recursion count
414 * and return a pointer to it.
415 *
416 * If the transaction pointer is NULL, make this just a normal
417 * get_buf() call.
418 */
struct xfs_buf *
libxfs_trans_get_buf_map(
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags)
{
	xfs_buf_t		*bp;
	struct xfs_buf_log_item	*bip;

	/* No transaction: plain get-buffer call. */
	if (!tp)
		return libxfs_getbuf_map(target, map, nmaps, 0);

	/*
	 * If we find the buffer in the cache with this transaction
	 * pointer in its b_fsprivate2 field, then we know we already
	 * have it locked.  In this case we just increment the lock
	 * recursion count and return the buffer to the caller.
	 */
	bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
	if (bp != NULL) {
		ASSERT(bp->b_transp == tp);
		bip = bp->b_log_item;
		ASSERT(bip != NULL);
		bip->bli_recur++;
		trace_xfs_trans_get_buf_recur(bip);
		return bp;
	}

	bp = libxfs_getbuf_map(target, map, nmaps, 0);
	if (bp == NULL) {
		return NULL;
	}

	ASSERT(!bp->b_error);

	/* New buffer: join it to the transaction, resetting recursion. */
	_libxfs_trans_bjoin(tp, bp, 1);
	trace_xfs_trans_get_buf(bp->b_log_item);
	return bp;
}
460
/*
 * Get and lock the superblock buffer for the given transaction.
 * Handles recursive locking within the same transaction just like
 * libxfs_trans_get_buf_map(); with a NULL transaction this is a plain
 * libxfs_getsb() call.
 */
xfs_buf_t *
libxfs_trans_getsb(
	xfs_trans_t		*tp,
	struct xfs_mount	*mp,
	int			flags)
{
	xfs_buf_t		*bp;
	struct xfs_buf_log_item	*bip;
	int			len = XFS_FSS_TO_BB(mp, 1);
	DEFINE_SINGLE_BUF_MAP(map, XFS_SB_DADDR, len);

	if (tp == NULL)
		return libxfs_getsb(mp, flags);

	/* Already locked in this transaction?  Just bump recursion. */
	bp = xfs_trans_buf_item_match(tp, mp->m_dev, &map, 1);
	if (bp != NULL) {
		ASSERT(bp->b_transp == tp);
		bip = bp->b_log_item;
		ASSERT(bip != NULL);
		bip->bli_recur++;
		trace_xfs_trans_getsb_recur(bip);
		return bp;
	}

	bp = libxfs_getsb(mp, flags);
	if (bp == NULL)
		return NULL;

	_libxfs_trans_bjoin(tp, bp, 1);
	trace_xfs_trans_getsb(bp->b_log_item);
	return bp;
}
493
/*
 * Read a buffer and join it to the transaction, handling recursive
 * locking within the same transaction.  On success *bpp holds the
 * locked buffer; on failure *bpp is NULL and a negative errno is
 * returned (the buffer, if any, has been released).
 */
int
libxfs_trans_read_buf_map(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	struct xfs_buf_log_item	*bip;
	int			error;

	*bpp = NULL;

	/* No transaction: plain read, no joining. */
	if (tp == NULL) {
		bp = libxfs_readbuf_map(target, map, nmaps, flags, ops);
		if (!bp) {
			return (flags & XBF_TRYLOCK) ?  -EAGAIN : -ENOMEM;
		}
		if (bp->b_error)
			goto out_relse;
		goto done;
	}

	/* Already locked in this transaction?  Just bump recursion. */
	bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
	if (bp) {
		ASSERT(bp->b_transp == tp);
		ASSERT(bp->b_log_item != NULL);
		bip = bp->b_log_item;
		bip->bli_recur++;
		trace_xfs_trans_read_buf_recur(bip);
		goto done;
	}

	bp = libxfs_readbuf_map(target, map, nmaps, flags, ops);
	if (!bp) {
		return (flags & XBF_TRYLOCK) ?  -EAGAIN : -ENOMEM;
	}
	if (bp->b_error)
		goto out_relse;

	_libxfs_trans_bjoin(tp, bp, 1);
done:
	trace_xfs_trans_read_buf(bp->b_log_item);
	*bpp = bp;
	return 0;
out_relse:
	/* Capture b_error before the release can invalidate bp. */
	error = bp->b_error;
	xfs_buf_relse(bp);
	return error;
}
548
549 /*
550 * Release a buffer previously joined to the transaction. If the buffer is
551 * modified within this transaction, decrement the recursion count but do not
552 * release the buffer even if the count goes to 0. If the buffer is not modified
553 * within the transaction, decrement the recursion count and release the buffer
554 * if the recursion count goes to 0.
555 *
556 * If the buffer is to be released and it was not already dirty before this
557 * transaction began, then also free the buf_log_item associated with it.
558 *
559 * If the transaction pointer is NULL, this is a normal xfs_buf_relse() call.
560 */
void
libxfs_trans_brelse(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	/* Holds with tp == NULL too: the buffer must then be unowned. */
	ASSERT(bp->b_transp == tp);

	if (!tp) {
		libxfs_putbuf(bp);
		return;
	}

	trace_xfs_trans_brelse(bip);
	ASSERT(bip->bli_item.li_type == XFS_LI_BUF);

	/*
	 * If the release is for a recursive lookup, then decrement the count
	 * and return.
	 */
	if (bip->bli_recur > 0) {
		bip->bli_recur--;
		return;
	}

	/*
	 * If the buffer is invalidated or dirty in this transaction, we can't
	 * release it until we commit.
	 */
	if (test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags))
		return;
	if (bip->bli_flags & XFS_BLI_STALE)
		return;

	/*
	 * Unlink the log item from the transaction and clear the hold flag, if
	 * set. We wouldn't want the next user of the buffer to get confused.
	 */
	xfs_trans_del_item(&bip->bli_item);
	bip->bli_flags &= ~XFS_BLI_HOLD;

	/* drop the reference to the bli */
	xfs_buf_item_put(bip);

	bp->b_transp = NULL;
	libxfs_putbuf(bp);
}
609
610 /*
611 * Mark the buffer as not needing to be unlocked when the buf item's
612 * iop_unlock() routine is called. The buffer must already be locked
613 * and associated with the given transaction.
614 */
615 /* ARGSUSED */
void
libxfs_trans_bhold(
	xfs_trans_t		*tp,
	xfs_buf_t		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	/* The buffer must be joined to this transaction. */
	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);

	/* Keep the buffer locked across transaction commit. */
	bip->bli_flags |= XFS_BLI_HOLD;
	trace_xfs_trans_bhold(bip);
}
629
630 /*
631 * Mark a buffer dirty in the transaction.
632 */
void
libxfs_trans_dirty_buf(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);

	/* Mark both the transaction and the log item dirty. */
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags);
}
646
647 /*
648 * This is called to mark bytes first through last inclusive of the given
649 * buffer as needing to be logged when the transaction is committed.
650 * The buffer must already be associated with the given transaction.
651 *
652 * First and last are numbers relative to the beginning of this buffer,
653 * so the first byte in the buffer is numbered 0 regardless of the
654 * value of b_blkno.
655 */
void
libxfs_trans_log_buf(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	uint			first,
	uint			last)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	/* The logged byte range must lie within the buffer. */
	ASSERT(first <= last && last < BBTOB(bp->b_length));

	xfs_trans_dirty_buf(tp, bp);

	trace_xfs_trans_log_buf(bip);
	/* Record first..last in the item's logged-range bitmap. */
	xfs_buf_item_log(bip, first, last);
}
672
/*
 * Invalidate a buffer in this transaction: mark it stale so its
 * contents are never written back, and dirty the transaction so the
 * invalidation takes effect at commit.  Idempotent for buffers
 * already marked stale.
 */
void
libxfs_trans_binval(
	xfs_trans_t		*tp,
	xfs_buf_t		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);

	trace_xfs_trans_binval(bip);

	/* Already invalidated in this transaction: nothing to do. */
	if (bip->bli_flags & XFS_BLI_STALE)
		return;
	XFS_BUF_UNDELAYWRITE(bp);
	xfs_buf_stale(bp);

	bip->bli_flags |= XFS_BLI_STALE;
	bip->bli_flags &= ~XFS_BLI_DIRTY;
	/* A cancelled buffer must not be replayed as an inode buffer. */
	bip->__bli_format.blf_flags &= ~XFS_BLF_INODE_BUF;
	bip->__bli_format.blf_flags |= XFS_BLF_CANCEL;
	set_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags);
	tp->t_flags |= XFS_TRANS_DIRTY;
}
697
698 /*
699 * Mark the buffer as being one which contains newly allocated
700 * inodes. We need to make sure that even if this buffer is
701 * relogged as an 'inode buf' we still recover all of the inode
702 * images in the face of a crash. This works in coordination with
703 * xfs_buf_item_committed() to ensure that the buffer remains in the
704 * AIL at its original location even after it has been relogged.
705 */
706 /* ARGSUSED */
void
libxfs_trans_inode_alloc_buf(
	xfs_trans_t		*tp,
	xfs_buf_t		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	/* Flag the buffer as containing freshly allocated inodes. */
	bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
}
719
720 /*
721 * For userspace, ordered buffers just need to be marked dirty so
722 * the transaction commit will write them and mark them up-to-date.
723 * In essence, they are just like any other logged buffer in userspace.
724 *
725 * If the buffer is already dirty, trigger the "already logged" return condition.
726 */
bool
libxfs_trans_ordered_buf(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	bool			ret;

	/* Return whether the item was dirty before we logged it. */
	ret = test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags);
	/* In userspace, "ordered" degenerates to logging the whole buffer. */
	libxfs_trans_log_buf(tp, bp, 0, bp->b_bcount);
	return ret;
}
739
740 /* end of xfs_trans_buf.c */
741
742 /*
743 * Record the indicated change to the given field for application
744 * to the file system's superblock when the transaction commits.
745 * For now, just store the change in the transaction structure.
746 * Mark the transaction structure to indicate that the superblock
747 * needs to be updated before committing.
748 *
749 * Originally derived from xfs_trans_mod_sb().
750 */
void
libxfs_trans_mod_sb(
	xfs_trans_t		*tp,
	uint			field,
	long			delta)
{
	switch (field) {
	case XFS_TRANS_SB_RES_FDBLOCKS:
		/* Reserved-pool accounting is a no-op in userspace. */
		return;
	case XFS_TRANS_SB_FDBLOCKS:
		if (delta < 0) {
			/* Consuming blocks must stay within the reservation. */
			tp->t_blk_res_used += (uint)-delta;
			if (tp->t_blk_res_used > tp->t_blk_res) {
				fprintf(stderr,
_("Transaction block reservation exceeded! %u > %u\n"),
					tp->t_blk_res_used, tp->t_blk_res);
				ASSERT(0);
			}
		}
		tp->t_fdblocks_delta += delta;
		break;
	case XFS_TRANS_SB_ICOUNT:
		ASSERT(delta > 0);
		tp->t_icount_delta += delta;
		break;
	case XFS_TRANS_SB_IFREE:
		tp->t_ifree_delta += delta;
		break;
	case XFS_TRANS_SB_FREXTENTS:
		tp->t_frextents_delta += delta;
		break;
	default:
		ASSERT(0);
		return;
	}
	/* Deltas are applied to the superblock at commit time. */
	tp->t_flags |= (XFS_TRANS_SB_DIRTY | XFS_TRANS_DIRTY);
}
788
789 static void
790 xfs_inode_item_put(
791 struct xfs_inode_log_item *iip)
792 {
793 struct xfs_inode *ip = iip->ili_inode;
794
795 ip->i_itemp = NULL;
796 kmem_zone_free(xfs_ili_zone, iip);
797 }
798
799
800 /*
801 * Transaction commital code follows (i.e. write to disk in libxfs)
802 *
803 * XXX (dgc): should failure to flush the inode (e.g. due to uncorrected
804 * corruption) result in transaction commit failure w/ EFSCORRUPTED?
805 */
/*
 * Commit-time processing for an inode log item: flush the dirty
 * in-core inode to its on-disk buffer and write the buffer out.
 * The log item is always freed, even on error.
 */
static void
inode_item_done(
	xfs_inode_log_item_t	*iip)
{
	xfs_dinode_t		*dip;
	xfs_inode_t		*ip;
	xfs_mount_t		*mp;
	xfs_buf_t		*bp;
	int			error;

	ip = iip->ili_inode;
	mp = iip->ili_item.li_mountp;
	ASSERT(ip != NULL);

	/* Nothing was logged against the inode: just drop the item. */
	if (!(iip->ili_fields & XFS_ILOG_ALL))
		goto free;

	/*
	 * Get the buffer containing the on-disk inode.
	 */
	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, 0, 0);
	if (error) {
		fprintf(stderr, _("%s: warning - imap_to_bp failed (%d)\n"),
			progname, error);
		goto free;
	}

	/*
	 * Flush the inode and disassociate it from the transaction regardless
	 * of whether the flush succeed or not. If we fail the flush, make sure
	 * we still release the buffer reference we currently hold.
	 */
	error = libxfs_iflush_int(ip, bp);
	bp->b_transp = NULL;	/* remove xact ptr */

	if (error) {
		fprintf(stderr, _("%s: warning - iflush_int failed (%d)\n"),
			progname, error);
		libxfs_putbuf(bp);
		goto free;
	}

	/* Write the buffer; this also drops our reference to it. */
	libxfs_writebuf(bp, 0);
free:
	xfs_inode_item_put(iip);
}
852
/*
 * Commit-time processing for a buf log item: write the buffer out if
 * it was dirtied, free the log item, and drop the buffer lock unless
 * the caller asked to hold it (XFS_BLI_HOLD).
 */
static void
buf_item_done(
	xfs_buf_log_item_t	*bip)
{
	xfs_buf_t		*bp;
	int			hold;
	extern kmem_zone_t	*xfs_buf_item_zone;

	bp = bip->bli_buf;
	ASSERT(bp != NULL);
	bp->b_transp = NULL;			/* remove xact ptr */

	/* Capture the hold state before the bli is freed below. */
	hold = (bip->bli_flags & XFS_BLI_HOLD);
	if (bip->bli_flags & XFS_BLI_DIRTY)
		libxfs_writebuf_int(bp, 0);

	bip->bli_flags &= ~XFS_BLI_HOLD;
	xfs_buf_item_put(bip);
	if (hold)
		return;
	libxfs_putbuf(bp);
}
875
/*
 * Walk the committed transaction's log items, detaching each one and
 * dispatching it to the per-type "done" handler, which performs the
 * actual write-to-disk for this userspace implementation.
 */
static void
trans_committed(
	xfs_trans_t		*tp)
{
	struct xfs_log_item	*lip, *next;

	/* _safe: the handlers free the items as we walk the list. */
	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
		xfs_trans_del_item(lip);

		if (lip->li_type == XFS_LI_BUF)
			buf_item_done((xfs_buf_log_item_t *)lip);
		else if (lip->li_type == XFS_LI_INODE)
			inode_item_done((xfs_inode_log_item_t *)lip);
		else {
			fprintf(stderr, _("%s: unrecognised log item type\n"),
				progname);
			ASSERT(0);
		}
	}
}
896
897 static void
898 buf_item_unlock(
899 xfs_buf_log_item_t *bip)
900 {
901 xfs_buf_t *bp = bip->bli_buf;
902 uint hold;
903
904 /* Clear the buffer's association with this transaction. */
905 bip->bli_buf->b_transp = NULL;
906
907 hold = bip->bli_flags & XFS_BLI_HOLD;
908 bip->bli_flags &= ~XFS_BLI_HOLD;
909 xfs_buf_item_put(bip);
910 if (!hold)
911 libxfs_putbuf(bp);
912 }
913
/* At free/cancel time an inode item only needs its bli freed. */
static void
inode_item_unlock(
	xfs_inode_log_item_t	*iip)
{
	xfs_inode_item_put(iip);
}
920
921 /* Detach and unlock all of the items in a transaction */
/* Detach and unlock all of the items in a transaction */
static void
xfs_trans_free_items(
	struct xfs_trans	*tp)
{
	struct xfs_log_item	*lip, *next;

	/* _safe: the handlers free the items as we walk the list. */
	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
		xfs_trans_del_item(lip);
		if (lip->li_type == XFS_LI_BUF)
			buf_item_unlock((xfs_buf_log_item_t *)lip);
		else if (lip->li_type == XFS_LI_INODE)
			inode_item_unlock((xfs_inode_log_item_t *)lip);
		else {
			fprintf(stderr, _("%s: unrecognised log item type\n"),
				progname);
			ASSERT(0);
		}
	}
}
941
942 /*
943 * Commit the changes represented by this transaction
944 */
/*
 * Commit the changes represented by this transaction: finish any
 * deferred ops (final commit only), apply accumulated superblock
 * deltas, write out all dirty items, and free the transaction.
 * 'regrant' is true when called from libxfs_trans_roll(), in which
 * case deferred ops are carried over to the duplicate transaction
 * instead of being finished here.
 */
static int
__xfs_trans_commit(
	struct xfs_trans	*tp,
	bool			regrant)
{
	struct xfs_sb		*sbp;
	int			error = 0;

	trace_xfs_trans_commit(tp, _RET_IP_);

	if (tp == NULL)
		return 0;

	/*
	 * Finish deferred items on final commit. Only permanent transactions
	 * should ever have deferred ops.
	 */
	WARN_ON_ONCE(!list_empty(&tp->t_dfops) &&
		     !(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
	if (!regrant && (tp->t_flags & XFS_TRANS_PERM_LOG_RES)) {
		/* May roll the transaction, hence the &tp. */
		error = xfs_defer_finish_noroll(&tp);
		if (error)
			goto out_unreserve;
	}

	/* Clean transaction: nothing to write, just tear it down. */
	if (!(tp->t_flags & XFS_TRANS_DIRTY))
		goto out_unreserve;

	/* Apply the deltas accumulated by libxfs_trans_mod_sb(). */
	if (tp->t_flags & XFS_TRANS_SB_DIRTY) {
		sbp = &(tp->t_mountp->m_sb);
		if (tp->t_icount_delta)
			sbp->sb_icount += tp->t_icount_delta;
		if (tp->t_ifree_delta)
			sbp->sb_ifree += tp->t_ifree_delta;
		if (tp->t_fdblocks_delta)
			sbp->sb_fdblocks += tp->t_fdblocks_delta;
		if (tp->t_frextents_delta)
			sbp->sb_frextents += tp->t_frextents_delta;
		xfs_log_sb(tp);
	}

	/* Write out each logged item (the userspace "log"). */
	trans_committed(tp);

	/* That's it for the transaction structure. Free it. */
	xfs_trans_free(tp);
	return 0;

out_unreserve:
	xfs_trans_free_items(tp);
	xfs_trans_free(tp);
	return error;
}
997
/* Public entry point: a final (non-regrant) commit. */
int
libxfs_trans_commit(
	struct xfs_trans	*tp)
{
	return __xfs_trans_commit(tp, false);
}