// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include <linux/timekeeping.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "volumes.h"
#include "dev-replace.h"
#include "qgroup.h"
#include "block-group.h"
#include "space-info.h"
#include "zoned.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "defrag.h"
#include "dir-item.h"
#include "uuid-tree.h"
#include "ioctl.h"
#include "relocation.h"
#include "scrub.h"
static struct kmem_cache *btrfs_trans_handle_cachep;

#define BTRFS_ROOT_TRANS_TAG 0
/*
 * Transaction states and transitions
 *
 * No running transaction (fs tree blocks are not modified)
 * |
 * | To next stage:
 * |  Call start_transaction() variants. Except btrfs_join_transaction_nostart().
 * V
 * Transaction N [[TRANS_STATE_RUNNING]]
 * |
 * | New trans handles can be attached to transaction N by calling all
 * | start_transaction() variants.
 * |
 * | To next stage:
 * |  Call btrfs_commit_transaction() on any trans handle attached to
 * |  transaction N
 * V
 * Transaction N [[TRANS_STATE_COMMIT_PREP]]
 * |
 * | If there are simultaneous calls to btrfs_commit_transaction() one will win
 * | the race and the rest will wait for the winner to commit the transaction.
 * |
 * | The winner will wait for previous running transaction to completely finish
 * | if there is one.
 * |
 * Transaction N [[TRANS_STATE_COMMIT_START]]
 * |
 * | Then one of the following happens:
 * | - Wait for all other trans handle holders to release.
 * |   The btrfs_commit_transaction() caller will do the commit work.
 * | - Wait for current transaction to be committed by others.
 * |   Other btrfs_commit_transaction() caller will do the commit work.
 * |
 * | At this stage, only btrfs_join_transaction*() variants can attach
 * | to this running transaction.
 * | All other variants will wait for current one to finish and attach to
 * | transaction N+1.
 * |
 * | To next stage:
 * |  Caller is chosen to commit transaction N, and all other trans handles
 * |  have been released.
 * V
 * Transaction N [[TRANS_STATE_COMMIT_DOING]]
 * |
 * | The heavy lifting transaction work is started.
 * | From running delayed refs (modifying extent tree) to creating pending
 * | snapshots, running qgroups.
 * | In short, modify supporting trees to reflect modifications of subvolume
 * | trees.
 * |
 * | At this stage, all start_transaction() calls will wait for this
 * | transaction to finish and attach to transaction N+1.
 * |
 * | To next stage:
 * |  Until all supporting trees are updated.
 * V
 * Transaction N [[TRANS_STATE_UNBLOCKED]]
 * |                                                Transaction N+1
 * | All needed trees are modified, thus we only    [[TRANS_STATE_RUNNING]]
 * | need to write them back to disk and update     |
 * | super blocks.                                  |
 * |                                                |
 * | At this stage, new transaction is allowed to   |
 * | start.                                         |
 * | All new start_transaction() calls will be      |
 * | attached to transid N+1.                       |
 * |                                                |
 * | To next stage:                                 |
 * |  Until all tree blocks and super blocks are    |
 * |  written to block devices.                     |
 * V                                                |
 * Transaction N [[TRANS_STATE_COMPLETED]]          V
 *   All tree blocks and super blocks are written.  Transaction N+1
 *   This transaction is finished and all its       [[TRANS_STATE_COMMIT_START]]
 *   data structures will be cleaned up.            | Life goes on
 */
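/*
 * Illustrative sketch, not part of the original file: a typical caller walks
 * the left column of the diagram above. Error handling is abbreviated and the
 * example is compiled out.
 */
#if 0
static int example_transaction_user(struct btrfs_root *root)
{
	/* Reserve metadata space for one item and attach to transaction N. */
	struct btrfs_trans_handle *trans = btrfs_start_transaction(root, 1);

	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/* ... modify btrees through @trans while N is TRANS_STATE_RUNNING ... */

	/*
	 * Release the handle and let another writer (or the transaction
	 * kthread) drive the commit; calling btrfs_commit_transaction(trans)
	 * here instead would walk N through the COMMIT_* states to COMPLETED.
	 */
	return btrfs_end_transaction(trans);
}
#endif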
static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
	[TRANS_STATE_RUNNING]		= 0U,
	[TRANS_STATE_COMMIT_PREP]	= 0U,
	[TRANS_STATE_COMMIT_START]	= (__TRANS_START | __TRANS_ATTACH),
	[TRANS_STATE_COMMIT_DOING]	= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOSTART),
	[TRANS_STATE_UNBLOCKED]		= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK |
					   __TRANS_JOIN_NOSTART),
	[TRANS_STATE_SUPER_COMMITTED]	= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK |
					   __TRANS_JOIN_NOSTART),
	[TRANS_STATE_COMPLETED]		= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK |
					   __TRANS_JOIN_NOSTART),
};
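/*
 * Each entry above lists the start_transaction() types that may NOT attach
 * while the running transaction is in that state; join_transaction() checks
 * this table and returns -EBUSY for a blocked type.
 */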
void btrfs_put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(refcount_read(&transaction->use_count) == 0);
	if (refcount_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		WARN_ON(!RB_EMPTY_ROOT(
				&transaction->delayed_refs.href_root.rb_root));
		WARN_ON(!RB_EMPTY_ROOT(
				&transaction->delayed_refs.dirty_extent_root));
		if (transaction->delayed_refs.pending_csums)
			btrfs_err(transaction->fs_info,
				  "pending csums is %llu",
				  transaction->delayed_refs.pending_csums);
		/*
		 * If any block groups are found in ->deleted_bgs then it's
		 * because the transaction was aborted and a commit did not
		 * happen (things failed before writing the new superblock
		 * and calling btrfs_finish_extent_commit()), so we can not
		 * discard the physical locations of the block groups.
		 */
		while (!list_empty(&transaction->deleted_bgs)) {
			struct btrfs_block_group *cache;

			cache = list_first_entry(&transaction->deleted_bgs,
						 struct btrfs_block_group,
						 bg_list);
			list_del_init(&cache->bg_list);
			btrfs_unfreeze_block_group(cache);
			btrfs_put_block_group(cache);
		}
		WARN_ON(!list_empty(&transaction->dev_update_list));
		kfree(transaction);
	}
}
static noinline void switch_commit_roots(struct btrfs_trans_handle *trans)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root, *tmp;

	/*
	 * At this point no one can be using this transaction to modify any tree
	 * and no one can start another transaction to modify any tree either.
	 */
	ASSERT(cur_trans->state == TRANS_STATE_COMMIT_DOING);

	down_write(&fs_info->commit_root_sem);

	if (test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags))
		fs_info->last_reloc_trans = trans->transid;

	list_for_each_entry_safe(root, tmp, &cur_trans->switch_commits,
				 dirty_list) {
		list_del_init(&root->dirty_list);
		free_extent_buffer(root->commit_root);
		root->commit_root = btrfs_root_node(root);
		extent_io_tree_release(&root->dirty_log_pages);
		btrfs_qgroup_clean_swapped_blocks(root);
	}

	/* We can free old roots now. */
	spin_lock(&cur_trans->dropped_roots_lock);
	while (!list_empty(&cur_trans->dropped_roots)) {
		root = list_first_entry(&cur_trans->dropped_roots,
					struct btrfs_root, root_list);
		list_del_init(&root->root_list);
		spin_unlock(&cur_trans->dropped_roots_lock);
		btrfs_free_log(trans, root);
		btrfs_drop_and_free_fs_root(fs_info, root);
		spin_lock(&cur_trans->dropped_roots_lock);
	}
	spin_unlock(&cur_trans->dropped_roots_lock);

	up_write(&fs_info->commit_root_sem);
}
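/*
 * Handles of the TRANS_EXTWRITERS types (__TRANS_START and __TRANS_ATTACH)
 * come from outside the commit path; btrfs_commit_transaction() waits for
 * this counter to drain before it blocks further joins.
 */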
static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
					 unsigned int type)
{
	if (type & TRANS_EXTWRITERS)
		atomic_inc(&trans->num_extwriters);
}

static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
					 unsigned int type)
{
	if (type & TRANS_EXTWRITERS)
		atomic_dec(&trans->num_extwriters);
}

static inline void extwriter_counter_init(struct btrfs_transaction *trans,
					  unsigned int type)
{
	atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
}

static inline int extwriter_counter_read(struct btrfs_transaction *trans)
{
	return atomic_read(&trans->num_extwriters);
}
/*
 * To be called after doing the chunk btree updates right after allocating a new
 * chunk (after btrfs_chunk_alloc_add_chunk_item() is called), when removing a
 * chunk after all chunk btree updates and after finishing the second phase of
 * chunk allocation (btrfs_create_pending_block_groups()) in case some block
 * group had its chunk item insertion delayed to the second phase.
 */
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	if (!trans->chunk_bytes_reserved)
		return;

	btrfs_block_rsv_release(fs_info, &fs_info->chunk_block_rsv,
				trans->chunk_bytes_reserved, NULL);
	trans->chunk_bytes_reserved = 0;
}
/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_fs_info *fs_info,
				     unsigned int type)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&fs_info->trans_lock);
loop:
	/* The file system has been taken offline. No new transactions. */
	if (BTRFS_FS_ERROR(fs_info)) {
		spin_unlock(&fs_info->trans_lock);
		return -EROFS;
	}

	cur_trans = fs_info->running_transaction;
	if (cur_trans) {
		if (TRANS_ABORTED(cur_trans)) {
			spin_unlock(&fs_info->trans_lock);
			return cur_trans->aborted;
		}
		if (btrfs_blocked_trans_types[cur_trans->state] & type) {
			spin_unlock(&fs_info->trans_lock);
			return -EBUSY;
		}
		refcount_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		extwriter_counter_inc(cur_trans, type);
		spin_unlock(&fs_info->trans_lock);
		btrfs_lockdep_acquire(fs_info, btrfs_trans_num_writers);
		btrfs_lockdep_acquire(fs_info, btrfs_trans_num_extwriters);
		return 0;
	}
	spin_unlock(&fs_info->trans_lock);

	/*
	 * If we are ATTACH or TRANS_JOIN_NOSTART, we just want to catch the
	 * current transaction, and commit it. If there is no transaction, just
	 * return ENOENT.
	 */
	if (type == TRANS_ATTACH || type == TRANS_JOIN_NOSTART)
		return -ENOENT;

	/*
	 * JOIN_NOLOCK only happens during the transaction commit, so
	 * it is impossible that ->running_transaction is NULL
	 */
	BUG_ON(type == TRANS_JOIN_NOLOCK);

	cur_trans = kmalloc(sizeof(*cur_trans), GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;

	btrfs_lockdep_acquire(fs_info, btrfs_trans_num_writers);
	btrfs_lockdep_acquire(fs_info, btrfs_trans_num_extwriters);

	spin_lock(&fs_info->trans_lock);
	if (fs_info->running_transaction) {
		/*
		 * someone started a transaction after we unlocked. Make sure
		 * to redo the checks above
		 */
		btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters);
		btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
		kfree(cur_trans);
		goto loop;
	} else if (BTRFS_FS_ERROR(fs_info)) {
		spin_unlock(&fs_info->trans_lock);
		btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters);
		btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
		kfree(cur_trans);
		return -EROFS;
	}

	cur_trans->fs_info = fs_info;
	atomic_set(&cur_trans->pending_ordered, 0);
	init_waitqueue_head(&cur_trans->pending_wait);
	atomic_set(&cur_trans->num_writers, 1);
	extwriter_counter_init(cur_trans, type);
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	cur_trans->state = TRANS_STATE_RUNNING;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
	 */
	refcount_set(&cur_trans->use_count, 2);
	cur_trans->flags = 0;
	cur_trans->start_time = ktime_get_seconds();

	memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));

	cur_trans->delayed_refs.href_root = RB_ROOT_CACHED;
	cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
	atomic_set(&cur_trans->delayed_refs.num_entries, 0);

	/*
	 * although the tree mod log is per file system and not per transaction,
	 * the log must never go across transaction boundaries.
	 */
	smp_mb();
	if (!list_empty(&fs_info->tree_mod_seq_list))
		WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when creating a fresh transaction\n");
	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
		WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when creating a fresh transaction\n");
	atomic64_set(&fs_info->tree_mod_seq, 0);

	spin_lock_init(&cur_trans->delayed_refs.lock);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	INIT_LIST_HEAD(&cur_trans->dev_update_list);
	INIT_LIST_HEAD(&cur_trans->switch_commits);
	INIT_LIST_HEAD(&cur_trans->dirty_bgs);
	INIT_LIST_HEAD(&cur_trans->io_bgs);
	INIT_LIST_HEAD(&cur_trans->dropped_roots);
	mutex_init(&cur_trans->cache_write_mutex);
	spin_lock_init(&cur_trans->dirty_bgs_lock);
	INIT_LIST_HEAD(&cur_trans->deleted_bgs);
	spin_lock_init(&cur_trans->dropped_roots_lock);
	list_add_tail(&cur_trans->list, &fs_info->trans_list);
	extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
			IO_TREE_TRANS_DIRTY_PAGES);
	extent_io_tree_init(fs_info, &cur_trans->pinned_extents,
			IO_TREE_FS_PINNED_EXTENTS);
	fs_info->generation++;
	cur_trans->transid = fs_info->generation;
	fs_info->running_transaction = cur_trans;
	cur_trans->aborted = 0;
	spin_unlock(&fs_info->trans_lock);

	return 0;
}
/*
 * This does all the record keeping required to make sure that a shareable root
 * is properly recorded in a given transaction. This is required to make sure
 * the old root from before we joined the transaction is deleted when the
 * transaction commits.
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       int force)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	if ((test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
	    root->last_trans < trans->transid) || force) {
		WARN_ON(!force && root->commit_root != root->node);

		/*
		 * see below for IN_TRANS_SETUP usage rules
		 * we have the reloc mutex held now, so there
		 * is only one writer in this function
		 */
		set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);

		/* make sure readers find IN_TRANS_SETUP before
		 * they find our root->last_trans update
		 */
		smp_wmb();

		spin_lock(&fs_info->fs_roots_radix_lock);
		if (root->last_trans == trans->transid && !force) {
			spin_unlock(&fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&fs_info->fs_roots_radix_lock);
		root->last_trans = trans->transid;

		/* this is pretty tricky.  We don't want to
		 * take the relocation lock in btrfs_record_root_in_trans
		 * unless we're really doing the first setup for this root in
		 * this transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we
		 * init the relocation root, otherwise, we trip over warnings
		 * in ctree.c.  The solution used here is to flag ourselves
		 * with root IN_TRANS_SETUP.  When this is 1, we're still
		 * fixing up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans without having to take the
		 * lock.  smp_wmb() makes sure that all the writes above are
		 * done before we pop in the zero below
		 */
		ret = btrfs_init_reloc_root(trans, root);
		smp_mb__before_atomic();
		clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
	}
	return ret;
}
void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;

	/* Add ourselves to the transaction dropped list */
	spin_lock(&cur_trans->dropped_roots_lock);
	list_add_tail(&root->root_list, &cur_trans->dropped_roots);
	spin_unlock(&cur_trans->dropped_roots_lock);

	/* Make sure we don't try to update the root at commit time */
	spin_lock(&fs_info->fs_roots_radix_lock);
	radix_tree_tag_clear(&fs_info->fs_roots_radix,
			     (unsigned long)root->root_key.objectid,
			     BTRFS_ROOT_TRANS_TAG);
	spin_unlock(&fs_info->fs_roots_radix_lock);
}
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		return 0;

	/*
	 * see record_root_in_trans for comments about IN_TRANS_SETUP usage
	 * and barriers
	 */
	smp_rmb();
	if (root->last_trans == trans->transid &&
	    !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
		return 0;

	mutex_lock(&fs_info->reloc_mutex);
	ret = record_root_in_trans(trans, root, 0);
	mutex_unlock(&fs_info->reloc_mutex);

	return ret;
}
static inline int is_transaction_blocked(struct btrfs_transaction *trans)
{
	return (trans->state >= TRANS_STATE_COMMIT_START &&
		trans->state < TRANS_STATE_UNBLOCKED &&
		!TRANS_ABORTED(trans));
}
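/*
 * "Blocked" above means a commit is underway (COMMIT_START or later) but the
 * transaction has not yet reached TRANS_STATE_UNBLOCKED, so new handles of
 * most types must wait; an aborted transaction is never waited on.
 */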
/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_fs_info *fs_info)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&fs_info->trans_lock);
	cur_trans = fs_info->running_transaction;
	if (cur_trans && is_transaction_blocked(cur_trans)) {
		refcount_inc(&cur_trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
		wait_event(fs_info->transaction_wait,
			   cur_trans->state >= TRANS_STATE_UNBLOCKED ||
			   TRANS_ABORTED(cur_trans));
		btrfs_put_transaction(cur_trans);
	} else {
		spin_unlock(&fs_info->trans_lock);
	}
}
static int may_wait_transaction(struct btrfs_fs_info *fs_info, int type)
{
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return 0;

	if (type == TRANS_START)
		return 1;

	return 0;
}
static inline bool need_reserve_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!fs_info->reloc_ctl ||
	    !test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
	    root->reloc_root)
		return false;

	return true;
}
static int btrfs_reserve_trans_metadata(struct btrfs_fs_info *fs_info,
					enum btrfs_reserve_flush_enum flush,
					u64 num_bytes,
					u64 *delayed_refs_bytes)
{
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_space_info *si = fs_info->trans_block_rsv.space_info;
	u64 extra_delayed_refs_bytes = 0;
	u64 bytes;
	int ret;

	/*
	 * If there's a gap between the size of the delayed refs reserve and
	 * its reserved space, then some tasks have added delayed refs or bumped
	 * its size otherwise (due to block group creation or removal, or block
	 * group item update). Also try to allocate that gap in order to prevent
	 * using (and possibly abusing) the global reserve when committing the
	 * transaction.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_ALL &&
	    !btrfs_block_rsv_full(delayed_refs_rsv)) {
		spin_lock(&delayed_refs_rsv->lock);
		if (delayed_refs_rsv->size > delayed_refs_rsv->reserved)
			extra_delayed_refs_bytes = delayed_refs_rsv->size -
				delayed_refs_rsv->reserved;
		spin_unlock(&delayed_refs_rsv->lock);
	}

	bytes = num_bytes + *delayed_refs_bytes + extra_delayed_refs_bytes;

	/*
	 * We want to reserve all the bytes we may need all at once, so we only
	 * do 1 enospc flushing cycle per transaction start.
	 */
	ret = btrfs_reserve_metadata_bytes(fs_info, si, bytes, flush);
	if (ret == 0) {
		if (extra_delayed_refs_bytes > 0)
			btrfs_migrate_to_delayed_refs_rsv(fs_info,
							  extra_delayed_refs_bytes);
		return 0;
	}

	if (extra_delayed_refs_bytes > 0) {
		bytes -= extra_delayed_refs_bytes;
		ret = btrfs_reserve_metadata_bytes(fs_info, si, bytes, flush);
		if (ret == 0)
			return 0;
	}

	/*
	 * If we are an emergency flush, which can steal from the global block
	 * reserve, then attempt to not reserve space for the delayed refs, as
	 * we will consume space for them from the global block reserve.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL) {
		bytes -= *delayed_refs_bytes;
		*delayed_refs_bytes = 0;
		ret = btrfs_reserve_metadata_bytes(fs_info, si, bytes, flush);
	}

	return ret;
}
static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, unsigned int num_items,
		  unsigned int type, enum btrfs_reserve_flush_enum flush,
		  bool enforce_qgroups)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_block_rsv *trans_rsv = &fs_info->trans_block_rsv;
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	u64 qgroup_reserved = 0;
	u64 delayed_refs_bytes = 0;
	bool reloc_reserved = false;
	bool do_chunk_alloc = false;
	int ret;

	if (BTRFS_FS_ERROR(fs_info))
		return ERR_PTR(-EROFS);

	if (current->journal_info) {
		WARN_ON(type & TRANS_EXTWRITERS);
		h = current->journal_info;
		refcount_inc(&h->use_count);
		WARN_ON(refcount_read(&h->use_count) > 2);
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items && root != fs_info->chunk_root) {
		qgroup_reserved = num_items * fs_info->nodesize;
		/*
		 * Use prealloc for now, as there might be a currently running
		 * transaction that could free this reserved space prematurely
		 * by committing.
		 */
		ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_reserved,
							 enforce_qgroups, false);
		if (ret)
			return ERR_PTR(ret);

		num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items);
		/*
		 * If we plan to insert/update/delete "num_items" from a btree,
		 * we will also generate delayed refs for extent buffers in the
		 * respective btree paths, so reserve space for the delayed refs
		 * that will be generated by the caller as it modifies btrees.
		 * Try to reserve them to avoid excessive use of the global
		 * block reserve.
		 */
		delayed_refs_bytes = btrfs_calc_delayed_ref_bytes(fs_info, num_items);

		/*
		 * Do the reservation for the relocation root creation
		 */
		if (need_reserve_reloc_root(root)) {
			num_bytes += fs_info->nodesize;
			reloc_reserved = true;
		}

		ret = btrfs_reserve_trans_metadata(fs_info, flush, num_bytes,
						   &delayed_refs_bytes);
		if (ret)
			goto reserve_fail;

		btrfs_block_rsv_add_bytes(trans_rsv, num_bytes, true);

		if (trans_rsv->space_info->force_alloc)
			do_chunk_alloc = true;
	} else if (num_items == 0 && flush == BTRFS_RESERVE_FLUSH_ALL &&
		   !btrfs_block_rsv_full(delayed_refs_rsv)) {
		/*
		 * Some people call with btrfs_start_transaction(root, 0)
		 * because they can be throttled, but have some other mechanism
		 * for reserving space.  We still want these guys to refill the
		 * delayed block_rsv so just add 1 items worth of reservation
		 * here.
		 */
		ret = btrfs_delayed_refs_rsv_refill(fs_info, flush);
		if (ret)
			goto reserve_fail;
	}
again:
	h = kmem_cache_zalloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	/*
	 * If we are JOIN_NOLOCK we're already committing a transaction and
	 * waiting on this guy, so we don't need to do the sb_start_intwrite
	 * because we're already holding a ref.  We need this because we could
	 * have raced in and did an fsync() on a file which can kick a commit
	 * and then we deadlock with somebody doing a freeze.
	 *
	 * If we are ATTACH, it means we just want to catch the current
	 * transaction and commit it, so we needn't do sb_start_intwrite().
	 */
	if (type & __TRANS_FREEZABLE)
		sb_start_intwrite(fs_info->sb);

	if (may_wait_transaction(fs_info, type))
		wait_current_trans(fs_info);

	do {
		ret = join_transaction(fs_info, type);
		if (ret == -EBUSY) {
			wait_current_trans(fs_info);
			if (unlikely(type == TRANS_ATTACH ||
				     type == TRANS_JOIN_NOSTART))
				ret = -ENOENT;
		}
	} while (ret == -EBUSY);

	if (ret < 0)
		goto join_fail;

	cur_trans = fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	refcount_set(&h->use_count, 1);
	h->fs_info = root->fs_info;

	h->type = type;
	INIT_LIST_HEAD(&h->new_bgs);
	btrfs_init_metadata_block_rsv(fs_info, &h->delayed_rsv, BTRFS_BLOCK_RSV_DELOPS);

	smp_mb();
	if (cur_trans->state >= TRANS_STATE_COMMIT_START &&
	    may_wait_transaction(fs_info, type)) {
		current->journal_info = h;
		btrfs_commit_transaction(h);
		goto again;
	}

	if (num_bytes) {
		trace_btrfs_space_reservation(fs_info, "transaction",
					      h->transid, num_bytes, 1);
		h->block_rsv = trans_rsv;
		h->bytes_reserved = num_bytes;
		if (delayed_refs_bytes > 0) {
			trace_btrfs_space_reservation(fs_info,
						      "local_delayed_refs_rsv",
						      h->transid,
						      delayed_refs_bytes, 1);
			h->delayed_refs_bytes_reserved = delayed_refs_bytes;
			btrfs_block_rsv_add_bytes(&h->delayed_rsv, delayed_refs_bytes, true);
			delayed_refs_bytes = 0;
		}
		h->reloc_reserved = reloc_reserved;
	}

got_it:
	if (!current->journal_info)
		current->journal_info = h;

	/*
	 * Now that we have found a transaction to be a part of, convert the
	 * qgroup reservation from prealloc to pertrans. A different transaction
	 * can't race in and free our pertrans out from under us.
	 */
	if (qgroup_reserved)
		btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);

	/*
	 * If the space_info is marked ALLOC_FORCE then we'll get upgraded to
	 * ALLOC_FORCE the first run through, and then we won't allocate for
	 * anybody else who races in later.  We don't care about the return
	 * value here.
	 */
	if (do_chunk_alloc && num_bytes) {
		u64 flags = h->block_rsv->space_info->flags;

		btrfs_chunk_alloc(h, btrfs_get_alloc_profile(fs_info, flags),
				  CHUNK_ALLOC_NO_FORCE);
	}

	/*
	 * btrfs_record_root_in_trans() needs to alloc new extents, and may
	 * call btrfs_join_transaction() while we're also starting a
	 * transaction.
	 *
	 * Thus it needs to be called after current->journal_info is initialized,
	 * or we can deadlock.
	 */
	ret = btrfs_record_root_in_trans(h, root);
	if (ret) {
		/*
		 * The transaction handle is fully initialized and linked with
		 * other structures so it needs to be ended in case of errors,
		 * not just freed.
		 */
		btrfs_end_transaction(h);
		return ERR_PTR(ret);
	}

	return h;

join_fail:
	if (type & __TRANS_FREEZABLE)
		sb_end_intwrite(fs_info->sb);
	kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
	if (num_bytes)
		btrfs_block_rsv_release(fs_info, trans_rsv, num_bytes, NULL);
	if (delayed_refs_bytes)
		btrfs_space_info_free_bytes_may_use(fs_info, trans_rsv->space_info,
						    delayed_refs_bytes);
reserve_fail:
	btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
	return ERR_PTR(ret);
}
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   unsigned int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_ALL, true);
}
struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
					struct btrfs_root *root,
					unsigned int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_ALL_STEAL, false);
}
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN, BTRFS_RESERVE_NO_FLUSH,
				 true);
}
struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK,
				 BTRFS_RESERVE_NO_FLUSH, true);
}
/*
 * Similar to regular join but it never starts a transaction when none is
 * running or when there's a running one at a state >= TRANS_STATE_UNBLOCKED.
 * This is similar to btrfs_attach_transaction() but it allows the join to
 * happen if the transaction commit already started but it's not yet in the
 * "doing" phase (the state is < TRANS_STATE_COMMIT_DOING).
 */
struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOSTART,
				 BTRFS_RESERVE_NO_FLUSH, true);
}
/*
 * Catch the running transaction.
 *
 * It is used when we want to commit the current transaction, but
 * don't want to start a new one.
 *
 * Note: If this function returns -ENOENT, it just means there is no
 * running transaction. But it is possible that the inactive transaction
 * is still in the memory, not fully on disk. If you hope there is no
 * inactive transaction in the fs when -ENOENT is returned, you should
 * invoke
 *     btrfs_attach_transaction_barrier()
 */
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_ATTACH,
				 BTRFS_RESERVE_NO_FLUSH, true);
}
/*
 * Catch the running transaction.
 *
 * It is similar to the above function, the difference is this one
 * will wait for all the inactive transactions until they fully
 * complete.
 */
struct btrfs_trans_handle *
btrfs_attach_transaction_barrier(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	trans = start_transaction(root, 0, TRANS_ATTACH,
				  BTRFS_RESERVE_NO_FLUSH, true);
	if (trans == ERR_PTR(-ENOENT)) {
		int ret;

		ret = btrfs_wait_for_commit(root->fs_info, 0);
		if (ret)
			return ERR_PTR(ret);
	}

	return trans;
}
/* Wait for a transaction commit to reach at least the given state. */
static noinline void wait_for_commit(struct btrfs_transaction *commit,
				     const enum btrfs_trans_state min_state)
{
	struct btrfs_fs_info *fs_info = commit->fs_info;
	u64 transid = commit->transid;
	bool put = false;

	/*
	 * At the moment this function is called with min_state either being
	 * TRANS_STATE_COMPLETED or TRANS_STATE_SUPER_COMMITTED.
	 */
	if (min_state == TRANS_STATE_COMPLETED)
		btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED);
	else
		btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);

	while (1) {
		wait_event(commit->commit_wait, commit->state >= min_state);
		if (put)
			btrfs_put_transaction(commit);

		if (min_state < TRANS_STATE_COMPLETED)
			break;

		/*
		 * A transaction isn't really completed until all of the
		 * previous transactions are completed, but with fsync we can
		 * end up with SUPER_COMMITTED transactions before a COMPLETED
		 * transaction. Wait for those.
		 */

		spin_lock(&fs_info->trans_lock);
		commit = list_first_entry_or_null(&fs_info->trans_list,
						  struct btrfs_transaction,
						  list);
		if (!commit || commit->transid > transid) {
			spin_unlock(&fs_info->trans_lock);
			break;
		}
		refcount_inc(&commit->use_count);
		put = true;
		spin_unlock(&fs_info->trans_lock);
	}
}
int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret = 0;

	if (transid) {
		if (transid <= fs_info->last_trans_committed)
			goto out;

		/* find specified transaction */
		spin_lock(&fs_info->trans_lock);
		list_for_each_entry(t, &fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				refcount_inc(&cur_trans->use_count);
				ret = 0;
				break;
			}
			if (t->transid > transid) {
				ret = 0;
				break;
			}
		}
		spin_unlock(&fs_info->trans_lock);

		/*
		 * The specified transaction doesn't exist, or we
		 * raced with btrfs_commit_transaction
		 */
		if (!cur_trans) {
			if (transid > fs_info->last_trans_committed)
				ret = -EINVAL;
			goto out;
		}
	} else {
		/* find newest transaction that is committing | committed */
		spin_lock(&fs_info->trans_lock);
		list_for_each_entry_reverse(t, &fs_info->trans_list,
					    list) {
			if (t->state >= TRANS_STATE_COMMIT_START) {
				if (t->state == TRANS_STATE_COMPLETED)
					break;
				cur_trans = t;
				refcount_inc(&cur_trans->use_count);
				break;
			}
		}
		spin_unlock(&fs_info->trans_lock);
		if (!cur_trans)
			goto out;  /* nothing committing|committed */
	}

	wait_for_commit(cur_trans, TRANS_STATE_COMPLETED);
	ret = cur_trans->aborted;
	btrfs_put_transaction(cur_trans);
out:
	return ret;
}
void btrfs_throttle(struct btrfs_fs_info *fs_info)
{
	wait_current_trans(fs_info);
}
bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
{
	struct btrfs_transaction *cur_trans = trans->transaction;

	if (cur_trans->state >= TRANS_STATE_COMMIT_START ||
	    test_bit(BTRFS_DELAYED_REFS_FLUSHING, &cur_trans->delayed_refs.flags))
		return true;

	if (btrfs_check_space_for_delayed_refs(trans->fs_info))
		return true;

	return !!btrfs_block_rsv_check(&trans->fs_info->global_block_rsv, 50);
}
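/*
 * Illustrative sketch, not part of the original file: long-running operations
 * poll the helper above and split their work across several transactions.
 * process_one_item() is a hypothetical callback standing in for real work
 * that returns > 0 while items remain. Compiled out.
 */
#if 0
static int example_batched_work(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans = btrfs_start_transaction(root, 1);

	if (IS_ERR(trans))
		return PTR_ERR(trans);

	while (process_one_item(trans) > 0) {
		if (btrfs_should_end_transaction(trans)) {
			/* Release and reopen so a pending commit can proceed. */
			btrfs_end_transaction(trans);
			trans = btrfs_start_transaction(root, 1);
			if (IS_ERR(trans))
				return PTR_ERR(trans);
		}
	}
	return btrfs_end_transaction(trans);
}
#endif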
static void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	if (!trans->block_rsv) {
		ASSERT(!trans->bytes_reserved);
		ASSERT(!trans->delayed_refs_bytes_reserved);
		return;
	}

	if (!trans->bytes_reserved) {
		ASSERT(!trans->delayed_refs_bytes_reserved);
		return;
	}

	ASSERT(trans->block_rsv == &fs_info->trans_block_rsv);
	trace_btrfs_space_reservation(fs_info, "transaction",
				      trans->transid, trans->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, trans->block_rsv,
				trans->bytes_reserved, NULL);
	trans->bytes_reserved = 0;

	if (!trans->delayed_refs_bytes_reserved)
		return;

	trace_btrfs_space_reservation(fs_info, "local_delayed_refs_rsv",
				      trans->transid,
				      trans->delayed_refs_bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, &trans->delayed_rsv,
				trans->delayed_refs_bytes_reserved, NULL);
	trans->delayed_refs_bytes_reserved = 0;
}
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   int throttle)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;
	int err = 0;

	if (refcount_read(&trans->use_count) > 1) {
		refcount_dec(&trans->use_count);
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	btrfs_trans_release_metadata(trans);
	trans->block_rsv = NULL;

	btrfs_create_pending_block_groups(trans);

	btrfs_trans_release_chunk_metadata(trans);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(info->sb);

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);
	extwriter_counter_dec(cur_trans, trans->type);

	cond_wake_up(&cur_trans->writer_wait);

	btrfs_lockdep_release(info, btrfs_trans_num_extwriters);
	btrfs_lockdep_release(info, btrfs_trans_num_writers);

	btrfs_put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	if (throttle)
		btrfs_run_delayed_iputs(info);

	if (TRANS_ABORTED(trans) || BTRFS_FS_ERROR(info)) {
		wake_up_process(info->transaction_kthread);
		if (TRANS_ABORTED(trans))
			err = trans->aborted;
		else
			err = -EROFS;
	}

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
	return err;
}
int btrfs_end_transaction(struct btrfs_trans_handle *trans)
{
	return __btrfs_end_transaction(trans, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans)
{
	return __btrfs_end_transaction(trans, 1);
}
/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	while (find_first_extent_bit(dirty_pages, start, &start, &end,
				     mark, &cached_state)) {
		bool wait_writeback = false;

		err = convert_extent_bit(dirty_pages, start, end,
					 EXTENT_NEED_WAIT,
					 mark, &cached_state);
		/*
		 * convert_extent_bit can return -ENOMEM, which is most of the
		 * time a temporary error. So when it happens, ignore the error
		 * and wait for writeback of this range to finish - because we
		 * failed to set the bit EXTENT_NEED_WAIT for the range, a call
		 * to __btrfs_wait_marked_extents() would not know that
		 * writeback for this range started and therefore wouldn't
		 * wait for it to finish - we don't want to commit a
		 * superblock that points to btree nodes/leafs for which
		 * writeback hasn't finished yet (and without errors).
		 * We cleanup any entries left in the io tree when committing
		 * the transaction (through extent_io_tree_release()).
		 */
		if (err == -ENOMEM) {
			err = 0;
			wait_writeback = true;
		}
		if (!err)
			err = filemap_fdatawrite_range(mapping, start, end);
		if (err)
			werr = err;
		else if (wait_writeback)
			werr = filemap_fdatawait_range(mapping, start, end);
		free_extent_state(cached_state);
		cached_state = NULL;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}
/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
				       struct extent_io_tree *dirty_pages)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	while (find_first_extent_bit(dirty_pages, start, &start, &end,
				     EXTENT_NEED_WAIT, &cached_state)) {
		/*
		 * Ignore -ENOMEM errors returned by clear_extent_bit().
		 * When committing the transaction, we'll remove any entries
		 * left in the io tree. For a log commit, we don't remove them
		 * after committing the log because the tree can be accessed
		 * concurrently - we do it only at transaction commit time when
		 * it's safe to do it (through extent_io_tree_release()).
		 */
		err = clear_extent_bit(dirty_pages, start, end,
				       EXTENT_NEED_WAIT, &cached_state);
		if (err == -ENOMEM)
			err = 0;
		if (!err)
			err = filemap_fdatawait_range(mapping, start, end);
		if (err)
			werr = err;
		free_extent_state(cached_state);
		cached_state = NULL;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}
*fs_info
,
1250 struct extent_io_tree
*dirty_pages
)
1252 bool errors
= false;
1255 err
= __btrfs_wait_marked_extents(fs_info
, dirty_pages
);
1256 if (test_and_clear_bit(BTRFS_FS_BTREE_ERR
, &fs_info
->flags
))
1264 int btrfs_wait_tree_log_extents(struct btrfs_root
*log_root
, int mark
)
1266 struct btrfs_fs_info
*fs_info
= log_root
->fs_info
;
1267 struct extent_io_tree
*dirty_pages
= &log_root
->dirty_log_pages
;
1268 bool errors
= false;
1271 ASSERT(log_root
->root_key
.objectid
== BTRFS_TREE_LOG_OBJECTID
);
1273 err
= __btrfs_wait_marked_extents(fs_info
, dirty_pages
);
1274 if ((mark
& EXTENT_DIRTY
) &&
1275 test_and_clear_bit(BTRFS_FS_LOG1_ERR
, &fs_info
->flags
))
1278 if ((mark
& EXTENT_NEW
) &&
1279 test_and_clear_bit(BTRFS_FS_LOG2_ERR
, &fs_info
->flags
))
/*
 * When btree blocks are allocated the corresponding extents are marked dirty.
 * This function ensures such extents are persisted on disk for transaction or
 * log commit.
 *
 * @trans: transaction whose dirty pages we'd like to write
 */
static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans)
{
	int ret;
	int ret2;
	struct extent_io_tree *dirty_pages = &trans->transaction->dirty_pages;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct blk_plug plug;

	blk_start_plug(&plug);
	ret = btrfs_write_marked_extents(fs_info, dirty_pages, EXTENT_DIRTY);
	blk_finish_plug(&plug);
	ret2 = btrfs_wait_extents(fs_info, dirty_pages);

	extent_io_tree_release(&trans->transaction->dirty_pages);

	if (ret)
		return ret;
	else if (ret2)
		return ret2;
	else
		return 0;
}
/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *tree_root = fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		if (ret)
			return ret;

		old_root_used = btrfs_root_used(&root->root_item);
	}

	return 0;
}
/*
 * update all the cowonly tree roots on disk
 *
 * The error handling in this function may not be obvious. Any of the
 * failures will cause the file system to go offline. We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
	struct list_head *io_bgs = &trans->transaction->io_bgs;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	/*
	 * At this point no one can be using this transaction to modify any tree
	 * and no one can start another transaction to modify any tree either.
	 */
	ASSERT(trans->transaction->state == TRANS_STATE_COMMIT_DOING);

	eb = btrfs_lock_root_node(fs_info->tree_root);
	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
			      0, &eb, BTRFS_NESTING_COW);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	if (ret)
		return ret;

	ret = btrfs_run_dev_stats(trans);
	if (ret)
		return ret;
	ret = btrfs_run_dev_replace(trans);
	if (ret)
		return ret;
	ret = btrfs_run_qgroups(trans);
	if (ret)
		return ret;

	ret = btrfs_setup_space_cache(trans);
	if (ret)
		return ret;

again:
	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		struct btrfs_root *root;
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);
		clear_bit(BTRFS_ROOT_DIRTY, &root->state);

		list_add_tail(&root->dirty_list,
			      &trans->transaction->switch_commits);
		ret = update_cowonly_root(trans, root);
		if (ret)
			return ret;
	}

	/* Now flush any delayed refs generated by updating all of the roots */
	ret = btrfs_run_delayed_refs(trans, U64_MAX);
	if (ret)
		return ret;

	while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
		ret = btrfs_write_dirty_block_groups(trans);
		if (ret)
			return ret;

		/*
		 * We're writing the dirty block groups, which could generate
		 * delayed refs, which could generate more dirty block groups,
		 * so we want to keep this flushing in this loop to make sure
		 * everything gets run.
		 */
		ret = btrfs_run_delayed_refs(trans, U64_MAX);
		if (ret)
			return ret;
	}

	if (!list_empty(&fs_info->dirty_cowonly_roots))
		goto again;

	/* Update dev-replace pointer once everything is committed */
	fs_info->dev_replace.committed_cursor_left =
		fs_info->dev_replace.cursor_left_last_write_of_item;

	return 0;
}
/*
 * If we had a pending drop we need to see if there are any others left in our
 * dead roots list, and if not clear our bit and wake any waiters.
 */
void btrfs_maybe_wake_unfinished_drop(struct btrfs_fs_info *fs_info)
{
	/*
	 * We put the drop in progress roots at the front of the list, so if the
	 * first entry doesn't have UNFINISHED_DROP set we can wake everybody
	 * up.
	 */
	spin_lock(&fs_info->trans_lock);
	if (!list_empty(&fs_info->dead_roots)) {
		struct btrfs_root *root = list_first_entry(&fs_info->dead_roots,
							   struct btrfs_root,
							   root_list);
		if (test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state)) {
			spin_unlock(&fs_info->trans_lock);
			return;
		}
	}
	spin_unlock(&fs_info->trans_lock);

	btrfs_wake_unfinished_drop(fs_info);
}
/*
 * dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
void btrfs_add_dead_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	if (list_empty(&root->root_list)) {
		btrfs_grab_root(root);

		/* We want to process the partially complete drops first. */
		if (test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state))
			list_add(&root->root_list, &fs_info->dead_roots);
		else
			list_add_tail(&root->root_list, &fs_info->dead_roots);
	}
	spin_unlock(&fs_info->trans_lock);
}
/*
 * Update each subvolume root and its relocation root, if it exists, in the tree
 * of tree roots. Also free log roots if they exist.
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *gang[8];
	int i;
	int ret;

	/*
	 * At this point no one can be using this transaction to modify any tree
	 * and no one can start another transaction to modify any tree either.
	 */
	ASSERT(trans->transaction->state == TRANS_STATE_COMMIT_DOING);

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			struct btrfs_root *root = gang[i];
			int ret2;

			/*
			 * At this point we can neither have tasks logging inodes
			 * from a root nor trying to commit a log tree.
			 */
			ASSERT(atomic_read(&root->log_writers) == 0);
			ASSERT(atomic_read(&root->log_commit[0]) == 0);
			ASSERT(atomic_read(&root->log_commit[1]) == 0);

			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			ret2 = btrfs_update_reloc_root(trans, root);
			if (ret2)
				return ret2;

			/* see comments in should_cow_block() */
			clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
			smp_mb__after_atomic();

			if (root->commit_root != root->node) {
				list_add_tail(&root->dirty_list,
					&trans->transaction->switch_commits);
				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			ret2 = btrfs_update_root(trans, fs_info->tree_root,
						 &root->root_key,
						 &root->root_item);
			if (ret2)
				return ret2;
			spin_lock(&fs_info->fs_roots_radix_lock);
			btrfs_qgroup_free_meta_all_pertrans(root);
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return 0;
}
/*
 * defrag a given btree.
 * Every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;

	if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
		return 0;

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}

		ret = btrfs_defrag_leaves(trans, root);

		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty(info);
		cond_resched();

		if (btrfs_fs_closing(info) || ret != -EAGAIN)
			break;

		if (btrfs_defrag_cancelled(info)) {
			btrfs_debug(info, "defrag_root cancelled");
			ret = -EAGAIN;
			break;
		}
	}
	clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
	return ret;
}
/*
 * Do all special snapshot related qgroup dirty hack.
 *
 * Will do all needed qgroup inherit and dirty hack like switch commit
 * roots inside one transaction and write all btree into disk, to make
 * qgroups work.
 */
static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_root *src,
				   struct btrfs_root *parent,
				   struct btrfs_qgroup_inherit *inherit,
				   u64 dst_objectid)
{
	struct btrfs_fs_info *fs_info = src->fs_info;
	int ret;

	/*
	 * Save some performance in the case that qgroups are not enabled. If
	 * this check races with the ioctl, rescan will kick in anyway.
	 */
	if (!btrfs_qgroup_full_accounting(fs_info))
		return 0;

	/*
	 * Ensure dirty @src will be committed.  Or, after coming
	 * commit_fs_roots() and switch_commit_roots(), any dirty but not
	 * recorded root will never be updated again, causing an outdated root
	 * item.
	 */
	ret = record_root_in_trans(trans, src, 1);
	if (ret)
		return ret;

	/*
	 * btrfs_qgroup_inherit relies on a consistent view of the usage for the
	 * src root, so we must run the delayed refs here.
	 *
	 * However this isn't particularly fool proof, because there's no
	 * synchronization keeping us from changing the tree after this point
	 * before we do the qgroup_inherit, or even from making changes while
	 * we're doing the qgroup_inherit.  But that's a problem for the future,
	 * for now flush the delayed refs to narrow the race window where the
	 * qgroup counters could end up wrong.
	 */
	ret = btrfs_run_delayed_refs(trans, U64_MAX);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	ret = commit_fs_roots(trans);
	if (ret)
		goto out;
	ret = btrfs_qgroup_account_extents(trans);
	if (ret < 0)
		goto out;

	/* Now qgroup are all updated, we can inherit it to new qgroups */
	ret = btrfs_qgroup_inherit(trans, src->root_key.objectid, dst_objectid,
				   parent->root_key.objectid, inherit);
	if (ret < 0)
		goto out;

	/*
	 * Now we do a simplified commit transaction, which will:
	 * 1) commit all subvolume and extent tree
	 *    To ensure all subvolume and extent tree have a valid
	 *    commit_root to accounting later insert_dir_item()
	 * 2) write all btree blocks onto disk
	 *    This is to make sure later btree modification will be cowed
	 *    Or commit_root can be populated and cause wrong qgroup numbers
	 * In this simplified commit, we don't really care about other trees
	 * like chunk and root tree, as they won't affect qgroup.
	 * And we don't write super to avoid half committed status.
	 */
	ret = commit_cowonly_roots(trans);
	if (ret)
		goto out;
	switch_commit_roots(trans);
	ret = btrfs_write_and_wait_transaction(trans);
	if (ret)
		btrfs_handle_fs_error(fs_info, ret,
			"Error while writing out transaction for qgroup");

out:
	/*
	 * Force parent root to be updated, as we recorded it before so its
	 * last_trans == cur_transid.
	 * Or it won't be committed again onto disk after later
	 * insert_dir_item()
	 */
	if (!ret)
		ret = record_root_in_trans(trans, parent, 1);
	return ret;
}
/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation.
 *
 * Note:
 * If an error happens that may affect the commitment of the current
 * transaction, we should return the error number. If the error only affects
 * the creation of the pending snapshots, just return 0.
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_pending_snapshot *pending)
{

	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct btrfs_block_rsv *rsv;
	struct inode *parent_inode = pending->dir;
	struct btrfs_path *path;
	struct btrfs_dir_item *dir_item;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	struct timespec64 cur_time;
	int ret = 0;
	u64 to_reserve = 0;
	u64 index = 0;
	u64 objectid;
	u64 root_flags;
	unsigned int nofs_flags;
	struct fscrypt_name fname;

	ASSERT(pending->path);
	path = pending->path;

	ASSERT(pending->root_item);
	new_root_item = pending->root_item;

	/*
	 * We're inside a transaction and must make sure that any potential
	 * allocations with GFP_KERNEL in fscrypt won't recurse back to
	 * filesystem.
	 */
	nofs_flags = memalloc_nofs_save();
	pending->error = fscrypt_setup_filename(parent_inode,
						&pending->dentry->d_name, 0,
						&fname);
	memalloc_nofs_restore(nofs_flags);
	if (pending->error)
		goto free_pending;

	pending->error = btrfs_get_free_objectid(tree_root, &objectid);
	if (pending->error)
		goto free_fname;

	/*
	 * Make qgroup to skip current new snapshot's qgroupid, as it is
	 * accounted by later btrfs_qgroup_inherit().
	 */
	btrfs_set_skip_qgroup(trans, objectid);

	btrfs_reloc_pre_snapshot(pending, &to_reserve);

	if (to_reserve > 0) {
		pending->error = btrfs_block_rsv_add(fs_info,
						     &pending->block_rsv,
						     to_reserve,
						     BTRFS_RESERVE_NO_FLUSH);
		if (pending->error)
			goto clear_skip_qgroup;
	}

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	rsv = trans->block_rsv;
	trans->block_rsv = &pending->block_rsv;
	trans->bytes_reserved = trans->block_rsv->reserved;
	trace_btrfs_space_reservation(fs_info, "transaction",
				      trans->transid,
				      trans->bytes_reserved, 1);
	parent_root = BTRFS_I(parent_inode)->root;
	ret = record_root_in_trans(trans, parent_root, 0);
	if (ret)
		goto fail;
	cur_time = current_time(parent_inode);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(BTRFS_I(parent_inode), &index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	/* check if there is a file/dir which has the same name. */
	dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
					 btrfs_ino(BTRFS_I(parent_inode)),
					 &fname.disk_name, 0);
	if (dir_item != NULL && !IS_ERR(dir_item)) {
		pending->error = -EEXIST;
		goto dir_item_existed;
	} else if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	btrfs_release_path(path);

	ret = btrfs_create_qgroup(trans, objectid);
	if (ret && ret != -EEXIST) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	/*
	 * pull in the delayed directory update
	 * and the delayed inode item
	 * otherwise we corrupt the FS during
	 * snapshot
	 */
	ret = btrfs_run_delayed_items(trans);
	if (ret) {	/* Transaction aborted */
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = record_root_in_trans(trans, root, 0);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
	btrfs_check_and_init_root_item(new_root_item);

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	btrfs_set_root_generation_v2(new_root_item,
			trans->transid);
	generate_random_guid(new_root_item->uuid);
	memcpy(new_root_item->parent_uuid, root->root_item.uuid,
			BTRFS_UUID_SIZE);
	if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
		memset(new_root_item->received_uuid, 0,
		       sizeof(new_root_item->received_uuid));
		memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
		memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
		btrfs_set_root_stransid(new_root_item, 0);
		btrfs_set_root_rtransid(new_root_item, 0);
	}
	btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec);
	btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec);
	btrfs_set_root_otransid(new_root_item, trans->transid);

	old = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, old, NULL, 0, &old,
			      BTRFS_NESTING_COW);
	if (ret) {
		btrfs_tree_unlock(old);
		free_extent_buffer(old);
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
	/* clean up in any case */
	btrfs_tree_unlock(old);
	free_extent_buffer(old);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	/* see comments in should_cow_block() */
	set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
	smp_wmb();

	btrfs_set_root_node(new_root_item, tmp);
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	/*
	 * insert root back/forward references
	 */
	ret = btrfs_add_root_ref(trans, objectid,
				 parent_root->root_key.objectid,
				 btrfs_ino(BTRFS_I(parent_inode)), index,
				 &fname.disk_name);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	key.offset = (u64)-1;
	pending->snap = btrfs_get_new_fs_root(fs_info, objectid, pending->anon_dev);
	if (IS_ERR(pending->snap)) {
		ret = PTR_ERR(pending->snap);
		pending->snap = NULL;
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_reloc_post_snapshot(trans, pending);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	/*
	 * Do special qgroup accounting for snapshot, as we do some qgroup
	 * snapshot hack to do fast snapshot.
	 * To co-operate with that hack, we do hack again.
	 * Or snapshot will be greatly slowed down by a subtree qgroup rescan
	 */
	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL)
		ret = qgroup_account_snapshot(trans, root, parent_root,
					      pending->inherit, objectid);
	else if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
		ret = btrfs_qgroup_inherit(trans, root->root_key.objectid, objectid,
					   parent_root->root_key.objectid, pending->inherit);
	if (ret < 0)
		goto fail;

	ret = btrfs_insert_dir_item(trans, &fname.disk_name,
				    BTRFS_I(parent_inode), &key, BTRFS_FT_DIR,
				    index);
	/* We have checked the name at the beginning, so it is impossible. */
	BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	btrfs_i_size_write(BTRFS_I(parent_inode), parent_inode->i_size +
						  fname.disk_name.len * 2);
	parent_inode->i_mtime = inode_set_ctime_current(parent_inode);
	ret = btrfs_update_inode_fallback(trans, BTRFS_I(parent_inode));
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	ret = btrfs_uuid_tree_add(trans, new_root_item->uuid,
				  BTRFS_UUID_KEY_SUBVOL,
				  objectid);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
		ret = btrfs_uuid_tree_add(trans, new_root_item->received_uuid,
					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					  objectid);
		if (ret && ret != -EEXIST) {
			btrfs_abort_transaction(trans, ret);
			goto fail;
		}
	}

fail:
	pending->error = ret;
dir_item_existed:
	trans->block_rsv = rsv;
	trans->bytes_reserved = 0;
clear_skip_qgroup:
	btrfs_clear_skip_qgroup(trans);
free_fname:
	fscrypt_free_filename(&fname);
free_pending:
	kfree(new_root_item);
	pending->root_item = NULL;
	btrfs_free_path(path);
	pending->path = NULL;

	return ret;
}
/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans)
{
	struct btrfs_pending_snapshot *pending, *next;
	struct list_head *head = &trans->transaction->pending_snapshots;
	int ret = 0;

	list_for_each_entry_safe(pending, next, head, list) {
		list_del(&pending->list);
		ret = create_pending_snapshot(trans, pending);
		if (ret)
			break;
	}
	return ret;
}

static void update_super_roots(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = fs_info->super_copy;

	root_item = &fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
	if (btrfs_test_opt(fs_info, SPACE_CACHE))
		super->cache_generation = root_item->generation;
	else if (test_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags))
		super->cache_generation = 0;
	if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags))
		super->uuid_tree_generation = root_item->generation;
}
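
/*
 * Note: update_super_roots() only refreshes the in-memory superblock
 * (fs_info->super_copy). The bytes that reach disk are taken from
 * fs_info->super_for_commit, which btrfs_commit_transaction() fills right
 * after switching the commit roots:
 *
 *	memcpy(fs_info->super_for_commit, fs_info->super_copy,
 *	       sizeof(*fs_info->super_copy));
 *
 * so the written superblock always refers to fully switched roots.
 */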

int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
	struct btrfs_transaction *trans;
	int ret = 0;

	spin_lock(&info->trans_lock);
	trans = info->running_transaction;
	if (trans)
		ret = (trans->state >= TRANS_STATE_COMMIT_START);
	spin_unlock(&info->trans_lock);
	return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
	struct btrfs_transaction *trans;
	int ret = 0;

	spin_lock(&info->trans_lock);
	trans = info->running_transaction;
	if (trans)
		ret = is_transaction_blocked(trans);
	spin_unlock(&info->trans_lock);
	return ret;
}
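
/*
 * Both helpers above only sample the running transaction's state under
 * fs_info->trans_lock, so the answer may already be stale when the caller
 * acts on it; they are suited for heuristics, not for correctness. A
 * hypothetical polling use:
 *
 *	if (btrfs_transaction_blocked(fs_info))
 *		return;
 *
 * i.e. back off instead of trying to join the running transaction.
 */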

void btrfs_commit_transaction_async(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_transaction *cur_trans;

	/* Kick the transaction kthread. */
	set_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags);
	wake_up_process(fs_info->transaction_kthread);

	/* Take a reference on the transaction so we can wait on it below. */
	cur_trans = trans->transaction;
	refcount_inc(&cur_trans->use_count);

	btrfs_end_transaction(trans);

	/*
	 * Wait for the current transaction commit to start and block
	 * subsequent transaction joins.
	 */
	btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
	wait_event(fs_info->transaction_blocked_wait,
		   cur_trans->state >= TRANS_STATE_COMMIT_START ||
		   TRANS_ABORTED(cur_trans));
	btrfs_put_transaction(cur_trans);
}
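
/*
 * Usage sketch for the async commit (hypothetical caller): the handle is
 * consumed, and we return once joins are blocked rather than when the
 * commit has completed:
 *
 *	trans = btrfs_attach_transaction_barrier(root);
 *	if (IS_ERR(trans))
 *		...
 *	btrfs_commit_transaction_async(trans);
 *
 * Note that btrfs_end_transaction() was already called on our behalf above,
 * before waiting for TRANS_STATE_COMMIT_START.
 */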

static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;

	WARN_ON(refcount_read(&trans->use_count) > 1);

	btrfs_abort_transaction(trans, err);

	spin_lock(&fs_info->trans_lock);

	/*
	 * If the transaction is removed from the list, it means this
	 * transaction has been committed successfully, so it is impossible
	 * to call the cleanup function.
	 */
	BUG_ON(list_empty(&cur_trans->list));

	if (cur_trans == fs_info->running_transaction) {
		cur_trans->state = TRANS_STATE_COMMIT_DOING;
		spin_unlock(&fs_info->trans_lock);

		/*
		 * The thread has already released the lockdep map as a reader
		 * in btrfs_commit_transaction().
		 */
		btrfs_might_wait_for_event(fs_info, btrfs_trans_num_writers);
		wait_event(cur_trans->writer_wait,
			   atomic_read(&cur_trans->num_writers) == 1);

		spin_lock(&fs_info->trans_lock);
	}

	/*
	 * Now that we know no one else is still using the transaction we can
	 * remove the transaction from the list of transactions. This prevents
	 * the transaction kthread from cleaning up the transaction while some
	 * other task is still using it, which could result in a use-after-free
	 * on things like log trees, as it forces the transaction kthread to
	 * wait for this transaction to be cleaned up by us.
	 */
	list_del_init(&cur_trans->list);

	spin_unlock(&fs_info->trans_lock);

	btrfs_cleanup_one_transaction(trans->transaction, fs_info);

	spin_lock(&fs_info->trans_lock);
	if (cur_trans == fs_info->running_transaction)
		fs_info->running_transaction = NULL;
	spin_unlock(&fs_info->trans_lock);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(fs_info->sb);
	btrfs_put_transaction(cur_trans);
	btrfs_put_transaction(cur_trans);

	trace_btrfs_transaction_commit(fs_info);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	/*
	 * If relocation is running, we can't cancel scrub because that will
	 * result in a deadlock. Before relocating a block group, relocation
	 * pauses scrub, then starts and commits a transaction before unpausing
	 * scrub. If the transaction commit is being done by the relocation
	 * task or triggered by another task and the relocation task is waiting
	 * for the commit, and we end up here due to an error in the commit
	 * path, then calling btrfs_scrub_cancel() will deadlock, as we are
	 * asking for scrub to stop while having it asked to be paused higher
	 * up in the relocation code.
	 */
	if (!test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags))
		btrfs_scrub_cancel(fs_info);

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
}
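
/*
 * Note: the two consecutive btrfs_put_transaction() calls above are
 * intentional. A transaction starts with a use count of two, one reference
 * for fs_info->trans_list and one for the handle holder; since we deleted
 * the list entry ourselves, we must drop both references here. The happy
 * path at the end of btrfs_commit_transaction() does the same.
 */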

/*
 * Release the reserved delayed ref space of all pending block groups of the
 * transaction and remove them from the list.
 */
static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_group *block_group, *tmp;

	list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
		btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
		list_del_init(&block_group->bg_list);
	}
}

static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
{
	/*
	 * We use try_to_writeback_inodes_sb() here because if we used
	 * btrfs_start_delalloc_roots() we would deadlock with fs freeze.
	 * We are currently holding the fs freeze lock: if we did an async
	 * flush we would have to call btrfs_join_transaction() and deadlock,
	 * because we would need to wait for the fs freeze lock. With direct
	 * flushing we benefit from already being in a transaction, and our
	 * join_transaction does not have to re-take the fs freeze lock.
	 *
	 * Note that try_to_writeback_inodes_sb() will only trigger writeback
	 * if it can read lock sb->s_umount. It will always be able to lock it,
	 * except when the filesystem is being unmounted or being frozen, but in
	 * those cases sync_filesystem() is called, which results in calling
	 * writeback_inodes_sb() while holding a write lock on sb->s_umount.
	 * Note that we don't call writeback_inodes_sb() directly, because it
	 * will emit a warning if sb->s_umount is not locked.
	 */
	if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
		try_to_writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
	return 0;
}

static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
{
	if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
		btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
}
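
/*
 * The two helpers above implement the FLUSHONCOMMIT mount option as a
 * start/wait pair. btrfs_commit_transaction() kicks writeback early and
 * only waits for the resulting ordered extents after running delayed items:
 *
 *	ret = btrfs_start_delalloc_flush(fs_info);
 *	...
 *	ret = btrfs_run_delayed_items(trans);
 *	...
 *	btrfs_wait_delalloc_flush(fs_info);
 *
 * which keeps the IO asynchronous for most of the commit.
 */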

/*
 * Add the pending snapshot associated with the given transaction handle to
 * the transaction's list of pending snapshots. This must be called after the
 * transaction commit started and while holding fs_info->trans_lock.
 * This serves to guarantee a caller of btrfs_commit_transaction() that it can
 * safely free the pending snapshot pointer in case btrfs_commit_transaction()
 * returns with an error.
 */
static void add_pending_snapshot(struct btrfs_trans_handle *trans)
{
	struct btrfs_transaction *cur_trans = trans->transaction;

	if (!trans->pending_snapshot)
		return;

	lockdep_assert_held(&trans->fs_info->trans_lock);
	ASSERT(cur_trans->state >= TRANS_STATE_COMMIT_PREP);

	list_add(&trans->pending_snapshot->list, &cur_trans->pending_snapshots);
}
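
/*
 * Note: both callers of add_pending_snapshot() sit in
 * btrfs_commit_transaction() and invoke it with fs_info->trans_lock held,
 * matching the lockdep_assert_held() above. Attaching the snapshot under the
 * lock, and only once the transaction reached TRANS_STATE_COMMIT_PREP,
 * guarantees the commit either processes the snapshot or reports an error
 * while the caller still owns the pointer.
 */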

static void update_commit_stats(struct btrfs_fs_info *fs_info, ktime_t interval)
{
	fs_info->commit_stats.commit_count++;
	fs_info->commit_stats.last_commit_dur = interval;
	fs_info->commit_stats.max_commit_dur =
			max_t(u64, fs_info->commit_stats.max_commit_dur, interval);
	fs_info->commit_stats.total_commit_dur += interval;
}
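
/*
 * The counters above are enough to derive an average commit duration.
 * A hypothetical consumer (e.g. a sysfs attribute) could do:
 *
 *	u64 count = fs_info->commit_stats.commit_count;
 *	u64 avg = count ? div64_u64(fs_info->commit_stats.total_commit_dur,
 *				    count) : 0;
 *
 * All durations are in nanoseconds, as produced by ktime_get_ns() below.
 */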

int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_transaction *prev_trans = NULL;
	int ret;
	ktime_t start_time;
	ktime_t interval;

	ASSERT(refcount_read(&trans->use_count) == 1);
	btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);

	clear_bit(BTRFS_FS_NEED_TRANS_COMMIT, &fs_info->flags);

	/* Stop the commit early if ->aborted is set. */
	if (TRANS_ABORTED(cur_trans)) {
		ret = cur_trans->aborted;
		goto lockdep_trans_commit_start_release;
	}

	btrfs_trans_release_metadata(trans);
	trans->block_rsv = NULL;

	/*
	 * We only want one transaction commit doing the flushing so we do not
	 * waste a bunch of time on lock contention on the extent root node.
	 */
	if (!test_and_set_bit(BTRFS_DELAYED_REFS_FLUSHING,
			      &cur_trans->delayed_refs.flags)) {
		/*
		 * Make a pass through all the delayed refs we have so far.
		 * Any running threads may add more while we are here.
		 */
		ret = btrfs_run_delayed_refs(trans, 0);
		if (ret)
			goto lockdep_trans_commit_start_release;
	}

	btrfs_create_pending_block_groups(trans);

	if (!test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &cur_trans->flags)) {
		int run_it = 0;

		/*
		 * This mutex is also taken before trying to set
		 * block groups readonly. We need to make sure
		 * that nobody has set a block group readonly
		 * after extents from that block group have been
		 * allocated for cache files. btrfs_set_block_group_ro()
		 * will wait for the transaction to commit if it
		 * finds BTRFS_TRANS_DIRTY_BG_RUN set.
		 *
		 * The BTRFS_TRANS_DIRTY_BG_RUN flag is also used to make sure
		 * only one process starts all the block group IO. It wouldn't
		 * hurt to have more than one go through, but there's no
		 * real advantage to it either.
		 */
		mutex_lock(&fs_info->ro_block_group_mutex);
		if (!test_and_set_bit(BTRFS_TRANS_DIRTY_BG_RUN,
				      &cur_trans->flags))
			run_it = 1;
		mutex_unlock(&fs_info->ro_block_group_mutex);

		if (run_it) {
			ret = btrfs_start_dirty_block_groups(trans);
			if (ret)
				goto lockdep_trans_commit_start_release;
		}
	}

	spin_lock(&fs_info->trans_lock);
	if (cur_trans->state >= TRANS_STATE_COMMIT_PREP) {
		enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;

		add_pending_snapshot(trans);

		spin_unlock(&fs_info->trans_lock);
		refcount_inc(&cur_trans->use_count);

		if (trans->in_fsync)
			want_state = TRANS_STATE_SUPER_COMMITTED;

		btrfs_trans_state_lockdep_release(fs_info,
						  BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
		ret = btrfs_end_transaction(trans);
		wait_for_commit(cur_trans, want_state);

		if (TRANS_ABORTED(cur_trans))
			ret = cur_trans->aborted;

		btrfs_put_transaction(cur_trans);

		return ret;
	}

	cur_trans->state = TRANS_STATE_COMMIT_PREP;
	wake_up(&fs_info->transaction_blocked_wait);
	btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);

	if (cur_trans->list.prev != &fs_info->trans_list) {
		enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;

		if (trans->in_fsync)
			want_state = TRANS_STATE_SUPER_COMMITTED;

		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (prev_trans->state < want_state) {
			refcount_inc(&prev_trans->use_count);
			spin_unlock(&fs_info->trans_lock);

			wait_for_commit(prev_trans, want_state);

			ret = READ_ONCE(prev_trans->aborted);

			btrfs_put_transaction(prev_trans);
			if (ret)
				goto lockdep_release;
			spin_lock(&fs_info->trans_lock);
		}
	} else {
		/*
		 * The previous transaction was aborted and was already removed
		 * from the list of transactions at fs_info->trans_list. So we
		 * abort to prevent writing a new superblock that reflects a
		 * corrupt state (pointing to trees with unwritten nodes/leafs).
		 */
		if (BTRFS_FS_ERROR(fs_info)) {
			spin_unlock(&fs_info->trans_lock);
			ret = -EROFS;
			goto lockdep_release;
		}
	}

	cur_trans->state = TRANS_STATE_COMMIT_START;
	wake_up(&fs_info->transaction_blocked_wait);
	spin_unlock(&fs_info->trans_lock);

	/*
	 * Get the time spent on the work done by the commit thread and not
	 * the time spent waiting on a previous commit.
	 */
	start_time = ktime_get_ns();

	extwriter_counter_dec(cur_trans, trans->type);

	ret = btrfs_start_delalloc_flush(fs_info);
	if (ret)
		goto lockdep_release;

	ret = btrfs_run_delayed_items(trans);
	if (ret)
		goto lockdep_release;

	/*
	 * The thread has started/joined the transaction thus it holds the
	 * lockdep map as a reader. It has to release it before acquiring the
	 * lockdep map as a writer.
	 */
	btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters);
	btrfs_might_wait_for_event(fs_info, btrfs_trans_num_extwriters);
	wait_event(cur_trans->writer_wait,
		   extwriter_counter_read(cur_trans) == 0);

	/* Some pending items might have been added after the previous flush. */
	ret = btrfs_run_delayed_items(trans);
	if (ret) {
		btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
		goto cleanup_transaction;
	}

	btrfs_wait_delalloc_flush(fs_info);

	/*
	 * Wait for all ordered extents started by a fast fsync that joined this
	 * transaction. Otherwise if this transaction commits before the ordered
	 * extents complete we lose logged data after a power failure.
	 */
	btrfs_might_wait_for_event(fs_info, btrfs_trans_pending_ordered);
	wait_event(cur_trans->pending_wait,
		   atomic_read(&cur_trans->pending_ordered) == 0);

	btrfs_scrub_pause(fs_info);
	/*
	 * Now we need to make sure to block out any other joins while we
	 * commit the transaction. We could have started a join before setting
	 * COMMIT_DOING, so make sure to wait for num_writers to be 1 again.
	 */
	spin_lock(&fs_info->trans_lock);
	add_pending_snapshot(trans);
	cur_trans->state = TRANS_STATE_COMMIT_DOING;
	spin_unlock(&fs_info->trans_lock);

	/*
	 * The thread has started/joined the transaction thus it holds the
	 * lockdep map as a reader. It has to release it before acquiring the
	 * lockdep map as a writer.
	 */
	btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
	btrfs_might_wait_for_event(fs_info, btrfs_trans_num_writers);
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);
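
	/*
	 * From this point on, num_writers == 1: this committer holds the only
	 * open handle against the transaction. That is why several state
	 * transitions below can note that no lock is needed before updating
	 * cur_trans->state.
	 */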

	/*
	 * Make lockdep happy by acquiring the state locks after
	 * btrfs_trans_num_writers is released. If we acquired the state locks
	 * before releasing the btrfs_trans_num_writers lock then lockdep would
	 * complain because we did not follow the reverse order unlocking rule.
	 */
	btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED);
	btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);
	btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);

	/*
	 * We've started the commit, clear the flag in case we were triggered to
	 * do an async commit but somebody else started before the transaction
	 * kthread could do the work.
	 */
	clear_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags);

	if (TRANS_ABORTED(cur_trans)) {
		ret = cur_trans->aborted;
		btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
		goto scrub_continue;
	}

	/*
	 * The reloc mutex makes sure that we stop the balancing code from
	 * coming in and moving extents around in the middle of the commit.
	 */
	mutex_lock(&fs_info->reloc_mutex);

	/*
	 * We needn't worry about the delayed items because we will
	 * deal with them in create_pending_snapshot(), which is the
	 * core function of the snapshot creation.
	 */
	ret = create_pending_snapshots(trans);
	if (ret)
		goto unlock_reloc;

	/*
	 * We insert the dir indexes of the snapshots and update the inodes
	 * of the snapshots' parents after the snapshot creation, so there
	 * are some delayed items which are not dealt with. Now deal with
	 * them.
	 *
	 * We needn't worry that this operation will corrupt the snapshots,
	 * because all the trees which are snapshotted will be forced to COW
	 * their nodes and leaves.
	 */
	ret = btrfs_run_delayed_items(trans);
	if (ret)
		goto unlock_reloc;

	ret = btrfs_run_delayed_refs(trans, U64_MAX);
	if (ret)
		goto unlock_reloc;

	/*
	 * Make sure none of the code above managed to slip in a
	 * delayed item.
	 */
	btrfs_assert_delayed_root_empty(fs_info);

	WARN_ON(cur_trans != trans->transaction);

	ret = commit_fs_roots(trans);
	if (ret)
		goto unlock_reloc;

	/*
	 * commit_fs_roots() gets rid of all the tree log roots, so it is now
	 * safe to free the root of the tree log roots.
	 */
	btrfs_free_log_root_tree(trans, fs_info);

	/*
	 * Since fs roots are all committed, we can get quite accurate
	 * new_roots. So let's do quota accounting.
	 */
	ret = btrfs_qgroup_account_extents(trans);
	if (ret < 0)
		goto unlock_reloc;

	ret = commit_cowonly_roots(trans);
	if (ret)
		goto unlock_reloc;

	/*
	 * The tasks which save the space cache and inode cache may also
	 * update ->aborted, so check it.
	 */
	if (TRANS_ABORTED(cur_trans)) {
		ret = cur_trans->aborted;
		goto unlock_reloc;
	}

	cur_trans = fs_info->running_transaction;

	btrfs_set_root_node(&fs_info->tree_root->root_item,
			    fs_info->tree_root->node);
	list_add_tail(&fs_info->tree_root->dirty_list,
		      &cur_trans->switch_commits);

	btrfs_set_root_node(&fs_info->chunk_root->root_item,
			    fs_info->chunk_root->node);
	list_add_tail(&fs_info->chunk_root->dirty_list,
		      &cur_trans->switch_commits);

	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
		btrfs_set_root_node(&fs_info->block_group_root->root_item,
				    fs_info->block_group_root->node);
		list_add_tail(&fs_info->block_group_root->dirty_list,
			      &cur_trans->switch_commits);
	}

	switch_commit_roots(trans);

	ASSERT(list_empty(&cur_trans->dirty_bgs));
	ASSERT(list_empty(&cur_trans->io_bgs));
	update_super_roots(fs_info);

	btrfs_set_super_log_root(fs_info->super_copy, 0);
	btrfs_set_super_log_root_level(fs_info->super_copy, 0);
	memcpy(fs_info->super_for_commit, fs_info->super_copy,
	       sizeof(*fs_info->super_copy));

	btrfs_commit_device_sizes(cur_trans);

	clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
	clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);

	btrfs_trans_release_chunk_metadata(trans);

	/*
	 * Before changing the transaction state to TRANS_STATE_UNBLOCKED and
	 * setting fs_info->running_transaction to NULL, lock tree_log_mutex to
	 * make sure that before we commit our superblock, no other task can
	 * start a new transaction and commit a log tree before we commit our
	 * superblock. Anyone trying to commit a log tree locks this mutex before
	 * writing its superblock.
	 */
	mutex_lock(&fs_info->tree_log_mutex);

	spin_lock(&fs_info->trans_lock);
	cur_trans->state = TRANS_STATE_UNBLOCKED;
	fs_info->running_transaction = NULL;
	spin_unlock(&fs_info->trans_lock);
	mutex_unlock(&fs_info->reloc_mutex);

	wake_up(&fs_info->transaction_wait);
	btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);

	/* If we have features changed, wake up the cleaner to update sysfs. */
	if (test_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags) &&
	    fs_info->cleaner_kthread)
		wake_up_process(fs_info->cleaner_kthread);

	ret = btrfs_write_and_wait_transaction(trans);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
				      "Error while writing out transaction");
		mutex_unlock(&fs_info->tree_log_mutex);
		goto scrub_continue;
	}

	ret = write_all_supers(fs_info, 0);
	/*
	 * The super is written, we can safely allow the tree-loggers
	 * to go about their business.
	 */
	mutex_unlock(&fs_info->tree_log_mutex);
	if (ret)
		goto scrub_continue;

	/*
	 * We needn't acquire the lock here because there is no other task
	 * which can change it.
	 */
	cur_trans->state = TRANS_STATE_SUPER_COMMITTED;
	wake_up(&cur_trans->commit_wait);
	btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);

	btrfs_finish_extent_commit(trans);

	if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
		btrfs_clear_space_info_full(fs_info);

	fs_info->last_trans_committed = cur_trans->transid;

	/*
	 * We needn't acquire the lock here because there is no other task
	 * which can change it.
	 */
	cur_trans->state = TRANS_STATE_COMPLETED;
	wake_up(&cur_trans->commit_wait);
	btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED);

	spin_lock(&fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	spin_unlock(&fs_info->trans_lock);

	btrfs_put_transaction(cur_trans);
	btrfs_put_transaction(cur_trans);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(fs_info->sb);

	trace_btrfs_transaction_commit(fs_info);

	interval = ktime_get_ns() - start_time;

	btrfs_scrub_continue(fs_info);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	update_commit_stats(fs_info, interval);

	return ret;
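
	/*
	 * Error unwind: the labels below undo the commit stages in reverse
	 * order and fall through into one another. unlock_reloc drops the
	 * reloc mutex taken before snapshot creation, scrub_continue resumes
	 * the scrub that was paused earlier, and cleanup_transaction releases
	 * the reservations and aborts the transaction for good.
	 */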

unlock_reloc:
	mutex_unlock(&fs_info->reloc_mutex);
	btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
scrub_continue:
	btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);
	btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED);
	btrfs_scrub_continue(fs_info);
cleanup_transaction:
	btrfs_trans_release_metadata(trans);
	btrfs_cleanup_pending_block_groups(trans);
	btrfs_trans_release_chunk_metadata(trans);
	trans->block_rsv = NULL;
	btrfs_warn(fs_info, "Skipping commit of aborted transaction.");
	if (current->journal_info == trans)
		current->journal_info = NULL;
	cleanup_transaction(trans, ret);

	return ret;

lockdep_release:
	btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters);
	btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
	goto cleanup_transaction;

lockdep_trans_commit_start_release:
	btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
	btrfs_end_transaction(trans);
	return ret;
}

/*
 * Return < 0 on error,
 * 0 if there are no more dead roots at the time of the call,
 * 1 if there are more to be processed (call me again).
 *
 * A return value of 1 means there are certainly more snapshots to delete, but
 * if a new one arrives during processing we may return 0 instead. We don't
 * mind, because btrfs_commit_super() will poke the cleaner thread, which will
 * process it a few seconds later.
 */
int btrfs_clean_one_deleted_snapshot(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	int ret;

	spin_lock(&fs_info->trans_lock);
	if (list_empty(&fs_info->dead_roots)) {
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	root = list_first_entry(&fs_info->dead_roots,
				struct btrfs_root, root_list);
	list_del_init(&root->root_list);
	spin_unlock(&fs_info->trans_lock);

	btrfs_debug(fs_info, "cleaner removing %llu", root->root_key.objectid);

	btrfs_kill_all_delayed_nodes(root);

	if (btrfs_header_backref_rev(root->node) < BTRFS_MIXED_BACKREF_REV)
		ret = btrfs_drop_snapshot(root, 0, 0);
	else
		ret = btrfs_drop_snapshot(root, 1, 0);

	btrfs_put_root(root);
	return (ret < 0) ? 0 : 1;
}
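
/*
 * A sketch of the expected consumer loop (the cleaner kthread behaves
 * roughly like this, processing one dead root per iteration):
 *
 *	while (btrfs_clean_one_deleted_snapshot(fs_info) == 1) {
 *		if (kthread_should_stop())
 *			break;
 *	}
 */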

/*
 * We only mark the transaction aborted and then set the file system read-only.
 * This will prevent new transactions from starting or trying to join this
 * one.
 *
 * This means that error recovery at the call site is limited to freeing
 * any local memory allocations and passing the error code up without
 * further cleanup. The transaction should complete as it normally would
 * in the call path, but will return -EIO.
 *
 * We'll complete the cleanup in btrfs_end_transaction() and
 * btrfs_commit_transaction().
 */
void __cold __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
				      const char *function,
				      unsigned int line, int error, bool first_hit)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	WRITE_ONCE(trans->aborted, error);
	WRITE_ONCE(trans->transaction->aborted, error);
	if (first_hit && error == -ENOSPC)
		btrfs_dump_space_info_for_trans_abort(fs_info);
	/* Wake up anybody who may be waiting on this transaction. */
	wake_up(&fs_info->transaction_wait);
	wake_up(&fs_info->transaction_blocked_wait);
	__btrfs_handle_fs_error(fs_info, function, line, error, NULL);
}
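
/*
 * Callers normally reach this through the btrfs_abort_transaction() macro.
 * As an illustration of the contract (not the exact macro definition), it
 * expands to roughly:
 *
 *	__btrfs_abort_transaction(trans, __func__, __LINE__, error, first);
 *
 * where "first" is true only for the first abort since mount, so the log
 * records where the original failure happened.
 */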

int __init btrfs_transaction_init(void)
{
	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
			sizeof(struct btrfs_trans_handle), 0,
			SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_trans_handle_cachep)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_transaction_exit(void)
{
	kmem_cache_destroy(btrfs_trans_handle_cachep);
}
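
/*
 * Lifecycle note: btrfs_transaction_init() must run during module
 * initialization, before any transaction handle can be allocated, and
 * btrfs_transaction_exit() on unload. A minimal sketch of the pairing:
 *
 *	if (btrfs_transaction_init())
 *		return -ENOMEM;
 *	...
 *	btrfs_transaction_exit();
 */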