// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "messages.h"
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"
#include "qgroup.h"
#include "subpage.h"
#include "file.h"
#include "super.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->num_bytes < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->num_bytes;
}

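/*
 * Illustrative example (hypothetical values): the overflow check above clamps
 * to (u64)-1 instead of wrapping.  An entry with file_offset == U64_MAX - 4095
 * and num_bytes == 8192 would wrap to 4096, below its own file_offset, so
 * entry_end() returns (u64)-1 and the rb-tree comparisons below still order
 * such an entry last.
 */
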
/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->num_bytes <= file_offset)
		return 0;
	return 1;
}

/*
 * look for the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *ordered_tree_search(struct btrfs_inode *inode,
						  u64 file_offset)
{
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (inode->ordered_tree_last) {
		entry = rb_entry(inode->ordered_tree_last, struct btrfs_ordered_extent,
				 rb_node);
		if (in_range(file_offset, entry->file_offset, entry->num_bytes))
			return inode->ordered_tree_last;
	}
	ret = __tree_search(&inode->ordered_tree, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		inode->ordered_tree_last = ret;
	return ret;
}

static struct btrfs_ordered_extent *alloc_ordered_extent(
			struct btrfs_inode *inode, u64 file_offset, u64 num_bytes,
			u64 ram_bytes, u64 disk_bytenr, u64 disk_num_bytes,
			u64 offset, unsigned long flags, int compress_type)
{
	struct btrfs_ordered_extent *entry;
	int ret;

	if (flags &
	    ((1 << BTRFS_ORDERED_NOCOW) | (1 << BTRFS_ORDERED_PREALLOC))) {
		/* For nocow write, we can release the qgroup rsv right now */
		ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes);
		if (ret < 0)
			return ERR_PTR(ret);
	} else {
		/*
		 * The ordered extent has reserved qgroup space, release now
		 * and pass the reserved number for qgroup_record to free.
		 */
		ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes);
		if (ret < 0)
			return ERR_PTR(ret);
	}
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	entry->file_offset = file_offset;
	entry->num_bytes = num_bytes;
	entry->ram_bytes = ram_bytes;
	entry->disk_bytenr = disk_bytenr;
	entry->disk_num_bytes = disk_num_bytes;
	entry->offset = offset;
	entry->bytes_left = num_bytes;
	entry->inode = igrab(&inode->vfs_inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	entry->qgroup_rsv = ret;
	entry->flags = flags;
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	INIT_LIST_HEAD(&entry->bioc_list);
	init_completion(&entry->completion);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, 1);
	spin_unlock(&inode->lock);

	return entry;
}

static void insert_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct btrfs_inode *inode = BTRFS_I(entry->inode);
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;

	trace_btrfs_ordered_extent_add(inode, entry);

	percpu_counter_add_batch(&fs_info->ordered_bytes, entry->num_bytes,
				 fs_info->delalloc_batch);

	/* One ref for the tree. */
	refcount_inc(&entry->refs);

	spin_lock_irq(&inode->ordered_tree_lock);
	node = tree_insert(&inode->ordered_tree, entry->file_offset,
			   &entry->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
				"inconsistency in ordered tree at offset %llu",
				entry->file_offset);
	spin_unlock_irq(&inode->ordered_tree_lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
}

/*
 * Add an ordered extent to the per-inode tree.
 *
 * @inode:           Inode that this extent is for.
 * @file_offset:     Logical offset in file where the extent starts.
 * @num_bytes:       Logical length of extent in file.
 * @ram_bytes:       Full length of unencoded data.
 * @disk_bytenr:     Offset of extent on disk.
 * @disk_num_bytes:  Size of extent on disk.
 * @offset:          Offset into unencoded data where file data starts.
 * @flags:           Flags specifying type of extent (1 << BTRFS_ORDERED_*).
 * @compress_type:   Compression algorithm used for data.
 *
 * Most of these parameters correspond to &struct btrfs_file_extent_item. The
 * tree is given a single reference on the ordered extent that was inserted, and
 * the returned pointer is given a second reference.
 *
 * Return: the new ordered extent or error pointer.
 */
struct btrfs_ordered_extent *btrfs_alloc_ordered_extent(
			struct btrfs_inode *inode, u64 file_offset,
			u64 num_bytes, u64 ram_bytes, u64 disk_bytenr,
			u64 disk_num_bytes, u64 offset, unsigned long flags,
			int compress_type)
{
	struct btrfs_ordered_extent *entry;

	ASSERT((flags & ~BTRFS_ORDERED_TYPE_FLAGS) == 0);

	entry = alloc_ordered_extent(inode, file_offset, num_bytes, ram_bytes,
				     disk_bytenr, disk_num_bytes, offset, flags,
				     compress_type);
	if (!IS_ERR(entry))
		insert_ordered_extent(entry);
	return entry;
}

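/*
 * Illustrative caller sketch (hypothetical values, not compiled): a write path
 * could create the ordered extent for a 1MiB uncompressed COW write at file
 * offset 0 like this, dropping its lookup reference once the bios have been
 * submitted:
 *
 *	struct btrfs_ordered_extent *oe;
 *
 *	oe = btrfs_alloc_ordered_extent(inode, 0, SZ_1M, SZ_1M, disk_bytenr,
 *					SZ_1M, 0, 1 << BTRFS_ORDERED_REGULAR,
 *					BTRFS_COMPRESS_NONE);
 *	if (IS_ERR(oe))
 *		return PTR_ERR(oe);
 *	...
 *	btrfs_put_ordered_extent(oe);
 */
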
/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_inode *inode = BTRFS_I(entry->inode);

	spin_lock_irq(&inode->ordered_tree_lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&inode->ordered_tree_lock);
}

static void finish_ordered_fn(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered_extent;

	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
	btrfs_finish_ordered_io(ordered_extent);
}

static bool can_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
				      struct page *page, u64 file_offset,
				      u64 len, bool uptodate)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	lockdep_assert_held(&inode->ordered_tree_lock);

	if (page) {
		ASSERT(page->mapping);
		ASSERT(page_offset(page) <= file_offset);
		ASSERT(file_offset + len <= page_offset(page) + PAGE_SIZE);

		/*
		 * Ordered (Private2) bit indicates whether we still have
		 * pending io unfinished for the ordered extent.
		 *
		 * If there's no such bit, we need to skip to next range.
		 */
		if (!btrfs_page_test_ordered(fs_info, page, file_offset, len))
			return false;
		btrfs_page_clear_ordered(fs_info, page, file_offset, len);
	}

	/* Now we're fine to update the accounting. */
	if (WARN_ON_ONCE(len > ordered->bytes_left)) {
		btrfs_crit(fs_info,
"bad ordered extent accounting, root=%llu ino=%llu OE offset=%llu OE len=%llu to_dec=%llu left=%llu",
			   inode->root->root_key.objectid, btrfs_ino(inode),
			   ordered->file_offset, ordered->num_bytes,
			   len, ordered->bytes_left);
		ordered->bytes_left = 0;
	} else {
		ordered->bytes_left -= len;
	}

	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);

	if (ordered->bytes_left)
		return false;

	/*
	 * All the IO of the ordered extent is finished, we need to queue
	 * the finish_func to be executed.
	 */
	set_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags);
	cond_wake_up(&ordered->wait);
	refcount_inc(&ordered->refs);
	trace_btrfs_ordered_extent_mark_finished(inode, ordered);
	return true;
}

static void btrfs_queue_ordered_fn(struct btrfs_ordered_extent *ordered)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_workqueue *wq = btrfs_is_free_space_inode(inode) ?
		fs_info->endio_freespace_worker : fs_info->endio_write_workers;

	btrfs_init_work(&ordered->work, finish_ordered_fn, NULL);
	btrfs_queue_work(wq, &ordered->work);
}

bool btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
				 struct page *page, u64 file_offset, u64 len,
				 bool uptodate)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	unsigned long flags;
	bool ret;

	trace_btrfs_finish_ordered_extent(inode, file_offset, len, uptodate);

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	ret = can_finish_ordered_extent(ordered, page, file_offset, len, uptodate);
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);

	if (ret)
		btrfs_queue_ordered_fn(ordered);
	return ret;
}

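/*
 * Note: the accounting in can_finish_ordered_extent() runs under
 * ordered_tree_lock with irqs disabled, but the completion work is deferred
 * through btrfs_queue_ordered_fn() only after the lock is dropped, because
 * btrfs_finish_ordered_io() modifies trees and takes locks that must not be
 * acquired in irq-disabled context.
 */
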
/*
 * Mark all ordered extents io inside the specified range finished.
 *
 * @page:	 The involved page for the operation.
 *		 For uncompressed buffered IO, the page status also needs to be
 *		 updated to indicate whether the pending ordered io is finished.
 *		 Can be NULL for direct IO and compressed write.
 *		 For these cases, callers are ensured they won't execute the
 *		 endio function twice.
 *
 * This function is called for endio, thus the range must have ordered
 * extent(s) covering it.
 */
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
				    struct page *page, u64 file_offset,
				    u64 num_bytes, bool uptodate)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	u64 cur = file_offset;

	trace_btrfs_writepage_end_io_hook(inode, file_offset,
					  file_offset + num_bytes - 1,
					  uptodate);

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	while (cur < file_offset + num_bytes) {
		u64 entry_end;
		u64 end;
		u32 len;

		node = ordered_tree_search(inode, cur);
		/* No ordered extents at all */
		if (!node)
			break;

		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		entry_end = entry->file_offset + entry->num_bytes;
		/*
		 * |<-- OE --->|  |
		 *		  cur
		 * Go to next OE.
		 */
		if (cur >= entry_end) {
			node = rb_next(node);
			/* No more ordered extents, exit */
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_ordered_extent,
					 rb_node);

			/* Go to next ordered extent and continue */
			cur = entry->file_offset;
			continue;
		}
		/*
		 * |	|<--- OE --->|
		 * cur
		 * Go to the start of OE.
		 */
		if (cur < entry->file_offset) {
			cur = entry->file_offset;
			continue;
		}

		/*
		 * Now we are definitely inside one ordered extent.
		 *
		 * |<--- OE --->|
		 *	|
		 *	cur
		 */
		end = min(entry->file_offset + entry->num_bytes,
			  file_offset + num_bytes) - 1;
		ASSERT(end + 1 - cur < U32_MAX);
		len = end + 1 - cur;

		if (can_finish_ordered_extent(entry, page, cur, len, uptodate)) {
			spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
			btrfs_queue_ordered_fn(entry);
			spin_lock_irqsave(&inode->ordered_tree_lock, flags);
		}
		cur += len;
	}
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
}

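/*
 * Worked example (hypothetical layout): with ordered extents A = [0, 64K) and
 * B = [64K, 128K) and an endio range of [32K, 96K), the loop above first
 * clamps len to 32K for A (cur = 32K, end = 64K - 1), queues A if its last
 * bytes just finished, then advances cur to 64K and hands the remaining 32K
 * to B.
 */
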
/*
 * Finish IO for one ordered extent across a given range.  The range can only
 * contain one ordered extent.
 *
 * @cached:	 The cached ordered extent. If not NULL, we can skip the tree
 *		 search and use the ordered extent directly.
 *		 Will be also used to store the finished ordered extent.
 * @file_offset: File offset for the finished IO
 * @io_size:	 Length of the finish IO range
 *
 * Return true if the ordered extent is finished in the range, and update
 * @cached.
 * Return false otherwise.
 *
 * NOTE: The range can NOT cross multiple ordered extents.
 * Thus caller should ensure the range doesn't cross ordered extents.
 */
bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
				    struct btrfs_ordered_extent **cached,
				    u64 file_offset, u64 io_size)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	bool finished = false;

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = ordered_tree_search(inode, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		goto out;

	if (io_size > entry->bytes_left)
		btrfs_crit(inode->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);

	entry->bytes_left -= io_size;

	if (entry->bytes_left == 0) {
		/*
		 * Ensure only one caller can set the flag and finished_ret
		 * accordingly
		 */
		finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	}
out:
	if (finished && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_dec_test_pending(inode, entry);
	}
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
	return finished;
}

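/*
 * Note: because BTRFS_ORDERED_IO_DONE is set with test_and_set_bit(), only
 * one caller can observe the 0 -> 1 transition; only that caller gets *cached
 * filled in (with an extra reference) and a true return, even though
 * bytes_left is already 0 for any racing caller.
 */
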
/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(BTRFS_I(entry->inode), entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(list_empty(&entry->log_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(BTRFS_I(entry->inode));
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kvfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

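/*
 * Reference-count sketch (hypothetical caller, not compiled): an ordered
 * extent starts with one reference from alloc_ordered_extent(), gains one for
 * the tree in insert_ordered_extent(), and each lookup helper that returns it
 * takes one more, so a typical lookup pairs like this:
 *
 *	ordered = btrfs_lookup_ordered_extent(inode, file_offset);
 *	if (ordered) {
 *		...
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */
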
/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_root *root = btrfs_inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;
	bool pending;
	bool freespace_inode;

	/*
	 * If this is a free space inode the thread has not acquired the ordered
	 * extents lockdep map.
	 */
	freespace_inode = btrfs_is_free_space_inode(btrfs_inode);

	btrfs_lockdep_acquire(fs_info, btrfs_trans_pending_ordered);
	/* This is paired with btrfs_alloc_ordered_extent. */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root) {
		u64 release;

		if (test_bit(BTRFS_ORDERED_ENCODED, &entry->flags))
			release = entry->disk_num_bytes;
		else
			release = entry->num_bytes;
		btrfs_delalloc_release_metadata(btrfs_inode, release, false);
	}

	percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
				 fs_info->delalloc_batch);

	spin_lock_irq(&btrfs_inode->ordered_tree_lock);
	node = &entry->rb_node;
	rb_erase(node, &btrfs_inode->ordered_tree);
	RB_CLEAR_NODE(node);
	if (btrfs_inode->ordered_tree_last == node)
		btrfs_inode->ordered_tree_last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
	spin_unlock_irq(&btrfs_inode->ordered_tree_lock);

	/*
	 * The current running transaction is waiting on us, we need to let it
	 * know that we're complete and wake it up.
	 */
	if (pending) {
		struct btrfs_transaction *trans;

		/*
		 * The checks for trans are just a formality, it should be set,
		 * but if it isn't we don't want to deref/assert under the spin
		 * lock, so be nice and check if trans is set, but ASSERT() so
		 * if it isn't set a developer will notice.
		 */
		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			refcount_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ASSERT(trans || BTRFS_FS_ERROR(fs_info));
		if (trans) {
			if (atomic_dec_and_test(&trans->pending_ordered))
				wake_up(&trans->pending_wait);
			btrfs_put_transaction(trans);
		}
	}

	btrfs_lockdep_release(fs_info, btrfs_trans_pending_ordered);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(btrfs_inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
	if (!freespace_inode)
		btrfs_lockdep_release(fs_info, btrfs_ordered_extent);
}

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered);
	complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->disk_bytenr ||
		    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_run_ordered_extent_work, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != U64_MAX)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}

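/*
 * Usage note: nr == U64_MAX means "wait for everything", and range_start /
 * range_len filter by *disk* bytenr rather than file offset.  A caller such
 * as relocation can thus flush only the block group being balanced, e.g.
 * (hypothetical bg) btrfs_wait_ordered_extents(root, U64_MAX, bg->start,
 * bg->length).
 */
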
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			      const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	LIST_HEAD(splice);
	u64 done;

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX)
			nr -= done;
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Start IO and wait for a given ordered extent to finish.
 *
 * Wait on page writeback for all the pages in the extent and the IO completion
 * code to insert metadata into the btree corresponding to the extent.
 */
void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->num_bytes - 1;
	struct btrfs_inode *inode = BTRFS_I(entry->inode);
	bool freespace_inode;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * If this is a free space inode do not take the ordered extents lockdep
	 * map.
	 */
	freespace_inode = btrfs_is_free_space_inode(inode);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);

	if (!freespace_inode)
		btrfs_might_wait_for_event(inode->root->fs_info, btrfs_ordered_extent);
	wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, &entry->flags));
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = OFFSET_MAX;
	} else {
		orig_end = start + len - 1;
		if (orig_end > OFFSET_MAX)
			orig_end = OFFSET_MAX;
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately. Wait first
	 * for any ordered extents that haven't completed yet. This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->num_bytes <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(ordered);
		end = ordered->file_offset;
		/*
		 * If the ordered extent had an error save the error but don't
		 * exit without waiting first for all other ordered extents in
		 * the range to complete.
		 */
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}

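/*
 * Worked example (hypothetical layout): the loop above walks backwards.  For
 * ordered extents [0, 4K) and [8K, 12K) and a wait on [0, 16K), the first
 * lookup from end = 16K - 1 finds [8K, 12K) and waits on it, end becomes
 * 8K - 1 via end--, the next lookup finds and waits on [0, 4K), and the loop
 * stops when end drops to start (here 0).
 */
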
/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
							 u64 file_offset)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	node = ordered_tree_search(inode, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		entry = NULL;
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup(inode, entry);
	}
out:
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
	return entry;
}

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&inode->ordered_tree_lock);
	node = ordered_tree_search(inode, file_offset);
	if (!node) {
		node = ordered_tree_search(inode, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup_range(inode, entry);
	}
	spin_unlock_irq(&inode->ordered_tree_lock);
	return entry;
}

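/*
 * Note: unlike btrfs_lookup_ordered_extent(), which only matches an ordered
 * extent containing file_offset itself, this helper returns an ordered extent
 * overlapping any byte of [file_offset, file_offset + len), which is what the
 * wide DIO locking described above requires.
 */
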
/*
 * Adds all ordered extents to the given list. The list ends up sorted by the
 * file_offset of the ordered extents.
 */
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
					   struct list_head *list)
{
	struct rb_node *n;

	ASSERT(inode_is_locked(&inode->vfs_inode));

	spin_lock_irq(&inode->ordered_tree_lock);
	for (n = rb_first(&inode->ordered_tree); n; n = rb_next(n)) {
		struct btrfs_ordered_extent *ordered;

		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);

		if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
			continue;

		ASSERT(list_empty(&ordered->log_list));
		list_add_tail(&ordered->log_list, list);
		refcount_inc(&ordered->refs);
		trace_btrfs_ordered_extent_lookup_for_logging(inode, ordered);
	}
	spin_unlock_irq(&inode->ordered_tree_lock);
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&inode->ordered_tree_lock);
	node = ordered_tree_search(inode, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
	trace_btrfs_ordered_extent_lookup_first(inode, entry);
out:
	spin_unlock_irq(&inode->ordered_tree_lock);
	return entry;
}

/*
 * Lookup the first ordered extent that overlaps the range
 * [@file_offset, @file_offset + @len).
 *
 * The difference between this and btrfs_lookup_first_ordered_extent() is
 * that this one won't return any ordered extent that does not overlap the range.
 * And the difference against btrfs_lookup_ordered_extent() is, this function
 * ensures the first ordered extent gets returned.
 */
struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
			struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct rb_node *node;
	struct rb_node *cur;
	struct rb_node *prev;
	struct rb_node *next;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&inode->ordered_tree_lock);
	node = inode->ordered_tree.rb_node;
	/*
	 * Here we don't want to use tree_search() which will use tree->last
	 * and screw up the search order.
	 * And __tree_search() can't return the adjacent ordered extents
	 * either, thus here we do our own search.
	 */
	while (node) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset) {
			node = node->rb_left;
		} else if (file_offset >= entry_end(entry)) {
			node = node->rb_right;
		} else {
			/*
			 * Direct hit, got an ordered extent that starts at
			 * @file_offset
			 */
			goto out;
		}
	}
	if (!entry) {
		/* Empty tree */
		goto out;
	}

	cur = &entry->rb_node;
	/* We got an entry around @file_offset, check adjacent entries */
	if (entry->file_offset < file_offset) {
		prev = cur;
		next = rb_next(cur);
	} else {
		prev = rb_prev(cur);
		next = cur;
	}
	if (prev) {
		entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	if (next) {
		entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	/* No ordered extent in the range */
	entry = NULL;
out:
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup_first_range(inode, entry);
	}

	spin_unlock_irq(&inode->ordered_tree_lock);
	return entry;
}

/*
 * Lock the passed range and ensure all pending ordered extents in it are run
 * to completion.
 *
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 *                locked range. It's the caller's responsibility to free the
 *                cached state.
 *
 * Always return with the given range locked, ensuring after it's called no
 * ordered extent can be pending.
 */
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
					u64 end,
					struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cache = NULL;
	struct extent_state **cachedp = &cache;

	if (cached_state)
		cachedp = cached_state;

	while (1) {
		lock_extent(&inode->io_tree, start, end, cachedp);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered) {
			/*
			 * If no external cached_state has been passed then
			 * decrement the extra ref taken for cachedp since we
			 * aren't exposing it outside of this function
			 */
			if (!cached_state)
				refcount_dec(&cache->refs);
			break;
		}
		unlock_extent(&inode->io_tree, start, end, cachedp);
		btrfs_start_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
	}
}

/*
 * Lock the passed range and ensure all pending ordered extents in it are run
 * to completion in nowait mode.
 *
 * Return true if btrfs_lock_ordered_range does not return any extents,
 * otherwise false.
 */
bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end,
				  struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;

	if (!try_lock_extent(&inode->io_tree, start, end, cached_state))
		return false;

	ordered = btrfs_lookup_ordered_range(inode, start, end - start + 1);
	if (!ordered)
		return true;

	btrfs_put_ordered_extent(ordered);
	unlock_extent(&inode->io_tree, start, end, cached_state);

	return false;
}

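/*
 * Illustrative nowait caller sketch (hypothetical context, not compiled):
 *
 *	if (!btrfs_try_lock_ordered_range(inode, start, end, &cached_state))
 *		return -EAGAIN;	 (would block, retry in a blocking context)
 *
 *	... range is locked and free of ordered extents ...
 *
 *	unlock_extent(&inode->io_tree, start, end, &cached_state);
 */
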
/* Split out a new ordered extent for this first @len bytes of @ordered. */
struct btrfs_ordered_extent *btrfs_split_ordered_extent(
			struct btrfs_ordered_extent *ordered, u64 len)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 file_offset = ordered->file_offset;
	u64 disk_bytenr = ordered->disk_bytenr;
	unsigned long flags = ordered->flags;
	struct btrfs_ordered_sum *sum, *tmpsum;
	struct btrfs_ordered_extent *new;
	struct rb_node *node;
	u64 offset = 0;

	trace_btrfs_ordered_extent_split(inode, ordered);

	ASSERT(!(flags & (1U << BTRFS_ORDERED_COMPRESSED)));

	/*
	 * The entire bio must be covered by the ordered extent, but we can't
	 * reduce the original extent to a zero length either.
	 */
	if (WARN_ON_ONCE(len >= ordered->num_bytes))
		return ERR_PTR(-EINVAL);
	/* We cannot split partially completed ordered extents. */
	if (ordered->bytes_left) {
		ASSERT(!(flags & ~BTRFS_ORDERED_TYPE_FLAGS));
		if (WARN_ON_ONCE(ordered->bytes_left != ordered->disk_num_bytes))
			return ERR_PTR(-EINVAL);
	}
	/* We cannot split a compressed ordered extent. */
	if (WARN_ON_ONCE(ordered->disk_num_bytes != ordered->num_bytes))
		return ERR_PTR(-EINVAL);

	new = alloc_ordered_extent(inode, file_offset, len, len, disk_bytenr,
				   len, 0, flags, ordered->compress_type);
	if (IS_ERR(new))
		return new;

	/* One ref for the tree. */
	refcount_inc(&new->refs);

	spin_lock_irq(&root->ordered_extent_lock);
	spin_lock(&inode->ordered_tree_lock);
	/* Remove from tree once */
	node = &ordered->rb_node;
	rb_erase(node, &inode->ordered_tree);
	RB_CLEAR_NODE(node);
	if (inode->ordered_tree_last == node)
		inode->ordered_tree_last = NULL;

	ordered->file_offset += len;
	ordered->disk_bytenr += len;
	ordered->num_bytes -= len;
	ordered->disk_num_bytes -= len;

	if (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags)) {
		ASSERT(ordered->bytes_left == 0);
		new->bytes_left = 0;
	} else {
		ordered->bytes_left -= len;
	}

	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags)) {
		if (ordered->truncated_len > len) {
			ordered->truncated_len -= len;
		} else {
			new->truncated_len = ordered->truncated_len;
			ordered->truncated_len = 0;
		}
	}

	list_for_each_entry_safe(sum, tmpsum, &ordered->list, list) {
		if (offset == len)
			break;
		list_move_tail(&sum->list, &new->list);
		offset += sum->len;
	}

	/* Re-insert the node */
	node = tree_insert(&inode->ordered_tree, ordered->file_offset,
			   &ordered->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			"zoned: inconsistency in ordered tree at offset %llu",
			    ordered->file_offset);

	node = tree_insert(&inode->ordered_tree, new->file_offset, &new->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			"zoned: inconsistency in ordered tree at offset %llu",
			    new->file_offset);
	spin_unlock(&inode->ordered_tree_lock);

	list_add_tail(&new->root_extent_list, &root->ordered_extents);
	root->nr_ordered_extents++;
	spin_unlock_irq(&root->ordered_extent_lock);
	return new;
}

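/*
 * Worked example (hypothetical values): splitting a 128K ordered extent at
 * file_offset 0 with len = 16K returns a new 16K ordered extent covering
 * [0, 16K) and leaves the original as [16K, 128K) with its disk_bytenr
 * advanced by 16K.  Zoned btrfs relies on this when a bio must be split, as
 * each bio has to be covered by a single ordered extent.
 */
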
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}