// SPDX-License-Identifier: GPL-2.0

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/sched/mm.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/fsverity.h>
#include "extent_io.h"
#include "extent-io-tree.h"
#include "extent_map.h"
#include "btrfs_inode.h"
#include "rcu-string.h"
#include "block-group.h"
#include "compression.h"
#include "accessors.h"
#include "file-item.h"
#include "dev-replace.h"
#include "transaction.h"

static struct kmem_cache *extent_buffer_cache;
#ifdef CONFIG_BTRFS_DEBUG
static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	unsigned long flags;

	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
	list_add(&eb->leak_list, &fs_info->allocated_ebs);
	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
}

static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	unsigned long flags;

	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
	list_del(&eb->leak_list);
	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
}

void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
{
	struct extent_buffer *eb;
	unsigned long flags;

	/*
	 * If we didn't get into open_ctree our allocated_ebs will not be
	 * initialized, so just skip this.
	 */
	if (!fs_info->allocated_ebs.next)
		return;

	WARN_ON(!list_empty(&fs_info->allocated_ebs));
	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
	while (!list_empty(&fs_info->allocated_ebs)) {
		eb = list_first_entry(&fs_info->allocated_ebs,
				      struct extent_buffer, leak_list);
		pr_err(
	"BTRFS: buffer leak start %llu len %lu refs %d bflags %lu owner %llu\n",
		       eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
		       btrfs_header_owner(eb));
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}
	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
}
#else
#define btrfs_leak_debug_add_eb(eb)			do {} while (0)
#define btrfs_leak_debug_del_eb(eb)			do {} while (0)
#endif
/*
 * Structure to record info about the bio being assembled, and other info like
 * how many bytes are there before stripe/ordered extent boundary.
 */
struct btrfs_bio_ctrl {
	struct btrfs_bio *bbio;
	enum btrfs_compression_type compress_type;
	u32 len_to_oe_boundary;
	blk_opf_t opf;
	btrfs_bio_end_io_t end_io_func;
	struct writeback_control *wbc;
};
static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
{
	struct btrfs_bio *bbio = bio_ctrl->bbio;

	if (!bbio)
		return;

	/* Caller should ensure the bio has at least some range added */
	ASSERT(bbio->bio.bi_iter.bi_size);

	if (btrfs_op(&bbio->bio) == BTRFS_MAP_READ &&
	    bio_ctrl->compress_type != BTRFS_COMPRESS_NONE)
		btrfs_submit_compressed_read(bbio);
	else
		btrfs_submit_bio(bbio, 0);

	/* The bbio is owned by the end_io handler now */
	bio_ctrl->bbio = NULL;
}
/*
 * Submit or fail the current bio in the bio_ctrl structure.
 */
static void submit_write_bio(struct btrfs_bio_ctrl *bio_ctrl, int ret)
{
	struct btrfs_bio *bbio = bio_ctrl->bbio;

	if (!bbio)
		return;

	if (ret) {
		ASSERT(ret < 0);
		btrfs_bio_end_io(bbio, errno_to_blk_status(ret));
		/* The bio is owned by the end_io handler now */
		bio_ctrl->bbio = NULL;
	} else {
		submit_one_bio(bio_ctrl);
	}
}
int __init extent_buffer_init_cachep(void)
{
	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
						sizeof(struct extent_buffer), 0,
						SLAB_MEM_SPREAD, NULL);
	if (!extent_buffer_cache)
		return -ENOMEM;

	return 0;
}

void __cold extent_buffer_free_cachep(void)
{
	/*
	 * Make sure all delayed rcu free are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	kmem_cache_destroy(extent_buffer_cache);
}
void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_SHIFT;
	unsigned long end_index = end >> PAGE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(inode->i_mapping, index);
		BUG_ON(!page); /* Pages should be in the extent_io_tree */
		clear_page_dirty_for_io(page);
		put_page(page);
		index++;
	}
}
static void process_one_page(struct btrfs_fs_info *fs_info,
			     struct page *page, struct page *locked_page,
			     unsigned long page_ops, u64 start, u64 end)
{
	struct folio *folio = page_folio(page);
	u32 len;

	ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
	len = end + 1 - start;

	if (page_ops & PAGE_SET_ORDERED)
		btrfs_folio_clamp_set_ordered(fs_info, folio, start, len);
	if (page_ops & PAGE_START_WRITEBACK) {
		btrfs_folio_clamp_clear_dirty(fs_info, folio, start, len);
		btrfs_folio_clamp_set_writeback(fs_info, folio, start, len);
	}
	if (page_ops & PAGE_END_WRITEBACK)
		btrfs_folio_clamp_clear_writeback(fs_info, folio, start, len);

	if (page != locked_page && (page_ops & PAGE_UNLOCK))
		btrfs_folio_end_writer_lock(fs_info, folio, start, len);
}
static void __process_pages_contig(struct address_space *mapping,
				   struct page *locked_page, u64 start, u64 end,
				   unsigned long page_ops)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb);
	pgoff_t start_index = start >> PAGE_SHIFT;
	pgoff_t end_index = end >> PAGE_SHIFT;
	pgoff_t index = start_index;
	struct folio_batch fbatch;
	int i;

	folio_batch_init(&fbatch);
	while (index <= end_index) {
		int found_folios;

		found_folios = filemap_get_folios_contig(mapping, &index,
							 end_index, &fbatch);
		for (i = 0; i < found_folios; i++) {
			struct folio *folio = fbatch.folios[i];

			process_one_page(fs_info, &folio->page, locked_page,
					 page_ops, start, end);
		}
		folio_batch_release(&fbatch);
	}
}
static noinline void __unlock_for_delalloc(struct inode *inode,
					   struct page *locked_page,
					   u64 start, u64 end)
{
	unsigned long index = start >> PAGE_SHIFT;
	unsigned long end_index = end >> PAGE_SHIFT;

	ASSERT(locked_page);
	if (index == locked_page->index && end_index == index)
		return;

	__process_pages_contig(inode->i_mapping, locked_page, start, end,
			       PAGE_UNLOCK);
}
static noinline int lock_delalloc_pages(struct inode *inode,
					struct page *locked_page,
					u64 start,
					u64 end)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t start_index = start >> PAGE_SHIFT;
	pgoff_t end_index = end >> PAGE_SHIFT;
	pgoff_t index = start_index;
	u64 processed_end = start;
	struct folio_batch fbatch;

	if (index == locked_page->index && index == end_index)
		return 0;

	folio_batch_init(&fbatch);
	while (index <= end_index) {
		unsigned int found_folios, i;

		found_folios = filemap_get_folios_contig(mapping, &index,
							 end_index, &fbatch);
		if (found_folios == 0)
			goto out;

		for (i = 0; i < found_folios; i++) {
			struct folio *folio = fbatch.folios[i];
			struct page *page = folio_page(folio, 0);
			u32 len = end + 1 - start;

			if (page == locked_page)
				continue;

			if (btrfs_folio_start_writer_lock(fs_info, folio, start,
							  len))
				goto out;

			if (!PageDirty(page) || page->mapping != mapping) {
				btrfs_folio_end_writer_lock(fs_info, folio, start,
							    len);
				goto out;
			}

			processed_end = page_offset(page) + PAGE_SIZE - 1;
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}

	return 0;
out:
	folio_batch_release(&fbatch);
	if (processed_end > start)
		__unlock_for_delalloc(inode, locked_page, start, processed_end);
	return -EAGAIN;
}
/*
 * Find and lock a contiguous range of bytes in the file marked as delalloc, no
 * more than @max_bytes.
 *
 * @start:	The original start bytenr to search.
 *		Will store the extent range start bytenr.
 * @end:	The original end bytenr of the search range
 *		Will store the extent range end bytenr.
 *
 * Return true if we find a delalloc range which starts inside the original
 * range, and @start/@end will store the delalloc range start/end.
 *
 * Return false if we can't find any delalloc range which starts inside the
 * original range, and @start/@end will be the non-delalloc range start/end.
 */
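/*
 * Illustrative example (not part of the code below): searching with
 * *start = 0 and *end = 128K - 1 while only [64K, 96K) is delalloc and
 * overlaps @locked_page returns true with *start = 64K and *end = 96K - 1;
 * if nothing in the searched range is delalloc, false is returned and
 * *start/*end describe the non-delalloc range that was searched.
 */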
noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
						 struct page *locked_page, u64 *start,
						 u64 *end)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
	const u64 orig_start = *start;
	const u64 orig_end = *end;
	/* The sanity tests may not set a valid fs_info. */
	u64 max_bytes = fs_info ? fs_info->max_extent_size : BTRFS_MAX_EXTENT_SIZE;
	u64 delalloc_start;
	u64 delalloc_end;
	bool found;
	struct extent_state *cached_state = NULL;
	int ret;
	int loops = 0;

	/* Caller should pass a valid @end to indicate the search range end */
	ASSERT(orig_end > orig_start);

	/* The range should at least cover part of the page */
	ASSERT(!(orig_start >= page_offset(locked_page) + PAGE_SIZE ||
		 orig_end <= page_offset(locked_page)));
again:
	/* step one, find a bunch of delalloc bytes starting at start */
	delalloc_start = *start;
	delalloc_end = 0;
	found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
					  max_bytes, &cached_state);
	if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
		*start = delalloc_start;

		/* @delalloc_end can be -1, never go beyond @orig_end */
		*end = min(delalloc_end, orig_end);
		free_extent_state(cached_state);
		return false;
	}

	/*
	 * start comes from the offset of locked_page.  We have to lock
	 * pages in order, so we can't process delalloc bytes before
	 * locked_page
	 */
	if (delalloc_start < *start)
		delalloc_start = *start;

	/*
	 * make sure to limit the number of pages we try to lock down
	 */
	if (delalloc_end + 1 - delalloc_start > max_bytes)
		delalloc_end = delalloc_start + max_bytes - 1;

	/* step two, lock all the pages after the page that has start */
	ret = lock_delalloc_pages(inode, locked_page,
				  delalloc_start, delalloc_end);
	ASSERT(!ret || ret == -EAGAIN);
	if (ret == -EAGAIN) {
		/* some of the pages are gone, lets avoid looping by
		 * shortening the size of the delalloc range we're searching
		 */
		free_extent_state(cached_state);
		cached_state = NULL;
		if (!loops) {
			max_bytes = PAGE_SIZE;
			loops = 1;
			goto again;
		} else {
			found = false;
			goto out_failed;
		}
	}

	/* step three, lock the state bits for the whole range */
	lock_extent(tree, delalloc_start, delalloc_end, &cached_state);

	/* then test to make sure it is all still delalloc */
	ret = test_range_bit(tree, delalloc_start, delalloc_end,
			     EXTENT_DELALLOC, cached_state);
	if (!ret) {
		unlock_extent(tree, delalloc_start, delalloc_end,
			      &cached_state);
		__unlock_for_delalloc(inode, locked_page,
				      delalloc_start, delalloc_end);
		cond_resched();
		goto again;
	}
	free_extent_state(cached_state);
	*start = delalloc_start;
	*end = delalloc_end;
out_failed:
	return found;
}
void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
				  struct page *locked_page,
				  u32 clear_bits, unsigned long page_ops)
{
	clear_extent_bit(&inode->io_tree, start, end, clear_bits, NULL);

	__process_pages_contig(inode->vfs_inode.i_mapping, locked_page,
			       start, end, page_ops);
}
static bool btrfs_verify_page(struct page *page, u64 start)
{
	if (!fsverity_active(page->mapping->host) ||
	    PageUptodate(page) ||
	    start >= i_size_read(page->mapping->host))
		return true;
	return fsverity_verify_page(page);
}
static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
	struct folio *folio = page_folio(page);

	ASSERT(page_offset(page) <= start &&
	       start + len <= page_offset(page) + PAGE_SIZE);

	if (uptodate && btrfs_verify_page(page, start))
		btrfs_folio_set_uptodate(fs_info, folio, start, len);
	else
		btrfs_folio_clear_uptodate(fs_info, folio, start, len);

	if (!btrfs_is_subpage(fs_info, page->mapping))
		unlock_page(page);
	else
		btrfs_subpage_end_reader(fs_info, folio, start, len);
}
/*
 * After a write IO is done, we need to:
 *
 * - clear the uptodate bits on error
 * - clear the writeback bits in the extent tree for the range
 * - folio_end_writeback() if there is no more pending io for the folio
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static void end_bbio_data_write(struct btrfs_bio *bbio)
{
	struct bio *bio = &bbio->bio;
	int error = blk_status_to_errno(bio->bi_status);
	struct folio_iter fi;

	ASSERT(!bio_flagged(bio, BIO_CLONED));
	bio_for_each_folio_all(fi, bio) {
		struct folio *folio = fi.folio;
		struct inode *inode = folio->mapping->host;
		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
		const u32 sectorsize = fs_info->sectorsize;
		u64 start = folio_pos(folio) + fi.offset;
		u32 len = fi.length;

		/* Only order 0 (single page) folios are allowed for data. */
		ASSERT(folio_order(folio) == 0);

		/* Our read/write should always be sector aligned. */
		if (!IS_ALIGNED(fi.offset, sectorsize))
			btrfs_err(fs_info,
		"partial page write in btrfs with offset %zu and length %zu",
				  fi.offset, fi.length);
		else if (!IS_ALIGNED(fi.length, sectorsize))
			btrfs_info(fs_info,
		"incomplete page write with offset %zu and length %zu",
				   fi.offset, fi.length);

		btrfs_finish_ordered_extent(bbio->ordered,
				folio_page(folio, 0), start, len, !error);
		if (error)
			mapping_set_error(folio->mapping, error);
		btrfs_folio_clear_writeback(fs_info, folio, start, len);
	}

	bio_put(bio);
}
/*
 * Record previously processed extent range.
 *
 * For endio_readpage_release_extent() to handle a full extent range, reducing
 * the extent io operations.
 */
struct processed_extent {
	struct btrfs_inode *inode;
	/* Start of the range in @inode */
	u64 start;
	/* End of the range in @inode */
	u64 end;
	bool uptodate;
};

/*
 * Try to release processed extent range.
 *
 * May not release the extent range right now if the current range is
 * contiguous to processed extent.
 *
 * Will release processed extent when any of @inode, @uptodate, the range is
 * no longer contiguous to the processed range.
 *
 * Passing @inode == NULL will force processed extent to be released.
 */
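/*
 * Illustrative example: two reads completing back to back for the same inode,
 * covering [0, 16K - 1] and then [16K, 32K - 1] with the same uptodate value,
 * are merged into a single processed range and unlocked with one
 * unlock_extent() call once a non-contiguous range (or a NULL @inode) is
 * passed in.
 */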
static void endio_readpage_release_extent(struct processed_extent *processed,
			      struct btrfs_inode *inode, u64 start, u64 end,
			      bool uptodate)
{
	struct extent_state *cached = NULL;
	struct extent_io_tree *tree;

	/* The first extent, initialize @processed */
	if (!processed->inode)
		goto update;

	/*
	 * Contiguous to processed extent, just uptodate the end.
	 *
	 * Several things to notice:
	 *
	 * - bio can be merged as long as on-disk bytenr is contiguous
	 *   This means we can have page belonging to other inodes, thus need to
	 *   check if the inode still matches.
	 * - bvec can contain range beyond current page for multi-page bvec
	 *   Thus we need to do processed->end + 1 >= start check
	 */
	if (processed->inode == inode && processed->uptodate == uptodate &&
	    processed->end + 1 >= start && end >= processed->end) {
		processed->end = end;
		return;
	}

	tree = &processed->inode->io_tree;
	/*
	 * Now we don't have range contiguous to the processed range, release
	 * the processed range now.
	 */
	unlock_extent(tree, processed->start, processed->end, &cached);

update:
	/* Update processed to current range */
	processed->inode = inode;
	processed->start = start;
	processed->end = end;
	processed->uptodate = uptodate;
}
static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page)
{
	struct folio *folio = page_folio(page);

	ASSERT(folio_test_locked(folio));
	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	ASSERT(folio_test_private(folio));
	btrfs_subpage_start_reader(fs_info, folio, page_offset(page), PAGE_SIZE);
}
/*
 * After a data read IO is done, we need to:
 *
 * - clear the uptodate bits on error
 * - set the uptodate bits if things worked
 * - set the folio up to date if all extents in the tree are uptodate
 * - clear the lock bit in the extent tree
 * - unlock the folio if there are no other extents locked for it
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static void end_bbio_data_read(struct btrfs_bio *bbio)
{
	struct bio *bio = &bbio->bio;
	struct processed_extent processed = { 0 };
	struct folio_iter fi;
	/*
	 * The offset to the beginning of a bio, since one bio can never be
	 * larger than UINT_MAX, u32 here is enough.
	 */
	u32 bio_offset = 0;

	ASSERT(!bio_flagged(bio, BIO_CLONED));
	bio_for_each_folio_all(fi, &bbio->bio) {
		bool uptodate = !bio->bi_status;
		struct folio *folio = fi.folio;
		struct inode *inode = folio->mapping->host;
		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
		const u32 sectorsize = fs_info->sectorsize;
		u64 start;
		u64 end;
		u32 len;

		/* For now only order 0 folios are supported for data. */
		ASSERT(folio_order(folio) == 0);
		btrfs_debug(fs_info,
			    "%s: bi_sector=%llu, err=%d, mirror=%u",
			    __func__, bio->bi_iter.bi_sector, bio->bi_status,
			    bbio->mirror_num);

		/*
		 * We always issue full-sector reads, but if some block in a
		 * folio fails to read, blk_update_request() will advance
		 * bv_offset and adjust bv_len to compensate.  Print a warning
		 * for unaligned offsets, and an error if they don't add up to
		 * a full sector.
		 */
		if (!IS_ALIGNED(fi.offset, sectorsize))
			btrfs_err(fs_info,
		"partial page read in btrfs with offset %zu and length %zu",
				  fi.offset, fi.length);
		else if (!IS_ALIGNED(fi.offset + fi.length, sectorsize))
			btrfs_info(fs_info,
		"incomplete page read with offset %zu and length %zu",
				   fi.offset, fi.length);

		start = folio_pos(folio) + fi.offset;
		end = start + fi.length - 1;
		len = fi.length;

		if (likely(uptodate)) {
			loff_t i_size = i_size_read(inode);
			pgoff_t end_index = i_size >> folio_shift(folio);

			/*
			 * Zero out the remaining part if this range straddles
			 * i_size.
			 *
			 * Here we should only zero the range inside the folio,
			 * not touch anything else.
			 *
			 * NOTE: i_size is exclusive while end is inclusive.
			 */
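			/*
			 * Illustrative example: a 4K folio at file offset 8K
			 * read in full (start = 8K, end = 12K - 1) with
			 * i_size = 10K gives zero_start = 2K and zero_len = 2K
			 * below, so only the folio bytes backing [10K, 12K)
			 * are zeroed.
			 */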
			if (folio_index(folio) == end_index && i_size <= end) {
				u32 zero_start = max(offset_in_folio(folio, i_size),
						     offset_in_folio(folio, start));
				u32 zero_len = offset_in_folio(folio, end) + 1 -
					       zero_start;

				folio_zero_range(folio, zero_start, zero_len);
			}
		}

		/* Update page status and unlock. */
		end_page_read(folio_page(folio, 0), uptodate, start, len);
		endio_readpage_release_extent(&processed, BTRFS_I(inode),
					      start, end, uptodate);

		ASSERT(bio_offset + len > bio_offset);
		bio_offset += len;
	}

	/* Release the last extent */
	endio_readpage_release_extent(&processed, NULL, 0, 0, false);
	bio_put(bio);
}
/*
 * Populate every free slot in a provided array with pages.
 *
 * @nr_pages:   number of pages to allocate
 * @page_array: the array to fill with pages; any existing non-null entries in
 *		the array will be skipped
 * @extra_gfp:	the extra GFP flags for the allocation.
 *
 * Return: 0        if all pages were able to be allocated;
 *         -ENOMEM  otherwise, the partially allocated pages would be freed and
 *                  the array slots zeroed
 */
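/*
 * Illustrative usage (not taken from this file):
 *
 *	struct page *pages[16] = { 0 };
 *
 *	if (btrfs_alloc_page_array(16, pages, 0))
 *		return -ENOMEM;
 *
 * On success all 16 slots are populated; on failure every slot is left NULL.
 */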
int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
			   gfp_t extra_gfp)
{
	unsigned int allocated;

	for (allocated = 0; allocated < nr_pages;) {
		unsigned int last = allocated;

		allocated = alloc_pages_bulk_array(GFP_NOFS | extra_gfp,
						   nr_pages, page_array);

		if (allocated == nr_pages)
			return 0;

		/*
		 * During this iteration, no page could be allocated, even
		 * though alloc_pages_bulk_array() falls back to alloc_page()
		 * if it could not bulk-allocate. So we must be out of memory.
		 */
		if (allocated == last) {
			for (int i = 0; i < allocated; i++) {
				__free_page(page_array[i]);
				page_array[i] = NULL;
			}
			return -ENOMEM;
		}

		memalloc_retry_wait(GFP_NOFS);
	}
	return 0;
}
/*
 * Populate needed folios for the extent buffer.
 *
 * For now, the folios populated are always in order 0 (aka, single page).
 */
static int alloc_eb_folio_array(struct extent_buffer *eb, gfp_t extra_gfp)
{
	struct page *page_array[INLINE_EXTENT_BUFFER_PAGES] = { 0 };
	int num_pages = num_extent_pages(eb);
	int ret;

	ret = btrfs_alloc_page_array(num_pages, page_array, extra_gfp);
	if (ret < 0)
		return ret;

	for (int i = 0; i < num_pages; i++)
		eb->folios[i] = page_folio(page_array[i]);

	return 0;
}
static bool btrfs_bio_is_contig(struct btrfs_bio_ctrl *bio_ctrl,
				struct page *page, u64 disk_bytenr,
				unsigned int pg_offset)
{
	struct bio *bio = &bio_ctrl->bbio->bio;
	struct bio_vec *bvec = bio_last_bvec_all(bio);
	const sector_t sector = disk_bytenr >> SECTOR_SHIFT;

	if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) {
		/*
		 * For compression, all IO should have its logical bytenr set
		 * to the starting bytenr of the compressed extent.
		 */
		return bio->bi_iter.bi_sector == sector;
	}

	/*
	 * The contig check requires the following conditions to be met:
	 *
	 * 1) The pages are belonging to the same inode
	 *    This is implied by the call chain.
	 *
	 * 2) The range has adjacent logical bytenr
	 *
	 * 3) The range has adjacent file offset
	 *    This is required for the usage of btrfs_bio->file_offset.
	 */
	return bio_end_sector(bio) == sector &&
	       page_offset(bvec->bv_page) + bvec->bv_offset + bvec->bv_len ==
	       page_offset(page) + pg_offset;
}
static void alloc_new_bio(struct btrfs_inode *inode,
			  struct btrfs_bio_ctrl *bio_ctrl,
			  u64 disk_bytenr, u64 file_offset)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_bio *bbio;

	bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, fs_info,
			       bio_ctrl->end_io_func, NULL);
	bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
	bbio->inode = inode;
	bbio->file_offset = file_offset;
	bio_ctrl->bbio = bbio;
	bio_ctrl->len_to_oe_boundary = U32_MAX;

	/* Limit data write bios to the ordered boundary. */
	if (bio_ctrl->wbc) {
		struct btrfs_ordered_extent *ordered;

		ordered = btrfs_lookup_ordered_extent(inode, file_offset);
		if (ordered) {
			bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
					ordered->file_offset +
					ordered->disk_num_bytes - file_offset);
			bbio->ordered = ordered;
		}

		/*
		 * Pick the last added device to support cgroup writeback.  For
		 * multi-device file systems this means blk-cgroup policies have
		 * to always be set on the last added/replaced device.
		 * This is a bit odd but has been like that for a long time.
		 */
		bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
		wbc_init_bio(bio_ctrl->wbc, &bbio->bio);
	}
}
/*
 * @disk_bytenr: logical bytenr where the write will be
 * @page:	 page to add to the bio
 * @size:	 portion of page that we want to write to
 * @pg_offset:	 offset of the new bio or to check whether we are adding
 *		 a contiguous page to the previous one
 *
 * This will either add the page into the existing @bio_ctrl->bbio, or allocate
 * a new one in @bio_ctrl->bbio.
 * The mirror number for this IO should already be initialized in
 * @bio_ctrl->mirror_num.
 */
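/*
 * Illustrative behaviour: if the current bbio ends at disk bytenr 1M with the
 * matching file offset, a page for disk bytenr 1M at the following file
 * offset is simply appended; a non-contiguous page (or one that would cross
 * the ordered extent boundary tracked in @bio_ctrl->len_to_oe_boundary)
 * causes the current bio to be submitted and a new one to be allocated.
 */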
static void submit_extent_page(struct btrfs_bio_ctrl *bio_ctrl,
			       u64 disk_bytenr, struct page *page,
			       size_t size, unsigned long pg_offset)
{
	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);

	ASSERT(pg_offset + size <= PAGE_SIZE);
	ASSERT(bio_ctrl->end_io_func);

	if (bio_ctrl->bbio &&
	    !btrfs_bio_is_contig(bio_ctrl, page, disk_bytenr, pg_offset))
		submit_one_bio(bio_ctrl);

	do {
		u32 len = size;

		/* Allocate new bio if needed */
		if (!bio_ctrl->bbio) {
			alloc_new_bio(inode, bio_ctrl, disk_bytenr,
				      page_offset(page) + pg_offset);
		}

		/* Cap to the current ordered extent boundary if there is one. */
		if (len > bio_ctrl->len_to_oe_boundary) {
			ASSERT(bio_ctrl->compress_type == BTRFS_COMPRESS_NONE);
			ASSERT(is_data_inode(&inode->vfs_inode));
			len = bio_ctrl->len_to_oe_boundary;
		}

		if (bio_add_page(&bio_ctrl->bbio->bio, page, len, pg_offset) != len) {
			/* bio full: move on to a new one */
			submit_one_bio(bio_ctrl);
			continue;
		}

		if (bio_ctrl->wbc)
			wbc_account_cgroup_owner(bio_ctrl->wbc, page, len);

		size -= len;
		pg_offset += len;
		disk_bytenr += len;

		/*
		 * len_to_oe_boundary defaults to U32_MAX, which isn't page or
		 * sector aligned.  alloc_new_bio() then sets it to the end of
		 * our ordered extent for writes into zoned devices.
		 *
		 * When len_to_oe_boundary is tracking an ordered extent, we
		 * trust the ordered extent code to align things properly, and
		 * the check above to cap our write to the ordered extent
		 * boundary is correct.
		 *
		 * When len_to_oe_boundary is U32_MAX, the cap above would
		 * result in a 4095 byte IO for the last page right before
		 * we hit the bio limit of UINT_MAX.  bio_add_page() has all
		 * the checks required to make sure we don't overflow the bio,
		 * and we should just ignore len_to_oe_boundary completely
		 * unless we're using it to track an ordered extent.
		 *
		 * It's pretty hard to make a bio sized U32_MAX, but it can
		 * happen when the page cache is able to feed us contiguous
		 * pages for large extents.
		 */
		if (bio_ctrl->len_to_oe_boundary != U32_MAX)
			bio_ctrl->len_to_oe_boundary -= len;

		/* Ordered extent boundary: move on to a new bio. */
		if (bio_ctrl->len_to_oe_boundary == 0)
			submit_one_bio(bio_ctrl);
	} while (size);
}
static int attach_extent_buffer_folio(struct extent_buffer *eb,
				      struct folio *folio,
				      struct btrfs_subpage *prealloc)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	int ret = 0;

	/*
	 * If the page is mapped to btree inode, we should hold the private
	 * lock to prevent race.
	 * For cloned or dummy extent buffers, their pages are not mapped and
	 * will not race with any other ebs.
	 */
	if (folio->mapping)
		lockdep_assert_held(&folio->mapping->private_lock);

	if (fs_info->nodesize >= PAGE_SIZE) {
		if (!folio_test_private(folio))
			folio_attach_private(folio, eb);
		else
			WARN_ON(folio_get_private(folio) != eb);
		return 0;
	}

	/* Already mapped, just free prealloc */
	if (folio_test_private(folio)) {
		btrfs_free_subpage(prealloc);
		return 0;
	}

	if (prealloc)
		/* Has preallocated memory for subpage */
		folio_attach_private(folio, prealloc);
	else
		/* Do new allocation to attach subpage */
		ret = btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_METADATA);
	return ret;
}
int set_page_extent_mapped(struct page *page)
{
	struct folio *folio = page_folio(page);
	struct btrfs_fs_info *fs_info;

	ASSERT(page->mapping);

	if (folio_test_private(folio))
		return 0;

	fs_info = btrfs_sb(page->mapping->host->i_sb);

	if (btrfs_is_subpage(fs_info, page->mapping))
		return btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_DATA);

	folio_attach_private(folio, (void *)EXTENT_FOLIO_PRIVATE);
	return 0;
}
void clear_page_extent_mapped(struct page *page)
{
	struct folio *folio = page_folio(page);
	struct btrfs_fs_info *fs_info;

	ASSERT(page->mapping);

	if (!folio_test_private(folio))
		return;

	fs_info = btrfs_sb(page->mapping->host->i_sb);
	if (btrfs_is_subpage(fs_info, page->mapping))
		return btrfs_detach_subpage(fs_info, folio);

	folio_detach_private(folio);
}
static struct extent_map *
__get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
		 u64 start, u64 len, struct extent_map **em_cached)
{
	struct extent_map *em;

	if (em_cached && *em_cached) {
		em = *em_cached;
		if (extent_map_in_tree(em) && start >= em->start &&
		    start < extent_map_end(em)) {
			refcount_inc(&em->refs);
			return em;
		}

		free_extent_map(em);
		*em_cached = NULL;
	}

	em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, start, len);
	if (em_cached && !IS_ERR(em)) {
		BUG_ON(*em_cached);
		refcount_inc(&em->refs);
		*em_cached = em;
	}
	return em;
}
/*
 * basic readpage implementation.  Locked extent state structs are inserted
 * into the tree that are removed when the IO is done (by the end_io
 * handlers)
 * XXX JDM: This needs looking at to ensure proper page locking
 * return 0 on success, otherwise return error
 */
static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
			     struct btrfs_bio_ctrl *bio_ctrl, u64 *prev_em_start)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	u64 start = page_offset(page);
	const u64 end = start + PAGE_SIZE - 1;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	struct extent_map *em;
	int ret = 0;
	size_t pg_offset = 0;
	size_t iosize;
	size_t blocksize = inode->i_sb->s_blocksize;
	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;

	ret = set_page_extent_mapped(page);
	if (ret < 0) {
		unlock_extent(tree, start, end, NULL);
		unlock_page(page);
		return ret;
	}

	if (page->index == last_byte >> PAGE_SHIFT) {
		size_t zero_offset = offset_in_page(last_byte);

		if (zero_offset) {
			iosize = PAGE_SIZE - zero_offset;
			memzero_page(page, zero_offset, iosize);
		}
	}
	bio_ctrl->end_io_func = end_bbio_data_read;
	begin_page_read(fs_info, page);
	while (cur <= end) {
		enum btrfs_compression_type compress_type = BTRFS_COMPRESS_NONE;
		bool force_bio_submit = false;
		u64 disk_bytenr;

		ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
		if (cur >= last_byte) {
			iosize = PAGE_SIZE - pg_offset;
			memzero_page(page, pg_offset, iosize);
			unlock_extent(tree, cur, cur + iosize - 1, NULL);
			end_page_read(page, true, cur, iosize);
			break;
		}
		em = __get_extent_map(inode, page, pg_offset, cur,
				      end - cur + 1, em_cached);
		if (IS_ERR(em)) {
			unlock_extent(tree, cur, end, NULL);
			end_page_read(page, false, cur, end + 1 - cur);
			return PTR_ERR(em);
		}
		extent_offset = cur - em->start;
		BUG_ON(extent_map_end(em) <= cur);
		BUG_ON(end < cur);

		compress_type = extent_map_compression(em);

		iosize = min(extent_map_end(em) - cur, end - cur + 1);
		iosize = ALIGN(iosize, blocksize);
		if (compress_type != BTRFS_COMPRESS_NONE)
			disk_bytenr = em->block_start;
		else
			disk_bytenr = em->block_start + extent_offset;
		block_start = em->block_start;
		if (em->flags & EXTENT_FLAG_PREALLOC)
			block_start = EXTENT_MAP_HOLE;

		/*
		 * If we have a file range that points to a compressed extent
		 * and it's followed by a consecutive file range that points
		 * to the same compressed extent (possibly with a different
		 * offset and/or length, so it either points to the whole extent
		 * or only part of it), we must make sure we do not submit a
		 * single bio to populate the pages for the 2 ranges because
		 * this makes the compressed extent read zero out the pages
		 * belonging to the 2nd range. Imagine the following scenario:
		 *
		 *  [0 - 8K]                       [8K - 24K]
		 *      |                               |
		 *      |                               |
		 *   points to extent X,            points to extent X,
		 *   offset 4K, length of 8K        offset 0, length 16K
		 *
		 * [extent X, compressed length = 4K uncompressed length = 16K]
		 *
		 * If the bio to read the compressed extent covers both ranges,
		 * it will decompress extent X into the pages belonging to the
		 * first range and then it will stop, zeroing out the remaining
		 * pages that belong to the other range that points to extent X.
		 * So here we make sure we submit 2 bios, one for the first
		 * range and another one for the second range. Both will target
		 * the same physical extent from disk, but we can't currently
		 * make the compressed bio endio callback populate the pages
		 * for both ranges because each compressed bio is tightly
		 * coupled with a single extent map, and each range can have
		 * an extent map with a different offset value relative to the
		 * uncompressed data of our extent and different lengths. This
		 * is a corner case so we prioritize correctness over
		 * non-optimal behavior (submitting 2 bios for the same extent).
		 */
		if (compress_type != BTRFS_COMPRESS_NONE &&
		    prev_em_start && *prev_em_start != (u64)-1 &&
		    *prev_em_start != em->start)
			force_bio_submit = true;

		if (prev_em_start)
			*prev_em_start = em->start;

		free_extent_map(em);
		em = NULL;

		/* we've found a hole, just zero and go on */
		if (block_start == EXTENT_MAP_HOLE) {
			memzero_page(page, pg_offset, iosize);

			unlock_extent(tree, cur, cur + iosize - 1, NULL);
			end_page_read(page, true, cur, iosize);
			cur = cur + iosize;
			pg_offset += iosize;
			continue;
		}
		/* the get_extent function already copied into the page */
		if (block_start == EXTENT_MAP_INLINE) {
			unlock_extent(tree, cur, cur + iosize - 1, NULL);
			end_page_read(page, true, cur, iosize);
			cur = cur + iosize;
			pg_offset += iosize;
			continue;
		}

		if (bio_ctrl->compress_type != compress_type) {
			submit_one_bio(bio_ctrl);
			bio_ctrl->compress_type = compress_type;
		}

		if (force_bio_submit)
			submit_one_bio(bio_ctrl);
		submit_extent_page(bio_ctrl, disk_bytenr, page, iosize,
				   pg_offset);
		cur = cur + iosize;
		pg_offset += iosize;
	}

	return 0;
}
int btrfs_read_folio(struct file *file, struct folio *folio)
{
	struct page *page = &folio->page;
	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
	u64 start = page_offset(page);
	u64 end = start + PAGE_SIZE - 1;
	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
	int ret;

	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);

	ret = btrfs_do_readpage(page, NULL, &bio_ctrl, NULL);
	/*
	 * If btrfs_do_readpage() failed we will want to submit the assembled
	 * bio to do the cleanup.
	 */
	submit_one_bio(&bio_ctrl);
	return ret;
}
static inline void contiguous_readpages(struct page *pages[], int nr_pages,
					u64 start, u64 end,
					struct extent_map **em_cached,
					struct btrfs_bio_ctrl *bio_ctrl,
					u64 *prev_em_start)
{
	struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host);
	int index;

	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);

	for (index = 0; index < nr_pages; index++) {
		btrfs_do_readpage(pages[index], em_cached, bio_ctrl,
				  prev_em_start);
		put_page(pages[index]);
	}
}
/*
 * helper for __extent_writepage, doing all of the delayed allocation setup.
 *
 * This returns 1 if btrfs_run_delalloc_range function did all the work required
 * to write the page (copy into inline extent).  In this case the IO has
 * been started and the page is already unlocked.
 *
 * This returns 0 if all went well (page still locked)
 * This returns < 0 if there were errors (page still locked)
 */
static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
		struct page *page, struct writeback_control *wbc)
{
	const u64 page_start = page_offset(page);
	const u64 page_end = page_start + PAGE_SIZE - 1;
	u64 delalloc_start = page_start;
	u64 delalloc_end = page_end;
	u64 delalloc_to_write = 0;
	int ret = 0;

	while (delalloc_start < page_end) {
		delalloc_end = page_end;
		if (!find_lock_delalloc_range(&inode->vfs_inode, page,
					      &delalloc_start, &delalloc_end)) {
			delalloc_start = delalloc_end + 1;
			continue;
		}

		ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
					       delalloc_end, wbc);
		if (ret < 0)
			return ret;

		delalloc_start = delalloc_end + 1;
	}

	/*
	 * delalloc_end is already one less than the total length, so
	 * we don't subtract one from PAGE_SIZE
	 */
	delalloc_to_write +=
		DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE);

	/*
	 * If btrfs_run_delalloc_range() already started I/O and unlocked
	 * the pages, we just need to account for them here.
	 */
	if (ret == 1) {
		wbc->nr_to_write -= delalloc_to_write;
		return 1;
	}

	if (wbc->nr_to_write < delalloc_to_write) {
		int thresh = 8192;

		if (delalloc_to_write < thresh * 2)
			thresh = delalloc_to_write;
		wbc->nr_to_write = min_t(u64, delalloc_to_write,
					 thresh);
	}

	return 0;
}
/*
 * Find the first byte we need to write.
 *
 * For subpage, one page can contain several sectors, and
 * __extent_writepage_io() will just grab all extent maps in the page
 * range and try to submit all non-inline/non-compressed extents.
 *
 * This is a big problem for subpage, we shouldn't re-submit already written
 * data at all.
 * This function will lookup subpage dirty bit to find which range we really
 * need to submit.
 *
 * Return the next dirty range in [@start, @end).
 * If no dirty range is found, @start will be page_offset(page) + PAGE_SIZE.
 */
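/*
 * Illustrative example: with 4K sectors in a 64K page, if only the 3rd and
 * 4th sectors are still dirty, a lookup starting at the page offset returns
 * *start = page_offset(page) + 8K and *end = page_offset(page) + 16K.
 */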
static void find_next_dirty_byte(struct btrfs_fs_info *fs_info,
				 struct page *page, u64 *start, u64 *end)
{
	struct folio *folio = page_folio(page);
	struct btrfs_subpage *subpage = folio_get_private(folio);
	struct btrfs_subpage_info *spi = fs_info->subpage_info;
	u64 orig_start = *start;
	/* Declare as unsigned long so we can use bitmap ops */
	unsigned long flags;
	int range_start_bit;
	int range_end_bit;

	/*
	 * For regular sector size == page size case, since one page only
	 * contains one sector, we return the page offset directly.
	 */
	if (!btrfs_is_subpage(fs_info, page->mapping)) {
		*start = page_offset(page);
		*end = page_offset(page) + PAGE_SIZE;
		return;
	}

	range_start_bit = spi->dirty_offset +
			  (offset_in_page(orig_start) >> fs_info->sectorsize_bits);

	/* We should have the page locked, but just in case */
	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_next_set_region(subpage->bitmaps, &range_start_bit, &range_end_bit,
			       spi->dirty_offset + spi->bitmap_nr_bits);
	spin_unlock_irqrestore(&subpage->lock, flags);

	range_start_bit -= spi->dirty_offset;
	range_end_bit -= spi->dirty_offset;

	*start = page_offset(page) + range_start_bit * fs_info->sectorsize;
	*end = page_offset(page) + range_end_bit * fs_info->sectorsize;
}
/*
 * helper for __extent_writepage.  This calls the writepage start hooks,
 * and does the loop to map the page into extents and bios.
 *
 * We return 1 if the IO is started and the page is unlocked,
 * 0 if all went well (page still locked)
 * < 0 if there were errors (page still locked)
 */
static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
				 struct page *page,
				 struct btrfs_bio_ctrl *bio_ctrl,
				 loff_t i_size,
				 int *nr_ret)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 cur = page_offset(page);
	u64 end = cur + PAGE_SIZE - 1;
	u64 extent_offset;
	u64 block_start;
	struct extent_map *em;
	int ret = 0;
	int nr = 0;

	ret = btrfs_writepage_cow_fixup(page);
	if (ret) {
		/* Fixup worker will requeue */
		redirty_page_for_writepage(bio_ctrl->wbc, page);
		unlock_page(page);
		return 1;
	}

	bio_ctrl->end_io_func = end_bbio_data_write;
	while (cur <= end) {
		u32 len = end - cur + 1;
		u64 disk_bytenr;
		u64 em_end;
		u64 dirty_range_start = cur;
		u64 dirty_range_end;
		u32 iosize;

		if (cur >= i_size) {
			btrfs_mark_ordered_io_finished(inode, page, cur, len,
						       true);
			/*
			 * This range is beyond i_size, thus we don't need to
			 * bother writing back.
			 * But we still need to clear the dirty subpage bit, or
			 * the next time the page gets dirtied, we will try to
			 * writeback the sectors with subpage dirty bits,
			 * causing writeback without ordered extent.
			 */
			btrfs_folio_clear_dirty(fs_info, page_folio(page), cur, len);
			break;
		}

		find_next_dirty_byte(fs_info, page, &dirty_range_start,
				     &dirty_range_end);
		if (cur < dirty_range_start) {
			cur = dirty_range_start;
			continue;
		}

		em = btrfs_get_extent(inode, NULL, 0, cur, len);
		if (IS_ERR(em)) {
			ret = PTR_ERR_OR_ZERO(em);
			goto out_error;
		}

		extent_offset = cur - em->start;
		em_end = extent_map_end(em);
		ASSERT(cur <= em_end);
		ASSERT(cur < end);
		ASSERT(IS_ALIGNED(em->start, fs_info->sectorsize));
		ASSERT(IS_ALIGNED(em->len, fs_info->sectorsize));

		block_start = em->block_start;
		disk_bytenr = em->block_start + extent_offset;

		ASSERT(!extent_map_is_compressed(em));
		ASSERT(block_start != EXTENT_MAP_HOLE);
		ASSERT(block_start != EXTENT_MAP_INLINE);

		/*
		 * Note that em_end from extent_map_end() and dirty_range_end from
		 * find_next_dirty_byte() are all exclusive
		 */
		iosize = min(min(em_end, end + 1), dirty_range_end) - cur;
		free_extent_map(em);
		em = NULL;

		btrfs_set_range_writeback(inode, cur, cur + iosize - 1);
		if (!PageWriteback(page)) {
			btrfs_err(inode->root->fs_info,
				  "page %lu not writeback, cur %llu end %llu",
				  page->index, cur, end);
		}

		/*
		 * Although the PageDirty bit is cleared before entering this
		 * function, subpage dirty bit is not cleared.
		 * So clear subpage dirty bit here so next time we won't submit
		 * page for range already written to disk.
		 */
		btrfs_folio_clear_dirty(fs_info, page_folio(page), cur, iosize);

		submit_extent_page(bio_ctrl, disk_bytenr, page, iosize,
				   cur - page_offset(page));
		cur += iosize;
		nr++;
	}

	btrfs_folio_assert_not_dirty(fs_info, page_folio(page));
	*nr_ret = nr;
	return 0;

out_error:
	/*
	 * If we finish without problem, we should not only clear page dirty,
	 * but also empty subpage dirty bits
	 */
	*nr_ret = nr;
	return ret;
}
/*
 * the writepage semantics are similar to regular writepage.  extent
 * records are inserted to lock ranges in the tree, and as dirty areas
 * are found, they are marked writeback.  Then the lock bits are removed
 * and the end_io handler clears the writeback ranges
 *
 * Return 0 if everything goes well.
 * Return <0 for error.
 */
static int __extent_writepage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = page->mapping->host;
	const u64 page_start = page_offset(page);
	int ret;
	int nr = 0;
	size_t pg_offset;
	loff_t i_size = i_size_read(inode);
	unsigned long end_index = i_size >> PAGE_SHIFT;

	trace___extent_writepage(page, inode, bio_ctrl->wbc);

	WARN_ON(!PageLocked(page));

	pg_offset = offset_in_page(i_size);
	if (page->index > end_index ||
	    (page->index == end_index && !pg_offset)) {
		folio_invalidate(folio, 0, folio_size(folio));
		folio_unlock(folio);
		return 0;
	}

	if (page->index == end_index)
		memzero_page(page, pg_offset, PAGE_SIZE - pg_offset);

	ret = set_page_extent_mapped(page);
	if (ret < 0)
		goto done;

	ret = writepage_delalloc(BTRFS_I(inode), page, bio_ctrl->wbc);
	if (ret == 1)
		return 0;
	if (ret)
		goto done;

	ret = __extent_writepage_io(BTRFS_I(inode), page, bio_ctrl, i_size, &nr);
	if (ret == 1)
		return 0;

	bio_ctrl->wbc->nr_to_write--;

done:
	if (nr == 0) {
		/* make sure the mapping tag for page dirty gets cleared */
		set_page_writeback(page);
		end_page_writeback(page);
	}
	if (ret) {
		btrfs_mark_ordered_io_finished(BTRFS_I(inode), page, page_start,
					       PAGE_SIZE, !ret);
		mapping_set_error(page->mapping, ret);
	}
	unlock_page(page);
	ASSERT(ret <= 0);
	return ret;
}
void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
{
	wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
		       TASK_UNINTERRUPTIBLE);
}
/*
 * Lock extent buffer status and pages for writeback.
 *
 * Return %false if the extent buffer doesn't need to be submitted (e.g. the
 * extent buffer is not dirty)
 * Return %true if the extent buffer is submitted to bio.
 */
static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *eb,
							  struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	bool ret = false;

	btrfs_tree_lock(eb);
	while (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
		btrfs_tree_unlock(eb);
		if (wbc->sync_mode != WB_SYNC_ALL)
			return false;
		wait_on_extent_buffer_writeback(eb);
		btrfs_tree_lock(eb);
	}

	/*
	 * We need to do this to prevent races in people who check if the eb is
	 * under IO since we can end up having no IO bits set for a short period
	 * of time.
	 */
	spin_lock(&eb->refs_lock);
	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
		set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
		spin_unlock(&eb->refs_lock);
		btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
					 -eb->len,
					 fs_info->dirty_metadata_batch);
		ret = true;
	} else {
		spin_unlock(&eb->refs_lock);
	}
	btrfs_tree_unlock(eb);
	return ret;
}
static void set_btree_ioerr(struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;

	set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);

	/*
	 * A read may stumble upon this buffer later, make sure that it gets an
	 * error and knows there was an error.
	 */
	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);

	/*
	 * We need to set the mapping with the io error as well because a write
	 * error will flip the file system readonly, and then syncfs() will
	 * return a 0 because we are readonly if we don't modify the err seq for
	 * the superblock.
	 */
	mapping_set_error(eb->fs_info->btree_inode->i_mapping, -EIO);

	/*
	 * If writeback for a btree extent that doesn't belong to a log tree
	 * failed, increment the counter transaction->eb_write_errors.
	 * We do this because while the transaction is running and before it's
	 * committing (when we call filemap_fdata[write|wait]_range against
	 * the btree inode), we might have
	 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
	 * returns an error or an error happens during writeback, when we're
	 * committing the transaction we wouldn't know about it, since the pages
	 * can be no longer dirty nor marked anymore for writeback (if a
	 * subsequent modification to the extent buffer didn't happen before the
	 * transaction commit), which makes filemap_fdata[write|wait]_range not
	 * able to find the pages tagged with SetPageError at transaction
	 * commit time. So if this happens we must abort the transaction,
	 * otherwise we commit a super block with btree roots that point to
	 * btree nodes/leafs whose content on disk is invalid - either garbage
	 * or the content of some node/leaf from a past generation that got
	 * cowed or deleted and is no longer valid.
	 *
	 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
	 * not be enough - we need to distinguish between log tree extents vs
	 * non-log tree extents, and the next filemap_fdatawait_range() call
	 * will catch and clear such errors in the mapping - and that call might
	 * be from a log sync and not from a transaction commit. Also, checking
	 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
	 * not done and would not be reliable - the eb might have been released
	 * from memory and reading it back again means that flag would not be
	 * set (since it's a runtime flag, not persisted on disk).
	 *
	 * Using the flags below in the btree inode also makes us achieve the
	 * goal of AS_EIO/AS_ENOSPC when writepages() returns success, started
	 * writeback for all dirty pages and before filemap_fdatawait_range()
	 * is called, the writeback for all dirty pages had already finished
	 * with errors - because we were not using AS_EIO/AS_ENOSPC,
	 * filemap_fdatawait_range() would return success, as it could not know
	 * that writeback errors happened (the pages were no longer tagged for
	 * writeback).
	 */
	switch (eb->log_index) {
	case -1:
		set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags);
		break;
	case 0:
		set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
		break;
	case 1:
		set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
		break;
	default:
		BUG(); /* unexpected, logic error */
	}
}
/*
 * The endio specific version which won't touch any unsafe spinlock in endio
 * context.
 */
static struct extent_buffer *find_extent_buffer_nolock(
		struct btrfs_fs_info *fs_info, u64 start)
{
	struct extent_buffer *eb;

	rcu_read_lock();
	eb = radix_tree_lookup(&fs_info->buffer_radix,
			       start >> fs_info->sectorsize_bits);
	if (eb && atomic_inc_not_zero(&eb->refs)) {
		rcu_read_unlock();
		return eb;
	}
	rcu_read_unlock();
	return NULL;
}
static void end_bbio_meta_write(struct btrfs_bio *bbio)
{
	struct extent_buffer *eb = bbio->private;
	struct btrfs_fs_info *fs_info = eb->fs_info;
	bool uptodate = !bbio->bio.bi_status;
	struct folio_iter fi;
	u32 bio_offset = 0;

	if (!uptodate)
		set_btree_ioerr(eb);

	bio_for_each_folio_all(fi, &bbio->bio) {
		u64 start = eb->start + bio_offset;
		struct folio *folio = fi.folio;
		u32 len = fi.length;

		btrfs_folio_clear_writeback(fs_info, folio, start, len);
		bio_offset += len;
	}

	clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
	smp_mb__after_atomic();
	wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);

	bio_put(&bbio->bio);
}
static void prepare_eb_write(struct extent_buffer *eb)
{
	u32 nritems;
	unsigned long start;
	unsigned long end;

	clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);

	/* Set btree blocks beyond nritems with 0 to avoid stale content */
	nritems = btrfs_header_nritems(eb);
	if (btrfs_header_level(eb) > 0) {
		end = btrfs_node_key_ptr_offset(eb, nritems);
		memzero_extent_buffer(eb, end, eb->len - end);
	} else {
		/*
		 * Leaf:
		 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
		 */
		start = btrfs_item_nr_offset(eb, nritems);
		end = btrfs_item_nr_offset(eb, 0);
		if (nritems == 0)
			end += BTRFS_LEAF_DATA_SIZE(eb->fs_info);
		else
			end += btrfs_item_offset(eb, nritems - 1);
		memzero_extent_buffer(eb, start, end - start);
	}
}
static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
					    struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	struct btrfs_bio *bbio;

	prepare_eb_write(eb);

	bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
			       REQ_OP_WRITE | REQ_META | wbc_to_write_flags(wbc),
			       eb->fs_info, end_bbio_meta_write, eb);
	bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
	bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
	wbc_init_bio(wbc, &bbio->bio);
	bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
	bbio->file_offset = eb->start;
	if (fs_info->nodesize < PAGE_SIZE) {
		struct folio *folio = eb->folios[0];
		bool ret;

		folio_lock(folio);
		btrfs_subpage_set_writeback(fs_info, folio, eb->start, eb->len);
		if (btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start,
						       eb->len)) {
			folio_clear_dirty_for_io(folio);
			wbc->nr_to_write--;
		}
		ret = bio_add_folio(&bbio->bio, folio, eb->len,
				    eb->start - folio_pos(folio));
		ASSERT(ret);
		wbc_account_cgroup_owner(wbc, folio_page(folio, 0), eb->len);
		folio_unlock(folio);
	} else {
		int num_folios = num_extent_folios(eb);

		for (int i = 0; i < num_folios; i++) {
			struct folio *folio = eb->folios[i];
			bool ret;

			folio_lock(folio);
			folio_clear_dirty_for_io(folio);
			folio_start_writeback(folio);
			ret = bio_add_folio(&bbio->bio, folio, folio_size(folio), 0);
			ASSERT(ret);
			wbc_account_cgroup_owner(wbc, folio_page(folio, 0),
						 folio_size(folio));
			wbc->nr_to_write -= folio_nr_pages(folio);
			folio_unlock(folio);
		}
	}
	btrfs_submit_bio(bbio, 0);
}
/*
 * Submit one subpage btree page.
 *
 * The main difference to submit_eb_page() is:
 * - Page locking
 *   For subpage, we don't rely on page locking at all.
 *
 * - Flush write bio
 *   We only flush bio if we may be unable to fit current extent buffers into
 *   current bio.
 *
 * Return >=0 for the number of submitted extent buffers.
 * Return <0 for fatal error.
 */
static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
	struct folio *folio = page_folio(page);
	int submitted = 0;
	u64 page_start = page_offset(page);
	int bit_start = 0;
	int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;

	/* Lock and write each dirty extent buffers in the range */
	while (bit_start < fs_info->subpage_info->bitmap_nr_bits) {
		struct btrfs_subpage *subpage = folio_get_private(folio);
		struct extent_buffer *eb;
		unsigned long flags;
		u64 start;

		/*
		 * Take private lock to ensure the subpage won't be detached
		 * in the meantime.
		 */
		spin_lock(&page->mapping->private_lock);
		if (!folio_test_private(folio)) {
			spin_unlock(&page->mapping->private_lock);
			break;
		}
		spin_lock_irqsave(&subpage->lock, flags);
		if (!test_bit(bit_start + fs_info->subpage_info->dirty_offset,
			      subpage->bitmaps)) {
			spin_unlock_irqrestore(&subpage->lock, flags);
			spin_unlock(&page->mapping->private_lock);
			bit_start++;
			continue;
		}

		start = page_start + bit_start * fs_info->sectorsize;
		bit_start += sectors_per_node;

		/*
		 * Here we just want to grab the eb without touching extra
		 * spin locks, so call find_extent_buffer_nolock().
		 */
		eb = find_extent_buffer_nolock(fs_info, start);
		spin_unlock_irqrestore(&subpage->lock, flags);
		spin_unlock(&page->mapping->private_lock);

		/*
		 * The eb has already reached 0 refs thus find_extent_buffer()
		 * doesn't return it. We don't need to write back such eb
		 * anyway.
		 */
		if (!eb)
			continue;

		if (lock_extent_buffer_for_io(eb, wbc)) {
			write_one_eb(eb, wbc);
			submitted++;
		}
		free_extent_buffer(eb);
	}
	return submitted;
}
/*
 * Submit all page(s) of one extent buffer.
 *
 * @page:	the page of one extent buffer
 * @eb_context:	to determine if we need to submit this page, if current page
 *		belongs to this eb, we don't need to submit
 *
 * The caller should pass each page in their bytenr order, and here we use
 * @eb_context to determine if we have submitted pages of one extent buffer.
 *
 * If we have, we just skip until we hit a new page that doesn't belong to
 * current @eb_context.
 *
 * If not, we submit all the page(s) of the extent buffer.
 *
 * Return >0 if we have submitted the extent buffer successfully.
 * Return 0 if we don't need to submit the page, as it's already submitted by
 * the previous call.
 * Return <0 for fatal error.
 */
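/*
 * Illustrative example: with 16K nodes and 4K pages an extent buffer covers
 * four consecutive pages; the first page submits the whole extent buffer and
 * the next three return 0 because they still belong to @eb_context.
 */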
static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx)
{
	struct writeback_control *wbc = ctx->wbc;
	struct address_space *mapping = page->mapping;
	struct folio *folio = page_folio(page);
	struct extent_buffer *eb;
	int ret;

	if (!folio_test_private(folio))
		return 0;

	if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
		return submit_eb_subpage(page, wbc);

	spin_lock(&mapping->private_lock);
	if (!folio_test_private(folio)) {
		spin_unlock(&mapping->private_lock);
		return 0;
	}

	eb = folio_get_private(folio);

	/*
	 * Shouldn't happen and normally this would be a BUG_ON but no point
	 * crashing the machine for something we can survive anyway.
	 */
	if (WARN_ON(!eb)) {
		spin_unlock(&mapping->private_lock);
		return 0;
	}

	if (eb == ctx->eb) {
		spin_unlock(&mapping->private_lock);
		return 0;
	}
	ret = atomic_inc_not_zero(&eb->refs);
	spin_unlock(&mapping->private_lock);
	if (!ret)
		return 0;

	ctx->eb = eb;

	ret = btrfs_check_meta_write_pointer(eb->fs_info, ctx);
	if (ret) {
		if (ret == -EBUSY)
			ret = 0;
		free_extent_buffer(eb);
		return ret;
	}

	if (!lock_extent_buffer_for_io(eb, wbc)) {
		free_extent_buffer(eb);
		return 0;
	}
	/* Implies write in zoned mode. */
	if (ctx->zoned_bg) {
		/* Mark the last eb in the block group. */
		btrfs_schedule_zone_finish_bg(ctx->zoned_bg, eb);
		ctx->zoned_bg->meta_write_pointer += eb->len;
	}
	write_one_eb(eb, wbc);
	free_extent_buffer(eb);
	return 1;
}
int btree_write_cache_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct btrfs_eb_write_context ctx = { .wbc = wbc };
	struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
	int ret = 0;
	int done = 0;
	int nr_to_write_done = 0;
	struct folio_batch fbatch;
	unsigned int nr_folios;
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	int scanned = 0;
	xa_mark_t tag;

	folio_batch_init(&fbatch);
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;

		/*
		 * Start from the beginning does not need to cycle over the
		 * range, mark it as scanned.
		 */
		scanned = (index == 0);
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		scanned = 1;
	}
	if (wbc->sync_mode == WB_SYNC_ALL)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
	btrfs_zoned_meta_io_lock(fs_info);
retry:
	if (wbc->sync_mode == WB_SYNC_ALL)
		tag_pages_for_writeback(mapping, index, end);
	while (!done && !nr_to_write_done && (index <= end) &&
	       (nr_folios = filemap_get_folios_tag(mapping, &index, end,
						   tag, &fbatch))) {
		unsigned i;

		for (i = 0; i < nr_folios; i++) {
			struct folio *folio = fbatch.folios[i];

			ret = submit_eb_page(&folio->page, &ctx);
			if (ret == 0)
				continue;
			if (ret < 0) {
				done = 1;
				break;
			}

			/*
			 * the filesystem may choose to bump up nr_to_write.
			 * We have to make sure to honor the new nr_to_write
			 * at any time
			 */
			nr_to_write_done = wbc->nr_to_write <= 0;
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = 1;
		index = 0;
		goto retry;
	}
	/*
	 * If something went wrong, don't allow any metadata write bio to be
	 * submitted.
	 *
	 * This would prevent use-after-free if we had dirty pages not
	 * cleaned up, which can still happen by fuzzed images.
	 *
	 * - Bad extent tree
	 *   Allowing existing tree block to be allocated for other trees.
	 *
	 * - Log tree operations
	 *   Existing tree blocks get allocated to log tree, bumps its
	 *   generation, then get cleaned in tree re-balance.
	 *   Such tree block will not be written back, since it's clean,
	 *   thus no WRITTEN flag set.
	 *   And after log writes back, this tree block is not traced by
	 *   any dirty extent_io_tree.
	 *
	 * - Offending tree block gets re-dirtied from its original owner
	 *   Since it has bumped generation, no WRITTEN flag, it can be
	 *   reused without COWing. This tree block will not be traced
	 *   by btrfs_transaction::dirty_pages.
	 *
	 * Now such dirty tree block will not be cleaned by any dirty
	 * extent io tree. Thus we don't want to submit such wild eb
	 * if the fs already has error.
	 *
	 * We can get ret > 0 from submit_eb_page() indicating how many ebs
	 * were submitted. Reset it to 0 to avoid false alerts for the caller.
	 */
	if (ret > 0)
		ret = 0;
	if (!ret && BTRFS_FS_ERROR(fs_info))
		ret = -EROFS;

	if (ctx.zoned_bg)
		btrfs_put_block_group(ctx.zoned_bg);
	btrfs_zoned_meta_io_unlock(fs_info);
	return ret;
}
/*
 * Walk the list of dirty pages of the given address space and write all of them.
 *
 * @mapping:   address space structure to write
 * @wbc:       subtract the number of written pages from *@wbc->nr_to_write
 * @bio_ctrl:  holds context for the write, namely the bio
 *
 * If a page is already under I/O, write_cache_pages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made get new I/O started against them.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 */
static int extent_write_cache_pages(struct address_space *mapping,
				    struct btrfs_bio_ctrl *bio_ctrl)
{
	struct writeback_control *wbc = bio_ctrl->wbc;
	struct inode *inode = mapping->host;
	int ret = 0;
	int done = 0;
	int nr_to_write_done = 0;
	struct folio_batch fbatch;
	unsigned int nr_folios;
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int range_whole = 0;
	int scanned = 0;
	xa_mark_t tag;

	/*
	 * We have to hold onto the inode so that ordered extents can do their
	 * work when the IO finishes. The alternative to this is failing to add
	 * an ordered extent if the igrab() fails there and that is a huge pain
	 * to deal with, so instead just hold onto the inode throughout the
	 * writepages operation. If it fails here we are freeing up the inode
	 * anyway and we'd rather not waste our time writing out stuff that is
	 * going to be truncated anyway.
	 */
	if (!igrab(inode))
		return 0;

	folio_batch_init(&fbatch);
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;

		/*
		 * Start from the beginning does not need to cycle over the
		 * range, mark it as scanned.
		 */
		scanned = (index == 0);
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		scanned = 1;
	}

	/*
	 * We do the tagged writepage as long as the snapshot flush bit is set
	 * and we are the first one who do the filemap_flush() on this inode.
	 *
	 * The nr_to_write == LONG_MAX is needed to make sure other flushers do
	 * not race in and drop the bit.
	 */
	if (range_whole && wbc->nr_to_write == LONG_MAX &&
	    test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
			       &BTRFS_I(inode)->runtime_flags))
		wbc->tagged_writepages = 1;

	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	while (!done && !nr_to_write_done && (index <= end) &&
	       (nr_folios = filemap_get_folios_tag(mapping, &index,
						   end, tag, &fbatch))) {
		unsigned i;

		for (i = 0; i < nr_folios; i++) {
			struct folio *folio = fbatch.folios[i];

			done_index = folio_next_index(folio);
			/*
			 * At this point we hold neither the i_pages lock nor
			 * the page lock: the page may be truncated or
			 * invalidated (changing page->mapping to NULL),
			 * or even swizzled back from swapper_space to
			 * tmpfs file mapping
			 */
			if (!folio_trylock(folio)) {
				submit_write_bio(bio_ctrl, 0);
				folio_lock(folio);
			}

			if (unlikely(folio->mapping != mapping)) {
				folio_unlock(folio);
				continue;
			}

			if (!folio_test_dirty(folio)) {
				/* Someone wrote it for us. */
				folio_unlock(folio);
				continue;
			}

			if (wbc->sync_mode != WB_SYNC_NONE) {
				if (folio_test_writeback(folio))
					submit_write_bio(bio_ctrl, 0);
				folio_wait_writeback(folio);
			}

			if (folio_test_writeback(folio) ||
			    !folio_clear_dirty_for_io(folio)) {
				folio_unlock(folio);
				continue;
			}

			ret = __extent_writepage(&folio->page, bio_ctrl);
			if (ret < 0) {
				done = 1;
				break;
			}

			/*
			 * The filesystem may choose to bump up nr_to_write.
			 * We have to make sure to honor the new nr_to_write
			 * at any time.
			 */
			nr_to_write_done = (wbc->sync_mode == WB_SYNC_NONE &&
					    wbc->nr_to_write <= 0);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
2178 * If we're looping we could run into a page that is locked by a
2179 * writer and that writer could be waiting on writeback for a
2180 * page in our current bio, and thus deadlock, so flush the
2183 submit_write_bio(bio_ctrl
, 0);
2187 if (wbc
->range_cyclic
|| (wbc
->nr_to_write
> 0 && range_whole
))
2188 mapping
->writeback_index
= done_index
;
2190 btrfs_add_delayed_iput(BTRFS_I(inode
));
2195 * Submit the pages in the range to bio for call sites which delalloc range has
2196 * already been ran (aka, ordered extent inserted) and all pages are still
2199 void extent_write_locked_range(struct inode
*inode
, struct page
*locked_page
,
2200 u64 start
, u64 end
, struct writeback_control
*wbc
,
2203 bool found_error
= false;
2205 struct address_space
*mapping
= inode
->i_mapping
;
2206 struct btrfs_fs_info
*fs_info
= btrfs_sb(inode
->i_sb
);
2207 const u32 sectorsize
= fs_info
->sectorsize
;
2208 loff_t i_size
= i_size_read(inode
);
2210 struct btrfs_bio_ctrl bio_ctrl
= {
2212 .opf
= REQ_OP_WRITE
| wbc_to_write_flags(wbc
),
2215 if (wbc
->no_cgroup_owner
)
2216 bio_ctrl
.opf
|= REQ_BTRFS_CGROUP_PUNT
;
2218 ASSERT(IS_ALIGNED(start
, sectorsize
) && IS_ALIGNED(end
+ 1, sectorsize
));
2220 while (cur
<= end
) {
2221 u64 cur_end
= min(round_down(cur
, PAGE_SIZE
) + PAGE_SIZE
- 1, end
);
2222 u32 cur_len
= cur_end
+ 1 - cur
;
2226 page
= find_get_page(mapping
, cur
>> PAGE_SHIFT
);
2227 ASSERT(PageLocked(page
));
2228 if (pages_dirty
&& page
!= locked_page
) {
2229 ASSERT(PageDirty(page
));
2230 clear_page_dirty_for_io(page
);
2233 ret
= __extent_writepage_io(BTRFS_I(inode
), page
, &bio_ctrl
,
2238 /* Make sure the mapping tag for page dirty gets cleared. */
2240 set_page_writeback(page
);
2241 end_page_writeback(page
);
2244 btrfs_mark_ordered_io_finished(BTRFS_I(inode
), page
,
2245 cur
, cur_len
, !ret
);
2246 mapping_set_error(page
->mapping
, ret
);
2248 btrfs_folio_unlock_writer(fs_info
, page_folio(page
), cur
, cur_len
);
2256 submit_write_bio(&bio_ctrl
, found_error
? ret
: 0);
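/*
 * Illustrative note (not from the original source): the loop above walks the
 * locked range one page at a time.  With PAGE_SIZE == 4096, a cur of 12800
 * gives round_down(cur, PAGE_SIZE) == 12288, so cur_end is min(12288 + 4095,
 * end) == 16383 when the range extends past the page, and cur_len is
 * cur_end + 1 - cur == 3584.  Each iteration therefore never crosses a page
 * boundary.
 */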
int extent_writepages(struct address_space *mapping,
		      struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int ret = 0;
	struct btrfs_bio_ctrl bio_ctrl = {
		.wbc = wbc,
		.opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
	};

	/*
	 * Allow only a single thread to do the reloc work in zoned mode to
	 * protect the write pointer updates.
	 */
	btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
	ret = extent_write_cache_pages(mapping, &bio_ctrl);
	submit_write_bio(&bio_ctrl, ret);
	btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
	return ret;
}
void extent_readahead(struct readahead_control *rac)
{
	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ | REQ_RAHEAD };
	struct page *pagepool[16];
	struct extent_map *em_cached = NULL;
	u64 prev_em_start = (u64)-1;
	int nr;

	while ((nr = readahead_page_batch(rac, pagepool))) {
		u64 contig_start = readahead_pos(rac);
		u64 contig_end = contig_start + readahead_batch_length(rac) - 1;

		contiguous_readpages(pagepool, nr, contig_start, contig_end,
				     &em_cached, &bio_ctrl, &prev_em_start);
	}

	if (em_cached)
		free_extent_map(em_cached);
	submit_one_bio(&bio_ctrl);
}
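/*
 * Illustrative note (not from the original source): readahead_pos() reports
 * the byte offset of the current batch and readahead_batch_length() its byte
 * length, so a 16-page batch starting at page index 100 with 4 KiB pages
 * yields contig_start == 409600 and contig_end == 409600 + 65536 - 1 ==
 * 475135, i.e. an inclusive, logically contiguous range handed to
 * contiguous_readpages().
 */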
/*
 * basic invalidate_folio code, this waits on any locked or writeback
 * ranges corresponding to the folio, and then deletes any extent state
 * records from the tree
 */
int extent_invalidate_folio(struct extent_io_tree *tree,
			    struct folio *folio, size_t offset)
{
	struct extent_state *cached_state = NULL;
	u64 start = folio_pos(folio);
	u64 end = start + folio_size(folio) - 1;
	size_t blocksize = folio->mapping->host->i_sb->s_blocksize;

	/* This function is only called for the btree inode */
	ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);

	start += ALIGN(offset, blocksize);
	if (start > end)
		return 0;

	lock_extent(tree, start, end, &cached_state);
	folio_wait_writeback(folio);

	/*
	 * Currently for btree io tree, only EXTENT_LOCKED is utilized,
	 * so here we only need to unlock the extent range to free any
	 * existing extent state.
	 */
	unlock_extent(tree, start, end, &cached_state);
	return 0;
}
/*
 * a helper for release_folio, this tests for areas of the page that
 * are locked or under IO and drops the related state bits if it is safe
 * to drop the page.
 */
static int try_release_extent_state(struct extent_io_tree *tree,
				    struct page *page, gfp_t mask)
{
	u64 start = page_offset(page);
	u64 end = start + PAGE_SIZE - 1;
	int ret = 1;

	if (test_range_bit_exists(tree, start, end, EXTENT_LOCKED)) {
		ret = 0;
	} else {
		u32 clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM |
				   EXTENT_DELALLOC_NEW | EXTENT_CTLBITS |
				   EXTENT_QGROUP_RESERVED);

		/*
		 * At this point we can safely clear everything except the
		 * locked bit, the nodatasum bit and the delalloc new bit.
		 * The delalloc new bit will be cleared by ordered extent
		 * completion.
		 */
		ret = __clear_extent_bit(tree, start, end, clear_bits, NULL, NULL);

		/* if clear_extent_bit failed for enomem reasons,
		 * we can't allow the release to continue.
		 */
		if (ret < 0)
			ret = 0;
		else
			ret = 1;
	}
	return ret;
}
2372 * a helper for release_folio. As long as there are no locked extents
2373 * in the range corresponding to the page, both state records and extent
2374 * map records are removed
2376 int try_release_extent_mapping(struct page
*page
, gfp_t mask
)
2378 struct extent_map
*em
;
2379 u64 start
= page_offset(page
);
2380 u64 end
= start
+ PAGE_SIZE
- 1;
2381 struct btrfs_inode
*btrfs_inode
= BTRFS_I(page
->mapping
->host
);
2382 struct extent_io_tree
*tree
= &btrfs_inode
->io_tree
;
2383 struct extent_map_tree
*map
= &btrfs_inode
->extent_tree
;
2385 if (gfpflags_allow_blocking(mask
) &&
2386 page
->mapping
->host
->i_size
> SZ_16M
) {
2388 while (start
<= end
) {
2389 struct btrfs_fs_info
*fs_info
;
2392 len
= end
- start
+ 1;
2393 write_lock(&map
->lock
);
2394 em
= lookup_extent_mapping(map
, start
, len
);
2396 write_unlock(&map
->lock
);
2399 if ((em
->flags
& EXTENT_FLAG_PINNED
) ||
2400 em
->start
!= start
) {
2401 write_unlock(&map
->lock
);
2402 free_extent_map(em
);
2405 if (test_range_bit_exists(tree
, em
->start
,
2406 extent_map_end(em
) - 1,
2410 * If it's not in the list of modified extents, used
2411 * by a fast fsync, we can remove it. If it's being
2412 * logged we can safely remove it since fsync took an
2413 * extra reference on the em.
2415 if (list_empty(&em
->list
) ||
2416 (em
->flags
& EXTENT_FLAG_LOGGING
))
2419 * If it's in the list of modified extents, remove it
2420 * only if its generation is older then the current one,
2421 * in which case we don't need it for a fast fsync.
2422 * Otherwise don't remove it, we could be racing with an
2423 * ongoing fast fsync that could miss the new extent.
2425 fs_info
= btrfs_inode
->root
->fs_info
;
2426 spin_lock(&fs_info
->trans_lock
);
2427 cur_gen
= fs_info
->generation
;
2428 spin_unlock(&fs_info
->trans_lock
);
2429 if (em
->generation
>= cur_gen
)
2433 * We only remove extent maps that are not in the list of
2434 * modified extents or that are in the list but with a
2435 * generation lower then the current generation, so there
2436 * is no need to set the full fsync flag on the inode (it
2437 * hurts the fsync performance for workloads with a data
2438 * size that exceeds or is close to the system's memory).
2440 remove_extent_mapping(map
, em
);
2441 /* once for the rb tree */
2442 free_extent_map(em
);
2444 start
= extent_map_end(em
);
2445 write_unlock(&map
->lock
);
2448 free_extent_map(em
);
2450 cond_resched(); /* Allow large-extent preemption. */
2453 return try_release_extent_state(tree
, page
, mask
);
/*
 * To cache previous fiemap extent
 *
 * Will be used for merging fiemap extent
 */
struct fiemap_cache {
	u64 offset;
	u64 phys;
	u64 len;
	u32 flags;
	bool cached;
};
/*
 * Helper to submit fiemap extent.
 *
 * Will try to merge current fiemap extent specified by @offset, @phys,
 * @len and @flags with cached one.
 * And only when we fail to merge, the cached one will be submitted as
 * a fiemap extent.
 *
 * Return value is the same as fiemap_fill_next_extent().
 */
static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
			      struct fiemap_cache *cache,
			      u64 offset, u64 phys, u64 len, u32 flags)
{
	int ret = 0;

	/* Set at the end of extent_fiemap(). */
	ASSERT((flags & FIEMAP_EXTENT_LAST) == 0);

	if (!cache->cached)
		goto assign;

	/*
	 * Sanity check, extent_fiemap() should have ensured that new
	 * fiemap extent won't overlap with cached one.
	 * Not recoverable.
	 *
	 * NOTE: Physical address can overlap, due to compression
	 */
	if (cache->offset + cache->len > offset) {
		WARN_ON(1);
		return -EINVAL;
	}

	/*
	 * Only merges fiemap extents if
	 * 1) Their logical addresses are continuous
	 *
	 * 2) Their physical addresses are continuous
	 *    So truly compressed (physical size smaller than logical size)
	 *    extents won't get merged with each other
	 *
	 * 3) Share same flags
	 */
	if (cache->offset + cache->len == offset &&
	    cache->phys + cache->len == phys &&
	    cache->flags == flags) {
		cache->len += len;
		cache->flags |= flags;
		return 0;
	}

	/* Not mergeable, need to submit cached one */
	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
				      cache->len, cache->flags);
	cache->cached = false;
	if (ret)
		return ret;
assign:
	cache->cached = true;
	cache->offset = offset;
	cache->phys = phys;
	cache->len = len;
	cache->flags = flags;

	return 0;
}
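/*
 * Illustrative example (not from the original source): with a cached entry of
 * (offset=0, phys=1M, len=64K, flags=0), a new extent (offset=64K,
 * phys=1M+64K, len=64K, flags=0) satisfies all three conditions above and is
 * merged, leaving the cache as (offset=0, phys=1M, len=128K).  A new extent
 * at offset=64K but phys=2M breaks physical contiguity, so the cached entry
 * is submitted via fiemap_fill_next_extent() and the new one becomes the
 * cached entry instead.
 */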
/*
 * Emit last fiemap cache
 *
 * The last fiemap cache may still be cached in the following case:
 *
 * |<- Fiemap range ->|
 * |<------------  First extent ----------->|
 *
 * In this case, the first extent range will be cached but not emitted.
 * So we must emit it before ending extent_fiemap().
 */
static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo,
				  struct fiemap_cache *cache)
{
	int ret;

	if (!cache->cached)
		return 0;

	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
				      cache->len, cache->flags);
	cache->cached = false;
	if (ret > 0)
		ret = 0;
	return ret;
}
static int fiemap_next_leaf_item(struct btrfs_inode *inode, struct btrfs_path *path)
{
	struct extent_buffer *clone;
	struct btrfs_key key;
	int slot;
	int ret;

	path->slots[0]++;
	if (path->slots[0] < btrfs_header_nritems(path->nodes[0]))
		return 0;

	ret = btrfs_next_leaf(inode->root, path);
	if (ret != 0)
		return ret;

	/*
	 * Don't bother with cloning if there are no more file extent items for
	 * our inode.
	 */
	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	if (key.objectid != btrfs_ino(inode) || key.type != BTRFS_EXTENT_DATA_KEY)
		return 1;

	/* See the comment at fiemap_search_slot() about why we clone. */
	clone = btrfs_clone_extent_buffer(path->nodes[0]);
	if (!clone)
		return -ENOMEM;

	slot = path->slots[0];
	btrfs_release_path(path);
	path->nodes[0] = clone;
	path->slots[0] = slot;

	return 0;
}
2600 * Search for the first file extent item that starts at a given file offset or
2601 * the one that starts immediately before that offset.
2602 * Returns: 0 on success, < 0 on error, 1 if not found.
2604 static int fiemap_search_slot(struct btrfs_inode
*inode
, struct btrfs_path
*path
,
2607 const u64 ino
= btrfs_ino(inode
);
2608 struct btrfs_root
*root
= inode
->root
;
2609 struct extent_buffer
*clone
;
2610 struct btrfs_key key
;
2615 key
.type
= BTRFS_EXTENT_DATA_KEY
;
2616 key
.offset
= file_offset
;
2618 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
2622 if (ret
> 0 && path
->slots
[0] > 0) {
2623 btrfs_item_key_to_cpu(path
->nodes
[0], &key
, path
->slots
[0] - 1);
2624 if (key
.objectid
== ino
&& key
.type
== BTRFS_EXTENT_DATA_KEY
)
2628 if (path
->slots
[0] >= btrfs_header_nritems(path
->nodes
[0])) {
2629 ret
= btrfs_next_leaf(root
, path
);
2633 btrfs_item_key_to_cpu(path
->nodes
[0], &key
, path
->slots
[0]);
2634 if (key
.objectid
!= ino
|| key
.type
!= BTRFS_EXTENT_DATA_KEY
)
2639 * We clone the leaf and use it during fiemap. This is because while
2640 * using the leaf we do expensive things like checking if an extent is
2641 * shared, which can take a long time. In order to prevent blocking
2642 * other tasks for too long, we use a clone of the leaf. We have locked
2643 * the file range in the inode's io tree, so we know none of our file
2644 * extent items can change. This way we avoid blocking other tasks that
2645 * want to insert items for other inodes in the same leaf or b+tree
2646 * rebalance operations (triggered for example when someone is trying
2647 * to push items into this leaf when trying to insert an item in a
2649 * We also need the private clone because holding a read lock on an
2650 * extent buffer of the subvolume's b+tree will make lockdep unhappy
2651 * when we call fiemap_fill_next_extent(), because that may cause a page
2652 * fault when filling the user space buffer with fiemap data.
2654 clone
= btrfs_clone_extent_buffer(path
->nodes
[0]);
2658 slot
= path
->slots
[0];
2659 btrfs_release_path(path
);
2660 path
->nodes
[0] = clone
;
2661 path
->slots
[0] = slot
;
2667 * Process a range which is a hole or a prealloc extent in the inode's subvolume
2668 * btree. If @disk_bytenr is 0, we are dealing with a hole, otherwise a prealloc
2669 * extent. The end offset (@end) is inclusive.
2671 static int fiemap_process_hole(struct btrfs_inode
*inode
,
2672 struct fiemap_extent_info
*fieinfo
,
2673 struct fiemap_cache
*cache
,
2674 struct extent_state
**delalloc_cached_state
,
2675 struct btrfs_backref_share_check_ctx
*backref_ctx
,
2676 u64 disk_bytenr
, u64 extent_offset
,
2680 const u64 i_size
= i_size_read(&inode
->vfs_inode
);
2681 u64 cur_offset
= start
;
2682 u64 last_delalloc_end
= 0;
2683 u32 prealloc_flags
= FIEMAP_EXTENT_UNWRITTEN
;
2684 bool checked_extent_shared
= false;
2688 * There can be no delalloc past i_size, so don't waste time looking for
2691 while (cur_offset
< end
&& cur_offset
< i_size
) {
2692 struct extent_state
*cached_state
= NULL
;
2698 u64 prealloc_len
= 0;
2701 lockstart
= round_down(cur_offset
, inode
->root
->fs_info
->sectorsize
);
2702 lockend
= round_up(end
, inode
->root
->fs_info
->sectorsize
);
2705 * We are only locking for the delalloc range because that's the
2706 * only thing that can change here. With fiemap we have a lock
2707 * on the inode, so no buffered or direct writes can happen.
2709 * However mmaps and normal page writeback will cause this to
2710 * change arbitrarily. We have to lock the extent lock here to
2711 * make sure that nobody messes with the tree while we're doing
2712 * btrfs_find_delalloc_in_range.
2714 lock_extent(&inode
->io_tree
, lockstart
, lockend
, &cached_state
);
2715 delalloc
= btrfs_find_delalloc_in_range(inode
, cur_offset
, end
,
2716 delalloc_cached_state
,
2719 unlock_extent(&inode
->io_tree
, lockstart
, lockend
, &cached_state
);
2724 * If this is a prealloc extent we have to report every section
2725 * of it that has no delalloc.
2727 if (disk_bytenr
!= 0) {
2728 if (last_delalloc_end
== 0) {
2729 prealloc_start
= start
;
2730 prealloc_len
= delalloc_start
- start
;
2732 prealloc_start
= last_delalloc_end
+ 1;
2733 prealloc_len
= delalloc_start
- prealloc_start
;
2737 if (prealloc_len
> 0) {
2738 if (!checked_extent_shared
&& fieinfo
->fi_extents_max
) {
2739 ret
= btrfs_is_data_extent_shared(inode
,
2746 prealloc_flags
|= FIEMAP_EXTENT_SHARED
;
2748 checked_extent_shared
= true;
2750 ret
= emit_fiemap_extent(fieinfo
, cache
, prealloc_start
,
2751 disk_bytenr
+ extent_offset
,
2752 prealloc_len
, prealloc_flags
);
2755 extent_offset
+= prealloc_len
;
2758 ret
= emit_fiemap_extent(fieinfo
, cache
, delalloc_start
, 0,
2759 delalloc_end
+ 1 - delalloc_start
,
2760 FIEMAP_EXTENT_DELALLOC
|
2761 FIEMAP_EXTENT_UNKNOWN
);
2765 last_delalloc_end
= delalloc_end
;
2766 cur_offset
= delalloc_end
+ 1;
2767 extent_offset
+= cur_offset
- delalloc_start
;
2772 * Either we found no delalloc for the whole prealloc extent or we have
2773 * a prealloc extent that spans i_size or starts at or after i_size.
2775 if (disk_bytenr
!= 0 && last_delalloc_end
< end
) {
2779 if (last_delalloc_end
== 0) {
2780 prealloc_start
= start
;
2781 prealloc_len
= end
+ 1 - start
;
2783 prealloc_start
= last_delalloc_end
+ 1;
2784 prealloc_len
= end
+ 1 - prealloc_start
;
2787 if (!checked_extent_shared
&& fieinfo
->fi_extents_max
) {
2788 ret
= btrfs_is_data_extent_shared(inode
,
2795 prealloc_flags
|= FIEMAP_EXTENT_SHARED
;
2797 ret
= emit_fiemap_extent(fieinfo
, cache
, prealloc_start
,
2798 disk_bytenr
+ extent_offset
,
2799 prealloc_len
, prealloc_flags
);
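/*
 * Illustrative example (not from the original source): for a 1 MiB prealloc
 * extent starting at file offset 0 with delalloc in [256K, 512K), the loop
 * above emits three ranges: prealloc [0, 256K) as UNWRITTEN, delalloc
 * [256K, 512K) as DELALLOC|UNKNOWN, and (after the loop) the remaining
 * prealloc [512K, 1M) as UNWRITTEN.  With no delalloc at all, only the single
 * prealloc range is emitted by the code following the loop.
 */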
2807 static int fiemap_find_last_extent_offset(struct btrfs_inode
*inode
,
2808 struct btrfs_path
*path
,
2809 u64
*last_extent_end_ret
)
2811 const u64 ino
= btrfs_ino(inode
);
2812 struct btrfs_root
*root
= inode
->root
;
2813 struct extent_buffer
*leaf
;
2814 struct btrfs_file_extent_item
*ei
;
2815 struct btrfs_key key
;
2820 * Lookup the last file extent. We're not using i_size here because
2821 * there might be preallocation past i_size.
2823 ret
= btrfs_lookup_file_extent(NULL
, root
, path
, ino
, (u64
)-1, 0);
2824 /* There can't be a file extent item at offset (u64)-1 */
2830 * For a non-existing key, btrfs_search_slot() always leaves us at a
2831 * slot > 0, except if the btree is empty, which is impossible because
2832 * at least it has the inode item for this inode and all the items for
2833 * the root inode 256.
2835 ASSERT(path
->slots
[0] > 0);
2837 leaf
= path
->nodes
[0];
2838 btrfs_item_key_to_cpu(leaf
, &key
, path
->slots
[0]);
2839 if (key
.objectid
!= ino
|| key
.type
!= BTRFS_EXTENT_DATA_KEY
) {
2840 /* No file extent items in the subvolume tree. */
2841 *last_extent_end_ret
= 0;
2846 * For an inline extent, the disk_bytenr is where inline data starts at,
2847 * so first check if we have an inline extent item before checking if we
2848 * have an implicit hole (disk_bytenr == 0).
2850 ei
= btrfs_item_ptr(leaf
, path
->slots
[0], struct btrfs_file_extent_item
);
2851 if (btrfs_file_extent_type(leaf
, ei
) == BTRFS_FILE_EXTENT_INLINE
) {
2852 *last_extent_end_ret
= btrfs_file_extent_end(path
);
2857 * Find the last file extent item that is not a hole (when NO_HOLES is
2858 * not enabled). This should take at most 2 iterations in the worst
2859 * case: we have one hole file extent item at slot 0 of a leaf and
2860 * another hole file extent item as the last item in the previous leaf.
2861 * This is because we merge file extent items that represent holes.
2863 disk_bytenr
= btrfs_file_extent_disk_bytenr(leaf
, ei
);
2864 while (disk_bytenr
== 0) {
2865 ret
= btrfs_previous_item(root
, path
, ino
, BTRFS_EXTENT_DATA_KEY
);
2868 } else if (ret
> 0) {
2869 /* No file extent items that are not holes. */
2870 *last_extent_end_ret
= 0;
2873 leaf
= path
->nodes
[0];
2874 ei
= btrfs_item_ptr(leaf
, path
->slots
[0],
2875 struct btrfs_file_extent_item
);
2876 disk_bytenr
= btrfs_file_extent_disk_bytenr(leaf
, ei
);
2879 *last_extent_end_ret
= btrfs_file_extent_end(path
);
2883 int extent_fiemap(struct btrfs_inode
*inode
, struct fiemap_extent_info
*fieinfo
,
2886 const u64 ino
= btrfs_ino(inode
);
2887 struct extent_state
*delalloc_cached_state
= NULL
;
2888 struct btrfs_path
*path
;
2889 struct fiemap_cache cache
= { 0 };
2890 struct btrfs_backref_share_check_ctx
*backref_ctx
;
2891 u64 last_extent_end
;
2892 u64 prev_extent_end
;
2895 const u64 sectorsize
= inode
->root
->fs_info
->sectorsize
;
2896 bool stopped
= false;
2899 backref_ctx
= btrfs_alloc_backref_share_check_ctx();
2900 path
= btrfs_alloc_path();
2901 if (!backref_ctx
|| !path
) {
2906 range_start
= round_down(start
, sectorsize
);
2907 range_end
= round_up(start
+ len
, sectorsize
);
2908 prev_extent_end
= range_start
;
2910 btrfs_inode_lock(inode
, BTRFS_ILOCK_SHARED
);
2912 ret
= fiemap_find_last_extent_offset(inode
, path
, &last_extent_end
);
2915 btrfs_release_path(path
);
2917 path
->reada
= READA_FORWARD
;
2918 ret
= fiemap_search_slot(inode
, path
, range_start
);
2921 } else if (ret
> 0) {
2923 * No file extent item found, but we may have delalloc between
2924 * the current offset and i_size. So check for that.
2927 goto check_eof_delalloc
;
2930 while (prev_extent_end
< range_end
) {
2931 struct extent_buffer
*leaf
= path
->nodes
[0];
2932 struct btrfs_file_extent_item
*ei
;
2933 struct btrfs_key key
;
2936 u64 extent_offset
= 0;
2938 u64 disk_bytenr
= 0;
2943 btrfs_item_key_to_cpu(leaf
, &key
, path
->slots
[0]);
2944 if (key
.objectid
!= ino
|| key
.type
!= BTRFS_EXTENT_DATA_KEY
)
2947 extent_end
= btrfs_file_extent_end(path
);
2950 * The first iteration can leave us at an extent item that ends
2951 * before our range's start. Move to the next item.
2953 if (extent_end
<= range_start
)
2956 backref_ctx
->curr_leaf_bytenr
= leaf
->start
;
2958 /* We have in implicit hole (NO_HOLES feature enabled). */
2959 if (prev_extent_end
< key
.offset
) {
2960 const u64 hole_end
= min(key
.offset
, range_end
) - 1;
2962 ret
= fiemap_process_hole(inode
, fieinfo
, &cache
,
2963 &delalloc_cached_state
,
2964 backref_ctx
, 0, 0, 0,
2965 prev_extent_end
, hole_end
);
2968 } else if (ret
> 0) {
2969 /* fiemap_fill_next_extent() told us to stop. */
2974 /* We've reached the end of the fiemap range, stop. */
2975 if (key
.offset
>= range_end
) {
2981 extent_len
= extent_end
- key
.offset
;
2982 ei
= btrfs_item_ptr(leaf
, path
->slots
[0],
2983 struct btrfs_file_extent_item
);
2984 compression
= btrfs_file_extent_compression(leaf
, ei
);
2985 extent_type
= btrfs_file_extent_type(leaf
, ei
);
2986 extent_gen
= btrfs_file_extent_generation(leaf
, ei
);
2988 if (extent_type
!= BTRFS_FILE_EXTENT_INLINE
) {
2989 disk_bytenr
= btrfs_file_extent_disk_bytenr(leaf
, ei
);
2990 if (compression
== BTRFS_COMPRESS_NONE
)
2991 extent_offset
= btrfs_file_extent_offset(leaf
, ei
);
2994 if (compression
!= BTRFS_COMPRESS_NONE
)
2995 flags
|= FIEMAP_EXTENT_ENCODED
;
2997 if (extent_type
== BTRFS_FILE_EXTENT_INLINE
) {
2998 flags
|= FIEMAP_EXTENT_DATA_INLINE
;
2999 flags
|= FIEMAP_EXTENT_NOT_ALIGNED
;
3000 ret
= emit_fiemap_extent(fieinfo
, &cache
, key
.offset
, 0,
3002 } else if (extent_type
== BTRFS_FILE_EXTENT_PREALLOC
) {
3003 ret
= fiemap_process_hole(inode
, fieinfo
, &cache
,
3004 &delalloc_cached_state
,
3006 disk_bytenr
, extent_offset
,
3007 extent_gen
, key
.offset
,
3009 } else if (disk_bytenr
== 0) {
3010 /* We have an explicit hole. */
3011 ret
= fiemap_process_hole(inode
, fieinfo
, &cache
,
3012 &delalloc_cached_state
,
3013 backref_ctx
, 0, 0, 0,
3014 key
.offset
, extent_end
- 1);
3016 /* We have a regular extent. */
3017 if (fieinfo
->fi_extents_max
) {
3018 ret
= btrfs_is_data_extent_shared(inode
,
3025 flags
|= FIEMAP_EXTENT_SHARED
;
3028 ret
= emit_fiemap_extent(fieinfo
, &cache
, key
.offset
,
3029 disk_bytenr
+ extent_offset
,
3035 } else if (ret
> 0) {
3036 /* fiemap_fill_next_extent() told us to stop. */
3041 prev_extent_end
= extent_end
;
3043 if (fatal_signal_pending(current
)) {
3048 ret
= fiemap_next_leaf_item(inode
, path
);
3051 } else if (ret
> 0) {
3052 /* No more file extent items for this inode. */
3060 * Release (and free) the path before emitting any final entries to
3061 * fiemap_fill_next_extent() to keep lockdep happy. This is because
3062 * once we find no more file extent items exist, we may have a
3063 * non-cloned leaf, and fiemap_fill_next_extent() can trigger page
3064 * faults when copying data to the user space buffer.
3066 btrfs_free_path(path
);
3069 if (!stopped
&& prev_extent_end
< range_end
) {
3070 ret
= fiemap_process_hole(inode
, fieinfo
, &cache
,
3071 &delalloc_cached_state
, backref_ctx
,
3072 0, 0, 0, prev_extent_end
, range_end
- 1);
3075 prev_extent_end
= range_end
;
3078 if (cache
.cached
&& cache
.offset
+ cache
.len
>= last_extent_end
) {
3079 const u64 i_size
= i_size_read(&inode
->vfs_inode
);
3081 if (prev_extent_end
< i_size
) {
3082 struct extent_state
*cached_state
= NULL
;
3089 lockstart
= round_down(prev_extent_end
, sectorsize
);
3090 lockend
= round_up(i_size
, sectorsize
);
3093 * See the comment in fiemap_process_hole as to why
3094 * we're doing the locking here.
3096 lock_extent(&inode
->io_tree
, lockstart
, lockend
, &cached_state
);
3097 delalloc
= btrfs_find_delalloc_in_range(inode
,
3100 &delalloc_cached_state
,
3103 unlock_extent(&inode
->io_tree
, lockstart
, lockend
, &cached_state
);
3105 cache
.flags
|= FIEMAP_EXTENT_LAST
;
3107 cache
.flags
|= FIEMAP_EXTENT_LAST
;
3111 ret
= emit_last_fiemap_cache(fieinfo
, &cache
);
3114 btrfs_inode_unlock(inode
, BTRFS_ILOCK_SHARED
);
3116 free_extent_state(delalloc_cached_state
);
3117 btrfs_free_backref_share_ctx(backref_ctx
);
3118 btrfs_free_path(path
);
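/*
 * Illustrative user-space sketch (not part of this file): extent_fiemap() is
 * ultimately reached through the FIEMAP ioctl.  A minimal caller, assuming a
 * hypothetical file "/mnt/btrfs/file", could look like the following; buffer
 * sizing and error handling are kept to a bare minimum.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *	#include <linux/fiemap.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/mnt/btrfs/file", O_RDONLY);
 *		struct fiemap *fm;
 *
 *		fm = calloc(1, sizeof(*fm) + 32 * sizeof(struct fiemap_extent));
 *		if (fd < 0 || !fm)
 *			return 1;
 *		fm->fm_length = ~0ULL;		// map the whole file
 *		fm->fm_extent_count = 32;	// room for 32 extents
 *		if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0)
 *			return 1;
 *		for (unsigned int i = 0; i < fm->fm_mapped_extents; i++)
 *			printf("logical %llu physical %llu len %llu flags 0x%x\n",
 *			       (unsigned long long)fm->fm_extents[i].fe_logical,
 *			       (unsigned long long)fm->fm_extents[i].fe_physical,
 *			       (unsigned long long)fm->fm_extents[i].fe_length,
 *			       fm->fm_extents[i].fe_flags);
 *		return 0;
 *	}
 */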
static void __free_extent_buffer(struct extent_buffer *eb)
{
	kmem_cache_free(extent_buffer_cache, eb);
}

static int extent_buffer_under_io(const struct extent_buffer *eb)
{
	return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
		test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
}
3133 static bool folio_range_has_eb(struct btrfs_fs_info
*fs_info
, struct folio
*folio
)
3135 struct btrfs_subpage
*subpage
;
3137 lockdep_assert_held(&folio
->mapping
->private_lock
);
3139 if (folio_test_private(folio
)) {
3140 subpage
= folio_get_private(folio
);
3141 if (atomic_read(&subpage
->eb_refs
))
3144 * Even there is no eb refs here, we may still have
3145 * end_page_read() call relying on page::private.
3147 if (atomic_read(&subpage
->readers
))
3153 static void detach_extent_buffer_folio(struct extent_buffer
*eb
, struct folio
*folio
)
3155 struct btrfs_fs_info
*fs_info
= eb
->fs_info
;
3156 const bool mapped
= !test_bit(EXTENT_BUFFER_UNMAPPED
, &eb
->bflags
);
3159 * For mapped eb, we're going to change the folio private, which should
3160 * be done under the private_lock.
3163 spin_lock(&folio
->mapping
->private_lock
);
3165 if (!folio_test_private(folio
)) {
3167 spin_unlock(&folio
->mapping
->private_lock
);
3171 if (fs_info
->nodesize
>= PAGE_SIZE
) {
3173 * We do this since we'll remove the pages after we've
3174 * removed the eb from the radix tree, so we could race
3175 * and have this page now attached to the new eb. So
3176 * only clear folio if it's still connected to
3179 if (folio_test_private(folio
) && folio_get_private(folio
) == eb
) {
3180 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY
, &eb
->bflags
));
3181 BUG_ON(folio_test_dirty(folio
));
3182 BUG_ON(folio_test_writeback(folio
));
3183 /* We need to make sure we haven't be attached to a new eb. */
3184 folio_detach_private(folio
);
3187 spin_unlock(&folio
->mapping
->private_lock
);
3192 * For subpage, we can have dummy eb with folio private attached. In
3193 * this case, we can directly detach the private as such folio is only
3194 * attached to one dummy eb, no sharing.
3197 btrfs_detach_subpage(fs_info
, folio
);
3201 btrfs_folio_dec_eb_refs(fs_info
, folio
);
3204 * We can only detach the folio private if there are no other ebs in the
3205 * page range and no unfinished IO.
3207 if (!folio_range_has_eb(fs_info
, folio
))
3208 btrfs_detach_subpage(fs_info
, folio
);
3210 spin_unlock(&folio
->mapping
->private_lock
);
/* Release all pages attached to the extent buffer */
static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
{
	ASSERT(!extent_buffer_under_io(eb));

	for (int i = 0; i < INLINE_EXTENT_BUFFER_PAGES; i++) {
		struct folio *folio = eb->folios[i];

		if (!folio)
			continue;

		detach_extent_buffer_folio(eb, folio);

		/* One for when we allocated the folio. */
		folio_put(folio);
	}
}

/*
 * Helper for releasing the extent buffer.
 */
static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
{
	btrfs_release_extent_buffer_pages(eb);
	btrfs_leak_debug_del_eb(eb);
	__free_extent_buffer(eb);
}
static struct extent_buffer *
__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
		      unsigned long len)
{
	struct extent_buffer *eb = NULL;

	eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
	eb->start = start;
	eb->len = len;
	eb->fs_info = fs_info;
	init_rwsem(&eb->lock);

	btrfs_leak_debug_add_eb(eb);

	spin_lock_init(&eb->refs_lock);
	atomic_set(&eb->refs, 1);

	ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);

	return eb;
}
3263 struct extent_buffer
*btrfs_clone_extent_buffer(const struct extent_buffer
*src
)
3265 struct extent_buffer
*new;
3266 int num_folios
= num_extent_folios(src
);
3269 new = __alloc_extent_buffer(src
->fs_info
, src
->start
, src
->len
);
3274 * Set UNMAPPED before calling btrfs_release_extent_buffer(), as
3275 * btrfs_release_extent_buffer() have different behavior for
3276 * UNMAPPED subpage extent buffer.
3278 set_bit(EXTENT_BUFFER_UNMAPPED
, &new->bflags
);
3280 ret
= alloc_eb_folio_array(new, 0);
3282 btrfs_release_extent_buffer(new);
3286 for (int i
= 0; i
< num_folios
; i
++) {
3287 struct folio
*folio
= new->folios
[i
];
3290 ret
= attach_extent_buffer_folio(new, folio
, NULL
);
3292 btrfs_release_extent_buffer(new);
3295 WARN_ON(folio_test_dirty(folio
));
3297 copy_extent_buffer_full(new, src
);
3298 set_extent_buffer_uptodate(new);
3303 struct extent_buffer
*__alloc_dummy_extent_buffer(struct btrfs_fs_info
*fs_info
,
3304 u64 start
, unsigned long len
)
3306 struct extent_buffer
*eb
;
3310 eb
= __alloc_extent_buffer(fs_info
, start
, len
);
3314 ret
= alloc_eb_folio_array(eb
, 0);
3318 num_folios
= num_extent_folios(eb
);
3319 for (int i
= 0; i
< num_folios
; i
++) {
3320 ret
= attach_extent_buffer_folio(eb
, eb
->folios
[i
], NULL
);
3325 set_extent_buffer_uptodate(eb
);
3326 btrfs_set_header_nritems(eb
, 0);
3327 set_bit(EXTENT_BUFFER_UNMAPPED
, &eb
->bflags
);
3331 for (int i
= 0; i
< num_folios
; i
++) {
3332 if (eb
->folios
[i
]) {
3333 detach_extent_buffer_folio(eb
, eb
->folios
[i
]);
3334 __folio_put(eb
->folios
[i
]);
3337 __free_extent_buffer(eb
);
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						u64 start)
{
	return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
}
3347 static void check_buffer_tree_ref(struct extent_buffer
*eb
)
3351 * The TREE_REF bit is first set when the extent_buffer is added
3352 * to the radix tree. It is also reset, if unset, when a new reference
3353 * is created by find_extent_buffer.
3355 * It is only cleared in two cases: freeing the last non-tree
3356 * reference to the extent_buffer when its STALE bit is set or
3357 * calling release_folio when the tree reference is the only reference.
3359 * In both cases, care is taken to ensure that the extent_buffer's
3360 * pages are not under io. However, release_folio can be concurrently
3361 * called with creating new references, which is prone to race
3362 * conditions between the calls to check_buffer_tree_ref in those
3363 * codepaths and clearing TREE_REF in try_release_extent_buffer.
3365 * The actual lifetime of the extent_buffer in the radix tree is
3366 * adequately protected by the refcount, but the TREE_REF bit and
3367 * its corresponding reference are not. To protect against this
3368 * class of races, we call check_buffer_tree_ref from the codepaths
3369 * which trigger io. Note that once io is initiated, TREE_REF can no
3370 * longer be cleared, so that is the moment at which any such race is
3373 refs
= atomic_read(&eb
->refs
);
3374 if (refs
>= 2 && test_bit(EXTENT_BUFFER_TREE_REF
, &eb
->bflags
))
3377 spin_lock(&eb
->refs_lock
);
3378 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF
, &eb
->bflags
))
3379 atomic_inc(&eb
->refs
);
3380 spin_unlock(&eb
->refs_lock
);
static void mark_extent_buffer_accessed(struct extent_buffer *eb)
{
	int num_folios = num_extent_folios(eb);

	check_buffer_tree_ref(eb);

	for (int i = 0; i < num_folios; i++)
		folio_mark_accessed(eb->folios[i]);
}
struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
					 u64 start)
{
	struct extent_buffer *eb;

	eb = find_extent_buffer_nolock(fs_info, start);
	if (!eb)
		return NULL;
	/*
	 * Lock our eb's refs_lock to avoid races with free_extent_buffer().
	 * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and
	 * another task running free_extent_buffer() might have seen that flag
	 * set, eb->refs == 2, that the buffer isn't under IO (dirty and
	 * writeback flags not set) and it's still in the tree (flag
	 * EXTENT_BUFFER_TREE_REF set), therefore being in the process of
	 * decrementing the extent buffer's reference count twice. So here we
	 * could race and increment the eb's reference count, clear its stale
	 * flag, mark it as dirty and drop our reference before the other task
	 * finishes executing free_extent_buffer, which would later result in
	 * an attempt to free an extent buffer that is dirty.
	 */
	if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
		spin_lock(&eb->refs_lock);
		spin_unlock(&eb->refs_lock);
	}
	mark_extent_buffer_accessed(eb);
	return eb;
}
3422 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3423 struct extent_buffer
*alloc_test_extent_buffer(struct btrfs_fs_info
*fs_info
,
3426 struct extent_buffer
*eb
, *exists
= NULL
;
3429 eb
= find_extent_buffer(fs_info
, start
);
3432 eb
= alloc_dummy_extent_buffer(fs_info
, start
);
3434 return ERR_PTR(-ENOMEM
);
3435 eb
->fs_info
= fs_info
;
3437 ret
= radix_tree_preload(GFP_NOFS
);
3439 exists
= ERR_PTR(ret
);
3442 spin_lock(&fs_info
->buffer_lock
);
3443 ret
= radix_tree_insert(&fs_info
->buffer_radix
,
3444 start
>> fs_info
->sectorsize_bits
, eb
);
3445 spin_unlock(&fs_info
->buffer_lock
);
3446 radix_tree_preload_end();
3447 if (ret
== -EEXIST
) {
3448 exists
= find_extent_buffer(fs_info
, start
);
3454 check_buffer_tree_ref(eb
);
3455 set_bit(EXTENT_BUFFER_IN_TREE
, &eb
->bflags
);
3459 btrfs_release_extent_buffer(eb
);
static struct extent_buffer *grab_extent_buffer(
		struct btrfs_fs_info *fs_info, struct page *page)
{
	struct folio *folio = page_folio(page);
	struct extent_buffer *exists;

	/*
	 * For subpage case, we completely rely on radix tree to ensure we
	 * don't try to insert two ebs for the same bytenr.  So here we always
	 * return NULL and just continue.
	 */
	if (fs_info->nodesize < PAGE_SIZE)
		return NULL;

	/* Page not yet attached to an extent buffer */
	if (!folio_test_private(folio))
		return NULL;

	/*
	 * We could have already allocated an eb for this page and attached one
	 * so lets see if we can get a ref on the existing eb, and if we can we
	 * know it's good and we can just return that one, else we know we can
	 * just overwrite folio private.
	 */
	exists = folio_get_private(folio);
	if (atomic_inc_not_zero(&exists->refs))
		return exists;

	WARN_ON(PageDirty(page));
	folio_detach_private(folio);
	return NULL;
}
static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
{
	if (!IS_ALIGNED(start, fs_info->sectorsize)) {
		btrfs_err(fs_info, "bad tree block start %llu", start);
		return -EINVAL;
	}

	if (fs_info->nodesize < PAGE_SIZE &&
	    offset_in_page(start) + fs_info->nodesize > PAGE_SIZE) {
		btrfs_err(fs_info,
			  "tree block crosses page boundary, start %llu nodesize %u",
			  start, fs_info->nodesize);
		return -EINVAL;
	}
	if (fs_info->nodesize >= PAGE_SIZE &&
	    !PAGE_ALIGNED(start)) {
		btrfs_err(fs_info,
			  "tree block is not page aligned, start %llu nodesize %u",
			  start, fs_info->nodesize);
		return -EINVAL;
	}
	if (!IS_ALIGNED(start, fs_info->nodesize) &&
	    !test_and_set_bit(BTRFS_FS_UNALIGNED_TREE_BLOCK, &fs_info->flags)) {
		btrfs_warn(fs_info,
"tree block not nodesize aligned, start %llu nodesize %u, can be resolved by a full metadata balance",
			   start, fs_info->nodesize);
	}
	return 0;
}
3529 * Return 0 if eb->folios[i] is attached to btree inode successfully.
3530 * Return >0 if there is already another extent buffer for the range,
3531 * and @found_eb_ret would be updated.
3532 * Return -EAGAIN if the filemap has an existing folio but with different size
3534 * The caller needs to free the existing folios and retry using the same order.
3536 static int attach_eb_folio_to_filemap(struct extent_buffer
*eb
, int i
,
3537 struct extent_buffer
**found_eb_ret
)
3540 struct btrfs_fs_info
*fs_info
= eb
->fs_info
;
3541 struct address_space
*mapping
= fs_info
->btree_inode
->i_mapping
;
3542 const unsigned long index
= eb
->start
>> PAGE_SHIFT
;
3543 struct folio
*existing_folio
;
3546 ASSERT(found_eb_ret
);
3548 /* Caller should ensure the folio exists. */
3549 ASSERT(eb
->folios
[i
]);
3552 ret
= filemap_add_folio(mapping
, eb
->folios
[i
], index
+ i
,
3553 GFP_NOFS
| __GFP_NOFAIL
);
3557 existing_folio
= filemap_lock_folio(mapping
, index
+ i
);
3558 /* The page cache only exists for a very short time, just retry. */
3559 if (IS_ERR(existing_folio
))
3562 /* For now, we should only have single-page folios for btree inode. */
3563 ASSERT(folio_nr_pages(existing_folio
) == 1);
3565 if (folio_size(existing_folio
) != folio_size(eb
->folios
[0])) {
3566 folio_unlock(existing_folio
);
3567 folio_put(existing_folio
);
3571 if (fs_info
->nodesize
< PAGE_SIZE
) {
3573 * We're going to reuse the existing page, can drop our page
3574 * and subpage structure now.
3576 __free_page(folio_page(eb
->folios
[i
], 0));
3577 eb
->folios
[i
] = existing_folio
;
3579 struct extent_buffer
*existing_eb
;
3581 existing_eb
= grab_extent_buffer(fs_info
,
3582 folio_page(existing_folio
, 0));
3584 /* The extent buffer still exists, we can use it directly. */
3585 *found_eb_ret
= existing_eb
;
3586 folio_unlock(existing_folio
);
3587 folio_put(existing_folio
);
3590 /* The extent buffer no longer exists, we can reuse the folio. */
3591 __free_page(folio_page(eb
->folios
[i
], 0));
3592 eb
->folios
[i
] = existing_folio
;
3597 struct extent_buffer
*alloc_extent_buffer(struct btrfs_fs_info
*fs_info
,
3598 u64 start
, u64 owner_root
, int level
)
3600 unsigned long len
= fs_info
->nodesize
;
3603 struct extent_buffer
*eb
;
3604 struct extent_buffer
*existing_eb
= NULL
;
3605 struct address_space
*mapping
= fs_info
->btree_inode
->i_mapping
;
3606 struct btrfs_subpage
*prealloc
= NULL
;
3607 u64 lockdep_owner
= owner_root
;
3608 bool page_contig
= true;
3612 if (check_eb_alignment(fs_info
, start
))
3613 return ERR_PTR(-EINVAL
);
3615 #if BITS_PER_LONG == 32
3616 if (start
>= MAX_LFS_FILESIZE
) {
3617 btrfs_err_rl(fs_info
,
3618 "extent buffer %llu is beyond 32bit page cache limit", start
);
3619 btrfs_err_32bit_limit(fs_info
);
3620 return ERR_PTR(-EOVERFLOW
);
3622 if (start
>= BTRFS_32BIT_EARLY_WARN_THRESHOLD
)
3623 btrfs_warn_32bit_limit(fs_info
);
3626 eb
= find_extent_buffer(fs_info
, start
);
3630 eb
= __alloc_extent_buffer(fs_info
, start
, len
);
3632 return ERR_PTR(-ENOMEM
);
3635 * The reloc trees are just snapshots, so we need them to appear to be
3636 * just like any other fs tree WRT lockdep.
3638 if (lockdep_owner
== BTRFS_TREE_RELOC_OBJECTID
)
3639 lockdep_owner
= BTRFS_FS_TREE_OBJECTID
;
3641 btrfs_set_buffer_lockdep_class(lockdep_owner
, eb
, level
);
3644 * Preallocate folio private for subpage case, so that we won't
3645 * allocate memory with private_lock nor page lock hold.
3647 * The memory will be freed by attach_extent_buffer_page() or freed
3648 * manually if we exit earlier.
3650 if (fs_info
->nodesize
< PAGE_SIZE
) {
3651 prealloc
= btrfs_alloc_subpage(fs_info
, BTRFS_SUBPAGE_METADATA
);
3652 if (IS_ERR(prealloc
)) {
3653 ret
= PTR_ERR(prealloc
);
3659 /* Allocate all pages first. */
3660 ret
= alloc_eb_folio_array(eb
, __GFP_NOFAIL
);
3662 btrfs_free_subpage(prealloc
);
3666 num_folios
= num_extent_folios(eb
);
3667 /* Attach all pages to the filemap. */
3668 for (int i
= 0; i
< num_folios
; i
++) {
3669 struct folio
*folio
;
3671 ret
= attach_eb_folio_to_filemap(eb
, i
, &existing_eb
);
3673 ASSERT(existing_eb
);
3678 * TODO: Special handling for a corner case where the order of
3679 * folios mismatch between the new eb and filemap.
3681 * This happens when:
3683 * - the new eb is using higher order folio
3685 * - the filemap is still using 0-order folios for the range
3686 * This can happen at the previous eb allocation, and we don't
3687 * have higher order folio for the call.
3689 * - the existing eb has already been freed
3691 * In this case, we have to free the existing folios first, and
3692 * re-allocate using the same order.
3693 * Thankfully this is not going to happen yet, as we're still
3694 * using 0-order folios.
3696 if (unlikely(ret
== -EAGAIN
)) {
3703 * Only after attach_eb_folio_to_filemap(), eb->folios[] is
3704 * reliable, as we may choose to reuse the existing page cache
3705 * and free the allocated page.
3707 folio
= eb
->folios
[i
];
3708 spin_lock(&mapping
->private_lock
);
3709 /* Should not fail, as we have preallocated the memory */
3710 ret
= attach_extent_buffer_folio(eb
, folio
, prealloc
);
3713 * To inform we have extra eb under allocation, so that
3714 * detach_extent_buffer_page() won't release the folio private
3715 * when the eb hasn't yet been inserted into radix tree.
3717 * The ref will be decreased when the eb released the page, in
3718 * detach_extent_buffer_page().
3719 * Thus needs no special handling in error path.
3721 btrfs_folio_inc_eb_refs(fs_info
, folio
);
3722 spin_unlock(&mapping
->private_lock
);
3724 WARN_ON(btrfs_folio_test_dirty(fs_info
, folio
, eb
->start
, eb
->len
));
3727 * Check if the current page is physically contiguous with previous eb
3729 * At this stage, either we allocated a large folio, thus @i
3730 * would only be 0, or we fall back to per-page allocation.
3732 if (i
&& folio_page(eb
->folios
[i
- 1], 0) + 1 != folio_page(folio
, 0))
3733 page_contig
= false;
3735 if (!btrfs_folio_test_uptodate(fs_info
, folio
, eb
->start
, eb
->len
))
3739 * We can't unlock the pages just yet since the extent buffer
3740 * hasn't been properly inserted in the radix tree, this
3741 * opens a race with btree_release_folio which can free a page
3742 * while we are still filling in all pages for the buffer and
3747 set_bit(EXTENT_BUFFER_UPTODATE
, &eb
->bflags
);
3748 /* All pages are physically contiguous, can skip cross page handling. */
3750 eb
->addr
= folio_address(eb
->folios
[0]) + offset_in_page(eb
->start
);
3752 ret
= radix_tree_preload(GFP_NOFS
);
3756 spin_lock(&fs_info
->buffer_lock
);
3757 ret
= radix_tree_insert(&fs_info
->buffer_radix
,
3758 start
>> fs_info
->sectorsize_bits
, eb
);
3759 spin_unlock(&fs_info
->buffer_lock
);
3760 radix_tree_preload_end();
3761 if (ret
== -EEXIST
) {
3763 existing_eb
= find_extent_buffer(fs_info
, start
);
3769 /* add one reference for the tree */
3770 check_buffer_tree_ref(eb
);
3771 set_bit(EXTENT_BUFFER_IN_TREE
, &eb
->bflags
);
3774 * Now it's safe to unlock the pages because any calls to
3775 * btree_release_folio will correctly detect that a page belongs to a
3776 * live buffer and won't free them prematurely.
3778 for (int i
= 0; i
< num_folios
; i
++)
3779 unlock_page(folio_page(eb
->folios
[i
], 0));
3783 WARN_ON(!atomic_dec_and_test(&eb
->refs
));
3786 * Any attached folios need to be detached before we unlock them. This
3787 * is because when we're inserting our new folios into the mapping, and
3788 * then attaching our eb to that folio. If we fail to insert our folio
3789 * we'll lookup the folio for that index, and grab that EB. We do not
3790 * want that to grab this eb, as we're getting ready to free it. So we
3791 * have to detach it first and then unlock it.
3793 * We have to drop our reference and NULL it out here because in the
3794 * subpage case detaching does a btrfs_folio_dec_eb_refs() for our eb.
3795 * Below when we call btrfs_release_extent_buffer() we will call
3796 * detach_extent_buffer_folio() on our remaining pages in the !subpage
3797 * case. If we left eb->folios[i] populated in the subpage case we'd
3798 * double put our reference and be super sad.
3800 for (int i
= 0; i
< attached
; i
++) {
3801 ASSERT(eb
->folios
[i
]);
3802 detach_extent_buffer_folio(eb
, eb
->folios
[i
]);
3803 unlock_page(folio_page(eb
->folios
[i
], 0));
3804 folio_put(eb
->folios
[i
]);
3805 eb
->folios
[i
] = NULL
;
3808 * Now all pages of that extent buffer is unmapped, set UNMAPPED flag,
3809 * so it can be cleaned up without utlizing page->mapping.
3811 set_bit(EXTENT_BUFFER_UNMAPPED
, &eb
->bflags
);
3813 btrfs_release_extent_buffer(eb
);
3815 return ERR_PTR(ret
);
3816 ASSERT(existing_eb
);
static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
{
	struct extent_buffer *eb =
			container_of(head, struct extent_buffer, rcu_head);

	__free_extent_buffer(eb);
}
3828 static int release_extent_buffer(struct extent_buffer
*eb
)
3829 __releases(&eb
->refs_lock
)
3831 lockdep_assert_held(&eb
->refs_lock
);
3833 WARN_ON(atomic_read(&eb
->refs
) == 0);
3834 if (atomic_dec_and_test(&eb
->refs
)) {
3835 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE
, &eb
->bflags
)) {
3836 struct btrfs_fs_info
*fs_info
= eb
->fs_info
;
3838 spin_unlock(&eb
->refs_lock
);
3840 spin_lock(&fs_info
->buffer_lock
);
3841 radix_tree_delete(&fs_info
->buffer_radix
,
3842 eb
->start
>> fs_info
->sectorsize_bits
);
3843 spin_unlock(&fs_info
->buffer_lock
);
3845 spin_unlock(&eb
->refs_lock
);
3848 btrfs_leak_debug_del_eb(eb
);
3849 /* Should be safe to release our pages at this point */
3850 btrfs_release_extent_buffer_pages(eb
);
3851 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3852 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED
, &eb
->bflags
))) {
3853 __free_extent_buffer(eb
);
3857 call_rcu(&eb
->rcu_head
, btrfs_release_extent_buffer_rcu
);
3860 spin_unlock(&eb
->refs_lock
);
3865 void free_extent_buffer(struct extent_buffer
*eb
)
3871 refs
= atomic_read(&eb
->refs
);
3873 if ((!test_bit(EXTENT_BUFFER_UNMAPPED
, &eb
->bflags
) && refs
<= 3)
3874 || (test_bit(EXTENT_BUFFER_UNMAPPED
, &eb
->bflags
) &&
3877 if (atomic_try_cmpxchg(&eb
->refs
, &refs
, refs
- 1))
3881 spin_lock(&eb
->refs_lock
);
3882 if (atomic_read(&eb
->refs
) == 2 &&
3883 test_bit(EXTENT_BUFFER_STALE
, &eb
->bflags
) &&
3884 !extent_buffer_under_io(eb
) &&
3885 test_and_clear_bit(EXTENT_BUFFER_TREE_REF
, &eb
->bflags
))
3886 atomic_dec(&eb
->refs
);
3889 * I know this is terrible, but it's temporary until we stop tracking
3890 * the uptodate bits and such for the extent buffers.
3892 release_extent_buffer(eb
);
void free_extent_buffer_stale(struct extent_buffer *eb)
{
	if (!eb)
		return;

	spin_lock(&eb->refs_lock);
	set_bit(EXTENT_BUFFER_STALE, &eb->bflags);

	if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
		atomic_dec(&eb->refs);
	release_extent_buffer(eb);
}

static void btree_clear_folio_dirty(struct folio *folio)
{
	ASSERT(folio_test_dirty(folio));
	ASSERT(folio_test_locked(folio));
	folio_clear_dirty_for_io(folio);
	xa_lock_irq(&folio->mapping->i_pages);
	if (!folio_test_dirty(folio))
		__xa_clear_mark(&folio->mapping->i_pages,
				folio_index(folio), PAGECACHE_TAG_DIRTY);
	xa_unlock_irq(&folio->mapping->i_pages);
}
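/*
 * Illustrative note (not from the original source): folio_clear_dirty_for_io()
 * only clears the folio's dirty flag, while the PAGECACHE_TAG_DIRTY xarray tag
 * is what writeback iterators such as filemap_get_folios_tag() walk.  The
 * re-check of folio_test_dirty() under the i_pages lock above avoids clearing
 * the tag if the folio was redirtied in the meantime, keeping the tag and the
 * flag consistent.
 */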
3921 static void clear_subpage_extent_buffer_dirty(const struct extent_buffer
*eb
)
3923 struct btrfs_fs_info
*fs_info
= eb
->fs_info
;
3924 struct folio
*folio
= eb
->folios
[0];
3927 /* btree_clear_folio_dirty() needs page locked. */
3929 last
= btrfs_subpage_clear_and_test_dirty(fs_info
, folio
, eb
->start
, eb
->len
);
3931 btree_clear_folio_dirty(folio
);
3932 folio_unlock(folio
);
3933 WARN_ON(atomic_read(&eb
->refs
) == 0);
3936 void btrfs_clear_buffer_dirty(struct btrfs_trans_handle
*trans
,
3937 struct extent_buffer
*eb
)
3939 struct btrfs_fs_info
*fs_info
= eb
->fs_info
;
3942 btrfs_assert_tree_write_locked(eb
);
3944 if (trans
&& btrfs_header_generation(eb
) != trans
->transid
)
3948 * Instead of clearing the dirty flag off of the buffer, mark it as
3949 * EXTENT_BUFFER_ZONED_ZEROOUT. This allows us to preserve
3950 * write-ordering in zoned mode, without the need to later re-dirty
3951 * the extent_buffer.
3953 * The actual zeroout of the buffer will happen later in
3954 * btree_csum_one_bio.
3956 if (btrfs_is_zoned(fs_info
)) {
3957 set_bit(EXTENT_BUFFER_ZONED_ZEROOUT
, &eb
->bflags
);
3961 if (!test_and_clear_bit(EXTENT_BUFFER_DIRTY
, &eb
->bflags
))
3964 percpu_counter_add_batch(&fs_info
->dirty_metadata_bytes
, -eb
->len
,
3965 fs_info
->dirty_metadata_batch
);
3967 if (eb
->fs_info
->nodesize
< PAGE_SIZE
)
3968 return clear_subpage_extent_buffer_dirty(eb
);
3970 num_folios
= num_extent_folios(eb
);
3971 for (int i
= 0; i
< num_folios
; i
++) {
3972 struct folio
*folio
= eb
->folios
[i
];
3974 if (!folio_test_dirty(folio
))
3977 btree_clear_folio_dirty(folio
);
3978 folio_unlock(folio
);
3980 WARN_ON(atomic_read(&eb
->refs
) == 0);
3983 void set_extent_buffer_dirty(struct extent_buffer
*eb
)
3988 check_buffer_tree_ref(eb
);
3990 was_dirty
= test_and_set_bit(EXTENT_BUFFER_DIRTY
, &eb
->bflags
);
3992 num_folios
= num_extent_folios(eb
);
3993 WARN_ON(atomic_read(&eb
->refs
) == 0);
3994 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF
, &eb
->bflags
));
3997 bool subpage
= eb
->fs_info
->nodesize
< PAGE_SIZE
;
4000 * For subpage case, we can have other extent buffers in the
4001 * same page, and in clear_subpage_extent_buffer_dirty() we
4002 * have to clear page dirty without subpage lock held.
4003 * This can cause race where our page gets dirty cleared after
4006 * Thankfully, clear_subpage_extent_buffer_dirty() has locked
4007 * its page for other reasons, we can use page lock to prevent
4011 lock_page(folio_page(eb
->folios
[0], 0));
4012 for (int i
= 0; i
< num_folios
; i
++)
4013 btrfs_folio_set_dirty(eb
->fs_info
, eb
->folios
[i
],
4014 eb
->start
, eb
->len
);
4016 unlock_page(folio_page(eb
->folios
[0], 0));
4017 percpu_counter_add_batch(&eb
->fs_info
->dirty_metadata_bytes
,
4019 eb
->fs_info
->dirty_metadata_batch
);
4021 #ifdef CONFIG_BTRFS_DEBUG
4022 for (int i
= 0; i
< num_folios
; i
++)
4023 ASSERT(folio_test_dirty(eb
->folios
[i
]));
void clear_extent_buffer_uptodate(struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	int num_folios = num_extent_folios(eb);

	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
	for (int i = 0; i < num_folios; i++) {
		struct folio *folio = eb->folios[i];

		if (!folio)
			continue;

		/*
		 * This is special handling for metadata subpage, as regular
		 * btrfs_is_subpage() can not handle cloned/dummy metadata.
		 */
		if (fs_info->nodesize >= PAGE_SIZE)
			folio_clear_uptodate(folio);
		else
			btrfs_subpage_clear_uptodate(fs_info, folio,
						     eb->start, eb->len);
	}
}

void set_extent_buffer_uptodate(struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	int num_folios = num_extent_folios(eb);

	set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
	for (int i = 0; i < num_folios; i++) {
		struct folio *folio = eb->folios[i];

		/*
		 * This is special handling for metadata subpage, as regular
		 * btrfs_is_subpage() can not handle cloned/dummy metadata.
		 */
		if (fs_info->nodesize >= PAGE_SIZE)
			folio_mark_uptodate(folio);
		else
			btrfs_subpage_set_uptodate(fs_info, folio,
						   eb->start, eb->len);
	}
}
4072 static void end_bbio_meta_read(struct btrfs_bio
*bbio
)
4074 struct extent_buffer
*eb
= bbio
->private;
4075 struct btrfs_fs_info
*fs_info
= eb
->fs_info
;
4076 bool uptodate
= !bbio
->bio
.bi_status
;
4077 struct folio_iter fi
;
4080 eb
->read_mirror
= bbio
->mirror_num
;
4083 btrfs_validate_extent_buffer(eb
, &bbio
->parent_check
) < 0)
4087 set_extent_buffer_uptodate(eb
);
4089 clear_extent_buffer_uptodate(eb
);
4090 set_bit(EXTENT_BUFFER_READ_ERR
, &eb
->bflags
);
4093 bio_for_each_folio_all(fi
, &bbio
->bio
) {
4094 struct folio
*folio
= fi
.folio
;
4095 u64 start
= eb
->start
+ bio_offset
;
4096 u32 len
= fi
.length
;
4099 btrfs_folio_set_uptodate(fs_info
, folio
, start
, len
);
4101 btrfs_folio_clear_uptodate(fs_info
, folio
, start
, len
);
4106 clear_bit(EXTENT_BUFFER_READING
, &eb
->bflags
);
4107 smp_mb__after_atomic();
4108 wake_up_bit(&eb
->bflags
, EXTENT_BUFFER_READING
);
4109 free_extent_buffer(eb
);
4111 bio_put(&bbio
->bio
);
4114 int read_extent_buffer_pages(struct extent_buffer
*eb
, int wait
, int mirror_num
,
4115 struct btrfs_tree_parent_check
*check
)
4117 struct btrfs_bio
*bbio
;
4120 if (test_bit(EXTENT_BUFFER_UPTODATE
, &eb
->bflags
))
4124 * We could have had EXTENT_BUFFER_UPTODATE cleared by the write
4125 * operation, which could potentially still be in flight. In this case
4126 * we simply want to return an error.
4128 if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR
, &eb
->bflags
)))
4131 /* Someone else is already reading the buffer, just wait for it. */
4132 if (test_and_set_bit(EXTENT_BUFFER_READING
, &eb
->bflags
))
4135 clear_bit(EXTENT_BUFFER_READ_ERR
, &eb
->bflags
);
4136 eb
->read_mirror
= 0;
4137 check_buffer_tree_ref(eb
);
4138 atomic_inc(&eb
->refs
);
4140 bbio
= btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES
,
4141 REQ_OP_READ
| REQ_META
, eb
->fs_info
,
4142 end_bbio_meta_read
, eb
);
4143 bbio
->bio
.bi_iter
.bi_sector
= eb
->start
>> SECTOR_SHIFT
;
4144 bbio
->inode
= BTRFS_I(eb
->fs_info
->btree_inode
);
4145 bbio
->file_offset
= eb
->start
;
4146 memcpy(&bbio
->parent_check
, check
, sizeof(*check
));
4147 if (eb
->fs_info
->nodesize
< PAGE_SIZE
) {
4148 ret
= bio_add_folio(&bbio
->bio
, eb
->folios
[0], eb
->len
,
4149 eb
->start
- folio_pos(eb
->folios
[0]));
4152 int num_folios
= num_extent_folios(eb
);
4154 for (int i
= 0; i
< num_folios
; i
++) {
4155 struct folio
*folio
= eb
->folios
[i
];
4157 ret
= bio_add_folio(&bbio
->bio
, folio
, folio_size(folio
), 0);
4161 btrfs_submit_bio(bbio
, mirror_num
);
4164 if (wait
== WAIT_COMPLETE
) {
4165 wait_on_bit_io(&eb
->bflags
, EXTENT_BUFFER_READING
, TASK_UNINTERRUPTIBLE
);
4166 if (!test_bit(EXTENT_BUFFER_UPTODATE
, &eb
->bflags
))

static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
			    unsigned long len)
{
	btrfs_warn(eb->fs_info,
		   "access to eb bytenr %llu len %lu out of range start %lu len %lu",
		   eb->start, eb->len, start, len);
	WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));

	return true;
}

/*
 * Check if the [start, start + len) range is valid before reading/writing
 * the eb.
 * NOTE: @start and @len are offsets inside the eb, not logical addresses.
 *
 * Caller should not touch the dst/src memory if this function returns error.
 */
static inline int check_eb_range(const struct extent_buffer *eb,
				 unsigned long start, unsigned long len)
{
	unsigned long offset;

	/* start, start + len should not go beyond eb->len nor overflow */
	if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len))
		return report_eb_range(eb, start, len);

	return false;
}
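
/*
 * Copy @len bytes starting at offset @start inside the eb into @dstv, handling
 * metadata that crosses folio boundaries.  As an illustrative (hypothetical)
 * example only, a caller could pull a single disk key out of a node with
 * something like:
 *
 *	struct btrfs_disk_key disk_key;
 *
 *	read_extent_buffer(eb, &disk_key,
 *			   btrfs_node_key_ptr_offset(eb, slot),
 *			   sizeof(disk_key));
 */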
void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
			unsigned long start, unsigned long len)
{
	const int unit_size = folio_size(eb->folios[0]);
	size_t cur;
	size_t offset;
	char *kaddr;
	char *dst = (char *)dstv;
	unsigned long i = get_eb_folio_index(eb, start);

	if (check_eb_range(eb, start, len)) {
		/*
		 * Invalid range hit, reset the memory, so callers won't get
		 * some random garbage for their uninitialized memory.
		 */
		memset(dstv, 0, len);
		return;
	}

	if (eb->addr) {
		memcpy(dstv, eb->addr + start, len);
		return;
	}

	offset = get_eb_offset_in_folio(eb, start);

	while (len > 0) {
		cur = min(len, unit_size - offset);
		kaddr = folio_address(eb->folios[i]);
		memcpy(dst, kaddr + offset, cur);

		dst += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
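
/*
 * Same as read_extent_buffer() but the destination is a user space buffer and
 * the copy must not fault, hence copy_to_user_nofault().  Returns 0 on
 * success or -EFAULT if any part of the copy would fault.
 */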
int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
				       void __user *dstv,
				       unsigned long start, unsigned long len)
{
	const int unit_size = folio_size(eb->folios[0]);
	size_t cur;
	size_t offset;
	char *kaddr;
	char __user *dst = (char __user *)dstv;
	unsigned long i = get_eb_folio_index(eb, start);
	int ret = 0;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	if (eb->addr) {
		if (copy_to_user_nofault(dstv, eb->addr + start, len))
			ret = -EFAULT;
		return ret;
	}

	offset = get_eb_offset_in_folio(eb, start);

	while (len > 0) {
		cur = min(len, unit_size - offset);
		kaddr = folio_address(eb->folios[i]);
		if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
			ret = -EFAULT;
			break;
		}

		dst += cur;
		len -= cur;
		offset = 0;
		i++;
	}

	return ret;
}
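
/*
 * Compare @len bytes at offset @start inside the eb against @ptrv, the same
 * way memcmp() would, walking the backing folios as needed.
 */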
int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
			 unsigned long start, unsigned long len)
{
	const int unit_size = folio_size(eb->folios[0]);
	size_t cur;
	size_t offset;
	char *kaddr;
	char *ptr = (char *)ptrv;
	unsigned long i = get_eb_folio_index(eb, start);
	int ret = 0;

	if (check_eb_range(eb, start, len))
		return -EINVAL;

	if (eb->addr)
		return memcmp(ptrv, eb->addr + start, len);

	offset = get_eb_offset_in_folio(eb, start);

	while (len > 0) {
		cur = min(len, unit_size - offset);
		kaddr = folio_address(eb->folios[i]);
		ret = memcmp(ptr, kaddr + offset, cur);
		if (ret)
			break;

		ptr += cur;
		len -= cur;
		offset = 0;
		i++;
	}

	return ret;
}

/*
 * Check that the extent buffer is uptodate.
 *
 * For the regular sector size == PAGE_SIZE case, check if the folio is uptodate.
 * For the subpage case, check if the range covered by the eb has EXTENT_UPTODATE.
 */
static void assert_eb_folio_uptodate(const struct extent_buffer *eb, int i)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	struct folio *folio = eb->folios[i];

	/*
	 * If we are using the commit root we could potentially clear a page
	 * Uptodate while we're using the extent buffer that we've previously
	 * looked up. We don't want to complain in this case, as the page was
	 * valid before, we just didn't write it out. Instead we want to catch
	 * the case where we didn't actually read the block properly, which
	 * would have !PageUptodate and !EXTENT_BUFFER_WRITE_ERR.
	 */
	if (test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
		return;

	if (fs_info->nodesize < PAGE_SIZE) {
		struct folio *folio = eb->folios[0];

		if (WARN_ON(!btrfs_subpage_test_uptodate(fs_info, folio,
							 eb->start, eb->len)))
			btrfs_subpage_dump_bitmap(fs_info, folio, eb->start, eb->len);
	} else {
		WARN_ON(!folio_test_uptodate(folio));
	}
}
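
/*
 * Common helper for writing data into an eb.  @use_memmove selects memmove()
 * over memcpy() so that callers copying within the same eb can handle
 * overlapping ranges safely.
 */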
static void __write_extent_buffer(const struct extent_buffer *eb,
				  const void *srcv, unsigned long start,
				  unsigned long len, bool use_memmove)
{
	const int unit_size = folio_size(eb->folios[0]);
	size_t cur;
	size_t offset;
	char *kaddr;
	char *src = (char *)srcv;
	unsigned long i = get_eb_folio_index(eb, start);
	/* For unmapped (dummy) ebs, no need to check their uptodate status. */
	const bool check_uptodate = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);

	if (check_eb_range(eb, start, len))
		return;

	if (eb->addr) {
		if (use_memmove)
			memmove(eb->addr + start, srcv, len);
		else
			memcpy(eb->addr + start, srcv, len);
		return;
	}

	offset = get_eb_offset_in_folio(eb, start);

	while (len > 0) {
		if (check_uptodate)
			assert_eb_folio_uptodate(eb, i);

		cur = min(len, unit_size - offset);
		kaddr = folio_address(eb->folios[i]);
		if (use_memmove)
			memmove(kaddr + offset, src, cur);
		else
			memcpy(kaddr + offset, src, cur);

		src += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}

void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
			 unsigned long start, unsigned long len)
{
	return __write_extent_buffer(eb, srcv, start, len, false);
}

static void memset_extent_buffer(const struct extent_buffer *eb, int c,
				 unsigned long start, unsigned long len)
{
	const int unit_size = folio_size(eb->folios[0]);
	unsigned long cur = start;

	if (eb->addr) {
		memset(eb->addr + start, c, len);
		return;
	}

	while (cur < start + len) {
		unsigned long index = get_eb_folio_index(eb, cur);
		unsigned int offset = get_eb_offset_in_folio(eb, cur);
		unsigned int cur_len = min(start + len - cur, unit_size - offset);

		assert_eb_folio_uptodate(eb, index);
		memset(folio_address(eb->folios[index]) + offset, c, cur_len);

		cur += cur_len;
	}
}

void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
			   unsigned long len)
{
	if (check_eb_range(eb, start, len))
		return;
	return memset_extent_buffer(eb, 0, start, len);
}
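
/* Copy the entire content of @src into @dst; both ebs must have the same length. */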
void copy_extent_buffer_full(const struct extent_buffer *dst,
			     const struct extent_buffer *src)
{
	const int unit_size = folio_size(src->folios[0]);
	unsigned long cur = 0;

	ASSERT(dst->len == src->len);

	while (cur < src->len) {
		unsigned long index = get_eb_folio_index(src, cur);
		unsigned long offset = get_eb_offset_in_folio(src, cur);
		unsigned long cur_len = min(src->len, unit_size - offset);
		void *addr = folio_address(src->folios[index]) + offset;

		write_extent_buffer(dst, addr, cur, cur_len);

		cur += cur_len;
	}
}
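
/*
 * Copy @len bytes from offset @src_offset in @src to offset @dst_offset in
 * @dst.  Both ranges are validated with check_eb_range() before any data is
 * touched.
 */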
void copy_extent_buffer(const struct extent_buffer *dst,
			const struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	const int unit_size = folio_size(dst->folios[0]);
	u64 dst_len = dst->len;
	size_t cur;
	size_t offset;
	char *kaddr;
	unsigned long i = get_eb_folio_index(dst, dst_offset);

	if (check_eb_range(dst, dst_offset, len) ||
	    check_eb_range(src, src_offset, len))
		return;

	WARN_ON(src->len != dst_len);

	offset = get_eb_offset_in_folio(dst, dst_offset);

	while (len > 0) {
		assert_eb_folio_uptodate(dst, i);

		cur = min(len, (unsigned long)(unit_size - offset));

		kaddr = folio_address(dst->folios[i]);
		read_extent_buffer(src, kaddr + offset, src_offset, cur);

		src_offset += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}

/*
 * Calculate the folio and offset of the byte containing the given bit number.
 *
 * @eb:           the extent buffer
 * @start:        offset of the bitmap item in the extent buffer
 * @nr:           bit number
 * @folio_index:  return index of the folio in the extent buffer that contains
 *                the given bit number
 * @folio_offset: return offset into the folio given by folio_index
 *
 * This helper hides the ugliness of finding the byte in an extent buffer which
 * contains a given bit.
 */
static inline void eb_bitmap_offset(const struct extent_buffer *eb,
				    unsigned long start, unsigned long nr,
				    unsigned long *folio_index,
				    size_t *folio_offset)
{
	size_t byte_offset = BIT_BYTE(nr);
	size_t offset;

	/*
	 * The byte we want is the offset of the extent buffer + the offset of
	 * the bitmap item in the extent buffer + the offset of the byte in the
	 * bitmap item.
	 */
	offset = start + offset_in_folio(eb->folios[0], eb->start) + byte_offset;

	*folio_index = offset >> folio_shift(eb->folios[0]);
	*folio_offset = offset_in_folio(eb->folios[0], offset);
}

/*
 * Determine whether a bit in a bitmap item is set.
 *
 * @eb:     the extent buffer
 * @start:  offset of the bitmap item in the extent buffer
 * @nr:     bit number to test
 */
int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
			   unsigned long nr)
{
	unsigned long i;
	size_t offset;
	u8 *kaddr;

	eb_bitmap_offset(eb, start, nr, &i, &offset);
	assert_eb_folio_uptodate(eb, i);
	kaddr = folio_address(eb->folios[i]);
	return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
}

static u8 *extent_buffer_get_byte(const struct extent_buffer *eb, unsigned long bytenr)
{
	unsigned long index = get_eb_folio_index(eb, bytenr);

	if (check_eb_range(eb, bytenr, 1))
		return NULL;
	return folio_address(eb->folios[index]) + get_eb_offset_in_folio(eb, bytenr);
}

/*
 * Set an area of a bitmap to 1.
 *
 * @eb:     the extent buffer
 * @start:  offset of the bitmap item in the extent buffer
 * @pos:    bit number of the first bit
 * @len:    number of bits to set
 */
void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
			      unsigned long pos, unsigned long len)
{
	unsigned int first_byte = start + BIT_BYTE(pos);
	unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
	const bool same_byte = (first_byte == last_byte);
	u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
	u8 *kaddr;

	if (same_byte)
		mask &= BITMAP_LAST_BYTE_MASK(pos + len);

	/* Handle the first byte. */
	kaddr = extent_buffer_get_byte(eb, first_byte);
	*kaddr |= mask;
	if (same_byte)
		return;

	/* Handle the byte aligned part. */
	ASSERT(first_byte + 1 <= last_byte);
	memset_extent_buffer(eb, 0xff, first_byte + 1, last_byte - first_byte - 1);

	/* Handle the last byte. */
	kaddr = extent_buffer_get_byte(eb, last_byte);
	*kaddr |= BITMAP_LAST_BYTE_MASK(pos + len);
}

/*
 * Clear an area of a bitmap.
 *
 * @eb:     the extent buffer
 * @start:  offset of the bitmap item in the extent buffer
 * @pos:    bit number of the first bit
 * @len:    number of bits to clear
 */
void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
				unsigned long start, unsigned long pos,
				unsigned long len)
{
	unsigned int first_byte = start + BIT_BYTE(pos);
	unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
	const bool same_byte = (first_byte == last_byte);
	u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
	u8 *kaddr;

	if (same_byte)
		mask &= BITMAP_LAST_BYTE_MASK(pos + len);

	/* Handle the first byte. */
	kaddr = extent_buffer_get_byte(eb, first_byte);
	*kaddr &= ~mask;
	if (same_byte)
		return;

	/* Handle the byte aligned part. */
	ASSERT(first_byte + 1 <= last_byte);
	memset_extent_buffer(eb, 0, first_byte + 1, last_byte - first_byte - 1);

	/* Handle the last byte. */
	kaddr = extent_buffer_get_byte(eb, last_byte);
	*kaddr &= ~BITMAP_LAST_BYTE_MASK(pos + len);
}

static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
{
	unsigned long distance = (src > dst) ? src - dst : dst - src;

	return distance < len;
}
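
/*
 * Copy within a single eb, from @src_offset to @dst_offset.  When the two
 * ranges overlap, the copy falls back to memmove() semantics via
 * areas_overlap().
 */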
void memcpy_extent_buffer(const struct extent_buffer *dst,
			  unsigned long dst_offset, unsigned long src_offset,
			  unsigned long len)
{
	const int unit_size = folio_size(dst->folios[0]);
	unsigned long cur_off = 0;

	if (check_eb_range(dst, dst_offset, len) ||
	    check_eb_range(dst, src_offset, len))
		return;

	if (dst->addr) {
		const bool use_memmove = areas_overlap(src_offset, dst_offset, len);

		if (use_memmove)
			memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
		else
			memcpy(dst->addr + dst_offset, dst->addr + src_offset, len);
		return;
	}

	while (cur_off < len) {
		unsigned long cur_src = cur_off + src_offset;
		unsigned long folio_index = get_eb_folio_index(dst, cur_src);
		unsigned long folio_off = get_eb_offset_in_folio(dst, cur_src);
		unsigned long cur_len = min(src_offset + len - cur_src,
					    unit_size - folio_off);
		void *src_addr = folio_address(dst->folios[folio_index]) + folio_off;
		const bool use_memmove = areas_overlap(src_offset + cur_off,
						       dst_offset + cur_off, cur_len);

		__write_extent_buffer(dst, src_addr, dst_offset + cur_off, cur_len,
				      use_memmove);
		cur_off += cur_len;
	}
}
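
/*
 * memmove() equivalent for ranges inside one eb.  A forward copy is delegated
 * to memcpy_extent_buffer(); an overlapping backward copy is done folio by
 * folio from the end of the range towards the start.
 */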
void memmove_extent_buffer(const struct extent_buffer *dst,
			   unsigned long dst_offset, unsigned long src_offset,
			   unsigned long len)
{
	unsigned long dst_end = dst_offset + len - 1;
	unsigned long src_end = src_offset + len - 1;

	if (check_eb_range(dst, dst_offset, len) ||
	    check_eb_range(dst, src_offset, len))
		return;

	if (dst_offset < src_offset) {
		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
		return;
	}

	if (dst->addr) {
		memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
		return;
	}

	while (len > 0) {
		unsigned long src_i;
		size_t cur;
		size_t dst_off_in_folio;
		size_t src_off_in_folio;
		void *src_addr;
		bool use_memmove;

		src_i = get_eb_folio_index(dst, src_end);

		dst_off_in_folio = get_eb_offset_in_folio(dst, dst_end);
		src_off_in_folio = get_eb_offset_in_folio(dst, src_end);

		cur = min_t(unsigned long, len, src_off_in_folio + 1);
		cur = min(cur, dst_off_in_folio + 1);

		src_addr = folio_address(dst->folios[src_i]) + src_off_in_folio -
			   cur + 1;
		use_memmove = areas_overlap(src_end - cur + 1, dst_end - cur + 1,
					    cur);

		__write_extent_buffer(dst, src_addr, dst_end - cur + 1, cur,
				      use_memmove);

		dst_end -= cur;
		src_end -= cur;
		len -= cur;
	}
}
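
/*
 * Subpage helper: find the first extent buffer in the radix tree that starts
 * at or after @bytenr and still begins inside @page.  The caller must hold
 * fs_info->buffer_lock.
 */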
#define GANG_LOOKUP_SIZE	16
static struct extent_buffer *get_next_extent_buffer(
		struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
{
	struct extent_buffer *gang[GANG_LOOKUP_SIZE];
	struct extent_buffer *found = NULL;
	u64 page_start = page_offset(page);
	u64 cur = page_start;

	ASSERT(in_range(bytenr, page_start, PAGE_SIZE));
	lockdep_assert_held(&fs_info->buffer_lock);

	while (cur < page_start + PAGE_SIZE) {
		int ret;
		int i;

		ret = radix_tree_gang_lookup(&fs_info->buffer_radix,
				(void **)gang, cur >> fs_info->sectorsize_bits,
				min_t(unsigned int, GANG_LOOKUP_SIZE,
				      PAGE_SIZE / fs_info->nodesize));
		if (ret == 0)
			goto out;
		for (i = 0; i < ret; i++) {
			/* Already beyond page end */
			if (gang[i]->start >= page_start + PAGE_SIZE)
				goto out;
			/* Found one */
			if (gang[i]->start >= bytenr) {
				found = gang[i];
				goto out;
			}
		}
		cur = gang[ret - 1]->start + gang[ret - 1]->len;
	}
out:
	return found;
}
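
/*
 * Try to release every extent buffer attached to a subpage metadata page.
 * Returns 1 if folio private was cleared (all ebs are gone), 0 otherwise.
 */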
static int try_release_subpage_extent_buffer(struct page *page)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
	u64 cur = page_offset(page);
	const u64 end = page_offset(page) + PAGE_SIZE;
	int ret;

	while (cur < end) {
		struct extent_buffer *eb = NULL;

		/*
		 * Unlike try_release_extent_buffer() which uses folio private
		 * to grab buffer, for subpage case we rely on radix tree, thus
		 * we need to ensure radix tree consistency.
		 *
		 * We also want an atomic snapshot of the radix tree, thus go
		 * with spinlock rather than RCU.
		 */
		spin_lock(&fs_info->buffer_lock);
		eb = get_next_extent_buffer(fs_info, page, cur);
		if (!eb) {
			/* No more eb in the page range after or at cur */
			spin_unlock(&fs_info->buffer_lock);
			break;
		}
		cur = eb->start + eb->len;

		/*
		 * The same as try_release_extent_buffer(), to ensure the eb
		 * won't disappear out from under us.
		 */
		spin_lock(&eb->refs_lock);
		if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
			spin_unlock(&eb->refs_lock);
			spin_unlock(&fs_info->buffer_lock);
			break;
		}
		spin_unlock(&fs_info->buffer_lock);

		/*
		 * If tree ref isn't set then we know the ref on this eb is a
		 * real ref, so just return, this eb will likely be freed soon
		 * anyway.
		 */
		if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
			spin_unlock(&eb->refs_lock);
			break;
		}

		/*
		 * Here we don't care about the return value, we will always
		 * check the folio private at the end.  And
		 * release_extent_buffer() will release the refs_lock.
		 */
		release_extent_buffer(eb);
	}
	/*
	 * Finally check if we have cleared folio private: if we have released
	 * all ebs in the page, the folio private should be cleared now.
	 */
	spin_lock(&page->mapping->private_lock);
	if (!folio_test_private(page_folio(page)))
		ret = 1;
	else
		ret = 0;
	spin_unlock(&page->mapping->private_lock);
	return ret;
}
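
/*
 * Release the extent buffer attached to @page if it is no longer referenced
 * or under IO.  Returns 1 if the eb (and folio private) was released, 0 if it
 * must stay around.
 */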
int try_release_extent_buffer(struct page *page)
{
	struct folio *folio = page_folio(page);
	struct extent_buffer *eb;

	if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
		return try_release_subpage_extent_buffer(page);

	/*
	 * We need to make sure nobody is changing folio private, as we rely on
	 * folio private as the pointer to extent buffer.
	 */
	spin_lock(&page->mapping->private_lock);
	if (!folio_test_private(folio)) {
		spin_unlock(&page->mapping->private_lock);
		return 1;
	}

	eb = folio_get_private(folio);

	/*
	 * This is a little awful but should be ok, we need to make sure that
	 * the eb doesn't disappear out from under us while we're looking at
	 * this page.
	 */
	spin_lock(&eb->refs_lock);
	if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
		spin_unlock(&eb->refs_lock);
		spin_unlock(&page->mapping->private_lock);
		return 0;
	}
	spin_unlock(&page->mapping->private_lock);

	/*
	 * If tree ref isn't set then we know the ref on this eb is a real ref,
	 * so just return, this page will likely be freed soon anyway.
	 */
	if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
		spin_unlock(&eb->refs_lock);
		return 0;
	}

	return release_extent_buffer(eb);
}

/*
 * Attempt to readahead a child block.
 *
 * @fs_info:    the fs_info
 * @bytenr:     bytenr to read
 * @owner_root: objectid of the root that owns this eb
 * @gen:        generation for the uptodate check, can be 0
 * @level:      level for the eb
 *
 * Attempt to readahead a tree block at @bytenr.  If @gen is 0 then we do a
 * normal uptodate check of the eb, without checking the generation.  If we
 * have to read the block we will not block on anything.
 */
void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
				u64 bytenr, u64 owner_root, u64 gen, int level)
{
	struct btrfs_tree_parent_check check = {
		.has_first_key = 0,
		.level = level,
		.transid = gen
	};
	struct extent_buffer *eb;
	int ret;

	eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
	if (IS_ERR(eb))
		return;

	if (btrfs_buffer_uptodate(eb, gen, 1)) {
		free_extent_buffer(eb);
		return;
	}

	ret = read_extent_buffer_pages(eb, WAIT_NONE, 0, &check);
	if (ret < 0)
		free_extent_buffer_stale(eb);
	else
		free_extent_buffer(eb);
}

/*
 * Readahead a node's child block.
 *
 * @node:  parent node we're reading from
 * @slot:  slot in the parent node for the child we want to read
 *
 * A helper for btrfs_readahead_tree_block, we simply read the bytenr pointed
 * at by the slot in the node provided.
 */
void btrfs_readahead_node_child(struct extent_buffer *node, int slot)
{
	btrfs_readahead_tree_block(node->fs_info,
				   btrfs_node_blockptr(node, slot),
				   btrfs_header_owner(node),
				   btrfs_node_ptr_generation(node, slot),
				   btrfs_header_level(node) - 1);
}