// SPDX-License-Identifier: GPL-2.0

#include <linux/slab.h>
#include "messages.h"
#include "ctree.h"
#include "subpage.h"
#include "btrfs_inode.h"

/*
 * Subpage (sectorsize < PAGE_SIZE) support overview:
 *
 * Limitations:
 *
 * - Only support 64K page size for now
 *   This is to make metadata handling easier, as a 64K page would ensure
 *   all nodesizes fit inside one page, thus we don't need to handle cases
 *   where a tree block crosses several pages.
 *
 * - Only metadata read-write for now
 *   The data read-write part is in development.
 *
 * - Metadata can't cross 64K page boundary
 *   btrfs-progs and kernel have done that for a while, thus only ancient
 *   filesystems could have such problem.  For such case, do a graceful
 *   rejection.
 *
 * Special behavior:
 *
 * - Metadata
 *   Metadata read is fully supported.
 *   Meaning that reading one tree block will only trigger the read for the
 *   needed range; other unrelated ranges in the same page will not be touched.
 *
 *   Metadata write support is partial.
 *   The writeback is still for the full page, but we will only submit
 *   the dirty extent buffers in the page.
 *
 *   This means, if we have a metadata page like this:
 *
 *   Page offset
 *   0         16K         32K         48K        64K
 *   |/////////|           |///////////|
 *    \- Tree block A        \- Tree block B
 *
 *   Even if we just want to writeback tree block A, we will also writeback
 *   tree block B if it's also dirty.
 *
 *   This may cause extra metadata writeback, which results in more COW.
 *
 * Implementation:
 *
 * - Common
 *   Both metadata and data will use a new structure, btrfs_subpage, to
 *   record the status of each sector inside a page.  This provides the extra
 *   granularity needed.
 *
 * - Metadata
 *   Since we have multiple tree blocks inside one page, we can't rely on page
 *   locking anymore, or we will have greatly reduced concurrency or even
 *   deadlocks (hold one tree lock while trying to lock another tree lock in
 *   the same page).
 *
 *   Thus for metadata locking, subpage support relies on io_tree locking only.
 *   This means a slightly higher tree locking latency.
 */

bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct address_space *mapping)
{
	if (fs_info->sectorsize >= PAGE_SIZE)
		return false;

	/*
	 * Only data pages (either through DIO or compression) can have no
	 * mapping. And if page->mapping->host is a data inode, it's subpage.
	 * As we have ruled out the sectorsize >= PAGE_SIZE case already.
	 */
	if (!mapping || !mapping->host || is_data_inode(mapping->host))
		return true;

	/*
	 * Now the only remaining case is metadata, for which we only take the
	 * subpage routine if nodesize < PAGE_SIZE.
	 */
	if (fs_info->nodesize < PAGE_SIZE)
		return true;
	return false;
}

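/*
 * Putting the checks above together (the last three rows assume
 * sectorsize < PAGE_SIZE, which the first check has already ruled on):
 *
 *   sectorsize >= PAGE_SIZE           -> never subpage
 *   data inode, or no mapping at all  -> subpage
 *   metadata, nodesize <  PAGE_SIZE   -> subpage
 *   metadata, nodesize >= PAGE_SIZE   -> not subpage
 */
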
void btrfs_init_subpage_info(struct btrfs_subpage_info *subpage_info, u32 sectorsize)
{
	unsigned int cur = 0;
	unsigned int nr_bits;

	ASSERT(IS_ALIGNED(PAGE_SIZE, sectorsize));

	nr_bits = PAGE_SIZE / sectorsize;
	subpage_info->bitmap_nr_bits = nr_bits;

	subpage_info->uptodate_offset = cur;
	cur += nr_bits;

	subpage_info->dirty_offset = cur;
	cur += nr_bits;

	subpage_info->writeback_offset = cur;
	cur += nr_bits;

	subpage_info->ordered_offset = cur;
	cur += nr_bits;

	subpage_info->checked_offset = cur;
	cur += nr_bits;

	subpage_info->locked_offset = cur;
	cur += nr_bits;

	subpage_info->total_nr_bits = cur;
}

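/*
 * A worked example of the layout built above, assuming a 64K page and 4K
 * sectorsize (so nr_bits == 16):
 *
 *   uptodate:  bits  0..15
 *   dirty:     bits 16..31
 *   writeback: bits 32..47
 *   ordered:   bits 48..63
 *   checked:   bits 64..79
 *   locked:    bits 80..95
 *
 * total_nr_bits == 96, so btrfs_alloc_subpage() below allocates
 * BITS_TO_LONGS(96) == 2 unsigned longs (on 64-bit) for the packed bitmap.
 */
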
int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
			 struct folio *folio, enum btrfs_subpage_type type)
{
	struct btrfs_subpage *subpage;

	/*
	 * We have cases like a dummy extent buffer page, which is not mapped
	 * and doesn't need to be locked.
	 */
	if (folio->mapping)
		ASSERT(folio_test_locked(folio));

	/* Either not subpage, or the folio already has private attached. */
	if (!btrfs_is_subpage(fs_info, folio->mapping) || folio_test_private(folio))
		return 0;

	subpage = btrfs_alloc_subpage(fs_info, type);
	if (IS_ERR(subpage))
		return PTR_ERR(subpage);

	folio_attach_private(folio, subpage);
	return 0;
}

void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	/* Either not subpage, or the folio has no private attached. */
	if (!btrfs_is_subpage(fs_info, folio->mapping) || !folio_test_private(folio))
		return;

	subpage = folio_detach_private(folio);
	ASSERT(subpage);
	btrfs_free_subpage(subpage);
}

struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
					  enum btrfs_subpage_type type)
{
	struct btrfs_subpage *ret;
	unsigned int real_size;

	ASSERT(fs_info->sectorsize < PAGE_SIZE);

	real_size = struct_size(ret, bitmaps,
			BITS_TO_LONGS(fs_info->subpage_info->total_nr_bits));
	ret = kzalloc(real_size, GFP_NOFS);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&ret->lock);
	if (type == BTRFS_SUBPAGE_METADATA) {
		atomic_set(&ret->eb_refs, 0);
	} else {
		atomic_set(&ret->readers, 0);
		atomic_set(&ret->writers, 0);
	}
	return ret;
}

void btrfs_free_subpage(struct btrfs_subpage *subpage)
{
	kfree(subpage);
}

/*
 * Increase the eb_refs of current subpage.
 *
 * This is important for eb allocation, to prevent race with last eb freeing
 * of the same page.
 * With the eb_refs increased before the eb inserted into radix tree,
 * detach_extent_buffer_page() won't detach the folio private while we're still
 * allocating the extent buffer.
 */
void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	ASSERT(folio_test_private(folio) && folio->mapping);
	lockdep_assert_held(&folio->mapping->i_private_lock);

	subpage = folio_get_private(folio);
	atomic_inc(&subpage->eb_refs);
}

void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	ASSERT(folio_test_private(folio) && folio->mapping);
	lockdep_assert_held(&folio->mapping->i_private_lock);

	subpage = folio_get_private(folio);
	ASSERT(atomic_read(&subpage->eb_refs));
	atomic_dec(&subpage->eb_refs);
}

static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	/* For subpage support, the folio must be single page. */
	ASSERT(folio_order(folio) == 0);

	/* Basic checks */
	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(len, fs_info->sectorsize));
	/*
	 * The range check only works for mapped pages; we can still have
	 * unmapped pages like dummy extent buffer pages.
	 */
	if (folio->mapping)
		ASSERT(folio_pos(folio) <= start &&
		       start + len <= folio_pos(folio) + PAGE_SIZE);
}

#define subpage_calc_start_bit(fs_info, folio, name, start, len)	\
({									\
	unsigned int start_bit;						\
									\
	btrfs_subpage_assert(fs_info, folio, start, len);		\
	start_bit = offset_in_page(start) >> fs_info->sectorsize_bits;	\
	start_bit += fs_info->subpage_info->name##_offset;		\
	start_bit;							\
})

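/*
 * A worked example of the macro above, assuming 4K sectorsize
 * (sectorsize_bits == 12) and the layout from btrfs_init_subpage_info():
 * for name == dirty (dirty_offset == 16) and @start 8K into the page,
 *
 *	start_bit = (8192 >> 12) + 16 = 2 + 16 = 18
 *
 * i.e. the bit for the third sector within the dirty slice of the bitmap.
 */
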
void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
				struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
	const int nbits = len >> fs_info->sectorsize_bits;
	unsigned long flags;

	btrfs_subpage_assert(fs_info, folio, start, len);

	spin_lock_irqsave(&subpage->lock, flags);
	/*
	 * Even though it's just for reading the page, no one should have
	 * locked the subpage range.
	 */
	ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
	bitmap_set(subpage->bitmaps, start_bit, nbits);
	atomic_add(nbits, &subpage->readers);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
			      struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
	const int nbits = len >> fs_info->sectorsize_bits;
	unsigned long flags;
	bool is_data;
	bool last;

	btrfs_subpage_assert(fs_info, folio, start, len);
	is_data = is_data_inode(folio->mapping->host);

	spin_lock_irqsave(&subpage->lock, flags);

	/* The range should have already been locked. */
	ASSERT(bitmap_test_range_all_set(subpage->bitmaps, start_bit, nbits));
	ASSERT(atomic_read(&subpage->readers) >= nbits);

	bitmap_clear(subpage->bitmaps, start_bit, nbits);
	last = atomic_sub_and_test(nbits, &subpage->readers);

	/*
	 * For data we need to unlock the page if the last read has finished.
	 *
	 * Please don't replace @last with an atomic_sub_and_test() call inside
	 * the if () condition, as we want the atomic_sub_and_test() to always
	 * be executed.
	 */
	if (is_data && last)
		folio_unlock(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

static void btrfs_subpage_clamp_range(struct folio *folio, u64 *start, u32 *len)
{
	u64 orig_start = *start;
	u32 orig_len = *len;

	*start = max_t(u64, folio_pos(folio), orig_start);
	/*
	 * For certain call sites like btrfs_drop_pages(), we may have pages
	 * beyond the target range. In that case, just set @len to 0, subpage
	 * helpers can handle @len == 0 without any problem.
	 */
	if (folio_pos(folio) >= orig_start + orig_len)
		*len = 0;
	else
		*len = min_t(u64, folio_pos(folio) + PAGE_SIZE,
			     orig_start + orig_len) - *start;
}

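/*
 * A worked example of the clamping above: for a folio at file offset 64K and
 * an incoming range of start == 60K, len == 12K, the result is start == 64K
 * and len == min(64K + 64K, 60K + 12K) - 64K == 8K, i.e. only the part of
 * the range overlapping this folio is kept.
 */
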
static void btrfs_subpage_start_writer(const struct btrfs_fs_info *fs_info,
				       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int nbits = (len >> fs_info->sectorsize_bits);
	int ret;

	btrfs_subpage_assert(fs_info, folio, start, len);

	ASSERT(atomic_read(&subpage->readers) == 0);
	ret = atomic_add_return(nbits, &subpage->writers);
	ASSERT(ret == nbits);
}

static bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
					      struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int nbits = (len >> fs_info->sectorsize_bits);

	btrfs_subpage_assert(fs_info, folio, start, len);

	/*
	 * We have call sites passing @locked_page into
	 * extent_clear_unlock_delalloc() for the compression path.
	 *
	 * This @locked_page is locked by plain lock_page(), thus its
	 * subpage::writers is 0.  Handle it in a special way.
	 */
	if (atomic_read(&subpage->writers) == 0)
		return true;

	ASSERT(atomic_read(&subpage->writers) >= nbits);
	return atomic_sub_and_test(nbits, &subpage->writers);
}

/*
 * Lock a folio for delalloc page writeback.
 *
 * Return -EAGAIN if the page is not properly initialized.
 * Return 0 with the page locked, and the writer counter updated.
 *
 * Even with 0 returned, the page still needs an extra check to make sure
 * it's really the correct page, as the caller is using
 * filemap_get_folios_contig(), which can race with page invalidation.
 */
int btrfs_folio_start_writer_lock(const struct btrfs_fs_info *fs_info,
				  struct folio *folio, u64 start, u32 len)
{
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) {
		folio_lock(folio);
		return 0;
	}
	folio_lock(folio);
	if (!folio_test_private(folio) || !folio_get_private(folio)) {
		folio_unlock(folio);
		return -EAGAIN;
	}
	btrfs_subpage_clamp_range(folio, &start, &len);
	btrfs_subpage_start_writer(fs_info, folio, start, len);
	return 0;
}

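/*
 * A minimal caller sketch (hypothetical, not taken from this file) of the
 * retry pattern the comment above describes; the surrounding lookup loop and
 * the @mapping variable are assumptions for illustration only:
 *
 *	ret = btrfs_folio_start_writer_lock(fs_info, folio, start, len);
 *	if (ret == -EAGAIN)
 *		goto again;	// folio freed/reused, look it up again
 *	if (folio->mapping != mapping) {
 *		// Raced with invalidation; drop the writer lock and retry.
 *		btrfs_folio_end_writer_lock(fs_info, folio, start, len);
 *		goto again;
 *	}
 */
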
void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) {
		folio_unlock(folio);
		return;
	}
	btrfs_subpage_clamp_range(folio, &start, &len);
	if (btrfs_subpage_end_and_test_writer(fs_info, folio, start, len))
		folio_unlock(folio);
}

#define subpage_test_bitmap_all_set(fs_info, subpage, name)		\
	bitmap_test_range_all_set(subpage->bitmaps,			\
			fs_info->subpage_info->name##_offset,		\
			fs_info->subpage_info->bitmap_nr_bits)

#define subpage_test_bitmap_all_zero(fs_info, subpage, name)		\
	bitmap_test_range_all_zero(subpage->bitmaps,			\
			fs_info->subpage_info->name##_offset,		\
			fs_info->subpage_info->bitmap_nr_bits)

void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
				struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, subpage, uptodate))
		folio_mark_uptodate(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
				  struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_clear_uptodate(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info,
			     struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							dirty, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	spin_unlock_irqrestore(&subpage->lock, flags);
	folio_mark_dirty(folio);
}

/*
 * Extra clear_and_test function for subpage dirty bitmap.
 *
 * Return true if we cleared the last bits in the dirty bitmap (the dirty
 * bitmap is now all zero).
 * Return false otherwise.
 *
 * NOTE: Callers should manually clear page dirty for the true case, as we
 * have extra handling for tree blocks.
 */
bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
					struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							dirty, start, len);
	unsigned long flags;
	bool last = false;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, dirty))
		last = true;
	spin_unlock_irqrestore(&subpage->lock, flags);
	return last;
}

void btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	bool last;

	last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, start, len);
	if (last)
		folio_clear_dirty_for_io(folio);
}

void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (!folio_test_writeback(folio))
		folio_start_writeback(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info,
				   struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, writeback)) {
		ASSERT(folio_test_writeback(folio));
		folio_end_writeback(folio);
	}
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_ordered(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_set_ordered(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_ordered(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, ordered))
		folio_clear_ordered(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_checked(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							checked, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, subpage, checked))
		folio_set_checked(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							checked, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_clear_checked(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

/*
 * Unlike set/clear, which is dependent on each page status, for test all bits
 * are tested in the same way.
 */
#define IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(name)				\
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,	\
			       struct folio *folio, u64 start, u32 len)	\
{									\
	struct btrfs_subpage *subpage = folio_get_private(folio);	\
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,	\
						name, start, len);	\
	unsigned long flags;						\
	bool ret;							\
									\
	spin_lock_irqsave(&subpage->lock, flags);			\
	ret = bitmap_test_range_all_set(subpage->bitmaps, start_bit,	\
				len >> fs_info->sectorsize_bits);	\
	spin_unlock_irqrestore(&subpage->lock, flags);			\
	return ret;							\
}
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(writeback);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(ordered);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(checked);

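/*
 * For reference, each invocation above expands to one full helper; e.g.
 * IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate) generates:
 *
 *	bool btrfs_subpage_test_uptodate(const struct btrfs_fs_info *fs_info,
 *					 struct folio *folio, u64 start, u32 len);
 *
 * which returns true only if every sector bit covering [start, start + len)
 * is set in the uptodate slice of the subpage bitmap.
 */
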
/*
 * Note that, in selftests (extent-io-tests), we can have empty fs_info passed
 * in.  We only test sectorsize == PAGE_SIZE cases so far, thus we can fall
 * back to the regular sectorsize branch.
 */
#define IMPLEMENT_BTRFS_PAGE_OPS(name, folio_set_func,			\
				 folio_clear_func, folio_test_func)	\
void btrfs_folio_set_##name(const struct btrfs_fs_info *fs_info,	\
			    struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_set_func(folio);					\
		return;							\
	}								\
	btrfs_subpage_set_##name(fs_info, folio, start, len);		\
}									\
void btrfs_folio_clear_##name(const struct btrfs_fs_info *fs_info,	\
			      struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_clear_func(folio);				\
		return;							\
	}								\
	btrfs_subpage_clear_##name(fs_info, folio, start, len);	\
}									\
bool btrfs_folio_test_##name(const struct btrfs_fs_info *fs_info,	\
			     struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping))			\
		return folio_test_func(folio);				\
	return btrfs_subpage_test_##name(fs_info, folio, start, len);	\
}									\
void btrfs_folio_clamp_set_##name(const struct btrfs_fs_info *fs_info,	\
				  struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_set_func(folio);					\
		return;							\
	}								\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	btrfs_subpage_set_##name(fs_info, folio, start, len);		\
}									\
void btrfs_folio_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
				    struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_clear_func(folio);				\
		return;							\
	}								\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	btrfs_subpage_clear_##name(fs_info, folio, start, len);		\
}									\
bool btrfs_folio_clamp_test_##name(const struct btrfs_fs_info *fs_info, \
				   struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping))			\
		return folio_test_func(folio);				\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	return btrfs_subpage_test_##name(fs_info, folio, start, len);	\
}
IMPLEMENT_BTRFS_PAGE_OPS(uptodate, folio_mark_uptodate, folio_clear_uptodate,
			 folio_test_uptodate);
IMPLEMENT_BTRFS_PAGE_OPS(dirty, folio_mark_dirty, folio_clear_dirty_for_io,
			 folio_test_dirty);
IMPLEMENT_BTRFS_PAGE_OPS(writeback, folio_start_writeback, folio_end_writeback,
			 folio_test_writeback);
IMPLEMENT_BTRFS_PAGE_OPS(ordered, folio_set_ordered, folio_clear_ordered,
			 folio_test_ordered);
IMPLEMENT_BTRFS_PAGE_OPS(checked, folio_set_checked, folio_clear_checked,
			 folio_test_checked);

/*
 * Make sure not only the page dirty bit is cleared, but also the subpage
 * dirty bit is cleared.
 */
void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);

	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	ASSERT(!folio_test_dirty(folio));
	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(subpage_test_bitmap_all_zero(fs_info, subpage, dirty));
}

/*
 * Handle different locked pages with different page sizes:
 *
 * - Page locked by plain lock_page()
 *   It should not have any subpage::writers count.
 *   Can be unlocked by unlock_page().
 *   This is the most common locked page for __extent_writepage() called
 *   inside extent_write_cache_pages().
 *   Rarer cases include the @locked_page from extent_write_locked_range().
 *
 * - Page locked by lock_delalloc_pages()
 *   There is only one caller, with all pages except @locked_page coming from
 *   extent_write_locked_range().
 *   In this case, we have to call the subpage helper to handle it.
 */
void btrfs_folio_unlock_writer(struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage;

	ASSERT(folio_test_locked(folio));
	/* For the non-subpage case, we just unlock the page. */
	if (!btrfs_is_subpage(fs_info, folio->mapping)) {
		folio_unlock(folio);
		return;
	}

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	subpage = folio_get_private(folio);

	/*
	 * For the subpage case, there are two types of locked page: with or
	 * without a writers number.
	 *
	 * Since we own the page lock, no one else could touch subpage::writers
	 * and we are safe to do several atomic operations without spinlock.
	 */
	if (atomic_read(&subpage->writers) == 0) {
		/* No writers, locked by plain lock_page(). */
		folio_unlock(folio);
		return;
	}

	/* Have writers, use the proper subpage helper to end it. */
	btrfs_folio_end_writer_lock(fs_info, folio, start, len);
}

#define GET_SUBPAGE_BITMAP(subpage, subpage_info, name, dst)		\
	bitmap_cut(dst, subpage->bitmaps, 0,				\
		   subpage_info->name##_offset, subpage_info->bitmap_nr_bits)

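/*
 * The intent of the macro above: copy the @name slice (bitmap_nr_bits wide,
 * starting at name##_offset) out of the packed subpage bitmap into @dst, so
 * each state can be printed separately with the %*pbl format below.
 */
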
755 void __cold
btrfs_subpage_dump_bitmap(const struct btrfs_fs_info
*fs_info
,
756 struct folio
*folio
, u64 start
, u32 len
)
758 struct btrfs_subpage_info
*subpage_info
= fs_info
->subpage_info
;
759 struct btrfs_subpage
*subpage
;
760 unsigned long uptodate_bitmap
;
761 unsigned long error_bitmap
;
762 unsigned long dirty_bitmap
;
763 unsigned long writeback_bitmap
;
764 unsigned long ordered_bitmap
;
765 unsigned long checked_bitmap
;
768 ASSERT(folio_test_private(folio
) && folio_get_private(folio
));
769 ASSERT(subpage_info
);
770 subpage
= folio_get_private(folio
);
772 spin_lock_irqsave(&subpage
->lock
, flags
);
773 GET_SUBPAGE_BITMAP(subpage
, subpage_info
, uptodate
, &uptodate_bitmap
);
774 GET_SUBPAGE_BITMAP(subpage
, subpage_info
, dirty
, &dirty_bitmap
);
775 GET_SUBPAGE_BITMAP(subpage
, subpage_info
, writeback
, &writeback_bitmap
);
776 GET_SUBPAGE_BITMAP(subpage
, subpage_info
, ordered
, &ordered_bitmap
);
777 GET_SUBPAGE_BITMAP(subpage
, subpage_info
, checked
, &checked_bitmap
);
778 GET_SUBPAGE_BITMAP(subpage
, subpage_info
, locked
, &checked_bitmap
);
779 spin_unlock_irqrestore(&subpage
->lock
, flags
);
781 dump_page(folio_page(folio
, 0), "btrfs subpage dump");
783 "start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl error=%*pbl dirty=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl",
784 start
, len
, folio_pos(folio
),
785 subpage_info
->bitmap_nr_bits
, &uptodate_bitmap
,
786 subpage_info
->bitmap_nr_bits
, &error_bitmap
,
787 subpage_info
->bitmap_nr_bits
, &dirty_bitmap
,
788 subpage_info
->bitmap_nr_bits
, &writeback_bitmap
,
789 subpage_info
->bitmap_nr_bits
, &ordered_bitmap
,
790 subpage_info
->bitmap_nr_bits
, &checked_bitmap
);