if (WARN_ON_ONCE(found_start != eb->start))
return BLK_STS_IOERR;
- if (WARN_ON(!btrfs_folio_test_uptodate(fs_info, eb->folios[0],
- eb->start, eb->len)))
+ if (WARN_ON(!btrfs_meta_folio_test_uptodate(fs_info, eb->folios[0],
+ eb->start, eb->len)))
return BLK_STS_IOERR;
ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid,
struct extent_buffer *eb = bbio->private;
struct btrfs_fs_info *fs_info = eb->fs_info;
struct folio_iter fi;
- u32 bio_offset = 0;
if (bbio->bio.bi_status != BLK_STS_OK)
set_btree_ioerr(eb);
bio_for_each_folio_all(fi, &bbio->bio) {
- u64 start = eb->start + bio_offset;
struct folio *folio = fi.folio;
- u32 len = fi.length;
- btrfs_folio_clear_writeback(fs_info, folio, start, len);
- bio_offset += len;
+ btrfs_meta_folio_clear_writeback(fs_info, folio, eb->start, eb->len);
}
clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
* and free the allocated page.
*/
folio = eb->folios[i];
- WARN_ON(btrfs_folio_test_dirty(fs_info, folio, eb->start, eb->len));
+ WARN_ON(btrfs_meta_folio_test_dirty(fs_info, folio, eb->start, eb->len));
/*
* Check if the current page is physically contiguous with previous eb
if (i && folio_page(eb->folios[i - 1], 0) + 1 != folio_page(folio, 0))
page_contig = false;
- if (!btrfs_folio_test_uptodate(fs_info, folio, eb->start, eb->len))
+ if (!btrfs_meta_folio_test_uptodate(fs_info, folio, eb->start, eb->len))
uptodate = 0;
/*
* This means a slightly higher tree locking latency.
*/
-#if PAGE_SIZE > SZ_4K
-bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct address_space *mapping)
-{
- if (fs_info->sectorsize >= PAGE_SIZE)
- return false;
-
- /*
- * Only data pages (either through DIO or compression) can have no
- * mapping. And if mapping->host is data inode, it's subpage.
- * As we have ruled our sectorsize >= PAGE_SIZE case already.
- */
- if (!mapping || !mapping->host || is_data_inode(BTRFS_I(mapping->host)))
- return true;
-
- /*
- * Now the only remaining case is metadata, which we only go subpage
- * routine if nodesize < PAGE_SIZE.
- */
- if (fs_info->nodesize < PAGE_SIZE)
- return true;
- return false;
-}
-#endif
-
int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
struct folio *folio, enum btrfs_subpage_type type)
{
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/sizes.h>
+#include "btrfs_inode.h"
#include "fs.h"
struct address_space;
{
return fs_info->nodesize < PAGE_SIZE;
}
-bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct address_space *mapping);
+static inline bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info,
+ struct address_space *mapping)
+{
+ if (mapping && mapping->host)
+ ASSERT(is_data_inode(BTRFS_I(mapping->host)));
+ return fs_info->sectorsize < PAGE_SIZE;
+}
#else
static inline bool btrfs_meta_is_subpage(const struct btrfs_fs_info *fs_info)
{
static inline bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info,
struct address_space *mapping)
{
+ if (mapping && mapping->host)
+ ASSERT(is_data_inode(BTRFS_I(mapping->host)));
return false;
}
#endif
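
Note (not part of the patch): after this change the subpage decision is split by use case. Data paths keep passing the mapping to btrfs_is_subpage(), which now only checks sectorsize and asserts that the inode is a data inode, while metadata paths use btrfs_meta_is_subpage(), which only checks nodesize. Below is a minimal caller sketch against the signatures shown above; the example_* wrappers are hypothetical and only illustrate which helper each path is expected to call.

	/* Data path: folio->mapping->host is a data inode, so the ASSERT() in
	 * btrfs_is_subpage() holds and only sectorsize < PAGE_SIZE matters. */
	static bool example_data_needs_subpage(const struct btrfs_fs_info *fs_info,
					       struct folio *folio)
	{
		return btrfs_is_subpage(fs_info, folio->mapping);
	}

	/* Metadata path: no mapping is consulted; the decision depends only on
	 * nodesize < PAGE_SIZE (see btrfs_meta_is_subpage() above). */
	static bool example_meta_needs_subpage(const struct btrfs_fs_info *fs_info)
	{
		return btrfs_meta_is_subpage(fs_info);
	}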