bio_put(&cb->bbio.bio);
}
-static void btrfs_add_compressed_bio_folios(struct compressed_bio *cb)
-{
- struct bio *bio = &cb->bbio.bio;
- u32 offset = 0;
- unsigned int findex = 0;
-
- while (offset < cb->compressed_len) {
- struct folio *folio = cb->compressed_folios[findex];
- u32 len = min_t(u32, cb->compressed_len - offset, folio_size(folio));
- int ret;
-
- /* Maximum compressed extent is smaller than bio size limit. */
- ret = bio_add_folio(bio, folio, len, 0);
- ASSERT(ret);
- offset += len;
- findex++;
- }
-}
-
/*
 * worker function to build and submit bios for previously compressed pages.
- * The corresponding pages in the inode should be marked for writeback
- * the end io hooks.
+ * The corresponding pages in the inode should be marked for writeback;
+ * the end io hooks will clear the writeback state when the IO completes.
 */
void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
- struct folio **compressed_folios,
- unsigned int nr_folios,
- blk_opf_t write_flags,
- bool writeback)
+ struct compressed_bio *cb)
{
struct btrfs_inode *inode = ordered->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- struct compressed_bio *cb;
ASSERT(IS_ALIGNED(ordered->file_offset, fs_info->sectorsize));
ASSERT(IS_ALIGNED(ordered->num_bytes, fs_info->sectorsize));
+ /*
+  * The caller must have allocated @cb via btrfs_alloc_compressed_write()
+  * and queued all compressed folios onto cb->bbio.bio already; this
+  * function only fills in the ordered-extent-derived members and submits.
+  */
+ ASSERT(cb->writeback);
- cb = alloc_compressed_bio(inode, ordered->file_offset,
- REQ_OP_WRITE | write_flags,
- end_bbio_compressed_write);
cb->start = ordered->file_offset;
cb->len = ordered->num_bytes;
- cb->compressed_folios = compressed_folios;
cb->compressed_len = ordered->disk_num_bytes;
- cb->writeback = writeback;
- cb->nr_folios = nr_folios;
cb->bbio.bio.bi_iter.bi_sector = ordered->disk_bytenr >> SECTOR_SHIFT;
cb->bbio.ordered = ordered;
- btrfs_add_compressed_bio_folios(cb);
+ /* Ownership of @cb passes to the bio layer here; do not touch it after. */
btrfs_submit_bbio(&cb->bbio, 0);
}
+/*
+ * Allocate a compressed write bio for @inode file offset @start length @len.
+ *
+ * The caller still needs to properly queue all folios (bio_add_folio() onto
+ * cb->bbio.bio) and populate involved members before submitting through
+ * btrfs_submit_compressed_write().
+ */
+struct compressed_bio *btrfs_alloc_compressed_write(struct btrfs_inode *inode,
+ u64 start, u64 len)
+{
+ struct compressed_bio *cb;
+
+ /*
+  * NOTE(review): the old interface OR'ed caller-supplied write_flags into
+  * REQ_OP_WRITE; they are dropped here — confirm no caller still needs
+  * extra blk_opf_t flags (e.g. REQ_SYNC for sync writeback).
+  */
+ cb = alloc_compressed_bio(inode, start, REQ_OP_WRITE, end_bbio_compressed_write);
+ cb->start = start;
+ cb->len = len;
+ /*
+  * NOTE(review): writeback is unconditionally true now, while the old
+  * encoded-write caller passed writeback=false — confirm the end_io
+  * behavior change is intended for that path.
+  */
+ cb->writeback = true;
+
+ return cb;
+}
+
/*
* Add extra pages in the same compressed file extent so that we don't need to
* re-read the same extent again and again.
int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
struct compressed_bio *cb, u32 decompressed);
+/*
+ * Allocate a compressed write bio; the caller queues the compressed folios
+ * onto cb->bbio.bio and then submits via btrfs_submit_compressed_write().
+ */
+struct compressed_bio *btrfs_alloc_compressed_write(struct btrfs_inode *inode,
+ u64 start, u64 len);
void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
- struct folio **compressed_folios,
- unsigned int nr_folios, blk_opf_t write_flags,
- bool writeback);
+ struct compressed_bio *cb);
void btrfs_submit_compressed_read(struct btrfs_bio *bbio);
int btrfs_compress_str2level(unsigned int type, const char *str, int *level_ret);
struct extent_state *cached_state = NULL;
struct btrfs_ordered_extent *ordered;
struct btrfs_file_extent file_extent;
+ /* NULL until allocated; the out_cb error path checks this sentinel. */
+ struct compressed_bio *cb = NULL;
int compression;
size_t orig_count;
+ /* Smallest folio size this fs will use for compressed data. */
+ const u32 min_folio_size = btrfs_min_folio_size(fs_info);
u64 start, end;
u64 num_bytes, ram_bytes, disk_num_bytes;
- unsigned long nr_folios, i;
- struct folio **folios;
struct btrfs_key ins;
bool extent_reserved = false;
struct extent_map *em;
* isn't.
*/
disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize);
- nr_folios = DIV_ROUND_UP(disk_num_bytes, PAGE_SIZE);
- folios = kvcalloc(nr_folios, sizeof(struct folio *), GFP_KERNEL_ACCOUNT);
- if (!folios)
- return -ENOMEM;
- for (i = 0; i < nr_folios; i++) {
- size_t bytes = min_t(size_t, PAGE_SIZE, iov_iter_count(from));
+
+ /*
+  * Copy the caller's compressed data into freshly allocated compressed
+  * folios, queueing each one directly onto the cb's bio.
+  */
+ cb = btrfs_alloc_compressed_write(inode, start, num_bytes);
+ for (int i = 0; i * min_folio_size < disk_num_bytes; i++) {
+ struct folio *folio;
+ size_t bytes = min(min_folio_size, iov_iter_count(from));
char *kaddr;
- folios[i] = folio_alloc(GFP_KERNEL_ACCOUNT, 0);
- if (!folios[i]) {
+ folio = btrfs_alloc_compr_folio(fs_info);
+ if (!folio) {
ret = -ENOMEM;
- goto out_folios;
+ goto out_cb;
}
- kaddr = kmap_local_folio(folios[i], 0);
- if (copy_from_iter(kaddr, bytes, from) != bytes) {
- kunmap_local(kaddr);
+ kaddr = kmap_local_folio(folio, 0);
+ /*
+  * NOTE(review): copy_from_iter() returns size_t but is stored in
+  * int ret and compared against size_t bytes; safe while bytes <=
+  * min_folio_size, but a size_t local would avoid the mixed-sign
+  * comparison warning — consider confirming with -Wconversion.
+  */
+ ret = copy_from_iter(kaddr, bytes, from);
+ kunmap_local(kaddr);
+ if (ret != bytes) {
+ folio_put(folio);
ret = -EFAULT;
- goto out_folios;
+ goto out_cb;
+ }
+ /* Zero the tail so the on-disk sectors past the copy are clean. */
+ if (bytes < min_folio_size)
+ folio_zero_range(folio, bytes, min_folio_size - bytes);
+ /*
+  * Should never fail: the maximum compressed extent is smaller than
+  * the bio size limit, hence -EINVAL is defensive only.
+  */
+ ret = bio_add_folio(&cb->bbio.bio, folio, folio_size(folio), 0);
+ if (unlikely(!ret)) {
+ folio_put(folio);
+ ret = -EINVAL;
+ goto out_cb;
}
- if (bytes < PAGE_SIZE)
- memset(kaddr + bytes, 0, PAGE_SIZE - bytes);
- kunmap_local(kaddr);
}
+ ASSERT(cb->bbio.bio.bi_iter.bi_size == disk_num_bytes);
for (;;) {
ret = btrfs_wait_ordered_range(inode, start, num_bytes);
if (ret)
- goto out_folios;
+ goto out_cb;
ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping,
start >> PAGE_SHIFT,
end >> PAGE_SHIFT);
if (ret)
- goto out_folios;
+ goto out_cb;
btrfs_lock_extent(io_tree, start, end, &cached_state);
ordered = btrfs_lookup_ordered_range(inode, start, num_bytes);
if (!ordered &&
encoded->unencoded_offset == 0 &&
can_cow_file_range_inline(inode, start, encoded->len, orig_count)) {
+ /* Inline extents only need the first folio already queued on the bio. */
ret = __cow_file_range_inline(inode, encoded->len,
- orig_count, compression, folios[0],
+ orig_count, compression,
+ bio_first_folio_all(&cb->bbio.bio),
true);
if (ret <= 0) {
if (ret == 0)
btrfs_delalloc_release_extents(inode, num_bytes);
+ /*
+  * NOTE(review): the old call passed writeback=false here, but
+  * btrfs_alloc_compressed_write() sets cb->writeback = true —
+  * confirm the end-io writeback handling change is intended.
+  * cb ownership passes to the bio layer on this call.
+  */
- btrfs_submit_compressed_write(ordered, folios, nr_folios, 0, false);
+ btrfs_submit_compressed_write(ordered, cb);
ret = orig_count;
goto out;
btrfs_free_reserved_data_space_noquota(inode, disk_num_bytes);
out_unlock:
btrfs_unlock_extent(io_tree, start, end, &cached_state);
-out_folios:
- for (i = 0; i < nr_folios; i++) {
- if (folios[i])
- folio_put(folios[i]);
- }
- kvfree(folios);
+out_cb:
+ /*
+  * cb is NULL only when we bail before allocation; otherwise
+  * cleanup_compressed_bio() presumably puts every folio already queued on
+  * the bio and drops the bbio reference — TODO(review) confirm it handles
+  * a partially filled, never-submitted bio.
+  */
+ if (cb)
+ cleanup_compressed_bio(cb);
out:
if (ret >= 0)
iocb->ki_pos += encoded->len;