struct bio_vec bvec;
struct bvec_iter iter;
const u32 blocksize = fs_info->sectorsize;
+ const u32 alignment = min(blocksize, PAGE_SIZE);
+ const u64 logical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
+ const u32 length = bbio->bio.bi_iter.bi_size;
- /* Metadata has no extra bs > ps alignment requirement. */
- if (!is_data_bbio(bbio))
- return;
+ /* Both the logical start and the length must still be aligned to the block size. */
+ ASSERT(IS_ALIGNED(logical, blocksize) && IS_ALIGNED(length, blocksize) &&
+ length != 0, "root=%llu inode=%llu logical=%llu length=%u",
+ btrfs_root_id(bbio->inode->root),
+ btrfs_ino(bbio->inode), logical, length);
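+ /*
+  * Each bvec is only required to be aligned to min(blocksize, PAGE_SIZE).
+  * With a block size larger than the page size a block is backed by
+  * multiple pages, so individual bvecs can only be expected to be page
+  * aligned; the bio's logical start and length are still checked
+  * against the block size above.
+  */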
bio_for_each_bvec(bvec, &bbio->bio, iter)
- ASSERT(IS_ALIGNED(bvec.bv_offset, blocksize) &&
- IS_ALIGNED(bvec.bv_len, blocksize),
+ ASSERT(IS_ALIGNED(bvec.bv_offset, alignment) &&
+ IS_ALIGNED(bvec.bv_len, alignment),
"root=%llu inode=%llu logical=%llu length=%u index=%u bv_offset=%u bv_len=%u",
btrfs_root_id(bbio->inode->root),
- btrfs_ino(bbio->inode),
- bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT,
- bbio->bio.bi_iter.bi_size, iter.bi_idx,
- bvec.bv_offset,
- bvec.bv_len);
+ btrfs_ino(bbio->inode), logical, length, iter.bi_idx,
+ bvec.bv_offset, bvec.bv_len);
#endif
}
static void btrfs_add_compressed_bio_folios(struct compressed_bio *cb)
{
- struct btrfs_fs_info *fs_info = cb->bbio.inode->root->fs_info;
struct bio *bio = &cb->bbio.bio;
u32 offset = 0;
+ unsigned int findex = 0;
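+ /*
+  * Walk the compressed folios by index and size each chunk from
+  * folio_size(), as the folios can be larger than a single page
+  * (e.g. when the block size exceeds the page size).
+  */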
while (offset < cb->compressed_len) {
- struct folio *folio;
+ struct folio *folio = cb->compressed_folios[findex];
+ u32 len = min_t(u32, cb->compressed_len - offset, folio_size(folio));
int ret;
- u32 len = min_t(u32, cb->compressed_len - offset,
- btrfs_min_folio_size(fs_info));
- folio = cb->compressed_folios[offset >> (PAGE_SHIFT + fs_info->block_min_order)];
/* Maximum compressed extent is smaller than bio size limit. */
ret = bio_add_folio(bio, folio, len, 0);
ASSERT(ret);
offset += len;
+ findex++;
}
}
goto out_acct;
}
- if (fs_info->sectorsize > PAGE_SIZE) {
- ret = -ENOTTY;
- goto out_acct;
- }
if (compat) {
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
struct btrfs_ioctl_encoded_io_args_32 args32;
static int btrfs_ioctl_encoded_write(struct file *file, void __user *argp, bool compat)
{
- struct btrfs_fs_info *fs_info = inode_to_fs_info(file->f_inode);
struct btrfs_ioctl_encoded_io_args args;
struct iovec iovstack[UIO_FASTIOV];
struct iovec *iov = iovstack;
goto out_acct;
}
- if (fs_info->sectorsize > PAGE_SIZE) {
- ret = -ENOTTY;
- goto out_acct;
- }
-
if (!(file->f_mode & FMODE_WRITE)) {
ret = -EBADF;
goto out_acct;
ret = -EPERM;
goto out_acct;
}
- if (fs_info->sectorsize > PAGE_SIZE) {
- ret = -ENOTTY;
- goto out_acct;
- }
-
sqe_addr = u64_to_user_ptr(READ_ONCE(cmd->sqe->addr));
if (issue_flags & IO_URING_F_COMPAT) {
static int btrfs_uring_encoded_write(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
struct file *file = cmd->file;
- struct btrfs_fs_info *fs_info = inode_to_fs_info(file->f_inode);
loff_t pos;
struct kiocb kiocb;
ssize_t ret;
ret = -EPERM;
goto out_acct;
}
- if (fs_info->sectorsize > PAGE_SIZE) {
- ret = -ENOTTY;
- goto out_acct;
- }
-
sqe_addr = u64_to_user_ptr(READ_ONCE(cmd->sqe->addr));
if (!(file->f_mode & FMODE_WRITE)) {
ei = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
- /*
- * Do not go through encoded read for bs > ps cases.
- *
- * Encoded send is using vmallocated pages as buffer, which we can
- * not ensure every folio is large enough to contain a block.
- */
- if (sctx->send_root->fs_info->sectorsize <= PAGE_SIZE &&
- (sctx->flags & BTRFS_SEND_FLAG_COMPRESSED) &&
+ if ((sctx->flags & BTRFS_SEND_FLAG_COMPRESSED) &&
btrfs_file_extent_compression(leaf, ei) != BTRFS_COMPRESS_NONE) {
bool is_inline = (btrfs_file_extent_type(leaf, ei) ==
BTRFS_FILE_EXTENT_INLINE);