gfp_t gfp_mask)
{
const struct address_space *mapping = folio_mapping(bh->b_folio);
- const struct inode *inode;
- u64 lblk;
/*
* The ext4 journal (jbd2) can submit a buffer_head it directly created
*/
if (!mapping)
return;
- inode = mapping->host;
- lblk = (folio_pos(bh->b_folio) + bh_offset(bh)) >> inode->i_blkbits;
- fscrypt_set_bio_crypt_ctx(bio, inode, lblk, gfp_mask);
+ fscrypt_set_bio_crypt_ctx(bio, mapping->host,
+ folio_pos(bh->b_folio) + bh_offset(bh), gfp_mask);
}
static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
{
const unsigned int blockbits = inode->i_blkbits;
const unsigned int blocks_per_page = 1 << (PAGE_SHIFT - blockbits);
+ loff_t pos = (loff_t)lblk << blockbits;
struct fscrypt_zero_done done = {
.pending = ATOMIC_INIT(1),
.done = COMPLETION_INITIALIZER_ONSTACK(done.done),
bio->bi_iter.bi_sector = sector;
bio->bi_private = &done;
bio->bi_end_io = fscrypt_zeroout_range_end_io;
- fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS);
+ fscrypt_set_bio_crypt_ctx(bio, inode, pos, GFP_NOFS);
for (n = 0; n < BIO_MAX_VECS; n++) {
unsigned int blocks_this_page =
__bio_add_page(bio, ZERO_PAGE(0), bytes_this_page, 0);
len -= blocks_this_page;
- lblk += blocks_this_page;
+ pos += bytes_this_page;
sector += (bytes_this_page >> SECTOR_SHIFT);
- if (!len || !fscrypt_mergeable_bio(bio, inode,
- (loff_t)lblk << blockbits))
+ if (!len || !fscrypt_mergeable_bio(bio, inode, pos))
break;
}
* fscrypt_set_bio_crypt_ctx() - prepare a file contents bio for inline crypto
* @bio: a bio which will eventually be submitted to the file
* @inode: the file's inode
- * @first_lblk: the first file logical block number in the I/O
+ * @pos: the offset in the file (in bytes) at which the I/O begins
* @gfp_mask: memory allocation flags - these must be a waiting mask so that
* bio_crypt_set_ctx can't fail.
*
* The encryption context will be freed automatically when the bio is freed.
*/
void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
- u64 first_lblk, gfp_t gfp_mask)
+ loff_t pos, gfp_t gfp_mask)
{
const struct fscrypt_inode_info *ci;
u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
return;
ci = fscrypt_get_inode_info_raw(inode);
- fscrypt_generate_dun(ci, first_lblk << inode->i_blkbits, dun);
+ fscrypt_generate_dun(ci, pos, dun);
bio_crypt_set_ctx(bio, ci->ci_enc_key.blk_key, dun, gfp_mask);
}
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx);
* __GFP_DIRECT_RECLAIM is set, see comments for bio_alloc_bioset().
*/
bio = bio_alloc(bh->b_bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOIO);
- fscrypt_set_bio_crypt_ctx(bio, inode,
- (folio_pos(folio) + bh_offset(bh)) >> inode->i_blkbits,
- GFP_NOIO);
+ fscrypt_set_bio_crypt_ctx(bio, inode, folio_pos(folio) + bh_offset(bh),
+ GFP_NOIO);
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio->bi_end_io = ext4_end_bio;
bio->bi_private = ext4_get_io_end(io->io_end);
*/
bio = bio_alloc(bdev, bio_max_segs(nr_pages),
REQ_OP_READ, GFP_KERNEL);
- fscrypt_set_bio_crypt_ctx(bio, inode, next_block,
- GFP_KERNEL);
+ fscrypt_set_bio_crypt_ctx(bio, inode,
+ (loff_t)next_block << blkbits, GFP_KERNEL);
ext4_set_bio_post_read_ctx(bio, inode, vi);
bio->bi_iter.bi_sector = first_block << (blkbits - 9);
bio->bi_end_io = mpage_end_io;
* read/write raw data without encryption.
*/
if (!fio || !fio->encrypted_page)
- fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
+ fscrypt_set_bio_crypt_ctx(bio, inode,
+ (loff_t)first_idx << inode->i_blkbits,
+ gfp_mask);
}
static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
bio = iomap_dio_alloc_bio(iter, dio, nr_vecs,
REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
- fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
- GFP_KERNEL);
+ fscrypt_set_bio_crypt_ctx(bio, inode, pos, GFP_KERNEL);
bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
bio->bi_private = dio;
bio->bi_end_io = iomap_dio_bio_end_io;
nr_vecs = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS);
bio = iomap_dio_alloc_bio(iter, dio, nr_vecs, op);
- fscrypt_set_bio_crypt_ctx(bio, iter->inode,
- pos >> iter->inode->i_blkbits, GFP_KERNEL);
+ fscrypt_set_bio_crypt_ctx(bio, iter->inode, pos, GFP_KERNEL);
bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
bio->bi_write_hint = iter->inode->i_write_hint;
bio->bi_ioprio = dio->iocb->ki_ioprio;
bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode);
-void fscrypt_set_bio_crypt_ctx(struct bio *bio,
- const struct inode *inode, u64 first_lblk,
- gfp_t gfp_mask);
+void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
+ loff_t pos, gfp_t gfp_mask);
bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
loff_t pos);
static inline void fscrypt_set_bio_crypt_ctx(struct bio *bio,
const struct inode *inode,
- u64 first_lblk, gfp_t gfp_mask) { }
+ loff_t pos, gfp_t gfp_mask) { }
static inline bool fscrypt_mergeable_bio(struct bio *bio,
const struct inode *inode,