git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
fscrypt: pass a byte offset to fscrypt_set_bio_crypt_ctx
author: Christoph Hellwig <hch@lst.de>
Mon, 2 Mar 2026 14:18:13 +0000 (06:18 -0800)
committer: Eric Biggers <ebiggers@kernel.org>
Mon, 9 Mar 2026 20:31:50 +0000 (13:31 -0700)
Logical offsets into an inode are usually expressed as bytes in the VFS.
Switch fscrypt_set_bio_crypt_ctx to that convention.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20260302141922.370070-9-hch@lst.de
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
fs/buffer.c
fs/crypto/bio.c
fs/crypto/inline_crypt.c
fs/ext4/page-io.c
fs/ext4/readpage.c
fs/f2fs/data.c
fs/iomap/direct-io.c
include/linux/fscrypt.h

index b6504ec7fa4cd7fbbc32ed043337c4410216c279..1c8ee5a59f88adf01a80ed8edfcde2cbe50dbe19 100644 (file)
@@ -2778,8 +2778,6 @@ static void buffer_set_crypto_ctx(struct bio *bio, const struct buffer_head *bh,
                                  gfp_t gfp_mask)
 {
        const struct address_space *mapping = folio_mapping(bh->b_folio);
-       const struct inode *inode;
-       u64 lblk;
 
        /*
         * The ext4 journal (jbd2) can submit a buffer_head it directly created
@@ -2787,9 +2785,8 @@ static void buffer_set_crypto_ctx(struct bio *bio, const struct buffer_head *bh,
         */
        if (!mapping)
                return;
-       inode = mapping->host;
-       lblk = (folio_pos(bh->b_folio) + bh_offset(bh)) >> inode->i_blkbits;
-       fscrypt_set_bio_crypt_ctx(bio, inode, lblk, gfp_mask);
+       fscrypt_set_bio_crypt_ctx(bio, mapping->host,
+                       folio_pos(bh->b_folio) + bh_offset(bh), gfp_mask);
 }
 
 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
index 0a701d4a17ef2e628c19918e6f6b219922c608c1..e7fb2fdd972855c818e82b78414585ea374cb4be 100644 (file)
@@ -75,6 +75,7 @@ static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
 {
        const unsigned int blockbits = inode->i_blkbits;
        const unsigned int blocks_per_page = 1 << (PAGE_SHIFT - blockbits);
+       loff_t pos = (loff_t)lblk << blockbits;
        struct fscrypt_zero_done done = {
                .pending        = ATOMIC_INIT(1),
                .done           = COMPLETION_INITIALIZER_ONSTACK(done.done),
@@ -89,7 +90,7 @@ static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
                bio->bi_iter.bi_sector = sector;
                bio->bi_private = &done;
                bio->bi_end_io = fscrypt_zeroout_range_end_io;
-               fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS);
+               fscrypt_set_bio_crypt_ctx(bio, inode, pos, GFP_NOFS);
 
                for (n = 0; n < BIO_MAX_VECS; n++) {
                        unsigned int blocks_this_page =
@@ -98,10 +99,9 @@ static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
 
                        __bio_add_page(bio, ZERO_PAGE(0), bytes_this_page, 0);
                        len -= blocks_this_page;
-                       lblk += blocks_this_page;
+                       pos += bytes_this_page;
                        sector += (bytes_this_page >> SECTOR_SHIFT);
-                       if (!len || !fscrypt_mergeable_bio(bio, inode,
-                                       (loff_t)lblk << blockbits))
+                       if (!len || !fscrypt_mergeable_bio(bio, inode, pos))
                                break;
                }
 
index b0954d17904b1e960a638385143ed96abcc52186..37d42d357925eafa305f6a2e112a29946227a4bb 100644 (file)
@@ -285,7 +285,7 @@ static void fscrypt_generate_dun(const struct fscrypt_inode_info *ci,
  * fscrypt_set_bio_crypt_ctx() - prepare a file contents bio for inline crypto
  * @bio: a bio which will eventually be submitted to the file
  * @inode: the file's inode
- * @first_lblk: the first file logical block number in the I/O
+ * @pos: the first file position (in bytes) in the I/O
  * @gfp_mask: memory allocation flags - these must be a waiting mask so that
  *                                     bio_crypt_set_ctx can't fail.
  *
@@ -298,7 +298,7 @@ static void fscrypt_generate_dun(const struct fscrypt_inode_info *ci,
  * The encryption context will be freed automatically when the bio is freed.
  */
 void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
-                              u64 first_lblk, gfp_t gfp_mask)
+                              loff_t pos, gfp_t gfp_mask)
 {
        const struct fscrypt_inode_info *ci;
        u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
@@ -307,7 +307,7 @@ void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
                return;
        ci = fscrypt_get_inode_info_raw(inode);
 
-       fscrypt_generate_dun(ci, first_lblk << inode->i_blkbits, dun);
+       fscrypt_generate_dun(ci, pos, dun);
        bio_crypt_set_ctx(bio, ci->ci_enc_key.blk_key, dun, gfp_mask);
 }
 EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx);
index 50f507bab82c3e675e136aed9fadb3ada924aeb8..181cda58d387bbe0fe090d86329f166033be8a39 100644 (file)
@@ -427,9 +427,8 @@ static void io_submit_init_bio(struct ext4_io_submit *io,
         * __GFP_DIRECT_RECLAIM is set, see comments for bio_alloc_bioset().
         */
        bio = bio_alloc(bh->b_bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOIO);
-       fscrypt_set_bio_crypt_ctx(bio, inode,
-                       (folio_pos(folio) + bh_offset(bh)) >> inode->i_blkbits,
-                       GFP_NOIO);
+       fscrypt_set_bio_crypt_ctx(bio, inode, folio_pos(folio) + bh_offset(bh),
+                                 GFP_NOIO);
        bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio->bi_end_io = ext4_end_bio;
        bio->bi_private = ext4_get_io_end(io->io_end);
index ba7cfddd6038f9564ac601fa3bd5dc720eaf595e..fbfa4d830d9a338828b5fd3f6c206f9b14e8016e 100644 (file)
@@ -355,8 +355,8 @@ static int ext4_mpage_readpages(struct inode *inode, struct fsverity_info *vi,
                         */
                        bio = bio_alloc(bdev, bio_max_segs(nr_pages),
                                        REQ_OP_READ, GFP_KERNEL);
-                       fscrypt_set_bio_crypt_ctx(bio, inode, next_block,
-                                                 GFP_KERNEL);
+                       fscrypt_set_bio_crypt_ctx(bio, inode,
+                                       (loff_t)next_block << blkbits, GFP_KERNEL);
                        ext4_set_bio_post_read_ctx(bio, inode, vi);
                        bio->bi_iter.bi_sector = first_block << (blkbits - 9);
                        bio->bi_end_io = mpage_end_io;
index dca273fedfde6eb25ebce9ef9568aa98c06c88f3..07b4ed6bb0ccb99efad43ab26dc779131226d195 100644 (file)
@@ -527,7 +527,9 @@ static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
         * read/write raw data without encryption.
         */
        if (!fio || !fio->encrypted_page)
-               fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
+               fscrypt_set_bio_crypt_ctx(bio, inode,
+                               (loff_t)first_idx << inode->i_blkbits,
+                               gfp_mask);
 }
 
 static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
index e911daedff65ae60ecb9b7f710ada6ddaefaf425..9da5d862ef9e0eabfbe346610b91094bccb4c40d 100644 (file)
@@ -311,8 +311,7 @@ static int iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
 
        bio = iomap_dio_alloc_bio(iter, dio, nr_vecs,
                                  REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
-       fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
-                                 GFP_KERNEL);
+       fscrypt_set_bio_crypt_ctx(bio, inode, pos, GFP_KERNEL);
        bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
        bio->bi_private = dio;
        bio->bi_end_io = iomap_dio_bio_end_io;
@@ -342,8 +341,7 @@ static ssize_t iomap_dio_bio_iter_one(struct iomap_iter *iter,
                nr_vecs = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS);
 
        bio = iomap_dio_alloc_bio(iter, dio, nr_vecs, op);
-       fscrypt_set_bio_crypt_ctx(bio, iter->inode,
-                       pos >> iter->inode->i_blkbits, GFP_KERNEL);
+       fscrypt_set_bio_crypt_ctx(bio, iter->inode, pos, GFP_KERNEL);
        bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
        bio->bi_write_hint = iter->inode->i_write_hint;
        bio->bi_ioprio = dio->iocb->ki_ioprio;
index 98fb14660d40d61d8afc27c4e59d2f2859c274ff..90f75fe0e1c9a7048686d367033becc599f2cf8b 100644 (file)
@@ -865,9 +865,8 @@ static inline void fscrypt_set_ops(struct super_block *sb,
 
 bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode);
 
-void fscrypt_set_bio_crypt_ctx(struct bio *bio,
-                              const struct inode *inode, u64 first_lblk,
-                              gfp_t gfp_mask);
+void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
+                              loff_t pos, gfp_t gfp_mask);
 
 bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
                           loff_t pos);
@@ -885,7 +884,7 @@ static inline bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode)
 
 static inline void fscrypt_set_bio_crypt_ctx(struct bio *bio,
                                             const struct inode *inode,
-                                            u64 first_lblk, gfp_t gfp_mask) { }
+                                            loff_t pos, gfp_t gfp_mask) { }
 
 static inline bool fscrypt_mergeable_bio(struct bio *bio,
                                         const struct inode *inode,