for en/decryption. Users don't need to worry about freeing the bio_crypt_ctx
later, as that happens automatically when the bio is freed or reset.
+To submit a bio that uses inline encryption, users must call
+``blk_crypto_submit_bio()`` instead of the usual ``submit_bio()``. This
+submits the bio directly to the underlying driver if the device natively
+supports the bio's crypto configuration, or else hands the bio to the
+blk-crypto fallback routines, which use the kernel crypto API to do the
+en/decryption and submit normal bios to the underlying drivers.
+
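+For example, a filesystem read path might look roughly like the following
+(an illustrative sketch only: ``key`` is assumed to be an already-initialized
+``blk_crypto_key``, and ``bdev``, ``lblk_num``, and ``page`` stand in for the
+caller's I/O state)::
+
+	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_NOFS);
+	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { lblk_num };
+
+	bio_crypt_set_ctx(bio, &key, dun, GFP_NOFS);
+	__bio_add_page(bio, page, PAGE_SIZE, 0);
+	/* set bi_iter.bi_sector, bi_end_io, etc. as usual, then: */
+	blk_crypto_submit_bio(bio);
+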
Finally, when done using inline encryption with a blk_crypto_key on a
block_device, users must call ``blk_crypto_evict_key()``. This ensures that
the key is evicted from all keyslots it may be programmed into and unlinked from
/* If plug is not used, add new plug here to cache nsecs time. */
struct blk_plug plug;
- if (unlikely(!blk_crypto_bio_prep(bio)))
- return;
-
blk_start_plug(&plug);
if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO)) {
if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bdev))
goto not_supported;
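+		/*
+		 * A bio that still has a crypto context here must carry data
+		 * and must use a crypto configuration that the device supports
+		 * natively; bios that need the blk-crypto fallback were
+		 * already taken care of in blk_crypto_submit_bio().
+		 */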
+ if (bio_has_crypt_ctx(bio)) {
+ if (WARN_ON_ONCE(!bio_has_data(bio)))
+ goto end_io;
+ if (!blk_crypto_supported(bio))
+ goto not_supported;
+ }
+
if (should_fail_bio(bio))
goto end_io;
bio_check_ro(bio);
int blk_crypto_ioctl(struct block_device *bdev, unsigned int cmd,
void __user *argp);
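+/*
+ * Check whether the crypto configuration of @bio's key is natively supported
+ * by the inline encryption hardware of @bio's target device.
+ */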
+static inline bool blk_crypto_supported(struct bio *bio)
+{
+ return blk_crypto_config_supported_natively(bio->bi_bdev,
+ &bio->bi_crypt_context->bc_key->crypto_cfg);
+}
+
#else /* CONFIG_BLK_INLINE_ENCRYPTION */
static inline int blk_crypto_sysfs_register(struct gendisk *disk)
return -ENOTTY;
}
+static inline bool blk_crypto_supported(struct bio *bio)
+{
+ return false;
+}
+
#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
#endif
}
-bool __blk_crypto_bio_prep(struct bio *bio);
-static inline bool blk_crypto_bio_prep(struct bio *bio)
-{
- if (bio_has_crypt_ctx(bio))
- return __blk_crypto_bio_prep(bio);
- return true;
-}
-
blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq);
static inline blk_status_t blk_crypto_rq_get_keyslot(struct request *rq)
{
rq->crypt_ctx = NULL;
}
-/**
- * __blk_crypto_bio_prep - Prepare bio for inline encryption
- * @bio: bio to prepare
- *
- * If the bio crypt context provided for the bio is supported by the underlying
- * device's inline encryption hardware, do nothing.
- *
- * Otherwise, try to perform en/decryption for this bio by falling back to the
- * kernel crypto API. For encryption this means submitting newly allocated
- * bios for the encrypted payload while keeping back the source bio until they
- * complete, while for reads the decryption happens in-place by a hooked in
- * completion handler.
- *
- * Caller must ensure bio has bio_crypt_ctx.
+/*
+ * Process a bio with a crypto context. Returns true if the caller should
+ * submit the passed-in bio, false if the bio has been consumed.
*
- * Return: true if @bio should be submitted to the driver by the caller, else
- * false. Sets bio->bi_status, calls bio_endio and returns false on error.
+ * See the kerneldoc comment for blk_crypto_submit_bio() for further details.
*/
-bool __blk_crypto_bio_prep(struct bio *bio)
+bool __blk_crypto_submit_bio(struct bio *bio)
{
const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key;
struct block_device *bdev = bio->bi_bdev;
return true;
}
+EXPORT_SYMBOL_GPL(__blk_crypto_submit_bio);
int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
gfp_t gfp_mask)
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
+#include <linux/blk-crypto.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
wbc_account_cgroup_owner(wbc, bh->b_folio, bh->b_size);
}
- submit_bio(bio);
+ blk_crypto_submit_bio(bio);
}
void submit_bh(blk_opf_t opf, struct buffer_head *bh)
}
atomic_inc(&done.pending);
- submit_bio(bio);
+ blk_crypto_submit_bio(bio);
}
fscrypt_zeroout_range_done(&done);
* Written by Theodore Ts'o, 2010.
*/
+#include <linux/blk-crypto.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/highuid.h>
if (bio) {
if (io->io_wbc->sync_mode == WB_SYNC_ALL)
io->io_bio->bi_opf |= REQ_SYNC;
- submit_bio(io->io_bio);
+ blk_crypto_submit_bio(io->io_bio);
}
io->io_bio = NULL;
}
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
+#include <linux/blk-crypto.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
if (bio && (last_block_in_bio != first_block - 1 ||
!fscrypt_mergeable_bio(bio, inode, next_block))) {
submit_and_realloc:
- submit_bio(bio);
+ blk_crypto_submit_bio(bio);
bio = NULL;
}
if (bio == NULL) {
if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
(relative_block == map.m_len)) ||
(first_hole != blocks_per_folio)) {
- submit_bio(bio);
+ blk_crypto_submit_bio(bio);
bio = NULL;
} else
last_block_in_bio = first_block + blocks_per_folio - 1;
continue;
confused:
if (bio) {
- submit_bio(bio);
+ blk_crypto_submit_bio(bio);
bio = NULL;
}
if (!folio_test_uptodate(folio))
; /* A label shall be followed by a statement until C23 */
}
if (bio)
- submit_bio(bio);
+ blk_crypto_submit_bio(bio);
return 0;
}
trace_f2fs_submit_read_bio(sbi->sb, type, bio);
iostat_update_submit_ctx(bio, type);
- submit_bio(bio);
+ blk_crypto_submit_bio(bio);
}
static void f2fs_submit_write_bio(struct f2fs_sb_info *sbi, struct bio *bio,
WARN_ON_ONCE(is_read_io(bio_op(bio)));
trace_f2fs_submit_write_bio(sbi->sb, type, bio);
iostat_update_submit_ctx(bio, type);
- submit_bio(bio);
+ blk_crypto_submit_bio(bio);
}
static void __submit_merged_bio(struct f2fs_bio_info *io)
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*/
+#include <linux/blk-crypto.h>
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
enum temp_type temp = f2fs_get_segment_temp(sbi, type);
bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi, DATA, temp);
- submit_bio(bio);
+ blk_crypto_submit_bio(bio);
}
static const struct iomap_dio_ops f2fs_iomap_dio_write_ops = {
* Copyright (C) 2010 Red Hat, Inc.
* Copyright (c) 2016-2025 Christoph Hellwig.
*/
+#include <linux/blk-crypto.h>
#include <linux/fscrypt.h>
#include <linux/pagemap.h>
#include <linux/iomap.h>
dio->dops->submit_io(iter, bio, pos);
} else {
WARN_ON_ONCE(iter->iomap.flags & IOMAP_F_ANON_WRITE);
- submit_bio(bio);
+ blk_crypto_submit_bio(bio);
}
}
#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
+bool __blk_crypto_submit_bio(struct bio *bio);
+
+/**
+ * blk_crypto_submit_bio - Submit a bio that may have a crypto context
+ * @bio: bio to submit
+ *
+ * If @bio has no crypto context, or the crypto context attached to @bio is
+ * supported by the underlying device's inline encryption hardware, just submit
+ * @bio.
+ *
+ * Otherwise, try to perform the en/decryption for this bio by falling back to
+ * the kernel crypto API. For writes this means submitting newly allocated
+ * bios for the encrypted payload while holding back the source bio until they
+ * complete; for reads the decryption happens in-place in a completion handler
+ * hooked into @bio.
+ */
+static inline void blk_crypto_submit_bio(struct bio *bio)
+{
+ if (!bio_has_crypt_ctx(bio) || __blk_crypto_submit_bio(bio))
+ submit_bio(bio);
+}
+
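+/*
+ * Note that I/O paths that never attach a bio_crypt_ctx can keep calling
+ * submit_bio() directly; blk_crypto_submit_bio() is only needed where a bio
+ * may carry a crypto context.
+ */
+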
int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask);
/**
* bio_crypt_clone - clone bio encryption context