bio_iter_iovec(src_bio, src_bio->bi_iter);
struct page *enc_page = enc_pages[enc_idx];
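+ /* Reject segments that are not aligned to the crypto data unit size */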
+ if (!IS_ALIGNED(src_bv.bv_len | src_bv.bv_offset,
+ data_unit_size)) {
+ enc_bio->bi_status = BLK_STS_INVAL;
+ goto out_free_enc_bio;
+ }
+
__bio_add_page(enc_bio, enc_page, src_bv.bv_len,
src_bv.bv_offset);
/* Encrypt each data unit in the segment */
for (i = 0; i < src_bv.bv_len; i += data_unit_size) {
blk_crypto_dun_to_iv(curr_dun, &iv);
- if (crypto_skcipher_encrypt(ciph_req))
+ if (crypto_skcipher_encrypt(ciph_req)) {
+ enc_bio->bi_status = BLK_STS_IOERR;
goto out_free_enc_bio;
+ }
bio_crypt_dun_increment(curr_dun, 1);
src.offset += data_unit_size;
dst.offset += data_unit_size;
/* Hand any remaining bounce pages to enc_bio so its endio frees them */
for (; enc_idx < nr_enc_pages; enc_idx++)
__bio_add_page(enc_bio, enc_pages[enc_idx], PAGE_SIZE, 0);
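+ /*
+ * The failure site already set enc_bio->bi_status, so complete the bio
+ * directly instead of letting bio_io_error() overwrite the status with
+ * BLK_STS_IOERR.
+ */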
- bio_io_error(enc_bio);
+ bio_endio(enc_bio);
}
/* Decrypt each segment in the bio */
__bio_for_each_segment(bv, bio, iter, iter) {
struct page *page = bv.bv_page;
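+ /* Reject segments that are not aligned to the crypto data unit size */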
+ if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
+ return BLK_STS_INVAL;
+
sg_set_page(&sg, page, data_unit_size, bv.bv_offset);
/* Decrypt each data unit in the segment */
return !bc1 || bio_crypt_dun_is_contiguous(bc1, bc1_bytes, bc2->bc_dun);
}
-/* Check that all I/O segments are data unit aligned. */
-static bool bio_crypt_check_alignment(struct bio *bio)
-{
- const unsigned int data_unit_size =
- bio->bi_crypt_context->bc_key->crypto_cfg.data_unit_size;
- struct bvec_iter iter;
- struct bio_vec bv;
-
- bio_for_each_segment(bv, bio, iter) {
- if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
- return false;
- }
-
- return true;
-}
-
blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq)
{
return blk_crypto_get_keyslot(rq->q->crypto_profile,
return false;
}
- if (!bio_crypt_check_alignment(bio)) {
- bio->bi_status = BLK_STS_INVAL;
- bio_endio(bio);
- return false;
- }
-
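+ /*
+ * Data unit alignment is now enforced by bio_split_io_at() and by the
+ * fallback, so no explicit check is needed here.
+ */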
/*
* If the device does not natively support the encryption context, try to use
* the fallback if available.
int bio_split_io_at(struct bio *bio, const struct queue_limits *lim,
unsigned *segs, unsigned max_bytes, unsigned len_align_mask)
{
+ struct bio_crypt_ctx *bc = bio_crypt_ctx(bio);
struct bio_vec bv, bvprv, *bvprvp = NULL;
unsigned nsegs = 0, bytes = 0, gaps = 0;
struct bvec_iter iter;
+ unsigned start_align_mask = lim->dma_alignment;
+
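+ /*
+ * Bios with an encryption context additionally require each segment's
+ * offset and length to be aligned to the crypto data unit size, so
+ * fold the data unit size into both alignment masks.
+ */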
+ if (bc) {
+ unsigned du_mask = bc->bc_key->crypto_cfg.data_unit_size - 1;
+
+ start_align_mask |= du_mask;
+ len_align_mask |= du_mask;
+ }
+
bio_for_each_bvec(bv, bio, iter) {
- if (bv.bv_offset & lim->dma_alignment ||
+ if (bv.bv_offset & start_align_mask ||
bv.bv_len & len_align_mask)
return -EINVAL;