// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2003 Jana Saout <jana@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2020 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2013-2020 Milan Broz <gmazyland@gmail.com>
 *
 * This file is released under the GPL.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/key.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/backing-dev.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <linux/rbtree.h>
#include <linux/ctype.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/skcipher.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/utils.h>
#include <linux/rtnetlink.h> /* for struct rtattr and RTA macros only */
#include <linux/key-type.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
#include <keys/trusted-type.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "crypt"
/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	struct bvec_iter iter_in;
	struct bvec_iter iter_out;
	u64 cc_sector;
	atomic_t cc_pending;
	union {
		struct skcipher_request *req;
		struct aead_request *req_aead;
	} r;
};

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct crypt_config *cc;
	struct bio *base_bio;
	u8 *integrity_metadata;
	bool integrity_metadata_from_pool:1;
	bool in_tasklet:1;

	struct work_struct work;
	struct tasklet_struct tasklet;

	struct convert_context ctx;

	atomic_t io_pending;
	blk_status_t error;
	sector_t sector;

	struct rb_node rb_node;
} CRYPTO_MINALIGN_ATTR;
struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in[4];
	struct scatterlist sg_out[4];
	u64 iv_sector;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv,
			 struct dm_crypt_request *dmreq);
	int (*post)(struct crypt_config *cc, u8 *iv,
		    struct dm_crypt_request *dmreq);
};
struct iv_benbi_private {
	int shift;
};

#define LMK_SEED_SIZE 64 /* hash + 0 */
struct iv_lmk_private {
	struct crypto_shash *hash_tfm;
	u8 *seed;
};

#define TCW_WHITENING_SIZE 16
struct iv_tcw_private {
	struct crypto_shash *crc32_tfm;
	u8 *iv_seed;
	u8 *whitening;
};

#define ELEPHANT_MAX_KEY_SIZE 32
struct iv_elephant_private {
	struct crypto_skcipher *tfm;
};
/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
	     DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
	     DM_CRYPT_NO_READ_WORKQUEUE, DM_CRYPT_NO_WRITE_WORKQUEUE,
	     DM_CRYPT_WRITE_INLINE };

enum cipher_flags {
	CRYPT_MODE_INTEGRITY_AEAD,	/* Use authenticated mode for cipher */
	CRYPT_IV_LARGE_SECTORS,		/* Calculate IV from sector_size, not 512B sectors */
	CRYPT_ENCRYPT_PREPROCESS,	/* Must preprocess data for encryption (elephant) */
};
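
/*
 * Illustration only (the option names below are quoted from the dm-crypt
 * documentation and are not defined in this excerpt): most of the enum flags
 * above are set from optional table arguments parsed in the constructor,
 * e.g. "same_cpu_crypt" -> DM_CRYPT_SAME_CPU, "submit_from_crypt_cpus" ->
 * DM_CRYPT_NO_OFFLOAD, "no_read_workqueue" -> DM_CRYPT_NO_READ_WORKQUEUE and
 * "no_write_workqueue" -> DM_CRYPT_NO_WRITE_WORKQUEUE, while the cipher_flags
 * bits are derived from the cipher specification itself (AEAD mode,
 * large-sector IV calculation, the elephant diffuser).
 */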
/*
 * The fields in here must be read only after initialization.
 */
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	struct percpu_counter n_allocated_pages;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	spinlock_t write_thread_lock;
	struct task_struct *write_thread;
	struct rb_root write_tree;

	const struct crypt_iv_operations *iv_gen_ops;
	union {
		struct iv_benbi_private benbi;
		struct iv_lmk_private lmk;
		struct iv_tcw_private tcw;
		struct iv_elephant_private elephant;
	} iv_gen_private;
	u64 iv_offset;
	unsigned int iv_size;
	unsigned short sector_size;
	unsigned char sector_shift;

	union {
		struct crypto_skcipher **tfms;
		struct crypto_aead **tfms_aead;
	} cipher_tfm;
	unsigned int tfms_count;
	unsigned long cipher_flags;

	/*
	 * Layout of each crypto request:
	 *
	 *   struct skcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;

	unsigned int per_bio_data_size;

	unsigned long flags;
	unsigned int key_size;
	unsigned int key_parts;      /* independent parts in key buffer */
	unsigned int key_extra_size; /* additional keys length */
	unsigned int key_mac_size;   /* MAC key size for authenc(...) */

	unsigned int integrity_tag_size;
	unsigned int integrity_iv_size;
	unsigned int on_disk_tag_size;

	/*
	 * pool for per bio private data, crypto requests,
	 * encryption requests/buffer pages and integrity tags
	 */
	unsigned int tag_pool_max_sectors;
	mempool_t tag_pool;
	mempool_t req_pool;
	mempool_t page_pool;

	struct bio_set bs;
	struct mutex bio_alloc_lock;

	u8 *authenc_key; /* space for keys in authenc() format (if used) */
	u8 key[];
};

#define MAX_TAG_SIZE	480
#define POOL_ENTRY_SIZE	512

static DEFINE_SPINLOCK(dm_crypt_clients_lock);
static unsigned int dm_crypt_clients_n;
static volatile unsigned long dm_crypt_pages_per_client;
#define DM_CRYPT_MEMORY_PERCENT			2
#define DM_CRYPT_MIN_PAGES_PER_CLIENT		(BIO_MAX_VECS * 16)
static void crypt_endio(struct bio *clone);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
					     struct scatterlist *sg);

static bool crypt_integrity_aead(struct crypt_config *cc);
/*
 * Use this to access cipher attributes that are independent of the key.
 */
static struct crypto_skcipher *any_tfm(struct crypt_config *cc)
{
	return cc->cipher_tfm.tfms[0];
}

static struct crypto_aead *any_tfm_aead(struct crypt_config *cc)
{
	return cc->cipher_tfm.tfms_aead[0];
}
/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64be: the initial vector is the 64-bit big-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possible other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * lmk:  Compatible implementation of the block chaining mode used
 *       by the Loop-AES block device encryption system
 *       designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from the sector number, the data and
 *       optionally extra IV seed.
 *       This means that after decryption the first block
 *       of sector must be tweaked according to decrypted data.
 *       Loop-AES can use three encryption schemes:
 *         version 1: is plain aes-cbc mode
 *         version 2: uses 64 multikey scheme with lmk IV generator
 *         version 3: the same as version 2 with additional IV seed
 *                    (it uses 65 keys, last key is used as IV seed)
 *
 * tcw:  Compatible implementation of the block chaining mode used
 *       by the TrueCrypt device encryption system (prior to version 4.1).
 *       For more info see: https://gitlab.com/cryptsetup/cryptsetup/wikis/TrueCryptOnDiskFormat
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from initial key and the sector number.
 *       In addition, whitening value is applied on every sector, whitening
 *       is calculated from initial key, sector number and mixed using CRC32.
 *       Note that this encryption scheme is vulnerable to watermarking attacks
 *       and should be used for old compatible containers access only.
 *
 * eboiv: Encrypted byte-offset IV (used in Bitlocker in CBC mode)
 *        The IV is encrypted little-endian byte-offset (with the same key
 *        and cipher as the volume).
 *
 * elephant: The extended version of eboiv with additional Elephant diffuser
 *           used with Bitlocker CBC mode.
 *           This mode was used in older Windows systems
 *           https://download.microsoft.com/download/0/2/3/0238acaf-d3bf-4a6d-b3d6-0a0be4bbb36e/bitlockercipher200608.pdf
 */
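
/*
 * Illustrative examples only (assumed typical cipher specifications, not
 * parsed in this excerpt): the IV mode is selected by the suffix of the
 * cipher field in the mapping table, e.g.
 *
 *   aes-xts-plain64        -> crypt_iv_plain64_ops
 *   aes-cbc-essiv:sha256   -> crypt_iv_essiv_ops
 *   aes-lrw-benbi          -> crypt_iv_benbi_ops
 *   aes-cbc-tcw            -> crypt_iv_tcw_ops
 *
 * A full table line is roughly "<start> <len> crypt <cipher> <key>
 * <iv_offset> <device> <offset> [<#opt_params> <opts>]"; the parsing itself
 * happens in the target constructor, outside this excerpt.
 */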
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);

	return 0;
}

static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}

static int crypt_iv_plain64be_gen(struct crypt_config *cc, u8 *iv,
				  struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	/* iv_size is at least of size u64; usually it is 16 bytes */
	*(__be64 *)&iv[cc->iv_size - sizeof(u64)] = cpu_to_be64(dmreq->iv_sector);

	return 0;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	/*
	 * ESSIV encryption of the IV is now handled by the crypto API,
	 * so just pass the plain sector number here.
	 */
	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned int bs;
	int log;

	if (crypt_integrity_aead(cc))
		bs = crypto_aead_blocksize(any_tfm_aead(cc));
	else
		bs = crypto_skcipher_blocksize(any_tfm(cc));
	log = ilog2(bs);

	/*
	 * We need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen.
	 */
	if (1 << log != bs) {
		ti->error = "cypher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cypher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}
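
/*
 * Worked example (illustration only): for a 16-byte block cipher such as AES
 * in LRW mode, bs = 16, log = 4, so benbi.shift = 9 - 4 = 5.  A 512-byte
 * sector then contains 2^5 = 32 cipher blocks, and crypt_iv_benbi_gen()
 * below derives the IV as the big-endian narrow-block count
 *
 *	val = cpu_to_be64(((u64)dmreq->iv_sector << 5) + 1);
 *
 * stored in the last 8 bytes of the IV buffer, i.e. the count of the first
 * narrow block of the sector, starting at 1.
 */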
static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}
413 static void crypt_iv_lmk_dtr(struct crypt_config
*cc
)
415 struct iv_lmk_private
*lmk
= &cc
->iv_gen_private
.lmk
;
417 if (lmk
->hash_tfm
&& !IS_ERR(lmk
->hash_tfm
))
418 crypto_free_shash(lmk
->hash_tfm
);
419 lmk
->hash_tfm
= NULL
;
421 kfree_sensitive(lmk
->seed
);
425 static int crypt_iv_lmk_ctr(struct crypt_config
*cc
, struct dm_target
*ti
,
428 struct iv_lmk_private
*lmk
= &cc
->iv_gen_private
.lmk
;
430 if (cc
->sector_size
!= (1 << SECTOR_SHIFT
)) {
431 ti
->error
= "Unsupported sector size for LMK";
435 lmk
->hash_tfm
= crypto_alloc_shash("md5", 0,
436 CRYPTO_ALG_ALLOCATES_MEMORY
);
437 if (IS_ERR(lmk
->hash_tfm
)) {
438 ti
->error
= "Error initializing LMK hash";
439 return PTR_ERR(lmk
->hash_tfm
);
442 /* No seed in LMK version 2 */
443 if (cc
->key_parts
== cc
->tfms_count
) {
448 lmk
->seed
= kzalloc(LMK_SEED_SIZE
, GFP_KERNEL
);
450 crypt_iv_lmk_dtr(cc
);
451 ti
->error
= "Error kmallocing seed storage in LMK";
458 static int crypt_iv_lmk_init(struct crypt_config
*cc
)
460 struct iv_lmk_private
*lmk
= &cc
->iv_gen_private
.lmk
;
461 int subkey_size
= cc
->key_size
/ cc
->key_parts
;
463 /* LMK seed is on the position of LMK_KEYS + 1 key */
465 memcpy(lmk
->seed
, cc
->key
+ (cc
->tfms_count
* subkey_size
),
466 crypto_shash_digestsize(lmk
->hash_tfm
));
471 static int crypt_iv_lmk_wipe(struct crypt_config
*cc
)
473 struct iv_lmk_private
*lmk
= &cc
->iv_gen_private
.lmk
;
476 memset(lmk
->seed
, 0, LMK_SEED_SIZE
);
481 static int crypt_iv_lmk_one(struct crypt_config
*cc
, u8
*iv
,
482 struct dm_crypt_request
*dmreq
,
485 struct iv_lmk_private
*lmk
= &cc
->iv_gen_private
.lmk
;
486 SHASH_DESC_ON_STACK(desc
, lmk
->hash_tfm
);
487 struct md5_state md5state
;
491 desc
->tfm
= lmk
->hash_tfm
;
493 r
= crypto_shash_init(desc
);
498 r
= crypto_shash_update(desc
, lmk
->seed
, LMK_SEED_SIZE
);
503 /* Sector is always 512B, block size 16, add data of blocks 1-31 */
504 r
= crypto_shash_update(desc
, data
+ 16, 16 * 31);
508 /* Sector is cropped to 56 bits here */
509 buf
[0] = cpu_to_le32(dmreq
->iv_sector
& 0xFFFFFFFF);
510 buf
[1] = cpu_to_le32((((u64
)dmreq
->iv_sector
>> 32) & 0x00FFFFFF) | 0x80000000);
511 buf
[2] = cpu_to_le32(4024);
513 r
= crypto_shash_update(desc
, (u8
*)buf
, sizeof(buf
));
517 /* No MD5 padding here */
518 r
= crypto_shash_export(desc
, &md5state
);
522 for (i
= 0; i
< MD5_HASH_WORDS
; i
++)
523 __cpu_to_le32s(&md5state
.hash
[i
]);
524 memcpy(iv
, &md5state
.hash
, cc
->iv_size
);
529 static int crypt_iv_lmk_gen(struct crypt_config
*cc
, u8
*iv
,
530 struct dm_crypt_request
*dmreq
)
532 struct scatterlist
*sg
;
536 if (bio_data_dir(dmreq
->ctx
->bio_in
) == WRITE
) {
537 sg
= crypt_get_sg_data(cc
, dmreq
->sg_in
);
538 src
= kmap_local_page(sg_page(sg
));
539 r
= crypt_iv_lmk_one(cc
, iv
, dmreq
, src
+ sg
->offset
);
542 memset(iv
, 0, cc
->iv_size
);
547 static int crypt_iv_lmk_post(struct crypt_config
*cc
, u8
*iv
,
548 struct dm_crypt_request
*dmreq
)
550 struct scatterlist
*sg
;
554 if (bio_data_dir(dmreq
->ctx
->bio_in
) == WRITE
)
557 sg
= crypt_get_sg_data(cc
, dmreq
->sg_out
);
558 dst
= kmap_local_page(sg_page(sg
));
559 r
= crypt_iv_lmk_one(cc
, iv
, dmreq
, dst
+ sg
->offset
);
561 /* Tweak the first block of plaintext sector */
563 crypto_xor(dst
+ sg
->offset
, iv
, cc
->iv_size
);
569 static void crypt_iv_tcw_dtr(struct crypt_config
*cc
)
571 struct iv_tcw_private
*tcw
= &cc
->iv_gen_private
.tcw
;
573 kfree_sensitive(tcw
->iv_seed
);
575 kfree_sensitive(tcw
->whitening
);
576 tcw
->whitening
= NULL
;
578 if (tcw
->crc32_tfm
&& !IS_ERR(tcw
->crc32_tfm
))
579 crypto_free_shash(tcw
->crc32_tfm
);
580 tcw
->crc32_tfm
= NULL
;
583 static int crypt_iv_tcw_ctr(struct crypt_config
*cc
, struct dm_target
*ti
,
586 struct iv_tcw_private
*tcw
= &cc
->iv_gen_private
.tcw
;
588 if (cc
->sector_size
!= (1 << SECTOR_SHIFT
)) {
589 ti
->error
= "Unsupported sector size for TCW";
593 if (cc
->key_size
<= (cc
->iv_size
+ TCW_WHITENING_SIZE
)) {
594 ti
->error
= "Wrong key size for TCW";
598 tcw
->crc32_tfm
= crypto_alloc_shash("crc32", 0,
599 CRYPTO_ALG_ALLOCATES_MEMORY
);
600 if (IS_ERR(tcw
->crc32_tfm
)) {
601 ti
->error
= "Error initializing CRC32 in TCW";
602 return PTR_ERR(tcw
->crc32_tfm
);
605 tcw
->iv_seed
= kzalloc(cc
->iv_size
, GFP_KERNEL
);
606 tcw
->whitening
= kzalloc(TCW_WHITENING_SIZE
, GFP_KERNEL
);
607 if (!tcw
->iv_seed
|| !tcw
->whitening
) {
608 crypt_iv_tcw_dtr(cc
);
609 ti
->error
= "Error allocating seed storage in TCW";
616 static int crypt_iv_tcw_init(struct crypt_config
*cc
)
618 struct iv_tcw_private
*tcw
= &cc
->iv_gen_private
.tcw
;
619 int key_offset
= cc
->key_size
- cc
->iv_size
- TCW_WHITENING_SIZE
;
621 memcpy(tcw
->iv_seed
, &cc
->key
[key_offset
], cc
->iv_size
);
622 memcpy(tcw
->whitening
, &cc
->key
[key_offset
+ cc
->iv_size
],
628 static int crypt_iv_tcw_wipe(struct crypt_config
*cc
)
630 struct iv_tcw_private
*tcw
= &cc
->iv_gen_private
.tcw
;
632 memset(tcw
->iv_seed
, 0, cc
->iv_size
);
633 memset(tcw
->whitening
, 0, TCW_WHITENING_SIZE
);
638 static int crypt_iv_tcw_whitening(struct crypt_config
*cc
,
639 struct dm_crypt_request
*dmreq
,
642 struct iv_tcw_private
*tcw
= &cc
->iv_gen_private
.tcw
;
643 __le64 sector
= cpu_to_le64(dmreq
->iv_sector
);
644 u8 buf
[TCW_WHITENING_SIZE
];
645 SHASH_DESC_ON_STACK(desc
, tcw
->crc32_tfm
);
648 /* xor whitening with sector number */
649 crypto_xor_cpy(buf
, tcw
->whitening
, (u8
*)§or
, 8);
650 crypto_xor_cpy(&buf
[8], tcw
->whitening
+ 8, (u8
*)§or
, 8);
652 /* calculate crc32 for every 32bit part and xor it */
653 desc
->tfm
= tcw
->crc32_tfm
;
654 for (i
= 0; i
< 4; i
++) {
655 r
= crypto_shash_digest(desc
, &buf
[i
* 4], 4, &buf
[i
* 4]);
659 crypto_xor(&buf
[0], &buf
[12], 4);
660 crypto_xor(&buf
[4], &buf
[8], 4);
662 /* apply whitening (8 bytes) to whole sector */
663 for (i
= 0; i
< ((1 << SECTOR_SHIFT
) / 8); i
++)
664 crypto_xor(data
+ i
* 8, buf
, 8);
666 memzero_explicit(buf
, sizeof(buf
));
670 static int crypt_iv_tcw_gen(struct crypt_config
*cc
, u8
*iv
,
671 struct dm_crypt_request
*dmreq
)
673 struct scatterlist
*sg
;
674 struct iv_tcw_private
*tcw
= &cc
->iv_gen_private
.tcw
;
675 __le64 sector
= cpu_to_le64(dmreq
->iv_sector
);
679 /* Remove whitening from ciphertext */
680 if (bio_data_dir(dmreq
->ctx
->bio_in
) != WRITE
) {
681 sg
= crypt_get_sg_data(cc
, dmreq
->sg_in
);
682 src
= kmap_local_page(sg_page(sg
));
683 r
= crypt_iv_tcw_whitening(cc
, dmreq
, src
+ sg
->offset
);
688 crypto_xor_cpy(iv
, tcw
->iv_seed
, (u8
*)§or
, 8);
690 crypto_xor_cpy(&iv
[8], tcw
->iv_seed
+ 8, (u8
*)§or
,
696 static int crypt_iv_tcw_post(struct crypt_config
*cc
, u8
*iv
,
697 struct dm_crypt_request
*dmreq
)
699 struct scatterlist
*sg
;
703 if (bio_data_dir(dmreq
->ctx
->bio_in
) != WRITE
)
706 /* Apply whitening on ciphertext */
707 sg
= crypt_get_sg_data(cc
, dmreq
->sg_out
);
708 dst
= kmap_local_page(sg_page(sg
));
709 r
= crypt_iv_tcw_whitening(cc
, dmreq
, dst
+ sg
->offset
);
715 static int crypt_iv_random_gen(struct crypt_config
*cc
, u8
*iv
,
716 struct dm_crypt_request
*dmreq
)
718 /* Used only for writes, there must be an additional space to store IV */
719 get_random_bytes(iv
, cc
->iv_size
);
723 static int crypt_iv_eboiv_ctr(struct crypt_config
*cc
, struct dm_target
*ti
,
726 if (crypt_integrity_aead(cc
)) {
727 ti
->error
= "AEAD transforms not supported for EBOIV";
731 if (crypto_skcipher_blocksize(any_tfm(cc
)) != cc
->iv_size
) {
732 ti
->error
= "Block size of EBOIV cipher does not match IV size of block cipher";
739 static int crypt_iv_eboiv_gen(struct crypt_config
*cc
, u8
*iv
,
740 struct dm_crypt_request
*dmreq
)
742 struct crypto_skcipher
*tfm
= any_tfm(cc
);
743 struct skcipher_request
*req
;
744 struct scatterlist src
, dst
;
745 DECLARE_CRYPTO_WAIT(wait
);
746 unsigned int reqsize
;
750 reqsize
= ALIGN(crypto_skcipher_reqsize(tfm
), __alignof__(__le64
));
752 req
= kmalloc(reqsize
+ cc
->iv_size
, GFP_NOIO
);
756 skcipher_request_set_tfm(req
, tfm
);
758 buf
= (u8
*)req
+ reqsize
;
759 memset(buf
, 0, cc
->iv_size
);
760 *(__le64
*)buf
= cpu_to_le64(dmreq
->iv_sector
* cc
->sector_size
);
762 sg_init_one(&src
, page_address(ZERO_PAGE(0)), cc
->iv_size
);
763 sg_init_one(&dst
, iv
, cc
->iv_size
);
764 skcipher_request_set_crypt(req
, &src
, &dst
, cc
->iv_size
, buf
);
765 skcipher_request_set_callback(req
, 0, crypto_req_done
, &wait
);
766 err
= crypto_wait_req(crypto_skcipher_encrypt(req
), &wait
);
767 kfree_sensitive(req
);
772 static void crypt_iv_elephant_dtr(struct crypt_config
*cc
)
774 struct iv_elephant_private
*elephant
= &cc
->iv_gen_private
.elephant
;
776 crypto_free_skcipher(elephant
->tfm
);
777 elephant
->tfm
= NULL
;
780 static int crypt_iv_elephant_ctr(struct crypt_config
*cc
, struct dm_target
*ti
,
783 struct iv_elephant_private
*elephant
= &cc
->iv_gen_private
.elephant
;
786 elephant
->tfm
= crypto_alloc_skcipher("ecb(aes)", 0,
787 CRYPTO_ALG_ALLOCATES_MEMORY
);
788 if (IS_ERR(elephant
->tfm
)) {
789 r
= PTR_ERR(elephant
->tfm
);
790 elephant
->tfm
= NULL
;
794 r
= crypt_iv_eboiv_ctr(cc
, ti
, NULL
);
796 crypt_iv_elephant_dtr(cc
);
800 static void diffuser_disk_to_cpu(u32
*d
, size_t n
)
802 #ifndef __LITTLE_ENDIAN
805 for (i
= 0; i
< n
; i
++)
806 d
[i
] = le32_to_cpu((__le32
)d
[i
]);
810 static void diffuser_cpu_to_disk(__le32
*d
, size_t n
)
812 #ifndef __LITTLE_ENDIAN
815 for (i
= 0; i
< n
; i
++)
816 d
[i
] = cpu_to_le32((u32
)d
[i
]);
820 static void diffuser_a_decrypt(u32
*d
, size_t n
)
824 for (i
= 0; i
< 5; i
++) {
829 while (i1
< (n
- 1)) {
830 d
[i1
] += d
[i2
] ^ (d
[i3
] << 9 | d
[i3
] >> 23);
836 d
[i1
] += d
[i2
] ^ d
[i3
];
842 d
[i1
] += d
[i2
] ^ (d
[i3
] << 13 | d
[i3
] >> 19);
845 d
[i1
] += d
[i2
] ^ d
[i3
];
851 static void diffuser_a_encrypt(u32
*d
, size_t n
)
855 for (i
= 0; i
< 5; i
++) {
861 d
[i1
] -= d
[i2
] ^ d
[i3
];
864 d
[i1
] -= d
[i2
] ^ (d
[i3
] << 13 | d
[i3
] >> 19);
870 d
[i1
] -= d
[i2
] ^ d
[i3
];
876 d
[i1
] -= d
[i2
] ^ (d
[i3
] << 9 | d
[i3
] >> 23);
882 static void diffuser_b_decrypt(u32
*d
, size_t n
)
886 for (i
= 0; i
< 3; i
++) {
891 while (i1
< (n
- 1)) {
892 d
[i1
] += d
[i2
] ^ d
[i3
];
895 d
[i1
] += d
[i2
] ^ (d
[i3
] << 10 | d
[i3
] >> 22);
901 d
[i1
] += d
[i2
] ^ d
[i3
];
907 d
[i1
] += d
[i2
] ^ (d
[i3
] << 25 | d
[i3
] >> 7);
913 static void diffuser_b_encrypt(u32
*d
, size_t n
)
917 for (i
= 0; i
< 3; i
++) {
923 d
[i1
] -= d
[i2
] ^ (d
[i3
] << 25 | d
[i3
] >> 7);
929 d
[i1
] -= d
[i2
] ^ d
[i3
];
935 d
[i1
] -= d
[i2
] ^ (d
[i3
] << 10 | d
[i3
] >> 22);
938 d
[i1
] -= d
[i2
] ^ d
[i3
];
944 static int crypt_iv_elephant(struct crypt_config
*cc
, struct dm_crypt_request
*dmreq
)
946 struct iv_elephant_private
*elephant
= &cc
->iv_gen_private
.elephant
;
947 u8
*es
, *ks
, *data
, *data2
, *data_offset
;
948 struct skcipher_request
*req
;
949 struct scatterlist
*sg
, *sg2
, src
, dst
;
950 DECLARE_CRYPTO_WAIT(wait
);
953 req
= skcipher_request_alloc(elephant
->tfm
, GFP_NOIO
);
954 es
= kzalloc(16, GFP_NOIO
); /* Key for AES */
955 ks
= kzalloc(32, GFP_NOIO
); /* Elephant sector key */
957 if (!req
|| !es
|| !ks
) {
962 *(__le64
*)es
= cpu_to_le64(dmreq
->iv_sector
* cc
->sector_size
);
965 sg_init_one(&src
, es
, 16);
966 sg_init_one(&dst
, ks
, 16);
967 skcipher_request_set_crypt(req
, &src
, &dst
, 16, NULL
);
968 skcipher_request_set_callback(req
, 0, crypto_req_done
, &wait
);
969 r
= crypto_wait_req(crypto_skcipher_encrypt(req
), &wait
);
975 sg_init_one(&dst
, &ks
[16], 16);
976 r
= crypto_wait_req(crypto_skcipher_encrypt(req
), &wait
);
980 sg
= crypt_get_sg_data(cc
, dmreq
->sg_out
);
981 data
= kmap_local_page(sg_page(sg
));
982 data_offset
= data
+ sg
->offset
;
984 /* Cannot modify original bio, copy to sg_out and apply Elephant to it */
985 if (bio_data_dir(dmreq
->ctx
->bio_in
) == WRITE
) {
986 sg2
= crypt_get_sg_data(cc
, dmreq
->sg_in
);
987 data2
= kmap_local_page(sg_page(sg2
));
988 memcpy(data_offset
, data2
+ sg2
->offset
, cc
->sector_size
);
992 if (bio_data_dir(dmreq
->ctx
->bio_in
) != WRITE
) {
993 diffuser_disk_to_cpu((u32
*)data_offset
, cc
->sector_size
/ sizeof(u32
));
994 diffuser_b_decrypt((u32
*)data_offset
, cc
->sector_size
/ sizeof(u32
));
995 diffuser_a_decrypt((u32
*)data_offset
, cc
->sector_size
/ sizeof(u32
));
996 diffuser_cpu_to_disk((__le32
*)data_offset
, cc
->sector_size
/ sizeof(u32
));
999 for (i
= 0; i
< (cc
->sector_size
/ 32); i
++)
1000 crypto_xor(data_offset
+ i
* 32, ks
, 32);
1002 if (bio_data_dir(dmreq
->ctx
->bio_in
) == WRITE
) {
1003 diffuser_disk_to_cpu((u32
*)data_offset
, cc
->sector_size
/ sizeof(u32
));
1004 diffuser_a_encrypt((u32
*)data_offset
, cc
->sector_size
/ sizeof(u32
));
1005 diffuser_b_encrypt((u32
*)data_offset
, cc
->sector_size
/ sizeof(u32
));
1006 diffuser_cpu_to_disk((__le32
*)data_offset
, cc
->sector_size
/ sizeof(u32
));
1011 kfree_sensitive(ks
);
1012 kfree_sensitive(es
);
1013 skcipher_request_free(req
);
1017 static int crypt_iv_elephant_gen(struct crypt_config
*cc
, u8
*iv
,
1018 struct dm_crypt_request
*dmreq
)
1022 if (bio_data_dir(dmreq
->ctx
->bio_in
) == WRITE
) {
1023 r
= crypt_iv_elephant(cc
, dmreq
);
1028 return crypt_iv_eboiv_gen(cc
, iv
, dmreq
);
1031 static int crypt_iv_elephant_post(struct crypt_config
*cc
, u8
*iv
,
1032 struct dm_crypt_request
*dmreq
)
1034 if (bio_data_dir(dmreq
->ctx
->bio_in
) != WRITE
)
1035 return crypt_iv_elephant(cc
, dmreq
);
1040 static int crypt_iv_elephant_init(struct crypt_config
*cc
)
1042 struct iv_elephant_private
*elephant
= &cc
->iv_gen_private
.elephant
;
1043 int key_offset
= cc
->key_size
- cc
->key_extra_size
;
1045 return crypto_skcipher_setkey(elephant
->tfm
, &cc
->key
[key_offset
], cc
->key_extra_size
);
1048 static int crypt_iv_elephant_wipe(struct crypt_config
*cc
)
1050 struct iv_elephant_private
*elephant
= &cc
->iv_gen_private
.elephant
;
1051 u8 key
[ELEPHANT_MAX_KEY_SIZE
];
1053 memset(key
, 0, cc
->key_extra_size
);
1054 return crypto_skcipher_setkey(elephant
->tfm
, key
, cc
->key_extra_size
);
static const struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static const struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static const struct crypt_iv_operations crypt_iv_plain64be_ops = {
	.generator = crypt_iv_plain64be_gen
};

static const struct crypt_iv_operations crypt_iv_essiv_ops = {
	.generator = crypt_iv_essiv_gen
};

static const struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr	   = crypt_iv_benbi_ctr,
	.dtr	   = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static const struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static const struct crypt_iv_operations crypt_iv_lmk_ops = {
	.ctr	   = crypt_iv_lmk_ctr,
	.dtr	   = crypt_iv_lmk_dtr,
	.init	   = crypt_iv_lmk_init,
	.wipe	   = crypt_iv_lmk_wipe,
	.generator = crypt_iv_lmk_gen,
	.post	   = crypt_iv_lmk_post
};

static const struct crypt_iv_operations crypt_iv_tcw_ops = {
	.ctr	   = crypt_iv_tcw_ctr,
	.dtr	   = crypt_iv_tcw_dtr,
	.init	   = crypt_iv_tcw_init,
	.wipe	   = crypt_iv_tcw_wipe,
	.generator = crypt_iv_tcw_gen,
	.post	   = crypt_iv_tcw_post
};

static const struct crypt_iv_operations crypt_iv_random_ops = {
	.generator = crypt_iv_random_gen
};

static const struct crypt_iv_operations crypt_iv_eboiv_ops = {
	.ctr	   = crypt_iv_eboiv_ctr,
	.generator = crypt_iv_eboiv_gen
};

static const struct crypt_iv_operations crypt_iv_elephant_ops = {
	.ctr	   = crypt_iv_elephant_ctr,
	.dtr	   = crypt_iv_elephant_dtr,
	.init	   = crypt_iv_elephant_init,
	.wipe	   = crypt_iv_elephant_wipe,
	.generator = crypt_iv_elephant_gen,
	.post	   = crypt_iv_elephant_post
};
1120 * Integrity extensions
1122 static bool crypt_integrity_aead(struct crypt_config
*cc
)
1124 return test_bit(CRYPT_MODE_INTEGRITY_AEAD
, &cc
->cipher_flags
);
1127 static bool crypt_integrity_hmac(struct crypt_config
*cc
)
1129 return crypt_integrity_aead(cc
) && cc
->key_mac_size
;
1132 /* Get sg containing data */
1133 static struct scatterlist
*crypt_get_sg_data(struct crypt_config
*cc
,
1134 struct scatterlist
*sg
)
1136 if (unlikely(crypt_integrity_aead(cc
)))
1142 static int dm_crypt_integrity_io_alloc(struct dm_crypt_io
*io
, struct bio
*bio
)
1144 struct bio_integrity_payload
*bip
;
1145 unsigned int tag_len
;
1148 if (!bio_sectors(bio
) || !io
->cc
->on_disk_tag_size
)
1151 bip
= bio_integrity_alloc(bio
, GFP_NOIO
, 1);
1153 return PTR_ERR(bip
);
1155 tag_len
= io
->cc
->on_disk_tag_size
* (bio_sectors(bio
) >> io
->cc
->sector_shift
);
1157 bip
->bip_iter
.bi_sector
= io
->cc
->start
+ io
->sector
;
1159 ret
= bio_integrity_add_page(bio
, virt_to_page(io
->integrity_metadata
),
1160 tag_len
, offset_in_page(io
->integrity_metadata
));
1161 if (unlikely(ret
!= tag_len
))
1167 static int crypt_integrity_ctr(struct crypt_config
*cc
, struct dm_target
*ti
)
1169 #ifdef CONFIG_BLK_DEV_INTEGRITY
1170 struct blk_integrity
*bi
= blk_get_integrity(cc
->dev
->bdev
->bd_disk
);
1171 struct mapped_device
*md
= dm_table_get_md(ti
->table
);
1173 /* From now we require underlying device with our integrity profile */
1174 if (!bi
|| strcasecmp(bi
->profile
->name
, "DM-DIF-EXT-TAG")) {
1175 ti
->error
= "Integrity profile not supported.";
1179 if (bi
->tag_size
!= cc
->on_disk_tag_size
||
1180 bi
->tuple_size
!= cc
->on_disk_tag_size
) {
1181 ti
->error
= "Integrity profile tag size mismatch.";
1184 if (1 << bi
->interval_exp
!= cc
->sector_size
) {
1185 ti
->error
= "Integrity profile sector size mismatch.";
1189 if (crypt_integrity_aead(cc
)) {
1190 cc
->integrity_tag_size
= cc
->on_disk_tag_size
- cc
->integrity_iv_size
;
1191 DMDEBUG("%s: Integrity AEAD, tag size %u, IV size %u.", dm_device_name(md
),
1192 cc
->integrity_tag_size
, cc
->integrity_iv_size
);
1194 if (crypto_aead_setauthsize(any_tfm_aead(cc
), cc
->integrity_tag_size
)) {
1195 ti
->error
= "Integrity AEAD auth tag size is not supported.";
1198 } else if (cc
->integrity_iv_size
)
1199 DMDEBUG("%s: Additional per-sector space %u bytes for IV.", dm_device_name(md
),
1200 cc
->integrity_iv_size
);
1202 if ((cc
->integrity_tag_size
+ cc
->integrity_iv_size
) != bi
->tag_size
) {
1203 ti
->error
= "Not enough space for integrity tag in the profile.";
1209 ti
->error
= "Integrity profile not supported.";
1214 static void crypt_convert_init(struct crypt_config
*cc
,
1215 struct convert_context
*ctx
,
1216 struct bio
*bio_out
, struct bio
*bio_in
,
1219 ctx
->bio_in
= bio_in
;
1220 ctx
->bio_out
= bio_out
;
1222 ctx
->iter_in
= bio_in
->bi_iter
;
1224 ctx
->iter_out
= bio_out
->bi_iter
;
1225 ctx
->cc_sector
= sector
+ cc
->iv_offset
;
1226 init_completion(&ctx
->restart
);
static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     void *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static void *req_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq)
{
	return (void *)((char *)dmreq - cc->dmreq_start);
}

static u8 *iv_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{
	if (crypt_integrity_aead(cc))
		return (u8 *)ALIGN((unsigned long)(dmreq + 1),
			crypto_aead_alignmask(any_tfm_aead(cc)) + 1);
	else
		return (u8 *)ALIGN((unsigned long)(dmreq + 1),
			crypto_skcipher_alignmask(any_tfm(cc)) + 1);
}

static u8 *org_iv_of_dmreq(struct crypt_config *cc,
			   struct dm_crypt_request *dmreq)
{
	return iv_of_dmreq(cc, dmreq) + cc->iv_size;
}

static __le64 *org_sector_of_dmreq(struct crypt_config *cc,
				   struct dm_crypt_request *dmreq)
{
	u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size;

	return (__le64 *) ptr;
}

static unsigned int *org_tag_of_dmreq(struct crypt_config *cc,
				      struct dm_crypt_request *dmreq)
{
	u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size +
		  cc->iv_size + sizeof(uint64_t);

	return (unsigned int *)ptr;
}

static void *tag_from_dmreq(struct crypt_config *cc,
			    struct dm_crypt_request *dmreq)
{
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);

	return &io->integrity_metadata[*org_tag_of_dmreq(cc, dmreq) *
		cc->on_disk_tag_size];
}

static void *iv_tag_from_dmreq(struct crypt_config *cc,
			       struct dm_crypt_request *dmreq)
{
	return tag_from_dmreq(cc, dmreq) + cc->integrity_tag_size;
}
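
/*
 * Putting the helpers above together, the per-request memory handed out by
 * cc->req_pool looks roughly like this (sketch derived from the accessors;
 * exact offsets depend on the tfm's alignmask and cc->iv_size):
 *
 *   [ crypto request + tfm context ]              <- req
 *   [ padding up to cc->dmreq_start ]
 *   [ struct dm_crypt_request ]                   <- dmreq_of_req(req)
 *   [ padding for IV alignment ]
 *   [ IV, cc->iv_size bytes ]                     <- iv_of_dmreq()
 *   [ original IV, cc->iv_size bytes ]            <- org_iv_of_dmreq()
 *   [ original sector, sizeof(uint64_t) bytes ]   <- org_sector_of_dmreq()
 *   [ tag offset, sizeof(unsigned int) bytes ]    <- org_tag_of_dmreq()
 *
 * tag_from_dmreq() then indexes io->integrity_metadata with that tag offset
 * multiplied by cc->on_disk_tag_size.
 */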
1290 static int crypt_convert_block_aead(struct crypt_config
*cc
,
1291 struct convert_context
*ctx
,
1292 struct aead_request
*req
,
1293 unsigned int tag_offset
)
1295 struct bio_vec bv_in
= bio_iter_iovec(ctx
->bio_in
, ctx
->iter_in
);
1296 struct bio_vec bv_out
= bio_iter_iovec(ctx
->bio_out
, ctx
->iter_out
);
1297 struct dm_crypt_request
*dmreq
;
1298 u8
*iv
, *org_iv
, *tag_iv
, *tag
;
1302 BUG_ON(cc
->integrity_iv_size
&& cc
->integrity_iv_size
!= cc
->iv_size
);
1304 /* Reject unexpected unaligned bio. */
1305 if (unlikely(bv_in
.bv_len
& (cc
->sector_size
- 1)))
1308 dmreq
= dmreq_of_req(cc
, req
);
1309 dmreq
->iv_sector
= ctx
->cc_sector
;
1310 if (test_bit(CRYPT_IV_LARGE_SECTORS
, &cc
->cipher_flags
))
1311 dmreq
->iv_sector
>>= cc
->sector_shift
;
1314 *org_tag_of_dmreq(cc
, dmreq
) = tag_offset
;
1316 sector
= org_sector_of_dmreq(cc
, dmreq
);
1317 *sector
= cpu_to_le64(ctx
->cc_sector
- cc
->iv_offset
);
1319 iv
= iv_of_dmreq(cc
, dmreq
);
1320 org_iv
= org_iv_of_dmreq(cc
, dmreq
);
1321 tag
= tag_from_dmreq(cc
, dmreq
);
1322 tag_iv
= iv_tag_from_dmreq(cc
, dmreq
);
	/*
	 * |----- AAD -------|------ DATA -------|-- AUTH TAG --|
	 * | (authenticated) | (auth+encryption) |              |
	 * | sector_LE |  IV |  sector in/out    |  tag in/out  |
	 */
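	/*
	 * Mapping of that layout onto the scatterlists built below (readable
	 * straight off the sg_set_* calls):
	 *
	 *   sg_in[0] / sg_out[0]: 8-byte little-endian original sector (AAD)
	 *   sg_in[1] / sg_out[1]: org_iv, cc->iv_size bytes (AAD)
	 *   sg_in[2] / sg_out[2]: sector data, cc->sector_size bytes
	 *   sg_in[3] / sg_out[3]: auth tag, cc->integrity_tag_size bytes
	 *
	 * aead_request_set_ad() below marks the first sizeof(uint64_t) +
	 * cc->iv_size bytes as associated data only.
	 */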
1329 sg_init_table(dmreq
->sg_in
, 4);
1330 sg_set_buf(&dmreq
->sg_in
[0], sector
, sizeof(uint64_t));
1331 sg_set_buf(&dmreq
->sg_in
[1], org_iv
, cc
->iv_size
);
1332 sg_set_page(&dmreq
->sg_in
[2], bv_in
.bv_page
, cc
->sector_size
, bv_in
.bv_offset
);
1333 sg_set_buf(&dmreq
->sg_in
[3], tag
, cc
->integrity_tag_size
);
1335 sg_init_table(dmreq
->sg_out
, 4);
1336 sg_set_buf(&dmreq
->sg_out
[0], sector
, sizeof(uint64_t));
1337 sg_set_buf(&dmreq
->sg_out
[1], org_iv
, cc
->iv_size
);
1338 sg_set_page(&dmreq
->sg_out
[2], bv_out
.bv_page
, cc
->sector_size
, bv_out
.bv_offset
);
1339 sg_set_buf(&dmreq
->sg_out
[3], tag
, cc
->integrity_tag_size
);
1341 if (cc
->iv_gen_ops
) {
1342 /* For READs use IV stored in integrity metadata */
1343 if (cc
->integrity_iv_size
&& bio_data_dir(ctx
->bio_in
) != WRITE
) {
1344 memcpy(org_iv
, tag_iv
, cc
->iv_size
);
1346 r
= cc
->iv_gen_ops
->generator(cc
, org_iv
, dmreq
);
1349 /* Store generated IV in integrity metadata */
1350 if (cc
->integrity_iv_size
)
1351 memcpy(tag_iv
, org_iv
, cc
->iv_size
);
1353 /* Working copy of IV, to be modified in crypto API */
1354 memcpy(iv
, org_iv
, cc
->iv_size
);
1357 aead_request_set_ad(req
, sizeof(uint64_t) + cc
->iv_size
);
1358 if (bio_data_dir(ctx
->bio_in
) == WRITE
) {
1359 aead_request_set_crypt(req
, dmreq
->sg_in
, dmreq
->sg_out
,
1360 cc
->sector_size
, iv
);
1361 r
= crypto_aead_encrypt(req
);
1362 if (cc
->integrity_tag_size
+ cc
->integrity_iv_size
!= cc
->on_disk_tag_size
)
1363 memset(tag
+ cc
->integrity_tag_size
+ cc
->integrity_iv_size
, 0,
1364 cc
->on_disk_tag_size
- (cc
->integrity_tag_size
+ cc
->integrity_iv_size
));
1366 aead_request_set_crypt(req
, dmreq
->sg_in
, dmreq
->sg_out
,
1367 cc
->sector_size
+ cc
->integrity_tag_size
, iv
);
1368 r
= crypto_aead_decrypt(req
);
1371 if (r
== -EBADMSG
) {
1372 sector_t s
= le64_to_cpu(*sector
);
1374 DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
1375 ctx
->bio_in
->bi_bdev
, s
);
1376 dm_audit_log_bio(DM_MSG_PREFIX
, "integrity-aead",
1380 if (!r
&& cc
->iv_gen_ops
&& cc
->iv_gen_ops
->post
)
1381 r
= cc
->iv_gen_ops
->post(cc
, org_iv
, dmreq
);
1383 bio_advance_iter(ctx
->bio_in
, &ctx
->iter_in
, cc
->sector_size
);
1384 bio_advance_iter(ctx
->bio_out
, &ctx
->iter_out
, cc
->sector_size
);
1389 static int crypt_convert_block_skcipher(struct crypt_config
*cc
,
1390 struct convert_context
*ctx
,
1391 struct skcipher_request
*req
,
1392 unsigned int tag_offset
)
1394 struct bio_vec bv_in
= bio_iter_iovec(ctx
->bio_in
, ctx
->iter_in
);
1395 struct bio_vec bv_out
= bio_iter_iovec(ctx
->bio_out
, ctx
->iter_out
);
1396 struct scatterlist
*sg_in
, *sg_out
;
1397 struct dm_crypt_request
*dmreq
;
1398 u8
*iv
, *org_iv
, *tag_iv
;
1402 /* Reject unexpected unaligned bio. */
1403 if (unlikely(bv_in
.bv_len
& (cc
->sector_size
- 1)))
1406 dmreq
= dmreq_of_req(cc
, req
);
1407 dmreq
->iv_sector
= ctx
->cc_sector
;
1408 if (test_bit(CRYPT_IV_LARGE_SECTORS
, &cc
->cipher_flags
))
1409 dmreq
->iv_sector
>>= cc
->sector_shift
;
1412 *org_tag_of_dmreq(cc
, dmreq
) = tag_offset
;
1414 iv
= iv_of_dmreq(cc
, dmreq
);
1415 org_iv
= org_iv_of_dmreq(cc
, dmreq
);
1416 tag_iv
= iv_tag_from_dmreq(cc
, dmreq
);
1418 sector
= org_sector_of_dmreq(cc
, dmreq
);
1419 *sector
= cpu_to_le64(ctx
->cc_sector
- cc
->iv_offset
);
1421 /* For skcipher we use only the first sg item */
1422 sg_in
= &dmreq
->sg_in
[0];
1423 sg_out
= &dmreq
->sg_out
[0];
1425 sg_init_table(sg_in
, 1);
1426 sg_set_page(sg_in
, bv_in
.bv_page
, cc
->sector_size
, bv_in
.bv_offset
);
1428 sg_init_table(sg_out
, 1);
1429 sg_set_page(sg_out
, bv_out
.bv_page
, cc
->sector_size
, bv_out
.bv_offset
);
1431 if (cc
->iv_gen_ops
) {
1432 /* For READs use IV stored in integrity metadata */
1433 if (cc
->integrity_iv_size
&& bio_data_dir(ctx
->bio_in
) != WRITE
) {
1434 memcpy(org_iv
, tag_iv
, cc
->integrity_iv_size
);
1436 r
= cc
->iv_gen_ops
->generator(cc
, org_iv
, dmreq
);
1439 /* Data can be already preprocessed in generator */
1440 if (test_bit(CRYPT_ENCRYPT_PREPROCESS
, &cc
->cipher_flags
))
1442 /* Store generated IV in integrity metadata */
1443 if (cc
->integrity_iv_size
)
1444 memcpy(tag_iv
, org_iv
, cc
->integrity_iv_size
);
1446 /* Working copy of IV, to be modified in crypto API */
1447 memcpy(iv
, org_iv
, cc
->iv_size
);
1450 skcipher_request_set_crypt(req
, sg_in
, sg_out
, cc
->sector_size
, iv
);
1452 if (bio_data_dir(ctx
->bio_in
) == WRITE
)
1453 r
= crypto_skcipher_encrypt(req
);
1455 r
= crypto_skcipher_decrypt(req
);
1457 if (!r
&& cc
->iv_gen_ops
&& cc
->iv_gen_ops
->post
)
1458 r
= cc
->iv_gen_ops
->post(cc
, org_iv
, dmreq
);
1460 bio_advance_iter(ctx
->bio_in
, &ctx
->iter_in
, cc
->sector_size
);
1461 bio_advance_iter(ctx
->bio_out
, &ctx
->iter_out
, cc
->sector_size
);
1466 static void kcryptd_async_done(void *async_req
, int error
);
1468 static int crypt_alloc_req_skcipher(struct crypt_config
*cc
,
1469 struct convert_context
*ctx
)
1471 unsigned int key_index
= ctx
->cc_sector
& (cc
->tfms_count
- 1);
1474 ctx
->r
.req
= mempool_alloc(&cc
->req_pool
, in_interrupt() ? GFP_ATOMIC
: GFP_NOIO
);
1479 skcipher_request_set_tfm(ctx
->r
.req
, cc
->cipher_tfm
.tfms
[key_index
]);
1482 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
1483 * requests if driver request queue is full.
1485 skcipher_request_set_callback(ctx
->r
.req
,
1486 CRYPTO_TFM_REQ_MAY_BACKLOG
,
1487 kcryptd_async_done
, dmreq_of_req(cc
, ctx
->r
.req
));
1492 static int crypt_alloc_req_aead(struct crypt_config
*cc
,
1493 struct convert_context
*ctx
)
1495 if (!ctx
->r
.req_aead
) {
1496 ctx
->r
.req_aead
= mempool_alloc(&cc
->req_pool
, in_interrupt() ? GFP_ATOMIC
: GFP_NOIO
);
1497 if (!ctx
->r
.req_aead
)
1501 aead_request_set_tfm(ctx
->r
.req_aead
, cc
->cipher_tfm
.tfms_aead
[0]);
1504 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
1505 * requests if driver request queue is full.
1507 aead_request_set_callback(ctx
->r
.req_aead
,
1508 CRYPTO_TFM_REQ_MAY_BACKLOG
,
1509 kcryptd_async_done
, dmreq_of_req(cc
, ctx
->r
.req_aead
));
1514 static int crypt_alloc_req(struct crypt_config
*cc
,
1515 struct convert_context
*ctx
)
1517 if (crypt_integrity_aead(cc
))
1518 return crypt_alloc_req_aead(cc
, ctx
);
1520 return crypt_alloc_req_skcipher(cc
, ctx
);
1523 static void crypt_free_req_skcipher(struct crypt_config
*cc
,
1524 struct skcipher_request
*req
, struct bio
*base_bio
)
1526 struct dm_crypt_io
*io
= dm_per_bio_data(base_bio
, cc
->per_bio_data_size
);
1528 if ((struct skcipher_request
*)(io
+ 1) != req
)
1529 mempool_free(req
, &cc
->req_pool
);
1532 static void crypt_free_req_aead(struct crypt_config
*cc
,
1533 struct aead_request
*req
, struct bio
*base_bio
)
1535 struct dm_crypt_io
*io
= dm_per_bio_data(base_bio
, cc
->per_bio_data_size
);
1537 if ((struct aead_request
*)(io
+ 1) != req
)
1538 mempool_free(req
, &cc
->req_pool
);
1541 static void crypt_free_req(struct crypt_config
*cc
, void *req
, struct bio
*base_bio
)
1543 if (crypt_integrity_aead(cc
))
1544 crypt_free_req_aead(cc
, req
, base_bio
);
1546 crypt_free_req_skcipher(cc
, req
, base_bio
);
/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
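/*
 * Summary of the return values used in the body below: BLK_STS_OK when all
 * blocks were converted, BLK_STS_DEV_RESOURCE when the Crypto API backlogged
 * a request (or a request could not be allocated) and the caller must
 * continue from a workqueue, BLK_STS_PROTECTION on an -EBADMSG integrity
 * failure, and BLK_STS_IOERR for any other processing error.
 */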
static blk_status_t crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx, bool atomic, bool reset_pending)
{
	unsigned int tag_offset = 0;
	unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
	int r;
1560 * if reset_pending is set we are dealing with the bio for the first time,
1561 * else we're continuing to work on the previous bio, so don't mess with
1562 * the cc_pending counter
1565 atomic_set(&ctx
->cc_pending
, 1);
1567 while (ctx
->iter_in
.bi_size
&& ctx
->iter_out
.bi_size
) {
1569 r
= crypt_alloc_req(cc
, ctx
);
1571 complete(&ctx
->restart
);
1572 return BLK_STS_DEV_RESOURCE
;
1575 atomic_inc(&ctx
->cc_pending
);
1577 if (crypt_integrity_aead(cc
))
1578 r
= crypt_convert_block_aead(cc
, ctx
, ctx
->r
.req_aead
, tag_offset
);
1580 r
= crypt_convert_block_skcipher(cc
, ctx
, ctx
->r
.req
, tag_offset
);
1584 * The request was queued by a crypto driver
1585 * but the driver request queue is full, let's wait.
1588 if (in_interrupt()) {
1589 if (try_wait_for_completion(&ctx
->restart
)) {
1591 * we don't have to block to wait for completion,
1596 * we can't wait for completion without blocking
1597 * exit and continue processing in a workqueue
1600 ctx
->cc_sector
+= sector_step
;
1602 return BLK_STS_DEV_RESOURCE
;
1605 wait_for_completion(&ctx
->restart
);
1607 reinit_completion(&ctx
->restart
);
1610 * The request is queued and processed asynchronously,
1611 * completion function kcryptd_async_done() will be called.
1615 ctx
->cc_sector
+= sector_step
;
1619 * The request was already processed (synchronously).
1622 atomic_dec(&ctx
->cc_pending
);
1623 ctx
->cc_sector
+= sector_step
;
1629 * There was a data integrity error.
1632 atomic_dec(&ctx
->cc_pending
);
1633 return BLK_STS_PROTECTION
;
1635 * There was an error while processing the request.
1638 atomic_dec(&ctx
->cc_pending
);
1639 return BLK_STS_IOERR
;
1646 static void crypt_free_buffer_pages(struct crypt_config
*cc
, struct bio
*clone
);
/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations (but only because
 * max_segment_size is being constrained to PAGE_SIZE).
 *
 * This function may be called concurrently. If we allocate from the mempool
 * concurrently, there is a possibility of deadlock. For example, if we have
 * mempool of 256 pages, two processes, each wanting 256 pages, allocate from
 * the mempool concurrently, it may deadlock in a situation where both
 * processes have allocated 128 pages and the mempool is exhausted.
 *
 * In order to avoid this scenario we allocate the pages under a mutex.
 *
 * In order to not degrade performance with excessive locking, we try
 * non-blocking allocations without a mutex first but on failure we fallback
 * to blocking allocations with a mutex.
 *
 * In order to reduce allocation overhead, we try to allocate compound pages in
 * the first pass. If they are not available, we fall back to the mempool.
 */
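/*
 * Sketch of the resulting allocation strategy in crypt_alloc_buffer() below
 * (paraphrased from its body):
 *
 *   1. Start with gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM, no mutex taken.
 *   2. Try high-order compound pages with __GFP_NOMEMALLOC | __GFP_NORETRY |
 *      __GFP_NOWARN | __GFP_COMP; on success add up to PAGE_SIZE << order
 *      bytes at once.
 *   3. Otherwise fall back to single pages from cc->page_pool.
 *   4. If even that fails, free what was gathered, add __GFP_DIRECT_RECLAIM,
 *      take cc->bio_alloc_lock and retry, so at most one bio at a time can
 *      drain the mempool.
 */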
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
	unsigned int remaining_size;
	unsigned int order = MAX_ORDER - 1;
- 1;
1678 if (unlikely(gfp_mask
& __GFP_DIRECT_RECLAIM
))
1679 mutex_lock(&cc
->bio_alloc_lock
);
1681 clone
= bio_alloc_bioset(cc
->dev
->bdev
, nr_iovecs
, io
->base_bio
->bi_opf
,
1683 clone
->bi_private
= io
;
1684 clone
->bi_end_io
= crypt_endio
;
1686 remaining_size
= size
;
1688 while (remaining_size
) {
1690 unsigned size_to_add
;
1691 unsigned remaining_order
= __fls((remaining_size
+ PAGE_SIZE
- 1) >> PAGE_SHIFT
);
1692 order
= min(order
, remaining_order
);
1695 pages
= alloc_pages(gfp_mask
1696 | __GFP_NOMEMALLOC
| __GFP_NORETRY
| __GFP_NOWARN
| __GFP_COMP
,
1698 if (likely(pages
!= NULL
))
1703 pages
= mempool_alloc(&cc
->page_pool
, gfp_mask
);
1705 crypt_free_buffer_pages(cc
, clone
);
1707 gfp_mask
|= __GFP_DIRECT_RECLAIM
;
1713 size_to_add
= min((unsigned)PAGE_SIZE
<< order
, remaining_size
);
1714 __bio_add_page(clone
, pages
, size_to_add
, 0);
1715 remaining_size
-= size_to_add
;
1718 /* Allocate space for integrity tags */
1719 if (dm_crypt_integrity_io_alloc(io
, clone
)) {
1720 crypt_free_buffer_pages(cc
, clone
);
1725 if (unlikely(gfp_mask
& __GFP_DIRECT_RECLAIM
))
1726 mutex_unlock(&cc
->bio_alloc_lock
);
1731 static void crypt_free_buffer_pages(struct crypt_config
*cc
, struct bio
*clone
)
1733 struct folio_iter fi
;
1735 if (clone
->bi_vcnt
> 0) { /* bio_for_each_folio_all crashes with an empty bio */
1736 bio_for_each_folio_all(fi
, clone
) {
1737 if (folio_test_large(fi
.folio
))
1738 folio_put(fi
.folio
);
1740 mempool_free(&fi
.folio
->page
, &cc
->page_pool
);
1745 static void crypt_io_init(struct dm_crypt_io
*io
, struct crypt_config
*cc
,
1746 struct bio
*bio
, sector_t sector
)
1750 io
->sector
= sector
;
1752 io
->ctx
.r
.req
= NULL
;
1753 io
->integrity_metadata
= NULL
;
1754 io
->integrity_metadata_from_pool
= false;
1755 io
->in_tasklet
= false;
1756 atomic_set(&io
->io_pending
, 0);
1759 static void crypt_inc_pending(struct dm_crypt_io
*io
)
1761 atomic_inc(&io
->io_pending
);
1764 static void kcryptd_io_bio_endio(struct work_struct
*work
)
1766 struct dm_crypt_io
*io
= container_of(work
, struct dm_crypt_io
, work
);
1768 bio_endio(io
->base_bio
);
1772 * One of the bios was finished. Check for completion of
1773 * the whole request and correctly clean up the buffer.
1775 static void crypt_dec_pending(struct dm_crypt_io
*io
)
1777 struct crypt_config
*cc
= io
->cc
;
1778 struct bio
*base_bio
= io
->base_bio
;
1779 blk_status_t error
= io
->error
;
1781 if (!atomic_dec_and_test(&io
->io_pending
))
1785 crypt_free_req(cc
, io
->ctx
.r
.req
, base_bio
);
1787 if (unlikely(io
->integrity_metadata_from_pool
))
1788 mempool_free(io
->integrity_metadata
, &io
->cc
->tag_pool
);
1790 kfree(io
->integrity_metadata
);
1792 base_bio
->bi_status
= error
;
1795 * If we are running this function from our tasklet,
1796 * we can't call bio_endio() here, because it will call
1797 * clone_endio() from dm.c, which in turn will
1798 * free the current struct dm_crypt_io structure with
1799 * our tasklet. In this case we need to delay bio_endio()
1800 * execution to after the tasklet is done and dequeued.
1802 if (io
->in_tasklet
) {
1803 INIT_WORK(&io
->work
, kcryptd_io_bio_endio
);
1804 queue_work(cc
->io_queue
, &io
->work
);
1808 bio_endio(base_bio
);
/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 *
 * The work is done per CPU global for all dm-crypt instances.
 * They should not depend on each other and do not block.
 */
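/*
 * Rough data flow implied by the code below (illustration, no new logic):
 *
 *   READ:  kcryptd_queue_read() -> kcryptd_io_read() submits a clone bio to
 *          the underlying device; crypt_endio() then hands the filled clone
 *          to kcryptd_queue_crypt() for decryption.
 *   WRITE: kcryptd_queue_crypt() -> kcryptd_crypt_write_convert() encrypts
 *          into a freshly allocated clone, which is either submitted inline
 *          (DM_CRYPT_NO_OFFLOAD / DM_CRYPT_NO_WRITE_WORKQUEUE) or sorted
 *          into cc->write_tree and submitted by the dmcrypt_write thread.
 */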
1828 static void crypt_endio(struct bio
*clone
)
1830 struct dm_crypt_io
*io
= clone
->bi_private
;
1831 struct crypt_config
*cc
= io
->cc
;
1832 unsigned int rw
= bio_data_dir(clone
);
1836 * free the processed pages
1839 crypt_free_buffer_pages(cc
, clone
);
1841 error
= clone
->bi_status
;
1844 if (rw
== READ
&& !error
) {
1845 kcryptd_queue_crypt(io
);
1849 if (unlikely(error
))
1852 crypt_dec_pending(io
);
1855 #define CRYPT_MAP_READ_GFP GFP_NOWAIT
1857 static int kcryptd_io_read(struct dm_crypt_io
*io
, gfp_t gfp
)
1859 struct crypt_config
*cc
= io
->cc
;
1863 * We need the original biovec array in order to decrypt the whole bio
1864 * data *afterwards* -- thanks to immutable biovecs we don't need to
1865 * worry about the block layer modifying the biovec array; so leverage
1866 * bio_alloc_clone().
1868 clone
= bio_alloc_clone(cc
->dev
->bdev
, io
->base_bio
, gfp
, &cc
->bs
);
1871 clone
->bi_private
= io
;
1872 clone
->bi_end_io
= crypt_endio
;
1874 crypt_inc_pending(io
);
1876 clone
->bi_iter
.bi_sector
= cc
->start
+ io
->sector
;
1878 if (dm_crypt_integrity_io_alloc(io
, clone
)) {
1879 crypt_dec_pending(io
);
1884 dm_submit_bio_remap(io
->base_bio
, clone
);
1888 static void kcryptd_io_read_work(struct work_struct
*work
)
1890 struct dm_crypt_io
*io
= container_of(work
, struct dm_crypt_io
, work
);
1892 crypt_inc_pending(io
);
1893 if (kcryptd_io_read(io
, GFP_NOIO
))
1894 io
->error
= BLK_STS_RESOURCE
;
1895 crypt_dec_pending(io
);
1898 static void kcryptd_queue_read(struct dm_crypt_io
*io
)
1900 struct crypt_config
*cc
= io
->cc
;
1902 INIT_WORK(&io
->work
, kcryptd_io_read_work
);
1903 queue_work(cc
->io_queue
, &io
->work
);
1906 static void kcryptd_io_write(struct dm_crypt_io
*io
)
1908 struct bio
*clone
= io
->ctx
.bio_out
;
1910 dm_submit_bio_remap(io
->base_bio
, clone
);
1913 #define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
1915 static int dmcrypt_write(void *data
)
1917 struct crypt_config
*cc
= data
;
1918 struct dm_crypt_io
*io
;
1921 struct rb_root write_tree
;
1922 struct blk_plug plug
;
1924 spin_lock_irq(&cc
->write_thread_lock
);
1927 if (!RB_EMPTY_ROOT(&cc
->write_tree
))
1930 set_current_state(TASK_INTERRUPTIBLE
);
1932 spin_unlock_irq(&cc
->write_thread_lock
);
1934 if (unlikely(kthread_should_stop())) {
1935 set_current_state(TASK_RUNNING
);
1941 set_current_state(TASK_RUNNING
);
1942 spin_lock_irq(&cc
->write_thread_lock
);
1943 goto continue_locked
;
1946 write_tree
= cc
->write_tree
;
1947 cc
->write_tree
= RB_ROOT
;
1948 spin_unlock_irq(&cc
->write_thread_lock
);
1950 BUG_ON(rb_parent(write_tree
.rb_node
));
1953 * Note: we cannot walk the tree here with rb_next because
1954 * the structures may be freed when kcryptd_io_write is called.
1956 blk_start_plug(&plug
);
1958 io
= crypt_io_from_node(rb_first(&write_tree
));
1959 rb_erase(&io
->rb_node
, &write_tree
);
1960 kcryptd_io_write(io
);
1962 } while (!RB_EMPTY_ROOT(&write_tree
));
1963 blk_finish_plug(&plug
);
1968 static void kcryptd_crypt_write_io_submit(struct dm_crypt_io
*io
, int async
)
1970 struct bio
*clone
= io
->ctx
.bio_out
;
1971 struct crypt_config
*cc
= io
->cc
;
1972 unsigned long flags
;
1974 struct rb_node
**rbp
, *parent
;
1976 if (unlikely(io
->error
)) {
1977 crypt_free_buffer_pages(cc
, clone
);
1979 crypt_dec_pending(io
);
1983 /* crypt_convert should have filled the clone bio */
1984 BUG_ON(io
->ctx
.iter_out
.bi_size
);
1986 clone
->bi_iter
.bi_sector
= cc
->start
+ io
->sector
;
1988 if ((likely(!async
) && test_bit(DM_CRYPT_NO_OFFLOAD
, &cc
->flags
)) ||
1989 test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE
, &cc
->flags
)) {
1990 dm_submit_bio_remap(io
->base_bio
, clone
);
1994 spin_lock_irqsave(&cc
->write_thread_lock
, flags
);
1995 if (RB_EMPTY_ROOT(&cc
->write_tree
))
1996 wake_up_process(cc
->write_thread
);
1997 rbp
= &cc
->write_tree
.rb_node
;
1999 sector
= io
->sector
;
2002 if (sector
< crypt_io_from_node(parent
)->sector
)
2003 rbp
= &(*rbp
)->rb_left
;
2005 rbp
= &(*rbp
)->rb_right
;
2007 rb_link_node(&io
->rb_node
, parent
, rbp
);
2008 rb_insert_color(&io
->rb_node
, &cc
->write_tree
);
2009 spin_unlock_irqrestore(&cc
->write_thread_lock
, flags
);
2012 static bool kcryptd_crypt_write_inline(struct crypt_config
*cc
,
2013 struct convert_context
*ctx
)
2016 if (!test_bit(DM_CRYPT_WRITE_INLINE
, &cc
->flags
))
2020 * Note: zone append writes (REQ_OP_ZONE_APPEND) do not have ordering
2021 * constraints so they do not need to be issued inline by
2022 * kcryptd_crypt_write_convert().
2024 switch (bio_op(ctx
->bio_in
)) {
2026 case REQ_OP_WRITE_ZEROES
:
2033 static void kcryptd_crypt_write_continue(struct work_struct
*work
)
2035 struct dm_crypt_io
*io
= container_of(work
, struct dm_crypt_io
, work
);
2036 struct crypt_config
*cc
= io
->cc
;
2037 struct convert_context
*ctx
= &io
->ctx
;
2039 sector_t sector
= io
->sector
;
2042 wait_for_completion(&ctx
->restart
);
2043 reinit_completion(&ctx
->restart
);
2045 r
= crypt_convert(cc
, &io
->ctx
, true, false);
2048 crypt_finished
= atomic_dec_and_test(&ctx
->cc_pending
);
2049 if (!crypt_finished
&& kcryptd_crypt_write_inline(cc
, ctx
)) {
2050 /* Wait for completion signaled by kcryptd_async_done() */
2051 wait_for_completion(&ctx
->restart
);
2055 /* Encryption was already finished, submit io now */
2056 if (crypt_finished
) {
2057 kcryptd_crypt_write_io_submit(io
, 0);
2058 io
->sector
= sector
;
2061 crypt_dec_pending(io
);
2064 static void kcryptd_crypt_write_convert(struct dm_crypt_io
*io
)
2066 struct crypt_config
*cc
= io
->cc
;
2067 struct convert_context
*ctx
= &io
->ctx
;
2070 sector_t sector
= io
->sector
;
2074 * Prevent io from disappearing until this function completes.
2076 crypt_inc_pending(io
);
2077 crypt_convert_init(cc
, ctx
, NULL
, io
->base_bio
, sector
);
2079 clone
= crypt_alloc_buffer(io
, io
->base_bio
->bi_iter
.bi_size
);
2080 if (unlikely(!clone
)) {
2081 io
->error
= BLK_STS_IOERR
;
2085 io
->ctx
.bio_out
= clone
;
2086 io
->ctx
.iter_out
= clone
->bi_iter
;
2088 sector
+= bio_sectors(clone
);
2090 crypt_inc_pending(io
);
2091 r
= crypt_convert(cc
, ctx
,
2092 test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE
, &cc
->flags
), true);
2094 * Crypto API backlogged the request, because its queue was full
2095 * and we're in softirq context, so continue from a workqueue
2096 * (TODO: is it actually possible to be in softirq in the write path?)
2098 if (r
== BLK_STS_DEV_RESOURCE
) {
2099 INIT_WORK(&io
->work
, kcryptd_crypt_write_continue
);
2100 queue_work(cc
->crypt_queue
, &io
->work
);
2105 crypt_finished
= atomic_dec_and_test(&ctx
->cc_pending
);
2106 if (!crypt_finished
&& kcryptd_crypt_write_inline(cc
, ctx
)) {
2107 /* Wait for completion signaled by kcryptd_async_done() */
2108 wait_for_completion(&ctx
->restart
);
2112 /* Encryption was already finished, submit io now */
2113 if (crypt_finished
) {
2114 kcryptd_crypt_write_io_submit(io
, 0);
2115 io
->sector
= sector
;
2119 crypt_dec_pending(io
);
2122 static void kcryptd_crypt_read_done(struct dm_crypt_io
*io
)
2124 crypt_dec_pending(io
);
2127 static void kcryptd_crypt_read_continue(struct work_struct
*work
)
2129 struct dm_crypt_io
*io
= container_of(work
, struct dm_crypt_io
, work
);
2130 struct crypt_config
*cc
= io
->cc
;
2133 wait_for_completion(&io
->ctx
.restart
);
2134 reinit_completion(&io
->ctx
.restart
);
2136 r
= crypt_convert(cc
, &io
->ctx
, true, false);
2140 if (atomic_dec_and_test(&io
->ctx
.cc_pending
))
2141 kcryptd_crypt_read_done(io
);
2143 crypt_dec_pending(io
);
2146 static void kcryptd_crypt_read_convert(struct dm_crypt_io
*io
)
2148 struct crypt_config
*cc
= io
->cc
;
2151 crypt_inc_pending(io
);
2153 crypt_convert_init(cc
, &io
->ctx
, io
->base_bio
, io
->base_bio
,
2156 r
= crypt_convert(cc
, &io
->ctx
,
2157 test_bit(DM_CRYPT_NO_READ_WORKQUEUE
, &cc
->flags
), true);
2159 * Crypto API backlogged the request, because its queue was full
2160 * and we're in softirq context, so continue from a workqueue
2162 if (r
== BLK_STS_DEV_RESOURCE
) {
2163 INIT_WORK(&io
->work
, kcryptd_crypt_read_continue
);
2164 queue_work(cc
->crypt_queue
, &io
->work
);
2170 if (atomic_dec_and_test(&io
->ctx
.cc_pending
))
2171 kcryptd_crypt_read_done(io
);
2173 crypt_dec_pending(io
);
2176 static void kcryptd_async_done(void *data
, int error
)
2178 struct dm_crypt_request
*dmreq
= data
;
2179 struct convert_context
*ctx
= dmreq
->ctx
;
2180 struct dm_crypt_io
*io
= container_of(ctx
, struct dm_crypt_io
, ctx
);
2181 struct crypt_config
*cc
= io
->cc
;
2184 * A request from crypto driver backlog is going to be processed now,
2185 * finish the completion and continue in crypt_convert().
2186 * (Callback will be called for the second time for this request.)
2188 if (error
== -EINPROGRESS
) {
2189 complete(&ctx
->restart
);
2193 if (!error
&& cc
->iv_gen_ops
&& cc
->iv_gen_ops
->post
)
2194 error
= cc
->iv_gen_ops
->post(cc
, org_iv_of_dmreq(cc
, dmreq
), dmreq
);
2196 if (error
== -EBADMSG
) {
2197 sector_t s
= le64_to_cpu(*org_sector_of_dmreq(cc
, dmreq
));
2199 DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
2200 ctx
->bio_in
->bi_bdev
, s
);
2201 dm_audit_log_bio(DM_MSG_PREFIX
, "integrity-aead",
2203 io
->error
= BLK_STS_PROTECTION
;
2204 } else if (error
< 0)
2205 io
->error
= BLK_STS_IOERR
;
2207 crypt_free_req(cc
, req_of_dmreq(cc
, dmreq
), io
->base_bio
);
2209 if (!atomic_dec_and_test(&ctx
->cc_pending
))
2213 * The request is fully completed: for inline writes, let
2214 * kcryptd_crypt_write_convert() do the IO submission.
2216 if (bio_data_dir(io
->base_bio
) == READ
) {
2217 kcryptd_crypt_read_done(io
);
2221 if (kcryptd_crypt_write_inline(cc
, ctx
)) {
2222 complete(&ctx
->restart
);
2226 kcryptd_crypt_write_io_submit(io
, 1);
2229 static void kcryptd_crypt(struct work_struct
*work
)
2231 struct dm_crypt_io
*io
= container_of(work
, struct dm_crypt_io
, work
);
2233 if (bio_data_dir(io
->base_bio
) == READ
)
2234 kcryptd_crypt_read_convert(io
);
2236 kcryptd_crypt_write_convert(io
);
2239 static void kcryptd_crypt_tasklet(unsigned long work
)
2241 kcryptd_crypt((struct work_struct
*)work
);
static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	if ((bio_data_dir(io->base_bio) == READ && test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) ||
	    (bio_data_dir(io->base_bio) == WRITE && test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))) {
		/*
		 * in_hardirq(): Crypto API's skcipher_walk_first() refuses to work in hard IRQ context.
		 * irqs_disabled(): the kernel may run some IO completion from the idle thread, but
		 * it is being executed with irqs disabled.
		 */
		if (in_hardirq() || irqs_disabled()) {
			io->in_tasklet = true;
			tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
			tasklet_schedule(&io->tasklet);
			return;
		}

		kcryptd_crypt(&io->work);
		return;
	}

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

static void crypt_free_tfms_aead(struct crypt_config *cc)
{
	if (!cc->cipher_tfm.tfms_aead)
		return;

	if (cc->cipher_tfm.tfms_aead[0] && !IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
		crypto_free_aead(cc->cipher_tfm.tfms_aead[0]);
		cc->cipher_tfm.tfms_aead[0] = NULL;
	}

	kfree(cc->cipher_tfm.tfms_aead);
	cc->cipher_tfm.tfms_aead = NULL;
}

static void crypt_free_tfms_skcipher(struct crypt_config *cc)
{
	unsigned int i;

	if (!cc->cipher_tfm.tfms)
		return;

	for (i = 0; i < cc->tfms_count; i++)
		if (cc->cipher_tfm.tfms[i] && !IS_ERR(cc->cipher_tfm.tfms[i])) {
			crypto_free_skcipher(cc->cipher_tfm.tfms[i]);
			cc->cipher_tfm.tfms[i] = NULL;
		}

	kfree(cc->cipher_tfm.tfms);
	cc->cipher_tfm.tfms = NULL;
}

static void crypt_free_tfms(struct crypt_config *cc)
{
	if (crypt_integrity_aead(cc))
		crypt_free_tfms_aead(cc);
	else
		crypt_free_tfms_skcipher(cc);
}
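
/*
 * Allocate one skcipher transform per key; tfms_count is greater than one
 * only for multi-key (keycount) mappings.
 */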
static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
{
	unsigned int i;
	int err;

	cc->cipher_tfm.tfms = kcalloc(cc->tfms_count,
				      sizeof(struct crypto_skcipher *),
				      GFP_KERNEL);
	if (!cc->cipher_tfm.tfms)
		return -ENOMEM;

	for (i = 0; i < cc->tfms_count; i++) {
		cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0,
						CRYPTO_ALG_ALLOCATES_MEMORY);
		if (IS_ERR(cc->cipher_tfm.tfms[i])) {
			err = PTR_ERR(cc->cipher_tfm.tfms[i]);
			crypt_free_tfms(cc);
			return err;
		}
	}

	/*
	 * dm-crypt performance can vary greatly depending on which crypto
	 * algorithm implementation is used.  Help people debug performance
	 * problems by logging the ->cra_driver_name.
	 */
	DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
	       crypto_skcipher_alg(any_tfm(cc))->base.cra_driver_name);
	return 0;
}

static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode)
{
	int err;

	cc->cipher_tfm.tfms = kmalloc(sizeof(struct crypto_aead *), GFP_KERNEL);
	if (!cc->cipher_tfm.tfms)
		return -ENOMEM;

	cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0,
						CRYPTO_ALG_ALLOCATES_MEMORY);
	if (IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
		err = PTR_ERR(cc->cipher_tfm.tfms_aead[0]);
		crypt_free_tfms(cc);
		return err;
	}

	DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
	       crypto_aead_alg(any_tfm_aead(cc))->base.cra_driver_name);
	return 0;
}

static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
{
	if (crypt_integrity_aead(cc))
		return crypt_alloc_tfms_aead(cc, ciphermode);
	else
		return crypt_alloc_tfms_skcipher(cc, ciphermode);
}

static unsigned int crypt_subkey_size(struct crypt_config *cc)
{
	return (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
}

static unsigned int crypt_authenckey_size(struct crypt_config *cc)
{
	return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct crypto_authenc_key_param));
}

/*
 * If AEAD is composed like authenc(hmac(sha256),xts(aes)),
 * the key must be passed to the crypto API in a special format.
 * This function converts cc->key to this special format.
 */
static void crypt_copy_authenckey(char *p, const void *key,
				  unsigned int enckeylen, unsigned int authkeylen)
{
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;

	rta = (struct rtattr *)p;
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enckeylen);
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	p += RTA_SPACE(sizeof(*param));
	memcpy(p, key + enckeylen, authkeylen);
	p += authkeylen;
	memcpy(p, key, enckeylen);
}

static int crypt_setkey(struct crypt_config *cc)
{
	unsigned int subkey_size;
	int err = 0, i, r;

	/* Ignore extra keys (which are used for IV etc) */
	subkey_size = crypt_subkey_size(cc);

	if (crypt_integrity_hmac(cc)) {
		if (subkey_size < cc->key_mac_size)
			return -EINVAL;

		crypt_copy_authenckey(cc->authenc_key, cc->key,
				      subkey_size - cc->key_mac_size,
				      cc->key_mac_size);
	}

	for (i = 0; i < cc->tfms_count; i++) {
		if (crypt_integrity_hmac(cc))
			r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
				cc->authenc_key, crypt_authenckey_size(cc));
		else if (crypt_integrity_aead(cc))
			r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
					       cc->key + (i * subkey_size),
					       subkey_size);
		else
			r = crypto_skcipher_setkey(cc->cipher_tfm.tfms[i],
						   cc->key + (i * subkey_size),
						   subkey_size);
		if (r)
			err = r;
	}

	if (crypt_integrity_hmac(cc))
		memzero_explicit(cc->authenc_key, crypt_authenckey_size(cc));

	return err;
}
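
/*
 * Key handling through the kernel keyring: the table key can reference a
 * logon, user, encrypted or trusted key instead of carrying hex key material.
 */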
#ifdef CONFIG_KEYS

static bool contains_whitespace(const char *str)
{
	while (*str)
		if (isspace(*str++))
			return true;
	return false;
}

static int set_key_user(struct crypt_config *cc, struct key *key)
{
	const struct user_key_payload *ukp;

	ukp = user_key_payload_locked(key);
	if (!ukp)
		return -EKEYREVOKED;

	if (cc->key_size != ukp->datalen)
		return -EINVAL;

	memcpy(cc->key, ukp->data, cc->key_size);

	return 0;
}

static int set_key_encrypted(struct crypt_config *cc, struct key *key)
{
	const struct encrypted_key_payload *ekp;

	ekp = key->payload.data[0];
	if (!ekp)
		return -EKEYREVOKED;

	if (cc->key_size != ekp->decrypted_datalen)
		return -EINVAL;

	memcpy(cc->key, ekp->decrypted_data, cc->key_size);

	return 0;
}

static int set_key_trusted(struct crypt_config *cc, struct key *key)
{
	const struct trusted_key_payload *tkp;

	tkp = key->payload.data[0];
	if (!tkp)
		return -EKEYREVOKED;

	if (cc->key_size != tkp->key_len)
		return -EINVAL;

	memcpy(cc->key, tkp->key, cc->key_size);

	return 0;
}

static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
{
	char *new_key_string, *key_desc;
	int ret;
	struct key_type *type;
	struct key *key;
	int (*set_key)(struct crypt_config *cc, struct key *key);

	/*
	 * Reject key_string with whitespace. dm core currently lacks code for
	 * proper whitespace escaping in arguments on DM_TABLE_STATUS path.
	 */
	if (contains_whitespace(key_string)) {
		DMERR("whitespace chars not allowed in key string");
		return -EINVAL;
	}

	/* look for next ':' separating key_type from key_description */
	key_desc = strchr(key_string, ':');
	if (!key_desc || key_desc == key_string || !strlen(key_desc + 1))
		return -EINVAL;

	if (!strncmp(key_string, "logon:", key_desc - key_string + 1)) {
		type = &key_type_logon;
		set_key = set_key_user;
	} else if (!strncmp(key_string, "user:", key_desc - key_string + 1)) {
		type = &key_type_user;
		set_key = set_key_user;
	} else if (IS_ENABLED(CONFIG_ENCRYPTED_KEYS) &&
		   !strncmp(key_string, "encrypted:", key_desc - key_string + 1)) {
		type = &key_type_encrypted;
		set_key = set_key_encrypted;
	} else if (IS_ENABLED(CONFIG_TRUSTED_KEYS) &&
		   !strncmp(key_string, "trusted:", key_desc - key_string + 1)) {
		type = &key_type_trusted;
		set_key = set_key_trusted;
	} else {
		return -EINVAL;
	}

	new_key_string = kstrdup(key_string, GFP_KERNEL);
	if (!new_key_string)
		return -ENOMEM;

	key = request_key(type, key_desc + 1, NULL);
	if (IS_ERR(key)) {
		kfree_sensitive(new_key_string);
		return PTR_ERR(key);
	}

	down_read(&key->sem);

	ret = set_key(cc, key);
	if (ret < 0) {
		up_read(&key->sem);
		key_put(key);
		kfree_sensitive(new_key_string);
		return ret;
	}

	up_read(&key->sem);
	key_put(key);

	/* clear the flag since following operations may invalidate previously valid key */
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	ret = crypt_setkey(cc);

	if (!ret) {
		set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
		kfree_sensitive(cc->key_string);
		cc->key_string = new_key_string;
	} else
		kfree_sensitive(new_key_string);

	return ret;
}

static int get_key_size(char **key_string)
{
	char *colon, dummy;
	int ret;

	if (*key_string[0] != ':')
		return strlen(*key_string) >> 1;

	/* look for next ':' in key string */
	colon = strpbrk(*key_string + 1, ":");
	if (!colon)
		return -EINVAL;

	if (sscanf(*key_string + 1, "%u%c", &ret, &dummy) != 2 || dummy != ':')
		return -EINVAL;

	*key_string = colon;

	/* remaining key string should be :<logon|user>:<key_desc> */

	return ret;
}

#else

static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
{
	return -EINVAL;
}

static int get_key_size(char **key_string)
{
	return (*key_string[0] == ':') ? -EINVAL : (int)(strlen(*key_string) >> 1);
}

#endif /* CONFIG_KEYS */

static int crypt_set_key(struct crypt_config *cc, char *key)
{
	int r = -EINVAL;
	int key_string_len = strlen(key);

	/* Hyphen (which gives a key_size of zero) means there is no key. */
	if (!cc->key_size && strcmp(key, "-"))
		goto out;

	/* ':' means the key is in kernel keyring, short-circuit normal key processing */
	if (key[0] == ':') {
		r = crypt_set_keyring_key(cc, key + 1);
		goto out;
	}

	/* clear the flag since following operations may invalidate previously valid key */
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	/* wipe references to any kernel keyring key */
	kfree_sensitive(cc->key_string);
	cc->key_string = NULL;

	/* Decode key from its hex representation. */
	if (cc->key_size && hex2bin(cc->key, key, cc->key_size) < 0)
		goto out;

	r = crypt_setkey(cc);
	if (!r)
		set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

out:
	/* Hex key string not needed after here, so wipe it. */
	memset(key, '0', key_string_len);

	return r;
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	int r;

	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	get_random_bytes(&cc->key, cc->key_size);

	/* Wipe IV private keys */
	if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
		r = cc->iv_gen_ops->wipe(cc);
		if (r)
			return r;
	}

	kfree_sensitive(cc->key_string);
	cc->key_string = NULL;
	r = crypt_setkey(cc);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));

	return r;
}
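
/*
 * Recalculate the per-client page budget enforced by crypt_page_alloc() when
 * the number of active dm-crypt instances changes.
 */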
static void crypt_calculate_pages_per_client(void)
{
	unsigned long pages = (totalram_pages() - totalhigh_pages()) * DM_CRYPT_MEMORY_PERCENT / 100;

	if (!dm_crypt_clients_n)
		return;

	pages /= dm_crypt_clients_n;
	if (pages < DM_CRYPT_MIN_PAGES_PER_CLIENT)
		pages = DM_CRYPT_MIN_PAGES_PER_CLIENT;
	dm_crypt_pages_per_client = pages;
}

static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data)
{
	struct crypt_config *cc = pool_data;
	struct page *page;

	/*
	 * Note, percpu_counter_read_positive() may over (and under) estimate
	 * the current usage by at most (batch - 1) * num_online_cpus() pages,
	 * but avoids potential spinlock contention of an exact result.
	 */
	if (unlikely(percpu_counter_read_positive(&cc->n_allocated_pages) >= dm_crypt_pages_per_client) &&
	    likely(gfp_mask & __GFP_NORETRY))
		return NULL;

	page = alloc_page(gfp_mask);
	if (likely(page != NULL))
		percpu_counter_add(&cc->n_allocated_pages, 1);

	return page;
}

static void crypt_page_free(void *page, void *pool_data)
{
	struct crypt_config *cc = pool_data;

	__free_page(page);
	percpu_counter_sub(&cc->n_allocated_pages, 1);
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	ti->private = NULL;

	if (!cc)
		return;

	if (cc->write_thread)
		kthread_stop(cc->write_thread);

	if (cc->io_queue)
		destroy_workqueue(cc->io_queue);
	if (cc->crypt_queue)
		destroy_workqueue(cc->crypt_queue);

	crypt_free_tfms(cc);

	bioset_exit(&cc->bs);

	mempool_exit(&cc->page_pool);
	mempool_exit(&cc->req_pool);
	mempool_exit(&cc->tag_pool);

	WARN_ON(percpu_counter_sum(&cc->n_allocated_pages) != 0);
	percpu_counter_destroy(&cc->n_allocated_pages);

	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	if (cc->dev)
		dm_put_device(ti, cc->dev);

	kfree_sensitive(cc->cipher_string);
	kfree_sensitive(cc->key_string);
	kfree_sensitive(cc->cipher_auth);
	kfree_sensitive(cc->authenc_key);

	mutex_destroy(&cc->bio_alloc_lock);

	/* Must zero key material before freeing */
	kfree_sensitive(cc);

	spin_lock(&dm_crypt_clients_lock);
	WARN_ON(!dm_crypt_clients_n);
	dm_crypt_clients_n--;
	crypt_calculate_pages_per_client();
	spin_unlock(&dm_crypt_clients_lock);

	dm_audit_log_dtr(DM_MSG_PREFIX, ti, 1);
}
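
/*
 * Select the IV generation operations for the requested IV mode and account
 * for any extra key material (IV seed, whitening) the mode consumes.
 */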
static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
{
	struct crypt_config *cc = ti->private;

	if (crypt_integrity_aead(cc))
		cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
	else
		cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));

	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else if (ivmode) {
		DMWARN("Selected cipher does not support IVs");
		ivmode = NULL;
	}

	/* Choose ivmode, see comments at iv code. */
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "plain64") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64_ops;
	else if (strcmp(ivmode, "plain64be") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64be_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else if (strcmp(ivmode, "eboiv") == 0)
		cc->iv_gen_ops = &crypt_iv_eboiv_ops;
	else if (strcmp(ivmode, "elephant") == 0) {
		cc->iv_gen_ops = &crypt_iv_elephant_ops;
		cc->key_parts = 2;
		cc->key_extra_size = cc->key_size / 2;
		if (cc->key_extra_size > ELEPHANT_MAX_KEY_SIZE)
			return -EINVAL;
		set_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags);
	} else if (strcmp(ivmode, "lmk") == 0) {
		cc->iv_gen_ops = &crypt_iv_lmk_ops;
		/*
		 * Versions 2 and 3 are recognised according
		 * to the length of the provided multi-key string.
		 * If present (version 3), the last key is used as IV seed.
		 * All keys (including IV seed) are always the same size.
		 */
		if (cc->key_size % cc->key_parts) {
			cc->key_parts++;
			cc->key_extra_size = cc->key_size / cc->key_parts;
		}
	} else if (strcmp(ivmode, "tcw") == 0) {
		cc->iv_gen_ops = &crypt_iv_tcw_ops;
		cc->key_parts += 2; /* IV + whitening */
		cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
	} else if (strcmp(ivmode, "random") == 0) {
		cc->iv_gen_ops = &crypt_iv_random_ops;
		/* Need storage space in integrity fields. */
		cc->integrity_iv_size = cc->iv_size;
	} else {
		ti->error = "Invalid IV mode";
		return -EINVAL;
	}

	return 0;
}

/*
 * Workaround to parse HMAC algorithm from AEAD crypto API spec.
 * The HMAC is needed to calculate tag size (HMAC digest size).
 * This should probably be done by crypto API calls (once they are available).
 */
static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api)
{
	char *start, *end, *mac_alg = NULL;
	struct crypto_ahash *mac;

	if (!strstarts(cipher_api, "authenc("))
		return 0;

	start = strchr(cipher_api, '(');
	end = strchr(cipher_api, ',');
	if (!start || !end || ++start > end)
		return -EINVAL;

	mac_alg = kmemdup_nul(start, end - start, GFP_KERNEL);
	if (!mac_alg)
		return -ENOMEM;

	mac = crypto_alloc_ahash(mac_alg, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
	kfree(mac_alg);

	if (IS_ERR(mac))
		return PTR_ERR(mac);

	cc->key_mac_size = crypto_ahash_digestsize(mac);
	crypto_free_ahash(mac);

	cc->authenc_key = kmalloc(crypt_authenckey_size(cc), GFP_KERNEL);
	if (!cc->authenc_key)
		return -ENOMEM;

	return 0;
}

static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key,
				char **ivmode, char **ivopts)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher_api, buf[CRYPTO_MAX_ALG_NAME];
	int ret = -EINVAL;

	cc->tfms_count = 1;

	/*
	 * New format (capi: prefix)
	 * capi:cipher_api_spec-iv:ivopts
	 */
	tmp = &cipher_in[strlen("capi:")];

	/* Separate IV options if present, it can contain another '-' in hash name */
	*ivopts = strrchr(tmp, ':');
	if (*ivopts) {
		**ivopts = '\0';
		(*ivopts)++;
	}
	/* Parse IV mode */
	*ivmode = strrchr(tmp, '-');
	if (*ivmode) {
		**ivmode = '\0';
		(*ivmode)++;
	}
	/* The rest is crypto API spec */
	cipher_api = tmp;

	/* Alloc AEAD, can be used only in new format. */
	if (crypt_integrity_aead(cc)) {
		ret = crypt_ctr_auth_cipher(cc, cipher_api);
		if (ret < 0) {
			ti->error = "Invalid AEAD cipher spec";
			return ret;
		}
	}

	if (*ivmode && !strcmp(*ivmode, "lmk"))
		cc->tfms_count = 64;

	if (*ivmode && !strcmp(*ivmode, "essiv")) {
		if (!*ivopts) {
			ti->error = "Digest algorithm missing for ESSIV mode";
			return -EINVAL;
		}
		ret = snprintf(buf, CRYPTO_MAX_ALG_NAME, "essiv(%s,%s)",
			       cipher_api, *ivopts);
		if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
			ti->error = "Cannot allocate cipher string";
			return -ENOMEM;
		}
		cipher_api = buf;
	}

	cc->key_parts = cc->tfms_count;

	/* Allocate cipher */
	ret = crypt_alloc_tfms(cc, cipher_api);
	if (ret < 0) {
		ti->error = "Error allocating crypto tfm";
		return ret;
	}

	if (crypt_integrity_aead(cc))
		cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
	else
		cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));

	return 0;
}

static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key,
				char **ivmode, char **ivopts)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher, *chainmode, *keycount;
	char *cipher_api = NULL;
	int ret = -EINVAL;
	char dummy;

	if (strchr(cipher_in, '(') || crypt_integrity_aead(cc)) {
		ti->error = "Bad cipher specification";
		return -EINVAL;
	}

	/*
	 * Legacy dm-crypt cipher specification
	 * cipher[:keycount]-mode-iv:ivopts
	 */
	tmp = cipher_in;
	keycount = strsep(&tmp, "-");
	cipher = strsep(&keycount, ":");

	if (!keycount)
		cc->tfms_count = 1;
	else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
		 !is_power_of_2(cc->tfms_count)) {
		ti->error = "Bad cipher key count specification";
		return -EINVAL;
	}
	cc->key_parts = cc->tfms_count;

	chainmode = strsep(&tmp, "-");
	*ivmode = strsep(&tmp, ":");
	*ivopts = tmp;

	/*
	 * For compatibility with the original dm-crypt mapping format, if
	 * only the cipher name is supplied, use cbc-plain.
	 */
	if (!chainmode || (!strcmp(chainmode, "plain") && !*ivmode)) {
		chainmode = "cbc";
		*ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !*ivmode) {
		ti->error = "IV mechanism required";
		return -EINVAL;
	}

	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
	if (!cipher_api)
		goto bad_mem;

	if (*ivmode && !strcmp(*ivmode, "essiv")) {
		if (!*ivopts) {
			ti->error = "Digest algorithm missing for ESSIV mode";
			kfree(cipher_api);
			return -EINVAL;
		}
		ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
			       "essiv(%s(%s),%s)", chainmode, cipher, *ivopts);
	} else
		ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
			       "%s(%s)", chainmode, cipher);
	if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
		kfree(cipher_api);
		goto bad_mem;
	}

	/* Allocate cipher */
	ret = crypt_alloc_tfms(cc, cipher_api);
	if (ret < 0) {
		ti->error = "Error allocating crypto tfm";
		kfree(cipher_api);
		return ret;
	}
	kfree(cipher_api);

	return 0;
bad_mem:
	ti->error = "Cannot allocate cipher strings";
	return -ENOMEM;
}

static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key)
{
	struct crypt_config *cc = ti->private;
	char *ivmode = NULL, *ivopts = NULL;
	int ret;

	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
	if (!cc->cipher_string) {
		ti->error = "Cannot allocate cipher strings";
		return -ENOMEM;
	}

	if (strstarts(cipher_in, "capi:"))
		ret = crypt_ctr_cipher_new(ti, cipher_in, key, &ivmode, &ivopts);
	else
		ret = crypt_ctr_cipher_old(ti, cipher_in, key, &ivmode, &ivopts);
	if (ret)
		return ret;

	/* Initialize IV */
	ret = crypt_ctr_ivmode(ti, ivmode);
	if (ret < 0)
		return ret;

	/* Initialize and set key */
	ret = crypt_set_key(cc, key);
	if (ret < 0) {
		ti->error = "Error decoding and setting key";
		return ret;
	}

	/* Allocate IV */
	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
		if (ret < 0) {
			ti->error = "Error creating IV";
			return ret;
		}
	}

	/* Initialize IV (set keys for ESSIV etc) */
	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
		ret = cc->iv_gen_ops->init(cc);
		if (ret < 0) {
			ti->error = "Error initialising IV";
			return ret;
		}
	}

	/* wipe the kernel key payload copy */
	if (cc->key_string)
		memset(cc->key, 0, cc->key_size * sizeof(u8));

	return ret;
}

static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	struct dm_arg_set as;
	static const struct dm_arg _args[] = {
		{0, 8, "Invalid number of feature args"},
	};
	unsigned int opt_params, val;
	const char *opt_string, *sval;
	char dummy;
	int ret;

	/* Optional parameters */
	as.argc = argc;
	as.argv = argv;

	ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
	if (ret)
		return ret;

	while (opt_params--) {
		opt_string = dm_shift_arg(&as);
		if (!opt_string) {
			ti->error = "Not enough feature arguments";
			return -EINVAL;
		}

		if (!strcasecmp(opt_string, "allow_discards"))
			ti->num_discard_bios = 1;

		else if (!strcasecmp(opt_string, "same_cpu_crypt"))
			set_bit(DM_CRYPT_SAME_CPU, &cc->flags);

		else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
			set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
		else if (!strcasecmp(opt_string, "no_read_workqueue"))
			set_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
		else if (!strcasecmp(opt_string, "no_write_workqueue"))
			set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
		else if (sscanf(opt_string, "integrity:%u:", &val) == 1) {
			if (val == 0 || val > MAX_TAG_SIZE) {
				ti->error = "Invalid integrity arguments";
				return -EINVAL;
			}
			cc->on_disk_tag_size = val;
			sval = strchr(opt_string + strlen("integrity:"), ':') + 1;
			if (!strcasecmp(sval, "aead")) {
				set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
			} else if (strcasecmp(sval, "none")) {
				ti->error = "Unknown integrity profile";
				return -EINVAL;
			}

			cc->cipher_auth = kstrdup(sval, GFP_KERNEL);
			if (!cc->cipher_auth)
				return -ENOMEM;
		} else if (sscanf(opt_string, "sector_size:%hu%c", &cc->sector_size, &dummy) == 1) {
			if (cc->sector_size < (1 << SECTOR_SHIFT) ||
			    cc->sector_size > 4096 ||
			    (cc->sector_size & (cc->sector_size - 1))) {
				ti->error = "Invalid feature value for sector_size";
				return -EINVAL;
			}
			if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) {
				ti->error = "Device size is not multiple of sector_size feature";
				return -EINVAL;
			}
			cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
		} else if (!strcasecmp(opt_string, "iv_large_sectors"))
			set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
		else {
			ti->error = "Invalid feature arguments";
			return -EINVAL;
		}
	}

	return 0;
}

#ifdef CONFIG_BLK_DEV_ZONED
static int crypt_report_zones(struct dm_target *ti,
		struct dm_report_zones_args *args, unsigned int nr_zones)
{
	struct crypt_config *cc = ti->private;

	return dm_report_zones(cc->dev->bdev, cc->start,
			cc->start + dm_target_offset(ti, args->next_sector),
			args, nr_zones);
}
#else
#define crypt_report_zones NULL
#endif

/*
 * Construct an encryption mapping:
 * <cipher> [<key>|:<key_size>:<user|logon>:<key_description>] <iv_offset> <dev_path> <start>
 */
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	const char *devname = dm_table_device_name(ti->table);
	int key_size;
	unsigned int align_mask;
	unsigned long long tmpll;
	int ret;
	size_t iv_size_padding, additional_req_size;
	char dummy;

	if (argc < 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	key_size = get_key_size(&argv[1]);
	if (key_size < 0) {
		ti->error = "Cannot parse key size";
		return -EINVAL;
	}

	cc = kzalloc(struct_size(cc, key, key_size), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate encryption context";
		return -ENOMEM;
	}
	cc->key_size = key_size;
	cc->sector_size = (1 << SECTOR_SHIFT);
	cc->sector_shift = 0;

	ti->private = cc;

	spin_lock(&dm_crypt_clients_lock);
	dm_crypt_clients_n++;
	crypt_calculate_pages_per_client();
	spin_unlock(&dm_crypt_clients_lock);

	ret = percpu_counter_init(&cc->n_allocated_pages, 0, GFP_KERNEL);
	if (ret < 0)
		goto bad;

	/* Optional parameters need to be read before cipher constructor */
	if (argc > 5) {
		ret = crypt_ctr_optional(ti, argc - 5, &argv[5]);
		if (ret)
			goto bad;
	}

	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
	if (ret < 0)
		goto bad;

	if (crypt_integrity_aead(cc)) {
		cc->dmreq_start = sizeof(struct aead_request);
		cc->dmreq_start += crypto_aead_reqsize(any_tfm_aead(cc));
		align_mask = crypto_aead_alignmask(any_tfm_aead(cc));
	} else {
		cc->dmreq_start = sizeof(struct skcipher_request);
		cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
		align_mask = crypto_skcipher_alignmask(any_tfm(cc));
	}
	cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));

	if (align_mask < CRYPTO_MINALIGN) {
		/* Allocate the padding exactly */
		iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
				& align_mask;
	} else {
		/*
		 * If the cipher requires greater alignment than kmalloc
		 * alignment, we don't know the exact position of the
		 * initialization vector. We must assume worst case.
		 */
		iv_size_padding = align_mask;
	}

	/*  ...| IV + padding | original IV | original sec. number | bio tag offset |  */
	additional_req_size = sizeof(struct dm_crypt_request) +
		iv_size_padding + cc->iv_size +
		cc->iv_size +
		sizeof(uint64_t) +
		sizeof(unsigned int);

	ret = mempool_init_kmalloc_pool(&cc->req_pool, MIN_IOS, cc->dmreq_start + additional_req_size);
	if (ret) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}

	cc->per_bio_data_size = ti->per_io_data_size =
		ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size,
		      ARCH_KMALLOC_MINALIGN);

	ret = mempool_init(&cc->page_pool, BIO_MAX_VECS, crypt_page_alloc, crypt_page_free, cc);
	if (ret) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	ret = bioset_init(&cc->bs, MIN_IOS, 0, BIOSET_NEED_BVECS);
	if (ret) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	mutex_init(&cc->bio_alloc_lock);

	ret = -EINVAL;
	if ((sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) ||
	    (tmpll & ((cc->sector_size >> SECTOR_SHIFT) - 1))) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev);
	if (ret) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	ret = -EINVAL;
	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	if (bdev_is_zoned(cc->dev->bdev)) {
		/*
		 * For zoned block devices, we need to preserve the issuer write
		 * ordering. To do so, disable write workqueues and force inline
		 * encryption completion.
		 */
		set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
		set_bit(DM_CRYPT_WRITE_INLINE, &cc->flags);

		/*
		 * All zone append writes to a zone of a zoned block device will
		 * have the same BIO sector, the start of the zone. When the
		 * cipher IV mode uses sector values, all data targeting a
		 * zone will be encrypted using the first sector numbers of the
		 * zone. This will not result in write errors but will
		 * cause most reads to fail as reads will use the sector values
		 * for the actual data locations, resulting in IV mismatch.
		 * To avoid this problem, ask DM core to emulate zone append
		 * operations with regular writes.
		 */
		DMDEBUG("Zone append operations will be emulated");
		ti->emulate_zone_append = true;
	}

	if (crypt_integrity_aead(cc) || cc->integrity_iv_size) {
		ret = crypt_integrity_ctr(cc, ti);
		if (ret)
			goto bad;

		cc->tag_pool_max_sectors = POOL_ENTRY_SIZE / cc->on_disk_tag_size;
		if (!cc->tag_pool_max_sectors)
			cc->tag_pool_max_sectors = 1;

		ret = mempool_init_kmalloc_pool(&cc->tag_pool, MIN_IOS,
			cc->tag_pool_max_sectors * cc->on_disk_tag_size);
		if (ret) {
			ti->error = "Cannot allocate integrity tags mempool";
			goto bad;
		}

		cc->tag_pool_max_sectors <<= cc->sector_shift;
	}

	ret = -ENOMEM;
	cc->io_queue = alloc_workqueue("kcryptd_io/%s", WQ_MEM_RECLAIM, 1, devname);
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
		cc->crypt_queue = alloc_workqueue("kcryptd/%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
						  1, devname);
	else
		cc->crypt_queue = alloc_workqueue("kcryptd/%s",
						  WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
						  num_online_cpus(), devname);
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	spin_lock_init(&cc->write_thread_lock);
	cc->write_tree = RB_ROOT;

	cc->write_thread = kthread_run(dmcrypt_write, cc, "dmcrypt_write/%s", devname);
	if (IS_ERR(cc->write_thread)) {
		ret = PTR_ERR(cc->write_thread);
		cc->write_thread = NULL;
		ti->error = "Couldn't spawn write thread";
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->limit_swap_bios = true;
	ti->accounts_remapped_io = true;

	dm_audit_log_ctr(DM_MSG_PREFIX, ti, 1);
	return 0;

bad:
	dm_audit_log_ctr(DM_MSG_PREFIX, ti, 0);
	crypt_dtr(ti);
	return ret;
}

static int crypt_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc = ti->private;

	/*
	 * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
	 * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight
	 * - for REQ_OP_DISCARD caller must use flush if IO ordering matters
	 */
	if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
	    bio_op(bio) == REQ_OP_DISCARD)) {
		bio_set_dev(bio, cc->dev->bdev);
		if (bio_sectors(bio))
			bio->bi_iter.bi_sector = cc->start +
				dm_target_offset(ti, bio->bi_iter.bi_sector);
		return DM_MAPIO_REMAPPED;
	}

	/*
	 * Check if bio is too large, split as needed.
	 */
	if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_VECS << PAGE_SHIFT)) &&
	    (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size))
		dm_accept_partial_bio(bio, ((BIO_MAX_VECS << PAGE_SHIFT) >> SECTOR_SHIFT));

	/*
	 * Ensure that bio is a multiple of internal sector encryption size
	 * and is aligned to this size as defined in IO hints.
	 */
	if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
		return DM_MAPIO_KILL;

	if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
		return DM_MAPIO_KILL;

	io = dm_per_bio_data(bio, cc->per_bio_data_size);
	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));

	if (cc->on_disk_tag_size) {
		unsigned int tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);

		if (unlikely(tag_len > KMALLOC_MAX_SIZE))
			io->integrity_metadata = NULL;
		else
			io->integrity_metadata = kmalloc(tag_len, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);

		if (unlikely(!io->integrity_metadata)) {
			if (bio_sectors(bio) > cc->tag_pool_max_sectors)
				dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
			io->integrity_metadata = mempool_alloc(&cc->tag_pool, GFP_NOIO);
			io->integrity_metadata_from_pool = true;
		}
	}

	if (crypt_integrity_aead(cc))
		io->ctx.r.req_aead = (struct aead_request *)(io + 1);
	else
		io->ctx.r.req = (struct skcipher_request *)(io + 1);

	if (bio_data_dir(io->base_bio) == READ) {
		if (kcryptd_io_read(io, CRYPT_MAP_READ_GFP))
			kcryptd_queue_read(io);
	} else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}
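
/*
 * Convert a nibble (0-15) to its ASCII hex digit without branching; used by
 * crypt_status() to emit the key.
 */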
static char hex2asc(unsigned char c)
{
	return c + '0' + ((unsigned int)(9 - c) >> 4 & 0x27);
}

static void crypt_status(struct dm_target *ti, status_type_t type,
			 unsigned int status_flags, char *result, unsigned int maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned int i, sz = 0;
	int num_feature_args = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s ", cc->cipher_string);

		if (cc->key_size > 0) {
			if (cc->key_string)
				DMEMIT(":%u:%s", cc->key_size, cc->key_string);
			else {
				for (i = 0; i < cc->key_size; i++) {
					DMEMIT("%c%c", hex2asc(cc->key[i] >> 4),
					       hex2asc(cc->key[i] & 0xf));
				}
			}
		} else
			DMEMIT("-");

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
				cc->dev->name, (unsigned long long)cc->start);

		num_feature_args += !!ti->num_discard_bios;
		num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
		num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
		num_feature_args += test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
		num_feature_args += test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
		num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
		num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
		if (cc->on_disk_tag_size)
			num_feature_args++;
		if (num_feature_args) {
			DMEMIT(" %d", num_feature_args);
			if (ti->num_discard_bios)
				DMEMIT(" allow_discards");
			if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
				DMEMIT(" same_cpu_crypt");
			if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
				DMEMIT(" submit_from_crypt_cpus");
			if (test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags))
				DMEMIT(" no_read_workqueue");
			if (test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))
				DMEMIT(" no_write_workqueue");
			if (cc->on_disk_tag_size)
				DMEMIT(" integrity:%u:%s", cc->on_disk_tag_size, cc->cipher_auth);
			if (cc->sector_size != (1 << SECTOR_SHIFT))
				DMEMIT(" sector_size:%d", cc->sector_size);
			if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
				DMEMIT(" iv_large_sectors");
		}
		break;

	case STATUSTYPE_IMA:
		DMEMIT_TARGET_NAME_VERSION(ti->type);
		DMEMIT(",allow_discards=%c", ti->num_discard_bios ? 'y' : 'n');
		DMEMIT(",same_cpu_crypt=%c", test_bit(DM_CRYPT_SAME_CPU, &cc->flags) ? 'y' : 'n');
		DMEMIT(",submit_from_crypt_cpus=%c", test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags) ?
		       'y' : 'n');
		DMEMIT(",no_read_workqueue=%c", test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags) ?
		       'y' : 'n');
		DMEMIT(",no_write_workqueue=%c", test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags) ?
		       'y' : 'n');
		DMEMIT(",iv_large_sectors=%c", test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags) ?
		       'y' : 'n');

		if (cc->on_disk_tag_size)
			DMEMIT(",integrity_tag_size=%u,cipher_auth=%s",
			       cc->on_disk_tag_size, cc->cipher_auth);
		if (cc->sector_size != (1 << SECTOR_SHIFT))
			DMEMIT(",sector_size=%d", cc->sector_size);
		if (cc->cipher_string)
			DMEMIT(",cipher_string=%s", cc->cipher_string);

		DMEMIT(",key_size=%u", cc->key_size);
		DMEMIT(",key_parts=%u", cc->key_parts);
		DMEMIT(",key_extra_size=%u", cc->key_extra_size);
		DMEMIT(",key_mac_size=%u", cc->key_mac_size);
		DMEMIT(";");
		break;
	}
}

static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *	key set <key>
 *	key wipe
 */
static int crypt_message(struct dm_target *ti, unsigned int argc, char **argv,
			 char *result, unsigned int maxlen)
{
	struct crypt_config *cc = ti->private;
	int key_size, ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strcasecmp(argv[0], "key")) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strcasecmp(argv[1], "set")) {
			/* The key size may not be changed. */
			key_size = get_key_size(&argv[2]);
			if (key_size < 0 || cc->key_size != key_size) {
				memset(argv[2], '0', strlen(argv[2]));
				return -EINVAL;
			}

			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			/* wipe the kernel key payload copy */
			if (cc->key_string)
				memset(cc->key, 0, cc->key_size * sizeof(u8));
			return ret;
		}
		if (argc == 2 && !strcasecmp(argv[1], "wipe"))
			return crypt_wipe_key(cc);
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}

static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct crypt_config *cc = ti->private;

	/*
	 * Unfortunate constraint that is required to avoid the potential
	 * for exceeding underlying device's max_segments limits -- due to
	 * crypt_alloc_buffer() possibly allocating pages for the encryption
	 * bio that are not as physically contiguous as the original bio.
	 */
	limits->max_segment_size = PAGE_SIZE;

	limits->logical_block_size =
		max_t(unsigned int, limits->logical_block_size, cc->sector_size);
	limits->physical_block_size =
		max_t(unsigned int, limits->physical_block_size, cc->sector_size);
	limits->io_min = max_t(unsigned int, limits->io_min, cc->sector_size);
	limits->dma_alignment = limits->logical_block_size - 1;
}

static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 24, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.features = DM_TARGET_ZONED_HM,
	.report_zones = crypt_report_zones,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.iterate_devices = crypt_iterate_devices,
	.io_hints = crypt_io_hints,
};
module_dm(crypt);

MODULE_AUTHOR("Jana Saout <jana@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");