// SPDX-License-Identifier: GPL-2.0-or-later
/* XTS: as defined in IEEE1619/D16
 * http://grouper.ieee.org/groups/1619/email/pdf00086.pdf
 *
 * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/xts.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>

struct priv {
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;
};

struct xts_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	char name[CRYPTO_MAX_ALG_NAME];
};

struct rctx {
	le128 t;
	struct scatterlist *tail;
	struct scatterlist sg[2];
	struct skcipher_request subreq;
};

static int setkey(struct crypto_skcipher *parent, const u8 *key,
		  unsigned int keylen)
{
	struct priv *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;
	int err;

	err = xts_verify_key(parent, key, keylen);
	if (err)
		return err;

	keylen /= 2;

	/* we need two cipher instances: one to compute the initial 'tweak'
	 * by encrypting the IV (usually the 'plain' iv) and the other
	 * one to encrypt and decrypt the data */

	/* tweak cipher, uses Key2 i.e. the second half of *key */
	tweak = ctx->tweak;
	crypto_cipher_clear_flags(tweak, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(tweak, crypto_skcipher_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(tweak, key + keylen, keylen);
	if (err)
		return err;

	/* data cipher, uses Key1 i.e. the first half of *key */
	child = ctx->child;
	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(child, key, keylen);
}

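/*
 * Example (sketch): for "xts(aes)" with a 64-byte key, xts_verify_key()
 * accepts the key, keylen becomes 32, and the two halves serve as
 * independent AES-256 keys:
 *
 *	Key1 = key[0..31]   -> ctx->child (the ecb(aes) data cipher)
 *	Key2 = key[32..63]  -> ctx->tweak (used only to encrypt the IV)
 */
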
/*
 * We compute the tweak masks twice (both before and after the ECB encryption or
 * decryption) to avoid having to allocate a temporary buffer and/or make
 * multiple calls to the 'ecb(..)' instance, which usually would be slower than
 * just doing the gf128mul_x_ble() calls again.
 */
static int xor_tweak(struct skcipher_request *req, bool second_pass, bool enc)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const bool cts = (req->cryptlen % XTS_BLOCK_SIZE);
	const int bs = XTS_BLOCK_SIZE;
	struct skcipher_walk w;
	le128 t = rctx->t;
	int err;

	if (second_pass) {
		req = &rctx->subreq;
		/* set to our TFM to enforce correct alignment: */
		skcipher_request_set_tfm(req, tfm);
	}
	err = skcipher_walk_virt(&w, req, false);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		le128 *wsrc;
		le128 *wdst;

		wsrc = w.src.virt.addr;
		wdst = w.dst.virt.addr;

		do {
			if (unlikely(cts) &&
			    w.total - w.nbytes + avail < 2 * XTS_BLOCK_SIZE) {
				if (!enc) {
					if (second_pass)
						rctx->t = t;
					gf128mul_x_ble(&t, &t);
				}
				le128_xor(wdst, &t, wsrc);
				if (enc && second_pass)
					gf128mul_x_ble(&rctx->t, &t);
				skcipher_walk_done(&w, avail - bs);
				return 0;
			}

			le128_xor(wdst++, &t, wsrc++);
			gf128mul_x_ble(&t, &t);
		} while ((avail -= bs) >= bs);

		err = skcipher_walk_done(&w, avail);
	}

	return err;
}

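/*
 * Sketch of the resulting three-pass flow for an aligned (non-CTS)
 * request, where T(i) = T * x^i in GF(2^128) via gf128mul_x_ble():
 *
 *	pre pass:   PP(i) = P(i) ^ T(i)		(xor_tweak_pre)
 *	ecb pass:   CC(i) = E_Key1(PP(i))	(the 'ecb(..)' subrequest)
 *	post pass:  C(i)  = CC(i) ^ T(i)	(xor_tweak_post)
 *
 * which is exactly the per-block XTS formula; the tweaks are simply
 * recomputed in the post pass instead of being buffered.
 */
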
static int xor_tweak_pre(struct skcipher_request *req, bool enc)
{
	return xor_tweak(req, false, enc);
}

static int xor_tweak_post(struct skcipher_request *req, bool enc)
{
	return xor_tweak(req, true, enc);
}

static void cts_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;
	le128 b;

	if (!err) {
		struct rctx *rctx = skcipher_request_ctx(req);

		scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
		le128_xor(&b, &rctx->t, &b);
		scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);
	}

	skcipher_request_complete(req, err);
}

static int cts_final(struct skcipher_request *req,
		     int (*crypt)(struct skcipher_request *req))
{
	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1);
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;
	int tail = req->cryptlen % XTS_BLOCK_SIZE;
	le128 b[2];
	int err;

	rctx->tail = scatterwalk_ffwd(rctx->sg, req->dst,
				      offset - XTS_BLOCK_SIZE);

	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
	memcpy(b + 1, b, tail);
	scatterwalk_map_and_copy(b, req->src, offset, tail, 0);

	le128_xor(b, &rctx->t, b);

	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE + tail, 1);

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_callback(subreq, req->base.flags, cts_done, req);
	skcipher_request_set_crypt(subreq, rctx->tail, rctx->tail,
				   XTS_BLOCK_SIZE, NULL);

	err = crypt(subreq);
	if (err)
		return err;

	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
	le128_xor(b, &rctx->t, b);
	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);

	return 0;
}

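/*
 * Ciphertext stealing sketch: for req->cryptlen == 24, offset is 16 and
 * tail is 8.  b[1] steals the first 8 ciphertext bytes of the last full
 * block to become the final partial output, while b[0] refills that
 * block with the 8 remaining plaintext bytes plus the leftover stolen
 * ciphertext, to be tweaked and encrypted once more as a full block.
 */
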
static void encrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;

	if (!err) {
		struct rctx *rctx = skcipher_request_ctx(req);

		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
		err = xor_tweak_post(req, true);

		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
			err = cts_final(req, crypto_skcipher_encrypt);
			if (err == -EINPROGRESS)
				return;
		}
	}

	skcipher_request_complete(req, err);
}

static void decrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;

	if (!err) {
		struct rctx *rctx = skcipher_request_ctx(req);

		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
		err = xor_tweak_post(req, false);

		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
			err = cts_final(req, crypto_skcipher_decrypt);
			if (err == -EINPROGRESS)
				return;
		}
	}

	skcipher_request_complete(req, err);
}

static int init_crypt(struct skcipher_request *req, crypto_completion_t compl)
{
	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	if (req->cryptlen < XTS_BLOCK_SIZE)
		return -EINVAL;

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_callback(subreq, req->base.flags, compl, req);
	skcipher_request_set_crypt(subreq, req->dst, req->dst,
				   req->cryptlen & ~(XTS_BLOCK_SIZE - 1), NULL);

	/* calculate first value of T */
	crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);

	return 0;
}

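/*
 * The initial tweak is the single-block encryption of the IV under
 * Key2: T(0) = E_Key2(IV).  For disk encryption the IV is typically
 * the little-endian sector or logical block number.
 */
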
static int encrypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;
	int err;

	err = init_crypt(req, encrypt_done) ?:
	      xor_tweak_pre(req, true) ?:
	      crypto_skcipher_encrypt(subreq) ?:
	      xor_tweak_post(req, true);

	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
		return err;

	return cts_final(req, crypto_skcipher_encrypt);
}

static int decrypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;
	int err;

	err = init_crypt(req, decrypt_done) ?:
	      xor_tweak_pre(req, false) ?:
	      crypto_skcipher_decrypt(subreq) ?:
	      xor_tweak_post(req, false);

	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
		return err;

	return cts_final(req, crypto_skcipher_decrypt);
}

static int init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct priv *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;

	child = crypto_spawn_skcipher(&ictx->spawn);
	if (IS_ERR(child))
		return PTR_ERR(child);

	ctx->child = child;

	tweak = crypto_alloc_cipher(ictx->name, 0, 0);
	if (IS_ERR(tweak)) {
		crypto_free_skcipher(ctx->child);
		return PTR_ERR(tweak);
	}

	ctx->tweak = tweak;

	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
					 sizeof(struct rctx));

	return 0;
}

static void exit_tfm(struct crypto_skcipher *tfm)
{
	struct priv *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->child);
	crypto_free_cipher(ctx->tweak);
}

static void free_inst(struct skcipher_instance *inst)
{
	crypto_drop_skcipher(skcipher_instance_ctx(inst));
	kfree(inst);
}

static int create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct skcipher_instance *inst;
	struct crypto_attr_type *algt;
	struct xts_instance_ctx *ctx;
	struct skcipher_alg *alg;
	const char *cipher_name;
	u32 mask;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
		return -EINVAL;

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return PTR_ERR(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);

	mask = crypto_requires_off(algt->type, algt->mask,
				   CRYPTO_ALG_NEED_FALLBACK |
				   CRYPTO_ALG_ASYNC);

	err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
				   cipher_name, 0, mask);
	if (err == -ENOENT) {
		err = -ENAMETOOLONG;
		if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		err = crypto_grab_skcipher(&ctx->spawn,
					   skcipher_crypto_instance(inst),
					   ctx->name, 0, mask);
	}

	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(&ctx->spawn);

	err = -EINVAL;
	if (alg->base.cra_blocksize != XTS_BLOCK_SIZE)
		goto err_free_inst;

	if (crypto_skcipher_alg_ivsize(alg))
		goto err_free_inst;

	err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts",
				  &alg->base);
	if (err)
		goto err_free_inst;

	err = -EINVAL;
	cipher_name = alg->base.cra_name;

	/* Alas we screwed up the naming so we have to mangle the
	 * cipher name.
	 */
	if (!strncmp(cipher_name, "ecb(", 4)) {
		unsigned len;

		len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
		if (len < 2 || len >= sizeof(ctx->name))
			goto err_free_inst;

		if (ctx->name[len - 1] != ')')
			goto err_free_inst;

		ctx->name[len - 1] = 0;

		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
			err = -ENAMETOOLONG;
			goto err_free_inst;
		}
	} else
		goto err_free_inst;

	inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE;
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
				       (__alignof__(u64) - 1);

	inst->alg.ivsize = XTS_BLOCK_SIZE;
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2;
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2;

	inst->alg.base.cra_ctxsize = sizeof(struct priv);

	inst->alg.init = init_tfm;
	inst->alg.exit = exit_tfm;

	inst->alg.setkey = setkey;
	inst->alg.encrypt = encrypt;
	inst->alg.decrypt = decrypt;

	inst->free = free_inst;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		free_inst(inst);
	}
	return err;
}

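/*
 * Naming sketch: a request for "xts(aes)" first tries to grab "aes" as
 * an skcipher and, failing that, "ecb(aes)".  Since the resolved
 * cra_name is then "ecb(aes)", the "ecb(" prefix and ")" suffix are
 * stripped above so the instance registers as "xts(aes)" rather than
 * "xts(ecb(aes))".
 */
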
static struct crypto_template crypto_tmpl = {
	.name = "xts",
	.create = create,
	.module = THIS_MODULE,
};

static int __init crypto_module_init(void)
{
	return crypto_register_template(&crypto_tmpl);
}

static void __exit crypto_module_exit(void)
{
	crypto_unregister_template(&crypto_tmpl);
}

subsys_initcall(crypto_module_init);
module_exit(crypto_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XTS block cipher mode");
MODULE_ALIAS_CRYPTO("xts");
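
/*
 * Usage sketch (error handling omitted; assumes an AES implementation
 * is available so the template can build "xts(aes)"):
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, 64);	// key = Key1 || Key2
 *
 * then drive it with an skcipher_request carrying a 16-byte IV (the
 * tweak input) and cryptlen >= XTS_BLOCK_SIZE, via
 * crypto_skcipher_encrypt()/crypto_skcipher_decrypt(), and finally
 * crypto_free_skcipher(tfm).
 */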