// SPDX-License-Identifier: GPL-2.0-or-later
/* XTS: as defined in IEEE1619/D16
 * http://grouper.ieee.org/groups/1619/email/pdf00086.pdf
 *
 * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/xts.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>

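/*
 * Per-transform context: the child skcipher performs the bulk ECB
 * pass with Key1, while the tweak cipher encrypts the IV with Key2 to
 * produce the initial tweak.
 */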
struct priv {
        struct crypto_skcipher *child;
        struct crypto_cipher *tweak;
};

struct xts_instance_ctx {
        struct crypto_skcipher_spawn spawn;
        char name[CRYPTO_MAX_ALG_NAME];
};

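/*
 * Per-request context: 't' is the current tweak, 'tail' and 'sg'
 * describe the last full block of the destination for ciphertext
 * stealing, and 'subreq' is the request handed to the child ECB
 * transform.
 */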
struct rctx {
        le128 t;
        struct scatterlist *tail;
        struct scatterlist sg[2];
        struct skcipher_request subreq;
};

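/*
 * The supplied key is the concatenation Key1 || Key2.  xts_verify_key()
 * rejects odd key lengths (and, in FIPS mode, identical halves) before
 * the key is split between the data and tweak ciphers.
 */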
static int setkey(struct crypto_skcipher *parent, const u8 *key,
                  unsigned int keylen)
{
        struct priv *ctx = crypto_skcipher_ctx(parent);
        struct crypto_skcipher *child;
        struct crypto_cipher *tweak;
        int err;

        err = xts_verify_key(parent, key, keylen);
        if (err)
                return err;

        keylen /= 2;

        /* we need two cipher instances: one to compute the initial 'tweak'
         * by encrypting the IV (usually the 'plain' iv) and the other
         * one to encrypt and decrypt the data */

        /* tweak cipher, uses Key2 i.e. the second half of *key */
        tweak = ctx->tweak;
        crypto_cipher_clear_flags(tweak, CRYPTO_TFM_REQ_MASK);
        crypto_cipher_set_flags(tweak, crypto_skcipher_get_flags(parent) &
                                       CRYPTO_TFM_REQ_MASK);
        err = crypto_cipher_setkey(tweak, key + keylen, keylen);
        if (err)
                return err;

        /* data cipher, uses Key1 i.e. the first half of *key */
        child = ctx->child;
        crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
                                         CRYPTO_TFM_REQ_MASK);
        return crypto_skcipher_setkey(child, key, keylen);
}

/*
 * We compute the tweak masks twice (both before and after the ECB encryption
 * or decryption) to avoid having to allocate a temporary buffer and/or make
 * multiple calls to the 'ecb(..)' instance, which usually would be slower
 * than just doing the gf128mul_x_ble() calls again.
 */
static int xor_tweak(struct skcipher_request *req, bool second_pass, bool enc)
{
        struct rctx *rctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        const bool cts = (req->cryptlen % XTS_BLOCK_SIZE);
        const int bs = XTS_BLOCK_SIZE;
        struct skcipher_walk w;
        le128 t = rctx->t;
        int err;

        if (second_pass) {
                req = &rctx->subreq;
                /* set to our TFM to enforce correct alignment: */
                skcipher_request_set_tfm(req, tfm);
        }
        err = skcipher_walk_virt(&w, req, false);

        while (w.nbytes) {
                unsigned int avail = w.nbytes;
                le128 *wsrc;
                le128 *wdst;

                wsrc = w.src.virt.addr;
                wdst = w.dst.virt.addr;

                do {
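                        /*
                         * CTS: the last full block before a partial tail
                         * needs special tweak handling.  For encryption,
                         * the tweak for the stolen tail block is saved for
                         * cts_final(); for decryption, the tweak order of
                         * the final two blocks is swapped, as the XTS
                         * ciphertext stealing construction requires.
                         */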
                        if (unlikely(cts) &&
                            w.total - w.nbytes + avail < 2 * XTS_BLOCK_SIZE) {
                                if (!enc) {
                                        if (second_pass)
                                                rctx->t = t;
                                        gf128mul_x_ble(&t, &t);
                                }
                                le128_xor(wdst, &t, wsrc);
                                if (enc && second_pass)
                                        gf128mul_x_ble(&rctx->t, &t);
                                skcipher_walk_done(&w, avail - bs);
                                return 0;
                        }

                        le128_xor(wdst++, &t, wsrc++);
                        gf128mul_x_ble(&t, &t);
                } while ((avail -= bs) >= bs);

                err = skcipher_walk_done(&w, avail);
        }

        return err;
}

static int xor_tweak_pre(struct skcipher_request *req, bool enc)
{
        return xor_tweak(req, false, enc);
}

static int xor_tweak_post(struct skcipher_request *req, bool enc)
{
        return xor_tweak(req, true, enc);
}

static void cts_done(struct crypto_async_request *areq, int err)
{
        struct skcipher_request *req = areq->data;
        le128 b;

        if (!err) {
                struct rctx *rctx = skcipher_request_ctx(req);

                scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
                le128_xor(&b, &rctx->t, &b);
                scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);
        }

        skcipher_request_complete(req, err);
}

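/*
 * Ciphertext stealing for a trailing partial block: pad the tail with
 * the leading bytes of the last full block's output and write the
 * padded block back in place of the last full block (the displaced
 * bytes become the new tail), then run it through the child cipher
 * with the tweak saved in rctx->t applied before and after.
 */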
static int cts_final(struct skcipher_request *req,
                     int (*crypt)(struct skcipher_request *req))
{
        struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
        int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1);
        struct rctx *rctx = skcipher_request_ctx(req);
        struct skcipher_request *subreq = &rctx->subreq;
        int tail = req->cryptlen % XTS_BLOCK_SIZE;
        le128 b[2];
        int err;

        rctx->tail = scatterwalk_ffwd(rctx->sg, req->dst,
                                      offset - XTS_BLOCK_SIZE);

        scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
        memcpy(b + 1, b, tail);
        scatterwalk_map_and_copy(b, req->src, offset, tail, 0);

        le128_xor(b, &rctx->t, b);

        scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE + tail, 1);

        skcipher_request_set_tfm(subreq, ctx->child);
        skcipher_request_set_callback(subreq, req->base.flags, cts_done, req);
        skcipher_request_set_crypt(subreq, rctx->tail, rctx->tail,
                                   XTS_BLOCK_SIZE, NULL);

        err = crypt(subreq);
        if (err)
                return err;

        scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
        le128_xor(b, &rctx->t, b);
        scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);

        return 0;
}

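/*
 * Completion handlers for the async case: once the child ECB request
 * finishes, run the second tweak pass (and ciphertext stealing, if the
 * length is not block-aligned).  CRYPTO_TFM_REQ_MAY_SLEEP is cleared
 * because the completion may run in atomic (softirq) context.
 */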
static void encrypt_done(struct crypto_async_request *areq, int err)
{
        struct skcipher_request *req = areq->data;

        if (!err) {
                struct rctx *rctx = skcipher_request_ctx(req);

                rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
                err = xor_tweak_post(req, true);

                if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
                        err = cts_final(req, crypto_skcipher_encrypt);
                        if (err == -EINPROGRESS)
                                return;
                }
        }

        skcipher_request_complete(req, err);
}

static void decrypt_done(struct crypto_async_request *areq, int err)
{
        struct skcipher_request *req = areq->data;

        if (!err) {
                struct rctx *rctx = skcipher_request_ctx(req);

                rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
                err = xor_tweak_post(req, false);

                if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
                        err = cts_final(req, crypto_skcipher_decrypt);
                        if (err == -EINPROGRESS)
                                return;
                }
        }

        skcipher_request_complete(req, err);
}

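/*
 * Common setup: reject requests shorter than one block (XTS needs at
 * least one full block, even with ciphertext stealing), point the
 * child ECB request at the full-block portion of req->dst in place,
 * and compute the initial tweak T by encrypting the IV with Key2.
 */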
static int init_crypt(struct skcipher_request *req, crypto_completion_t compl)
{
        struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
        struct rctx *rctx = skcipher_request_ctx(req);
        struct skcipher_request *subreq = &rctx->subreq;

        if (req->cryptlen < XTS_BLOCK_SIZE)
                return -EINVAL;

        skcipher_request_set_tfm(subreq, ctx->child);
        skcipher_request_set_callback(subreq, req->base.flags, compl, req);
        skcipher_request_set_crypt(subreq, req->dst, req->dst,
                                   req->cryptlen & ~(XTS_BLOCK_SIZE - 1), NULL);

        /* calculate first value of T */
        crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);

        return 0;
}

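/*
 * encrypt() and decrypt() chain the steps with the ?: operator,
 * stopping at the first nonzero return: first tweak pass (src -> dst),
 * child ECB pass on dst in place, second tweak pass.  An -EINPROGRESS
 * or -EBUSY return from the child propagates to the caller and the
 * remaining work is finished in the completion handlers above.
 */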
static int encrypt(struct skcipher_request *req)
{
        struct rctx *rctx = skcipher_request_ctx(req);
        struct skcipher_request *subreq = &rctx->subreq;
        int err;

        err = init_crypt(req, encrypt_done) ?:
              xor_tweak_pre(req, true) ?:
              crypto_skcipher_encrypt(subreq) ?:
              xor_tweak_post(req, true);

        if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
                return err;

        return cts_final(req, crypto_skcipher_encrypt);
}

static int decrypt(struct skcipher_request *req)
{
        struct rctx *rctx = skcipher_request_ctx(req);
        struct skcipher_request *subreq = &rctx->subreq;
        int err;

        err = init_crypt(req, decrypt_done) ?:
              xor_tweak_pre(req, false) ?:
              crypto_skcipher_decrypt(subreq) ?:
              xor_tweak_post(req, false);

        if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
                return err;

        return cts_final(req, crypto_skcipher_decrypt);
}

static int init_tfm(struct crypto_skcipher *tfm)
{
        struct skcipher_instance *inst = skcipher_alg_instance(tfm);
        struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
        struct priv *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_skcipher *child;
        struct crypto_cipher *tweak;

        child = crypto_spawn_skcipher(&ictx->spawn);
        if (IS_ERR(child))
                return PTR_ERR(child);

        ctx->child = child;

        tweak = crypto_alloc_cipher(ictx->name, 0, 0);
        if (IS_ERR(tweak)) {
                crypto_free_skcipher(ctx->child);
                return PTR_ERR(tweak);
        }

        ctx->tweak = tweak;

        crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
                                         sizeof(struct rctx));

        return 0;
}

static void exit_tfm(struct crypto_skcipher *tfm)
{
        struct priv *ctx = crypto_skcipher_ctx(tfm);

        crypto_free_skcipher(ctx->child);
        crypto_free_cipher(ctx->tweak);
}

static void free(struct skcipher_instance *inst)
{
        crypto_drop_skcipher(skcipher_instance_ctx(inst));
        kfree(inst);
}

static int create(struct crypto_template *tmpl, struct rtattr **tb)
{
        struct skcipher_instance *inst;
        struct crypto_attr_type *algt;
        struct xts_instance_ctx *ctx;
        struct skcipher_alg *alg;
        const char *cipher_name;
        u32 mask;
        int err;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return PTR_ERR(algt);

        if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
                return -EINVAL;

        cipher_name = crypto_attr_alg_name(tb[1]);
        if (IS_ERR(cipher_name))
                return PTR_ERR(cipher_name);

        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;

        ctx = skcipher_instance_ctx(inst);

        mask = crypto_requires_off(algt->type, algt->mask,
                                   CRYPTO_ALG_NEED_FALLBACK |
                                   CRYPTO_ALG_ASYNC);

        err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
                                   cipher_name, 0, mask);
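        /*
         * If cipher_name does not name an skcipher (e.g. it is a bare
         * block cipher such as "aes"), retry with an "ecb(...)"
         * wrapper around it.
         */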
        if (err == -ENOENT) {
                err = -ENAMETOOLONG;
                if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
                             cipher_name) >= CRYPTO_MAX_ALG_NAME)
                        goto err_free_inst;

                err = crypto_grab_skcipher(&ctx->spawn,
                                           skcipher_crypto_instance(inst),
                                           ctx->name, 0, mask);
        }

        if (err)
                goto err_free_inst;

        alg = crypto_skcipher_spawn_alg(&ctx->spawn);

        err = -EINVAL;
        if (alg->base.cra_blocksize != XTS_BLOCK_SIZE)
                goto err_free_inst;

        if (crypto_skcipher_alg_ivsize(alg))
                goto err_free_inst;

        err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts",
                                  &alg->base);
        if (err)
                goto err_free_inst;

        err = -EINVAL;
        cipher_name = alg->base.cra_name;

        /* Alas we screwed up the naming so we have to mangle the
         * cipher name.
         */
        if (!strncmp(cipher_name, "ecb(", 4)) {
                unsigned len;

                len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
                if (len < 2 || len >= sizeof(ctx->name))
                        goto err_free_inst;

                if (ctx->name[len - 1] != ')')
                        goto err_free_inst;

                ctx->name[len - 1] = 0;

                if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
                             "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
                        err = -ENAMETOOLONG;
                        goto err_free_inst;
                }
        } else
                goto err_free_inst;

        inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
        inst->alg.base.cra_priority = alg->base.cra_priority;
        inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE;
        inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
                                       (__alignof__(u64) - 1);

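        /*
         * An XTS key is two keys for the underlying cipher, so the
         * instance advertises twice the child's key sizes.
         */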
        inst->alg.ivsize = XTS_BLOCK_SIZE;
        inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2;
        inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2;

        inst->alg.base.cra_ctxsize = sizeof(struct priv);

        inst->alg.init = init_tfm;
        inst->alg.exit = exit_tfm;

        inst->alg.setkey = setkey;
        inst->alg.encrypt = encrypt;
        inst->alg.decrypt = decrypt;

        inst->free = free;

        err = skcipher_register_instance(tmpl, inst);
        if (err) {
err_free_inst:
                free(inst);
        }
        return err;
}

static struct crypto_template crypto_tmpl = {
        .name = "xts",
        .create = create,
        .module = THIS_MODULE,
};

static int __init crypto_module_init(void)
{
        return crypto_register_template(&crypto_tmpl);
}

static void __exit crypto_module_exit(void)
{
        crypto_unregister_template(&crypto_tmpl);
}

subsys_initcall(crypto_module_init);
module_exit(crypto_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XTS block cipher mode");
MODULE_ALIAS_CRYPTO("xts");