/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"
struct ahash_request_priv {
        crypto_completion_t complete;   /* ORIGINAL completion callback */
        void *data;                     /* ORIGINAL callback data */
        u8 *result;                     /* ORIGINAL (possibly unaligned) result buffer */
        u32 flags;                      /* ORIGINAL request flags */
        void *ubuf[] CRYPTO_MINALIGN_ATTR;      /* backing store for the aligned result */
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
        return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
                            halg);
}

static int hash_walk_next(struct crypto_hash_walk *walk)
{
        unsigned int alignmask = walk->alignmask;
        unsigned int offset = walk->offset;
        unsigned int nbytes = min(walk->entrylen,
                                  ((unsigned int)(PAGE_SIZE)) - offset);

        if (walk->flags & CRYPTO_ALG_ASYNC)
                walk->data = kmap(walk->pg);
        else
                walk->data = kmap_atomic(walk->pg);
        walk->data += offset;

        if (offset & alignmask) {
                unsigned int unaligned = alignmask + 1 - (offset & alignmask);

                if (nbytes > unaligned)
                        nbytes = unaligned;
        }

        walk->entrylen -= nbytes;
        return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
        struct scatterlist *sg;

        sg = walk->sg;
        walk->offset = sg->offset;
        walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
        walk->offset = offset_in_page(walk->offset);
        walk->entrylen = sg->length;

        if (walk->entrylen > walk->total)
                walk->entrylen = walk->total;
        walk->total -= walk->entrylen;

        return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
        unsigned int alignmask = walk->alignmask;

        walk->data -= walk->offset;

        if (walk->entrylen && (walk->offset & alignmask) && !err) {
                unsigned int nbytes;

                walk->offset = ALIGN(walk->offset, alignmask + 1);
                nbytes = min(walk->entrylen,
                             (unsigned int)(PAGE_SIZE - walk->offset));
                if (nbytes) {
                        walk->entrylen -= nbytes;
                        walk->data += walk->offset;
                        return nbytes;
                }
        }

        if (walk->flags & CRYPTO_ALG_ASYNC)
                kunmap(walk->pg);
        else {
                kunmap_atomic(walk->data);
                /*
                 * The may-sleep test only makes sense for sync users;
                 * async users don't need to sleep here anyway.
                 */
                crypto_yield(walk->flags);
        }

        if (err)
                return err;

        if (walk->entrylen) {
                walk->offset = 0;
                walk->pg++;
                return hash_walk_next(walk);
        }

        if (!walk->total)
                return 0;

        walk->sg = sg_next(walk->sg);

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
                           struct crypto_hash_walk *walk)
{
        walk->total = req->nbytes;

        if (!walk->total) {
                walk->entrylen = 0;
                return 0;
        }

        walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
        walk->sg = req->src;
        walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
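
/*
 * Typical walk usage (illustrative sketch only; shash_ahash_update() in
 * crypto/shash.c follows this pattern, where desc is the caller's
 * struct shash_desc):
 *
 *	struct crypto_hash_walk walk;
 *	int nbytes;
 *
 *	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *	     nbytes = crypto_hash_walk_done(&walk, nbytes))
 *		nbytes = crypto_shash_update(desc, walk.data, nbytes);
 *
 * A positive return value is the number of mapped, alignment-adjusted
 * bytes available at walk.data; zero means the walk is complete, and a
 * negative value is an error passed through from the step above.
 */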

int crypto_ahash_walk_first(struct ahash_request *req,
                            struct crypto_hash_walk *walk)
{
        walk->total = req->nbytes;

        if (!walk->total) {
                walk->entrylen = 0;
                return 0;
        }

        walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
        walk->sg = req->src;
        walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
        walk->flags |= CRYPTO_ALG_ASYNC;

        BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);

static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
                                  unsigned int keylen)
{
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        int ret;
        u8 *buffer, *alignbuffer;
        unsigned long absize;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = tfm->setkey(tfm, alignbuffer, keylen);
        kzfree(buffer);
        return ret;
}

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
                          unsigned int keylen)
{
        return -ENOSYS;
}

static void ahash_set_needkey(struct crypto_ahash *tfm)
{
        const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);

        if (tfm->setkey != ahash_nosetkey &&
            !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
                crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
                        unsigned int keylen)
{
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        int err;

        if ((unsigned long)key & alignmask)
                err = ahash_setkey_unaligned(tfm, key, keylen);
        else
                err = tfm->setkey(tfm, key, keylen);

        if (unlikely(err)) {
                ahash_set_needkey(tfm);
                return err;
        }

        crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
        return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
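
/*
 * Example (illustrative sketch, error handling trimmed): keying an HMAC
 * transform before issuing requests; key and keylen are the caller's.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_ahash_setkey(tfm, key, keylen);
 *
 * Until a key has been accepted, a transform flagged CRYPTO_TFM_NEED_KEY
 * will refuse crypto_ahash_digest() with -ENOKEY.
 */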

static inline unsigned int ahash_align_buffer_size(unsigned len,
                                                   unsigned long mask)
{
        return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}
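
/*
 * Worked example of the sizing above, assuming an architecture where
 * crypto_tfm_ctx_alignment() is 8: with an algorithm alignmask of 63,
 * the slack is 63 & ~7 == 56 bytes.  Since the buffer start is already
 * 8-byte aligned, len + 56 bytes always suffices to carve out a 64-byte
 * aligned region of len bytes.
 */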

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        unsigned int ds = crypto_ahash_digestsize(tfm);
        struct ahash_request_priv *priv;

        priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
                       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC);
        if (!priv)
                return -ENOMEM;

        /*
         * WARNING: Voodoo programming below!
         *
         * The code below is obscure and hard to understand, thus explanation
         * is necessary. See include/crypto/hash.h and include/linux/crypto.h
         * to understand the layout of structures used here!
         *
         * The code here will replace portions of the ORIGINAL request with
         * pointers to new code and buffers so the hashing operation can store
         * the result in an aligned buffer. We will call the modified request
         * an ADJUSTED request.
         *
         * The newly mangled request will look as such:
         *
         * req {
         *	.result		= ADJUSTED[new aligned buffer]
         *	.base.complete	= ADJUSTED[pointer to completion function]
         *	.base.data	= ADJUSTED[*req (pointer to self)]
         *	.priv		= ADJUSTED[new priv] {
         *		.result   = ORIGINAL(result)
         *		.complete = ORIGINAL(base.complete)
         *		.data     = ORIGINAL(base.data)
         *	}
         * }
         */

        priv->result = req->result;
        priv->complete = req->base.complete;
        priv->data = req->base.data;
        priv->flags = req->base.flags;

        /*
         * WARNING: We do not backup req->priv here! The req->priv
         * is for internal use of the Crypto API and the
         * user must _NOT_ _EVER_ depend on its content!
         */

        req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
        req->base.complete = cplt;
        req->base.data = req;
        req->priv = priv;

        return 0;
}

static void ahash_restore_req(struct ahash_request *req, int err)
{
        struct ahash_request_priv *priv = req->priv;

        if (!err)
                memcpy(priv->result, req->result,
                       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

        /* Restore the original crypto request. */
        req->result = priv->result;

        ahash_request_set_callback(req, priv->flags,
                                   priv->complete, priv->data);
        req->priv = NULL;

        /* Free the req->priv.priv from the ADJUSTED request. */
        kzfree(priv);
}

static void ahash_notify_einprogress(struct ahash_request *req)
{
        struct ahash_request_priv *priv = req->priv;
        struct crypto_async_request oreq;

        oreq.data = priv->data;

        priv->complete(&oreq, -EINPROGRESS);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        if (err == -EINPROGRESS) {
                ahash_notify_einprogress(areq);
                return;
        }

        /*
         * Restore the original request, see ahash_op_unaligned() for what
         * goes where.
         *
         * The "struct crypto_async_request *req" here is in fact the
         * "req.base" from the ADJUSTED request in ahash_op_unaligned(),
         * and since that is a pointer to self, it is also the ADJUSTED
         * "req".
         */

        /* First copy req->result into req->priv.result */
        ahash_restore_req(areq, err);

        /* Complete the ORIGINAL request. */
        areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
                              int (*op)(struct ahash_request *))
{
        int err;

        err = ahash_save_req(req, ahash_op_unaligned_done);
        if (err)
                return err;

        err = op(req);
        if (err == -EINPROGRESS ||
            (err == -EBUSY && (ahash_request_flags(req) &
                               CRYPTO_TFM_REQ_MAY_BACKLOG)))
                return err;

        ahash_restore_req(req, err);

        return err;
}

static int crypto_ahash_op(struct ahash_request *req,
                           int (*op)(struct ahash_request *))
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        unsigned long alignmask = crypto_ahash_alignmask(tfm);

        if ((unsigned long)req->result & alignmask)
                return ahash_op_unaligned(req, op);

        return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
        return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
        return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

        if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
                return -ENOKEY;

        return crypto_ahash_op(req, tfm->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
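
/*
 * Example (illustrative sketch, error handling trimmed): a one-shot
 * digest over a scatterlist.  sg, nbytes, result, my_complete and
 * my_data are all the caller's.
 *
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	if (!req)
 *		return -ENOMEM;
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_complete, my_data);
 *	ahash_request_set_crypt(req, sg, result, nbytes);
 *	err = crypto_ahash_digest(req);
 *
 * -EINPROGRESS (or -EBUSY with MAY_BACKLOG set) means my_complete() will
 * be invoked with the final status once the operation finishes.
 */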

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        if (err == -EINPROGRESS)
                return;

        ahash_restore_req(areq, err);

        areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
        if (err)
                goto out;

        req->base.complete = ahash_def_finup_done2;

        err = crypto_ahash_reqtfm(req)->final(req);
        if (err == -EINPROGRESS ||
            (err == -EBUSY && (ahash_request_flags(req) &
                               CRYPTO_TFM_REQ_MAY_BACKLOG)))
                return err;

out:
        ahash_restore_req(req, err);
        return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        if (err == -EINPROGRESS) {
                ahash_notify_einprogress(areq);
                return;
        }

        areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        err = ahash_def_finup_finish1(areq, err);
        if (areq->priv)
                return;

        areq->base.complete(&areq->base, err);
}

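/*
 * Default finup(): run update() and final() back to back.  The completion
 * chain for the asynchronous case is ahash_def_finup_done1() (update
 * finished) -> ahash_def_finup_finish1() (kick off final) ->
 * ahash_def_finup_done2() (final finished; restore and complete the
 * ORIGINAL request).
 */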
static int ahash_def_finup(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        int err;

        err = ahash_save_req(req, ahash_def_finup_done1);
        if (err)
                return err;

        err = tfm->update(req);
        if (err == -EINPROGRESS ||
            (err == -EBUSY && (ahash_request_flags(req) &
                               CRYPTO_TFM_REQ_MAY_BACKLOG)))
                return err;

        return ahash_def_finup_finish1(req, err);
}

static int ahash_no_export(struct ahash_request *req, void *out)
{
        return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
        return -ENOSYS;
}

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
        struct ahash_alg *alg = crypto_ahash_alg(hash);

        hash->setkey = ahash_nosetkey;
        hash->export = ahash_no_export;
        hash->import = ahash_no_import;

        if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
                return crypto_init_shash_ops_async(tfm);

        hash->init = alg->init;
        hash->update = alg->update;
        hash->final = alg->final;
        hash->finup = alg->finup ?: ahash_def_finup;
        hash->digest = alg->digest;

        if (alg->setkey) {
                hash->setkey = alg->setkey;
                ahash_set_needkey(hash);
        }
        if (alg->export)
                hash->export = alg->export;
        if (alg->import)
                hash->import = alg->import;

        return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
        if (alg->cra_type != &crypto_ahash_type)
                return sizeof(struct crypto_shash *);

        return crypto_alg_extsize(alg);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_hash rhash;

        strncpy(rhash.type, "ahash", sizeof(rhash.type));

        rhash.blocksize = alg->cra_blocksize;
        rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

        if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
                    sizeof(struct crypto_report_hash), &rhash))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
        __attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
        seq_printf(m, "type         : ahash\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "digestsize   : %u\n",
                   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
        .extsize = crypto_ahash_extsize,
        .init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
        .show = crypto_ahash_show,
#endif
        .report = crypto_ahash_report,
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
        .type = CRYPTO_ALG_TYPE_AHASH,
        .tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
                                        u32 mask)
{
        return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);
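
/*
 * Example (illustrative sketch): allocating and releasing a transform by
 * algorithm name; a (type, mask) pair of (0, 0) accepts any implementation.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_ahash(tfm);
 */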

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
        return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
        struct crypto_alg *base = &alg->halg.base;

        if (alg->halg.digestsize > PAGE_SIZE / 8 ||
            alg->halg.statesize > PAGE_SIZE / 8 ||
            alg->halg.statesize == 0)
                return -EINVAL;

        base->cra_type = &crypto_ahash_type;
        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
        base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

        return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
        struct crypto_alg *base = &alg->halg.base;
        int err;

        err = ahash_prepare_alg(alg);
        if (err)
                return err;

        return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);
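
/*
 * Example (illustrative sketch): the rough shape of a driver-side
 * registration.  Every my_* name is a hypothetical driver callback or
 * type; real drivers also set .cra_priority, .cra_ctxsize and so on.
 *
 *	static struct ahash_alg my_sha256_alg = {
 *		.init	= my_init,
 *		.update	= my_update,
 *		.final	= my_final,
 *		.digest	= my_digest,
 *		.export	= my_export,
 *		.import	= my_import,
 *		.halg	= {
 *			.digestsize = SHA256_DIGEST_SIZE,
 *			.statesize  = sizeof(struct my_state),
 *			.base	= {
 *				.cra_name	 = "sha256",
 *				.cra_driver_name = "sha256-mydev",
 *				.cra_blocksize	 = SHA256_BLOCK_SIZE,
 *				.cra_flags	 = CRYPTO_ALG_ASYNC,
 *				.cra_module	 = THIS_MODULE,
 *			},
 *		},
 *	};
 *
 *	err = crypto_register_ahash(&my_sha256_alg);
 *
 * Note that ahash_prepare_alg() above rejects a zero .statesize, so a
 * state size must be declared for export()/import() to be meaningful.
 */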

int crypto_unregister_ahash(struct ahash_alg *alg)
{
        return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int ahash_register_instance(struct crypto_template *tmpl,
                            struct ahash_instance *inst)
{
        int err;

        err = ahash_prepare_alg(&inst->alg);
        if (err)
                return err;

        return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
        crypto_drop_spawn(crypto_instance_ctx(inst));
        kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
                            struct hash_alg_common *alg,
                            struct crypto_instance *inst)
{
        return crypto_init_spawn2(&spawn->base, &alg->base, inst,
                                  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
        struct crypto_alg *alg;

        alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
        return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
        struct crypto_alg *alg = &halg->base;

        if (alg->cra_type != &crypto_ahash_type)
                return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));

        return __crypto_ahash_alg(alg)->setkey != NULL;
}
EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");