// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <net/netlink.h>

#include "internal.h"

enum {
	SKCIPHER_WALK_PHYS = 1 << 0,
	SKCIPHER_WALK_SLOW = 1 << 1,
	SKCIPHER_WALK_COPY = 1 << 2,
	SKCIPHER_WALK_DIFF = 1 << 3,
	SKCIPHER_WALK_SLEEP = 1 << 4,
};

struct skcipher_walk_buffer {
	struct list_head entry;
	struct scatter_walk dst;
	unsigned int len;
	u8 *data;
	u8 buffer[];
};

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}

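/*
 * Worked example (illustrative, assuming 4 KiB pages): for start == 0xff0
 * within some page and len == 0x20, the last byte would land at 0x100f,
 * so end_page is the following page boundary (0x1000) and the spot is
 * moved up to 0x1000.  When start + len - 1 stays on start's page,
 * end_page lies at or below start and start is returned unchanged.
 */
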
static inline struct skcipher_alg *__crypto_skcipher_alg(
	struct crypto_alg *alg)
{
	return container_of(alg, struct skcipher_alg, base);
}

static inline struct crypto_istat_cipher *skcipher_get_stat(
	struct skcipher_alg *alg)
{
#ifdef CONFIG_CRYPTO_STATS
	return &alg->stat;
#else
	return NULL;
#endif
}

static inline int crypto_skcipher_errstat(struct skcipher_alg *alg, int err)
{
	struct crypto_istat_cipher *istat = skcipher_get_stat(alg);

	if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
		return err;

	if (err && err != -EINPROGRESS && err != -EBUSY)
		atomic64_inc(&istat->err_cnt);

	return err;
}

static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = skcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize,
			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
	return 0;
}

int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
	unsigned int n = walk->nbytes;
	unsigned int nbytes = 0;

	if (!n)
		goto finish;

	if (likely(err >= 0)) {
		n -= err;
		nbytes = walk->total - n;
	}

	if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
				    SKCIPHER_WALK_SLOW |
				    SKCIPHER_WALK_COPY |
				    SKCIPHER_WALK_DIFF)))) {
unmap_src:
		skcipher_unmap_src(walk);
	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
		skcipher_unmap_dst(walk);
		goto unmap_src;
	} else if (walk->flags & SKCIPHER_WALK_COPY) {
		skcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		skcipher_unmap_dst(walk);
	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
		if (err > 0) {
			/*
			 * Didn't process all bytes.  Either the algorithm is
			 * broken, or this was the last step and it turned out
			 * the message wasn't evenly divisible into blocks but
			 * the algorithm requires it.
			 */
			err = -EINVAL;
			nbytes = 0;
		} else
			n = skcipher_done_slow(walk, n);
	}

	if (err > 0)
		err = 0;

	walk->total = nbytes;
	walk->nbytes = 0;

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	if (nbytes) {
		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
		return skcipher_walk_next(walk);
	}

finish:
	/* Short-circuit for the common/fast path. */
	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
		goto out;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		goto out;

	if (walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

out:
	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);

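/*
 * A note on the @err convention above (illustrative examples, not part of
 * the original file): a cipher passes the number of bytes of walk->nbytes
 * it left unprocessed, or a negative errno to abort the walk:
 *
 *	err = skcipher_walk_done(&walk, 0);	// consumed everything
 *	err = skcipher_walk_done(&walk, -EIO);	// e.g. hardware failure
 */
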
void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
	struct skcipher_walk_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		u8 *data;

		if (err)
			goto done;

		data = p->data;
		if (!data) {
			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
			data = skcipher_get_spot(data, walk->stride);
		}

		scatterwalk_copychunks(data, &p->dst, p->len, 1);

		if (offset_in_page(p->data) + p->len + walk->stride >
		    PAGE_SIZE)
			free_page((unsigned long)p->data);

done:
		list_del(&p->entry);
		kfree(p);
	}

	if (!err && walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);

static void skcipher_queue_write(struct skcipher_walk *walk,
				 struct skcipher_walk_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	bool phys = walk->flags & SKCIPHER_WALK_PHYS;
	unsigned alignmask = walk->alignmask;
	struct skcipher_walk_buffer *p;
	unsigned a;
	unsigned n;
	u8 *buffer;
	void *v;

	if (!phys) {
		if (!walk->buffer)
			walk->buffer = walk->page;
		buffer = walk->buffer;
		if (buffer)
			goto ok;
	}

	/* Start with the minimum alignment of kmalloc. */
	a = crypto_tfm_ctx_alignment() - 1;
	n = bsize;

	if (phys) {
		/* Calculate the minimum alignment of p->buffer. */
		a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
		n += sizeof(*p);
	}

	/* Minimum size to align p->buffer by alignmask. */
	n += alignmask & ~a;

	/* Minimum size to ensure p->buffer does not straddle a page. */
	n += (bsize - 1) & ~(alignmask | a);

	v = kzalloc(n, skcipher_walk_gfp(walk));
	if (!v)
		return skcipher_walk_done(walk, -ENOMEM);

	if (phys) {
		p = v;
		p->len = bsize;
		skcipher_queue_write(walk, p);
		buffer = p->buffer;
	} else {
		walk->buffer = v;
		buffer = v;
	}

ok:
	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
	walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = walk->dst.virt.addr;

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= SKCIPHER_WALK_SLOW;

	return 0;
}

static int skcipher_next_copy(struct skcipher_walk *walk)
{
	struct skcipher_walk_buffer *p;
	u8 *tmp = walk->page;

	skcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	skcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	if (!(walk->flags & SKCIPHER_WALK_PHYS))
		return 0;

	p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
	if (!p)
		return -ENOMEM;

	p->data = walk->page;
	p->len = walk->nbytes;
	skcipher_queue_write(walk, p);

	if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
	    PAGE_SIZE)
		walk->page = NULL;
	else
		walk->page += walk->nbytes;

	return 0;
}

static int skcipher_next_fast(struct skcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & SKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	skcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= SKCIPHER_WALK_DIFF;
		skcipher_map_dst(walk);
	}

	return 0;
}

static int skcipher_walk_next(struct skcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
			 SKCIPHER_WALK_DIFF);

	n = walk->total;
	bsize = min(walk->stride, max(n, walk->blocksize));
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		if (unlikely(walk->total < walk->blocksize))
			return skcipher_walk_done(walk, -EINVAL);

slow_path:
		err = skcipher_next_slow(walk, bsize);
		goto set_phys_lowmem;
	}

	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
		if (!walk->page) {
			gfp_t gfp = skcipher_walk_gfp(walk);

			walk->page = (void *)__get_free_page(gfp);
			if (!walk->page)
				goto slow_path;
		}

		walk->nbytes = min_t(unsigned, n,
				     PAGE_SIZE - offset_in_page(walk->page));
		walk->flags |= SKCIPHER_WALK_COPY;
		err = skcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return skcipher_next_fast(walk);

set_phys_lowmem:
	if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

static int skcipher_copy_iv(struct skcipher_walk *walk)
{
	unsigned a = crypto_tfm_ctx_alignment() - 1;
	unsigned alignmask = walk->alignmask;
	unsigned ivsize = walk->ivsize;
	unsigned bs = walk->stride;
	unsigned aligned_bs;
	unsigned size;
	u8 *iv;

	aligned_bs = ALIGN(bs, alignmask + 1);

	/* Minimum size to align buffer by alignmask. */
	size = alignmask & ~a;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		size += ivsize;
	else {
		size += aligned_bs + ivsize;

		/* Minimum size to ensure buffer does not straddle a page. */
		size += (bs - 1) & ~(alignmask | a);
	}

	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
	if (!walk->buffer)
		return -ENOMEM;

	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
	iv = skcipher_get_spot(iv, bs) + aligned_bs;

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

static int skcipher_walk_first(struct skcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_hardirq()))
		return -EDEADLK;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = skcipher_copy_iv(walk);

		if (err)
			return err;
	}

	walk->page = NULL;

	return skcipher_walk_next(walk);
}

static int skcipher_walk_skcipher(struct skcipher_walk *walk,
				  struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

	walk->total = req->cryptlen;
	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	walk->flags &= ~SKCIPHER_WALK_SLEEP;
	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		       SKCIPHER_WALK_SLEEP : 0;

	walk->blocksize = crypto_skcipher_blocksize(tfm);
	walk->stride = crypto_skcipher_walksize(tfm);
	walk->ivsize = crypto_skcipher_ivsize(tfm);
	walk->alignmask = crypto_skcipher_alignmask(tfm);

	return skcipher_walk_first(walk);
}

int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req, bool atomic)
{
	int err;

	might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	err = skcipher_walk_skcipher(walk, req);

	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);

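/*
 * Typical use of the virtual-address walk by a cipher implementation.
 * This is a minimal sketch: "crypt_blocks" and "bsize" are made-up
 * stand-ins for the real block routine and block size, and a real driver
 * adds its own error handling.
 *
 *	static int crypt(struct skcipher_request *req)
 *	{
 *		struct skcipher_walk walk;
 *		unsigned int nbytes;
 *		int err;
 *
 *		err = skcipher_walk_virt(&walk, req, false);
 *		while ((nbytes = walk.nbytes) != 0) {
 *			crypt_blocks(walk.src.virt.addr, walk.dst.virt.addr,
 *				     nbytes & ~(bsize - 1), walk.iv);
 *			err = skcipher_walk_done(&walk, nbytes & (bsize - 1));
 *		}
 *		return err;
 *	}
 */
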
int skcipher_walk_async(struct skcipher_walk *walk,
			struct skcipher_request *req)
{
	walk->flags |= SKCIPHER_WALK_PHYS;

	INIT_LIST_HEAD(&walk->buffers);

	return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

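/*
 * Sketch of the physical-address variant (illustrative; actual users are
 * rare): the walk hands out page/offset pairs, and writebacks queued while
 * walking are flushed by skcipher_walk_complete() once the asynchronous
 * operation has finished.
 *
 *	err = skcipher_walk_async(&walk, req);
 *	// ... program hardware with walk.src.phys / walk.dst.phys ...
 *	skcipher_walk_complete(&walk, err);
 */
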
static int skcipher_walk_aead_common(struct skcipher_walk *walk,
				     struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int err;

	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

	scatterwalk_done(&walk->in, 0, walk->total);
	scatterwalk_done(&walk->out, 0, walk->total);

	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		walk->flags |= SKCIPHER_WALK_SLEEP;
	else
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	walk->blocksize = crypto_aead_blocksize(tfm);
	walk->stride = crypto_aead_chunksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);

	err = skcipher_walk_first(walk);

	if (atomic)
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	return err;
}

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	walk->total = req->cryptlen - crypto_aead_authsize(tfm);

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);

static void skcipher_set_needkey(struct crypto_skcipher *tfm)
{
	if (crypto_skcipher_max_keysize(tfm) != 0)
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	kfree_sensitive(buffer);
	return ret;
}

int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	int err;

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize)
		return -EINVAL;

	if ((unsigned long)key & alignmask)
		err = skcipher_setkey_unaligned(tfm, key, keylen);
	else
		err = cipher->setkey(tfm, key, keylen);

	if (unlikely(err)) {
		skcipher_set_needkey(tfm);
		return err;
	}

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_setkey);

int crypto_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	int ret;

	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
		struct crypto_istat_cipher *istat = skcipher_get_stat(alg);

		atomic64_inc(&istat->encrypt_cnt);
		atomic64_add(req->cryptlen, &istat->encrypt_tlen);
	}

	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = alg->encrypt(req);

	return crypto_skcipher_errstat(alg, ret);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);

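/*
 * End-to-end example of driving this entry point synchronously (a sketch
 * under assumed names; "cbc(aes)" and the key/iv/buf variables are purely
 * illustrative, and error checks are elided for brevity):
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	struct skcipher_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct scatterlist sg;
 *	int err;
 *
 *	err = crypto_skcipher_setkey(tfm, key, keylen);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	sg_init_one(&sg, buf, buflen);
 *	skcipher_request_set_crypt(req, &sg, &sg, buflen, iv);
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 */
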
int crypto_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	int ret;

	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
		struct crypto_istat_cipher *istat = skcipher_get_stat(alg);

		atomic64_inc(&istat->decrypt_cnt);
		atomic64_add(req->cryptlen, &istat->decrypt_tlen);
	}

	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = alg->decrypt(req);

	return crypto_skcipher_errstat(alg, ret);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	skcipher_set_needkey(skcipher);

	if (alg->exit)
		skcipher->base.exit = crypto_skcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);

	seq_printf(m, "type         : skcipher\n");
	seq_printf(m, "async        : %s\n",
		   alg->cra_flags & CRYPTO_ALG_ASYNC ?  "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
}

static int __maybe_unused crypto_skcipher_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
	struct crypto_report_blkcipher rblkcipher;

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->min_keysize;
	rblkcipher.max_keysize = skcipher->max_keysize;
	rblkcipher.ivsize = skcipher->ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}

static int __maybe_unused crypto_skcipher_report_stat(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
	struct crypto_istat_cipher *istat;
	struct crypto_stat_cipher rcipher;

	istat = skcipher_get_stat(skcipher);

	memset(&rcipher, 0, sizeof(rcipher));

	strscpy(rcipher.type, "cipher", sizeof(rcipher.type));

	rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
	rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
	rcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
	rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
	rcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);

	return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
}

static const struct crypto_type crypto_skcipher_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_skcipher_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_skcipher_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
	.report_stat = crypto_skcipher_report_stat,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 struct crypto_instance *inst,
			 const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_skcipher_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
				const char *alg_name, u32 type, u32 mask)
{
	struct crypto_skcipher *tfm;

	/* Only sync algorithms allowed. */
	mask |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE;

	tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);

	/*
	 * Make sure we do not allocate something that might get used with
	 * an on-stack request: check the request size.
	 */
	if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
				    MAX_SYNC_SKCIPHER_REQSIZE)) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	return (struct crypto_sync_skcipher *)tfm;
}
EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);

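/*
 * The sync variant pairs with an on-stack request; the WARN_ON above is
 * what makes that pattern safe.  An illustrative sketch:
 *
 *	struct crypto_sync_skcipher *tfm;
 *
 *	tfm = crypto_alloc_sync_skcipher("ctr(aes)", 0, 0);
 *	if (!IS_ERR(tfm)) {
 *		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
 *
 *		skcipher_request_set_sync_tfm(req, tfm);
 *		skcipher_request_set_callback(req, 0, NULL, NULL);
 *		skcipher_request_set_crypt(req, src, dst, len, iv);
 *		err = crypto_skcipher_encrypt(req);
 *	}
 */
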
int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type, type,
				   mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher);

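/*
 * Example probe (illustrative): checking availability before enabling a
 * feature that depends on the algorithm.
 *
 *	if (!crypto_has_skcipher("xts(aes)", 0, 0))
 *		return -ENOENT;
 */
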
static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
	struct crypto_alg *base = &alg->base;

	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
	    alg->walksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;
	if (!alg->walksize)
		alg->walksize = alg->chunksize;

	base->cra_type = &crypto_skcipher_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
		memset(istat, 0, sizeof(*istat));

	return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

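/*
 * Typical module-level use of the batch helpers (an illustrative sketch;
 * "my_algs" is a made-up driver array, not part of this file):
 *
 *	static struct skcipher_alg my_algs[] = { ... };
 *
 *	static int __init my_mod_init(void)
 *	{
 *		return crypto_register_skciphers(my_algs, ARRAY_SIZE(my_algs));
 *	}
 *
 *	static void __exit my_mod_exit(void)
 *	{
 *		crypto_unregister_skciphers(my_algs, ARRAY_SIZE(my_algs));
 *	}
 */
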
void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = skcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key,
				  unsigned int keylen)
{
	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);

	crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) &
				CRYPTO_TFM_REQ_MASK);
	return crypto_cipher_setkey(cipher, key, keylen);
}

static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->cipher = cipher;
	return 0;
}

static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_cipher(ctx->cipher);
}

static void skcipher_free_instance_simple(struct skcipher_instance *inst)
{
	crypto_drop_cipher(skcipher_instance_ctx(inst));
	kfree(inst);
}

/**
 * skcipher_alloc_instance_simple - allocate instance of simple block cipher
 *	mode
 *
 * Allocate an skcipher_instance for a simple block cipher mode of operation,
 * e.g. cbc or ecb.  The instance context will have just a single crypto_spawn,
 * that for the underlying cipher.  The {min,max}_keysize, ivsize, blocksize,
 * alignmask, and priority are set from the underlying cipher but can be
 * overridden if needed.  The tfm context defaults to skcipher_ctx_simple, and
 * default ->setkey(), ->init(), and ->exit() methods are installed.
 *
 * @tmpl: the template being instantiated
 * @tb: the template parameters
 *
 * Return: a pointer to the new instance, or an ERR_PTR().  The caller still
 *	   needs to register the instance.
 */
struct skcipher_instance *skcipher_alloc_instance_simple(
	struct crypto_template *tmpl, struct rtattr **tb)
{
	u32 mask;
	struct skcipher_instance *inst;
	struct crypto_cipher_spawn *spawn;
	struct crypto_alg *cipher_alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return ERR_PTR(err);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);
	spawn = skcipher_instance_ctx(inst);

	err = crypto_grab_cipher(spawn, skcipher_crypto_instance(inst),
				 crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	cipher_alg = crypto_spawn_cipher_alg(spawn);

	err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name,
				  cipher_alg);
	if (err)
		goto err_free_inst;

	inst->free = skcipher_free_instance_simple;

	/* Default algorithm properties, can be overridden */
	inst->alg.base.cra_blocksize = cipher_alg->cra_blocksize;
	inst->alg.base.cra_alignmask = cipher_alg->cra_alignmask;
	inst->alg.base.cra_priority = cipher_alg->cra_priority;
	inst->alg.min_keysize = cipher_alg->cra_cipher.cia_min_keysize;
	inst->alg.max_keysize = cipher_alg->cra_cipher.cia_max_keysize;
	inst->alg.ivsize = cipher_alg->cra_blocksize;

	/* Use skcipher_ctx_simple by default, can be overridden */
	inst->alg.base.cra_ctxsize = sizeof(struct skcipher_ctx_simple);
	inst->alg.setkey = skcipher_setkey_simple;
	inst->alg.init = skcipher_init_tfm_simple;
	inst->alg.exit = skcipher_exit_tfm_simple;

	return inst;

err_free_inst:
	skcipher_free_instance_simple(inst);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);

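/*
 * How a template's ->create() typically consumes this helper (a sketch
 * modelled on the simple mode templates; "example_create" and the
 * encrypt/decrypt hooks are assumptions, not part of this file):
 *
 *	static int example_create(struct crypto_template *tmpl,
 *				  struct rtattr **tb)
 *	{
 *		struct skcipher_instance *inst;
 *		int err;
 *
 *		inst = skcipher_alloc_instance_simple(tmpl, tb);
 *		if (IS_ERR(inst))
 *			return PTR_ERR(inst);
 *
 *		inst->alg.encrypt = example_encrypt;
 *		inst->alg.decrypt = example_decrypt;
 *
 *		err = skcipher_register_instance(tmpl, inst);
 *		if (err)
 *			inst->free(inst);
 *		return err;
 *	}
 */
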
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);