// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linear symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers.
 *
 * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <net/netlink.h>
#include "skcipher.h"

static inline struct crypto_lskcipher *__crypto_lskcipher_cast(
	struct crypto_tfm *tfm)
{
	return container_of(tfm, struct crypto_lskcipher, base);
}

static inline struct lskcipher_alg *__crypto_lskcipher_alg(
	struct crypto_alg *alg)
{
	return container_of(alg, struct lskcipher_alg, co.base);
}

static inline struct crypto_istat_cipher *lskcipher_get_stat(
	struct lskcipher_alg *alg)
{
	return skcipher_get_stat_common(&alg->co);
}

static inline int crypto_lskcipher_errstat(struct lskcipher_alg *alg, int err)
{
	struct crypto_istat_cipher *istat = lskcipher_get_stat(alg);

	if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
		return err;

	if (err)
		atomic64_inc(&istat->err_cnt);

	return err;
}

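/*
 * Bounce an unaligned key through a temporary buffer that satisfies the
 * algorithm's alignment mask before calling ->setkey().  The buffer is
 * freed with kfree_sensitive() so the key material is not left behind.
 */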
static int lskcipher_setkey_unaligned(struct crypto_lskcipher *tfm,
				      const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_lskcipher_alignmask(tfm);
	struct lskcipher_alg *cipher = crypto_lskcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	kfree_sensitive(buffer);
	return ret;
}

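/*
 * Set the key for an lskcipher transform.  The key length is checked
 * against the algorithm's min/max keysize, and keys that do not satisfy
 * the alignment mask are redirected through lskcipher_setkey_unaligned().
 */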
int crypto_lskcipher_setkey(struct crypto_lskcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	unsigned long alignmask = crypto_lskcipher_alignmask(tfm);
	struct lskcipher_alg *cipher = crypto_lskcipher_alg(tfm);

	if (keylen < cipher->co.min_keysize || keylen > cipher->co.max_keysize)
		return -EINVAL;

	if ((unsigned long)key & alignmask)
		return lskcipher_setkey_unaligned(tfm, key, keylen);
	else
		return cipher->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_lskcipher_setkey);

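/*
 * Handle callers whose src, dst or iv do not satisfy the alignment mask:
 * the IV and the data are bounced through page-sized temporary buffers,
 * processing at most one page per iteration and rounding each chunk down
 * to a whole number of chunks before handing it to the cipher callback.
 */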
static int crypto_lskcipher_crypt_unaligned(
	struct crypto_lskcipher *tfm, const u8 *src, u8 *dst, unsigned len,
	u8 *iv, int (*crypt)(struct crypto_lskcipher *tfm, const u8 *src,
			     u8 *dst, unsigned len, u8 *iv, bool final))
{
	unsigned ivsize = crypto_lskcipher_ivsize(tfm);
	unsigned bs = crypto_lskcipher_blocksize(tfm);
	unsigned cs = crypto_lskcipher_chunksize(tfm);
	int err;
	u8 *tiv;
	u8 *p;

	BUILD_BUG_ON(MAX_CIPHER_BLOCKSIZE > PAGE_SIZE ||
		     MAX_CIPHER_ALIGNMASK >= PAGE_SIZE);

	tiv = kmalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!tiv)
		return -ENOMEM;

	memcpy(tiv, iv, ivsize);

	p = kmalloc(PAGE_SIZE, GFP_ATOMIC);
	err = -ENOMEM;
	if (!p)
		goto out;

	while (len >= bs) {
		unsigned chunk = min((unsigned)PAGE_SIZE, len);

		if (chunk > cs)
			chunk &= ~(cs - 1);

		memcpy(p, src, chunk);
		err = crypt(tfm, p, p, chunk, tiv, true);
		if (err)
			goto out;

		memcpy(dst, p, chunk);
		src += chunk;
		dst += chunk;
		len -= chunk;
	}

	err = len ? -EINVAL : 0;

out:
	memcpy(iv, tiv, ivsize);
	kfree_sensitive(p);
	kfree_sensitive(tiv);
	return err;
}

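/*
 * Common entry point for encryption and decryption: call straight into
 * the algorithm when src, dst and iv are all aligned, otherwise fall back
 * to the bounce-buffer path above.  Errors are recorded in the algorithm's
 * statistics when CONFIG_CRYPTO_STATS is enabled.
 */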
static int crypto_lskcipher_crypt(struct crypto_lskcipher *tfm, const u8 *src,
				  u8 *dst, unsigned len, u8 *iv,
				  int (*crypt)(struct crypto_lskcipher *tfm,
					       const u8 *src, u8 *dst,
					       unsigned len, u8 *iv,
					       bool final))
{
	unsigned long alignmask = crypto_lskcipher_alignmask(tfm);
	struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);
	int ret;

	if (((unsigned long)src | (unsigned long)dst | (unsigned long)iv) &
	    alignmask) {
		ret = crypto_lskcipher_crypt_unaligned(tfm, src, dst, len, iv,
						       crypt);
		goto out;
	}

	ret = crypt(tfm, src, dst, len, iv, true);

out:
	return crypto_lskcipher_errstat(alg, ret);
}

int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
			     u8 *dst, unsigned len, u8 *iv)
{
	struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);

	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
		struct crypto_istat_cipher *istat = lskcipher_get_stat(alg);

		atomic64_inc(&istat->encrypt_cnt);
		atomic64_add(len, &istat->encrypt_tlen);
	}

	return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->encrypt);
}
EXPORT_SYMBOL_GPL(crypto_lskcipher_encrypt);

int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src,
			     u8 *dst, unsigned len, u8 *iv)
{
	struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);

	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
		struct crypto_istat_cipher *istat = lskcipher_get_stat(alg);

		atomic64_inc(&istat->decrypt_cnt);
		atomic64_add(len, &istat->decrypt_tlen);
	}

	return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->decrypt);
}
EXPORT_SYMBOL_GPL(crypto_lskcipher_decrypt);

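/*
 * Example usage (illustrative sketch only, not part of this file): a caller
 * that wants to encrypt a linear buffer with an lskcipher might do something
 * along these lines, assuming "cbc(aes)" is available as an lskcipher and
 * that key, iv and buf are suitably sized:
 *
 *	struct crypto_lskcipher *tfm;
 *	int err;
 *
 *	tfm = crypto_alloc_lskcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	err = crypto_lskcipher_setkey(tfm, key, keylen);
 *	if (!err)
 *		err = crypto_lskcipher_encrypt(tfm, buf, buf, len, iv);
 *
 *	crypto_free_lskcipher(tfm);
 *	return err;
 */
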
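/*
 * Bridge from the skcipher request interface to an lskcipher: walk the
 * scatterlists with skcipher_walk_virt() and feed each mapped segment to
 * the lskcipher callback, passing final == true only on the last segment.
 */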
static int crypto_lskcipher_crypt_sg(struct skcipher_request *req,
				     int (*crypt)(struct crypto_lskcipher *tfm,
						  const u8 *src, u8 *dst,
						  unsigned len, u8 *iv,
						  bool final))
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher);
	struct crypto_lskcipher *tfm = *ctx;
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes) {
		err = crypt(tfm, walk.src.virt.addr, walk.dst.virt.addr,
			    walk.nbytes, walk.iv, walk.nbytes == walk.total);
		err = skcipher_walk_done(&walk, err);
	}

	return err;
}

int crypto_lskcipher_encrypt_sg(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher);
	struct lskcipher_alg *alg = crypto_lskcipher_alg(*ctx);

	return crypto_lskcipher_crypt_sg(req, alg->encrypt);
}

int crypto_lskcipher_decrypt_sg(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher);
	struct lskcipher_alg *alg = crypto_lskcipher_alg(*ctx);

	return crypto_lskcipher_crypt_sg(req, alg->decrypt);
}

static void crypto_lskcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_lskcipher *skcipher = __crypto_lskcipher_cast(tfm);
	struct lskcipher_alg *alg = crypto_lskcipher_alg(skcipher);

	alg->exit(skcipher);
}

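/*
 * Transform initialisation: install the exit handler only when the
 * algorithm provides ->exit(), then run the optional ->init() hook.
 */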
static int crypto_lskcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_lskcipher *skcipher = __crypto_lskcipher_cast(tfm);
	struct lskcipher_alg *alg = crypto_lskcipher_alg(skcipher);

	if (alg->exit)
		skcipher->base.exit = crypto_lskcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

static void crypto_lskcipher_free_instance(struct crypto_instance *inst)
{
	struct lskcipher_instance *skcipher =
		container_of(inst, struct lskcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void __maybe_unused crypto_lskcipher_show(
	struct seq_file *m, struct crypto_alg *alg)
{
	struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg);

	seq_printf(m, "type         : lskcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->co.min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->co.max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->co.ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->co.chunksize);
}

static int __maybe_unused crypto_lskcipher_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg);
	struct crypto_report_blkcipher rblkcipher;

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "lskcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->co.min_keysize;
	rblkcipher.max_keysize = skcipher->co.max_keysize;
	rblkcipher.ivsize = skcipher->co.ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}

static int __maybe_unused crypto_lskcipher_report_stat(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg);
	struct crypto_istat_cipher *istat;
	struct crypto_stat_cipher rcipher;

	istat = lskcipher_get_stat(skcipher);

	memset(&rcipher, 0, sizeof(rcipher));

	strscpy(rcipher.type, "cipher", sizeof(rcipher.type));

	rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
	rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
	rcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
	rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
	rcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);

	return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
}

static const struct crypto_type crypto_lskcipher_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_lskcipher_init_tfm,
	.free = crypto_lskcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_lskcipher_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_lskcipher_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
	.report_stat = crypto_lskcipher_report_stat,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_LSKCIPHER,
	.tfmsize = offsetof(struct crypto_lskcipher, base),
};

static void crypto_lskcipher_exit_tfm_sg(struct crypto_tfm *tfm)
{
	struct crypto_lskcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_lskcipher(*ctx);
}

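/*
 * Set up an skcipher-style tfm that is backed by an lskcipher: take a
 * module reference on the algorithm, create the lskcipher transform,
 * stash it in the tfm context so the _sg helpers above can reach it,
 * and install the matching exit handler.
 */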
int crypto_init_lskcipher_ops_sg(struct crypto_tfm *tfm)
{
	struct crypto_lskcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_lskcipher *skcipher;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	skcipher = crypto_create_tfm(calg, &crypto_lskcipher_type);
	if (IS_ERR(skcipher)) {
		crypto_mod_put(calg);
		return PTR_ERR(skcipher);
	}

	*ctx = skcipher;
	tfm->exit = crypto_lskcipher_exit_tfm_sg;

	return 0;
}

int crypto_grab_lskcipher(struct crypto_lskcipher_spawn *spawn,
			  struct crypto_instance *inst,
			  const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_lskcipher_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_lskcipher);

struct crypto_lskcipher *crypto_alloc_lskcipher(const char *alg_name,
						u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_lskcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_lskcipher);

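/*
 * Common validation for lskcipher algorithms and instances: run the shared
 * skcipher checks, reject chunk sizes that are not a power of two, and mark
 * the algorithm as an lskcipher.
 */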
static int lskcipher_prepare_alg(struct lskcipher_alg *alg)
{
	struct crypto_alg *base = &alg->co.base;
	int err;

	err = skcipher_prepare_alg_common(&alg->co);
	if (err)
		return err;

	if (alg->co.chunksize & (alg->co.chunksize - 1))
		return -EINVAL;

	base->cra_type = &crypto_lskcipher_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_LSKCIPHER;

	return 0;
}

int crypto_register_lskcipher(struct lskcipher_alg *alg)
{
	struct crypto_alg *base = &alg->co.base;
	int err;

	err = lskcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_lskcipher);

void crypto_unregister_lskcipher(struct lskcipher_alg *alg)
{
	crypto_unregister_alg(&alg->co.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_lskcipher);

int crypto_register_lskciphers(struct lskcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_lskcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_lskcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_lskciphers);

void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_lskcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_lskciphers);

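/*
 * Register a template instance.  Instances must provide a ->free()
 * callback so crypto_lskcipher_free_instance() has something to call.
 */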
int lskcipher_register_instance(struct crypto_template *tmpl,
				struct lskcipher_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = lskcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, lskcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(lskcipher_register_instance);

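/*
 * Default helpers for "simple" template instances created by
 * lskcipher_alloc_instance_simple() below: the tfm context holds a single
 * pointer to the underlying lskcipher, and setkey/init/exit just forward
 * to it.
 */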
static int lskcipher_setkey_simple(struct crypto_lskcipher *tfm, const u8 *key,
				   unsigned int keylen)
{
	struct crypto_lskcipher *cipher = lskcipher_cipher_simple(tfm);

	crypto_lskcipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
	crypto_lskcipher_set_flags(cipher, crypto_lskcipher_get_flags(tfm) &
				   CRYPTO_TFM_REQ_MASK);
	return crypto_lskcipher_setkey(cipher, key, keylen);
}

static int lskcipher_init_tfm_simple(struct crypto_lskcipher *tfm)
{
	struct lskcipher_instance *inst = lskcipher_alg_instance(tfm);
	struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm);
	struct crypto_lskcipher_spawn *spawn;
	struct crypto_lskcipher *cipher;

	spawn = lskcipher_instance_ctx(inst);
	cipher = crypto_spawn_lskcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	*ctx = cipher;
	return 0;
}

static void lskcipher_exit_tfm_simple(struct crypto_lskcipher *tfm)
{
	struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm);

	crypto_free_lskcipher(*ctx);
}

static void lskcipher_free_instance_simple(struct lskcipher_instance *inst)
{
	crypto_drop_lskcipher(lskcipher_instance_ctx(inst));
	kfree(inst);
}

/**
 * lskcipher_alloc_instance_simple - allocate instance of simple block cipher
 *
 * Allocate an lskcipher_instance for a simple block cipher mode of operation,
 * e.g. cbc or ecb. The instance context will have just a single crypto_spawn,
 * that for the underlying cipher. The {min,max}_keysize, ivsize, blocksize,
 * alignmask, and priority are set from the underlying cipher but can be
 * overridden if needed. The tfm context defaults to
 * struct crypto_lskcipher *, and default ->setkey(), ->init(), and
 * ->exit() methods are installed.
 *
 * @tmpl: the template being instantiated
 * @tb: the template parameters
 *
 * Return: a pointer to the new instance, or an ERR_PTR(). The caller still
 *	   needs to register the instance.
 */
struct lskcipher_instance *lskcipher_alloc_instance_simple(
	struct crypto_template *tmpl, struct rtattr **tb)
{
	u32 mask;
	struct lskcipher_instance *inst;
	struct crypto_lskcipher_spawn *spawn;
	char ecb_name[CRYPTO_MAX_ALG_NAME];
	struct lskcipher_alg *cipher_alg;
	const char *cipher_name;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_LSKCIPHER, &mask);
	if (err)
		return ERR_PTR(err);

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return ERR_CAST(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);

	spawn = lskcipher_instance_ctx(inst);
	err = crypto_grab_lskcipher(spawn,
				    lskcipher_crypto_instance(inst),
				    cipher_name, 0, mask);

	ecb_name[0] = 0;
	if (err == -ENOENT && !!memcmp(tmpl->name, "ecb", 4)) {
		err = -ENAMETOOLONG;
		if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		err = crypto_grab_lskcipher(spawn,
					    lskcipher_crypto_instance(inst),
					    ecb_name, 0, mask);
	}

	if (err)
		goto err_free_inst;

	cipher_alg = crypto_lskcipher_spawn_alg(spawn);

	err = crypto_inst_setname(lskcipher_crypto_instance(inst), tmpl->name,
				  &cipher_alg->co.base);
	if (err)
		goto err_free_inst;

	if (ecb_name[0]) {
		int len;

		err = -EINVAL;
		len = strscpy(ecb_name, &cipher_alg->co.base.cra_name[4],
			      sizeof(ecb_name));
		if (len < 2)
			goto err_free_inst;

		if (ecb_name[len - 1] != ')')
			goto err_free_inst;

		ecb_name[len - 1] = 0;

		err = -ENAMETOOLONG;
		if (snprintf(inst->alg.co.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, ecb_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		if (strcmp(ecb_name, cipher_name) &&
		    snprintf(inst->alg.co.base.cra_driver_name,
			     CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, cipher_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;
	} else {
		/* Don't allow nesting. */
		err = -ELOOP;
		if ((cipher_alg->co.base.cra_flags & CRYPTO_ALG_INSTANCE))
			goto err_free_inst;
	}

	err = -EINVAL;
	if (cipher_alg->co.ivsize)
		goto err_free_inst;

	inst->free = lskcipher_free_instance_simple;

	/* Default algorithm properties, can be overridden */
	inst->alg.co.base.cra_blocksize = cipher_alg->co.base.cra_blocksize;
	inst->alg.co.base.cra_alignmask = cipher_alg->co.base.cra_alignmask;
	inst->alg.co.base.cra_priority = cipher_alg->co.base.cra_priority;
	inst->alg.co.min_keysize = cipher_alg->co.min_keysize;
	inst->alg.co.max_keysize = cipher_alg->co.max_keysize;
	inst->alg.co.ivsize = cipher_alg->co.base.cra_blocksize;

	/* Use struct crypto_lskcipher * by default, can be overridden */
	inst->alg.co.base.cra_ctxsize = sizeof(struct crypto_lskcipher *);
	inst->alg.setkey = lskcipher_setkey_simple;
	inst->alg.init = lskcipher_init_tfm_simple;
	inst->alg.exit = lskcipher_exit_tfm_simple;

	return inst;

err_free_inst:
	lskcipher_free_instance_simple(inst);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(lskcipher_alloc_instance_simple);
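
/*
 * Example (illustrative sketch, not part of this file): a simple mode
 * template would typically wire lskcipher_alloc_instance_simple() into its
 * ->create() callback roughly as below.  "example_create" is a hypothetical
 * name used only for illustration.
 *
 *	static int example_create(struct crypto_template *tmpl,
 *				  struct rtattr **tb)
 *	{
 *		struct lskcipher_instance *inst;
 *		int err;
 *
 *		inst = lskcipher_alloc_instance_simple(tmpl, tb);
 *		if (IS_ERR(inst))
 *			return PTR_ERR(inst);
 *
 *		(adjust inst->alg here if the defaults do not fit)
 *
 *		err = lskcipher_register_instance(tmpl, inst);
 *		if (err)
 *			inst->free(inst);
 *		return err;
 *	}
 */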