drivers/crypto/chelsio/chcr_algo.c
1/*
2 * This file is part of the Chelsio T6 Crypto driver for Linux.
3 *
4 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 *
34 * Written and Maintained by:
35 * Manoj Malviya (manojmalviya@chelsio.com)
36 * Atul Gupta (atul.gupta@chelsio.com)
37 * Jitendra Lulla (jlulla@chelsio.com)
38 * Yeshaswi M R Gowda (yeshaswi@chelsio.com)
39 * Harsh Jain (harsh@chelsio.com)
40 */
41
42#define pr_fmt(fmt) "chcr:" fmt
43
44#include <linux/kernel.h>
45#include <linux/module.h>
46#include <linux/crypto.h>
47#include <linux/skbuff.h>
48#include <linux/rtnetlink.h>
49#include <linux/highmem.h>
50#include <linux/scatterlist.h>
51
52#include <crypto/aes.h>
53#include <crypto/algapi.h>
54#include <crypto/hash.h>
55#include <crypto/gcm.h>
56#include <crypto/sha.h>
57#include <crypto/authenc.h>
58#include <crypto/ctr.h>
59#include <crypto/gf128mul.h>
60#include <crypto/internal/aead.h>
61#include <crypto/null.h>
62#include <crypto/internal/skcipher.h>
63#include <crypto/aead.h>
64#include <crypto/scatterwalk.h>
65#include <crypto/internal/hash.h>
66
67#include "t4fw_api.h"
68#include "t4_msg.h"
69#include "chcr_core.h"
70#include "chcr_algo.h"
71#include "chcr_crypto.h"
72
73#define IV AES_BLOCK_SIZE
74
75static unsigned int sgl_ent_len[] = {
76 0, 0, 16, 24, 40, 48, 64, 72, 88,
77 96, 112, 120, 136, 144, 160, 168, 184,
78 192, 208, 216, 232, 240, 256, 264, 280,
79 288, 304, 312, 328, 336, 352, 360, 376
80};
81
82static unsigned int dsgl_ent_len[] = {
83 0, 32, 32, 48, 48, 64, 64, 80, 80,
84 112, 112, 128, 128, 144, 144, 160, 160,
85 192, 192, 208, 208, 224, 224, 240, 240,
86 272, 272, 288, 288, 304, 304, 320, 320
87};
88
89static u32 round_constant[11] = {
90 0x01000000, 0x02000000, 0x04000000, 0x08000000,
91 0x10000000, 0x20000000, 0x40000000, 0x80000000,
92 0x1B000000, 0x36000000, 0x6C000000
93};
94
95static int chcr_handle_cipher_resp(struct skcipher_request *req,
96 unsigned char *input, int err);
97
98static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
99{
100 return ctx->crypto_ctx->aeadctx;
101}
102
103static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
104{
105 return ctx->crypto_ctx->ablkctx;
106}
107
108static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
109{
110 return ctx->crypto_ctx->hmacctx;
111}
112
113static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
114{
115 return gctx->ctx->gcm;
116}
117
118static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
119{
120 return gctx->ctx->authenc;
121}
122
123static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
124{
125 return container_of(ctx->dev, struct uld_ctx, dev);
126}
127
128static inline int is_ofld_imm(const struct sk_buff *skb)
129{
130 return (skb->len <= SGE_MAX_WR_LEN);
131}
132
133static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
134{
135 memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
136}
137
138static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
139 unsigned int entlen,
140 unsigned int skip)
141{
142 int nents = 0;
143 unsigned int less;
144 unsigned int skip_len = 0;
145
146 while (sg && skip) {
147 if (sg_dma_len(sg) <= skip) {
148 skip -= sg_dma_len(sg);
149 skip_len = 0;
150 sg = sg_next(sg);
151 } else {
152 skip_len = skip;
153 skip = 0;
154 }
155 }
156
157 while (sg && reqlen) {
158 less = min(reqlen, sg_dma_len(sg) - skip_len);
159 nents += DIV_ROUND_UP(less, entlen);
160 reqlen -= less;
161 skip_len = 0;
162 sg = sg_next(sg);
163 }
164 return nents;
165}
166
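/*
 * Illustrative sketch (not part of the driver): how sg_nents_xlen() above
 * counts work-request entries. All values below are hypothetical; entlen
 * stands in for the per-entry limit (CHCR_SRC_SG_SIZE/CHCR_DST_SG_SIZE).
 */
static int example_nents_for_two_segments(void)
{
	unsigned int entlen = 2048;			/* hypothetical entry limit */
	unsigned int seg[2] = { 5000, 3000 };		/* DMA lengths of two SG entries */
	unsigned int reqlen = 7000, skip = 500, nents = 0, less;
	int i;

	for (i = 0; i < 2 && reqlen; i++) {
		unsigned int avail = seg[i] - (i == 0 ? skip : 0);

		less = min(reqlen, avail);
		nents += DIV_ROUND_UP(less, entlen);	/* split into entlen chunks */
		reqlen -= less;
	}
	return nents;	/* 3 entries from seg[0] (4500B) + 2 from seg[1] (2500B) = 5 */
}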
167static inline int get_aead_subtype(struct crypto_aead *aead)
168{
169 struct aead_alg *alg = crypto_aead_alg(aead);
170 struct chcr_alg_template *chcr_crypto_alg =
171 container_of(alg, struct chcr_alg_template, alg.aead);
172 return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
173}
174
175void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
176{
177 u8 temp[SHA512_DIGEST_SIZE];
178 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
179 int authsize = crypto_aead_authsize(tfm);
180 struct cpl_fw6_pld *fw6_pld;
181 int cmp = 0;
182
183 fw6_pld = (struct cpl_fw6_pld *)input;
184 if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
185 (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
186 cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
187 } else {
188
189 sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
190 authsize, req->assoclen +
191 req->cryptlen - authsize);
192 cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
193 }
194 if (cmp)
195 *err = -EBADMSG;
196 else
197 *err = 0;
198}
199
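/*
 * Illustrative sketch (not part of the driver): chcr_verify_tag() above uses
 * crypto_memneq() rather than memcmp() so that the comparison time does not
 * leak how many leading tag bytes matched. A minimal equivalent check:
 */
static int example_tag_check(const u8 *computed, const u8 *received,
			     unsigned int authsize)
{
	/* non-zero means mismatch, mapped to -EBADMSG as in chcr_verify_tag() */
	return crypto_memneq(computed, received, authsize) ? -EBADMSG : 0;
}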
200static int chcr_inc_wrcount(struct chcr_dev *dev)
201{
202 if (dev->state == CHCR_DETACH)
203 return 1;
204 atomic_inc(&dev->inflight);
205 return 0;
206}
207
208static inline void chcr_dec_wrcount(struct chcr_dev *dev)
209{
210 atomic_dec(&dev->inflight);
211}
212
213static inline int chcr_handle_aead_resp(struct aead_request *req,
214 unsigned char *input,
215 int err)
216{
217 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
218 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
219 struct chcr_dev *dev = a_ctx(tfm)->dev;
220
221 chcr_aead_common_exit(req);
222 if (reqctx->verify == VERIFY_SW) {
223 chcr_verify_tag(req, input, &err);
224 reqctx->verify = VERIFY_HW;
225 }
226 chcr_dec_wrcount(dev);
227 req->base.complete(&req->base, err);
228
229 return err;
230}
231
232static void get_aes_decrypt_key(unsigned char *dec_key,
233 const unsigned char *key,
234 unsigned int keylength)
235{
236 u32 temp;
237 u32 w_ring[MAX_NK];
238 int i, j, k;
239 u8 nr, nk;
240
241 switch (keylength) {
242 case AES_KEYLENGTH_128BIT:
243 nk = KEYLENGTH_4BYTES;
244 nr = NUMBER_OF_ROUNDS_10;
245 break;
246 case AES_KEYLENGTH_192BIT:
247 nk = KEYLENGTH_6BYTES;
248 nr = NUMBER_OF_ROUNDS_12;
249 break;
250 case AES_KEYLENGTH_256BIT:
251 nk = KEYLENGTH_8BYTES;
252 nr = NUMBER_OF_ROUNDS_14;
253 break;
254 default:
255 return;
256 }
257 for (i = 0; i < nk; i++)
258 w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
259
260 i = 0;
261 temp = w_ring[nk - 1];
262 while (i + nk < (nr + 1) * 4) {
263 if (!(i % nk)) {
264 /* RotWord(temp) */
265 temp = (temp << 8) | (temp >> 24);
266 temp = aes_ks_subword(temp);
267 temp ^= round_constant[i / nk];
268 } else if (nk == 8 && (i % 4 == 0)) {
269 temp = aes_ks_subword(temp);
270 }
271 w_ring[i % nk] ^= temp;
272 temp = w_ring[i % nk];
273 i++;
274 }
275 i--;
276 for (k = 0, j = i % nk; k < nk; k++) {
277 *((u32 *)dec_key + k) = htonl(w_ring[j]);
278 j--;
279 if (j < 0)
280 j += nk;
281 }
282}
283
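/*
 * Illustrative sketch (not part of the driver): the FIPS-197 key-schedule
 * recurrence that the loop in get_aes_decrypt_key() walks, written out for
 * AES-128 (nk = 4, nr = 10, 44 schedule words). The function above keeps only
 * the last nk words of this schedule (in reverse word order, byte-swapped),
 * which is the "reverse round key" the hardware expects.
 */
static void example_aes128_key_schedule(const unsigned char *key, u32 *w /* [44] */)
{
	u32 temp;
	int i;

	for (i = 0; i < 4; i++)
		w[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
	for (i = 4; i < 44; i++) {
		temp = w[i - 1];
		if (!(i % 4)) {
			temp = (temp << 8) | (temp >> 24);	/* RotWord */
			temp = aes_ks_subword(temp);		/* SubWord */
			temp ^= round_constant[i / 4 - 1];	/* Rcon    */
		}
		w[i] = w[i - 4] ^ temp;
	}
}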
284static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
285{
286 struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
287
288 switch (ds) {
289 case SHA1_DIGEST_SIZE:
290 base_hash = crypto_alloc_shash("sha1", 0, 0);
291 break;
292 case SHA224_DIGEST_SIZE:
293 base_hash = crypto_alloc_shash("sha224", 0, 0);
294 break;
295 case SHA256_DIGEST_SIZE:
296 base_hash = crypto_alloc_shash("sha256", 0, 0);
297 break;
298 case SHA384_DIGEST_SIZE:
299 base_hash = crypto_alloc_shash("sha384", 0, 0);
300 break;
301 case SHA512_DIGEST_SIZE:
302 base_hash = crypto_alloc_shash("sha512", 0, 0);
303 break;
304 }
305
306 return base_hash;
307}
308
309static int chcr_compute_partial_hash(struct shash_desc *desc,
310 char *iopad, char *result_hash,
311 int digest_size)
312{
313 struct sha1_state sha1_st;
314 struct sha256_state sha256_st;
315 struct sha512_state sha512_st;
316 int error;
317
318 if (digest_size == SHA1_DIGEST_SIZE) {
319 error = crypto_shash_init(desc) ?:
320 crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
321 crypto_shash_export(desc, (void *)&sha1_st);
322 memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
323 } else if (digest_size == SHA224_DIGEST_SIZE) {
324 error = crypto_shash_init(desc) ?:
325 crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
326 crypto_shash_export(desc, (void *)&sha256_st);
327 memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
328
329 } else if (digest_size == SHA256_DIGEST_SIZE) {
330 error = crypto_shash_init(desc) ?:
331 crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
332 crypto_shash_export(desc, (void *)&sha256_st);
333 memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
334
335 } else if (digest_size == SHA384_DIGEST_SIZE) {
336 error = crypto_shash_init(desc) ?:
337 crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
338 crypto_shash_export(desc, (void *)&sha512_st);
339 memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
340
341 } else if (digest_size == SHA512_DIGEST_SIZE) {
342 error = crypto_shash_init(desc) ?:
343 crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
344 crypto_shash_export(desc, (void *)&sha512_st);
345 memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
346 } else {
347 error = -EINVAL;
348 pr_err("Unknown digest size %d\n", digest_size);
349 }
350 return error;
351}
352
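/*
 * Illustrative sketch (not part of the driver): the ipad/opad buffers fed to
 * chcr_compute_partial_hash() are the RFC 2104 HMAC pads -- the key, zero
 * padded to one block of bs bytes, XORed with 0x36 and 0x5c respectively.
 * Assumes the caller has already reduced keylen to at most bs.
 */
static void example_hmac_pads(const u8 *key, unsigned int keylen,
			      unsigned int bs, u8 *ipad, u8 *opad)
{
	unsigned int i;

	memset(ipad, 0, bs);
	memcpy(ipad, key, keylen);
	memcpy(opad, ipad, bs);		/* both start as the zero-padded key */
	for (i = 0; i < bs; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}
}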
353static void chcr_change_order(char *buf, int ds)
354{
355 int i;
356
357 if (ds == SHA512_DIGEST_SIZE) {
358 for (i = 0; i < (ds / sizeof(u64)); i++)
359 *((__be64 *)buf + i) =
360 cpu_to_be64(*((u64 *)buf + i));
361 } else {
362 for (i = 0; i < (ds / sizeof(u32)); i++)
363 *((__be32 *)buf + i) =
364 cpu_to_be32(*((u32 *)buf + i));
365 }
366}
367
368static inline int is_hmac(struct crypto_tfm *tfm)
369{
370 struct crypto_alg *alg = tfm->__crt_alg;
371 struct chcr_alg_template *chcr_crypto_alg =
372 container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
373 alg.hash);
374 if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
375 return 1;
376 return 0;
377}
378
379static inline void dsgl_walk_init(struct dsgl_walk *walk,
380 struct cpl_rx_phys_dsgl *dsgl)
381{
382 walk->dsgl = dsgl;
383 walk->nents = 0;
384 walk->to = (struct phys_sge_pairs *)(dsgl + 1);
385}
386
387static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
388 int pci_chan_id)
389{
390 struct cpl_rx_phys_dsgl *phys_cpl;
391
392 phys_cpl = walk->dsgl;
393
394 phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
395 | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
396 phys_cpl->pcirlxorder_to_noofsgentr =
397 htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
398 CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
399 CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
400 CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
401 CPL_RX_PHYS_DSGL_DCAID_V(0) |
402 CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
403 phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
404 phys_cpl->rss_hdr_int.qid = htons(qid);
405 phys_cpl->rss_hdr_int.hash_val = 0;
406 phys_cpl->rss_hdr_int.channel = pci_chan_id;
407}
408
409static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
410 size_t size,
411 dma_addr_t addr)
412{
413 int j;
414
415 if (!size)
416 return;
417 j = walk->nents;
418 walk->to->len[j % 8] = htons(size);
419 walk->to->addr[j % 8] = cpu_to_be64(addr);
420 j++;
421 if ((j % 8) == 0)
422 walk->to++;
423 walk->nents = j;
424}
425
426static void dsgl_walk_add_sg(struct dsgl_walk *walk,
427 struct scatterlist *sg,
428 unsigned int slen,
429 unsigned int skip)
430{
431 int skip_len = 0;
432 unsigned int left_size = slen, len = 0;
433 unsigned int j = walk->nents;
434 int offset, ent_len;
435
436 if (!slen)
437 return;
438 while (sg && skip) {
439 if (sg_dma_len(sg) <= skip) {
440 skip -= sg_dma_len(sg);
441 skip_len = 0;
442 sg = sg_next(sg);
443 } else {
444 skip_len = skip;
445 skip = 0;
446 }
447 }
448
449 while (left_size && sg) {
450 len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
451 offset = 0;
452 while (len) {
453 ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
454 walk->to->len[j % 8] = htons(ent_len);
455 walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
456 offset + skip_len);
457 offset += ent_len;
458 len -= ent_len;
459 j++;
460 if ((j % 8) == 0)
461 walk->to++;
462 }
463 walk->last_sg = sg;
464 walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
465 skip_len) + skip_len;
466 left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
467 skip_len = 0;
468 sg = sg_next(sg);
469 }
470 walk->nents = j;
471}
472
473static inline void ulptx_walk_init(struct ulptx_walk *walk,
474 struct ulptx_sgl *ulp)
475{
476 walk->sgl = ulp;
477 walk->nents = 0;
478 walk->pair_idx = 0;
479 walk->pair = ulp->sge;
480 walk->last_sg = NULL;
481 walk->last_sg_len = 0;
482}
483
484static inline void ulptx_walk_end(struct ulptx_walk *walk)
485{
486 walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
487 ULPTX_NSGE_V(walk->nents));
488}
489
490
491static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
492 size_t size,
493 dma_addr_t addr)
494{
495 if (!size)
496 return;
497
498 if (walk->nents == 0) {
499 walk->sgl->len0 = cpu_to_be32(size);
500 walk->sgl->addr0 = cpu_to_be64(addr);
501 } else {
502 walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
503 walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
504 walk->pair_idx = !walk->pair_idx;
505 if (!walk->pair_idx)
506 walk->pair++;
507 }
508 walk->nents++;
509}
510
511static void ulptx_walk_add_sg(struct ulptx_walk *walk,
512 struct scatterlist *sg,
513 unsigned int len,
514 unsigned int skip)
515{
516 int small;
517 int skip_len = 0;
518 unsigned int sgmin;
519
520 if (!len)
521 return;
522 while (sg && skip) {
523 if (sg_dma_len(sg) <= skip) {
524 skip -= sg_dma_len(sg);
525 skip_len = 0;
526 sg = sg_next(sg);
527 } else {
528 skip_len = skip;
529 skip = 0;
530 }
531 }
532 WARN(!sg, "SG should not be null here\n");
533 if (sg && (walk->nents == 0)) {
534 small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
535 sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
536 walk->sgl->len0 = cpu_to_be32(sgmin);
537 walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
538 walk->nents++;
539 len -= sgmin;
540 walk->last_sg = sg;
541 walk->last_sg_len = sgmin + skip_len;
542 skip_len += sgmin;
543 if (sg_dma_len(sg) == skip_len) {
544 sg = sg_next(sg);
545 skip_len = 0;
546 }
547 }
548
549 while (sg && len) {
550 small = min(sg_dma_len(sg) - skip_len, len);
551 sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
552 walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
553 walk->pair->addr[walk->pair_idx] =
554 cpu_to_be64(sg_dma_address(sg) + skip_len);
555 walk->pair_idx = !walk->pair_idx;
556 walk->nents++;
557 if (!walk->pair_idx)
558 walk->pair++;
559 len -= sgmin;
560 skip_len += sgmin;
561 walk->last_sg = sg;
562 walk->last_sg_len = skip_len;
563 if (sg_dma_len(sg) == skip_len) {
564 sg = sg_next(sg);
565 skip_len = 0;
566 }
567 }
568}
569
570static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm)
571{
572 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
573 struct chcr_alg_template *chcr_crypto_alg =
574 container_of(alg, struct chcr_alg_template, alg.skcipher);
575
576 return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
577}
578
579static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
580{
581 struct adapter *adap = netdev2adap(dev);
582 struct sge_uld_txq_info *txq_info =
583 adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
584 struct sge_uld_txq *txq;
585 int ret = 0;
586
587 local_bh_disable();
588 txq = &txq_info->uldtxq[idx];
589 spin_lock(&txq->sendq.lock);
590 if (txq->full)
591 ret = -1;
592 spin_unlock(&txq->sendq.lock);
593 local_bh_enable();
594 return ret;
595}
596
597static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
598 struct _key_ctx *key_ctx)
599{
600 if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
601 memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
602 } else {
603 memcpy(key_ctx->key,
604 ablkctx->key + (ablkctx->enckey_len >> 1),
605 ablkctx->enckey_len >> 1);
606 memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
607 ablkctx->rrkey, ablkctx->enckey_len >> 1);
608 }
609 return 0;
610}
611
612static int chcr_hash_ent_in_wr(struct scatterlist *src,
613 unsigned int minsg,
614 unsigned int space,
615 unsigned int srcskip)
616{
617 int srclen = 0;
618 int srcsg = minsg;
619 int soffset = 0, sless;
620
621 if (sg_dma_len(src) == srcskip) {
622 src = sg_next(src);
623 srcskip = 0;
624 }
625 while (src && space > (sgl_ent_len[srcsg + 1])) {
626 sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
627 CHCR_SRC_SG_SIZE);
628 srclen += sless;
629 soffset += sless;
630 srcsg++;
631 if (sg_dma_len(src) == (soffset + srcskip)) {
632 src = sg_next(src);
633 soffset = 0;
634 srcskip = 0;
635 }
636 }
637 return srclen;
638}
639
640static int chcr_sg_ent_in_wr(struct scatterlist *src,
641 struct scatterlist *dst,
642 unsigned int minsg,
643 unsigned int space,
644 unsigned int srcskip,
645 unsigned int dstskip)
646{
647 int srclen = 0, dstlen = 0;
648 int srcsg = minsg, dstsg = minsg;
649 int offset = 0, soffset = 0, less, sless = 0;
650
651 if (sg_dma_len(src) == srcskip) {
652 src = sg_next(src);
653 srcskip = 0;
654 }
655 if (sg_dma_len(dst) == dstskip) {
656 dst = sg_next(dst);
657 dstskip = 0;
658 }
659
660 while (src && dst &&
661 space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
662 sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
663 CHCR_SRC_SG_SIZE);
664 srclen += sless;
665 srcsg++;
666 offset = 0;
667 while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
668 space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
669 if (srclen <= dstlen)
670 break;
671 less = min_t(unsigned int, sg_dma_len(dst) - offset -
672 dstskip, CHCR_DST_SG_SIZE);
673 dstlen += less;
674 offset += less;
675 if ((offset + dstskip) == sg_dma_len(dst)) {
676 dst = sg_next(dst);
677 offset = 0;
678 }
679 dstsg++;
680 dstskip = 0;
681 }
682 soffset += sless;
683 if ((soffset + srcskip) == sg_dma_len(src)) {
684 src = sg_next(src);
685 srcskip = 0;
686 soffset = 0;
687 }
688
689 }
690 return min(srclen, dstlen);
691}
692
693static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
694 u32 flags,
695 struct scatterlist *src,
696 struct scatterlist *dst,
697 unsigned int nbytes,
698 u8 *iv,
699 unsigned short op_type)
700{
701 int err;
702
703 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
704
705 skcipher_request_set_sync_tfm(subreq, cipher);
706 skcipher_request_set_callback(subreq, flags, NULL, NULL);
707 skcipher_request_set_crypt(subreq, src, dst,
708 nbytes, iv);
709
710 err = op_type ? crypto_skcipher_decrypt(subreq) :
711 crypto_skcipher_encrypt(subreq);
712 skcipher_request_zero(subreq);
713
714 return err;
715
716}
717
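/*
 * Illustrative sketch (not part of the driver): how chcr_cipher_fallback()
 * above is typically invoked when no bytes can be carried in a hardware work
 * request (see process_cipher()/chcr_handle_cipher_resp()); the whole
 * remaining request is handed to the software sync skcipher.
 */
static int example_fallback_remaining(struct skcipher_request *req,
				      struct ablk_ctx *ablkctx, u8 *iv,
				      unsigned short op_type)
{
	return chcr_cipher_fallback(ablkctx->sw_cipher, req->base.flags,
				    req->src, req->dst, req->cryptlen,
				    iv, op_type);
}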
718static inline int get_qidxs(struct crypto_async_request *req,
719 unsigned int *txqidx, unsigned int *rxqidx)
720{
721 struct crypto_tfm *tfm = req->tfm;
722 int ret = 0;
723
724 switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
725 case CRYPTO_ALG_TYPE_AEAD:
726 {
727 struct aead_request *aead_req =
728 container_of(req, struct aead_request, base);
729 struct chcr_aead_reqctx *reqctx = aead_request_ctx(aead_req);
730 *txqidx = reqctx->txqidx;
731 *rxqidx = reqctx->rxqidx;
732 break;
733 }
734 case CRYPTO_ALG_TYPE_SKCIPHER:
735 {
736 struct skcipher_request *sk_req =
737 container_of(req, struct skcipher_request, base);
738 struct chcr_skcipher_req_ctx *reqctx =
739 skcipher_request_ctx(sk_req);
740 *txqidx = reqctx->txqidx;
741 *rxqidx = reqctx->rxqidx;
742 break;
743 }
744 case CRYPTO_ALG_TYPE_AHASH:
745 {
746 struct ahash_request *ahash_req =
747 container_of(req, struct ahash_request, base);
748 struct chcr_ahash_req_ctx *reqctx =
749 ahash_request_ctx(ahash_req);
750 *txqidx = reqctx->txqidx;
751 *rxqidx = reqctx->rxqidx;
752 break;
753 }
754 default:
755 ret = -EINVAL;
756 /* should never get here */
757 BUG();
758 break;
759 }
760 return ret;
761}
762
763static inline void create_wreq(struct chcr_context *ctx,
764 struct chcr_wr *chcr_req,
765 struct crypto_async_request *req,
766 unsigned int imm,
767 int hash_sz,
768 unsigned int len16,
769 unsigned int sc_len,
770 unsigned int lcb)
771{
772 struct uld_ctx *u_ctx = ULD_CTX(ctx);
773 unsigned int tx_channel_id, rx_channel_id;
774 unsigned int txqidx = 0, rxqidx = 0;
775 unsigned int qid, fid;
776
777 get_qidxs(req, &txqidx, &rxqidx);
778 qid = u_ctx->lldi.rxq_ids[rxqidx];
779 fid = u_ctx->lldi.rxq_ids[0];
780 tx_channel_id = txqidx / ctx->txq_perchan;
781 rx_channel_id = rxqidx / ctx->rxq_perchan;
782
783
784 chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
785 chcr_req->wreq.pld_size_hash_size =
786 htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
787 chcr_req->wreq.len16_pkd =
788 htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
789 chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
790 chcr_req->wreq.rx_chid_to_rx_q_id = FILL_WR_RX_Q_ID(rx_channel_id, qid,
791 !!lcb, txqidx);
792
793 chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(tx_channel_id, fid);
794 chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
795 ((sizeof(chcr_req->wreq)) >> 4)));
796 chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
797 chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
798 sizeof(chcr_req->key_ctx) + sc_len);
799}
800
801/**
802 * create_cipher_wr - form the WR for cipher operations
803 * @wrparam: cipher request parameters, bundling the skcipher request,
804 * the number of bytes to process in this WR, and the ingress qid
805 * where the response of this WR should be received.
806 *
807 */
808static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
809{
810 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
811 struct chcr_context *ctx = c_ctx(tfm);
812 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
813 struct sk_buff *skb = NULL;
814 struct chcr_wr *chcr_req;
815 struct cpl_rx_phys_dsgl *phys_cpl;
816 struct ulptx_sgl *ulptx;
817 struct chcr_skcipher_req_ctx *reqctx =
818 skcipher_request_ctx(wrparam->req);
819 unsigned int temp = 0, transhdr_len, dst_size;
820 int error;
821 int nents;
822 unsigned int kctx_len;
823 gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
824 GFP_KERNEL : GFP_ATOMIC;
825 struct adapter *adap = padap(ctx->dev);
826 unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
827
828 nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
829 reqctx->dst_ofst);
830 dst_size = get_space_for_phys_dsgl(nents);
831 kctx_len = roundup(ablkctx->enckey_len, 16);
832 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
833 nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
834 CHCR_SRC_SG_SIZE, reqctx->src_ofst);
835 temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
836 (sgl_len(nents) * 8);
837 transhdr_len += temp;
838 transhdr_len = roundup(transhdr_len, 16);
839 skb = alloc_skb(SGE_MAX_WR_LEN, flags);
840 if (!skb) {
841 error = -ENOMEM;
842 goto err;
843 }
844 chcr_req = __skb_put_zero(skb, transhdr_len);
845 chcr_req->sec_cpl.op_ivinsrtofst =
846 FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
847
848 chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
849 chcr_req->sec_cpl.aadstart_cipherstop_hi =
850 FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);
851
852 chcr_req->sec_cpl.cipherstop_lo_authinsert =
853 FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
854 chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
855 ablkctx->ciph_mode,
856 0, 0, IV >> 1);
857 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
858 0, 1, dst_size);
859
860 chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
861 if ((reqctx->op == CHCR_DECRYPT_OP) &&
862 (!(get_cryptoalg_subtype(tfm) ==
863 CRYPTO_ALG_SUB_TYPE_CTR)) &&
864 (!(get_cryptoalg_subtype(tfm) ==
865 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
866 generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
867 } else {
868 if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
869 (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
870 memcpy(chcr_req->key_ctx.key, ablkctx->key,
871 ablkctx->enckey_len);
872 } else {
873 memcpy(chcr_req->key_ctx.key, ablkctx->key +
874 (ablkctx->enckey_len >> 1),
875 ablkctx->enckey_len >> 1);
876 memcpy(chcr_req->key_ctx.key +
877 (ablkctx->enckey_len >> 1),
878 ablkctx->key,
879 ablkctx->enckey_len >> 1);
880 }
881 }
882 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
883 ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
884 chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
885 chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);
886
887 atomic_inc(&adap->chcr_stats.cipher_rqst);
888 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
889 + (reqctx->imm ? (wrparam->bytes) : 0);
890 create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
891 transhdr_len, temp,
892 ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
893 reqctx->skb = skb;
894
895 if (reqctx->op && (ablkctx->ciph_mode ==
896 CHCR_SCMD_CIPHER_MODE_AES_CBC))
897 sg_pcopy_to_buffer(wrparam->req->src,
898 sg_nents(wrparam->req->src), wrparam->req->iv, 16,
899 reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);
900
901 return skb;
902err:
903 return ERR_PTR(error);
904}
905
906static inline int chcr_keyctx_ck_size(unsigned int keylen)
907{
908 int ck_size = 0;
909
910 if (keylen == AES_KEYSIZE_128)
911 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
912 else if (keylen == AES_KEYSIZE_192)
913 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
914 else if (keylen == AES_KEYSIZE_256)
915 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
916 else
917 ck_size = 0;
918
919 return ck_size;
920}
921static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
922 const u8 *key,
923 unsigned int keylen)
924{
925 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
926
927 crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
928 CRYPTO_TFM_REQ_MASK);
929 crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
930 cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
931 return crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
932}
933
934static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
935 const u8 *key,
936 unsigned int keylen)
937{
938 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
939 unsigned int ck_size, context_size;
940 u16 alignment = 0;
941 int err;
942
943 err = chcr_cipher_fallback_setkey(cipher, key, keylen);
944 if (err)
945 goto badkey_err;
946
947 ck_size = chcr_keyctx_ck_size(keylen);
948 alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
949 memcpy(ablkctx->key, key, keylen);
950 ablkctx->enckey_len = keylen;
951 get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
952 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
953 keylen + alignment) >> 4;
954
955 ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
956 0, 0, context_size);
957 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
958 return 0;
959badkey_err:
960 ablkctx->enckey_len = 0;
961
962 return err;
963}
964
965static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
966 const u8 *key,
967 unsigned int keylen)
968{
969 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
970 unsigned int ck_size, context_size;
971 u16 alignment = 0;
972 int err;
973
974 err = chcr_cipher_fallback_setkey(cipher, key, keylen);
975 if (err)
976 goto badkey_err;
977 ck_size = chcr_keyctx_ck_size(keylen);
978 alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
979 memcpy(ablkctx->key, key, keylen);
980 ablkctx->enckey_len = keylen;
981 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
982 keylen + alignment) >> 4;
983
984 ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
985 0, 0, context_size);
986 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
987
988 return 0;
989badkey_err:
990 ablkctx->enckey_len = 0;
991
992 return err;
993}
994
995static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
996 const u8 *key,
997 unsigned int keylen)
998{
999 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
1000 unsigned int ck_size, context_size;
1001 u16 alignment = 0;
1002 int err;
1003
1004 if (keylen < CTR_RFC3686_NONCE_SIZE)
1005 return -EINVAL;
1006 memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
1007 CTR_RFC3686_NONCE_SIZE);
1008
1009 keylen -= CTR_RFC3686_NONCE_SIZE;
1010 err = chcr_cipher_fallback_setkey(cipher, key, keylen);
1011 if (err)
1012 goto badkey_err;
1013
1014 ck_size = chcr_keyctx_ck_size(keylen);
1015 alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
1016 memcpy(ablkctx->key, key, keylen);
1017 ablkctx->enckey_len = keylen;
1018 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
1019 keylen + alignment) >> 4;
1020
1021 ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
1022 0, 0, context_size);
1023 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
1024
1025 return 0;
1026badkey_err:
b8fd1f41
HJ
1027 ablkctx->enckey_len = 0;
1028
1029 return err;
1030}
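/*
 * Illustrative sketch (not part of the driver): the 16-byte counter block
 * that process_cipher() assembles for rfc3686(ctr(aes)) requests -- the
 * 4-byte nonce saved at setkey time, the 8-byte per-request IV, and a
 * 4-byte big-endian counter initialised to 1.
 */
static void example_rfc3686_counter_block(u8 *block, const u8 *nonce,
					  const u8 *iv)
{
	memcpy(block, nonce, CTR_RFC3686_NONCE_SIZE);			 /* bytes 0-3   */
	memcpy(block + CTR_RFC3686_NONCE_SIZE, iv, CTR_RFC3686_IV_SIZE); /* bytes 4-11  */
	*(__be32 *)(block + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
		cpu_to_be32(1);						 /* bytes 12-15 */
}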
1031static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
1032{
1033 unsigned int size = AES_BLOCK_SIZE;
1034 __be32 *b = (__be32 *)(dstiv + size);
1035 u32 c, prev;
1036
1037 memcpy(dstiv, srciv, AES_BLOCK_SIZE);
1038 for (; size >= 4; size -= 4) {
1039 prev = be32_to_cpu(*--b);
1040 c = prev + add;
1041 *b = cpu_to_be32(c);
1042 if (prev < c)
1043 break;
1044 add = 1;
1045 }
1046
1047}
1048
1049static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
1050{
1051 __be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
1052 u64 c;
1053 u32 temp = be32_to_cpu(*--b);
1054
1055 temp = ~temp;
1056 c = (u64)temp + 1; /* number of blocks that can be processed without overflow */
1057 if ((bytes / AES_BLOCK_SIZE) > c)
1058 bytes = c * AES_BLOCK_SIZE;
1059 return bytes;
1060}
1061
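/*
 * Illustrative sketch (not part of the driver): what adjust_ctr_overflow()
 * above computes, with hypothetical numbers. Only the low 32-bit word of the
 * IV counts blocks, so a request is clamped to the number of blocks left
 * before that word wraps.
 */
static unsigned int example_ctr_clamp(void)
{
	u32 ctr = 0xfffffffd;			/* current low counter word */
	u64 blocks_left = (u64)(~ctr) + 1;	/* 3 blocks until it wraps  */
	unsigned int bytes = 10 * AES_BLOCK_SIZE;

	if ((bytes / AES_BLOCK_SIZE) > blocks_left)
		bytes = blocks_left * AES_BLOCK_SIZE;	/* clamp to 48 bytes */
	return bytes;
}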
1062static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
1063 u32 isfinal)
1064{
1065 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1066 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1067 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1068 struct crypto_aes_ctx aes;
1069 int ret, i;
1070 u8 *key;
1071 unsigned int keylen;
1072 int round = reqctx->last_req_len / AES_BLOCK_SIZE;
1073 int round8 = round / 8;
1074
1075 memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
1076
1077 keylen = ablkctx->enckey_len / 2;
1078 key = ablkctx->key + keylen;
1079 ret = aes_expandkey(&aes, key, keylen);
1080 if (ret)
1081 return ret;
1082 aes_encrypt(&aes, iv, iv);
1083 for (i = 0; i < round8; i++)
1084 gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
1085
1086 for (i = 0; i < (round % 8); i++)
1087 gf128mul_x_ble((le128 *)iv, (le128 *)iv);
1088
1089 if (!isfinal)
1090 aes_decrypt(&aes, iv, iv);
1091
1092 memzero_explicit(&aes, sizeof(aes));
1093 return 0;
1094}
1095
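/*
 * Illustrative sketch (not part of the driver): advancing an XTS tweak by
 * 'blocks' positions, as chcr_update_tweak() does before resuming a split
 * request -- repeated multiplication by x (and by x^8) in GF(2^128) using the
 * little-endian block convention.
 */
static void example_xts_advance_tweak(le128 *tweak, unsigned int blocks)
{
	unsigned int i;

	for (i = 0; i < blocks / 8; i++)
		gf128mul_x8_ble(tweak, tweak);	/* eight doublings at once    */
	for (i = 0; i < blocks % 8; i++)
		gf128mul_x_ble(tweak, tweak);	/* remaining single doublings */
}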
1096static int chcr_update_cipher_iv(struct skcipher_request *req,
1097 struct cpl_fw6_pld *fw6_pld, u8 *iv)
1098{
1099 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1100 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1101 int subtype = get_cryptoalg_subtype(tfm);
1102 int ret = 0;
1103
1104 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1105 ctr_add_iv(iv, req->iv, (reqctx->processed /
1106 AES_BLOCK_SIZE));
1107 else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
1108 *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1109 CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
1110 AES_BLOCK_SIZE) + 1);
1111 else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1112 ret = chcr_update_tweak(req, iv, 0);
1113 else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1114 if (reqctx->op)
1115 /* Updated before sending the last WR */
1116 memcpy(iv, req->iv, AES_BLOCK_SIZE);
1117 else
1118 memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1119 }
1120
1121 return ret;
1122
1123}
1124
1125/* We need a separate function for the final IV because in RFC 3686 the
1126 * initial counter starts from 1 and the IV buffer is only 8 bytes, which
1127 * remains constant for subsequent update requests.
1128 */
1129
1130static int chcr_final_cipher_iv(struct skcipher_request *req,
1131 struct cpl_fw6_pld *fw6_pld, u8 *iv)
1132{
1133 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1134 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1135 int subtype = get_cryptoalg_subtype(tfm);
1136 int ret = 0;
1137
1138 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1139 ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
1140 AES_BLOCK_SIZE));
1141 else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) {
1142 if (!reqctx->partial_req)
1143 memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
1144 else
1145 ret = chcr_update_tweak(req, iv, 1);
1146 }
1147 else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1148 /*Already updated for Decrypt*/
1149 if (!reqctx->op)
1150 memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1151
1152 }
1153 return ret;
1154
1155}
1156
1157static int chcr_handle_cipher_resp(struct skcipher_request *req,
1158 unsigned char *input, int err)
1159{
1160 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1161 struct chcr_context *ctx = c_ctx(tfm);
1162 struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1163 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1164 struct sk_buff *skb;
1165 struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
1166 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1167 struct cipher_wr_param wrparam;
1168 struct chcr_dev *dev = c_ctx(tfm)->dev;
1169 int bytes;
1170
1171 if (err)
1172 goto unmap;
1173 if (req->cryptlen == reqctx->processed) {
1174 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1175 req);
1176 err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
1177 goto complete;
1178 }
1179
1180 if (!reqctx->imm) {
1181 bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
1182 CIP_SPACE_LEFT(ablkctx->enckey_len),
1183 reqctx->src_ofst, reqctx->dst_ofst);
1184 if ((bytes + reqctx->processed) >= req->cryptlen)
1185 bytes = req->cryptlen - reqctx->processed;
1186 else
1187 bytes = rounddown(bytes, 16);
1188 } else {
1189 /* CTR mode counter overflow */
1190 bytes = req->cryptlen - reqctx->processed;
1191 }
1192 err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
1193 if (err)
1194 goto unmap;
1195
1196 if (unlikely(bytes == 0)) {
1197 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1198 req);
1199 err = chcr_cipher_fallback(ablkctx->sw_cipher,
1200 req->base.flags,
1201 req->src,
1202 req->dst,
1203 req->cryptlen,
1204 req->iv,
1205 reqctx->op);
1206 goto complete;
1207 }
1208
1209 if (get_cryptoalg_subtype(tfm) ==
1210 CRYPTO_ALG_SUB_TYPE_CTR)
1211 bytes = adjust_ctr_overflow(reqctx->iv, bytes);
1212 wrparam.qid = u_ctx->lldi.rxq_ids[reqctx->rxqidx];
1213 wrparam.req = req;
1214 wrparam.bytes = bytes;
1215 skb = create_cipher_wr(&wrparam);
1216 if (IS_ERR(skb)) {
1217 pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
1218 err = PTR_ERR(skb);
1219 goto unmap;
1220 }
1221 skb->dev = u_ctx->lldi.ports[0];
1222 set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1223 chcr_send_wr(skb);
1224 reqctx->last_req_len = bytes;
1225 reqctx->processed += bytes;
1226 if (get_cryptoalg_subtype(tfm) ==
1227 CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
1228 CRYPTO_TFM_REQ_MAY_SLEEP ) {
1229 complete(&ctx->cbc_aes_aio_done);
1230 }
1231 return 0;
1232unmap:
1233 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1234complete:
1235 if (get_cryptoalg_subtype(tfm) ==
1236 CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
1237 CRYPTO_TFM_REQ_MAY_SLEEP ) {
1238 complete(&ctx->cbc_aes_aio_done);
1239 }
1240 chcr_dec_wrcount(dev);
1241 req->base.complete(&req->base, err);
1242 return err;
1243}
1244
1245static int process_cipher(struct skcipher_request *req,
1246 unsigned short qid,
1247 struct sk_buff **skb,
1248 unsigned short op_type)
1249{
1250 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1251 unsigned int ivsize = crypto_skcipher_ivsize(tfm);
1252 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1253 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1254 struct cipher_wr_param wrparam;
1255 int bytes, err = -EINVAL;
1256
1257 reqctx->processed = 0;
1258 reqctx->partial_req = 0;
1259 if (!req->iv)
1260 goto error;
1261 if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
1262 (req->cryptlen == 0) ||
1263 (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
1264 pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
1265 ablkctx->enckey_len, req->cryptlen, ivsize);
1266 goto error;
1267 }
1268
1269 err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1270 if (err)
1271 goto error;
1272 if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
1273 AES_MIN_KEY_SIZE +
1274 sizeof(struct cpl_rx_phys_dsgl) +
1275 /*Min dsgl size*/
1276 32))) {
1277 /* Can be sent as Imm*/
1278 unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
1279
1280 dnents = sg_nents_xlen(req->dst, req->cryptlen,
1281 CHCR_DST_SG_SIZE, 0);
1282 phys_dsgl = get_space_for_phys_dsgl(dnents);
1283 kctx_len = roundup(ablkctx->enckey_len, 16);
1284 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
1285 reqctx->imm = (transhdr_len + IV + req->cryptlen) <=
1286 SGE_MAX_WR_LEN;
1287 bytes = IV + req->cryptlen;
1288
1289 } else {
1290 reqctx->imm = 0;
1291 }
1292
1293 if (!reqctx->imm) {
1294 bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
1295 CIP_SPACE_LEFT(ablkctx->enckey_len),
1296 0, 0);
1297 if ((bytes + reqctx->processed) >= req->cryptlen)
1298 bytes = req->cryptlen - reqctx->processed;
1299 else
1300 bytes = rounddown(bytes, 16);
1301 } else {
1302 bytes = req->cryptlen;
1303 }
1304 if (get_cryptoalg_subtype(tfm) ==
1305 CRYPTO_ALG_SUB_TYPE_CTR) {
1306 bytes = adjust_ctr_overflow(req->iv, bytes);
1307 }
1308 if (get_cryptoalg_subtype(tfm) ==
1309 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
1310 memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
1311 memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
1312 CTR_RFC3686_IV_SIZE);
1313
1314 /* initialize counter portion of counter block */
1315 *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1316 CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1317
1318 } else {
1319
1320 memcpy(reqctx->iv, req->iv, IV);
1321 }
1322 if (unlikely(bytes == 0)) {
1323 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1324 req);
1325 err = chcr_cipher_fallback(ablkctx->sw_cipher,
1326 req->base.flags,
1327 req->src,
1328 req->dst,
1329 req->cryptlen,
1330 reqctx->iv,
1331 op_type);
1332 goto error;
1333 }
1334 reqctx->op = op_type;
1335 reqctx->srcsg = req->src;
1336 reqctx->dstsg = req->dst;
1337 reqctx->src_ofst = 0;
1338 reqctx->dst_ofst = 0;
1339 wrparam.qid = qid;
1340 wrparam.req = req;
1341 wrparam.bytes = bytes;
1342 *skb = create_cipher_wr(&wrparam);
1343 if (IS_ERR(*skb)) {
1344 err = PTR_ERR(*skb);
1345 goto unmap;
1346 }
1347 reqctx->processed = bytes;
1348 reqctx->last_req_len = bytes;
1349 reqctx->partial_req = !!(req->cryptlen - reqctx->processed);
1350
1351 return 0;
1352unmap:
1353 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1354error:
1355 return err;
1356}
1357
1358static int chcr_aes_encrypt(struct skcipher_request *req)
1359{
1360 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1361 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1362 struct chcr_dev *dev = c_ctx(tfm)->dev;
1363 struct sk_buff *skb = NULL;
1364 int err;
1365 struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1366 struct chcr_context *ctx = c_ctx(tfm);
1367 unsigned int cpu;
1368
1369 cpu = get_cpu();
1370 reqctx->txqidx = cpu % ctx->ntxq;
1371 reqctx->rxqidx = cpu % ctx->nrxq;
1372 put_cpu();
1373
1374 err = chcr_inc_wrcount(dev);
1375 if (err)
1376 return -ENXIO;
1377 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1378 reqctx->txqidx) &&
1379 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1380 err = -ENOSPC;
1381 goto error;
1382 }
1383
1384 err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
1385 &skb, CHCR_ENCRYPT_OP);
1386 if (err || !skb)
1387 return err;
1388 skb->dev = u_ctx->lldi.ports[0];
1389 set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1390 chcr_send_wr(skb);
1391 if (get_cryptoalg_subtype(tfm) ==
1392 CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
1393 CRYPTO_TFM_REQ_MAY_SLEEP ) {
1394 reqctx->partial_req = 1;
1395 wait_for_completion(&ctx->cbc_aes_aio_done);
1396 }
1397 return -EINPROGRESS;
1398error:
1399 chcr_dec_wrcount(dev);
1400 return err;
1401}
1402
1403static int chcr_aes_decrypt(struct skcipher_request *req)
1404{
1405 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1406 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1407 struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1408 struct chcr_dev *dev = c_ctx(tfm)->dev;
1409 struct sk_buff *skb = NULL;
1410 int err;
1411 struct chcr_context *ctx = c_ctx(tfm);
1412 unsigned int cpu;
1413
1414 cpu = get_cpu();
1415 reqctx->txqidx = cpu % ctx->ntxq;
1416 reqctx->rxqidx = cpu % ctx->nrxq;
1417 put_cpu();
1418
1419 err = chcr_inc_wrcount(dev);
1420 if (err)
1421 return -ENXIO;
1422
1423 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1424 reqctx->txqidx) &&
1425 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))))
1426 return -ENOSPC;
1427 err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
1428 &skb, CHCR_DECRYPT_OP);
1429 if (err || !skb)
1430 return err;
1431 skb->dev = u_ctx->lldi.ports[0];
1432 set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1433 chcr_send_wr(skb);
1434 return -EINPROGRESS;
1435}
1436static int chcr_device_init(struct chcr_context *ctx)
1437{
1438 struct uld_ctx *u_ctx = NULL;
1439 int txq_perchan, ntxq;
1440 int err = 0, rxq_perchan;
1441
1442 if (!ctx->dev) {
1443 u_ctx = assign_chcr_device();
1444 if (!u_ctx) {
1445 pr_err("chcr device assignment fails\n");
1446 goto out;
1447 }
1448 ctx->dev = &u_ctx->dev;
1449 ntxq = u_ctx->lldi.ntxq;
1450 rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
1451 txq_perchan = ntxq / u_ctx->lldi.nchan;
1452 ctx->ntxq = ntxq;
1453 ctx->nrxq = u_ctx->lldi.nrxq;
1454 ctx->rxq_perchan = rxq_perchan;
1455 ctx->txq_perchan = txq_perchan;
1456 }
1457out:
1458 return err;
1459}
1460
1461static int chcr_init_tfm(struct crypto_skcipher *tfm)
1462{
1463 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1464 struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1465 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1466
1467 ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->base.cra_name, 0,
1468 CRYPTO_ALG_NEED_FALLBACK);
1469 if (IS_ERR(ablkctx->sw_cipher)) {
1470 pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
1471 return PTR_ERR(ablkctx->sw_cipher);
1472 }
1473 init_completion(&ctx->cbc_aes_aio_done);
1474 crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
1475
1476 return chcr_device_init(ctx);
1477}
1478
1479static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
1480{
1481 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1482 struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1483 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1484
1485 /*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes))
1486 * cannot be used as fallback in chcr_handle_cipher_response
1487 */
1488 ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
1489 CRYPTO_ALG_NEED_FALLBACK);
1490 if (IS_ERR(ablkctx->sw_cipher)) {
1491 pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
1492 return PTR_ERR(ablkctx->sw_cipher);
1493 }
1494 crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
1495 return chcr_device_init(ctx);
1496}
1497
1498
1499static void chcr_exit_tfm(struct crypto_skcipher *tfm)
1500{
1501 struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1502 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1503
1504 crypto_free_sync_skcipher(ablkctx->sw_cipher);
1505}
1506
1507static int get_alg_config(struct algo_param *params,
1508 unsigned int auth_size)
1509{
1510 switch (auth_size) {
1511 case SHA1_DIGEST_SIZE:
1512 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
1513 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
1514 params->result_size = SHA1_DIGEST_SIZE;
1515 break;
1516 case SHA224_DIGEST_SIZE:
1517 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1518 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
1519 params->result_size = SHA256_DIGEST_SIZE;
1520 break;
1521 case SHA256_DIGEST_SIZE:
1522 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1523 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
1524 params->result_size = SHA256_DIGEST_SIZE;
1525 break;
1526 case SHA384_DIGEST_SIZE:
1527 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1528 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
1529 params->result_size = SHA512_DIGEST_SIZE;
1530 break;
1531 case SHA512_DIGEST_SIZE:
1532 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1533 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
1534 params->result_size = SHA512_DIGEST_SIZE;
1535 break;
1536 default:
1537 pr_err("chcr : ERROR, unsupported digest size\n");
1538 return -EINVAL;
1539 }
1540 return 0;
1541}
1542
1543static inline void chcr_free_shash(struct crypto_shash *base_hash)
1544{
1545 crypto_free_shash(base_hash);
1546}
1547
1548/**
1549 * create_hash_wr - Create hash work request
1550 * @req: hash request
1551 */
1552static struct sk_buff *create_hash_wr(struct ahash_request *req,
1553 struct hash_wr_param *param)
1554{
1555 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1556 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1557 struct chcr_context *ctx = h_ctx(tfm);
1558 struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1559 struct sk_buff *skb = NULL;
1560 struct uld_ctx *u_ctx = ULD_CTX(ctx);
1561 struct chcr_wr *chcr_req;
1562 struct ulptx_sgl *ulptx;
1563 unsigned int nents = 0, transhdr_len;
1564 unsigned int temp = 0;
1565 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1566 GFP_ATOMIC;
1567 struct adapter *adap = padap(h_ctx(tfm)->dev);
1568 int error = 0;
1569 unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;
1570
1571 transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
1572 req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
1573 param->sg_len) <= SGE_MAX_WR_LEN;
1574 nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
1575 CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
1576 nents += param->bfr_len ? 1 : 0;
1577 transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
1578 param->sg_len, 16) : (sgl_len(nents) * 8);
1579 transhdr_len = roundup(transhdr_len, 16);
1580
1581 skb = alloc_skb(transhdr_len, flags);
1582 if (!skb)
1583 return ERR_PTR(-ENOMEM);
1584 chcr_req = __skb_put_zero(skb, transhdr_len);
1585
1586 chcr_req->sec_cpl.op_ivinsrtofst =
1587 FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 0);
1588
1589 chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
1590
1591 chcr_req->sec_cpl.aadstart_cipherstop_hi =
1592 FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
1593 chcr_req->sec_cpl.cipherstop_lo_authinsert =
1594 FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
1595 chcr_req->sec_cpl.seqno_numivs =
1596 FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
1597 param->opad_needed, 0);
1598
1599 chcr_req->sec_cpl.ivgen_hdrlen =
1600 FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
1601
1602 memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
1603 param->alg_prm.result_size);
1604
1605 if (param->opad_needed)
1606 memcpy(chcr_req->key_ctx.key +
1607 ((param->alg_prm.result_size <= 32) ? 32 :
1608 CHCR_HASH_MAX_DIGEST_SIZE),
324429d7
HS
1609 hmacctx->opad, param->alg_prm.result_size);
1610
358961d1 1611 chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
324429d7
HS
1612 param->alg_prm.mk_size, 0,
1613 param->opad_needed,
1614 ((param->kctx_len +
1615 sizeof(chcr_req->key_ctx)) >> 4));
1616 chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
1617 ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
1618 DUMMY_BYTES);
1619 if (param->bfr_len != 0) {
1620 req_ctx->hctx_wr.dma_addr =
1621 dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
1622 param->bfr_len, DMA_TO_DEVICE);
1623 if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
1624 req_ctx->hctx_wr.dma_addr)) {
1625 error = -ENOMEM;
1626 goto err;
1627 }
1628 req_ctx->hctx_wr.dma_len = param->bfr_len;
1629 } else {
1630 req_ctx->hctx_wr.dma_addr = 0;
1631 }
1632 chcr_add_hash_src_ent(req, ulptx, param);
1633 /* Request up to max WR size */
1634 temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
1635 (param->sg_len + param->bfr_len) : 0);
1636 atomic_inc(&adap->chcr_stats.digest_rqst);
1637 create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
1638 param->hash_size, transhdr_len,
1639 temp, 0);
1640 req_ctx->hctx_wr.skb = skb;
1641 return skb;
1642err:
1643 kfree_skb(skb);
1644 return ERR_PTR(error);
1645}
1646
1647static int chcr_ahash_update(struct ahash_request *req)
1648{
1649 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1650 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1651 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1652 struct chcr_context *ctx = h_ctx(rtfm);
1653 struct chcr_dev *dev = h_ctx(rtfm)->dev;
1654 struct sk_buff *skb;
1655 u8 remainder = 0, bs;
1656 unsigned int nbytes = req->nbytes;
1657 struct hash_wr_param params;
1658 int error;
1659 unsigned int cpu;
1660
1661 cpu = get_cpu();
1662 req_ctx->txqidx = cpu % ctx->ntxq;
1663 req_ctx->rxqidx = cpu % ctx->nrxq;
1664 put_cpu();
324429d7
HS
1665
1666 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
324429d7 1667
44fce12a
HJ
1668 if (nbytes + req_ctx->reqlen >= bs) {
1669 remainder = (nbytes + req_ctx->reqlen) % bs;
1670 nbytes = nbytes + req_ctx->reqlen - remainder;
324429d7 1671 } else {
44fce12a
HJ
1672 sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
1673 + req_ctx->reqlen, nbytes, 0);
1674 req_ctx->reqlen += nbytes;
324429d7
HS
1675 return 0;
1676 }
fef4912b
HJ
1677 error = chcr_inc_wrcount(dev);
1678 if (error)
1679 return -ENXIO;
1680 /* Detach state for CHCR means lldi or padap is freed. Increasing the
1681 * inflight count for dev guarantees that lldi and padap remain valid.
1682 */
1683 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
567be3a5
AS
1684 req_ctx->txqidx) &&
1685 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
fef4912b
HJ
1686 error = -ENOSPC;
1687 goto err;
fef4912b
HJ
1688 }
1689
5110e655 1690 chcr_init_hctx_per_wr(req_ctx);
2f47d580 1691 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
fef4912b
HJ
1692 if (error) {
1693 error = -ENOMEM;
1694 goto err;
1695 }
5110e655
HJ
1696 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1697 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1698 params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1699 HASH_SPACE_LEFT(params.kctx_len), 0);
1700 if (params.sg_len > req->nbytes)
1701 params.sg_len = req->nbytes;
1702 params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
1703 req_ctx->reqlen;
324429d7
HS
1704 params.opad_needed = 0;
1705 params.more = 1;
1706 params.last = 0;
44fce12a 1707 params.bfr_len = req_ctx->reqlen;
324429d7 1708 params.scmd1 = 0;
5110e655
HJ
1709 req_ctx->hctx_wr.srcsg = req->src;
1710
1711 params.hash_size = params.alg_prm.result_size;
324429d7 1712 req_ctx->data_len += params.sg_len + params.bfr_len;
358961d1 1713 skb = create_hash_wr(req, &params);
2f47d580
HJ
1714 if (IS_ERR(skb)) {
1715 error = PTR_ERR(skb);
1716 goto unmap;
1717 }
324429d7 1718
5110e655 1719 req_ctx->hctx_wr.processed += params.sg_len;
44fce12a 1720 if (remainder) {
44fce12a 1721 /* Swap buffers */
abfa2b37 1722 swap(req_ctx->reqbfr, req_ctx->skbfr);
324429d7 1723 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
44fce12a 1724 req_ctx->reqbfr, remainder, req->nbytes -
324429d7 1725 remainder);
44fce12a
HJ
1726 }
1727 req_ctx->reqlen = remainder;
324429d7 1728 skb->dev = u_ctx->lldi.ports[0];
567be3a5 1729 set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
324429d7 1730 chcr_send_wr(skb);
567be3a5 1731 return -EINPROGRESS;
2f47d580
HJ
1732unmap:
1733 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
fef4912b
HJ
1734err:
1735 chcr_dec_wrcount(dev);
2f47d580 1736 return error;
324429d7
HS
1737}
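A note on the update path just above: the driver only hands whole blocks to the hardware, so anything smaller than the block size is parked in reqbfr and the call returns 0 without issuing a work request. A minimal sketch of that bookkeeping (illustrative only, not driver code; the helper name is made up for the example):

/* Illustrative sketch of the partial-block bookkeeping in chcr_ahash_update().
 * Given the bytes already buffered (reqlen) and the new update (nbytes), the
 * driver submits the largest multiple of the block size bs and re-buffers the
 * rest for the next call.
 */
static inline unsigned int example_hash_send_len(unsigned int reqlen,
						 unsigned int nbytes,
						 unsigned int bs,
						 unsigned int *remainder)
{
	unsigned int total = reqlen + nbytes;

	if (total < bs) {		/* too little data: buffer and return */
		*remainder = total;
		return 0;
	}
	*remainder = total % bs;
	return total - *remainder;	/* always a whole number of blocks */
}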
1738
1739static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
1740{
1741 memset(bfr_ptr, 0, bs);
1742 *bfr_ptr = 0x80;
1743 if (bs == 64)
1744 *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
1745 else
1746 *(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
1747}
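create_last_hash_block() builds a synthetic final block in the usual Merkle-Damgard padding form: a 0x80 terminator byte, zero fill, and the total message length in bits stored big-endian in the last 8 bytes (scmd1 is the byte count, hence the << 3). A standalone sketch for the 64-byte block case (SHA-1/SHA-224/SHA-256); illustrative only, not part of the driver:

#include <stdint.h>
#include <string.h>

/* Illustrative sketch: build the same final block as create_last_hash_block()
 * does for bs == 64. total_bytes plays the role of scmd1.
 */
static void example_last_block64(uint8_t block[64], uint64_t total_bytes)
{
	uint64_t bits = total_bytes << 3;	/* length is encoded in bits */
	int i;

	memset(block, 0, 64);
	block[0] = 0x80;			/* mandatory 1-bit terminator */
	for (i = 0; i < 8; i++)			/* big-endian length at offset 56 */
		block[56 + i] = (uint8_t)(bits >> (8 * (7 - i)));
}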
1748
1749static int chcr_ahash_final(struct ahash_request *req)
1750{
1751 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1752 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
fef4912b 1753 struct chcr_dev *dev = h_ctx(rtfm)->dev;
324429d7
HS
1754 struct hash_wr_param params;
1755 struct sk_buff *skb;
567be3a5
AS
1756 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1757 struct chcr_context *ctx = h_ctx(rtfm);
324429d7 1758 u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
8a656a48 1759 int error;
567be3a5
AS
1760 unsigned int cpu;
1761
1762 cpu = get_cpu();
1763 req_ctx->txqidx = cpu % ctx->ntxq;
1764 req_ctx->rxqidx = cpu % ctx->nrxq;
1765 put_cpu();
fef4912b
HJ
1766
1767 error = chcr_inc_wrcount(dev);
1768 if (error)
1769 return -ENXIO;
324429d7 1770
5110e655 1771 chcr_init_hctx_per_wr(req_ctx);
324429d7
HS
1772 if (is_hmac(crypto_ahash_tfm(rtfm)))
1773 params.opad_needed = 1;
1774 else
1775 params.opad_needed = 0;
1776 params.sg_len = 0;
5110e655 1777 req_ctx->hctx_wr.isfinal = 1;
324429d7 1778 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
5110e655
HJ
1779 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1780 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1781 params.opad_needed = 1;
1782 params.kctx_len *= 2;
1783 } else {
1784 params.opad_needed = 0;
1785 }
1786
1787 req_ctx->hctx_wr.result = 1;
44fce12a 1788 params.bfr_len = req_ctx->reqlen;
324429d7 1789 req_ctx->data_len += params.bfr_len + params.sg_len;
5110e655 1790 req_ctx->hctx_wr.srcsg = req->src;
44fce12a
HJ
1791 if (req_ctx->reqlen == 0) {
1792 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
324429d7
HS
1793 params.last = 0;
1794 params.more = 1;
1795 params.scmd1 = 0;
1796 params.bfr_len = bs;
1797
1798 } else {
1799 params.scmd1 = req_ctx->data_len;
1800 params.last = 1;
1801 params.more = 0;
1802 }
5110e655 1803 params.hash_size = crypto_ahash_digestsize(rtfm);
358961d1 1804 skb = create_hash_wr(req, &params);
fef4912b
HJ
1805 if (IS_ERR(skb)) {
1806 error = PTR_ERR(skb);
1807 goto err;
1808 }
5110e655 1809 req_ctx->reqlen = 0;
324429d7 1810 skb->dev = u_ctx->lldi.ports[0];
567be3a5 1811 set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
324429d7
HS
1812 chcr_send_wr(skb);
1813 return -EINPROGRESS;
fef4912b
HJ
1814err:
1815 chcr_dec_wrcount(dev);
1816 return error;
324429d7
HS
1817}
1818
1819static int chcr_ahash_finup(struct ahash_request *req)
1820{
1821 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1822 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
fef4912b 1823 struct chcr_dev *dev = h_ctx(rtfm)->dev;
567be3a5
AS
1824 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1825 struct chcr_context *ctx = h_ctx(rtfm);
324429d7
HS
1826 struct sk_buff *skb;
1827 struct hash_wr_param params;
1828 u8 bs;
567be3a5
AS
1829 int error;
1830 unsigned int cpu;
1831
1832 cpu = get_cpu();
1833 req_ctx->txqidx = cpu % ctx->ntxq;
1834 req_ctx->rxqidx = cpu % ctx->nrxq;
1835 put_cpu();
324429d7
HS
1836
1837 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
fef4912b
HJ
1838 error = chcr_inc_wrcount(dev);
1839 if (error)
1840 return -ENXIO;
324429d7
HS
1841
1842 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
567be3a5
AS
1843 req_ctx->txqidx) &&
1844 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
fef4912b
HJ
1845 error = -ENOSPC;
1846 goto err;
324429d7 1847 }
5110e655
HJ
1848 chcr_init_hctx_per_wr(req_ctx);
1849 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
fef4912b
HJ
1850 if (error) {
1851 error = -ENOMEM;
1852 goto err;
1853 }
324429d7 1854
5110e655
HJ
1855 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1856 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1857 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1858 params.kctx_len *= 2;
324429d7 1859 params.opad_needed = 1;
5110e655 1860 } else {
324429d7 1861 params.opad_needed = 0;
5110e655 1862 }
324429d7 1863
5110e655
HJ
1864 params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1865 HASH_SPACE_LEFT(params.kctx_len), 0);
1866 if (params.sg_len < req->nbytes) {
1867 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1868 params.kctx_len /= 2;
1869 params.opad_needed = 0;
1870 }
1871 params.last = 0;
1872 params.more = 1;
1873 params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
1874 - req_ctx->reqlen;
1875 params.hash_size = params.alg_prm.result_size;
1876 params.scmd1 = 0;
1877 } else {
1878 params.last = 1;
1879 params.more = 0;
1880 params.sg_len = req->nbytes;
1881 params.hash_size = crypto_ahash_digestsize(rtfm);
1882 params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
1883 params.sg_len;
1884 }
44fce12a 1885 params.bfr_len = req_ctx->reqlen;
324429d7 1886 req_ctx->data_len += params.bfr_len + params.sg_len;
5110e655
HJ
1887 req_ctx->hctx_wr.result = 1;
1888 req_ctx->hctx_wr.srcsg = req->src;
44fce12a
HJ
1889 if ((req_ctx->reqlen + req->nbytes) == 0) {
1890 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
324429d7
HS
1891 params.last = 0;
1892 params.more = 1;
1893 params.scmd1 = 0;
1894 params.bfr_len = bs;
324429d7 1895 }
358961d1 1896 skb = create_hash_wr(req, &params);
2f47d580
HJ
1897 if (IS_ERR(skb)) {
1898 error = PTR_ERR(skb);
1899 goto unmap;
1900 }
5110e655
HJ
1901 req_ctx->reqlen = 0;
1902 req_ctx->hctx_wr.processed += params.sg_len;
324429d7 1903 skb->dev = u_ctx->lldi.ports[0];
567be3a5 1904 set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
324429d7 1905 chcr_send_wr(skb);
567be3a5 1906 return -EINPROGRESS;
2f47d580
HJ
1907unmap:
1908 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
fef4912b
HJ
1909err:
1910 chcr_dec_wrcount(dev);
2f47d580 1911 return error;
324429d7
HS
1912}
1913
1914static int chcr_ahash_digest(struct ahash_request *req)
1915{
1916 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1917 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
fef4912b 1918 struct chcr_dev *dev = h_ctx(rtfm)->dev;
567be3a5
AS
1919 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1920 struct chcr_context *ctx = h_ctx(rtfm);
324429d7
HS
1921 struct sk_buff *skb;
1922 struct hash_wr_param params;
1923 u8 bs;
567be3a5
AS
1924 int error;
1925 unsigned int cpu;
1926
1927 cpu = get_cpu();
1928 req_ctx->txqidx = cpu % ctx->ntxq;
1929 req_ctx->rxqidx = cpu % ctx->nrxq;
1930 put_cpu();
324429d7
HS
1931
1932 rtfm->init(req);
1933 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
fef4912b
HJ
1934 error = chcr_inc_wrcount(dev);
1935 if (error)
1936 return -ENXIO;
324429d7 1937
324429d7 1938 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
567be3a5
AS
1939 req_ctx->txqidx) &&
1940 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
fef4912b
HJ
1941 error = -ENOSPC;
1942 goto err;
324429d7
HS
1943 }
1944
5110e655 1945 chcr_init_hctx_per_wr(req_ctx);
2f47d580 1946 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
fef4912b
HJ
1947 if (error) {
1948 error = -ENOMEM;
1949 goto err;
1950 }
324429d7 1951
324429d7 1952 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
5110e655
HJ
1953 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1954 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1955 params.kctx_len *= 2;
1956 params.opad_needed = 1;
1957 } else {
1958 params.opad_needed = 0;
1959 }
1960 params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1961 HASH_SPACE_LEFT(params.kctx_len), 0);
1962 if (params.sg_len < req->nbytes) {
1963 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1964 params.kctx_len /= 2;
1965 params.opad_needed = 0;
1966 }
1967 params.last = 0;
1968 params.more = 1;
1969 params.scmd1 = 0;
1970 params.sg_len = rounddown(params.sg_len, bs);
1971 params.hash_size = params.alg_prm.result_size;
1972 } else {
1973 params.sg_len = req->nbytes;
1974 params.hash_size = crypto_ahash_digestsize(rtfm);
1975 params.last = 1;
1976 params.more = 0;
1977 params.scmd1 = req->nbytes + req_ctx->data_len;
1978
1979 }
1980 params.bfr_len = 0;
1981 req_ctx->hctx_wr.result = 1;
1982 req_ctx->hctx_wr.srcsg = req->src;
324429d7
HS
1983 req_ctx->data_len += params.bfr_len + params.sg_len;
1984
44fce12a
HJ
1985 if (req->nbytes == 0) {
1986 create_last_hash_block(req_ctx->reqbfr, bs, 0);
324429d7
HS
1987 params.more = 1;
1988 params.bfr_len = bs;
1989 }
1990
358961d1 1991 skb = create_hash_wr(req, &params);
2f47d580
HJ
1992 if (IS_ERR(skb)) {
1993 error = PTR_ERR(skb);
1994 goto unmap;
1995 }
5110e655 1996 req_ctx->hctx_wr.processed += params.sg_len;
324429d7 1997 skb->dev = u_ctx->lldi.ports[0];
567be3a5 1998 set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
324429d7 1999 chcr_send_wr(skb);
567be3a5 2000 return -EINPROGRESS;
2f47d580
HJ
2001unmap:
2002 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
fef4912b
HJ
2003err:
2004 chcr_dec_wrcount(dev);
2f47d580 2005 return error;
324429d7
HS
2006}
2007
6f76672b
HJ
2008static int chcr_ahash_continue(struct ahash_request *req)
2009{
2010 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2011 struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2012 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
567be3a5
AS
2013 struct chcr_context *ctx = h_ctx(rtfm);
2014 struct uld_ctx *u_ctx = ULD_CTX(ctx);
6f76672b
HJ
2015 struct sk_buff *skb;
2016 struct hash_wr_param params;
2017 u8 bs;
2018 int error;
567be3a5
AS
2019 unsigned int cpu;
2020
2021 cpu = get_cpu();
2022 reqctx->txqidx = cpu % ctx->ntxq;
2023 reqctx->rxqidx = cpu % ctx->nrxq;
2024 put_cpu();
6f76672b
HJ
2025
2026 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
6f76672b
HJ
2027 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
2028 params.kctx_len = roundup(params.alg_prm.result_size, 16);
2029 if (is_hmac(crypto_ahash_tfm(rtfm))) {
2030 params.kctx_len *= 2;
2031 params.opad_needed = 1;
2032 } else {
2033 params.opad_needed = 0;
2034 }
2035 params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
2036 HASH_SPACE_LEFT(params.kctx_len),
2037 hctx_wr->src_ofst);
2038 if ((params.sg_len + hctx_wr->processed) > req->nbytes)
2039 params.sg_len = req->nbytes - hctx_wr->processed;
2040 if (!hctx_wr->result ||
2041 ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
2042 if (is_hmac(crypto_ahash_tfm(rtfm))) {
2043 params.kctx_len /= 2;
2044 params.opad_needed = 0;
2045 }
2046 params.last = 0;
2047 params.more = 1;
2048 params.sg_len = rounddown(params.sg_len, bs);
2049 params.hash_size = params.alg_prm.result_size;
2050 params.scmd1 = 0;
2051 } else {
2052 params.last = 1;
2053 params.more = 0;
2054 params.hash_size = crypto_ahash_digestsize(rtfm);
2055 params.scmd1 = reqctx->data_len + params.sg_len;
2056 }
2057 params.bfr_len = 0;
2058 reqctx->data_len += params.sg_len;
2059 skb = create_hash_wr(req, &params);
2060 if (IS_ERR(skb)) {
2061 error = PTR_ERR(skb);
2062 goto err;
2063 }
2064 hctx_wr->processed += params.sg_len;
2065 skb->dev = u_ctx->lldi.ports[0];
567be3a5 2066 set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
6f76672b
HJ
2067 chcr_send_wr(skb);
2068 return 0;
2069err:
2070 return error;
2071}
2072
2073static inline void chcr_handle_ahash_resp(struct ahash_request *req,
2074 unsigned char *input,
2075 int err)
2076{
2077 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2078 struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2079 int digestsize, updated_digestsize;
2080 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2081 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
fef4912b 2082 struct chcr_dev *dev = h_ctx(tfm)->dev;
6f76672b
HJ
2083
2084 if (input == NULL)
2085 goto out;
2086 digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
2087 updated_digestsize = digestsize;
2088 if (digestsize == SHA224_DIGEST_SIZE)
2089 updated_digestsize = SHA256_DIGEST_SIZE;
2090 else if (digestsize == SHA384_DIGEST_SIZE)
2091 updated_digestsize = SHA512_DIGEST_SIZE;
2092
2093 if (hctx_wr->dma_addr) {
2094 dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
2095 hctx_wr->dma_len, DMA_TO_DEVICE);
2096 hctx_wr->dma_addr = 0;
2097 }
2098 if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
2099 req->nbytes)) {
2100 if (hctx_wr->result == 1) {
2101 hctx_wr->result = 0;
2102 memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
2103 digestsize);
2104 } else {
2105 memcpy(reqctx->partial_hash,
2106 input + sizeof(struct cpl_fw6_pld),
2107 updated_digestsize);
2108
2109 }
2110 goto unmap;
2111 }
2112 memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
2113 updated_digestsize);
2114
2115 err = chcr_ahash_continue(req);
2116 if (err)
2117 goto unmap;
2118 return;
2119unmap:
2120 if (hctx_wr->is_sg_map)
2121 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2122
2123
2124out:
fef4912b 2125 chcr_dec_wrcount(dev);
6f76672b
HJ
2126 req->base.complete(&req->base, err);
2127}
2128
2129/*
2130 * chcr_handle_resp - Handle completion of a crypto request and unmap its DMA buffers
2131 * @req: crypto request
2132 */
2133int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
2134 int err)
2135{
2136 struct crypto_tfm *tfm = req->tfm;
2137 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2138 struct adapter *adap = padap(ctx->dev);
2139
2140 switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
2141 case CRYPTO_ALG_TYPE_AEAD:
f31ba0f9 2142 err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
6f76672b
HJ
2143 break;
2144
7cea6d3e
AB
2145 case CRYPTO_ALG_TYPE_SKCIPHER:
2146 chcr_handle_cipher_resp(skcipher_request_cast(req),
6f76672b
HJ
2147 input, err);
2148 break;
6f76672b
HJ
2149 case CRYPTO_ALG_TYPE_AHASH:
2150 chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
2151 }
2152 atomic_inc(&adap->chcr_stats.complete);
2153 return err;
2154}
324429d7
HS
2155static int chcr_ahash_export(struct ahash_request *areq, void *out)
2156{
2157 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2158 struct chcr_ahash_req_ctx *state = out;
2159
44fce12a 2160 state->reqlen = req_ctx->reqlen;
324429d7 2161 state->data_len = req_ctx->data_len;
44fce12a 2162 memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
324429d7
HS
2163 memcpy(state->partial_hash, req_ctx->partial_hash,
2164 CHCR_HASH_MAX_DIGEST_SIZE);
5110e655 2165 chcr_init_hctx_per_wr(state);
fc6176a2 2166 return 0;
324429d7
HS
2167}
2168
2169static int chcr_ahash_import(struct ahash_request *areq, const void *in)
2170{
2171 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2172 struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
2173
44fce12a 2174 req_ctx->reqlen = state->reqlen;
324429d7 2175 req_ctx->data_len = state->data_len;
44fce12a
HJ
2176 req_ctx->reqbfr = req_ctx->bfr1;
2177 req_ctx->skbfr = req_ctx->bfr2;
2178 memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
324429d7
HS
2179 memcpy(req_ctx->partial_hash, state->partial_hash,
2180 CHCR_HASH_MAX_DIGEST_SIZE);
5110e655 2181 chcr_init_hctx_per_wr(req_ctx);
324429d7
HS
2182 return 0;
2183}
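The export/import pair above snapshots and restores the per-request state (buffered bytes, running length and partial digest), which is what lets a caller suspend a hash mid-stream. Below is a caller-side sketch using the generic ahash API; it is illustrative only, error handling is trimmed, and the "sha256" name and fixed-size state buffer are assumptions (real code should size the buffer with crypto_ahash_statesize()):

#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/scatterlist.h>

static int example_export_import(const void *buf, unsigned int len)
{
	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
	struct ahash_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg;
	u8 state[512];			/* sketch: use crypto_ahash_statesize() */
	u8 digest[SHA256_DIGEST_SIZE];
	int err;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	req = ahash_request_alloc(tfm, GFP_KERNEL);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	sg_init_one(&sg, buf, len);
	ahash_request_set_crypt(req, &sg, digest, len);

	err = crypto_wait_req(crypto_ahash_init(req), &wait);
	err = err ?: crypto_wait_req(crypto_ahash_update(req), &wait);
	err = err ?: crypto_ahash_export(req, state);	/* snapshot midstate */
	err = err ?: crypto_ahash_import(req, state);	/* restore it */
	err = err ?: crypto_wait_req(crypto_ahash_final(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}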
2184
2185static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2186 unsigned int keylen)
2187{
2f47d580 2188 struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
324429d7
HS
2189 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2190 unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2191 unsigned int i, err = 0, updated_digestsize;
2192
e7922729
HJ
2193 SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
2194
2195 /* Use the key to calculate the ipad and opad. ipad will be sent with the
324429d7
HS
2196 * first request's data. opad will be sent with the final hash result.
2197 * ipad is kept in hmacctx->ipad and opad in hmacctx->opad.
2198 */
e7922729 2199 shash->tfm = hmacctx->base_hash;
324429d7 2200 if (keylen > bs) {
e7922729 2201 err = crypto_shash_digest(shash, key, keylen,
324429d7
HS
2202 hmacctx->ipad);
2203 if (err)
2204 goto out;
2205 keylen = digestsize;
2206 } else {
2207 memcpy(hmacctx->ipad, key, keylen);
2208 }
2209 memset(hmacctx->ipad + keylen, 0, bs - keylen);
2210 memcpy(hmacctx->opad, hmacctx->ipad, bs);
2211
2212 for (i = 0; i < bs / sizeof(int); i++) {
2213 *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
2214 *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
2215 }
2216
2217 updated_digestsize = digestsize;
2218 if (digestsize == SHA224_DIGEST_SIZE)
2219 updated_digestsize = SHA256_DIGEST_SIZE;
2220 else if (digestsize == SHA384_DIGEST_SIZE)
2221 updated_digestsize = SHA512_DIGEST_SIZE;
e7922729 2222 err = chcr_compute_partial_hash(shash, hmacctx->ipad,
324429d7
HS
2223 hmacctx->ipad, digestsize);
2224 if (err)
2225 goto out;
2226 chcr_change_order(hmacctx->ipad, updated_digestsize);
2227
e7922729 2228 err = chcr_compute_partial_hash(shash, hmacctx->opad,
324429d7
HS
2229 hmacctx->opad, digestsize);
2230 if (err)
2231 goto out;
2232 chcr_change_order(hmacctx->opad, updated_digestsize);
2233out:
2234 return err;
2235}
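The loop above is the standard RFC 2104 key preprocessing: pad (or hash down) the key to one block, XOR it with the 0x36/0x5c patterns to obtain ipad and opad, then take one compression-function step over each. A minimal sketch, assuming IPAD_DATA and OPAD_DATA expand to those repeated byte patterns (their definitions live in chcr_algo.h, not shown here); not driver code:

#include <stdint.h>
#include <string.h>

/* Illustrative sketch of the HMAC pad derivation done above; keylen is
 * assumed to be <= bs (longer keys are hashed down to digestsize first).
 */
static void example_hmac_pads(const uint8_t *key, size_t keylen,
			      uint8_t *ipad, uint8_t *opad, size_t bs)
{
	size_t i;

	memset(ipad, 0, bs);
	memcpy(ipad, key, keylen);
	memcpy(opad, ipad, bs);
	for (i = 0; i < bs; i++) {
		ipad[i] ^= 0x36;	/* IPAD_DATA pattern */
		opad[i] ^= 0x5c;	/* OPAD_DATA pattern */
	}
	/* The driver then runs one compression step over each pad
	 * (chcr_compute_partial_hash) and keeps only the midstates, so the
	 * hardware never needs to see the raw key again.
	 */
}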
2236
7cea6d3e 2237static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
324429d7
HS
2238 unsigned int key_len)
2239{
2f47d580 2240 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
324429d7 2241 unsigned short context_size = 0;
b8fd1f41 2242 int err;
324429d7 2243
b8fd1f41
HJ
2244 err = chcr_cipher_fallback_setkey(cipher, key, key_len);
2245 if (err)
2246 goto badkey_err;
cc1b156d
HJ
2247
2248 memcpy(ablkctx->key, key, key_len);
2249 ablkctx->enckey_len = key_len;
2250 get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
2251 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
2252 ablkctx->key_ctx_hdr =
2253 FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
2254 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
2255 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
2256 CHCR_KEYCTX_NO_KEY, 1,
2257 0, context_size);
2258 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
2259 return 0;
b8fd1f41 2260badkey_err:
b8fd1f41
HJ
2261 ablkctx->enckey_len = 0;
2262
2263 return err;
324429d7
HS
2264}
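The key-size field above can look inverted at first glance: for XTS the supplied key is two AES keys back to back (data key followed by tweak key), so a total key_len of AES_KEYSIZE_256 (32 bytes) really means AES-128-XTS. A tiny illustrative helper making that mapping explicit (an editorial example, not driver code):

/* Illustrative only: the per-AES-key length behind an XTS key_len. */
static inline unsigned int example_xts_single_key_len(unsigned int key_len)
{
	return key_len / 2;	/* 32 -> 16 (AES-128-XTS), 64 -> 32 (AES-256-XTS) */
}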
2265
2266static int chcr_sha_init(struct ahash_request *areq)
2267{
2268 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2269 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2270 int digestsize = crypto_ahash_digestsize(tfm);
2271
2272 req_ctx->data_len = 0;
44fce12a
HJ
2273 req_ctx->reqlen = 0;
2274 req_ctx->reqbfr = req_ctx->bfr1;
2275 req_ctx->skbfr = req_ctx->bfr2;
324429d7 2276 copy_hash_init_values(req_ctx->partial_hash, digestsize);
5110e655 2277
324429d7
HS
2278 return 0;
2279}
2280
2281static int chcr_sha_cra_init(struct crypto_tfm *tfm)
2282{
2283 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2284 sizeof(struct chcr_ahash_req_ctx));
2285 return chcr_device_init(crypto_tfm_ctx(tfm));
2286}
2287
2288static int chcr_hmac_init(struct ahash_request *areq)
2289{
2290 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2291 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
2f47d580 2292 struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
324429d7
HS
2293 unsigned int digestsize = crypto_ahash_digestsize(rtfm);
2294 unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2295
2296 chcr_sha_init(areq);
2297 req_ctx->data_len = bs;
2298 if (is_hmac(crypto_ahash_tfm(rtfm))) {
2299 if (digestsize == SHA224_DIGEST_SIZE)
2300 memcpy(req_ctx->partial_hash, hmacctx->ipad,
2301 SHA256_DIGEST_SIZE);
2302 else if (digestsize == SHA384_DIGEST_SIZE)
2303 memcpy(req_ctx->partial_hash, hmacctx->ipad,
2304 SHA512_DIGEST_SIZE);
2305 else
2306 memcpy(req_ctx->partial_hash, hmacctx->ipad,
2307 digestsize);
2308 }
2309 return 0;
2310}
2311
2312static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
2313{
2314 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2315 struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2316 unsigned int digestsize =
2317 crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
2318
2319 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2320 sizeof(struct chcr_ahash_req_ctx));
e7922729
HJ
2321 hmacctx->base_hash = chcr_alloc_shash(digestsize);
2322 if (IS_ERR(hmacctx->base_hash))
2323 return PTR_ERR(hmacctx->base_hash);
324429d7
HS
2324 return chcr_device_init(crypto_tfm_ctx(tfm));
2325}
2326
324429d7
HS
2327static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
2328{
2329 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2330 struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2331
e7922729
HJ
2332 if (hmacctx->base_hash) {
2333 chcr_free_shash(hmacctx->base_hash);
2334 hmacctx->base_hash = NULL;
324429d7
HS
2335 }
2336}
2337
4262c98a
HJ
2338inline void chcr_aead_common_exit(struct aead_request *req)
2339{
2340 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2341 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2342 struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
2343
2344 chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
2345}
2346
2347static int chcr_aead_common_init(struct aead_request *req)
2debd332 2348{
2f47d580
HJ
2349 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2350 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2351 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2f47d580 2352 unsigned int authsize = crypto_aead_authsize(tfm);
4262c98a 2353 int error = -EINVAL;
2debd332 2354
2f47d580
HJ
2355 /* validate key size */
2356 if (aeadctx->enckey_len == 0)
2357 goto err;
4262c98a 2358 if (reqctx->op && req->cryptlen < authsize)
2f47d580 2359 goto err;
4262c98a
HJ
2360 if (reqctx->b0_len)
2361 reqctx->scratch_pad = reqctx->iv + IV;
2362 else
2363 reqctx->scratch_pad = NULL;
2364
2f47d580 2365 error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
4262c98a 2366 reqctx->op);
2f47d580
HJ
2367 if (error) {
2368 error = -ENOMEM;
2369 goto err;
2370 }
1f479e4c 2371
2f47d580
HJ
2372 return 0;
2373err:
2374 return error;
2debd332 2375}
2f47d580
HJ
2376
2377static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
0e93708d
HJ
2378 int aadmax, int wrlen,
2379 unsigned short op_type)
2380{
2381 unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2382
2383 if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2f47d580 2384 dst_nents > MAX_DSGL_ENT ||
0e93708d 2385 (req->assoclen > aadmax) ||
2f47d580 2386 (wrlen > SGE_MAX_WR_LEN))
0e93708d
HJ
2387 return 1;
2388 return 0;
2389}
2debd332 2390
0e93708d
HJ
2391static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2392{
2393 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2f47d580 2394 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
0e93708d
HJ
2395 struct aead_request *subreq = aead_request_ctx(req);
2396
2397 aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2398 aead_request_set_callback(subreq, req->base.flags,
2399 req->base.complete, req->base.data);
4262c98a 2400 aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
0e93708d 2401 req->iv);
fc6176a2 2402 aead_request_set_ad(subreq, req->assoclen);
0e93708d
HJ
2403 return op_type ? crypto_aead_decrypt(subreq) :
2404 crypto_aead_encrypt(subreq);
2405}
2debd332
HJ
2406
2407static struct sk_buff *create_authenc_wr(struct aead_request *req,
2408 unsigned short qid,
4262c98a 2409 int size)
2debd332
HJ
2410{
2411 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
567be3a5
AS
2412 struct chcr_context *ctx = a_ctx(tfm);
2413 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2debd332
HJ
2414 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2415 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2416 struct sk_buff *skb = NULL;
2417 struct chcr_wr *chcr_req;
2418 struct cpl_rx_phys_dsgl *phys_cpl;
2f47d580
HJ
2419 struct ulptx_sgl *ulptx;
2420 unsigned int transhdr_len;
3d64bd67 2421 unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
1f479e4c 2422 unsigned int kctx_len = 0, dnents, snents;
2debd332 2423 unsigned int authsize = crypto_aead_authsize(tfm);
2f47d580 2424 int error = -EINVAL;
1f479e4c 2425 u8 *ivptr;
2debd332
HJ
2426 int null = 0;
2427 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2428 GFP_ATOMIC;
567be3a5
AS
2429 struct adapter *adap = padap(ctx->dev);
2430 unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2debd332 2431
2f47d580
HJ
2432 if (req->cryptlen == 0)
2433 return NULL;
2debd332 2434
4262c98a
HJ
2435 reqctx->b0_len = 0;
2436 error = chcr_aead_common_init(req);
2437 if (error)
2438 return ERR_PTR(error);
2439
3d64bd67 2440 if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
4262c98a 2441 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2debd332 2442 null = 1;
2debd332 2443 }
1f479e4c
HJ
2444 dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
2445 (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
5abc8db0 2446 dnents += MIN_AUTH_SG; // For IV
1f479e4c
HJ
2447 snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2448 CHCR_SRC_SG_SIZE, 0);
2f47d580 2449 dst_size = get_space_for_phys_dsgl(dnents);
ff462ddf 2450 kctx_len = (KEY_CONTEXT_CTX_LEN_G(ntohl(aeadctx->key_ctx_hdr)) << 4)
2debd332
HJ
2451 - sizeof(chcr_req->key_ctx);
2452 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
1f479e4c 2453 reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
2f47d580 2454 SGE_MAX_WR_LEN;
1f479e4c
HJ
2455 temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
2456 : (sgl_len(snents) * 8);
2f47d580 2457 transhdr_len += temp;
125d01ca 2458 transhdr_len = roundup(transhdr_len, 16);
2f47d580
HJ
2459
2460 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
4262c98a 2461 transhdr_len, reqctx->op)) {
ee0863ba 2462 atomic_inc(&adap->chcr_stats.fallback);
4262c98a
HJ
2463 chcr_aead_common_exit(req);
2464 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
0e93708d 2465 }
1f479e4c 2466 skb = alloc_skb(transhdr_len, flags);
5fe8c711
HJ
2467 if (!skb) {
2468 error = -ENOMEM;
2debd332 2469 goto err;
5fe8c711 2470 }
2debd332 2471
de77b966 2472 chcr_req = __skb_put_zero(skb, transhdr_len);
2debd332 2473
4262c98a 2474 temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
2debd332
HJ
2475
2476 /*
2477 * Input order is AAD, IV and Payload, where the IV should be included
2478 * as part of the authdata. All other fields should be filled according
2479 * to the hardware spec.
2480 */
2481 chcr_req->sec_cpl.op_ivinsrtofst =
567be3a5 2482 FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
1f479e4c 2483 chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
2debd332 2484 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
1f479e4c
HJ
2485 null ? 0 : 1 + IV,
2486 null ? 0 : IV + req->assoclen,
2487 req->assoclen + IV + 1,
2f47d580 2488 (temp & 0x1F0) >> 4);
2debd332 2489 chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2f47d580 2490 temp & 0xF,
1f479e4c 2491 null ? 0 : req->assoclen + IV + 1,
2f47d580 2492 temp, temp);
3d64bd67
HJ
2493 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
2494 subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
2495 temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
2496 else
2497 temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
4262c98a
HJ
2498 chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
2499 (reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
3d64bd67 2500 temp,
2debd332 2501 actx->auth_mode, aeadctx->hmac_ctrl,
2f47d580 2502 IV >> 1);
2debd332 2503 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2f47d580 2504 0, 0, dst_size);
2debd332
HJ
2505
2506 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
4262c98a 2507 if (reqctx->op == CHCR_ENCRYPT_OP ||
3d64bd67
HJ
2508 subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2509 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
2debd332
HJ
2510 memcpy(chcr_req->key_ctx.key, aeadctx->key,
2511 aeadctx->enckey_len);
2512 else
2513 memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2514 aeadctx->enckey_len);
2515
125d01ca
HJ
2516 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2517 actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
1f479e4c
HJ
2518 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2519 ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2520 ulptx = (struct ulptx_sgl *)(ivptr + IV);
3d64bd67
HJ
2521 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2522 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
1f479e4c
HJ
2523 memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2524 memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
3d64bd67 2525 CTR_RFC3686_IV_SIZE);
1f479e4c 2526 *(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
3d64bd67
HJ
2527 CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2528 } else {
1f479e4c 2529 memcpy(ivptr, req->iv, IV);
3d64bd67 2530 }
1f479e4c
HJ
2531 chcr_add_aead_dst_ent(req, phys_cpl, qid);
2532 chcr_add_aead_src_ent(req, ulptx);
ee0863ba 2533 atomic_inc(&adap->chcr_stats.cipher_rqst);
1f479e4c
HJ
2534 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2535 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
2f47d580
HJ
2536 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2537 transhdr_len, temp, 0);
2debd332 2538 reqctx->skb = skb;
2debd332
HJ
2539
2540 return skb;
2debd332 2541err:
4262c98a 2542 chcr_aead_common_exit(req);
2f47d580 2543
5fe8c711 2544 return ERR_PTR(error);
2debd332
HJ
2545}
2546
6dad4e8a
AG
2547int chcr_aead_dma_map(struct device *dev,
2548 struct aead_request *req,
2549 unsigned short op_type)
2f47d580
HJ
2550{
2551 int error;
2552 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2553 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2554 unsigned int authsize = crypto_aead_authsize(tfm);
2555 int dst_size;
2556
2557 dst_size = req->assoclen + req->cryptlen + (op_type ?
2558 -authsize : authsize);
2559 if (!req->cryptlen || !dst_size)
2560 return 0;
4262c98a 2561 reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
2f47d580
HJ
2562 DMA_BIDIRECTIONAL);
2563 if (dma_mapping_error(dev, reqctx->iv_dma))
2564 return -ENOMEM;
4262c98a
HJ
2565 if (reqctx->b0_len)
2566 reqctx->b0_dma = reqctx->iv_dma + IV;
2567 else
2568 reqctx->b0_dma = 0;
2f47d580 2569 if (req->src == req->dst) {
9195189e
AS
2570 error = dma_map_sg(dev, req->src,
2571 sg_nents_for_len(req->src, dst_size),
2572 DMA_BIDIRECTIONAL);
2f47d580
HJ
2573 if (!error)
2574 goto err;
2575 } else {
2576 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2577 DMA_TO_DEVICE);
2578 if (!error)
2579 goto err;
2580 error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2581 DMA_FROM_DEVICE);
2582 if (!error) {
2583 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2584 DMA_TO_DEVICE);
2585 goto err;
2586 }
2587 }
2588
2589 return 0;
2590err:
2591 dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
2592 return -ENOMEM;
2593}
2594
6dad4e8a
AG
2595void chcr_aead_dma_unmap(struct device *dev,
2596 struct aead_request *req,
2597 unsigned short op_type)
2f47d580
HJ
2598{
2599 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2600 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2601 unsigned int authsize = crypto_aead_authsize(tfm);
2602 int dst_size;
2603
2604 dst_size = req->assoclen + req->cryptlen + (op_type ?
2605 -authsize : authsize);
2606 if (!req->cryptlen || !dst_size)
2607 return;
2608
4262c98a 2609 dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
2f47d580
HJ
2610 DMA_BIDIRECTIONAL);
2611 if (req->src == req->dst) {
2612 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2613 DMA_BIDIRECTIONAL);
2614 } else {
2615 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2616 DMA_TO_DEVICE);
2617 dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2618 DMA_FROM_DEVICE);
2619 }
2620}
2621
6dad4e8a 2622void chcr_add_aead_src_ent(struct aead_request *req,
1f479e4c 2623 struct ulptx_sgl *ulptx)
2f47d580
HJ
2624{
2625 struct ulptx_walk ulp_walk;
2626 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2627
2628 if (reqctx->imm) {
2629 u8 *buf = (u8 *)ulptx;
2630
4262c98a 2631 if (reqctx->b0_len) {
2f47d580
HJ
2632 memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2633 buf += reqctx->b0_len;
2634 }
2635 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
1f479e4c 2636 buf, req->cryptlen + req->assoclen, 0);
2f47d580
HJ
2637 } else {
2638 ulptx_walk_init(&ulp_walk, ulptx);
4262c98a 2639 if (reqctx->b0_len)
2f47d580 2640 ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
c4f6d44d 2641 reqctx->b0_dma);
1f479e4c
HJ
2642 ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
2643 req->assoclen, 0);
2f47d580
HJ
2644 ulptx_walk_end(&ulp_walk);
2645 }
2646}
2647
6dad4e8a
AG
2648void chcr_add_aead_dst_ent(struct aead_request *req,
2649 struct cpl_rx_phys_dsgl *phys_cpl,
6dad4e8a 2650 unsigned short qid)
2f47d580
HJ
2651{
2652 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2653 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2654 struct dsgl_walk dsgl_walk;
2655 unsigned int authsize = crypto_aead_authsize(tfm);
add92a81 2656 struct chcr_context *ctx = a_ctx(tfm);
2f47d580 2657 u32 temp;
567be3a5 2658 unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2f47d580
HJ
2659
2660 dsgl_walk_init(&dsgl_walk, phys_cpl);
c4f6d44d 2661 dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
1f479e4c
HJ
2662 temp = req->assoclen + req->cryptlen +
2663 (reqctx->op ? -authsize : authsize);
2664 dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
567be3a5 2665 dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2f47d580
HJ
2666}
2667
7cea6d3e 2668void chcr_add_cipher_src_ent(struct skcipher_request *req,
335bcc4a 2669 void *ulptx,
6dad4e8a 2670 struct cipher_wr_param *wrparam)
2f47d580
HJ
2671{
2672 struct ulptx_walk ulp_walk;
7cea6d3e 2673 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
335bcc4a 2674 u8 *buf = ulptx;
2f47d580 2675
335bcc4a
HJ
2676 memcpy(buf, reqctx->iv, IV);
2677 buf += IV;
2f47d580 2678 if (reqctx->imm) {
2f47d580
HJ
2679 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2680 buf, wrparam->bytes, reqctx->processed);
2681 } else {
335bcc4a 2682 ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
2f47d580
HJ
2683 ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2684 reqctx->src_ofst);
2685 reqctx->srcsg = ulp_walk.last_sg;
2686 reqctx->src_ofst = ulp_walk.last_sg_len;
2687 ulptx_walk_end(&ulp_walk);
2688 }
2689}
2690
7cea6d3e 2691void chcr_add_cipher_dst_ent(struct skcipher_request *req,
6dad4e8a
AG
2692 struct cpl_rx_phys_dsgl *phys_cpl,
2693 struct cipher_wr_param *wrparam,
2694 unsigned short qid)
2f47d580 2695{
7cea6d3e
AB
2696 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2697 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
add92a81 2698 struct chcr_context *ctx = c_ctx(tfm);
2f47d580 2699 struct dsgl_walk dsgl_walk;
567be3a5 2700 unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2f47d580
HJ
2701
2702 dsgl_walk_init(&dsgl_walk, phys_cpl);
2f47d580
HJ
2703 dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2704 reqctx->dst_ofst);
2705 reqctx->dstsg = dsgl_walk.last_sg;
2706 reqctx->dst_ofst = dsgl_walk.last_sg_len;
567be3a5 2707 dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2f47d580
HJ
2708}
2709
6dad4e8a
AG
2710void chcr_add_hash_src_ent(struct ahash_request *req,
2711 struct ulptx_sgl *ulptx,
2712 struct hash_wr_param *param)
2f47d580
HJ
2713{
2714 struct ulptx_walk ulp_walk;
2715 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2716
5110e655 2717 if (reqctx->hctx_wr.imm) {
2f47d580
HJ
2718 u8 *buf = (u8 *)ulptx;
2719
2720 if (param->bfr_len) {
2721 memcpy(buf, reqctx->reqbfr, param->bfr_len);
2722 buf += param->bfr_len;
2723 }
5110e655
HJ
2724
2725 sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
2726 sg_nents(reqctx->hctx_wr.srcsg), buf,
2727 param->sg_len, 0);
2f47d580
HJ
2728 } else {
2729 ulptx_walk_init(&ulp_walk, ulptx);
2730 if (param->bfr_len)
2731 ulptx_walk_add_page(&ulp_walk, param->bfr_len,
c4f6d44d 2732 reqctx->hctx_wr.dma_addr);
5110e655
HJ
2733 ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
2734 param->sg_len, reqctx->hctx_wr.src_ofst);
2735 reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
2736 reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
db6deea4 2737 ulptx_walk_end(&ulp_walk);
2f47d580
HJ
2738 }
2739}
2740
6dad4e8a
AG
2741int chcr_hash_dma_map(struct device *dev,
2742 struct ahash_request *req)
2f47d580
HJ
2743{
2744 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2745 int error = 0;
2746
2747 if (!req->nbytes)
2748 return 0;
2749 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2750 DMA_TO_DEVICE);
2751 if (!error)
7814f552 2752 return -ENOMEM;
5110e655 2753 req_ctx->hctx_wr.is_sg_map = 1;
2f47d580
HJ
2754 return 0;
2755}
2756
6dad4e8a
AG
2757void chcr_hash_dma_unmap(struct device *dev,
2758 struct ahash_request *req)
2f47d580
HJ
2759{
2760 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2761
2762 if (!req->nbytes)
2763 return;
2764
2765 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2766 DMA_TO_DEVICE);
5110e655 2767 req_ctx->hctx_wr.is_sg_map = 0;
2f47d580
HJ
2768
2769}
2770
6dad4e8a 2771int chcr_cipher_dma_map(struct device *dev,
7cea6d3e 2772 struct skcipher_request *req)
2f47d580
HJ
2773{
2774 int error;
2f47d580
HJ
2775
2776 if (req->src == req->dst) {
2777 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2778 DMA_BIDIRECTIONAL);
2779 if (!error)
2780 goto err;
2781 } else {
2782 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2783 DMA_TO_DEVICE);
2784 if (!error)
2785 goto err;
2786 error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2787 DMA_FROM_DEVICE);
2788 if (!error) {
2789 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2790 DMA_TO_DEVICE);
2791 goto err;
2792 }
2793 }
2794
2795 return 0;
2796err:
2f47d580
HJ
2797 return -ENOMEM;
2798}
6dad4e8a
AG
2799
2800void chcr_cipher_dma_unmap(struct device *dev,
7cea6d3e 2801 struct skcipher_request *req)
2f47d580 2802{
2f47d580
HJ
2803 if (req->src == req->dst) {
2804 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2805 DMA_BIDIRECTIONAL);
2806 } else {
2807 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2808 DMA_TO_DEVICE);
2809 dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2810 DMA_FROM_DEVICE);
2811 }
2812}
2813
2debd332
HJ
2814static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2815{
2816 __be32 data;
2817
2818 memset(block, 0, csize);
2819 block += csize;
2820
2821 if (csize >= 4)
2822 csize = 4;
2823 else if (msglen > (unsigned int)(1 << (8 * csize)))
2824 return -EOVERFLOW;
2825
2826 data = cpu_to_be32(msglen);
2827 memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2828
2829 return 0;
2830}
2831
66af86d9 2832static int generate_b0(struct aead_request *req, u8 *ivptr,
2debd332
HJ
2833 unsigned short op_type)
2834{
2835 unsigned int l, lp, m;
2836 int rc;
2837 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2838 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2839 u8 *b0 = reqctx->scratch_pad;
2840
2841 m = crypto_aead_authsize(aead);
2842
1f479e4c 2843 memcpy(b0, ivptr, 16);
2debd332
HJ
2844
2845 lp = b0[0];
2846 l = lp + 1;
2847
2848 /* set m, bits 3-5 */
2849 *b0 |= (8 * ((m - 2) / 2));
2850
2851 /* set adata, bit 6, if associated data is used */
2852 if (req->assoclen)
2853 *b0 |= 64;
2854 rc = set_msg_len(b0 + 16 - l,
2855 (op_type == CHCR_DECRYPT_OP) ?
2856 req->cryptlen - m : req->cryptlen, l);
66af86d9
Y
2857
2858 return rc;
2debd332
HJ
2859}
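generate_b0() composes the CCM B0 block from RFC 3610: a flags byte (Adata bit, encoded tag length M, encoded length-field size L), the nonce carried in the IV, and the message length written big-endian into the last L bytes by set_msg_len(). A small sketch of the flags byte plus a worked example; illustrative only, not part of the driver:

/* Illustrative sketch of the B0 flags byte built above (RFC 3610):
 * bit 6 = Adata, bits 3-5 = (M - 2) / 2, bits 0-2 = L - 1.
 * Example: L = 4, M = 16, assoclen > 0  ->  0x40 | 0x38 | 0x03 = 0x7b,
 * and for cryptlen 0x1234 the last four bytes become 00 00 12 34.
 */
static inline unsigned char example_ccm_b0_flags(unsigned int M,
						 unsigned int L, int have_aad)
{
	return (have_aad ? 0x40 : 0) | (((M - 2) / 2) << 3) | (L - 1);
}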
2860
2861static inline int crypto_ccm_check_iv(const u8 *iv)
2862{
2863 /* 2 <= L <= 8, so 1 <= L' <= 7. */
2864 if (iv[0] < 1 || iv[0] > 7)
2865 return -EINVAL;
2866
2867 return 0;
2868}
2869
2870static int ccm_format_packet(struct aead_request *req,
1f479e4c 2871 u8 *ivptr,
2debd332 2872 unsigned int sub_type,
4262c98a
HJ
2873 unsigned short op_type,
2874 unsigned int assoclen)
2debd332
HJ
2875{
2876 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
1f479e4c
HJ
2877 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2878 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2debd332
HJ
2879 int rc = 0;
2880
2debd332 2881 if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
1f479e4c
HJ
2882 ivptr[0] = 3;
2883 memcpy(ivptr + 1, &aeadctx->salt[0], 3);
2884 memcpy(ivptr + 4, req->iv, 8);
2885 memset(ivptr + 12, 0, 4);
2debd332 2886 } else {
1f479e4c 2887 memcpy(ivptr, req->iv, 16);
2debd332 2888 }
4262c98a
HJ
2889 if (assoclen)
2890 *((unsigned short *)(reqctx->scratch_pad + 16)) =
2891 htons(assoclen);
2892
66af86d9 2893 rc = generate_b0(req, ivptr, op_type);
2debd332 2894 /* zero the ctr value */
1f479e4c 2895 memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
2debd332
HJ
2896 return rc;
2897}
2898
2899static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2900 unsigned int dst_size,
2901 struct aead_request *req,
2f47d580 2902 unsigned short op_type)
2debd332
HJ
2903{
2904 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
567be3a5
AS
2905 struct chcr_context *ctx = a_ctx(tfm);
2906 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2907 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2debd332
HJ
2908 unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2909 unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
567be3a5 2910 unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2debd332
HJ
2911 unsigned int ccm_xtra;
2912 unsigned char tag_offset = 0, auth_offset = 0;
2debd332
HJ
2913 unsigned int assoclen;
2914
2915 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2916 assoclen = req->assoclen - 8;
2917 else
2918 assoclen = req->assoclen;
2919 ccm_xtra = CCM_B0_SIZE +
2920 ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2921
2922 auth_offset = req->cryptlen ?
1f479e4c 2923 (req->assoclen + IV + 1 + ccm_xtra) : 0;
2debd332
HJ
2924 if (op_type == CHCR_DECRYPT_OP) {
2925 if (crypto_aead_authsize(tfm) != req->cryptlen)
2926 tag_offset = crypto_aead_authsize(tfm);
2927 else
2928 auth_offset = 0;
2929 }
2930
567be3a5 2931 sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
2debd332 2932 sec_cpl->pldlen =
1f479e4c 2933 htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
2debd332
HJ
2934 /* For CCM there will be b0 always. So AAD start will be 1 always */
2935 sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
1f479e4c
HJ
2936 1 + IV, IV + assoclen + ccm_xtra,
2937 req->assoclen + IV + 1 + ccm_xtra, 0);
2debd332
HJ
2938
2939 sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
2940 auth_offset, tag_offset,
2941 (op_type == CHCR_ENCRYPT_OP) ? 0 :
2942 crypto_aead_authsize(tfm));
2943 sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2944 (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
0a7bd30c 2945 cipher_mode, mac_mode,
2f47d580 2946 aeadctx->hmac_ctrl, IV >> 1);
2debd332
HJ
2947
2948 sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
2f47d580 2949 0, dst_size);
2debd332
HJ
2950}
2951
1efb892b
CIK
2952static int aead_ccm_validate_input(unsigned short op_type,
2953 struct aead_request *req,
2954 struct chcr_aead_ctx *aeadctx,
2955 unsigned int sub_type)
2debd332
HJ
2956{
2957 if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2958 if (crypto_ccm_check_iv(req->iv)) {
2959 pr_err("CCM: IV check fails\n");
2960 return -EINVAL;
2961 }
2962 } else {
2963 if (req->assoclen != 16 && req->assoclen != 20) {
2964 pr_err("RFC4309: Invalid AAD length %d\n",
2965 req->assoclen);
2966 return -EINVAL;
2967 }
2968 }
2debd332
HJ
2969 return 0;
2970}
2971
2debd332
HJ
2972static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
2973 unsigned short qid,
4262c98a 2974 int size)
2debd332
HJ
2975{
2976 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2f47d580 2977 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2debd332
HJ
2978 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2979 struct sk_buff *skb = NULL;
2980 struct chcr_wr *chcr_req;
2981 struct cpl_rx_phys_dsgl *phys_cpl;
2f47d580
HJ
2982 struct ulptx_sgl *ulptx;
2983 unsigned int transhdr_len;
1f479e4c 2984 unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
2f47d580 2985 unsigned int sub_type, assoclen = req->assoclen;
2debd332 2986 unsigned int authsize = crypto_aead_authsize(tfm);
2f47d580 2987 int error = -EINVAL;
1f479e4c 2988 u8 *ivptr;
2debd332
HJ
2989 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2990 GFP_ATOMIC;
2f47d580 2991 struct adapter *adap = padap(a_ctx(tfm)->dev);
2debd332 2992
2f47d580
HJ
2993 sub_type = get_aead_subtype(tfm);
2994 if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2995 assoclen -= 8;
4262c98a
HJ
2996 reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
2997 error = chcr_aead_common_init(req);
2f47d580
HJ
2998 if (error)
2999 return ERR_PTR(error);
0e93708d 3000
4262c98a 3001 error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
5fe8c711 3002 if (error)
2debd332 3003 goto err;
1f479e4c 3004 dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
4262c98a 3005 + (reqctx->op ? -authsize : authsize),
1f479e4c 3006 CHCR_DST_SG_SIZE, 0);
e1a018e6 3007 dnents += MIN_CCM_SG; // For IV and B0
2f47d580 3008 dst_size = get_space_for_phys_dsgl(dnents);
1f479e4c
HJ
3009 snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3010 CHCR_SRC_SG_SIZE, 0);
3011 snents += MIN_CCM_SG; // For B0
125d01ca 3012 kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
2debd332 3013 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
1f479e4c 3014 reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
2f47d580 3015 reqctx->b0_len) <= SGE_MAX_WR_LEN;
1f479e4c 3016 temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
125d01ca 3017 reqctx->b0_len, 16) :
1f479e4c 3018 (sgl_len(snents) * 8);
2f47d580 3019 transhdr_len += temp;
125d01ca 3020 transhdr_len = roundup(transhdr_len, 16);
2f47d580
HJ
3021
3022 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
1f479e4c 3023 reqctx->b0_len, transhdr_len, reqctx->op)) {
ee0863ba 3024 atomic_inc(&adap->chcr_stats.fallback);
4262c98a
HJ
3025 chcr_aead_common_exit(req);
3026 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
0e93708d 3027 }
1f479e4c 3028 skb = alloc_skb(transhdr_len, flags);
2debd332 3029
5fe8c711
HJ
3030 if (!skb) {
3031 error = -ENOMEM;
2debd332 3032 goto err;
5fe8c711 3033 }
2debd332 3034
1f479e4c 3035 chcr_req = __skb_put_zero(skb, transhdr_len);
2debd332 3036
4262c98a 3037 fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
2debd332
HJ
3038
3039 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3040 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
125d01ca
HJ
3041 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3042 aeadctx->key, aeadctx->enckey_len);
2debd332
HJ
3043
3044 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
1f479e4c
HJ
3045 ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3046 ulptx = (struct ulptx_sgl *)(ivptr + IV);
3047 error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
5fe8c711 3048 if (error)
2debd332 3049 goto dstmap_fail;
1f479e4c
HJ
3050 chcr_add_aead_dst_ent(req, phys_cpl, qid);
3051 chcr_add_aead_src_ent(req, ulptx);
2debd332 3052
ee0863ba 3053 atomic_inc(&adap->chcr_stats.aead_rqst);
1f479e4c
HJ
3054 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3055 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
2f47d580
HJ
3056 reqctx->b0_len) : 0);
3057 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
3058 transhdr_len, temp, 0);
2debd332 3059 reqctx->skb = skb;
2f47d580 3060
2debd332
HJ
3061 return skb;
3062dstmap_fail:
3063 kfree_skb(skb);
2debd332 3064err:
4262c98a 3065 chcr_aead_common_exit(req);
5fe8c711 3066 return ERR_PTR(error);
2debd332
HJ
3067}
3068
3069static struct sk_buff *create_gcm_wr(struct aead_request *req,
3070 unsigned short qid,
4262c98a 3071 int size)
2debd332
HJ
3072{
3073 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
567be3a5
AS
3074 struct chcr_context *ctx = a_ctx(tfm);
3075 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2debd332
HJ
3076 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3077 struct sk_buff *skb = NULL;
3078 struct chcr_wr *chcr_req;
3079 struct cpl_rx_phys_dsgl *phys_cpl;
2f47d580 3080 struct ulptx_sgl *ulptx;
1f479e4c 3081 unsigned int transhdr_len, dnents = 0, snents;
2f47d580 3082 unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
2debd332 3083 unsigned int authsize = crypto_aead_authsize(tfm);
2f47d580 3084 int error = -EINVAL;
1f479e4c 3085 u8 *ivptr;
2debd332
HJ
3086 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3087 GFP_ATOMIC;
567be3a5
AS
3088 struct adapter *adap = padap(ctx->dev);
3089 unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2debd332 3090
2f47d580
HJ
3091 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
3092 assoclen = req->assoclen - 8;
2debd332 3093
4262c98a
HJ
3094 reqctx->b0_len = 0;
3095 error = chcr_aead_common_init(req);
e1a018e6
HJ
3096 if (error)
3097 return ERR_PTR(error);
1f479e4c 3098 dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
4262c98a 3099 (reqctx->op ? -authsize : authsize),
1f479e4c
HJ
3100 CHCR_DST_SG_SIZE, 0);
3101 snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3102 CHCR_SRC_SG_SIZE, 0);
e1a018e6 3103 dnents += MIN_GCM_SG; // For IV
2f47d580 3104 dst_size = get_space_for_phys_dsgl(dnents);
125d01ca 3105 kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
2debd332 3106 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
1f479e4c 3107 reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
2f47d580 3108 SGE_MAX_WR_LEN;
1f479e4c
HJ
3109 temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
3110 (sgl_len(snents) * 8);
2f47d580 3111 transhdr_len += temp;
125d01ca 3112 transhdr_len = roundup(transhdr_len, 16);
2f47d580 3113 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
4262c98a
HJ
3114 transhdr_len, reqctx->op)) {
3115
ee0863ba 3116 atomic_inc(&adap->chcr_stats.fallback);
4262c98a
HJ
3117 chcr_aead_common_exit(req);
3118 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
0e93708d 3119 }
1f479e4c 3120 skb = alloc_skb(transhdr_len, flags);
5fe8c711
HJ
3121 if (!skb) {
3122 error = -ENOMEM;
2debd332 3123 goto err;
5fe8c711 3124 }
2debd332 3125
de77b966 3126 chcr_req = __skb_put_zero(skb, transhdr_len);
2debd332 3127
2f47d580 3128 // Offset of tag from end
4262c98a 3129 temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
2debd332 3130 chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
567be3a5 3131 rx_channel_id, 2, 1);
0e93708d 3132 chcr_req->sec_cpl.pldlen =
1f479e4c 3133 htonl(req->assoclen + IV + req->cryptlen);
2debd332 3134 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
1f479e4c
HJ
3135 assoclen ? 1 + IV : 0,
3136 assoclen ? IV + assoclen : 0,
3137 req->assoclen + IV + 1, 0);
e1a018e6 3138 chcr_req->sec_cpl.cipherstop_lo_authinsert =
1f479e4c 3139 FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
2f47d580 3140 temp, temp);
e1a018e6 3141 chcr_req->sec_cpl.seqno_numivs =
4262c98a 3142 FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
2debd332
HJ
3143 CHCR_ENCRYPT_OP) ? 1 : 0,
3144 CHCR_SCMD_CIPHER_MODE_AES_GCM,
0a7bd30c 3145 CHCR_SCMD_AUTH_MODE_GHASH,
2f47d580 3146 aeadctx->hmac_ctrl, IV >> 1);
2debd332 3147 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2f47d580 3148 0, 0, dst_size);
2debd332
HJ
3149 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3150 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
125d01ca
HJ
3151 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3152 GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
2debd332 3153
1f479e4c
HJ
3154 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3155 ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2debd332
HJ
3156 /* prepare a 16 byte iv */
3157 /* S A L T | IV | 0x00000001 */
3158 if (get_aead_subtype(tfm) ==
3159 CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
1f479e4c
HJ
3160 memcpy(ivptr, aeadctx->salt, 4);
3161 memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
2debd332 3162 } else {
1f479e4c 3163 memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
2debd332 3164 }
1f479e4c 3165 *((unsigned int *)(ivptr + 12)) = htonl(0x01);
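	/* Illustrative layout of the 16-byte block just assembled (an
	 * editorial example, not part of the driver):
	 *   rfc4106(gcm(aes)): bytes 0-3  salt, bytes 4-11 req->iv,
	 *                      bytes 12-15 counter = 00 00 00 01
	 *   gcm(aes):          bytes 0-11 req->iv,
	 *                      bytes 12-15 counter = 00 00 00 01
	 * This matches the initial counter block (J0) GCM uses for a
	 * 96-bit IV.
	 */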
2debd332 3166
1f479e4c 3167 ulptx = (struct ulptx_sgl *)(ivptr + 16);
2debd332 3168
1f479e4c
HJ
3169 chcr_add_aead_dst_ent(req, phys_cpl, qid);
3170 chcr_add_aead_src_ent(req, ulptx);
ee0863ba 3171 atomic_inc(&adap->chcr_stats.aead_rqst);
1f479e4c
HJ
3172 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3173 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
2f47d580
HJ
3174 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
3175 transhdr_len, temp, reqctx->verify);
2debd332 3176 reqctx->skb = skb;
2debd332
HJ
3177 return skb;
3178
2debd332 3179err:
4262c98a 3180 chcr_aead_common_exit(req);
5fe8c711 3181 return ERR_PTR(error);
2debd332
HJ
3182}
3183
3184
3185
3186static int chcr_aead_cra_init(struct crypto_aead *tfm)
3187{
2f47d580 3188 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
0e93708d
HJ
3189 struct aead_alg *alg = crypto_aead_alg(tfm);
3190
3191 aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
5fe8c711
HJ
3192 CRYPTO_ALG_NEED_FALLBACK |
3193 CRYPTO_ALG_ASYNC);
0e93708d
HJ
3194 if (IS_ERR(aeadctx->sw_cipher))
3195 return PTR_ERR(aeadctx->sw_cipher);
3196 crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
3197 sizeof(struct aead_request) +
3198 crypto_aead_reqsize(aeadctx->sw_cipher)));
2f47d580 3199 return chcr_device_init(a_ctx(tfm));
2debd332
HJ
3200}
3201
3202static void chcr_aead_cra_exit(struct crypto_aead *tfm)
3203{
2f47d580 3204 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
0e93708d 3205
0e93708d 3206 crypto_free_aead(aeadctx->sw_cipher);
2debd332
HJ
3207}
3208
3209static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
3210 unsigned int authsize)
3211{
2f47d580 3212 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2debd332
HJ
3213
3214 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
3215 aeadctx->mayverify = VERIFY_HW;
0e93708d 3216 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2debd332
HJ
3217}
3218static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
3219 unsigned int authsize)
3220{
2f47d580 3221 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2debd332
HJ
3222 u32 maxauth = crypto_aead_maxauthsize(tfm);
3223
3224 /* SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2 does
3225 * not hold for SHA1, so the authsize == 12 check must come before the
3226 * authsize == (maxauth >> 1) check.
3227 */
3228 if (authsize == ICV_4) {
3229 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3230 aeadctx->mayverify = VERIFY_HW;
3231 } else if (authsize == ICV_6) {
3232 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3233 aeadctx->mayverify = VERIFY_HW;
3234 } else if (authsize == ICV_10) {
3235 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3236 aeadctx->mayverify = VERIFY_HW;
3237 } else if (authsize == ICV_12) {
3238 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3239 aeadctx->mayverify = VERIFY_HW;
3240 } else if (authsize == ICV_14) {
3241 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3242 aeadctx->mayverify = VERIFY_HW;
3243 } else if (authsize == (maxauth >> 1)) {
3244 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3245 aeadctx->mayverify = VERIFY_HW;
3246 } else if (authsize == maxauth) {
3247 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3248 aeadctx->mayverify = VERIFY_HW;
3249 } else {
3250 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3251 aeadctx->mayverify = VERIFY_SW;
3252 }
0e93708d 3253 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2debd332
HJ
3254}
3255
3256
3257static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
3258{
2f47d580 3259 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2debd332
HJ
3260
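/* The hardware can emit only certain truncated tag lengths; for the
 * ones it cannot produce directly (13 and 15 bytes here) the full tag
 * is generated and truncation/verification is done in software
 * (VERIFY_SW).
 */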
3261 switch (authsize) {
3262 case ICV_4:
3263 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3264 aeadctx->mayverify = VERIFY_HW;
3265 break;
3266 case ICV_8:
3267 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3268 aeadctx->mayverify = VERIFY_HW;
3269 break;
3270 case ICV_12:
fc6176a2
CIK
3271 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3272 aeadctx->mayverify = VERIFY_HW;
2debd332
HJ
3273 break;
3274 case ICV_14:
fc6176a2
CIK
3275 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3276 aeadctx->mayverify = VERIFY_HW;
2debd332
HJ
3277 break;
3278 case ICV_16:
3279 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3280 aeadctx->mayverify = VERIFY_HW;
3281 break;
3282 case ICV_13:
3283 case ICV_15:
3284 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3285 aeadctx->mayverify = VERIFY_SW;
3286 break;
3287 default:
2debd332
HJ
3288 return -EINVAL;
3289 }
0e93708d 3290 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2debd332
HJ
3291}
3292
3293static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
3294 unsigned int authsize)
3295{
2f47d580 3296 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2debd332
HJ
3297
3298 switch (authsize) {
3299 case ICV_8:
3300 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3301 aeadctx->mayverify = VERIFY_HW;
3302 break;
3303 case ICV_12:
3304 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3305 aeadctx->mayverify = VERIFY_HW;
3306 break;
3307 case ICV_16:
3308 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3309 aeadctx->mayverify = VERIFY_HW;
3310 break;
3311 default:
2debd332
HJ
3312 return -EINVAL;
3313 }
0e93708d 3314 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2debd332
HJ
3315}
3316
3317static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3318 unsigned int authsize)
3319{
2f47d580 3320 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2debd332
HJ
3321
3322 switch (authsize) {
3323 case ICV_4:
3324 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3325 aeadctx->mayverify = VERIFY_HW;
3326 break;
3327 case ICV_6:
3328 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3329 aeadctx->mayverify = VERIFY_HW;
3330 break;
3331 case ICV_8:
3332 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3333 aeadctx->mayverify = VERIFY_HW;
3334 break;
3335 case ICV_10:
3336 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3337 aeadctx->mayverify = VERIFY_HW;
3338 break;
3339 case ICV_12:
3340 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3341 aeadctx->mayverify = VERIFY_HW;
3342 break;
3343 case ICV_14:
3344 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3345 aeadctx->mayverify = VERIFY_HW;
3346 break;
3347 case ICV_16:
3348 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3349 aeadctx->mayverify = VERIFY_HW;
3350 break;
3351 default:
2debd332
HJ
3352 return -EINVAL;
3353 }
0e93708d 3354 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2debd332
HJ
3355}
3356
0e93708d 3357static int chcr_ccm_common_setkey(struct crypto_aead *aead,
2debd332
HJ
3358 const u8 *key,
3359 unsigned int keylen)
3360{
2f47d580 3361 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
2debd332
HJ
3362 unsigned char ck_size, mk_size;
3363 int key_ctx_size = 0;
3364
125d01ca 3365 key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
2debd332 3366 if (keylen == AES_KEYSIZE_128) {
2debd332 3367 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
125d01ca 3368 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
2debd332
HJ
3369 } else if (keylen == AES_KEYSIZE_192) {
3370 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3371 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3372 } else if (keylen == AES_KEYSIZE_256) {
3373 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3374 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3375 } else {
2debd332
HJ
3376 aeadctx->enckey_len = 0;
3377 return -EINVAL;
3378 }
3379 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3380 key_ctx_size >> 4);
0e93708d
HJ
3381 memcpy(aeadctx->key, key, keylen);
3382 aeadctx->enckey_len = keylen;
3383
2debd332
HJ
3384 return 0;
3385}
3386
0e93708d
HJ
3387static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3388 const u8 *key,
3389 unsigned int keylen)
3390{
2f47d580 3391 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
0e93708d
HJ
3392 int error;
3393
3394 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3395 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3396 CRYPTO_TFM_REQ_MASK);
3397 error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
0e93708d
HJ
3398 if (error)
3399 return error;
3400 return chcr_ccm_common_setkey(aead, key, keylen);
3401}
3402
2debd332
HJ
3403static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3404 unsigned int keylen)
3405{
2f47d580 3406 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
4dbeae42 3407 int error;
2debd332
HJ
3408
3409 if (keylen < 3) {
2debd332
HJ
3410 aeadctx->enckey_len = 0;
3411 return -EINVAL;
3412 }
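/* An rfc4309 key blob is the AES key followed by a 3-byte nonce/salt,
 * which is stripped off below before the cipher key is programmed;
 * hence the minimum length check above.
 */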
4dbeae42
HJ
3413 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3414 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3415 CRYPTO_TFM_REQ_MASK);
3416 error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
4dbeae42
HJ
3417 if (error)
3418 return error;
2debd332
HJ
3419 keylen -= 3;
3420 memcpy(aeadctx->salt, key + keylen, 3);
0e93708d 3421 return chcr_ccm_common_setkey(aead, key, keylen);
2debd332
HJ
3422}
3423
3424static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3425 unsigned int keylen)
3426{
2f47d580 3427 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
2debd332 3428 struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
2debd332
HJ
3429 unsigned int ck_size;
3430 int ret = 0, key_ctx_size = 0;
571c47ab 3431 struct crypto_aes_ctx aes;
2debd332 3432
0e93708d
HJ
3433 aeadctx->enckey_len = 0;
3434 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3435 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3436 & CRYPTO_TFM_REQ_MASK);
3437 ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
0e93708d
HJ
3438 if (ret)
3439 goto out;
3440
7c2cf1c4
HJ
3441 if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3442 keylen > 3) {
2debd332
HJ
3443 keylen -= 4; /* nonce/salt is present in the last 4 bytes */
3444 memcpy(aeadctx->salt, key + keylen, 4);
3445 }
3446 if (keylen == AES_KEYSIZE_128) {
3447 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3448 } else if (keylen == AES_KEYSIZE_192) {
3449 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3450 } else if (keylen == AES_KEYSIZE_256) {
3451 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3452 } else {
0e93708d 3453 pr_err("GCM: Invalid key length %d\n", keylen);
2debd332
HJ
3454 ret = -EINVAL;
3455 goto out;
3456 }
3457
3458 memcpy(aeadctx->key, key, keylen);
3459 aeadctx->enckey_len = keylen;
125d01ca 3460 key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
2debd332 3461 AEAD_H_SIZE;
125d01ca 3462 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
2debd332
HJ
3463 CHCR_KEYCTX_MAC_KEY_SIZE_128,
3464 0, 0,
3465 key_ctx_size >> 4);
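/* Key context layout for GCM: header, the AES key rounded up to a
 * 16-byte boundary, then the 16-byte GHASH subkey H computed below.
 */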
8356ea51
HJ
3466 /* Calculate H = CIPH(K, 0 repeated 16 times).
3467 * It will go into the key context.
2debd332 3468 */
571c47ab 3469 ret = aes_expandkey(&aes, key, keylen);
2debd332
HJ
3470 if (ret) {
3471 aeadctx->enckey_len = 0;
571c47ab 3472 goto out;
2debd332
HJ
3473 }
3474 memset(gctx->ghash_h, 0, AEAD_H_SIZE);
571c47ab
AB
3475 aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
3476 memzero_explicit(&aes, sizeof(aes));
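/* The generic AES library is used only for this one-off block
 * encryption at setkey time; the expanded key schedule is wiped
 * immediately afterwards.
 */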
2debd332 3477
2debd332
HJ
3478out:
3479 return ret;
3480}
3481
3482static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3483 unsigned int keylen)
3484{
2f47d580 3485 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
2debd332
HJ
3486 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3487 /* it contains both the auth and cipher keys */
3488 struct crypto_authenc_keys keys;
3d64bd67 3489 unsigned int bs, subtype;
2debd332
HJ
3490 unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
3491 int err = 0, i, key_ctx_len = 0;
3492 unsigned char ck_size = 0;
3493 unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
ec1bca94 3494 struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
2debd332
HJ
3495 struct algo_param param;
3496 int align;
3497 u8 *o_ptr = NULL;
3498
0e93708d
HJ
3499 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3500 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3501 & CRYPTO_TFM_REQ_MASK);
3502 err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
0e93708d
HJ
3503 if (err)
3504 goto out;
3505
674f368a 3506 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
2debd332 3507 goto out;
2debd332
HJ
3508
3509 if (get_alg_config(&param, max_authsize)) {
3510 pr_err("chcr : Unsupported digest size\n");
3511 goto out;
3512 }
3d64bd67
HJ
3513 subtype = get_aead_subtype(authenc);
3514 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3515 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3516 if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3517 goto out;
3518 memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3519 - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3520 keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3521 }
2debd332
HJ
3522 if (keys.enckeylen == AES_KEYSIZE_128) {
3523 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3524 } else if (keys.enckeylen == AES_KEYSIZE_192) {
3525 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3526 } else if (keys.enckeylen == AES_KEYSIZE_256) {
3527 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3528 } else {
3529 pr_err("chcr : Unsupported cipher key\n");
3530 goto out;
3531 }
3532
3533 /* Copy only the encryption key. The auth key is used here to generate
3534 * h(ipad) and h(opad) and is not needed again; authkeylen equals the
3535 * hash digest size.
3536 */
3537 memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3538 aeadctx->enckey_len = keys.enckeylen;
3d64bd67
HJ
3539 if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3540 subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
2debd332 3541
3d64bd67
HJ
3542 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3543 aeadctx->enckey_len << 3);
3544 }
2debd332
HJ
3545 base_hash = chcr_alloc_shash(max_authsize);
3546 if (IS_ERR(base_hash)) {
3547 pr_err("chcr : Base driver cannot be loaded\n");
0e93708d 3548 aeadctx->enckey_len = 0;
eb526531 3549 memzero_explicit(&keys, sizeof(keys));
0e93708d 3550 return -EINVAL;
324429d7 3551 }
2debd332
HJ
3552 {
3553 SHASH_DESC_ON_STACK(shash, base_hash);
6faa0f57 3554
2debd332 3555 shash->tfm = base_hash;
2debd332
HJ
3556 bs = crypto_shash_blocksize(base_hash);
3557 align = KEYCTX_ALIGN_PAD(max_authsize);
3558 o_ptr = actx->h_iopad + param.result_size + align;
3559
3560 if (keys.authkeylen > bs) {
3561 err = crypto_shash_digest(shash, keys.authkey,
3562 keys.authkeylen,
3563 o_ptr);
3564 if (err) {
3565 pr_err("chcr : Base driver cannot be loaded\n");
3566 goto out;
3567 }
3568 keys.authkeylen = max_authsize;
3569 } else
3570 memcpy(o_ptr, keys.authkey, keys.authkeylen);
3571
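/* Standard HMAC precomputation: the (possibly pre-hashed) auth key is
 * padded to the block size, XORed with the ipad/opad constants, and
 * one compression round of each is stored, so the hardware only has
 * to hash the message itself. Both partial digests are then converted
 * to network byte order.
 */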
3572 /* Compute the ipad-digest */
3573 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3574 memcpy(pad, o_ptr, keys.authkeylen);
3575 for (i = 0; i < bs >> 2; i++)
3576 *((unsigned int *)pad + i) ^= IPAD_DATA;
3577
3578 if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3579 max_authsize))
3580 goto out;
3581 /* Compute the opad-digest */
3582 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3583 memcpy(pad, o_ptr, keys.authkeylen);
3584 for (i = 0; i < bs >> 2; i++)
3585 *((unsigned int *)pad + i) ^= OPAD_DATA;
3586
3587 if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3588 goto out;
3589
3590 /* convert the ipad and opad digest to network order */
3591 chcr_change_order(actx->h_iopad, param.result_size);
3592 chcr_change_order(o_ptr, param.result_size);
3593 key_ctx_len = sizeof(struct _key_ctx) +
125d01ca 3594 roundup(keys.enckeylen, 16) +
2debd332
HJ
3595 (param.result_size + align) * 2;
3596 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3597 0, 1, key_ctx_len >> 4);
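/* Key context layout for authenc: header, the encryption key rounded
 * up to 16 bytes, then the ipad and opad partial digests (each padded
 * to the alignment computed above).
 */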
3598 actx->auth_mode = param.auth_mode;
3599 chcr_free_shash(base_hash);
3600
eb526531 3601 memzero_explicit(&keys, sizeof(keys));
2debd332
HJ
3602 return 0;
3603 }
3604out:
3605 aeadctx->enckey_len = 0;
eb526531 3606 memzero_explicit(&keys, sizeof(keys));
ec1bca94 3607 if (!IS_ERR(base_hash))
2debd332
HJ
3608 chcr_free_shash(base_hash);
3609 return -EINVAL;
324429d7
HS
3610}
3611
2debd332
HJ
3612static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3613 const u8 *key, unsigned int keylen)
3614{
2f47d580 3615 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
2debd332
HJ
3616 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3617 struct crypto_authenc_keys keys;
0e93708d 3618 int err;
2debd332 3619 /* it contains both the auth and cipher keys */
3d64bd67 3620 unsigned int subtype;
2debd332
HJ
3621 int key_ctx_len = 0;
3622 unsigned char ck_size = 0;
3623
0e93708d
HJ
3624 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3625 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3626 & CRYPTO_TFM_REQ_MASK);
3627 err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
0e93708d
HJ
3628 if (err)
3629 goto out;
3630
674f368a 3631 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
2debd332 3632 goto out;
674f368a 3633
3d64bd67
HJ
3634 subtype = get_aead_subtype(authenc);
3635 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3636 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3637 if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3638 goto out;
3639 memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3640 - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3641 keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3642 }
2debd332
HJ
3643 if (keys.enckeylen == AES_KEYSIZE_128) {
3644 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3645 } else if (keys.enckeylen == AES_KEYSIZE_192) {
3646 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3647 } else if (keys.enckeylen == AES_KEYSIZE_256) {
3648 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3649 } else {
3d64bd67 3650 pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen);
2debd332
HJ
3651 goto out;
3652 }
3653 memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3654 aeadctx->enckey_len = keys.enckeylen;
3d64bd67
HJ
3655 if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3656 subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3657 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3658 aeadctx->enckey_len << 3);
3659 }
125d01ca 3660 key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
2debd332
HJ
3661
3662 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3663 0, key_ctx_len >> 4);
3664 actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
eb526531 3665 memzero_explicit(&keys, sizeof(keys));
2debd332
HJ
3666 return 0;
3667out:
3668 aeadctx->enckey_len = 0;
eb526531 3669 memzero_explicit(&keys, sizeof(keys));
2debd332
HJ
3670 return -EINVAL;
3671}
6dad4e8a
AG
3672
3673static int chcr_aead_op(struct aead_request *req,
6dad4e8a
AG
3674 int size,
3675 create_wr_t create_wr_fn)
3676{
3677 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
fef4912b 3678 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
567be3a5
AS
3679 struct chcr_context *ctx = a_ctx(tfm);
3680 struct uld_ctx *u_ctx = ULD_CTX(ctx);
6dad4e8a 3681 struct sk_buff *skb;
fef4912b 3682 struct chcr_dev *cdev;
6dad4e8a 3683
fef4912b
HJ
3684 cdev = a_ctx(tfm)->dev;
3685 if (!cdev) {
6dad4e8a
AG
3686 pr_err("chcr : %s : No crypto device.\n", __func__);
3687 return -ENXIO;
3688 }
fef4912b
HJ
3689
3690 if (chcr_inc_wrcount(cdev)) {
3691 /* Detach state for CHCR means lldi or padap has been freed;
3692 * we cannot bump the fallback counter here.
3693 */
3694 return chcr_aead_fallback(req, reqctx->op);
3695 }
3696
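/* Back-pressure: if the crypto TX queue is full and the caller did
 * not allow backlogging, drop the WR count taken above and report
 * -ENOSPC so the caller can retry.
 */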
6dad4e8a 3697 if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
567be3a5
AS
3698 reqctx->txqidx) &&
3699 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
fef4912b 3700 chcr_dec_wrcount(cdev);
6faa0f57 3701 return -ENOSPC;
6dad4e8a
AG
3702 }
3703
3704 /* Form a WR from req */
567be3a5 3705 skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);
6dad4e8a 3706
b04a27ca 3707 if (IS_ERR_OR_NULL(skb)) {
fef4912b 3708 chcr_dec_wrcount(cdev);
b04a27ca 3709 return PTR_ERR_OR_ZERO(skb);
fef4912b 3710 }
6dad4e8a
AG
3711
3712 skb->dev = u_ctx->lldi.ports[0];
567be3a5 3713 set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
6dad4e8a 3714 chcr_send_wr(skb);
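/* Completion (including any software tag verification) is reported
 * asynchronously from the driver's response path, hence -EINPROGRESS.
 */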
567be3a5 3715 return -EINPROGRESS;
6dad4e8a
AG
3716}
3717
2debd332
HJ
3718static int chcr_aead_encrypt(struct aead_request *req)
3719{
3720 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3721 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
567be3a5
AS
3722 struct chcr_context *ctx = a_ctx(tfm);
3723 unsigned int cpu;
3724
3725 cpu = get_cpu();
3726 reqctx->txqidx = cpu % ctx->ntxq;
3727 reqctx->rxqidx = cpu % ctx->nrxq;
3728 put_cpu();
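/* Spread requests across the available TX/RX queue pairs by hashing
 * on the submitting CPU.
 */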
2debd332
HJ
3729
3730 reqctx->verify = VERIFY_HW;
4262c98a 3731 reqctx->op = CHCR_ENCRYPT_OP;
2debd332
HJ
3732
3733 switch (get_aead_subtype(tfm)) {
3d64bd67
HJ
3734 case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3735 case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3736 case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3737 case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
4262c98a 3738 return chcr_aead_op(req, 0, create_authenc_wr);
2debd332
HJ
3739 case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3740 case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
4262c98a 3741 return chcr_aead_op(req, 0, create_aead_ccm_wr);
2debd332 3742 default:
4262c98a 3743 return chcr_aead_op(req, 0, create_gcm_wr);
2debd332
HJ
3744 }
3745}
3746
3747static int chcr_aead_decrypt(struct aead_request *req)
3748{
3749 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
567be3a5
AS
3750 struct chcr_context *ctx = a_ctx(tfm);
3751 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2debd332
HJ
3752 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3753 int size;
567be3a5
AS
3754 unsigned int cpu;
3755
3756 cpu = get_cpu();
3757 reqctx->txqidx = cpu % ctx->ntxq;
3758 reqctx->rxqidx = cpu % ctx->nrxq;
3759 put_cpu();
2debd332
HJ
3760
3761 if (aeadctx->mayverify == VERIFY_SW) {
3762 size = crypto_aead_maxauthsize(tfm);
3763 reqctx->verify = VERIFY_SW;
3764 } else {
3765 size = 0;
3766 reqctx->verify = VERIFY_HW;
3767 }
4262c98a 3768 reqctx->op = CHCR_DECRYPT_OP;
2debd332 3769 switch (get_aead_subtype(tfm)) {
3d64bd67
HJ
3770 case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3771 case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3772 case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3773 case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
4262c98a 3774 return chcr_aead_op(req, size, create_authenc_wr);
2debd332
HJ
3775 case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3776 case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
4262c98a 3777 return chcr_aead_op(req, size, create_aead_ccm_wr);
2debd332 3778 default:
4262c98a 3779 return chcr_aead_op(req, size, create_gcm_wr);
2debd332
HJ
3780 }
3781}
3782
324429d7
HS
3783static struct chcr_alg_template driver_algs[] = {
3784 /* AES-CBC */
3785 {
7cea6d3e 3786 .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
324429d7 3787 .is_registered = 0,
7cea6d3e
AB
3788 .alg.skcipher = {
3789 .base.cra_name = "cbc(aes)",
3790 .base.cra_driver_name = "cbc-aes-chcr",
3791 .base.cra_blocksize = AES_BLOCK_SIZE,
3792
3793 .init = chcr_init_tfm,
3794 .exit = chcr_exit_tfm,
3795 .min_keysize = AES_MIN_KEY_SIZE,
3796 .max_keysize = AES_MAX_KEY_SIZE,
3797 .ivsize = AES_BLOCK_SIZE,
3798 .setkey = chcr_aes_cbc_setkey,
3799 .encrypt = chcr_aes_encrypt,
3800 .decrypt = chcr_aes_decrypt,
324429d7 3801 }
324429d7
HS
3802 },
3803 {
7cea6d3e 3804 .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
324429d7 3805 .is_registered = 0,
7cea6d3e
AB
3806 .alg.skcipher = {
3807 .base.cra_name = "xts(aes)",
3808 .base.cra_driver_name = "xts-aes-chcr",
3809 .base.cra_blocksize = AES_BLOCK_SIZE,
3810
3811 .init = chcr_init_tfm,
3812 .exit = chcr_exit_tfm,
3813 .min_keysize = 2 * AES_MIN_KEY_SIZE,
3814 .max_keysize = 2 * AES_MAX_KEY_SIZE,
3815 .ivsize = AES_BLOCK_SIZE,
3816 .setkey = chcr_aes_xts_setkey,
3817 .encrypt = chcr_aes_encrypt,
3818 .decrypt = chcr_aes_decrypt,
324429d7 3819 }
b8fd1f41
HJ
3820 },
3821 {
7cea6d3e 3822 .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
b8fd1f41 3823 .is_registered = 0,
7cea6d3e
AB
3824 .alg.skcipher = {
3825 .base.cra_name = "ctr(aes)",
3826 .base.cra_driver_name = "ctr-aes-chcr",
3827 .base.cra_blocksize = 1,
3828
3829 .init = chcr_init_tfm,
3830 .exit = chcr_exit_tfm,
3831 .min_keysize = AES_MIN_KEY_SIZE,
3832 .max_keysize = AES_MAX_KEY_SIZE,
3833 .ivsize = AES_BLOCK_SIZE,
3834 .setkey = chcr_aes_ctr_setkey,
3835 .encrypt = chcr_aes_encrypt,
3836 .decrypt = chcr_aes_decrypt,
b8fd1f41
HJ
3837 }
3838 },
3839 {
7cea6d3e 3840 .type = CRYPTO_ALG_TYPE_SKCIPHER |
b8fd1f41
HJ
3841 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
3842 .is_registered = 0,
7cea6d3e
AB
3843 .alg.skcipher = {
3844 .base.cra_name = "rfc3686(ctr(aes))",
3845 .base.cra_driver_name = "rfc3686-ctr-aes-chcr",
3846 .base.cra_blocksize = 1,
3847
3848 .init = chcr_rfc3686_init,
3849 .exit = chcr_exit_tfm,
3850 .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
3851 .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
3852 .ivsize = CTR_RFC3686_IV_SIZE,
3853 .setkey = chcr_aes_rfc3686_setkey,
3854 .encrypt = chcr_aes_encrypt,
3855 .decrypt = chcr_aes_decrypt,
324429d7
HS
3856 }
3857 },
3858 /* SHA */
3859 {
3860 .type = CRYPTO_ALG_TYPE_AHASH,
3861 .is_registered = 0,
3862 .alg.hash = {
3863 .halg.digestsize = SHA1_DIGEST_SIZE,
3864 .halg.base = {
3865 .cra_name = "sha1",
3866 .cra_driver_name = "sha1-chcr",
3867 .cra_blocksize = SHA1_BLOCK_SIZE,
3868 }
3869 }
3870 },
3871 {
3872 .type = CRYPTO_ALG_TYPE_AHASH,
3873 .is_registered = 0,
3874 .alg.hash = {
3875 .halg.digestsize = SHA256_DIGEST_SIZE,
3876 .halg.base = {
3877 .cra_name = "sha256",
3878 .cra_driver_name = "sha256-chcr",
3879 .cra_blocksize = SHA256_BLOCK_SIZE,
3880 }
3881 }
3882 },
3883 {
3884 .type = CRYPTO_ALG_TYPE_AHASH,
3885 .is_registered = 0,
3886 .alg.hash = {
3887 .halg.digestsize = SHA224_DIGEST_SIZE,
3888 .halg.base = {
3889 .cra_name = "sha224",
3890 .cra_driver_name = "sha224-chcr",
3891 .cra_blocksize = SHA224_BLOCK_SIZE,
3892 }
3893 }
3894 },
3895 {
3896 .type = CRYPTO_ALG_TYPE_AHASH,
3897 .is_registered = 0,
3898 .alg.hash = {
3899 .halg.digestsize = SHA384_DIGEST_SIZE,
3900 .halg.base = {
3901 .cra_name = "sha384",
3902 .cra_driver_name = "sha384-chcr",
3903 .cra_blocksize = SHA384_BLOCK_SIZE,
3904 }
3905 }
3906 },
3907 {
3908 .type = CRYPTO_ALG_TYPE_AHASH,
3909 .is_registered = 0,
3910 .alg.hash = {
3911 .halg.digestsize = SHA512_DIGEST_SIZE,
3912 .halg.base = {
3913 .cra_name = "sha512",
3914 .cra_driver_name = "sha512-chcr",
3915 .cra_blocksize = SHA512_BLOCK_SIZE,
3916 }
3917 }
3918 },
3919 /* HMAC */
3920 {
3921 .type = CRYPTO_ALG_TYPE_HMAC,
3922 .is_registered = 0,
3923 .alg.hash = {
3924 .halg.digestsize = SHA1_DIGEST_SIZE,
3925 .halg.base = {
3926 .cra_name = "hmac(sha1)",
2debd332 3927 .cra_driver_name = "hmac-sha1-chcr",
324429d7
HS
3928 .cra_blocksize = SHA1_BLOCK_SIZE,
3929 }
3930 }
3931 },
3932 {
3933 .type = CRYPTO_ALG_TYPE_HMAC,
3934 .is_registered = 0,
3935 .alg.hash = {
3936 .halg.digestsize = SHA224_DIGEST_SIZE,
3937 .halg.base = {
3938 .cra_name = "hmac(sha224)",
2debd332 3939 .cra_driver_name = "hmac-sha224-chcr",
324429d7
HS
3940 .cra_blocksize = SHA224_BLOCK_SIZE,
3941 }
3942 }
3943 },
3944 {
3945 .type = CRYPTO_ALG_TYPE_HMAC,
3946 .is_registered = 0,
3947 .alg.hash = {
3948 .halg.digestsize = SHA256_DIGEST_SIZE,
3949 .halg.base = {
3950 .cra_name = "hmac(sha256)",
2debd332 3951 .cra_driver_name = "hmac-sha256-chcr",
324429d7
HS
3952 .cra_blocksize = SHA256_BLOCK_SIZE,
3953 }
3954 }
3955 },
3956 {
3957 .type = CRYPTO_ALG_TYPE_HMAC,
3958 .is_registered = 0,
3959 .alg.hash = {
3960 .halg.digestsize = SHA384_DIGEST_SIZE,
3961 .halg.base = {
3962 .cra_name = "hmac(sha384)",
2debd332 3963 .cra_driver_name = "hmac-sha384-chcr",
324429d7
HS
3964 .cra_blocksize = SHA384_BLOCK_SIZE,
3965 }
3966 }
3967 },
3968 {
3969 .type = CRYPTO_ALG_TYPE_HMAC,
3970 .is_registered = 0,
3971 .alg.hash = {
3972 .halg.digestsize = SHA512_DIGEST_SIZE,
3973 .halg.base = {
3974 .cra_name = "hmac(sha512)",
2debd332 3975 .cra_driver_name = "hmac-sha512-chcr",
324429d7
HS
3976 .cra_blocksize = SHA512_BLOCK_SIZE,
3977 }
3978 }
3979 },
2debd332
HJ
3980 /* Add AEAD Algorithms */
3981 {
3982 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
3983 .is_registered = 0,
3984 .alg.aead = {
3985 .base = {
3986 .cra_name = "gcm(aes)",
3987 .cra_driver_name = "gcm-aes-chcr",
3988 .cra_blocksize = 1,
e29abda5 3989 .cra_priority = CHCR_AEAD_PRIORITY,
2debd332
HJ
3990 .cra_ctxsize = sizeof(struct chcr_context) +
3991 sizeof(struct chcr_aead_ctx) +
3992 sizeof(struct chcr_gcm_ctx),
3993 },
8f6acb7f 3994 .ivsize = GCM_AES_IV_SIZE,
2debd332
HJ
3995 .maxauthsize = GHASH_DIGEST_SIZE,
3996 .setkey = chcr_gcm_setkey,
3997 .setauthsize = chcr_gcm_setauthsize,
3998 }
3999 },
4000 {
4001 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
4002 .is_registered = 0,
4003 .alg.aead = {
4004 .base = {
4005 .cra_name = "rfc4106(gcm(aes))",
4006 .cra_driver_name = "rfc4106-gcm-aes-chcr",
4007 .cra_blocksize = 1,
e29abda5 4008 .cra_priority = CHCR_AEAD_PRIORITY + 1,
2debd332
HJ
4009 .cra_ctxsize = sizeof(struct chcr_context) +
4010 sizeof(struct chcr_aead_ctx) +
4011 sizeof(struct chcr_gcm_ctx),
4012
4013 },
8f6acb7f 4014 .ivsize = GCM_RFC4106_IV_SIZE,
2debd332
HJ
4015 .maxauthsize = GHASH_DIGEST_SIZE,
4016 .setkey = chcr_gcm_setkey,
4017 .setauthsize = chcr_4106_4309_setauthsize,
4018 }
4019 },
4020 {
4021 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
4022 .is_registered = 0,
4023 .alg.aead = {
4024 .base = {
4025 .cra_name = "ccm(aes)",
4026 .cra_driver_name = "ccm-aes-chcr",
4027 .cra_blocksize = 1,
e29abda5 4028 .cra_priority = CHCR_AEAD_PRIORITY,
2debd332
HJ
4029 .cra_ctxsize = sizeof(struct chcr_context) +
4030 sizeof(struct chcr_aead_ctx),
4031
4032 },
4033 .ivsize = AES_BLOCK_SIZE,
4034 .maxauthsize = GHASH_DIGEST_SIZE,
4035 .setkey = chcr_aead_ccm_setkey,
4036 .setauthsize = chcr_ccm_setauthsize,
4037 }
4038 },
4039 {
4040 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
4041 .is_registered = 0,
4042 .alg.aead = {
4043 .base = {
4044 .cra_name = "rfc4309(ccm(aes))",
4045 .cra_driver_name = "rfc4309-ccm-aes-chcr",
4046 .cra_blocksize = 1,
e29abda5 4047 .cra_priority = CHCR_AEAD_PRIORITY + 1,
2debd332
HJ
4048 .cra_ctxsize = sizeof(struct chcr_context) +
4049 sizeof(struct chcr_aead_ctx),
4050
4051 },
4052 .ivsize = 8,
4053 .maxauthsize = GHASH_DIGEST_SIZE,
4054 .setkey = chcr_aead_rfc4309_setkey,
4055 .setauthsize = chcr_4106_4309_setauthsize,
4056 }
4057 },
4058 {
3d64bd67 4059 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
2debd332
HJ
4060 .is_registered = 0,
4061 .alg.aead = {
4062 .base = {
4063 .cra_name = "authenc(hmac(sha1),cbc(aes))",
4064 .cra_driver_name =
4065 "authenc-hmac-sha1-cbc-aes-chcr",
4066 .cra_blocksize = AES_BLOCK_SIZE,
e29abda5 4067 .cra_priority = CHCR_AEAD_PRIORITY,
2debd332
HJ
4068 .cra_ctxsize = sizeof(struct chcr_context) +
4069 sizeof(struct chcr_aead_ctx) +
4070 sizeof(struct chcr_authenc_ctx),
4071
4072 },
4073 .ivsize = AES_BLOCK_SIZE,
4074 .maxauthsize = SHA1_DIGEST_SIZE,
4075 .setkey = chcr_authenc_setkey,
4076 .setauthsize = chcr_authenc_setauthsize,
4077 }
4078 },
4079 {
3d64bd67 4080 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
2debd332
HJ
4081 .is_registered = 0,
4082 .alg.aead = {
4083 .base = {
4084
4085 .cra_name = "authenc(hmac(sha256),cbc(aes))",
4086 .cra_driver_name =
4087 "authenc-hmac-sha256-cbc-aes-chcr",
4088 .cra_blocksize = AES_BLOCK_SIZE,
e29abda5 4089 .cra_priority = CHCR_AEAD_PRIORITY,
2debd332
HJ
4090 .cra_ctxsize = sizeof(struct chcr_context) +
4091 sizeof(struct chcr_aead_ctx) +
4092 sizeof(struct chcr_authenc_ctx),
4093
4094 },
4095 .ivsize = AES_BLOCK_SIZE,
4096 .maxauthsize = SHA256_DIGEST_SIZE,
4097 .setkey = chcr_authenc_setkey,
4098 .setauthsize = chcr_authenc_setauthsize,
4099 }
4100 },
4101 {
3d64bd67 4102 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
2debd332
HJ
4103 .is_registered = 0,
4104 .alg.aead = {
4105 .base = {
4106 .cra_name = "authenc(hmac(sha224),cbc(aes))",
4107 .cra_driver_name =
4108 "authenc-hmac-sha224-cbc-aes-chcr",
4109 .cra_blocksize = AES_BLOCK_SIZE,
e29abda5 4110 .cra_priority = CHCR_AEAD_PRIORITY,
2debd332
HJ
4111 .cra_ctxsize = sizeof(struct chcr_context) +
4112 sizeof(struct chcr_aead_ctx) +
4113 sizeof(struct chcr_authenc_ctx),
4114 },
4115 .ivsize = AES_BLOCK_SIZE,
4116 .maxauthsize = SHA224_DIGEST_SIZE,
4117 .setkey = chcr_authenc_setkey,
4118 .setauthsize = chcr_authenc_setauthsize,
4119 }
4120 },
4121 {
3d64bd67 4122 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
2debd332
HJ
4123 .is_registered = 0,
4124 .alg.aead = {
4125 .base = {
4126 .cra_name = "authenc(hmac(sha384),cbc(aes))",
4127 .cra_driver_name =
4128 "authenc-hmac-sha384-cbc-aes-chcr",
4129 .cra_blocksize = AES_BLOCK_SIZE,
e29abda5 4130 .cra_priority = CHCR_AEAD_PRIORITY,
2debd332
HJ
4131 .cra_ctxsize = sizeof(struct chcr_context) +
4132 sizeof(struct chcr_aead_ctx) +
4133 sizeof(struct chcr_authenc_ctx),
4134
4135 },
4136 .ivsize = AES_BLOCK_SIZE,
4137 .maxauthsize = SHA384_DIGEST_SIZE,
4138 .setkey = chcr_authenc_setkey,
4139 .setauthsize = chcr_authenc_setauthsize,
4140 }
4141 },
4142 {
3d64bd67 4143 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
2debd332
HJ
4144 .is_registered = 0,
4145 .alg.aead = {
4146 .base = {
4147 .cra_name = "authenc(hmac(sha512),cbc(aes))",
4148 .cra_driver_name =
4149 "authenc-hmac-sha512-cbc-aes-chcr",
4150 .cra_blocksize = AES_BLOCK_SIZE,
e29abda5 4151 .cra_priority = CHCR_AEAD_PRIORITY,
2debd332
HJ
4152 .cra_ctxsize = sizeof(struct chcr_context) +
4153 sizeof(struct chcr_aead_ctx) +
4154 sizeof(struct chcr_authenc_ctx),
4155
4156 },
4157 .ivsize = AES_BLOCK_SIZE,
4158 .maxauthsize = SHA512_DIGEST_SIZE,
4159 .setkey = chcr_authenc_setkey,
4160 .setauthsize = chcr_authenc_setauthsize,
4161 }
4162 },
4163 {
3d64bd67 4164 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
2debd332
HJ
4165 .is_registered = 0,
4166 .alg.aead = {
4167 .base = {
4168 .cra_name = "authenc(digest_null,cbc(aes))",
4169 .cra_driver_name =
4170 "authenc-digest_null-cbc-aes-chcr",
4171 .cra_blocksize = AES_BLOCK_SIZE,
e29abda5 4172 .cra_priority = CHCR_AEAD_PRIORITY,
2debd332
HJ
4173 .cra_ctxsize = sizeof(struct chcr_context) +
4174 sizeof(struct chcr_aead_ctx) +
4175 sizeof(struct chcr_authenc_ctx),
4176
4177 },
4178 .ivsize = AES_BLOCK_SIZE,
4179 .maxauthsize = 0,
4180 .setkey = chcr_aead_digest_null_setkey,
4181 .setauthsize = chcr_authenc_null_setauthsize,
4182 }
4183 },
3d64bd67
HJ
4184 {
4185 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4186 .is_registered = 0,
4187 .alg.aead = {
4188 .base = {
4189 .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
4190 .cra_driver_name =
4191 "authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
4192 .cra_blocksize = 1,
4193 .cra_priority = CHCR_AEAD_PRIORITY,
4194 .cra_ctxsize = sizeof(struct chcr_context) +
4195 sizeof(struct chcr_aead_ctx) +
4196 sizeof(struct chcr_authenc_ctx),
4197
4198 },
4199 .ivsize = CTR_RFC3686_IV_SIZE,
4200 .maxauthsize = SHA1_DIGEST_SIZE,
4201 .setkey = chcr_authenc_setkey,
4202 .setauthsize = chcr_authenc_setauthsize,
4203 }
4204 },
4205 {
4206 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4207 .is_registered = 0,
4208 .alg.aead = {
4209 .base = {
4210
4211 .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
4212 .cra_driver_name =
4213 "authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
4214 .cra_blocksize = 1,
4215 .cra_priority = CHCR_AEAD_PRIORITY,
4216 .cra_ctxsize = sizeof(struct chcr_context) +
4217 sizeof(struct chcr_aead_ctx) +
4218 sizeof(struct chcr_authenc_ctx),
4219
4220 },
4221 .ivsize = CTR_RFC3686_IV_SIZE,
4222 .maxauthsize = SHA256_DIGEST_SIZE,
4223 .setkey = chcr_authenc_setkey,
4224 .setauthsize = chcr_authenc_setauthsize,
4225 }
4226 },
4227 {
4228 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4229 .is_registered = 0,
4230 .alg.aead = {
4231 .base = {
4232 .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
4233 .cra_driver_name =
4234 "authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
4235 .cra_blocksize = 1,
4236 .cra_priority = CHCR_AEAD_PRIORITY,
4237 .cra_ctxsize = sizeof(struct chcr_context) +
4238 sizeof(struct chcr_aead_ctx) +
4239 sizeof(struct chcr_authenc_ctx),
4240 },
4241 .ivsize = CTR_RFC3686_IV_SIZE,
4242 .maxauthsize = SHA224_DIGEST_SIZE,
4243 .setkey = chcr_authenc_setkey,
4244 .setauthsize = chcr_authenc_setauthsize,
4245 }
4246 },
4247 {
4248 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4249 .is_registered = 0,
4250 .alg.aead = {
4251 .base = {
4252 .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
4253 .cra_driver_name =
4254 "authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
4255 .cra_blocksize = 1,
4256 .cra_priority = CHCR_AEAD_PRIORITY,
4257 .cra_ctxsize = sizeof(struct chcr_context) +
4258 sizeof(struct chcr_aead_ctx) +
4259 sizeof(struct chcr_authenc_ctx),
4260
4261 },
4262 .ivsize = CTR_RFC3686_IV_SIZE,
4263 .maxauthsize = SHA384_DIGEST_SIZE,
4264 .setkey = chcr_authenc_setkey,
4265 .setauthsize = chcr_authenc_setauthsize,
4266 }
4267 },
4268 {
4269 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4270 .is_registered = 0,
4271 .alg.aead = {
4272 .base = {
4273 .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
4274 .cra_driver_name =
4275 "authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
4276 .cra_blocksize = 1,
4277 .cra_priority = CHCR_AEAD_PRIORITY,
4278 .cra_ctxsize = sizeof(struct chcr_context) +
4279 sizeof(struct chcr_aead_ctx) +
4280 sizeof(struct chcr_authenc_ctx),
4281
4282 },
4283 .ivsize = CTR_RFC3686_IV_SIZE,
4284 .maxauthsize = SHA512_DIGEST_SIZE,
4285 .setkey = chcr_authenc_setkey,
4286 .setauthsize = chcr_authenc_setauthsize,
4287 }
4288 },
4289 {
4290 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
4291 .is_registered = 0,
4292 .alg.aead = {
4293 .base = {
4294 .cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
4295 .cra_driver_name =
4296 "authenc-digest_null-rfc3686-ctr-aes-chcr",
4297 .cra_blocksize = 1,
4298 .cra_priority = CHCR_AEAD_PRIORITY,
4299 .cra_ctxsize = sizeof(struct chcr_context) +
4300 sizeof(struct chcr_aead_ctx) +
4301 sizeof(struct chcr_authenc_ctx),
4302
4303 },
4304 .ivsize = CTR_RFC3686_IV_SIZE,
4305 .maxauthsize = 0,
4306 .setkey = chcr_aead_digest_null_setkey,
4307 .setauthsize = chcr_authenc_null_setauthsize,
4308 }
4309 },
324429d7
HS
4310};
4311
4312/*
4313 * chcr_unregister_alg - Deregister crypto algorithms from the
4314 * kernel framework.
4315 */
4316static int chcr_unregister_alg(void)
4317{
4318 int i;
4319
4320 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4321 switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
7cea6d3e 4322 case CRYPTO_ALG_TYPE_SKCIPHER:
324429d7 4323 if (driver_algs[i].is_registered)
7cea6d3e
AB
4324 crypto_unregister_skcipher(
4325 &driver_algs[i].alg.skcipher);
324429d7 4326 break;
2debd332
HJ
4327 case CRYPTO_ALG_TYPE_AEAD:
4328 if (driver_algs[i].is_registered)
4329 crypto_unregister_aead(
4330 &driver_algs[i].alg.aead);
4331 break;
324429d7
HS
4332 case CRYPTO_ALG_TYPE_AHASH:
4333 if (driver_algs[i].is_registered)
4334 crypto_unregister_ahash(
4335 &driver_algs[i].alg.hash);
4336 break;
4337 }
4338 driver_algs[i].is_registered = 0;
4339 }
4340 return 0;
4341}
4342
4343#define SZ_AHASH_CTX sizeof(struct chcr_context)
4344#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
4345#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
324429d7
HS
4346
4347/*
4348 * chcr_register_alg - Register crypto algorithms with kernel framework.
4349 */
4350static int chcr_register_alg(void)
4351{
4352 struct crypto_alg ai;
4353 struct ahash_alg *a_hash;
4354 int err = 0, i;
4355 char *name = NULL;
4356
4357 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4358 if (driver_algs[i].is_registered)
4359 continue;
4360 switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
7cea6d3e
AB
4361 case CRYPTO_ALG_TYPE_SKCIPHER:
4362 driver_algs[i].alg.skcipher.base.cra_priority =
b8fd1f41 4363 CHCR_CRA_PRIORITY;
7cea6d3e
AB
4364 driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
4365 driver_algs[i].alg.skcipher.base.cra_flags =
4366 CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
b8fd1f41 4367 CRYPTO_ALG_NEED_FALLBACK;
7cea6d3e 4368 driver_algs[i].alg.skcipher.base.cra_ctxsize =
b8fd1f41
HJ
4369 sizeof(struct chcr_context) +
4370 sizeof(struct ablk_ctx);
7cea6d3e
AB
4371 driver_algs[i].alg.skcipher.base.cra_alignmask = 0;
4372
4373 err = crypto_register_skcipher(&driver_algs[i].alg.skcipher);
4374 name = driver_algs[i].alg.skcipher.base.cra_driver_name;
324429d7 4375 break;
2debd332 4376 case CRYPTO_ALG_TYPE_AEAD:
2debd332 4377 driver_algs[i].alg.aead.base.cra_flags =
3f4a537a 4378 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
2debd332
HJ
4379 driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
4380 driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
4381 driver_algs[i].alg.aead.init = chcr_aead_cra_init;
4382 driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
4383 driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
4384 err = crypto_register_aead(&driver_algs[i].alg.aead);
4385 name = driver_algs[i].alg.aead.base.cra_driver_name;
4386 break;
324429d7
HS
4387 case CRYPTO_ALG_TYPE_AHASH:
4388 a_hash = &driver_algs[i].alg.hash;
4389 a_hash->update = chcr_ahash_update;
4390 a_hash->final = chcr_ahash_final;
4391 a_hash->finup = chcr_ahash_finup;
4392 a_hash->digest = chcr_ahash_digest;
4393 a_hash->export = chcr_ahash_export;
4394 a_hash->import = chcr_ahash_import;
4395 a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
4396 a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
4397 a_hash->halg.base.cra_module = THIS_MODULE;
6a38f622 4398 a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
324429d7
HS
4399 a_hash->halg.base.cra_alignmask = 0;
4400 a_hash->halg.base.cra_exit = NULL;
324429d7
HS
4401
4402 if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
4403 a_hash->halg.base.cra_init = chcr_hmac_cra_init;
4404 a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
4405 a_hash->init = chcr_hmac_init;
4406 a_hash->setkey = chcr_ahash_setkey;
4407 a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
4408 } else {
4409 a_hash->init = chcr_sha_init;
4410 a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
4411 a_hash->halg.base.cra_init = chcr_sha_cra_init;
4412 }
4413 err = crypto_register_ahash(&driver_algs[i].alg.hash);
4414 ai = driver_algs[i].alg.hash.halg.base;
4415 name = ai.cra_driver_name;
4416 break;
4417 }
4418 if (err) {
4419 pr_err("chcr : %s : Algorithm registration failed\n",
4420 name);
4421 goto register_err;
4422 } else {
4423 driver_algs[i].is_registered = 1;
4424 }
4425 }
4426 return 0;
4427
4428register_err:
4429 chcr_unregister_alg();
4430 return err;
4431}
4432
4433/*
4434 * start_crypto - Register the crypto algorithms.
4435 * This should be called once when the first device comes up. After this
4436 * the kernel will start calling driver APIs for crypto operations.
4437 */
4438int start_crypto(void)
4439{
4440 return chcr_register_alg();
4441}
4442
4443/*
4444 * stop_crypto - Deregister all the crypto algorithms from the kernel.
4445 * This should be called once when the last device goes down. After this
4446 * the kernel will not call the driver API for crypto operations.
4447 */
4448int stop_crypto(void)
4449{
4450 chcr_unregister_alg();
4451 return 0;
4452}