drivers/crypto/hisilicon/sec2/sec_crypto.c
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2019 HiSilicon Limited. */
3
4#include <crypto/aes.h>
5#include <crypto/algapi.h>
2f072d75 6#include <crypto/authenc.h>
416d8220 7#include <crypto/des.h>
8#include <crypto/hash.h>
9#include <crypto/internal/aead.h>
10#include <crypto/sha.h>
11#include <crypto/skcipher.h>
12#include <crypto/xts.h>
13#include <linux/crypto.h>
14#include <linux/dma-mapping.h>
15#include <linux/idr.h>
16
17#include "sec.h"
18#include "sec_crypto.h"
19
20#define SEC_PRIORITY 4001
21#define SEC_XTS_MIN_KEY_SIZE (2 * AES_MIN_KEY_SIZE)
22#define SEC_XTS_MAX_KEY_SIZE (2 * AES_MAX_KEY_SIZE)
23#define SEC_DES3_2KEY_SIZE (2 * DES_KEY_SIZE)
24#define SEC_DES3_3KEY_SIZE (3 * DES_KEY_SIZE)
25
26/* SEC sqe(bd) bit operation related macros */
27#define SEC_DE_OFFSET 1
28#define SEC_CIPHER_OFFSET 4
29#define SEC_SCENE_OFFSET 3
30#define SEC_DST_SGL_OFFSET 2
31#define SEC_SRC_SGL_OFFSET 7
32#define SEC_CKEY_OFFSET 9
33#define SEC_CMODE_OFFSET 12
34#define SEC_AKEY_OFFSET 5
35#define SEC_AEAD_ALG_OFFSET 11
36#define SEC_AUTH_OFFSET 6
37
38#define SEC_FLAG_OFFSET 7
39#define SEC_FLAG_MASK 0x0780
40#define SEC_TYPE_MASK 0x0F
41#define SEC_DONE_MASK 0x0001
42
43#define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH)
44#define SEC_SGL_SGE_NR 128
45#define SEC_CTX_DEV(ctx) (&(ctx)->sec->qm.pdev->dev)
46#define SEC_CIPHER_AUTH 0xfe
47#define SEC_AUTH_CIPHER 0x1
48#define SEC_MAX_MAC_LEN 64
2514f559 49#define SEC_MAX_AAD_LEN 65535
2f072d75 50#define SEC_TOTAL_MAC_SZ (SEC_MAX_MAC_LEN * QM_Q_DEPTH)
51
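/*
 * Per-request pbuf layout: <data pbuf (SEC_PBUF_SZ) | IV | two MAC slots>.
 * SEC_PBUF_PKG is that per-request footprint; SEC_PBUF_NUM of them fit in
 * one page, and the requests left over after SEC_PBUF_PAGE_NUM full pages
 * take SEC_PBUF_LEFT_SZ extra bytes.
 */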
52#define SEC_PBUF_SZ 512
53#define SEC_PBUF_IV_OFFSET SEC_PBUF_SZ
54#define SEC_PBUF_MAC_OFFSET (SEC_PBUF_SZ + SEC_IV_SIZE)
55#define SEC_PBUF_PKG (SEC_PBUF_SZ + SEC_IV_SIZE + \
56 SEC_MAX_MAC_LEN * 2)
57#define SEC_PBUF_NUM (PAGE_SIZE / SEC_PBUF_PKG)
58#define SEC_PBUF_PAGE_NUM (QM_Q_DEPTH / SEC_PBUF_NUM)
59#define SEC_PBUF_LEFT_SZ (SEC_PBUF_PKG * (QM_Q_DEPTH - \
60 SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM))
61#define SEC_TOTAL_PBUF_SZ (PAGE_SIZE * SEC_PBUF_PAGE_NUM + \
62 SEC_PBUF_LEFT_SZ)
63
2f072d75 64#define SEC_SQE_LEN_RATE 4
d6de2a59 65#define SEC_SQE_CFLAG 2
2f072d75 66#define SEC_SQE_AEAD_FLAG 3
d6de2a59 67#define SEC_SQE_DONE 0x1
416d8220 68
2f072d75 69static atomic_t sec_active_devs;
70
71/* Pick an en/de-cipher queue cyclically to balance the load across the TFM's queues */
a181647c 72static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
73{
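	/* Encryption uses the lower half of the queues, decryption the upper half. */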
74 if (req->c_req.encrypt)
75 return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
76 ctx->hlf_q_num;
77
78 return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
79 ctx->hlf_q_num;
80}
81
a181647c 82static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
83{
84 if (req->c_req.encrypt)
85 atomic_dec(&ctx->enc_qcyclic);
86 else
87 atomic_dec(&ctx->dec_qcyclic);
88}
89
90static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
91{
92 int req_id;
93
94 mutex_lock(&qp_ctx->req_lock);
95
96 req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
97 0, QM_Q_DEPTH, GFP_ATOMIC);
98 mutex_unlock(&qp_ctx->req_lock);
b9c8d897 99 if (unlikely(req_id < 0)) {
100 dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n");
101 return req_id;
102 }
103
104 req->qp_ctx = qp_ctx;
105 qp_ctx->req_list[req_id] = req;
106 return req_id;
107}
108
109static void sec_free_req_id(struct sec_req *req)
110{
111 struct sec_qp_ctx *qp_ctx = req->qp_ctx;
112 int req_id = req->req_id;
113
b9c8d897 114 if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) {
115 dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n");
116 return;
117 }
118
119 qp_ctx->req_list[req_id] = NULL;
120 req->qp_ctx = NULL;
121
122 mutex_lock(&qp_ctx->req_lock);
123 idr_remove(&qp_ctx->req_idr, req_id);
124 mutex_unlock(&qp_ctx->req_lock);
125}
126
2514f559 127static int sec_aead_verify(struct sec_req *req)
128{
129 struct aead_request *aead_req = req->aead_req.aead_req;
130 struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
2f072d75 131 size_t authsize = crypto_aead_authsize(tfm);
2514f559 132 u8 *mac_out = req->aead_req.out_mac;
133 u8 *mac = mac_out + SEC_MAX_MAC_LEN;
134 struct scatterlist *sgl = aead_req->src;
135 size_t sz;
136
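	/*
	 * Copy the MAC carried at the tail of the ciphertext and compare it
	 * with the MAC the hardware wrote to out_mac.
	 */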
137 sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac, authsize,
138 aead_req->cryptlen + aead_req->assoclen -
139 authsize);
140 if (unlikely(sz != authsize || memcmp(mac_out, mac, sz))) {
141 dev_err(SEC_CTX_DEV(req->ctx), "aead verify failure!\n");
142 return -EBADMSG;
143 }
144
145 return 0;
146}
147
148static void sec_req_cb(struct hisi_qp *qp, void *resp)
149{
150 struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
8213a1a6 151 struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx;
416d8220 152 struct sec_sqe *bd = resp;
153 struct sec_ctx *ctx;
154 struct sec_req *req;
416d8220 155 u16 done, flag;
310ea0ac 156 int err = 0;
416d8220 157 u8 type;
158
159 type = bd->type_cipher_auth & SEC_TYPE_MASK;
b9c8d897 160 if (unlikely(type != SEC_BD_TYPE2)) {
8213a1a6 161 atomic64_inc(&dfx->err_bd_cnt);
162 pr_err("err bd type [%d]\n", type);
163 return;
164 }
165
d6de2a59 166 req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
167 if (unlikely(!req)) {
168 atomic64_inc(&dfx->invalid_req_cnt);
169 return;
170 }
171 req->err_type = bd->type2.error_type;
172 ctx = req->ctx;
173 done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
174 flag = (le16_to_cpu(bd->type2.done_flag) &
175 SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
176 if (unlikely(req->err_type || done != SEC_SQE_DONE ||
177 (ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG) ||
178 (ctx->alg_type == SEC_AEAD && flag != SEC_SQE_AEAD_FLAG))) {
179 dev_err(SEC_CTX_DEV(ctx),
180 "err_type[%d],done[%d],flag[%d]\n",
181 req->err_type, done, flag);
310ea0ac 182 err = -EIO;
8213a1a6 183 atomic64_inc(&dfx->done_flag_cnt);
310ea0ac 184 }
1e9bc276 185
2f072d75 186 if (ctx->alg_type == SEC_AEAD && !req->c_req.encrypt)
2514f559 187 err = sec_aead_verify(req);
2f072d75 188
8213a1a6 189 atomic64_inc(&dfx->recv_cnt);
416d8220 190
191 ctx->req_op->buf_unmap(ctx, req);
192
310ea0ac 193 ctx->req_op->callback(ctx, req, err);
194}
195
196static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
197{
198 struct sec_qp_ctx *qp_ctx = req->qp_ctx;
199 int ret;
200
201 mutex_lock(&qp_ctx->req_lock);
202 ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
203 mutex_unlock(&qp_ctx->req_lock);
cb1eeb75 204 atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
416d8220 205
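	/*
	 * -EBUSY from the queue means the hardware ring is really full, so
	 * report -ENOBUFS; otherwise return -EBUSY for "fake busy" requests
	 * (soft limit hit, request still queued) or -EINPROGRESS for normal
	 * asynchronous submission.
	 */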
b9c8d897 206 if (unlikely(ret == -EBUSY))
207 return -ENOBUFS;
208
209 if (!ret) {
210 if (req->fake_busy) {
211 atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
416d8220 212 ret = -EBUSY;
8213a1a6 213 } else {
416d8220 214 ret = -EINPROGRESS;
8213a1a6 215 }
216 }
217
218 return ret;
219}
220
221/* Get DMA memory resources */
222static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
223{
224 int i;
225
226 res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
227 &res->c_ivin_dma, GFP_KERNEL);
228 if (!res->c_ivin)
229 return -ENOMEM;
230
231 for (i = 1; i < QM_Q_DEPTH; i++) {
232 res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
233 res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
234 }
235
236 return 0;
237}
238
239static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
240{
241 if (res->c_ivin)
242 dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
243 res->c_ivin, res->c_ivin_dma);
244}
245
246static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
247{
248 int i;
249
250 res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
251 &res->out_mac_dma, GFP_KERNEL);
252 if (!res->out_mac)
253 return -ENOMEM;
254
255 for (i = 1; i < QM_Q_DEPTH; i++) {
256 res[i].out_mac_dma = res->out_mac_dma +
257 i * (SEC_MAX_MAC_LEN << 1);
258 res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
259 }
260
261 return 0;
262}
263
264static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
265{
266 if (res->out_mac)
267 dma_free_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
268 res->out_mac, res->out_mac_dma);
269}
270
271static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
272{
273 if (res->pbuf)
274 dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ,
275 res->pbuf, res->pbuf_dma);
276}
277
278/*
279 * To improve performance, a pbuffer is used for
280 * small packets (< 512 bytes) when IOMMU translation is in use.
281 */
282static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
283{
284 int pbuf_page_offset;
285 int i, j, k;
286
287 res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ,
288 &res->pbuf_dma, GFP_KERNEL);
289 if (!res->pbuf)
290 return -ENOMEM;
291
292 /*
293 * SEC_PBUF_PKG contains the data pbuf, IV and
294 * out_mac : <SEC_PBUF|SEC_IV|SEC_MAC>
295 * Every page holds SEC_PBUF_NUM such packages.
296 * Each sec_qp_ctx needs QM_Q_DEPTH packages in total, so
297 * SEC_PBUF_PAGE_NUM pages plus SEC_PBUF_LEFT_SZ spare bytes
298 * make up SEC_TOTAL_PBUF_SZ.
299 */
300 for (i = 0; i <= SEC_PBUF_PAGE_NUM; i++) {
301 pbuf_page_offset = PAGE_SIZE * i;
302 for (j = 0; j < SEC_PBUF_NUM; j++) {
303 k = i * SEC_PBUF_NUM + j;
304 if (k == QM_Q_DEPTH)
305 break;
306 res[k].pbuf = res->pbuf +
307 j * SEC_PBUF_PKG + pbuf_page_offset;
308 res[k].pbuf_dma = res->pbuf_dma +
309 j * SEC_PBUF_PKG + pbuf_page_offset;
310 }
311 }
312 return 0;
313}
314
315static int sec_alg_resource_alloc(struct sec_ctx *ctx,
316 struct sec_qp_ctx *qp_ctx)
317{
318 struct device *dev = SEC_CTX_DEV(ctx);
319 struct sec_alg_res *res = qp_ctx->res;
320 int ret;
321
322 ret = sec_alloc_civ_resource(dev, res);
323 if (ret)
324 return ret;
7c7d902a 325
326 if (ctx->alg_type == SEC_AEAD) {
327 ret = sec_alloc_mac_resource(dev, res);
328 if (ret)
2514f559 329 goto alloc_fail;
2f072d75 330 }
331 if (ctx->pbuf_supported) {
332 ret = sec_alloc_pbuf_resource(dev, res);
333 if (ret) {
334 dev_err(dev, "fail to alloc pbuf dma resource!\n");
335 goto alloc_fail;
336 }
337 }
338
339 return 0;
2514f559 340alloc_fail:
341 sec_free_civ_resource(dev, res);
342
343 return ret;
344}
345
346static void sec_alg_resource_free(struct sec_ctx *ctx,
347 struct sec_qp_ctx *qp_ctx)
348{
349 struct device *dev = SEC_CTX_DEV(ctx);
350
351 sec_free_civ_resource(dev, qp_ctx->res);
2f072d75 352
353 if (ctx->pbuf_supported)
354 sec_free_pbuf_resource(dev, qp_ctx->res);
355 if (ctx->alg_type == SEC_AEAD)
356 sec_free_mac_resource(dev, qp_ctx->res);
357}
358
359static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
360 int qp_ctx_id, int alg_type)
361{
362 struct device *dev = SEC_CTX_DEV(ctx);
363 struct sec_qp_ctx *qp_ctx;
364 struct hisi_qp *qp;
365 int ret = -ENOMEM;
366
416d8220 367 qp_ctx = &ctx->qp_ctx[qp_ctx_id];
0b5e43bc 368 qp = ctx->qps[qp_ctx_id];
369 qp->req_type = 0;
370 qp->qp_ctx = qp_ctx;
371 qp->req_cb = sec_req_cb;
372 qp_ctx->qp = qp;
373 qp_ctx->ctx = ctx;
374
375 mutex_init(&qp_ctx->req_lock);
376 atomic_set(&qp_ctx->pending_reqs, 0);
377 idr_init(&qp_ctx->req_idr);
378
379 qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
380 SEC_SGL_SGE_NR);
8a6b8f4d 381 if (IS_ERR(qp_ctx->c_in_pool)) {
416d8220 382 dev_err(dev, "fail to create sgl pool for input!\n");
7c7d902a 383 goto err_destroy_idr;
384 }
385
386 qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
387 SEC_SGL_SGE_NR);
8a6b8f4d 388 if (IS_ERR(qp_ctx->c_out_pool)) {
389 dev_err(dev, "fail to create sgl pool for output!\n");
390 goto err_free_c_in_pool;
391 }
392
7c7d902a 393 ret = sec_alg_resource_alloc(ctx, qp_ctx);
394 if (ret)
395 goto err_free_c_out_pool;
396
397 ret = hisi_qm_start_qp(qp, 0);
398 if (ret < 0)
399 goto err_queue_free;
400
401 return 0;
402
403err_queue_free:
7c7d902a 404 sec_alg_resource_free(ctx, qp_ctx);
405err_free_c_out_pool:
406 hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
407err_free_c_in_pool:
408 hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
409err_destroy_idr:
410 idr_destroy(&qp_ctx->req_idr);
411
412 return ret;
413}
414
415static void sec_release_qp_ctx(struct sec_ctx *ctx,
416 struct sec_qp_ctx *qp_ctx)
417{
418 struct device *dev = SEC_CTX_DEV(ctx);
419
420 hisi_qm_stop_qp(qp_ctx->qp);
7c7d902a 421 sec_alg_resource_free(ctx, qp_ctx);
422
423 hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
424 hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
425
426 idr_destroy(&qp_ctx->req_idr);
427}
428
473a0f96 429static int sec_ctx_base_init(struct sec_ctx *ctx)
416d8220 430{
416d8220 431 struct sec_dev *sec;
432 int i, ret;
433
434 ctx->qps = sec_create_qps();
435 if (!ctx->qps) {
436 pr_err("Can not create sec qps!\n");
437 return -ENODEV;
438 }
439
440 sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
416d8220 441 ctx->sec = sec;
a718cfce 442 ctx->hlf_q_num = sec->ctx_q_num >> 1;
416d8220 443
444 ctx->pbuf_supported = ctx->sec->iommu_used;
445
446 /* Half of the queue depth is used as the fake-busy request limit. */
a718cfce 447 ctx->fake_req_limit = QM_Q_DEPTH >> 1;
448 ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
449 GFP_KERNEL);
450 if (!ctx->qp_ctx)
451 return -ENOMEM;
452
453 for (i = 0; i < sec->ctx_q_num; i++) {
473a0f96 454 ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0);
455 if (ret)
456 goto err_sec_release_qp_ctx;
457 }
458
416d8220 459 return 0;
460err_sec_release_qp_ctx:
461 for (i = i - 1; i >= 0; i--)
462 sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
463
0b5e43bc 464 sec_destroy_qps(ctx->qps, sec->ctx_q_num);
465 kfree(ctx->qp_ctx);
466 return ret;
467}
468
473a0f96 469static void sec_ctx_base_uninit(struct sec_ctx *ctx)
416d8220 470{
473a0f96 471 int i;
472
473 for (i = 0; i < ctx->sec->ctx_q_num; i++)
474 sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
475
0b5e43bc 476 sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num);
477 kfree(ctx->qp_ctx);
478}
479
480static int sec_cipher_init(struct sec_ctx *ctx)
481{
482 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
483
484 c_ctx->c_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
485 &c_ctx->c_key_dma, GFP_KERNEL);
486 if (!c_ctx->c_key)
487 return -ENOMEM;
488
489 return 0;
490}
491
492static void sec_cipher_uninit(struct sec_ctx *ctx)
493{
494 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
495
496 memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
497 dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
498 c_ctx->c_key, c_ctx->c_key_dma);
499}
500
501static int sec_auth_init(struct sec_ctx *ctx)
502{
503 struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
504
505 a_ctx->a_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
506 &a_ctx->a_key_dma, GFP_KERNEL);
507 if (!a_ctx->a_key)
508 return -ENOMEM;
509
510 return 0;
511}
512
513static void sec_auth_uninit(struct sec_ctx *ctx)
514{
515 struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
516
517 memzero_explicit(a_ctx->a_key, SEC_MAX_KEY_SIZE);
518 dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
519 a_ctx->a_key, a_ctx->a_key_dma);
520}
521
522static int sec_skcipher_init(struct crypto_skcipher *tfm)
523{
524 struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
525 int ret;
526
2f072d75 527 ctx->alg_type = SEC_SKCIPHER;
528 crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
529 ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
530 if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
531 dev_err(SEC_CTX_DEV(ctx), "get error skcipher iv size!\n");
532 return -EINVAL;
533 }
534
535 ret = sec_ctx_base_init(ctx);
536 if (ret)
537 return ret;
538
539 ret = sec_cipher_init(ctx);
540 if (ret)
541 goto err_cipher_init;
542
543 return 0;
544err_cipher_init:
545 sec_ctx_base_uninit(ctx);
546
547 return ret;
548}
549
550static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
551{
552 struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
553
554 sec_cipher_uninit(ctx);
555 sec_ctx_base_uninit(ctx);
556}
557
558static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx,
559 const u32 keylen,
560 const enum sec_cmode c_mode)
561{
562 switch (keylen) {
563 case SEC_DES3_2KEY_SIZE:
564 c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
565 break;
566 case SEC_DES3_3KEY_SIZE:
567 c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
568 break;
569 default:
570 return -EINVAL;
571 }
572
573 return 0;
574}
575
576static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
577 const u32 keylen,
578 const enum sec_cmode c_mode)
579{
580 if (c_mode == SEC_CMODE_XTS) {
581 switch (keylen) {
582 case SEC_XTS_MIN_KEY_SIZE:
583 c_ctx->c_key_len = SEC_CKEY_128BIT;
584 break;
585 case SEC_XTS_MAX_KEY_SIZE:
586 c_ctx->c_key_len = SEC_CKEY_256BIT;
587 break;
588 default:
589 pr_err("hisi_sec2: xts mode key error!\n");
590 return -EINVAL;
591 }
592 } else {
593 switch (keylen) {
594 case AES_KEYSIZE_128:
595 c_ctx->c_key_len = SEC_CKEY_128BIT;
596 break;
597 case AES_KEYSIZE_192:
598 c_ctx->c_key_len = SEC_CKEY_192BIT;
599 break;
600 case AES_KEYSIZE_256:
601 c_ctx->c_key_len = SEC_CKEY_256BIT;
602 break;
603 default:
604 pr_err("hisi_sec2: aes key error!\n");
605 return -EINVAL;
606 }
607 }
608
609 return 0;
610}
611
612static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
613 const u32 keylen, const enum sec_calg c_alg,
614 const enum sec_cmode c_mode)
615{
616 struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
617 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
618 int ret;
619
620 if (c_mode == SEC_CMODE_XTS) {
621 ret = xts_verify_key(tfm, key, keylen);
622 if (ret) {
623 dev_err(SEC_CTX_DEV(ctx), "xts mode key err!\n");
624 return ret;
625 }
626 }
627
628 c_ctx->c_alg = c_alg;
629 c_ctx->c_mode = c_mode;
630
631 switch (c_alg) {
632 case SEC_CALG_3DES:
633 ret = sec_skcipher_3des_setkey(c_ctx, keylen, c_mode);
634 break;
635 case SEC_CALG_AES:
636 case SEC_CALG_SM4:
637 ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
638 break;
639 default:
640 return -EINVAL;
641 }
642
643 if (ret) {
644 dev_err(SEC_CTX_DEV(ctx), "set sec key err!\n");
645 return ret;
646 }
647
648 memcpy(c_ctx->c_key, key, keylen);
649
650 return 0;
651}
652
653#define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode) \
654static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
655 u32 keylen) \
656{ \
657 return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode); \
658}
659
660GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
661GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
662GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)
663
664GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
665GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
666
667GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
668GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
669
670static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
671 struct scatterlist *src)
672{
673 struct aead_request *aead_req = req->aead_req.aead_req;
674 struct sec_cipher_req *c_req = &req->c_req;
675 struct sec_qp_ctx *qp_ctx = req->qp_ctx;
676 struct device *dev = SEC_CTX_DEV(ctx);
677 int copy_size, pbuf_length;
678 int req_id = req->req_id;
679
680 if (ctx->alg_type == SEC_AEAD)
681 copy_size = aead_req->cryptlen + aead_req->assoclen;
682 else
683 copy_size = c_req->c_len;
684
685 pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
686 qp_ctx->res[req_id].pbuf,
687 copy_size);
688
689 if (unlikely(pbuf_length != copy_size)) {
690 dev_err(dev, "copy src data to pbuf error!\n");
691 return -EINVAL;
692 }
693
694 c_req->c_in_dma = qp_ctx->res[req_id].pbuf_dma;
695
696 if (!c_req->c_in_dma) {
697 dev_err(dev, "fail to set pbuffer address!\n");
698 return -ENOMEM;
699 }
700
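	/* With a pbuf the operation runs in place: output reuses the input buffer. */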
701 c_req->c_out_dma = c_req->c_in_dma;
702
703 return 0;
704}
705
706static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
707 struct scatterlist *dst)
708{
709 struct aead_request *aead_req = req->aead_req.aead_req;
710 struct sec_cipher_req *c_req = &req->c_req;
711 struct sec_qp_ctx *qp_ctx = req->qp_ctx;
712 struct device *dev = SEC_CTX_DEV(ctx);
713 int copy_size, pbuf_length;
714 int req_id = req->req_id;
715
716 if (ctx->alg_type == SEC_AEAD)
717 copy_size = c_req->c_len + aead_req->assoclen;
718 else
719 copy_size = c_req->c_len;
720
721 pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
722 qp_ctx->res[req_id].pbuf,
723 copy_size);
724
725 if (unlikely(pbuf_length != copy_size))
726 dev_err(dev, "copy pbuf data to dst error!\n");
727
728}
729
2514f559 730static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
a181647c 731 struct scatterlist *src, struct scatterlist *dst)
732{
733 struct sec_cipher_req *c_req = &req->c_req;
2514f559 734 struct sec_aead_req *a_req = &req->aead_req;
416d8220 735 struct sec_qp_ctx *qp_ctx = req->qp_ctx;
736 struct sec_alg_res *res = &qp_ctx->res[req->req_id];
737 struct device *dev = SEC_CTX_DEV(ctx);
738 int ret;
739
740 if (req->use_pbuf) {
741 ret = sec_cipher_pbuf_map(ctx, req, src);
742 c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
743 c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
744 if (ctx->alg_type == SEC_AEAD) {
745 a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET;
746 a_req->out_mac_dma = res->pbuf_dma +
747 SEC_PBUF_MAC_OFFSET;
748 }
2514f559 749
750 return ret;
751 }
752 c_req->c_ivin = res->c_ivin;
753 c_req->c_ivin_dma = res->c_ivin_dma;
754 if (ctx->alg_type == SEC_AEAD) {
755 a_req->out_mac = res->out_mac;
756 a_req->out_mac_dma = res->out_mac_dma;
757 }
758
759 c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
760 qp_ctx->c_in_pool,
761 req->req_id,
762 &c_req->c_in_dma);
763
764 if (IS_ERR(c_req->c_in)) {
765 dev_err(dev, "fail to dma map input sgl buffers!\n");
766 return PTR_ERR(c_req->c_in);
767 }
768
769 if (dst == src) {
770 c_req->c_out = c_req->c_in;
771 c_req->c_out_dma = c_req->c_in_dma;
772 } else {
773 c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
774 qp_ctx->c_out_pool,
775 req->req_id,
776 &c_req->c_out_dma);
777
778 if (IS_ERR(c_req->c_out)) {
779 dev_err(dev, "fail to dma map output sgl buffers!\n");
780 hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
781 return PTR_ERR(c_req->c_out);
782 }
783 }
784
785 return 0;
786}
787
2514f559 788static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
789 struct scatterlist *src, struct scatterlist *dst)
790{
791 struct sec_cipher_req *c_req = &req->c_req;
792 struct device *dev = SEC_CTX_DEV(ctx);
793
794 if (req->use_pbuf) {
795 sec_cipher_pbuf_unmap(ctx, req, dst);
796 } else {
797 if (dst != src)
798 hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
a181647c 799
800 hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
801 }
802}
803
804static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
805{
a181647c 806 struct skcipher_request *sq = req->c_req.sk_req;
416d8220 807
2514f559 808 return sec_cipher_map(ctx, req, sq->src, sq->dst);
809}
810
811static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
812{
2514f559 813 struct skcipher_request *sq = req->c_req.sk_req;
416d8220 814
2514f559 815 sec_cipher_unmap(ctx, req, sq->src, sq->dst);
816}
817
818static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx,
819 struct crypto_authenc_keys *keys)
820{
821 switch (keys->enckeylen) {
822 case AES_KEYSIZE_128:
823 c_ctx->c_key_len = SEC_CKEY_128BIT;
824 break;
825 case AES_KEYSIZE_192:
826 c_ctx->c_key_len = SEC_CKEY_192BIT;
827 break;
828 case AES_KEYSIZE_256:
829 c_ctx->c_key_len = SEC_CKEY_256BIT;
830 break;
831 default:
832 pr_err("hisi_sec2: aead aes key error!\n");
833 return -EINVAL;
834 }
835 memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen);
836
837 return 0;
838}
839
840static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
841 struct crypto_authenc_keys *keys)
842{
843 struct crypto_shash *hash_tfm = ctx->hash_tfm;
844 int blocksize, ret;
845
846 if (!keys->authkeylen) {
847 pr_err("hisi_sec2: aead auth key error!\n");
848 return -EINVAL;
849 }
850
851 blocksize = crypto_shash_blocksize(hash_tfm);
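	/* Per the HMAC convention, keys longer than the hash block size are digested first. */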
852 if (keys->authkeylen > blocksize) {
853 ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey,
854 keys->authkeylen, ctx->a_key);
2f072d75 855 if (ret) {
2203d3f7 856 pr_err("hisi_sec2: aead auth digest error!\n");
857 return -EINVAL;
858 }
859 ctx->a_key_len = blocksize;
860 } else {
861 memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
862 ctx->a_key_len = keys->authkeylen;
863 }
864
865 return 0;
866}
867
868static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
869 const u32 keylen, const enum sec_hash_alg a_alg,
870 const enum sec_calg c_alg,
871 const enum sec_mac_len mac_len,
872 const enum sec_cmode c_mode)
873{
874 struct sec_ctx *ctx = crypto_aead_ctx(tfm);
875 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
876 struct crypto_authenc_keys keys;
877 int ret;
878
879 ctx->a_ctx.a_alg = a_alg;
880 ctx->c_ctx.c_alg = c_alg;
881 ctx->a_ctx.mac_len = mac_len;
882 c_ctx->c_mode = c_mode;
883
884 if (crypto_authenc_extractkeys(&keys, key, keylen))
885 goto bad_key;
886
887 ret = sec_aead_aes_set_key(c_ctx, &keys);
888 if (ret) {
889 dev_err(SEC_CTX_DEV(ctx), "set sec cipher key err!\n");
890 goto bad_key;
891 }
892
893 ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
894 if (ret) {
895 dev_err(SEC_CTX_DEV(ctx), "set sec auth key err!\n");
896 goto bad_key;
897 }
898
899 return 0;
900bad_key:
901 memzero_explicit(&keys, sizeof(struct crypto_authenc_keys));
902
903 return -EINVAL;
904}
905
906
907#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode) \
908static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, \
909 u32 keylen) \
910{ \
911 return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
912}
913
914GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
915 SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
916GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
917 SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
918GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
919 SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)
920
921static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
922{
923 struct aead_request *aq = req->aead_req.aead_req;
924
2514f559 925 return sec_cipher_map(ctx, req, aq->src, aq->dst);
926}
927
928static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
929{
930 struct aead_request *aq = req->aead_req.aead_req;
931
2514f559 932 sec_cipher_unmap(ctx, req, aq->src, aq->dst);
933}
934
935static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
936{
937 int ret;
938
939 ret = ctx->req_op->buf_map(ctx, req);
b9c8d897 940 if (unlikely(ret))
941 return ret;
942
943 ctx->req_op->do_transfer(ctx, req);
944
945 ret = ctx->req_op->bd_fill(ctx, req);
b9c8d897 946 if (unlikely(ret))
947 goto unmap_req_buf;
948
949 return ret;
950
951unmap_req_buf:
952 ctx->req_op->buf_unmap(ctx, req);
953
954 return ret;
955}
956
957static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
958{
959 ctx->req_op->buf_unmap(ctx, req);
960}
961
962static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
963{
964 struct skcipher_request *sk_req = req->c_req.sk_req;
2514f559 965 struct sec_cipher_req *c_req = &req->c_req;
416d8220 966
2514f559 967 memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
968}
969
970static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
971{
972 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
973 struct sec_cipher_req *c_req = &req->c_req;
974 struct sec_sqe *sec_sqe = &req->sec_sqe;
975 u8 scene, sa_type, da_type;
976 u8 bd_type, cipher;
7c7d902a 977 u8 de = 0;
978
979 memset(sec_sqe, 0, sizeof(struct sec_sqe));
980
981 sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
2514f559 982 sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
983 sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma);
984 sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
985
986 sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
987 SEC_CMODE_OFFSET);
988 sec_sqe->type2.c_alg = c_ctx->c_alg;
989 sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
990 SEC_CKEY_OFFSET);
991
992 bd_type = SEC_BD_TYPE2;
993 if (c_req->encrypt)
994 cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
995 else
996 cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
997 sec_sqe->type_cipher_auth = bd_type | cipher;
998
999 if (req->use_pbuf)
1000 sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET;
1001 else
1002 sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
1003 scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
1004 if (c_req->c_in_dma != c_req->c_out_dma)
1005 de = 0x1 << SEC_DE_OFFSET;
1006
1007 sec_sqe->sds_sa_type = (de | scene | sa_type);
1008
1009 /* Just set DST address type */
1010 if (req->use_pbuf)
1011 da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
1012 else
1013 da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
1014 sec_sqe->sdm_addr_type |= da_type;
1015
1016 sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
1017 sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);
1018
1019 return 0;
1020}
1021
2f072d75 1022static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
416d8220 1023{
2f072d75 1024 struct aead_request *aead_req = req->aead_req.aead_req;
1025 struct skcipher_request *sk_req = req->c_req.sk_req;
1026 u32 iv_size = req->ctx->c_ctx.ivsize;
1027 struct scatterlist *sgl;
2f072d75 1028 unsigned int cryptlen;
416d8220 1029 size_t sz;
2f072d75 1030 u8 *iv;
1031
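	/*
	 * For CBC the next IV is the last ciphertext block: take it from the
	 * destination when encrypting and from the source when decrypting.
	 */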
1032 if (req->c_req.encrypt)
2f072d75 1033 sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst;
416d8220 1034 else
1035 sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src;
1036
1037 if (alg_type == SEC_SKCIPHER) {
1038 iv = sk_req->iv;
1039 cryptlen = sk_req->cryptlen;
1040 } else {
1041 iv = aead_req->iv;
1042 cryptlen = aead_req->cryptlen;
1043 }
416d8220 1044
1045 sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
1046 cryptlen - iv_size);
b9c8d897 1047 if (unlikely(sz != iv_size))
1048 dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n");
1049}
1050
1051static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
1052 int err)
1053{
1054 struct skcipher_request *sk_req = req->c_req.sk_req;
1055 struct sec_qp_ctx *qp_ctx = req->qp_ctx;
1056
1057 atomic_dec(&qp_ctx->pending_reqs);
1058 sec_free_req_id(req);
1059
1060 /* The IV is output on CBC-mode encryption */
310ea0ac 1061 if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
2f072d75 1062 sec_update_iv(req, SEC_SKCIPHER);
416d8220 1063
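	/* A fake-busy request was reported as -EBUSY; signal that it has now started. */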
ca0d158d 1064 if (req->fake_busy)
1065 sk_req->base.complete(&sk_req->base, -EINPROGRESS);
1066
310ea0ac 1067 sk_req->base.complete(&sk_req->base, err);
1068}
1069
1070static void sec_aead_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
1071{
1072 struct aead_request *aead_req = req->aead_req.aead_req;
2514f559 1073 struct sec_cipher_req *c_req = &req->c_req;
2f072d75 1074
2514f559 1075 memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
1076}
1077
1078static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
1079 struct sec_req *req, struct sec_sqe *sec_sqe)
1080{
1081 struct sec_aead_req *a_req = &req->aead_req;
1082 struct sec_cipher_req *c_req = &req->c_req;
1083 struct aead_request *aq = a_req->aead_req;
1084
1085 sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);
1086
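	/* MAC and auth key lengths are programmed in SEC_SQE_LEN_RATE (4-byte) units. */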
1087 sec_sqe->type2.mac_key_alg =
1088 cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);
1089
1090 sec_sqe->type2.mac_key_alg |=
1091 cpu_to_le32((u32)((ctx->a_key_len) /
1092 SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET);
1093
1094 sec_sqe->type2.mac_key_alg |=
1095 cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);
1096
1097 sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;
1098
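	/* Encrypt: cipher then authenticate; decrypt: authenticate then decipher. */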
1099 if (dir)
1100 sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
1101 else
1102 sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;
1103
1104 sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen);
1105
1106 sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
1107
2514f559 1108 sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
1109}
1110
1111static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
1112{
1113 struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
1114 struct sec_sqe *sec_sqe = &req->sec_sqe;
1115 int ret;
1116
1117 ret = sec_skcipher_bd_fill(ctx, req);
1118 if (unlikely(ret)) {
1119 dev_err(SEC_CTX_DEV(ctx), "skcipher bd fill is error!\n");
1120 return ret;
1121 }
1122
1123 sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);
1124
1125 return 0;
1126}
1127
1128static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
1129{
1130 struct aead_request *a_req = req->aead_req.aead_req;
1131 struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
2514f559 1132 struct sec_aead_req *aead_req = &req->aead_req;
1133 struct sec_cipher_req *c_req = &req->c_req;
1134 size_t authsize = crypto_aead_authsize(tfm);
1135 struct sec_qp_ctx *qp_ctx = req->qp_ctx;
1136 size_t sz;
1137
1138 atomic_dec(&qp_ctx->pending_reqs);
1139
1140 if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
1141 sec_update_iv(req, SEC_AEAD);
1142
1143 /* Copy output mac */
1144 if (!err && c_req->encrypt) {
1145 struct scatterlist *sgl = a_req->dst;
1146
1147 sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
2514f559 1148 aead_req->out_mac,
1149 authsize, a_req->cryptlen +
1150 a_req->assoclen);
1151
1152 if (unlikely(sz != authsize)) {
1153 dev_err(SEC_CTX_DEV(req->ctx), "copy out mac err!\n");
1154 err = -EINVAL;
1155 }
1156 }
1157
1158 sec_free_req_id(req);
1159
1160 if (req->fake_busy)
1161 a_req->base.complete(&a_req->base, -EINPROGRESS);
1162
1163 a_req->base.complete(&a_req->base, err);
1164}
1165
1166static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
1167{
1168 struct sec_qp_ctx *qp_ctx = req->qp_ctx;
1169
1170 atomic_dec(&qp_ctx->pending_reqs);
1171 sec_free_req_id(req);
a181647c 1172 sec_free_queue_id(ctx, req);
1173}
1174
1175static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
1176{
1177 struct sec_qp_ctx *qp_ctx;
7c7d902a 1178 int queue_id;
1179
1180 /* To load balance */
1181 queue_id = sec_alloc_queue_id(ctx, req);
1182 qp_ctx = &ctx->qp_ctx[queue_id];
1183
1184 req->req_id = sec_alloc_req_id(req, qp_ctx);
b9c8d897 1185 if (unlikely(req->req_id < 0)) {
a181647c 1186 sec_free_queue_id(ctx, req);
1187 return req->req_id;
1188 }
1189
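	/*
	 * Past half of the queue depth the request is marked "fake busy":
	 * it is still queued, but the caller is told -EBUSY so it throttles
	 * further submissions.
	 */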
1190 if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs))
ca0d158d 1191 req->fake_busy = true;
416d8220 1192 else
ca0d158d 1193 req->fake_busy = false;
416d8220 1194
7c7d902a 1195 return 0;
1196}
1197
1198static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
1199{
2514f559 1200 struct sec_cipher_req *c_req = &req->c_req;
1201 int ret;
1202
1203 ret = sec_request_init(ctx, req);
b9c8d897 1204 if (unlikely(ret))
1205 return ret;
1206
1207 ret = sec_request_transfer(ctx, req);
b9c8d897 1208 if (unlikely(ret))
1209 goto err_uninit_req;
1210
1211 /* For CBC decryption, save the next IV (the last ciphertext block) before processing */
1212 if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
2f072d75 1213 sec_update_iv(req, ctx->alg_type);
1214
1215 ret = ctx->req_op->bd_send(ctx, req);
b9c8d897 1216 if (unlikely(ret != -EBUSY && ret != -EINPROGRESS)) {
a718cfce 1217 dev_err_ratelimited(SEC_CTX_DEV(ctx), "send sec request failed!\n");
1218 goto err_send_req;
1219 }
1220
1221 return ret;
1222
1223err_send_req:
1224 /* On failure, restore the user's original IV from c_ivin */
1225 if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
1226 if (ctx->alg_type == SEC_SKCIPHER)
2514f559 1227 memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
1228 ctx->c_ctx.ivsize);
1229 else
2514f559 1230 memcpy(req->aead_req.aead_req->iv, c_req->c_ivin,
1231 ctx->c_ctx.ivsize);
1232 }
1233
1234 sec_request_untransfer(ctx, req);
1235err_uninit_req:
1236 sec_request_uninit(ctx, req);
1237
1238 return ret;
1239}
1240
a181647c 1241static const struct sec_req_op sec_skcipher_req_ops = {
1242 .buf_map = sec_skcipher_sgl_map,
1243 .buf_unmap = sec_skcipher_sgl_unmap,
1244 .do_transfer = sec_skcipher_copy_iv,
1245 .bd_fill = sec_skcipher_bd_fill,
1246 .bd_send = sec_bd_send,
1247 .callback = sec_skcipher_callback,
1248 .process = sec_process,
1249};
1250
1251static const struct sec_req_op sec_aead_req_ops = {
1252 .buf_map = sec_aead_sgl_map,
1253 .buf_unmap = sec_aead_sgl_unmap,
1254 .do_transfer = sec_aead_copy_iv,
1255 .bd_fill = sec_aead_bd_fill,
1256 .bd_send = sec_bd_send,
1257 .callback = sec_aead_callback,
1258 .process = sec_process,
1259};
1260
1261static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
1262{
1263 struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
1264
a181647c 1265 ctx->req_op = &sec_skcipher_req_ops;
1266
1267 return sec_skcipher_init(tfm);
1268}
1269
1270static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
1271{
a181647c 1272 sec_skcipher_uninit(tfm);
1273}
1274
1275static int sec_aead_init(struct crypto_aead *tfm)
1276{
1277 struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1278 int ret;
1279
1280 crypto_aead_set_reqsize(tfm, sizeof(struct sec_req));
1281 ctx->alg_type = SEC_AEAD;
1282 ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
1283 if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
1284 dev_err(SEC_CTX_DEV(ctx), "get error aead iv size!\n");
1285 return -EINVAL;
1286 }
1287
1288 ctx->req_op = &sec_aead_req_ops;
1289 ret = sec_ctx_base_init(ctx);
1290 if (ret)
1291 return ret;
1292
1293 ret = sec_auth_init(ctx);
1294 if (ret)
1295 goto err_auth_init;
1296
1297 ret = sec_cipher_init(ctx);
1298 if (ret)
1299 goto err_cipher_init;
1300
1301 return ret;
1302
1303err_cipher_init:
1304 sec_auth_uninit(ctx);
1305err_auth_init:
1306 sec_ctx_base_uninit(ctx);
1307
1308 return ret;
1309}
1310
1311static void sec_aead_exit(struct crypto_aead *tfm)
1312{
1313 struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1314
1315 sec_cipher_uninit(ctx);
1316 sec_auth_uninit(ctx);
1317 sec_ctx_base_uninit(ctx);
1318}
1319
1320static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
1321{
1322 struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1323 struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
1324 int ret;
1325
1326 ret = sec_aead_init(tfm);
1327 if (ret) {
1328 pr_err("hisi_sec2: aead init error!\n");
1329 return ret;
1330 }
1331
1332 auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
1333 if (IS_ERR(auth_ctx->hash_tfm)) {
1334 dev_err(SEC_CTX_DEV(ctx), "aead alloc shash error!\n");
1335 sec_aead_exit(tfm);
1336 return PTR_ERR(auth_ctx->hash_tfm);
1337 }
1338
1339 return 0;
1340}
1341
1342static void sec_aead_ctx_exit(struct crypto_aead *tfm)
1343{
1344 struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1345
1346 crypto_free_shash(ctx->a_ctx.hash_tfm);
1347 sec_aead_exit(tfm);
1348}
1349
1350static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm)
1351{
1352 return sec_aead_ctx_init(tfm, "sha1");
1353}
1354
1355static int sec_aead_sha256_ctx_init(struct crypto_aead *tfm)
1356{
1357 return sec_aead_ctx_init(tfm, "sha256");
1358}
1359
1360static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
1361{
1362 return sec_aead_ctx_init(tfm, "sha512");
1363}
1364
d6de2a59 1365static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
416d8220 1366{
d6de2a59 1367 struct skcipher_request *sk_req = sreq->c_req.sk_req;
416d8220 1368 struct device *dev = SEC_CTX_DEV(ctx);
d6de2a59 1369 u8 c_alg = ctx->c_ctx.c_alg;
416d8220 1370
b9c8d897 1371 if (unlikely(!sk_req->src || !sk_req->dst)) {
1372 dev_err(dev, "skcipher input param error!\n");
1373 return -EINVAL;
1374 }
d6de2a59 1375 sreq->c_req.c_len = sk_req->cryptlen;
1376
1377 if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
1378 sreq->use_pbuf = true;
1379 else
1380 sreq->use_pbuf = false;
1381
416d8220 1382 if (c_alg == SEC_CALG_3DES) {
b9c8d897 1383 if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
1384 dev_err(dev, "skcipher 3des input length error!\n");
1385 return -EINVAL;
1386 }
1387 return 0;
1388 } else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
b9c8d897 1389 if (unlikely(sk_req->cryptlen & (AES_BLOCK_SIZE - 1))) {
1390 dev_err(dev, "skcipher aes input length error!\n");
1391 return -EINVAL;
1392 }
1393 return 0;
1394 }
1395
1396 dev_err(dev, "skcipher algorithm error!\n");
1397 return -EINVAL;
1398}
1399
1400static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
1401{
1402 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
1403 struct sec_req *req = skcipher_request_ctx(sk_req);
1404 struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
1405 int ret;
1406
1407 if (!sk_req->cryptlen)
1408 return 0;
1409
1410 req->c_req.sk_req = sk_req;
1411 req->c_req.encrypt = encrypt;
1412 req->ctx = ctx;
1413
1414 ret = sec_skcipher_param_check(ctx, req);
1415 if (unlikely(ret))
1416 return -EINVAL;
1417
1418 return ctx->req_op->process(ctx, req);
1419}
1420
1421static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
1422{
1423 return sec_skcipher_crypto(sk_req, true);
1424}
1425
1426static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
1427{
1428 return sec_skcipher_crypto(sk_req, false);
1429}
1430
1431#define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \
1432 sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\
1433{\
1434 .base = {\
1435 .cra_name = sec_cra_name,\
1436 .cra_driver_name = "hisi_sec_"sec_cra_name,\
1437 .cra_priority = SEC_PRIORITY,\
1438 .cra_flags = CRYPTO_ALG_ASYNC,\
1439 .cra_blocksize = blk_size,\
1440 .cra_ctxsize = sizeof(struct sec_ctx),\
1441 .cra_module = THIS_MODULE,\
1442 },\
1443 .init = ctx_init,\
1444 .exit = ctx_exit,\
1445 .setkey = sec_set_key,\
1446 .decrypt = sec_skcipher_decrypt,\
1447 .encrypt = sec_skcipher_encrypt,\
1448 .min_keysize = sec_min_key_size,\
1449 .max_keysize = sec_max_key_size,\
1450 .ivsize = iv_size,\
1451},
1452
1453#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
1454 max_key_size, blk_size, iv_size) \
1455 SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
1456 sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)
1457
a181647c 1458static struct skcipher_alg sec_skciphers[] = {
1459 SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
1460 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
1461 AES_BLOCK_SIZE, 0)
1462
1463 SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc,
1464 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
1465 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
1466
1467 SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts,
1468 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE,
1469 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
1470
1471 SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
1472 SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
1473 DES3_EDE_BLOCK_SIZE, 0)
1474
1475 SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
1476 SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
1477 DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)
1478
1479 SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
1480 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE,
1481 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
1482
1483 SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
1484 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
1485 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
1486};
1487
1488static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
1489{
1490 u8 c_alg = ctx->c_ctx.c_alg;
1491 struct aead_request *req = sreq->aead_req.aead_req;
1492 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1493 size_t authsize = crypto_aead_authsize(tfm);
1494
1495 if (unlikely(!req->src || !req->dst || !req->cryptlen ||
1496 req->assoclen > SEC_MAX_AAD_LEN)) {
1497 dev_err(SEC_CTX_DEV(ctx), "aead input param error!\n");
1498 return -EINVAL;
1499 }
1500
1501 if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
1502 SEC_PBUF_SZ)
1503 sreq->use_pbuf = true;
1504 else
1505 sreq->use_pbuf = false;
1506
1507 /* Support AES only */
1508 if (unlikely(c_alg != SEC_CALG_AES)) {
1509 dev_err(SEC_CTX_DEV(ctx), "aead crypto alg error!\n");
1510 return -EINVAL;
1511
1512 }
1513 if (sreq->c_req.encrypt)
1514 sreq->c_req.c_len = req->cryptlen;
1515 else
1516 sreq->c_req.c_len = req->cryptlen - authsize;
1517
1518 if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
1519 dev_err(SEC_CTX_DEV(ctx), "aead crypto length error!\n");
1520 return -EINVAL;
1521 }
1522
1523 return 0;
1524}
1525
1526static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
1527{
1528 struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
1529 struct sec_req *req = aead_request_ctx(a_req);
1530 struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1531 int ret;
1532
1533 req->aead_req.aead_req = a_req;
1534 req->c_req.encrypt = encrypt;
1535 req->ctx = ctx;
1536
1537 ret = sec_aead_param_check(ctx, req);
1538 if (unlikely(ret))
1539 return -EINVAL;
1540
1541 return ctx->req_op->process(ctx, req);
1542}
1543
1544static int sec_aead_encrypt(struct aead_request *a_req)
1545{
1546 return sec_aead_crypto(a_req, true);
1547}
1548
1549static int sec_aead_decrypt(struct aead_request *a_req)
1550{
1551 return sec_aead_crypto(a_req, false);
1552}
1553
1554#define SEC_AEAD_GEN_ALG(sec_cra_name, sec_set_key, ctx_init,\
1555 ctx_exit, blk_size, iv_size, max_authsize)\
1556{\
1557 .base = {\
1558 .cra_name = sec_cra_name,\
1559 .cra_driver_name = "hisi_sec_"sec_cra_name,\
1560 .cra_priority = SEC_PRIORITY,\
1561 .cra_flags = CRYPTO_ALG_ASYNC,\
1562 .cra_blocksize = blk_size,\
1563 .cra_ctxsize = sizeof(struct sec_ctx),\
1564 .cra_module = THIS_MODULE,\
1565 },\
1566 .init = ctx_init,\
1567 .exit = ctx_exit,\
1568 .setkey = sec_set_key,\
1569 .decrypt = sec_aead_decrypt,\
1570 .encrypt = sec_aead_encrypt,\
1571 .ivsize = iv_size,\
1572 .maxauthsize = max_authsize,\
1573}
1574
1575#define SEC_AEAD_ALG(algname, keyfunc, aead_init, blksize, ivsize, authsize)\
1576 SEC_AEAD_GEN_ALG(algname, keyfunc, aead_init,\
1577 sec_aead_ctx_exit, blksize, ivsize, authsize)
1578
1579static struct aead_alg sec_aeads[] = {
1580 SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))",
1581 sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init,
1582 AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
1583
1584 SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))",
1585 sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init,
1586 AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
1587
1588 SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))",
1589 sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init,
1590 AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
1591};
1592
1593int sec_register_to_crypto(void)
1594{
1595 int ret = 0;
1596
1597 /* Avoid registering the algorithms more than once */
2f072d75 1598 if (atomic_add_return(1, &sec_active_devs) == 1) {
1599 ret = crypto_register_skciphers(sec_skciphers,
1600 ARRAY_SIZE(sec_skciphers));
1601 if (ret)
1602 return ret;
1603
1604 ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
1605 if (ret)
1606 goto reg_aead_fail;
1607 }
1608
1609 return ret;
1610
1611reg_aead_fail:
1612 crypto_unregister_skciphers(sec_skciphers, ARRAY_SIZE(sec_skciphers));
1613
1614 return ret;
1615}
1616
1617void sec_unregister_from_crypto(void)
1618{
2f072d75 1619 if (atomic_sub_return(1, &sec_active_devs) == 0) {
1620 crypto_unregister_skciphers(sec_skciphers,
1621 ARRAY_SIZE(sec_skciphers));
1622 crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
1623 }
416d8220 1624}