drivers/crypto/hisilicon/hpre/hpre_crypto.c
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2019 HiSilicon Limited. */
3#include <crypto/akcipher.h>
4#include <crypto/curve25519.h>
5#include <crypto/dh.h>
6#include <crypto/ecc_curve.h>
7#include <crypto/ecdh.h>
8#include <crypto/rng.h>
9#include <crypto/internal/akcipher.h>
10#include <crypto/internal/kpp.h>
11#include <crypto/internal/rsa.h>
12#include <crypto/kpp.h>
13#include <crypto/scatterwalk.h>
14#include <linux/dma-mapping.h>
15#include <linux/fips.h>
16#include <linux/module.h>
17#include <linux/time.h>
18#include "hpre.h"
19
20struct hpre_ctx;
21
22#define HPRE_CRYPTO_ALG_PRI 1000
23#define HPRE_ALIGN_SZ 64
24#define HPRE_BITS_2_BYTES_SHIFT 3
25#define HPRE_RSA_512BITS_KSZ 64
26#define HPRE_RSA_1536BITS_KSZ 192
27#define HPRE_CRT_PRMS 5
28#define HPRE_CRT_Q 2
29#define HPRE_CRT_P 3
30#define HPRE_CRT_INV 4
31#define HPRE_DH_G_FLAG 0x02
32#define HPRE_TRY_SEND_TIMES 100
33#define HPRE_INVLD_REQ_ID (-1)
34
35#define HPRE_SQE_ALG_BITS 5
36#define HPRE_SQE_DONE_SHIFT 30
37#define HPRE_DH_MAX_P_SZ 512
38
39#define HPRE_DFX_SEC_TO_US 1000000
40#define HPRE_DFX_US_TO_NS 1000
41
42/* due to nist p521 */
43#define HPRE_ECC_MAX_KSZ 66
44
45/* size in bytes of the n prime */
46#define HPRE_ECC_NIST_P192_N_SIZE 24
47#define HPRE_ECC_NIST_P256_N_SIZE 32
48#define HPRE_ECC_NIST_P384_N_SIZE 48
49
50/* size in bytes */
51#define HPRE_ECC_HW256_KSZ_B 32
52#define HPRE_ECC_HW384_KSZ_B 48
53
54typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
55
56struct hpre_rsa_ctx {
57 /* low address: e--->n */
58 char *pubkey;
59 dma_addr_t dma_pubkey;
60
61 /* low address: d--->n */
62 char *prikey;
63 dma_addr_t dma_prikey;
64
65 /* low address: dq->dp->q->p->qinv */
66 char *crt_prikey;
67 dma_addr_t dma_crt_prikey;
68
69 struct crypto_akcipher *soft_tfm;
70};
71
72struct hpre_dh_ctx {
73 /*
74 * If base is g we compute the public key
75 * ya = g^xa mod p; [RFC2631 sec 2.1.1]
76 * else if base is the counterpart public key we
77 * compute the shared secret
78 * ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
79 * low address: d--->n, please refer to Hisilicon HPRE UM
80 */
81 char *xa_p;
82 dma_addr_t dma_xa_p;
83
84 char *g; /* m */
85 dma_addr_t dma_g;
86};
87
88struct hpre_ecdh_ctx {
89 /* low address: p->a->k->b */
90 unsigned char *p;
91 dma_addr_t dma_p;
92
93 /* low address: x->y */
94 unsigned char *g;
95 dma_addr_t dma_g;
96};
97
98struct hpre_curve25519_ctx {
99 /* low address: p->a->k */
100 unsigned char *p;
101 dma_addr_t dma_p;
102
103 /* gx coordinate */
104 unsigned char *g;
105 dma_addr_t dma_g;
106};
107
108struct hpre_ctx {
109 struct hisi_qp *qp;
110 struct device *dev;
111 struct hpre_asym_request **req_list;
112 struct hpre *hpre;
113 spinlock_t req_lock;
114 unsigned int key_sz;
115 bool crt_g2_mode;
116 struct idr req_idr;
117 union {
118 struct hpre_rsa_ctx rsa;
119 struct hpre_dh_ctx dh;
120 struct hpre_ecdh_ctx ecdh;
121 struct hpre_curve25519_ctx curve25519;
122 };
123 /* for ecc algorithms */
124 unsigned int curve_id;
125};
126
127struct hpre_asym_request {
128 char *src;
129 char *dst;
130 struct hpre_sqe req;
131 struct hpre_ctx *ctx;
132 union {
133 struct akcipher_request *rsa;
134 struct kpp_request *dh;
135 struct kpp_request *ecdh;
136 struct kpp_request *curve25519;
137 } areq;
138 int err;
139 int req_id;
140 hpre_cb cb;
141 struct timespec64 req_time;
142};
143
144static int hpre_alloc_req_id(struct hpre_ctx *ctx)
145{
146 unsigned long flags;
147 int id;
148
149 spin_lock_irqsave(&ctx->req_lock, flags);
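 /* req_lock is held with IRQs disabled here, so the IDR node allocation below must not sleep */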
150 id = idr_alloc(&ctx->req_idr, NULL, 0, QM_Q_DEPTH, GFP_ATOMIC);
151 spin_unlock_irqrestore(&ctx->req_lock, flags);
152
153 return id;
154}
155
156static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
157{
158 unsigned long flags;
159
160 spin_lock_irqsave(&ctx->req_lock, flags);
161 idr_remove(&ctx->req_idr, req_id);
162 spin_unlock_irqrestore(&ctx->req_lock, flags);
163}
164
165static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
166{
167 struct hpre_ctx *ctx;
168 struct hpre_dfx *dfx;
169 int id;
170
171 ctx = hpre_req->ctx;
172 id = hpre_alloc_req_id(ctx);
173 if (unlikely(id < 0))
174 return -EINVAL;
175
176 ctx->req_list[id] = hpre_req;
177 hpre_req->req_id = id;
178
179 dfx = ctx->hpre->debug.dfx;
180 if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value))
181 ktime_get_ts64(&hpre_req->req_time);
182
183 return id;
184}
185
186static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
187{
188 struct hpre_ctx *ctx = hpre_req->ctx;
189 int id = hpre_req->req_id;
190
191 if (hpre_req->req_id >= 0) {
192 hpre_req->req_id = HPRE_INVLD_REQ_ID;
193 ctx->req_list[id] = NULL;
194 hpre_free_req_id(ctx, id);
195 }
196}
197
198static struct hisi_qp *hpre_get_qp_and_start(u8 type)
199{
200 struct hisi_qp *qp;
201 int ret;
202
203 qp = hpre_create_qp(type);
204 if (!qp) {
205 pr_err("Can not create hpre qp!\n");
206 return ERR_PTR(-ENODEV);
207 }
208
209 ret = hisi_qm_start_qp(qp, 0);
210 if (ret < 0) {
211 hisi_qm_free_qps(&qp, 1);
212 pci_err(qp->qm->pdev, "Can not start qp!\n");
213 return ERR_PTR(-EINVAL);
214 }
215
216 return qp;
217}
218
219static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
220 struct scatterlist *data, unsigned int len,
221 int is_src, dma_addr_t *tmp)
222{
223 struct device *dev = hpre_req->ctx->dev;
224 enum dma_data_direction dma_dir;
225
226 if (is_src) {
227 hpre_req->src = NULL;
228 dma_dir = DMA_TO_DEVICE;
229 } else {
230 hpre_req->dst = NULL;
231 dma_dir = DMA_FROM_DEVICE;
232 }
233 *tmp = dma_map_single(dev, sg_virt(data), len, dma_dir);
234 if (unlikely(dma_mapping_error(dev, *tmp))) {
235 dev_err(dev, "dma map data err!\n");
236 return -ENOMEM;
237 }
238
239 return 0;
240}
241
242static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
243 struct scatterlist *data, unsigned int len,
244 int is_src, dma_addr_t *tmp)
245{
246 struct hpre_ctx *ctx = hpre_req->ctx;
247 struct device *dev = ctx->dev;
248 void *ptr;
249 int shift;
250
251 shift = ctx->key_sz - len;
252 if (unlikely(shift < 0))
253 return -EINVAL;
254
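 /* may be reached from softirq (atomic) context, so a sleeping GFP_KERNEL allocation must not be used here */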
255 ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_ATOMIC);
256 if (unlikely(!ptr))
257 return -ENOMEM;
258
259 if (is_src) {
260 scatterwalk_map_and_copy(ptr + shift, data, 0, len, 0);
261 hpre_req->src = ptr;
262 } else {
263 hpre_req->dst = ptr;
264 }
265
266 return 0;
267}
268
269static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
270 struct scatterlist *data, unsigned int len,
271 int is_src, int is_dh)
272{
273 struct hpre_sqe *msg = &hpre_req->req;
274 struct hpre_ctx *ctx = hpre_req->ctx;
275 dma_addr_t tmp = 0;
276 int ret;
277
278 /* when the data is dh's source, we should format it */
279 if ((sg_is_last(data) && len == ctx->key_sz) &&
280 ((is_dh && !is_src) || !is_dh))
281 ret = hpre_get_data_dma_addr(hpre_req, data, len, is_src, &tmp);
282 else
283 ret = hpre_prepare_dma_buf(hpre_req, data, len, is_src, &tmp);
284
285 if (unlikely(ret))
286 return ret;
287
288 if (is_src)
289 msg->in = cpu_to_le64(tmp);
290 else
291 msg->out = cpu_to_le64(tmp);
292
293 return 0;
294}
295
296static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
297 struct hpre_asym_request *req,
298 struct scatterlist *dst,
299 struct scatterlist *src)
300{
301 struct device *dev = ctx->dev;
302 struct hpre_sqe *sqe = &req->req;
303 dma_addr_t tmp;
304
305 tmp = le64_to_cpu(sqe->in);
306 if (unlikely(dma_mapping_error(dev, tmp)))
307 return;
308
309 if (src) {
310 if (req->src)
311 dma_free_coherent(dev, ctx->key_sz, req->src, tmp);
312 else
313 dma_unmap_single(dev, tmp, ctx->key_sz, DMA_TO_DEVICE);
314 }
315
316 tmp = le64_to_cpu(sqe->out);
317 if (unlikely(dma_mapping_error(dev, tmp)))
318 return;
319
320 if (req->dst) {
321 if (dst)
322 scatterwalk_map_and_copy(req->dst, dst, 0,
323 ctx->key_sz, 1);
324 dma_free_coherent(dev, ctx->key_sz, req->dst, tmp);
325 } else {
326 dma_unmap_single(dev, tmp, ctx->key_sz, DMA_FROM_DEVICE);
327 }
328}
329
330static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
331 void **kreq)
332{
333 struct hpre_asym_request *req;
334 unsigned int err, done, alg;
335 int id;
336
337#define HPRE_NO_HW_ERR 0
338#define HPRE_HW_TASK_DONE 3
339#define HREE_HW_ERR_MASK GENMASK(10, 0)
340#define HREE_SQE_DONE_MASK GENMASK(1, 0)
341#define HREE_ALG_TYPE_MASK GENMASK(4, 0)
342 id = (int)le16_to_cpu(sqe->tag);
343 req = ctx->req_list[id];
344 hpre_rm_req_from_ctx(req);
345 *kreq = req;
346
347 err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) &
348 HREE_HW_ERR_MASK;
349
350 done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
351 HREE_SQE_DONE_MASK;
352
353 if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
354 return 0;
355
356 alg = le32_to_cpu(sqe->dw0) & HREE_ALG_TYPE_MASK;
357 dev_err_ratelimited(ctx->dev, "alg[0x%x] error: done[0x%x], etype[0x%x]\n",
358 alg, done, err);
359
360 return -EINVAL;
361}
362
363static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
364{
365 struct hpre *hpre;
366
367 if (!ctx || !qp || qlen < 0)
368 return -EINVAL;
369
370 spin_lock_init(&ctx->req_lock);
371 ctx->qp = qp;
372 ctx->dev = &qp->qm->pdev->dev;
373
374 hpre = container_of(ctx->qp->qm, struct hpre, qm);
375 ctx->hpre = hpre;
376 ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
377 if (!ctx->req_list)
378 return -ENOMEM;
379 ctx->key_sz = 0;
380 ctx->crt_g2_mode = false;
381 idr_init(&ctx->req_idr);
382
383 return 0;
384}
385
386static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
387{
388 if (is_clear_all) {
389 idr_destroy(&ctx->req_idr);
390 kfree(ctx->req_list);
391 hisi_qm_free_qps(&ctx->qp, 1);
392 }
393
394 ctx->crt_g2_mode = false;
395 ctx->key_sz = 0;
396}
397
398static bool hpre_is_bd_timeout(struct hpre_asym_request *req,
399 u64 overtime_thrhld)
400{
401 struct timespec64 reply_time;
402 u64 time_use_us;
403
404 ktime_get_ts64(&reply_time);
405 time_use_us = (reply_time.tv_sec - req->req_time.tv_sec) *
406 HPRE_DFX_SEC_TO_US +
407 (reply_time.tv_nsec - req->req_time.tv_nsec) /
408 HPRE_DFX_US_TO_NS;
409
410 if (time_use_us <= overtime_thrhld)
411 return false;
412
413 return true;
414}
415
416static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)
417{
418 struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
419 struct hpre_asym_request *req;
420 struct kpp_request *areq;
421 u64 overtime_thrhld;
422 int ret;
423
424 ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
425 areq = req->areq.dh;
426 areq->dst_len = ctx->key_sz;
427
428 overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
429 if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
430 atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
431
432 hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
433 kpp_request_complete(areq, ret);
434 atomic64_inc(&dfx[HPRE_RECV_CNT].value);
435}
436
437static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
438{
439 struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
440 struct hpre_asym_request *req;
441 struct akcipher_request *areq;
442 u64 overtime_thrhld;
443 int ret;
444
445 ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
446
447 overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
448 if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
449 atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
450
451 areq = req->areq.rsa;
452 areq->dst_len = ctx->key_sz;
453 hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
454 akcipher_request_complete(areq, ret);
455 atomic64_inc(&dfx[HPRE_RECV_CNT].value);
456}
457
458static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
459{
460 struct hpre_ctx *ctx = qp->qp_ctx;
461 struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
462 struct hpre_sqe *sqe = resp;
463 struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)];
464
465 if (unlikely(!req)) {
466 atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value);
467 return;
468 }
469
470 req->cb(ctx, resp);
471}
472
473static void hpre_stop_qp_and_put(struct hisi_qp *qp)
474{
475 hisi_qm_stop_qp(qp);
476 hisi_qm_free_qps(&qp, 1);
477}
478
479static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type)
480{
481 struct hisi_qp *qp;
482 int ret;
483
484 qp = hpre_get_qp_and_start(type);
485 if (IS_ERR(qp))
486 return PTR_ERR(qp);
487
488 qp->qp_ctx = ctx;
489 qp->req_cb = hpre_alg_cb;
490
491 ret = hpre_ctx_set(ctx, qp, QM_Q_DEPTH);
492 if (ret)
493 hpre_stop_qp_and_put(qp);
494
495 return ret;
496}
497
498static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
499{
500 struct hpre_asym_request *h_req;
501 struct hpre_sqe *msg;
502 int req_id;
503 void *tmp;
504
505 if (is_rsa) {
506 struct akcipher_request *akreq = req;
507
508 if (akreq->dst_len < ctx->key_sz) {
509 akreq->dst_len = ctx->key_sz;
510 return -EOVERFLOW;
511 }
512
513 tmp = akcipher_request_ctx(akreq);
514 h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
515 h_req->cb = hpre_rsa_cb;
516 h_req->areq.rsa = akreq;
517 msg = &h_req->req;
518 memset(msg, 0, sizeof(*msg));
519 } else {
520 struct kpp_request *kreq = req;
521
522 if (kreq->dst_len < ctx->key_sz) {
523 kreq->dst_len = ctx->key_sz;
524 return -EOVERFLOW;
525 }
526
527 tmp = kpp_request_ctx(kreq);
528 h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
529 h_req->cb = hpre_dh_cb;
530 h_req->areq.dh = kreq;
531 msg = &h_req->req;
532 memset(msg, 0, sizeof(*msg));
533 msg->key = cpu_to_le64(ctx->dh.dma_xa_p);
534 }
535
536 msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
537 msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
538 msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT);
539 msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
540 h_req->ctx = ctx;
541
542 req_id = hpre_add_req_to_ctx(h_req);
543 if (req_id < 0)
544 return -EBUSY;
545
546 msg->tag = cpu_to_le16((u16)req_id);
547
548 return 0;
549}
550
551static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
552{
553 struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
554 int ctr = 0;
555 int ret;
556
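 /* a full queue (-EBUSY) is retried up to HPRE_TRY_SEND_TIMES before the error is returned */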
557 do {
558 atomic64_inc(&dfx[HPRE_SEND_CNT].value);
559 ret = hisi_qp_send(ctx->qp, msg);
560 if (ret != -EBUSY)
561 break;
562 atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
563 } while (ctr++ < HPRE_TRY_SEND_TIMES);
564
565 if (likely(!ret))
566 return ret;
567
568 if (ret != -EBUSY)
569 atomic64_inc(&dfx[HPRE_SEND_FAIL_CNT].value);
570
571 return ret;
572}
573
574static int hpre_dh_compute_value(struct kpp_request *req)
575{
576 struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
577 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
578 void *tmp = kpp_request_ctx(req);
579 struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
580 struct hpre_sqe *msg = &hpre_req->req;
581 int ret;
582
583 ret = hpre_msg_request_set(ctx, req, false);
584 if (unlikely(ret))
585 return ret;
586
587 if (req->src) {
588 ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
589 if (unlikely(ret))
590 goto clear_all;
591 } else {
592 msg->in = cpu_to_le64(ctx->dh.dma_g);
593 }
594
595 ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
596 if (unlikely(ret))
597 goto clear_all;
598
599 if (ctx->crt_g2_mode && !req->src)
600 msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
601 else
602 msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);
603
604 /* success */
605 ret = hpre_send(ctx, msg);
606 if (likely(!ret))
607 return -EINPROGRESS;
608
609clear_all:
610 hpre_rm_req_from_ctx(hpre_req);
611 hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
612
613 return ret;
614}
615
616static int hpre_is_dh_params_length_valid(unsigned int key_sz)
617{
618#define _HPRE_DH_GRP1 768
619#define _HPRE_DH_GRP2 1024
620#define _HPRE_DH_GRP5 1536
621#define _HPRE_DH_GRP14 2048
622#define _HPRE_DH_GRP15 3072
623#define _HPRE_DH_GRP16 4096
624 switch (key_sz) {
625 case _HPRE_DH_GRP1:
626 case _HPRE_DH_GRP2:
627 case _HPRE_DH_GRP5:
628 case _HPRE_DH_GRP14:
629 case _HPRE_DH_GRP15:
630 case _HPRE_DH_GRP16:
631 return 0;
632 default:
633 return -EINVAL;
634 }
635}
636
637static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
638{
639 struct device *dev = ctx->dev;
640 unsigned int sz;
641
642 if (params->p_size > HPRE_DH_MAX_P_SZ)
643 return -EINVAL;
644
645 if (hpre_is_dh_params_length_valid(params->p_size <<
646 HPRE_BITS_2_BYTES_SHIFT))
647 return -EINVAL;
648
649 sz = ctx->key_sz = params->p_size;
650 ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,
651 &ctx->dh.dma_xa_p, GFP_KERNEL);
652 if (!ctx->dh.xa_p)
653 return -ENOMEM;
654
655 memcpy(ctx->dh.xa_p + sz, params->p, sz);
656
657 /* If g equals 2 don't copy it */
658 if (params->g_size == 1 && *(char *)params->g == HPRE_DH_G_FLAG) {
659 ctx->crt_g2_mode = true;
660 return 0;
661 }
662
663 ctx->dh.g = dma_alloc_coherent(dev, sz, &ctx->dh.dma_g, GFP_KERNEL);
664 if (!ctx->dh.g) {
665 dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
666 ctx->dh.dma_xa_p);
667 ctx->dh.xa_p = NULL;
668 return -ENOMEM;
669 }
670
671 memcpy(ctx->dh.g + (sz - params->g_size), params->g, params->g_size);
672
673 return 0;
674}
675
676static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
677{
678 struct device *dev = ctx->dev;
679 unsigned int sz = ctx->key_sz;
680
681 if (is_clear_all)
682 hisi_qm_stop_qp(ctx->qp);
683
684 if (ctx->dh.g) {
685 dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);
686 ctx->dh.g = NULL;
687 }
688
689 if (ctx->dh.xa_p) {
690 memzero_explicit(ctx->dh.xa_p, sz);
691 dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
692 ctx->dh.dma_xa_p);
693 ctx->dh.xa_p = NULL;
694 }
695
696 hpre_ctx_clear(ctx, is_clear_all);
697}
698
699static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
700 unsigned int len)
701{
702 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
703 struct dh params;
704 int ret;
705
706 if (crypto_dh_decode_key(buf, len, &params) < 0)
707 return -EINVAL;
708
709 /* Free old secret if any */
710 hpre_dh_clear_ctx(ctx, false);
711
712 ret = hpre_dh_set_params(ctx, &params);
713 if (ret < 0)
714 goto err_clear_ctx;
715
716 memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key,
717 params.key_size);
718
719 return 0;
720
721err_clear_ctx:
722 hpre_dh_clear_ctx(ctx, false);
723 return ret;
724}
725
726static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm)
727{
728 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
729
730 return ctx->key_sz;
731}
732
733static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
734{
735 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
736
737 return hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
738}
739
740static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
741{
742 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
743
744 hpre_dh_clear_ctx(ctx, true);
745}
746
747static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len)
748{
749 while (!**ptr && *len) {
750 (*ptr)++;
751 (*len)--;
752 }
753}
754
755static bool hpre_rsa_key_size_is_support(unsigned int len)
756{
757 unsigned int bits = len << HPRE_BITS_2_BYTES_SHIFT;
758
759#define _RSA_1024BITS_KEY_WDTH 1024
760#define _RSA_2048BITS_KEY_WDTH 2048
761#define _RSA_3072BITS_KEY_WDTH 3072
762#define _RSA_4096BITS_KEY_WDTH 4096
763
764 switch (bits) {
765 case _RSA_1024BITS_KEY_WDTH:
766 case _RSA_2048BITS_KEY_WDTH:
767 case _RSA_3072BITS_KEY_WDTH:
768 case _RSA_4096BITS_KEY_WDTH:
769 return true;
770 default:
771 return false;
772 }
773}
774
775static int hpre_rsa_enc(struct akcipher_request *req)
776{
777 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
778 struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
779 void *tmp = akcipher_request_ctx(req);
780 struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
781 struct hpre_sqe *msg = &hpre_req->req;
782 int ret;
783
784 /* For 512 and 1536 bits key size, use soft tfm instead */
785 if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
786 ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
787 akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
788 ret = crypto_akcipher_encrypt(req);
789 akcipher_request_set_tfm(req, tfm);
790 return ret;
791 }
792
793 if (unlikely(!ctx->rsa.pubkey))
794 return -EINVAL;
795
796 ret = hpre_msg_request_set(ctx, req, true);
797 if (unlikely(ret))
798 return ret;
799
800 msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT);
801 msg->key = cpu_to_le64(ctx->rsa.dma_pubkey);
802
803 ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
804 if (unlikely(ret))
805 goto clear_all;
806
807 ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
808 if (unlikely(ret))
809 goto clear_all;
810
811 /* success */
812 ret = hpre_send(ctx, msg);
813 if (likely(!ret))
814 return -EINPROGRESS;
815
816clear_all:
817 hpre_rm_req_from_ctx(hpre_req);
818 hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
819
820 return ret;
821}
822
823static int hpre_rsa_dec(struct akcipher_request *req)
824{
825 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
826 struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
827 void *tmp = akcipher_request_ctx(req);
828 struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
829 struct hpre_sqe *msg = &hpre_req->req;
830 int ret;
831
832 /* For 512 and 1536 bits key size, use soft tfm instead */
833 if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
834 ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
835 akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
836 ret = crypto_akcipher_decrypt(req);
837 akcipher_request_set_tfm(req, tfm);
838 return ret;
839 }
840
841 if (unlikely(!ctx->rsa.prikey))
842 return -EINVAL;
843
844 ret = hpre_msg_request_set(ctx, req, true);
845 if (unlikely(ret))
846 return ret;
847
848 if (ctx->crt_g2_mode) {
849 msg->key = cpu_to_le64(ctx->rsa.dma_crt_prikey);
850 msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
851 HPRE_ALG_NC_CRT);
852 } else {
853 msg->key = cpu_to_le64(ctx->rsa.dma_prikey);
854 msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
855 HPRE_ALG_NC_NCRT);
856 }
857
858 ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
859 if (unlikely(ret))
860 goto clear_all;
861
862 ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
863 if (unlikely(ret))
864 goto clear_all;
865
866 /* success */
867 ret = hpre_send(ctx, msg);
868 if (likely(!ret))
869 return -EINPROGRESS;
870
871clear_all:
872 hpre_rm_req_from_ctx(hpre_req);
873 hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
874
875 return ret;
876}
877
878static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value,
879 size_t vlen, bool private)
880{
881 const char *ptr = value;
882
883 hpre_rsa_drop_leading_zeros(&ptr, &vlen);
884
885 ctx->key_sz = vlen;
886
887 /* if invalid key size provided, we use software tfm */
888 if (!hpre_rsa_key_size_is_support(ctx->key_sz))
889 return 0;
890
891 ctx->rsa.pubkey = dma_alloc_coherent(ctx->dev, vlen << 1,
892 &ctx->rsa.dma_pubkey,
893 GFP_KERNEL);
894 if (!ctx->rsa.pubkey)
895 return -ENOMEM;
896
897 if (private) {
898 ctx->rsa.prikey = dma_alloc_coherent(ctx->dev, vlen << 1,
899 &ctx->rsa.dma_prikey,
900 GFP_KERNEL);
901 if (!ctx->rsa.prikey) {
902 dma_free_coherent(ctx->dev, vlen << 1,
903 ctx->rsa.pubkey,
904 ctx->rsa.dma_pubkey);
905 ctx->rsa.pubkey = NULL;
906 return -ENOMEM;
907 }
908 memcpy(ctx->rsa.prikey + vlen, ptr, vlen);
909 }
910 memcpy(ctx->rsa.pubkey + vlen, ptr, vlen);
911
912 /* Using hardware HPRE to do RSA */
913 return 1;
914}
915
916static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value,
917 size_t vlen)
918{
919 const char *ptr = value;
920
921 hpre_rsa_drop_leading_zeros(&ptr, &vlen);
922
923 if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
924 return -EINVAL;
925
926 memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen);
927
928 return 0;
929}
930
931static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value,
932 size_t vlen)
933{
934 const char *ptr = value;
935
936 hpre_rsa_drop_leading_zeros(&ptr, &vlen);
937
938 if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
939 return -EINVAL;
940
941 memcpy(ctx->rsa.prikey + ctx->key_sz - vlen, ptr, vlen);
942
943 return 0;
944}
945
946static int hpre_crt_para_get(char *para, size_t para_sz,
947 const char *raw, size_t raw_sz)
948{
949 const char *ptr = raw;
950 size_t len = raw_sz;
951
952 hpre_rsa_drop_leading_zeros(&ptr, &len);
953 if (!len || len > para_sz)
954 return -EINVAL;
955
956 memcpy(para + para_sz - len, ptr, len);
957
958 return 0;
959}
960
961static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
962{
963 unsigned int hlf_ksz = ctx->key_sz >> 1;
964 struct device *dev = ctx->dev;
965 u64 offset;
966 int ret;
967
968 ctx->rsa.crt_prikey = dma_alloc_coherent(dev, hlf_ksz * HPRE_CRT_PRMS,
969 &ctx->rsa.dma_crt_prikey,
970 GFP_KERNEL);
971 if (!ctx->rsa.crt_prikey)
972 return -ENOMEM;
973
974 ret = hpre_crt_para_get(ctx->rsa.crt_prikey, hlf_ksz,
975 rsa_key->dq, rsa_key->dq_sz);
976 if (ret)
977 goto free_key;
978
979 offset = hlf_ksz;
980 ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
981 rsa_key->dp, rsa_key->dp_sz);
982 if (ret)
983 goto free_key;
984
985 offset = hlf_ksz * HPRE_CRT_Q;
986 ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
987 rsa_key->q, rsa_key->q_sz);
988 if (ret)
989 goto free_key;
990
991 offset = hlf_ksz * HPRE_CRT_P;
992 ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
993 rsa_key->p, rsa_key->p_sz);
994 if (ret)
995 goto free_key;
996
997 offset = hlf_ksz * HPRE_CRT_INV;
998 ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
999 rsa_key->qinv, rsa_key->qinv_sz);
1000 if (ret)
1001 goto free_key;
1002
1003 ctx->crt_g2_mode = true;
1004
1005 return 0;
1006
1007free_key:
1008 offset = hlf_ksz * HPRE_CRT_PRMS;
1009 memzero_explicit(ctx->rsa.crt_prikey, offset);
1010 dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey,
1011 ctx->rsa.dma_crt_prikey);
1012 ctx->rsa.crt_prikey = NULL;
1013 ctx->crt_g2_mode = false;
1014
1015 return ret;
1016}
1017
1018/* If it is clear all, all the resources of the QP will be cleaned. */
1019static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
1020{
1021 unsigned int half_key_sz = ctx->key_sz >> 1;
1022 struct device *dev = ctx->dev;
1023
1024 if (is_clear_all)
1025 hisi_qm_stop_qp(ctx->qp);
1026
1027 if (ctx->rsa.pubkey) {
1028 dma_free_coherent(dev, ctx->key_sz << 1,
1029 ctx->rsa.pubkey, ctx->rsa.dma_pubkey);
1030 ctx->rsa.pubkey = NULL;
1031 }
1032
1033 if (ctx->rsa.crt_prikey) {
1034 memzero_explicit(ctx->rsa.crt_prikey,
1035 half_key_sz * HPRE_CRT_PRMS);
1036 dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS,
1037 ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey);
1038 ctx->rsa.crt_prikey = NULL;
1039 }
1040
1041 if (ctx->rsa.prikey) {
1042 memzero_explicit(ctx->rsa.prikey, ctx->key_sz);
1043 dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey,
1044 ctx->rsa.dma_prikey);
1045 ctx->rsa.prikey = NULL;
1046 }
1047
1048 hpre_ctx_clear(ctx, is_clear_all);
1049}
1050
1051/*
1052 * we should judge if it is CRT or not,
1053 * CRT: return true, N-CRT: return false.
1054 */
1055static bool hpre_is_crt_key(struct rsa_key *key)
1056{
1057 u16 len = key->p_sz + key->q_sz + key->dp_sz + key->dq_sz +
1058 key->qinv_sz;
1059
1060#define LEN_OF_NCRT_PARA 5
1061
1062 /* N-CRT less than 5 parameters */
1063 return len > LEN_OF_NCRT_PARA;
1064}
1065
1066static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key,
1067 unsigned int keylen, bool private)
1068{
1069 struct rsa_key rsa_key;
1070 int ret;
1071
1072 hpre_rsa_clear_ctx(ctx, false);
1073
1074 if (private)
1075 ret = rsa_parse_priv_key(&rsa_key, key, keylen);
1076 else
1077 ret = rsa_parse_pub_key(&rsa_key, key, keylen);
1078 if (ret < 0)
1079 return ret;
1080
1081 ret = hpre_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz, private);
1082 if (ret <= 0)
1083 return ret;
1084
1085 if (private) {
1086 ret = hpre_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
1087 if (ret < 0)
1088 goto free;
1089
1090 if (hpre_is_crt_key(&rsa_key)) {
1091 ret = hpre_rsa_setkey_crt(ctx, &rsa_key);
1092 if (ret < 0)
1093 goto free;
1094 }
1095 }
1096
1097 ret = hpre_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
1098 if (ret < 0)
1099 goto free;
1100
1101 if ((private && !ctx->rsa.prikey) || !ctx->rsa.pubkey) {
1102 ret = -EINVAL;
1103 goto free;
1104 }
1105
1106 return 0;
1107
1108free:
1109 hpre_rsa_clear_ctx(ctx, false);
1110 return ret;
1111}
1112
1113static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
1114 unsigned int keylen)
1115{
1116 struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
1117 int ret;
1118
1119 ret = crypto_akcipher_set_pub_key(ctx->rsa.soft_tfm, key, keylen);
1120 if (ret)
1121 return ret;
1122
1123 return hpre_rsa_setkey(ctx, key, keylen, false);
1124}
1125
1126static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
1127 unsigned int keylen)
1128{
1129 struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
1130 int ret;
1131
1132 ret = crypto_akcipher_set_priv_key(ctx->rsa.soft_tfm, key, keylen);
1133 if (ret)
1134 return ret;
1135
1136 return hpre_rsa_setkey(ctx, key, keylen, true);
1137}
1138
1139static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
1140{
1141 struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
1142
1143 /* For 512 and 1536 bits key size, use soft tfm instead */
1144 if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
1145 ctx->key_sz == HPRE_RSA_1536BITS_KSZ)
1146 return crypto_akcipher_maxsize(ctx->rsa.soft_tfm);
1147
1148 return ctx->key_sz;
1149}
1150
1151static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
1152{
1153 struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
1154 int ret;
1155
1156 ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0);
1157 if (IS_ERR(ctx->rsa.soft_tfm)) {
1158 pr_err("Can not alloc_akcipher!\n");
1159 return PTR_ERR(ctx->rsa.soft_tfm);
1160 }
1161
1162 ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
1163 if (ret)
1164 crypto_free_akcipher(ctx->rsa.soft_tfm);
1165
1166 return ret;
1167}
1168
1169static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
1170{
1171 struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
1172
1173 hpre_rsa_clear_ctx(ctx, true);
1174 crypto_free_akcipher(ctx->rsa.soft_tfm);
1175}
1176
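 /* reverse the len-byte buffer in place (little-endian <-> big-endian) */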
1177static void hpre_key_to_big_end(u8 *data, int len)
1178{
1179 int i, j;
1180
1181 for (i = 0; i < len / 2; i++) {
1182 j = len - i - 1;
1183 swap(data[j], data[i]);
1184 }
1185}
1186
1187static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
1188 bool is_ecdh)
1189{
1190 struct device *dev = ctx->dev;
1191 unsigned int sz = ctx->key_sz;
1192 unsigned int shift = sz << 1;
1193
1194 if (is_clear_all)
1195 hisi_qm_stop_qp(ctx->qp);
1196
1197 if (is_ecdh && ctx->ecdh.p) {
1198 /* ecdh: p->a->k->b */
1199 memzero_explicit(ctx->ecdh.p + shift, sz);
1200 dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
1201 ctx->ecdh.p = NULL;
1202 } else if (!is_ecdh && ctx->curve25519.p) {
1203 /* curve25519: p->a->k */
1204 memzero_explicit(ctx->curve25519.p + shift, sz);
1205 dma_free_coherent(dev, sz << 2, ctx->curve25519.p,
1206 ctx->curve25519.dma_p);
1207 ctx->curve25519.p = NULL;
1208 }
1209
1210 hpre_ctx_clear(ctx, is_clear_all);
1211}
1212
1213/*
1214 * The bits of 192/224/256/384/521 are supported by HPRE,
1215 * and convert the bits like:
1216 * bits<=256, bits=256; 256<bits<=384, bits=384; 384<bits<=576, bits=576;
1217 * If the parameter bit width is insufficient, then we fill in the
1218 * high-order zeros by soft, so TASK_LENGTH1 is 0x3/0x5/0x8;
1219 */
1220static unsigned int hpre_ecdh_supported_curve(unsigned short id)
1221{
1222 switch (id) {
1223 case ECC_CURVE_NIST_P192:
1224 case ECC_CURVE_NIST_P256:
1225 return HPRE_ECC_HW256_KSZ_B;
1226 case ECC_CURVE_NIST_P384:
1227 return HPRE_ECC_HW384_KSZ_B;
1228 default:
1229 break;
1230 }
1231
1232 return 0;
1233}
1234
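 /* copy one curve parameter (ndigits 64-bit words, truncated to cur_sz bytes) into addr and convert it to the big-endian layout the engine expects */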
1235static void fill_curve_param(void *addr, u64 *param, unsigned int cur_sz, u8 ndigits)
1236{
1237 unsigned int sz = cur_sz - (ndigits - 1) * sizeof(u64);
1238 u8 i = 0;
1239
1240 while (i < ndigits - 1) {
1241 memcpy(addr + sizeof(u64) * i, &param[i], sizeof(u64));
1242 i++;
1243 }
1244
1245 memcpy(addr + sizeof(u64) * i, &param[ndigits - 1], sz);
1246 hpre_key_to_big_end((u8 *)addr, cur_sz);
1247}
1248
1249static int hpre_ecdh_fill_curve(struct hpre_ctx *ctx, struct ecdh *params,
1250 unsigned int cur_sz)
1251{
1252 unsigned int shifta = ctx->key_sz << 1;
1253 unsigned int shiftb = ctx->key_sz << 2;
1254 void *p = ctx->ecdh.p + ctx->key_sz - cur_sz;
1255 void *a = ctx->ecdh.p + shifta - cur_sz;
1256 void *b = ctx->ecdh.p + shiftb - cur_sz;
1257 void *x = ctx->ecdh.g + ctx->key_sz - cur_sz;
1258 void *y = ctx->ecdh.g + shifta - cur_sz;
1259 const struct ecc_curve *curve = ecc_get_curve(ctx->curve_id);
1260 char *n;
1261
1262 if (unlikely(!curve))
1263 return -EINVAL;
1264
1265 n = kzalloc(ctx->key_sz, GFP_KERNEL);
1266 if (!n)
1267 return -ENOMEM;
1268
1269 fill_curve_param(p, curve->p, cur_sz, curve->g.ndigits);
1270 fill_curve_param(a, curve->a, cur_sz, curve->g.ndigits);
1271 fill_curve_param(b, curve->b, cur_sz, curve->g.ndigits);
1272 fill_curve_param(x, curve->g.x, cur_sz, curve->g.ndigits);
1273 fill_curve_param(y, curve->g.y, cur_sz, curve->g.ndigits);
1274 fill_curve_param(n, curve->n, cur_sz, curve->g.ndigits);
1275
1276 if (params->key_size == cur_sz && memcmp(params->key, n, cur_sz) >= 0) {
1277 kfree(n);
1278 return -EINVAL;
1279 }
1280
1281 kfree(n);
1282 return 0;
1283}
1284
1285static unsigned int hpre_ecdh_get_curvesz(unsigned short id)
1286{
1287 switch (id) {
1288 case ECC_CURVE_NIST_P192:
1289 return HPRE_ECC_NIST_P192_N_SIZE;
1290 case ECC_CURVE_NIST_P256:
1291 return HPRE_ECC_NIST_P256_N_SIZE;
1292 case ECC_CURVE_NIST_P384:
1293 return HPRE_ECC_NIST_P384_N_SIZE;
1294 default:
1295 break;
1296 }
1297
1298 return 0;
1299}
1300
1301static int hpre_ecdh_set_param(struct hpre_ctx *ctx, struct ecdh *params)
1302{
1303 struct device *dev = ctx->dev;
1304 unsigned int sz, shift, curve_sz;
1305 int ret;
1306
1307 ctx->key_sz = hpre_ecdh_supported_curve(ctx->curve_id);
1308 if (!ctx->key_sz)
1309 return -EINVAL;
1310
1311 curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
1312 if (!curve_sz || params->key_size > curve_sz)
1313 return -EINVAL;
1314
1315 sz = ctx->key_sz;
1316
1317 if (!ctx->ecdh.p) {
1318 ctx->ecdh.p = dma_alloc_coherent(dev, sz << 3, &ctx->ecdh.dma_p,
1319 GFP_KERNEL);
1320 if (!ctx->ecdh.p)
1321 return -ENOMEM;
1322 }
1323
1324 shift = sz << 2;
1325 ctx->ecdh.g = ctx->ecdh.p + shift;
1326 ctx->ecdh.dma_g = ctx->ecdh.dma_p + shift;
1327
1328 ret = hpre_ecdh_fill_curve(ctx, params, curve_sz);
1329 if (ret) {
1330 dev_err(dev, "failed to fill curve_param, ret = %d!\n", ret);
1331 dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
1332 ctx->ecdh.p = NULL;
1333 return ret;
1334 }
1335
1336 return 0;
1337}
1338
1339static bool hpre_key_is_zero(char *key, unsigned short key_sz)
1340{
1341 int i;
1342
1343 for (i = 0; i < key_sz; i++)
1344 if (key[i])
1345 return false;
1346
1347 return true;
1348}
1349
1350static int ecdh_gen_privkey(struct hpre_ctx *ctx, struct ecdh *params)
1351{
1352 struct device *dev = ctx->dev;
1353 int ret;
1354
1355 ret = crypto_get_default_rng();
1356 if (ret) {
1357 dev_err(dev, "failed to get default rng, ret = %d!\n", ret);
1358 return ret;
1359 }
1360
1361 ret = crypto_rng_get_bytes(crypto_default_rng, (u8 *)params->key,
1362 params->key_size);
1363 crypto_put_default_rng();
1364 if (ret)
1365 dev_err(dev, "failed to get rng, ret = %d!\n", ret);
1366
1367 return ret;
1368}
1369
1370static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
1371 unsigned int len)
1372{
1373 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1374 struct device *dev = ctx->dev;
1375 char key[HPRE_ECC_MAX_KSZ];
1376 unsigned int sz, sz_shift;
1377 struct ecdh params;
1378 int ret;
1379
1380 if (crypto_ecdh_decode_key(buf, len, &params) < 0) {
1381 dev_err(dev, "failed to decode ecdh key!\n");
1382 return -EINVAL;
1383 }
1384
1385 /* Use stdrng to generate private key */
1386 if (!params.key || !params.key_size) {
1387 params.key = key;
1388 params.key_size = hpre_ecdh_get_curvesz(ctx->curve_id);
1389 ret = ecdh_gen_privkey(ctx, &params);
1390 if (ret)
1391 return ret;
1392 }
1393
1394 if (hpre_key_is_zero(params.key, params.key_size)) {
1395 dev_err(dev, "Invalid hpre key!\n");
1396 return -EINVAL;
1397 }
1398
1399 hpre_ecc_clear_ctx(ctx, false, true);
1400
1401 ret = hpre_ecdh_set_param(ctx, &params);
1402 if (ret < 0) {
1403 dev_err(dev, "failed to set hpre param, ret = %d!\n", ret);
1404 return ret;
1405 }
1406
1407 sz = ctx->key_sz;
1408 sz_shift = (sz << 1) + sz - params.key_size;
1409 memcpy(ctx->ecdh.p + sz_shift, params.key, params.key_size);
1410
1411 return 0;
1412}
1413
1414static void hpre_ecdh_hw_data_clr_all(struct hpre_ctx *ctx,
1415 struct hpre_asym_request *req,
1416 struct scatterlist *dst,
1417 struct scatterlist *src)
1418{
1419 struct device *dev = ctx->dev;
1420 struct hpre_sqe *sqe = &req->req;
1421 dma_addr_t dma;
1422
1423 dma = le64_to_cpu(sqe->in);
1424 if (unlikely(dma_mapping_error(dev, dma)))
1425 return;
1426
1427 if (src && req->src)
1428 dma_free_coherent(dev, ctx->key_sz << 2, req->src, dma);
1429
1430 dma = le64_to_cpu(sqe->out);
1431 if (unlikely(dma_mapping_error(dev, dma)))
1432 return;
1433
1434 if (req->dst)
1435 dma_free_coherent(dev, ctx->key_sz << 1, req->dst, dma);
1436 if (dst)
1437 dma_unmap_single(dev, dma, ctx->key_sz << 1, DMA_FROM_DEVICE);
1438}
1439
1440static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp)
1441{
1442 unsigned int curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
1443 struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
1444 struct hpre_asym_request *req = NULL;
1445 struct kpp_request *areq;
1446 u64 overtime_thrhld;
1447 char *p;
1448 int ret;
1449
1450 ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
1451 areq = req->areq.ecdh;
1452 areq->dst_len = ctx->key_sz << 1;
1453
1454 overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
1455 if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
1456 atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
1457
1458 p = sg_virt(areq->dst);
1459 memmove(p, p + ctx->key_sz - curve_sz, curve_sz);
1460 memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz);
1461
1462 hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
1463 kpp_request_complete(areq, ret);
1464
1465 atomic64_inc(&dfx[HPRE_RECV_CNT].value);
1466}
1467
1468static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx,
1469 struct kpp_request *req)
1470{
1471 struct hpre_asym_request *h_req;
1472 struct hpre_sqe *msg;
1473 int req_id;
1474 void *tmp;
1475
1476 if (req->dst_len < ctx->key_sz << 1) {
1477 req->dst_len = ctx->key_sz << 1;
1478 return -EINVAL;
1479 }
1480
1481 tmp = kpp_request_ctx(req);
1482 h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
1483 h_req->cb = hpre_ecdh_cb;
1484 h_req->areq.ecdh = req;
1485 msg = &h_req->req;
1486 memset(msg, 0, sizeof(*msg));
1487 msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
1488 msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
1489 msg->key = cpu_to_le64(ctx->ecdh.dma_p);
1490
1491 msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
1492 msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
1493 h_req->ctx = ctx;
1494
1495 req_id = hpre_add_req_to_ctx(h_req);
1496 if (req_id < 0)
1497 return -EBUSY;
1498
1499 msg->tag = cpu_to_le16((u16)req_id);
1500 return 0;
1501}
1502
1503static int hpre_ecdh_src_data_init(struct hpre_asym_request *hpre_req,
1504 struct scatterlist *data, unsigned int len)
1505{
1506 struct hpre_sqe *msg = &hpre_req->req;
1507 struct hpre_ctx *ctx = hpre_req->ctx;
1508 struct device *dev = ctx->dev;
1509 unsigned int tmpshift;
1510 dma_addr_t dma = 0;
1511 void *ptr;
1512 int shift;
1513
1514 /* Src_data include gx and gy. */
1515 shift = ctx->key_sz - (len >> 1);
1516 if (unlikely(shift < 0))
1517 return -EINVAL;
1518
1519 ptr = dma_alloc_coherent(dev, ctx->key_sz << 2, &dma, GFP_KERNEL);
1520 if (unlikely(!ptr))
1521 return -ENOMEM;
1522
1523 tmpshift = ctx->key_sz << 1;
1524 scatterwalk_map_and_copy(ptr + tmpshift, data, 0, len, 0);
1525 memcpy(ptr + shift, ptr + tmpshift, len >> 1);
1526 memcpy(ptr + ctx->key_sz + shift, ptr + tmpshift + (len >> 1), len >> 1);
1527
1528 hpre_req->src = ptr;
1529 msg->in = cpu_to_le64(dma);
1530 return 0;
1531}
1532
1533static int hpre_ecdh_dst_data_init(struct hpre_asym_request *hpre_req,
1534 struct scatterlist *data, unsigned int len)
1535{
1536 struct hpre_sqe *msg = &hpre_req->req;
1537 struct hpre_ctx *ctx = hpre_req->ctx;
1538 struct device *dev = ctx->dev;
1539 dma_addr_t dma;
1540
1541 if (unlikely(!data || !sg_is_last(data) || len != ctx->key_sz << 1)) {
1542 dev_err(dev, "data or data length is illegal!\n");
1543 return -EINVAL;
1544 }
1545
1546 hpre_req->dst = NULL;
1547 dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
1548 if (unlikely(dma_mapping_error(dev, dma))) {
1549 dev_err(dev, "dma map data err!\n");
1550 return -ENOMEM;
1551 }
1552
1553 msg->out = cpu_to_le64(dma);
1554 return 0;
1555}
1556
1557static int hpre_ecdh_compute_value(struct kpp_request *req)
1558{
1559 struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
1560 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1561 struct device *dev = ctx->dev;
1562 void *tmp = kpp_request_ctx(req);
1563 struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
1564 struct hpre_sqe *msg = &hpre_req->req;
1565 int ret;
1566
1567 ret = hpre_ecdh_msg_request_set(ctx, req);
1568 if (unlikely(ret)) {
1569 dev_err(dev, "failed to set ecdh request, ret = %d!\n", ret);
1570 return ret;
1571 }
1572
1573 if (req->src) {
1574 ret = hpre_ecdh_src_data_init(hpre_req, req->src, req->src_len);
1575 if (unlikely(ret)) {
1576 dev_err(dev, "failed to init src data, ret = %d!\n", ret);
1577 goto clear_all;
1578 }
1579 } else {
1580 msg->in = cpu_to_le64(ctx->ecdh.dma_g);
1581 }
1582
1583 ret = hpre_ecdh_dst_data_init(hpre_req, req->dst, req->dst_len);
1584 if (unlikely(ret)) {
1585 dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
1586 goto clear_all;
1587 }
1588
1589 msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_ECC_MUL);
1590 ret = hpre_send(ctx, msg);
1591 if (likely(!ret))
1592 return -EINPROGRESS;
1593
1594clear_all:
1595 hpre_rm_req_from_ctx(hpre_req);
1596 hpre_ecdh_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
1597 return ret;
1598}
1599
1600static unsigned int hpre_ecdh_max_size(struct crypto_kpp *tfm)
1601{
1602 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1603
1604 /* max size is the pub_key_size, include x and y */
1605 return ctx->key_sz << 1;
1606}
1607
1608static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm)
1609{
1610 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1611
1612 ctx->curve_id = ECC_CURVE_NIST_P192;
1613
1614 return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
1615}
1616
1617static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
1618{
1619 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1620
1621 ctx->curve_id = ECC_CURVE_NIST_P256;
1622
1623 return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
1624}
1625
1626static int hpre_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm)
1627{
1628 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1629
1630 ctx->curve_id = ECC_CURVE_NIST_P384;
1631
1632 return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
1633}
1634
1635static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm)
1636{
1637 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1638
1639 hpre_ecc_clear_ctx(ctx, true, true);
1640}
1641
1642static void hpre_curve25519_fill_curve(struct hpre_ctx *ctx, const void *buf,
1643 unsigned int len)
1644{
1645 u8 secret[CURVE25519_KEY_SIZE] = { 0 };
1646 unsigned int sz = ctx->key_sz;
1647 const struct ecc_curve *curve;
1648 unsigned int shift = sz << 1;
1649 void *p;
1650
1651 /*
1652 * The key from 'buf' is in little-endian, we should preprocess it as
1653 * the description in rfc7748: "k[0] &= 248, k[31] &= 127, k[31] |= 64",
1654 * then convert it to big endian. Only in this way, the result can be
1655 * the same as the software curve-25519 that exists in crypto.
1656 */
1657 memcpy(secret, buf, len);
1658 curve25519_clamp_secret(secret);
1659 hpre_key_to_big_end(secret, CURVE25519_KEY_SIZE);
1660
1661 p = ctx->curve25519.p + sz - len;
1662
1663 curve = ecc_get_curve25519();
1664
1665 /* fill curve parameters */
1666 fill_curve_param(p, curve->p, len, curve->g.ndigits);
1667 fill_curve_param(p + sz, curve->a, len, curve->g.ndigits);
1668 memcpy(p + shift, secret, len);
1669 fill_curve_param(p + shift + sz, curve->g.x, len, curve->g.ndigits);
1670 memzero_explicit(secret, CURVE25519_KEY_SIZE);
1671}
1672
1673static int hpre_curve25519_set_param(struct hpre_ctx *ctx, const void *buf,
1674 unsigned int len)
1675{
1676 struct device *dev = ctx->dev;
1677 unsigned int sz = ctx->key_sz;
1678 unsigned int shift = sz << 1;
1679
1680 /* p->a->k->gx */
1681 if (!ctx->curve25519.p) {
1682 ctx->curve25519.p = dma_alloc_coherent(dev, sz << 2,
1683 &ctx->curve25519.dma_p,
1684 GFP_KERNEL);
1685 if (!ctx->curve25519.p)
1686 return -ENOMEM;
1687 }
1688
1689 ctx->curve25519.g = ctx->curve25519.p + shift + sz;
1690 ctx->curve25519.dma_g = ctx->curve25519.dma_p + shift + sz;
1691
1692 hpre_curve25519_fill_curve(ctx, buf, len);
1693
1694 return 0;
1695}
1696
1697static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
1698 unsigned int len)
1699{
1700 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1701 struct device *dev = ctx->dev;
1702 int ret = -EINVAL;
1703
1704 if (len != CURVE25519_KEY_SIZE ||
1705 !crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) {
1706 dev_err(dev, "key is null or key len is not 32bytes!\n");
1707 return ret;
1708 }
1709
1710 /* Free old secret if any */
1711 hpre_ecc_clear_ctx(ctx, false, false);
1712
1713 ctx->key_sz = CURVE25519_KEY_SIZE;
1714 ret = hpre_curve25519_set_param(ctx, buf, CURVE25519_KEY_SIZE);
1715 if (ret) {
1716 dev_err(dev, "failed to set curve25519 param, ret = %d!\n", ret);
1717 hpre_ecc_clear_ctx(ctx, false, false);
1718 return ret;
1719 }
1720
1721 return 0;
1722}
1723
1724static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx,
1725 struct hpre_asym_request *req,
1726 struct scatterlist *dst,
1727 struct scatterlist *src)
1728{
1729 struct device *dev = ctx->dev;
1730 struct hpre_sqe *sqe = &req->req;
1731 dma_addr_t dma;
1732
1733 dma = le64_to_cpu(sqe->in);
1734 if (unlikely(dma_mapping_error(dev, dma)))
1735 return;
1736
1737 if (src && req->src)
1738 dma_free_coherent(dev, ctx->key_sz, req->src, dma);
1739
1740 dma = le64_to_cpu(sqe->out);
1741 if (unlikely(dma_mapping_error(dev, dma)))
1742 return;
1743
1744 if (req->dst)
1745 dma_free_coherent(dev, ctx->key_sz, req->dst, dma);
1746 if (dst)
1747 dma_unmap_single(dev, dma, ctx->key_sz, DMA_FROM_DEVICE);
1748}
1749
1750static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp)
1751{
1752 struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
1753 struct hpre_asym_request *req = NULL;
1754 struct kpp_request *areq;
1755 u64 overtime_thrhld;
1756 int ret;
1757
1758 ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
1759 areq = req->areq.curve25519;
1760 areq->dst_len = ctx->key_sz;
1761
1762 overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
1763 if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
1764 atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
1765
1766 hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE);
1767
1768 hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
1769 kpp_request_complete(areq, ret);
1770
1771 atomic64_inc(&dfx[HPRE_RECV_CNT].value);
1772}
1773
1774static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx,
1775 struct kpp_request *req)
1776{
1777 struct hpre_asym_request *h_req;
1778 struct hpre_sqe *msg;
1779 int req_id;
1780 void *tmp;
1781
1782 if (unlikely(req->dst_len < ctx->key_sz)) {
1783 req->dst_len = ctx->key_sz;
1784 return -EINVAL;
1785 }
1786
1787 tmp = kpp_request_ctx(req);
1788 h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
1789 h_req->cb = hpre_curve25519_cb;
1790 h_req->areq.curve25519 = req;
1791 msg = &h_req->req;
1792 memset(msg, 0, sizeof(*msg));
1793 msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
1794 msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
1795 msg->key = cpu_to_le64(ctx->curve25519.dma_p);
1796
1797 msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
1798 msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
1799 h_req->ctx = ctx;
1800
1801 req_id = hpre_add_req_to_ctx(h_req);
1802 if (req_id < 0)
1803 return -EBUSY;
1804
1805 msg->tag = cpu_to_le16((u16)req_id);
1806 return 0;
1807}
1808
1809static void hpre_curve25519_src_modulo_p(u8 *ptr)
1810{
1811 int i;
1812
1813 for (i = 0; i < CURVE25519_KEY_SIZE - 1; i++)
1814 ptr[i] = 0;
1815
1816 /* The modulus is ptr's last byte minus '0xed'(last byte of p) */
1817 ptr[i] -= 0xed;
1818}
1819
1820static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req,
1821 struct scatterlist *data, unsigned int len)
1822{
1823 struct hpre_sqe *msg = &hpre_req->req;
1824 struct hpre_ctx *ctx = hpre_req->ctx;
1825 struct device *dev = ctx->dev;
1826 u8 p[CURVE25519_KEY_SIZE] = { 0 };
1827 const struct ecc_curve *curve;
1828 dma_addr_t dma = 0;
1829 u8 *ptr;
1830
1831 if (len != CURVE25519_KEY_SIZE) {
1832 dev_err(dev, "sourc_data len is not 32bytes, len = %u!\n", len);
1833 return -EINVAL;
1834 }
1835
1836 ptr = dma_alloc_coherent(dev, ctx->key_sz, &dma, GFP_KERNEL);
1837 if (unlikely(!ptr))
1838 return -ENOMEM;
1839
1840 scatterwalk_map_and_copy(ptr, data, 0, len, 0);
1841
1842 if (!crypto_memneq(ptr, curve25519_null_point, CURVE25519_KEY_SIZE)) {
1843 dev_err(dev, "gx is null!\n");
1844 goto err;
1845 }
1846
1847 /*
1848 * Src_data(gx) is in little-endian order, MSB in the final byte should
1849 * be masked as described in RFC7748, then transform it to big-endian
1850 * form, then hisi_hpre can use the data.
1851 */
1852 ptr[31] &= 0x7f;
1853 hpre_key_to_big_end(ptr, CURVE25519_KEY_SIZE);
1854
1855 curve = ecc_get_curve25519();
1856
1857 fill_curve_param(p, curve->p, CURVE25519_KEY_SIZE, curve->g.ndigits);
1858
1859 /*
1860 * When src_data equals (2^255 - 19) ~ (2^255 - 1), it is out of p,
1861 * we get its modulus to p, and then use it.
1862 */
1863 if (memcmp(ptr, p, ctx->key_sz) == 0) {
1864 dev_err(dev, "gx is p!\n");
1865 goto err;
1866 } else if (memcmp(ptr, p, ctx->key_sz) > 0) {
1867 hpre_curve25519_src_modulo_p(ptr);
1868 }
1869
1870 hpre_req->src = ptr;
1871 msg->in = cpu_to_le64(dma);
1872 return 0;
1873
1874err:
1875 dma_free_coherent(dev, ctx->key_sz, ptr, dma);
1876 return -EINVAL;
1877}
1878
1879static int hpre_curve25519_dst_init(struct hpre_asym_request *hpre_req,
1880 struct scatterlist *data, unsigned int len)
1881{
1882 struct hpre_sqe *msg = &hpre_req->req;
1883 struct hpre_ctx *ctx = hpre_req->ctx;
1884 struct device *dev = ctx->dev;
1885 dma_addr_t dma;
1886
1887 if (!data || !sg_is_last(data) || len != ctx->key_sz) {
1888 dev_err(dev, "data or data length is illegal!\n");
1889 return -EINVAL;
1890 }
1891
1892 hpre_req->dst = NULL;
1893 dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
1894 if (unlikely(dma_mapping_error(dev, dma))) {
1895 dev_err(dev, "dma map data err!\n");
1896 return -ENOMEM;
1897 }
1898
1899 msg->out = cpu_to_le64(dma);
1900 return 0;
1901}
1902
1903static int hpre_curve25519_compute_value(struct kpp_request *req)
1904{
1905 struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
1906 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1907 struct device *dev = ctx->dev;
1908 void *tmp = kpp_request_ctx(req);
1909 struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
1910 struct hpre_sqe *msg = &hpre_req->req;
1911 int ret;
1912
1913 ret = hpre_curve25519_msg_request_set(ctx, req);
1914 if (unlikely(ret)) {
1915 dev_err(dev, "failed to set curve25519 request, ret = %d!\n", ret);
1916 return ret;
1917 }
1918
1919 if (req->src) {
1920 ret = hpre_curve25519_src_init(hpre_req, req->src, req->src_len);
1921 if (unlikely(ret)) {
1922 dev_err(dev, "failed to init src data, ret = %d!\n",
1923 ret);
1924 goto clear_all;
1925 }
1926 } else {
1927 msg->in = cpu_to_le64(ctx->curve25519.dma_g);
1928 }
1929
1930 ret = hpre_curve25519_dst_init(hpre_req, req->dst, req->dst_len);
1931 if (unlikely(ret)) {
1932 dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
1933 goto clear_all;
1934 }
1935
1936 msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_CURVE25519_MUL);
1937 ret = hpre_send(ctx, msg);
1938 if (likely(!ret))
1939 return -EINPROGRESS;
1940
1941clear_all:
1942 hpre_rm_req_from_ctx(hpre_req);
1943 hpre_curve25519_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
1944 return ret;
1945}
1946
1947static unsigned int hpre_curve25519_max_size(struct crypto_kpp *tfm)
1948{
1949 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1950
1951 return ctx->key_sz;
1952}
1953
1954static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm)
1955{
1956 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1957
1958 return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
1959}
1960
1961static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm)
1962{
1963 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1964
1965 hpre_ecc_clear_ctx(ctx, true, false);
1966}
1967
1968static struct akcipher_alg rsa = {
1969 .sign = hpre_rsa_dec,
1970 .verify = hpre_rsa_enc,
1971 .encrypt = hpre_rsa_enc,
1972 .decrypt = hpre_rsa_dec,
1973 .set_pub_key = hpre_rsa_setpubkey,
1974 .set_priv_key = hpre_rsa_setprivkey,
1975 .max_size = hpre_rsa_max_size,
1976 .init = hpre_rsa_init_tfm,
1977 .exit = hpre_rsa_exit_tfm,
1978 .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
1979 .base = {
1980 .cra_ctxsize = sizeof(struct hpre_ctx),
1981 .cra_priority = HPRE_CRYPTO_ALG_PRI,
1982 .cra_name = "rsa",
1983 .cra_driver_name = "hpre-rsa",
1984 .cra_module = THIS_MODULE,
1985 },
1986};
1987
1988static struct kpp_alg dh = {
1989 .set_secret = hpre_dh_set_secret,
1990 .generate_public_key = hpre_dh_compute_value,
1991 .compute_shared_secret = hpre_dh_compute_value,
1992 .max_size = hpre_dh_max_size,
1993 .init = hpre_dh_init_tfm,
1994 .exit = hpre_dh_exit_tfm,
1995 .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
1996 .base = {
1997 .cra_ctxsize = sizeof(struct hpre_ctx),
1998 .cra_priority = HPRE_CRYPTO_ALG_PRI,
1999 .cra_name = "dh",
2000 .cra_driver_name = "hpre-dh",
2001 .cra_module = THIS_MODULE,
2002 },
2003};
2004
2005static struct kpp_alg ecdh_nist_p192 = {
2006 .set_secret = hpre_ecdh_set_secret,
2007 .generate_public_key = hpre_ecdh_compute_value,
2008 .compute_shared_secret = hpre_ecdh_compute_value,
2009 .max_size = hpre_ecdh_max_size,
2010 .init = hpre_ecdh_nist_p192_init_tfm,
2011 .exit = hpre_ecdh_exit_tfm,
2012 .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
2013 .base = {
2014 .cra_ctxsize = sizeof(struct hpre_ctx),
2015 .cra_priority = HPRE_CRYPTO_ALG_PRI,
2016 .cra_name = "ecdh-nist-p192",
2017 .cra_driver_name = "hpre-ecdh-nist-p192",
2018 .cra_module = THIS_MODULE,
2019 },
2020};
2021
2022static struct kpp_alg ecdh_nist_p256 = {
2023 .set_secret = hpre_ecdh_set_secret,
2024 .generate_public_key = hpre_ecdh_compute_value,
2025 .compute_shared_secret = hpre_ecdh_compute_value,
2026 .max_size = hpre_ecdh_max_size,
2027 .init = hpre_ecdh_nist_p256_init_tfm,
2028 .exit = hpre_ecdh_exit_tfm,
2029 .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
2030 .base = {
2031 .cra_ctxsize = sizeof(struct hpre_ctx),
2032 .cra_priority = HPRE_CRYPTO_ALG_PRI,
2033 .cra_name = "ecdh-nist-p256",
2034 .cra_driver_name = "hpre-ecdh-nist-p256",
2035 .cra_module = THIS_MODULE,
2036 },
2037};
2038
2039static struct kpp_alg ecdh_nist_p384 = {
2040 .set_secret = hpre_ecdh_set_secret,
2041 .generate_public_key = hpre_ecdh_compute_value,
2042 .compute_shared_secret = hpre_ecdh_compute_value,
2043 .max_size = hpre_ecdh_max_size,
2044 .init = hpre_ecdh_nist_p384_init_tfm,
2045 .exit = hpre_ecdh_exit_tfm,
2046 .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
2047 .base = {
2048 .cra_ctxsize = sizeof(struct hpre_ctx),
2049 .cra_priority = HPRE_CRYPTO_ALG_PRI,
2050 .cra_name = "ecdh-nist-p384",
2051 .cra_driver_name = "hpre-ecdh-nist-p384",
2052 .cra_module = THIS_MODULE,
2053 },
2054};
2055
2056static struct kpp_alg curve25519_alg = {
2057 .set_secret = hpre_curve25519_set_secret,
2058 .generate_public_key = hpre_curve25519_compute_value,
2059 .compute_shared_secret = hpre_curve25519_compute_value,
2060 .max_size = hpre_curve25519_max_size,
2061 .init = hpre_curve25519_init_tfm,
2062 .exit = hpre_curve25519_exit_tfm,
2063 .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
2064 .base = {
2065 .cra_ctxsize = sizeof(struct hpre_ctx),
2066 .cra_priority = HPRE_CRYPTO_ALG_PRI,
2067 .cra_name = "curve25519",
2068 .cra_driver_name = "hpre-curve25519",
2069 .cra_module = THIS_MODULE,
2070 },
2071};
2072
2073
2074static int hpre_register_ecdh(void)
2075{
2076 int ret;
2077
2078 ret = crypto_register_kpp(&ecdh_nist_p192);
2079 if (ret)
2080 return ret;
2081
2082 ret = crypto_register_kpp(&ecdh_nist_p256);
2083 if (ret)
2084 goto unregister_ecdh_p192;
2085
2086 ret = crypto_register_kpp(&ecdh_nist_p384);
2087 if (ret)
2088 goto unregister_ecdh_p256;
2089
2090 return 0;
2091
2092unregister_ecdh_p256:
2093 crypto_unregister_kpp(&ecdh_nist_p256);
2094unregister_ecdh_p192:
2095 crypto_unregister_kpp(&ecdh_nist_p192);
2096 return ret;
2097}
2098
2099static void hpre_unregister_ecdh(void)
2100{
2101 crypto_unregister_kpp(&ecdh_nist_p384);
2102 crypto_unregister_kpp(&ecdh_nist_p256);
2103 crypto_unregister_kpp(&ecdh_nist_p192);
2104}
2105
2106int hpre_algs_register(struct hisi_qm *qm)
2107{
2108 int ret;
2109
2110 rsa.base.cra_flags = 0;
2111 ret = crypto_register_akcipher(&rsa);
2112 if (ret)
2113 return ret;
2114
2115 ret = crypto_register_kpp(&dh);
2116 if (ret)
2117 goto unreg_rsa;
2118
2119 if (qm->ver >= QM_HW_V3) {
2120 ret = hpre_register_ecdh();
2121 if (ret)
2122 goto unreg_dh;
2123 ret = crypto_register_kpp(&curve25519_alg);
2124 if (ret)
2125 goto unreg_ecdh;
2126 }
2127 return 0;
2128
2129unreg_ecdh:
2130 hpre_unregister_ecdh();
2131unreg_dh:
2132 crypto_unregister_kpp(&dh);
2133unreg_rsa:
2134 crypto_unregister_akcipher(&rsa);
2135 return ret;
2136}
2137
2138void hpre_algs_unregister(struct hisi_qm *qm)
2139{
2140 if (qm->ver >= QM_HW_V3) {
2141 crypto_unregister_kpp(&curve25519_alg);
2142 hpre_unregister_ecdh();
2143 }
2144
2145 crypto_unregister_kpp(&dh);
2146 crypto_unregister_akcipher(&rsa);
2147}