struct hpre_ctx {
struct hisi_qp *qp;
struct device *dev;
- struct hpre_asym_request **req_list;
struct hpre *hpre;
spinlock_t req_lock;
unsigned int key_sz;
bool crt_g2_mode;
- struct idr req_idr;
union {
struct hpre_rsa_ctx rsa;
struct hpre_dh_ctx dh;
struct hpre_ecdh_ctx ecdh;
};
};

struct hpre_asym_request {
union {
struct akcipher_request *rsa;
struct kpp_request *dh;
struct kpp_request *ecdh;
} areq;
int err;
- int req_id;
hpre_cb cb;
struct timespec64 req_time;
};
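+/*
+ * The per-ctx req_list/req_idr bookkeeping (and the per-request req_id)
+ * goes away: the request pointer is now carried directly in the sqe tag.
+ */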
static unsigned int hpre_align_pd(void)
{
return (hpre_align_sz() - 1) & ~(crypto_tfm_ctx_alignment() - 1);
}
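+/*
+ * Replaces hpre_alloc_req_id()/hpre_add_req_to_ctx(): with the request
+ * pointer travelling in the sqe tag, the only per-request setup left is
+ * the optional DFX timestamp.
+ */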
-static int hpre_alloc_req_id(struct hpre_ctx *ctx)
+static void hpre_dfx_add_req_time(struct hpre_asym_request *hpre_req)
{
- unsigned long flags;
- int id;
-
- spin_lock_irqsave(&ctx->req_lock, flags);
- id = idr_alloc(&ctx->req_idr, NULL, 0, ctx->qp->sq_depth, GFP_ATOMIC);
- spin_unlock_irqrestore(&ctx->req_lock, flags);
-
- return id;
-}
-
-static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ctx->req_lock, flags);
- idr_remove(&ctx->req_idr, req_id);
- spin_unlock_irqrestore(&ctx->req_lock, flags);
-}
-
-static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
-{
- struct hpre_ctx *ctx;
- struct hpre_dfx *dfx;
- int id;
-
- ctx = hpre_req->ctx;
- id = hpre_alloc_req_id(ctx);
- if (unlikely(id < 0))
- return -EINVAL;
-
- ctx->req_list[id] = hpre_req;
- hpre_req->req_id = id;
+ struct hpre_ctx *ctx = hpre_req->ctx;
+ struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
- dfx = ctx->hpre->debug.dfx;
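+
+ /* Stamp the request only when the overtime-threshold counter is set. */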
if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value))
ktime_get_ts64(&hpre_req->req_time);
-
- return id;
-}
-
-static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
-{
- struct hpre_ctx *ctx = hpre_req->ctx;
- int id = hpre_req->req_id;
-
- if (hpre_req->req_id >= 0) {
- hpre_req->req_id = HPRE_INVLD_REQ_ID;
- ctx->req_list[id] = NULL;
- hpre_free_req_id(ctx, id);
- }
}
static struct hisi_qp *hpre_get_qp_and_start(u8 type)
static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
void **kreq)
{
- struct hpre_asym_request *req;
unsigned int err, done, alg;
- int id;
#define HPRE_NO_HW_ERR 0
#define HPRE_HW_TASK_DONE 3
#define HREE_HW_ERR_MASK GENMASK(10, 0)
#define HREE_SQE_DONE_MASK GENMASK(1, 0)
#define HREE_ALG_TYPE_MASK GENMASK(4, 0)
- id = (int)le16_to_cpu(sqe->tag);
- req = ctx->req_list[id];
- hpre_rm_req_from_ctx(req);
- *kreq = req;
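+ /*
+ * The tag comes back verbatim in the completed sqe. This assumes the
+ * same series widens hpre_sqe::tag to __le64 so a kernel pointer fits;
+ * that struct change is not shown in this hunk.
+ */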
+ *kreq = (void *)(uintptr_t)le64_to_cpu(sqe->tag);
err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) &
HREE_HW_ERR_MASK;
-
done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
HREE_SQE_DONE_MASK;
-
if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
return 0;
return -EINVAL;
}
-static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
-{
- struct hpre *hpre;
-
- if (!ctx || !qp || qlen < 0)
- return -EINVAL;
-
- spin_lock_init(&ctx->req_lock);
- ctx->qp = qp;
- ctx->dev = &qp->qm->pdev->dev;
-
- hpre = container_of(ctx->qp->qm, struct hpre, qm);
- ctx->hpre = hpre;
- ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
- if (!ctx->req_list)
- return -ENOMEM;
- ctx->key_sz = 0;
- ctx->crt_g2_mode = false;
- idr_init(&ctx->req_idr);
-
- return 0;
-}
-
static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
{
if (is_clear_all) {
- idr_destroy(&ctx->req_idr);
- kfree(ctx->req_list);
hisi_qm_free_qps(&ctx->qp, 1);
}
static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
{
- struct hpre_ctx *ctx = qp->qp_ctx;
- struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
+ struct hpre_asym_request *h_req;
struct hpre_sqe *sqe = resp;
- struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)];
- if (unlikely(!req)) {
- atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value);
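+
+ /* Recover the request pointer stashed in the sqe tag at submission. */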
+ h_req = (struct hpre_asym_request *)(uintptr_t)le64_to_cpu(sqe->tag);
+ if (unlikely(!h_req)) {
+ pr_err("Failed to get request from qp %u\n", qp->qp_id);
return;
}
- req->cb(ctx, resp);
-}
-
-static void hpre_stop_qp_and_put(struct hisi_qp *qp)
-{
- hisi_qm_stop_qp(qp);
- hisi_qm_free_qps(&qp, 1);
+ h_req->cb(h_req->ctx, resp);
}
static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type)
{
struct hisi_qp *qp;
- int ret;
+ struct hpre *hpre;
qp = hpre_get_qp_and_start(type);
if (IS_ERR(qp))
return -ENODEV;

qp->qp_ctx = ctx;
qp->req_cb = hpre_alg_cb;
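+ /*
+ * hpre_ctx_set() is folded in here: without a req_list to allocate,
+ * setup can no longer fail, so the hpre_stop_qp_and_put() error path
+ * goes away as well.
+ */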
+ spin_lock_init(&ctx->req_lock);
+ ctx->qp = qp;
+ ctx->dev = &qp->qm->pdev->dev;
+ hpre = container_of(ctx->qp->qm, struct hpre, qm);
+ ctx->hpre = hpre;
+ ctx->key_sz = 0;
+ ctx->crt_g2_mode = false;
- ret = hpre_ctx_set(ctx, qp, qp->sq_depth);
- if (ret)
- hpre_stop_qp_and_put(qp);
-
- return ret;
+ return 0;
}
static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
{
struct hpre_asym_request *h_req;
struct hpre_sqe *msg;
- int req_id;
void *tmp;
if (is_rsa) {
msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
h_req->ctx = ctx;
- req_id = hpre_add_req_to_ctx(h_req);
- if (req_id < 0)
- return -EBUSY;
-
- msg->tag = cpu_to_le16((u16)req_id);
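+ /* No software req_id any more: the request pointer itself is the tag. */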
+ hpre_dfx_add_req_time(h_req);
+ msg->tag = cpu_to_le64((uintptr_t)h_req);
return 0;
}
return -EINPROGRESS;
clear_all:
- hpre_rm_req_from_ctx(hpre_req);
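+ /* Unwind only needs to release the DMA buffers; no req_id to put back. */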
hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
return ret;
return -EINPROGRESS;
clear_all:
- hpre_rm_req_from_ctx(hpre_req);
hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
return ret;
return -EINPROGRESS;
clear_all:
- hpre_rm_req_from_ctx(hpre_req);
hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
return ret;
return 0;
}
-static bool hpre_key_is_zero(char *key, unsigned short key_sz)
+static bool hpre_key_is_zero(const char *key, unsigned short key_sz)
{
int i;
{
struct hpre_asym_request *h_req;
struct hpre_sqe *msg;
- int req_id;
void *tmp;
if (req->dst_len < ctx->key_sz << 1) {
msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
h_req->ctx = ctx;
- req_id = hpre_add_req_to_ctx(h_req);
- if (req_id < 0)
- return -EBUSY;
-
- msg->tag = cpu_to_le16((u16)req_id);
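+ /* Same direct-pointer tagging as the RSA/DH path. */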
+ hpre_dfx_add_req_time(h_req);
+ msg->tag = cpu_to_le64((uintptr_t)h_req);
return 0;
}
return -EINPROGRESS;
clear_all:
- hpre_rm_req_from_ctx(hpre_req);
hpre_ecdh_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
return ret;
}