for (i = 0; i < qp_used; i++) {
pos = (i + cur_head) % sq_depth;
qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos));
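+ /* Keep the CQ head in step with each SQE handed to the callback */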
+ qm_cq_head_update(qp);
atomic_dec(&qp->qp_status.used);
}
}
return -EBUSY;
memcpy(sqe, msg, qp->qm->sqe_size);
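+ /* Save the caller's message pointer so the completion callback can map this SQE back to its request */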
+ qp->msg[sq_tail] = msg;
qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0);
atomic_inc(&qp->qp_status.used);
static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num)
{
struct device *dev = &qm->pdev->dev;
- struct qm_dma *qdma;
+ struct hisi_qp *qp;
int i;
for (i = num - 1; i >= 0; i--) {
- qdma = &qm->qp_array[i].qdma;
- dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma);
+ qp = &qm->qp_array[i];
+ dma_free_coherent(dev, qp->qdma.size, qp->qdma.va, qp->qdma.dma);
+ kfree(qp->msg);
kfree(qm->poll_data[i].qp_finish_id);
}
return -ENOMEM;
qp = &qm->qp_array[id];
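+ /* One saved message pointer per SQE slot */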
+ qp->msg = kmalloc_array(sq_depth, sizeof(void *), GFP_KERNEL);
+ if (!qp->msg)
+ goto err_free_qp_finish_id;
+
qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma,
GFP_KERNEL);
if (!qp->qdma.va)
- goto err_free_qp_finish_id;
+ goto err_free_qp_msg;
qp->sqe = qp->qdma.va;
qp->sqe_dma = qp->qdma.dma;
qp->qm = qm;
qp->qp_id = id;
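+ /* Backlog of requests that could not be queued to a full hardware SQ */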
+ spin_lock_init(&qp->backlog.lock);
+ INIT_LIST_HEAD(&qp->backlog.list);
+
return 0;
+err_free_qp_msg:
+ kfree(qp->msg);
err_free_qp_finish_id:
kfree(qm->poll_data[id].qp_finish_id);
return ret;
#define SEC_AUTH_CIPHER_V3 0x40
#define SEC_FLAG_OFFSET 7
#define SEC_FLAG_MASK 0x0780
-#define SEC_TYPE_MASK 0x0F
#define SEC_DONE_MASK 0x0001
#define SEC_ICV_MASK 0x000E
spin_unlock_bh(&qp_ctx->id_lock);
}
-static u8 pre_parse_finished_bd(struct bd_status *status, void *resp)
+static void pre_parse_finished_bd(struct bd_status *status, void *resp)
{
struct sec_sqe *bd = resp;
SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
status->tag = le16_to_cpu(bd->type2.tag);
status->err_type = bd->type2.error_type;
-
- return bd->type_cipher_auth & SEC_TYPE_MASK;
}
-static u8 pre_parse_finished_bd3(struct bd_status *status, void *resp)
+static void pre_parse_finished_bd3(struct bd_status *status, void *resp)
{
struct sec_sqe3 *bd3 = resp;
SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
status->tag = le64_to_cpu(bd3->tag);
status->err_type = bd3->error_type;
-
- return le32_to_cpu(bd3->bd_param) & SEC_TYPE_MASK;
}
static int sec_cb_status_check(struct sec_req *req,
struct sec_req *req, *tmp;
int ret;
- list_for_each_entry_safe(req, tmp, &qp_ctx->backlog.list, list) {
+ list_for_each_entry_safe(req, tmp, &qp_ctx->qp->backlog.list, list) {
list_del(&req->list);
ctx->req_op->buf_unmap(ctx, req);
if (req->req_id >= 0)
static void sec_alg_send_backlog(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
{
+ struct hisi_qp *qp = qp_ctx->qp;
struct sec_req *req, *tmp;
int ret;
- spin_lock_bh(&qp_ctx->backlog.lock);
- list_for_each_entry_safe(req, tmp, &qp_ctx->backlog.list, list) {
+ spin_lock_bh(&qp->backlog.lock);
+ list_for_each_entry_safe(req, tmp, &qp->backlog.list, list) {
ret = qp_send_message(req);
switch (ret) {
case -EINPROGRESS:
}
unlock:
- spin_unlock_bh(&qp_ctx->backlog.lock);
+ spin_unlock_bh(&qp->backlog.lock);
}
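+/* Completion handler for queues using BD type 2 */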
static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
- struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
- struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx;
- u8 type_supported = qp_ctx->ctx->type_supported;
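+ /* Recover the request from the message pointer saved at send time */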
+ const struct sec_sqe *sqe = qp->msg[qp->qp_status.cq_head];
+ struct sec_req *req = container_of(sqe, struct sec_req, sec_sqe);
+ struct sec_ctx *ctx = req->ctx;
+ struct sec_dfx *dfx = &ctx->sec->debug.dfx;
struct bd_status status;
- struct sec_ctx *ctx;
- struct sec_req *req;
int err;
- u8 type;
- if (type_supported == SEC_BD_TYPE2) {
- type = pre_parse_finished_bd(&status, resp);
- req = qp_ctx->req_list[status.tag];
- } else {
- type = pre_parse_finished_bd3(&status, resp);
- req = (void *)(uintptr_t)status.tag;
- }
+ pre_parse_finished_bd(&status, resp);
- if (unlikely(type != type_supported)) {
- atomic64_inc(&dfx->err_bd_cnt);
- pr_err("err bd type [%u]\n", type);
- return;
- }
+ req->err_type = status.err_type;
+ err = sec_cb_status_check(req, &status);
+ if (err)
+ atomic64_inc(&dfx->done_flag_cnt);
- if (unlikely(!req)) {
- atomic64_inc(&dfx->invalid_req_cnt);
- atomic_inc(&qp->qp_status.used);
- return;
- }
+ atomic64_inc(&dfx->recv_cnt);
+ ctx->req_op->buf_unmap(ctx, req);
+ ctx->req_op->callback(ctx, req, err);
+}
+
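+/* Completion handler for queues using BD type 3 */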
+static void sec_req_cb3(struct hisi_qp *qp, void *resp)
+{
+ struct bd_status status;
+ struct sec_ctx *ctx;
+ struct sec_dfx *dfx;
+ struct sec_req *req;
+ int err;
+
+ pre_parse_finished_bd3(&status, resp);
+
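+ /* For type 3 BDs the request pointer travels in the 64-bit tag */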
+ req = (void *)(uintptr_t)status.tag;
req->err_type = status.err_type;
ctx = req->ctx;
+ dfx = &ctx->sec->debug.dfx;
+
err = sec_cb_status_check(req, &status);
if (err)
atomic64_inc(&dfx->done_flag_cnt);
atomic64_inc(&dfx->recv_cnt);
ctx->req_op->buf_unmap(ctx, req);
-
ctx->req_op->callback(ctx, req, err);
}
static int sec_alg_try_enqueue(struct sec_req *req)
{
+ struct hisi_qp *qp = req->qp_ctx->qp;
+
/* Check if any request is already backlogged */
- if (!list_empty(&req->backlog->list))
+ if (!list_empty(&qp->backlog.list))
return -EBUSY;
/* Try to enqueue to HW ring */
static int sec_alg_send_message_maybacklog(struct sec_req *req)
{
+ struct hisi_qp *qp = req->qp_ctx->qp;
int ret;
ret = sec_alg_try_enqueue(req);
if (ret != -EBUSY)
return ret;
- spin_lock_bh(&req->backlog->lock);
+ spin_lock_bh(&qp->backlog.lock);
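+ /* Retry under the backlog lock in case a concurrent drain freed SQ space */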
ret = sec_alg_try_enqueue(req);
if (ret == -EBUSY)
- list_add_tail(&req->list, &req->backlog->list);
- spin_unlock_bh(&req->backlog->lock);
+ list_add_tail(&req->list, &qp->backlog.list);
+ spin_unlock_bh(&qp->backlog.lock);
return ret;
}
qp_ctx->qp = qp;
qp_ctx->ctx = ctx;
- qp->req_cb = sec_req_cb;
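+ /* Type 3 completions carry the request in the BD tag; type 2 uses the saved message pointer */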
+ if (ctx->type_supported == SEC_BD_TYPE3)
+ qp->req_cb = sec_req_cb3;
+ else
+ qp->req_cb = sec_req_cb;
spin_lock_init(&qp_ctx->req_lock);
idr_init(&qp_ctx->req_idr);
- spin_lock_init(&qp_ctx->backlog.lock);
spin_lock_init(&qp_ctx->id_lock);
- INIT_LIST_HEAD(&qp_ctx->backlog.list);
qp_ctx->send_head = 0;
ret = sec_alloc_qp_ctx_resource(ctx, qp_ctx);
} while (req->req_id < 0 && ++i < ctx->sec->ctx_q_num);
req->qp_ctx = qp_ctx;
- req->backlog = &qp_ctx->backlog;
return 0;
}