git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
crypto: hisilicon/qm - centralize the sending locks of each module into qm
author Chenghai Huang <huangchenghai2@huawei.com>
Thu, 18 Dec 2025 13:44:46 +0000 (21:44 +0800)
committer Herbert Xu <herbert@gondor.apana.org.au>
Fri, 16 Jan 2026 06:02:06 +0000 (14:02 +0800)
When a single queue is used by multiple tfms, the protection of shared
resources by the individual module drivers is no longer sufficient.
hisi_qp_send() itself needs to be protected by a lock held in the qp.

Fixes: 5fdb4b345cfb ("crypto: hisilicon - add a lock for the qp send operation")
Signed-off-by: Chenghai Huang <huangchenghai2@huawei.com>
Signed-off-by: Weili Qian <qianweili@huawei.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
drivers/crypto/hisilicon/hpre/hpre_crypto.c
drivers/crypto/hisilicon/qm.c
drivers/crypto/hisilicon/zip/zip_crypto.c
include/linux/hisi_acc_qm.h

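Before the per-file diffs, a minimal user-space sketch of the pattern this
patch adopts may help: the lock protecting the submission queue belongs to
the qp and is taken inside the send routine itself, so callers that share
one queue across several tfms need no send lock of their own. Everything
below (demo_qp, demo_qp_send(), the fixed depth/size constants, and
pthread_mutex_t standing in for the kernel's spin_lock_bh on qp->qp_lock)
is hypothetical illustration, not code from the patch.

/*
 * Sketch only: the per-queue lock lives inside the send path, mirroring
 * the way the patch moves locking from hpre/zip into hisi_qp_send().
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define DEMO_SQ_DEPTH	8
#define DEMO_SQE_SIZE	64

struct demo_qp {
	pthread_mutex_t lock;	/* stands in for qp->qp_lock            */
	int stopped;		/* stands in for QP_STOP/QM_STOP checks */
	unsigned short sq_tail;	/* producer index into the queue        */
	unsigned int used;	/* SQEs currently in flight             */
	unsigned char sq[DEMO_SQ_DEPTH][DEMO_SQE_SIZE];
};

/*
 * The whole submit path runs under the qp's own lock, so two tfms sharing
 * this queue can call demo_qp_send() concurrently without extra locking.
 */
static int demo_qp_send(struct demo_qp *qp, const void *msg, size_t len)
{
	unsigned short tail, next;

	pthread_mutex_lock(&qp->lock);

	if (qp->stopped) {
		pthread_mutex_unlock(&qp->lock);
		return -EAGAIN;		/* queue stopped or resetting */
	}

	if (qp->used >= DEMO_SQ_DEPTH - 1) {
		pthread_mutex_unlock(&qp->lock);
		return -EBUSY;		/* no free SQE, caller may retry */
	}

	tail = qp->sq_tail;
	next = (tail + 1) % DEMO_SQ_DEPTH;

	memcpy(qp->sq[tail], msg, len < DEMO_SQE_SIZE ? len : DEMO_SQE_SIZE);
	qp->used++;
	qp->sq_tail = next;		/* the real driver rings the doorbell here */

	pthread_mutex_unlock(&qp->lock);
	return 0;
}

int main(void)
{
	struct demo_qp qp = { .lock = PTHREAD_MUTEX_INITIALIZER };
	const char req[] = "request";

	/* A caller (the hpre/zip analogue) just sends; no per-module lock. */
	printf("demo_qp_send: %d\n", demo_qp_send(&qp, req, sizeof(req)));
	return 0;
}

With the lock centralized like this, the per-module req_lock in hpre and the
req_lock in zip become redundant, which is exactly what the hunks below
remove.
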
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 4197281c8dff510e907e808208b87c6bb4bda324..220022ae7afb610a965fdf8f01828a32840c6516 100644
@@ -109,7 +109,6 @@ struct hpre_ctx {
        struct hisi_qp *qp;
        struct device *dev;
        struct hpre *hpre;
-       spinlock_t req_lock;
        unsigned int key_sz;
        bool crt_g2_mode;
        union {
@@ -410,7 +409,6 @@ static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type)
 
        qp->qp_ctx = ctx;
        qp->req_cb = hpre_alg_cb;
-       spin_lock_init(&ctx->req_lock);
        ctx->qp = qp;
        ctx->dev = &qp->qm->pdev->dev;
        hpre = container_of(ctx->qp->qm, struct hpre, qm);
@@ -478,9 +476,7 @@ static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
 
        do {
                atomic64_inc(&dfx[HPRE_SEND_CNT].value);
-               spin_lock_bh(&ctx->req_lock);
                ret = hisi_qp_send(ctx->qp, msg);
-               spin_unlock_bh(&ctx->req_lock);
                if (ret != -EBUSY)
                        break;
                atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 5c80ca04a8d42dd6d50132bbfb7c3be77c462a47..0f5e39884e4a3808775b20f1bac1256a6394177e 100644
@@ -2369,26 +2369,33 @@ EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);
 int hisi_qp_send(struct hisi_qp *qp, const void *msg)
 {
        struct hisi_qp_status *qp_status = &qp->qp_status;
-       u16 sq_tail = qp_status->sq_tail;
-       u16 sq_tail_next = (sq_tail + 1) % qp->sq_depth;
-       void *sqe = qm_get_avail_sqe(qp);
+       u16 sq_tail, sq_tail_next;
+       void *sqe;
 
+       spin_lock_bh(&qp->qp_lock);
        if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP ||
                     atomic_read(&qp->qm->status.flags) == QM_STOP ||
                     qp->is_resetting)) {
+               spin_unlock_bh(&qp->qp_lock);
                dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n");
                return -EAGAIN;
        }
 
-       if (!sqe)
+       sqe = qm_get_avail_sqe(qp);
+       if (!sqe) {
+               spin_unlock_bh(&qp->qp_lock);
                return -EBUSY;
+       }
 
+       sq_tail = qp_status->sq_tail;
+       sq_tail_next = (sq_tail + 1) % qp->sq_depth;
        memcpy(sqe, msg, qp->qm->sqe_size);
        qp->msg[sq_tail] = msg;
 
        qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0);
        atomic_inc(&qp->qp_status.used);
        qp_status->sq_tail = sq_tail_next;
+       spin_unlock_bh(&qp->qp_lock);
 
        return 0;
 }
@@ -2968,6 +2975,7 @@ static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id,
        qp->qm = qm;
        qp->qp_id = id;
 
+       spin_lock_init(&qp->qp_lock);
        spin_lock_init(&qp->backlog.lock);
        INIT_LIST_HEAD(&qp->backlog.list);
 
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index 8250a33ba58622ad6ca5205adbf28a0bc64fbc70..2f9035c016f3ffd49eb4646815dcfb34ac8980a5 100644
@@ -217,7 +217,6 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
 {
        struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
        struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
-       struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
        struct acomp_req *a_req = req->req;
        struct hisi_qp *qp = qp_ctx->qp;
        struct device *dev = &qp->qm->pdev->dev;
@@ -250,9 +249,7 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
 
        /* send command to start a task */
        atomic64_inc(&dfx->send_cnt);
-       spin_lock_bh(&req_q->req_lock);
        ret = hisi_qp_send(qp, &zip_sqe);
-       spin_unlock_bh(&req_q->req_lock);
        if (unlikely(ret < 0)) {
                atomic64_inc(&dfx->send_busy_cnt);
                ret = -EAGAIN;
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
index dd4323633d81e30ad528f0577871364b985ee86f..ef4d3a79bcb706f3719b1fc785ec2d22d8ed1b27 100644
@@ -476,6 +476,7 @@ struct hisi_qp {
        u16 pasid;
        struct uacce_queue *uacce_q;
 
+       spinlock_t qp_lock;
        struct instance_backlog backlog;
        const void **msg;
 };