accel/qaic: Synchronize access to DBC request queue head & tail pointer
author Pranjal Ramajor Asha Kanojiya <quic_pkanojiy@quicinc.com>
Tue, 7 Oct 2025 06:18:37 +0000 (08:18 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 23 Oct 2025 14:20:36 +0000 (16:20 +0200)
[ Upstream commit 52e59f7740ba23bbb664914967df9a00208ca10c ]

Two threads of the same process can potentially read and write the head and
tail pointers of the same DBC request queue in parallel. This could lead to a
race condition and corrupt the DBC request queue.

Fixes: ff13be830333 ("accel/qaic: Add datapath")
Signed-off-by: Pranjal Ramajor Asha Kanojiya <quic_pkanojiy@quicinc.com>
Signed-off-by: Youssef Samir <youssef.abdulrahman@oss.qualcomm.com>
Reviewed-by: Jeff Hugo <jeff.hugo@oss.qualcomm.com>
Reviewed-by: Carl Vanderlip <carl.vanderlip@oss.qualcomm.com>
[jhugo: Add fixes tag]
Signed-off-by: Jeff Hugo <jeff.hugo@oss.qualcomm.com>
Link: https://lore.kernel.org/r/20251007061837.206132-1-youssef.abdulrahman@oss.qualcomm.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/accel/qaic/qaic.h
drivers/accel/qaic/qaic_data.c
drivers/accel/qaic/qaic_drv.c
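
For reference, a minimal sketch of the locking pattern this change introduces:
every read-modify-write cycle on the request queue's head/tail doorbells is
serialized by dbc->req_lock, so two threads of the same process can no longer
interleave and corrupt the queue. REQHP_OFF, REQTP_OFF and the dma_bridge_chan
fields are taken from the patch below; fill_request_queue() is a hypothetical
stand-in for send_bo_list_to_device(). The lock itself is initialized per DBC
with drmm_mutex_init(), tying its lifetime to the DRM device (see the
qaic_drv.c hunk).

#include <linux/io.h>       /* readl()/writel() */
#include <linux/mutex.h>    /* mutex_lock_interruptible()/mutex_unlock() */

static int qaic_queue_request_locked(struct dma_bridge_chan *dbc)
{
	u32 head, tail;
	int ret;

	/* Serialize all head/tail doorbell updates for this DBC */
	ret = mutex_lock_interruptible(&dbc->req_lock);
	if (ret)
		return ret;

	head = readl(dbc->dbc_base + REQHP_OFF);
	tail = readl(dbc->dbc_base + REQTP_OFF);
	if (head == U32_MAX || tail == U32_MAX) {
		/* PCI link error */
		ret = -ENODEV;
		goto unlock;
	}

	/* Hypothetical helper standing in for send_bo_list_to_device():
	 * builds request elements and advances the local tail copy.
	 */
	ret = fill_request_queue(dbc, head, &tail);
	if (ret)
		goto unlock;

	/* Publish the new tail to hardware before dropping the lock */
	writel(tail, dbc->dbc_base + REQTP_OFF);

unlock:
	mutex_unlock(&dbc->req_lock);
	return ret;
}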

diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h
index 02561b6cecc64b41d9fd2985518889d6cb4509d1..2d7b3af09e28465cd1368d0edc8afd217b2ce801 100644
@@ -91,6 +91,8 @@ struct dma_bridge_chan {
         * response queue's head and tail pointer of this DBC.
         */
        void __iomem            *dbc_base;
+       /* Synchronizes access to Request queue's head and tail pointer */
+       struct mutex            req_lock;
        /* Head of list where each node is a memory handle queued in request queue */
        struct list_head        xfer_list;
        /* Synchronizes DBC readers during cleanup */
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index 43aba57b48f05f590927efc74a5924e47ec94381..265eeb4e156fc6a191b57021df59ff8f4483ca72 100644
@@ -1357,13 +1357,17 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
                goto release_ch_rcu;
        }
 
+       ret = mutex_lock_interruptible(&dbc->req_lock);
+       if (ret)
+               goto release_ch_rcu;
+
        head = readl(dbc->dbc_base + REQHP_OFF);
        tail = readl(dbc->dbc_base + REQTP_OFF);
 
        if (head == U32_MAX || tail == U32_MAX) {
                /* PCI link error */
                ret = -ENODEV;
-               goto release_ch_rcu;
+               goto unlock_req_lock;
        }
 
        queue_level = head <= tail ? tail - head : dbc->nelem - (head - tail);
@@ -1371,11 +1375,12 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
        ret = send_bo_list_to_device(qdev, file_priv, exec, args->hdr.count, is_partial, dbc,
                                     head, &tail);
        if (ret)
-               goto release_ch_rcu;
+               goto unlock_req_lock;
 
        /* Finalize commit to hardware */
        submit_ts = ktime_get_ns();
        writel(tail, dbc->dbc_base + REQTP_OFF);
+       mutex_unlock(&dbc->req_lock);
 
        update_profiling_data(file_priv, exec, args->hdr.count, is_partial, received_ts,
                              submit_ts, queue_level);
@@ -1383,6 +1388,9 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
        if (datapath_polling)
                schedule_work(&dbc->poll_work);
 
+unlock_req_lock:
+       if (ret)
+               mutex_unlock(&dbc->req_lock);
 release_ch_rcu:
        srcu_read_unlock(&dbc->ch_lock, rcu_id);
 unlock_dev_srcu:
diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
index 10e711c96a67067ea78d5c4ade92811b396b70bb..cb606c4bb8511638fb12a24b48b7481847a888fb 100644
@@ -422,6 +422,9 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de
                        return NULL;
                init_waitqueue_head(&qdev->dbc[i].dbc_release);
                INIT_LIST_HEAD(&qdev->dbc[i].bo_lists);
+               ret = drmm_mutex_init(drm, &qdev->dbc[i].req_lock);
+               if (ret)
+                       return NULL;
        }
 
        return qdev;