git.ipfire.org Git - thirdparty/qemu.git/commitdiff
hw/ufs: Fix mcq completion queue wraparound
author: Ilia Levi <ilia.levi@intel.com>
Mon, 22 Dec 2025 12:35:58 +0000 (14:35 +0200)
committer: Jeuk Kim <jeuk20.kim@samsung.com>
Mon, 2 Feb 2026 05:33:46 +0000 (14:33 +0900)
Currently, ufs_mcq_process_cq() writes to the CQ without checking whether
there is available space. This can cause CQ entries to be discarded and
overwritten. The solution is to stop writing when CQ is full and exert
backpressure on the affected SQs. This is similar to how NVMe CQs operate.

Signed-off-by: Ilia Levi <ilia.levi@intel.com>
Reviewed-by: Jeuk Kim <jeuk20.kim@samsung.com>
Signed-off-by: Jeuk Kim <jeuk20.kim@samsung.com>
hw/ufs/ufs.c
hw/ufs/ufs.h

index 9cf7eab9b033a52b599cf29396f62af05b480868..cb74cb56bc15ec6fb873097ccb1b6e5694639bd9 100644 (file)
@@ -447,6 +447,10 @@ static void ufs_mcq_process_cq(void *opaque)
 
     QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next)
     {
+        if (ufs_mcq_cq_full(u, cq->cqid)) {
+            break;
+        }
+
         ufs_dma_write_rsp_upiu(req);
 
         /* UTRD/CQE are LE; round-trip through host to keep BE correct. */
@@ -478,6 +482,12 @@ static void ufs_mcq_process_cq(void *opaque)
         tail = (tail + sizeof(req->cqe)) % (cq->size * sizeof(req->cqe));
         ufs_mcq_update_cq_tail(u, cq->cqid, tail);
 
+        if (QTAILQ_EMPTY(&req->sq->req_list) &&
+            !ufs_mcq_sq_empty(u, req->sq->sqid)) {
+            /* Dequeueing from SQ was blocked due to lack of free requests */
+            qemu_bh_schedule(req->sq->bh);
+        }
+
         ufs_clear_req(req);
         QTAILQ_INSERT_TAIL(&req->sq->req_list, req, entry);
     }
@@ -787,10 +797,18 @@ static void ufs_write_mcq_op_reg(UfsHc *u, hwaddr offset, uint32_t data,
         }
         opr->sq.tp = data;
         break;
-    case offsetof(UfsMcqOpReg, cq.hp):
+    case offsetof(UfsMcqOpReg, cq.hp): {
+        UfsCq *cq = u->cq[qid];
+
+        if (ufs_mcq_cq_full(u, qid) && !QTAILQ_EMPTY(&cq->req_list)) {
+            /* Enqueueing to CQ was blocked because it was full */
+            qemu_bh_schedule(cq->bh);
+        }
+
         opr->cq.hp = data;
         ufs_mcq_update_cq_head(u, qid, data);
         break;
+    }
     case offsetof(UfsMcqOpReg, cq_int.is):
         opr->cq_int.is &= ~data;
         break;
index 3799d97f30d3c28baee3f184291b3812fb7c5b91..13d964c5ae5ec430a98b2ef71987cb9279e9a317 100644 (file)
@@ -200,6 +200,15 @@ static inline bool ufs_mcq_cq_empty(UfsHc *u, uint32_t qid)
     return ufs_mcq_cq_tail(u, qid) == ufs_mcq_cq_head(u, qid);
 }
 
+static inline bool ufs_mcq_cq_full(UfsHc *u, uint32_t qid)
+{
+    uint32_t tail = ufs_mcq_cq_tail(u, qid);
+    uint16_t cq_size = u->cq[qid]->size;
+
+    tail = (tail + sizeof(UfsCqEntry)) % (sizeof(UfsCqEntry) * cq_size);
+    return tail == ufs_mcq_cq_head(u, qid);
+}
+
 #define TYPE_UFS "ufs"
 #define UFS(obj) OBJECT_CHECK(UfsHc, (obj), TYPE_UFS)