]> git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
i3c: mipi-i3c-hci: Fix race in DMA ring enqueue for parallel xfers
authorAdrian Hunter <adrian.hunter@intel.com>
Fri, 6 Mar 2026 07:24:42 +0000 (09:24 +0200)
committerAlexandre Belloni <alexandre.belloni@bootlin.com>
Wed, 11 Mar 2026 21:10:01 +0000 (22:10 +0100)
The I3C subsystem allows multiple transfers to be queued concurrently.
However, the MIPI I3C HCI driver's DMA enqueue path, hci_dma_queue_xfer(),
lacks sufficient serialization.

In particular, the allocation of the enqueue_ptr and its subsequent update
in the RING_OPERATION1 register must be done atomically.  Otherwise, for
example, it would be possible for 2 transfers to be allocated the same
enqueue_ptr.

Extend the use of the existing spinlock for that purpose.  Keep a count of
the number of xfers enqueued so that it is easy to determine if the ring
has enough space.

Fixes: 9ad9a52cce282 ("i3c/master: introduce the mipi-i3c-hci driver")
Cc: stable@vger.kernel.org
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Reviewed-by: Frank Li <Frank.Li@nxp.com>
Link: https://patch.msgid.link/20260306072451.11131-6-adrian.hunter@intel.com
Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
drivers/i3c/master/mipi-i3c-hci/dma.c

index 2442cedd5c2a8f3672b13eec78140d8960539dab..74b255ad6d0f96418ca4e276a50739bb7bb8bf9d 100644 (file)
@@ -129,7 +129,7 @@ struct hci_rh_data {
        dma_addr_t xfer_dma, resp_dma, ibi_status_dma, ibi_data_dma;
        unsigned int xfer_entries, ibi_status_entries, ibi_chunks_total;
        unsigned int xfer_struct_sz, resp_struct_sz, ibi_status_sz, ibi_chunk_sz;
-       unsigned int done_ptr, ibi_chunk_ptr;
+       unsigned int done_ptr, ibi_chunk_ptr, xfer_space;
        struct hci_xfer **src_xfers;
        struct completion op_done;
 };
@@ -260,6 +260,7 @@ ring_ready:
 
        rh->done_ptr = 0;
        rh->ibi_chunk_ptr = 0;
+       rh->xfer_space = rh->xfer_entries;
 }
 
 static void hci_dma_init_rings(struct i3c_hci *hci)
@@ -470,7 +471,7 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
        struct hci_rings_data *rings = hci->io_data;
        struct hci_rh_data *rh;
        unsigned int i, ring, enqueue_ptr;
-       u32 op1_val, op2_val;
+       u32 op1_val;
        int ret;
 
        ret = hci_dma_map_xfer_list(hci, rings->sysdev, xfer_list, n);
@@ -481,6 +482,14 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
        ring = 0;
        rh = &rings->headers[ring];
 
+       spin_lock_irq(&hci->lock);
+
+       if (n > rh->xfer_space) {
+               spin_unlock_irq(&hci->lock);
+               hci_dma_unmap_xfer(hci, xfer_list, n);
+               return -EBUSY;
+       }
+
        op1_val = rh_reg_read(RING_OPERATION1);
        enqueue_ptr = FIELD_GET(RING_OP1_CR_ENQ_PTR, op1_val);
        for (i = 0; i < n; i++) {
@@ -518,22 +527,10 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
                xfer->ring_entry = enqueue_ptr;
 
                enqueue_ptr = (enqueue_ptr + 1) % rh->xfer_entries;
-
-               /*
-                * We may update the hardware view of the enqueue pointer
-                * only if we didn't reach its dequeue pointer.
-                */
-               op2_val = rh_reg_read(RING_OPERATION2);
-               if (enqueue_ptr == FIELD_GET(RING_OP2_CR_DEQ_PTR, op2_val)) {
-                       /* the ring is full */
-                       hci_dma_unmap_xfer(hci, xfer_list, n);
-                       return -EBUSY;
-               }
        }
 
-       /* take care to update the hardware enqueue pointer atomically */
-       spin_lock_irq(&hci->lock);
-       op1_val = rh_reg_read(RING_OPERATION1);
+       rh->xfer_space -= n;
+
        op1_val &= ~RING_OP1_CR_ENQ_PTR;
        op1_val |= FIELD_PREP(RING_OP1_CR_ENQ_PTR, enqueue_ptr);
        rh_reg_write(RING_OPERATION1, op1_val);
@@ -601,6 +598,7 @@ static void hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh)
 {
        u32 op1_val, op2_val, resp, *ring_resp;
        unsigned int tid, done_ptr = rh->done_ptr;
+       unsigned int done_cnt = 0;
        struct hci_xfer *xfer;
 
        for (;;) {
@@ -632,10 +630,12 @@ static void hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh)
 
                done_ptr = (done_ptr + 1) % rh->xfer_entries;
                rh->done_ptr = done_ptr;
+               done_cnt += 1;
        }
 
        /* take care to update the software dequeue pointer atomically */
        spin_lock(&hci->lock);
+       rh->xfer_space += done_cnt;
        op1_val = rh_reg_read(RING_OPERATION1);
        op1_val &= ~RING_OP1_CR_SW_DEQ_PTR;
        op1_val |= FIELD_PREP(RING_OP1_CR_SW_DEQ_PTR, done_ptr);