git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
i3c: mipi-i3c-hci: Consolidate spinlocks
author: Adrian Hunter <adrian.hunter@intel.com>
Fri, 6 Mar 2026 07:24:41 +0000 (09:24 +0200)
committer: Alexandre Belloni <alexandre.belloni@bootlin.com>
Wed, 11 Mar 2026 21:10:01 +0000 (22:10 +0100)
The MIPI I3C HCI driver currently uses separate spinlocks for different
contexts (PIO vs. DMA rings).  This split is unnecessary and complicates
upcoming fixes.  The driver does not support concurrent PIO and DMA
operation, and it only supports a single DMA ring, so a single lock is
sufficient for all paths.

Introduce a unified spinlock in struct i3c_hci, switch both PIO and DMA
code to use it, and remove the per-context locks.

No functional change is intended in this patch.

Fixes: 9ad9a52cce282 ("i3c/master: introduce the mipi-i3c-hci driver")
Cc: stable@vger.kernel.org
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Reviewed-by: Frank Li <Frank.Li@nxp.com>
Link: https://patch.msgid.link/20260306072451.11131-5-adrian.hunter@intel.com
Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
drivers/i3c/master/mipi-i3c-hci/core.c
drivers/i3c/master/mipi-i3c-hci/dma.c
drivers/i3c/master/mipi-i3c-hci/hci.h
drivers/i3c/master/mipi-i3c-hci/pio.c

index 4877a321edf990d00d2efee4c825ffc2ab66420d..faf5eae2409fffb4cbf0855a95cf63ae6697aaf9 100644 (file)
@@ -926,6 +926,8 @@ static int i3c_hci_probe(struct platform_device *pdev)
        if (!hci)
                return -ENOMEM;
 
+       spin_lock_init(&hci->lock);
+
        /*
         * Multi-bus instances share the same MMIO address range, but not
         * necessarily in separate contiguous sub-ranges. To avoid overlapping
index ba451f026386283a2e5898831da77a077af87c80..2442cedd5c2a8f3672b13eec78140d8960539dab 100644 (file)
@@ -131,7 +131,6 @@ struct hci_rh_data {
        unsigned int xfer_struct_sz, resp_struct_sz, ibi_status_sz, ibi_chunk_sz;
        unsigned int done_ptr, ibi_chunk_ptr;
        struct hci_xfer **src_xfers;
-       spinlock_t lock;
        struct completion op_done;
 };
 
@@ -344,7 +343,6 @@ static int hci_dma_init(struct i3c_hci *hci)
                        goto err_out;
                rh = &rings->headers[i];
                rh->regs = hci->base_regs + offset;
-               spin_lock_init(&rh->lock);
                init_completion(&rh->op_done);
 
                rh->xfer_entries = XFER_RING_ENTRIES;
@@ -534,12 +532,12 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
        }
 
        /* take care to update the hardware enqueue pointer atomically */
-       spin_lock_irq(&rh->lock);
+       spin_lock_irq(&hci->lock);
        op1_val = rh_reg_read(RING_OPERATION1);
        op1_val &= ~RING_OP1_CR_ENQ_PTR;
        op1_val |= FIELD_PREP(RING_OP1_CR_ENQ_PTR, enqueue_ptr);
        rh_reg_write(RING_OPERATION1, op1_val);
-       spin_unlock_irq(&rh->lock);
+       spin_unlock_irq(&hci->lock);
 
        return 0;
 }
@@ -637,12 +635,12 @@ static void hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh)
        }
 
        /* take care to update the software dequeue pointer atomically */
-       spin_lock(&rh->lock);
+       spin_lock(&hci->lock);
        op1_val = rh_reg_read(RING_OPERATION1);
        op1_val &= ~RING_OP1_CR_SW_DEQ_PTR;
        op1_val |= FIELD_PREP(RING_OP1_CR_SW_DEQ_PTR, done_ptr);
        rh_reg_write(RING_OPERATION1, op1_val);
-       spin_unlock(&rh->lock);
+       spin_unlock(&hci->lock);
 }
 
 static int hci_dma_request_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev,
@@ -823,12 +821,12 @@ static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
 
 done:
        /* take care to update the ibi dequeue pointer atomically */
-       spin_lock(&rh->lock);
+       spin_lock(&hci->lock);
        op1_val = rh_reg_read(RING_OPERATION1);
        op1_val &= ~RING_OP1_IBI_DEQ_PTR;
        op1_val |= FIELD_PREP(RING_OP1_IBI_DEQ_PTR, deq_ptr);
        rh_reg_write(RING_OPERATION1, op1_val);
-       spin_unlock(&rh->lock);
+       spin_unlock(&hci->lock);
 
        /* update the chunk pointer */
        rh->ibi_chunk_ptr += ibi_chunks;
index 337b7ab1cb06e708d8ac16bee12f1427ce77a7fd..f1dd502c071f509bd1da7f0814375d74cf4f857a 100644 (file)
@@ -50,6 +50,7 @@ struct i3c_hci {
        const struct hci_io_ops *io;
        void *io_data;
        const struct hci_cmd_ops *cmd;
+       spinlock_t lock;
        atomic_t next_cmd_tid;
        bool irq_inactive;
        u32 caps;
index f8825ac814088c31107ec152c38ea6432218dea3..02866c2237fa1b43dfa52b6d1f4ee1bc762ab1a0 100644 (file)
@@ -123,7 +123,6 @@ struct hci_pio_ibi_data {
 };
 
 struct hci_pio_data {
-       spinlock_t lock;
        struct hci_xfer *curr_xfer, *xfer_queue;
        struct hci_xfer *curr_rx, *rx_queue;
        struct hci_xfer *curr_tx, *tx_queue;
@@ -212,7 +211,6 @@ static int hci_pio_init(struct i3c_hci *hci)
                return -ENOMEM;
 
        hci->io_data = pio;
-       spin_lock_init(&pio->lock);
 
        __hci_pio_init(hci, &size_val);
 
@@ -631,7 +629,7 @@ static int hci_pio_queue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
                xfer[i].data_left = xfer[i].data_len;
        }
 
-       spin_lock_irq(&pio->lock);
+       spin_lock_irq(&hci->lock);
        prev_queue_tail = pio->xfer_queue;
        pio->xfer_queue = &xfer[n - 1];
        if (pio->curr_xfer) {
@@ -645,7 +643,7 @@ static int hci_pio_queue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
                        pio_reg_read(INTR_STATUS),
                        pio_reg_read(INTR_SIGNAL_ENABLE));
        }
-       spin_unlock_irq(&pio->lock);
+       spin_unlock_irq(&hci->lock);
        return 0;
 }
 
@@ -716,14 +714,14 @@ static bool hci_pio_dequeue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int
        struct hci_pio_data *pio = hci->io_data;
        int ret;
 
-       spin_lock_irq(&pio->lock);
+       spin_lock_irq(&hci->lock);
        dev_dbg(&hci->master.dev, "n=%d status=%#x/%#x", n,
                pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
        dev_dbg(&hci->master.dev, "main_status = %#x/%#x",
                readl(hci->base_regs + 0x20), readl(hci->base_regs + 0x28));
 
        ret = hci_pio_dequeue_xfer_common(hci, pio, xfer, n);
-       spin_unlock_irq(&pio->lock);
+       spin_unlock_irq(&hci->lock);
        return ret;
 }
 
@@ -1016,13 +1014,13 @@ static bool hci_pio_irq_handler(struct i3c_hci *hci)
        struct hci_pio_data *pio = hci->io_data;
        u32 status;
 
-       spin_lock(&pio->lock);
+       spin_lock(&hci->lock);
        status = pio_reg_read(INTR_STATUS);
        dev_dbg(&hci->master.dev, "PIO_INTR_STATUS %#x/%#x",
                status, pio->enabled_irqs);
        status &= pio->enabled_irqs | STAT_LATENCY_WARNINGS;
        if (!status) {
-               spin_unlock(&pio->lock);
+               spin_unlock(&hci->lock);
                return false;
        }
 
@@ -1058,7 +1056,7 @@ static bool hci_pio_irq_handler(struct i3c_hci *hci)
        pio_reg_write(INTR_SIGNAL_ENABLE, pio->enabled_irqs);
        dev_dbg(&hci->master.dev, "PIO_INTR_STATUS %#x/%#x",
                pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
-       spin_unlock(&pio->lock);
+       spin_unlock(&hci->lock);
        return true;
 }