git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
i3c: mipi-i3c-hci: Use core helpers for DMA mapping and bounce buffering
author: Jarkko Nikula <jarkko.nikula@linux.intel.com>
Fri, 22 Aug 2025 10:56:28 +0000 (13:56 +0300)
committer: Alexandre Belloni <alexandre.belloni@bootlin.com>
Tue, 16 Sep 2025 15:06:42 +0000 (17:06 +0200)
So far only I3C private and I2C transfers have required a bounce buffer
for DMA transfers when buffer is not DMA'able.

It was observed that when the device DMA is IOMMU mapped and the receive
length is not a multiple of DWORDs (32-bit), the last DWORD is padded
with stale data from the RX FIFO, corrupting 1-3 bytes beyond the
expected data.

A similar issue, though less severe, occurs when an I3C target returns
less data than requested. In this case, the padding does not exceed the
requested number of bytes, assuming the device DMA is not IOMMU mapped.

Therefore, all I3C private transfer, CCC command payload and I2C
transfer receive buffers must be properly sized for the DMA being IOMMU
mapped. Even if those buffers are already DMA safe, their size may not
be DWORD aligned.

To prepare for the device DMA being IOMMU mapped and to address the
above issue, use helpers from I3C core for DMA mapping and bounce
buffering for all DMA transfers.

For now, require bounce buffer only when the buffer is in the
vmalloc() area to avoid unnecessary copying with CCC commands and
DMA-safe I2C transfers.

Signed-off-by: Jarkko Nikula <jarkko.nikula@linux.intel.com>
Reviewed-by: Frank Li <Frank.Li@nxp.com>
Link: https://lore.kernel.org/r/20250822105630.2820009-3-jarkko.nikula@linux.intel.com
Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
drivers/i3c/master/mipi-i3c-hci/core.c
drivers/i3c/master/mipi-i3c-hci/dma.c
drivers/i3c/master/mipi-i3c-hci/hci.h

index 60f1175f1f37ccc25f26c1924c433882ab677caa..b2977b6ac9f7a171edaebb23e558730e45db927c 100644 (file)
@@ -272,34 +272,6 @@ static int i3c_hci_daa(struct i3c_master_controller *m)
        return hci->cmd->perform_daa(hci);
 }
 
-static int i3c_hci_alloc_safe_xfer_buf(struct i3c_hci *hci,
-                                      struct hci_xfer *xfer)
-{
-       if (hci->io != &mipi_i3c_hci_dma ||
-           xfer->data == NULL || !is_vmalloc_addr(xfer->data))
-               return 0;
-
-       if (xfer->rnw)
-               xfer->bounce_buf = kzalloc(xfer->data_len, GFP_KERNEL);
-       else
-               xfer->bounce_buf = kmemdup(xfer->data,
-                                          xfer->data_len, GFP_KERNEL);
-
-       return xfer->bounce_buf == NULL ? -ENOMEM : 0;
-}
-
-static void i3c_hci_free_safe_xfer_buf(struct i3c_hci *hci,
-                                      struct hci_xfer *xfer)
-{
-       if (hci->io != &mipi_i3c_hci_dma || xfer->bounce_buf == NULL)
-               return;
-
-       if (xfer->rnw)
-               memcpy(xfer->data, xfer->bounce_buf, xfer->data_len);
-
-       kfree(xfer->bounce_buf);
-}
-
 static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
                              struct i3c_priv_xfer *i3c_xfers,
                              int nxfers)
@@ -333,9 +305,6 @@ static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
                }
                hci->cmd->prep_i3c_xfer(hci, dev, &xfer[i]);
                xfer[i].cmd_desc[0] |= CMD_0_ROC;
-               ret = i3c_hci_alloc_safe_xfer_buf(hci, &xfer[i]);
-               if (ret)
-                       goto out;
        }
        last = i - 1;
        xfer[last].cmd_desc[0] |= CMD_0_TOC;
@@ -359,9 +328,6 @@ static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
        }
 
 out:
-       for (i = 0; i < nxfers; i++)
-               i3c_hci_free_safe_xfer_buf(hci, &xfer[i]);
-
        hci_free_xfer(xfer, nxfers);
        return ret;
 }
index 491dfe70b660029ca52d25f06512b6a7afa96c82..351851859f02fe0146ced574ec4c63c8accd7d44 100644 (file)
@@ -349,9 +349,7 @@ static void hci_dma_unmap_xfer(struct i3c_hci *hci,
                xfer = xfer_list + i;
                if (!xfer->data)
                        continue;
-               dma_unmap_single(&hci->master.dev,
-                                xfer->data_dma, xfer->data_len,
-                                xfer->rnw ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+               i3c_master_dma_unmap_single(xfer->dma);
        }
 }
 
@@ -362,7 +360,6 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
        struct hci_rh_data *rh;
        unsigned int i, ring, enqueue_ptr;
        u32 op1_val, op2_val;
-       void *buf;
 
        /* For now we only use ring 0 */
        ring = 0;
@@ -373,6 +370,8 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
        for (i = 0; i < n; i++) {
                struct hci_xfer *xfer = xfer_list + i;
                u32 *ring_data = rh->xfer + rh->xfer_struct_sz * enqueue_ptr;
+               enum dma_data_direction dir = xfer->rnw ? DMA_FROM_DEVICE :
+                                                         DMA_TO_DEVICE;
 
                /* store cmd descriptor */
                *ring_data++ = xfer->cmd_desc[0];
@@ -391,21 +390,17 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
 
                /* 2nd and 3rd words of Data Buffer Descriptor Structure */
                if (xfer->data) {
-                       buf = xfer->bounce_buf ? xfer->bounce_buf : xfer->data;
-                       xfer->data_dma =
-                               dma_map_single(&hci->master.dev,
-                                              buf,
-                                              xfer->data_len,
-                                              xfer->rnw ?
-                                                 DMA_FROM_DEVICE :
-                                                 DMA_TO_DEVICE);
-                       if (dma_mapping_error(&hci->master.dev,
-                                             xfer->data_dma)) {
+                       xfer->dma = i3c_master_dma_map_single(&hci->master.dev,
+                                                             xfer->data,
+                                                             xfer->data_len,
+                                                             false,
+                                                             dir);
+                       if (!xfer->dma) {
                                hci_dma_unmap_xfer(hci, xfer_list, i);
                                return -ENOMEM;
                        }
-                       *ring_data++ = lower_32_bits(xfer->data_dma);
-                       *ring_data++ = upper_32_bits(xfer->data_dma);
+                       *ring_data++ = lower_32_bits(xfer->dma->addr);
+                       *ring_data++ = upper_32_bits(xfer->dma->addr);
                } else {
                        *ring_data++ = 0;
                        *ring_data++ = 0;
index 69ea1d10414b8c803bb8fdd1b064235e8deb903c..33bc4906df1ff7511d6b802d1ea807c03a05a556 100644 (file)
@@ -94,8 +94,7 @@ struct hci_xfer {
                };
                struct {
                        /* DMA specific */
-                       dma_addr_t data_dma;
-                       void *bounce_buf;
+                       struct i3c_dma *dma;
                        int ring_number;
                        int ring_entry;
                };