dmaengine: xilinx: dpdma: Add support for cyclic dma mode
author Rohit Visavalia <rohit.visavalia@xilinx.com>
Wed, 21 Aug 2024 13:40:43 +0000 (06:40 -0700)
committer Vinod Koul <vkoul@kernel.org>
Wed, 28 Aug 2024 17:59:32 +0000 (23:29 +0530)
This patch adds support for DPDMA cyclic DMA mode.
Cyclic DMA transfers are required for audio streaming.

Signed-off-by: Rohit Visavalia <rohit.visavalia@amd.com>
Signed-off-by: Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
Signed-off-by: Vishal Sagar <vishal.sagar@amd.com>
Reviewed-by: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
Link: https://lore.kernel.org/r/20240821134043.2885506-1-vishal.sagar@amd.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
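
For readers unfamiliar with the dmaengine client side, here is a minimal consumer sketch (not part of this patch) showing how an audio driver might drive the new cyclic mode. The function names audio_period_done() and audio_start_cyclic(), and the per-period callback, are illustrative assumptions; only standard dmaengine client API calls are used.

/* Hypothetical consumer sketch: start a cyclic (ring) transfer on a DPDMA
 * channel, e.g. for audio streaming. Not part of this patch.
 */
#include <linux/dmaengine.h>
#include <linux/errno.h>

static void audio_period_done(void *param)
{
        /* Per-period completion callback; when and how often it fires
         * depends on how the driver signals completion for cyclic
         * descriptors.
         */
}

static int audio_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
                              size_t buf_len, size_t period_len)
{
        struct dma_async_tx_descriptor *tx;

        /* buf_len must be a whole number of periods and the buffer must be
         * suitably aligned, otherwise the prep call below returns NULL.
         */
        tx = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
                                       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
        if (!tx)
                return -EINVAL;

        tx->callback = audio_period_done;
        tx->callback_param = NULL;

        dmaengine_submit(tx);
        dma_async_issue_pending(chan);

        return 0;
}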
drivers/dma/xilinx/xilinx_dpdma.c

index 36bd4825d389d5c2527916771393fb7fd0b34b34..77b5f7da7f1d0a9c782d34a630a2cb59017ad393 100644
@@ -670,6 +670,84 @@ static void xilinx_dpdma_chan_free_tx_desc(struct virt_dma_desc *vdesc)
        kfree(desc);
 }
 
+/**
+ * xilinx_dpdma_chan_prep_cyclic - Prepare a cyclic dma descriptor
+ * @chan: DPDMA channel
+ * @buf_addr: buffer address
+ * @buf_len: buffer length
+ * @period_len: length of each period in bytes
+ * @flags: tx flags argument passed in to prepare function
+ *
+ * Prepare a tx descriptor including internal software/hardware descriptors
+ * for the given cyclic transaction.
+ *
+ * Return: A DMA async tx descriptor on success, or NULL on failure.
+ */
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_chan_prep_cyclic(struct xilinx_dpdma_chan *chan,
+                             dma_addr_t buf_addr, size_t buf_len,
+                             size_t period_len, unsigned long flags)
+{
+       struct xilinx_dpdma_tx_desc *tx_desc;
+       struct xilinx_dpdma_sw_desc *sw_desc, *last = NULL;
+       unsigned int periods = buf_len / period_len;
+       unsigned int i;
+
+       tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
+       if (!tx_desc)
+               return NULL;
+
+       for (i = 0; i < periods; i++) {
+               struct xilinx_dpdma_hw_desc *hw_desc;
+
+               if (!IS_ALIGNED(buf_addr, XILINX_DPDMA_ALIGN_BYTES)) {
+                       dev_err(chan->xdev->dev,
+                               "buffer should be aligned at %d B\n",
+                               XILINX_DPDMA_ALIGN_BYTES);
+                       goto error;
+               }
+
+               sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
+               if (!sw_desc)
+                       goto error;
+
+               xilinx_dpdma_sw_desc_set_dma_addrs(chan->xdev, sw_desc, last,
+                                                  &buf_addr, 1);
+               hw_desc = &sw_desc->hw;
+               hw_desc->xfer_size = period_len;
+               hw_desc->hsize_stride =
+                       FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK,
+                                  period_len) |
+                       FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK,
+                                  period_len);
+               hw_desc->control = XILINX_DPDMA_DESC_CONTROL_PREEMBLE |
+                                  XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE |
+                                  XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
+
+               list_add_tail(&sw_desc->node, &tx_desc->descriptors);
+
+               buf_addr += period_len;
+               last = sw_desc;
+       }
+
+       sw_desc = list_first_entry(&tx_desc->descriptors,
+                                  struct xilinx_dpdma_sw_desc, node);
+       last->hw.next_desc = lower_32_bits(sw_desc->dma_addr);
+       if (chan->xdev->ext_addr)
+               last->hw.addr_ext |=
+                       FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK,
+                                  upper_32_bits(sw_desc->dma_addr));
+
+       last->hw.control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;
+
+       return vchan_tx_prep(&chan->vchan, &tx_desc->vdesc, flags);
+
+error:
+       xilinx_dpdma_chan_free_tx_desc(&tx_desc->vdesc);
+
+       return NULL;
+}
+
 /**
  * xilinx_dpdma_chan_prep_interleaved_dma - Prepare an interleaved dma
  *                                         descriptor
@@ -1189,6 +1267,23 @@ out_unlock:
 /* -----------------------------------------------------------------------------
  * DMA Engine Operations
  */
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t buf_addr,
+                            size_t buf_len, size_t period_len,
+                            enum dma_transfer_direction direction,
+                            unsigned long flags)
+{
+       struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+
+       if (direction != DMA_MEM_TO_DEV)
+               return NULL;
+
+       if (buf_len % period_len)
+               return NULL;
+
+       return xilinx_dpdma_chan_prep_cyclic(chan, buf_addr, buf_len,
+                                            period_len, flags);
+}
 
 static struct dma_async_tx_descriptor *
 xilinx_dpdma_prep_interleaved_dma(struct dma_chan *dchan,
@@ -1672,6 +1767,7 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)
 
        dma_cap_set(DMA_SLAVE, ddev->cap_mask);
        dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
+       dma_cap_set(DMA_CYCLIC, ddev->cap_mask);
        dma_cap_set(DMA_INTERLEAVE, ddev->cap_mask);
        dma_cap_set(DMA_REPEAT, ddev->cap_mask);
        dma_cap_set(DMA_LOAD_EOT, ddev->cap_mask);
@@ -1679,6 +1775,7 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)
 
        ddev->device_alloc_chan_resources = xilinx_dpdma_alloc_chan_resources;
        ddev->device_free_chan_resources = xilinx_dpdma_free_chan_resources;
+       ddev->device_prep_dma_cyclic = xilinx_dpdma_prep_dma_cyclic;
        ddev->device_prep_interleaved_dma = xilinx_dpdma_prep_interleaved_dma;
        /* TODO: Can we achieve better granularity ? */
        ddev->device_tx_status = dma_cookie_status;
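
As a worked example of the checks in the new device_prep_dma_cyclic path above (the numbers are made up for illustration):

/* Illustrative sizing only; not part of this patch. */
size_t buf_len    = 16 * 1024;                 /* whole ring buffer          */
size_t period_len =  4 * 1024;                 /* one period per completion  */
unsigned int periods = buf_len / period_len;   /* -> 4 sw/hw descriptors     */
/*
 * buf_len % period_len == 0 and the direction is DMA_MEM_TO_DEV, so
 * xilinx_dpdma_prep_dma_cyclic() accepts the request. It builds one
 * descriptor per period, and the last descriptor's next_desc points back
 * at the first, closing the ring; the last descriptor also carries
 * XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME.
 */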