]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
dmaengine: sh: rz-dmac: Add device_tx_status() callback
authorBiju Das <biju.das.jz@bp.renesas.com>
Mon, 16 Mar 2026 13:32:51 +0000 (15:32 +0200)
committerVinod Koul <vkoul@kernel.org>
Tue, 17 Mar 2026 10:42:11 +0000 (16:12 +0530)
The RZ/G2L SCIFA driver uses dmaengine_prep_slave_sg() to enqueue DMA
transfers and implements a timeout mechanism on RX to handle cases where
a DMA transfer does not complete. The timeout is implemented using an
hrtimer.

In the hrtimer callback, dmaengine_tx_status() is called (along with
dmaengine_pause()) to retrieve the transfer residue and handle incomplete
DMA transfers.

Add support for the device_tx_status() callback.

Co-developed-by: Long Luu <long.luu.ur@renesas.com>
Signed-off-by: Long Luu <long.luu.ur@renesas.com>
Signed-off-by: Biju Das <biju.das.jz@bp.renesas.com>
Co-developed-by: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
Signed-off-by: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
Reviewed-by: Frank Li <Frank.Li@nxp.com>
Link: https://patch.msgid.link/20260316133252.240348-8-claudiu.beznea.uj@bp.renesas.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
drivers/dma/sh/rz-dmac.c

index 6bfa77844e02a3b257544a7c2a4de3d9f32b0da5..4f6f9f4bacca265bda5b8ad1f920a7758430d72d 100644 (file)
@@ -124,10 +124,12 @@ struct rz_dmac {
  * Registers
  */
 
+#define CRTB                           0x0020
 #define CHSTAT                         0x0024
 #define CHCTRL                         0x0028
 #define CHCFG                          0x002c
 #define NXLA                           0x0038
+#define CRLA                           0x003c
 
 #define DCTRL                          0x0000
 
@@ -676,6 +678,145 @@ static void rz_dmac_device_synchronize(struct dma_chan *chan)
        rz_dmac_set_dma_req_no(dmac, channel->index, dmac->info->default_dma_req_no);
 }
 
+/*
+ * rz_dmac_get_next_lmdesc - advance to the following link-mode descriptor,
+ * wrapping back to @base once the end of the DMAC_NR_LMDESC-entry ring is
+ * passed.
+ */
+static struct rz_lmdesc *
+rz_dmac_get_next_lmdesc(struct rz_lmdesc *base, struct rz_lmdesc *lmdesc)
+{
+       lmdesc++;
+       if (lmdesc >= base + DMAC_NR_LMDESC)
+               lmdesc = base;
+
+       return lmdesc;
+}
+
+/*
+ * rz_dmac_calculate_residue_bytes_in_vd - bytes still pending in the
+ * in-flight virtual descriptor, beyond the lmdesc the hardware is working on.
+ * @channel: channel owning the lmdesc ring
+ * @crla: snapshot of the CRLA register (current link address)
+ *
+ * Returns the summed per-lmdesc byte counts (tb) of the not-yet-processed
+ * entries, or 0 when @crla matches no lmdesc in the ring.
+ */
+static u32 rz_dmac_calculate_residue_bytes_in_vd(struct rz_dmac_chan *channel, u32 crla)
+{
+       struct rz_lmdesc *lmdesc = channel->lmdesc.head;
+       struct dma_chan *chan = &channel->vc.chan;
+       struct rz_dmac *dmac = to_rz_dmac(chan->device);
+       u32 residue = 0, i = 0;
+
+       /*
+        * Locate the lmdesc currently being processed: its nxla (next link
+        * address) equals the CRLA snapshot. Give up with residue 0 if a full
+        * walk of the ring finds no match (transfer finished meanwhile).
+        */
+       while (lmdesc->nxla != crla) {
+               lmdesc = rz_dmac_get_next_lmdesc(channel->lmdesc.base, lmdesc);
+               if (++i >= DMAC_NR_LMDESC)
+                       return 0;
+       }
+
+       /* Calculate residue from next lmdesc to end of virtual desc */
+       /*
+        * NOTE(review): this assumes CHCFG_DEM stays set on every lmdesc of a
+        * virtual descriptor except the final one -- confirm against the
+        * lmdesc setup in the prep callbacks.
+        */
+       while (lmdesc->chcfg & CHCFG_DEM) {
+               residue += lmdesc->tb;
+               lmdesc = rz_dmac_get_next_lmdesc(channel->lmdesc.base, lmdesc);
+       }
+
+       dev_dbg(dmac->dev, "%s: VD residue is %u\n", __func__, residue);
+
+       return residue;
+}
+
+/*
+ * rz_dmac_chan_get_residue - number of bytes not yet transferred for the
+ * transaction identified by @cookie.
+ * @channel: channel the transaction was queued on
+ * @cookie: cookie returned at submit time for the transaction
+ *
+ * Caller must hold channel->vc.lock (see rz_dmac_tx_status()).
+ */
+static u32 rz_dmac_chan_get_residue(struct rz_dmac_chan *channel,
+                                   dma_cookie_t cookie)
+{
+       struct rz_dmac_desc *current_desc, *desc;
+       enum dma_status status;
+       u32 crla, crtb, i;
+
+       /*
+        * Get the currently processing virtual descriptor. Use the _or_null
+        * variant: plain list_first_entry() never returns NULL (it is just
+        * container_of() on the list head), which made the NULL check dead
+        * code and let an empty ld_active list produce a bogus pointer that
+        * was dereferenced below.
+        */
+       current_desc = list_first_entry_or_null(&channel->ld_active,
+                                               struct rz_dmac_desc, node);
+       if (!current_desc)
+               return 0;
+
+       /*
+        * If the cookie corresponds to a descriptor that has been completed
+        * there is no residue. The same check has already been performed by the
+        * caller but without holding the channel lock, so the descriptor could
+        * now be complete.
+        */
+       status = dma_cookie_status(&channel->vc.chan, cookie, NULL);
+       if (status == DMA_COMPLETE)
+               return 0;
+
+       /*
+        * If the cookie doesn't correspond to the currently processing virtual
+        * descriptor then the descriptor hasn't been processed yet, and the
+        * residue is equal to the full descriptor size. Also, it is possible
+        * for a client driver to call this function before
+        * rz_dmac_irq_handler_thread() runs. In this case, the running
+        * descriptor will be the next descriptor, and will appear in the done
+        * list. So, if the argument cookie matches the done list's cookie, we
+        * can assume the residue is zero.
+        */
+       if (cookie != current_desc->vd.tx.cookie) {
+               list_for_each_entry(desc, &channel->ld_free, node) {
+                       if (cookie == desc->vd.tx.cookie)
+                               return 0;
+               }
+
+               list_for_each_entry(desc, &channel->ld_queue, node) {
+                       if (cookie == desc->vd.tx.cookie)
+                               return desc->len;
+               }
+
+               list_for_each_entry(desc, &channel->ld_active, node) {
+                       if (cookie == desc->vd.tx.cookie)
+                               return desc->len;
+               }
+
+               /*
+                * No descriptor found for the cookie, there's thus no residue.
+                * This shouldn't happen if the calling driver passes a correct
+                * cookie value.
+                */
+               WARN(1, "No descriptor for cookie!");
+               return 0;
+       }
+
+       /*
+        * We need to read two registers. Make sure the hardware does not move
+        * to next lmdesc while reading the current lmdesc. Trying it 3 times
+        * should be enough: initial read, retry, retry for the paranoid.
+        */
+       for (i = 0; i < 3; i++) {
+               crla = rz_dmac_ch_readl(channel, CRLA, 1);
+               crtb = rz_dmac_ch_readl(channel, CRTB, 1);
+               /* Still the same? */
+               if (crla == rz_dmac_ch_readl(channel, CRLA, 1))
+                       break;
+       }
+
+       WARN_ONCE(i >= 3, "residue might not be continuous!");
+
+       /*
+        * Calculate number of bytes transferred in processing virtual descriptor.
+        * One virtual descriptor can have many lmdesc.
+        */
+       return crtb + rz_dmac_calculate_residue_bytes_in_vd(channel, crla);
+}
+
+/*
+ * rz_dmac_tx_status - dmaengine device_tx_status() callback: report the
+ * status of @cookie and, when requested, its residue in @txstate.
+ */
+static enum dma_status rz_dmac_tx_status(struct dma_chan *chan,
+                                        dma_cookie_t cookie,
+                                        struct dma_tx_state *txstate)
+{
+       struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+       enum dma_status ret;
+       unsigned long flags;
+       u32 bytes_left;
+
+       /* Fast paths: already complete, or caller does not want the state. */
+       ret = dma_cookie_status(chan, cookie, txstate);
+       if (ret == DMA_COMPLETE || !txstate)
+               return ret;
+
+       spin_lock_irqsave(&channel->vc.lock, flags);
+       bytes_left = rz_dmac_chan_get_residue(channel, cookie);
+       spin_unlock_irqrestore(&channel->vc.lock, flags);
+
+       /* if there's no residue, the cookie is complete */
+       if (!bytes_left)
+               return DMA_COMPLETE;
+
+       dma_set_residue(txstate, bytes_left);
+
+       return ret;
+}
+
 /*
  * -----------------------------------------------------------------------------
  * IRQ handling
@@ -997,6 +1138,7 @@ static int rz_dmac_probe(struct platform_device *pdev)
        engine = &dmac->engine;
        dma_cap_set(DMA_SLAVE, engine->cap_mask);
        dma_cap_set(DMA_MEMCPY, engine->cap_mask);
+       engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
        rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_0_7_COMMON_BASE + DCTRL);
        rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_8_15_COMMON_BASE + DCTRL);
 
@@ -1004,7 +1146,7 @@ static int rz_dmac_probe(struct platform_device *pdev)
 
        engine->device_alloc_chan_resources = rz_dmac_alloc_chan_resources;
        engine->device_free_chan_resources = rz_dmac_free_chan_resources;
-       engine->device_tx_status = dma_cookie_status;
+       engine->device_tx_status = rz_dmac_tx_status;
        engine->device_prep_slave_sg = rz_dmac_prep_slave_sg;
        engine->device_prep_dma_memcpy = rz_dmac_prep_dma_memcpy;
        engine->device_config = rz_dmac_config;