git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
dmaengine: sh: rz-dmac: Add device_{pause,resume}() callbacks
author: Claudiu Beznea <claudiu.beznea@tuxon.dev>
Mon, 16 Mar 2026 13:32:52 +0000 (15:32 +0200)
committer: Vinod Koul <vkoul@kernel.org>
Tue, 17 Mar 2026 10:42:11 +0000 (16:12 +0530)
The RZ/G2L SCIFA driver uses dmaengine_prep_slave_sg() to enqueue DMA
transfers and implements a timeout mechanism on RX to handle cases where
a DMA transfer does not complete. The timeout is implemented using an
hrtimer.

In the hrtimer callback, dmaengine_tx_status() is called (along with
dmaengine_pause()) to retrieve the transfer residue and handle incomplete
DMA transfers.

Add support for device_{pause,resume}() callbacks.

Signed-off-by: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
Link: https://patch.msgid.link/20260316133252.240348-9-claudiu.beznea.uj@bp.renesas.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
drivers/dma/sh/rz-dmac.c

index 4f6f9f4bacca265bda5b8ad1f920a7758430d72d..625ff29024de0439a07a7f339b551426abcf8eee 100644 (file)
@@ -140,10 +140,12 @@ struct rz_dmac {
 #define CHANNEL_8_15_COMMON_BASE       0x0700
 
 #define CHSTAT_ER                      BIT(4)
+#define CHSTAT_SUS                     BIT(3)
 #define CHSTAT_EN                      BIT(0)
 
 #define CHCTRL_CLRINTMSK               BIT(17)
 #define CHCTRL_CLRSUS                  BIT(9)
+#define CHCTRL_SETSUS                  BIT(8)
 #define CHCTRL_CLRTC                   BIT(6)
 #define CHCTRL_CLREND                  BIT(5)
 #define CHCTRL_CLRRQ                   BIT(4)
@@ -805,11 +807,18 @@ static enum dma_status rz_dmac_tx_status(struct dma_chan *chan,
        if (status == DMA_COMPLETE || !txstate)
                return status;
 
-       scoped_guard(spinlock_irqsave, &channel->vc.lock)
+       scoped_guard(spinlock_irqsave, &channel->vc.lock) {
+               u32 val;
+
                residue = rz_dmac_chan_get_residue(channel, cookie);
 
-       /* if there's no residue, the cookie is complete */
-       if (!residue)
+               val = rz_dmac_ch_readl(channel, CHSTAT, 1);
+               if (val & CHSTAT_SUS)
+                       status = DMA_PAUSED;
+       }
+
+       /* if there's no residue and the channel is not paused, the cookie is complete */
+       if (!residue && status != DMA_PAUSED)
                return DMA_COMPLETE;
 
        dma_set_residue(txstate, residue);
@@ -817,6 +826,38 @@ static enum dma_status rz_dmac_tx_status(struct dma_chan *chan,
        return status;
 }
 
+/*
+ * rz_dmac_device_pause - dmaengine device_pause() callback
+ *
+ * Suspends an in-flight transfer by setting CHCTRL.SETSUS, then polls
+ * CHSTAT until the hardware reports the suspended state (CHSTAT.SUS).
+ * Returns 0 on success (or when the channel is not enabled, in which
+ * case there is nothing to suspend), or the poll-timeout error code.
+ */
+static int rz_dmac_device_pause(struct dma_chan *chan)
+{
+       struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+       u32 val;
+
+       /* Serialize against the vchan descriptor machinery. */
+       guard(spinlock_irqsave)(&channel->vc.lock);
+
+       /* Nothing to suspend if the channel is not currently enabled. */
+       val = rz_dmac_ch_readl(channel, CHSTAT, 1);
+       if (!(val & CHSTAT_EN))
+               return 0;
+
+       /* Request suspend, then wait (1us poll, 1024us timeout) for SUS. */
+       rz_dmac_ch_writel(channel, CHCTRL_SETSUS, CHCTRL, 1);
+       return read_poll_timeout_atomic(rz_dmac_ch_readl, val,
+                                       (val & CHSTAT_SUS), 1, 1024,
+                                       false, channel, CHSTAT, 1);
+}
+
+/*
+ * rz_dmac_device_resume - dmaengine device_resume() callback
+ *
+ * Resumes a previously suspended transfer by setting CHCTRL.CLRSUS,
+ * then polls CHSTAT until the hardware clears the suspended state
+ * (CHSTAT.SUS). Returns 0 on success or the poll-timeout error code.
+ */
+static int rz_dmac_device_resume(struct dma_chan *chan)
+{
+       struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+       u32 val;
+
+       /* Serialize against the vchan descriptor machinery. */
+       guard(spinlock_irqsave)(&channel->vc.lock);
+
+       /* Do not check CHSTAT_SUS but rely on HW capabilities. */
+
+       /* Clear suspend, then wait (1us poll, 1024us timeout) for !SUS. */
+       rz_dmac_ch_writel(channel, CHCTRL_CLRSUS, CHCTRL, 1);
+       return read_poll_timeout_atomic(rz_dmac_ch_readl, val,
+                                       !(val & CHSTAT_SUS), 1, 1024,
+                                       false, channel, CHSTAT, 1);
+}
+
 /*
  * -----------------------------------------------------------------------------
  * IRQ handling
@@ -1153,6 +1194,8 @@ static int rz_dmac_probe(struct platform_device *pdev)
        engine->device_terminate_all = rz_dmac_terminate_all;
        engine->device_issue_pending = rz_dmac_issue_pending;
        engine->device_synchronize = rz_dmac_device_synchronize;
+       engine->device_pause = rz_dmac_device_pause;
+       engine->device_resume = rz_dmac_device_resume;
 
        engine->copy_align = DMAENGINE_ALIGN_1_BYTE;
        dma_set_max_seg_size(engine->dev, U32_MAX);