git.ipfire.org Git - thirdparty/linux.git/commitdiff
dmaengine: qcom: bam_dma: use lock guards
author: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
Thu, 6 Nov 2025 15:44:51 +0000 (16:44 +0100)
committer: Vinod Koul <vkoul@kernel.org>
Tue, 16 Dec 2025 13:02:05 +0000 (18:32 +0530)
Simplify locking across the driver with lock guards from cleanup.h.

Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
Link: https://patch.msgid.link/20251106-qcom-bam-dma-refactor-v1-2-0e2baaf3d81a@linaro.org
Signed-off-by: Vinod Koul <vkoul@kernel.org>
drivers/dma/qcom/bam_dma.c

index 2f1f295d3e1ff910a5051c20dc09cb1e8077df82..bcd8de9a9a12621a36b49c31bff96f474468be06 100644 (file)
@@ -24,6 +24,7 @@
  */
 
 #include <linux/circ_buf.h>
+#include <linux/cleanup.h>
 #include <linux/clk.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
@@ -570,7 +571,6 @@ static void bam_free_chan(struct dma_chan *chan)
        struct bam_chan *bchan = to_bam_chan(chan);
        struct bam_device *bdev = bchan->bdev;
        u32 val;
-       unsigned long flags;
        int ret;
 
        ret = pm_runtime_get_sync(bdev->dev);
@@ -584,9 +584,8 @@ static void bam_free_chan(struct dma_chan *chan)
                goto err;
        }
 
-       spin_lock_irqsave(&bchan->vc.lock, flags);
-       bam_reset_channel(bchan);
-       spin_unlock_irqrestore(&bchan->vc.lock, flags);
+       scoped_guard(spinlock_irqsave, &bchan->vc.lock)
+               bam_reset_channel(bchan);
 
        dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt,
                    bchan->fifo_phys);
@@ -624,12 +623,11 @@ static int bam_slave_config(struct dma_chan *chan,
                            struct dma_slave_config *cfg)
 {
        struct bam_chan *bchan = to_bam_chan(chan);
-       unsigned long flag;
 
-       spin_lock_irqsave(&bchan->vc.lock, flag);
+       guard(spinlock_irqsave)(&bchan->vc.lock);
+
        memcpy(&bchan->slave, cfg, sizeof(*cfg));
        bchan->reconfigure = 1;
-       spin_unlock_irqrestore(&bchan->vc.lock, flag);
 
        return 0;
 }
@@ -726,38 +724,37 @@ static int bam_dma_terminate_all(struct dma_chan *chan)
 {
        struct bam_chan *bchan = to_bam_chan(chan);
        struct bam_async_desc *async_desc, *tmp;
-       unsigned long flag;
        LIST_HEAD(head);
 
        /* remove all transactions, including active transaction */
-       spin_lock_irqsave(&bchan->vc.lock, flag);
-       /*
-        * If we have transactions queued, then some might be committed to the
-        * hardware in the desc fifo.  The only way to reset the desc fifo is
-        * to do a hardware reset (either by pipe or the entire block).
-        * bam_chan_init_hw() will trigger a pipe reset, and also reinit the
-        * pipe.  If the pipe is left disabled (default state after pipe reset)
-        * and is accessed by a connected hardware engine, a fatal error in
-        * the BAM will occur.  There is a small window where this could happen
-        * with bam_chan_init_hw(), but it is assumed that the caller has
-        * stopped activity on any attached hardware engine.  Make sure to do
-        * this first so that the BAM hardware doesn't cause memory corruption
-        * by accessing freed resources.
-        */
-       if (!list_empty(&bchan->desc_list)) {
-               async_desc = list_first_entry(&bchan->desc_list,
-                                             struct bam_async_desc, desc_node);
-               bam_chan_init_hw(bchan, async_desc->dir);
-       }
+       scoped_guard(spinlock_irqsave, &bchan->vc.lock) {
+               /*
+                * If we have transactions queued, then some might be committed to the
+                * hardware in the desc fifo.  The only way to reset the desc fifo is
+                * to do a hardware reset (either by pipe or the entire block).
+                * bam_chan_init_hw() will trigger a pipe reset, and also reinit the
+                * pipe.  If the pipe is left disabled (default state after pipe reset)
+                * and is accessed by a connected hardware engine, a fatal error in
+                * the BAM will occur.  There is a small window where this could happen
+                * with bam_chan_init_hw(), but it is assumed that the caller has
+                * stopped activity on any attached hardware engine.  Make sure to do
+                * this first so that the BAM hardware doesn't cause memory corruption
+                * by accessing freed resources.
+                */
+               if (!list_empty(&bchan->desc_list)) {
+                       async_desc = list_first_entry(&bchan->desc_list,
+                                                     struct bam_async_desc, desc_node);
+                       bam_chan_init_hw(bchan, async_desc->dir);
+               }
 
-       list_for_each_entry_safe(async_desc, tmp,
-                                &bchan->desc_list, desc_node) {
-               list_add(&async_desc->vd.node, &bchan->vc.desc_issued);
-               list_del(&async_desc->desc_node);
-       }
+               list_for_each_entry_safe(async_desc, tmp,
+                                        &bchan->desc_list, desc_node) {
+                       list_add(&async_desc->vd.node, &bchan->vc.desc_issued);
+                       list_del(&async_desc->desc_node);
+               }
 
-       vchan_get_all_descriptors(&bchan->vc, &head);
-       spin_unlock_irqrestore(&bchan->vc.lock, flag);
+               vchan_get_all_descriptors(&bchan->vc, &head);
+       }
 
        vchan_dma_desc_free_list(&bchan->vc, &head);
 
@@ -773,17 +770,16 @@ static int bam_pause(struct dma_chan *chan)
 {
        struct bam_chan *bchan = to_bam_chan(chan);
        struct bam_device *bdev = bchan->bdev;
-       unsigned long flag;
        int ret;
 
        ret = pm_runtime_get_sync(bdev->dev);
        if (ret < 0)
                return ret;
 
-       spin_lock_irqsave(&bchan->vc.lock, flag);
-       writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
-       bchan->paused = 1;
-       spin_unlock_irqrestore(&bchan->vc.lock, flag);
+       scoped_guard(spinlock_irqsave, &bchan->vc.lock) {
+               writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
+               bchan->paused = 1;
+       }
        pm_runtime_mark_last_busy(bdev->dev);
        pm_runtime_put_autosuspend(bdev->dev);
 
@@ -799,17 +795,16 @@ static int bam_resume(struct dma_chan *chan)
 {
        struct bam_chan *bchan = to_bam_chan(chan);
        struct bam_device *bdev = bchan->bdev;
-       unsigned long flag;
        int ret;
 
        ret = pm_runtime_get_sync(bdev->dev);
        if (ret < 0)
                return ret;
 
-       spin_lock_irqsave(&bchan->vc.lock, flag);
-       writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
-       bchan->paused = 0;
-       spin_unlock_irqrestore(&bchan->vc.lock, flag);
+       scoped_guard(spinlock_irqsave, &bchan->vc.lock) {
+               writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
+               bchan->paused = 0;
+       }
        pm_runtime_mark_last_busy(bdev->dev);
        pm_runtime_put_autosuspend(bdev->dev);
 
@@ -826,7 +821,6 @@ static int bam_resume(struct dma_chan *chan)
 static u32 process_channel_irqs(struct bam_device *bdev)
 {
        u32 i, srcs, pipe_stts, offset, avail;
-       unsigned long flags;
        struct bam_async_desc *async_desc, *tmp;
 
        srcs = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_EE));
@@ -846,7 +840,7 @@ static u32 process_channel_irqs(struct bam_device *bdev)
 
                writel_relaxed(pipe_stts, bam_addr(bdev, i, BAM_P_IRQ_CLR));
 
-               spin_lock_irqsave(&bchan->vc.lock, flags);
+               guard(spinlock_irqsave)(&bchan->vc.lock);
 
                offset = readl_relaxed(bam_addr(bdev, i, BAM_P_SW_OFSTS)) &
                                       P_SW_OFSTS_MASK;
@@ -885,8 +879,6 @@ static u32 process_channel_irqs(struct bam_device *bdev)
                        }
                        list_del(&async_desc->desc_node);
                }
-
-               spin_unlock_irqrestore(&bchan->vc.lock, flags);
        }
 
        return srcs;
@@ -950,7 +942,6 @@ static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
        int ret;
        size_t residue = 0;
        unsigned int i;
-       unsigned long flags;
 
        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_COMPLETE)
@@ -959,23 +950,22 @@ static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
        if (!txstate)
                return bchan->paused ? DMA_PAUSED : ret;
 
-       spin_lock_irqsave(&bchan->vc.lock, flags);
-       vd = vchan_find_desc(&bchan->vc, cookie);
-       if (vd) {
-               residue = container_of(vd, struct bam_async_desc, vd)->length;
-       } else {
-               list_for_each_entry(async_desc, &bchan->desc_list, desc_node) {
-                       if (async_desc->vd.tx.cookie != cookie)
-                               continue;
-
-                       for (i = 0; i < async_desc->num_desc; i++)
-                               residue += le16_to_cpu(
-                                               async_desc->curr_desc[i].size);
+       scoped_guard(spinlock_irqsave, &bchan->vc.lock) {
+               vd = vchan_find_desc(&bchan->vc, cookie);
+               if (vd) {
+                       residue = container_of(vd, struct bam_async_desc, vd)->length;
+               } else {
+                       list_for_each_entry(async_desc, &bchan->desc_list, desc_node) {
+                               if (async_desc->vd.tx.cookie != cookie)
+                                       continue;
+
+                               for (i = 0; i < async_desc->num_desc; i++)
+                                       residue += le16_to_cpu(
+                                                       async_desc->curr_desc[i].size);
+                       }
                }
        }
 
-       spin_unlock_irqrestore(&bchan->vc.lock, flags);
-
        dma_set_residue(txstate, residue);
 
        if (ret == DMA_IN_PROGRESS && bchan->paused)
@@ -1116,17 +1106,16 @@ static void dma_tasklet(struct tasklet_struct *t)
 {
        struct bam_device *bdev = from_tasklet(bdev, t, task);
        struct bam_chan *bchan;
-       unsigned long flags;
        unsigned int i;
 
        /* go through the channels and kick off transactions */
        for (i = 0; i < bdev->num_channels; i++) {
                bchan = &bdev->channels[i];
-               spin_lock_irqsave(&bchan->vc.lock, flags);
+
+               guard(spinlock_irqsave)(&bchan->vc.lock);
 
                if (!list_empty(&bchan->vc.desc_issued) && !IS_BUSY(bchan))
                        bam_start_dma(bchan);
-               spin_unlock_irqrestore(&bchan->vc.lock, flags);
        }
 
 }
@@ -1140,15 +1129,12 @@ static void dma_tasklet(struct tasklet_struct *t)
 static void bam_issue_pending(struct dma_chan *chan)
 {
        struct bam_chan *bchan = to_bam_chan(chan);
-       unsigned long flags;
 
-       spin_lock_irqsave(&bchan->vc.lock, flags);
+       guard(spinlock_irqsave)(&bchan->vc.lock);
 
        /* if work pending and idle, start a transaction */
        if (vchan_issue_pending(&bchan->vc) && !IS_BUSY(bchan))
                bam_start_dma(bchan);
-
-       spin_unlock_irqrestore(&bchan->vc.lock, flags);
 }
 
 /**