git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
dmaengine: loongson: loongson2-apb: Simplify locking with guard() and scoped_guard()
author: Binbin Zhou <zhoubinbin@loongson.cn>
Sat, 7 Mar 2026 03:25:35 +0000 (11:25 +0800)
committer: Vinod Koul <vkoul@kernel.org>
Tue, 17 Mar 2026 11:18:55 +0000 (16:48 +0530)
Use guard() and scoped_guard() infrastructure instead of explicitly
acquiring and releasing spinlocks to simplify the code and ensure that
all locks are released properly.

Reviewed-by: Frank Li <Frank.Li@nxp.com>
Signed-off-by: Binbin Zhou <zhoubinbin@loongson.cn>
Reviewed-by: Huacai Chen <chenhuacai@loongson.cn>
Link: https://patch.msgid.link/fb59bb25e5c4fcb84d9aa7b351285fa8d02ea8cb.1772853681.git.zhoubinbin@loongson.cn
Signed-off-by: Vinod Koul <vkoul@kernel.org>
drivers/dma/loongson/loongson2-apb-dma.c

index adddfafc2f1cf3eda84a9b34b425db98803b2e1c..aceb069e71fca00a451fb8de1c371e3bc971f82c 100644 (file)
@@ -461,12 +461,11 @@ static int ls2x_dma_slave_config(struct dma_chan *chan,
 static void ls2x_dma_issue_pending(struct dma_chan *chan)
 {
        struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
-       unsigned long flags;
 
-       spin_lock_irqsave(&lchan->vchan.lock, flags);
+       guard(spinlock_irqsave)(&lchan->vchan.lock);
+
        if (vchan_issue_pending(&lchan->vchan) && !lchan->desc)
                ls2x_dma_start_transfer(lchan);
-       spin_unlock_irqrestore(&lchan->vchan.lock, flags);
 }
 
 /*
@@ -478,19 +477,18 @@ static void ls2x_dma_issue_pending(struct dma_chan *chan)
 static int ls2x_dma_terminate_all(struct dma_chan *chan)
 {
        struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
-       unsigned long flags;
        LIST_HEAD(head);
 
-       spin_lock_irqsave(&lchan->vchan.lock, flags);
-       /* Setting stop cmd */
-       ls2x_dma_write_cmd(lchan, LDMA_STOP);
-       if (lchan->desc) {
-               vchan_terminate_vdesc(&lchan->desc->vdesc);
-               lchan->desc = NULL;
-       }
+       scoped_guard(spinlock_irqsave, &lchan->vchan.lock) {
+               /* Setting stop cmd */
+               ls2x_dma_write_cmd(lchan, LDMA_STOP);
+               if (lchan->desc) {
+                       vchan_terminate_vdesc(&lchan->desc->vdesc);
+                       lchan->desc = NULL;
+               }
 
-       vchan_get_all_descriptors(&lchan->vchan, &head);
-       spin_unlock_irqrestore(&lchan->vchan.lock, flags);
+               vchan_get_all_descriptors(&lchan->vchan, &head);
+       }
 
        vchan_dma_desc_free_list(&lchan->vchan, &head);
        return 0;
@@ -511,14 +509,13 @@ static void ls2x_dma_synchronize(struct dma_chan *chan)
 static int ls2x_dma_pause(struct dma_chan *chan)
 {
        struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
-       unsigned long flags;
 
-       spin_lock_irqsave(&lchan->vchan.lock, flags);
+       guard(spinlock_irqsave)(&lchan->vchan.lock);
+
        if (lchan->desc && lchan->desc->status == DMA_IN_PROGRESS) {
                ls2x_dma_write_cmd(lchan, LDMA_STOP);
                lchan->desc->status = DMA_PAUSED;
        }
-       spin_unlock_irqrestore(&lchan->vchan.lock, flags);
 
        return 0;
 }
@@ -526,14 +523,13 @@ static int ls2x_dma_pause(struct dma_chan *chan)
 static int ls2x_dma_resume(struct dma_chan *chan)
 {
        struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
-       unsigned long flags;
 
-       spin_lock_irqsave(&lchan->vchan.lock, flags);
+       guard(spinlock_irqsave)(&lchan->vchan.lock);
+
        if (lchan->desc && lchan->desc->status == DMA_PAUSED) {
                lchan->desc->status = DMA_IN_PROGRESS;
                ls2x_dma_write_cmd(lchan, LDMA_START);
        }
-       spin_unlock_irqrestore(&lchan->vchan.lock, flags);
 
        return 0;
 }
@@ -550,22 +546,22 @@ static irqreturn_t ls2x_dma_isr(int irq, void *dev_id)
        struct ls2x_dma_chan *lchan = dev_id;
        struct ls2x_dma_desc *desc;
 
-       spin_lock(&lchan->vchan.lock);
-       desc = lchan->desc;
-       if (desc) {
-               if (desc->cyclic) {
-                       vchan_cyclic_callback(&desc->vdesc);
-               } else {
-                       desc->status = DMA_COMPLETE;
-                       vchan_cookie_complete(&desc->vdesc);
-                       ls2x_dma_start_transfer(lchan);
+       scoped_guard(spinlock, &lchan->vchan.lock) {
+               desc = lchan->desc;
+               if (desc) {
+                       if (desc->cyclic) {
+                               vchan_cyclic_callback(&desc->vdesc);
+                       } else {
+                               desc->status = DMA_COMPLETE;
+                               vchan_cookie_complete(&desc->vdesc);
+                               ls2x_dma_start_transfer(lchan);
+                       }
+
+                       /* ls2x_dma_start_transfer() updates lchan->desc */
+                       if (!lchan->desc)
+                               ls2x_dma_write_cmd(lchan, LDMA_STOP);
                }
-
-               /* ls2x_dma_start_transfer() updates lchan->desc */
-               if (!lchan->desc)
-                       ls2x_dma_write_cmd(lchan, LDMA_STOP);
        }
-       spin_unlock(&lchan->vchan.lock);
 
        return IRQ_HANDLED;
 }