]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/blob - queue-5.1/dmaengine-jz4780-fix-transfers-being-acked-too-soon.patch
fixes for 5.1
[thirdparty/kernel/stable-queue.git] / queue-5.1 / dmaengine-jz4780-fix-transfers-being-acked-too-soon.patch
1 From 0690872a6349e526ab70619127fd37f74643a5a2 Mon Sep 17 00:00:00 2001
2 From: Paul Cercueil <paul@crapouillou.net>
3 Date: Sat, 4 May 2019 23:37:57 +0200
4 Subject: dmaengine: jz4780: Fix transfers being ACKed too soon
5
6 [ Upstream commit 4e4106f5e942bff65548e82fc330d40385c89220 ]
7
8 When a multi-descriptor DMA transfer is in progress, the "IRQ pending"
9 flag will apparently be set for that channel as soon as the last
10 descriptor loads, way before the IRQ actually happens. This behaviour
11 has been observed on the JZ4725B, but maybe other SoCs are affected.
12
13 In the case where another DMA transfer is running to completion on a
14 separate channel, the IRQ handler would then run the completion handler
15 for our previous channel even if the transfer didn't actually finish.
16
17 Fix this by checking in the completion handler that we're indeed done;
18 if not, the interrupted DMA transfer will simply be resumed.
19
20 Signed-off-by: Paul Cercueil <paul@crapouillou.net>
21 Signed-off-by: Vinod Koul <vkoul@kernel.org>
22 Signed-off-by: Sasha Levin <sashal@kernel.org>
23 ---
24 drivers/dma/dma-jz4780.c | 32 +++++++++++++++++++++-----------
25 1 file changed, 21 insertions(+), 11 deletions(-)
26
27 diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
28 index 9ce0a386225b..f49534019d37 100644
29 --- a/drivers/dma/dma-jz4780.c
30 +++ b/drivers/dma/dma-jz4780.c
31 @@ -666,10 +666,11 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
32 return status;
33 }
34
35 -static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
36 - struct jz4780_dma_chan *jzchan)
37 +static bool jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
38 + struct jz4780_dma_chan *jzchan)
39 {
40 uint32_t dcs;
41 + bool ack = true;
42
43 spin_lock(&jzchan->vchan.lock);
44
45 @@ -692,12 +693,20 @@ static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
46 if ((dcs & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) == 0) {
47 if (jzchan->desc->type == DMA_CYCLIC) {
48 vchan_cyclic_callback(&jzchan->desc->vdesc);
49 - } else {
50 +
51 + jz4780_dma_begin(jzchan);
52 + } else if (dcs & JZ_DMA_DCS_TT) {
53 vchan_cookie_complete(&jzchan->desc->vdesc);
54 jzchan->desc = NULL;
55 - }
56
57 - jz4780_dma_begin(jzchan);
58 + jz4780_dma_begin(jzchan);
59 + } else {
60 + /* False positive - continue the transfer */
61 + ack = false;
62 + jz4780_dma_chn_writel(jzdma, jzchan->id,
63 + JZ_DMA_REG_DCS,
64 + JZ_DMA_DCS_CTE);
65 + }
66 }
67 } else {
68 dev_err(&jzchan->vchan.chan.dev->device,
69 @@ -705,21 +714,22 @@ static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
70 }
71
72 spin_unlock(&jzchan->vchan.lock);
73 +
74 + return ack;
75 }
76
77 static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
78 {
79 struct jz4780_dma_dev *jzdma = data;
80 + unsigned int nb_channels = jzdma->soc_data->nb_channels;
81 uint32_t pending, dmac;
82 int i;
83
84 pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP);
85
86 - for (i = 0; i < jzdma->soc_data->nb_channels; i++) {
87 - if (!(pending & (1<<i)))
88 - continue;
89 -
90 - jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]);
91 + for_each_set_bit(i, (unsigned long *)&pending, nb_channels) {
92 + if (jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]))
93 + pending &= ~BIT(i);
94 }
95
96 /* Clear halt and address error status of all channels. */
97 @@ -728,7 +738,7 @@ static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
98 jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, dmac);
99
100 /* Clear interrupt pending status. */
101 - jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DIRQP, 0);
102 + jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DIRQP, pending);
103
104 return IRQ_HANDLED;
105 }
106 --
107 2.20.1
108