From: Greg Kroah-Hartman
Date: Thu, 10 Oct 2013 22:55:43 +0000 (-0700)
Subject: 3.4-stable patches
X-Git-Tag: v3.0.100~6^2~11
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=7fad255cb68c3c6fb4306fb91bc8c2d090b654d1;p=thirdparty%2Fkernel%2Fstable-queue.git

3.4-stable patches

added patches:
	dmaengine-imx-dma-fix-callback-path-in-tasklet.patch
	dmaengine-imx-dma-fix-lockdep-issue-between-irqhandler-and-tasklet.patch
	dmaengine-imx-dma-fix-slow-path-issue-in-prep_dma_cyclic.patch
	staging-comedi-ni_65xx-bug-fix-confine-insn_bits-to-one-subdevice.patch
---

diff --git a/queue-3.4/dmaengine-imx-dma-fix-callback-path-in-tasklet.patch b/queue-3.4/dmaengine-imx-dma-fix-callback-path-in-tasklet.patch
new file mode 100644
index 00000000000..25d8ef943e3
--- /dev/null
+++ b/queue-3.4/dmaengine-imx-dma-fix-callback-path-in-tasklet.patch
@@ -0,0 +1,52 @@
+From fcaaba6c7136fe47e5a13352f99a64b019b6d2c5 Mon Sep 17 00:00:00 2001
+From: Michael Grzeschik
+Date: Tue, 17 Sep 2013 15:56:08 +0200
+Subject: dmaengine: imx-dma: fix callback path in tasklet
+
+From: Michael Grzeschik
+
+commit fcaaba6c7136fe47e5a13352f99a64b019b6d2c5 upstream.
+
+We need to free the ld_active list head before jumping into the callback
+routine. Otherwise the callback could run into issue_pending and change
+our ld_active list head, which we are just about to free. This would leave
+the channel list in a corrupted and undefined state.
+
+Signed-off-by: Michael Grzeschik
+Signed-off-by: Vinod Koul
+Cc: Jonghwan Choi
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/dma/imx-dma.c |   10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/drivers/dma/imx-dma.c
++++ b/drivers/dma/imx-dma.c
+@@ -563,13 +563,11 @@ static void imxdma_tasklet(unsigned long
+
+ 	if (list_empty(&imxdmac->ld_active)) {
+ 		/* Someone might have called terminate all */
+-		goto out;
++		spin_unlock_irqrestore(&imxdma->lock, flags);
++		return;
+ 	}
+ 	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);
+
+-	if (desc->desc.callback)
+-		desc->desc.callback(desc->desc.callback_param);
+-
+ 	/* If we are dealing with a cyclic descriptor keep it on ld_active
+ 	 * and dont mark the descripor as complete.
+ 	 * Only in non-cyclic cases it would be marked as complete
+@@ -597,6 +595,10 @@ static void imxdma_tasklet(unsigned long
+ 	}
+ out:
+ 	spin_unlock_irqrestore(&imxdma->lock, flags);
++
++	if (desc->desc.callback)
++		desc->desc.callback(desc->desc.callback_param);
++
+ }
+
+ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
diff --git a/queue-3.4/dmaengine-imx-dma-fix-lockdep-issue-between-irqhandler-and-tasklet.patch b/queue-3.4/dmaengine-imx-dma-fix-lockdep-issue-between-irqhandler-and-tasklet.patch
new file mode 100644
index 00000000000..46c3dcea704
--- /dev/null
+++ b/queue-3.4/dmaengine-imx-dma-fix-lockdep-issue-between-irqhandler-and-tasklet.patch
@@ -0,0 +1,185 @@
+From 5a276fa6bdf82fd442046969603968c83626ce0b Mon Sep 17 00:00:00 2001
+From: Michael Grzeschik
+Date: Tue, 17 Sep 2013 15:56:07 +0200
+Subject: dmaengine: imx-dma: fix lockdep issue between irqhandler and tasklet
+
+From: Michael Grzeschik
+
+commit 5a276fa6bdf82fd442046969603968c83626ce0b upstream.
+
+The tasklet and irq handler are using spin_lock while other routines are
+using spin_lock_irqsave/restore. This leads to the lockdep issues
+described below. This patch changes the code to use
+spin_lock_irqsave/restore in both code paths.
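
A minimal sketch of the inconsistent locking pattern lockdep reports here; the
names my_lock, my_irq_handler and my_tasklet_func are placeholders, not symbols
from imx-dma.c:

	#include <linux/interrupt.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(my_lock);

	static irqreturn_t my_irq_handler(int irq, void *dev_id)
	{
		spin_lock(&my_lock);	/* hard IRQ context, IRQs already off */
		/* ... touch shared channel state ... */
		spin_unlock(&my_lock);
		return IRQ_HANDLED;
	}

	static void my_tasklet_func(unsigned long data)
	{
		unsigned long flags;

		/*
		 * A plain spin_lock() here would leave IRQs enabled, so the
		 * handler above could interrupt us while we hold the lock and
		 * then spin on it forever. Taking the lock with IRQs disabled
		 * closes that window, which is what the patch does.
		 */
		spin_lock_irqsave(&my_lock, flags);
		/* ... touch shared channel state ... */
		spin_unlock_irqrestore(&my_lock, flags);
	}
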
+
+As imxdma_xfer_desc always gets called with spin_lock_irqsave lock held,
+this patch also removes the spare call inside the routine to avoid
+double locking.
+
+[ 403.358162] =================================
+[ 403.362549] [ INFO: inconsistent lock state ]
+[ 403.366945] 3.10.0-20130823+ #904 Not tainted
+[ 403.371331] ---------------------------------
+[ 403.375721] inconsistent {IN-HARDIRQ-W} -> {HARDIRQ-ON-W} usage.
+[ 403.381769] swapper/0 [HC0[0]:SC1[1]:HE1:SE0] takes:
+[ 403.386762] (&(&imxdma->lock)->rlock){?.-...}, at: [] imxdma_tasklet+0x20/0x134
+[ 403.395201] {IN-HARDIRQ-W} state was registered at:
+[ 403.400108]   [] mark_lock+0x2a0/0x6b4
+[ 403.404798]   [] __lock_acquire+0x650/0x1a64
+[ 403.410004]   [] lock_acquire+0x94/0xa8
+[ 403.414773]   [] _raw_spin_lock+0x54/0x8c
+[ 403.419720]   [] dma_irq_handler+0x78/0x254
+[ 403.424845]   [] handle_irq_event_percpu+0x38/0x1b4
+[ 403.430670]   [] handle_irq_event+0x44/0x64
+[ 403.435789]   [] handle_level_irq+0xd8/0xf0
+[ 403.440903]   [] generic_handle_irq+0x28/0x38
+[ 403.446194]   [] handle_IRQ+0x68/0x8c
+[ 403.450789]   [] avic_handle_irq+0x3c/0x48
+[ 403.455811]   [] __irq_svc+0x44/0x74
+[ 403.460314]   [] cpu_startup_entry+0x88/0xf4
+[ 403.465525]   [] rest_init+0xb8/0xe0
+[ 403.470045]   [] start_kernel+0x28c/0x2d4
+[ 403.474986]   [] 0xa0008040
+[ 403.478709] irq event stamp: 50854
+[ 403.482140] hardirqs last enabled at (50854): [] tasklet_action+0x38/0xdc
+[ 403.489954] hardirqs last disabled at (50853): [] tasklet_action+0x20/0xdc
+[ 403.497761] softirqs last enabled at (50850): [] _local_bh_enable+0x14/0x18
+[ 403.505741] softirqs last disabled at (50851): [] irq_exit+0x88/0xdc
+[ 403.513026]
+[ 403.513026] other info that might help us debug this:
+[ 403.519593]  Possible unsafe locking scenario:
+[ 403.519593]
+[ 403.525548]        CPU0
+[ 403.528020]        ----
+[ 403.530491]   lock(&(&imxdma->lock)->rlock);
+[ 403.534828]   <Interrupt>
+[ 403.537474]     lock(&(&imxdma->lock)->rlock);
+[ 403.541983]
+[ 403.541983]  *** DEADLOCK ***
+[ 403.541983]
+[ 403.547951] no locks held by swapper/0.
+[ 403.551813]
+[ 403.551813] stack backtrace:
+[ 403.556222] CPU: 0 PID: 0 Comm: swapper Not tainted 3.10.0-20130823+ #904
+[ 403.563039] Backtrace:
+[ 403.565581] [] (dump_backtrace+0x0/0x10c) from [] (show_stack+0x18/0x1c)
+[ 403.574054]  r6:00000000 r5:c05c51d8 r4:c040bd58 r3:00200000
+[ 403.579872] [] (show_stack+0x0/0x1c) from [] (dump_stack+0x20/0x28)
+[ 403.587955] [] (dump_stack+0x0/0x28) from [] (print_usage_bug.part.28+0x224/0x28c)
+[ 403.597340] [] (print_usage_bug.part.28+0x0/0x28c) from [] (mark_lock+0x440/0x6b4)
+[ 403.606682]  r8:c004a41c r7:00000000 r6:c040bd58 r5:c040c040 r4:00000002
+[ 403.613566] [] (mark_lock+0x0/0x6b4) from [] (__lock_acquire+0x6cc/0x1a64)
+[ 403.622244] [] (__lock_acquire+0x0/0x1a64) from [] (lock_acquire+0x94/0xa8)
+[ 403.631010] [] (lock_acquire+0x0/0xa8) from [] (_raw_spin_lock+0x54/0x8c)
+[ 403.639614] [] (_raw_spin_lock+0x0/0x8c) from [] (imxdma_tasklet+0x20/0x134)
+[ 403.648434]  r6:c3847010 r5:c040e890 r4:c38470d4
+[ 403.653194] [] (imxdma_tasklet+0x0/0x134) from [] (tasklet_action+0x8c/0xdc)
+[ 403.662013]  r8:c0599160 r7:00000000 r6:00000000 r5:c040e890 r4:c3847114 r3:c019d75c
+[ 403.670042] [] (tasklet_action+0x0/0xdc) from [] (__do_softirq+0xe4/0x1f0)
+[ 403.678687]  r7:00000101 r6:c0402000 r5:c059919c r4:00000001
+[ 403.684498] [] (__do_softirq+0x0/0x1f0) from [] (irq_exit+0x88/0xdc)
+[ 403.692652] [] (irq_exit+0x0/0xdc) from [] (handle_IRQ+0x6c/0x8c)
+[ 403.700514]  r4:00000030 r3:00000110
+[ 403.704192] [] (handle_IRQ+0x0/0x8c) from [] (avic_handle_irq+0x3c/0x48)
+[ 403.712664]  r5:c0403f28 r4:c0593ebc
+[ 403.716343] [] (avic_handle_irq+0x0/0x48) from [] (__irq_svc+0x44/0x74)
+[ 403.724733] Exception stack(0xc0403f28 to 0xc0403f70)
+[ 403.729841] 3f20:          00000001 00000004 00000000 20000013 c0402000 c04104a8
+[ 403.738078] 3f40: 00000002 c0b69620 a0004000 41069264 a03fb5f4 c0403f7c c0403f40 c0403f70
+[ 403.746301] 3f60: c004b92c c0009e74 20000013 ffffffff
+[ 403.751383]  r6:ffffffff r5:20000013 r4:c0009e74 r3:c004b92c
+[ 403.757210] [] (arch_cpu_idle+0x0/0x4c) from [] (cpu_startup_entry+0x88/0xf4)
+[ 403.766161] [] (cpu_startup_entry+0x0/0xf4) from [] (rest_init+0xb8/0xe0)
+[ 403.774753] [] (rest_init+0x0/0xe0) from [] (start_kernel+0x28c/0x2d4)
+[ 403.783051]  r6:c03fc484 r5:ffffffff r4:c040a0e0
+[ 403.787797] [] (start_kernel+0x0/0x2d4) from [] (0xa0008040)
+
+Signed-off-by: Michael Grzeschik
+Signed-off-by: Vinod Koul
+Cc: Jonghwan Choi
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/dma/imx-dma.c |   19 ++++++++-----------
+ 1 file changed, 8 insertions(+), 11 deletions(-)
+
+--- a/drivers/dma/imx-dma.c
++++ b/drivers/dma/imx-dma.c
+@@ -373,17 +373,18 @@ static void dma_irq_handle_channel(struc
+ 	struct imxdma_engine *imxdma = imxdmac->imxdma;
+ 	int chno = imxdmac->channel;
+ 	struct imxdma_desc *desc;
++	unsigned long flags;
+
+-	spin_lock(&imxdma->lock);
++	spin_lock_irqsave(&imxdma->lock, flags);
+ 	if (list_empty(&imxdmac->ld_active)) {
+-		spin_unlock(&imxdma->lock);
++		spin_unlock_irqrestore(&imxdma->lock, flags);
+ 		goto out;
+ 	}
+
+ 	desc = list_first_entry(&imxdmac->ld_active,
+ 				struct imxdma_desc,
+ 				node);
+-	spin_unlock(&imxdma->lock);
++	spin_unlock_irqrestore(&imxdma->lock, flags);
+
+ 	if (desc->sg) {
+ 		u32 tmp;
+@@ -455,7 +456,6 @@ static int imxdma_xfer_desc(struct imxdm
+ {
+ 	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
+ 	struct imxdma_engine *imxdma = imxdmac->imxdma;
+-	unsigned long flags;
+ 	int slot = -1;
+ 	int i;
+
+@@ -463,7 +463,6 @@ static int imxdma_xfer_desc(struct imxdm
+ 	switch (d->type) {
+ 	case IMXDMA_DESC_INTERLEAVED:
+ 		/* Try to get a free 2D slot */
+-		spin_lock_irqsave(&imxdma->lock, flags);
+ 		for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
+ 			if ((imxdma->slots_2d[i].count > 0) &&
+ 			    ((imxdma->slots_2d[i].xsr != d->x) ||
+@@ -473,10 +472,8 @@ static int imxdma_xfer_desc(struct imxdm
+ 			slot = i;
+ 			break;
+ 		}
+-		if (slot < 0) {
+-			spin_unlock_irqrestore(&imxdma->lock, flags);
++		if (slot < 0)
+ 			return -EBUSY;
+-		}
+
+ 		imxdma->slots_2d[slot].xsr = d->x;
+ 		imxdma->slots_2d[slot].ysr = d->y;
+@@ -485,7 +482,6 @@ static int imxdma_xfer_desc(struct imxdm
+
+ 		imxdmac->slot_2d = slot;
+ 		imxdmac->enabled_2d = true;
+-		spin_unlock_irqrestore(&imxdma->lock, flags);
+
+ 		if (slot == IMX_DMA_2D_SLOT_A) {
+ 			d->config_mem &= ~CCR_MSEL_B;
+@@ -561,8 +557,9 @@ static void imxdma_tasklet(unsigned long
+ 	struct imxdma_channel *imxdmac = (void *)data;
+ 	struct imxdma_engine *imxdma = imxdmac->imxdma;
+ 	struct imxdma_desc *desc;
++	unsigned long flags;
+
+-	spin_lock(&imxdma->lock);
++	spin_lock_irqsave(&imxdma->lock, flags);
+
+ 	if (list_empty(&imxdmac->ld_active)) {
+ 		/* Someone might have called terminate all */
+@@ -599,7 +596,7 @@ static void imxdma_tasklet(unsigned long
+ 			__func__, imxdmac->channel);
+ 	}
+ out:
+-	spin_unlock(&imxdma->lock);
++	spin_unlock_irqrestore(&imxdma->lock, flags);
+ }
+
+ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
diff --git a/queue-3.4/dmaengine-imx-dma-fix-slow-path-issue-in-prep_dma_cyclic.patch b/queue-3.4/dmaengine-imx-dma-fix-slow-path-issue-in-prep_dma_cyclic.patch
new file mode 100644
index 00000000000..cb6dacf4fc1
--- /dev/null
+++ b/queue-3.4/dmaengine-imx-dma-fix-slow-path-issue-in-prep_dma_cyclic.patch
@@ -0,0 +1,70 @@
+From edc530fe7ee5a562680615d2e7cd205879c751a7 Mon Sep 17 00:00:00 2001
+From: Michael Grzeschik
+Date: Tue, 17 Sep 2013 15:56:06 +0200
+Subject: dmaengine: imx-dma: fix slow path issue in prep_dma_cyclic
+
+From: Michael Grzeschik
+
+commit edc530fe7ee5a562680615d2e7cd205879c751a7 upstream.
+
+When preparing cyclic DMA buffers from the sound layer, the following
+lockdep trace gets dumped. The calling snd_pcm_action_single runs with
+read_lock_irq held. To fix this, we change the kcalloc call from
+GFP_KERNEL to GFP_ATOMIC.
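
A rough sketch of why the allocation must not sleep here; alloc_period_sg_list
is a placeholder name, not the driver function, and the trace itself follows
below:

	#include <linux/scatterlist.h>
	#include <linux/slab.h>

	static struct scatterlist *alloc_period_sg_list(unsigned int periods)
	{
		/*
		 * prep_dma_cyclic is reached while the ALSA core holds a lock
		 * taken with read_lock_irq(), i.e. in atomic context.
		 * GFP_KERNEL may sleep waiting for memory, which is what
		 * lockdep warns about; GFP_ATOMIC never sleeps.
		 */
		return kcalloc(periods + 1, sizeof(struct scatterlist),
			       GFP_ATOMIC);
	}
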
+
+WARNING: at kernel/lockdep.c:2740 lockdep_trace_alloc+0xcc/0x114()
+DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags))
+Modules linked in:
+CPU: 0 PID: 832 Comm: aplay Not tainted 3.11.0-20130823+ #903
+Backtrace:
+[] (dump_backtrace+0x0/0x10c) from [] (show_stack+0x18/0x1c)
+ r6:c004c090 r5:00000009 r4:c2e0bd18 r3:00404000
+[] (show_stack+0x0/0x1c) from [] (dump_stack+0x20/0x28)
+[] (dump_stack+0x0/0x28) from [] (warn_slowpath_common+0x54/0x70)
+[] (warn_slowpath_common+0x0/0x70) from [] (warn_slowpath_fmt+0x38/0x40)
+ r8:00004000 r7:a3b90000 r6:000080d0 r5:60000093 r4:c2e0a000 r3:00000009
+[] (warn_slowpath_fmt+0x0/0x40) from [] (lockdep_trace_alloc+0xcc/0x114)
+ r3:c03955d8 r2:c03907db
+[] (lockdep_trace_alloc+0x0/0x114) from [] (__kmalloc+0x34/0x118)
+ r6:000080d0 r5:c3800120 r4:000080d0 r3:c040a0f8
+[] (__kmalloc+0x0/0x118) from [] (imxdma_prep_dma_cyclic+0x64/0x168)
+ r7:a3b90000 r6:00000004 r5:c39d8420 r4:c3847150
+[] (imxdma_prep_dma_cyclic+0x0/0x168) from [] (snd_dmaengine_pcm_trigger+0xa8/0x160)
+[] (snd_dmaengine_pcm_trigger+0x0/0x160) from [] (soc_pcm_trigger+0x90/0xb4)
+ r8:c058c7b0 r7:c3b8140c r6:c39da560 r5:00000001 r4:c3b81000
+[] (soc_pcm_trigger+0x0/0xb4) from [] (snd_pcm_do_start+0x2c/0x38)
+ r7:00000000 r6:00000003 r5:c058c7b0 r4:c3b81000
+[] (snd_pcm_do_start+0x0/0x38) from [] (snd_pcm_action_single+0x40/0x6c)
+[] (snd_pcm_action_single+0x0/0x6c) from [] (snd_pcm_action_lock_irq+0x7c/0x9c)
+ r7:00000003 r6:c3b810f0 r5:c3b810f0 r4:c3b81000
+[] (snd_pcm_action_lock_irq+0x0/0x9c) from [] (snd_pcm_common_ioctl1+0x7f8/0xfd0)
+ r8:c3b7f888 r7:005407b8 r6:c2c991c0 r5:c3b81000 r4:c3b81000 r3:00004142
+[] (snd_pcm_common_ioctl1+0x0/0xfd0) from [] (snd_pcm_playback_ioctl1+0x464/0x488)
+[] (snd_pcm_playback_ioctl1+0x0/0x488) from [] (snd_pcm_playback_ioctl+0x34/0x40)
+ r8:c3b7f888 r7:00004142 r6:00000004 r5:c2c991c0 r4:005407b8
+[] (snd_pcm_playback_ioctl+0x0/0x40) from [] (vfs_ioctl+0x30/0x44)
+[] (vfs_ioctl+0x0/0x44) from [] (do_vfs_ioctl+0x55c/0x5c0)
+[] (do_vfs_ioctl+0x0/0x5c0) from [] (SyS_ioctl+0x40/0x68)
+[] (SyS_ioctl+0x0/0x68) from [] (ret_fast_syscall+0x0/0x44)
+ r8:c0009544 r7:00000036 r6:bedeaa58 r5:00000000 r4:000000c0
+
+Signed-off-by: Michael Grzeschik
+Signed-off-by: Vinod Koul
+Cc: Jonghwan Choi
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/dma/imx-dma.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/dma/imx-dma.c
++++ b/drivers/dma/imx-dma.c
+@@ -822,7 +822,7 @@ static struct dma_async_tx_descriptor *i
+ 	kfree(imxdmac->sg_list);
+
+ 	imxdmac->sg_list = kcalloc(periods + 1,
+-			sizeof(struct scatterlist), GFP_KERNEL);
++			sizeof(struct scatterlist), GFP_ATOMIC);
+ 	if (!imxdmac->sg_list)
+ 		return NULL;
+
diff --git a/queue-3.4/series b/queue-3.4/series
index 2006b536c81..3037f5ced7e 100644
--- a/queue-3.4/series
+++ b/queue-3.4/series
@@ -33,3 +33,7 @@ sparc64-fix-not-sra-ed-o5-in-32-bit-traced-syscall.patch
 sparc32-fix-exit-flag-passed-from-traced-sys_sigreturn.patch
 kernel-kmod.c-check-for-null-in-call_usermodehelper_exec.patch
 usb-serial-option-ignore-card-reader-interface-on-huawei-e1750.patch
+dmaengine-imx-dma-fix-lockdep-issue-between-irqhandler-and-tasklet.patch
+dmaengine-imx-dma-fix-callback-path-in-tasklet.patch
+dmaengine-imx-dma-fix-slow-path-issue-in-prep_dma_cyclic.patch
+staging-comedi-ni_65xx-bug-fix-confine-insn_bits-to-one-subdevice.patch
diff --git a/queue-3.4/staging-comedi-ni_65xx-bug-fix-confine-insn_bits-to-one-subdevice.patch b/queue-3.4/staging-comedi-ni_65xx-bug-fix-confine-insn_bits-to-one-subdevice.patch
new file mode 100644
index 00000000000..a33b0d408e8
--- /dev/null
+++ b/queue-3.4/staging-comedi-ni_65xx-bug-fix-confine-insn_bits-to-one-subdevice.patch
@@ -0,0 +1,82 @@
+From abbotti@mev.co.uk Thu Oct 10 15:53:41 2013
+From: Ian Abbott
+Date: Thu, 10 Oct 2013 10:53:46 +0100
+Subject: staging: comedi: ni_65xx: (bug fix) confine insn_bits to one subdevice
+To: stable@vger.kernel.org
+Cc: Ian Abbott
+Message-ID: <1381398826-12693-1-git-send-email-abbotti@mev.co.uk>
+
+From: Ian Abbott
+
+commit 677a31565692d596ef42ea589b53ba289abf4713 upstream.
+
+The `insn_bits` handler `ni_65xx_dio_insn_bits()` has a `for` loop that
+currently writes (optionally) and reads back up to 5 "ports" consisting
+of 8 channels each. It reads up to 32 1-bit channels but can only read
+and write a whole port at once - it needs to handle up to 5 ports as the
+first channel it reads might not be aligned on a port boundary. It
+breaks out of the loop early if the next port it handles is beyond the
+final port on the card. It also breaks out early on the 5th port in the
+loop if the first channel was aligned. Unfortunately, it doesn't check
+that the current port it is dealing with belongs to the comedi subdevice
+the `insn_bits` handler is acting on. That's a bug.
+
+Redo the `for` loop to terminate after the final port belonging to the
+subdevice, changing the loop variable in the process to simplify things
+a bit. The `for` loop could now try and handle more than 5 ports if the
+subdevice has more than 40 channels, but the test `if (bitshift >= 32)`
+ensures it will break out early after 4 or 5 ports (depending on whether
+the first channel is aligned on a port boundary). (`bitshift` will be
+between -7 and 7 inclusive on the first iteration, increasing by 8 for
+each subsequent operation.)
+
+Signed-off-by: Ian Abbott
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/staging/comedi/drivers/ni_65xx.c |   26 +++++++++++---------------
+ 1 file changed, 11 insertions(+), 15 deletions(-)
+
+--- a/drivers/staging/comedi/drivers/ni_65xx.c
++++ b/drivers/staging/comedi/drivers/ni_65xx.c
+@@ -411,29 +411,25 @@ static int ni_65xx_dio_insn_bits(struct
+ 				 struct comedi_subdevice *s,
+ 				 struct comedi_insn *insn, unsigned int *data)
+ {
+-	unsigned base_bitfield_channel;
+-	const unsigned max_ports_per_bitfield = 5;
++	int base_bitfield_channel;
+ 	unsigned read_bits = 0;
+-	unsigned j;
++	int last_port_offset = ni_65xx_port_by_channel(s->n_chan - 1);
++	int port_offset;
++
+ 	if (insn->n != 2)
+ 		return -EINVAL;
+ 	base_bitfield_channel = CR_CHAN(insn->chanspec);
+-	for (j = 0; j < max_ports_per_bitfield; ++j) {
+-		const unsigned port_offset =
+-			ni_65xx_port_by_channel(base_bitfield_channel) + j;
+-		const unsigned port =
+-			sprivate(s)->base_port + port_offset;
+-		unsigned base_port_channel;
++	for (port_offset = ni_65xx_port_by_channel(base_bitfield_channel);
++	     port_offset <= last_port_offset; port_offset++) {
++		unsigned port = sprivate(s)->base_port + port_offset;
++		int base_port_channel = port_offset * ni_65xx_channels_per_port;
+ 		unsigned port_mask, port_data, port_read_bits;
+-		int bitshift;
+-		if (port >= ni_65xx_total_num_ports(board(dev)))
++		int bitshift = base_port_channel - base_bitfield_channel;
++
++		if (bitshift >= 32)
+ 			break;
+-		base_port_channel = port_offset * ni_65xx_channels_per_port;
+ 		port_mask = data[0];
+ 		port_data = data[1];
+-		bitshift = base_port_channel - base_bitfield_channel;
+-		if (bitshift >= 32 || bitshift <= -32)
+-			break;
+ 		if (bitshift > 0) {
+ 			port_mask >>= bitshift;
+ 			port_data >>= bitshift;
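
To make the shift arithmetic described in the commit message above concrete,
here is a small stand-alone sketch (plain C, not comedi driver code;
CHANNELS_PER_PORT stands in for ni_65xx_channels_per_port) showing that the
`bitshift >= 32` test stops the loop after 4 ports when the first channel is
port-aligned and after 5 ports otherwise:

	#include <stdio.h>

	#define CHANNELS_PER_PORT 8

	int main(void)
	{
		int first_channel;

		/* alignment of the first channel within its 8-channel port */
		for (first_channel = 0; first_channel < CHANNELS_PER_PORT; first_channel++) {
			int port = first_channel / CHANNELS_PER_PORT;
			int ports_touched = 0;

			/* walk ports like the reworked loop: shift grows by 8 per port */
			for (;; port++) {
				int bitshift = port * CHANNELS_PER_PORT - first_channel;

				if (bitshift >= 32)	/* beyond the 32-bit data window */
					break;
				ports_touched++;
			}
			printf("first channel offset %d -> %d ports touched\n",
			       first_channel, ports_touched);
		}
		return 0;
	}

Running it prints 4 ports for offset 0 and 5 ports for offsets 1 through 7,
matching the "4 or 5 ports" behaviour the commit message describes.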