From: Greg Kroah-Hartman Date: Fri, 19 Jun 2015 18:23:48 +0000 (-0700) Subject: 4.0-stable patches X-Git-Tag: v3.10.81~13 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=39a29eb61997ece3c66d877b580ae901c4c414f0;p=thirdparty%2Fkernel%2Fstable-queue.git 4.0-stable patches added patches: dmaengine-at_xdmac-lock-fixes.patch dmaengine-at_xdmac-rework-slave-configuration-part.patch dmaengine-fix-choppy-sound-because-of-unimplemented-resume.patch dmaengine-pl330-fix-hang-on-dmaengine_terminate_all-on-certain-boards.patch i2c-hix5hd2-fix-modalias-to-make-module-auto-loading-work.patch i2c-s3c2410-fix-oops-in-suspend-callback-for-non-dt.patch --- diff --git a/queue-4.0/dmaengine-at_xdmac-lock-fixes.patch b/queue-4.0/dmaengine-at_xdmac-lock-fixes.patch new file mode 100644 index 00000000000..0405191cbdd --- /dev/null +++ b/queue-4.0/dmaengine-at_xdmac-lock-fixes.patch @@ -0,0 +1,346 @@ +From 4c374fc7ce944024936a6d9804daec85207d9384 Mon Sep 17 00:00:00 2001 +From: Ludovic Desroches +Date: Mon, 8 Jun 2015 10:33:14 +0200 +Subject: dmaengine: at_xdmac: lock fixes + +From: Ludovic Desroches + +commit 4c374fc7ce944024936a6d9804daec85207d9384 upstream. + +Using _bh variant for spin locks causes this kind of warning: +Starting logging: ------------[ cut here ]------------ +WARNING: CPU: 0 PID: 3 at /ssd_drive/linux/kernel/softirq.c:151 +__local_bh_enable_ip+0xe8/0xf4() +Modules linked in: +CPU: 0 PID: 3 Comm: ksoftirqd/0 Not tainted 4.1.0-rc2+ #94 +Hardware name: Atmel SAMA5 +[] (unwind_backtrace) from [] (show_stack+0x10/0x14) +[] (show_stack) from [] +(warn_slowpath_common+0x80/0xac) +[] (warn_slowpath_common) from [] +(warn_slowpath_null+0x1c/0x24) +[] (warn_slowpath_null) from [] +(__local_bh_enable_ip+0xe8/0xf4) +[] (__local_bh_enable_ip) from [] +(at_xdmac_device_terminate_all+0xf4/0x100) +[] (at_xdmac_device_terminate_all) from [] +(atmel_complete_tx_dma+0x34/0xf4) +[] (atmel_complete_tx_dma) from [] +(at_xdmac_tasklet+0x14c/0x1ac) +[] (at_xdmac_tasklet) from [] +(tasklet_action+0x68/0xb4) +[] (tasklet_action) from [] +(__do_softirq+0xfc/0x238) +[] (__do_softirq) from [] (run_ksoftirqd+0x28/0x34) +[] (run_ksoftirqd) from [] +(smpboot_thread_fn+0x138/0x18c) +[] (smpboot_thread_fn) from [] (kthread+0xdc/0xf0) +[] (kthread) from [] (ret_from_fork+0x14/0x34) +---[ end trace b57b14a99c1d8812 ]--- + +It comes from the fact that devices can called some code from the DMA +controller with irq disabled. _bh variant is not intended to be used in +this case since it can enable irqs. Switch to irqsave/irqrestore variant to +avoid this situation. 
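
[Editor's note, not part of the patch: the locking change boils down to the pattern sketched below, with generic names rather than the driver's. The _bh variants unconditionally re-enable bottom halves on unlock, which is wrong when the caller already runs with interrupts disabled; the irqsave/irqrestore variants save and restore whatever interrupt state the caller had.]

    /* Illustrative sketch only; names are generic, not at_xdmac's. */
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(lock);

    static void bad_callee(void)
    {
            spin_lock_bh(&lock);
            /* ... critical section ... */
            spin_unlock_bh(&lock);  /* unconditionally re-enables BHs:
                                     * broken if the caller had IRQs off */
    }

    static void good_callee(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&lock, flags);        /* saves current IRQ state */
            /* ... critical section ... */
            spin_unlock_irqrestore(&lock, flags);   /* restores it as it was */
    }
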
+ +Signed-off-by: Ludovic Desroches +Signed-off-by: Vinod Koul +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/dma/at_xdmac.c | 77 ++++++++++++++++++++++++++++--------------------- + 1 file changed, 45 insertions(+), 32 deletions(-) + +--- a/drivers/dma/at_xdmac.c ++++ b/drivers/dma/at_xdmac.c +@@ -414,8 +414,9 @@ static dma_cookie_t at_xdmac_tx_submit(s + struct at_xdmac_desc *desc = txd_to_at_desc(tx); + struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan); + dma_cookie_t cookie; ++ unsigned long irqflags; + +- spin_lock_bh(&atchan->lock); ++ spin_lock_irqsave(&atchan->lock, irqflags); + cookie = dma_cookie_assign(tx); + + dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n", +@@ -424,7 +425,7 @@ static dma_cookie_t at_xdmac_tx_submit(s + if (list_is_singular(&atchan->xfers_list)) + at_xdmac_start_xfer(atchan, desc); + +- spin_unlock_bh(&atchan->lock); ++ spin_unlock_irqrestore(&atchan->lock, irqflags); + return cookie; + } + +@@ -595,6 +596,8 @@ at_xdmac_prep_slave_sg(struct dma_chan * + struct scatterlist *sg; + int i; + unsigned int xfer_size = 0; ++ unsigned long irqflags; ++ struct dma_async_tx_descriptor *ret = NULL; + + if (!sgl) + return NULL; +@@ -610,7 +613,7 @@ at_xdmac_prep_slave_sg(struct dma_chan * + flags); + + /* Protect dma_sconfig field that can be modified by set_slave_conf. */ +- spin_lock_bh(&atchan->lock); ++ spin_lock_irqsave(&atchan->lock, irqflags); + + if (at_xdmac_compute_chan_conf(chan, direction)) + goto spin_unlock; +@@ -624,8 +627,7 @@ at_xdmac_prep_slave_sg(struct dma_chan * + mem = sg_dma_address(sg); + if (unlikely(!len)) { + dev_err(chan2dev(chan), "sg data length is zero\n"); +- spin_unlock_bh(&atchan->lock); +- return NULL; ++ goto spin_unlock; + } + dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n", + __func__, i, len, mem); +@@ -635,8 +637,7 @@ at_xdmac_prep_slave_sg(struct dma_chan * + dev_err(chan2dev(chan), "can't get descriptor\n"); + if (first) + list_splice_init(&first->descs_list, &atchan->free_descs_list); +- spin_unlock_bh(&atchan->lock); +- return NULL; ++ goto spin_unlock; + } + + /* Linked list descriptor setup. 
*/ +@@ -679,13 +680,15 @@ at_xdmac_prep_slave_sg(struct dma_chan * + xfer_size += len; + } + +- spin_unlock_bh(&atchan->lock); + + first->tx_dma_desc.flags = flags; + first->xfer_size = xfer_size; + first->direction = direction; ++ ret = &first->tx_dma_desc; + +- return &first->tx_dma_desc; ++spin_unlock: ++ spin_unlock_irqrestore(&atchan->lock, irqflags); ++ return ret; + } + + static struct dma_async_tx_descriptor * +@@ -698,6 +701,7 @@ at_xdmac_prep_dma_cyclic(struct dma_chan + struct at_xdmac_desc *first = NULL, *prev = NULL; + unsigned int periods = buf_len / period_len; + int i; ++ unsigned long irqflags; + + dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n", + __func__, &buf_addr, buf_len, period_len, +@@ -719,16 +723,16 @@ at_xdmac_prep_dma_cyclic(struct dma_chan + for (i = 0; i < periods; i++) { + struct at_xdmac_desc *desc = NULL; + +- spin_lock_bh(&atchan->lock); ++ spin_lock_irqsave(&atchan->lock, irqflags); + desc = at_xdmac_get_desc(atchan); + if (!desc) { + dev_err(chan2dev(chan), "can't get descriptor\n"); + if (first) + list_splice_init(&first->descs_list, &atchan->free_descs_list); +- spin_unlock_bh(&atchan->lock); ++ spin_unlock_irqrestore(&atchan->lock, irqflags); + return NULL; + } +- spin_unlock_bh(&atchan->lock); ++ spin_unlock_irqrestore(&atchan->lock, irqflags); + dev_dbg(chan2dev(chan), + "%s: desc=0x%p, tx_dma_desc.phys=%pad\n", + __func__, desc, &desc->tx_dma_desc.phys); +@@ -802,6 +806,7 @@ at_xdmac_prep_dma_memcpy(struct dma_chan + | AT_XDMAC_CC_SIF(0) + | AT_XDMAC_CC_MBSIZE_SIXTEEN + | AT_XDMAC_CC_TYPE_MEM_TRAN; ++ unsigned long irqflags; + + dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n", + __func__, &src, &dest, len, flags); +@@ -834,9 +839,9 @@ at_xdmac_prep_dma_memcpy(struct dma_chan + + dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size); + +- spin_lock_bh(&atchan->lock); ++ spin_lock_irqsave(&atchan->lock, irqflags); + desc = at_xdmac_get_desc(atchan); +- spin_unlock_bh(&atchan->lock); ++ spin_unlock_irqrestore(&atchan->lock, irqflags); + if (!desc) { + dev_err(chan2dev(chan), "can't get descriptor\n"); + if (first) +@@ -922,6 +927,7 @@ at_xdmac_tx_status(struct dma_chan *chan + int residue; + u32 cur_nda, mask, value; + u8 dwidth = 0; ++ unsigned long flags; + + ret = dma_cookie_status(chan, cookie, txstate); + if (ret == DMA_COMPLETE) +@@ -930,7 +936,7 @@ at_xdmac_tx_status(struct dma_chan *chan + if (!txstate) + return ret; + +- spin_lock_bh(&atchan->lock); ++ spin_lock_irqsave(&atchan->lock, flags); + + desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node); + +@@ -940,8 +946,7 @@ at_xdmac_tx_status(struct dma_chan *chan + */ + if (!desc->active_xfer) { + dma_set_residue(txstate, desc->xfer_size); +- spin_unlock_bh(&atchan->lock); +- return ret; ++ goto spin_unlock; + } + + residue = desc->xfer_size; +@@ -972,14 +977,14 @@ at_xdmac_tx_status(struct dma_chan *chan + } + residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth; + +- spin_unlock_bh(&atchan->lock); +- + dma_set_residue(txstate, residue); + + dev_dbg(chan2dev(chan), + "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n", + __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue); + ++spin_unlock: ++ spin_unlock_irqrestore(&atchan->lock, flags); + return ret; + } + +@@ -1000,8 +1005,9 @@ static void at_xdmac_remove_xfer(struct + static void at_xdmac_advance_work(struct at_xdmac_chan *atchan) + { + struct at_xdmac_desc *desc; ++ 
unsigned long flags; + +- spin_lock_bh(&atchan->lock); ++ spin_lock_irqsave(&atchan->lock, flags); + + /* + * If channel is enabled, do nothing, advance_work will be triggered +@@ -1016,7 +1022,7 @@ static void at_xdmac_advance_work(struct + at_xdmac_start_xfer(atchan, desc); + } + +- spin_unlock_bh(&atchan->lock); ++ spin_unlock_irqrestore(&atchan->lock, flags); + } + + static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan) +@@ -1152,12 +1158,13 @@ static int at_xdmac_device_config(struct + { + struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); + int ret; ++ unsigned long flags; + + dev_dbg(chan2dev(chan), "%s\n", __func__); + +- spin_lock_bh(&atchan->lock); ++ spin_lock_irqsave(&atchan->lock, flags); + ret = at_xdmac_set_slave_config(chan, config); +- spin_unlock_bh(&atchan->lock); ++ spin_unlock_irqrestore(&atchan->lock, flags); + + return ret; + } +@@ -1166,18 +1173,19 @@ static int at_xdmac_device_pause(struct + { + struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); + struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); ++ unsigned long flags; + + dev_dbg(chan2dev(chan), "%s\n", __func__); + + if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status)) + return 0; + +- spin_lock_bh(&atchan->lock); ++ spin_lock_irqsave(&atchan->lock, flags); + at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask); + while (at_xdmac_chan_read(atchan, AT_XDMAC_CC) + & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP)) + cpu_relax(); +- spin_unlock_bh(&atchan->lock); ++ spin_unlock_irqrestore(&atchan->lock, flags); + + return 0; + } +@@ -1186,16 +1194,19 @@ static int at_xdmac_device_resume(struct + { + struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); + struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); ++ unsigned long flags; + + dev_dbg(chan2dev(chan), "%s\n", __func__); + +- spin_lock_bh(&atchan->lock); +- if (!at_xdmac_chan_is_paused(atchan)) ++ spin_lock_irqsave(&atchan->lock, flags); ++ if (!at_xdmac_chan_is_paused(atchan)) { ++ spin_unlock_irqrestore(&atchan->lock, flags); + return 0; ++ } + + at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask); + clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); +- spin_unlock_bh(&atchan->lock); ++ spin_unlock_irqrestore(&atchan->lock, flags); + + return 0; + } +@@ -1205,10 +1216,11 @@ static int at_xdmac_device_terminate_all + struct at_xdmac_desc *desc, *_desc; + struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); + struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); ++ unsigned long flags; + + dev_dbg(chan2dev(chan), "%s\n", __func__); + +- spin_lock_bh(&atchan->lock); ++ spin_lock_irqsave(&atchan->lock, flags); + at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); + while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask) + cpu_relax(); +@@ -1218,7 +1230,7 @@ static int at_xdmac_device_terminate_all + at_xdmac_remove_xfer(atchan, desc); + + clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status); +- spin_unlock_bh(&atchan->lock); ++ spin_unlock_irqrestore(&atchan->lock, flags); + + return 0; + } +@@ -1228,8 +1240,9 @@ static int at_xdmac_alloc_chan_resources + struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); + struct at_xdmac_desc *desc; + int i; ++ unsigned long flags; + +- spin_lock_bh(&atchan->lock); ++ spin_lock_irqsave(&atchan->lock, flags); + + if (at_xdmac_chan_is_enabled(atchan)) { + dev_err(chan2dev(chan), +@@ -1260,7 +1273,7 @@ static int at_xdmac_alloc_chan_resources + dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i); + + spin_unlock: +- spin_unlock_bh(&atchan->lock); ++ 
spin_unlock_irqrestore(&atchan->lock, flags); + return i; + } + diff --git a/queue-4.0/dmaengine-at_xdmac-rework-slave-configuration-part.patch b/queue-4.0/dmaengine-at_xdmac-rework-slave-configuration-part.patch new file mode 100644 index 00000000000..8d239d53020 --- /dev/null +++ b/queue-4.0/dmaengine-at_xdmac-rework-slave-configuration-part.patch @@ -0,0 +1,256 @@ +From 765c37d876698268eea8b820081ac8fc9d0fc8bc Mon Sep 17 00:00:00 2001 +From: Ludovic Desroches +Date: Mon, 8 Jun 2015 10:33:15 +0200 +Subject: dmaengine: at_xdmac: rework slave configuration part + +From: Ludovic Desroches + +commit 765c37d876698268eea8b820081ac8fc9d0fc8bc upstream. + +Rework slave configuration part in order to more report wrong errors +about the configuration. +Only maxburst and addr width values are checked when doing the slave +configuration. The validity of the channel configuration is done at +prepare time. + +Signed-off-by: Ludovic Desroches +Signed-off-by: Vinod Koul +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/dma/at_xdmac.c | 158 ++++++++++++++++++++++++++++++------------------- + 1 file changed, 97 insertions(+), 61 deletions(-) + +--- a/drivers/dma/at_xdmac.c ++++ b/drivers/dma/at_xdmac.c +@@ -174,6 +174,8 @@ + #define AT_XDMAC_MBR_UBC_NDV3 (0x3 << 27) /* Next Descriptor View 3 */ + + #define AT_XDMAC_MAX_CHAN 0x20 ++#define AT_XDMAC_MAX_CSIZE 16 /* 16 data */ ++#define AT_XDMAC_MAX_DWIDTH 8 /* 64 bits */ + + #define AT_XDMAC_DMA_BUSWIDTHS\ + (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\ +@@ -192,20 +194,17 @@ struct at_xdmac_chan { + struct dma_chan chan; + void __iomem *ch_regs; + u32 mask; /* Channel Mask */ +- u32 cfg[2]; /* Channel Configuration Register */ +- #define AT_XDMAC_DEV_TO_MEM_CFG 0 /* Predifined dev to mem channel conf */ +- #define AT_XDMAC_MEM_TO_DEV_CFG 1 /* Predifined mem to dev channel conf */ ++ u32 cfg; /* Channel Configuration Register */ + u8 perid; /* Peripheral ID */ + u8 perif; /* Peripheral Interface */ + u8 memif; /* Memory Interface */ +- u32 per_src_addr; +- u32 per_dst_addr; + u32 save_cc; + u32 save_cim; + u32 save_cnda; + u32 save_cndc; + unsigned long status; + struct tasklet_struct tasklet; ++ struct dma_slave_config sconfig; + + spinlock_t lock; + +@@ -494,61 +493,94 @@ static struct dma_chan *at_xdmac_xlate(s + return chan; + } + ++static int at_xdmac_compute_chan_conf(struct dma_chan *chan, ++ enum dma_transfer_direction direction) ++{ ++ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); ++ int csize, dwidth; ++ ++ if (direction == DMA_DEV_TO_MEM) { ++ atchan->cfg = ++ AT91_XDMAC_DT_PERID(atchan->perid) ++ | AT_XDMAC_CC_DAM_INCREMENTED_AM ++ | AT_XDMAC_CC_SAM_FIXED_AM ++ | AT_XDMAC_CC_DIF(atchan->memif) ++ | AT_XDMAC_CC_SIF(atchan->perif) ++ | AT_XDMAC_CC_SWREQ_HWR_CONNECTED ++ | AT_XDMAC_CC_DSYNC_PER2MEM ++ | AT_XDMAC_CC_MBSIZE_SIXTEEN ++ | AT_XDMAC_CC_TYPE_PER_TRAN; ++ csize = ffs(atchan->sconfig.src_maxburst) - 1; ++ if (csize < 0) { ++ dev_err(chan2dev(chan), "invalid src maxburst value\n"); ++ return -EINVAL; ++ } ++ atchan->cfg |= AT_XDMAC_CC_CSIZE(csize); ++ dwidth = ffs(atchan->sconfig.src_addr_width) - 1; ++ if (dwidth < 0) { ++ dev_err(chan2dev(chan), "invalid src addr width value\n"); ++ return -EINVAL; ++ } ++ atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth); ++ } else if (direction == DMA_MEM_TO_DEV) { ++ atchan->cfg = ++ AT91_XDMAC_DT_PERID(atchan->perid) ++ | AT_XDMAC_CC_DAM_FIXED_AM ++ | AT_XDMAC_CC_SAM_INCREMENTED_AM ++ | AT_XDMAC_CC_DIF(atchan->perif) ++ | AT_XDMAC_CC_SIF(atchan->memif) ++ | AT_XDMAC_CC_SWREQ_HWR_CONNECTED ++ | 
AT_XDMAC_CC_DSYNC_MEM2PER ++ | AT_XDMAC_CC_MBSIZE_SIXTEEN ++ | AT_XDMAC_CC_TYPE_PER_TRAN; ++ csize = ffs(atchan->sconfig.dst_maxburst) - 1; ++ if (csize < 0) { ++ dev_err(chan2dev(chan), "invalid src maxburst value\n"); ++ return -EINVAL; ++ } ++ atchan->cfg |= AT_XDMAC_CC_CSIZE(csize); ++ dwidth = ffs(atchan->sconfig.dst_addr_width) - 1; ++ if (dwidth < 0) { ++ dev_err(chan2dev(chan), "invalid dst addr width value\n"); ++ return -EINVAL; ++ } ++ atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth); ++ } ++ ++ dev_dbg(chan2dev(chan), "%s: cfg=0x%08x\n", __func__, atchan->cfg); ++ ++ return 0; ++} ++ ++/* ++ * Only check that maxburst and addr width values are supported by the ++ * the controller but not that the configuration is good to perform the ++ * transfer since we don't know the direction at this stage. ++ */ ++static int at_xdmac_check_slave_config(struct dma_slave_config *sconfig) ++{ ++ if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE) ++ || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE)) ++ return -EINVAL; ++ ++ if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH) ++ || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH)) ++ return -EINVAL; ++ ++ return 0; ++} ++ + static int at_xdmac_set_slave_config(struct dma_chan *chan, + struct dma_slave_config *sconfig) + { + struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); +- u8 dwidth; +- int csize; + +- atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] = +- AT91_XDMAC_DT_PERID(atchan->perid) +- | AT_XDMAC_CC_DAM_INCREMENTED_AM +- | AT_XDMAC_CC_SAM_FIXED_AM +- | AT_XDMAC_CC_DIF(atchan->memif) +- | AT_XDMAC_CC_SIF(atchan->perif) +- | AT_XDMAC_CC_SWREQ_HWR_CONNECTED +- | AT_XDMAC_CC_DSYNC_PER2MEM +- | AT_XDMAC_CC_MBSIZE_SIXTEEN +- | AT_XDMAC_CC_TYPE_PER_TRAN; +- csize = at_xdmac_csize(sconfig->src_maxburst); +- if (csize < 0) { +- dev_err(chan2dev(chan), "invalid src maxburst value\n"); +- return -EINVAL; +- } +- atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_CSIZE(csize); +- dwidth = ffs(sconfig->src_addr_width) - 1; +- atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth); +- +- +- atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] = +- AT91_XDMAC_DT_PERID(atchan->perid) +- | AT_XDMAC_CC_DAM_FIXED_AM +- | AT_XDMAC_CC_SAM_INCREMENTED_AM +- | AT_XDMAC_CC_DIF(atchan->perif) +- | AT_XDMAC_CC_SIF(atchan->memif) +- | AT_XDMAC_CC_SWREQ_HWR_CONNECTED +- | AT_XDMAC_CC_DSYNC_MEM2PER +- | AT_XDMAC_CC_MBSIZE_SIXTEEN +- | AT_XDMAC_CC_TYPE_PER_TRAN; +- csize = at_xdmac_csize(sconfig->dst_maxburst); +- if (csize < 0) { +- dev_err(chan2dev(chan), "invalid src maxburst value\n"); ++ if (at_xdmac_check_slave_config(sconfig)) { ++ dev_err(chan2dev(chan), "invalid slave configuration\n"); + return -EINVAL; + } +- atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_CSIZE(csize); +- dwidth = ffs(sconfig->dst_addr_width) - 1; +- atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth); +- +- /* Src and dst addr are needed to configure the link list descriptor. */ +- atchan->per_src_addr = sconfig->src_addr; +- atchan->per_dst_addr = sconfig->dst_addr; +- +- dev_dbg(chan2dev(chan), +- "%s: cfg[dev2mem]=0x%08x, cfg[mem2dev]=0x%08x, per_src_addr=0x%08x, per_dst_addr=0x%08x\n", +- __func__, atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG], +- atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG], +- atchan->per_src_addr, atchan->per_dst_addr); ++ ++ memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig)); + + return 0; + } +@@ -580,6 +612,9 @@ at_xdmac_prep_slave_sg(struct dma_chan * + /* Protect dma_sconfig field that can be modified by set_slave_conf. 
*/ + spin_lock_bh(&atchan->lock); + ++ if (at_xdmac_compute_chan_conf(chan, direction)) ++ goto spin_unlock; ++ + /* Prepare descriptors. */ + for_each_sg(sgl, sg, sg_len, i) { + struct at_xdmac_desc *desc = NULL; +@@ -606,14 +641,13 @@ at_xdmac_prep_slave_sg(struct dma_chan * + + /* Linked list descriptor setup. */ + if (direction == DMA_DEV_TO_MEM) { +- desc->lld.mbr_sa = atchan->per_src_addr; ++ desc->lld.mbr_sa = atchan->sconfig.src_addr; + desc->lld.mbr_da = mem; +- desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG]; + } else { + desc->lld.mbr_sa = mem; +- desc->lld.mbr_da = atchan->per_dst_addr; +- desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG]; ++ desc->lld.mbr_da = atchan->sconfig.dst_addr; + } ++ desc->lld.mbr_cfg = atchan->cfg; + dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg); + fixed_dwidth = IS_ALIGNED(len, 1 << dwidth) + ? at_xdmac_get_dwidth(desc->lld.mbr_cfg) +@@ -679,6 +713,9 @@ at_xdmac_prep_dma_cyclic(struct dma_chan + return NULL; + } + ++ if (at_xdmac_compute_chan_conf(chan, direction)) ++ return NULL; ++ + for (i = 0; i < periods; i++) { + struct at_xdmac_desc *desc = NULL; + +@@ -697,14 +734,13 @@ at_xdmac_prep_dma_cyclic(struct dma_chan + __func__, desc, &desc->tx_dma_desc.phys); + + if (direction == DMA_DEV_TO_MEM) { +- desc->lld.mbr_sa = atchan->per_src_addr; ++ desc->lld.mbr_sa = atchan->sconfig.src_addr; + desc->lld.mbr_da = buf_addr + i * period_len; +- desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG]; + } else { + desc->lld.mbr_sa = buf_addr + i * period_len; +- desc->lld.mbr_da = atchan->per_dst_addr; +- desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG]; ++ desc->lld.mbr_da = atchan->sconfig.dst_addr; + } ++ desc->lld.mbr_cfg = atchan->cfg; + desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1 + | AT_XDMAC_MBR_UBC_NDEN + | AT_XDMAC_MBR_UBC_NSEN diff --git a/queue-4.0/dmaengine-fix-choppy-sound-because-of-unimplemented-resume.patch b/queue-4.0/dmaengine-fix-choppy-sound-because-of-unimplemented-resume.patch new file mode 100644 index 00000000000..c2349e354cd --- /dev/null +++ b/queue-4.0/dmaengine-fix-choppy-sound-because-of-unimplemented-resume.patch @@ -0,0 +1,48 @@ +From 88d04643c66052a1cf92a6fd5f92dff0f7757f61 Mon Sep 17 00:00:00 2001 +From: Krzysztof Kozlowski +Date: Wed, 10 Jun 2015 17:17:07 +0900 +Subject: dmaengine: Fix choppy sound because of unimplemented resume + +From: Krzysztof Kozlowski + +commit 88d04643c66052a1cf92a6fd5f92dff0f7757f61 upstream. + +Some drivers implement only pause operation (no resuming). Example is +pl330 where pause is needed for getting residuum. pl330 does not support +resume operation, transfer must be stopped after pause. + +However for slaves this is exposed always as "pause and resume" which +introduces subtle errors on Odroid U3 board (Exynos4412 with pl330). +After adding pause function to pl330 driver the audio playback +(utilizing DMA) gets choppy after some time (approximately 24 hours). + +Fix this by exposing "cmd_pause" if and only if pause and resume are +implemented. 
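
[Editor's note, not part of the patch: the sketch below shows roughly how a DMA client consults the advertised capability; the helper name is made up and error handling is elided. A client that sees cmd_pause set will pause and later resume, which silently misbehaves on hardware that can only pause, hence advertising cmd_pause only when both callbacks exist.]

    /* Illustrative sketch of a client-side capability check. */
    #include <linux/dmaengine.h>

    static void pause_if_supported(struct dma_chan *chan)
    {
            struct dma_slave_caps caps;

            if (dma_get_slave_caps(chan, &caps))
                    return;

            if (caps.cmd_pause) {
                    /* With this fix, cmd_pause implies resume works too. */
                    dmaengine_pause(chan);
                    /* ... read residue, etc. ... */
                    dmaengine_resume(chan);
            }
    }
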
+ +Signed-off-by: Krzysztof Kozlowski +Reported-by: gabriel@unseen.is +Reported-by: Marek Szyprowski +Fixes: 88987d2c7534 ("dmaengine: pl330: add DMA_PAUSE feature") +Acked-by: Maxime Ripard +Signed-off-by: Vinod Koul +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/dma/dmaengine.c | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +--- a/drivers/dma/dmaengine.c ++++ b/drivers/dma/dmaengine.c +@@ -505,7 +505,11 @@ int dma_get_slave_caps(struct dma_chan * + caps->directions = device->directions; + caps->residue_granularity = device->residue_granularity; + +- caps->cmd_pause = !!device->device_pause; ++ /* ++ * Some devices implement only pause (e.g. to get residuum) but no ++ * resume. However cmd_pause is advertised as pause AND resume. ++ */ ++ caps->cmd_pause = !!(device->device_pause && device->device_resume); + caps->cmd_terminate = !!device->device_terminate_all; + + return 0; diff --git a/queue-4.0/dmaengine-pl330-fix-hang-on-dmaengine_terminate_all-on-certain-boards.patch b/queue-4.0/dmaengine-pl330-fix-hang-on-dmaengine_terminate_all-on-certain-boards.patch new file mode 100644 index 00000000000..7ed9c0b33cd --- /dev/null +++ b/queue-4.0/dmaengine-pl330-fix-hang-on-dmaengine_terminate_all-on-certain-boards.patch @@ -0,0 +1,55 @@ +From 81cc6edc08705ac0146fe6ac14a0982a31ce6f3d Mon Sep 17 00:00:00 2001 +From: Krzysztof Kozlowski +Date: Thu, 21 May 2015 09:34:09 +0900 +Subject: dmaengine: pl330: Fix hang on dmaengine_terminate_all on certain boards + +From: Krzysztof Kozlowski + +commit 81cc6edc08705ac0146fe6ac14a0982a31ce6f3d upstream. + +The pl330 device could hang infinitely on certain boards when DMA +channels are terminated. + +It was caused by lack of runtime resume when executing +pl330_terminate_all() which calls the _stop() function. _stop() accesses +device register and can loop infinitely while checking for device state. + +The hang was confirmed by Dinh Nguyen on Altera SOCFPGA Cyclone V +board during boot. 
It can be also triggered with: + +$ echo 1 > /sys/module/dmatest/parameters/iterations +$ echo dma1chan0 > /sys/module/dmatest/parameters/channel +$ echo 1 > /sys/module/dmatest/parameters/run +$ sleep 1 +$ cat /sys/module/dmatest/parameters/run + +Reported-by: Dinh Nguyen +Signed-off-by: Krzysztof Kozlowski +Fixes: ae43b3289186 ("ARM: 8202/1: dmaengine: pl330: Add runtime Power Management support v12") +Tested-by: Dinh Nguyen +Signed-off-by: Vinod Koul +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/dma/pl330.c | 3 +++ + 1 file changed, 3 insertions(+) + +--- a/drivers/dma/pl330.c ++++ b/drivers/dma/pl330.c +@@ -2127,6 +2127,7 @@ static int pl330_terminate_all(struct dm + struct pl330_dmac *pl330 = pch->dmac; + LIST_HEAD(list); + ++ pm_runtime_get_sync(pl330->ddma.dev); + spin_lock_irqsave(&pch->lock, flags); + spin_lock(&pl330->lock); + _stop(pch->thread); +@@ -2151,6 +2152,8 @@ static int pl330_terminate_all(struct dm + list_splice_tail_init(&pch->work_list, &pl330->desc_pool); + list_splice_tail_init(&pch->completed_list, &pl330->desc_pool); + spin_unlock_irqrestore(&pch->lock, flags); ++ pm_runtime_mark_last_busy(pl330->ddma.dev); ++ pm_runtime_put_autosuspend(pl330->ddma.dev); + + return 0; + } diff --git a/queue-4.0/i2c-hix5hd2-fix-modalias-to-make-module-auto-loading-work.patch b/queue-4.0/i2c-hix5hd2-fix-modalias-to-make-module-auto-loading-work.patch new file mode 100644 index 00000000000..8c8fc1621b7 --- /dev/null +++ b/queue-4.0/i2c-hix5hd2-fix-modalias-to-make-module-auto-loading-work.patch @@ -0,0 +1,29 @@ +From 3e59ae4aa28237ced95413fbd46004b57c4da095 Mon Sep 17 00:00:00 2001 +From: Axel Lin +Date: Fri, 8 May 2015 08:50:11 +0800 +Subject: i2c: hix5hd2: Fix modalias to make module auto-loading work + +From: Axel Lin + +commit 3e59ae4aa28237ced95413fbd46004b57c4da095 upstream. + +Make the modalias match driver name, this is required to make module +auto-loading work. + +Signed-off-by: Axel Lin +Acked-by: Zhangfei Gao +Signed-off-by: Wolfram Sang +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/i2c/busses/i2c-hix5hd2.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/i2c/busses/i2c-hix5hd2.c ++++ b/drivers/i2c/busses/i2c-hix5hd2.c +@@ -554,4 +554,4 @@ module_platform_driver(hix5hd2_i2c_drive + MODULE_DESCRIPTION("Hix5hd2 I2C Bus driver"); + MODULE_AUTHOR("Wei Yan "); + MODULE_LICENSE("GPL"); +-MODULE_ALIAS("platform:i2c-hix5hd2"); ++MODULE_ALIAS("platform:hix5hd2-i2c"); diff --git a/queue-4.0/i2c-s3c2410-fix-oops-in-suspend-callback-for-non-dt.patch b/queue-4.0/i2c-s3c2410-fix-oops-in-suspend-callback-for-non-dt.patch new file mode 100644 index 00000000000..8ed2a121b65 --- /dev/null +++ b/queue-4.0/i2c-s3c2410-fix-oops-in-suspend-callback-for-non-dt.patch @@ -0,0 +1,33 @@ +From 8d487a43c36b54a029d74ad3b0a6a9d1253e728a Mon Sep 17 00:00:00 2001 +From: Vasily Khoruzhick +Date: Sun, 3 May 2015 21:13:10 +0300 +Subject: i2c: s3c2410: fix oops in suspend callback for non-dt + platforms + +From: Vasily Khoruzhick + +commit 8d487a43c36b54a029d74ad3b0a6a9d1253e728a upstream. + +Initialize sysreg by default, otherwise driver will crash in suspend +callback when not using DT. 
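
[Editor's note, not part of the patch: the oops occurs because a zero-initialised (NULL) pointer passes the IS_ERR() test in the suspend path and is then dereferenced. The sketch below uses simplified stand-in names, not the actual driver code, to show why pre-setting the field to ERR_PTR(-ENOENT) makes non-DT platforms take the early return instead.]

    /* Illustrative sketch; field and function names are simplified. */
    #include <linux/err.h>
    #include <linux/regmap.h>

    struct my_i2c {
            struct regmap *sysreg;  /* only looked up when booting with DT */
    };

    static void my_suspend(struct my_i2c *i2c)
    {
            /*
             * If sysreg was left NULL (kzalloc default), IS_ERR() is false
             * and the regmap call below oopses; with ERR_PTR(-ENOENT) set
             * at probe time, non-DT platforms simply return here.
             */
            if (IS_ERR(i2c->sysreg))
                    return;

            regmap_update_bits(i2c->sysreg, 0x0, 0x1, 0x1);
    }
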
+ +Signed-off-by: Vasily Khoruzhick +Reviewed-by: Krzysztof Kozlowski +Signed-off-by: Wolfram Sang +Fixes: a7750c3ef01223 ("i2c: s3c2410: Handle i2c sys_cfg register in i2c driver") +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/i2c/busses/i2c-s3c2410.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/i2c/busses/i2c-s3c2410.c ++++ b/drivers/i2c/busses/i2c-s3c2410.c +@@ -1143,6 +1143,7 @@ static int s3c24xx_i2c_probe(struct plat + return -ENOMEM; + + i2c->quirks = s3c24xx_get_device_quirks(pdev); ++ i2c->sysreg = ERR_PTR(-ENOENT); + if (pdata) + memcpy(i2c->pdata, pdata, sizeof(*pdata)); + else diff --git a/queue-4.0/series b/queue-4.0/series index c7c27de0d5c..cc052cffe2a 100644 --- a/queue-4.0/series +++ b/queue-4.0/series @@ -43,3 +43,9 @@ alsa-usb-audio-don-t-try-to-get-outlaw-rr2150-sample-rate.patch alsa-usb-audio-add-maya44-usb-mixer-control-names.patch alsa-usb-audio-fix-missing-input-volume-controls-in-maya44-usb.patch alsa-usb-audio-add-native-dsd-support-for-jlsounds-i2soverusb.patch +dmaengine-pl330-fix-hang-on-dmaengine_terminate_all-on-certain-boards.patch +dmaengine-fix-choppy-sound-because-of-unimplemented-resume.patch +dmaengine-at_xdmac-rework-slave-configuration-part.patch +dmaengine-at_xdmac-lock-fixes.patch +i2c-hix5hd2-fix-modalias-to-make-module-auto-loading-work.patch +i2c-s3c2410-fix-oops-in-suspend-callback-for-non-dt.patch