--- /dev/null
+From 87243deb88671f70def4c52dfa7ca7830707bd31 Mon Sep 17 00:00:00 2001
+From: Seth Forshee <seth.forshee@canonical.com>
+Date: Wed, 9 Mar 2016 09:18:07 -0600
+Subject: debugfs: Make automount point inodes permanently empty
+
+From: Seth Forshee <seth.forshee@canonical.com>
+
+commit 87243deb88671f70def4c52dfa7ca7830707bd31 upstream.
+
+Starting with 4.1 the tracing subsystem has its own filesystem
+which is automounted in the tracing subdirectory of debugfs.
+Prior to this debugfs could be bind mounted in a cloned mount
+namespace, but if tracefs has been mounted under debugfs this
+now fails because there is a locked child mount. This creates
+a regression for container software which bind mounts debugfs
+to satisfy the assumption of some userspace software.
+
+In other pseudo filesystems such as proc and sysfs we're already
+creating mountpoints like this in such a way that no dirents can
+be created in the directories, allowing them to be exceptions to
+some MNT_LOCKED tests. In fact we already do this for the
+tracefs mountpoint in sysfs.
+
+Do the same in debugfs_create_automount(), since the intention
+here is clearly to create a mountpoint. This fixes the regression,
+as locked child mounts on permanently empty directories do not
+cause a bind mount to fail.
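+
+A minimal caller sketch (illustration only, with hypothetical names; it
+assumes the automount callback of this series, which takes the void
+*data cookie and returns a struct vfsmount *):
+
+	static struct vfsmount *foo_automount(void *data)
+	{
+		/* mount or look up the sub-filesystem to attach here */
+		return foo_get_mount(data);	/* hypothetical helper */
+	}
+
+	static int __init foo_init(void)
+	{
+		struct dentry *d;
+
+		/*
+		 * With this patch the returned dentry is a permanently
+		 * empty directory, so a locked child mount below it no
+		 * longer makes bind mounts of debugfs fail in a cloned
+		 * mount namespace.
+		 */
+		d = debugfs_create_automount("foo", NULL, foo_automount, NULL);
+		return IS_ERR_OR_NULL(d) ? -ENODEV : 0;
+	}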
+
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Acked-by: Serge Hallyn <serge.hallyn@canonical.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/debugfs/inode.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -457,7 +457,7 @@ struct dentry *debugfs_create_automount(
+ if (unlikely(!inode))
+ return failed_creating(dentry);
+
+- inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
++ make_empty_dir_inode(inode);
+ inode->i_flags |= S_AUTOMOUNT;
+ inode->i_private = data;
+ dentry->d_fsdata = (void *)f;
--- /dev/null
+From 6545b60baaf880b0cd29a5e89dbe745a06027e89 Mon Sep 17 00:00:00 2001
+From: Ahmed Samy <f.fallen45@gmail.com>
+Date: Sun, 17 Apr 2016 05:37:09 +0000
+Subject: dm cache metadata: fix cmd_read_lock() acquiring write lock
+
+From: Ahmed Samy <f.fallen45@gmail.com>
+
+commit 6545b60baaf880b0cd29a5e89dbe745a06027e89 upstream.
+
+Commit 9567366fefdd ("dm cache metadata: fix READ_LOCK macros and
+cleanup WRITE_LOCK macros") uses down_write() instead of down_read() in
+cmd_read_lock(), yet up_read() is used to release the lock in
+READ_UNLOCK(). Fix it.
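+
+A minimal sketch of the invariant being restored (illustration only,
+not the dm-cache code): whichever down_*() a path takes on the rwsem,
+the matching up_*() must release it, otherwise the reader/writer
+accounting of cmd->root_lock is corrupted.
+
+	down_read(&cmd->root_lock);
+	/* ... read-only access to the cache metadata ... */
+	up_read(&cmd->root_lock);	/* must not be up_write() */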
+
+Fixes: 9567366fefdd ("dm cache metadata: fix READ_LOCK macros and cleanup WRITE_LOCK macros")
+Signed-off-by: Ahmed Samy <f.fallen45@gmail.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-cache-metadata.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/dm-cache-metadata.c
++++ b/drivers/md/dm-cache-metadata.c
+@@ -894,9 +894,9 @@ static bool cmd_write_lock(struct dm_cac
+
+ static bool cmd_read_lock(struct dm_cache_metadata *cmd)
+ {
+- down_write(&cmd->root_lock);
++ down_read(&cmd->root_lock);
+ if (cmd->fail_io) {
+- up_write(&cmd->root_lock);
++ up_read(&cmd->root_lock);
+ return false;
+ }
+ return true;
--- /dev/null
+From 9567366fefddeaea4ed1d713270535d93a3b3c76 Mon Sep 17 00:00:00 2001
+From: Mike Snitzer <snitzer@redhat.com>
+Date: Tue, 12 Apr 2016 12:14:46 -0400
+Subject: dm cache metadata: fix READ_LOCK macros and cleanup WRITE_LOCK macros
+
+From: Mike Snitzer <snitzer@redhat.com>
+
+commit 9567366fefddeaea4ed1d713270535d93a3b3c76 upstream.
+
+The READ_LOCK macro was incorrectly returning -EINVAL if
+dm_bm_is_read_only() was true -- it will always be true once the
+cache metadata has been transitioned to read-only by
+dm_cache_metadata_set_read_only().
+
+Wrap READ_LOCK and WRITE_LOCK multi-statement macros in do {} while(0).
+Also, all accesses to the 'cmd' argument passed to these related macros
+are now wrapped in parentheses.
+
+A follow-up patch can be developed to eliminate the use of macros in
+favor of pure C code. That is avoided for now given that this change
+needs to apply to stable@.
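+
+A minimal illustration of why the do { } while (0) wrapper matters
+(illustration only, not the dm-cache macros): without it, a
+multi-statement macro silently breaks when used as the body of an
+if/else.
+
+	#define TAKE(cmd)			\
+		down_read(&(cmd)->root_lock);	\
+		(cmd)->users++
+
+	if (need_lock)
+		TAKE(cmd);	/* only down_read() is guarded by the if; */
+	else			/* ...and this else no longer pairs with  */
+		do_other();	/* it, so the build fails                 */
+
+Wrapping the macro body in do { } while (0) turns it into a single
+statement, so the if/else keeps its intended structure. Likewise,
+parenthesizing (cmd) protects against operator-precedence surprises if
+a more complex expression is ever passed as the argument.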
+
+Reported-by: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Fixes: d14fcf3dd79 ("dm cache: make sure every metadata function checks fail_io")
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-cache-metadata.c | 64 +++++++++++++++++++++++++----------------
+ 1 file changed, 40 insertions(+), 24 deletions(-)
+
+--- a/drivers/md/dm-cache-metadata.c
++++ b/drivers/md/dm-cache-metadata.c
+@@ -867,39 +867,55 @@ static int blocks_are_unmapped_or_clean(
+ return 0;
+ }
+
+-#define WRITE_LOCK(cmd) \
+- down_write(&cmd->root_lock); \
+- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
+- up_write(&cmd->root_lock); \
+- return -EINVAL; \
++static bool cmd_write_lock(struct dm_cache_metadata *cmd)
++{
++ down_write(&cmd->root_lock);
++ if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
++ up_write(&cmd->root_lock);
++ return false;
+ }
++ return true;
++}
+
+-#define WRITE_LOCK_VOID(cmd) \
+- down_write(&cmd->root_lock); \
+- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
+- up_write(&cmd->root_lock); \
+- return; \
+- }
++#define WRITE_LOCK(cmd) \
++ do { \
++ if (!cmd_write_lock((cmd))) \
++ return -EINVAL; \
++ } while(0)
++
++#define WRITE_LOCK_VOID(cmd) \
++ do { \
++ if (!cmd_write_lock((cmd))) \
++ return; \
++ } while(0)
+
+ #define WRITE_UNLOCK(cmd) \
+- up_write(&cmd->root_lock)
++ up_write(&(cmd)->root_lock)
+
+-#define READ_LOCK(cmd) \
+- down_read(&cmd->root_lock); \
+- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
+- up_read(&cmd->root_lock); \
+- return -EINVAL; \
++static bool cmd_read_lock(struct dm_cache_metadata *cmd)
++{
++ down_write(&cmd->root_lock);
++ if (cmd->fail_io) {
++ up_write(&cmd->root_lock);
++ return false;
+ }
++ return true;
++}
+
+-#define READ_LOCK_VOID(cmd) \
+- down_read(&cmd->root_lock); \
+- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
+- up_read(&cmd->root_lock); \
+- return; \
+- }
++#define READ_LOCK(cmd) \
++ do { \
++ if (!cmd_read_lock((cmd))) \
++ return -EINVAL; \
++ } while(0)
++
++#define READ_LOCK_VOID(cmd) \
++ do { \
++ if (!cmd_read_lock((cmd))) \
++ return; \
++ } while(0)
+
+ #define READ_UNLOCK(cmd) \
+- up_read(&cmd->root_lock)
++ up_read(&(cmd)->root_lock)
+
+ int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
+ {
--- /dev/null
+From 3fe6409c23e2bee4b2b1b6d671d2da8daa15271c Mon Sep 17 00:00:00 2001
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Date: Fri, 8 Apr 2016 16:22:17 +0300
+Subject: dmaengine: dw: fix master selection
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+commit 3fe6409c23e2bee4b2b1b6d671d2da8daa15271c upstream.
+
+Commit 895005202987 ("dmaengine: dw: apply both HS interfaces and remove
+slave_id usage") cleaned up the code to avoid use of the deprecated
+slave_id member of the generic slave configuration.
+
+Meanwhile it broke master selection by removing the important call to
+dwc_set_masters() in ->device_alloc_chan_resources(), which copied the
+masters from the custom slave configuration to the internal channel
+structure.
+
+Everything has worked until now because there has been no customized
+connection of the DesignWare DMA IP to the bus, i.e. one bus and one or
+more masters are in use. Configurations where the two masters are
+connected to different buses no longer work. We are expecting one user
+of such a configuration and need to select the masters properly.
+Besides that, it is obviously a performance regression since only one
+master is in use in a multi-master configuration.
+
+Select the masters in accordance with what the user asked for. Keep
+this patch in a form more suitable for backporting.
+
+It is safe to take the necessary data in ->device_alloc_chan_resources()
+because we don't support a generic slave configuration embedded into the
+custom one, and thus the only way to provide it is via the parameter to
+a filter function, which is called just before channel resource
+allocation.
+
+While here, replace the BUG_ON() with a less noisy dev_warn() and
+prevent channel allocation in case of error.
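+
+A rough sketch of how a user provides that data (hypothetical board and
+client code; the field names follow this series' struct dw_dma_slave
+and the filter is assumed to be visible to the caller):
+
+	static struct dw_dma_slave foo_dma_tx = {
+		.dst_id		= 1,	/* HS interface of the peripheral  */
+		.src_master	= 0,	/* master used for memory accesses */
+		.dst_master	= 1,	/* master used for the peripheral  */
+	};
+
+	/* in the client driver's probe */
+	dma_cap_mask_t mask;
+	struct dma_chan *chan;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+	foo_dma_tx.dma_dev = dw_dev;	/* the DMA controller's struct device */
+	chan = dma_request_channel(mask, dw_dma_filter, &foo_dma_tx);
+
+Whether the filter runs through dma_request_channel() or through
+chan->private in ->device_alloc_chan_resources(), it copies the
+src_id/dst_id and src_master/dst_master values into the channel, so the
+masters end up selected as the user asked for.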
+
+Fixes: 895005202987 ("dmaengine: dw: apply both HS interfaces and remove slave_id usage")
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/dw/core.c | 34 +++++++++++++++++++---------------
+ 1 file changed, 19 insertions(+), 15 deletions(-)
+
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -130,26 +130,14 @@ static void dwc_desc_put(struct dw_dma_c
+ static void dwc_initialize(struct dw_dma_chan *dwc)
+ {
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+- struct dw_dma_slave *dws = dwc->chan.private;
+ u32 cfghi = DWC_CFGH_FIFO_MODE;
+ u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
+
+ if (dwc->initialized == true)
+ return;
+
+- if (dws) {
+- /*
+- * We need controller-specific data to set up slave
+- * transfers.
+- */
+- BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
+-
+- cfghi |= DWC_CFGH_DST_PER(dws->dst_id);
+- cfghi |= DWC_CFGH_SRC_PER(dws->src_id);
+- } else {
+- cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
+- cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
+- }
++ cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
++ cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
+
+ channel_writel(dwc, CFG_LO, cfglo);
+ channel_writel(dwc, CFG_HI, cfghi);
+@@ -936,7 +924,7 @@ bool dw_dma_filter(struct dma_chan *chan
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma_slave *dws = param;
+
+- if (!dws || dws->dma_dev != chan->device->dev)
++ if (dws->dma_dev != chan->device->dev)
+ return false;
+
+ /* We have to copy data since dws can be temporary storage */
+@@ -1160,6 +1148,14 @@ static int dwc_alloc_chan_resources(stru
+ * doesn't mean what you think it means), and status writeback.
+ */
+
++ /*
++ * We need controller-specific data to set up slave transfers.
++ */
++ if (chan->private && !dw_dma_filter(chan, chan->private)) {
++ dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
++ return -EINVAL;
++ }
++
+ /* Enable controller here if needed */
+ if (!dw->in_use)
+ dw_dma_on(dw);
+@@ -1221,6 +1217,14 @@ static void dwc_free_chan_resources(stru
+ spin_lock_irqsave(&dwc->lock, flags);
+ list_splice_init(&dwc->free_list, &list);
+ dwc->descs_allocated = 0;
++
++ /* Clear custom channel configuration */
++ dwc->src_id = 0;
++ dwc->dst_id = 0;
++
++ dwc->src_master = 0;
++ dwc->dst_master = 0;
++
+ dwc->initialized = false;
+
+ /* Disable interrupts */
--- /dev/null
+From 4f4bc0abff79dc9d7ccbd3143adbf8ad1f4fe6ab Mon Sep 17 00:00:00 2001
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Date: Fri, 18 Mar 2016 14:26:32 +0200
+Subject: dmaengine: hsu: correct use of channel status register
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+commit 4f4bc0abff79dc9d7ccbd3143adbf8ad1f4fe6ab upstream.
+
+There is a typo in the documentation regarding the descriptor empty bit
+(DESCE), which is set to 1 when a descriptor is empty. Thus, the status
+register at the end of a transfer usually returns all DESCE bits set
+and will never be zero.
+
+Moreover, there are 2 bits (CDESC) that encode the current descriptor,
+i.e. the one on which the interrupt has been asserted. When more than
+one descriptor is programmed these bits may also be non-zero.
+
+Mask out the DESCE and CDESC bits when reading the DMA channel status
+register (HSU_CH_SR).
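+
+A rough paraphrase of why the mask matters (illustration only, not the
+driver's exact interrupt path): the caller treats a zero status as
+"nothing pending", which only works once the always-set DESCE bits and
+the CDESC field are masked out.
+
+	sr = hsu_dma_chan_get_sr(hsuc);
+	if (!sr)	/* relies on sr being 0 when nothing is pending */
+		return IRQ_NONE;
+
+	if (sr & HSU_CH_SR_DESCTO_ANY)
+		/* handle the per-descriptor time-out bits */;
+	/* ... otherwise advance or complete the current descriptor ... */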
+
+Fixes: 2b49e0c56741 ("dmaengine: append hsu DMA driver")
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/hsu/hsu.c | 2 +-
+ drivers/dma/hsu/hsu.h | 3 +++
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/dma/hsu/hsu.c
++++ b/drivers/dma/hsu/hsu.c
+@@ -135,7 +135,7 @@ static u32 hsu_dma_chan_get_sr(struct hs
+ sr = hsu_chan_readl(hsuc, HSU_CH_SR);
+ spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
+
+- return sr;
++ return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
+ }
+
+ irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
+--- a/drivers/dma/hsu/hsu.h
++++ b/drivers/dma/hsu/hsu.h
+@@ -41,6 +41,9 @@
+ #define HSU_CH_SR_DESCTO(x) BIT(8 + (x))
+ #define HSU_CH_SR_DESCTO_ANY (BIT(11) | BIT(10) | BIT(9) | BIT(8))
+ #define HSU_CH_SR_CHE BIT(15)
++#define HSU_CH_SR_DESCE(x) BIT(16 + (x))
++#define HSU_CH_SR_DESCE_ANY (BIT(19) | BIT(18) | BIT(17) | BIT(16))
++#define HSU_CH_SR_CDESC_ANY (BIT(31) | BIT(30))
+
+ /* Bits in HSU_CH_CR */
+ #define HSU_CH_CR_CHA BIT(0)
--- /dev/null
+From 6bab1c6afdca0371cfa957079b36b78d12dd2cf5 Mon Sep 17 00:00:00 2001
+From: Robert Jarzmik <robert.jarzmik@free.fr>
+Date: Mon, 15 Feb 2016 21:57:48 +0100
+Subject: dmaengine: pxa_dma: fix the maximum requestor line
+
+From: Robert Jarzmik <robert.jarzmik@free.fr>
+
+commit 6bab1c6afdca0371cfa957079b36b78d12dd2cf5 upstream.
+
+The current number of requestor lines is limited to 31. This was an
+error introduced by a previous commit, as this number is platform
+dependent, and is actually:
+ - for pxa25x: 40 requestor lines
+ - for pxa27x: 75 requestor lines
+ - for pxa3xx: 100 requestor lines
+
+Previous testing did not reveal the faulty constant because, on
+pxa[23]xx platforms, only the camera, MSL and USB are above requestor
+32, and of these only the camera has a driver using DMA.
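+
+A minimal sketch of how the requestor count now reaches the driver
+(hypothetical pxa27x board file; the mmp_dma_platdata fields shown are
+the ones this patch reads, and the DT path reads the equivalent
+"#dma-requests" property):
+
+	static struct mmp_dma_platdata pxa27x_dma_pdata = {
+		.dma_channels	= 32,
+		.nb_requestors	= 75,	/* pxa27x requestor lines */
+	};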
+
+Fixes: e87ffbdf0697 ("dmaengine: pxa_dma: fix the no-requestor case")
+Signed-off-by: Robert Jarzmik <robert.jarzmik@free.fr>
+Acked-by: Vinod Koul <vinod.koul@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/pxa_dma.c | 39 ++++++++++++++++++++++++++++-----------
+ 1 file changed, 28 insertions(+), 11 deletions(-)
+
+--- a/drivers/dma/pxa_dma.c
++++ b/drivers/dma/pxa_dma.c
+@@ -122,6 +122,7 @@ struct pxad_chan {
+ struct pxad_device {
+ struct dma_device slave;
+ int nr_chans;
++ int nr_requestors;
+ void __iomem *base;
+ struct pxad_phy *phys;
+ spinlock_t phy_lock; /* Phy association */
+@@ -473,7 +474,7 @@ static void pxad_free_phy(struct pxad_ch
+ return;
+
+ /* clear the channel mapping in DRCMR */
+- if (chan->drcmr <= DRCMR_CHLNUM) {
++ if (chan->drcmr <= pdev->nr_requestors) {
+ reg = pxad_drcmr(chan->drcmr);
+ writel_relaxed(0, chan->phy->base + reg);
+ }
+@@ -509,6 +510,7 @@ static bool is_running_chan_misaligned(s
+
+ static void phy_enable(struct pxad_phy *phy, bool misaligned)
+ {
++ struct pxad_device *pdev;
+ u32 reg, dalgn;
+
+ if (!phy->vchan)
+@@ -518,7 +520,8 @@ static void phy_enable(struct pxad_phy *
+ "%s(); phy=%p(%d) misaligned=%d\n", __func__,
+ phy, phy->idx, misaligned);
+
+- if (phy->vchan->drcmr <= DRCMR_CHLNUM) {
++ pdev = to_pxad_dev(phy->vchan->vc.chan.device);
++ if (phy->vchan->drcmr <= pdev->nr_requestors) {
+ reg = pxad_drcmr(phy->vchan->drcmr);
+ writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
+ }
+@@ -914,6 +917,7 @@ static void pxad_get_config(struct pxad_
+ {
+ u32 maxburst = 0, dev_addr = 0;
+ enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
++ struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
+
+ *dcmd = 0;
+ if (dir == DMA_DEV_TO_MEM) {
+@@ -922,7 +926,7 @@ static void pxad_get_config(struct pxad_
+ dev_addr = chan->cfg.src_addr;
+ *dev_src = dev_addr;
+ *dcmd |= PXA_DCMD_INCTRGADDR;
+- if (chan->drcmr <= DRCMR_CHLNUM)
++ if (chan->drcmr <= pdev->nr_requestors)
+ *dcmd |= PXA_DCMD_FLOWSRC;
+ }
+ if (dir == DMA_MEM_TO_DEV) {
+@@ -931,7 +935,7 @@ static void pxad_get_config(struct pxad_
+ dev_addr = chan->cfg.dst_addr;
+ *dev_dst = dev_addr;
+ *dcmd |= PXA_DCMD_INCSRCADDR;
+- if (chan->drcmr <= DRCMR_CHLNUM)
++ if (chan->drcmr <= pdev->nr_requestors)
+ *dcmd |= PXA_DCMD_FLOWTRG;
+ }
+ if (dir == DMA_MEM_TO_MEM)
+@@ -1341,13 +1345,15 @@ static struct dma_chan *pxad_dma_xlate(s
+
+ static int pxad_init_dmadev(struct platform_device *op,
+ struct pxad_device *pdev,
+- unsigned int nr_phy_chans)
++ unsigned int nr_phy_chans,
++ unsigned int nr_requestors)
+ {
+ int ret;
+ unsigned int i;
+ struct pxad_chan *c;
+
+ pdev->nr_chans = nr_phy_chans;
++ pdev->nr_requestors = nr_requestors;
+ INIT_LIST_HEAD(&pdev->slave.channels);
+ pdev->slave.device_alloc_chan_resources = pxad_alloc_chan_resources;
+ pdev->slave.device_free_chan_resources = pxad_free_chan_resources;
+@@ -1382,7 +1388,7 @@ static int pxad_probe(struct platform_de
+ const struct of_device_id *of_id;
+ struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
+ struct resource *iores;
+- int ret, dma_channels = 0;
++ int ret, dma_channels = 0, nb_requestors = 0;
+ const enum dma_slave_buswidth widths =
+ DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+@@ -1399,13 +1405,23 @@ static int pxad_probe(struct platform_de
+ return PTR_ERR(pdev->base);
+
+ of_id = of_match_device(pxad_dt_ids, &op->dev);
+- if (of_id)
++ if (of_id) {
+ of_property_read_u32(op->dev.of_node, "#dma-channels",
+ &dma_channels);
+- else if (pdata && pdata->dma_channels)
++ ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
++ &nb_requestors);
++ if (ret) {
++ dev_warn(pdev->slave.dev,
++ "#dma-requests set to default 32 as missing in OF: %d",
++ ret);
++ nb_requestors = 32;
++ };
++ } else if (pdata && pdata->dma_channels) {
+ dma_channels = pdata->dma_channels;
+- else
++ nb_requestors = pdata->nb_requestors;
++ } else {
+ dma_channels = 32; /* default 32 channel */
++ }
+
+ dma_cap_set(DMA_SLAVE, pdev->slave.cap_mask);
+ dma_cap_set(DMA_MEMCPY, pdev->slave.cap_mask);
+@@ -1422,7 +1438,7 @@ static int pxad_probe(struct platform_de
+ pdev->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+
+ pdev->slave.dev = &op->dev;
+- ret = pxad_init_dmadev(op, pdev, dma_channels);
++ ret = pxad_init_dmadev(op, pdev, dma_channels, nb_requestors);
+ if (ret) {
+ dev_err(pdev->slave.dev, "unable to register\n");
+ return ret;
+@@ -1441,7 +1457,8 @@ static int pxad_probe(struct platform_de
+
+ platform_set_drvdata(op, pdev);
+ pxad_init_debugfs(pdev);
+- dev_info(pdev->slave.dev, "initialized %d channels\n", dma_channels);
++ dev_info(pdev->slave.dev, "initialized %d channels on %d requestors\n",
++ dma_channels, nb_requestors);
+ return 0;
+ }
+
--- /dev/null
+From 3e26a691fe3fe1e02a76e5bab0c143ace4b137b4 Mon Sep 17 00:00:00 2001
+From: Rui Salvaterra <rsalvaterra@gmail.com>
+Date: Sat, 9 Apr 2016 22:05:34 +0100
+Subject: lib: lz4: fixed zram with lz4 on big endian machines
+
+From: Rui Salvaterra <rsalvaterra@gmail.com>
+
+commit 3e26a691fe3fe1e02a76e5bab0c143ace4b137b4 upstream.
+
+Based on Sergey's test patch [1], this fixes zram with lz4 compression
+on big endian cpus.
+
+Note that the 64-bit preprocessor test is not a cleanup; it's part of
+the fix, since those identifiers are bogus (for example, __ppc64__
+isn't defined anywhere else in the kernel, which means we'd fall into
+the 32-bit definitions on ppc64).
+
+Tested on ppc64 with no regression on x86_64.
+
+[1] http://marc.info/?l=linux-kernel&m=145994470805853&w=4
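+
+An illustration of the endianness mismatch being fixed (illustration
+only, not lz4 code): the fast path wrote a 16-bit match offset in CPU
+byte order (A16(p) = v) while the shared read macro always decoded it
+as little-endian, so compressor and decompressor disagreed on
+big-endian CPUs. After this patch each branch reads offsets the same
+way it writes them.
+
+	union { u16 v; u8 b[2]; } m;
+	u16 off;
+
+	m.v = 4;				/* old write path: CPU byte order */
+	off = get_unaligned_le16(m.b);		/* old read path: 4 on LE,        */
+						/* 0x0400 on big endian           */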
+
+Suggested-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Signed-off-by: Rui Salvaterra <rsalvaterra@gmail.com>
+Reviewed-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ lib/lz4/lz4defs.h | 21 ++++++++++++---------
+ 1 file changed, 12 insertions(+), 9 deletions(-)
+
+--- a/lib/lz4/lz4defs.h
++++ b/lib/lz4/lz4defs.h
+@@ -11,8 +11,7 @@
+ /*
+ * Detects 64 bits mode
+ */
+-#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) \
+- || defined(__ppc64__) || defined(__LP64__))
++#if defined(CONFIG_64BIT)
+ #define LZ4_ARCH64 1
+ #else
+ #define LZ4_ARCH64 0
+@@ -35,6 +34,10 @@ typedef struct _U64_S { u64 v; } U64_S;
+
+ #define PUT4(s, d) (A32(d) = A32(s))
+ #define PUT8(s, d) (A64(d) = A64(s))
++
++#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
++ (d = s - A16(p))
++
+ #define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
+ do { \
+ A16(p) = v; \
+@@ -51,10 +54,13 @@ typedef struct _U64_S { u64 v; } U64_S;
+ #define PUT8(s, d) \
+ put_unaligned(get_unaligned((const u64 *) s), (u64 *) d)
+
+-#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
+- do { \
+- put_unaligned(v, (u16 *)(p)); \
+- p += 2; \
++#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
++ (d = s - get_unaligned_le16(p))
++
++#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
++ do { \
++ put_unaligned_le16(v, (u16 *)(p)); \
++ p += 2; \
+ } while (0)
+ #endif
+
+@@ -140,9 +146,6 @@ typedef struct _U64_S { u64 v; } U64_S;
+
+ #endif
+
+-#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
+- (d = s - get_unaligned_le16(p))
+-
+ #define LZ4_WILDCOPY(s, d, e) \
+ do { \
+ LZ4_COPYPACKET(s, d); \
--- /dev/null
+From 9097103f06332d099c5ab06d1e7f22f4bcaca6e2 Mon Sep 17 00:00:00 2001
+From: Robert Jarzmik <robert.jarzmik@free.fr>
+Date: Fri, 12 Feb 2016 23:29:04 +0100
+Subject: mtd: nand: pxa3xx_nand: fix dmaengine initialization
+
+From: Robert Jarzmik <robert.jarzmik@free.fr>
+
+commit 9097103f06332d099c5ab06d1e7f22f4bcaca6e2 upstream.
+
+When the driver is initialized on a pure device-tree platform, the
+driver's probe fails to allocate the DMA channel:
+[ 525.624435] pxa3xx-nand 43100000.nand: no resource defined for data DMA
+[ 525.632088] pxa3xx-nand 43100000.nand: alloc nand resource failed
+
+The reason is that the DMA IO resource is not acquired through platform
+resources but through the OF bindings.
+
+Fix this by ensuring that DMA IO resources are only queried in the
+non-device-tree case.
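+
+A rough sketch of the intended logic (paraphrase, not the driver's
+exact code):
+
+	if (!pdev->dev.of_node && use_dma) {
+		/* legacy (board file) case: a DMA resource must exist */
+		r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+		if (!r)
+			return -ENXIO;
+	}
+	/* DT case: the DMA channel comes from the dmaengine bindings,
+	 * so no IORESOURCE_DMA entry is expected. */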
+
+Fixes: 8f5ba31aa565 ("mtd: nand: pxa3xx-nand: switch to dmaengine")
+Signed-off-by: Robert Jarzmik <robert.jarzmik@free.fr>
+Acked-by: Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>
+Signed-off-by: Brian Norris <computersforpeace@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/nand/pxa3xx_nand.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/mtd/nand/pxa3xx_nand.c
++++ b/drivers/mtd/nand/pxa3xx_nand.c
+@@ -1750,7 +1750,7 @@ static int alloc_nand_resource(struct pl
+ if (ret < 0)
+ return ret;
+
+- if (use_dma) {
++ if (!np && use_dma) {
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (r == NULL) {
+ dev_err(&pdev->dev,
xhci-fix-10-second-timeout-on-removal-of-pci-hotpluggable-xhci-controllers.patch
usb-hcd-out-of-bounds-access-in-for_each_companion.patch
usb-gadget-f_fs-fix-use-after-free.patch
+dm-cache-metadata-fix-read_lock-macros-and-cleanup-write_lock-macros.patch
+dm-cache-metadata-fix-cmd_read_lock-acquiring-write-lock.patch
+lib-lz4-fixed-zram-with-lz4-on-big-endian-machines.patch
+debugfs-make-automount-point-inodes-permanently-empty.patch
+dmaengine-dw-fix-master-selection.patch
+dmaengine-hsu-correct-use-of-channel-status-register.patch
+dmaengine-pxa_dma-fix-the-maximum-requestor-line.patch
+mtd-nand-pxa3xx_nand-fix-dmaengine-initialization.patch