--- /dev/null
+From dd8ecfcac66b4485416b2d1df0ec4798b198d7d6 Mon Sep 17 00:00:00 2001
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Date: Wed, 18 Jun 2014 12:15:38 +0300
+Subject: dmaengine: dw: don't perform DMA when dmaengine_submit is called
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+commit dd8ecfcac66b4485416b2d1df0ec4798b198d7d6 upstream.
+
+According to the discussion [1] and the follow-up documentation, the DMA
+controller driver shouldn't start any DMA operations when
+dmaengine_submit() is called.
+
+This patch fixes the workflow in dw_dmac driver to follow the documentation.
+
+[1] http://www.spinics.net/lists/arm-kernel/msg125987.html
+
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+Cc: "Petallo, MauriceX R" <mauricex.r.petallo@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/TODO | 1 -
+ drivers/dma/dw/core.c | 19 +++++++------------
+ 2 files changed, 7 insertions(+), 13 deletions(-)
+
+--- a/drivers/dma/TODO
++++ b/drivers/dma/TODO
+@@ -7,7 +7,6 @@ TODO for slave dma
+ - imx-dma
+ - imx-sdma
+ - mxs-dma.c
+- - dw_dmac
+ - intel_mid_dma
+ 4. Check other subsystems for dma drivers and merge/move to dmaengine
+ 5. Remove dma_slave_config's dma direction.
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -680,17 +680,9 @@ static dma_cookie_t dwc_tx_submit(struct
+ * possible, perhaps even appending to those already submitted
+ * for DMA. But this is hard to do in a race-free manner.
+ */
+- if (list_empty(&dwc->active_list)) {
+- dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
+- desc->txd.cookie);
+- list_add_tail(&desc->desc_node, &dwc->active_list);
+- dwc_dostart(dwc, dwc_first_active(dwc));
+- } else {
+- dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
+- desc->txd.cookie);
+
+- list_add_tail(&desc->desc_node, &dwc->queue);
+- }
++ dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, desc->txd.cookie);
++ list_add_tail(&desc->desc_node, &dwc->queue);
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+@@ -1095,9 +1087,12 @@ dwc_tx_status(struct dma_chan *chan,
+ static void dwc_issue_pending(struct dma_chan *chan)
+ {
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
++ unsigned long flags;
+
+- if (!list_empty(&dwc->queue))
+- dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
++ spin_lock_irqsave(&dwc->lock, flags);
++ if (list_empty(&dwc->active_list))
++ dwc_dostart_first_queued(dwc);
++ spin_unlock_irqrestore(&dwc->lock, flags);
+ }
+
+ static int dwc_alloc_chan_resources(struct dma_chan *chan)
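
For reference, the dmaengine client contract this patch enforces keeps
submission and execution separate: nothing may touch the hardware until
dma_async_issue_pending() runs. A minimal client-side sketch (channel,
buffer and direction are illustrative placeholders, not taken from this
patch):

    #include <linux/dmaengine.h>

    static int example_start_rx(struct dma_chan *chan, dma_addr_t buf,
                                size_t len)
    {
            struct dma_async_tx_descriptor *desc;
            dma_cookie_t cookie;

            desc = dmaengine_prep_slave_single(chan, buf, len,
                                               DMA_DEV_TO_MEM,
                                               DMA_PREP_INTERRUPT);
            if (!desc)
                    return -EBUSY;

            /* Queue only; a fixed dw_dmac must not start DMA here. */
            cookie = dmaengine_submit(desc);
            if (dma_submit_error(cookie))
                    return -EIO;

            /* Only now may the controller begin the transfer. */
            dma_async_issue_pending(chan);
            return 0;
    }
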
--- /dev/null
+From e7637c6c0382485f4d2e20715d058dae6f2b6a7c Mon Sep 17 00:00:00 2001
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Date: Wed, 18 Jun 2014 12:15:36 +0300
+Subject: dmaengine: dw: introduce dwc_dostart_first_queued() helper
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+commit e7637c6c0382485f4d2e20715d058dae6f2b6a7c upstream.
+
+We have duplicate code which starts the first descriptor in the queue.
+Let's factor it out into a separate helper that can be used in the
+future as well.
+
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+Cc: "Petallo, MauriceX R" <mauricex.r.petallo@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/dw/core.c | 19 +++++++++++--------
+ 1 file changed, 11 insertions(+), 8 deletions(-)
+
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -279,6 +279,15 @@ static void dwc_dostart(struct dw_dma_ch
+ channel_set_bit(dw, CH_EN, dwc->mask);
+ }
+
++static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
++{
++ if (list_empty(&dwc->queue))
++ return;
++
++ list_move(dwc->queue.next, &dwc->active_list);
++ dwc_dostart(dwc, dwc_first_active(dwc));
++}
++
+ /*----------------------------------------------------------------------*/
+
+ static void
+@@ -335,10 +344,7 @@ static void dwc_complete_all(struct dw_d
+ * the completed ones.
+ */
+ list_splice_init(&dwc->active_list, &list);
+- if (!list_empty(&dwc->queue)) {
+- list_move(dwc->queue.next, &dwc->active_list);
+- dwc_dostart(dwc, dwc_first_active(dwc));
+- }
++ dwc_dostart_first_queued(dwc);
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+@@ -467,10 +473,7 @@ static void dwc_scan_descriptors(struct
+ /* Try to continue after resetting the channel... */
+ dwc_chan_disable(dw, dwc);
+
+- if (!list_empty(&dwc->queue)) {
+- list_move(dwc->queue.next, &dwc->active_list);
+- dwc_dostart(dwc, dwc_first_active(dwc));
+- }
++ dwc_dostart_first_queued(dwc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ }
+
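
The helper relies on two list primitives whose semantics are easy to
check in isolation. A standalone userspace replica (node and list names
invented for illustration) of how list_move() pulls the oldest queued
descriptor onto the active list:

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };
    #define LIST_HEAD_INIT(name) { &(name), &(name) }

    static int list_empty(const struct list_head *head)
    {
        return head->next == head;
    }

    static void list_add_tail(struct list_head *entry, struct list_head *head)
    {
        entry->prev = head->prev;
        entry->next = head;
        head->prev->next = entry;
        head->prev = entry;
    }

    /* list_move(): unlink @entry and re-add it at the front of @head,
     * exactly what dwc_dostart_first_queued() does with queue.next. */
    static void list_move(struct list_head *entry, struct list_head *head)
    {
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
        entry->next = head->next;
        entry->prev = head;
        head->next->prev = entry;
        head->next = entry;
    }

    int main(void)
    {
        struct list_head queue = LIST_HEAD_INIT(queue);
        struct list_head active = LIST_HEAD_INIT(active);
        struct list_head desc;

        list_add_tail(&desc, &queue);
        list_move(queue.next, &active); /* first queued -> active */
        printf("queue empty: %d, active empty: %d\n",
               list_empty(&queue), list_empty(&active)); /* 1, 0 */
        return 0;
    }
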
--- /dev/null
+From eb90b0c734ad793d5f5bf230a9e9a4dcc48df8aa Mon Sep 17 00:00:00 2001
+From: Julian Anastasov <ja@ssi.bg>
+Date: Fri, 22 Aug 2014 17:53:41 +0300
+Subject: ipvs: fix ipv6 hook registration for local replies
+
+From: Julian Anastasov <ja@ssi.bg>
+
+commit eb90b0c734ad793d5f5bf230a9e9a4dcc48df8aa upstream.
+
+commit fc604767613b6d2036cdc35b660bc39451040a47
+("ipvs: changes for local real server") from 2.6.37
+introduced DNAT support to local real servers, but the
+IPv6 LOCAL_OUT handler ip_vs_local_reply6() is
+registered incorrectly as an IPv4 hook, causing any outgoing
+IPv4 traffic to be dropped depending on the IP header values.
+
+Chris tracked down the problem to CONFIG_IP_VS_IPV6=y
+Bug report: https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1349768
+
+Reported-by: Chris J Arges <chris.j.arges@canonical.com>
+Tested-by: Chris J Arges <chris.j.arges@canonical.com>
+Signed-off-by: Julian Anastasov <ja@ssi.bg>
+Signed-off-by: Simon Horman <horms@verge.net.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/netfilter/ipvs/ip_vs_core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/netfilter/ipvs/ip_vs_core.c
++++ b/net/netfilter/ipvs/ip_vs_core.c
+@@ -1906,7 +1906,7 @@ static struct nf_hook_ops ip_vs_ops[] __
+ {
+ .hook = ip_vs_local_reply6,
+ .owner = THIS_MODULE,
+- .pf = NFPROTO_IPV4,
++ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_LOCAL_OUT,
+ .priority = NF_IP6_PRI_NAT_DST + 1,
+ },
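
The one-character change matters because .pf selects which protocol
family's traffic a netfilter hook sees, independently of what the hook
function itself expects. A sketch of a correctly paired v4/v6
registration (hook names here are placeholders; the real ones are
ip_vs_local_reply4/6):

    #include <linux/module.h>
    #include <linux/netfilter.h>
    #include <linux/netfilter_ipv4.h>
    #include <linux/netfilter_ipv6.h>

    static nf_hookfn my_reply4;     /* handles IPv4 packets */
    static nf_hookfn my_reply6;     /* handles IPv6 packets */

    static struct nf_hook_ops my_ops[] = {
            {
                    .hook     = my_reply4,
                    .owner    = THIS_MODULE,
                    .pf       = NFPROTO_IPV4,  /* v4 hook, v4 family */
                    .hooknum  = NF_INET_LOCAL_OUT,
                    .priority = NF_IP_PRI_NAT_DST + 1,
            },
            {
                    .hook     = my_reply6,
                    .owner    = THIS_MODULE,
                    .pf       = NFPROTO_IPV6,  /* mismatching this to
                                                * NFPROTO_IPV4 made an
                                                * IPv6 handler judge (and
                                                * drop) IPv4 traffic */
                    .hooknum  = NF_INET_LOCAL_OUT,
                    .priority = NF_IP6_PRI_NAT_DST + 1,
            },
    };

    /* registered once, e.g.: nf_register_hooks(my_ops, ARRAY_SIZE(my_ops)); */
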
--- /dev/null
+From 76f084bc10004b3050b2cff9cfac29148f1f6088 Mon Sep 17 00:00:00 2001
+From: Alex Gartrell <agartrell@fb.com>
+Date: Wed, 16 Jul 2014 15:57:34 -0700
+Subject: ipvs: Maintain all DSCP and ECN bits for ipv6 tun forwarding
+
+From: Alex Gartrell <agartrell@fb.com>
+
+commit 76f084bc10004b3050b2cff9cfac29148f1f6088 upstream.
+
+Previously, only the four high bits of the tclass were maintained in the
+ipv6 case. Copying the full field matches the behavior of ipv4, though
+whether or not we should reflect ECN bits may be up for debate.
+
+Signed-off-by: Alex Gartrell <agartrell@fb.com>
+Acked-by: Julian Anastasov <ja@ssi.bg>
+Signed-off-by: Simon Horman <horms@verge.net.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/netfilter/ipvs/ip_vs_xmit.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/netfilter/ipvs/ip_vs_xmit.c
++++ b/net/netfilter/ipvs/ip_vs_xmit.c
+@@ -967,8 +967,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb
+ iph->nexthdr = IPPROTO_IPV6;
+ iph->payload_len = old_iph->payload_len;
+ be16_add_cpu(&iph->payload_len, sizeof(*old_iph));
+- iph->priority = old_iph->priority;
+ memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
++ ipv6_change_dsfield(iph, 0, ipv6_get_dsfield(old_iph));
+ iph->daddr = cp->daddr.in6;
+ iph->saddr = saddr;
+ iph->hop_limit = old_iph->hop_limit;
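
Numerically: iph->priority holds only the top four bits of the 8-bit
traffic class (DSCP occupies the top six bits, ECN the bottom two), and
the low four bits sit in the top of flow_lbl[0]. A standalone
computation with an arbitrary example value shows what the old copy
dropped:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t tclass = 0xAE;             /* DSCP 0x2b, ECN 0x2 */

        /* Old behavior: only iph->priority (the high nibble) was
         * copied, zeroing the low DSCP bits and both ECN bits. */
        uint8_t old_copy = tclass & 0xF0;

        /* New behavior: ipv6_change_dsfield(iph, 0, dsfield) writes
         * the whole field back, so nothing is lost. */
        uint8_t new_copy = tclass;

        printf("orig 0x%02x  old 0x%02x  new 0x%02x\n",
               tclass, old_copy, new_copy);   /* 0xae 0xa0 0xae */
        return 0;
    }
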
--- /dev/null
+From c6d119cf1b5a778e9ed60a006e2a434fcc4471a2 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Tue, 9 Sep 2014 13:49:46 +1000
+Subject: md/raid1: be more cautious where we read-balance during resync.
+
+From: NeilBrown <neilb@suse.de>
+
+commit c6d119cf1b5a778e9ed60a006e2a434fcc4471a2 upstream.
+
+commit 79ef3a8aa1cb1523cc231c9a90a278333c21f761 made
+it possible for reads to happen concurrently with resync.
+This means that we need to be more careful where read_balancing
+is allowed during resync - we can no longer be sure that any
+resync that has already started will definitely finish.
+
+So restrict read_balancing to the region before recovery_cp, which is
+conservative but safe.
+
+This bug makes it possible to read from a device that doesn't
+have up-to-date data, so it can cause data corruption.
+So it is suitable for any kernel since 3.11.
+
+Fixes: 79ef3a8aa1cb1523cc231c9a90a278333c21f761
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/raid1.c | 6 +-----
+ 1 file changed, 1 insertion(+), 5 deletions(-)
+
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -540,11 +540,7 @@ static int read_balance(struct r1conf *c
+ has_nonrot_disk = 0;
+ choose_next_idle = 0;
+
+- if (conf->mddev->recovery_cp < MaxSector &&
+- (this_sector + sectors >= conf->next_resync))
+- choose_first = 1;
+- else
+- choose_first = 0;
++ choose_first = (conf->mddev->recovery_cp < this_sector + sectors);
+
+ for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
+ sector_t dist;
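
A worked example of the two tests, with made-up sector numbers: resync
requests may have been issued up to next_resync, but with out-of-order
completion only the region below recovery_cp is certainly in sync.

    #include <stdio.h>
    #include <stdint.h>

    #define MaxSector (~(uint64_t)0)

    int main(void)
    {
        uint64_t recovery_cp = 1000;   /* resync certainly done below here */
        uint64_t next_resync = 5000;   /* requests issued up to here */
        uint64_t this_sector = 2000, sectors = 8;

        int old_choose_first = (recovery_cp < MaxSector &&
                                this_sector + sectors >= next_resync);
        int new_choose_first = (recovery_cp < this_sector + sectors);

        /* old = 0: read-balances inside [1000, 5000), where a mirror
         * may still be stale; new = 1: conservatively reads disk 0. */
        printf("old=%d new=%d\n", old_choose_first, new_choose_first);
        return 0;
    }
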
--- /dev/null
+From 669cc7ba77864e7b1ac39c9f2b2afb8730f341f4 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Thu, 4 Sep 2014 16:30:38 +1000
+Subject: md/raid1: clean up request counts properly in close_sync()
+
+From: NeilBrown <neilb@suse.de>
+
+commit 669cc7ba77864e7b1ac39c9f2b2afb8730f341f4 upstream.
+
+If there are outstanding writes when close_sync is called,
+the change to ->start_next_window might cause them to
+decrement the wrong counter when they complete. Fix this
+by merging the two counters into the one that will be decremented.
+
+Having an incorrect value in a counter can cause raise_barrier()
+to hang, so this is suitable for -stable.
+
+Fixes: 79ef3a8aa1cb1523cc231c9a90a278333c21f761
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/raid1.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1548,8 +1548,13 @@ static void close_sync(struct r1conf *co
+ mempool_destroy(conf->r1buf_pool);
+ conf->r1buf_pool = NULL;
+
++ spin_lock_irq(&conf->resync_lock);
+ conf->next_resync = 0;
+ conf->start_next_window = MaxSector;
++ conf->current_window_requests +=
++ conf->next_window_requests;
++ conf->next_window_requests = 0;
++ spin_unlock_irq(&conf->resync_lock);
+ }
+
+ static int raid1_spare_active(struct mddev *mddev)
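
The arithmetic behind the merge, with made-up counts: once
start_next_window is reset, every still-outstanding write will
decrement the current-window counter when it completes, so the
next-window count has to be folded in first or the counter goes
negative.

    #include <stdio.h>

    int main(void)
    {
        int current_window_requests = 3;  /* writes in the old current window */
        int next_window_requests = 2;     /* writes in the old next window */
        int in_flight = current_window_requests + next_window_requests;

        /* Without the merge: all 5 completions hit one counter. */
        int unmerged = current_window_requests - in_flight;   /* -2 */

        /* With the merge (done under resync_lock in the patch): */
        int merged = (current_window_requests + next_window_requests)
                     - in_flight;                             /* 0 */

        printf("unmerged=%d merged=%d\n", unmerged, merged);
        return 0;
    }
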
--- /dev/null
+From 34e97f170149bfa14979581c4c748bc9b4b79d5b Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Tue, 16 Sep 2014 12:14:14 +1000
+Subject: md/raid1: count resync requests in nr_pending.
+
+From: NeilBrown <neilb@suse.de>
+
+commit 34e97f170149bfa14979581c4c748bc9b4b79d5b upstream.
+
+Both normal IO and resync IO can be retried with reschedule_retry()
+and so be counted into ->nr_queued, but only normal IO gets counted in
+->nr_pending.
+
+Before the recent improvement to RAID1 resync there could only have
+been one or the other on the queue. When handling a
+read failure it could only be normal IO. So when handle_read_error()
+called freeze_array() the fact that freeze_array only compares
+->nr_queued against ->nr_pending was safe.
+
+But now that these two types can interleave, we can have both normal
+and resync IO requests queued, so we need to count them both in
+nr_pending.
+
+This error can lead to freeze_array() hanging if there is a read
+error, so it is suitable for -stable.
+
+Fixes: 79ef3a8aa1cb1523cc231c9a90a278333c21f761
+Reported-by: Brassow Jonathan <jbrassow@redhat.com>
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/raid1.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -856,6 +856,7 @@ static void raise_barrier(struct r1conf
+ conf->next_resync + RESYNC_SECTORS),
+ conf->resync_lock);
+
++ conf->nr_pending++;
+ spin_unlock_irq(&conf->resync_lock);
+ }
+
+@@ -865,6 +866,7 @@ static void lower_barrier(struct r1conf
+ BUG_ON(conf->barrier <= 0);
+ spin_lock_irqsave(&conf->resync_lock, flags);
+ conf->barrier--;
++ conf->nr_pending--;
+ spin_unlock_irqrestore(&conf->resync_lock, flags);
+ wake_up(&conf->wait_barrier);
+ }
--- /dev/null
+From 235549605eb7f1c5a37cef8b09d12e6d412c5cd6 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Wed, 10 Sep 2014 15:56:57 +1000
+Subject: md/raid1: Don't use next_resync to determine how far resync has progressed
+
+From: NeilBrown <neilb@suse.de>
+
+commit 235549605eb7f1c5a37cef8b09d12e6d412c5cd6 upstream.
+
+next_resync is (approximately) the location for the next resync request.
+However it does *not* reliably determine the earliest location
+at which resync might be happening.
+This is because resync requests can complete out of order, and
+we only limit the number of current requests, not the distance
+from the earliest pending request to the latest.
+
+mddev->curr_resync_completed is a reliable indicator of the earliest
+position at which resync could be happening. It is updated less
+frequently, but is actually reliable, which is more important.
+
+So use it to determine if a write request is before the region
+being resynced and so safe from conflict.
+
+This error can allow resync IO to interfere with normal IO which
+could lead to data corruption. Hence: stable.
+
+Fixes: 79ef3a8aa1cb1523cc231c9a90a278333c21f761
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/raid1.c | 12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -875,12 +875,10 @@ static bool need_to_wait_for_sync(struct
+ if (conf->array_frozen || !bio)
+ wait = true;
+ else if (conf->barrier && bio_data_dir(bio) == WRITE) {
+- if (conf->next_resync < RESYNC_WINDOW_SECTORS)
+- wait = true;
+- else if ((conf->next_resync - RESYNC_WINDOW_SECTORS
+- >= bio_end_sector(bio)) ||
+- (conf->next_resync + NEXT_NORMALIO_DISTANCE
+- <= bio->bi_iter.bi_sector))
++ if ((conf->mddev->curr_resync_completed
++ >= bio_end_sector(bio)) ||
++ (conf->next_resync + NEXT_NORMALIO_DISTANCE
++ <= bio->bi_iter.bi_sector))
+ wait = false;
+ else
+ wait = true;
+@@ -918,7 +916,7 @@ static sector_t wait_barrier(struct r1co
+
+ if (bio && bio_data_dir(bio) == WRITE) {
+ if (bio->bi_iter.bi_sector >=
+- conf->next_resync) {
++ conf->mddev->curr_resync_completed) {
+ if (conf->start_next_window == MaxSector)
+ conf->start_next_window =
+ conf->next_resync +
--- /dev/null
+From b8cb6b4c121e1bf1963c16ed69e7adcb1bc301cd Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Thu, 18 Sep 2014 11:09:04 +1000
+Subject: md/raid1: fix_read_error should act on all non-faulty devices.
+
+From: NeilBrown <neilb@suse.de>
+
+commit b8cb6b4c121e1bf1963c16ed69e7adcb1bc301cd upstream.
+
+If a device is being recovered it is not InSync and is not Faulty.
+
+If a read error is experienced on that device, fix_read_error()
+will be called, but it ignores non-InSync devices. So it will
+neither fix the error nor fail the device.
+
+It is incorrect that fix_read_error() ignores non-InSync devices.
+It should only ignore Faulty devices. So fix it.
+
+This became a bug when we allowed reading from a device that was being
+recovered. It is suitable for any subsequent -stable kernel.
+
+Fixes: da8840a747c0dbf49506ec906757a6b87b9741e9
+Reported-by: Alexander Lyakas <alex.bolshoy@gmail.com>
+Tested-by: Alexander Lyakas <alex.bolshoy@gmail.com>
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/raid1.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -2154,7 +2154,7 @@ static void fix_read_error(struct r1conf
+ d--;
+ rdev = conf->mirrors[d].rdev;
+ if (rdev &&
+- test_bit(In_sync, &rdev->flags))
++ !test_bit(Faulty, &rdev->flags))
+ r1_sync_page_io(rdev, sect, s,
+ conf->tmppage, WRITE);
+ }
+@@ -2166,7 +2166,7 @@ static void fix_read_error(struct r1conf
+ d--;
+ rdev = conf->mirrors[d].rdev;
+ if (rdev &&
+- test_bit(In_sync, &rdev->flags)) {
++ !test_bit(Faulty, &rdev->flags)) {
+ if (r1_sync_page_io(rdev, sect, s,
+ conf->tmppage, READ)) {
+ atomic_add(s, &rdev->corrected_errors);
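
The device states involved are just flag combinations, and a recovering
member is the case the old test got wrong. A tiny standalone model
(flag names invented, mirroring rdev->flags):

    #include <stdio.h>

    #define IN_SYNC (1u << 0)
    #define FAULTY  (1u << 1)

    static int old_acts_on(unsigned f) { return (f & IN_SYNC) != 0; }
    static int new_acts_on(unsigned f) { return (f & FAULTY) == 0;  }

    int main(void)
    {
        unsigned healthy = IN_SYNC;   /* in sync, not faulty        */
        unsigned failed = FAULTY;     /* faulty, must be skipped    */
        unsigned recovering = 0;      /* neither InSync nor Faulty  */

        /* fix_read_error() must rewrite a recovering device too;
         * the old In_sync test silently skipped it. */
        printf("recovering: old=%d new=%d\n",
               old_acts_on(recovering), new_acts_on(recovering)); /* 0 1 */
        printf("failed:     old=%d new=%d\n",
               old_acts_on(failed), new_acts_on(failed));         /* 0 0 */
        printf("healthy:    old=%d new=%d\n",
               old_acts_on(healthy), new_acts_on(healthy));       /* 1 1 */
        return 0;
    }
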
--- /dev/null
+From f0cc9a057151892b885be21a1d19b0185568281d Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Mon, 22 Sep 2014 10:06:23 +1000
+Subject: md/raid1: initialise start_next_window for READ case to avoid hang
+
+From: NeilBrown <neilb@suse.de>
+
+commit f0cc9a057151892b885be21a1d19b0185568281d upstream.
+
+r1_bio->start_next_window is not initialised in the READ
+case, so allow_barrier may incorrectly decrement
+conf->current_window_requests, which can cause raise_barrier()
+to block forever.
+
+Fixes: 79ef3a8aa1cb1523cc231c9a90a278333c21f761
+Reported-by: Brassow Jonathan <jbrassow@redhat.com>
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/raid1.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1185,6 +1185,7 @@ read_again:
+ atomic_read(&bitmap->behind_writes) == 0);
+ }
+ r1_bio->read_disk = rdisk;
++ r1_bio->start_next_window = 0;
+
+ read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+ bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
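
r1_bio structures come out of a mempool, so a freshly allocated one can
carry whatever start_next_window its previous user left behind. A
one-slot recycling sketch (type and names invented) of why the READ
path needs the explicit zeroing:

    #include <stdio.h>
    #include <stdint.h>

    struct r1bio_like { uint64_t start_next_window; };

    /* One-slot "mempool": alloc returns the recycled object as-is. */
    static struct r1bio_like slot;
    static struct r1bio_like *pool_alloc(void) { return &slot; }

    int main(void)
    {
        struct r1bio_like *w = pool_alloc();
        w->start_next_window = 4096;   /* a WRITE registered a window */

        struct r1bio_like *r = pool_alloc();  /* later READ reuses it */
        printf("stale: %llu\n",
               (unsigned long long)r->start_next_window);  /* 4096 */

        /* Without the fix, allow_barrier() would decrement
         * current_window_requests for a window this READ never
         * joined; the patch's one line clears it. */
        r->start_next_window = 0;
        printf("fixed: %llu\n",
               (unsigned long long)r->start_next_window);  /* 0 */
        return 0;
    }
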
--- /dev/null
+From 2f73d3c55d09ce60647b96ad2a9b539c95a530ee Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Wed, 10 Sep 2014 15:01:49 +1000
+Subject: md/raid1: make sure resync waits for conflicting writes to complete.
+
+From: NeilBrown <neilb@suse.de>
+
+commit 2f73d3c55d09ce60647b96ad2a9b539c95a530ee upstream.
+
+The resync/recovery process for raid1 was recently changed
+so that writes could happen in parallel with resync providing
+they were in different regions of the device.
+
+There is a problem though: While a write request will always
+wait for conflicting resync to complete, a resync request
+will *not* always wait for conflicting writes to complete.
+
+Two changes are needed to fix this:
+
+1/ raise_barrier (which waits until it is safe to do resync)
+ must wait until current_window_requests is zero
+2/ wait_barrier (which waits at the start of a new write request)
+ must update current_window_requests if the request could
+ possibly conflict with a concurrent resync.
+
+As concurrent writes and resync can lead to data loss,
+this patch is suitable for -stable.
+
+Fixes: 79ef3a8aa1cb1523cc231c9a90a278333c21f761
+Cc: majianpeng <majianpeng@gmail.com>
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/raid1.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -845,10 +845,12 @@ static void raise_barrier(struct r1conf
+ * C: next_resync + RESYNC_SECTORS > start_next_window, meaning
+ * next resync will reach to the window which normal bios are
+ * handling.
++ * D: while there are any active requests in the current window.
+ */
+ wait_event_lock_irq(conf->wait_barrier,
+ !conf->array_frozen &&
+ conf->barrier < RESYNC_DEPTH &&
++ conf->current_window_requests == 0 &&
+ (conf->start_next_window >=
+ conf->next_resync + RESYNC_SECTORS),
+ conf->resync_lock);
+@@ -915,8 +917,8 @@ static sector_t wait_barrier(struct r1co
+ }
+
+ if (bio && bio_data_dir(bio) == WRITE) {
+- if (conf->next_resync + NEXT_NORMALIO_DISTANCE
+- <= bio->bi_iter.bi_sector) {
++ if (bio->bi_iter.bi_sector >=
++ conf->next_resync) {
+ if (conf->start_next_window == MaxSector)
+ conf->start_next_window =
+ conf->next_resync +
--- /dev/null
+From c2fd4c94deedb89ac1746c4a53219be499372c06 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Wed, 10 Sep 2014 16:01:24 +1000
+Subject: md/raid1: update next_resync under resync_lock.
+
+From: NeilBrown <neilb@suse.de>
+
+commit c2fd4c94deedb89ac1746c4a53219be499372c06 upstream.
+
+raise_barrier() uses next_resync as part of its calculations, so it
+really should be updated first, instead of afterwards.
+
+next_resync is always used under resync_lock, so update it under
+resync_lock too, just before it is used. That is safest.
+
+This bug could cause normal IO and resync IO to interact badly, so
+it is suitable for -stable.
+
+Fixes: 79ef3a8aa1cb1523cc231c9a90a278333c21f761
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/raid1.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -827,7 +827,7 @@ static void flush_pending_writes(struct
+ * there is no normal IO happeing. It must arrange to call
+ * lower_barrier when the particular background IO completes.
+ */
+-static void raise_barrier(struct r1conf *conf)
++static void raise_barrier(struct r1conf *conf, sector_t sector_nr)
+ {
+ spin_lock_irq(&conf->resync_lock);
+
+@@ -837,6 +837,7 @@ static void raise_barrier(struct r1conf
+
+ /* block any new IO from starting */
+ conf->barrier++;
++ conf->next_resync = sector_nr;
+
+ /* For these conditions we must wait:
+ * A: while the array is in frozen state
+@@ -2542,9 +2543,8 @@ static sector_t sync_request(struct mdde
+
+ bitmap_cond_end_sync(mddev->bitmap, sector_nr);
+ r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
+- raise_barrier(conf);
+
+- conf->next_resync = sector_nr;
++ raise_barrier(conf, sector_nr);
+
+ rcu_read_lock();
+ /*
--- /dev/null
+From 7878289b269d41c8e611aa6d4519feae706e49f3 Mon Sep 17 00:00:00 2001
+From: Ulf Hansson <ulf.hansson@linaro.org>
+Date: Fri, 13 Jun 2014 13:21:38 +0200
+Subject: mmc: mmci: Reverse IRQ handling for the arm_variant
+
+From: Ulf Hansson <ulf.hansson@linaro.org>
+
+commit 7878289b269d41c8e611aa6d4519feae706e49f3 upstream.
+
+Commit "mmc: mmci: Handle CMD irq before DATA irq", caused an issue
+when using the ARM model of the PL181 and running QEMU.
+
+The bug was reported for the following QEMU version:
+$ qemu-system-arm -version
+QEMU emulator version 2.0.0 (Debian 2.0.0+dfsg-2ubuntu1.1), Copyright
+(c) 2003-2008 Fabrice Bellard
+
+To resolve the problem, let's restore the old behavior where the DATA
+irq is handled prior to the CMD irq, but only for the arm_variant, for
+which the problem was reported.
+
+Reported-by: John Stultz <john.stultz@linaro.org>
+Cc: Peter Maydell <peter.maydell@linaro.org>
+Cc: Russell King <linux@arm.linux.org.uk>
+Tested-by: Kees Cook <keescook@chromium.org>
+Tested-by: John Stultz <john.stultz@linaro.org>
+Cc: <stable@vger.kernel.org> # v3.15+
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+[kees: backported to 3.16]
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/mmci.c | 12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+--- a/drivers/mmc/host/mmci.c
++++ b/drivers/mmc/host/mmci.c
+@@ -65,6 +65,7 @@ static unsigned int fmax = 515633;
+ * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
+ * @busy_detect: true if busy detection on dat0 is supported
+ * @pwrreg_nopower: bits in MMCIPOWER don't controls ext. power supply
++ * @reversed_irq_handling: handle data irq before cmd irq.
+ */
+ struct variant_data {
+ unsigned int clkreg;
+@@ -80,6 +81,7 @@ struct variant_data {
+ bool pwrreg_clkgate;
+ bool busy_detect;
+ bool pwrreg_nopower;
++ bool reversed_irq_handling;
+ };
+
+ static struct variant_data variant_arm = {
+@@ -87,6 +89,7 @@ static struct variant_data variant_arm =
+ .fifohalfsize = 8 * 4,
+ .datalength_bits = 16,
+ .pwrreg_powerup = MCI_PWR_UP,
++ .reversed_irq_handling = true,
+ };
+
+ static struct variant_data variant_arm_extended_fifo = {
+@@ -1163,8 +1166,13 @@ static irqreturn_t mmci_irq(int irq, voi
+
+ dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
+
+- mmci_cmd_irq(host, host->cmd, status);
+- mmci_data_irq(host, host->data, status);
++ if (host->variant->reversed_irq_handling) {
++ mmci_data_irq(host, host->data, status);
++ mmci_cmd_irq(host, host->cmd, status);
++ } else {
++ mmci_cmd_irq(host, host->cmd, status);
++ mmci_data_irq(host, host->data, status);
++ }
+
+ /* Don't poll for busy completion in irq context. */
+ if (host->busy_status)
--- /dev/null
+From b88825de8545ad252c31543fef13cadf4de7a2bc Mon Sep 17 00:00:00 2001
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Tue, 5 Aug 2014 17:25:59 +0200
+Subject: netfilter: nf_tables: don't update chain with unset counters
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+commit b88825de8545ad252c31543fef13cadf4de7a2bc upstream.
+
+Fix possible replacement of the per-cpu chain counters by null
+pointer when updating an existing chain in the commit path.
+
+Reported-by: Matteo Croce <technoboy85@gmail.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/netfilter/nf_tables_api.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -899,6 +899,9 @@ static struct nft_stats __percpu *nft_st
+ static void nft_chain_stats_replace(struct nft_base_chain *chain,
+ struct nft_stats __percpu *newstats)
+ {
++ if (newstats == NULL)
++ return;
++
+ if (chain->stats) {
+ struct nft_stats __percpu *oldstats =
+ nft_dereference(chain->stats);
--- /dev/null
+From caa8ad94edf686d02b555c65a6162c0d1b434958 Mon Sep 17 00:00:00 2001
+From: Daniel Borkmann <dborkman@redhat.com>
+Date: Mon, 18 Aug 2014 15:46:28 +0200
+Subject: netfilter: x_tables: allow to use default cgroup match
+
+From: Daniel Borkmann <dborkman@redhat.com>
+
+commit caa8ad94edf686d02b555c65a6162c0d1b434958 upstream.
+
+There's actually no good reason why we cannot use cgroup id 0,
+so let's just remove this artificial barrier.
+
+Reported-by: Alexey Perevalov <a.perevalov@samsung.com>
+Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
+Tested-by: Alexey Perevalov <a.perevalov@samsung.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/netfilter/xt_cgroup.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/netfilter/xt_cgroup.c
++++ b/net/netfilter/xt_cgroup.c
+@@ -31,7 +31,7 @@ static int cgroup_mt_check(const struct
+ if (info->invert & ~1)
+ return -EINVAL;
+
+- return info->id ? 0 : -EINVAL;
++ return 0;
+ }
+
+ static bool
--- /dev/null
+From 7bd8490eef9776ced7632345df5133384b6be0fe Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 24 Jul 2014 06:36:50 +0200
+Subject: netfilter: xt_hashlimit: perform garbage collection from process context
+
+From: Eric Dumazet <edumazet@google.com>
+
+commit 7bd8490eef9776ced7632345df5133384b6be0fe upstream.
+
+xt_hashlimit cannot be used with large hash tables, because the garbage
+collector is run from a timer. If the table is really big, it's possible
+to hold the cpu for more than 500 msec, which is unacceptable.
+
+Switch to a work queue, and use proper scheduling points to remove
+latency spikes.
+
+Later, we could also switch to a smoother garbage collection done
+at lookup time, one bucket at a time...
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Florian Westphal <fw@strlen.de>
+Cc: Patrick McHardy <kaber@trash.net>
+Reviewed-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/netfilter/xt_hashlimit.c | 31 ++++++++++++++++---------------
+ 1 file changed, 16 insertions(+), 15 deletions(-)
+
+--- a/net/netfilter/xt_hashlimit.c
++++ b/net/netfilter/xt_hashlimit.c
+@@ -104,7 +104,7 @@ struct xt_hashlimit_htable {
+ spinlock_t lock; /* lock for list_head */
+ u_int32_t rnd; /* random seed for hash */
+ unsigned int count; /* number entries in table */
+- struct timer_list timer; /* timer for gc */
++ struct delayed_work gc_work;
+
+ /* seq_file stuff */
+ struct proc_dir_entry *pde;
+@@ -213,7 +213,7 @@ dsthash_free(struct xt_hashlimit_htable
+ call_rcu_bh(&ent->rcu, dsthash_free_rcu);
+ ht->count--;
+ }
+-static void htable_gc(unsigned long htlong);
++static void htable_gc(struct work_struct *work);
+
+ static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo,
+ u_int8_t family)
+@@ -273,9 +273,9 @@ static int htable_create(struct net *net
+ }
+ hinfo->net = net;
+
+- setup_timer(&hinfo->timer, htable_gc, (unsigned long)hinfo);
+- hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval);
+- add_timer(&hinfo->timer);
++ INIT_DEFERRABLE_WORK(&hinfo->gc_work, htable_gc);
++ queue_delayed_work(system_power_efficient_wq, &hinfo->gc_work,
++ msecs_to_jiffies(hinfo->cfg.gc_interval));
+
+ hlist_add_head(&hinfo->node, &hashlimit_net->htables);
+
+@@ -300,29 +300,30 @@ static void htable_selective_cleanup(str
+ {
+ unsigned int i;
+
+- /* lock hash table and iterate over it */
+- spin_lock_bh(&ht->lock);
+ for (i = 0; i < ht->cfg.size; i++) {
+ struct dsthash_ent *dh;
+ struct hlist_node *n;
++
++ spin_lock_bh(&ht->lock);
+ hlist_for_each_entry_safe(dh, n, &ht->hash[i], node) {
+ if ((*select)(ht, dh))
+ dsthash_free(ht, dh);
+ }
++ spin_unlock_bh(&ht->lock);
++ cond_resched();
+ }
+- spin_unlock_bh(&ht->lock);
+ }
+
+-/* hash table garbage collector, run by timer */
+-static void htable_gc(unsigned long htlong)
++static void htable_gc(struct work_struct *work)
+ {
+- struct xt_hashlimit_htable *ht = (struct xt_hashlimit_htable *)htlong;
++ struct xt_hashlimit_htable *ht;
++
++ ht = container_of(work, struct xt_hashlimit_htable, gc_work.work);
+
+ htable_selective_cleanup(ht, select_gc);
+
+- /* re-add the timer accordingly */
+- ht->timer.expires = jiffies + msecs_to_jiffies(ht->cfg.gc_interval);
+- add_timer(&ht->timer);
++ queue_delayed_work(system_power_efficient_wq,
++ &ht->gc_work, msecs_to_jiffies(ht->cfg.gc_interval));
+ }
+
+ static void htable_remove_proc_entry(struct xt_hashlimit_htable *hinfo)
+@@ -341,7 +342,7 @@ static void htable_remove_proc_entry(str
+
+ static void htable_destroy(struct xt_hashlimit_htable *hinfo)
+ {
+- del_timer_sync(&hinfo->timer);
++ cancel_delayed_work_sync(&hinfo->gc_work);
+ htable_remove_proc_entry(hinfo);
+ htable_selective_cleanup(hinfo, select_all);
+ kfree(hinfo->name);
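
The conversion follows the standard timer-to-delayed-work pattern; a
condensed sketch of a self-rearming deferrable work item (struct and
field names are placeholders) under the same assumptions as the patch:

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    struct my_table {
            struct delayed_work gc_work;
            unsigned int gc_interval;       /* msecs */
    };

    static void my_gc(struct work_struct *work)
    {
            struct my_table *t = container_of(work, struct my_table,
                                              gc_work.work);

            /* walk the table: lock per bucket, cond_resched() between
             * buckets, as htable_selective_cleanup() now does */

            queue_delayed_work(system_power_efficient_wq, &t->gc_work,
                               msecs_to_jiffies(t->gc_interval));
    }

    static void my_table_init(struct my_table *t)
    {
            INIT_DEFERRABLE_WORK(&t->gc_work, my_gc);
            queue_delayed_work(system_power_efficient_wq, &t->gc_work,
                               msecs_to_jiffies(t->gc_interval));
    }

    static void my_table_destroy(struct my_table *t)
    {
            /* unlike a timer, the work can sleep; teardown waits for
             * a running GC pass to finish */
            cancel_delayed_work_sync(&t->gc_work);
    }
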
--- /dev/null
+From d97a86c170b4e432f76db072a827fe30b4d6f659 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Tue, 5 Aug 2014 11:09:59 +0300
+Subject: partitions: aix.c: off by one bug
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit d97a86c170b4e432f76db072a827fe30b4d6f659 upstream.
+
+The lvip[] array has "state->limit" elements so the condition here
+should be >= instead of >.
+
+Fixes: 6ceea22bbbc8 ('partitions: add aix lvm partition support files')
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Acked-by: Philippe De Muyter <phdm@macqel.be>
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/partitions/aix.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/block/partitions/aix.c
++++ b/block/partitions/aix.c
+@@ -253,7 +253,7 @@ int aix_partition(struct parsed_partitio
+ continue;
+ }
+ lv_ix = be16_to_cpu(p->lv_ix) - 1;
+- if (lv_ix > state->limit) {
++ if (lv_ix >= state->limit) {
+ cur_lv_ix = -1;
+ continue;
+ }
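
The fix is the classic bound for an array of "limit" elements, whose
last valid index is limit - 1. A standalone check (values invented):

    #include <stdio.h>

    int main(void)
    {
        int limit = 4;   /* lvip[] has 'limit' elements: 0..3 */
        int lv_ix = 4;   /* hostile/corrupt index from on-disk data */

        if (lv_ix > limit)        /* old test: 4 slips through */
            puts("old: rejected");
        else
            puts("old: accepted -> out-of-bounds read of lvip[4]");

        if (lv_ix >= limit)       /* fixed test: 4 is rejected */
            puts("new: rejected");
        else
            puts("new: accepted");
        return 0;
    }
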
media-videobuf2-dma-sg-fix-for-wrong-gfp-mask-to-sg_alloc_table_from_pages.patch
media-vb2-fix-vb2-state-check-when-start_streaming-fails.patch
media-vb2-fix-plane-index-sanity-check-in-vb2_plane_cookie.patch
+md-raid1-clean-up-request-counts-properly-in-close_sync.patch
+md-raid1-be-more-cautious-where-we-read-balance-during-resync.patch
+md-raid1-make-sure-resync-waits-for-conflicting-writes-to-complete.patch
+md-raid1-don-t-use-next_resync-to-determine-how-far-resync-has-progressed.patch
+md-raid1-update-next_resync-under-resync_lock.patch
+md-raid1-count-resync-requests-in-nr_pending.patch
+md-raid1-fix_read_error-should-act-on-all-non-faulty-devices.patch
+md-raid1-intialise-start_next_window-for-read-case-to-avoid-hang.patch
+netfilter-xt_hashlimit-perform-garbage-collection-from-process-context.patch
+ipvs-maintain-all-dscp-and-ecn-bits-for-ipv6-tun-forwarding.patch
+netfilter-x_tables-allow-to-use-default-cgroup-match.patch
+ipvs-fix-ipv6-hook-registration-for-local-replies.patch
+netfilter-nf_tables-don-t-update-chain-with-unset-counters.patch
+mmc-mmci-reverse-irq-handling-for-the-arm_variant.patch
+dmaengine-dw-introduce-dwc_dostart_first_queued-helper.patch
+dmaengine-dw-don-t-perform-dma-when-dmaengine_submit-is-called.patch
+partitions-aix.c-off-by-one-bug.patch