--- /dev/null
+From dd8ecfcac66b4485416b2d1df0ec4798b198d7d6 Mon Sep 17 00:00:00 2001
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Date: Wed, 18 Jun 2014 12:15:38 +0300
+Subject: dmaengine: dw: don't perform DMA when dmaengine_submit is called
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+commit dd8ecfcac66b4485416b2d1df0ec4798b198d7d6 upstream.
+
+According to the discussion [1] and the follow-up documentation, the DMA
+controller driver shouldn't start any DMA operations when
+dmaengine_submit() is called.
+
+This patch fixes the workflow in the dw_dmac driver to follow the
+documentation.
+
+[1] http://www.spinics.net/lists/arm-kernel/msg125987.html
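+
+For reference, a minimal sketch of the client-side flow this contract
+implies (the function and variable names below are illustrative, not
+taken from this patch):
+
+    #include <linux/dmaengine.h>
+
+    /* Sketch: dmaengine_submit() only queues the descriptor; the
+     * controller driver must not touch the hardware until the client
+     * calls dma_async_issue_pending().
+     */
+    static int example_issue(struct dma_chan *chan, dma_addr_t buf,
+                             size_t len)
+    {
+        struct dma_async_tx_descriptor *desc;
+        dma_cookie_t cookie;
+
+        desc = dmaengine_prep_slave_single(chan, buf, len,
+                                           DMA_MEM_TO_DEV,
+                                           DMA_PREP_INTERRUPT);
+        if (!desc)
+            return -ENOMEM;
+
+        cookie = dmaengine_submit(desc);  /* queue only, no DMA yet */
+        if (dma_submit_error(cookie))
+            return -EIO;
+
+        dma_async_issue_pending(chan);    /* this starts the transfer */
+        return 0;
+    }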
+
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+Cc: "Petallo, MauriceX R" <mauricex.r.petallo@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/TODO | 1 -
+ drivers/dma/dw/core.c | 19 +++++++------------
+ 2 files changed, 7 insertions(+), 13 deletions(-)
+
+--- a/drivers/dma/TODO
++++ b/drivers/dma/TODO
+@@ -7,7 +7,6 @@ TODO for slave dma
+ - imx-dma
+ - imx-sdma
+ - mxs-dma.c
+- - dw_dmac
+ - intel_mid_dma
+ 4. Check other subsystems for dma drivers and merge/move to dmaengine
+ 5. Remove dma_slave_config's dma direction.
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -680,17 +680,9 @@ static dma_cookie_t dwc_tx_submit(struct
+ * possible, perhaps even appending to those already submitted
+ * for DMA. But this is hard to do in a race-free manner.
+ */
+- if (list_empty(&dwc->active_list)) {
+- dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
+- desc->txd.cookie);
+- list_add_tail(&desc->desc_node, &dwc->active_list);
+- dwc_dostart(dwc, dwc_first_active(dwc));
+- } else {
+- dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
+- desc->txd.cookie);
+
+- list_add_tail(&desc->desc_node, &dwc->queue);
+- }
++ dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, desc->txd.cookie);
++ list_add_tail(&desc->desc_node, &dwc->queue);
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+@@ -1095,9 +1087,12 @@ dwc_tx_status(struct dma_chan *chan,
+ static void dwc_issue_pending(struct dma_chan *chan)
+ {
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
++ unsigned long flags;
+
+- if (!list_empty(&dwc->queue))
+- dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
++ spin_lock_irqsave(&dwc->lock, flags);
++ if (list_empty(&dwc->active_list))
++ dwc_dostart_first_queued(dwc);
++ spin_unlock_irqrestore(&dwc->lock, flags);
+ }
+
+ static int dwc_alloc_chan_resources(struct dma_chan *chan)
--- /dev/null
+From e7637c6c0382485f4d2e20715d058dae6f2b6a7c Mon Sep 17 00:00:00 2001
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Date: Wed, 18 Jun 2014 12:15:36 +0300
+Subject: dmaengine: dw: introduce dwc_dostart_first_queued() helper
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+commit e7637c6c0382485f4d2e20715d058dae6f2b6a7c upstream.
+
+We have duplicate code which starts the first descriptor in the queue.
+Let's make it a separate helper that can be used in the future as well.
+
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+Cc: "Petallo, MauriceX R" <mauricex.r.petallo@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/dw/core.c | 19 +++++++++++--------
+ 1 file changed, 11 insertions(+), 8 deletions(-)
+
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -279,6 +279,15 @@ static void dwc_dostart(struct dw_dma_ch
+ channel_set_bit(dw, CH_EN, dwc->mask);
+ }
+
++static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
++{
++ if (list_empty(&dwc->queue))
++ return;
++
++ list_move(dwc->queue.next, &dwc->active_list);
++ dwc_dostart(dwc, dwc_first_active(dwc));
++}
++
+ /*----------------------------------------------------------------------*/
+
+ static void
+@@ -335,10 +344,7 @@ static void dwc_complete_all(struct dw_d
+ * the completed ones.
+ */
+ list_splice_init(&dwc->active_list, &list);
+- if (!list_empty(&dwc->queue)) {
+- list_move(dwc->queue.next, &dwc->active_list);
+- dwc_dostart(dwc, dwc_first_active(dwc));
+- }
++ dwc_dostart_first_queued(dwc);
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+@@ -467,10 +473,7 @@ static void dwc_scan_descriptors(struct
+ /* Try to continue after resetting the channel... */
+ dwc_chan_disable(dw, dwc);
+
+- if (!list_empty(&dwc->queue)) {
+- list_move(dwc->queue.next, &dwc->active_list);
+- dwc_dostart(dwc, dwc_first_active(dwc));
+- }
++ dwc_dostart_first_queued(dwc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ }
+
--- /dev/null
+From 2627b7e15c5064ddd5e578e4efd948d48d531a3f Mon Sep 17 00:00:00 2001
+From: Julian Anastasov <ja@ssi.bg>
+Date: Thu, 10 Jul 2014 09:24:01 +0300
+Subject: ipvs: avoid netns exit crash on ip_vs_conn_drop_conntrack
+
+From: Julian Anastasov <ja@ssi.bg>
+
+commit 2627b7e15c5064ddd5e578e4efd948d48d531a3f upstream.
+
+commit 8f4e0a18682d91 ("IPVS netns exit causes crash in conntrack")
+added a second ip_vs_conn_drop_conntrack call instead of just adding
+the needed check. As a result, the first call can still cause a crash
+on netns exit. Remove it.
+
+Signed-off-by: Julian Anastasov <ja@ssi.bg>
+Signed-off-by: Hans Schillstrom <hans@schillstrom.com>
+Signed-off-by: Simon Horman <horms@verge.net.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/netfilter/ipvs/ip_vs_conn.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/net/netfilter/ipvs/ip_vs_conn.c
++++ b/net/netfilter/ipvs/ip_vs_conn.c
+@@ -797,7 +797,6 @@ static void ip_vs_conn_expire(unsigned l
+ ip_vs_control_del(cp);
+
+ if (cp->flags & IP_VS_CONN_F_NFCT) {
+- ip_vs_conn_drop_conntrack(cp);
+ /* Do not access conntracks during subsys cleanup
+ * because nf_conntrack_find_get can not be used after
+ * conntrack cleanup for the net.
--- /dev/null
+From eb90b0c734ad793d5f5bf230a9e9a4dcc48df8aa Mon Sep 17 00:00:00 2001
+From: Julian Anastasov <ja@ssi.bg>
+Date: Fri, 22 Aug 2014 17:53:41 +0300
+Subject: ipvs: fix ipv6 hook registration for local replies
+
+From: Julian Anastasov <ja@ssi.bg>
+
+commit eb90b0c734ad793d5f5bf230a9e9a4dcc48df8aa upstream.
+
+commit fc604767613b6d2036cdc35b660bc39451040a47
+("ipvs: changes for local real server") from 2.6.37
+introduced DNAT support to the local real server, but the
+IPv6 LOCAL_OUT handler ip_vs_local_reply6() was
+registered incorrectly as an IPv4 hook, causing any outgoing
+IPv4 traffic to be dropped depending on the IP header values.
+
+Chris tracked down the problem to CONFIG_IP_VS_IPV6=y
+Bug report: https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1349768
+
+Reported-by: Chris J Arges <chris.j.arges@canonical.com>
+Tested-by: Chris J Arges <chris.j.arges@canonical.com>
+Signed-off-by: Julian Anastasov <ja@ssi.bg>
+Signed-off-by: Simon Horman <horms@verge.net.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/netfilter/ipvs/ip_vs_core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/netfilter/ipvs/ip_vs_core.c
++++ b/net/netfilter/ipvs/ip_vs_core.c
+@@ -1906,7 +1906,7 @@ static struct nf_hook_ops ip_vs_ops[] __
+ {
+ .hook = ip_vs_local_reply6,
+ .owner = THIS_MODULE,
+- .pf = NFPROTO_IPV4,
++ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_LOCAL_OUT,
+ .priority = NF_IP6_PRI_NAT_DST + 1,
+ },
--- /dev/null
+From 76f084bc10004b3050b2cff9cfac29148f1f6088 Mon Sep 17 00:00:00 2001
+From: Alex Gartrell <agartrell@fb.com>
+Date: Wed, 16 Jul 2014 15:57:34 -0700
+Subject: ipvs: Maintain all DSCP and ECN bits for ipv6 tun forwarding
+
+From: Alex Gartrell <agartrell@fb.com>
+
+commit 76f084bc10004b3050b2cff9cfac29148f1f6088 upstream.
+
+Previously, only the four high bits of the tclass were maintained in the
+ipv6 case. Copying the full DSCP and ECN bits matches the behavior of
+ipv4, though whether or not we should reflect ECN bits may be up for
+debate.
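+
+For reference, a sketch of the header layout involved (the helpers are
+the ones used below, from <net/dsfield.h>; the wrapper function is
+illustrative):
+
+    #include <linux/ipv6.h>
+    #include <net/dsfield.h>
+
+    /* The 8-bit traffic class spans ipv6hdr.priority (high 4 bits) and
+     * the top nibble of flow_lbl[0] (low 4 bits); DSCP is the upper 6
+     * bits of that octet, ECN the lower 2. Copying ->priority alone
+     * therefore drops half of DSCP and all of ECN.
+     */
+    static void copy_tclass(struct ipv6hdr *to, const struct ipv6hdr *from)
+    {
+        /* mask 0: keep none of the old bits, take the whole octet */
+        ipv6_change_dsfield(to, 0, ipv6_get_dsfield(from));
+    }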
+
+Signed-off-by: Alex Gartrell <agartrell@fb.com>
+Acked-by: Julian Anastasov <ja@ssi.bg>
+Signed-off-by: Simon Horman <horms@verge.net.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/netfilter/ipvs/ip_vs_xmit.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/netfilter/ipvs/ip_vs_xmit.c
++++ b/net/netfilter/ipvs/ip_vs_xmit.c
+@@ -967,8 +967,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb
+ iph->nexthdr = IPPROTO_IPV6;
+ iph->payload_len = old_iph->payload_len;
+ be16_add_cpu(&iph->payload_len, sizeof(*old_iph));
+- iph->priority = old_iph->priority;
+ memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
++ ipv6_change_dsfield(iph, 0, ipv6_get_dsfield(old_iph));
+ iph->daddr = cp->daddr.in6;
+ iph->saddr = saddr;
+ iph->hop_limit = old_iph->hop_limit;
--- /dev/null
+From c6d119cf1b5a778e9ed60a006e2a434fcc4471a2 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Tue, 9 Sep 2014 13:49:46 +1000
+Subject: md/raid1: be more cautious where we read-balance during resync.
+
+From: NeilBrown <neilb@suse.de>
+
+commit c6d119cf1b5a778e9ed60a006e2a434fcc4471a2 upstream.
+
+commit 79ef3a8aa1cb1523cc231c9a90a278333c21f761 made
+it possible for reads to happen concurrently with resync.
+This means that we need to be more careful about where read_balancing
+is allowed during resync - we can no longer be sure that any
+resync that has already started will definitely finish.
+
+So restrict read_balancing to before recovery_cp, which is conservative
+but safe.
+
+This bug makes it possible to read from a device that doesn't
+have up-to-date data, so it can cause data corruption.
+So it is suitable for any kernel since 3.11.
+
+Fixes: 79ef3a8aa1cb1523cc231c9a90a278333c21f761
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/raid1.c | 6 +-----
+ 1 file changed, 1 insertion(+), 5 deletions(-)
+
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -540,11 +540,7 @@ static int read_balance(struct r1conf *c
+ has_nonrot_disk = 0;
+ choose_next_idle = 0;
+
+- if (conf->mddev->recovery_cp < MaxSector &&
+- (this_sector + sectors >= conf->next_resync))
+- choose_first = 1;
+- else
+- choose_first = 0;
++ choose_first = (conf->mddev->recovery_cp < this_sector + sectors);
+
+ for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
+ sector_t dist;
--- /dev/null
+From 669cc7ba77864e7b1ac39c9f2b2afb8730f341f4 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Thu, 4 Sep 2014 16:30:38 +1000
+Subject: md/raid1: clean up request counts properly in close_sync()
+
+From: NeilBrown <neilb@suse.de>
+
+commit 669cc7ba77864e7b1ac39c9f2b2afb8730f341f4 upstream.
+
+If there are outstanding writes when close_sync is called,
+the change to ->start_next_window might cause them to
+decrement the wrong counter when they complete. Fix this
+by merging the two counters into the one that will be decremented.
+
+Having an incorrect value in a counter can cause raise_barrier()
+to hang, so this is suitable for -stable.
+
+Fixes: 79ef3a8aa1cb1523cc231c9a90a278333c21f761
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/raid1.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1548,8 +1548,13 @@ static void close_sync(struct r1conf *co
+ mempool_destroy(conf->r1buf_pool);
+ conf->r1buf_pool = NULL;
+
++ spin_lock_irq(&conf->resync_lock);
+ conf->next_resync = 0;
+ conf->start_next_window = MaxSector;
++ conf->current_window_requests +=
++ conf->next_window_requests;
++ conf->next_window_requests = 0;
++ spin_unlock_irq(&conf->resync_lock);
+ }
+
+ static int raid1_spare_active(struct mddev *mddev)
--- /dev/null
+From 34e97f170149bfa14979581c4c748bc9b4b79d5b Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Tue, 16 Sep 2014 12:14:14 +1000
+Subject: md/raid1: count resync requests in nr_pending.
+
+From: NeilBrown <neilb@suse.de>
+
+commit 34e97f170149bfa14979581c4c748bc9b4b79d5b upstream.
+
+Both normal IO and resync IO can be retried with reschedule_retry()
+and so be counted into ->nr_queued, but only normal IO gets counted in
+->nr_pending.
+
+Before the recent improvement to RAID1 resync there could only
+possibly have been one or the other on the queue. When handling a
+read failure it could only be normal IO. So when handle_read_error()
+called freeze_array() the fact that freeze_array only compares
+->nr_queued against ->nr_pending was safe.
+
+But now that these two types can interleave, we can have both normal
+and resync IO requests queued, so we need to count them both in
+nr_pending.
+
+This error can lead to freeze_array() hanging if there is a read
+error, so it is suitable for -stable.
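+
+A simplified sketch of the wait that hangs (condensed from raid1's
+freeze_array(); the real function also flushes pending writes while
+waiting):
+
+    static void freeze_array(struct r1conf *conf, int extra)
+    {
+        spin_lock_irq(&conf->resync_lock);
+        conf->array_frozen = 1;
+        /* Wait until every pending request has completed or been
+         * parked on the retry queue; `extra` covers requests the
+         * caller itself holds. If resync IO is counted in ->nr_queued
+         * but not in ->nr_pending, this equality can never hold and
+         * we wait forever.
+         */
+        wait_event_lock_irq(conf->wait_barrier,
+                            conf->nr_pending == conf->nr_queued + extra,
+                            conf->resync_lock);
+        spin_unlock_irq(&conf->resync_lock);
+    }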
+
+Fixes: 79ef3a8aa1cb1523cc231c9a90a278333c21f761
+Reported-by: Brassow Jonathan <jbrassow@redhat.com>
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/raid1.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -856,6 +856,7 @@ static void raise_barrier(struct r1conf
+ conf->next_resync + RESYNC_SECTORS),
+ conf->resync_lock);
+
++ conf->nr_pending++;
+ spin_unlock_irq(&conf->resync_lock);
+ }
+
+@@ -865,6 +866,7 @@ static void lower_barrier(struct r1conf
+ BUG_ON(conf->barrier <= 0);
+ spin_lock_irqsave(&conf->resync_lock, flags);
+ conf->barrier--;
++ conf->nr_pending--;
+ spin_unlock_irqrestore(&conf->resync_lock, flags);
+ wake_up(&conf->wait_barrier);
+ }
--- /dev/null
+From 235549605eb7f1c5a37cef8b09d12e6d412c5cd6 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Wed, 10 Sep 2014 15:56:57 +1000
+Subject: md/raid1: Don't use next_resync to determine how far resync has progressed
+
+From: NeilBrown <neilb@suse.de>
+
+commit 235549605eb7f1c5a37cef8b09d12e6d412c5cd6 upstream.
+
+next_resync is (approximately) the location for the next resync request.
+However it does *not* reliably determine the earliest location
+at which resync might be happening.
+This is because resync requests can complete out of order, and
+we only limit the number of current requests, not the distance
+from the earliest pending request to the latest.
+
+mddev->curr_resync_completed is a reliable indicator of the earliest
+position at which resync could be happening. It is updated less
+frequently, but is actually reliable, which is more important.
+
+So use it to determine if a write request is before the region
+being resynced and so safe from conflict.
+
+This error can allow resync IO to interfere with normal IO which
+could lead to data corruption. Hence: stable.
+
+Fixes: 79ef3a8aa1cb1523cc231c9a90a278333c21f761
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/raid1.c | 12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -875,12 +875,10 @@ static bool need_to_wait_for_sync(struct
+ if (conf->array_frozen || !bio)
+ wait = true;
+ else if (conf->barrier && bio_data_dir(bio) == WRITE) {
+- if (conf->next_resync < RESYNC_WINDOW_SECTORS)
+- wait = true;
+- else if ((conf->next_resync - RESYNC_WINDOW_SECTORS
+- >= bio_end_sector(bio)) ||
+- (conf->next_resync + NEXT_NORMALIO_DISTANCE
+- <= bio->bi_iter.bi_sector))
++ if ((conf->mddev->curr_resync_completed
++ >= bio_end_sector(bio)) ||
++ (conf->next_resync + NEXT_NORMALIO_DISTANCE
++ <= bio->bi_iter.bi_sector))
+ wait = false;
+ else
+ wait = true;
+@@ -918,7 +916,7 @@ static sector_t wait_barrier(struct r1co
+
+ if (bio && bio_data_dir(bio) == WRITE) {
+ if (bio->bi_iter.bi_sector >=
+- conf->next_resync) {
++ conf->mddev->curr_resync_completed) {
+ if (conf->start_next_window == MaxSector)
+ conf->start_next_window =
+ conf->next_resync +
--- /dev/null
+From b8cb6b4c121e1bf1963c16ed69e7adcb1bc301cd Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Thu, 18 Sep 2014 11:09:04 +1000
+Subject: md/raid1: fix_read_error should act on all non-faulty devices.
+
+From: NeilBrown <neilb@suse.de>
+
+commit b8cb6b4c121e1bf1963c16ed69e7adcb1bc301cd upstream.
+
+If a device is being recovered it is not InSync and is not Faulty.
+
+If a read error is experienced on that device, fix_read_error()
+will be called, but it ignores non-InSync devices. So it will
+neither fix the error nor fail the device.
+
+It is incorrect that fix_read_error() ignores non-InSync devices.
+It should only ignore Faulty devices. So fix it.
+
+This became a bug when we allowed reading from a device that was being
+recovered. It is suitable for any subsequent -stable kernel.
+
+Fixes: da8840a747c0dbf49506ec906757a6b87b9741e9
+Reported-by: Alexander Lyakas <alex.bolshoy@gmail.com>
+Tested-by: Alexander Lyakas <alex.bolshoy@gmail.com>
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/raid1.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -2154,7 +2154,7 @@ static void fix_read_error(struct r1conf
+ d--;
+ rdev = conf->mirrors[d].rdev;
+ if (rdev &&
+- test_bit(In_sync, &rdev->flags))
++ !test_bit(Faulty, &rdev->flags))
+ r1_sync_page_io(rdev, sect, s,
+ conf->tmppage, WRITE);
+ }
+@@ -2166,7 +2166,7 @@ static void fix_read_error(struct r1conf
+ d--;
+ rdev = conf->mirrors[d].rdev;
+ if (rdev &&
+- test_bit(In_sync, &rdev->flags)) {
++ !test_bit(Faulty, &rdev->flags)) {
+ if (r1_sync_page_io(rdev, sect, s,
+ conf->tmppage, READ)) {
+ atomic_add(s, &rdev->corrected_errors);
--- /dev/null
+From f0cc9a057151892b885be21a1d19b0185568281d Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Mon, 22 Sep 2014 10:06:23 +1000
+Subject: md/raid1: intialise start_next_window for READ case to avoid hang
+
+From: NeilBrown <neilb@suse.de>
+
+commit f0cc9a057151892b885be21a1d19b0185568281d upstream.
+
+r1_bio->start_next_window is not initialised in the READ
+case, so allow_barrier may incorrectly decrement
+ conf->current_window_requests
+which can cause raise_barrier() to block forever.
+
+Fixes: 79ef3a8aa1cb1523cc231c9a90a278333c21f761
+Reported-by: Brassow Jonathan <jbrassow@redhat.com>
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/raid1.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1185,6 +1185,7 @@ read_again:
+ atomic_read(&bitmap->behind_writes) == 0);
+ }
+ r1_bio->read_disk = rdisk;
++ r1_bio->start_next_window = 0;
+
+ read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+ bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
--- /dev/null
+From 2f73d3c55d09ce60647b96ad2a9b539c95a530ee Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Wed, 10 Sep 2014 15:01:49 +1000
+Subject: md/raid1: make sure resync waits for conflicting writes to complete.
+
+From: NeilBrown <neilb@suse.de>
+
+commit 2f73d3c55d09ce60647b96ad2a9b539c95a530ee upstream.
+
+The resync/recovery process for raid1 was recently changed
+so that writes could happen in parallel with resync provided
+they were in different regions of the device.
+
+There is a problem though: While a write request will always
+wait for conflicting resync to complete, a resync request
+will *not* always wait for conflicting writes to complete.
+
+Two changes are needed to fix this:
+
+1/ raise_barrier (which waits until it is safe to do resync)
+ must wait until current_window_requests is zero
+2/ wait_barrier (which waits at the start of a new write request)
+ must update current_window_requests if the request could
+ possibly conflict with a concurrent resync.
+
+As concurrent writes and resync can lead to data loss,
+this patch is suitable for -stable.
+
+Fixes: 79ef3a8aa1cb1523cc231c9a90a278333c21f761
+Cc: majianpeng <majianpeng@gmail.com>
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/raid1.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -845,10 +845,12 @@ static void raise_barrier(struct r1conf
+ * C: next_resync + RESYNC_SECTORS > start_next_window, meaning
+ * next resync will reach to the window which normal bios are
+ * handling.
++ * D: while there are any active requests in the current window.
+ */
+ wait_event_lock_irq(conf->wait_barrier,
+ !conf->array_frozen &&
+ conf->barrier < RESYNC_DEPTH &&
++ conf->current_window_requests == 0 &&
+ (conf->start_next_window >=
+ conf->next_resync + RESYNC_SECTORS),
+ conf->resync_lock);
+@@ -915,8 +917,8 @@ static sector_t wait_barrier(struct r1co
+ }
+
+ if (bio && bio_data_dir(bio) == WRITE) {
+- if (conf->next_resync + NEXT_NORMALIO_DISTANCE
+- <= bio->bi_iter.bi_sector) {
++ if (bio->bi_iter.bi_sector >=
++ conf->next_resync) {
+ if (conf->start_next_window == MaxSector)
+ conf->start_next_window =
+ conf->next_resync +
--- /dev/null
+From c2fd4c94deedb89ac1746c4a53219be499372c06 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Wed, 10 Sep 2014 16:01:24 +1000
+Subject: md/raid1: update next_resync under resync_lock.
+
+From: NeilBrown <neilb@suse.de>
+
+commit c2fd4c94deedb89ac1746c4a53219be499372c06 upstream.
+
+raise_barrier() uses next_resync as part of its calculations, so it
+really should be updated first, instead of afterwards.
+
+next_resync is always used under resync_lock, so update it under
+the resync lock too, just before it is used. That is safest.
+
+This bug could cause normal IO and resync IO to interact badly, so
+it is suitable for -stable.
+
+Fixes: 79ef3a8aa1cb1523cc231c9a90a278333c21f761
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/raid1.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -827,7 +827,7 @@ static void flush_pending_writes(struct
+ * there is no normal IO happeing. It must arrange to call
+ * lower_barrier when the particular background IO completes.
+ */
+-static void raise_barrier(struct r1conf *conf)
++static void raise_barrier(struct r1conf *conf, sector_t sector_nr)
+ {
+ spin_lock_irq(&conf->resync_lock);
+
+@@ -837,6 +837,7 @@ static void raise_barrier(struct r1conf
+
+ /* block any new IO from starting */
+ conf->barrier++;
++ conf->next_resync = sector_nr;
+
+ /* For these conditions we must wait:
+ * A: while the array is in frozen state
+@@ -2542,9 +2543,8 @@ static sector_t sync_request(struct mdde
+
+ bitmap_cond_end_sync(mddev->bitmap, sector_nr);
+ r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
+- raise_barrier(conf);
+
+- conf->next_resync = sector_nr;
++ raise_barrier(conf, sector_nr);
+
+ rcu_read_lock();
+ /*
--- /dev/null
+From caa8ad94edf686d02b555c65a6162c0d1b434958 Mon Sep 17 00:00:00 2001
+From: Daniel Borkmann <dborkman@redhat.com>
+Date: Mon, 18 Aug 2014 15:46:28 +0200
+Subject: netfilter: x_tables: allow to use default cgroup match
+
+From: Daniel Borkmann <dborkman@redhat.com>
+
+commit caa8ad94edf686d02b555c65a6162c0d1b434958 upstream.
+
+There's actually no good reason why we cannot use cgroup id 0,
+so let's just remove this artificial barrier.
+
+Reported-by: Alexey Perevalov <a.perevalov@samsung.com>
+Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
+Tested-by: Alexey Perevalov <a.perevalov@samsung.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/netfilter/xt_cgroup.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/netfilter/xt_cgroup.c
++++ b/net/netfilter/xt_cgroup.c
+@@ -31,7 +31,7 @@ static int cgroup_mt_check(const struct
+ if (info->invert & ~1)
+ return -EINVAL;
+
+- return info->id ? 0 : -EINVAL;
++ return 0;
+ }
+
+ static bool
--- /dev/null
+From 7bd8490eef9776ced7632345df5133384b6be0fe Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 24 Jul 2014 06:36:50 +0200
+Subject: netfilter: xt_hashlimit: perform garbage collection from process context
+
+From: Eric Dumazet <edumazet@google.com>
+
+commit 7bd8490eef9776ced7632345df5133384b6be0fe upstream.
+
+xt_hashlimit cannot be used with large hash tables, because the garbage
+collector is run from a timer. If the table is really big, it's
+possible to hold a cpu for more than 500 msec, which is unacceptable.
+
+Switch to a work queue, and use proper scheduling points to remove
+latency spikes.
+
+Later, we could also switch to a smoother garbage collection done
+at lookup time, one bucket at a time...
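+
+For reference, the timer-to-workqueue shape adopted below, reduced to a
+minimal sketch (the struct and function names here are illustrative):
+
+    #include <linux/workqueue.h>
+    #include <linux/jiffies.h>
+
+    struct gc_state {
+        struct delayed_work gc_work;  /* INIT_DEFERRABLE_WORK(..., gc_run) */
+        unsigned long interval;       /* msecs between passes */
+    };
+
+    /* Runs in process context, so it can drop the lock and call
+     * cond_resched() between hash buckets instead of monopolizing a
+     * CPU from the timer softirq.
+     */
+    static void gc_run(struct work_struct *work)
+    {
+        struct gc_state *gc = container_of(work, struct gc_state,
+                                           gc_work.work);
+
+        /* ... scan one bucket at a time, cond_resched() in between ... */
+
+        queue_delayed_work(system_power_efficient_wq, &gc->gc_work,
+                           msecs_to_jiffies(gc->interval));
+    }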
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Florian Westphal <fw@strlen.de>
+Cc: Patrick McHardy <kaber@trash.net>
+Reviewed-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/netfilter/xt_hashlimit.c | 31 ++++++++++++++++---------------
+ 1 file changed, 16 insertions(+), 15 deletions(-)
+
+--- a/net/netfilter/xt_hashlimit.c
++++ b/net/netfilter/xt_hashlimit.c
+@@ -104,7 +104,7 @@ struct xt_hashlimit_htable {
+ spinlock_t lock; /* lock for list_head */
+ u_int32_t rnd; /* random seed for hash */
+ unsigned int count; /* number entries in table */
+- struct timer_list timer; /* timer for gc */
++ struct delayed_work gc_work;
+
+ /* seq_file stuff */
+ struct proc_dir_entry *pde;
+@@ -213,7 +213,7 @@ dsthash_free(struct xt_hashlimit_htable
+ call_rcu_bh(&ent->rcu, dsthash_free_rcu);
+ ht->count--;
+ }
+-static void htable_gc(unsigned long htlong);
++static void htable_gc(struct work_struct *work);
+
+ static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo,
+ u_int8_t family)
+@@ -273,9 +273,9 @@ static int htable_create(struct net *net
+ }
+ hinfo->net = net;
+
+- setup_timer(&hinfo->timer, htable_gc, (unsigned long)hinfo);
+- hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval);
+- add_timer(&hinfo->timer);
++ INIT_DEFERRABLE_WORK(&hinfo->gc_work, htable_gc);
++ queue_delayed_work(system_power_efficient_wq, &hinfo->gc_work,
++ msecs_to_jiffies(hinfo->cfg.gc_interval));
+
+ hlist_add_head(&hinfo->node, &hashlimit_net->htables);
+
+@@ -300,29 +300,30 @@ static void htable_selective_cleanup(str
+ {
+ unsigned int i;
+
+- /* lock hash table and iterate over it */
+- spin_lock_bh(&ht->lock);
+ for (i = 0; i < ht->cfg.size; i++) {
+ struct dsthash_ent *dh;
+ struct hlist_node *n;
++
++ spin_lock_bh(&ht->lock);
+ hlist_for_each_entry_safe(dh, n, &ht->hash[i], node) {
+ if ((*select)(ht, dh))
+ dsthash_free(ht, dh);
+ }
++ spin_unlock_bh(&ht->lock);
++ cond_resched();
+ }
+- spin_unlock_bh(&ht->lock);
+ }
+
+-/* hash table garbage collector, run by timer */
+-static void htable_gc(unsigned long htlong)
++static void htable_gc(struct work_struct *work)
+ {
+- struct xt_hashlimit_htable *ht = (struct xt_hashlimit_htable *)htlong;
++ struct xt_hashlimit_htable *ht;
++
++ ht = container_of(work, struct xt_hashlimit_htable, gc_work.work);
+
+ htable_selective_cleanup(ht, select_gc);
+
+- /* re-add the timer accordingly */
+- ht->timer.expires = jiffies + msecs_to_jiffies(ht->cfg.gc_interval);
+- add_timer(&ht->timer);
++ queue_delayed_work(system_power_efficient_wq,
++ &ht->gc_work, msecs_to_jiffies(ht->cfg.gc_interval));
+ }
+
+ static void htable_remove_proc_entry(struct xt_hashlimit_htable *hinfo)
+@@ -341,7 +342,7 @@ static void htable_remove_proc_entry(str
+
+ static void htable_destroy(struct xt_hashlimit_htable *hinfo)
+ {
+- del_timer_sync(&hinfo->timer);
++ cancel_delayed_work_sync(&hinfo->gc_work);
+ htable_remove_proc_entry(hinfo);
+ htable_selective_cleanup(hinfo, select_all);
+ kfree(hinfo->name);
--- /dev/null
+From d97a86c170b4e432f76db072a827fe30b4d6f659 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Tue, 5 Aug 2014 11:09:59 +0300
+Subject: partitions: aix.c: off by one bug
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit d97a86c170b4e432f76db072a827fe30b4d6f659 upstream.
+
+The lvip[] array has "state->limit" elements so the condition here
+should be >= instead of >.
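+
+The underlying bounds rule, as a generic sketch (not from the patch):
+
+    /* An array with `limit` elements has valid indices 0 .. limit - 1,
+     * so ix == limit is already out of bounds: reject with >=, not >.
+     */
+    static bool index_ok(unsigned int ix, unsigned int limit)
+    {
+        return ix < limit;
+    }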
+
+Fixes: 6ceea22bbbc8 ('partitions: add aix lvm partition support files')
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Acked-by: Philippe De Muyter <phdm@macqel.be>
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/partitions/aix.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/block/partitions/aix.c
++++ b/block/partitions/aix.c
+@@ -253,7 +253,7 @@ int aix_partition(struct parsed_partitio
+ continue;
+ }
+ lv_ix = be16_to_cpu(p->lv_ix) - 1;
+- if (lv_ix > state->limit) {
++ if (lv_ix >= state->limit) {
+ cur_lv_ix = -1;
+ continue;
+ }
--- /dev/null
+From 27ddcc6596e50cb8f03d2e83248897667811d8f6 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Mon, 26 May 2014 13:40:47 +0200
+Subject: PM / sleep: Add state field to pm_states[] entries
+
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+
+commit 27ddcc6596e50cb8f03d2e83248897667811d8f6 upstream.
+
+To allow sleep states corresponding to the "mem", "standby" and
+"freeze" lables to be different from the pm_states[] indexes of
+those strings, introduce struct pm_sleep_state, consisting of
+a string label and a state number, and turn pm_states[] into an
+array of objects of that type.
+
+This modification should not lead to any functional changes.
+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Cc: Brian Norris <computersforpeace@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/power/main.c | 16 ++++++++--------
+ kernel/power/power.h | 7 ++++++-
+ kernel/power/suspend.c | 12 ++++++------
+ kernel/power/suspend_test.c | 22 ++++++++++------------
+ 4 files changed, 30 insertions(+), 27 deletions(-)
+
+--- a/kernel/power/main.c
++++ b/kernel/power/main.c
+@@ -293,12 +293,12 @@ static ssize_t state_show(struct kobject
+ {
+ char *s = buf;
+ #ifdef CONFIG_SUSPEND
+- int i;
++ suspend_state_t i;
++
++ for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
++ if (valid_state(i))
++ s += sprintf(s,"%s ", pm_states[i].label);
+
+- for (i = 0; i < PM_SUSPEND_MAX; i++) {
+- if (pm_states[i] && valid_state(i))
+- s += sprintf(s,"%s ", pm_states[i]);
+- }
+ #endif
+ #ifdef CONFIG_HIBERNATION
+ s += sprintf(s, "%s\n", "disk");
+@@ -314,7 +314,7 @@ static suspend_state_t decode_state(cons
+ {
+ #ifdef CONFIG_SUSPEND
+ suspend_state_t state = PM_SUSPEND_MIN;
+- const char * const *s;
++ struct pm_sleep_state *s;
+ #endif
+ char *p;
+ int len;
+@@ -328,7 +328,7 @@ static suspend_state_t decode_state(cons
+
+ #ifdef CONFIG_SUSPEND
+ for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++)
+- if (*s && len == strlen(*s) && !strncmp(buf, *s, len))
++ if (len == strlen(s->label) && !strncmp(buf, s->label, len))
+ return state;
+ #endif
+
+@@ -448,7 +448,7 @@ static ssize_t autosleep_show(struct kob
+ #ifdef CONFIG_SUSPEND
+ if (state < PM_SUSPEND_MAX)
+ return sprintf(buf, "%s\n", valid_state(state) ?
+- pm_states[state] : "error");
++ pm_states[state].label : "error");
+ #endif
+ #ifdef CONFIG_HIBERNATION
+ return sprintf(buf, "disk\n");
+--- a/kernel/power/power.h
++++ b/kernel/power/power.h
+@@ -175,8 +175,13 @@ extern void swsusp_show_speed(struct tim
+ unsigned int, char *);
+
+ #ifdef CONFIG_SUSPEND
++struct pm_sleep_state {
++ const char *label;
++ suspend_state_t state;
++};
++
+ /* kernel/power/suspend.c */
+-extern const char *const pm_states[];
++extern struct pm_sleep_state pm_states[];
+
+ extern bool valid_state(suspend_state_t state);
+ extern int suspend_devices_and_enter(suspend_state_t state);
+--- a/kernel/power/suspend.c
++++ b/kernel/power/suspend.c
+@@ -29,10 +29,10 @@
+
+ #include "power.h"
+
+-const char *const pm_states[PM_SUSPEND_MAX] = {
+- [PM_SUSPEND_FREEZE] = "freeze",
+- [PM_SUSPEND_STANDBY] = "standby",
+- [PM_SUSPEND_MEM] = "mem",
++struct pm_sleep_state pm_states[PM_SUSPEND_MAX] = {
++ [PM_SUSPEND_FREEZE] = { "freeze", PM_SUSPEND_FREEZE },
++ [PM_SUSPEND_STANDBY] = { "standby", PM_SUSPEND_STANDBY },
++ [PM_SUSPEND_MEM] = { "mem", PM_SUSPEND_MEM },
+ };
+
+ static const struct platform_suspend_ops *suspend_ops;
+@@ -337,7 +337,7 @@ static int enter_state(suspend_state_t s
+ sys_sync();
+ printk("done.\n");
+
+- pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
++ pr_debug("PM: Preparing system for %s sleep\n", pm_states[state].label);
+ error = suspend_prepare(state);
+ if (error)
+ goto Unlock;
+@@ -345,7 +345,7 @@ static int enter_state(suspend_state_t s
+ if (suspend_test(TEST_FREEZER))
+ goto Finish;
+
+- pr_debug("PM: Entering %s sleep\n", pm_states[state]);
++ pr_debug("PM: Entering %s sleep\n", pm_states[state].label);
+ pm_restrict_gfp_mask();
+ error = suspend_devices_and_enter(state);
+ pm_restore_gfp_mask();
+--- a/kernel/power/suspend_test.c
++++ b/kernel/power/suspend_test.c
+@@ -92,13 +92,13 @@ static void __init test_wakealarm(struct
+ }
+
+ if (state == PM_SUSPEND_MEM) {
+- printk(info_test, pm_states[state]);
++ printk(info_test, pm_states[state].label);
+ status = pm_suspend(state);
+ if (status == -ENODEV)
+ state = PM_SUSPEND_STANDBY;
+ }
+ if (state == PM_SUSPEND_STANDBY) {
+- printk(info_test, pm_states[state]);
++ printk(info_test, pm_states[state].label);
+ status = pm_suspend(state);
+ }
+ if (status < 0)
+@@ -136,18 +136,16 @@ static char warn_bad_state[] __initdata
+
+ static int __init setup_test_suspend(char *value)
+ {
+- unsigned i;
++ suspend_state_t i;
+
+ /* "=mem" ==> "mem" */
+ value++;
+- for (i = 0; i < PM_SUSPEND_MAX; i++) {
+- if (!pm_states[i])
+- continue;
+- if (strcmp(pm_states[i], value) != 0)
+- continue;
+- test_state = (__force suspend_state_t) i;
+- return 0;
+- }
++ for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
++ if (!strcmp(pm_states[i].label, value)) {
++ test_state = pm_states[i].state;
++ return 0;
++ }
++
+ printk(warn_bad_state, value);
+ return 0;
+ }
+@@ -165,7 +163,7 @@ static int __init test_suspend(void)
+ if (test_state == PM_SUSPEND_ON)
+ goto done;
+ if (!valid_state(test_state)) {
+- printk(warn_bad_state, pm_states[test_state]);
++ printk(warn_bad_state, pm_states[test_state].label);
+ goto done;
+ }
+
--- /dev/null
+From 43e8317b0bba1d6eb85f38a4a233d82d7c20d732 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Mon, 26 May 2014 13:40:53 +0200
+Subject: PM / sleep: Use valid_state() for platform-dependent sleep states only
+
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+
+commit 43e8317b0bba1d6eb85f38a4a233d82d7c20d732 upstream.
+
+Use the observation that, for platform-dependent sleep states
+(PM_SUSPEND_STANDBY, PM_SUSPEND_MEM), a given state is either
+always supported or always unsupported and store that information
+in pm_states[] instead of calling valid_state() every time we
+need to check it.
+
+Also do not use valid_state() for PM_SUSPEND_FREEZE, which is always
+valid, and move the pm_test_level validity check for PM_SUSPEND_FREEZE
+directly into enter_state().
+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Cc: Brian Norris <computersforpeace@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/power/main.c | 9 +++---
+ kernel/power/power.h | 2 -
+ kernel/power/suspend.c | 60 ++++++++++++++++++++++----------------------
+ kernel/power/suspend_test.c | 2 -
+ 4 files changed, 36 insertions(+), 37 deletions(-)
+
+--- a/kernel/power/main.c
++++ b/kernel/power/main.c
+@@ -296,7 +296,7 @@ static ssize_t state_show(struct kobject
+ suspend_state_t i;
+
+ for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
+- if (valid_state(i))
++ if (pm_states[i].state)
+ s += sprintf(s,"%s ", pm_states[i].label);
+
+ #endif
+@@ -328,8 +328,9 @@ static suspend_state_t decode_state(cons
+
+ #ifdef CONFIG_SUSPEND
+ for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++)
+- if (len == strlen(s->label) && !strncmp(buf, s->label, len))
+- return state;
++ if (s->state && len == strlen(s->label)
++ && !strncmp(buf, s->label, len))
++ return s->state;
+ #endif
+
+ return PM_SUSPEND_ON;
+@@ -447,7 +448,7 @@ static ssize_t autosleep_show(struct kob
+
+ #ifdef CONFIG_SUSPEND
+ if (state < PM_SUSPEND_MAX)
+- return sprintf(buf, "%s\n", valid_state(state) ?
++ return sprintf(buf, "%s\n", pm_states[state].state ?
+ pm_states[state].label : "error");
+ #endif
+ #ifdef CONFIG_HIBERNATION
+--- a/kernel/power/power.h
++++ b/kernel/power/power.h
+@@ -183,14 +183,12 @@ struct pm_sleep_state {
+ /* kernel/power/suspend.c */
+ extern struct pm_sleep_state pm_states[];
+
+-extern bool valid_state(suspend_state_t state);
+ extern int suspend_devices_and_enter(suspend_state_t state);
+ #else /* !CONFIG_SUSPEND */
+ static inline int suspend_devices_and_enter(suspend_state_t state)
+ {
+ return -ENOSYS;
+ }
+-static inline bool valid_state(suspend_state_t state) { return false; }
+ #endif /* !CONFIG_SUSPEND */
+
+ #ifdef CONFIG_PM_TEST_SUSPEND
+--- a/kernel/power/suspend.c
++++ b/kernel/power/suspend.c
+@@ -30,9 +30,9 @@
+ #include "power.h"
+
+ struct pm_sleep_state pm_states[PM_SUSPEND_MAX] = {
+- [PM_SUSPEND_FREEZE] = { "freeze", PM_SUSPEND_FREEZE },
+- [PM_SUSPEND_STANDBY] = { "standby", PM_SUSPEND_STANDBY },
+- [PM_SUSPEND_MEM] = { "mem", PM_SUSPEND_MEM },
++ [PM_SUSPEND_FREEZE] = { .label = "freeze", .state = PM_SUSPEND_FREEZE },
++ [PM_SUSPEND_STANDBY] = { .label = "standby", },
++ [PM_SUSPEND_MEM] = { .label = "mem", },
+ };
+
+ static const struct platform_suspend_ops *suspend_ops;
+@@ -62,42 +62,34 @@ void freeze_wake(void)
+ }
+ EXPORT_SYMBOL_GPL(freeze_wake);
+
++static bool valid_state(suspend_state_t state)
++{
++ /*
++ * PM_SUSPEND_STANDBY and PM_SUSPEND_MEM states need low level
++ * support and need to be valid to the low level
++ * implementation, no valid callback implies that none are valid.
++ */
++ return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
++}
++
+ /**
+ * suspend_set_ops - Set the global suspend method table.
+ * @ops: Suspend operations to use.
+ */
+ void suspend_set_ops(const struct platform_suspend_ops *ops)
+ {
++ suspend_state_t i;
++
+ lock_system_sleep();
++
+ suspend_ops = ops;
++ for (i = PM_SUSPEND_STANDBY; i <= PM_SUSPEND_MEM; i++)
++ pm_states[i].state = valid_state(i) ? i : 0;
++
+ unlock_system_sleep();
+ }
+ EXPORT_SYMBOL_GPL(suspend_set_ops);
+
+-bool valid_state(suspend_state_t state)
+-{
+- if (state == PM_SUSPEND_FREEZE) {
+-#ifdef CONFIG_PM_DEBUG
+- if (pm_test_level != TEST_NONE &&
+- pm_test_level != TEST_FREEZER &&
+- pm_test_level != TEST_DEVICES &&
+- pm_test_level != TEST_PLATFORM) {
+- printk(KERN_WARNING "Unsupported pm_test mode for "
+- "freeze state, please choose "
+- "none/freezer/devices/platform.\n");
+- return false;
+- }
+-#endif
+- return true;
+- }
+- /*
+- * PM_SUSPEND_STANDBY and PM_SUSPEND_MEMORY states need lowlevel
+- * support and need to be valid to the lowlevel
+- * implementation, no valid callback implies that none are valid.
+- */
+- return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
+-}
+-
+ /**
+ * suspend_valid_only_mem - Generic memory-only valid callback.
+ *
+@@ -324,9 +316,17 @@ static int enter_state(suspend_state_t s
+ {
+ int error;
+
+- if (!valid_state(state))
+- return -ENODEV;
+-
++ if (state == PM_SUSPEND_FREEZE) {
++#ifdef CONFIG_PM_DEBUG
++ if (pm_test_level != TEST_NONE && pm_test_level <= TEST_CPUS) {
++ pr_warning("PM: Unsupported test mode for freeze state,"
++ "please choose none/freezer/devices/platform.\n");
++ return -EAGAIN;
++ }
++#endif
++ } else if (!valid_state(state)) {
++ return -EINVAL;
++ }
+ if (!mutex_trylock(&pm_mutex))
+ return -EBUSY;
+
+--- a/kernel/power/suspend_test.c
++++ b/kernel/power/suspend_test.c
+@@ -162,7 +162,7 @@ static int __init test_suspend(void)
+ /* PM is initialized by now; is that state testable? */
+ if (test_state == PM_SUSPEND_ON)
+ goto done;
+- if (!valid_state(test_state)) {
++ if (!pm_states[test_state].state) {
+ printk(warn_bad_state, pm_states[test_state].label);
+ goto done;
+ }
--- /dev/null
+From d4089a332883ad969700aac5dd4dd5f1c4fee825 Mon Sep 17 00:00:00 2001
+From: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Date: Mon, 28 Apr 2014 15:59:56 +0300
+Subject: serial: 8250_dma: check the result of TX buffer mapping
+
+From: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+
+commit d4089a332883ad969700aac5dd4dd5f1c4fee825 upstream.
+
+Use dma_mapping_error() to make sure the mapping did not
+fail.
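+
+For reference, the standard streaming-DMA pattern being adopted, as a
+minimal sketch (the function name and parameters are illustrative):
+
+    #include <linux/dma-mapping.h>
+
+    /* A dma_addr_t returned by dma_map_single() must be validated with
+     * dma_mapping_error() before use; on failure nothing was mapped,
+     * so there is nothing to unmap.
+     */
+    static int map_tx(struct device *dev, void *buf, size_t len,
+                      dma_addr_t *addr)
+    {
+        *addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
+        if (dma_mapping_error(dev, *addr))
+            return -ENOMEM;
+        return 0;
+    }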
+
+Signed-off-by: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Cc: "Petallo, MauriceX R" <mauricex.r.petallo@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/tty/serial/8250/8250_dma.c | 17 ++++++++++++-----
+ 1 file changed, 12 insertions(+), 5 deletions(-)
+
+--- a/drivers/tty/serial/8250/8250_dma.c
++++ b/drivers/tty/serial/8250/8250_dma.c
+@@ -192,21 +192,28 @@ int serial8250_request_dma(struct uart_8
+
+ dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
+ &dma->rx_addr, GFP_KERNEL);
+- if (!dma->rx_buf) {
+- dma_release_channel(dma->rxchan);
+- dma_release_channel(dma->txchan);
+- return -ENOMEM;
+- }
++ if (!dma->rx_buf)
++ goto err;
+
+ /* TX buffer */
+ dma->tx_addr = dma_map_single(dma->txchan->device->dev,
+ p->port.state->xmit.buf,
+ UART_XMIT_SIZE,
+ DMA_TO_DEVICE);
++ if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) {
++ dma_free_coherent(dma->rxchan->device->dev, dma->rx_size,
++ dma->rx_buf, dma->rx_addr);
++ goto err;
++ }
+
+ dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");
+
+ return 0;
++err:
++ dma_release_channel(dma->rxchan);
++ dma_release_channel(dma->txchan);
++
++ return -ENOMEM;
+ }
+ EXPORT_SYMBOL_GPL(serial8250_request_dma);
+
lockd-fix-rpcbind-crash-on-lockd-startup-failure.patch
lockdep-revert-lockdep-check-in-raw_seqcount_begin.patch
genhd-fix-leftover-might_sleep-in-blk_free_devt.patch
-usb-dwc3-fix-trb-completion-when-multiple-trbs-are-started.patch
usb-dwc3-core-fix-order-of-pm-runtime-calls.patch
usb-dwc3-core-fix-ordering-for-phy-suspend.patch
revert-mac80211-disable-uapsd-if-all-acs-are-under-acm.patch
media-af9035-new-ids-add-support-for-pctv-78e-and-pctv-79e.patch
media-cx18-fix-kernel-oops-with-tda8290-tuner.patch
media-adv7604-fix-inverted-condition.patch
+md-raid1-clean-up-request-counts-properly-in-close_sync.patch
+md-raid1-be-more-cautious-where-we-read-balance-during-resync.patch
+md-raid1-make-sure-resync-waits-for-conflicting-writes-to-complete.patch
+md-raid1-don-t-use-next_resync-to-determine-how-far-resync-has-progressed.patch
+md-raid1-update-next_resync-under-resync_lock.patch
+md-raid1-count-resync-requests-in-nr_pending.patch
+md-raid1-fix_read_error-should-act-on-all-non-faulty-devices.patch
+md-raid1-intialise-start_next_window-for-read-case-to-avoid-hang.patch
+ipvs-avoid-netns-exit-crash-on-ip_vs_conn_drop_conntrack.patch
+netfilter-xt_hashlimit-perform-garbage-collection-from-process-context.patch
+ipvs-maintain-all-dscp-and-ecn-bits-for-ipv6-tun-forwarding.patch
+netfilter-x_tables-allow-to-use-default-cgroup-match.patch
+ipvs-fix-ipv6-hook-registration-for-local-replies.patch
+pm-sleep-add-state-field-to-pm_states-entries.patch
+pm-sleep-use-valid_state-for-platform-dependent-sleep-states-only.patch
+serial-8250_dma-check-the-result-of-tx-buffer-mapping.patch
+dmaengine-dw-introduce-dwc_dostart_first_queued-helper.patch
+dmaengine-dw-don-t-perform-dma-when-dmaengine_submit-is-called.patch
+partitions-aix.c-off-by-one-bug.patch
+++ /dev/null
-From 0b93a4c838fa10370d72f86fe712426ac63804de Mon Sep 17 00:00:00 2001
-From: Felipe Balbi <balbi@ti.com>
-Date: Thu, 4 Sep 2014 10:28:10 -0500
-Subject: usb: dwc3: fix TRB completion when multiple TRBs are started
-
-From: Felipe Balbi <balbi@ti.com>
-
-commit 0b93a4c838fa10370d72f86fe712426ac63804de upstream.
-
-After commit 2ec2a8be (usb: dwc3: gadget:
-always enable IOC on bulk/interrupt transfers)
-we created a situation where it was possible to
-hang a bulk/interrupt endpoint if we had more
-than one pending request in our queue and they
-were both started with a single Start Transfer
-command.
-
-The problems triggers because we had not enabled
-Transfer In Progress event for those endpoints
-and we were not able to process early giveback
-of requests completed without LST bit set.
-
-Fix the problem by finally enabling Xfer In Progress
-event for all endpoint types, except control.
-
-Fixes: 2ec2a8be (usb: dwc3: gadget: always
- enable IOC on bulk/interrupt transfers)
-Reported-by: Pratyush Anand <pratyush.anand@st.com>
-Signed-off-by: Felipe Balbi <balbi@ti.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-
-
----
- drivers/usb/dwc3/gadget.c | 8 +-------
- 1 file changed, 1 insertion(+), 7 deletions(-)
-
---- a/drivers/usb/dwc3/gadget.c
-+++ b/drivers/usb/dwc3/gadget.c
-@@ -445,7 +445,7 @@ static int dwc3_gadget_set_ep_config(str
- dep->stream_capable = true;
- }
-
-- if (usb_endpoint_xfer_isoc(desc))
-+ if (!usb_endpoint_xfer_control(desc))
- params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
-
- /*
-@@ -1937,12 +1937,6 @@ static void dwc3_endpoint_interrupt(stru
- dwc3_endpoint_transfer_complete(dwc, dep, event, 1);
- break;
- case DWC3_DEPEVT_XFERINPROGRESS:
-- if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
-- dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
-- dep->name);
-- return;
-- }
--
- dwc3_endpoint_transfer_complete(dwc, dep, event, 0);
- break;
- case DWC3_DEPEVT_XFERNOTREADY: