--- /dev/null
+From 7ede612fd615abcda0cc30e5bef2a70f4cf4f75c Mon Sep 17 00:00:00 2001
+From: Felix Fietkau <nbd@openwrt.org>
+Date: Mon, 14 Oct 2013 21:18:48 +0200
+Subject: ath5k: fix regression in tx status processing
+
+From: Felix Fietkau <nbd@openwrt.org>
+
+commit 7ede612fd615abcda0cc30e5bef2a70f4cf4f75c upstream.
+
+The regression was introduced in the following commit:
+
+0967e01e8e713ed2982fb4eba8ba13794e9a6e89
+"ath5k: make use of the new rate control API"
+
+ath5k_tx_frame_completed saves the intended per-rate retry counts before
+they are cleared by ieee80211_tx_info_clear_status; however, at this
+point the information in info->status.rates is incomplete.
+
+This causes significant throughput degradation and excessive packet loss
+on links where high bit rates don't work properly.
+
+Move the copy from bf->rates a few lines up to ensure that the saved
+retry counts are updated, and that they are really cleared in
+info->status.rates after the call to ieee80211_tx_info_clear_status.
+
+Cc: Thomas Huehn <thomas@net.t-labs.tu-berlin.de>
+Cc: Benjamin Vahl <bvahl@net.t-labs.tu-berlin.de>
+Reported-by: Ben West <ben@gowasabi.net>
+Signed-off-by: Felix Fietkau <nbd@openwrt.org>
+Acked-by: Thomas Huehn <thomas@net.t-labs.tu-berlin.de>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/ath/ath5k/base.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/wireless/ath/ath5k/base.c
++++ b/drivers/net/wireless/ath/ath5k/base.c
+@@ -1663,15 +1663,15 @@ ath5k_tx_frame_completed(struct ath5k_hw
+ ah->stats.tx_bytes_count += skb->len;
+ info = IEEE80211_SKB_CB(skb);
+
++ size = min_t(int, sizeof(info->status.rates), sizeof(bf->rates));
++ memcpy(info->status.rates, bf->rates, size);
++
+ tries[0] = info->status.rates[0].count;
+ tries[1] = info->status.rates[1].count;
+ tries[2] = info->status.rates[2].count;
+
+ ieee80211_tx_info_clear_status(info);
+
+- size = min_t(int, sizeof(info->status.rates), sizeof(bf->rates));
+- memcpy(info->status.rates, bf->rates, size);
+-
+ for (i = 0; i < ts->ts_final_idx; i++) {
+ struct ieee80211_tx_rate *r =
+ &info->status.rates[i];
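
Illustration: a minimal user-space model of the ordering fix (a
standalone sketch with simplified stand-in structs, not driver code).
The per-rate retry counts must be copied from the driver's bf->rates
into info->status.rates before they are saved and cleared; with the
pre-fix ordering the counts are saved from the incomplete table.

 #include <stdio.h>
 #include <string.h>

 struct rate { int count; };

 int main(void)
 {
 	/* driver-side rate table: the retry counts actually used */
 	struct rate bf_rates[3] = { { 2 }, { 3 }, { 1 } };
 	/* info->status.rates: incomplete at tx-completion time */
 	struct rate info_rates[3] = { { 0 }, { 0 }, { 0 } };
 	int tries[3];

 	/* fixed order: copy first, then save, then clear */
 	memcpy(info_rates, bf_rates, sizeof(info_rates));
 	tries[0] = info_rates[0].count;
 	tries[1] = info_rates[1].count;
 	tries[2] = info_rates[2].count;
 	memset(info_rates, 0, sizeof(info_rates));	/* clear_status */

 	/* prints "saved tries: 2 3 1"; the pre-fix order prints 0 0 0,
 	 * so rate control never learns about failed high-rate attempts */
 	printf("saved tries: %d %d %d\n", tries[0], tries[1], tries[2]);
 	return 0;
 }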
--- /dev/null
+From fff4996b7db7955414ac74386efa5e07fd766b50 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Mon, 14 Oct 2013 12:11:36 -0400
+Subject: blk-core: Fix memory corruption if blkcg_init_queue fails
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit fff4996b7db7955414ac74386efa5e07fd766b50 upstream.
+
+If blkcg_init_queue fails, blk_alloc_queue_node doesn't call bdi_destroy
+to clean up structures allocated by the backing dev.
+
+------------[ cut here ]------------
+WARNING: at lib/debugobjects.c:260 debug_print_object+0x85/0xa0()
+ODEBUG: free active (active state 0) object type: percpu_counter hint: (null)
+Modules linked in: dm_loop dm_mod ip6table_filter ip6_tables uvesafb cfbcopyarea cfbimgblt cfbfillrect fbcon font bitblit fbcon_rotate fbcon_cw fbcon_ud fbcon_ccw softcursor fb fbdev ipt_MASQUERADE iptable_nat nf_nat_ipv4 msr nf_conntrack_ipv4 nf_defrag_ipv4 xt_state ipt_REJECT xt_tcpudp iptable_filter ip_tables x_tables bridge stp llc tun ipv6 cpufreq_userspace cpufreq_stats cpufreq_powersave cpufreq_ondemand cpufreq_conservative spadfs fuse hid_generic usbhid hid raid0 md_mod dmi_sysfs nf_nat_ftp nf_nat nf_conntrack_ftp nf_conntrack lm85 hwmon_vid snd_usb_audio snd_pcm_oss snd_mixer_oss snd_pcm snd_timer snd_page_alloc snd_hwdep snd_usbmidi_lib snd_rawmidi snd soundcore acpi_cpufreq freq_table mperf sata_svw serverworks kvm_amd ide_core ehci_pci ohci_hcd libata ehci_hcd kvm usbcore tg3 usb_common libphy k10temp pcspkr ptp i2c_piix4 i2c_core evdev microcode hwmon rtc_cmos pps_core e100 skge floppy mii processor button unix
+CPU: 0 PID: 2739 Comm: lvchange Tainted: G W 3.10.15-devel #14
+Hardware name: empty empty/S3992-E, BIOS 'V1.06 ' 06/09/2009
+ 0000000000000009 ffff88023c3c1ae8 ffffffff813c8fd4 ffff88023c3c1b20
+ ffffffff810399eb ffff88043d35cd58 ffffffff81651940 ffff88023c3c1bf8
+ ffffffff82479d90 0000000000000005 ffff88023c3c1b80 ffffffff81039a67
+Call Trace:
+ [<ffffffff813c8fd4>] dump_stack+0x19/0x1b
+ [<ffffffff810399eb>] warn_slowpath_common+0x6b/0xa0
+ [<ffffffff81039a67>] warn_slowpath_fmt+0x47/0x50
+ [<ffffffff8122aaaf>] ? debug_check_no_obj_freed+0xcf/0x250
+ [<ffffffff81229a15>] debug_print_object+0x85/0xa0
+ [<ffffffff8122abe3>] debug_check_no_obj_freed+0x203/0x250
+ [<ffffffff8113c4ac>] kmem_cache_free+0x20c/0x3a0
+ [<ffffffff811f6709>] blk_alloc_queue_node+0x2a9/0x2c0
+ [<ffffffff811f672e>] blk_alloc_queue+0xe/0x10
+ [<ffffffffa04c0093>] dm_create+0x1a3/0x530 [dm_mod]
+ [<ffffffffa04c6bb0>] ? list_version_get_info+0xe0/0xe0 [dm_mod]
+ [<ffffffffa04c6c07>] dev_create+0x57/0x2b0 [dm_mod]
+ [<ffffffffa04c6bb0>] ? list_version_get_info+0xe0/0xe0 [dm_mod]
+ [<ffffffffa04c6bb0>] ? list_version_get_info+0xe0/0xe0 [dm_mod]
+ [<ffffffffa04c6528>] ctl_ioctl+0x268/0x500 [dm_mod]
+ [<ffffffff81097662>] ? get_lock_stats+0x22/0x70
+ [<ffffffffa04c67ce>] dm_ctl_ioctl+0xe/0x20 [dm_mod]
+ [<ffffffff81161aad>] do_vfs_ioctl+0x2ed/0x520
+ [<ffffffff8116cfc7>] ? fget_light+0x377/0x4e0
+ [<ffffffff81161d2b>] SyS_ioctl+0x4b/0x90
+ [<ffffffff813cff16>] system_call_fastpath+0x1a/0x1f
+---[ end trace 4b5ff0d55673d986 ]---
+------------[ cut here ]------------
+
+This fix should be backported to stable kernels starting with 2.6.37. Note
+that in the kernels prior to 3.5 the affected code is different, but the
+bug is still there - bdi_init is called and bdi_destroy isn't.
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Acked-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-core.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -645,10 +645,12 @@ struct request_queue *blk_alloc_queue_no
+ __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
+
+ if (blkcg_init_queue(q))
+- goto fail_id;
++ goto fail_bdi;
+
+ return q;
+
++fail_bdi:
++ bdi_destroy(&q->backing_dev_info);
+ fail_id:
+ ida_simple_remove(&blk_queue_ida, q->id);
+ fail_q:
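
Illustration: the fix follows the usual kernel error-unwind idiom, where
each failure label undoes everything set up before the failing step, in
reverse order; the bug was a jump to the wrong label, skipping the bdi
teardown. A minimal user-space sketch of the pattern (the setup/teardown
names here are hypothetical, not the block-layer functions):

 #include <stdio.h>
 #include <stdlib.h>

 struct queue { void *bdi; };

 static int bdi_setup(struct queue *q) { q->bdi = malloc(16); return q->bdi ? 0 : -1; }
 static void bdi_teardown(struct queue *q) { free(q->bdi); }
 static int cgroup_setup(struct queue *q) { (void)q; return -1; /* simulated failure */ }

 static struct queue *alloc_queue(void)
 {
 	struct queue *q = malloc(sizeof(*q));

 	if (!q)
 		return NULL;
 	if (bdi_setup(q))
 		goto fail_q;
 	if (cgroup_setup(q))
 		goto fail_bdi;	/* must unwind the bdi, not skip it */
 	return q;

 fail_bdi:
 	bdi_teardown(q);
 fail_q:
 	free(q);
 	return NULL;
 }

 int main(void)
 {
 	printf("alloc_queue() = %p\n", (void *)alloc_queue());
 	return 0;
 }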
--- /dev/null
+From f36afb3957353d2529cb2b00f78fdccd14fc5e9c Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Thu, 31 Oct 2013 13:55:45 -0400
+Subject: dm: allocate buffer for messages with small number of arguments using GFP_NOIO
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit f36afb3957353d2529cb2b00f78fdccd14fc5e9c upstream.
+
+dm-mpath and dm-thin must process messages even if some device is
+suspended, so we allocate the argv buffer with GFP_NOIO. These messages
+have a small fixed number of arguments.
+
+On the other hand, dm-switch needs to process bulk data using messages,
+so excessive use of GFP_NOIO could cause trouble.
+
+The patch also lowers the default number of arguments from 64 to 8, so
+that there is a smaller load on GFP_NOIO allocations.
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Acked-by: Alasdair G Kergon <agk@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-table.c | 18 ++++++++++++++++--
+ 1 file changed, 16 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -545,14 +545,28 @@ static int adjoin(struct dm_table *table
+
+ /*
+ * Used to dynamically allocate the arg array.
++ *
++ * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must
++ * process messages even if some device is suspended. These messages have a
++ * small fixed number of arguments.
++ *
++ * On the other hand, dm-switch needs to process bulk data using messages and
++ * excessive use of GFP_NOIO could cause trouble.
+ */
+ static char **realloc_argv(unsigned *array_size, char **old_argv)
+ {
+ char **argv;
+ unsigned new_size;
++ gfp_t gfp;
+
+- new_size = *array_size ? *array_size * 2 : 64;
+- argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
++ if (*array_size) {
++ new_size = *array_size * 2;
++ gfp = GFP_KERNEL;
++ } else {
++ new_size = 8;
++ gfp = GFP_NOIO;
++ }
++ argv = kmalloc(new_size * sizeof(*argv), gfp);
+ if (argv) {
+ memcpy(argv, old_argv, *array_size * sizeof(*argv));
+ *array_size = new_size;
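
Illustration: the resulting allocation policy, as a standalone user-space
model (the gfp modes are stand-in strings, since there is no GFP in user
space): the first allocation is small and must not recurse into I/O
(GFP_NOIO); later regrowths double the array and may use GFP_KERNEL.

 #include <stdio.h>

 int main(void)
 {
 	unsigned array_size = 0, new_size;
 	int i;

 	for (i = 0; i < 4; i++) {
 		const char *gfp = array_size ? "GFP_KERNEL" : "GFP_NOIO";

 		new_size = array_size ? array_size * 2 : 8;
 		printf("grow %u -> %u entries with %s\n",
 		       array_size, new_size, gfp);
 		array_size = new_size;
 	}
 	return 0;
 }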
--- /dev/null
+From 9c1d4de56066e4d6abc66ec188faafd7b303fb08 Mon Sep 17 00:00:00 2001
+From: Joe Thornber <ejt@redhat.com>
+Date: Wed, 30 Oct 2013 11:19:59 +0000
+Subject: dm array: fix bug in growing array
+
+From: Joe Thornber <ejt@redhat.com>
+
+commit 9c1d4de56066e4d6abc66ec188faafd7b303fb08 upstream.
+
+Entries would be lost if the old tail block was partially filled.
+
+Signed-off-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/persistent-data/dm-array.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/md/persistent-data/dm-array.c
++++ b/drivers/md/persistent-data/dm-array.c
+@@ -509,15 +509,18 @@ static int grow_add_tail_block(struct re
+ static int grow_needs_more_blocks(struct resize *resize)
+ {
+ int r;
++ unsigned old_nr_blocks = resize->old_nr_full_blocks;
+
+ if (resize->old_nr_entries_in_last_block > 0) {
++ old_nr_blocks++;
++
+ r = grow_extend_tail_block(resize, resize->max_entries);
+ if (r)
+ return r;
+ }
+
+ r = insert_full_ablocks(resize->info, resize->size_of_block,
+- resize->old_nr_full_blocks,
++ old_nr_blocks,
+ resize->new_nr_full_blocks,
+ resize->max_entries, resize->value,
+ &resize->root);
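
Illustration: a worked example of the index bump (the 64-entries-per-block
geometry and the counts are made up; the real geometry depends on the
block and value sizes):

 #include <stdio.h>

 int main(void)
 {
 	unsigned old_nr_full_blocks = 2;		/* blocks 0 and 1 are full */
 	unsigned old_nr_entries_in_last_block = 10;	/* block 2 holds 10 of 64 */
 	unsigned old_nr_blocks = old_nr_full_blocks;

 	if (old_nr_entries_in_last_block > 0)
 		old_nr_blocks++;	/* block 2 is extended in place, keep it */

 	/* pre-fix, new default-filled blocks were inserted starting at
 	 * index 2, clobbering the 10 entries in the old tail block */
 	printf("insert new full blocks starting at index %u (was %u)\n",
 	       old_nr_blocks, old_nr_full_blocks);
 	return 0;
 }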
--- /dev/null
+From 66cb1910df17b38334153462ec8166e48058035f Mon Sep 17 00:00:00 2001
+From: Joe Thornber <ejt@redhat.com>
+Date: Wed, 30 Oct 2013 17:11:58 +0000
+Subject: dm cache: fix a race condition between queuing new migrations and quiescing for a shutdown
+
+From: Joe Thornber <ejt@redhat.com>
+
+commit 66cb1910df17b38334153462ec8166e48058035f upstream.
+
+The code that was trying to do this was inadequate. The postsuspend
+method (in ioctl context) needs to wait for the worker thread to
+acknowledge the request to quiesce. Otherwise the migration count may
+drop to zero temporarily before the worker thread realises we're
+quiescing. In this case the target will be taken down, but the worker
+thread may have issued a new migration, which will cause an oops when
+it completes.
+
+Signed-off-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-cache-target.c | 54 +++++++++++++++++++++++++++++++------------
+ 1 file changed, 40 insertions(+), 14 deletions(-)
+
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -148,6 +148,9 @@ struct cache {
+ wait_queue_head_t migration_wait;
+ atomic_t nr_migrations;
+
++ wait_queue_head_t quiescing_wait;
++ atomic_t quiescing_ack;
++
+ /*
+ * cache_size entries, dirty if set
+ */
+@@ -748,8 +751,9 @@ static void cell_defer(struct cache *cac
+
+ static void cleanup_migration(struct dm_cache_migration *mg)
+ {
+- dec_nr_migrations(mg->cache);
++ struct cache *cache = mg->cache;
+ free_migration(mg);
++ dec_nr_migrations(cache);
+ }
+
+ static void migration_failure(struct dm_cache_migration *mg)
+@@ -1346,34 +1350,51 @@ static void writeback_some_dirty_blocks(
+ /*----------------------------------------------------------------
+ * Main worker loop
+ *--------------------------------------------------------------*/
+-static void start_quiescing(struct cache *cache)
++static bool is_quiescing(struct cache *cache)
+ {
++ int r;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache->lock, flags);
+- cache->quiescing = 1;
++ r = cache->quiescing;
+ spin_unlock_irqrestore(&cache->lock, flags);
++
++ return r;
+ }
+
+-static void stop_quiescing(struct cache *cache)
++static void ack_quiescing(struct cache *cache)
++{
++ if (is_quiescing(cache)) {
++ atomic_inc(&cache->quiescing_ack);
++ wake_up(&cache->quiescing_wait);
++ }
++}
++
++static void wait_for_quiescing_ack(struct cache *cache)
++{
++ wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack));
++}
++
++static void start_quiescing(struct cache *cache)
+ {
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache->lock, flags);
+- cache->quiescing = 0;
++ cache->quiescing = true;
+ spin_unlock_irqrestore(&cache->lock, flags);
++
++ wait_for_quiescing_ack(cache);
+ }
+
+-static bool is_quiescing(struct cache *cache)
++static void stop_quiescing(struct cache *cache)
+ {
+- int r;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache->lock, flags);
+- r = cache->quiescing;
++ cache->quiescing = false;
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+- return r;
++ atomic_set(&cache->quiescing_ack, 0);
+ }
+
+ static void wait_for_migrations(struct cache *cache)
+@@ -1420,16 +1441,15 @@ static void do_worker(struct work_struct
+ struct cache *cache = container_of(ws, struct cache, worker);
+
+ do {
+- if (!is_quiescing(cache))
++ if (!is_quiescing(cache)) {
++ writeback_some_dirty_blocks(cache);
++ process_deferred_writethrough_bios(cache);
+ process_deferred_bios(cache);
++ }
+
+ process_migrations(cache, &cache->quiesced_migrations, issue_copy);
+ process_migrations(cache, &cache->completed_migrations, complete_migration);
+
+- writeback_some_dirty_blocks(cache);
+-
+- process_deferred_writethrough_bios(cache);
+-
+ if (commit_if_needed(cache)) {
+ process_deferred_flush_bios(cache, false);
+
+@@ -1442,6 +1462,9 @@ static void do_worker(struct work_struct
+ process_migrations(cache, &cache->need_commit_migrations,
+ migration_success_post_commit);
+ }
++
++ ack_quiescing(cache);
++
+ } while (more_work(cache));
+ }
+
+@@ -2005,6 +2028,9 @@ static int cache_create(struct cache_arg
+ atomic_set(&cache->nr_migrations, 0);
+ init_waitqueue_head(&cache->migration_wait);
+
++ init_waitqueue_head(&cache->quiescing_wait);
++ atomic_set(&cache->quiescing_ack, 0);
++
+ r = -ENOMEM;
+ cache->nr_dirty = 0;
+ cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
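
Illustration: the added quiescing_wait/quiescing_ack pair is a classic
wait-for-acknowledgement handshake. A minimal user-space sketch with
pthreads (the names mirror the patch; the mechanism here is
condvar-based rather than the kernel's waitqueues):

 #include <pthread.h>
 #include <stdbool.h>
 #include <stdio.h>

 static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
 static pthread_cond_t quiescing_wait = PTHREAD_COND_INITIALIZER;
 static bool quiescing, quiescing_ack;

 static void *worker(void *arg)
 {
 	(void)arg;
 	pthread_mutex_lock(&lock);
 	if (!quiescing) {
 		/* process deferred bios, queue new migrations, ... */
 	}
 	if (quiescing) {			/* ack_quiescing() */
 		quiescing_ack = true;
 		pthread_cond_signal(&quiescing_wait);
 	}
 	pthread_mutex_unlock(&lock);
 	return NULL;
 }

 int main(void)
 {
 	pthread_t t;

 	pthread_mutex_lock(&lock);
 	quiescing = true;			/* start_quiescing() */
 	pthread_create(&t, NULL, worker, NULL);
 	while (!quiescing_ack)			/* wait_for_quiescing_ack() */
 		pthread_cond_wait(&quiescing_wait, &lock);
 	pthread_mutex_unlock(&lock);
 	pthread_join(t, NULL);

 	puts("worker acknowledged quiescing; migration count can only fall");
 	return 0;
 }

Only after the acknowledgement is it safe to wait for nr_migrations to
drop to zero, because the worker is guaranteed not to queue new ones.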
--- /dev/null
+From 954a73d5d3073df2231820c718fdd2f18b0fe4c9 Mon Sep 17 00:00:00 2001
+From: Shiva Krishna Merla <shivakrishna.merla@netapp.com>
+Date: Wed, 30 Oct 2013 03:26:38 +0000
+Subject: dm mpath: fix race condition between multipath_dtr and pg_init_done
+
+From: Shiva Krishna Merla <shivakrishna.merla@netapp.com>
+
+commit 954a73d5d3073df2231820c718fdd2f18b0fe4c9 upstream.
+
+Whenever multipath_dtr() is happening we must prevent queueing any
+further path activation work. Implement this by adding a new
+'pg_init_disabled' flag to the multipath structure that denotes future
+path activation work should be skipped if it is set. By disabling
+pg_init and then re-enabling it in flush_multipath_work() we also avoid
+the potential for pg_init to be initiated while suspending an mpath
+device.
+
+Without this patch a race condition exists that may result in a kernel
+panic:
+
+1) If after pg_init_done() decrements pg_init_in_progress to 0, a call
+ to wait_for_pg_init_completion() assumes there are no more pending path
+ management commands.
+2) If pg_init_required is set by pg_init_done(), due to retryable
+ mode_select errors, then process_queued_ios() will again queue the
+ path activation work.
+3) If free_multipath() completes before activate_path() work is called a
+ NULL pointer dereference like the following can be seen when
+ accessing members of the recently destructed multipath:
+
+BUG: unable to handle kernel NULL pointer dereference at 0000000000000090
+RIP: 0010:[<ffffffffa003db1b>] [<ffffffffa003db1b>] activate_path+0x1b/0x30 [dm_multipath]
+[<ffffffff81090ac0>] worker_thread+0x170/0x2a0
+[<ffffffff81096c80>] ? autoremove_wake_function+0x0/0x40
+
+[switch to disabling pg_init in flush_multipath_work & header edits by Mike Snitzer]
+Signed-off-by: Shiva Krishna Merla <shivakrishna.merla@netapp.com>
+Reviewed-by: Krishnasamy Somasundaram <somasundaram.krishnasamy@netapp.com>
+Tested-by: Speagle Andy <Andy.Speagle@netapp.com>
+Acked-by: Junichi Nomura <j-nomura@ce.jp.nec.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-mpath.c | 18 +++++++++++++++---
+ 1 file changed, 15 insertions(+), 3 deletions(-)
+
+--- a/drivers/md/dm-mpath.c
++++ b/drivers/md/dm-mpath.c
+@@ -87,6 +87,7 @@ struct multipath {
+ unsigned queue_if_no_path:1; /* Queue I/O if last path fails? */
+ unsigned saved_queue_if_no_path:1; /* Saved state during suspension */
+ unsigned retain_attached_hw_handler:1; /* If there's already a hw_handler present, don't change it. */
++ unsigned pg_init_disabled:1; /* pg_init is not currently allowed */
+
+ unsigned pg_init_retries; /* Number of times to retry pg_init */
+ unsigned pg_init_count; /* Number of times pg_init called */
+@@ -497,7 +498,8 @@ static void process_queued_ios(struct wo
+ (!pgpath && !m->queue_if_no_path))
+ must_queue = 0;
+
+- if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
++ if (m->pg_init_required && !m->pg_init_in_progress && pgpath &&
++ !m->pg_init_disabled)
+ __pg_init_all_paths(m);
+
+ spin_unlock_irqrestore(&m->lock, flags);
+@@ -942,10 +944,20 @@ static void multipath_wait_for_pg_init_c
+
+ static void flush_multipath_work(struct multipath *m)
+ {
++ unsigned long flags;
++
++ spin_lock_irqsave(&m->lock, flags);
++ m->pg_init_disabled = 1;
++ spin_unlock_irqrestore(&m->lock, flags);
++
+ flush_workqueue(kmpath_handlerd);
+ multipath_wait_for_pg_init_completion(m);
+ flush_workqueue(kmultipathd);
+ flush_work(&m->trigger_event);
++
++ spin_lock_irqsave(&m->lock, flags);
++ m->pg_init_disabled = 0;
++ spin_unlock_irqrestore(&m->lock, flags);
+ }
+
+ static void multipath_dtr(struct dm_target *ti)
+@@ -1164,7 +1176,7 @@ static int pg_init_limit_reached(struct
+
+ spin_lock_irqsave(&m->lock, flags);
+
+- if (m->pg_init_count <= m->pg_init_retries)
++ if (m->pg_init_count <= m->pg_init_retries && !m->pg_init_disabled)
+ m->pg_init_required = 1;
+ else
+ limit_reached = 1;
+@@ -1714,7 +1726,7 @@ out:
+ *---------------------------------------------------------------*/
+ static struct target_type multipath_target = {
+ .name = "multipath",
+- .version = {1, 5, 1},
++ .version = {1, 6, 0},
+ .module = THIS_MODULE,
+ .ctr = multipath_ctr,
+ .dtr = multipath_dtr,
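
Illustration: the fix is the "disable, drain, re-enable" pattern. A
compressed, serialized sketch of the guard logic (user-space model;
plain booleans stand in for the spinlock-protected bitfields):

 #include <stdbool.h>
 #include <stdio.h>

 static bool pg_init_disabled, pg_init_required;

 /* the retry path may no longer re-arm work while disabled */
 static void pg_init_done_retry(void)
 {
 	if (!pg_init_disabled)
 		pg_init_required = true;
 }

 static void flush_multipath_work(void)
 {
 	pg_init_disabled = true;	/* 1. forbid new activations */
 	pg_init_done_retry();		/* 2. drain: a late retry is a no-op */
 	pg_init_disabled = false;	/* 3. re-enable for normal operation */
 }

 int main(void)
 {
 	flush_multipath_work();
 	printf("pg_init_required after flush: %d\n", pg_init_required);
 	return 0;
 }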
--- /dev/null
+From ac7d631f7d9f9e4e6116c4a72b6308067d0a2226 Mon Sep 17 00:00:00 2001
+From: Dave Jiang <dave.jiang@intel.com>
+Date: Wed, 6 Nov 2013 08:50:09 -0700
+Subject: ioatdma: Fix bug in selftest after removal of DMA_MEMSET.
+
+From: Dave Jiang <dave.jiang@intel.com>
+
+commit ac7d631f7d9f9e4e6116c4a72b6308067d0a2226 upstream.
+
+Commit 48a9db4 (3.11) removed the memset op in the xor selftest for ioatdma.
+The issue is that, with the removal of that op, the DMA memset was never
+replaced with a CPU memset. The memory being operated on is expected to be
+zeroed but was not. This is causing the xor selftest to fail.
+
+Signed-off-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/ioat/dma_v3.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/dma/ioat/dma_v3.c
++++ b/drivers/dma/ioat/dma_v3.c
+@@ -1545,6 +1545,8 @@ static int ioat_xor_val_self_test(struct
+ goto free_resources;
+ }
+
++ memset(page_address(dest), 0, PAGE_SIZE);
++
+ /* test for non-zero parity sum */
+ op = IOAT_OP_XOR_VAL;
+
--- /dev/null
+From 5d48b9b5d80e3aa38a5161565398b1e48a650573 Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Wed, 13 Nov 2013 10:15:42 -0800
+Subject: ioatdma: fix sed pool selection
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit 5d48b9b5d80e3aa38a5161565398b1e48a650573 upstream.
+
+The array used to look up the sed pool based on the number of sources
+(pq16_idx_to_sedi) is 16 entries and expects a max source index.
+However, we pass the total source count which runs off the end of the
+array when src_cnt == 16. The minimal fix is to just pass src_cnt-1,
+but given we know the source count is > 8 we can just calculate the sed
+pool by (src_cnt - 2) >> 3.
+
+Cc: Dave Jiang <dave.jiang@intel.com>
+Acked-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/ioat/dma_v3.c | 16 +---------------
+ 1 file changed, 1 insertion(+), 15 deletions(-)
+
+--- a/drivers/dma/ioat/dma_v3.c
++++ b/drivers/dma/ioat/dma_v3.c
+@@ -87,13 +87,6 @@ static const u8 pq_idx_to_field[] = { 1,
+ static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
+ 0, 1, 2, 3, 4, 5, 6 };
+
+-/*
+- * technically sources 1 and 2 do not require SED, but the op will have
+- * at least 9 descriptors so that's irrelevant.
+- */
+-static const u8 pq16_idx_to_sed[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+- 1, 1, 1, 1, 1, 1, 1 };
+-
+ static void ioat3_eh(struct ioat2_dma_chan *ioat);
+
+ static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
+@@ -135,12 +128,6 @@ static void pq_set_src(struct ioat_raw_d
+ pq->coef[idx] = coef;
+ }
+
+-static int sed_get_pq16_pool_idx(int src_cnt)
+-{
+-
+- return pq16_idx_to_sed[src_cnt];
+-}
+-
+ static bool is_jf_ioat(struct pci_dev *pdev)
+ {
+ switch (pdev->device) {
+@@ -1159,8 +1146,7 @@ __ioat3_prep_pq16_lock(struct dma_chan *
+
+ descs[0] = (struct ioat_raw_descriptor *) pq;
+
+- desc->sed = ioat3_alloc_sed(device,
+- sed_get_pq16_pool_idx(src_cnt));
++ desc->sed = ioat3_alloc_sed(device, (src_cnt-2) >> 3);
+ if (!desc->sed) {
+ dev_err(to_dev(chan),
+ "%s: no free sed entries\n", __func__);
--- /dev/null
+From 21e96c7313486390c694919522a76dfea0a86c59 Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Wed, 13 Nov 2013 10:37:36 -0800
+Subject: ioatdma: fix selection of 16 vs 8 source path
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit 21e96c7313486390c694919522a76dfea0a86c59 upstream.
+
+When performing continuations there are implied sources that need to be
+added to the source count. Quoting dma_set_maxpq:
+
+/* dma_maxpq - reduce maxpq in the face of continued operations
+ * @dma - dma device with PQ capability
+ * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
+ *
+ * When an engine does not support native continuation we need 3 extra
+ * source slots to reuse P and Q with the following coefficients:
+ * 1/ {00} * P : remove P from Q', but use it as a source for P'
+ * 2/ {01} * Q : use Q to continue Q' calculation
+ * 3/ {00} * Q : subtract Q from P' to cancel (2)
+ *
+ * In the case where P is disabled we only need 1 extra source:
+ * 1/ {01} * Q : use Q to continue Q' calculation
+ */
+
+...fix the selection of the 16 source path to take these implied sources
+into account.
+
+Note this also kills the BUG_ON(src_cnt < 9) check in
+__ioat3_prep_pq16_lock(). Besides not accounting for implied sources
+the check is redundant given we already made the path selection.
+
+Cc: Dave Jiang <dave.jiang@intel.com>
+Acked-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/ioat/dma_v3.c | 30 +++++++++++++++---------------
+ 1 file changed, 15 insertions(+), 15 deletions(-)
+
+--- a/drivers/dma/ioat/dma_v3.c
++++ b/drivers/dma/ioat/dma_v3.c
+@@ -1116,9 +1116,6 @@ __ioat3_prep_pq16_lock(struct dma_chan *
+ u8 op;
+ int i, s, idx, num_descs;
+
+- /* this function only handles src_cnt 9 - 16 */
+- BUG_ON(src_cnt < 9);
+-
+ /* this function is only called with 9-16 sources */
+ op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S;
+
+@@ -1204,13 +1201,21 @@ __ioat3_prep_pq16_lock(struct dma_chan *
+ return &desc->txd;
+ }
+
++static int src_cnt_flags(unsigned int src_cnt, unsigned long flags)
++{
++ if (dmaf_p_disabled_continue(flags))
++ return src_cnt + 1;
++ else if (dmaf_continue(flags))
++ return src_cnt + 3;
++ else
++ return src_cnt;
++}
++
+ static struct dma_async_tx_descriptor *
+ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf, size_t len,
+ unsigned long flags)
+ {
+- struct dma_device *dma = chan->device;
+-
+ /* specify valid address for disabled result */
+ if (flags & DMA_PREP_PQ_DISABLE_P)
+ dst[0] = dst[1];
+@@ -1230,7 +1235,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma
+ single_source_coef[0] = scf[0];
+ single_source_coef[1] = 0;
+
+- return (src_cnt > 8) && (dma->max_pq > 8) ?
++ return src_cnt_flags(src_cnt, flags) > 8 ?
+ __ioat3_prep_pq16_lock(chan, NULL, dst, single_source,
+ 2, single_source_coef, len,
+ flags) :
+@@ -1238,7 +1243,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma
+ single_source_coef, len, flags);
+
+ } else {
+- return (src_cnt > 8) && (dma->max_pq > 8) ?
++ return src_cnt_flags(src_cnt, flags) > 8 ?
+ __ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
+ scf, len, flags) :
+ __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt,
+@@ -1251,8 +1256,6 @@ ioat3_prep_pq_val(struct dma_chan *chan,
+ unsigned int src_cnt, const unsigned char *scf, size_t len,
+ enum sum_check_flags *pqres, unsigned long flags)
+ {
+- struct dma_device *dma = chan->device;
+-
+ /* specify valid address for disabled result */
+ if (flags & DMA_PREP_PQ_DISABLE_P)
+ pq[0] = pq[1];
+@@ -1264,7 +1267,7 @@ ioat3_prep_pq_val(struct dma_chan *chan,
+ */
+ *pqres = 0;
+
+- return (src_cnt > 8) && (dma->max_pq > 8) ?
++ return src_cnt_flags(src_cnt, flags) > 8 ?
+ __ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
+ flags) :
+ __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
+@@ -1275,7 +1278,6 @@ static struct dma_async_tx_descriptor *
+ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
+ unsigned int src_cnt, size_t len, unsigned long flags)
+ {
+- struct dma_device *dma = chan->device;
+ unsigned char scf[src_cnt];
+ dma_addr_t pq[2];
+
+@@ -1284,7 +1286,7 @@ ioat3_prep_pqxor(struct dma_chan *chan,
+ flags |= DMA_PREP_PQ_DISABLE_Q;
+ pq[1] = dst; /* specify valid address for disabled result */
+
+- return (src_cnt > 8) && (dma->max_pq > 8) ?
++ return src_cnt_flags(src_cnt, flags) > 8 ?
+ __ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
+ flags) :
+ __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
+@@ -1296,7 +1298,6 @@ ioat3_prep_pqxor_val(struct dma_chan *ch
+ unsigned int src_cnt, size_t len,
+ enum sum_check_flags *result, unsigned long flags)
+ {
+- struct dma_device *dma = chan->device;
+ unsigned char scf[src_cnt];
+ dma_addr_t pq[2];
+
+@@ -1310,8 +1311,7 @@ ioat3_prep_pqxor_val(struct dma_chan *ch
+ flags |= DMA_PREP_PQ_DISABLE_Q;
+ pq[1] = pq[0]; /* specify valid address for disabled result */
+
+-
+- return (src_cnt > 8) && (dma->max_pq > 8) ?
++ return src_cnt_flags(src_cnt, flags) > 8 ?
+ __ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
+ scf, len, flags) :
+ __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
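
Illustration: a standalone sketch showing how the implied continuation
sources push a request with 8 or fewer explicit sources onto the
16-source path (the flag bit values here are made up for the sketch; the
real definitions live in the dmaengine headers):

 #include <stdio.h>

 #define DMA_PREP_CONTINUE	(1u << 0)	/* illustrative value */
 #define DMA_PREP_PQ_DISABLE_P	(1u << 1)	/* illustrative value */

 static int src_cnt_flags(unsigned int src_cnt, unsigned long flags)
 {
 	if ((flags & DMA_PREP_CONTINUE) && (flags & DMA_PREP_PQ_DISABLE_P))
 		return src_cnt + 1;	/* only Q is reused */
 	else if (flags & DMA_PREP_CONTINUE)
 		return src_cnt + 3;	/* P and Q reused + cancellation */
 	else
 		return src_cnt;
 }

 int main(void)
 {
 	/* 7 explicit sources + 3 implied -> must take the 16-source path */
 	unsigned int src_cnt = 7;
 	unsigned long flags = DMA_PREP_CONTINUE;
 	int eff = src_cnt_flags(src_cnt, flags);

 	printf("effective sources: %d -> %s path\n",
 	       eff, eff > 8 ? "16-source" : "8-source");
 	return 0;
 }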
--- /dev/null
+From f01b9f73392b48c6cda7c2c66594c73137c776da Mon Sep 17 00:00:00 2001
+From: Vu Pham <vu@mellanox.com>
+Date: Mon, 11 Nov 2013 19:04:29 +0200
+Subject: iser-target: Avoid using FRMR for single dma entry requests
+
+From: Vu Pham <vu@mellanox.com>
+
+commit f01b9f73392b48c6cda7c2c66594c73137c776da upstream.
+
+This patch changes isert_reg_rdma_frwr() to not use FRMR for single
+dma entry requests from small I/Os, in order to avoid the associated
+memory registration overhead.
+
+Using DMA MR is sufficient here for the single dma entry requests,
+and addresses a >= v3.12 performance regression.
+
+Signed-off-by: Vu Pham <vu@mellanox.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/ulp/isert/ib_isert.c | 30 +++++++++++++++++++-----------
+ 1 file changed, 19 insertions(+), 11 deletions(-)
+
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -2259,18 +2259,26 @@ isert_reg_rdma_frwr(struct iscsi_conn *c
+ data_len = min(data_left, rdma_write_max);
+ wr->cur_rdma_length = data_len;
+
+- spin_lock_irqsave(&isert_conn->conn_lock, flags);
+- fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
+- struct fast_reg_descriptor, list);
+- list_del(&fr_desc->list);
+- spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
+- wr->fr_desc = fr_desc;
++ /* if there is a single dma entry, dma mr is sufficient */
++ if (count == 1) {
++ ib_sge->addr = ib_sg_dma_address(ib_dev, &sg_start[0]);
++ ib_sge->length = ib_sg_dma_len(ib_dev, &sg_start[0]);
++ ib_sge->lkey = isert_conn->conn_mr->lkey;
++ wr->fr_desc = NULL;
++ } else {
++ spin_lock_irqsave(&isert_conn->conn_lock, flags);
++ fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
++ struct fast_reg_descriptor, list);
++ list_del(&fr_desc->list);
++ spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
++ wr->fr_desc = fr_desc;
+
+- ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
+- ib_sge, offset, data_len);
+- if (ret) {
+- list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
+- goto unmap_sg;
++ ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
++ ib_sge, offset, data_len);
++ if (ret) {
++ list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
++ goto unmap_sg;
++ }
+ }
+
+ return 0;
--- /dev/null
+From 2afc745f3e3079ab16c826be4860da2529054dd2 Mon Sep 17 00:00:00 2001
+From: Akira Takeuchi <takeuchi.akr@jp.panasonic.com>
+Date: Tue, 12 Nov 2013 15:08:21 -0800
+Subject: mm: ensure get_unmapped_area() returns higher address than mmap_min_addr
+
+From: Akira Takeuchi <takeuchi.akr@jp.panasonic.com>
+
+commit 2afc745f3e3079ab16c826be4860da2529054dd2 upstream.
+
+This patch fixes the problem that get_unmapped_area() can return an
+illegal address and cause mmap(2) etc. to fail.
+
+If an address higher than PAGE_SIZE is set in
+/proc/sys/vm/mmap_min_addr, an address lower than mmap_min_addr can be
+returned by get_unmapped_area(), even if you do not pass any virtual
+address hint (i.e. the second argument).
+
+This is because the current get_unmapped_area() code does not take into
+account mmap_min_addr.
+
+This leads to two actual problems as follows:
+
+1. mmap(2) can fail with EPERM on the process without CAP_SYS_RAWIO,
+ although any illegal parameter is not passed.
+
+2. The bottom-up search path after the top-down search might not work in
+ arch_get_unmapped_area_topdown().
+
+Note: The first and third chunks of my patch, which change the "len" check,
+are for a more precise check using mmap_min_addr, and not for solving the
+above problem.
+
+[How to reproduce]
+
+ --- test.c -------------------------------------------------
+ #include <stdio.h>
+ #include <unistd.h>
+ #include <sys/mman.h>
+ #include <sys/errno.h>
+
+ int main(int argc, char *argv[])
+ {
+ void *ret = NULL, *last_map;
+ size_t pagesize = sysconf(_SC_PAGESIZE);
+
+ do {
+ last_map = ret;
+ ret = mmap(0, pagesize, PROT_NONE,
+ MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+ // printf("ret=%p\n", ret);
+ } while (ret != MAP_FAILED);
+
+ if (errno != ENOMEM) {
+ printf("ERR: unexpected errno: %d (last map=%p)\n",
+ errno, last_map);
+ }
+
+ return 0;
+ }
+ ---------------------------------------------------------------
+
+ $ gcc -m32 -o test test.c
+ $ sudo sysctl -w vm.mmap_min_addr=65536
+ vm.mmap_min_addr = 65536
+ $ ./test (run as a non-privileged user)
+ ERR: unexpected errno: 1 (last map=0x10000)
+
+Signed-off-by: Akira Takeuchi <takeuchi.akr@jp.panasonic.com>
+Signed-off-by: Kiyoshi Owada <owada.kiyoshi@jp.panasonic.com>
+Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/mmap.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -1856,7 +1856,7 @@ arch_get_unmapped_area(struct file *filp
+ struct vm_area_struct *vma;
+ struct vm_unmapped_area_info info;
+
+- if (len > TASK_SIZE)
++ if (len > TASK_SIZE - mmap_min_addr)
+ return -ENOMEM;
+
+ if (flags & MAP_FIXED)
+@@ -1865,7 +1865,7 @@ arch_get_unmapped_area(struct file *filp
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
++ if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+ (!vma || addr + len <= vma->vm_start))
+ return addr;
+ }
+@@ -1895,7 +1895,7 @@ arch_get_unmapped_area_topdown(struct fi
+ struct vm_unmapped_area_info info;
+
+ /* requested length too big for entire address space */
+- if (len > TASK_SIZE)
++ if (len > TASK_SIZE - mmap_min_addr)
+ return -ENOMEM;
+
+ if (flags & MAP_FIXED)
+@@ -1905,14 +1905,14 @@ arch_get_unmapped_area_topdown(struct fi
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
++ if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+ (!vma || addr + len <= vma->vm_start))
+ return addr;
+ }
+
+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+ info.length = len;
+- info.low_limit = PAGE_SIZE;
++ info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+ info.high_limit = mm->mmap_base;
+ info.align_mask = 0;
+ addr = vm_unmapped_area(&info);
--- /dev/null
+From 67d13fe846c57a54d12578e7a4518f68c5c86ad7 Mon Sep 17 00:00:00 2001
+From: Weijie Yang <weijie.yang@samsung.com>
+Date: Tue, 12 Nov 2013 15:08:26 -0800
+Subject: mm/zswap: bugfix: memory leak when invalidate and reclaim occur concurrently
+
+From: Weijie Yang <weijie.yang@samsung.com>
+
+commit 67d13fe846c57a54d12578e7a4518f68c5c86ad7 upstream.
+
+Consider the following scenario:
+
+thread 0: reclaim entry x (get refcount, but not call zswap_get_swap_cache_page)
+thread 1: call zswap_frontswap_invalidate_page to invalidate entry x.
+ finished, entry x and its zbud is not freed as its refcount != 0
+ now, the swap_map[x] = 0
+thread 0: now call zswap_get_swap_cache_page
+ swapcache_prepare return -ENOENT because entry x is not used any more
+ zswap_get_swap_cache_page return ZSWAP_SWAPCACHE_NOMEM
+ zswap_writeback_entry do nothing except put refcount
+
+Now, the memory of zswap_entry x and its zpage leak.
+
+Modify:
+ - check the refcount in the fail path, and free the memory if it is no
+ longer referenced.
+
+ - use ZSWAP_SWAPCACHE_FAIL instead of ZSWAP_SWAPCACHE_NOMEM, as the fail
+ path can be caused not only by nomem but also by invalidate.
+
+Signed-off-by: Weijie Yang <weijie.yang@samsung.com>
+Reviewed-by: Bob Liu <bob.liu@oracle.com>
+Reviewed-by: Minchan Kim <minchan@kernel.org>
+Acked-by: Seth Jennings <sjenning@linux.vnet.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/zswap.c | 22 ++++++++++++++--------
+ 1 file changed, 14 insertions(+), 8 deletions(-)
+
+--- a/mm/zswap.c
++++ b/mm/zswap.c
+@@ -387,7 +387,7 @@ static void zswap_free_entry(struct zswa
+ enum zswap_get_swap_ret {
+ ZSWAP_SWAPCACHE_NEW,
+ ZSWAP_SWAPCACHE_EXIST,
+- ZSWAP_SWAPCACHE_NOMEM
++ ZSWAP_SWAPCACHE_FAIL,
+ };
+
+ /*
+@@ -401,9 +401,10 @@ enum zswap_get_swap_ret {
+ * added to the swap cache, and returned in retpage.
+ *
+ * If success, the swap cache page is returned in retpage
+- * Returns 0 if page was already in the swap cache, page is not locked
+- * Returns 1 if the new page needs to be populated, page is locked
+- * Returns <0 on error
++ * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
++ * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
++ * the new page is added to swapcache and locked
++ * Returns ZSWAP_SWAPCACHE_FAIL on error
+ */
+ static int zswap_get_swap_cache_page(swp_entry_t entry,
+ struct page **retpage)
+@@ -475,7 +476,7 @@ static int zswap_get_swap_cache_page(swp
+ if (new_page)
+ page_cache_release(new_page);
+ if (!found_page)
+- return ZSWAP_SWAPCACHE_NOMEM;
++ return ZSWAP_SWAPCACHE_FAIL;
+ *retpage = found_page;
+ return ZSWAP_SWAPCACHE_EXIST;
+ }
+@@ -529,11 +530,11 @@ static int zswap_writeback_entry(struct
+
+ /* try to allocate swap cache page */
+ switch (zswap_get_swap_cache_page(swpentry, &page)) {
+- case ZSWAP_SWAPCACHE_NOMEM: /* no memory */
++ case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
+ ret = -ENOMEM;
+ goto fail;
+
+- case ZSWAP_SWAPCACHE_EXIST: /* page is unlocked */
++ case ZSWAP_SWAPCACHE_EXIST:
+ /* page is already in the swap cache, ignore for now */
+ page_cache_release(page);
+ ret = -EEXIST;
+@@ -591,7 +592,12 @@ static int zswap_writeback_entry(struct
+
+ fail:
+ spin_lock(&tree->lock);
+- zswap_entry_put(entry);
++ refcount = zswap_entry_put(entry);
++ if (refcount <= 0) {
++ /* invalidate happened, consider writeback as success */
++ zswap_free_entry(tree, entry);
++ ret = 0;
++ }
+ spin_unlock(&tree->lock);
+ return ret;
+ }
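
Illustration: the fail path becomes the standard "put the reference,
free on last put" pattern; zswap_entry_put() in this kernel returns the
remaining refcount, which is what the fix keys off. A minimal user-space
model:

 #include <stdio.h>
 #include <stdlib.h>

 struct entry { int refcount; };

 static int entry_put(struct entry *e) { return --e->refcount; }

 int main(void)
 {
 	struct entry *e = malloc(sizeof(*e));

 	if (!e)
 		return 1;
 	/* writeback holds the only remaining reference: a concurrent
 	 * invalidate already dropped the tree's reference */
 	e->refcount = 1;

 	if (entry_put(e) <= 0) {
 		/* invalidate happened; free here and report success
 		 * instead of leaking the entry and its zpage */
 		free(e);
 		puts("entry freed by the writeback fail path");
 	}
 	return 0;
 }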
--- /dev/null
+From c1fa3426aa5c782724c97394303d52228206eda4 Mon Sep 17 00:00:00 2001
+From: Ludovic Desroches <ludovic.desroches@atmel.com>
+Date: Mon, 9 Sep 2013 17:29:56 +0200
+Subject: mmc: atmel-mci: abort transfer on timeout error
+
+From: Ludovic Desroches <ludovic.desroches@atmel.com>
+
+commit c1fa3426aa5c782724c97394303d52228206eda4 upstream.
+
+When a software timeout occurs, the transfer is not stopped. In the DMA
+case, this causes the DMA channel to get stuck because the transfer is
+still active, causing the following transfers to be queued but never
+computed.
+
+Signed-off-by: Ludovic Desroches <ludovic.desroches@atmel.com>
+Reported-by: Alexander Morozov <etesial@gmail.com>
+Acked-by: Nicolas Ferre <nicolas.ferre@atmel.com>
+Signed-off-by: Chris Ball <cjb@laptop.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/atmel-mci.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/mmc/host/atmel-mci.c
++++ b/drivers/mmc/host/atmel-mci.c
+@@ -589,6 +589,13 @@ static void atmci_timeout_timer(unsigned
+ if (host->mrq->cmd->data) {
+ host->mrq->cmd->data->error = -ETIMEDOUT;
+ host->data = NULL;
++ /*
++ * With some SDIO modules, sometimes DMA transfer hangs. If
++ * stop_transfer() is not called then the DMA request is not
++ * removed, following ones are queued and never computed.
++ */
++ if (host->state == STATE_DATA_XFER)
++ host->stop_transfer(host);
+ } else {
+ host->mrq->cmd->error = -ETIMEDOUT;
+ host->cmd = NULL;
--- /dev/null
+From fbd986cd420d1deeabf1039ec4e74075a5639db5 Mon Sep 17 00:00:00 2001
+From: Rodolfo Giometti <giometti@enneenne.com>
+Date: Mon, 9 Sep 2013 17:31:59 +0200
+Subject: mmc: atmel-mci: fix oops in atmci_tasklet_func
+
+From: Rodolfo Giometti <giometti@enneenne.com>
+
+commit fbd986cd420d1deeabf1039ec4e74075a5639db5 upstream.
+
+In some cases, a NULL pointer dereference happens because data is NULL when
+the STATE_END_REQUEST case is reached in atmci_tasklet_func.
+
+Signed-off-by: Rodolfo Giometti <giometti@enneenne.com>
+Acked-by: Ludovic Desroches <ludovic.desroches@atmel.com>
+Acked-by: Nicolas Ferre <nicolas.ferre@atmel.com>
+Signed-off-by: Chris Ball <cjb@laptop.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/atmel-mci.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+--- a/drivers/mmc/host/atmel-mci.c
++++ b/drivers/mmc/host/atmel-mci.c
+@@ -1810,12 +1810,14 @@ static void atmci_tasklet_func(unsigned
+ if (unlikely(status)) {
+ host->stop_transfer(host);
+ host->data = NULL;
+- if (status & ATMCI_DTOE) {
+- data->error = -ETIMEDOUT;
+- } else if (status & ATMCI_DCRCE) {
+- data->error = -EILSEQ;
+- } else {
+- data->error = -EIO;
++ if (data) {
++ if (status & ATMCI_DTOE) {
++ data->error = -ETIMEDOUT;
++ } else if (status & ATMCI_DCRCE) {
++ data->error = -EILSEQ;
++ } else {
++ data->error = -EIO;
++ }
+ }
+ }
+
--- /dev/null
+From fd432b9f8c7c88428a4635b9f5a9c6e174df6e36 Mon Sep 17 00:00:00 2001
+From: Aaron Lu <aaron.lu@intel.com>
+Date: Wed, 6 Nov 2013 08:41:31 +0800
+Subject: PM / hibernate: Avoid overflow in hibernate_preallocate_memory()
+
+From: Aaron Lu <aaron.lu@intel.com>
+
+commit fd432b9f8c7c88428a4635b9f5a9c6e174df6e36 upstream.
+
+When a system has a lot of highmem (e.g. 16GiB using a 32-bit kernel),
+the code that calculates how much memory we need to preallocate in the
+normal zone may overflow. As Leon has analysed:
+
+ It looks like an overflow occurs while computing the 'alloc' variable:
+ alloc = (3943404 - 1970542) - 1978280 = -5418 (signed)
+ And this function goes to err_out.
+
+Fix this by avoiding that overflow.
+
+References: https://bugzilla.kernel.org/show_bug.cgi?id=60817
+Reported-and-tested-by: Leon Drugi <eyak@wp.pl>
+Signed-off-by: Aaron Lu <aaron.lu@intel.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/power/snapshot.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/kernel/power/snapshot.c
++++ b/kernel/power/snapshot.c
+@@ -1402,7 +1402,11 @@ int hibernate_preallocate_memory(void)
+ * highmem and non-highmem zones separately.
+ */
+ pages_highmem = preallocate_image_highmem(highmem / 2);
+- alloc = (count - max_size) - pages_highmem;
++ alloc = count - max_size;
++ if (alloc > pages_highmem)
++ alloc -= pages_highmem;
++ else
++ alloc = 0;
+ pages = preallocate_image_memory(alloc, avail_normal);
+ if (pages < alloc) {
+ /* We have exhausted non-highmem pages, try highmem. */
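
Illustration: in hibernate_preallocate_memory() these counters are
unsigned long, so the "negative" intermediate result actually wraps to a
huge value and the subsequent preallocation cannot be satisfied. A
standalone demonstration using the numbers from Leon's report:

 #include <stdio.h>

 int main(void)
 {
 	unsigned long count = 3943404;		/* saveable pages */
 	unsigned long max_size = 1970542;	/* maximum image size */
 	unsigned long pages_highmem = 1978280;	/* preallocated highmem */
 	unsigned long alloc = (count - max_size) - pages_highmem;

 	/* wraps: 4294961878 on 32-bit, 18446744073709546198 on 64-bit */
 	printf("alloc = %lu (as signed: %ld)\n", alloc, (long)alloc);

 	/* the fix clamps instead of wrapping */
 	alloc = count - max_size;
 	alloc = alloc > pages_highmem ? alloc - pages_highmem : 0;
 	printf("clamped alloc = %lu\n", alloc);
 	return 0;
 }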
--- /dev/null
+From 6a0c7cd33075f6b7f1d80145bb19812beb3fc5c9 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Thu, 14 Nov 2013 23:26:58 +0100
+Subject: PM / Hibernate: Do not crash kernel in free_basic_memory_bitmaps()
+
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+
+commit 6a0c7cd33075f6b7f1d80145bb19812beb3fc5c9 upstream.
+
+I have received a report about the BUG_ON() in free_basic_memory_bitmaps()
+triggering mysteriously during an aborted s2disk hibernation attempt.
+The only way I can explain that is that /dev/snapshot was first
+opened for writing (resume mode), then closed and then opened again
+for reading and closed again without freezing tasks. In that case
+the first invocation of snapshot_open() would set the free_bitmaps
+flag in snapshot_state, which is a static variable. That flag
+wouldn't be cleared later and the second invocation of snapshot_open()
+would just leave it like that, so the subsequent snapshot_release()
+would see data->frozen set and free_basic_memory_bitmaps() would be
+called unnecessarily.
+
+To prevent that from happening clear data->free_bitmaps in
+snapshot_open() when the file is being opened for reading (hibernate
+mode).
+
+In addition to that, replace the BUG_ON() in free_basic_memory_bitmaps()
+with a WARN_ON() as the kernel can continue just fine if the condition
+checked by that macro occurs.
+
+Fixes: aab172891542 (PM / hibernate: Fix user space driven resume regression)
+Reported-by: Oliver Lorenz <olli@olorenz.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/power/snapshot.c | 3 ++-
+ kernel/power/user.c | 1 +
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+--- a/kernel/power/snapshot.c
++++ b/kernel/power/snapshot.c
+@@ -792,7 +792,8 @@ void free_basic_memory_bitmaps(void)
+ {
+ struct memory_bitmap *bm1, *bm2;
+
+- BUG_ON(!(forbidden_pages_map && free_pages_map));
++ if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
++ return;
+
+ bm1 = forbidden_pages_map;
+ bm2 = free_pages_map;
+--- a/kernel/power/user.c
++++ b/kernel/power/user.c
+@@ -70,6 +70,7 @@ static int snapshot_open(struct inode *i
+ data->swap = swsusp_resume_device ?
+ swap_type_of(swsusp_resume_device, 0, NULL) : -1;
+ data->mode = O_RDONLY;
++ data->free_bitmaps = false;
+ error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
+ if (error)
+ pm_notifier_call_chain(PM_POST_HIBERNATION);
--- /dev/null
+From baab52ded242c35a2290e1fa82e0cc147d0d8c1a Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Thu, 7 Nov 2013 01:51:15 +0100
+Subject: PM / runtime: Use pm_runtime_put_sync() in __device_release_driver()
+
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+
+commit baab52ded242c35a2290e1fa82e0cc147d0d8c1a upstream.
+
+Commit fa180eb448fa (PM / Runtime: Idle devices asynchronously after
+probe|release) modified __device_release_driver() to call
+pm_runtime_put(dev) instead of pm_runtime_put_sync(dev) before
+detaching the driver from the device. However, that was a mistake,
+because pm_runtime_put(dev) causes rpm_idle() to be queued up and
+the driver may be gone already when that function is executed.
+That breaks the assumptions the drivers have the right to make
+about the core's behavior on the basis of the existing documentation
+and actually causes problems, so revert that part of
+commit fa180eb448fa and restore the previous behavior of
+__device_release_driver().
+
+Reported-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
+Fixes: fa180eb448fa (PM / Runtime: Idle devices asynchronously after probe|release)
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Acked-by: Kevin Hilman <khilman@linaro.org>
+Acked-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/base/dd.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -499,7 +499,7 @@ static void __device_release_driver(stru
+ BUS_NOTIFY_UNBIND_DRIVER,
+ dev);
+
+- pm_runtime_put(dev);
++ pm_runtime_put_sync(dev);
+
+ if (dev->bus && dev->bus->remove)
+ dev->bus->remove(dev);
--- /dev/null
+From 6fb392b1a63ae36c31f62bc3fc8630b49d602b62 Mon Sep 17 00:00:00 2001
+From: Ursula Braun <ursula.braun@de.ibm.com>
+Date: Wed, 6 Nov 2013 09:04:52 +0100
+Subject: qeth: avoid buffer overflow in snmp ioctl
+
+From: Ursula Braun <ursula.braun@de.ibm.com>
+
+commit 6fb392b1a63ae36c31f62bc3fc8630b49d602b62 upstream.
+
+Check the user-defined length in the snmp ioctl request and allow the
+request only if it fits into a qeth command buffer.
+
+Signed-off-by: Ursula Braun <ursula.braun@de.ibm.com>
+Signed-off-by: Frank Blaschka <frank.blaschka@de.ibm.com>
+Reviewed-by: Heiko Carstens <heicars2@linux.vnet.ibm.com>
+Reported-by: Nico Golde <nico@ngolde.de>
+Reported-by: Fabian Yamaguchi <fabs@goesec.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/s390/net/qeth_core_main.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -4451,7 +4451,7 @@ int qeth_snmp_command(struct qeth_card *
+ struct qeth_cmd_buffer *iob;
+ struct qeth_ipa_cmd *cmd;
+ struct qeth_snmp_ureq *ureq;
+- int req_len;
++ unsigned int req_len;
+ struct qeth_arp_query_info qinfo = {0, };
+ int rc = 0;
+
+@@ -4467,6 +4467,10 @@ int qeth_snmp_command(struct qeth_card *
+ /* skip 4 bytes (data_len struct member) to get req_len */
+ if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
+ return -EFAULT;
++ if (req_len > (QETH_BUFSIZE - IPA_PDU_HEADER_SIZE -
++ sizeof(struct qeth_ipacmd_hdr) -
++ sizeof(struct qeth_ipacmd_setadpparms_hdr)))
++ return -EINVAL;
+ ureq = memdup_user(udata, req_len + sizeof(struct qeth_snmp_ureq_hdr));
+ if (IS_ERR(ureq)) {
+ QETH_CARD_TEXT(card, 2, "snmpnome");
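
Illustration: the pattern is the standard one for user-controlled
lengths: keep the length unsigned so a "negative" value cannot slip past
the comparison, and validate it against the real capacity (buffer size
minus all stacked headers) before any allocation or copy. A user-space
sketch of the shape of the check (the capacity numbers are placeholders,
not the qeth values):

 #include <stdio.h>
 #include <string.h>

 #define CMD_BUFSIZE	4096	/* placeholder command buffer size */
 #define HDR_SIZE	64	/* placeholder for the stacked headers */

 static int handle_request(const unsigned char *udata, size_t udata_len)
 {
 	unsigned int req_len;

 	if (udata_len < sizeof(req_len))
 		return -1;
 	memcpy(&req_len, udata, sizeof(req_len));	/* copy_from_user() */

 	/* reject anything that cannot fit behind the headers */
 	if (req_len > CMD_BUFSIZE - HDR_SIZE)
 		return -1;

 	printf("accepted request of %u bytes\n", req_len);
 	return 0;
 }

 int main(void)
 {
 	unsigned char buf[8] = { 0xff, 0xff, 0xff, 0xff };	/* huge length */

 	printf("bogus length -> %d\n", handle_request(buf, sizeof(buf)));
 	return 0;
 }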
--- /dev/null
+From 2bf127a5cc372b9319afcbae10b090663b621c8b Mon Sep 17 00:00:00 2001
+From: Stanislaw Gruszka <stf_xl@wp.pl>
+Date: Tue, 15 Oct 2013 14:28:48 +0200
+Subject: rt2400pci: fix RSSI read
+
+From: Stanislaw Gruszka <stf_xl@wp.pl>
+
+commit 2bf127a5cc372b9319afcbae10b090663b621c8b upstream.
+
+RSSI value is provided on word3 not on word2.
+
+Signed-off-by: Stanislaw Gruszka <stf_xl@wp.pl>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/rt2x00/rt2400pci.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/rt2x00/rt2400pci.c
++++ b/drivers/net/wireless/rt2x00/rt2400pci.c
+@@ -1261,7 +1261,7 @@ static void rt2400pci_fill_rxdone(struct
+ */
+ rxdesc->timestamp = ((u64)rx_high << 32) | rx_low;
+ rxdesc->signal = rt2x00_get_field32(word2, RXD_W2_SIGNAL) & ~0x08;
+- rxdesc->rssi = rt2x00_get_field32(word2, RXD_W3_RSSI) -
++ rxdesc->rssi = rt2x00_get_field32(word3, RXD_W3_RSSI) -
+ entry->queue->rt2x00dev->rssi_offset;
+ rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
+
rtlwifi-rtl8192de-fix-incorrect-signal-strength-for-unassociated-ap.patch
rtlwifi-rtl8192se-fix-incorrect-signal-strength-for-unassociated-ap.patch
rtlwifi-rtl8192cu-fix-incorrect-signal-strength-for-unassociated-ap.patch
+ath5k-fix-regression-in-tx-status-processing.patch
+qeth-avoid-buffer-overflow-in-snmp-ioctl.patch
+rt2400pci-fix-rssi-read.patch
+mm-ensure-get_unmapped_area-returns-higher-address-than-mmap_min_addr.patch
+mm-zswap-bugfix-memory-leak-when-invalidate-and-reclaim-occur-concurrently.patch
+mmc-atmel-mci-abort-transfer-on-timeout-error.patch
+mmc-atmel-mci-fix-oops-in-atmci_tasklet_func.patch
+dm-mpath-fix-race-condition-between-multipath_dtr-and-pg_init_done.patch
+dm-array-fix-bug-in-growing-array.patch
+dm-cache-fix-a-race-condition-between-queuing-new-migrations-and-quiescing-for-a-shutdown.patch
+dm-allocate-buffer-for-messages-with-small-number-of-arguments-using-gfp_noio.patch
+ioatdma-fix-bug-in-selftest-after-removal-of-dma_memset.patch
+ioatdma-fix-sed-pool-selection.patch
+ioatdma-fix-selection-of-16-vs-8-source-path.patch
+iser-target-avoid-using-frmr-for-single-dma-entry-requests.patch
+target-fix-delayed-task-aborted-status-tas-handling-bug.patch
+blk-core-fix-memory-corruption-if-blkcg_init_queue-fails.patch
+pm-hibernate-avoid-overflow-in-hibernate_preallocate_memory.patch
+pm-runtime-use-pm_runtime_put_sync-in-__device_release_driver.patch
+pm-hibernate-do-not-crash-kernel-in-free_basic_memory_bitmaps.patch
--- /dev/null
+From 29f4c090079f442ea2723d292e4e64f0b6ac1f27 Mon Sep 17 00:00:00 2001
+From: Nicholas Bellinger <nab@linux-iscsi.org>
+Date: Wed, 13 Nov 2013 14:39:14 -0800
+Subject: target: Fix delayed Task Aborted Status (TAS) handling bug
+
+From: Nicholas Bellinger <nab@linux-iscsi.org>
+
+commit 29f4c090079f442ea2723d292e4e64f0b6ac1f27 upstream.
+
+This patch fixes a bug in delayed Task Aborted Status (TAS) handling,
+where transport_send_task_abort() was not returning for the case
+when the se_tfo->write_pending() callback indicated that the last
+fabric-specific WRITE PDU had not yet been received.
+
+It also adds an explicit cmd->scsi_status = SAM_STAT_TASK_ABORTED
+assignment within transport_check_aborted_status() to avoid the case
+where se_tfo->queue_status() is called when the SAM_STAT_TASK_ABORTED
+assignment + ->queue_status() in transport_send_task_abort() does not
+occur once SCF_SENT_DELAYED_TAS has been set.
+
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/target/target_core_transport.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -2910,6 +2910,7 @@ int transport_check_aborted_status(struc
+ cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));
+
+ cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
++ cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+ trace_target_cmd_complete(cmd);
+ cmd->se_tfo->queue_status(cmd);
+
+@@ -2938,6 +2939,7 @@ void transport_send_task_abort(struct se
+ if (cmd->se_tfo->write_pending_status(cmd) != 0) {
+ cmd->transport_state |= CMD_T_ABORTED;
+ smp_mb__after_atomic_inc();
++ return;
+ }
+ }
+ cmd->scsi_status = SAM_STAT_TASK_ABORTED;