git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.10-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 1 Dec 2013 19:59:38 +0000 (11:59 -0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 1 Dec 2013 19:59:38 +0000 (11:59 -0800)
added patches:
blk-core-fix-memory-corruption-if-blkcg_init_queue-fails.patch
dm-allocate-buffer-for-messages-with-small-number-of-arguments-using-gfp_noio.patch
dm-array-fix-bug-in-growing-array.patch
dm-cache-fix-a-race-condition-between-queuing-new-migrations-and-quiescing-for-a-shutdown.patch
dm-mpath-fix-race-condition-between-multipath_dtr-and-pg_init_done.patch
ioatdma-fix-sed-pool-selection.patch
ioatdma-fix-selection-of-16-vs-8-source-path.patch
mmc-atmel-mci-abort-transfer-on-timeout-error.patch
mmc-atmel-mci-fix-oops-in-atmci_tasklet_func.patch
mm-ensure-get_unmapped_area-returns-higher-address-than-mmap_min_addr.patch
pm-hibernate-avoid-overflow-in-hibernate_preallocate_memory.patch
pm-runtime-use-pm_runtime_put_sync-in-__device_release_driver.patch
qeth-avoid-buffer-overflow-in-snmp-ioctl.patch
rt2400pci-fix-rssi-read.patch

15 files changed:
queue-3.10/blk-core-fix-memory-corruption-if-blkcg_init_queue-fails.patch [new file with mode: 0644]
queue-3.10/dm-allocate-buffer-for-messages-with-small-number-of-arguments-using-gfp_noio.patch [new file with mode: 0644]
queue-3.10/dm-array-fix-bug-in-growing-array.patch [new file with mode: 0644]
queue-3.10/dm-cache-fix-a-race-condition-between-queuing-new-migrations-and-quiescing-for-a-shutdown.patch [new file with mode: 0644]
queue-3.10/dm-mpath-fix-race-condition-between-multipath_dtr-and-pg_init_done.patch [new file with mode: 0644]
queue-3.10/ioatdma-fix-sed-pool-selection.patch [new file with mode: 0644]
queue-3.10/ioatdma-fix-selection-of-16-vs-8-source-path.patch [new file with mode: 0644]
queue-3.10/mm-ensure-get_unmapped_area-returns-higher-address-than-mmap_min_addr.patch [new file with mode: 0644]
queue-3.10/mmc-atmel-mci-abort-transfer-on-timeout-error.patch [new file with mode: 0644]
queue-3.10/mmc-atmel-mci-fix-oops-in-atmci_tasklet_func.patch [new file with mode: 0644]
queue-3.10/pm-hibernate-avoid-overflow-in-hibernate_preallocate_memory.patch [new file with mode: 0644]
queue-3.10/pm-runtime-use-pm_runtime_put_sync-in-__device_release_driver.patch [new file with mode: 0644]
queue-3.10/qeth-avoid-buffer-overflow-in-snmp-ioctl.patch [new file with mode: 0644]
queue-3.10/rt2400pci-fix-rssi-read.patch [new file with mode: 0644]
queue-3.10/series

diff --git a/queue-3.10/blk-core-fix-memory-corruption-if-blkcg_init_queue-fails.patch b/queue-3.10/blk-core-fix-memory-corruption-if-blkcg_init_queue-fails.patch
new file mode 100644 (file)
index 0000000..0c5077d
--- /dev/null
@@ -0,0 +1,76 @@
+From fff4996b7db7955414ac74386efa5e07fd766b50 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Mon, 14 Oct 2013 12:11:36 -0400
+Subject: blk-core: Fix memory corruption if blkcg_init_queue fails
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit fff4996b7db7955414ac74386efa5e07fd766b50 upstream.
+
+If blkcg_init_queue fails, blk_alloc_queue_node doesn't call bdi_destroy
+to clean up structures allocated by the backing dev.
+
+------------[ cut here ]------------
+WARNING: at lib/debugobjects.c:260 debug_print_object+0x85/0xa0()
+ODEBUG: free active (active state 0) object type: percpu_counter hint:           (null)
+Modules linked in: dm_loop dm_mod ip6table_filter ip6_tables uvesafb cfbcopyarea cfbimgblt cfbfillrect fbcon font bitblit fbcon_rotate fbcon_cw fbcon_ud fbcon_ccw softcursor fb fbdev ipt_MASQUERADE iptable_nat nf_nat_ipv4 msr nf_conntrack_ipv4 nf_defrag_ipv4 xt_state ipt_REJECT xt_tcpudp iptable_filter ip_tables x_tables bridge stp llc tun ipv6 cpufreq_userspace cpufreq_stats cpufreq_powersave cpufreq_ondemand cpufreq_conservative spadfs fuse hid_generic usbhid hid raid0 md_mod dmi_sysfs nf_nat_ftp nf_nat nf_conntrack_ftp nf_conntrack lm85 hwmon_vid snd_usb_audio snd_pcm_oss snd_mixer_oss snd_pcm snd_timer snd_page_alloc snd_hwdep snd_usbmidi_lib snd_rawmidi snd soundcore acpi_cpufreq freq_table mperf sata_svw serverworks kvm_amd ide_core ehci_pci ohci_hcd libata ehci_hcd kvm usbcore tg3 usb_common libphy k10temp pcspkr ptp i2c_piix4 i2c_core evdev microcode hwmon rtc_cmos pps_core e100 skge floppy mii processor button unix
+CPU: 0 PID: 2739 Comm: lvchange Tainted: G        W
+3.10.15-devel #14
+Hardware name: empty empty/S3992-E, BIOS 'V1.06   ' 06/09/2009
+ 0000000000000009 ffff88023c3c1ae8 ffffffff813c8fd4 ffff88023c3c1b20
+ ffffffff810399eb ffff88043d35cd58 ffffffff81651940 ffff88023c3c1bf8
+ ffffffff82479d90 0000000000000005 ffff88023c3c1b80 ffffffff81039a67
+Call Trace:
+ [<ffffffff813c8fd4>] dump_stack+0x19/0x1b
+ [<ffffffff810399eb>] warn_slowpath_common+0x6b/0xa0
+ [<ffffffff81039a67>] warn_slowpath_fmt+0x47/0x50
+ [<ffffffff8122aaaf>] ? debug_check_no_obj_freed+0xcf/0x250
+ [<ffffffff81229a15>] debug_print_object+0x85/0xa0
+ [<ffffffff8122abe3>] debug_check_no_obj_freed+0x203/0x250
+ [<ffffffff8113c4ac>] kmem_cache_free+0x20c/0x3a0
+ [<ffffffff811f6709>] blk_alloc_queue_node+0x2a9/0x2c0
+ [<ffffffff811f672e>] blk_alloc_queue+0xe/0x10
+ [<ffffffffa04c0093>] dm_create+0x1a3/0x530 [dm_mod]
+ [<ffffffffa04c6bb0>] ? list_version_get_info+0xe0/0xe0 [dm_mod]
+ [<ffffffffa04c6c07>] dev_create+0x57/0x2b0 [dm_mod]
+ [<ffffffffa04c6bb0>] ? list_version_get_info+0xe0/0xe0 [dm_mod]
+ [<ffffffffa04c6bb0>] ? list_version_get_info+0xe0/0xe0 [dm_mod]
+ [<ffffffffa04c6528>] ctl_ioctl+0x268/0x500 [dm_mod]
+ [<ffffffff81097662>] ? get_lock_stats+0x22/0x70
+ [<ffffffffa04c67ce>] dm_ctl_ioctl+0xe/0x20 [dm_mod]
+ [<ffffffff81161aad>] do_vfs_ioctl+0x2ed/0x520
+ [<ffffffff8116cfc7>] ? fget_light+0x377/0x4e0
+ [<ffffffff81161d2b>] SyS_ioctl+0x4b/0x90
+ [<ffffffff813cff16>] system_call_fastpath+0x1a/0x1f
+---[ end trace 4b5ff0d55673d986 ]---
+------------[ cut here ]------------
+
+This fix should be backported to stable kernels starting with 2.6.37. Note
+that in the kernels prior to 3.5 the affected code is different, but the
+bug is still there - bdi_init is called and bdi_destroy isn't.
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Acked-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-core.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -645,10 +645,12 @@ struct request_queue *blk_alloc_queue_no
+       __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
+       if (blkcg_init_queue(q))
+-              goto fail_id;
++              goto fail_bdi;
+       return q;
++fail_bdi:
++      bdi_destroy(&q->backing_dev_info);
+ fail_id:
+       ida_simple_remove(&blk_queue_ida, q->id);
+ fail_q:
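
For background, the fix follows the kernel's usual goto-unwind error-handling pattern: each failure label releases whatever was set up before it, so a step that fails after the backing_dev_info has been initialized must jump to a label that runs bdi_destroy() first. Below is a minimal standalone sketch of that pattern; the init_*/destroy_* helpers are made up for illustration and are not block-layer functions.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical setup/teardown helpers standing in for the queue id, the
 * backing_dev_info and the blkcg state; init_blkcg() fails on purpose. */
static int init_id(void)    { return 0; }
static int init_bdi(void)   { return 0; }
static int init_blkcg(void) { return -1; }
static void destroy_bdi(void) { puts("bdi destroyed"); }
static void release_id(void)  { puts("id released"); }

static void *alloc_queue(void)
{
    void *q = malloc(64);

    if (!q)
        return NULL;
    if (init_id())
        goto fail_q;
    if (init_bdi())
        goto fail_id;
    if (init_blkcg())
        goto fail_bdi;  /* the bug was jumping to fail_id, skipping destroy_bdi() */
    return q;

fail_bdi:
    destroy_bdi();
fail_id:
    release_id();
fail_q:
    free(q);
    return NULL;
}

int main(void)
{
    return alloc_queue() ? 0 : 1;
}
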
diff --git a/queue-3.10/dm-allocate-buffer-for-messages-with-small-number-of-arguments-using-gfp_noio.patch b/queue-3.10/dm-allocate-buffer-for-messages-with-small-number-of-arguments-using-gfp_noio.patch
new file mode 100644 (file)
index 0000000..8a2c03c
--- /dev/null
@@ -0,0 +1,61 @@
+From f36afb3957353d2529cb2b00f78fdccd14fc5e9c Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Thu, 31 Oct 2013 13:55:45 -0400
+Subject: dm: allocate buffer for messages with small number of arguments using GFP_NOIO
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit f36afb3957353d2529cb2b00f78fdccd14fc5e9c upstream.
+
+dm-mpath and dm-thin must process messages even if some device is
+suspended, so we allocate argv buffer with GFP_NOIO. These messages have
+a small fixed number of arguments.
+
+On the other hand, dm-switch needs to process bulk data using messages
+so excessive use of GFP_NOIO could cause trouble.
+
+The patch also lowers the default number of arguments from 64 to 8, so
+that there is smaller load on GFP_NOIO allocations.
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Acked-by: Alasdair G Kergon <agk@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-table.c |   18 ++++++++++++++++--
+ 1 file changed, 16 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -580,14 +580,28 @@ static int adjoin(struct dm_table *table
+ /*
+  * Used to dynamically allocate the arg array.
++ *
++ * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must
++ * process messages even if some device is suspended. These messages have a
++ * small fixed number of arguments.
++ *
++ * On the other hand, dm-switch needs to process bulk data using messages and
++ * excessive use of GFP_NOIO could cause trouble.
+  */
+ static char **realloc_argv(unsigned *array_size, char **old_argv)
+ {
+       char **argv;
+       unsigned new_size;
++      gfp_t gfp;
+-      new_size = *array_size ? *array_size * 2 : 64;
+-      argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
++      if (*array_size) {
++              new_size = *array_size * 2;
++              gfp = GFP_KERNEL;
++      } else {
++              new_size = 8;
++              gfp = GFP_NOIO;
++      }
++      argv = kmalloc(new_size * sizeof(*argv), gfp);
+       if (argv) {
+               memcpy(argv, old_argv, *array_size * sizeof(*argv));
+               *array_size = new_size;
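
The practical effect of the sizing change: the first call now hands back 8 pointer slots allocated with GFP_NOIO, and only later doublings (16, 32, ...) use GFP_KERNEL, so a message with, say, 20 arguments triggers allocations of 8, 16 and 32 slots. A rough userspace approximation of that sizing logic follows; the strings merely label which gfp flag the kernel code would use, since userspace has no equivalent.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace sketch of realloc_argv()'s sizing after the patch:
 * 8 slots on the first call, doubling afterwards. */
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
    char **argv;
    unsigned new_size;
    const char *flag;

    if (*array_size) {
        new_size = *array_size * 2;
        flag = "GFP_KERNEL";
    } else {
        new_size = 8;
        flag = "GFP_NOIO";
    }
    argv = malloc(new_size * sizeof(*argv));
    if (argv) {
        if (old_argv)
            memcpy(argv, old_argv, *array_size * sizeof(*argv));
        *array_size = new_size;
        printf("grew argv to %u slots (%s)\n", new_size, flag);
    }
    free(old_argv);
    return argv;
}

int main(void)
{
    unsigned array_size = 0;
    char **argv = NULL;

    while (array_size < 20) {   /* e.g. a message with 20 arguments */
        argv = realloc_argv(&array_size, argv);
        if (!argv)
            return 1;
    }
    free(argv);
    return 0;
}
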
diff --git a/queue-3.10/dm-array-fix-bug-in-growing-array.patch b/queue-3.10/dm-array-fix-bug-in-growing-array.patch
new file mode 100644 (file)
index 0000000..2fc47c9
--- /dev/null
@@ -0,0 +1,41 @@
+From 9c1d4de56066e4d6abc66ec188faafd7b303fb08 Mon Sep 17 00:00:00 2001
+From: Joe Thornber <ejt@redhat.com>
+Date: Wed, 30 Oct 2013 11:19:59 +0000
+Subject: dm array: fix bug in growing array
+
+From: Joe Thornber <ejt@redhat.com>
+
+commit 9c1d4de56066e4d6abc66ec188faafd7b303fb08 upstream.
+
+Entries would be lost if the old tail block was partially filled.
+
+Signed-off-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/persistent-data/dm-array.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/md/persistent-data/dm-array.c
++++ b/drivers/md/persistent-data/dm-array.c
+@@ -509,15 +509,18 @@ static int grow_add_tail_block(struct re
+ static int grow_needs_more_blocks(struct resize *resize)
+ {
+       int r;
++      unsigned old_nr_blocks = resize->old_nr_full_blocks;
+       if (resize->old_nr_entries_in_last_block > 0) {
++              old_nr_blocks++;
++
+               r = grow_extend_tail_block(resize, resize->max_entries);
+               if (r)
+                       return r;
+       }
+       r = insert_full_ablocks(resize->info, resize->size_of_block,
+-                              resize->old_nr_full_blocks,
++                              old_nr_blocks,
+                               resize->new_nr_full_blocks,
+                               resize->max_entries, resize->value,
+                               &resize->root);
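
A worked example of the off-by-one, assuming max_entries of 100 per block: growing an array of 250 entries (2 full blocks plus a 50-entry tail) first pads the tail block, index 2, out to 100 entries, but insert_full_ablocks() was still told to start at block 2, so it effectively re-created that block from the fill value and the 50 original tail entries were lost. Passing old_nr_blocks, which now counts the extended tail, makes the insertion start at block 3 and leaves the extended tail intact.
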
diff --git a/queue-3.10/dm-cache-fix-a-race-condition-between-queuing-new-migrations-and-quiescing-for-a-shutdown.patch b/queue-3.10/dm-cache-fix-a-race-condition-between-queuing-new-migrations-and-quiescing-for-a-shutdown.patch
new file mode 100644 (file)
index 0000000..1af0d7a
--- /dev/null
@@ -0,0 +1,149 @@
+From 66cb1910df17b38334153462ec8166e48058035f Mon Sep 17 00:00:00 2001
+From: Joe Thornber <ejt@redhat.com>
+Date: Wed, 30 Oct 2013 17:11:58 +0000
+Subject: dm cache: fix a race condition between queuing new migrations and quiescing for a shutdown
+
+From: Joe Thornber <ejt@redhat.com>
+
+commit 66cb1910df17b38334153462ec8166e48058035f upstream.
+
+The code that was trying to do this was inadequate.  The postsuspend
+method (in ioctl context), needs to wait for the worker thread to
+acknowledge the request to quiesce.  Otherwise the migration count may
+drop to zero temporarily before the worker thread realises we're
+quiescing.  In this case the target will be taken down, but the worker
+thread may have issued a new migration, which will cause an oops when
+it completes.
+
+Signed-off-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-cache-target.c |   54 +++++++++++++++++++++++++++++++------------
+ 1 file changed, 40 insertions(+), 14 deletions(-)
+
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -151,6 +151,9 @@ struct cache {
+       atomic_t nr_migrations;
+       wait_queue_head_t migration_wait;
++      wait_queue_head_t quiescing_wait;
++      atomic_t quiescing_ack;
++
+       /*
+        * cache_size entries, dirty if set
+        */
+@@ -742,8 +745,9 @@ static void cell_defer(struct cache *cac
+ static void cleanup_migration(struct dm_cache_migration *mg)
+ {
+-      dec_nr_migrations(mg->cache);
++      struct cache *cache = mg->cache;
+       free_migration(mg);
++      dec_nr_migrations(cache);
+ }
+ static void migration_failure(struct dm_cache_migration *mg)
+@@ -1340,34 +1344,51 @@ static void writeback_some_dirty_blocks(
+ /*----------------------------------------------------------------
+  * Main worker loop
+  *--------------------------------------------------------------*/
+-static void start_quiescing(struct cache *cache)
++static bool is_quiescing(struct cache *cache)
+ {
++      int r;
+       unsigned long flags;
+       spin_lock_irqsave(&cache->lock, flags);
+-      cache->quiescing = 1;
++      r = cache->quiescing;
+       spin_unlock_irqrestore(&cache->lock, flags);
++
++      return r;
+ }
+-static void stop_quiescing(struct cache *cache)
++static void ack_quiescing(struct cache *cache)
++{
++      if (is_quiescing(cache)) {
++              atomic_inc(&cache->quiescing_ack);
++              wake_up(&cache->quiescing_wait);
++      }
++}
++
++static void wait_for_quiescing_ack(struct cache *cache)
++{
++      wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack));
++}
++
++static void start_quiescing(struct cache *cache)
+ {
+       unsigned long flags;
+       spin_lock_irqsave(&cache->lock, flags);
+-      cache->quiescing = 0;
++      cache->quiescing = true;
+       spin_unlock_irqrestore(&cache->lock, flags);
++
++      wait_for_quiescing_ack(cache);
+ }
+-static bool is_quiescing(struct cache *cache)
++static void stop_quiescing(struct cache *cache)
+ {
+-      int r;
+       unsigned long flags;
+       spin_lock_irqsave(&cache->lock, flags);
+-      r = cache->quiescing;
++      cache->quiescing = false;
+       spin_unlock_irqrestore(&cache->lock, flags);
+-      return r;
++      atomic_set(&cache->quiescing_ack, 0);
+ }
+ static void wait_for_migrations(struct cache *cache)
+@@ -1414,16 +1435,15 @@ static void do_worker(struct work_struct
+       struct cache *cache = container_of(ws, struct cache, worker);
+       do {
+-              if (!is_quiescing(cache))
++              if (!is_quiescing(cache)) {
++                      writeback_some_dirty_blocks(cache);
++                      process_deferred_writethrough_bios(cache);
+                       process_deferred_bios(cache);
++              }
+               process_migrations(cache, &cache->quiesced_migrations, issue_copy);
+               process_migrations(cache, &cache->completed_migrations, complete_migration);
+-              writeback_some_dirty_blocks(cache);
+-
+-              process_deferred_writethrough_bios(cache);
+-
+               if (commit_if_needed(cache)) {
+                       process_deferred_flush_bios(cache, false);
+@@ -1436,6 +1456,9 @@ static void do_worker(struct work_struct
+                       process_migrations(cache, &cache->need_commit_migrations,
+                                          migration_success_post_commit);
+               }
++
++              ack_quiescing(cache);
++
+       } while (more_work(cache));
+ }
+@@ -1998,6 +2021,9 @@ static int cache_create(struct cache_arg
+       atomic_set(&cache->nr_migrations, 0);
+       init_waitqueue_head(&cache->migration_wait);
++      init_waitqueue_head(&cache->quiescing_wait);
++      atomic_set(&cache->quiescing_ack, 0);
++
+       r = -ENOMEM;
+       cache->nr_dirty = 0;
+       cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
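
The handshake the patch adds can be illustrated outside the kernel: the suspend path raises a flag and then sleeps until the worker has actually observed it, so once the wait returns the worker can no longer start new work. A small pthread sketch of that pattern follows, with a condition variable standing in for the kernel waitqueue and names chosen to mirror the patch.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t quiescing_wait = PTHREAD_COND_INITIALIZER;
static bool quiescing, quiescing_ack, stop_worker;

static void *worker(void *unused)
{
    (void)unused;
    for (;;) {
        pthread_mutex_lock(&lock);
        if (stop_worker) {
            pthread_mutex_unlock(&lock);
            return NULL;
        }
        if (!quiescing) {
            /* normal work: this is where new migrations would be issued */
        } else if (!quiescing_ack) {
            quiescing_ack = true;               /* ack_quiescing() */
            pthread_cond_signal(&quiescing_wait);
        }
        pthread_mutex_unlock(&lock);
        usleep(1000);
    }
}

static void start_quiescing(void)
{
    pthread_mutex_lock(&lock);
    quiescing = true;
    while (!quiescing_ack)                      /* wait_for_quiescing_ack() */
        pthread_cond_wait(&quiescing_wait, &lock);
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, worker, NULL);
    start_quiescing();
    puts("worker acknowledged quiescing; no new work can be queued now");
    pthread_mutex_lock(&lock);
    stop_worker = true;
    pthread_mutex_unlock(&lock);
    pthread_join(t, NULL);
    return 0;
}
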
diff --git a/queue-3.10/dm-mpath-fix-race-condition-between-multipath_dtr-and-pg_init_done.patch b/queue-3.10/dm-mpath-fix-race-condition-between-multipath_dtr-and-pg_init_done.patch
new file mode 100644 (file)
index 0000000..83d60ab
--- /dev/null
@@ -0,0 +1,105 @@
+From 954a73d5d3073df2231820c718fdd2f18b0fe4c9 Mon Sep 17 00:00:00 2001
+From: Shiva Krishna Merla <shivakrishna.merla@netapp.com>
+Date: Wed, 30 Oct 2013 03:26:38 +0000
+Subject: dm mpath: fix race condition between multipath_dtr and pg_init_done
+
+From: Shiva Krishna Merla <shivakrishna.merla@netapp.com>
+
+commit 954a73d5d3073df2231820c718fdd2f18b0fe4c9 upstream.
+
+Whenever multipath_dtr() is happening we must prevent queueing any
+further path activation work.  Implement this by adding a new
+'pg_init_disabled' flag to the multipath structure that denotes future
+path activation work should be skipped if it is set.  By disabling
+pg_init and then re-enabling in flush_multipath_work() we also avoid the
+potential for pg_init to be initiated while suspending an mpath device.
+
+Without this patch a race condition exists that may result in a kernel
+panic:
+
+1) If after pg_init_done() decrements pg_init_in_progress to 0, a call
+   to wait_for_pg_init_completion() assumes there are no more pending path
+   management commands.
+2) If pg_init_required is set by pg_init_done(), due to retryable
+   mode_select errors, then process_queued_ios() will again queue the
+   path activation work.
+3) If free_multipath() completes before activate_path() work is called a
+   NULL pointer dereference like the following can be seen when
+   accessing members of the recently destructed multipath:
+
+BUG: unable to handle kernel NULL pointer dereference at 0000000000000090
+RIP: 0010:[<ffffffffa003db1b>]  [<ffffffffa003db1b>] activate_path+0x1b/0x30 [dm_multipath]
+[<ffffffff81090ac0>] worker_thread+0x170/0x2a0
+[<ffffffff81096c80>] ? autoremove_wake_function+0x0/0x40
+
+[switch to disabling pg_init in flush_multipath_work & header edits by Mike Snitzer]
+Signed-off-by: Shiva Krishna Merla <shivakrishna.merla@netapp.com>
+Reviewed-by: Krishnasamy Somasundaram <somasundaram.krishnasamy@netapp.com>
+Tested-by: Speagle Andy <Andy.Speagle@netapp.com>
+Acked-by: Junichi Nomura <j-nomura@ce.jp.nec.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-mpath.c |   18 +++++++++++++++---
+ 1 file changed, 15 insertions(+), 3 deletions(-)
+
+--- a/drivers/md/dm-mpath.c
++++ b/drivers/md/dm-mpath.c
+@@ -86,6 +86,7 @@ struct multipath {
+       unsigned queue_if_no_path:1;    /* Queue I/O if last path fails? */
+       unsigned saved_queue_if_no_path:1; /* Saved state during suspension */
+       unsigned retain_attached_hw_handler:1; /* If there's already a hw_handler present, don't change it. */
++      unsigned pg_init_disabled:1;    /* pg_init is not currently allowed */
+       unsigned pg_init_retries;       /* Number of times to retry pg_init */
+       unsigned pg_init_count;         /* Number of times pg_init called */
+@@ -497,7 +498,8 @@ static void process_queued_ios(struct wo
+           (!pgpath && !m->queue_if_no_path))
+               must_queue = 0;
+-      if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
++      if (m->pg_init_required && !m->pg_init_in_progress && pgpath &&
++          !m->pg_init_disabled)
+               __pg_init_all_paths(m);
+       spin_unlock_irqrestore(&m->lock, flags);
+@@ -942,10 +944,20 @@ static void multipath_wait_for_pg_init_c
+ static void flush_multipath_work(struct multipath *m)
+ {
++      unsigned long flags;
++
++      spin_lock_irqsave(&m->lock, flags);
++      m->pg_init_disabled = 1;
++      spin_unlock_irqrestore(&m->lock, flags);
++
+       flush_workqueue(kmpath_handlerd);
+       multipath_wait_for_pg_init_completion(m);
+       flush_workqueue(kmultipathd);
+       flush_work(&m->trigger_event);
++
++      spin_lock_irqsave(&m->lock, flags);
++      m->pg_init_disabled = 0;
++      spin_unlock_irqrestore(&m->lock, flags);
+ }
+ static void multipath_dtr(struct dm_target *ti)
+@@ -1164,7 +1176,7 @@ static int pg_init_limit_reached(struct
+       spin_lock_irqsave(&m->lock, flags);
+-      if (m->pg_init_count <= m->pg_init_retries)
++      if (m->pg_init_count <= m->pg_init_retries && !m->pg_init_disabled)
+               m->pg_init_required = 1;
+       else
+               limit_reached = 1;
+@@ -1699,7 +1711,7 @@ out:
+  *---------------------------------------------------------------*/
+ static struct target_type multipath_target = {
+       .name = "multipath",
+-      .version = {1, 5, 1},
++      .version = {1, 6, 0},
+       .module = THIS_MODULE,
+       .ctr = multipath_ctr,
+       .dtr = multipath_dtr,
diff --git a/queue-3.10/ioatdma-fix-sed-pool-selection.patch b/queue-3.10/ioatdma-fix-sed-pool-selection.patch
new file mode 100644 (file)
index 0000000..a462d4a
--- /dev/null
@@ -0,0 +1,64 @@
+From 5d48b9b5d80e3aa38a5161565398b1e48a650573 Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Wed, 13 Nov 2013 10:15:42 -0800
+Subject: ioatdma: fix sed pool selection
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit 5d48b9b5d80e3aa38a5161565398b1e48a650573 upstream.
+
+The array to lookup the sed pool based on the number of sources
+(pq16_idx_to_sedi) is 16 entries and expects a max source index.
+However, we pass the total source count which runs off the end of the
+array when src_cnt == 16.  The minimal fix is to just pass src_cnt-1,
+but given we know the source count is > 8 we can just calculate the sed
+pool by (src_cnt - 2) >> 3.
+
+Cc: Dave Jiang <dave.jiang@intel.com>
+Acked-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/ioat/dma_v3.c |   16 +---------------
+ 1 file changed, 1 insertion(+), 15 deletions(-)
+
+--- a/drivers/dma/ioat/dma_v3.c
++++ b/drivers/dma/ioat/dma_v3.c
+@@ -87,13 +87,6 @@ static const u8 pq_idx_to_field[] = { 1,
+ static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
+                                       0, 1, 2, 3, 4, 5, 6 };
+-/*
+- * technically sources 1 and 2 do not require SED, but the op will have
+- * at least 9 descriptors so that's irrelevant.
+- */
+-static const u8 pq16_idx_to_sed[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+-                                    1, 1, 1, 1, 1, 1, 1 };
+-
+ static void ioat3_eh(struct ioat2_dma_chan *ioat);
+ static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
+@@ -135,12 +128,6 @@ static void pq_set_src(struct ioat_raw_d
+       pq->coef[idx] = coef;
+ }
+-static int sed_get_pq16_pool_idx(int src_cnt)
+-{
+-
+-      return pq16_idx_to_sed[src_cnt];
+-}
+-
+ static bool is_jf_ioat(struct pci_dev *pdev)
+ {
+       switch (pdev->device) {
+@@ -1212,8 +1199,7 @@ __ioat3_prep_pq16_lock(struct dma_chan *
+               descs[0] = (struct ioat_raw_descriptor *) pq;
+-              desc->sed = ioat3_alloc_sed(device,
+-                                          sed_get_pq16_pool_idx(src_cnt));
++              desc->sed = ioat3_alloc_sed(device, (src_cnt-2) >> 3);
+               if (!desc->sed) {
+                       dev_err(to_dev(chan),
+                               "%s: no free sed entries\n", __func__);
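
The replacement expression can be checked against the table it replaces: for the 9-16 source counts this path handles, (src_cnt - 2) >> 3 equals pq16_idx_to_sed[src_cnt - 1], i.e. pool 0 only for 9 sources and pool 1 for 10-16, without ever indexing past the 16-entry array. A tiny standalone check, with the table values copied from the removed array:

#include <assert.h>
#include <stdio.h>

int main(void)
{
    /* values copied from the pq16_idx_to_sed[] table removed by the patch,
     * which was meant to be indexed by src_cnt - 1 */
    static const unsigned char pq16_idx_to_sed[16] =
        { 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1 };
    int src_cnt;

    for (src_cnt = 9; src_cnt <= 16; src_cnt++) {
        int pool = (src_cnt - 2) >> 3;          /* calculation used by the fix */
        assert(pool == pq16_idx_to_sed[src_cnt - 1]);
        printf("src_cnt=%2d -> sed pool %d\n", src_cnt, pool);
    }
    return 0;
}
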
diff --git a/queue-3.10/ioatdma-fix-selection-of-16-vs-8-source-path.patch b/queue-3.10/ioatdma-fix-selection-of-16-vs-8-source-path.patch
new file mode 100644 (file)
index 0000000..c38a303
--- /dev/null
@@ -0,0 +1,149 @@
+From 21e96c7313486390c694919522a76dfea0a86c59 Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Wed, 13 Nov 2013 10:37:36 -0800
+Subject: ioatdma: fix selection of 16 vs 8 source path
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit 21e96c7313486390c694919522a76dfea0a86c59 upstream.
+
+When performing continuations there are implied sources that need to be
+added to the source count. Quoting dma_set_maxpq:
+
+/* dma_maxpq - reduce maxpq in the face of continued operations
+ * @dma - dma device with PQ capability
+ * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
+ *
+ * When an engine does not support native continuation we need 3 extra
+ * source slots to reuse P and Q with the following coefficients:
+ * 1/ {00} * P : remove P from Q', but use it as a source for P'
+ * 2/ {01} * Q : use Q to continue Q' calculation
+ * 3/ {00} * Q : subtract Q from P' to cancel (2)
+ *
+ * In the case where P is disabled we only need 1 extra source:
+ * 1/ {01} * Q : use Q to continue Q' calculation
+ */
+
+...fix the selection of the 16 source path to take these implied sources
+into account.
+
+Note this also kills the BUG_ON(src_cnt < 9) check in
+__ioat3_prep_pq16_lock().  Besides not accounting for implied sources
+the check is redundant given we already made the path selection.
+
+Cc: Dave Jiang <dave.jiang@intel.com>
+Acked-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/ioat/dma_v3.c |   30 +++++++++++++++---------------
+ 1 file changed, 15 insertions(+), 15 deletions(-)
+
+--- a/drivers/dma/ioat/dma_v3.c
++++ b/drivers/dma/ioat/dma_v3.c
+@@ -1169,9 +1169,6 @@ __ioat3_prep_pq16_lock(struct dma_chan *
+       u8 op;
+       int i, s, idx, num_descs;
+-      /* this function only handles src_cnt 9 - 16 */
+-      BUG_ON(src_cnt < 9);
+-
+       /* this function is only called with 9-16 sources */
+       op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S;
+@@ -1257,13 +1254,21 @@ __ioat3_prep_pq16_lock(struct dma_chan *
+       return &desc->txd;
+ }
++static int src_cnt_flags(unsigned int src_cnt, unsigned long flags)
++{
++      if (dmaf_p_disabled_continue(flags))
++              return src_cnt + 1;
++      else if (dmaf_continue(flags))
++              return src_cnt + 3;
++      else
++              return src_cnt;
++}
++
+ static struct dma_async_tx_descriptor *
+ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
+             unsigned int src_cnt, const unsigned char *scf, size_t len,
+             unsigned long flags)
+ {
+-      struct dma_device *dma = chan->device;
+-
+       /* specify valid address for disabled result */
+       if (flags & DMA_PREP_PQ_DISABLE_P)
+               dst[0] = dst[1];
+@@ -1283,7 +1288,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma
+               single_source_coef[0] = scf[0];
+               single_source_coef[1] = 0;
+-              return (src_cnt > 8) && (dma->max_pq > 8) ?
++              return src_cnt_flags(src_cnt, flags) > 8 ?
+                       __ioat3_prep_pq16_lock(chan, NULL, dst, single_source,
+                                              2, single_source_coef, len,
+                                              flags) :
+@@ -1291,7 +1296,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma
+                                            single_source_coef, len, flags);
+       } else {
+-              return (src_cnt > 8) && (dma->max_pq > 8) ?
++              return src_cnt_flags(src_cnt, flags) > 8 ?
+                       __ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
+                                              scf, len, flags) :
+                       __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt,
+@@ -1304,8 +1309,6 @@ ioat3_prep_pq_val(struct dma_chan *chan,
+                 unsigned int src_cnt, const unsigned char *scf, size_t len,
+                 enum sum_check_flags *pqres, unsigned long flags)
+ {
+-      struct dma_device *dma = chan->device;
+-
+       /* specify valid address for disabled result */
+       if (flags & DMA_PREP_PQ_DISABLE_P)
+               pq[0] = pq[1];
+@@ -1317,7 +1320,7 @@ ioat3_prep_pq_val(struct dma_chan *chan,
+        */
+       *pqres = 0;
+-      return (src_cnt > 8) && (dma->max_pq > 8) ?
++      return src_cnt_flags(src_cnt, flags) > 8 ?
+               __ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
+                                      flags) :
+               __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
+@@ -1328,7 +1331,6 @@ static struct dma_async_tx_descriptor *
+ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
+                unsigned int src_cnt, size_t len, unsigned long flags)
+ {
+-      struct dma_device *dma = chan->device;
+       unsigned char scf[src_cnt];
+       dma_addr_t pq[2];
+@@ -1337,7 +1339,7 @@ ioat3_prep_pqxor(struct dma_chan *chan,
+       flags |= DMA_PREP_PQ_DISABLE_Q;
+       pq[1] = dst; /* specify valid address for disabled result */
+-      return (src_cnt > 8) && (dma->max_pq > 8) ?
++      return src_cnt_flags(src_cnt, flags) > 8 ?
+               __ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
+                                      flags) :
+               __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
+@@ -1349,7 +1351,6 @@ ioat3_prep_pqxor_val(struct dma_chan *ch
+                    unsigned int src_cnt, size_t len,
+                    enum sum_check_flags *result, unsigned long flags)
+ {
+-      struct dma_device *dma = chan->device;
+       unsigned char scf[src_cnt];
+       dma_addr_t pq[2];
+@@ -1363,8 +1364,7 @@ ioat3_prep_pqxor_val(struct dma_chan *ch
+       flags |= DMA_PREP_PQ_DISABLE_Q;
+       pq[1] = pq[0]; /* specify valid address for disabled result */
+-
+-      return (src_cnt > 8) && (dma->max_pq > 8) ?
++      return src_cnt_flags(src_cnt, flags) > 8 ?
+               __ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
+                                      scf, len, flags) :
+               __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
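
A worked example of why the implied sources matter: a continued P+Q operation (DMA_PREP_CONTINUE set, P not disabled) with src_cnt = 6 really consumes 6 + 3 = 9 source slots, so it must take the 16-source descriptor path even though the old src_cnt > 8 test would have kept it on the 8-source one. With DMA_PREP_PQ_DISABLE_P the overhead is a single extra slot, so the same 6-source operation (6 + 1 = 7) still fits the 8-source path.
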
diff --git a/queue-3.10/mm-ensure-get_unmapped_area-returns-higher-address-than-mmap_min_addr.patch b/queue-3.10/mm-ensure-get_unmapped_area-returns-higher-address-than-mmap_min_addr.patch
new file mode 100644 (file)
index 0000000..b86eecf
--- /dev/null
@@ -0,0 +1,124 @@
+From 2afc745f3e3079ab16c826be4860da2529054dd2 Mon Sep 17 00:00:00 2001
+From: Akira Takeuchi <takeuchi.akr@jp.panasonic.com>
+Date: Tue, 12 Nov 2013 15:08:21 -0800
+Subject: mm: ensure get_unmapped_area() returns higher address than mmap_min_addr
+
+From: Akira Takeuchi <takeuchi.akr@jp.panasonic.com>
+
+commit 2afc745f3e3079ab16c826be4860da2529054dd2 upstream.
+
+This patch fixes the problem that get_unmapped_area() can return illegal
+address and result in failing mmap(2) etc.
+
+In case that the address higher than PAGE_SIZE is set to
+/proc/sys/vm/mmap_min_addr, the address lower than mmap_min_addr can be
+returned by get_unmapped_area(), even if you do not pass any virtual
+address hint (i.e.  the second argument).
+
+This is because the current get_unmapped_area() code does not take into
+account mmap_min_addr.
+
+This leads to two actual problems as follows:
+
+1. mmap(2) can fail with EPERM on the process without CAP_SYS_RAWIO,
+   although any illegal parameter is not passed.
+
+2. The bottom-up search path after the top-down search might not work in
+   arch_get_unmapped_area_topdown().
+
+Note: The first and third chunk of my patch, which changes "len" check,
+are for more precise check using mmap_min_addr, and not for solving the
+above problem.
+
+[How to reproduce]
+
+       --- test.c -------------------------------------------------
+       #include <stdio.h>
+       #include <unistd.h>
+       #include <sys/mman.h>
+       #include <sys/errno.h>
+
+       int main(int argc, char *argv[])
+       {
+               void *ret = NULL, *last_map;
+               size_t pagesize = sysconf(_SC_PAGESIZE);
+
+               do {
+                       last_map = ret;
+                       ret = mmap(0, pagesize, PROT_NONE,
+                               MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+       //              printf("ret=%p\n", ret);
+               } while (ret != MAP_FAILED);
+
+               if (errno != ENOMEM) {
+                       printf("ERR: unexpected errno: %d (last map=%p)\n",
+                       errno, last_map);
+               }
+
+               return 0;
+       }
+       ---------------------------------------------------------------
+
+       $ gcc -m32 -o test test.c
+       $ sudo sysctl -w vm.mmap_min_addr=65536
+       vm.mmap_min_addr = 65536
+       $ ./test  (run as non-privileged user)
+       ERR: unexpected errno: 1 (last map=0x10000)
+
+Signed-off-by: Akira Takeuchi <takeuchi.akr@jp.panasonic.com>
+Signed-off-by: Kiyoshi Owada <owada.kiyoshi@jp.panasonic.com>
+Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/mmap.c |   10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -1853,7 +1853,7 @@ arch_get_unmapped_area(struct file *filp
+       struct vm_area_struct *vma;
+       struct vm_unmapped_area_info info;
+-      if (len > TASK_SIZE)
++      if (len > TASK_SIZE - mmap_min_addr)
+               return -ENOMEM;
+       if (flags & MAP_FIXED)
+@@ -1862,7 +1862,7 @@ arch_get_unmapped_area(struct file *filp
+       if (addr) {
+               addr = PAGE_ALIGN(addr);
+               vma = find_vma(mm, addr);
+-              if (TASK_SIZE - len >= addr &&
++              if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+                   (!vma || addr + len <= vma->vm_start))
+                       return addr;
+       }
+@@ -1901,7 +1901,7 @@ arch_get_unmapped_area_topdown(struct fi
+       struct vm_unmapped_area_info info;
+       /* requested length too big for entire address space */
+-      if (len > TASK_SIZE)
++      if (len > TASK_SIZE - mmap_min_addr)
+               return -ENOMEM;
+       if (flags & MAP_FIXED)
+@@ -1911,14 +1911,14 @@ arch_get_unmapped_area_topdown(struct fi
+       if (addr) {
+               addr = PAGE_ALIGN(addr);
+               vma = find_vma(mm, addr);
+-              if (TASK_SIZE - len >= addr &&
++              if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+                               (!vma || addr + len <= vma->vm_start))
+                       return addr;
+       }
+       info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+       info.length = len;
+-      info.low_limit = PAGE_SIZE;
++      info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+       info.high_limit = mm->mmap_base;
+       info.align_mask = 0;
+       addr = vm_unmapped_area(&info);
diff --git a/queue-3.10/mmc-atmel-mci-abort-transfer-on-timeout-error.patch b/queue-3.10/mmc-atmel-mci-abort-transfer-on-timeout-error.patch
new file mode 100644 (file)
index 0000000..9c26a8d
--- /dev/null
@@ -0,0 +1,39 @@
+From c1fa3426aa5c782724c97394303d52228206eda4 Mon Sep 17 00:00:00 2001
+From: Ludovic Desroches <ludovic.desroches@atmel.com>
+Date: Mon, 9 Sep 2013 17:29:56 +0200
+Subject: mmc: atmel-mci: abort transfer on timeout error
+
+From: Ludovic Desroches <ludovic.desroches@atmel.com>
+
+commit c1fa3426aa5c782724c97394303d52228206eda4 upstream.
+
+When a software timeout occurs, the transfer is not stopped. In DMA case,
+it causes DMA channel to be stuck because the transfer is still active
+causing following transfers to be queued but not computed.
+
+Signed-off-by: Ludovic Desroches <ludovic.desroches@atmel.com>
+Reported-by: Alexander Morozov <etesial@gmail.com>
+Acked-by: Nicolas Ferre <nicolas.ferre@atmel.com>
+Signed-off-by: Chris Ball <cjb@laptop.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/atmel-mci.c |    7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/mmc/host/atmel-mci.c
++++ b/drivers/mmc/host/atmel-mci.c
+@@ -584,6 +584,13 @@ static void atmci_timeout_timer(unsigned
+       if (host->mrq->cmd->data) {
+               host->mrq->cmd->data->error = -ETIMEDOUT;
+               host->data = NULL;
++              /*
++               * With some SDIO modules, sometimes DMA transfer hangs. If
++               * stop_transfer() is not called then the DMA request is not
++               * removed, following ones are queued and never computed.
++               */
++              if (host->state == STATE_DATA_XFER)
++                      host->stop_transfer(host);
+       } else {
+               host->mrq->cmd->error = -ETIMEDOUT;
+               host->cmd = NULL;
diff --git a/queue-3.10/mmc-atmel-mci-fix-oops-in-atmci_tasklet_func.patch b/queue-3.10/mmc-atmel-mci-fix-oops-in-atmci_tasklet_func.patch
new file mode 100644 (file)
index 0000000..a11b01f
--- /dev/null
@@ -0,0 +1,45 @@
+From fbd986cd420d1deeabf1039ec4e74075a5639db5 Mon Sep 17 00:00:00 2001
+From: Rodolfo Giometti <giometti@enneenne.com>
+Date: Mon, 9 Sep 2013 17:31:59 +0200
+Subject: mmc: atmel-mci: fix oops in atmci_tasklet_func
+
+From: Rodolfo Giometti <giometti@enneenne.com>
+
+commit fbd986cd420d1deeabf1039ec4e74075a5639db5 upstream.
+
+In some cases, a NULL pointer dereference happens because data is NULL when
+STATE_END_REQUEST case is reached in atmci_tasklet_func.
+
+Signed-off-by: Rodolfo Giometti <giometti@enneenne.com>
+Acked-by: Ludovic Desroches <ludovic.desroches@atmel.com>
+Acked-by: Nicolas Ferre <nicolas.ferre@atmel.com>
+Signed-off-by: Chris Ball <cjb@laptop.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/atmel-mci.c |   14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+--- a/drivers/mmc/host/atmel-mci.c
++++ b/drivers/mmc/host/atmel-mci.c
+@@ -1794,12 +1794,14 @@ static void atmci_tasklet_func(unsigned
+                       if (unlikely(status)) {
+                               host->stop_transfer(host);
+                               host->data = NULL;
+-                              if (status & ATMCI_DTOE) {
+-                                      data->error = -ETIMEDOUT;
+-                              } else if (status & ATMCI_DCRCE) {
+-                                      data->error = -EILSEQ;
+-                              } else {
+-                                      data->error = -EIO;
++                              if (data) {
++                                      if (status & ATMCI_DTOE) {
++                                              data->error = -ETIMEDOUT;
++                                      } else if (status & ATMCI_DCRCE) {
++                                              data->error = -EILSEQ;
++                                      } else {
++                                              data->error = -EIO;
++                                      }
+                               }
+                       }
diff --git a/queue-3.10/pm-hibernate-avoid-overflow-in-hibernate_preallocate_memory.patch b/queue-3.10/pm-hibernate-avoid-overflow-in-hibernate_preallocate_memory.patch
new file mode 100644 (file)
index 0000000..4b75140
--- /dev/null
@@ -0,0 +1,44 @@
+From fd432b9f8c7c88428a4635b9f5a9c6e174df6e36 Mon Sep 17 00:00:00 2001
+From: Aaron Lu <aaron.lu@intel.com>
+Date: Wed, 6 Nov 2013 08:41:31 +0800
+Subject: PM / hibernate: Avoid overflow in hibernate_preallocate_memory()
+
+From: Aaron Lu <aaron.lu@intel.com>
+
+commit fd432b9f8c7c88428a4635b9f5a9c6e174df6e36 upstream.
+
+When system has a lot of highmem (e.g. 16GiB using a 32 bits kernel),
+the code to calculate how much memory we need to preallocate in
+normal zone may cause overflow. As Leon has analysed:
+
+ It looks that during computing 'alloc' variable there is overflow:
+ alloc = (3943404 - 1970542) - 1978280 = -5418 (signed)
+ And this function goes to err_out.
+
+Fix this by avoiding that overflow.
+
+References: https://bugzilla.kernel.org/show_bug.cgi?id=60817
+Reported-and-tested-by: Leon Drugi <eyak@wp.pl>
+Signed-off-by: Aaron Lu <aaron.lu@intel.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/power/snapshot.c |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/kernel/power/snapshot.c
++++ b/kernel/power/snapshot.c
+@@ -1398,7 +1398,11 @@ int hibernate_preallocate_memory(void)
+        * highmem and non-highmem zones separately.
+        */
+       pages_highmem = preallocate_image_highmem(highmem / 2);
+-      alloc = (count - max_size) - pages_highmem;
++      alloc = count - max_size;
++      if (alloc > pages_highmem)
++              alloc -= pages_highmem;
++      else
++              alloc = 0;
+       pages = preallocate_image_memory(alloc, avail_normal);
+       if (pages < alloc) {
+               /* We have exhausted non-highmem pages, try highmem. */
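
The numbers from the report make the failure concrete: count - max_size is 1,972,862 pages while pages_highmem is 1,978,280, and because the counters involved are unsigned (hence the clamp rather than a signed check) the further subtraction wraps around to an enormous allocation request instead of a small negative one, so the preallocation can never succeed. A standalone rerun of that arithmetic together with the clamp the patch introduces; on a 64-bit build of this snippet the wrapped value differs from the 32-bit kernel's, but it is huge either way.

#include <stdio.h>

int main(void)
{
    /* page counts from the bug report quoted in the changelog (32-bit kernel) */
    unsigned long count = 3943404, max_size = 1970542, pages_highmem = 1978280;

    /* old calculation: wraps around because the operands are unsigned */
    unsigned long old_alloc = (count - max_size) - pages_highmem;

    /* clamped calculation equivalent to the patch */
    unsigned long alloc = count - max_size;
    if (alloc > pages_highmem)
        alloc -= pages_highmem;
    else
        alloc = 0;

    printf("old: %lu pages requested (wrapped)\nnew: %lu pages requested\n",
           old_alloc, alloc);
    return 0;
}
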
diff --git a/queue-3.10/pm-runtime-use-pm_runtime_put_sync-in-__device_release_driver.patch b/queue-3.10/pm-runtime-use-pm_runtime_put_sync-in-__device_release_driver.patch
new file mode 100644 (file)
index 0000000..9854225
--- /dev/null
@@ -0,0 +1,43 @@
+From baab52ded242c35a2290e1fa82e0cc147d0d8c1a Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Thu, 7 Nov 2013 01:51:15 +0100
+Subject: PM / runtime: Use pm_runtime_put_sync() in __device_release_driver()
+
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+
+commit baab52ded242c35a2290e1fa82e0cc147d0d8c1a upstream.
+
+Commit fa180eb448fa (PM / Runtime: Idle devices asynchronously after
+probe|release) modified __device_release_driver() to call
+pm_runtime_put(dev) instead of pm_runtime_put_sync(dev) before
+detaching the driver from the device.  However, that was a mistake,
+because pm_runtime_put(dev) causes rpm_idle() to be queued up and
+the driver may be gone already when that function is executed.
+That breaks the assumptions the drivers have the right to make
+about the core's behavior on the basis of the existing documentation
+and actually causes problems to happen, so revert that part of
+commit fa180eb448fa and restore the previous behavior of
+__device_release_driver().
+
+Reported-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
+Fixes: fa180eb448fa (PM / Runtime: Idle devices asynchronously after probe|release)
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Acked-by: Kevin Hilman <khilman@linaro.org>
+Acked-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/base/dd.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -499,7 +499,7 @@ static void __device_release_driver(stru
+                                                    BUS_NOTIFY_UNBIND_DRIVER,
+                                                    dev);
+-              pm_runtime_put(dev);
++              pm_runtime_put_sync(dev);
+               if (dev->bus && dev->bus->remove)
+                       dev->bus->remove(dev);
diff --git a/queue-3.10/qeth-avoid-buffer-overflow-in-snmp-ioctl.patch b/queue-3.10/qeth-avoid-buffer-overflow-in-snmp-ioctl.patch
new file mode 100644 (file)
index 0000000..f1c5923
--- /dev/null
@@ -0,0 +1,46 @@
+From 6fb392b1a63ae36c31f62bc3fc8630b49d602b62 Mon Sep 17 00:00:00 2001
+From: Ursula Braun <ursula.braun@de.ibm.com>
+Date: Wed, 6 Nov 2013 09:04:52 +0100
+Subject: qeth: avoid buffer overflow in snmp ioctl
+
+From: Ursula Braun <ursula.braun@de.ibm.com>
+
+commit 6fb392b1a63ae36c31f62bc3fc8630b49d602b62 upstream.
+
+Check user-defined length in snmp ioctl request and allow request
+only if it fits into a qeth command buffer.
+
+Signed-off-by: Ursula Braun <ursula.braun@de.ibm.com>
+Signed-off-by: Frank Blaschka <frank.blaschka@de.ibm.com>
+Reviewed-by: Heiko Carstens <heicars2@linux.vnet.ibm.com>
+Reported-by: Nico Golde <nico@ngolde.de>
+Reported-by: Fabian Yamaguchi <fabs@goesec.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/s390/net/qeth_core_main.c |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -4448,7 +4448,7 @@ int qeth_snmp_command(struct qeth_card *
+       struct qeth_cmd_buffer *iob;
+       struct qeth_ipa_cmd *cmd;
+       struct qeth_snmp_ureq *ureq;
+-      int req_len;
++      unsigned int req_len;
+       struct qeth_arp_query_info qinfo = {0, };
+       int rc = 0;
+@@ -4464,6 +4464,10 @@ int qeth_snmp_command(struct qeth_card *
+       /* skip 4 bytes (data_len struct member) to get req_len */
+       if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
+               return -EFAULT;
++      if (req_len > (QETH_BUFSIZE - IPA_PDU_HEADER_SIZE -
++                     sizeof(struct qeth_ipacmd_hdr) -
++                     sizeof(struct qeth_ipacmd_setadpparms_hdr)))
++              return -EINVAL;
+       ureq = memdup_user(udata, req_len + sizeof(struct qeth_snmp_ureq_hdr));
+       if (IS_ERR(ureq)) {
+               QETH_CARD_TEXT(card, 2, "snmpnome");
diff --git a/queue-3.10/rt2400pci-fix-rssi-read.patch b/queue-3.10/rt2400pci-fix-rssi-read.patch
new file mode 100644 (file)
index 0000000..a3914d7
--- /dev/null
@@ -0,0 +1,30 @@
+From 2bf127a5cc372b9319afcbae10b090663b621c8b Mon Sep 17 00:00:00 2001
+From: Stanislaw Gruszka <stf_xl@wp.pl>
+Date: Tue, 15 Oct 2013 14:28:48 +0200
+Subject: rt2400pci: fix RSSI read
+
+From: Stanislaw Gruszka <stf_xl@wp.pl>
+
+commit 2bf127a5cc372b9319afcbae10b090663b621c8b upstream.
+
+RSSI value is provided on word3 not on word2.
+
+Signed-off-by: Stanislaw Gruszka <stf_xl@wp.pl>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/rt2x00/rt2400pci.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/rt2x00/rt2400pci.c
++++ b/drivers/net/wireless/rt2x00/rt2400pci.c
+@@ -1261,7 +1261,7 @@ static void rt2400pci_fill_rxdone(struct
+        */
+       rxdesc->timestamp = ((u64)rx_high << 32) | rx_low;
+       rxdesc->signal = rt2x00_get_field32(word2, RXD_W2_SIGNAL) & ~0x08;
+-      rxdesc->rssi = rt2x00_get_field32(word2, RXD_W3_RSSI) -
++      rxdesc->rssi = rt2x00_get_field32(word3, RXD_W3_RSSI) -
+           entry->queue->rt2x00dev->rssi_offset;
+       rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
diff --git a/queue-3.10/series b/queue-3.10/series
index 360478bd9c67147e7a0d85b43fed0f5642485a64..e1790834d80cf350934b401f573c8ee408c01f3d 100644 (file)
--- a/queue-3.10/series
@@ -61,3 +61,17 @@ xen-blkback-fix-reference-counting.patch
 rtlwifi-rtl8192de-fix-incorrect-signal-strength-for-unassociated-ap.patch
 rtlwifi-rtl8192se-fix-incorrect-signal-strength-for-unassociated-ap.patch
 rtlwifi-rtl8192cu-fix-incorrect-signal-strength-for-unassociated-ap.patch
+qeth-avoid-buffer-overflow-in-snmp-ioctl.patch
+rt2400pci-fix-rssi-read.patch
+mm-ensure-get_unmapped_area-returns-higher-address-than-mmap_min_addr.patch
+mmc-atmel-mci-abort-transfer-on-timeout-error.patch
+mmc-atmel-mci-fix-oops-in-atmci_tasklet_func.patch
+dm-mpath-fix-race-condition-between-multipath_dtr-and-pg_init_done.patch
+dm-array-fix-bug-in-growing-array.patch
+dm-cache-fix-a-race-condition-between-queuing-new-migrations-and-quiescing-for-a-shutdown.patch
+dm-allocate-buffer-for-messages-with-small-number-of-arguments-using-gfp_noio.patch
+ioatdma-fix-sed-pool-selection.patch
+ioatdma-fix-selection-of-16-vs-8-source-path.patch
+blk-core-fix-memory-corruption-if-blkcg_init_queue-fails.patch
+pm-hibernate-avoid-overflow-in-hibernate_preallocate_memory.patch
+pm-runtime-use-pm_runtime_put_sync-in-__device_release_driver.patch