From: Greg Kroah-Hartman Date: Sun, 1 Dec 2013 19:59:44 +0000 (-0800) Subject: 3.12-stable patches X-Git-Tag: v3.4.72~36 X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=7bf0750fa7b0df860279497ad94661e26eba4a13;p=thirdparty%2Fkernel%2Fstable-queue.git 3.12-stable patches added patches: ath5k-fix-regression-in-tx-status-processing.patch blk-core-fix-memory-corruption-if-blkcg_init_queue-fails.patch dm-allocate-buffer-for-messages-with-small-number-of-arguments-using-gfp_noio.patch dm-array-fix-bug-in-growing-array.patch dm-cache-fix-a-race-condition-between-queuing-new-migrations-and-quiescing-for-a-shutdown.patch dm-mpath-fix-race-condition-between-multipath_dtr-and-pg_init_done.patch ioatdma-fix-bug-in-selftest-after-removal-of-dma_memset.patch ioatdma-fix-sed-pool-selection.patch ioatdma-fix-selection-of-16-vs-8-source-path.patch iser-target-avoid-using-frmr-for-single-dma-entry-requests.patch mmc-atmel-mci-abort-transfer-on-timeout-error.patch mmc-atmel-mci-fix-oops-in-atmci_tasklet_func.patch mm-ensure-get_unmapped_area-returns-higher-address-than-mmap_min_addr.patch mm-zswap-bugfix-memory-leak-when-invalidate-and-reclaim-occur-concurrently.patch pm-hibernate-avoid-overflow-in-hibernate_preallocate_memory.patch pm-hibernate-do-not-crash-kernel-in-free_basic_memory_bitmaps.patch pm-runtime-use-pm_runtime_put_sync-in-__device_release_driver.patch qeth-avoid-buffer-overflow-in-snmp-ioctl.patch rt2400pci-fix-rssi-read.patch target-fix-delayed-task-aborted-status-tas-handling-bug.patch --- diff --git a/queue-3.12/ath5k-fix-regression-in-tx-status-processing.patch b/queue-3.12/ath5k-fix-regression-in-tx-status-processing.patch new file mode 100644 index 00000000000..60524bcb8f0 --- /dev/null +++ b/queue-3.12/ath5k-fix-regression-in-tx-status-processing.patch @@ -0,0 +1,58 @@ +From 7ede612fd615abcda0cc30e5bef2a70f4cf4f75c Mon Sep 17 00:00:00 2001 +From: Felix Fietkau +Date: Mon, 14 Oct 2013 21:18:48 +0200 +Subject: ath5k: fix regression in tx status processing + +From: Felix Fietkau + +commit 7ede612fd615abcda0cc30e5bef2a70f4cf4f75c upstream. + +The regression was introduced in the following commit: + +0967e01e8e713ed2982fb4eba8ba13794e9a6e89 +"ath5k: make use of the new rate control API" + +ath5k_tx_frame_completed saves the intended per-rate retry counts before +they are cleared by ieee80211_tx_info_clear_status, however at this +point the information in info->status.rates is incomplete. + +This causes significant throughput degradation and excessive packet loss +on links where high bit rates don't work properly. + +Move the copy from bf->rates a few lines up to ensure that the saved +retry counts are updated, and that they are really cleared in +info->status.rates after the call to ieee80211_tx_info_clear_status. + +Cc: Thomas Huehn +Cc: Benjamin Vahl +Reported-by: Ben West +Signed-off-by: Felix Fietkau +Acked-by: Thomas Huehn +Signed-off-by: John W. 
Linville +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/net/wireless/ath/ath5k/base.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +--- a/drivers/net/wireless/ath/ath5k/base.c ++++ b/drivers/net/wireless/ath/ath5k/base.c +@@ -1663,15 +1663,15 @@ ath5k_tx_frame_completed(struct ath5k_hw + ah->stats.tx_bytes_count += skb->len; + info = IEEE80211_SKB_CB(skb); + ++ size = min_t(int, sizeof(info->status.rates), sizeof(bf->rates)); ++ memcpy(info->status.rates, bf->rates, size); ++ + tries[0] = info->status.rates[0].count; + tries[1] = info->status.rates[1].count; + tries[2] = info->status.rates[2].count; + + ieee80211_tx_info_clear_status(info); + +- size = min_t(int, sizeof(info->status.rates), sizeof(bf->rates)); +- memcpy(info->status.rates, bf->rates, size); +- + for (i = 0; i < ts->ts_final_idx; i++) { + struct ieee80211_tx_rate *r = + &info->status.rates[i]; diff --git a/queue-3.12/blk-core-fix-memory-corruption-if-blkcg_init_queue-fails.patch b/queue-3.12/blk-core-fix-memory-corruption-if-blkcg_init_queue-fails.patch new file mode 100644 index 00000000000..0c5077d5415 --- /dev/null +++ b/queue-3.12/blk-core-fix-memory-corruption-if-blkcg_init_queue-fails.patch @@ -0,0 +1,76 @@ +From fff4996b7db7955414ac74386efa5e07fd766b50 Mon Sep 17 00:00:00 2001 +From: Mikulas Patocka +Date: Mon, 14 Oct 2013 12:11:36 -0400 +Subject: blk-core: Fix memory corruption if blkcg_init_queue fails + +From: Mikulas Patocka + +commit fff4996b7db7955414ac74386efa5e07fd766b50 upstream. + +If blkcg_init_queue fails, blk_alloc_queue_node doesn't call bdi_destroy +to clean up structures allocated by the backing dev. + +------------[ cut here ]------------ +WARNING: at lib/debugobjects.c:260 debug_print_object+0x85/0xa0() +ODEBUG: free active (active state 0) object type: percpu_counter hint: (null) +Modules linked in: dm_loop dm_mod ip6table_filter ip6_tables uvesafb cfbcopyarea cfbimgblt cfbfillrect fbcon font bitblit fbcon_rotate fbcon_cw fbcon_ud fbcon_ccw softcursor fb fbdev ipt_MASQUERADE iptable_nat nf_nat_ipv4 msr nf_conntrack_ipv4 nf_defrag_ipv4 xt_state ipt_REJECT xt_tcpudp iptable_filter ip_tables x_tables bridge stp llc tun ipv6 cpufreq_userspace cpufreq_stats cpufreq_powersave cpufreq_ondemand cpufreq_conservative spadfs fuse hid_generic usbhid hid raid0 md_mod dmi_sysfs nf_nat_ftp nf_nat nf_conntrack_ftp nf_conntrack lm85 hwmon_vid snd_usb_audio snd_pcm_oss snd_mixer_oss snd_pcm snd_timer snd_page_alloc snd_hwdep snd_usbmidi_lib snd_rawmidi snd soundcore acpi_cpufreq freq_table mperf sata_svw serverworks kvm_amd ide_core ehci_pci ohci_hcd libata ehci_hcd kvm usbcore tg3 usb_common libphy k10temp pcspkr ptp i2c_piix4 i2c_core evdev microcode hwmon rtc_cmos pps_core e100 skge floppy mii processor button unix +CPU: 0 PID: 2739 Comm: lvchange Tainted: G W +3.10.15-devel #14 +Hardware name: empty empty/S3992-E, BIOS 'V1.06 ' 06/09/2009 + 0000000000000009 ffff88023c3c1ae8 ffffffff813c8fd4 ffff88023c3c1b20 + ffffffff810399eb ffff88043d35cd58 ffffffff81651940 ffff88023c3c1bf8 + ffffffff82479d90 0000000000000005 ffff88023c3c1b80 ffffffff81039a67 +Call Trace: + [] dump_stack+0x19/0x1b + [] warn_slowpath_common+0x6b/0xa0 + [] warn_slowpath_fmt+0x47/0x50 + [] ? debug_check_no_obj_freed+0xcf/0x250 + [] debug_print_object+0x85/0xa0 + [] debug_check_no_obj_freed+0x203/0x250 + [] kmem_cache_free+0x20c/0x3a0 + [] blk_alloc_queue_node+0x2a9/0x2c0 + [] blk_alloc_queue+0xe/0x10 + [] dm_create+0x1a3/0x530 [dm_mod] + [] ? 
list_version_get_info+0xe0/0xe0 [dm_mod] + [] dev_create+0x57/0x2b0 [dm_mod] + [] ? list_version_get_info+0xe0/0xe0 [dm_mod] + [] ? list_version_get_info+0xe0/0xe0 [dm_mod] + [] ctl_ioctl+0x268/0x500 [dm_mod] + [] ? get_lock_stats+0x22/0x70 + [] dm_ctl_ioctl+0xe/0x20 [dm_mod] + [] do_vfs_ioctl+0x2ed/0x520 + [] ? fget_light+0x377/0x4e0 + [] SyS_ioctl+0x4b/0x90 + [] system_call_fastpath+0x1a/0x1f +---[ end trace 4b5ff0d55673d986 ]--- +------------[ cut here ]------------ + +This fix should be backported to stable kernels starting with 2.6.37. Note +that in the kernels prior to 3.5 the affected code is different, but the +bug is still there - bdi_init is called and bdi_destroy isn't. + +Signed-off-by: Mikulas Patocka +Acked-by: Tejun Heo +Signed-off-by: Jens Axboe +Signed-off-by: Greg Kroah-Hartman + +--- + block/blk-core.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +--- a/block/blk-core.c ++++ b/block/blk-core.c +@@ -645,10 +645,12 @@ struct request_queue *blk_alloc_queue_no + __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags); + + if (blkcg_init_queue(q)) +- goto fail_id; ++ goto fail_bdi; + + return q; + ++fail_bdi: ++ bdi_destroy(&q->backing_dev_info); + fail_id: + ida_simple_remove(&blk_queue_ida, q->id); + fail_q: diff --git a/queue-3.12/dm-allocate-buffer-for-messages-with-small-number-of-arguments-using-gfp_noio.patch b/queue-3.12/dm-allocate-buffer-for-messages-with-small-number-of-arguments-using-gfp_noio.patch new file mode 100644 index 00000000000..fbf47d9a91b --- /dev/null +++ b/queue-3.12/dm-allocate-buffer-for-messages-with-small-number-of-arguments-using-gfp_noio.patch @@ -0,0 +1,61 @@ +From f36afb3957353d2529cb2b00f78fdccd14fc5e9c Mon Sep 17 00:00:00 2001 +From: Mikulas Patocka +Date: Thu, 31 Oct 2013 13:55:45 -0400 +Subject: dm: allocate buffer for messages with small number of arguments using GFP_NOIO + +From: Mikulas Patocka + +commit f36afb3957353d2529cb2b00f78fdccd14fc5e9c upstream. + +dm-mpath and dm-thin must process messages even if some device is +suspended, so we allocate argv buffer with GFP_NOIO. These messages have +a small fixed number of arguments. + +On the other hand, dm-switch needs to process bulk data using messages +so excessive use of GFP_NOIO could cause trouble. + +The patch also lowers the default number of arguments from 64 to 8, so +that there is smaller load on GFP_NOIO allocations. + +Signed-off-by: Mikulas Patocka +Acked-by: Alasdair G Kergon +Signed-off-by: Mike Snitzer +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/md/dm-table.c | 18 ++++++++++++++++-- + 1 file changed, 16 insertions(+), 2 deletions(-) + +--- a/drivers/md/dm-table.c ++++ b/drivers/md/dm-table.c +@@ -545,14 +545,28 @@ static int adjoin(struct dm_table *table + + /* + * Used to dynamically allocate the arg array. ++ * ++ * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must ++ * process messages even if some device is suspended. These messages have a ++ * small fixed number of arguments. ++ * ++ * On the other hand, dm-switch needs to process bulk data using messages and ++ * excessive use of GFP_NOIO could cause trouble. + */ + static char **realloc_argv(unsigned *array_size, char **old_argv) + { + char **argv; + unsigned new_size; ++ gfp_t gfp; + +- new_size = *array_size ? 
*array_size * 2 : 64; +- argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL); ++ if (*array_size) { ++ new_size = *array_size * 2; ++ gfp = GFP_KERNEL; ++ } else { ++ new_size = 8; ++ gfp = GFP_NOIO; ++ } ++ argv = kmalloc(new_size * sizeof(*argv), gfp); + if (argv) { + memcpy(argv, old_argv, *array_size * sizeof(*argv)); + *array_size = new_size; diff --git a/queue-3.12/dm-array-fix-bug-in-growing-array.patch b/queue-3.12/dm-array-fix-bug-in-growing-array.patch new file mode 100644 index 00000000000..2fc47c949bd --- /dev/null +++ b/queue-3.12/dm-array-fix-bug-in-growing-array.patch @@ -0,0 +1,41 @@ +From 9c1d4de56066e4d6abc66ec188faafd7b303fb08 Mon Sep 17 00:00:00 2001 +From: Joe Thornber +Date: Wed, 30 Oct 2013 11:19:59 +0000 +Subject: dm array: fix bug in growing array + +From: Joe Thornber + +commit 9c1d4de56066e4d6abc66ec188faafd7b303fb08 upstream. + +Entries would be lost if the old tail block was partially filled. + +Signed-off-by: Joe Thornber +Signed-off-by: Mike Snitzer +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/md/persistent-data/dm-array.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +--- a/drivers/md/persistent-data/dm-array.c ++++ b/drivers/md/persistent-data/dm-array.c +@@ -509,15 +509,18 @@ static int grow_add_tail_block(struct re + static int grow_needs_more_blocks(struct resize *resize) + { + int r; ++ unsigned old_nr_blocks = resize->old_nr_full_blocks; + + if (resize->old_nr_entries_in_last_block > 0) { ++ old_nr_blocks++; ++ + r = grow_extend_tail_block(resize, resize->max_entries); + if (r) + return r; + } + + r = insert_full_ablocks(resize->info, resize->size_of_block, +- resize->old_nr_full_blocks, ++ old_nr_blocks, + resize->new_nr_full_blocks, + resize->max_entries, resize->value, + &resize->root); diff --git a/queue-3.12/dm-cache-fix-a-race-condition-between-queuing-new-migrations-and-quiescing-for-a-shutdown.patch b/queue-3.12/dm-cache-fix-a-race-condition-between-queuing-new-migrations-and-quiescing-for-a-shutdown.patch new file mode 100644 index 00000000000..e84adce2002 --- /dev/null +++ b/queue-3.12/dm-cache-fix-a-race-condition-between-queuing-new-migrations-and-quiescing-for-a-shutdown.patch @@ -0,0 +1,149 @@ +From 66cb1910df17b38334153462ec8166e48058035f Mon Sep 17 00:00:00 2001 +From: Joe Thornber +Date: Wed, 30 Oct 2013 17:11:58 +0000 +Subject: dm cache: fix a race condition between queuing new migrations and quiescing for a shutdown + +From: Joe Thornber + +commit 66cb1910df17b38334153462ec8166e48058035f upstream. + +The code that was trying to do this was inadequate. The postsuspend +method (in ioctl context), needs to wait for the worker thread to +acknowledge the request to quiesce. Otherwise the migration count may +drop to zero temporarily before the worker thread realises we're +quiescing. In this case the target will be taken down, but the worker +thread may have issued a new migration, which will cause an oops when +it completes. 
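(To make the handshake concrete: the sketch below models the same wait-for-ack pattern in userspace with pthreads. It is an illustration only, not the driver code; the function names are borrowed from the patch for readability, and the locking is simplified to a single mutex/condvar pair.)

  --- quiesce-ack-sketch.c -------------------------------------
  #include <pthread.h>
  #include <stdbool.h>
  #include <stdio.h>
  #include <unistd.h>

  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
  static pthread_cond_t  ack_cond = PTHREAD_COND_INITIALIZER;
  static bool quiescing;
  static bool quiesce_acked;
  static bool stop_worker;

  /* Worker loop: only issues new "migrations" while not quiescing, and
   * acknowledges a pending quiesce request once per pass, as the worker
   * thread does via ack_quiescing() in the patch below. */
  static void *worker(void *unused)
  {
      (void)unused;
      for (;;) {
          pthread_mutex_lock(&lock);
          bool done = stop_worker;
          bool q = quiescing;
          if (q && !quiesce_acked) {
              quiesce_acked = true;
              pthread_cond_signal(&ack_cond);
          }
          pthread_mutex_unlock(&lock);
          if (done)
              return NULL;
          if (!q) {
              /* issue new migrations here */
          }
          usleep(1000);
      }
  }

  /* Suspend path: request the quiesce, then block until the worker has
   * seen it. Returning before the ack is exactly the bug: the migration
   * count can be momentarily zero while the worker still issues work. */
  static void start_quiescing(void)
  {
      pthread_mutex_lock(&lock);
      quiescing = true;
      while (!quiesce_acked)
          pthread_cond_wait(&ack_cond, &lock);
      pthread_mutex_unlock(&lock);
  }

  int main(void)
  {
      pthread_t t;
      pthread_create(&t, NULL, worker, NULL);
      start_quiescing();
      printf("quiesce acknowledged; safe to drain and tear down\n");
      pthread_mutex_lock(&lock);
      stop_worker = true;
      pthread_mutex_unlock(&lock);
      pthread_join(t, NULL);
      return 0;
  }
  --------------------------------------------------------------

Built with "gcc quiesce-ack-sketch.c -lpthread", start_quiescing() returns only after the worker has observed the request, which is the guarantee the postsuspend path needs before it waits for the migration count to drain.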
+ +Signed-off-by: Joe Thornber +Signed-off-by: Mike Snitzer +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/md/dm-cache-target.c | 54 +++++++++++++++++++++++++++++++------------ + 1 file changed, 40 insertions(+), 14 deletions(-) + +--- a/drivers/md/dm-cache-target.c ++++ b/drivers/md/dm-cache-target.c +@@ -148,6 +148,9 @@ struct cache { + wait_queue_head_t migration_wait; + atomic_t nr_migrations; + ++ wait_queue_head_t quiescing_wait; ++ atomic_t quiescing_ack; ++ + /* + * cache_size entries, dirty if set + */ +@@ -748,8 +751,9 @@ static void cell_defer(struct cache *cac + + static void cleanup_migration(struct dm_cache_migration *mg) + { +- dec_nr_migrations(mg->cache); ++ struct cache *cache = mg->cache; + free_migration(mg); ++ dec_nr_migrations(cache); + } + + static void migration_failure(struct dm_cache_migration *mg) +@@ -1346,34 +1350,51 @@ static void writeback_some_dirty_blocks( + /*---------------------------------------------------------------- + * Main worker loop + *--------------------------------------------------------------*/ +-static void start_quiescing(struct cache *cache) ++static bool is_quiescing(struct cache *cache) + { ++ int r; + unsigned long flags; + + spin_lock_irqsave(&cache->lock, flags); +- cache->quiescing = 1; ++ r = cache->quiescing; + spin_unlock_irqrestore(&cache->lock, flags); ++ ++ return r; + } + +-static void stop_quiescing(struct cache *cache) ++static void ack_quiescing(struct cache *cache) ++{ ++ if (is_quiescing(cache)) { ++ atomic_inc(&cache->quiescing_ack); ++ wake_up(&cache->quiescing_wait); ++ } ++} ++ ++static void wait_for_quiescing_ack(struct cache *cache) ++{ ++ wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack)); ++} ++ ++static void start_quiescing(struct cache *cache) + { + unsigned long flags; + + spin_lock_irqsave(&cache->lock, flags); +- cache->quiescing = 0; ++ cache->quiescing = true; + spin_unlock_irqrestore(&cache->lock, flags); ++ ++ wait_for_quiescing_ack(cache); + } + +-static bool is_quiescing(struct cache *cache) ++static void stop_quiescing(struct cache *cache) + { +- int r; + unsigned long flags; + + spin_lock_irqsave(&cache->lock, flags); +- r = cache->quiescing; ++ cache->quiescing = false; + spin_unlock_irqrestore(&cache->lock, flags); + +- return r; ++ atomic_set(&cache->quiescing_ack, 0); + } + + static void wait_for_migrations(struct cache *cache) +@@ -1420,16 +1441,15 @@ static void do_worker(struct work_struct + struct cache *cache = container_of(ws, struct cache, worker); + + do { +- if (!is_quiescing(cache)) ++ if (!is_quiescing(cache)) { ++ writeback_some_dirty_blocks(cache); ++ process_deferred_writethrough_bios(cache); + process_deferred_bios(cache); ++ } + + process_migrations(cache, &cache->quiesced_migrations, issue_copy); + process_migrations(cache, &cache->completed_migrations, complete_migration); + +- writeback_some_dirty_blocks(cache); +- +- process_deferred_writethrough_bios(cache); +- + if (commit_if_needed(cache)) { + process_deferred_flush_bios(cache, false); + +@@ -1442,6 +1462,9 @@ static void do_worker(struct work_struct + process_migrations(cache, &cache->need_commit_migrations, + migration_success_post_commit); + } ++ ++ ack_quiescing(cache); ++ + } while (more_work(cache)); + } + +@@ -2005,6 +2028,9 @@ static int cache_create(struct cache_arg + atomic_set(&cache->nr_migrations, 0); + init_waitqueue_head(&cache->migration_wait); + ++ init_waitqueue_head(&cache->quiescing_wait); ++ atomic_set(&cache->quiescing_ack, 0); ++ + r = -ENOMEM; + cache->nr_dirty = 0; + 
cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size)); diff --git a/queue-3.12/dm-mpath-fix-race-condition-between-multipath_dtr-and-pg_init_done.patch b/queue-3.12/dm-mpath-fix-race-condition-between-multipath_dtr-and-pg_init_done.patch new file mode 100644 index 00000000000..8686561f2d4 --- /dev/null +++ b/queue-3.12/dm-mpath-fix-race-condition-between-multipath_dtr-and-pg_init_done.patch @@ -0,0 +1,105 @@ +From 954a73d5d3073df2231820c718fdd2f18b0fe4c9 Mon Sep 17 00:00:00 2001 +From: Shiva Krishna Merla +Date: Wed, 30 Oct 2013 03:26:38 +0000 +Subject: dm mpath: fix race condition between multipath_dtr and pg_init_done + +From: Shiva Krishna Merla + +commit 954a73d5d3073df2231820c718fdd2f18b0fe4c9 upstream. + +Whenever multipath_dtr() is happening we must prevent queueing any +further path activation work. Implement this by adding a new +'pg_init_disabled' flag to the multipath structure that denotes future +path activation work should be skipped if it is set. By disabling +pg_init and then re-enabling in flush_multipath_work() we also avoid the +potential for pg_init to be initiated while suspending an mpath device. + +Without this patch a race condition exists that may result in a kernel +panic: + +1) If after pg_init_done() decrements pg_init_in_progress to 0, a call + to wait_for_pg_init_completion() assumes there are no more pending path + management commands. +2) If pg_init_required is set by pg_init_done(), due to retryable + mode_select errors, then process_queued_ios() will again queue the + path activation work. +3) If free_multipath() completes before activate_path() work is called a + NULL pointer dereference like the following can be seen when + accessing members of the recently destructed multipath: + +BUG: unable to handle kernel NULL pointer dereference at 0000000000000090 +RIP: 0010:[] [] activate_path+0x1b/0x30 [dm_multipath] +[] worker_thread+0x170/0x2a0 +[] ? autoremove_wake_function+0x0/0x40 + +[switch to disabling pg_init in flush_multipath_work & header edits by Mike Snitzer] +Signed-off-by: Shiva Krishna Merla +Reviewed-by: Krishnasamy Somasundaram +Tested-by: Speagle Andy +Acked-by: Junichi Nomura +Signed-off-by: Mike Snitzer +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/md/dm-mpath.c | 18 +++++++++++++++--- + 1 file changed, 15 insertions(+), 3 deletions(-) + +--- a/drivers/md/dm-mpath.c ++++ b/drivers/md/dm-mpath.c +@@ -87,6 +87,7 @@ struct multipath { + unsigned queue_if_no_path:1; /* Queue I/O if last path fails? */ + unsigned saved_queue_if_no_path:1; /* Saved state during suspension */ + unsigned retain_attached_hw_handler:1; /* If there's already a hw_handler present, don't change it. 
*/ ++ unsigned pg_init_disabled:1; /* pg_init is not currently allowed */ + + unsigned pg_init_retries; /* Number of times to retry pg_init */ + unsigned pg_init_count; /* Number of times pg_init called */ +@@ -497,7 +498,8 @@ static void process_queued_ios(struct wo + (!pgpath && !m->queue_if_no_path)) + must_queue = 0; + +- if (m->pg_init_required && !m->pg_init_in_progress && pgpath) ++ if (m->pg_init_required && !m->pg_init_in_progress && pgpath && ++ !m->pg_init_disabled) + __pg_init_all_paths(m); + + spin_unlock_irqrestore(&m->lock, flags); +@@ -942,10 +944,20 @@ static void multipath_wait_for_pg_init_c + + static void flush_multipath_work(struct multipath *m) + { ++ unsigned long flags; ++ ++ spin_lock_irqsave(&m->lock, flags); ++ m->pg_init_disabled = 1; ++ spin_unlock_irqrestore(&m->lock, flags); ++ + flush_workqueue(kmpath_handlerd); + multipath_wait_for_pg_init_completion(m); + flush_workqueue(kmultipathd); + flush_work(&m->trigger_event); ++ ++ spin_lock_irqsave(&m->lock, flags); ++ m->pg_init_disabled = 0; ++ spin_unlock_irqrestore(&m->lock, flags); + } + + static void multipath_dtr(struct dm_target *ti) +@@ -1164,7 +1176,7 @@ static int pg_init_limit_reached(struct + + spin_lock_irqsave(&m->lock, flags); + +- if (m->pg_init_count <= m->pg_init_retries) ++ if (m->pg_init_count <= m->pg_init_retries && !m->pg_init_disabled) + m->pg_init_required = 1; + else + limit_reached = 1; +@@ -1714,7 +1726,7 @@ out: + *---------------------------------------------------------------*/ + static struct target_type multipath_target = { + .name = "multipath", +- .version = {1, 5, 1}, ++ .version = {1, 6, 0}, + .module = THIS_MODULE, + .ctr = multipath_ctr, + .dtr = multipath_dtr, diff --git a/queue-3.12/ioatdma-fix-bug-in-selftest-after-removal-of-dma_memset.patch b/queue-3.12/ioatdma-fix-bug-in-selftest-after-removal-of-dma_memset.patch new file mode 100644 index 00000000000..bf1d78309d8 --- /dev/null +++ b/queue-3.12/ioatdma-fix-bug-in-selftest-after-removal-of-dma_memset.patch @@ -0,0 +1,33 @@ +From ac7d631f7d9f9e4e6116c4a72b6308067d0a2226 Mon Sep 17 00:00:00 2001 +From: Dave Jiang +Date: Wed, 6 Nov 2013 08:50:09 -0700 +Subject: ioatdma: Fix bug in selftest after removal of DMA_MEMSET. + +From: Dave Jiang + +commit ac7d631f7d9f9e4e6116c4a72b6308067d0a2226 upstream. + +Commit 48a9db4 (3.11) removed the memset op in the xor selftest for ioatdma. +The issue is that with the removal of that op, it never replaced the memset +with a CPU memset. The memory being operated on is expected to be zeroes but +was not. This is causing the xor selftest to fail. + +Signed-off-by: Dave Jiang +Signed-off-by: Dan Williams +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/dma/ioat/dma_v3.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/drivers/dma/ioat/dma_v3.c ++++ b/drivers/dma/ioat/dma_v3.c +@@ -1545,6 +1545,8 @@ static int ioat_xor_val_self_test(struct + goto free_resources; + } + ++ memset(page_address(dest), 0, PAGE_SIZE); ++ + /* test for non-zero parity sum */ + op = IOAT_OP_XOR_VAL; + diff --git a/queue-3.12/ioatdma-fix-sed-pool-selection.patch b/queue-3.12/ioatdma-fix-sed-pool-selection.patch new file mode 100644 index 00000000000..9562c459379 --- /dev/null +++ b/queue-3.12/ioatdma-fix-sed-pool-selection.patch @@ -0,0 +1,64 @@ +From 5d48b9b5d80e3aa38a5161565398b1e48a650573 Mon Sep 17 00:00:00 2001 +From: Dan Williams +Date: Wed, 13 Nov 2013 10:15:42 -0800 +Subject: ioatdma: fix sed pool selection + +From: Dan Williams + +commit 5d48b9b5d80e3aa38a5161565398b1e48a650573 upstream. 
+ +The array to lookup the sed pool based on the number of sources +(pq16_idx_to_sedi) is 16 entries and expects a max source index. +However, we pass the total source count which runs off the end of the +array when src_cnt == 16. The minimal fix is to just pass src_cnt-1, +but given we know the source count is > 8 we can just calculate the sed +pool by (src_cnt - 2) >> 3. + +Cc: Dave Jiang +Acked-by: Dave Jiang +Signed-off-by: Dan Williams +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/dma/ioat/dma_v3.c | 16 +--------------- + 1 file changed, 1 insertion(+), 15 deletions(-) + +--- a/drivers/dma/ioat/dma_v3.c ++++ b/drivers/dma/ioat/dma_v3.c +@@ -87,13 +87,6 @@ static const u8 pq_idx_to_field[] = { 1, + static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7, + 0, 1, 2, 3, 4, 5, 6 }; + +-/* +- * technically sources 1 and 2 do not require SED, but the op will have +- * at least 9 descriptors so that's irrelevant. +- */ +-static const u8 pq16_idx_to_sed[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, +- 1, 1, 1, 1, 1, 1, 1 }; +- + static void ioat3_eh(struct ioat2_dma_chan *ioat); + + static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx) +@@ -135,12 +128,6 @@ static void pq_set_src(struct ioat_raw_d + pq->coef[idx] = coef; + } + +-static int sed_get_pq16_pool_idx(int src_cnt) +-{ +- +- return pq16_idx_to_sed[src_cnt]; +-} +- + static bool is_jf_ioat(struct pci_dev *pdev) + { + switch (pdev->device) { +@@ -1159,8 +1146,7 @@ __ioat3_prep_pq16_lock(struct dma_chan * + + descs[0] = (struct ioat_raw_descriptor *) pq; + +- desc->sed = ioat3_alloc_sed(device, +- sed_get_pq16_pool_idx(src_cnt)); ++ desc->sed = ioat3_alloc_sed(device, (src_cnt-2) >> 3); + if (!desc->sed) { + dev_err(to_dev(chan), + "%s: no free sed entries\n", __func__); diff --git a/queue-3.12/ioatdma-fix-selection-of-16-vs-8-source-path.patch b/queue-3.12/ioatdma-fix-selection-of-16-vs-8-source-path.patch new file mode 100644 index 00000000000..6725c5c8d1f --- /dev/null +++ b/queue-3.12/ioatdma-fix-selection-of-16-vs-8-source-path.patch @@ -0,0 +1,149 @@ +From 21e96c7313486390c694919522a76dfea0a86c59 Mon Sep 17 00:00:00 2001 +From: Dan Williams +Date: Wed, 13 Nov 2013 10:37:36 -0800 +Subject: ioatdma: fix selection of 16 vs 8 source path + +From: Dan Williams + +commit 21e96c7313486390c694919522a76dfea0a86c59 upstream. + +When performing continuations there are implied sources that need to be +added to the source count. Quoting dma_set_maxpq: + +/* dma_maxpq - reduce maxpq in the face of continued operations + * @dma - dma device with PQ capability + * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set + * + * When an engine does not support native continuation we need 3 extra + * source slots to reuse P and Q with the following coefficients: + * 1/ {00} * P : remove P from Q', but use it as a source for P' + * 2/ {01} * Q : use Q to continue Q' calculation + * 3/ {00} * Q : subtract Q from P' to cancel (2) + * + * In the case where P is disabled we only need 1 extra source: + * 1/ {01} * Q : use Q to continue Q' calculation + */ + +...fix the selection of the 16 source path to take these implied sources +into account. + +Note this also kills the BUG_ON(src_cnt < 9) check in +__ioat3_prep_pq16_lock(). Besides not accounting for implied sources +the check is redundant given we already made the path selection. 
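(As a standalone arithmetic check of that path selection: the helper below mirrors the src_cnt_flags() introduced by this patch, but the flag bit values and the scaffolding are illustrative assumptions, not the dmaengine definitions.)

  --- implied-sources-sketch.c ---------------------------------
  #include <stdio.h>

  /* Bit positions are placeholders for this demo, not the real enum. */
  #define DMA_PREP_PQ_DISABLE_P (1u << 2)
  #define DMA_PREP_CONTINUE     (1u << 4)

  static int dmaf_continue(unsigned long flags)
  {
      return (flags & DMA_PREP_CONTINUE) != 0;
  }

  static int dmaf_p_disabled_continue(unsigned long flags)
  {
      unsigned long mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;
      return (flags & mask) == mask;
  }

  /* Same logic as the src_cnt_flags() helper added by this patch. */
  static int src_cnt_flags(unsigned int src_cnt, unsigned long flags)
  {
      if (dmaf_p_disabled_continue(flags))
          return src_cnt + 1;   /* one implied source: {01} * Q */
      else if (dmaf_continue(flags))
          return src_cnt + 3;   /* three implied sources to reuse P and Q */
      else
          return src_cnt;
  }

  int main(void)
  {
      /* 7 explicit sources plus a full continuation is 10 effective
       * sources, so the 16-source path must be chosen even though the
       * bare (src_cnt > 8) check would have picked the 8-source path. */
      unsigned int src_cnt = 7;
      unsigned long flags = DMA_PREP_CONTINUE;
      int eff = src_cnt_flags(src_cnt, flags);

      printf("src_cnt=%u effective=%d -> %s path\n", src_cnt, eff,
             eff > 8 ? "16-source" : "8-source");
      return 0;
  }
  --------------------------------------------------------------

With src_cnt = 7 and DMA_PREP_CONTINUE set this prints "src_cnt=7 effective=10 -> 16-source path"; the old "(src_cnt > 8)" test would wrongly have taken the 8-source path.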
+ +Cc: Dave Jiang +Acked-by: Dave Jiang +Signed-off-by: Dan Williams +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/dma/ioat/dma_v3.c | 30 +++++++++++++++--------------- + 1 file changed, 15 insertions(+), 15 deletions(-) + +--- a/drivers/dma/ioat/dma_v3.c ++++ b/drivers/dma/ioat/dma_v3.c +@@ -1116,9 +1116,6 @@ __ioat3_prep_pq16_lock(struct dma_chan * + u8 op; + int i, s, idx, num_descs; + +- /* this function only handles src_cnt 9 - 16 */ +- BUG_ON(src_cnt < 9); +- + /* this function is only called with 9-16 sources */ + op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S; + +@@ -1204,13 +1201,21 @@ __ioat3_prep_pq16_lock(struct dma_chan * + return &desc->txd; + } + ++static int src_cnt_flags(unsigned int src_cnt, unsigned long flags) ++{ ++ if (dmaf_p_disabled_continue(flags)) ++ return src_cnt + 1; ++ else if (dmaf_continue(flags)) ++ return src_cnt + 3; ++ else ++ return src_cnt; ++} ++ + static struct dma_async_tx_descriptor * + ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, + unsigned int src_cnt, const unsigned char *scf, size_t len, + unsigned long flags) + { +- struct dma_device *dma = chan->device; +- + /* specify valid address for disabled result */ + if (flags & DMA_PREP_PQ_DISABLE_P) + dst[0] = dst[1]; +@@ -1230,7 +1235,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma + single_source_coef[0] = scf[0]; + single_source_coef[1] = 0; + +- return (src_cnt > 8) && (dma->max_pq > 8) ? ++ return src_cnt_flags(src_cnt, flags) > 8 ? + __ioat3_prep_pq16_lock(chan, NULL, dst, single_source, + 2, single_source_coef, len, + flags) : +@@ -1238,7 +1243,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma + single_source_coef, len, flags); + + } else { +- return (src_cnt > 8) && (dma->max_pq > 8) ? ++ return src_cnt_flags(src_cnt, flags) > 8 ? + __ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt, + scf, len, flags) : + __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, +@@ -1251,8 +1256,6 @@ ioat3_prep_pq_val(struct dma_chan *chan, + unsigned int src_cnt, const unsigned char *scf, size_t len, + enum sum_check_flags *pqres, unsigned long flags) + { +- struct dma_device *dma = chan->device; +- + /* specify valid address for disabled result */ + if (flags & DMA_PREP_PQ_DISABLE_P) + pq[0] = pq[1]; +@@ -1264,7 +1267,7 @@ ioat3_prep_pq_val(struct dma_chan *chan, + */ + *pqres = 0; + +- return (src_cnt > 8) && (dma->max_pq > 8) ? ++ return src_cnt_flags(src_cnt, flags) > 8 ? + __ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len, + flags) : + __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len, +@@ -1275,7 +1278,6 @@ static struct dma_async_tx_descriptor * + ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, + unsigned int src_cnt, size_t len, unsigned long flags) + { +- struct dma_device *dma = chan->device; + unsigned char scf[src_cnt]; + dma_addr_t pq[2]; + +@@ -1284,7 +1286,7 @@ ioat3_prep_pqxor(struct dma_chan *chan, + flags |= DMA_PREP_PQ_DISABLE_Q; + pq[1] = dst; /* specify valid address for disabled result */ + +- return (src_cnt > 8) && (dma->max_pq > 8) ? ++ return src_cnt_flags(src_cnt, flags) > 8 ? 
+ __ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len, + flags) : + __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len, +@@ -1296,7 +1298,6 @@ ioat3_prep_pqxor_val(struct dma_chan *ch + unsigned int src_cnt, size_t len, + enum sum_check_flags *result, unsigned long flags) + { +- struct dma_device *dma = chan->device; + unsigned char scf[src_cnt]; + dma_addr_t pq[2]; + +@@ -1310,8 +1311,7 @@ ioat3_prep_pqxor_val(struct dma_chan *ch + flags |= DMA_PREP_PQ_DISABLE_Q; + pq[1] = pq[0]; /* specify valid address for disabled result */ + +- +- return (src_cnt > 8) && (dma->max_pq > 8) ? ++ return src_cnt_flags(src_cnt, flags) > 8 ? + __ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1, + scf, len, flags) : + __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, diff --git a/queue-3.12/iser-target-avoid-using-frmr-for-single-dma-entry-requests.patch b/queue-3.12/iser-target-avoid-using-frmr-for-single-dma-entry-requests.patch new file mode 100644 index 00000000000..4b2979bc74a --- /dev/null +++ b/queue-3.12/iser-target-avoid-using-frmr-for-single-dma-entry-requests.patch @@ -0,0 +1,64 @@ +From f01b9f73392b48c6cda7c2c66594c73137c776da Mon Sep 17 00:00:00 2001 +From: Vu Pham +Date: Mon, 11 Nov 2013 19:04:29 +0200 +Subject: iser-target: Avoid using FRMR for single dma entry requests + +From: Vu Pham + +commit f01b9f73392b48c6cda7c2c66594c73137c776da upstream. + +This patch changes isert_reg_rdma_frwr() to not use FRMR for single +dma entry requests from small I/Os, in order to avoid the associated +memory registration overhead. + +Using DMA MR is sufficient here for the single dma entry requests, +and addresses a >= v3.12 performance regression. + +Signed-off-by: Vu Pham +Signed-off-by: Nicholas Bellinger +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/infiniband/ulp/isert/ib_isert.c | 30 +++++++++++++++++++----------- + 1 file changed, 19 insertions(+), 11 deletions(-) + +--- a/drivers/infiniband/ulp/isert/ib_isert.c ++++ b/drivers/infiniband/ulp/isert/ib_isert.c +@@ -2259,18 +2259,26 @@ isert_reg_rdma_frwr(struct iscsi_conn *c + data_len = min(data_left, rdma_write_max); + wr->cur_rdma_length = data_len; + +- spin_lock_irqsave(&isert_conn->conn_lock, flags); +- fr_desc = list_first_entry(&isert_conn->conn_frwr_pool, +- struct fast_reg_descriptor, list); +- list_del(&fr_desc->list); +- spin_unlock_irqrestore(&isert_conn->conn_lock, flags); +- wr->fr_desc = fr_desc; ++ /* if there is a single dma entry, dma mr is sufficient */ ++ if (count == 1) { ++ ib_sge->addr = ib_sg_dma_address(ib_dev, &sg_start[0]); ++ ib_sge->length = ib_sg_dma_len(ib_dev, &sg_start[0]); ++ ib_sge->lkey = isert_conn->conn_mr->lkey; ++ wr->fr_desc = NULL; ++ } else { ++ spin_lock_irqsave(&isert_conn->conn_lock, flags); ++ fr_desc = list_first_entry(&isert_conn->conn_frwr_pool, ++ struct fast_reg_descriptor, list); ++ list_del(&fr_desc->list); ++ spin_unlock_irqrestore(&isert_conn->conn_lock, flags); ++ wr->fr_desc = fr_desc; + +- ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn, +- ib_sge, offset, data_len); +- if (ret) { +- list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool); +- goto unmap_sg; ++ ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn, ++ ib_sge, offset, data_len); ++ if (ret) { ++ list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool); ++ goto unmap_sg; ++ } + } + + return 0; diff --git a/queue-3.12/mm-ensure-get_unmapped_area-returns-higher-address-than-mmap_min_addr.patch 
b/queue-3.12/mm-ensure-get_unmapped_area-returns-higher-address-than-mmap_min_addr.patch
new file mode 100644
index 00000000000..c5cb70d386c
--- /dev/null
+++ b/queue-3.12/mm-ensure-get_unmapped_area-returns-higher-address-than-mmap_min_addr.patch
@@ -0,0 +1,124 @@
+From 2afc745f3e3079ab16c826be4860da2529054dd2 Mon Sep 17 00:00:00 2001
+From: Akira Takeuchi
+Date: Tue, 12 Nov 2013 15:08:21 -0800
+Subject: mm: ensure get_unmapped_area() returns higher address than mmap_min_addr
+
+From: Akira Takeuchi
+
+commit 2afc745f3e3079ab16c826be4860da2529054dd2 upstream.
+
+This patch fixes the problem that get_unmapped_area() can return illegal
+address and result in failing mmap(2) etc.
+
+In case that the address higher than PAGE_SIZE is set to
+/proc/sys/vm/mmap_min_addr, the address lower than mmap_min_addr can be
+returned by get_unmapped_area(), even if you do not pass any virtual
+address hint (i.e. the second argument).
+
+This is because the current get_unmapped_area() code does not take into
+account mmap_min_addr.
+
+This leads to two actual problems as follows:
+
+1. mmap(2) can fail with EPERM on the process without CAP_SYS_RAWIO,
+   although any illegal parameter is not passed.
+
+2. The bottom-up search path after the top-down search might not work in
+   arch_get_unmapped_area_topdown().
+
+Note: The first and third chunk of my patch, which changes "len" check,
+are for more precise check using mmap_min_addr, and not for solving the
+above problem.
+
+[How to reproduce]
+
+  --- test.c -------------------------------------------------
+  #include <stdio.h>
+  #include <unistd.h>
+  #include <sys/mman.h>
+  #include <errno.h>
+
+  int main(int argc, char *argv[])
+  {
+      void *ret = NULL, *last_map;
+      size_t pagesize = sysconf(_SC_PAGESIZE);
+
+      do {
+          last_map = ret;
+          ret = mmap(0, pagesize, PROT_NONE,
+                     MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+          // printf("ret=%p\n", ret);
+      } while (ret != MAP_FAILED);
+
+      if (errno != ENOMEM) {
+          printf("ERR: unexpected errno: %d (last map=%p)\n",
+                 errno, last_map);
+      }
+
+      return 0;
+  }
+  ---------------------------------------------------------------
+
+  $ gcc -m32 -o test test.c
+  $ sudo sysctl -w vm.mmap_min_addr=65536
+  vm.mmap_min_addr = 65536
+  $ ./test (run as non-privileged user)
+  ERR: unexpected errno: 1 (last map=0x10000)
+
+Signed-off-by: Akira Takeuchi
+Signed-off-by: Kiyoshi Owada
+Reviewed-by: Naoya Horiguchi
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ mm/mmap.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -1856,7 +1856,7 @@ arch_get_unmapped_area(struct file *filp
+         struct vm_area_struct *vma;
+         struct vm_unmapped_area_info info;
+
+-        if (len > TASK_SIZE)
++        if (len > TASK_SIZE - mmap_min_addr)
+                 return -ENOMEM;
+
+         if (flags & MAP_FIXED)
+@@ -1865,7 +1865,7 @@ arch_get_unmapped_area(struct file *filp
+         if (addr) {
+                 addr = PAGE_ALIGN(addr);
+                 vma = find_vma(mm, addr);
+-                if (TASK_SIZE - len >= addr &&
++                if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+                     (!vma || addr + len <= vma->vm_start))
+                         return addr;
+         }
+@@ -1895,7 +1895,7 @@ arch_get_unmapped_area_topdown(struct fi
+         struct vm_unmapped_area_info info;
+
+         /* requested length too big for entire address space */
+-        if (len > TASK_SIZE)
++        if (len > TASK_SIZE - mmap_min_addr)
+                 return -ENOMEM;
+
+         if (flags & MAP_FIXED)
+@@ -1905,14 +1905,14 @@ arch_get_unmapped_area_topdown(struct fi
+         if (addr) {
+                 addr = PAGE_ALIGN(addr);
+                 vma = find_vma(mm, addr);
+-                if (TASK_SIZE -
len >= addr && addr >= mmap_min_addr && + (!vma || addr + len <= vma->vm_start)) + return addr; + } + + info.flags = VM_UNMAPPED_AREA_TOPDOWN; + info.length = len; +- info.low_limit = PAGE_SIZE; ++ info.low_limit = max(PAGE_SIZE, mmap_min_addr); + info.high_limit = mm->mmap_base; + info.align_mask = 0; + addr = vm_unmapped_area(&info); diff --git a/queue-3.12/mm-zswap-bugfix-memory-leak-when-invalidate-and-reclaim-occur-concurrently.patch b/queue-3.12/mm-zswap-bugfix-memory-leak-when-invalidate-and-reclaim-occur-concurrently.patch new file mode 100644 index 00000000000..f7793098455 --- /dev/null +++ b/queue-3.12/mm-zswap-bugfix-memory-leak-when-invalidate-and-reclaim-occur-concurrently.patch @@ -0,0 +1,102 @@ +From 67d13fe846c57a54d12578e7a4518f68c5c86ad7 Mon Sep 17 00:00:00 2001 +From: Weijie Yang +Date: Tue, 12 Nov 2013 15:08:26 -0800 +Subject: mm/zswap: bugfix: memory leak when invalidate and reclaim occur concurrently + +From: Weijie Yang + +commit 67d13fe846c57a54d12578e7a4518f68c5c86ad7 upstream. + +Consider the following scenario: + +thread 0: reclaim entry x (get refcount, but not call zswap_get_swap_cache_page) +thread 1: call zswap_frontswap_invalidate_page to invalidate entry x. + finished, entry x and its zbud is not freed as its refcount != 0 + now, the swap_map[x] = 0 +thread 0: now call zswap_get_swap_cache_page + swapcache_prepare return -ENOENT because entry x is not used any more + zswap_get_swap_cache_page return ZSWAP_SWAPCACHE_NOMEM + zswap_writeback_entry do nothing except put refcount + +Now, the memory of zswap_entry x and its zpage leak. + +Modify: + - check the refcount in fail path, free memory if it is not referenced. + + - use ZSWAP_SWAPCACHE_FAIL instead of ZSWAP_SWAPCACHE_NOMEM as the fail path + can be not only caused by nomem but also by invalidate. + +Signed-off-by: Weijie Yang +Reviewed-by: Bob Liu +Reviewed-by: Minchan Kim +Acked-by: Seth Jennings +Signed-off-by: Andrew Morton +Signed-off-by: Linus Torvalds +Signed-off-by: Greg Kroah-Hartman + +--- + mm/zswap.c | 22 ++++++++++++++-------- + 1 file changed, 14 insertions(+), 8 deletions(-) + +--- a/mm/zswap.c ++++ b/mm/zswap.c +@@ -387,7 +387,7 @@ static void zswap_free_entry(struct zswa + enum zswap_get_swap_ret { + ZSWAP_SWAPCACHE_NEW, + ZSWAP_SWAPCACHE_EXIST, +- ZSWAP_SWAPCACHE_NOMEM ++ ZSWAP_SWAPCACHE_FAIL, + }; + + /* +@@ -401,9 +401,10 @@ enum zswap_get_swap_ret { + * added to the swap cache, and returned in retpage. 
+ * + * If success, the swap cache page is returned in retpage +- * Returns 0 if page was already in the swap cache, page is not locked +- * Returns 1 if the new page needs to be populated, page is locked +- * Returns <0 on error ++ * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache ++ * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated, ++ * the new page is added to swapcache and locked ++ * Returns ZSWAP_SWAPCACHE_FAIL on error + */ + static int zswap_get_swap_cache_page(swp_entry_t entry, + struct page **retpage) +@@ -475,7 +476,7 @@ static int zswap_get_swap_cache_page(swp + if (new_page) + page_cache_release(new_page); + if (!found_page) +- return ZSWAP_SWAPCACHE_NOMEM; ++ return ZSWAP_SWAPCACHE_FAIL; + *retpage = found_page; + return ZSWAP_SWAPCACHE_EXIST; + } +@@ -529,11 +530,11 @@ static int zswap_writeback_entry(struct + + /* try to allocate swap cache page */ + switch (zswap_get_swap_cache_page(swpentry, &page)) { +- case ZSWAP_SWAPCACHE_NOMEM: /* no memory */ ++ case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */ + ret = -ENOMEM; + goto fail; + +- case ZSWAP_SWAPCACHE_EXIST: /* page is unlocked */ ++ case ZSWAP_SWAPCACHE_EXIST: + /* page is already in the swap cache, ignore for now */ + page_cache_release(page); + ret = -EEXIST; +@@ -591,7 +592,12 @@ static int zswap_writeback_entry(struct + + fail: + spin_lock(&tree->lock); +- zswap_entry_put(entry); ++ refcount = zswap_entry_put(entry); ++ if (refcount <= 0) { ++ /* invalidate happened, consider writeback as success */ ++ zswap_free_entry(tree, entry); ++ ret = 0; ++ } + spin_unlock(&tree->lock); + return ret; + } diff --git a/queue-3.12/mmc-atmel-mci-abort-transfer-on-timeout-error.patch b/queue-3.12/mmc-atmel-mci-abort-transfer-on-timeout-error.patch new file mode 100644 index 00000000000..51a86f021bf --- /dev/null +++ b/queue-3.12/mmc-atmel-mci-abort-transfer-on-timeout-error.patch @@ -0,0 +1,39 @@ +From c1fa3426aa5c782724c97394303d52228206eda4 Mon Sep 17 00:00:00 2001 +From: Ludovic Desroches +Date: Mon, 9 Sep 2013 17:29:56 +0200 +Subject: mmc: atmel-mci: abort transfer on timeout error + +From: Ludovic Desroches + +commit c1fa3426aa5c782724c97394303d52228206eda4 upstream. + +When a software timeout occurs, the transfer is not stopped. In DMA case, +it causes DMA channel to be stuck because the transfer is still active +causing following transfers to be queued but not computed. + +Signed-off-by: Ludovic Desroches +Reported-by: Alexander Morozov +Acked-by: Nicolas Ferre +Signed-off-by: Chris Ball +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/mmc/host/atmel-mci.c | 7 +++++++ + 1 file changed, 7 insertions(+) + +--- a/drivers/mmc/host/atmel-mci.c ++++ b/drivers/mmc/host/atmel-mci.c +@@ -589,6 +589,13 @@ static void atmci_timeout_timer(unsigned + if (host->mrq->cmd->data) { + host->mrq->cmd->data->error = -ETIMEDOUT; + host->data = NULL; ++ /* ++ * With some SDIO modules, sometimes DMA transfer hangs. If ++ * stop_transfer() is not called then the DMA request is not ++ * removed, following ones are queued and never computed. 
++ */ ++ if (host->state == STATE_DATA_XFER) ++ host->stop_transfer(host); + } else { + host->mrq->cmd->error = -ETIMEDOUT; + host->cmd = NULL; diff --git a/queue-3.12/mmc-atmel-mci-fix-oops-in-atmci_tasklet_func.patch b/queue-3.12/mmc-atmel-mci-fix-oops-in-atmci_tasklet_func.patch new file mode 100644 index 00000000000..266056bdd7b --- /dev/null +++ b/queue-3.12/mmc-atmel-mci-fix-oops-in-atmci_tasklet_func.patch @@ -0,0 +1,45 @@ +From fbd986cd420d1deeabf1039ec4e74075a5639db5 Mon Sep 17 00:00:00 2001 +From: Rodolfo Giometti +Date: Mon, 9 Sep 2013 17:31:59 +0200 +Subject: mmc: atmel-mci: fix oops in atmci_tasklet_func + +From: Rodolfo Giometti + +commit fbd986cd420d1deeabf1039ec4e74075a5639db5 upstream. + +In some cases, a NULL pointer dereference happens because data is NULL when +STATE_END_REQUEST case is reached in atmci_tasklet_func. + +Signed-off-by: Rodolfo Giometti +Acked-by: Ludovic Desroches +Acked-by: Nicolas Ferre +Signed-off-by: Chris Ball +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/mmc/host/atmel-mci.c | 14 ++++++++------ + 1 file changed, 8 insertions(+), 6 deletions(-) + +--- a/drivers/mmc/host/atmel-mci.c ++++ b/drivers/mmc/host/atmel-mci.c +@@ -1810,12 +1810,14 @@ static void atmci_tasklet_func(unsigned + if (unlikely(status)) { + host->stop_transfer(host); + host->data = NULL; +- if (status & ATMCI_DTOE) { +- data->error = -ETIMEDOUT; +- } else if (status & ATMCI_DCRCE) { +- data->error = -EILSEQ; +- } else { +- data->error = -EIO; ++ if (data) { ++ if (status & ATMCI_DTOE) { ++ data->error = -ETIMEDOUT; ++ } else if (status & ATMCI_DCRCE) { ++ data->error = -EILSEQ; ++ } else { ++ data->error = -EIO; ++ } + } + } + diff --git a/queue-3.12/pm-hibernate-avoid-overflow-in-hibernate_preallocate_memory.patch b/queue-3.12/pm-hibernate-avoid-overflow-in-hibernate_preallocate_memory.patch new file mode 100644 index 00000000000..5e72511e0bb --- /dev/null +++ b/queue-3.12/pm-hibernate-avoid-overflow-in-hibernate_preallocate_memory.patch @@ -0,0 +1,44 @@ +From fd432b9f8c7c88428a4635b9f5a9c6e174df6e36 Mon Sep 17 00:00:00 2001 +From: Aaron Lu +Date: Wed, 6 Nov 2013 08:41:31 +0800 +Subject: PM / hibernate: Avoid overflow in hibernate_preallocate_memory() + +From: Aaron Lu + +commit fd432b9f8c7c88428a4635b9f5a9c6e174df6e36 upstream. + +When system has a lot of highmem (e.g. 16GiB using a 32 bits kernel), +the code to calculate how much memory we need to preallocate in +normal zone may cause overflow. As Leon has analysed: + + It looks that during computing 'alloc' variable there is overflow: + alloc = (3943404 - 1970542) - 1978280 = -5418 (signed) + And this function goes to err_out. + +Fix this by avoiding that overflow. + +References: https://bugzilla.kernel.org/show_bug.cgi?id=60817 +Reported-and-tested-by: Leon Drugi +Signed-off-by: Aaron Lu +Signed-off-by: Rafael J. Wysocki +Signed-off-by: Greg Kroah-Hartman + +--- + kernel/power/snapshot.c | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +--- a/kernel/power/snapshot.c ++++ b/kernel/power/snapshot.c +@@ -1402,7 +1402,11 @@ int hibernate_preallocate_memory(void) + * highmem and non-highmem zones separately. + */ + pages_highmem = preallocate_image_highmem(highmem / 2); +- alloc = (count - max_size) - pages_highmem; ++ alloc = count - max_size; ++ if (alloc > pages_highmem) ++ alloc -= pages_highmem; ++ else ++ alloc = 0; + pages = preallocate_image_memory(alloc, avail_normal); + if (pages < alloc) { + /* We have exhausted non-highmem pages, try highmem. 
*/ diff --git a/queue-3.12/pm-hibernate-do-not-crash-kernel-in-free_basic_memory_bitmaps.patch b/queue-3.12/pm-hibernate-do-not-crash-kernel-in-free_basic_memory_bitmaps.patch new file mode 100644 index 00000000000..ef01324348a --- /dev/null +++ b/queue-3.12/pm-hibernate-do-not-crash-kernel-in-free_basic_memory_bitmaps.patch @@ -0,0 +1,61 @@ +From 6a0c7cd33075f6b7f1d80145bb19812beb3fc5c9 Mon Sep 17 00:00:00 2001 +From: "Rafael J. Wysocki" +Date: Thu, 14 Nov 2013 23:26:58 +0100 +Subject: PM / Hibernate: Do not crash kernel in free_basic_memory_bitmaps() + +From: "Rafael J. Wysocki" + +commit 6a0c7cd33075f6b7f1d80145bb19812beb3fc5c9 upstream. + +I have received a report about the BUG_ON() in free_basic_memory_bitmaps() +triggering mysteriously during an aborted s2disk hibernation attempt. +The only way I can explain that is that /dev/snapshot was first +opened for writing (resume mode), then closed and then opened again +for reading and closed again without freezing tasks. In that case +the first invocation of snapshot_open() would set the free_bitmaps +flag in snapshot_state, which is a static variable. That flag +wouldn't be cleared later and the second invocation of snapshot_open() +would just leave it like that, so the subsequent snapshot_release() +would see data->frozen set and free_basic_memory_bitmaps() would be +called unnecessarily. + +To prevent that from happening clear data->free_bitmaps in +snapshot_open() when the file is being opened for reading (hibernate +mode). + +In addition to that, replace the BUG_ON() in free_basic_memory_bitmaps() +with a WARN_ON() as the kernel can continue just fine if the condition +checked by that macro occurs. + +Fixes: aab172891542 (PM / hibernate: Fix user space driven resume regression) +Reported-by: Oliver Lorenz +Signed-off-by: Rafael J. Wysocki +Signed-off-by: Greg Kroah-Hartman + +--- + kernel/power/snapshot.c | 3 ++- + kernel/power/user.c | 1 + + 2 files changed, 3 insertions(+), 1 deletion(-) + +--- a/kernel/power/snapshot.c ++++ b/kernel/power/snapshot.c +@@ -792,7 +792,8 @@ void free_basic_memory_bitmaps(void) + { + struct memory_bitmap *bm1, *bm2; + +- BUG_ON(!(forbidden_pages_map && free_pages_map)); ++ if (WARN_ON(!(forbidden_pages_map && free_pages_map))) ++ return; + + bm1 = forbidden_pages_map; + bm2 = free_pages_map; +--- a/kernel/power/user.c ++++ b/kernel/power/user.c +@@ -70,6 +70,7 @@ static int snapshot_open(struct inode *i + data->swap = swsusp_resume_device ? + swap_type_of(swsusp_resume_device, 0, NULL) : -1; + data->mode = O_RDONLY; ++ data->free_bitmaps = false; + error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE); + if (error) + pm_notifier_call_chain(PM_POST_HIBERNATION); diff --git a/queue-3.12/pm-runtime-use-pm_runtime_put_sync-in-__device_release_driver.patch b/queue-3.12/pm-runtime-use-pm_runtime_put_sync-in-__device_release_driver.patch new file mode 100644 index 00000000000..985422500cd --- /dev/null +++ b/queue-3.12/pm-runtime-use-pm_runtime_put_sync-in-__device_release_driver.patch @@ -0,0 +1,43 @@ +From baab52ded242c35a2290e1fa82e0cc147d0d8c1a Mon Sep 17 00:00:00 2001 +From: "Rafael J. Wysocki" +Date: Thu, 7 Nov 2013 01:51:15 +0100 +Subject: PM / runtime: Use pm_runtime_put_sync() in __device_release_driver() + +From: "Rafael J. Wysocki" + +commit baab52ded242c35a2290e1fa82e0cc147d0d8c1a upstream. 
+ +Commit fa180eb448fa (PM / Runtime: Idle devices asynchronously after +probe|release) modified __device_release_driver() to call +pm_runtime_put(dev) instead of pm_runtime_put_sync(dev) before +detaching the driver from the device. However, that was a mistake, +because pm_runtime_put(dev) causes rpm_idle() to be queued up and +the driver may be gone already when that function is executed. +That breaks the assumptions the drivers have the right to make +about the core's behavior on the basis of the existing documentation +and actually causes problems to happen, so revert that part of +commit fa180eb448fa and restore the previous behavior of +__device_release_driver(). + +Reported-by: Tomi Valkeinen +Fixes: fa180eb448fa (PM / Runtime: Idle devices asynchronously after probe|release) +Signed-off-by: Rafael J. Wysocki +Acked-by: Kevin Hilman +Acked-by: Ulf Hansson +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/base/dd.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/base/dd.c ++++ b/drivers/base/dd.c +@@ -499,7 +499,7 @@ static void __device_release_driver(stru + BUS_NOTIFY_UNBIND_DRIVER, + dev); + +- pm_runtime_put(dev); ++ pm_runtime_put_sync(dev); + + if (dev->bus && dev->bus->remove) + dev->bus->remove(dev); diff --git a/queue-3.12/qeth-avoid-buffer-overflow-in-snmp-ioctl.patch b/queue-3.12/qeth-avoid-buffer-overflow-in-snmp-ioctl.patch new file mode 100644 index 00000000000..0e36985b1d5 --- /dev/null +++ b/queue-3.12/qeth-avoid-buffer-overflow-in-snmp-ioctl.patch @@ -0,0 +1,46 @@ +From 6fb392b1a63ae36c31f62bc3fc8630b49d602b62 Mon Sep 17 00:00:00 2001 +From: Ursula Braun +Date: Wed, 6 Nov 2013 09:04:52 +0100 +Subject: qeth: avoid buffer overflow in snmp ioctl + +From: Ursula Braun + +commit 6fb392b1a63ae36c31f62bc3fc8630b49d602b62 upstream. + +Check user-defined length in snmp ioctl request and allow request +only if it fits into a qeth command buffer. + +Signed-off-by: Ursula Braun +Signed-off-by: Frank Blaschka +Reviewed-by: Heiko Carstens +Reported-by: Nico Golde +Reported-by: Fabian Yamaguchi +Signed-off-by: David S. 
Miller +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/s390/net/qeth_core_main.c | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +--- a/drivers/s390/net/qeth_core_main.c ++++ b/drivers/s390/net/qeth_core_main.c +@@ -4451,7 +4451,7 @@ int qeth_snmp_command(struct qeth_card * + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + struct qeth_snmp_ureq *ureq; +- int req_len; ++ unsigned int req_len; + struct qeth_arp_query_info qinfo = {0, }; + int rc = 0; + +@@ -4467,6 +4467,10 @@ int qeth_snmp_command(struct qeth_card * + /* skip 4 bytes (data_len struct member) to get req_len */ + if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int))) + return -EFAULT; ++ if (req_len > (QETH_BUFSIZE - IPA_PDU_HEADER_SIZE - ++ sizeof(struct qeth_ipacmd_hdr) - ++ sizeof(struct qeth_ipacmd_setadpparms_hdr))) ++ return -EINVAL; + ureq = memdup_user(udata, req_len + sizeof(struct qeth_snmp_ureq_hdr)); + if (IS_ERR(ureq)) { + QETH_CARD_TEXT(card, 2, "snmpnome"); diff --git a/queue-3.12/rt2400pci-fix-rssi-read.patch b/queue-3.12/rt2400pci-fix-rssi-read.patch new file mode 100644 index 00000000000..a3914d7cf3b --- /dev/null +++ b/queue-3.12/rt2400pci-fix-rssi-read.patch @@ -0,0 +1,30 @@ +From 2bf127a5cc372b9319afcbae10b090663b621c8b Mon Sep 17 00:00:00 2001 +From: Stanislaw Gruszka +Date: Tue, 15 Oct 2013 14:28:48 +0200 +Subject: rt2400pci: fix RSSI read + +From: Stanislaw Gruszka + +commit 2bf127a5cc372b9319afcbae10b090663b621c8b upstream. + +RSSI value is provided on word3 not on word2. + +Signed-off-by: Stanislaw Gruszka +Signed-off-by: John W. Linville +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/net/wireless/rt2x00/rt2400pci.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/net/wireless/rt2x00/rt2400pci.c ++++ b/drivers/net/wireless/rt2x00/rt2400pci.c +@@ -1261,7 +1261,7 @@ static void rt2400pci_fill_rxdone(struct + */ + rxdesc->timestamp = ((u64)rx_high << 32) | rx_low; + rxdesc->signal = rt2x00_get_field32(word2, RXD_W2_SIGNAL) & ~0x08; +- rxdesc->rssi = rt2x00_get_field32(word2, RXD_W3_RSSI) - ++ rxdesc->rssi = rt2x00_get_field32(word3, RXD_W3_RSSI) - + entry->queue->rt2x00dev->rssi_offset; + rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); + diff --git a/queue-3.12/series b/queue-3.12/series index 047b46855a5..d78b230826d 100644 --- a/queue-3.12/series +++ b/queue-3.12/series @@ -76,3 +76,23 @@ xen-blkback-fix-reference-counting.patch rtlwifi-rtl8192de-fix-incorrect-signal-strength-for-unassociated-ap.patch rtlwifi-rtl8192se-fix-incorrect-signal-strength-for-unassociated-ap.patch rtlwifi-rtl8192cu-fix-incorrect-signal-strength-for-unassociated-ap.patch +ath5k-fix-regression-in-tx-status-processing.patch +qeth-avoid-buffer-overflow-in-snmp-ioctl.patch +rt2400pci-fix-rssi-read.patch +mm-ensure-get_unmapped_area-returns-higher-address-than-mmap_min_addr.patch +mm-zswap-bugfix-memory-leak-when-invalidate-and-reclaim-occur-concurrently.patch +mmc-atmel-mci-abort-transfer-on-timeout-error.patch +mmc-atmel-mci-fix-oops-in-atmci_tasklet_func.patch +dm-mpath-fix-race-condition-between-multipath_dtr-and-pg_init_done.patch +dm-array-fix-bug-in-growing-array.patch +dm-cache-fix-a-race-condition-between-queuing-new-migrations-and-quiescing-for-a-shutdown.patch +dm-allocate-buffer-for-messages-with-small-number-of-arguments-using-gfp_noio.patch +ioatdma-fix-bug-in-selftest-after-removal-of-dma_memset.patch +ioatdma-fix-sed-pool-selection.patch +ioatdma-fix-selection-of-16-vs-8-source-path.patch 
+iser-target-avoid-using-frmr-for-single-dma-entry-requests.patch +target-fix-delayed-task-aborted-status-tas-handling-bug.patch +blk-core-fix-memory-corruption-if-blkcg_init_queue-fails.patch +pm-hibernate-avoid-overflow-in-hibernate_preallocate_memory.patch +pm-runtime-use-pm_runtime_put_sync-in-__device_release_driver.patch +pm-hibernate-do-not-crash-kernel-in-free_basic_memory_bitmaps.patch diff --git a/queue-3.12/target-fix-delayed-task-aborted-status-tas-handling-bug.patch b/queue-3.12/target-fix-delayed-task-aborted-status-tas-handling-bug.patch new file mode 100644 index 00000000000..19814d7e1a0 --- /dev/null +++ b/queue-3.12/target-fix-delayed-task-aborted-status-tas-handling-bug.patch @@ -0,0 +1,45 @@ +From 29f4c090079f442ea2723d292e4e64f0b6ac1f27 Mon Sep 17 00:00:00 2001 +From: Nicholas Bellinger +Date: Wed, 13 Nov 2013 14:39:14 -0800 +Subject: target: Fix delayed Task Aborted Status (TAS) handling bug + +From: Nicholas Bellinger + +commit 29f4c090079f442ea2723d292e4e64f0b6ac1f27 upstream. + +This patch fixes a bug in delayed Task Aborted Status (TAS) handling, +where transport_send_task_abort() was not returning for the case +when the se_tfo->write_pending() callback indicated that last fabric +specific WRITE PDU had not yet been received. + +It also adds an explicit cmd->scsi_status = SAM_STAT_TASK_ABORTED +assignment within transport_check_aborted_status() to avoid the case +where se_tfo->queue_status() is called when the SAM_STAT_TASK_ABORTED +assignment + ->queue_status() in transport_send_task_abort() does not +occur once SCF_SENT_DELAYED_TAS has been set. + +Signed-off-by: Nicholas Bellinger +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/target/target_core_transport.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/drivers/target/target_core_transport.c ++++ b/drivers/target/target_core_transport.c +@@ -2910,6 +2910,7 @@ int transport_check_aborted_status(struc + cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd)); + + cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; ++ cmd->scsi_status = SAM_STAT_TASK_ABORTED; + trace_target_cmd_complete(cmd); + cmd->se_tfo->queue_status(cmd); + +@@ -2938,6 +2939,7 @@ void transport_send_task_abort(struct se + if (cmd->se_tfo->write_pending_status(cmd) != 0) { + cmd->transport_state |= CMD_T_ABORTED; + smp_mb__after_atomic_inc(); ++ return; + } + } + cmd->scsi_status = SAM_STAT_TASK_ABORTED;