From: Greg Kroah-Hartman
Date: Sun, 18 Oct 2015 00:51:41 +0000 (-0700)
Subject: 3.10-stable patches
X-Git-Tag: v3.10.91~9
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=86cc7e79b39b6b627dfc9de80b4269c33eb4caf2;p=thirdparty%2Fkernel%2Fstable-queue.git

3.10-stable patches

added patches:
      arm64-readahead-fault-retry-breaks-mmap-file-read-random-detection.patch
      bonding-correct-the-mac-address-for-follow-fail_over_mac-policy.patch
      dm-cache-fix-null-pointer-when-switching-from-cleaner-policy.patch
      fib_rules-fix-dump_rules-not-to-exit-early.patch
      genirq-fix-race-in-register_irq_proc.patch
      m68k-define-asmlinkage_protect.patch
      staging-speakup-fix-speakup-r-regression.patch
      x86-add-1-2-4-8-byte-optimization-to-64bit-__copy_-from-to-_user_inatomic.patch
---
diff --git a/queue-3.10/arm64-readahead-fault-retry-breaks-mmap-file-read-random-detection.patch b/queue-3.10/arm64-readahead-fault-retry-breaks-mmap-file-read-random-detection.patch
new file mode 100644
index 00000000000..9b9c8cb2758
--- /dev/null
+++ b/queue-3.10/arm64-readahead-fault-retry-breaks-mmap-file-read-random-detection.patch
@@ -0,0 +1,48 @@
+From 569ba74a7ba69f46ce2950bf085b37fea2408385 Mon Sep 17 00:00:00 2001
+From: Mark Salyzyn
+Date: Mon, 21 Sep 2015 21:39:50 +0100
+Subject: arm64: readahead: fault retry breaks mmap file read random detection
+
+From: Mark Salyzyn
+
+commit 569ba74a7ba69f46ce2950bf085b37fea2408385 upstream.
+
+This is the arm64 portion of commit 45cac65b0fcd ("readahead: fault
+retry breaks mmap file read random detection"), which was absent from
+the initial port and has since gone unnoticed. The original commit says:
+
+> .fault now can retry. The retry can break state machine of .fault. In
+> filemap_fault, if page is miss, ra->mmap_miss is increased. In the second
+> try, since the page is in page cache now, ra->mmap_miss is decreased. And
+> these are done in one fault, so we can't detect random mmap file access.
+>
+> Add a new flag to indicate .fault is tried once. In the second try, skip
+> ra->mmap_miss decreasing. The filemap_fault state machine is ok with it.
+
+With this change, Mark reports that:
+
+> Random read improves by 250%, sequential read improves by 40%, and
+> random write by 400% to an eMMC device with dm crypto wrapped around it.
+
+Cc: Shaohua Li
+Cc: Rik van Riel
+Cc: Wu Fengguang
+Signed-off-by: Mark Salyzyn
+Signed-off-by: Riley Andrews
+Signed-off-by: Will Deacon
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/arm64/mm/fault.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -278,6 +278,7 @@ retry:
+ * starvation.
+ */
+ mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
++ mm_flags |= FAULT_FLAG_TRIED;
+ goto retry;
+ }
+ }
diff --git a/queue-3.10/bonding-correct-the-mac-address-for-follow-fail_over_mac-policy.patch b/queue-3.10/bonding-correct-the-mac-address-for-follow-fail_over_mac-policy.patch
new file mode 100644
index 00000000000..06f27242e5b
--- /dev/null
+++ b/queue-3.10/bonding-correct-the-mac-address-for-follow-fail_over_mac-policy.patch
@@ -0,0 +1,82 @@
+From: dingtianhong
+Date: Thu, 16 Jul 2015 16:30:02 +0800
+Subject: bonding: correct the MAC address for "follow" fail_over_mac policy
+
+From: dingtianhong
+
+[ Upstream commit a951bc1e6ba58f11df5ed5ddc41311e10f5fd20b ]
+
+The "follow" fail_over_mac policy is useful for multiport devices that
+either become confused or incur a performance penalty when multiple
+ports are programmed with the same MAC address, but two slaves can
+still end up with the same MAC address under this policy as follows:
+
+1) echo +eth0 > /sys/class/net/bond0/bonding/slaves
+   bond0 takes the same MAC address as eth0; call it MAC1.
+
+2) echo +eth1 > /sys/class/net/bond0/bonding/slaves
+   eth1 is the backup slave and has MAC2.
+
+3) ifconfig eth0 down
+   eth1 becomes the active slave; the bond swaps the MACs of eth0 and
+   eth1, so eth1 now has MAC1 and eth0 has MAC2.
+
+4) ifconfig eth1 down
+   there is no active slave; eth1 still has MAC1 and eth0 has MAC2.
+
+5) ifconfig eth0 up
+   eth0 becomes the active slave again and the bond sets it back to MAC1.
+
+This is wrong: if eth1 is now brought up as well, eth0 and eth1 have the
+same MAC address, which breaks this policy for ACTIVE_BACKUP mode.
+
+Fix this by finding the old active slave and swapping MAC addresses
+with it before changing the active slave.
+
+Signed-off-by: Ding Tianhong
+Tested-by: Nikolay Aleksandrov
+Signed-off-by: David S. Miller
+[bwh: Backported to 3.10: bond_for_each_slave() takes an extra int parameter]
+Signed-off-by: Ben Hutchings
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/net/bonding/bond_main.c | 20 ++++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -876,6 +876,23 @@ static void bond_mc_swap(struct bonding
+ }
+ }
+
++static struct slave *bond_get_old_active(struct bonding *bond,
++ struct slave *new_active)
++{
++ struct slave *slave;
++ int i;
++
++ bond_for_each_slave(bond, slave, i) {
++ if (slave == new_active)
++ continue;
++
++ if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
++ return slave;
++ }
++
++ return NULL;
++}
++
+ /*
+ * bond_do_fail_over_mac
+ *
+@@ -919,6 +936,9 @@ static void bond_do_fail_over_mac(struct
+ write_unlock_bh(&bond->curr_slave_lock);
+ read_unlock(&bond->lock);
+
++ if (!old_active)
++ old_active = bond_get_old_active(bond, new_active);
++
+ if (old_active) {
+ memcpy(tmp_mac, new_active->dev->dev_addr, ETH_ALEN);
+ memcpy(saddr.sa_data, old_active->dev->dev_addr,
diff --git a/queue-3.10/dm-cache-fix-null-pointer-when-switching-from-cleaner-policy.patch b/queue-3.10/dm-cache-fix-null-pointer-when-switching-from-cleaner-policy.patch
new file mode 100644
index 00000000000..3eded01b418
--- /dev/null
+++ b/queue-3.10/dm-cache-fix-null-pointer-when-switching-from-cleaner-policy.patch
@@ -0,0 +1,37 @@
+From 2bffa1503c5c06192eb1459180fac4416575a966 Mon Sep 17 00:00:00 2001
+From: Joe Thornber
+Date: Fri, 9 Oct 2015 14:03:38 +0100
+Subject: dm cache: fix NULL pointer when switching from cleaner policy
+
+From: Joe Thornber
+
+commit 2bffa1503c5c06192eb1459180fac4416575a966 upstream.
+ +The cleaner policy doesn't make use of the per cache block hint space in +the metadata (unlike the other policies). When switching from the +cleaner policy to mq or smq a NULL pointer crash (in dm_tm_new_block) +was observed. The crash was caused by bugs in dm-cache-metadata.c +when trying to skip creation of the hint btree. + +The minimal fix is to change hint size for the cleaner policy to 4 bytes +(only hint size supported). + +Signed-off-by: Joe Thornber +Signed-off-by: Mike Snitzer +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/md/dm-cache-policy-cleaner.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/md/dm-cache-policy-cleaner.c ++++ b/drivers/md/dm-cache-policy-cleaner.c +@@ -434,7 +434,7 @@ static struct dm_cache_policy *wb_create + static struct dm_cache_policy_type wb_policy_type = { + .name = "cleaner", + .version = {1, 0, 0}, +- .hint_size = 0, ++ .hint_size = 4, + .owner = THIS_MODULE, + .create = wb_create + }; diff --git a/queue-3.10/fib_rules-fix-dump_rules-not-to-exit-early.patch b/queue-3.10/fib_rules-fix-dump_rules-not-to-exit-early.patch new file mode 100644 index 00000000000..30c310717ef --- /dev/null +++ b/queue-3.10/fib_rules-fix-dump_rules-not-to-exit-early.patch @@ -0,0 +1,38 @@ +From roland@kernel.org Sat Oct 17 17:11:23 2015 +From: Roland Dreier +Date: Mon, 5 Oct 2015 10:29:28 -0700 +Subject: fib_rules: Fix dump_rules() not to exit early +To: netdev@vger.kernel.org, stable@vger.kernel.org +Cc: Roland Dreier +Message-ID: <1444066168-5566-1-git-send-email-roland@kernel.org> + + +From: Roland Dreier + +Backports of 41fc014332d9 ("fib_rules: fix fib rule dumps across +multiple skbs") introduced a regression in "ip rule show" - it ends up +dumping the first rule over and over and never exiting, because 3.19 +and earlier are missing commit 053c095a82cf ("netlink: make +nlmsg_end() and genlmsg_end() void"), so fib_nl_fill_rule() ends up +returning skb->len (i.e. > 0) in the success case. + +Fix this by checking the return code for < 0 instead of != 0. + +Signed-off-by: Roland Dreier +Signed-off-by: Greg Kroah-Hartman + +--- + net/core/fib_rules.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/net/core/fib_rules.c ++++ b/net/core/fib_rules.c +@@ -606,7 +606,7 @@ static int dump_rules(struct sk_buff *sk + err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, RTM_NEWRULE, + NLM_F_MULTI, ops); +- if (err) ++ if (err < 0) + break; + skip: + idx++; diff --git a/queue-3.10/genirq-fix-race-in-register_irq_proc.patch b/queue-3.10/genirq-fix-race-in-register_irq_proc.patch new file mode 100644 index 00000000000..9e59374a787 --- /dev/null +++ b/queue-3.10/genirq-fix-race-in-register_irq_proc.patch @@ -0,0 +1,76 @@ +From 95c2b17534654829db428f11bcf4297c059a2a7e Mon Sep 17 00:00:00 2001 +From: Ben Hutchings +Date: Sat, 26 Sep 2015 12:23:56 +0100 +Subject: genirq: Fix race in register_irq_proc() + +From: Ben Hutchings + +commit 95c2b17534654829db428f11bcf4297c059a2a7e upstream. + +Per-IRQ directories in procfs are created only when a handler is first +added to the irqdesc, not when the irqdesc is created. In the case of +a shared IRQ, multiple tasks can race to create a directory. This +race condition seems to have been present forever, but is easier to +hit with async probing. 
+ +Signed-off-by: Ben Hutchings +Link: http://lkml.kernel.org/r/1443266636.2004.2.camel@decadent.org.uk +Signed-off-by: Thomas Gleixner +Signed-off-by: Greg Kroah-Hartman + +--- + kernel/irq/proc.c | 19 +++++++++++++++++-- + 1 file changed, 17 insertions(+), 2 deletions(-) + +--- a/kernel/irq/proc.c ++++ b/kernel/irq/proc.c +@@ -12,6 +12,7 @@ + #include + #include + #include ++#include + + #include "internals.h" + +@@ -309,18 +310,29 @@ void register_handler_proc(unsigned int + + void register_irq_proc(unsigned int irq, struct irq_desc *desc) + { ++ static DEFINE_MUTEX(register_lock); + char name [MAX_NAMELEN]; + +- if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir) ++ if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip)) + return; + ++ /* ++ * irq directories are registered only when a handler is ++ * added, not when the descriptor is created, so multiple ++ * tasks might try to register at the same time. ++ */ ++ mutex_lock(®ister_lock); ++ ++ if (desc->dir) ++ goto out_unlock; ++ + memset(name, 0, MAX_NAMELEN); + sprintf(name, "%d", irq); + + /* create /proc/irq/1234 */ + desc->dir = proc_mkdir(name, root_irq_dir); + if (!desc->dir) +- return; ++ goto out_unlock; + + #ifdef CONFIG_SMP + /* create /proc/irq//smp_affinity */ +@@ -341,6 +353,9 @@ void register_irq_proc(unsigned int irq, + + proc_create_data("spurious", 0444, desc->dir, + &irq_spurious_proc_fops, (void *)(long)irq); ++ ++out_unlock: ++ mutex_unlock(®ister_lock); + } + + void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) diff --git a/queue-3.10/m68k-define-asmlinkage_protect.patch b/queue-3.10/m68k-define-asmlinkage_protect.patch new file mode 100644 index 00000000000..e284c30de41 --- /dev/null +++ b/queue-3.10/m68k-define-asmlinkage_protect.patch @@ -0,0 +1,70 @@ +From 8474ba74193d302e8340dddd1e16c85cc4b98caf Mon Sep 17 00:00:00 2001 +From: Andreas Schwab +Date: Wed, 23 Sep 2015 23:12:09 +0200 +Subject: m68k: Define asmlinkage_protect + +From: Andreas Schwab + +commit 8474ba74193d302e8340dddd1e16c85cc4b98caf upstream. + +Make sure the compiler does not modify arguments of syscall functions. +This can happen if the compiler generates a tailcall to another +function. For example, without asmlinkage_protect sys_openat is compiled +into this function: + +sys_openat: + clr.l %d0 + move.w 18(%sp),%d0 + move.l %d0,16(%sp) + jbra do_sys_open + +Note how the fourth argument is modified in place, modifying the register +%d4 that gets restored from this stack slot when the function returns to +user-space. The caller may expect the register to be unmodified across +system calls. + +Signed-off-by: Andreas Schwab +Signed-off-by: Geert Uytterhoeven +Signed-off-by: Greg Kroah-Hartman + +--- + arch/m68k/include/asm/linkage.h | 30 ++++++++++++++++++++++++++++++ + 1 file changed, 30 insertions(+) + +--- a/arch/m68k/include/asm/linkage.h ++++ b/arch/m68k/include/asm/linkage.h +@@ -4,4 +4,34 @@ + #define __ALIGN .align 4 + #define __ALIGN_STR ".align 4" + ++/* ++ * Make sure the compiler doesn't do anything stupid with the ++ * arguments on the stack - they are owned by the *caller*, not ++ * the callee. This just fools gcc into not spilling into them, ++ * and keeps it from doing tailcall recursion and/or using the ++ * stack slots for temporaries, since they are live and "used" ++ * all the way to the end of the function. ++ */ ++#define asmlinkage_protect(n, ret, args...) \ ++ __asmlinkage_protect##n(ret, ##args) ++#define __asmlinkage_protect_n(ret, args...) 
\ ++ __asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args) ++#define __asmlinkage_protect0(ret) \ ++ __asmlinkage_protect_n(ret) ++#define __asmlinkage_protect1(ret, arg1) \ ++ __asmlinkage_protect_n(ret, "m" (arg1)) ++#define __asmlinkage_protect2(ret, arg1, arg2) \ ++ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2)) ++#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \ ++ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3)) ++#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \ ++ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ ++ "m" (arg4)) ++#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \ ++ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ ++ "m" (arg4), "m" (arg5)) ++#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \ ++ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ ++ "m" (arg4), "m" (arg5), "m" (arg6)) ++ + #endif diff --git a/queue-3.10/series b/queue-3.10/series index 2af2995cf37..312b899dd50 100644 --- a/queue-3.10/series +++ b/queue-3.10/series @@ -43,3 +43,11 @@ ubi-return-enospc-if-no-enough-space-available.patch ib-qib-change-lkey-table-allocation-to-support-more-mrs.patch dcache-handle-escaped-paths-in-prepend_path.patch vfs-test-for-and-handle-paths-that-are-unreachable-from-their-mnt_root.patch +arm64-readahead-fault-retry-breaks-mmap-file-read-random-detection.patch +m68k-define-asmlinkage_protect.patch +bonding-correct-the-mac-address-for-follow-fail_over_mac-policy.patch +fib_rules-fix-dump_rules-not-to-exit-early.patch +genirq-fix-race-in-register_irq_proc.patch +x86-add-1-2-4-8-byte-optimization-to-64bit-__copy_-from-to-_user_inatomic.patch +dm-cache-fix-null-pointer-when-switching-from-cleaner-policy.patch +staging-speakup-fix-speakup-r-regression.patch diff --git a/queue-3.10/staging-speakup-fix-speakup-r-regression.patch b/queue-3.10/staging-speakup-fix-speakup-r-regression.patch new file mode 100644 index 00000000000..52590cb859b --- /dev/null +++ b/queue-3.10/staging-speakup-fix-speakup-r-regression.patch @@ -0,0 +1,37 @@ +From b1d562acc78f0af46de0dfe447410bc40bdb7ece Mon Sep 17 00:00:00 2001 +From: "covici@ccs.covici.com" +Date: Wed, 20 May 2015 05:44:11 -0400 +Subject: staging: speakup: fix speakup-r regression + +From: "covici@ccs.covici.com" + +commit b1d562acc78f0af46de0dfe447410bc40bdb7ece upstream. + +Here is a patch to make speakup-r work again. + +It broke in 3.6 due to commit 4369c64c79a22b98d3b7eff9d089196cd878a10a +"Input: Send events one packet at a time) + +The problem was that the fakekey.c routine to fake a down arrow no +longer functioned properly and putting the input_sync fixed it. 
+ +Fixes: 4369c64c79a22b98d3b7eff9d089196cd878a10a +Acked-by: Samuel Thibault +Signed-off-by: John Covici +Signed-off-by: Greg Kroah-Hartman +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/staging/speakup/fakekey.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/staging/speakup/fakekey.c ++++ b/drivers/staging/speakup/fakekey.c +@@ -81,6 +81,7 @@ void speakup_fake_down_arrow(void) + __this_cpu_write(reporting_keystroke, true); + input_report_key(virt_keyboard, KEY_DOWN, PRESSED); + input_report_key(virt_keyboard, KEY_DOWN, RELEASED); ++ input_sync(virt_keyboard); + __this_cpu_write(reporting_keystroke, false); + + /* reenable preemption */ diff --git a/queue-3.10/x86-add-1-2-4-8-byte-optimization-to-64bit-__copy_-from-to-_user_inatomic.patch b/queue-3.10/x86-add-1-2-4-8-byte-optimization-to-64bit-__copy_-from-to-_user_inatomic.patch new file mode 100644 index 00000000000..7495842a7d5 --- /dev/null +++ b/queue-3.10/x86-add-1-2-4-8-byte-optimization-to-64bit-__copy_-from-to-_user_inatomic.patch @@ -0,0 +1,99 @@ +From ff47ab4ff3cddfa7bc1b25b990e24abe2ae474ff Mon Sep 17 00:00:00 2001 +From: Andi Kleen +Date: Fri, 16 Aug 2013 14:17:19 -0700 +Subject: x86: Add 1/2/4/8 byte optimization to 64bit __copy_{from,to}_user_inatomic + +From: Andi Kleen + +commit ff47ab4ff3cddfa7bc1b25b990e24abe2ae474ff upstream. + +The 64bit __copy_{from,to}_user_inatomic always called +copy_from_user_generic, but skipped the special optimizations for 1/2/4/8 +byte accesses. + +This especially hurts the futex call, which accesses the 4 byte futex +user value with a complicated fast string operation in a function call, +instead of a single movl. + +Use __copy_{from,to}_user for _inatomic instead to get the same +optimizations. The only problem was the might_fault() in those functions. +So move that to new wrapper and call __copy_{f,t}_user_nocheck() +from *_inatomic directly. + +32bit already did this correctly by duplicating the code. + +Signed-off-by: Andi Kleen +Link: http://lkml.kernel.org/r/1376687844-19857-2-git-send-email-andi@firstfloor.org +Signed-off-by: H. 
Peter Anvin +Cc: Thomas Gleixner +Signed-off-by: Greg Kroah-Hartman + +--- + arch/x86/include/asm/uaccess_64.h | 24 ++++++++++++++++++------ + 1 file changed, 18 insertions(+), 6 deletions(-) + +--- a/arch/x86/include/asm/uaccess_64.h ++++ b/arch/x86/include/asm/uaccess_64.h +@@ -77,11 +77,10 @@ int copy_to_user(void __user *dst, const + } + + static __always_inline __must_check +-int __copy_from_user(void *dst, const void __user *src, unsigned size) ++int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size) + { + int ret = 0; + +- might_fault(); + if (!__builtin_constant_p(size)) + return copy_user_generic(dst, (__force void *)src, size); + switch (size) { +@@ -121,11 +120,17 @@ int __copy_from_user(void *dst, const vo + } + + static __always_inline __must_check +-int __copy_to_user(void __user *dst, const void *src, unsigned size) ++int __copy_from_user(void *dst, const void __user *src, unsigned size) ++{ ++ might_fault(); ++ return __copy_from_user_nocheck(dst, src, size); ++} ++ ++static __always_inline __must_check ++int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size) + { + int ret = 0; + +- might_fault(); + if (!__builtin_constant_p(size)) + return copy_user_generic((__force void *)dst, src, size); + switch (size) { +@@ -165,6 +170,13 @@ int __copy_to_user(void __user *dst, con + } + + static __always_inline __must_check ++int __copy_to_user(void __user *dst, const void *src, unsigned size) ++{ ++ might_fault(); ++ return __copy_to_user_nocheck(dst, src, size); ++} ++ ++static __always_inline __must_check + int __copy_in_user(void __user *dst, const void __user *src, unsigned size) + { + int ret = 0; +@@ -220,13 +232,13 @@ int __copy_in_user(void __user *dst, con + static __must_check __always_inline int + __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size) + { +- return copy_user_generic(dst, (__force const void *)src, size); ++ return __copy_from_user_nocheck(dst, (__force const void *)src, size); + } + + static __must_check __always_inline int + __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) + { +- return copy_user_generic((__force void *)dst, src, size); ++ return __copy_to_user_nocheck((__force void *)dst, src, size); + } + + extern long __copy_user_nocache(void *dst, const void __user *src,