--- /dev/null
+From 569ba74a7ba69f46ce2950bf085b37fea2408385 Mon Sep 17 00:00:00 2001
+From: Mark Salyzyn <salyzyn@android.com>
+Date: Mon, 21 Sep 2015 21:39:50 +0100
+Subject: arm64: readahead: fault retry breaks mmap file read random detection
+
+From: Mark Salyzyn <salyzyn@android.com>
+
+commit 569ba74a7ba69f46ce2950bf085b37fea2408385 upstream.
+
+This is the arm64 portion of commit 45cac65b0fcd ("readahead: fault
+retry breaks mmap file read random detection"), which was absent from
+the initial port and has since gone unnoticed. The original commit says:
+
+> .fault now can retry. The retry can break state machine of .fault. In
+> filemap_fault, if page is miss, ra->mmap_miss is increased. In the second
+> try, since the page is in page cache now, ra->mmap_miss is decreased. And
+> these are done in one fault, so we can't detect random mmap file access.
+>
+> Add a new flag to indicate .fault is tried once. In the second try, skip
+> ra->mmap_miss decreasing. The filemap_fault state machine is ok with it.
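+
+For reference, the generic side of 45cac65b0fcd consumes the flag in
+filemap_fault() roughly as follows (paraphrased sketch, not the exact
+upstream hunk):
+
+	page = find_get_page(mapping, offset);
+	if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
+		/* page was cached on the first try: async readahead,
+		 * which is where ra->mmap_miss is decremented */
+		do_async_mmap_readahead(vma, ra, file, page, offset);
+	} else if (!page) {
+		/* genuine miss: ra->mmap_miss is incremented here */
+		do_sync_mmap_readahead(vma, ra, file, offset);
+	}
+	/* a retried fault (FAULT_FLAG_TRIED set) that now finds the page
+	 * does neither, so the miss counted on the first try stands */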
+
+With this change, Mark reports that:
+
+> Random read improves by 250%, sequential read improves by 40%, and
+> random write by 400% to an eMMC device with dm crypto wrapped around it.
+
+Cc: Shaohua Li <shli@kernel.org>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Wu Fengguang <fengguang.wu@intel.com>
+Signed-off-by: Mark Salyzyn <salyzyn@android.com>
+Signed-off-by: Riley Andrews <riandrews@android.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/mm/fault.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -278,6 +278,7 @@ retry:
+ * starvation.
+ */
+ mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
++ mm_flags |= FAULT_FLAG_TRIED;
+ goto retry;
+ }
+ }
--- /dev/null
+From: dingtianhong <dingtianhong@huawei.com>
+Date: Thu, 16 Jul 2015 16:30:02 +0800
+Subject: bonding: correct the MAC address for "follow" fail_over_mac policy
+
+From: dingtianhong <dingtianhong@huawei.com>
+
+[ Upstream commit a951bc1e6ba58f11df5ed5ddc41311e10f5fd20b ]
+
+The "follow" fail_over_mac policy is useful for multiport devices that
+either become confused or incur a performance penalty when multiple
+ports are programmed with the same MAC address, but the same MAC
+address can still end up on two ports under this policy, as these steps show:
+
+1) echo +eth0 > /sys/class/net/bond0/bonding/slaves
+ bond0 takes the same MAC address as eth0; call it MAC1.
+
+2) echo +eth1 > /sys/class/net/bond0/bonding/slaves
+ eth1 becomes the backup slave; it has MAC2.
+
+3) ifconfig eth0 down
+ eth1 becomes the active slave and the bond swaps the MACs of eth0 and
+ eth1, so eth1 now has MAC1 and eth0 has MAC2.
+
+4) ifconfig eth1 down
+ there is no active slave left; eth1 still has MAC1 and eth0 has MAC2.
+
+5) ifconfig eth0 up
+ eth0 becomes the active slave again and the bond sets eth0 back to MAC1.
+
+At this point something has gone wrong: if eth1 is now brought up, eth0 and
+eth1 will both have MAC1, which breaks this policy for ACTIVE_BACKUP mode.
+
+Fix this by finding the old active slave (the one still carrying the
+bond's MAC address) and swapping MAC addresses with the new active
+slave before changing the active slave.
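+
+A minimal sketch of the idea (names as in the hunk below): when
+curr_active_slave has already gone away, treat whichever slave still
+carries the bond's MAC address as the old active one, so the swap path
+is taken instead of leaving the stale MAC behind.
+
+	/* no old active recorded (step 4 above): find the slave that
+	 * still holds bond0's MAC and swap MACs with that one */
+	if (!old_active)
+		old_active = bond_get_old_active(bond, new_active);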
+
+Signed-off-by: Ding Tianhong <dingtianhong@huawei.com>
+Tested-by: Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+[bwh: Backported to 3.10: bond_for_each_slave() takes an extra int parameter]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/bonding/bond_main.c | 20 ++++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -876,6 +876,23 @@ static void bond_mc_swap(struct bonding
+ }
+ }
+
++static struct slave *bond_get_old_active(struct bonding *bond,
++ struct slave *new_active)
++{
++ struct slave *slave;
++ int i;
++
++ bond_for_each_slave(bond, slave, i) {
++ if (slave == new_active)
++ continue;
++
++ if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
++ return slave;
++ }
++
++ return NULL;
++}
++
+ /*
+ * bond_do_fail_over_mac
+ *
+@@ -919,6 +936,9 @@ static void bond_do_fail_over_mac(struct
+ write_unlock_bh(&bond->curr_slave_lock);
+ read_unlock(&bond->lock);
+
++ if (!old_active)
++ old_active = bond_get_old_active(bond, new_active);
++
+ if (old_active) {
+ memcpy(tmp_mac, new_active->dev->dev_addr, ETH_ALEN);
+ memcpy(saddr.sa_data, old_active->dev->dev_addr,
--- /dev/null
+From 2bffa1503c5c06192eb1459180fac4416575a966 Mon Sep 17 00:00:00 2001
+From: Joe Thornber <ejt@redhat.com>
+Date: Fri, 9 Oct 2015 14:03:38 +0100
+Subject: dm cache: fix NULL pointer when switching from cleaner policy
+
+From: Joe Thornber <ejt@redhat.com>
+
+commit 2bffa1503c5c06192eb1459180fac4416575a966 upstream.
+
+The cleaner policy doesn't make use of the per cache block hint space in
+the metadata (unlike the other policies). When switching from the
+cleaner policy to mq or smq a NULL pointer crash (in dm_tm_new_block)
+was observed. The crash was caused by bugs in dm-cache-metadata.c
+when trying to skip creation of the hint btree.
+
+The minimal fix is to change the hint size for the cleaner policy to 4
+bytes (the only hint size supported).
+
+Signed-off-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-cache-policy-cleaner.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/md/dm-cache-policy-cleaner.c
++++ b/drivers/md/dm-cache-policy-cleaner.c
+@@ -434,7 +434,7 @@ static struct dm_cache_policy *wb_create
+ static struct dm_cache_policy_type wb_policy_type = {
+ .name = "cleaner",
+ .version = {1, 0, 0},
+- .hint_size = 0,
++ .hint_size = 4,
+ .owner = THIS_MODULE,
+ .create = wb_create
+ };
--- /dev/null
+From roland@kernel.org Sat Oct 17 17:11:23 2015
+From: Roland Dreier <roland@kernel.org>
+Date: Mon, 5 Oct 2015 10:29:28 -0700
+Subject: fib_rules: Fix dump_rules() not to exit early
+To: netdev@vger.kernel.org, stable@vger.kernel.org
+Cc: Roland Dreier <roland@purestorage.com>
+Message-ID: <1444066168-5566-1-git-send-email-roland@kernel.org>
+
+
+From: Roland Dreier <roland@purestorage.com>
+
+Backports of 41fc014332d9 ("fib_rules: fix fib rule dumps across
+multiple skbs") introduced a regression in "ip rule show" - it ends up
+dumping the first rule over and over and never exiting, because 3.19
+and earlier are missing commit 053c095a82cf ("netlink: make
+nlmsg_end() and genlmsg_end() void"), so fib_nl_fill_rule() ends up
+returning skb->len (i.e. > 0) in the success case.
+
+Fix this by checking the return code for < 0 instead of != 0.
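+
+In 3.19 and earlier, nlmsg_end(), whose return value fib_nl_fill_rule()
+passes back on success, looked roughly like this:
+
+	static inline int nlmsg_end(struct sk_buff *skb, struct nlmsghdr *nlh)
+	{
+		nlh->nlmsg_len = skb_tail_pointer(skb) - (unsigned char *)nlh;
+		return skb->len;	/* > 0, not 0, on success */
+	}
+
+so the old "if (err)" check treated every successfully filled rule as an
+error and broke out of the loop before the dump index was advanced.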
+
+Signed-off-by: Roland Dreier <roland@purestorage.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/core/fib_rules.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/core/fib_rules.c
++++ b/net/core/fib_rules.c
+@@ -606,7 +606,7 @@ static int dump_rules(struct sk_buff *sk
+ err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, RTM_NEWRULE,
+ NLM_F_MULTI, ops);
+- if (err)
++ if (err < 0)
+ break;
+ skip:
+ idx++;
--- /dev/null
+From 95c2b17534654829db428f11bcf4297c059a2a7e Mon Sep 17 00:00:00 2001
+From: Ben Hutchings <ben@decadent.org.uk>
+Date: Sat, 26 Sep 2015 12:23:56 +0100
+Subject: genirq: Fix race in register_irq_proc()
+
+From: Ben Hutchings <ben@decadent.org.uk>
+
+commit 95c2b17534654829db428f11bcf4297c059a2a7e upstream.
+
+Per-IRQ directories in procfs are created only when a handler is first
+added to the irqdesc, not when the irqdesc is created. In the case of
+a shared IRQ, multiple tasks can race to create a directory. This
+race condition seems to have been present forever, but is easier to
+hit with async probing.
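+
+The race is a plain check-then-create on desc->dir (sketch of the
+pre-patch flow, using the lines removed below):
+
+	/* two tasks adding handlers for a shared IRQ both pass this check */
+	if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir)
+		return;
+
+	/* ... both then call proc_mkdir() for the same name and go on to
+	 * populate /proc/irq/<irq>, stepping on each other's desc->dir */
+	desc->dir = proc_mkdir(name, root_irq_dir);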
+
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Link: http://lkml.kernel.org/r/1443266636.2004.2.camel@decadent.org.uk
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/irq/proc.c | 19 +++++++++++++++++--
+ 1 file changed, 17 insertions(+), 2 deletions(-)
+
+--- a/kernel/irq/proc.c
++++ b/kernel/irq/proc.c
+@@ -12,6 +12,7 @@
+ #include <linux/seq_file.h>
+ #include <linux/interrupt.h>
+ #include <linux/kernel_stat.h>
++#include <linux/mutex.h>
+
+ #include "internals.h"
+
+@@ -309,18 +310,29 @@ void register_handler_proc(unsigned int
+
+ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
+ {
++ static DEFINE_MUTEX(register_lock);
+ char name [MAX_NAMELEN];
+
+- if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir)
++ if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
+ return;
+
++ /*
++ * irq directories are registered only when a handler is
++ * added, not when the descriptor is created, so multiple
++ * tasks might try to register at the same time.
++ */
++	mutex_lock(&register_lock);
++
++ if (desc->dir)
++ goto out_unlock;
++
+ memset(name, 0, MAX_NAMELEN);
+ sprintf(name, "%d", irq);
+
+ /* create /proc/irq/1234 */
+ desc->dir = proc_mkdir(name, root_irq_dir);
+ if (!desc->dir)
+- return;
++ goto out_unlock;
+
+ #ifdef CONFIG_SMP
+ /* create /proc/irq/<irq>/smp_affinity */
+@@ -341,6 +353,9 @@ void register_irq_proc(unsigned int irq,
+
+ proc_create_data("spurious", 0444, desc->dir,
+ &irq_spurious_proc_fops, (void *)(long)irq);
++
++out_unlock:
++	mutex_unlock(&register_lock);
+ }
+
+ void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
--- /dev/null
+From 8474ba74193d302e8340dddd1e16c85cc4b98caf Mon Sep 17 00:00:00 2001
+From: Andreas Schwab <schwab@linux-m68k.org>
+Date: Wed, 23 Sep 2015 23:12:09 +0200
+Subject: m68k: Define asmlinkage_protect
+
+From: Andreas Schwab <schwab@linux-m68k.org>
+
+commit 8474ba74193d302e8340dddd1e16c85cc4b98caf upstream.
+
+Make sure the compiler does not modify arguments of syscall functions.
+This can happen if the compiler generates a tailcall to another
+function. For example, without asmlinkage_protect sys_openat is compiled
+into this function:
+
+sys_openat:
+ clr.l %d0
+ move.w 18(%sp),%d0
+ move.l %d0,16(%sp)
+ jbra do_sys_open
+
+Note how the fourth argument is modified in place, modifying the register
+%d4 that gets restored from this stack slot when the function returns to
+user-space. The caller may expect the register to be unmodified across
+system calls.
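+
+For context, the wrappers generated by SYSCALL_DEFINEx() use the macro
+roughly like this (simplified sketch; the real expansion has more
+plumbing):
+
+	asmlinkage long sys_openat(int dfd, const char __user *filename,
+				   int flags, umode_t mode)
+	{
+		long ret = do_sys_open(dfd, filename, flags, mode);
+		asmlinkage_protect(4, ret, dfd, filename, flags, mode);
+		return ret;
+	}
+
+The empty inline asm names ret and the argument slots as operands, so
+gcc treats them as live until the end of the function and can neither
+reuse the slots nor turn the call into a tailcall.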
+
+Signed-off-by: Andreas Schwab <schwab@linux-m68k.org>
+Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/m68k/include/asm/linkage.h | 30 ++++++++++++++++++++++++++++++
+ 1 file changed, 30 insertions(+)
+
+--- a/arch/m68k/include/asm/linkage.h
++++ b/arch/m68k/include/asm/linkage.h
+@@ -4,4 +4,34 @@
+ #define __ALIGN .align 4
+ #define __ALIGN_STR ".align 4"
+
++/*
++ * Make sure the compiler doesn't do anything stupid with the
++ * arguments on the stack - they are owned by the *caller*, not
++ * the callee. This just fools gcc into not spilling into them,
++ * and keeps it from doing tailcall recursion and/or using the
++ * stack slots for temporaries, since they are live and "used"
++ * all the way to the end of the function.
++ */
++#define asmlinkage_protect(n, ret, args...) \
++ __asmlinkage_protect##n(ret, ##args)
++#define __asmlinkage_protect_n(ret, args...) \
++ __asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args)
++#define __asmlinkage_protect0(ret) \
++ __asmlinkage_protect_n(ret)
++#define __asmlinkage_protect1(ret, arg1) \
++ __asmlinkage_protect_n(ret, "m" (arg1))
++#define __asmlinkage_protect2(ret, arg1, arg2) \
++ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2))
++#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \
++ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3))
++#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \
++ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
++ "m" (arg4))
++#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \
++ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
++ "m" (arg4), "m" (arg5))
++#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \
++ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
++ "m" (arg4), "m" (arg5), "m" (arg6))
++
+ #endif
ib-qib-change-lkey-table-allocation-to-support-more-mrs.patch
dcache-handle-escaped-paths-in-prepend_path.patch
vfs-test-for-and-handle-paths-that-are-unreachable-from-their-mnt_root.patch
+arm64-readahead-fault-retry-breaks-mmap-file-read-random-detection.patch
+m68k-define-asmlinkage_protect.patch
+bonding-correct-the-mac-address-for-follow-fail_over_mac-policy.patch
+fib_rules-fix-dump_rules-not-to-exit-early.patch
+genirq-fix-race-in-register_irq_proc.patch
+x86-add-1-2-4-8-byte-optimization-to-64bit-__copy_-from-to-_user_inatomic.patch
+dm-cache-fix-null-pointer-when-switching-from-cleaner-policy.patch
+staging-speakup-fix-speakup-r-regression.patch
--- /dev/null
+From b1d562acc78f0af46de0dfe447410bc40bdb7ece Mon Sep 17 00:00:00 2001
+From: "covici@ccs.covici.com" <covici@ccs.covici.com>
+Date: Wed, 20 May 2015 05:44:11 -0400
+Subject: staging: speakup: fix speakup-r regression
+
+From: "covici@ccs.covici.com" <covici@ccs.covici.com>
+
+commit b1d562acc78f0af46de0dfe447410bc40bdb7ece upstream.
+
+Here is a patch to make speakup-r work again.
+
+It broke in 3.6 due to commit 4369c64c79a22b98d3b7eff9d089196cd878a10a
+"Input: Send events one packet at a time)
+
+The problem was that the fakekey.c routine that fakes a down arrow no
+longer worked; adding an input_sync() call after the key events fixes it.
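+
+Since that change, queued key events are only delivered to handlers
+when the packet is closed, so the fake keypress has to end with
+input_sync() (the pattern established by the one-line change below):
+
+	input_report_key(virt_keyboard, KEY_DOWN, PRESSED);
+	input_report_key(virt_keyboard, KEY_DOWN, RELEASED);
+	input_sync(virt_keyboard);	/* close the packet so the events reach handlers */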
+
+Fixes: 4369c64c79a22b98d3b7eff9d089196cd878a10a
+Acked-by: Samuel Thibault <samuel.thibault@ens-lyon.org>
+Signed-off-by: John Covici <covici@ccs.covici.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/speakup/fakekey.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/staging/speakup/fakekey.c
++++ b/drivers/staging/speakup/fakekey.c
+@@ -81,6 +81,7 @@ void speakup_fake_down_arrow(void)
+ __this_cpu_write(reporting_keystroke, true);
+ input_report_key(virt_keyboard, KEY_DOWN, PRESSED);
+ input_report_key(virt_keyboard, KEY_DOWN, RELEASED);
++ input_sync(virt_keyboard);
+ __this_cpu_write(reporting_keystroke, false);
+
+ /* reenable preemption */
--- /dev/null
+From ff47ab4ff3cddfa7bc1b25b990e24abe2ae474ff Mon Sep 17 00:00:00 2001
+From: Andi Kleen <ak@linux.intel.com>
+Date: Fri, 16 Aug 2013 14:17:19 -0700
+Subject: x86: Add 1/2/4/8 byte optimization to 64bit __copy_{from,to}_user_inatomic
+
+From: Andi Kleen <ak@linux.intel.com>
+
+commit ff47ab4ff3cddfa7bc1b25b990e24abe2ae474ff upstream.
+
+The 64bit __copy_{from,to}_user_inatomic always called
+copy_user_generic(), but skipped the special optimizations for 1/2/4/8
+byte accesses.
+
+This especially hurts the futex call, which accesses the 4 byte futex
+user value with a complicated fast string operation in a function call,
+instead of a single movl.
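+
+For example, the futex fast path of that era reads the user value with a
+constant 4-byte copy (paraphrased from kernel/futex.c):
+
+	static int get_futex_value_locked(u32 *dest, u32 __user *from)
+	{
+		int ret;
+
+		pagefault_disable();
+		ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
+		pagefault_enable();
+
+		return ret ? -EFAULT : 0;
+	}
+
+With the constant-size switch available to the _inatomic variants, that
+4-byte copy becomes the single movl instead of a call into
+copy_user_generic().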
+
+Use __copy_{from,to}_user for _inatomic instead to get the same
+optimizations. The only problem was the might_fault() in those functions.
+So move it into new wrappers and call __copy_{from,to}_user_nocheck()
+from the *_inatomic variants directly.
+
+32bit already did this correctly by duplicating the code.
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Link: http://lkml.kernel.org/r/1376687844-19857-2-git-send-email-andi@firstfloor.org
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/uaccess_64.h | 24 ++++++++++++++++++------
+ 1 file changed, 18 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/include/asm/uaccess_64.h
++++ b/arch/x86/include/asm/uaccess_64.h
+@@ -77,11 +77,10 @@ int copy_to_user(void __user *dst, const
+ }
+
+ static __always_inline __must_check
+-int __copy_from_user(void *dst, const void __user *src, unsigned size)
++int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
+ {
+ int ret = 0;
+
+- might_fault();
+ if (!__builtin_constant_p(size))
+ return copy_user_generic(dst, (__force void *)src, size);
+ switch (size) {
+@@ -121,11 +120,17 @@ int __copy_from_user(void *dst, const vo
+ }
+
+ static __always_inline __must_check
+-int __copy_to_user(void __user *dst, const void *src, unsigned size)
++int __copy_from_user(void *dst, const void __user *src, unsigned size)
++{
++ might_fault();
++ return __copy_from_user_nocheck(dst, src, size);
++}
++
++static __always_inline __must_check
++int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
+ {
+ int ret = 0;
+
+- might_fault();
+ if (!__builtin_constant_p(size))
+ return copy_user_generic((__force void *)dst, src, size);
+ switch (size) {
+@@ -165,6 +170,13 @@ int __copy_to_user(void __user *dst, con
+ }
+
+ static __always_inline __must_check
++int __copy_to_user(void __user *dst, const void *src, unsigned size)
++{
++ might_fault();
++ return __copy_to_user_nocheck(dst, src, size);
++}
++
++static __always_inline __must_check
+ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+ {
+ int ret = 0;
+@@ -220,13 +232,13 @@ int __copy_in_user(void __user *dst, con
+ static __must_check __always_inline int
+ __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
+ {
+- return copy_user_generic(dst, (__force const void *)src, size);
++ return __copy_from_user_nocheck(dst, (__force const void *)src, size);
+ }
+
+ static __must_check __always_inline int
+ __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
+ {
+- return copy_user_generic((__force void *)dst, src, size);
++ return __copy_to_user_nocheck((__force void *)dst, src, size);
+ }
+
+ extern long __copy_user_nocache(void *dst, const void __user *src,