git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.4-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 10 Apr 2017 14:45:49 +0000 (16:45 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 10 Apr 2017 14:45:49 +0000 (16:45 +0200)
added patches:
metag-usercopy-add-early-abort-to-copy_to_user.patch
metag-usercopy-add-missing-fixups.patch
metag-usercopy-drop-unused-macros.patch
metag-usercopy-fix-alignment-error-checking.patch
metag-usercopy-fix-src-fixup-in-from-user-rapf-loops.patch
metag-usercopy-set-flags-before-addz.patch
metag-usercopy-zero-rest-of-buffer-from-copy_from_user.patch
mips-end-spinlocks-with-.insn.patch
mips-flush-wrong-invalid-ftlb-entry-for-huge-page.patch
mips-force-o32-fp64-support-on-32bit-mips64r6-kernels.patch
mips-lantiq-fix-missing-xbar-kernel-panic.patch
mips-ralink-fix-typos-in-rt3883-pinctrl.patch
nios2-reserve-boot-memory-for-device-tree.patch
powerpc-don-t-try-to-fix-up-misaligned-load-with-reservation-instructions.patch
powerpc-mm-add-missing-global-tlb-invalidate-if-cxl-is-active.patch
s390-decompressor-fix-initrd-corruption-caused-by-bss-clear.patch
s390-uaccess-get_user-should-zero-on-failure-again.patch

18 files changed:
queue-4.4/metag-usercopy-add-early-abort-to-copy_to_user.patch [new file with mode: 0644]
queue-4.4/metag-usercopy-add-missing-fixups.patch [new file with mode: 0644]
queue-4.4/metag-usercopy-drop-unused-macros.patch [new file with mode: 0644]
queue-4.4/metag-usercopy-fix-alignment-error-checking.patch [new file with mode: 0644]
queue-4.4/metag-usercopy-fix-src-fixup-in-from-user-rapf-loops.patch [new file with mode: 0644]
queue-4.4/metag-usercopy-set-flags-before-addz.patch [new file with mode: 0644]
queue-4.4/metag-usercopy-zero-rest-of-buffer-from-copy_from_user.patch [new file with mode: 0644]
queue-4.4/mips-end-spinlocks-with-.insn.patch [new file with mode: 0644]
queue-4.4/mips-flush-wrong-invalid-ftlb-entry-for-huge-page.patch [new file with mode: 0644]
queue-4.4/mips-force-o32-fp64-support-on-32bit-mips64r6-kernels.patch [new file with mode: 0644]
queue-4.4/mips-lantiq-fix-missing-xbar-kernel-panic.patch [new file with mode: 0644]
queue-4.4/mips-ralink-fix-typos-in-rt3883-pinctrl.patch [new file with mode: 0644]
queue-4.4/nios2-reserve-boot-memory-for-device-tree.patch [new file with mode: 0644]
queue-4.4/powerpc-don-t-try-to-fix-up-misaligned-load-with-reservation-instructions.patch [new file with mode: 0644]
queue-4.4/powerpc-mm-add-missing-global-tlb-invalidate-if-cxl-is-active.patch [new file with mode: 0644]
queue-4.4/s390-decompressor-fix-initrd-corruption-caused-by-bss-clear.patch [new file with mode: 0644]
queue-4.4/s390-uaccess-get_user-should-zero-on-failure-again.patch [new file with mode: 0644]
queue-4.4/series

diff --git a/queue-4.4/metag-usercopy-add-early-abort-to-copy_to_user.patch b/queue-4.4/metag-usercopy-add-early-abort-to-copy_to_user.patch
new file mode 100644 (file)
index 0000000..d5a75f9
--- /dev/null
@@ -0,0 +1,103 @@
+From fb8ea062a8f2e85256e13f55696c5c5f0dfdcc8b Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Fri, 31 Mar 2017 13:35:01 +0100
+Subject: metag/usercopy: Add early abort to copy_to_user
+
+From: James Hogan <james.hogan@imgtec.com>
+
+commit fb8ea062a8f2e85256e13f55696c5c5f0dfdcc8b upstream.
+
+When copying to userland on Meta, if any faults are encountered,
+immediately abort the copy instead of continuing on and repeatedly
+faulting and, worse, potentially copying further bytes successfully to
+subsequent valid pages.
+
+Fixes: 373cd784d0fc ("metag: Memory handling")
+Reported-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: linux-metag@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/metag/lib/usercopy.c |   20 ++++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+--- a/arch/metag/lib/usercopy.c
++++ b/arch/metag/lib/usercopy.c
+@@ -538,23 +538,31 @@ unsigned long __copy_user(void __user *p
+       if ((unsigned long) src & 1) {
+               __asm_copy_to_user_1(dst, src, retn);
+               n--;
++              if (retn)
++                      return retn + n;
+       }
+       if ((unsigned long) dst & 1) {
+               /* Worst case - byte copy */
+               while (n > 0) {
+                       __asm_copy_to_user_1(dst, src, retn);
+                       n--;
++                      if (retn)
++                              return retn + n;
+               }
+       }
+       if (((unsigned long) src & 2) && n >= 2) {
+               __asm_copy_to_user_2(dst, src, retn);
+               n -= 2;
++              if (retn)
++                      return retn + n;
+       }
+       if ((unsigned long) dst & 2) {
+               /* Second worst case - word copy */
+               while (n >= 2) {
+                       __asm_copy_to_user_2(dst, src, retn);
+                       n -= 2;
++                      if (retn)
++                              return retn + n;
+               }
+       }
+@@ -569,6 +577,8 @@ unsigned long __copy_user(void __user *p
+               while (n >= 8) {
+                       __asm_copy_to_user_8x64(dst, src, retn);
+                       n -= 8;
++                      if (retn)
++                              return retn + n;
+               }
+       }
+       if (n >= RAPF_MIN_BUF_SIZE) {
+@@ -581,6 +591,8 @@ unsigned long __copy_user(void __user *p
+               while (n >= 8) {
+                       __asm_copy_to_user_8x64(dst, src, retn);
+                       n -= 8;
++                      if (retn)
++                              return retn + n;
+               }
+       }
+ #endif
+@@ -588,11 +600,15 @@ unsigned long __copy_user(void __user *p
+       while (n >= 16) {
+               __asm_copy_to_user_16(dst, src, retn);
+               n -= 16;
++              if (retn)
++                      return retn + n;
+       }
+       while (n >= 4) {
+               __asm_copy_to_user_4(dst, src, retn);
+               n -= 4;
++              if (retn)
++                      return retn + n;
+       }
+       switch (n) {
+@@ -609,6 +625,10 @@ unsigned long __copy_user(void __user *p
+               break;
+       }
++      /*
++       * If we get here, retn correctly reflects the number of failing
++       * bytes.
++       */
+       return retn;
+ }
+ EXPORT_SYMBOL(__copy_user);
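The shape of this fix is easy to restate outside of assembly. Below is a
minimal C sketch of the early-abort pattern, assuming a hypothetical
copy_step() in place of the __asm_copy_to_user_*() primitives (whose
extable fixups are what actually record faulting bytes in retn); the
names copy_step and copy_with_early_abort are illustrative only:

    #include <stddef.h>

    /* Hypothetical stand-in for one __asm_copy_to_user_*() step: copy
     * "step" bytes; the real primitives accumulate faulting bytes into
     * *retn from their extable fixup code. This model never faults. */
    static void copy_step(char **dst, const char **src, size_t step,
                          size_t *retn)
    {
            size_t i;

            for (i = 0; i < step; i++)
                    *(*dst)++ = *(*src)++;
            (void)retn;             /* only written by real fixup code */
    }

    /* Early-abort shape: check retn after every step and report all
     * remaining bytes (retn + n) as uncopied the moment a fault is
     * seen, instead of faulting again on each following access. */
    static size_t copy_with_early_abort(char *dst, const char *src,
                                        size_t n)
    {
            size_t retn = 0;

            while (n >= 4) {
                    copy_step(&dst, &src, 4, &retn);
                    n -= 4;
                    if (retn)
                            return retn + n;
            }
            while (n > 0) {
                    copy_step(&dst, &src, 1, &retn);
                    n--;
                    if (retn)
                            return retn + n;
            }
            return retn;
    }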
diff --git a/queue-4.4/metag-usercopy-add-missing-fixups.patch b/queue-4.4/metag-usercopy-add-missing-fixups.patch
new file mode 100644 (file)
index 0000000..f1021ca
--- /dev/null
@@ -0,0 +1,167 @@
+From b884a190afcecdbef34ca508ea5ee88bb7c77861 Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Tue, 4 Apr 2017 08:51:34 +0100
+Subject: metag/usercopy: Add missing fixups
+
+From: James Hogan <james.hogan@imgtec.com>
+
+commit b884a190afcecdbef34ca508ea5ee88bb7c77861 upstream.
+
+The rapf copy loops in the Meta usercopy code are missing some extable
+entries for HTP cores with unaligned access checking enabled, where
+faults occur on the instruction immediately after the faulting access.
+
+Add the fixup labels and extable entries for these cases so that corner
+case user copy failures don't cause kernel crashes.
+
+Fixes: 373cd784d0fc ("metag: Memory handling")
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: linux-metag@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/metag/lib/usercopy.c |   72 ++++++++++++++++++++++++++++++----------------
+ 1 file changed, 48 insertions(+), 24 deletions(-)
+
+--- a/arch/metag/lib/usercopy.c
++++ b/arch/metag/lib/usercopy.c
+@@ -259,27 +259,31 @@
+               "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+               "22:\n"                                                 \
+               "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
+-              "SUB    %3, %3, #32\n"                                  \
+               "23:\n"                                                 \
+-              "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
++              "SUB    %3, %3, #32\n"                                  \
+               "24:\n"                                                 \
++              "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
++              "25:\n"                                                 \
+               "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
++              "26:\n"                                                 \
+               "SUB    %3, %3, #32\n"                                  \
+               "DCACHE [%1+#-64], D0Ar6\n"                             \
+               "BR     $Lloop"id"\n"                                   \
+                                                                       \
+               "MOV    RAPF, %1\n"                                     \
+-              "25:\n"                                                 \
++              "27:\n"                                                 \
+               "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+-              "26:\n"                                                 \
++              "28:\n"                                                 \
+               "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
++              "29:\n"                                                 \
+               "SUB    %3, %3, #32\n"                                  \
+-              "27:\n"                                                 \
++              "30:\n"                                                 \
+               "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+-              "28:\n"                                                 \
++              "31:\n"                                                 \
+               "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
++              "32:\n"                                                 \
+               "SUB    %0, %0, #8\n"                                   \
+-              "29:\n"                                                 \
++              "33:\n"                                                 \
+               "SETL   [%0++], D0.7, D1.7\n"                           \
+               "SUB    %3, %3, #32\n"                                  \
+               "1:"                                                    \
+@@ -311,7 +315,11 @@
+               "       .long 26b,3b\n"                                 \
+               "       .long 27b,3b\n"                                 \
+               "       .long 28b,3b\n"                                 \
+-              "       .long 29b,4b\n"                                 \
++              "       .long 29b,3b\n"                                 \
++              "       .long 30b,3b\n"                                 \
++              "       .long 31b,3b\n"                                 \
++              "       .long 32b,3b\n"                                 \
++              "       .long 33b,4b\n"                                 \
+               "       .previous\n"                                    \
+               : "=r" (to), "=r" (from), "=r" (ret), "=d" (n)          \
+               : "0" (to), "1" (from), "2" (ret), "3" (n)              \
+@@ -402,47 +410,55 @@
+               "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+               "22:\n"                                                 \
+               "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
+-              "SUB    %3, %3, #16\n"                                  \
+               "23:\n"                                                 \
+-              "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+-              "24:\n"                                                 \
+-              "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
+               "SUB    %3, %3, #16\n"                                  \
+-              "25:\n"                                                 \
++              "24:\n"                                                 \
+               "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+-              "26:\n"                                                 \
++              "25:\n"                                                 \
+               "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
++              "26:\n"                                                 \
+               "SUB    %3, %3, #16\n"                                  \
+               "27:\n"                                                 \
+               "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+               "28:\n"                                                 \
+               "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
++              "29:\n"                                                 \
++              "SUB    %3, %3, #16\n"                                  \
++              "30:\n"                                                 \
++              "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
++              "31:\n"                                                 \
++              "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
++              "32:\n"                                                 \
+               "SUB    %3, %3, #16\n"                                  \
+               "DCACHE [%1+#-64], D0Ar6\n"                             \
+               "BR     $Lloop"id"\n"                                   \
+                                                                       \
+               "MOV    RAPF, %1\n"                                     \
+-              "29:\n"                                                 \
++              "33:\n"                                                 \
+               "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+-              "30:\n"                                                 \
++              "34:\n"                                                 \
+               "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
++              "35:\n"                                                 \
+               "SUB    %3, %3, #16\n"                                  \
+-              "31:\n"                                                 \
++              "36:\n"                                                 \
+               "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+-              "32:\n"                                                 \
++              "37:\n"                                                 \
+               "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
++              "38:\n"                                                 \
+               "SUB    %3, %3, #16\n"                                  \
+-              "33:\n"                                                 \
++              "39:\n"                                                 \
+               "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+-              "34:\n"                                                 \
++              "40:\n"                                                 \
+               "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
++              "41:\n"                                                 \
+               "SUB    %3, %3, #16\n"                                  \
+-              "35:\n"                                                 \
++              "42:\n"                                                 \
+               "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+-              "36:\n"                                                 \
++              "43:\n"                                                 \
+               "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
++              "44:\n"                                                 \
+               "SUB    %0, %0, #4\n"                                   \
+-              "37:\n"                                                 \
++              "45:\n"                                                 \
+               "SETD   [%0++], D0.7\n"                                 \
+               "SUB    %3, %3, #16\n"                                  \
+               "1:"                                                    \
+@@ -482,7 +498,15 @@
+               "       .long 34b,3b\n"                                 \
+               "       .long 35b,3b\n"                                 \
+               "       .long 36b,3b\n"                                 \
+-              "       .long 37b,4b\n"                                 \
++              "       .long 37b,3b\n"                                 \
++              "       .long 38b,3b\n"                                 \
++              "       .long 39b,3b\n"                                 \
++              "       .long 40b,3b\n"                                 \
++              "       .long 41b,3b\n"                                 \
++              "       .long 42b,3b\n"                                 \
++              "       .long 43b,3b\n"                                 \
++              "       .long 44b,3b\n"                                 \
++              "       .long 45b,4b\n"                                 \
+               "       .previous\n"                                    \
+               : "=r" (to), "=r" (from), "=r" (ret), "=d" (n)          \
+               : "0" (to), "1" (from), "2" (ret), "3" (n)              \
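For readers unfamiliar with the ".long Nb,Mb" lines: each one emits an
exception table entry pairing the address of a potentially-faulting
instruction with the fixup address to resume at. A rough sketch of the
entry layout (the classic two-word form; treat the exact definition as
an assumption):

    /* Each ".long Nb,Mb" pair above emits one of these: when the fault
     * handler finds the faulting PC equal to insn, it resumes at fixup.
     * With unaligned access checking enabled on HTP cores, the
     * instruction *after* a faulting access can fault too, so it needs
     * its own entry; that is what this patch adds. */
    struct exception_table_entry {
            unsigned long insn;     /* address that may fault */
            unsigned long fixup;    /* address to resume at */
    };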
diff --git a/queue-4.4/metag-usercopy-drop-unused-macros.patch b/queue-4.4/metag-usercopy-drop-unused-macros.patch
new file mode 100644 (file)
index 0000000..c427262
--- /dev/null
@@ -0,0 +1,144 @@
+From ef62a2d81f73d9cddef14bc3d9097a57010d551c Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Fri, 31 Mar 2017 10:37:44 +0100
+Subject: metag/usercopy: Drop unused macros
+
+From: James Hogan <james.hogan@imgtec.com>
+
+commit ef62a2d81f73d9cddef14bc3d9097a57010d551c upstream.
+
+Metag's lib/usercopy.c has a bunch of copy_from_user macros for larger
+copies between 5 and 16 bytes which are completely unused. Before fixing
+the zeroing, let's drop these macros so there is less to fix.
+
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: linux-metag@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/metag/lib/usercopy.c |  113 ----------------------------------------------
+ 1 file changed, 113 deletions(-)
+
+--- a/arch/metag/lib/usercopy.c
++++ b/arch/metag/lib/usercopy.c
+@@ -651,119 +651,6 @@ EXPORT_SYMBOL(__copy_user);
+ #define __asm_copy_from_user_4(to, from, ret) \
+       __asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
+-#define __asm_copy_from_user_5(to, from, ret) \
+-      __asm_copy_from_user_4x_cont(to, from, ret,     \
+-              "       GETB D1Ar1,[%1++]\n"            \
+-              "4:     SETB [%0++],D1Ar1\n",           \
+-              "5:     ADD  %2,%2,#1\n"                \
+-              "       SETB [%0++],D1Ar1\n",           \
+-              "       .long 4b,5b\n")
+-
+-#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+-      __asm_copy_from_user_4x_cont(to, from, ret,     \
+-              "       GETW D1Ar1,[%1++]\n"            \
+-              "4:     SETW [%0++],D1Ar1\n" COPY,      \
+-              "5:     ADD  %2,%2,#2\n"                \
+-              "       SETW [%0++],D1Ar1\n" FIXUP,     \
+-              "       .long 4b,5b\n" TENTRY)
+-
+-#define __asm_copy_from_user_6(to, from, ret) \
+-      __asm_copy_from_user_6x_cont(to, from, ret, "", "", "")
+-
+-#define __asm_copy_from_user_7(to, from, ret) \
+-      __asm_copy_from_user_6x_cont(to, from, ret,     \
+-              "       GETB D1Ar1,[%1++]\n"            \
+-              "6:     SETB [%0++],D1Ar1\n",           \
+-              "7:     ADD  %2,%2,#1\n"                \
+-              "       SETB [%0++],D1Ar1\n",           \
+-              "       .long 6b,7b\n")
+-
+-#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+-      __asm_copy_from_user_4x_cont(to, from, ret,     \
+-              "       GETD D1Ar1,[%1++]\n"            \
+-              "4:     SETD [%0++],D1Ar1\n" COPY,      \
+-              "5:     ADD  %2,%2,#4\n"                        \
+-              "       SETD [%0++],D1Ar1\n" FIXUP,             \
+-              "       .long 4b,5b\n" TENTRY)
+-
+-#define __asm_copy_from_user_8(to, from, ret) \
+-      __asm_copy_from_user_8x_cont(to, from, ret, "", "", "")
+-
+-#define __asm_copy_from_user_9(to, from, ret) \
+-      __asm_copy_from_user_8x_cont(to, from, ret,     \
+-              "       GETB D1Ar1,[%1++]\n"            \
+-              "6:     SETB [%0++],D1Ar1\n",           \
+-              "7:     ADD  %2,%2,#1\n"                \
+-              "       SETB [%0++],D1Ar1\n",           \
+-              "       .long 6b,7b\n")
+-
+-#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+-      __asm_copy_from_user_8x_cont(to, from, ret,     \
+-              "       GETW D1Ar1,[%1++]\n"            \
+-              "6:     SETW [%0++],D1Ar1\n" COPY,      \
+-              "7:     ADD  %2,%2,#2\n"                \
+-              "       SETW [%0++],D1Ar1\n" FIXUP,     \
+-              "       .long 6b,7b\n" TENTRY)
+-
+-#define __asm_copy_from_user_10(to, from, ret) \
+-      __asm_copy_from_user_10x_cont(to, from, ret, "", "", "")
+-
+-#define __asm_copy_from_user_11(to, from, ret)                \
+-      __asm_copy_from_user_10x_cont(to, from, ret,    \
+-              "       GETB D1Ar1,[%1++]\n"            \
+-              "8:     SETB [%0++],D1Ar1\n",           \
+-              "9:     ADD  %2,%2,#1\n"                \
+-              "       SETB [%0++],D1Ar1\n",           \
+-              "       .long 8b,9b\n")
+-
+-#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+-      __asm_copy_from_user_8x_cont(to, from, ret,     \
+-              "       GETD D1Ar1,[%1++]\n"            \
+-              "6:     SETD [%0++],D1Ar1\n" COPY,      \
+-              "7:     ADD  %2,%2,#4\n"                \
+-              "       SETD [%0++],D1Ar1\n" FIXUP,     \
+-              "       .long 6b,7b\n" TENTRY)
+-
+-#define __asm_copy_from_user_12(to, from, ret) \
+-      __asm_copy_from_user_12x_cont(to, from, ret, "", "", "")
+-
+-#define __asm_copy_from_user_13(to, from, ret) \
+-      __asm_copy_from_user_12x_cont(to, from, ret,    \
+-              "       GETB D1Ar1,[%1++]\n"            \
+-              "8:     SETB [%0++],D1Ar1\n",           \
+-              "9:     ADD  %2,%2,#1\n"                \
+-              "       SETB [%0++],D1Ar1\n",           \
+-              "       .long 8b,9b\n")
+-
+-#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+-      __asm_copy_from_user_12x_cont(to, from, ret,    \
+-              "       GETW D1Ar1,[%1++]\n"            \
+-              "8:     SETW [%0++],D1Ar1\n" COPY,      \
+-              "9:     ADD  %2,%2,#2\n"                \
+-              "       SETW [%0++],D1Ar1\n" FIXUP,     \
+-              "       .long 8b,9b\n" TENTRY)
+-
+-#define __asm_copy_from_user_14(to, from, ret) \
+-      __asm_copy_from_user_14x_cont(to, from, ret, "", "", "")
+-
+-#define __asm_copy_from_user_15(to, from, ret) \
+-      __asm_copy_from_user_14x_cont(to, from, ret,    \
+-              "       GETB D1Ar1,[%1++]\n"            \
+-              "10:    SETB [%0++],D1Ar1\n",           \
+-              "11:    ADD  %2,%2,#1\n"                \
+-              "       SETB [%0++],D1Ar1\n",           \
+-              "       .long 10b,11b\n")
+-
+-#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+-      __asm_copy_from_user_12x_cont(to, from, ret,    \
+-              "       GETD D1Ar1,[%1++]\n"            \
+-              "8:     SETD [%0++],D1Ar1\n" COPY,      \
+-              "9:     ADD  %2,%2,#4\n"                \
+-              "       SETD [%0++],D1Ar1\n" FIXUP,     \
+-              "       .long 8b,9b\n" TENTRY)
+-
+-#define __asm_copy_from_user_16(to, from, ret) \
+-      __asm_copy_from_user_16x_cont(to, from, ret, "", "", "")
+ #define __asm_copy_from_user_8x64(to, from, ret) \
+       asm volatile (                          \
diff --git a/queue-4.4/metag-usercopy-fix-alignment-error-checking.patch b/queue-4.4/metag-usercopy-fix-alignment-error-checking.patch
new file mode 100644 (file)
index 0000000..b6b7c63
--- /dev/null
@@ -0,0 +1,61 @@
+From 2257211942bbbf6c798ab70b487d7e62f7835a1a Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Fri, 31 Mar 2017 11:23:18 +0100
+Subject: metag/usercopy: Fix alignment error checking
+
+From: James Hogan <james.hogan@imgtec.com>
+
+commit 2257211942bbbf6c798ab70b487d7e62f7835a1a upstream.
+
+Fix the error checking of the alignment adjustment code in
+raw_copy_from_user(), which mistakenly considers it safe to skip the
+error check when aligning the source buffer on a 2 or 4 byte boundary.
+
+If the destination buffer was unaligned it may have started to copy
+using byte or word accesses, which could well be at the start of a new
+(valid) source page. This would result in it appearing to have copied 1
+or 2 bytes at the end of the first (invalid) page rather than none at
+all.
+
+Fixes: 373cd784d0fc ("metag: Memory handling")
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: linux-metag@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/metag/lib/usercopy.c |   10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+--- a/arch/metag/lib/usercopy.c
++++ b/arch/metag/lib/usercopy.c
+@@ -717,6 +717,8 @@ unsigned long __copy_user_zeroing(void *
+       if ((unsigned long) src & 1) {
+               __asm_copy_from_user_1(dst, src, retn);
+               n--;
++              if (retn)
++                      goto copy_exception_bytes;
+       }
+       if ((unsigned long) dst & 1) {
+               /* Worst case - byte copy */
+@@ -730,6 +732,8 @@ unsigned long __copy_user_zeroing(void *
+       if (((unsigned long) src & 2) && n >= 2) {
+               __asm_copy_from_user_2(dst, src, retn);
+               n -= 2;
++              if (retn)
++                      goto copy_exception_bytes;
+       }
+       if ((unsigned long) dst & 2) {
+               /* Second worst case - word copy */
+@@ -741,12 +745,6 @@ unsigned long __copy_user_zeroing(void *
+               }
+       }
+-      /* We only need one check after the unalignment-adjustments,
+-         because if both adjustments were done, either both or
+-         neither reference had an exception.  */
+-      if (retn != 0)
+-              goto copy_exception_bytes;
+-
+ #ifdef USE_RAPF
+       /* 64 bit copy loop */
+       if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
diff --git a/queue-4.4/metag-usercopy-fix-src-fixup-in-from-user-rapf-loops.patch b/queue-4.4/metag-usercopy-fix-src-fixup-in-from-user-rapf-loops.patch
new file mode 100644 (file)
index 0000000..194bc9e
--- /dev/null
@@ -0,0 +1,89 @@
+From 2c0b1df88b987a12d95ea1d6beaf01894f3cc725 Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Mon, 3 Apr 2017 17:41:40 +0100
+Subject: metag/usercopy: Fix src fixup in from user rapf loops
+
+From: James Hogan <james.hogan@imgtec.com>
+
+commit 2c0b1df88b987a12d95ea1d6beaf01894f3cc725 upstream.
+
+The fixup code to rewind the source pointer in
+__asm_copy_from_user_{32,64}bit_rapf_loop() always rewound the source by
+a single unit (4 or 8 bytes); however, this is insufficient if the fault
+didn't occur on the first load in the loop, as the source pointer will
+have been incremented but nothing will have been stored until all 4
+register [pairs] are loaded.
+
+Read the LSM_STEP field of TXSTATUS (which is already loaded into a
+register), a bit like the copy_to_user versions, to determine how many
+iterations of MGET[DL] have taken place, all of which need rewinding.
+
+Fixes: 373cd784d0fc ("metag: Memory handling")
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: linux-metag@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/metag/lib/usercopy.c |   36 ++++++++++++++++++++++++++++--------
+ 1 file changed, 28 insertions(+), 8 deletions(-)
+
+--- a/arch/metag/lib/usercopy.c
++++ b/arch/metag/lib/usercopy.c
+@@ -687,29 +687,49 @@ EXPORT_SYMBOL(__copy_user);
+  *
+  *    Rationale:
+  *            A fault occurs while reading from user buffer, which is the
+- *            source. Since the fault is at a single address, we only
+- *            need to rewind by 8 bytes.
++ *            source.
+  *            Since we don't write to kernel buffer until we read first,
+  *            the kernel buffer is at the right state and needn't be
+- *            corrected.
++ *            corrected, but the source must be rewound to the beginning of
++ *            the block, which is LSM_STEP*8 bytes.
++ *            LSM_STEP is bits 10:8 in TXSTATUS which is already read
++ *            and stored in D0Ar2
++ *
++ *            NOTE: If a fault occurs at the last operation in M{G,S}ETL
++ *                    LSM_STEP will be 0. ie: we do 4 writes in our case, if
++ *                    a fault happens at the 4th write, LSM_STEP will be 0
++ *                    instead of 4. The code copes with that.
+  */
+ #define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id)    \
+       __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,           \
+-              "SUB    %1, %1, #8\n")
++              "LSR    D0Ar2, D0Ar2, #5\n"                             \
++              "ANDS   D0Ar2, D0Ar2, #0x38\n"                          \
++              "ADDZ   D0Ar2, D0Ar2, #32\n"                            \
++              "SUB    %1, %1, D0Ar2\n")
+ /*    rewind 'from' pointer when a fault occurs
+  *
+  *    Rationale:
+  *            A fault occurs while reading from user buffer, which is the
+- *            source. Since the fault is at a single address, we only
+- *            need to rewind by 4 bytes.
++ *            source.
+  *            Since we don't write to kernel buffer until we read first,
+  *            the kernel buffer is at the right state and needn't be
+- *            corrected.
++ *            corrected, but the source must be rewound to the beginning of
++ *            the block, which is LSM_STEP*4 bytes.
++ *            LSM_STEP is bits 10:8 in TXSTATUS which is already read
++ *            and stored in D0Ar2
++ *
++ *            NOTE: If a fault occurs at the last operation in M{G,S}ETL
++ *                    LSM_STEP will be 0. ie: we do 4 writes in our case, if
++ *                    a fault happens at the 4th write, LSM_STEP will be 0
++ *                    instead of 4. The code copes with that.
+  */
+ #define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id)    \
+       __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,           \
+-              "SUB    %1, %1, #4\n")
++              "LSR    D0Ar2, D0Ar2, #6\n"                             \
++              "ANDS   D0Ar2, D0Ar2, #0x1c\n"                          \
++              "ADDZ   D0Ar2, D0Ar2, #16\n"                            \
++              "SUB    %1, %1, D0Ar2\n")
+ /*
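The rewind amount encoded by the LSR/ANDS/ADDZ sequences can be
sanity-checked in plain C. This is a sketch of the arithmetic only (not
kernel code; rapf_src_rewind is an illustrative name), using the facts
stated in the comments above: LSM_STEP lives in bits 10:8 of TXSTATUS,
the source is rewound LSM_STEP*8 or LSM_STEP*4 bytes, and LSM_STEP == 0
means all four operations completed:

    #include <stdio.h>

    /* unit is 8 for the 64-bit rapf loop (MGETL) and 4 for the 32-bit
     * one (MGETD). LSM_STEP == 0 means the fault hit the last of the
     * four operations, so the full block is rewound (the ADDZ case). */
    static unsigned long rapf_src_rewind(unsigned long txstatus,
                                         unsigned long unit)
    {
            unsigned long lsm_step = (txstatus >> 8) & 0x7;

            if (lsm_step == 0)
                    lsm_step = 4;
            return lsm_step * unit;
    }

    int main(void)
    {
            /* fault during the 2nd MGETL: rewind 2 * 8 = 16 bytes */
            printf("%lu\n", rapf_src_rewind(2ul << 8, 8));
            /* fault at the 4th MGETD (LSM_STEP reads 0): rewind 16 */
            printf("%lu\n", rapf_src_rewind(0, 4));
            return 0;
    }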
diff --git a/queue-4.4/metag-usercopy-set-flags-before-addz.patch b/queue-4.4/metag-usercopy-set-flags-before-addz.patch
new file mode 100644 (file)
index 0000000..b44cb3b
--- /dev/null
@@ -0,0 +1,67 @@
+From fd40eee1290ad7add7aa665e3ce6b0f9fe9734b4 Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Tue, 4 Apr 2017 11:43:26 +0100
+Subject: metag/usercopy: Set flags before ADDZ
+
+From: James Hogan <james.hogan@imgtec.com>
+
+commit fd40eee1290ad7add7aa665e3ce6b0f9fe9734b4 upstream.
+
+The fixup code for the copy_to_user rapf loops reads TXStatus.LSM_STEP
+to decide how far to rewind the source pointer. There is a special case
+for the last execution of an MGETL/MGETD, since it leaves LSM_STEP=0
+even though the number of MGETLs/MGETDs attempted was 4. This uses ADDZ
+which is conditional upon the Z condition flag, but the AND instruction
+which masked the TXStatus.LSM_STEP field didn't set the condition flags
+based on the result.
+
+Fix that now by using ANDS which does set the flags, and also marking
+the condition codes as clobbered by the inline assembly.
+
+Fixes: 373cd784d0fc ("metag: Memory handling")
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: linux-metag@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/metag/lib/usercopy.c |    8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/metag/lib/usercopy.c
++++ b/arch/metag/lib/usercopy.c
+@@ -315,7 +315,7 @@
+               "       .previous\n"                                    \
+               : "=r" (to), "=r" (from), "=r" (ret), "=d" (n)          \
+               : "0" (to), "1" (from), "2" (ret), "3" (n)              \
+-              : "D1Ar1", "D0Ar2", "memory")
++              : "D1Ar1", "D0Ar2", "cc", "memory")
+ /*    rewind 'to' and 'from'  pointers when a fault occurs
+  *
+@@ -341,7 +341,7 @@
+ #define __asm_copy_to_user_64bit_rapf_loop(to,        from, ret, n, id)\
+       __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,           \
+               "LSR    D0Ar2, D0Ar2, #8\n"                             \
+-              "AND    D0Ar2, D0Ar2, #0x7\n"                           \
++              "ANDS   D0Ar2, D0Ar2, #0x7\n"                           \
+               "ADDZ   D0Ar2, D0Ar2, #4\n"                             \
+               "SUB    D0Ar2, D0Ar2, #1\n"                             \
+               "MOV    D1Ar1, #4\n"                                    \
+@@ -486,7 +486,7 @@
+               "       .previous\n"                                    \
+               : "=r" (to), "=r" (from), "=r" (ret), "=d" (n)          \
+               : "0" (to), "1" (from), "2" (ret), "3" (n)              \
+-              : "D1Ar1", "D0Ar2", "memory")
++              : "D1Ar1", "D0Ar2", "cc", "memory")
+ /*    rewind 'to' and 'from'  pointers when a fault occurs
+  *
+@@ -512,7 +512,7 @@
+ #define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
+       __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,           \
+               "LSR    D0Ar2, D0Ar2, #8\n"                             \
+-              "AND    D0Ar2, D0Ar2, #0x7\n"                           \
++              "ANDS   D0Ar2, D0Ar2, #0x7\n"                           \
+               "ADDZ   D0Ar2, D0Ar2, #4\n"                             \
+               "SUB    D0Ar2, D0Ar2, #1\n"                             \
+               "MOV    D1Ar1, #4\n"                                    \
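A toy C model (not full Meta semantics; the struct and function names
are illustrative) of why ANDS is needed and why "cc" must be clobbered:

    /* ADDZ is conditional on the Z flag, so whichever instruction
     * computes the mask must also set Z. Plain AND leaves the flags as
     * some earlier instruction left them (the bug); ANDS updates them
     * (the fix). Listing "cc" in the asm clobbers tells the compiler
     * the condition codes don't survive the asm block. */
    struct flags_model {
            unsigned int d0ar2;
            int z;                  /* Z condition flag */
    };

    static void and_plain(struct flags_model *m, unsigned int imm)
    {
            m->d0ar2 &= imm;        /* Z untouched: stale */
    }

    static void ands(struct flags_model *m, unsigned int imm)
    {
            m->d0ar2 &= imm;
            m->z = (m->d0ar2 == 0); /* Z reflects the result */
    }

    static void addz(struct flags_model *m, unsigned int imm)
    {
            if (m->z)
                    m->d0ar2 += imm;
    }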
diff --git a/queue-4.4/metag-usercopy-zero-rest-of-buffer-from-copy_from_user.patch b/queue-4.4/metag-usercopy-zero-rest-of-buffer-from-copy_from_user.patch
new file mode 100644 (file)
index 0000000..0860fb4
--- /dev/null
@@ -0,0 +1,232 @@
+From 563ddc1076109f2b3f88e6d355eab7b6fd4662cb Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Fri, 31 Mar 2017 11:14:02 +0100
+Subject: metag/usercopy: Zero rest of buffer from copy_from_user
+
+From: James Hogan <james.hogan@imgtec.com>
+
+commit 563ddc1076109f2b3f88e6d355eab7b6fd4662cb upstream.
+
+Currently we try to zero the destination for a failed read from userland
+in fixup code in the usercopy.c macros. The rest of the destination
+buffer is then zeroed from __copy_user_zeroing(), which is used for both
+copy_from_user() and __copy_from_user().
+
+Unfortunately we fail to zero in the fixup code as D1Ar1 is set to 0
+before the fixup code entry labels, and __copy_from_user() shouldn't even
+be zeroing the rest of the buffer.
+
+Move the zeroing out into copy_from_user() and rename
+__copy_user_zeroing() to raw_copy_from_user() since it no longer does
+any zeroing. This also conveniently matches the name needed for
+RAW_COPY_USER support in a later patch.
+
+Fixes: 373cd784d0fc ("metag: Memory handling")
+Reported-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: linux-metag@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/metag/include/asm/uaccess.h |   15 +++++-----
+ arch/metag/lib/usercopy.c        |   57 ++++++++++++---------------------------
+ 2 files changed, 26 insertions(+), 46 deletions(-)
+
+--- a/arch/metag/include/asm/uaccess.h
++++ b/arch/metag/include/asm/uaccess.h
+@@ -197,20 +197,21 @@ extern long __must_check strnlen_user(co
+ #define strlen_user(str) strnlen_user(str, 32767)
+-extern unsigned long __must_check __copy_user_zeroing(void *to,
+-                                                    const void __user *from,
+-                                                    unsigned long n);
++extern unsigned long raw_copy_from_user(void *to, const void __user *from,
++                                      unsigned long n);
+ static inline unsigned long
+ copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++      unsigned long res = n;
+       if (likely(access_ok(VERIFY_READ, from, n)))
+-              return __copy_user_zeroing(to, from, n);
+-      memset(to, 0, n);
+-      return n;
++              res = raw_copy_from_user(to, from, n);
++      if (unlikely(res))
++              memset(to + (n - res), 0, res);
++      return res;
+ }
+-#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n)
++#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n)
+ #define __copy_from_user_inatomic __copy_from_user
+ extern unsigned long __must_check __copy_user(void __user *to,
+--- a/arch/metag/lib/usercopy.c
++++ b/arch/metag/lib/usercopy.c
+@@ -29,7 +29,6 @@
+               COPY                                             \
+               "1:\n"                                           \
+               "       .section .fixup,\"ax\"\n"                \
+-              "       MOV D1Ar1,#0\n"                          \
+               FIXUP                                            \
+               "       MOVT    D1Ar1,#HI(1b)\n"                 \
+               "       JUMP    D1Ar1,#LO(1b)\n"                 \
+@@ -637,16 +636,14 @@ EXPORT_SYMBOL(__copy_user);
+       __asm_copy_user_cont(to, from, ret,     \
+               "       GETB D1Ar1,[%1++]\n"    \
+               "2:     SETB [%0++],D1Ar1\n",   \
+-              "3:     ADD  %2,%2,#1\n"        \
+-              "       SETB [%0++],D1Ar1\n",   \
++              "3:     ADD  %2,%2,#1\n",       \
+               "       .long 2b,3b\n")
+ #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+       __asm_copy_user_cont(to, from, ret,             \
+               "       GETW D1Ar1,[%1++]\n"            \
+               "2:     SETW [%0++],D1Ar1\n" COPY,      \
+-              "3:     ADD  %2,%2,#2\n"                \
+-              "       SETW [%0++],D1Ar1\n" FIXUP,     \
++              "3:     ADD  %2,%2,#2\n" FIXUP,         \
+               "       .long 2b,3b\n" TENTRY)
+ #define __asm_copy_from_user_2(to, from, ret) \
+@@ -656,32 +653,26 @@ EXPORT_SYMBOL(__copy_user);
+       __asm_copy_from_user_2x_cont(to, from, ret,     \
+               "       GETB D1Ar1,[%1++]\n"            \
+               "4:     SETB [%0++],D1Ar1\n",           \
+-              "5:     ADD  %2,%2,#1\n"                \
+-              "       SETB [%0++],D1Ar1\n",           \
++              "5:     ADD  %2,%2,#1\n",               \
+               "       .long 4b,5b\n")
+ #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+       __asm_copy_user_cont(to, from, ret,             \
+               "       GETD D1Ar1,[%1++]\n"            \
+               "2:     SETD [%0++],D1Ar1\n" COPY,      \
+-              "3:     ADD  %2,%2,#4\n"                \
+-              "       SETD [%0++],D1Ar1\n" FIXUP,     \
++              "3:     ADD  %2,%2,#4\n" FIXUP,         \
+               "       .long 2b,3b\n" TENTRY)
+ #define __asm_copy_from_user_4(to, from, ret) \
+       __asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
+-
+ #define __asm_copy_from_user_8x64(to, from, ret) \
+       asm volatile (                          \
+               "       GETL D0Ar2,D1Ar1,[%1++]\n"      \
+               "2:     SETL [%0++],D0Ar2,D1Ar1\n"      \
+               "1:\n"                                  \
+               "       .section .fixup,\"ax\"\n"       \
+-              "       MOV D1Ar1,#0\n"                 \
+-              "       MOV D0Ar2,#0\n"                 \
+               "3:     ADD  %2,%2,#8\n"                \
+-              "       SETL [%0++],D0Ar2,D1Ar1\n"      \
+               "       MOVT    D0Ar2,#HI(1b)\n"        \
+               "       JUMP    D0Ar2,#LO(1b)\n"        \
+               "       .previous\n"                    \
+@@ -721,11 +712,12 @@ EXPORT_SYMBOL(__copy_user);
+               "SUB    %1, %1, #4\n")
+-/* Copy from user to kernel, zeroing the bytes that were inaccessible in
+-   userland.  The return-value is the number of bytes that were
+-   inaccessible.  */
+-unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+-                                unsigned long n)
++/*
++ * Copy from user to kernel. The return-value is the number of bytes that were
++ * inaccessible.
++ */
++unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
++                               unsigned long n)
+ {
+       register char *dst asm ("A0.2") = pdst;
+       register const char __user *src asm ("A1.2") = psrc;
+@@ -738,7 +730,7 @@ unsigned long __copy_user_zeroing(void *
+               __asm_copy_from_user_1(dst, src, retn);
+               n--;
+               if (retn)
+-                      goto copy_exception_bytes;
++                      return retn + n;
+       }
+       if ((unsigned long) dst & 1) {
+               /* Worst case - byte copy */
+@@ -746,14 +738,14 @@ unsigned long __copy_user_zeroing(void *
+                       __asm_copy_from_user_1(dst, src, retn);
+                       n--;
+                       if (retn)
+-                              goto copy_exception_bytes;
++                              return retn + n;
+               }
+       }
+       if (((unsigned long) src & 2) && n >= 2) {
+               __asm_copy_from_user_2(dst, src, retn);
+               n -= 2;
+               if (retn)
+-                      goto copy_exception_bytes;
++                      return retn + n;
+       }
+       if ((unsigned long) dst & 2) {
+               /* Second worst case - word copy */
+@@ -761,7 +753,7 @@ unsigned long __copy_user_zeroing(void *
+                       __asm_copy_from_user_2(dst, src, retn);
+                       n -= 2;
+                       if (retn)
+-                              goto copy_exception_bytes;
++                              return retn + n;
+               }
+       }
+@@ -777,7 +769,7 @@ unsigned long __copy_user_zeroing(void *
+                       __asm_copy_from_user_8x64(dst, src, retn);
+                       n -= 8;
+                       if (retn)
+-                              goto copy_exception_bytes;
++                              return retn + n;
+               }
+       }
+@@ -793,7 +785,7 @@ unsigned long __copy_user_zeroing(void *
+                       __asm_copy_from_user_8x64(dst, src, retn);
+                       n -= 8;
+                       if (retn)
+-                              goto copy_exception_bytes;
++                              return retn + n;
+               }
+       }
+ #endif
+@@ -803,7 +795,7 @@ unsigned long __copy_user_zeroing(void *
+               n -= 4;
+               if (retn)
+-                      goto copy_exception_bytes;
++                      return retn + n;
+       }
+       /* If we get here, there were no memory read faults.  */
+@@ -829,21 +821,8 @@ unsigned long __copy_user_zeroing(void *
+       /* If we get here, retn correctly reflects the number of failing
+          bytes.  */
+       return retn;
+-
+- copy_exception_bytes:
+-      /* We already have "retn" bytes cleared, and need to clear the
+-         remaining "n" bytes.  A non-optimized simple byte-for-byte in-line
+-         memset is preferred here, since this isn't speed-critical code and
+-         we'd rather have this a leaf-function than calling memset.  */
+-      {
+-              char *endp;
+-              for (endp = dst + n; dst < endp; dst++)
+-                      *dst = 0;
+-      }
+-
+-      return retn + n;
+ }
+-EXPORT_SYMBOL(__copy_user_zeroing);
++EXPORT_SYMBOL(raw_copy_from_user);
+ #define __asm_clear_8x64(to, ret) \
+       asm volatile (                                  \
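The new split of responsibilities is compact enough to restate in C.
The sketch below mirrors the uaccess.h hunk above; raw_copy() is a
stand-in stub (and copy_from_user_sketch an illustrative name) so the
sketch is self-contained:

    #include <string.h>

    /* Stand-in for raw_copy_from_user(): returns the number of bytes
     * NOT copied (0 on full success). */
    static unsigned long raw_copy(void *to, const void *from,
                                  unsigned long n)
    {
            memcpy(to, from, n);    /* pretend nothing faulted */
            return 0;
    }

    /* copy_from_user() semantics after the patch: n - res bytes were
     * copied, and exactly the res-byte tail is zeroed, so the caller
     * never sees uninitialised kernel memory. __copy_from_user() maps
     * straight to raw_copy_from_user() and zeroes nothing. */
    static unsigned long copy_from_user_sketch(void *to, const void *from,
                                               unsigned long n)
    {
            unsigned long res = raw_copy(to, from, n);

            if (res)
                    memset((char *)to + (n - res), 0, res);
            return res;
    }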
diff --git a/queue-4.4/mips-end-spinlocks-with-.insn.patch b/queue-4.4/mips-end-spinlocks-with-.insn.patch
new file mode 100644 (file)
index 0000000..051c823
--- /dev/null
@@ -0,0 +1,77 @@
+From 4b5347a24a0f2d3272032c120664b484478455de Mon Sep 17 00:00:00 2001
+From: Paul Burton <paul.burton@imgtec.com>
+Date: Thu, 23 Feb 2017 14:50:24 +0000
+Subject: MIPS: End spinlocks with .insn
+
+From: Paul Burton <paul.burton@imgtec.com>
+
+commit 4b5347a24a0f2d3272032c120664b484478455de upstream.
+
+When building for microMIPS we need to ensure that the assembler always
+knows that there is code at the target of a branch or jump. Recent
+toolchains will fail to link a microMIPS kernel when this isn't the case
+due to what they think is a branch to non-microMIPS code.
+
+mips-mti-linux-gnu-ld kernel/built-in.o: .spinlock.text+0x2fc: Unsupported branch between ISA modes.
+mips-mti-linux-gnu-ld final link failed: Bad value
+
+This is due to inline assembly labels in spinlock.h not being followed
+by an instruction mnemonic, either due to a .subsection pseudo-op or the
+end of the inline asm block.
+
+Fix this with a .insn directive after such labels.
+
+Signed-off-by: Paul Burton <paul.burton@imgtec.com>
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Reviewed-by: Maciej W. Rozycki <macro@imgtec.com>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: linux-mips@linux-mips.org
+Cc: linux-kernel@vger.kernel.org
+Patchwork: https://patchwork.linux-mips.org/patch/15325/
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/include/asm/spinlock.h |    8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/mips/include/asm/spinlock.h
++++ b/arch/mips/include/asm/spinlock.h
+@@ -112,7 +112,7 @@ static inline void arch_spin_lock(arch_s
+               "       andi    %[ticket], %[ticket], 0xffff            \n"
+               "       bne     %[ticket], %[my_ticket], 4f             \n"
+               "        subu   %[ticket], %[my_ticket], %[ticket]      \n"
+-              "2:                                                     \n"
++              "2:     .insn                                           \n"
+               "       .subsection 2                                   \n"
+               "4:     andi    %[ticket], %[ticket], 0xffff            \n"
+               "       sll     %[ticket], 5                            \n"
+@@ -187,7 +187,7 @@ static inline unsigned int arch_spin_try
+               "       sc      %[ticket], %[ticket_ptr]                \n"
+               "       beqz    %[ticket], 1b                           \n"
+               "        li     %[ticket], 1                            \n"
+-              "2:                                                     \n"
++              "2:     .insn                                           \n"
+               "       .subsection 2                                   \n"
+               "3:     b       2b                                      \n"
+               "        li     %[ticket], 0                            \n"
+@@ -367,7 +367,7 @@ static inline int arch_read_trylock(arch
+               "       .set    reorder                                 \n"
+               __WEAK_LLSC_MB
+               "       li      %2, 1                                   \n"
+-              "2:                                                     \n"
++              "2:     .insn                                           \n"
+               : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+               : GCC_OFF_SMALL_ASM() (rw->lock)
+               : "memory");
+@@ -407,7 +407,7 @@ static inline int arch_write_trylock(arc
+                       "       lui     %1, 0x8000                      \n"
+                       "       sc      %1, %0                          \n"
+                       "       li      %2, 1                           \n"
+-                      "2:                                             \n"
++                      "2:     .insn                                   \n"
+                       : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
+                         "=&r" (ret)
+                       : GCC_OFF_SMALL_ASM() (rw->lock)
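A reduced example of the construct being fixed, as a sketch only (MIPS
inline asm; wait_nonzero is an illustrative name, and reproducing the
original link failure needs a microMIPS toolchain):

    /* Label 2 is only ever a branch target: no instruction follows it
     * inside the asm block, so when building microMIPS the assembler
     * would not mark it as microMIPS code and recent linkers reject
     * the result as a "branch between ISA modes". The ".insn"
     * directive declares that the label points at code. */
    static inline void wait_nonzero(int *p)
    {
            int tmp;

            __asm__ __volatile__(
            "1:     lw      %0, %1                  \n"
            "       bnez    %0, 2f                  \n"
            "        nop                            \n"
            "       b       1b                      \n"
            "        nop                            \n"
            "2:     .insn                           \n"
            : "=&r" (tmp)
            : "m" (*p)
            : "memory");
    }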
diff --git a/queue-4.4/mips-flush-wrong-invalid-ftlb-entry-for-huge-page.patch b/queue-4.4/mips-flush-wrong-invalid-ftlb-entry-for-huge-page.patch
new file mode 100644 (file)
index 0000000..bfe47c3
--- /dev/null
@@ -0,0 +1,102 @@
+From 0115f6cbf26663c86496bc56eeea293f85b77897 Mon Sep 17 00:00:00 2001
+From: Huacai Chen <chenhc@lemote.com>
+Date: Thu, 16 Mar 2017 21:00:27 +0800
+Subject: MIPS: Flush wrong invalid FTLB entry for huge page
+
+From: Huacai Chen <chenhc@lemote.com>
+
+commit 0115f6cbf26663c86496bc56eeea293f85b77897 upstream.
+
+On VTLB+FTLB platforms (such as Loongson-3A R2), the FTLB's pagesize is
+usually configured the same as PAGE_SIZE. In such a case, a huge page
+entry is not suitable to be written into the FTLB.
+
+Unfortunately, when a huge page is created, its page table entries
+haven't been created yet. Then the TLB refill handler will fetch an
+invalid page table entry which has no "HUGE" bit, and this entry may be
+written to FTLB. Since it is invalid, TLB load/store handler will then
+use tlbwi to write the valid entry at the same place. However, the
+valid entry is a huge page entry which isn't suitable for FTLB.
+
+Our solution is to modify build_huge_handler_tail. Flush the old
+invalid entry (whether it is in the FTLB or the VTLB, so as to reduce
+branches) and use tlbwr to write the valid new entry.
+
+Signed-off-by: Rui Wang <wangr@lemote.com>
+Signed-off-by: Huacai Chen <chenhc@lemote.com>
+Cc: John Crispin <john@phrozen.org>
+Cc: Steven J . Hill <Steven.Hill@caviumnetworks.com>
+Cc: Fuxin Zhang <zhangfx@lemote.com>
+Cc: Zhangjin Wu <wuzhangjin@gmail.com>
+Cc: Huacai Chen <chenhc@lemote.com>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/15754/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/mm/tlbex.c |   25 +++++++++++++++++++++----
+ 1 file changed, 21 insertions(+), 4 deletions(-)
+
+--- a/arch/mips/mm/tlbex.c
++++ b/arch/mips/mm/tlbex.c
+@@ -757,7 +757,8 @@ static void build_huge_update_entries(u3
+ static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
+                                   struct uasm_label **l,
+                                   unsigned int pte,
+-                                  unsigned int ptr)
++                                  unsigned int ptr,
++                                  unsigned int flush)
+ {
+ #ifdef CONFIG_SMP
+       UASM_i_SC(p, pte, 0, ptr);
+@@ -766,6 +767,22 @@ static void build_huge_handler_tail(u32
+ #else
+       UASM_i_SW(p, pte, 0, ptr);
+ #endif
++      if (cpu_has_ftlb && flush) {
++              BUG_ON(!cpu_has_tlbinv);
++
++              UASM_i_MFC0(p, ptr, C0_ENTRYHI);
++              uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
++              UASM_i_MTC0(p, ptr, C0_ENTRYHI);
++              build_tlb_write_entry(p, l, r, tlb_indexed);
++
++              uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
++              UASM_i_MTC0(p, ptr, C0_ENTRYHI);
++              build_huge_update_entries(p, pte, ptr);
++              build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0);
++
++              return;
++      }
++
+       build_huge_update_entries(p, pte, ptr);
+       build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
+ }
+@@ -2082,7 +2099,7 @@ static void build_r4000_tlb_load_handler
+               uasm_l_tlbl_goaround2(&l, p);
+       }
+       uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
+-      build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
++      build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
+ #endif
+       uasm_l_nopage_tlbl(&l, p);
+@@ -2137,7 +2154,7 @@ static void build_r4000_tlb_store_handle
+       build_tlb_probe_entry(&p);
+       uasm_i_ori(&p, wr.r1, wr.r1,
+                  _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
+-      build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
++      build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
+ #endif
+       uasm_l_nopage_tlbs(&l, p);
+@@ -2193,7 +2210,7 @@ static void build_r4000_tlb_modify_handl
+       build_tlb_probe_entry(&p);
+       uasm_i_ori(&p, wr.r1, wr.r1,
+                  _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
+-      build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
++      build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0);
+ #endif
+       uasm_l_nopage_tlbm(&l, p);
diff --git a/queue-4.4/mips-force-o32-fp64-support-on-32bit-mips64r6-kernels.patch b/queue-4.4/mips-force-o32-fp64-support-on-32bit-mips64r6-kernels.patch
new file mode 100644 (file)
index 0000000..09807fa
--- /dev/null
@@ -0,0 +1,46 @@
+From 2e6c7747730296a6d4fd700894286db1132598c4 Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Thu, 16 Feb 2017 12:39:01 +0000
+Subject: MIPS: Force o32 fp64 support on 32bit MIPS64r6 kernels
+
+From: James Hogan <james.hogan@imgtec.com>
+
+commit 2e6c7747730296a6d4fd700894286db1132598c4 upstream.
+
+When a 32-bit kernel is configured to support MIPS64r6 (CPU_MIPS64_R6),
+MIPS_O32_FP64_SUPPORT won't be selected as it should be because
+MIPS32_O32 is disabled (o32 is already the default ABI available on
+32-bit kernels).
+
+This results in userland FP breakage as CP0_Status.FR is read-only 1
+since r6 (when an FPU is present), so __enable_fpu() will fail to clear
+FR. This causes the FPU emulator to get used which will incorrectly
+emulate 32-bit FPU registers.
+
+Force o32 fp64 support in this case by also selecting
+MIPS_O32_FP64_SUPPORT from CPU_MIPS64_R6 if 32BIT.
+
+Fixes: 4e9d324d4288 ("MIPS: Require O32 FP64 support for MIPS64 with O32 compat")
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Reviewed-by: Paul Burton <paul.burton@imgtec.com>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/15310/
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/Kconfig |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -1412,7 +1412,7 @@ config CPU_MIPS32_R6
+       select CPU_SUPPORTS_MSA
+       select GENERIC_CSUM
+       select HAVE_KVM
+-      select MIPS_O32_FP64_SUPPORT
++      select MIPS_O32_FP64_SUPPORT if 32BIT
+       help
+         Choose this option to build a kernel for release 6 or later of the
+         MIPS32 architecture.  New MIPS processors, starting with the Warrior
diff --git a/queue-4.4/mips-lantiq-fix-missing-xbar-kernel-panic.patch b/queue-4.4/mips-lantiq-fix-missing-xbar-kernel-panic.patch
new file mode 100644 (file)
index 0000000..e254f81
--- /dev/null
@@ -0,0 +1,45 @@
+From 6ef90877eee63a0d03e83183bb44b64229b624e6 Mon Sep 17 00:00:00 2001
+From: Hauke Mehrtens <hauke@hauke-m.de>
+Date: Wed, 15 Mar 2017 23:26:42 +0100
+Subject: MIPS: Lantiq: fix missing xbar kernel panic
+
+From: Hauke Mehrtens <hauke@hauke-m.de>
+
+commit 6ef90877eee63a0d03e83183bb44b64229b624e6 upstream.
+
+Commit 08b3c894e565 ("MIPS: lantiq: Disable xbar fpi burst mode")
+accidentally requested the resources from the pmu address region
+instead of the xbar registers region, but the check for the return
+value of request_mem_region() was wrong, so the failure went unnoticed.
+Commit 98ea51cb0c8c ("MIPS: Lantiq: Fix another request_mem_region()
+return code check") fixed that return value check, which in turn made
+the kernel panic.
+This patch now makes use of the correct memory region for the crossbar.
+
+Fixes: 08b3c894e565 ("MIPS: lantiq: Disable xbar fpi burst mode")
+Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
+Cc: John Crispin <john@phrozen.org>
+Cc: james.hogan@imgtec.com
+Cc: arnd@arndb.de
+Cc: sergei.shtylyov@cogentembedded.com
+Cc: john@phrozen.org
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/15751
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/lantiq/xway/sysctrl.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/mips/lantiq/xway/sysctrl.c
++++ b/arch/mips/lantiq/xway/sysctrl.c
+@@ -467,7 +467,7 @@ void __init ltq_soc_init(void)
+               if (!np_xbar)
+                       panic("Failed to load xbar nodes from devicetree");
+-              if (of_address_to_resource(np_pmu, 0, &res_xbar))
++              if (of_address_to_resource(np_xbar, 0, &res_xbar))
+                       panic("Failed to get xbar resources");
+               if (request_mem_region(res_xbar.start, resource_size(&res_xbar),
+                       res_xbar.name) < 0)
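
As a hedged sketch of the pattern the fix restores (variable names as in the patch; ltq_xbar_membase and the mapping step are assumptions based on the surrounding sysctrl.c code, and error handling is condensed): the resource must be resolved from the xbar's own device tree node before the region is requested and mapped.

#include <linux/ioport.h>
#include <linux/of_address.h>

struct resource res_xbar;

/* Resolve the xbar node's register range (using np_pmu was the bug). */
if (of_address_to_resource(np_xbar, 0, &res_xbar))
        panic("Failed to get xbar resources");
/* request_mem_region() returns a pointer, so test for NULL. */
if (!request_mem_region(res_xbar.start, resource_size(&res_xbar),
                        res_xbar.name))
        panic("Failed to request xbar resources");
ltq_xbar_membase = ioremap_nocache(res_xbar.start,
                                   resource_size(&res_xbar));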
diff --git a/queue-4.4/mips-ralink-fix-typos-in-rt3883-pinctrl.patch b/queue-4.4/mips-ralink-fix-typos-in-rt3883-pinctrl.patch
new file mode 100644 (file)
index 0000000..a6002c1
--- /dev/null
@@ -0,0 +1,44 @@
+From 7c5a3d813050ee235817b0220dd8c42359a9efd8 Mon Sep 17 00:00:00 2001
+From: John Crispin <john@phrozen.org>
+Date: Sat, 25 Feb 2017 11:54:23 +0100
+Subject: MIPS: ralink: Fix typos in rt3883 pinctrl
+
+From: John Crispin <john@phrozen.org>
+
+commit 7c5a3d813050ee235817b0220dd8c42359a9efd8 upstream.
+
+There are two copy & paste errors in the definition of the 5GHz LNA and
+second ethernet pinmux.
+
+Fixes: f576fb6a0700 ("MIPS: ralink: cleanup the soc specific pinmux data")
+Signed-off-by: John Crispin <john@phrozen.org>
+Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/15328/
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/ralink/rt3883.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/mips/ralink/rt3883.c
++++ b/arch/mips/ralink/rt3883.c
+@@ -36,7 +36,7 @@ static struct rt2880_pmx_func uartlite_f
+ static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
+ static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
+ static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) };
+-static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna a", 0, 35, 3) };
++static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) };
+ static struct rt2880_pmx_func pci_func[] = {
+       FUNC("pci-dev", 0, 40, 32),
+       FUNC("pci-host2", 1, 40, 32),
+@@ -44,7 +44,7 @@ static struct rt2880_pmx_func pci_func[]
+       FUNC("pci-fnc", 3, 40, 32)
+ };
+ static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) };
+-static struct rt2880_pmx_func ge2_func[] = { FUNC("ge1", 0, 84, 12) };
++static struct rt2880_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) };
+ static struct rt2880_pmx_group rt3883_pinmux_data[] = {
+       GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C),
diff --git a/queue-4.4/nios2-reserve-boot-memory-for-device-tree.patch b/queue-4.4/nios2-reserve-boot-memory-for-device-tree.patch
new file mode 100644 (file)
index 0000000..d3aa9cc
--- /dev/null
@@ -0,0 +1,71 @@
+From 921d701e6f31e1ffaca3560416af1aa04edb4c4f Mon Sep 17 00:00:00 2001
+From: Tobias Klauser <tklauser@distanz.ch>
+Date: Sun, 2 Apr 2017 20:08:04 -0700
+Subject: nios2: reserve boot memory for device tree
+
+From: Tobias Klauser <tklauser@distanz.ch>
+
+commit 921d701e6f31e1ffaca3560416af1aa04edb4c4f upstream.
+
+Make sure to reserve the boot memory for the flattened device tree.
+Otherwise it might get overwritten, e.g. when initial_boot_params is
+copied, leading to a corrupted FDT and a boot hang/crash:
+
+  bootconsole [early0] enabled
+  Early console on uart16650 initialized at 0xf8001600
+  OF: fdt: Error -11 processing FDT
+  Kernel panic - not syncing: setup_cpuinfo: No CPU found in devicetree!
+
+  ---[ end Kernel panic - not syncing: setup_cpuinfo: No CPU found in devicetree!
+
+Guenter Roeck says:
+
+> I think I found the problem. In unflatten_and_copy_device_tree(), with added
+> debug information:
+>
+> OF: fdt: initial_boot_params=c861e400, dt=c861f000 size=28874 (0x70ca)
+>
+> ... and then initial_boot_params is copied to dt, which results in corrupted
+> fdt since the memory overlaps. Looks like the initial_boot_params memory
+> is not reserved and (re-)allocated by early_init_dt_alloc_memory_arch().
+
+Reported-by: Guenter Roeck <linux@roeck-us.net>
+Reference: http://lkml.kernel.org/r/20170226210338.GA19476@roeck-us.net
+Tested-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Tobias Klauser <tklauser@distanz.ch>
+Acked-by: Ley Foon Tan <ley.foon.tan@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/nios2/kernel/prom.c  |    7 +++++++
+ arch/nios2/kernel/setup.c |    3 +++
+ 2 files changed, 10 insertions(+)
+
+--- a/arch/nios2/kernel/prom.c
++++ b/arch/nios2/kernel/prom.c
+@@ -48,6 +48,13 @@ void * __init early_init_dt_alloc_memory
+       return alloc_bootmem_align(size, align);
+ }
++int __init early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
++                                           bool nomap)
++{
++      reserve_bootmem(base, size, BOOTMEM_DEFAULT);
++      return 0;
++}
++
+ void __init early_init_devtree(void *params)
+ {
+       __be32 *dtb = (u32 *)__dtb_start;
+--- a/arch/nios2/kernel/setup.c
++++ b/arch/nios2/kernel/setup.c
+@@ -195,6 +195,9 @@ void __init setup_arch(char **cmdline_p)
+       }
+ #endif /* CONFIG_BLK_DEV_INITRD */
++      early_init_fdt_reserve_self();
++      early_init_fdt_scan_reserved_mem();
++
+       unflatten_and_copy_device_tree();
+       setup_cpuinfo();
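
Guenter's debug output above reduces to a self-overlapping copy. A hedged illustration using his addresses (the generic FDT hooks are real, the surrounding code is simplified):

/* Without a reservation, the bootmem allocator can return memory
 * that lies inside the live FDT blob itself. */
void *dt;

/* initial_boot_params = 0xc861e400, size = 0x70ca (28874 bytes) */
dt = early_init_dt_alloc_memory_arch(size, align);
/* dt = 0xc861f000: inside [0xc861e400, 0xc861e400 + 0x70ca)     */

memcpy(dt, initial_boot_params, size);  /* overlapping copy: the FDT
                                         * is corrupted mid-read */

With early_init_fdt_reserve_self() in place, the blob's pages are marked reserved, so the allocator can no longer hand them out.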
diff --git a/queue-4.4/powerpc-don-t-try-to-fix-up-misaligned-load-with-reservation-instructions.patch b/queue-4.4/powerpc-don-t-try-to-fix-up-misaligned-load-with-reservation-instructions.patch
new file mode 100644 (file)
index 0000000..3f6c8fd
--- /dev/null
@@ -0,0 +1,71 @@
+From 48fe9e9488743eec9b7c1addd3c93f12f2123d54 Mon Sep 17 00:00:00 2001
+From: Paul Mackerras <paulus@ozlabs.org>
+Date: Tue, 4 Apr 2017 14:56:05 +1000
+Subject: powerpc: Don't try to fix up misaligned load-with-reservation instructions
+
+From: Paul Mackerras <paulus@ozlabs.org>
+
+commit 48fe9e9488743eec9b7c1addd3c93f12f2123d54 upstream.
+
+In the past, there was only one load-with-reservation instruction,
+lwarx, and if a program attempted a lwarx on a misaligned address, it
+would take an alignment interrupt and the kernel handler would emulate
+it as though it were lwzx. That was not really correct, but benign,
+since it loads the right amount of data, and the lwarx should be paired
+with a stwcx. to the same address, which would also cause an alignment
+interrupt and result in a SIGBUS being delivered to the process.
+
+We now have 5 different sizes of load-with-reservation instruction. Of
+those, lharx and ldarx cause an immediate SIGBUS by luck since their
+entries in aligninfo[] overlap instructions which were not fixed up, but
+lqarx overlaps with lhz and will be emulated as such. lbarx can never
+generate an alignment interrupt since it only operates on 1 byte.
+
+To straighten this out and fix the lqarx case, this adds code to detect
+the l[hwdq]arx instructions and return without fixing them up, resulting
+in a SIGBUS being delivered to the process.
+
+Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/align.c |   27 +++++++++++++++++++--------
+ 1 file changed, 19 insertions(+), 8 deletions(-)
+
+--- a/arch/powerpc/kernel/align.c
++++ b/arch/powerpc/kernel/align.c
+@@ -808,14 +808,25 @@ int fix_alignment(struct pt_regs *regs)
+       nb = aligninfo[instr].len;
+       flags = aligninfo[instr].flags;
+-      /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
+-      if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
+-              nb = 8;
+-              flags = LD+SW;
+-      } else if (IS_XFORM(instruction) &&
+-                 ((instruction >> 1) & 0x3ff) == 660) {
+-              nb = 8;
+-              flags = ST+SW;
++      /*
++       * Handle some cases which give overlaps in the DSISR values.
++       */
++      if (IS_XFORM(instruction)) {
++              switch (get_xop(instruction)) {
++              case 532:       /* ldbrx */
++                      nb = 8;
++                      flags = LD+SW;
++                      break;
++              case 660:       /* stdbrx */
++                      nb = 8;
++                      flags = ST+SW;
++                      break;
++              case 20:        /* lwarx */
++              case 84:        /* ldarx */
++              case 116:       /* lharx */
++              case 276:       /* lqarx */
++                      return 0;       /* not emulated ever */
++              }
+       }
+       /* Byteswap little endian loads and stores */
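
The new behaviour is easy to observe from user space. A hedged demo (assumes a PowerPC host running a kernel with this fix; the misaligned lwarx now delivers SIGBUS rather than being mis-emulated as a plain load):

/* build on powerpc: gcc -O0 -o lwarx-demo lwarx-demo.c */
#include <stdio.h>

static char buf[16] __attribute__((aligned(16)));

int main(void)
{
        unsigned long addr = (unsigned long)buf + 1;    /* misaligned */
        unsigned int val;

        /* lwarx RT,RA,RB with RA=0: load word and reserve from (addr) */
        __asm__ volatile("lwarx %0,0,%1" : "=r" (val) : "r" (addr));
        printf("loaded %u\n", val);     /* expected: not reached, SIGBUS */
        return 0;
}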
diff --git a/queue-4.4/powerpc-mm-add-missing-global-tlb-invalidate-if-cxl-is-active.patch b/queue-4.4/powerpc-mm-add-missing-global-tlb-invalidate-if-cxl-is-active.patch
new file mode 100644 (file)
index 0000000..8f7bc89
--- /dev/null
@@ -0,0 +1,52 @@
+From 88b1bf7268f56887ca88eb09c6fb0f4fc970121a Mon Sep 17 00:00:00 2001
+From: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
+Date: Wed, 29 Mar 2017 19:19:42 +0200
+Subject: powerpc/mm: Add missing global TLB invalidate if cxl is active
+
+From: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
+
+commit 88b1bf7268f56887ca88eb09c6fb0f4fc970121a upstream.
+
+Commit 4c6d9acce1f4 ("powerpc/mm: Add hooks for cxl") converted local
+TLB invalidates to global if the cxl driver is active. This is necessary
+because the CAPP snoops invalidations to forward them to the PSL on the
+cxl adapter. However, one path was forgotten: native_flush_hash_range()
+still does local TLB invalidates, as was found out the hard way recently.
+
+This patch fixes it by following the same logic as previously: if the
+cxl driver is active, the local TLB invalidates are 'upgraded' to
+global.
+
+Fixes: 4c6d9acce1f4 ("powerpc/mm: Add hooks for cxl")
+Signed-off-by: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
+Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/mm/hash_native_64.c |    7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/mm/hash_native_64.c
++++ b/arch/powerpc/mm/hash_native_64.c
+@@ -645,6 +645,10 @@ static void native_flush_hash_range(unsi
+       unsigned long psize = batch->psize;
+       int ssize = batch->ssize;
+       int i;
++      unsigned int use_local;
++
++      use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
++              mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();
+       local_irq_save(flags);
+@@ -671,8 +675,7 @@ static void native_flush_hash_range(unsi
+               } pte_iterate_hashed_end();
+       }
+-      if (mmu_has_feature(MMU_FTR_TLBIEL) &&
+-          mmu_psize_defs[psize].tlbiel && local) {
++      if (use_local) {
+               asm volatile("ptesync":::"memory");
+               for (i = 0; i < number; i++) {
+                       vpn = batch->vpn[i];
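
Read as a predicate, the hunk above says a CPU-local flush is only safe when all four conditions hold; an annotated, hedged restatement:

use_local = local &&                            /* batch was CPU-local     */
            mmu_has_feature(MMU_FTR_TLBIEL) &&  /* CPU implements tlbiel   */
            mmu_psize_defs[psize].tlbiel &&     /* ...for this page size   */
            !cxl_ctx_in_use();                  /* a cxl context forces a
                                                 * broadcast tlbie so the
                                                 * CAPP can snoop it       */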
diff --git a/queue-4.4/s390-decompressor-fix-initrd-corruption-caused-by-bss-clear.patch b/queue-4.4/s390-decompressor-fix-initrd-corruption-caused-by-bss-clear.patch
new file mode 100644 (file)
index 0000000..51080ed
--- /dev/null
@@ -0,0 +1,85 @@
+From d82c0d12c92705ef468683c9b7a8298dd61ed191 Mon Sep 17 00:00:00 2001
+From: Marcelo Henrique Cerri <marcelo.cerri@canonical.com>
+Date: Mon, 13 Mar 2017 12:14:58 -0300
+Subject: s390/decompressor: fix initrd corruption caused by bss clear
+
+From: Marcelo Henrique Cerri <marcelo.cerri@canonical.com>
+
+commit d82c0d12c92705ef468683c9b7a8298dd61ed191 upstream.
+
+Reorder the operations in decompress_kernel() to ensure initrd is moved
+to a safe location before the bss section is zeroed.
+
+During decompression bss can overlap with the initrd and this can
+corrupt the initrd contents depending on the size of the compressed
+kernel (which affects where the initrd is placed by the bootloader) and
+the size of the bss section of the decompressor.
+
+Also use the correct initrd size when checking for overlaps with
+parmblock.
+
+Fixes: 06c0dd72aea3 ([S390] fix boot failures with compressed kernels)
+Reviewed-by: Joy Latten <joy.latten@canonical.com>
+Reviewed-by: Vineetha HariPai <vineetha.hari.pai@canonical.com>
+Signed-off-by: Marcelo Henrique Cerri <marcelo.cerri@canonical.com>
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/boot/compressed/misc.c |   35 +++++++++++++++++++----------------
+ 1 file changed, 19 insertions(+), 16 deletions(-)
+
+--- a/arch/s390/boot/compressed/misc.c
++++ b/arch/s390/boot/compressed/misc.c
+@@ -141,31 +141,34 @@ static void check_ipl_parmblock(void *st
+ unsigned long decompress_kernel(void)
+ {
+-      unsigned long output_addr;
+-      unsigned char *output;
++      void *output, *kernel_end;
+-      output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL;
+-      check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start);
+-      memset(&_bss, 0, &_ebss - &_bss);
+-      free_mem_ptr = (unsigned long)&_end;
+-      free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
+-      output = (unsigned char *) output_addr;
++      output = (void *) ALIGN((unsigned long) &_end + HEAP_SIZE, PAGE_SIZE);
++      kernel_end = output + SZ__bss_start;
++      check_ipl_parmblock((void *) 0, (unsigned long) kernel_end);
+ #ifdef CONFIG_BLK_DEV_INITRD
+       /*
+        * Move the initrd right behind the end of the decompressed
+-       * kernel image.
++       * kernel image. This also prevents initrd corruption caused by
++       * bss clearing since kernel_end will always be located behind the
++       * current bss section.
+        */
+-      if (INITRD_START && INITRD_SIZE &&
+-          INITRD_START < (unsigned long) output + SZ__bss_start) {
+-              check_ipl_parmblock(output + SZ__bss_start,
+-                                  INITRD_START + INITRD_SIZE);
+-              memmove(output + SZ__bss_start,
+-                      (void *) INITRD_START, INITRD_SIZE);
+-              INITRD_START = (unsigned long) output + SZ__bss_start;
++      if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) {
++              check_ipl_parmblock(kernel_end, INITRD_SIZE);
++              memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE);
++              INITRD_START = (unsigned long) kernel_end;
+       }
+ #endif
++      /*
++       * Clear bss section. free_mem_ptr and free_mem_end_ptr need to be
++       * initialized afterwards since they reside in bss.
++       */
++      memset(&_bss, 0, &_ebss - &_bss);
++      free_mem_ptr = (unsigned long) &_end;
++      free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
++
+       puts("Uncompressing Linux... ");
+       __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
+       puts("Ok, booting the kernel.\n");
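
The safe ordering the patch establishes, as a hedged sketch (symbols as in misc.c; the parmblock checks are dropped for brevity):

/* 1. Fix the landing zone for the decompressed image. */
output     = (void *) ALIGN((unsigned long) &_end + HEAP_SIZE, PAGE_SIZE);
kernel_end = output + SZ__bss_start;

/* 2. Move the initrd while its bytes are still intact; it now
 *    lies behind kernel_end and therefore behind bss. */
if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START)
        memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE);

/* 3. Only now zero bss, which may cover the initrd's old location,
 *    then set up the heap pointers that themselves live in bss. */
memset(&_bss, 0, &_ebss - &_bss);
free_mem_ptr     = (unsigned long) &_end;
free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;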
diff --git a/queue-4.4/s390-uaccess-get_user-should-zero-on-failure-again.patch b/queue-4.4/s390-uaccess-get_user-should-zero-on-failure-again.patch
new file mode 100644 (file)
index 0000000..5b34b61
--- /dev/null
@@ -0,0 +1,44 @@
+From d09c5373e8e4eaaa09233552cbf75dc4c4f21203 Mon Sep 17 00:00:00 2001
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+Date: Mon, 27 Mar 2017 09:48:04 +0200
+Subject: s390/uaccess: get_user() should zero on failure (again)
+
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+
+commit d09c5373e8e4eaaa09233552cbf75dc4c4f21203 upstream.
+
+Commit fd2d2b191fe7 ("s390: get_user() should zero on failure")
+intended to fix s390's get_user() implementation which did not zero
+the target operand if the read from user space faulted. Unfortunately
+the patch has no effect: the corresponding inline assembly specifies
+that the operand is only written to ("=") and the previous value is
+discarded.
+
+Therefore the compiler is free to and actually does omit the zero
+initialization.
+
+To fix this, simply change the constraint modifier to "+", so the
+compiler cannot omit the initialization anymore.
+
+Fixes: c9ca78415ac1 ("s390/uaccess: provide inline variants of get_user/put_user")
+Fixes: fd2d2b191fe7 ("s390: get_user() should zero on failure")
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/include/asm/uaccess.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/s390/include/asm/uaccess.h
++++ b/arch/s390/include/asm/uaccess.h
+@@ -150,7 +150,7 @@ unsigned long __must_check __copy_to_use
+               "       jg      2b\n"                           \
+               ".popsection\n"                                 \
+               EX_TABLE(0b,3b) EX_TABLE(1b,3b)                 \
+-              : "=d" (__rc), "=Q" (*(to))                     \
++              : "=d" (__rc), "+Q" (*(to))                     \
+               : "d" (size), "Q" (*(from)),                    \
+                 "d" (__reg0), "K" (-EFAULT)                   \
+               : "cc");                                        \
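
The constraint difference is easiest to see in isolation. A hedged, self-contained illustration, not the real uaccess macro (the fault-handling path is omitted; only the operand constraints matter here):

/* With "=Q" (*to) the compiler may delete the "*to = 0" store,
 * since the asm claims to overwrite *to unconditionally.  With
 * "+Q" (*to) the operand is read-write, so the zeroing survives. */
static inline int get_user_int_sketch(int *to, const int *from)
{
        int rc;

        *to = 0;                        /* must survive a faulting load */
        asm volatile(
                "       lhi     %0,0\n" /* rc = 0; the real macro loads
                                         * *from and jumps to a fixup
                                         * section on fault */
                : "=d" (rc), "+Q" (*to) /* "+" keeps the init alive */
                : "Q" (*from));
        return rc;
}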
diff --git a/queue-4.4/series b/queue-4.4/series
index 33d6a013b19f872ff8e07ba7f87e1ac2863e02e6..112b1ef31ce2bd8f799c1ce4e3802edc18e81653 100644 (file)
@@ -12,3 +12,20 @@ iio-bmg160-reset-chip-when-probing.patch
 reset-treeid-to-zero-on-smb2-tree_connect.patch
 ptrace-fix-ptrace_listen-race-corrupting-task-state.patch
 ring-buffer-fix-return-value-check-in-test_ringbuffer.patch
+metag-usercopy-drop-unused-macros.patch
+metag-usercopy-fix-alignment-error-checking.patch
+metag-usercopy-add-early-abort-to-copy_to_user.patch
+metag-usercopy-zero-rest-of-buffer-from-copy_from_user.patch
+metag-usercopy-set-flags-before-addz.patch
+metag-usercopy-fix-src-fixup-in-from-user-rapf-loops.patch
+metag-usercopy-add-missing-fixups.patch
+powerpc-mm-add-missing-global-tlb-invalidate-if-cxl-is-active.patch
+powerpc-don-t-try-to-fix-up-misaligned-load-with-reservation-instructions.patch
+nios2-reserve-boot-memory-for-device-tree.patch
+s390-decompressor-fix-initrd-corruption-caused-by-bss-clear.patch
+s390-uaccess-get_user-should-zero-on-failure-again.patch
+mips-force-o32-fp64-support-on-32bit-mips64r6-kernels.patch
+mips-ralink-fix-typos-in-rt3883-pinctrl.patch
+mips-end-spinlocks-with-.insn.patch
+mips-lantiq-fix-missing-xbar-kernel-panic.patch
+mips-flush-wrong-invalid-ftlb-entry-for-huge-page.patch