4.4-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 28 Jun 2018 02:09:03 +0000 (11:09 +0900)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 28 Jun 2018 02:09:03 +0000 (11:09 +0900)
added patches:
x86-spectre_v1-disable-compiler-optimizations-over-array_index_mask_nospec.patch

queue-4.4/x86-spectre_v1-disable-compiler-optimizations-over-array_index_mask_nospec.patch [new file with mode: 0644]

diff --git a/queue-4.4/x86-spectre_v1-disable-compiler-optimizations-over-array_index_mask_nospec.patch b/queue-4.4/x86-spectre_v1-disable-compiler-optimizations-over-array_index_mask_nospec.patch
new file mode 100644 (file)
index 0000000..73532bd
--- /dev/null
@@ -0,0 +1,81 @@
+From eab6870fee877258122a042bfd99ee7908c40280 Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Thu, 7 Jun 2018 09:13:48 -0700
+Subject: x86/spectre_v1: Disable compiler optimizations over array_index_mask_nospec()
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit eab6870fee877258122a042bfd99ee7908c40280 upstream.
+
+Mark Rutland noticed that GCC optimization passes have the potential to elide
+necessary invocations of the array_index_mask_nospec() instruction sequence,
+so mark the asm() volatile.
+
+Mark explains:
+
+"The volatile will inhibit *some* cases where the compiler could lift the
+ array_index_nospec() call out of a branch, e.g. where there are multiple
+ invocations of array_index_nospec() with the same arguments:
+
+        if (idx < foo) {
+                idx1 = array_idx_nospec(idx, foo);
+                do_something(idx1);
+        }
+
+        < some other code >
+
+        if (idx < foo) {
+                idx2 = array_idx_nospec(idx, foo);
+                do_something_else(idx2);
+        }
+
+ ... since the compiler can determine that the two invocations yield the same
+ result, and reuse the first result (likely the same register as idx was in
+ originally) for the second branch, effectively re-writing the above as:
+
+        if (idx < foo) {
+                idx = array_idx_nospec(idx, foo);
+                do_something(idx);
+        }
+
+        < some other code >
+
+        if (idx < foo) {
+                do_something_else(idx);
+        }
+
+ ... if we don't take the first branch but then speculatively take the second,
+ we lose the nospec protection.
+
+ There's more info on volatile asm in the GCC docs:
+
+   https://gcc.gnu.org/onlinedocs/gcc/Extended-Asm.html#Volatile
+ "
+
+Reported-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: <stable@vger.kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Fixes: babdde2698d4 ("x86: Implement array_index_mask_nospec")
+Link: https://lkml.kernel.org/lkml/152838798950.14521.4893346294059739135.stgit@dwillia2-desk3.amr.corp.intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/barrier.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/barrier.h
++++ b/arch/x86/include/asm/barrier.h
+@@ -38,7 +38,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
+ {
+       unsigned long mask;
+
+-      asm ("cmp %1,%2; sbb %0,%0;"
++      asm volatile ("cmp %1,%2; sbb %0,%0;"
+                       :"=r" (mask)
+                       :"r"(size),"r" (index)
+                       :"cc");
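
For context, here is a minimal standalone C sketch (userspace, x86-64, GNU
extended asm) of the pattern this patch hardens. It is not the kernel
source; mask_nospec() and clamp_load() are illustrative names standing in
for array_index_mask_nospec() and the array_index_nospec() usage built on
top of it.

#include <stdio.h>

static inline unsigned long mask_nospec(unsigned long index,
					unsigned long size)
{
	unsigned long mask;

	/*
	 * cmp sets the carry flag when index < size; sbb then yields
	 * ~0UL (all ones) in that case and 0UL otherwise.  The
	 * "volatile" keeps the compiler from merging or hoisting
	 * identical copies of this asm, as described above.
	 */
	asm volatile ("cmp %1,%2; sbb %0,%0;"
			: "=r" (mask)
			: "r" (size), "r" (index)
			: "cc");
	return mask;
}

static unsigned long clamp_load(const unsigned long *arr,
				unsigned long idx, unsigned long size)
{
	if (idx < size) {
		/* Clamp idx under speculation before the dependent load. */
		idx &= mask_nospec(idx, size);
		return arr[idx];
	}
	return 0;
}

int main(void)
{
	unsigned long arr[4] = { 10, 20, 30, 40 };

	printf("%lu\n", clamp_load(arr, 2, 4));	/* 30 */
	printf("%lu\n", clamp_load(arr, 9, 4));	/* 0: out of bounds */
	return 0;
}

Compiling two back-to-back bounds-checked blocks like those in Mark's
example (e.g. with gcc -O2 -S) after dropping the volatile may show a
single cmp/sbb sequence whose result is reused across both branches, which
is exactly the reuse the quote describes.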