--- /dev/null
+From 6cfb521ac0d5b97470883ff9b7facae264b7ab12 Mon Sep 17 00:00:00 2001
+From: Andi Kleen <ak@linux.intel.com>
+Date: Tue, 16 Jan 2018 12:52:28 -0800
+Subject: module: Add retpoline tag to VERMAGIC
+
+From: Andi Kleen <ak@linux.intel.com>
+
+commit 6cfb521ac0d5b97470883ff9b7facae264b7ab12 upstream.
+
+Add a marker for retpoline to the module VERMAGIC. This catches the case
+when a non RETPOLINE compiled module gets loaded into a retpoline kernel,
+making it insecure.
+
+It doesn't handle the case when retpoline has been runtime disabled. Even
+in this case the match of the retpoline status will be enforced. This
+implies that even with retpoline run time disabled all modules loaded need
+to be recompiled.
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Acked-by: David Woodhouse <dwmw@amazon.co.uk>
+Cc: rusty@rustcorp.com.au
+Cc: arjan.van.de.ven@intel.com
+Cc: jeyu@kernel.org
+Cc: torvalds@linux-foundation.org
+Link: https://lkml.kernel.org/r/20180116205228.4890-1-andi@firstfloor.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/vermagic.h | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/include/linux/vermagic.h
++++ b/include/linux/vermagic.h
+@@ -24,10 +24,16 @@
+ #ifndef MODULE_ARCH_VERMAGIC
+ #define MODULE_ARCH_VERMAGIC ""
+ #endif
++#ifdef RETPOLINE
++#define MODULE_VERMAGIC_RETPOLINE "retpoline "
++#else
++#define MODULE_VERMAGIC_RETPOLINE ""
++#endif
+
+ #define VERMAGIC_STRING \
+ UTS_RELEASE " " \
+ MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
+ MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
+- MODULE_ARCH_VERMAGIC
++ MODULE_ARCH_VERMAGIC \
++ MODULE_VERMAGIC_RETPOLINE
+
--- /dev/null
+From 28d437d550e1e39f805d99f9f8ac399c778827b7 Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Sat, 13 Jan 2018 17:27:30 -0600
+Subject: x86/retpoline: Add LFENCE to the retpoline/RSB filling RSB macros
+
+From: Tom Lendacky <thomas.lendacky@amd.com>
+
+commit 28d437d550e1e39f805d99f9f8ac399c778827b7 upstream.
+
+The PAUSE instruction is currently used in the retpoline and RSB filling
+macros as a speculation trap. The use of PAUSE was originally suggested
+because it showed a very, very small difference in the amount of
+cycles/time used to execute the retpoline as compared to LFENCE. On AMD,
+the PAUSE instruction is not a serializing instruction, so the pause/jmp
+loop will use excess power as it is speculated over waiting for return
+to mispredict to the correct target.
+
+The RSB filling macro is applicable to AMD, and, if software is unable to
+verify that LFENCE is serializing on AMD (possible when running under a
+hypervisor), the generic retpoline support will be used and, so, is also
+applicable to AMD. Keep the current usage of PAUSE for Intel, but add an
+LFENCE instruction to the speculation trap for AMD.
+
+The same sequence has been adopted by GCC for the GCC generated retpolines.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@alien8.de>
+Acked-by: David Woodhouse <dwmw@amazon.co.uk>
+Acked-by: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Paul Turner <pjt@google.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: Kees Cook <keescook@google.com>
+Link: https://lkml.kernel.org/r/20180113232730.31060.36287.stgit@tlendack-t1.amdoffice.net
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/nospec-branch.h | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -11,7 +11,7 @@
+ * Fill the CPU return stack buffer.
+ *
+ * Each entry in the RSB, if used for a speculative 'ret', contains an
+- * infinite 'pause; jmp' loop to capture speculative execution.
++ * infinite 'pause; lfence; jmp' loop to capture speculative execution.
+ *
+ * This is required in various cases for retpoline and IBRS-based
+ * mitigations for the Spectre variant 2 vulnerability. Sometimes to
+@@ -38,11 +38,13 @@
+ call 772f; \
+ 773: /* speculation trap */ \
+ pause; \
++ lfence; \
+ jmp 773b; \
+ 772: \
+ call 774f; \
+ 775: /* speculation trap */ \
+ pause; \
++ lfence; \
+ jmp 775b; \
+ 774: \
+ dec reg; \
+@@ -60,6 +62,7 @@
+ call .Ldo_rop_\@
+ .Lspec_trap_\@:
+ pause
++ lfence
+ jmp .Lspec_trap_\@
+ .Ldo_rop_\@:
+ mov \reg, (%_ASM_SP)
+@@ -142,6 +145,7 @@
+ " .align 16\n" \
+ "901: call 903f;\n" \
+ "902: pause;\n" \
++ " lfence;\n" \
+ " jmp 902b;\n" \
+ " .align 16\n" \
+ "903: addl $4, %%esp;\n" \