--- /dev/null
+From 83c133cf11fb0e68a51681447e372489f052d40e Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@kernel.org>
+Date: Sun, 20 Sep 2015 16:32:05 -0700
+Subject: x86/nmi/64: Fix a paravirt stack-clobbering bug in the NMI code
+
+From: Andy Lutomirski <luto@kernel.org>
+
+commit 83c133cf11fb0e68a51681447e372489f052d40e upstream.
+
+The NMI entry code that switches to the normal kernel stack needs to
+be very careful not to clobber any extra stack slots on the NMI
+stack. The code is fine under the assumption that SWAPGS is just a
+normal instruction, but that assumption isn't really true. Use
+SWAPGS_UNSAFE_STACK instead.
+
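+For reference, the two macros differ only under CONFIG_PARAVIRT; the
+definitions below are paraphrased from arch/x86/include/asm/irqflags.h
+and paravirt.h of this era rather than quoted verbatim.  Plain SWAPGS
+becomes a patchable indirect call, and a call pushes a return address
+onto whatever stack is current, whereas SWAPGS_UNSAFE_STACK always
+emits the raw instruction:
+
+    /* !CONFIG_PARAVIRT: both are just the instruction */
+    #define SWAPGS			swapgs
+    #define SWAPGS_UNSAFE_STACK	swapgs
+
+    /* CONFIG_PARAVIRT (simplified): SWAPGS is an indirect call until
+     * patched, so it writes to the stack; SWAPGS_UNSAFE_STACK never
+     * touches the stack. */
+    #define SWAPGS							\
+        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
+                  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs))
+    #define SWAPGS_UNSAFE_STACK					\
+        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
+                  swapgs)
+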
+This is part of a fix for some random crashes that Sasha saw.
+
+Fixes: 9b6e6a8334d5 ("x86/nmi/64: Switch stacks on userspace NMI entry")
+Reported-and-tested-by: Sasha Levin <sasha.levin@oracle.com>
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Link: http://lkml.kernel.org/r/974bc40edffdb5c2950a5c4977f821a446b76178.1442791737.git.luto@kernel.org
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/entry_64.S | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -1747,8 +1747,11 @@ ENTRY(nmi)
+ * we don't want to enable interrupts, because then we'll end
+ * up in an awkward situation in which IRQs are on but NMIs
+ * are off.
++ *
++ * We also must not push anything to the stack before switching
++ * stacks lest we corrupt the "NMI executing" variable.
+ */
+- SWAPGS
++ SWAPGS_UNSAFE_STACK
+ cld
+ movq %rsp, %rdx
+ movq PER_CPU_VAR(kernel_stack), %rsp
--- /dev/null
+From fc57a7c68020dcf954428869eafd934c0ab1536f Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@kernel.org>
+Date: Sun, 20 Sep 2015 16:32:04 -0700
+Subject: x86/paravirt: Replace the paravirt nop with a bona fide empty function
+
+From: Andy Lutomirski <luto@kernel.org>
+
+commit fc57a7c68020dcf954428869eafd934c0ab1536f upstream.
+
+PARAVIRT_ADJUST_EXCEPTION_FRAME generates this code (using nmi as an
+example, trimmed for readability):
+
+ ff 15 00 00 00 00 callq *0x0(%rip) # 2796 <nmi+0x6>
+ 2792: R_X86_64_PC32 pv_irq_ops+0x2c
+
+That's a call through a function pointer to a regular C function that
+does nothing on native boots, but that function isn't protected
+against kprobes, isn't marked notrace, and is certainly not
+guaranteed to preserve any registers if the compiler is feeling
+perverse. This is bad news for a CLBR_NONE operation.
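+
+The macro behind that call site expands to roughly the following
+(paraphrased from the 64-bit paravirt headers, not verbatim), and the
+pv_irq_ops.adjust_exception_frame slot it calls through defaults to
+paravirt_nop on native kernels:
+
+    #define PARAVIRT_ADJUST_EXCEPTION_FRAME				\
+        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
+                  CLBR_NONE,						\
+                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))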
+
+Of course, if everything works correctly, once paravirt ops are
+patched, it gets nopped out, but what if we hit this code before
+paravirt ops are patched in? This can potentially cause breakage
+that is very difficult to debug.
+
+A more subtle failure is possible here, too: if _paravirt_nop uses
+the stack at all (even just to push RBP), it will overwrite the "NMI
+executing" variable if it's called in the NMI prologue.
+
+The Xen case, perhaps surprisingly, is fine, because it's already
+written in asm.
+
+Fix all of the cases that default to paravirt_nop (including
+adjust_exception_frame) with a big hammer: replace paravirt_nop with
+an asm function that is just a ret instruction.
+
+The Xen case may have other problems, so document them.
+
+This is part of a fix for some random crashes that Sasha saw.
+
+Reported-and-tested-by: Sasha Levin <sasha.levin@oracle.com>
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Link: http://lkml.kernel.org/r/8f5d2ba295f9d73751c33d97fda03e0495d9ade0.1442791737.git.luto@kernel.org
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/entry_64.S | 11 +++++++++++
+ arch/x86/kernel/paravirt.c | 16 ++++++++++++----
+ 2 files changed, 23 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -1684,7 +1684,18 @@ END(error_exit)
+ /* runs on exception stack */
+ ENTRY(nmi)
+ INTR_FRAME
++ /*
++ * Fix up the exception frame if we're on Xen.
++ * PARAVIRT_ADJUST_EXCEPTION_FRAME is guaranteed to push at most
++ * one value to the stack on native, so it may clobber the rdx
++ * scratch slot, but it won't clobber any of the important
++ * slots past it.
++ *
++ * Xen is a different story, because the Xen frame itself overlaps
++ * the "NMI executing" variable.
++ */
+ PARAVIRT_ADJUST_EXCEPTION_FRAME
++
+ /*
+ * We allow breakpoints in NMIs. If a breakpoint occurs, then
+ * the iretq it performs will take us out of NMI context.
+--- a/arch/x86/kernel/paravirt.c
++++ b/arch/x86/kernel/paravirt.c
+@@ -40,10 +40,18 @@
+ #include <asm/timer.h>
+ #include <asm/special_insns.h>
+
+-/* nop stub */
+-void _paravirt_nop(void)
+-{
+-}
++/*
++ * nop stub, which must not clobber anything *including the stack* to
++ * avoid confusing the entry prologues.
++ */
++extern void _paravirt_nop(void);
++asm (".pushsection .entry.text, \"ax\"\n"
++ ".global _paravirt_nop\n"
++ "_paravirt_nop:\n\t"
++ "ret\n\t"
++ ".size _paravirt_nop, . - _paravirt_nop\n\t"
++ ".type _paravirt_nop, @function\n\t"
++ ".popsection");
+
+ /* identity function, which can be inlined */
+ u32 _paravirt_ident_32(u32 x)