--- /dev/null
+From 956421fbb74c3a6261903f3836c0740187cf038b Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@amacapital.net>
+Date: Thu, 5 Mar 2015 01:09:44 +0100
+Subject: x86/asm/entry/64: Remove a bogus 'ret_from_fork' optimization
+
+From: Andy Lutomirski <luto@amacapital.net>
+
+commit 956421fbb74c3a6261903f3836c0740187cf038b upstream.
+
+'ret_from_fork' checks TIF_IA32 to determine whether 'pt_regs' and
+the related state make sense for 'ret_from_sys_call'. This is
+entirely the wrong check. TS_COMPAT would make a little more
+sense, but there's really no point in keeping this optimization
+at all.
+
+This fixes a return to the wrong user CS if we came from int
+0x80 in a 64-bit task.
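+
+A hypothetical userspace sketch (an illustration, not part of this
+patch) that exercises the affected path: fork via the legacy
+int $0x80 entry from a 64-bit task, so that the child returns
+through 'ret_from_fork' with 'pt_regs' written by the ia32entry
+path rather than by the 64-bit SYSCALL fast path, then read %cs in
+both processes:
+
+  #include <stdio.h>
+  #include <sys/wait.h>
+
+  int main(void)
+  {
+          long pid;
+          unsigned short cs;
+
+          /* fork is syscall nr 2 in the i386 ABI used by int $0x80 */
+          asm volatile("int $0x80" : "=a" (pid) : "a" (2) : "memory");
+
+          /* read the user code segment we resumed with */
+          asm volatile("mov %%cs, %0" : "=r" (cs));
+
+          if (pid == 0) {
+                  printf("child:  cs=%#hx\n", cs);
+                  return 0;
+          }
+          printf("parent: cs=%#hx\n", cs);
+          waitpid((pid_t)pid, NULL, 0);
+          return 0;
+  }
+
+With the old code the child took the SYSRET fastpath even though its
+'pt_regs' did not come from the 64-bit SYSCALL entry, so it could
+resume on a CS other than the one saved at entry (visible when the
+task entered int $0x80 from a non-default, e.g. modify_ldt()-installed,
+code segment).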
+
+Signed-off-by: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/4710be56d76ef994ddf59087aad98c000fbab9a4.1424989793.git.luto@amacapital.net
+[ Backported from tip:x86/asm. ]
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/entry_64.S | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -334,11 +334,14 @@ ENTRY(ret_from_fork)
+ testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
+ jz 1f
+
+- testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
+- jnz int_ret_from_sys_call
+-
+- RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
+- jmp ret_from_sys_call # go to the SYSRET fastpath
++ /*
++ * By the time we get here, we have no idea whether our pt_regs,
++ * ti flags, and ti status came from the 64-bit SYSCALL fast path,
++ * the slow path, or one of the ia32entry paths.
++ * Use int_ret_from_sys_call to return, since it can safely handle
++ * all of the above.
++ */
++ jmp int_ret_from_sys_call
+
+ 1:
+ subq $REST_SKIP, %rsp # leave space for volatiles
--- /dev/null
+From 06c8173eb92bbfc03a0fe8bb64315857d0badd06 Mon Sep 17 00:00:00 2001
+From: Quentin Casasnovas <quentin.casasnovas@oracle.com>
+Date: Thu, 5 Mar 2015 13:19:22 +0100
+Subject: x86/fpu/xsaves: Fix improper uses of __ex_table
+
+From: Quentin Casasnovas <quentin.casasnovas@oracle.com>
+
+commit 06c8173eb92bbfc03a0fe8bb64315857d0badd06 upstream.
+
+Commit:
+
+ f31a9f7c7169 ("x86/xsaves: Use xsaves/xrstors to save and restore xsave area")
+
+introduced alternative instructions for XSAVES/XRSTORS and commit:
+
+ adb9d526e982 ("x86/xsaves: Add xsaves and xrstors support for booting time")
+
+added support for the XSAVES/XRSTORS instructions at boot time.
+
+Unfortunately both failed to properly protect them against faulting:
+
+The 'xstate_fault' macro resolves its '1b' reference to the closest
+preceding label named '1', and that label ends up in the
+.altinstr_replacement section rather than in .text. This means that
+the kernel will never find, in the __ex_table, the .text address at
+which this instruction might fault, leading to serious problems if
+userspace manages to trigger the fault.
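+
+For reference, the macro is roughly (a simplified sketch, not the
+verbatim header):
+
+  #define xstate_fault  ".section .fixup,\"ax\"\n"      \
+                        "3:  movl $-1, %[err]\n"        \
+                        "    jmp  2b\n"                 \
+                        ".previous\n"                   \
+                        _ASM_EXTABLE(1b, 3b)            \
+                        : [err] "=r" (err)
+
+_ASM_EXTABLE(1b, 3b) records "if the instruction at label '1' faults,
+resume at label '3'", and the assembler resolves '1b' to the nearest
+preceding '1:'. When that '1:' sits in the replacement text of an
+alternative, the recorded fault address lands in .altinstr_replacement,
+while the instruction actually executes from the .text location it was
+patched into, so the exception-table lookup can never match.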
+
+Signed-off-by: Quentin Casasnovas <quentin.casasnovas@oracle.com>
+Signed-off-by: Jamie Iles <jamie.iles@oracle.com>
+[ Improved the changelog, fixed some whitespace noise. ]
+Acked-by: Borislav Petkov <bp@alien8.de>
+Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Allan Xavier <mr.a.xavier@gmail.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Fixes: adb9d526e982 ("x86/xsaves: Add xsaves and xrstors support for booting time")
+Fixes: f31a9f7c7169 ("x86/xsaves: Use xsaves/xrstors to save and restore xsave area")
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/xsave.h | 28 +++++++++++-----------------
+ 1 file changed, 11 insertions(+), 17 deletions(-)
+
+--- a/arch/x86/include/asm/xsave.h
++++ b/arch/x86/include/asm/xsave.h
+@@ -82,18 +82,15 @@ static inline int xsave_state_booting(st
+ if (boot_cpu_has(X86_FEATURE_XSAVES))
+ asm volatile("1:"XSAVES"\n\t"
+ "2:\n\t"
+- : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
++ xstate_fault
++ : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+ : "memory");
+ else
+ asm volatile("1:"XSAVE"\n\t"
+ "2:\n\t"
+- : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
++ xstate_fault
++ : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+ : "memory");
+-
+- asm volatile(xstate_fault
+- : "0" (0)
+- : "memory");
+-
+ return err;
+ }
+
+@@ -112,18 +109,15 @@ static inline int xrstor_state_booting(s
+ if (boot_cpu_has(X86_FEATURE_XSAVES))
+ asm volatile("1:"XRSTORS"\n\t"
+ "2:\n\t"
+- : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
++ xstate_fault
++ : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+ : "memory");
+ else
+ asm volatile("1:"XRSTOR"\n\t"
+ "2:\n\t"
+- : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
++ xstate_fault
++ : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+ : "memory");
+-
+- asm volatile(xstate_fault
+- : "0" (0)
+- : "memory");
+-
+ return err;
+ }
+
+@@ -149,9 +143,9 @@ static inline int xsave_state(struct xsa
+ */
+ alternative_input_2(
+ "1:"XSAVE,
+- "1:"XSAVEOPT,
++ XSAVEOPT,
+ X86_FEATURE_XSAVEOPT,
+- "1:"XSAVES,
++ XSAVES,
+ X86_FEATURE_XSAVES,
+ [fx] "D" (fx), "a" (lmask), "d" (hmask) :
+ "memory");
+@@ -178,7 +172,7 @@ static inline int xrstor_state(struct xs
+ */
+ alternative_input(
+ "1: " XRSTOR,
+- "1: " XRSTORS,
++ XRSTORS,
+ X86_FEATURE_XSAVES,
+ "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+ : "memory");