--- /dev/null
+From 956421fbb74c3a6261903f3836c0740187cf038b Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@amacapital.net>
+Date: Thu, 5 Mar 2015 01:09:44 +0100
+Subject: x86/asm/entry/64: Remove a bogus 'ret_from_fork' optimization
+
+From: Andy Lutomirski <luto@amacapital.net>
+
+commit 956421fbb74c3a6261903f3836c0740187cf038b upstream.
+
+'ret_from_fork' checks TIF_IA32 to determine whether 'pt_regs' and
+the related state make sense for 'ret_from_sys_call'. This is
+entirely the wrong check. TS_COMPAT would make a little more
+sense, but there's really no point in keeping this optimization
+at all.
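+
+To make the distinction concrete, a rough kernel-style sketch (the
+two helper functions are invented for illustration; only
+test_tsk_thread_flag() and the TS_COMPAT status bit are the real
+interfaces):
+
+	#include <linux/sched.h>
+	#include <linux/thread_info.h>
+
+	/* TIF_IA32: a property of the task itself -- was it
+	 * exec'ed as a 32-bit binary? */
+	static bool task_is_ia32(struct task_struct *t)
+	{
+		return test_tsk_thread_flag(t, TIF_IA32);
+	}
+
+	/* TS_COMPAT: a property of the current kernel entry --
+	 * did this syscall arrive via a compat path such as
+	 * int 0x80, whatever the binary's bitness? */
+	static bool entry_is_compat(void)
+	{
+		return current_thread_info()->status & TS_COMPAT;
+	}
+
+A 64-bit task that issues int 0x80 has TIF_IA32 clear but TS_COMPAT
+set for the duration of that syscall, which is exactly the case the
+old check got wrong.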
+
+This fixes a case where we could return to user mode with the wrong
+CS if we entered the kernel via int 0x80 in a 64-bit task.
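+
+A minimal userspace sketch of the trigger (illustrative only, not
+part of the original report): a 64-bit task forks through the 32-bit
+int 0x80 entry, so the child comes back via ret_from_fork.  Syscall
+number 2 is fork in the i386 ABI, CONFIG_IA32_EMULATION is assumed,
+and int 0x80 from 64-bit code may clobber r8-r11, hence the clobber
+list.  Observing the wrong-CS symptom itself additionally requires
+having entered from a 32-bit code segment:
+
+	#include <stdio.h>
+	#include <sys/wait.h>
+
+	int main(void)
+	{
+		long ret;
+		unsigned short cs;
+
+		/* fork via the 32-bit int 0x80 entry path */
+		asm volatile ("int $0x80"
+			      : "=a" (ret)
+			      : "a" (2)
+			      : "r8", "r9", "r10", "r11", "memory");
+
+		/* which CS did we resume user mode with? */
+		asm volatile ("mov %%cs, %0" : "=r" (cs));
+
+		printf("%s: cs=0x%hx\n", ret ? "parent" : "child", cs);
+		if (ret > 0)
+			wait(NULL);
+		return 0;
+	}
+
+With the old fastpath the child could come back via SYSRET even
+though it entered via int 0x80; after this change it always goes
+through int_ret_from_sys_call and returns with IRET.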
+
+Signed-off-by: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/4710be56d76ef994ddf59087aad98c000fbab9a4.1424989793.git.luto@amacapital.net
+[ Backported from tip:x86/asm. ]
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/entry_64.S | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -542,11 +542,14 @@ ENTRY(ret_from_fork)
+ testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
+ jz 1f
+
+- testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
+- jnz int_ret_from_sys_call
+-
+- RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
+- jmp ret_from_sys_call # go to the SYSRET fastpath
++ /*
++ * By the time we get here, we have no idea whether our pt_regs,
++ * ti flags, and ti status came from the 64-bit SYSCALL fast path,
++ * the slow path, or one of the ia32entry paths.
++ * Use int_ret_from_sys_call to return, since it can safely handle
++ * all of the above.
++ */
++ jmp int_ret_from_sys_call
+
+ 1:
+ subq $REST_SKIP, %rsp # leave space for volatiles