x86/retpoline/entry: Convert entry assembler indirect jumps
author     David Woodhouse <dwmw@amazon.co.uk>
           Thu, 11 Jan 2018 21:46:28 +0000 (21:46 +0000)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Tue, 23 Jan 2018 18:50:12 +0000 (19:50 +0100)
commit 2641f08bb7fc63a636a2b18173221d7040a3512e upstream.

Convert indirect jumps in core 32/64bit entry assembler code to use
non-speculative sequences when CONFIG_RETPOLINE is enabled.

Don't use CALL_NOSPEC in entry_SYSCALL_64_fastpath because the return
address after the 'call' instruction must be *precisely* at the
.Lentry_SYSCALL_64_after_fastpath label for stub_ptregs_64 to work,
and the use of alternatives will mess that up unless we play horrid
games to prepend with NOPs and make the variants the same length. It's
not worth it; in the case where we ALTERNATIVE out the retpoline, the
first instruction at __x86_indirect_thunk_rax is going to be a bare
jmp *%rax anyway.
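
For illustration, the thunk in question looks roughly like this (a sketch
of the retpoline sequence added earlier in this series; label names and
comments are approximate, not the exact source):

        __x86_indirect_thunk_rax:
                call    .Ldo_rop        # pushes &.Lspec_trap; the RSB now
                                        # predicts a return to that label
        .Lspec_trap:
                pause                   # speculative execution is trapped
                lfence                  # harmlessly in this loop
                jmp     .Lspec_trap
        .Ldo_rop:
                mov     %rax, (%rsp)    # replace the return address on the
                                        # stack with the real branch target
                ret                     # architecturally jumps to *%rax

On CPUs that don't need the mitigation, alternatives patch the thunk body
down to that bare 'jmp *%rax', so calling the thunk directly instead of
using CALL_NOSPEC inline costs almost nothing there.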

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Arjan van de Ven <arjan@linux.intel.com>
Cc: gnomes@lxorguk.ukuu.org.uk
Cc: Rik van Riel <riel@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: thomas.lendacky@amd.com
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Jiri Kosina <jikos@kernel.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Kees Cook <keescook@google.com>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
Cc: Paul Turner <pjt@google.com>
Link: https://lkml.kernel.org/r/1515707194-20531-7-git-send-email-dwmw@amazon.co.uk
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Razvan Ghitulete <rga@amazon.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/entry/entry_32.S
arch/x86/entry/entry_64.S

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index ae678ad128a94d2e945c15a85e9553bb1df9edd6..adbbd4f538e96de9c09d6d634957567a64f9aa09 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -44,6 +44,7 @@
 #include <asm/alternative-asm.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
+#include <asm/nospec-branch.h>
 
        .section .entry.text, "ax"
 
@@ -226,7 +227,8 @@ ENTRY(ret_from_kernel_thread)
        pushl   $0x0202                         # Reset kernel eflags
        popfl
        movl    PT_EBP(%esp), %eax
-       call    *PT_EBX(%esp)
+       movl    PT_EBX(%esp), %edx
+       CALL_NOSPEC %edx
        movl    $0, PT_EAX(%esp)
 
        /*
@@ -938,7 +940,7 @@ error_code:
        movl    %ecx, %es
        TRACE_IRQS_OFF
        movl    %esp, %eax                      # pt_regs pointer
-       call    *%edi
+       CALL_NOSPEC %edi
        jmp     ret_from_exception
 END(page_fault)
 
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 952b23b5d4e99aae0db56e67a5594c5348a2aa92..81b1cd533965465b512f5584f2cc752dfc29ba27 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -36,6 +36,7 @@
 #include <asm/smap.h>
 #include <asm/pgtable_types.h>
 #include <asm/kaiser.h>
+#include <asm/nospec-branch.h>
 #include <linux/err.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
@@ -184,7 +185,13 @@ entry_SYSCALL_64_fastpath:
 #endif
        ja      1f                              /* return -ENOSYS (already in pt_regs->ax) */
        movq    %r10, %rcx
+#ifdef CONFIG_RETPOLINE
+       movq    sys_call_table(, %rax, 8), %rax
+       call    __x86_indirect_thunk_rax
+#else
        call    *sys_call_table(, %rax, 8)
+#endif
+
        movq    %rax, RAX(%rsp)
 1:
 /*
@@ -276,7 +283,12 @@ tracesys_phase2:
 #endif
        ja      1f                              /* return -ENOSYS (already in pt_regs->ax) */
        movq    %r10, %rcx                      /* fixup for C */
+#ifdef CONFIG_RETPOLINE
+       movq    sys_call_table(, %rax, 8), %rax
+       call    __x86_indirect_thunk_rax
+#else
        call    *sys_call_table(, %rax, 8)
+#endif
        movq    %rax, RAX(%rsp)
 1:
        /* Use IRET because user could have changed pt_regs->foo */
@@ -491,7 +503,7 @@ ENTRY(ret_from_fork)
         * nb: we depend on RESTORE_EXTRA_REGS above
         */
        movq    %rbp, %rdi
-       call    *%rbx
+       CALL_NOSPEC %rbx
        movl    $0, RAX(%rsp)
        RESTORE_EXTRA_REGS
        jmp     int_ret_from_sys_call
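
For reference, the CALL_NOSPEC used in the hunks above is a macro from the
newly included <asm/nospec-branch.h>. A simplified sketch of its shape
(an approximation: the real macro is built from ALTERNATIVE_2 so the
retpoline sequence can be patched back to a plain 'call *reg' at boot on
CPUs that don't need it):

        .macro CALL_NOSPEC reg:req
        #ifdef CONFIG_RETPOLINE
                jmp     .Ldo_call_\@            # skip over the retpoline body
        .Ldo_retpoline_jmp_\@:
                call    .Ldo_rop_\@             # same speculation trap as the thunks
        .Lspec_trap_\@:
                pause
                lfence
                jmp     .Lspec_trap_\@
        .Ldo_rop_\@:
                mov     \reg, (%_ASM_SP)        # retarget the 'ret' at the real destination
                ret
        .Ldo_call_\@:
                call    .Ldo_retpoline_jmp_\@   # pushes the real return address
        #else
                call    *\reg
        #endif
        .endm

Because alternatives pad the shorter variant with NOPs at the end, the
address immediately after the 'call' differs between the patched and
unpatched variants unless NOPs are prepended instead; those are the
"horrid games" the message above refers to, and why
entry_SYSCALL_64_fastpath open-codes 'call __x86_indirect_thunk_rax'
rather than using this macro.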