From: David Woodhouse
Date: Thu, 11 Jan 2018 21:46:28 +0000 (+0000)
Subject: x86/retpoline/entry: Convert entry assembler indirect jumps
X-Git-Tag: v3.2.101~45
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=1a63cf5cb98b6bbd6a51b4b97b19c3c34c59bd8c;p=thirdparty%2Fkernel%2Fstable.git

x86/retpoline/entry: Convert entry assembler indirect jumps

commit 2641f08bb7fc63a636a2b18173221d7040a3512e upstream.

Convert indirect jumps in core 32/64bit entry assembler code to use
non-speculative sequences when CONFIG_RETPOLINE is enabled.

Don't use CALL_NOSPEC in entry_SYSCALL_64_fastpath because the return
address after the 'call' instruction must be *precisely* at the
.Lentry_SYSCALL_64_after_fastpath label for stub_ptregs_64 to work,
and the use of alternatives will mess that up unless we play horrid
games to prepend with NOPs and make the variants the same length. It's
not worth it; in the case where we ALTERNATIVE out the retpoline, the
first instruction at __x86.indirect_thunk.rax is going to be a bare
jmp *%rax anyway.

Signed-off-by: David Woodhouse
Signed-off-by: Thomas Gleixner
Acked-by: Ingo Molnar
Acked-by: Arjan van de Ven
Cc: gnomes@lxorguk.ukuu.org.uk
Cc: Rik van Riel
Cc: Andi Kleen
Cc: Josh Poimboeuf
Cc: thomas.lendacky@amd.com
Cc: Peter Zijlstra
Cc: Linus Torvalds
Cc: Jiri Kosina
Cc: Andy Lutomirski
Cc: Dave Hansen
Cc: Kees Cook
Cc: Tim Chen
Cc: Greg Kroah-Hartman
Cc: Paul Turner
Link: https://lkml.kernel.org/r/1515707194-20531-7-git-send-email-dwmw@amazon.co.uk
Signed-off-by: David Woodhouse
Signed-off-by: Razvan Ghitulete
[bwh: Backported to 3.2:
 - Also update indirect jumps through system call table in entry_32.S
   and ia32entry.S
 - Adjust filenames, context]
Signed-off-by: Ben Hutchings
---

diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 130db0702d9fe..ad5ea3e24e3f8 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -18,6 +18,7 @@
 #include
 #include
 #include
+#include <asm/nospec-branch.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
 #include <linux/elf-em.h>
@@ -165,7 +166,12 @@ ENTRY(ia32_sysenter_target)
 sysenter_do_call:
 	IA32_ARG_FIXUP
 sysenter_dispatch:
+#ifdef CONFIG_RETPOLINE
+	movq	ia32_sys_call_table(,%rax,8),%rax
+	call	__x86_indirect_thunk_rax
+#else
 	call	*ia32_sys_call_table(,%rax,8)
+#endif
 	movq	%rax,RAX-ARGOFFSET(%rsp)
 	GET_THREAD_INFO(%r10)
 	DISABLE_INTERRUPTS(CLBR_NONE)
@@ -326,7 +332,12 @@ ENTRY(ia32_cstar_target)
 cstar_do_call:
 	IA32_ARG_FIXUP 1
 cstar_dispatch:
+#ifdef CONFIG_RETPOLINE
+	movq	ia32_sys_call_table(,%rax,8),%rax
+	call	__x86_indirect_thunk_rax
+#else
 	call	*ia32_sys_call_table(,%rax,8)
+#endif
 	movq	%rax,RAX-ARGOFFSET(%rsp)
 	GET_THREAD_INFO(%r10)
 	DISABLE_INTERRUPTS(CLBR_NONE)
@@ -437,7 +448,12 @@ ENTRY(ia32_syscall)
 	ja ia32_badsys
 ia32_do_call:
 	IA32_ARG_FIXUP
+#ifdef CONFIG_RETPOLINE
+	movq	ia32_sys_call_table(,%rax,8),%rax
+	call	__x86_indirect_thunk_rax
+#else
 	call *ia32_sys_call_table(,%rax,8) # xxx: rip relative
+#endif
 ia32_sysret:
 	movq %rax,RAX-ARGOFFSET(%rsp)
 ia32_ret_from_sys_call:
@@ -503,7 +519,7 @@ ENTRY(ia32_ptregs_common)
 	CFI_REL_OFFSET rsp,RSP-ARGOFFSET
 /*	CFI_REL_OFFSET ss,SS-ARGOFFSET*/
 	SAVE_REST
-	call *%rax
+	CALL_NOSPEC %rax
 	RESTORE_REST
 	jmp ia32_sysret	/* misbalances the return cache */
 	CFI_ENDPROC
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 061f09b469297..4859635f5d426 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -55,6 +55,7 @@
 #include
 #include
 #include
+#include <asm/nospec-branch.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
 #include <linux/elf-em.h>
@@ -428,7 +429,12 @@ sysenter_past_esp:
 sysenter_do_call:
 	cmpl $(nr_syscalls), %eax
 	jae sysenter_badsys
+#ifdef CONFIG_RETPOLINE
+	movl sys_call_table(,%eax,4),%eax
+	call __x86_indirect_thunk_eax
+#else
 	call *sys_call_table(,%eax,4)
+#endif
 sysenter_after_call:
 	movl %eax,PT_EAX(%esp)
 	LOCKDEP_SYS_EXIT
@@ -511,7 +517,12 @@ ENTRY(system_call)
 	cmpl $(nr_syscalls), %eax
 	jae syscall_badsys
 syscall_call:
+#ifdef CONFIG_RETPOLINE
+	movl sys_call_table(,%eax,4),%eax
+	call __x86_indirect_thunk_eax
+#else
 	call *sys_call_table(,%eax,4)
+#endif
 syscall_after_call:
 	movl %eax,PT_EAX(%esp)		# store the return value
 syscall_exit:
@@ -1017,7 +1028,7 @@ ENTRY(kernel_thread_helper)
 	pushl $0		# fake return address for unwinder
 	CFI_STARTPROC
 	movl %edi,%eax
-	call *%esi
+	CALL_NOSPEC %esi
 	call do_exit
 	ud2			# padding for call trace
 	CFI_ENDPROC
@@ -1274,7 +1285,7 @@ error_code:
 	movl %ecx, %es
 	TRACE_IRQS_OFF
 	movl %esp,%eax			# pt_regs pointer
-	call *%edi
+	CALL_NOSPEC %edi
 	jmp ret_from_exception
 	CFI_ENDPROC
 END(page_fault)
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index c13ca52ce42e5..7e9d2ad4c47d6 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -59,6 +59,7 @@
 #include
 #include
 #include
+#include <asm/nospec-branch.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
 #include <linux/elf-em.h>
@@ -519,7 +520,12 @@ system_call_fastpath:
 	cmpq $__NR_syscall_max,%rax
 	ja badsys
 	movq %r10,%rcx
+#ifdef CONFIG_RETPOLINE
+	movq	sys_call_table(, %rax, 8), %rax
+	call	__x86_indirect_thunk_rax
+#else
 	call *sys_call_table(,%rax,8)  # XXX:	 rip relative
+#endif
 	movq %rax,RAX-ARGOFFSET(%rsp)
 /*
  * Syscall return path ending with SYSRET (fast path)
@@ -643,7 +649,12 @@ tracesys:
 	cmpq $__NR_syscall_max,%rax
 	ja int_ret_from_sys_call	/* RAX(%rsp) set to -ENOSYS above */
 	movq %r10,%rcx	/* fixup for C */
+#ifdef CONFIG_RETPOLINE
+	movq	sys_call_table(, %rax, 8), %rax
+	call	__x86_indirect_thunk_rax
+#else
 	call *sys_call_table(,%rax,8)
+#endif
 	movq %rax,RAX-ARGOFFSET(%rsp)
 	/* Use IRET because user could have changed frame */
 
@@ -1219,7 +1230,7 @@ ENTRY(kernel_thread_helper)
 	 * Here we are in the child and the registers are set as they were
 	 * at kernel_thread() invocation in the parent.
 	 */
-	call *%rsi
+	CALL_NOSPEC %rsi
 	# exit
 	mov %eax, %edi
 	call do_exit
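
For readers following the diff: below is roughly what the __x86_indirect_thunk_rax
target (and the CALL_NOSPEC macro used at the register-indirect call sites) expands
to when retpolines are active. This is a simplified sketch of the retpoline
sequence, not the exact code from asm/nospec-branch.h and arch/x86/lib/retpoline.S;
the real thunks are generated per register and wrapped in ALTERNATIVE so that a
plain 'jmp *%reg' (or 'lfence; jmp *%reg' on AMD) is patched in when a retpoline
is not needed.

	/* Simplified retpoline sketch for the %rax case */
__x86_indirect_thunk_rax:
	call	.Ldo_rop		/* push a "safe" return address		*/
.Lspec_trap:
	pause				/* speculation is trapped in this loop	*/
	lfence
	jmp	.Lspec_trap
.Ldo_rop:
	mov	%rax, (%rsp)		/* overwrite return address with target	*/
	ret				/* "return" to *%rax, no indirect branch */

Because each converted call site first loads the system-call-table entry into
%rax/%eax and then does a direct 'call __x86_indirect_thunk_rax', the speculation
window of the original 'call *sys_call_table(...)' indirect branch is closed, while
the #else path keeps the non-retpoline build byte-for-byte unchanged.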