/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/jump_label.h>
#include <asm/unwind_hints.h>
#include <asm/cpufeatures.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
#include <asm/ptrace-abi.h>
#include <asm/msr.h>
#include <asm/nospec-branch.h>

/*

 x86 function call convention, 64-bit:
 -------------------------------------
  arguments           |  callee-saved      | extra caller-saved | return
 [callee-clobbered]   |                    | [callee-clobbered] |
 ---------------------------------------------------------------------------
 rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]

 ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
   functions when it sees tail-call optimization possibilities) rflags is
   clobbered. Leftover arguments are passed over the stack frame.)

 [*]  In the frame-pointers case rbp is fixed to the stack frame.

 [**] for struct return values wider than 64 bits the return convention is a
      bit more complex: up to 128 bits width we return small structures
      straight in rax, rdx. For structures larger than that (3 words or
      larger) the caller puts a pointer to an on-stack return struct
      [allocated in the caller's stack frame] into the first argument - i.e.
      into rdi. All other arguments shift up by one in this case.
      Fortunately this case is rare in the kernel.
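
      As an illustrative sketch (hypothetical types, not from this file):

	struct two_words   { long a, b; }     is returned in rax:rdx, while
	struct three_words { long a, b, c; }  is returned via a hidden pointer,

      so 'struct three_words f(long x)' is effectively compiled as
      'void f(struct three_words *ret, long x)' - the hidden pointer takes
      rdi and 'x' moves up to rsi.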

 For 32-bit we have the following conventions - kernel is built with
 -mregparm=3 and -freg-struct-return:

 x86 function calling convention, 32-bit:
 ----------------------------------------
  arguments         | callee-saved        | extra caller-saved | return
 [callee-clobbered] |                     | [callee-clobbered] |
 -------------------------------------------------------------------------
 eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]

 ( here too esp is obviously invariant across normal function calls. eflags
   is clobbered. Leftover arguments are passed over the stack frame. )

 [*]  In the frame-pointers case ebp is fixed to the stack frame.

 [**] We build with -freg-struct-return, which on 32-bit means similar
      semantics as on 64-bit: edx can be used for a second return value
      (i.e. covering integer and structure sizes up to 64 bits) - after that
      it gets more complex and more expensive: 3-word or larger struct returns
      get done in the caller's frame and the pointer to the return struct goes
      into regparm0, i.e. eax - the other arguments shift up and the
      function's register parameters degenerate to regparm=2 in essence.

*/
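
/*
 * Illustrative 32-bit sketch (hypothetical function, not from this file):
 * with -mregparm=3, 'long f(long a, long b, long c)' takes a/b/c in
 * eax/edx/ecx. A 3-word struct return degenerates it to regparm=2: the
 * return-struct pointer goes in eax, 'a'/'b' move to edx/ecx, and 'c'
 * is passed on the stack.
 */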

#ifdef CONFIG_X86_64

/*
 * 64-bit system call stack frame layout defines and helpers,
 * for assembly code:
 */

.macro PUSH_REGS rdx=%rdx rcx=%rcx rax=%rax save_ret=0
	.if \save_ret
	pushq	%rsi		/* pt_regs->si */
	movq	8(%rsp), %rsi	/* temporarily store the return address in %rsi */
	movq	%rdi, 8(%rsp)	/* pt_regs->di (overwriting original return address) */
	.else
	pushq	%rdi		/* pt_regs->di */
	pushq	%rsi		/* pt_regs->si */
	.endif
	pushq	\rdx		/* pt_regs->dx */
	pushq	\rcx		/* pt_regs->cx */
	pushq	\rax		/* pt_regs->ax */
	pushq	%r8		/* pt_regs->r8 */
	pushq	%r9		/* pt_regs->r9 */
	pushq	%r10		/* pt_regs->r10 */
	pushq	%r11		/* pt_regs->r11 */
	pushq	%rbx		/* pt_regs->rbx */
	pushq	%rbp		/* pt_regs->rbp */
	pushq	%r12		/* pt_regs->r12 */
	pushq	%r13		/* pt_regs->r13 */
	pushq	%r14		/* pt_regs->r14 */
	pushq	%r15		/* pt_regs->r15 */

	.if \save_ret
	pushq	%rsi		/* return address on top of stack */
	.endif
.endm
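
/*
 * Sketch of the resulting frame (save_ret=0), matching the start of
 * struct pt_regs; offsets are from %rsp after the pushes:
 *
 *	0x00 = r15   0x08 = r14   0x10 = r13   0x18 = r12
 *	0x20 = bp    0x28 = bx    0x30 = r11   0x38 = r10
 *	0x40 = r9    0x48 = r8    0x50 = ax    0x58 = cx
 *	0x60 = dx    0x68 = si    0x70 = di
 *
 * With save_ret=1 the return address sits at 0x00 and everything above
 * shifts up by 8.
 */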

.macro CLEAR_REGS
	/*
	 * Sanitize registers of values that a speculation attack might
	 * otherwise want to exploit. The lower registers are likely clobbered
	 * well before they could be put to use in a speculative execution
	 * gadget.
	 */
	xorl	%esi,  %esi	/* nospec si  */
	xorl	%edx,  %edx	/* nospec dx  */
	xorl	%ecx,  %ecx	/* nospec cx  */
	xorl	%r8d,  %r8d	/* nospec r8  */
	xorl	%r9d,  %r9d	/* nospec r9  */
	xorl	%r10d, %r10d	/* nospec r10 */
	xorl	%r11d, %r11d	/* nospec r11 */
	xorl	%ebx,  %ebx	/* nospec rbx */
	xorl	%ebp,  %ebp	/* nospec rbp */
	xorl	%r12d, %r12d	/* nospec r12 */
	xorl	%r13d, %r13d	/* nospec r13 */
	xorl	%r14d, %r14d	/* nospec r14 */
	xorl	%r15d, %r15d	/* nospec r15 */
.endm

.macro PUSH_AND_CLEAR_REGS rdx=%rdx rcx=%rcx rax=%rax save_ret=0
	PUSH_REGS rdx=\rdx, rcx=\rcx, rax=\rax, save_ret=\save_ret
	CLEAR_REGS
.endm
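
/*
 * Typical use from an entry point (sketch; the save_ret=1 form mirrors
 * how error_entry in entry_64.S builds its frame):
 *
 *	PUSH_AND_CLEAR_REGS save_ret=1
 *	... all pt_regs slots populated, GPRs sanitized ...
 */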

.macro POP_REGS pop_rdi=1
	popq %r15
	popq %r14
	popq %r13
	popq %r12
	popq %rbp
	popq %rbx
	popq %r11
	popq %r10
	popq %r9
	popq %r8
	popq %rax
	popq %rcx
	popq %rdx
	popq %rsi
	.if \pop_rdi
	popq %rdi
	.endif
.endm

#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION

/*
 * MITIGATION_PAGE_TABLE_ISOLATION PGDs are 8k.  Flip bit 12 to switch between the two
 * halves:
 */
#define PTI_USER_PGTABLE_BIT		PAGE_SHIFT
#define PTI_USER_PGTABLE_MASK		(1 << PTI_USER_PGTABLE_BIT)
#define PTI_USER_PCID_BIT		X86_CR3_PTI_PCID_USER_BIT
#define PTI_USER_PCID_MASK		(1 << PTI_USER_PCID_BIT)
#define PTI_USER_PGTABLE_AND_PCID_MASK  (PTI_USER_PCID_MASK | PTI_USER_PGTABLE_MASK)
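
/*
 * Worked example (4K pages, PAGE_SHIFT == 12, hypothetical address): if
 * the kernel half of the 8k PGD pair is at physical 0x1000000, the user
 * half is at 0x1001000. OR-ing PTI_USER_PGTABLE_MASK (1 << 12) into the
 * CR3 value selects the user half; AND-ing it out selects the kernel half.
 */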

.macro SET_NOFLUSH_BIT	reg:req
	bts	$X86_CR3_PCID_NOFLUSH_BIT, \reg
.endm

.macro ADJUST_KERNEL_CR3 reg:req
	ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID
	/* Clear PCID and "MITIGATION_PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */
	andq	$(~PTI_USER_PGTABLE_AND_PCID_MASK), \reg
.endm

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg
	ADJUST_KERNEL_CR3 \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm

#define THIS_CPU_user_pcid_flush_mask   \
	PER_CPU_VAR(cpu_tlbstate + TLB_STATE_user_pcid_flush_mask)

.macro SWITCH_TO_USER_CR3 scratch_reg:req scratch_reg2:req
	mov	%cr3, \scratch_reg

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * Test if the ASID needs a flush.
	 */
	movq	\scratch_reg, \scratch_reg2
	andq	$(0x7FF), \scratch_reg		/* mask ASID */
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	/* Flush needed, clear the bit */
	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	movq	\scratch_reg2, \scratch_reg
	jmp	.Lwrcr3_pcid_\@

.Lnoflush_\@:
	movq	\scratch_reg2, \scratch_reg
	SET_NOFLUSH_BIT \scratch_reg

.Lwrcr3_pcid_\@:
	/* Flip the ASID to the user version */
	orq	$(PTI_USER_PCID_MASK), \scratch_reg

.Lwrcr3_\@:
	/* Flip the PGD to the user version */
	orq	$(PTI_USER_PGTABLE_MASK), \scratch_reg
	mov	\scratch_reg, %cr3
.endm
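
/*
 * CR3 bit layout assumed by the PTI/PCID code above (sketch):
 *
 *	bit  63    - X86_CR3_PCID_NOFLUSH_BIT: suppress the implicit TLB
 *	             flush on the CR3 write
 *	bits 12-51 - physical address of the PGD page
 *	bit  12    - PTI_USER_PGTABLE_BIT: which half of the 8k PGD pair
 *	bits 0-11  - PCID; the 0x7FF mask above extracts the ASID and
 *	             bit 11 (X86_CR3_PTI_PCID_USER_BIT) selects the user
 *	             ASID variant
 */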

.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	SWITCH_TO_USER_CR3 \scratch_reg \scratch_reg2
.Lend_\@:
.endm

.macro SWITCH_TO_USER_CR3_STACK	scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	pushq	%rax
	SWITCH_TO_USER_CR3 scratch_reg=\scratch_reg scratch_reg2=%rax
	popq	%rax
.Lend_\@:
.endm

.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_PTI
	movq	%cr3, \scratch_reg
	movq	\scratch_reg, \save_reg
	/*
	 * Test the user pagetable bit. If set, then the user page tables
	 * are active. If clear CR3 already has the kernel page table
	 * active.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \scratch_reg
	jnc	.Ldone_\@

	ADJUST_KERNEL_CR3 \scratch_reg
	movq	\scratch_reg, %cr3

.Ldone_\@:
.endm

.macro RESTORE_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * KERNEL pages can always resume with NOFLUSH as we do
	 * explicit flushes.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \save_reg
	jnc	.Lnoflush_\@

	/*
	 * Check if there's a pending flush for the user ASID we're
	 * about to set.
	 */
	movq	\save_reg, \scratch_reg
	andq	$(0x7FF), \scratch_reg
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jmp	.Lwrcr3_\@

.Lnoflush_\@:
	SET_NOFLUSH_BIT \save_reg

.Lwrcr3_\@:
	/*
	 * The CR3 write could be avoided when not changing its value,
	 * but would require a CR3 read *and* a scratch register.
	 */
	movq	\save_reg, %cr3
.Lend_\@:
.endm

#else /* CONFIG_MITIGATION_PAGE_TABLE_ISOLATION=n: */

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
.endm
.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
.endm
.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req
.endm
.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
.endm
.macro RESTORE_CR3 scratch_reg:req save_reg:req
.endm

#endif

/*
 * IBRS kernel mitigation for Spectre_v2.
 *
 * Assumes full context is established (PUSH_REGS, CR3 and GS) and it clobbers
 * the regs it uses (AX, CX, DX). Must be called before the first RET
 * instruction (NOTE! UNTRAIN_RET includes a RET instruction)
 *
 * The optional argument is used to save/restore the current value,
 * which is used on the paranoid paths.
 *
 * Assumes x86_spec_ctrl_{base,current} to have SPEC_CTRL_IBRS set.
 */
.macro IBRS_ENTER save_reg
#ifdef CONFIG_MITIGATION_IBRS_ENTRY
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
	movl	$MSR_IA32_SPEC_CTRL, %ecx

.ifnb \save_reg
	rdmsr
	shl	$32, %rdx
	or	%rdx, %rax
	mov	%rax, \save_reg

	test	$SPEC_CTRL_IBRS, %eax
	jz	.Ldo_wrmsr_\@
	lfence
	jmp	.Lend_\@
.Ldo_wrmsr_\@:
.endif

	movq	PER_CPU_VAR(x86_spec_ctrl_current), %rdx
	movl	%edx, %eax
	shr	$32, %rdx
	wrmsr
.Lend_\@:
#endif
.endm

/*
 * Similar to IBRS_ENTER, requires KERNEL GS,CR3 and clobbers (AX, CX, DX)
 * regs. Must be called after the last RET.
 */
.macro IBRS_EXIT save_reg
#ifdef CONFIG_MITIGATION_IBRS_ENTRY
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
	movl	$MSR_IA32_SPEC_CTRL, %ecx

.ifnb \save_reg
	mov	\save_reg, %rdx
.else
	movq	PER_CPU_VAR(x86_spec_ctrl_current), %rdx
	andl	$(~SPEC_CTRL_IBRS), %edx
.endif

	movl	%edx, %eax
	shr	$32, %rdx
	wrmsr
.Lend_\@:
#endif
.endm
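
/*
 * Note on the MSR sequences above: wrmsr writes the 64-bit value edx:eax
 * to the MSR indexed by ecx. E.g. writing SPEC_CTRL_IBRS (bit 0) to
 * MSR_IA32_SPEC_CTRL is effectively:
 *
 *	movl	$MSR_IA32_SPEC_CTRL, %ecx
 *	movl	$SPEC_CTRL_IBRS, %eax
 *	xorl	%edx, %edx
 *	wrmsr
 */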

/*
 * Mitigate Spectre v1 for conditional swapgs code paths.
 *
 * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to
 * prevent a speculative swapgs when coming from kernel space.
 *
 * FENCE_SWAPGS_KERNEL_ENTRY is used in the kernel entry non-swapgs code path,
 * to prevent the swapgs from getting speculatively skipped when coming from
 * user space.
 */
.macro FENCE_SWAPGS_USER_ENTRY
	ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_USER
.endm
.macro FENCE_SWAPGS_KERNEL_ENTRY
	ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL
.endm

.macro STACKLEAK_ERASE_NOCLOBBER
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	PUSH_AND_CLEAR_REGS
	call stackleak_erase
	POP_REGS
#endif
.endm

.macro SAVE_AND_SET_GSBASE scratch_reg:req save_reg:req
	rdgsbase \save_reg
	GET_PERCPU_BASE \scratch_reg
	wrgsbase \scratch_reg
.endm

#else /* CONFIG_X86_64 */
# undef		UNWIND_HINT_IRET_REGS
# define	UNWIND_HINT_IRET_REGS
#endif /* !CONFIG_X86_64 */

.macro STACKLEAK_ERASE
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	call stackleak_erase
#endif
.endm

#ifdef CONFIG_SMP

/*
 * CPU/node NR is loaded from the limit (size) field of a special segment
 * descriptor entry in GDT.
 */
.macro LOAD_CPU_AND_NODE_SEG_LIMIT reg:req
	movq	$__CPUNODE_SEG, \reg
	lsl	\reg, \reg
.endm

/*
 * Fetch the per-CPU GSBASE value for this processor and put it in @reg.
 * We normally use %gs for accessing per-CPU data, but we are setting up
 * %gs here and obviously can not use %gs itself to access per-CPU data.
 *
 * Do not use RDPID, because KVM loads guest's TSC_AUX on vm-entry and
 * may not restore the host's value until the CPU returns to userspace.
 * Thus the kernel would consume a guest's TSC_AUX if an NMI arrives
 * while running KVM's run loop.
 */
.macro GET_PERCPU_BASE reg:req
	LOAD_CPU_AND_NODE_SEG_LIMIT \reg
	andq	$VDSO_CPUNODE_MASK, \reg
	movq	__per_cpu_offset(, \reg, 8), \reg
.endm
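
/*
 * Worked example (hypothetical values): for CPU 5 on node 2, the
 * __CPUNODE_SEG descriptor limit encodes (2 << VDSO_CPUNODE_BITS) | 5;
 * 'lsl' loads that value, AND-ing with VDSO_CPUNODE_MASK (0xfff) keeps
 * the CPU number 5, and __per_cpu_offset[5] is then loaded as the new
 * GSBASE.
 */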

#else

.macro GET_PERCPU_BASE reg:req
	movq	pcpu_unit_offsets(%rip), \reg
.endm

#endif /* CONFIG_SMP */

/* rdi: arg1 ... normal C conventions. rax is saved/restored. */
.macro THUNK name, func
SYM_FUNC_START(\name)

#else /* CONFIG_X86_32 */

/* put return address in eax (arg1) */
.macro THUNK name, func, put_ret_addr_in_eax=0
SYM_CODE_START_NOALIGN(\name)

	.if \put_ret_addr_in_eax
	/* Place EIP in the arg1 */