/*

 x86 function calling convention, 64-bit:
 ----------------------------------------
  arguments           |  callee-saved      | extra caller-saved | return
 [callee-clobbered]   |                    | [callee-clobbered] |
 ---------------------------------------------------------------------------
 rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]

 ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
   functions when it sees tail-call optimization possibilities.) rflags is
   clobbered. Leftover arguments are passed on the stack frame. )

 [*]  In the frame-pointers case rbp is fixed to the stack frame.

 [**] For struct return values wider than 64 bits the return convention is a
      bit more complex: up to 128 bits in width we return small structures
      straight in rax, rdx. For structures larger than that (3 words or
      more) the caller puts a pointer to an on-stack return struct
      [allocated in the caller's stack frame] into the first argument - i.e.
      into rdi. All other arguments shift up by one in this case.
      Fortunately this case is rare in the kernel.

 For 32-bit we have the following conventions - the kernel is built with
 -mregparm=3 and -freg-struct-return:

 x86 function calling convention, 32-bit:
 ----------------------------------------
  arguments         | callee-saved        | extra caller-saved | return
 [callee-clobbered] |                     | [callee-clobbered] |
 -------------------------------------------------------------------------
 eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]

 ( here too esp is obviously invariant across normal function calls. eflags
   is clobbered. Leftover arguments are passed on the stack frame. )

 [*]  In the frame-pointers case ebp is fixed to the stack frame.

 [**] We build with -freg-struct-return, which on 32-bit means similar
      semantics as on 64-bit: edx can be used for a second return value
      (i.e. covering integer and structure sizes up to 64 bits) - after that
      it gets more complex and more expensive: 3-word or larger struct returns
      get done in the caller's frame and the pointer to the return struct goes
      into regparm0, i.e. eax - the other arguments shift up and the
      function's register parameters degenerate to regparm=2 in essence.

*/
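
/*
 * Illustration of the 64-bit [**] case above (a sketch, not kernel code;
 * the struct and function are hypothetical):
 *
 *	struct big { long a, b, c; };		// three words wide
 *	struct big make_big(int x);
 *
 * Here the caller allocates the return slot in its own stack frame and
 * passes its address in rdi, so 'x' arrives in rsi rather than rdi.  A
 * two-word struct (e.g. { long a, b; }) would come back in rax:rdx instead.
 */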

#include <asm/dwarf2.h>

#ifdef CONFIG_X86_64

/*
 * 64-bit system call stack frame layout defines and helpers,
 * for assembly code:
 */

#define R15		  0
#define R14		  8
#define R13		 16
#define R12		 24
#define RBP		 32
#define RBX		 40

/* arguments: interrupts/non-tracing syscalls only save up to here: */
#define R11		 48
#define R10		 56
#define R9		 64
#define R8		 72
#define RAX		 80
#define RCX		 88
#define RDX		 96
#define RSI		104
#define RDI		112
#define ORIG_RAX	120	/* + error_code */
/* end of arguments */

/* cpu exception frame or undefined in case of fast syscall: */
#define RIP		128
#define CS		136
#define EFLAGS		144
#define RSP		152
#define SS		160

#define ARGOFFSET	R15

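/*
 * With %rsp pointing at the R15 slot of a fully built frame, these offsets
 * index the saved registers directly, e.g. (illustrative only):
 *
 *	movq RIP(%rsp), %rcx		# saved return address
 *	movq ORIG_RAX(%rsp), %rax	# syscall number / error code slot
 */
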
	.macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1, rax_enosys=0
	subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
	CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
	movq_cfi rdi, RDI
	movq_cfi rsi, RSI
	movq_cfi rdx, RDX

	.if \save_rcx
	movq_cfi rcx, RCX
	.endif

	.if \rax_enosys
	movq $-ENOSYS, RAX(%rsp)
	.else
	movq_cfi rax, RAX
	.endif

	.if \save_r891011
	movq_cfi r8, R8
	movq_cfi r9, R9
	movq_cfi r10, R10
	movq_cfi r11, R11
	.endif

#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
	/* %r12 is reserved by the KERNEXEC 'or' plugin, so it is saved
	   here with the argument registers instead of in SAVE_REST */
	movq_cfi r12, R12
#endif

	.endm
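
/*
 * A typical invocation (a sketch of how the syscall entry code uses
 * this; see entry_64.S for the real thing):
 *
 *	SAVE_ARGS 8, 0, rax_enosys=1
 *
 * i.e. reserve 8 extra bytes, skip saving %rcx (the syscall instruction
 * already clobbered it with the return address) and pre-set the RAX slot
 * to -ENOSYS so an out-of-range syscall number returns an error.
 */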

#define ARG_SKIP	ORIG_RAX

	.macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
			    rstor_r8910=1, rstor_rdx=1

#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
	/* %r12 is reserved by the KERNEXEC 'or' plugin, restore it here */
	movq_cfi_restore R12, r12
#endif

	.if \rstor_r11
	movq_cfi_restore R11, r11
	.endif

	.if \rstor_r8910
	movq_cfi_restore R10, r10
	movq_cfi_restore R9, r9
	movq_cfi_restore R8, r8
	.endif

	.if \rstor_rax
	movq_cfi_restore RAX, rax
	.endif

	.if \rstor_rcx
	movq_cfi_restore RCX, rcx
	.endif

	.if \rstor_rdx
	movq_cfi_restore RDX, rdx
	.endif

	movq_cfi_restore RSI, rsi
	movq_cfi_restore RDI, rdi

	.if ORIG_RAX+\addskip > 0
	addq $ORIG_RAX+\addskip, %rsp
	CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
	.endif
	.endm
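
/*
 * A hypothetical invocation (parameters made up for illustration):
 *
 *	RESTORE_ARGS 1, 8, 0
 *
 * restores everything except %rcx (rstor_rcx=0, useful e.g. on a sysret
 * path, where %rcx is reloaded from the saved RIP anyway) and then pops
 * the frame plus 8 extra bytes.
 */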

	.macro LOAD_ARGS skiprax=0
	movq R11(%rsp), %r11
	movq R10(%rsp), %r10
	movq R9(%rsp), %r9
	movq R8(%rsp), %r8
	movq RCX(%rsp), %rcx
	movq RDX(%rsp), %rdx
	movq RSI(%rsp), %rsi
	movq RDI(%rsp), %rdi
	.if \skiprax
	.else
	movq ORIG_RAX(%rsp), %rax
	.endif
	.endm
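
/*
 * Note that LOAD_ARGS refills %rax from the ORIG_RAX slot, not from RAX:
 * presumably so that on the syscall-tracing path a change the tracer made
 * to the syscall number takes effect (our reading of the intent, based on
 * the slot used above).
 */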

	.macro SAVE_REST
	movq_cfi rbx, RBX
	movq_cfi rbp, RBP

#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
	/* without the KERNEXEC 'or' plugin, %r12 is callee-saved as usual */
	movq_cfi r12, R12
#endif

	movq_cfi r13, R13
	movq_cfi r14, R14
	movq_cfi r15, R15
	.endm

	.macro RESTORE_REST
	movq_cfi_restore R15, r15
	movq_cfi_restore R14, r14
	movq_cfi_restore R13, r13

#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
	movq_cfi_restore R12, r12
#endif

	movq_cfi_restore RBP, rbp
	movq_cfi_restore RBX, rbx
	.endm

	.macro SAVE_ALL
	SAVE_ARGS
	SAVE_REST
	.endm

	.macro RESTORE_ALL addskip=0
	RESTORE_REST
	RESTORE_ARGS 1, \addskip
	.endm
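
/*
 * Together these build and tear down a complete register frame, e.g.
 * (an illustrative sketch, not taken from the entry code):
 *
 *	SAVE_ALL
 *	movq %rsp, %rdi		# hand the saved frame to a C helper
 *	call some_c_helper	# hypothetical function
 *	RESTORE_ALL
 */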

/* icebp: the undocumented 0xf1 opcode (ICEBP/int1), raises a debug trap */
	.macro icebp
	.byte 0xf1
	.endm

#else /* CONFIG_X86_64 */

/*
 * For 32-bit, only simplified versions of SAVE_ALL/RESTORE_ALL are
 * provided.  They differ from the entry_32.S versions in that they do
 * not change the segment registers, so they are only suitable for
 * in-kernel use, not for transitions from or to user space.  The
 * resulting stack frame is not a standard pt_regs frame.  The main use
 * case is calling C code from assembler when all the registers need to
 * be preserved (see the sketch after RESTORE_ALL below).
 */

	.macro SAVE_ALL
	pushl_cfi %eax
	CFI_REL_OFFSET eax, 0
	pushl_cfi %ebp
	CFI_REL_OFFSET ebp, 0
	pushl_cfi %edi
	CFI_REL_OFFSET edi, 0
	pushl_cfi %esi
	CFI_REL_OFFSET esi, 0
	pushl_cfi %edx
	CFI_REL_OFFSET edx, 0
	pushl_cfi %ecx
	CFI_REL_OFFSET ecx, 0
	pushl_cfi %ebx
	CFI_REL_OFFSET ebx, 0
	.endm

	.macro RESTORE_ALL
	popl_cfi %ebx
	CFI_RESTORE ebx
	popl_cfi %ecx
	CFI_RESTORE ecx
	popl_cfi %edx
	CFI_RESTORE edx
	popl_cfi %esi
	CFI_RESTORE esi
	popl_cfi %edi
	CFI_RESTORE edi
	popl_cfi %ebp
	CFI_RESTORE ebp
	popl_cfi %eax
	CFI_RESTORE eax
	.endm
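
/*
 * Per the use case described above, e.g. (illustrative only):
 *
 *	SAVE_ALL
 *	call some_c_helper	# hypothetical; all GP registers preserved
 *	RESTORE_ALL
 */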

#endif /* CONFIG_X86_64 */