/* PLT trampolines.  x86-64 version.
   Copyright (C) 2009-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#ifndef SECTION
# define SECTION(p)	p
#endif

	.section SECTION(.text),"ax",@progbits
#ifdef _dl_runtime_resolve
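/* _dl_runtime_resolve is a macro at this point: dl-trampoline.S appears
   to include this header once per state-save flavor (USE_FXSAVE,
   USE_XSAVE or USE_XSAVEC), defining it to a concrete variant name each
   time, which is why the macros below are undefined before being
   redefined.  */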

# undef REGISTER_SAVE_AREA
# undef LOCAL_STORAGE_AREA
# undef BASE

# if (STATE_SAVE_ALIGNMENT % 16) != 0
#  error STATE_SAVE_ALIGNMENT must be a multiple of 16
# endif

# if (STATE_SAVE_OFFSET % STATE_SAVE_ALIGNMENT) != 0
#  error STATE_SAVE_OFFSET must be a multiple of STATE_SAVE_ALIGNMENT
# endif

# if DL_RUNTIME_RESOLVE_REALIGN_STACK
/* Local stack area before jumping to function address: RBX.  */
#  define LOCAL_STORAGE_AREA	8
#  define BASE			rbx
#  ifdef USE_FXSAVE
/* Use fxsave to save XMM registers.  */
#   define REGISTER_SAVE_AREA	(512 + STATE_SAVE_OFFSET)
#   if (REGISTER_SAVE_AREA % 16) != 0
#    error REGISTER_SAVE_AREA must be a multiple of 16
#   endif
#  endif
# else
#  ifndef USE_FXSAVE
#   error USE_FXSAVE must be defined
#  endif
/* Use fxsave to save XMM registers.  */
#  define REGISTER_SAVE_AREA	(512 + STATE_SAVE_OFFSET + 8)
/* Local stack area before jumping to function address: All saved
   registers.  */
#  define LOCAL_STORAGE_AREA	REGISTER_SAVE_AREA
#  define BASE			rsp
#  if (REGISTER_SAVE_AREA % 16) != 8
#   error REGISTER_SAVE_AREA must be an odd multiple of 8
#  endif
# endif
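/* Why an odd multiple of 8: assuming the caller kept the psABI's
   16-byte stack alignment, %rsp on entry here is 8 mod 16 (return
   address plus the two 8-byte PLT pushes), so subtracting an odd
   multiple of 8 yields the 16-byte alignment that fxsave requires.  */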

	.globl _dl_runtime_resolve
	.hidden _dl_runtime_resolve
	.type _dl_runtime_resolve, @function
	.align 16
	cfi_startproc
_dl_runtime_resolve:
	cfi_adjust_cfa_offset(16) # Incorporate PLT
	_CET_ENDBR
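	# The 16 bytes incorporated above are what the PLT pushed before
	# jumping here: the relocation index from the symbol's PLT slot
	# and the link_map pointer from PLT0.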
# if DL_RUNTIME_RESOLVE_REALIGN_STACK
#  if LOCAL_STORAGE_AREA != 8
#   error LOCAL_STORAGE_AREA must be 8
#  endif
	pushq %rbx			# push subtracts stack by 8.
	cfi_adjust_cfa_offset(8)
	cfi_rel_offset(%rbx, 0)
	mov %RSP_LP, %RBX_LP
	cfi_def_cfa_register(%rbx)
	and $-STATE_SAVE_ALIGNMENT, %RSP_LP
# endif
# ifdef REGISTER_SAVE_AREA
	sub $REGISTER_SAVE_AREA, %RSP_LP
#  if !DL_RUNTIME_RESOLVE_REALIGN_STACK
	cfi_adjust_cfa_offset(REGISTER_SAVE_AREA)
#  endif
# else
	# Allocate stack space of the required size to save the state.
#  if IS_IN (rtld)
	sub _rtld_local_ro+RTLD_GLOBAL_RO_DL_X86_CPU_FEATURES_OFFSET+XSAVE_STATE_SIZE_OFFSET(%rip), %RSP_LP
#  else
	sub _dl_x86_cpu_features+XSAVE_STATE_SIZE_OFFSET(%rip), %RSP_LP
#  endif
# endif
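	# In the xsave/xsavec case the save-area size is not a
	# compile-time constant: it is the xsave state size determined
	# at startup (via CPUID) and cached in glibc's cpu-features
	# data, loaded here through XSAVE_STATE_SIZE_OFFSET.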
	# Preserve registers otherwise clobbered.
	movq %rax, REGISTER_SAVE_RAX(%rsp)
	movq %rcx, REGISTER_SAVE_RCX(%rsp)
	movq %rdx, REGISTER_SAVE_RDX(%rsp)
	movq %rsi, REGISTER_SAVE_RSI(%rsp)
	movq %rdi, REGISTER_SAVE_RDI(%rsp)
	movq %r8, REGISTER_SAVE_R8(%rsp)
	movq %r9, REGISTER_SAVE_R9(%rsp)
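	# These are the six integer argument registers plus %rax, whose
	# %al may carry the number of vector arguments for a variadic
	# callee and so must also survive the resolver call.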
# ifdef USE_FXSAVE
	fxsave STATE_SAVE_OFFSET(%rsp)
# else
	movl $STATE_SAVE_MASK, %eax
	xorl %edx, %edx
	# Clear the XSAVE Header.
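	# The 64-byte XSAVE header lives at offset 512 of the save area;
	# xrstor can fault if reserved header bits are nonzero, so the
	# header is zeroed first.  The first 16 bytes (XSTATE_BV and
	# XCOMP_BV) appear to need clearing only for plain xsave, since
	# xsavec writes them itself.  %edx:%eax carry the feature mask
	# consumed by xsave/xsavec/xrstor.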
#  ifdef USE_XSAVE
	movq %rdx, (STATE_SAVE_OFFSET + 512)(%rsp)
	movq %rdx, (STATE_SAVE_OFFSET + 512 + 8)(%rsp)
#  endif
	movq %rdx, (STATE_SAVE_OFFSET + 512 + 8 * 2)(%rsp)
	movq %rdx, (STATE_SAVE_OFFSET + 512 + 8 * 3)(%rsp)
	movq %rdx, (STATE_SAVE_OFFSET + 512 + 8 * 4)(%rsp)
	movq %rdx, (STATE_SAVE_OFFSET + 512 + 8 * 5)(%rsp)
	movq %rdx, (STATE_SAVE_OFFSET + 512 + 8 * 6)(%rsp)
	movq %rdx, (STATE_SAVE_OFFSET + 512 + 8 * 7)(%rsp)
#  ifdef USE_XSAVE
	xsave STATE_SAVE_OFFSET(%rsp)
#  else
	xsavec STATE_SAVE_OFFSET(%rsp)
#  endif
# endif
	# Copy args pushed by PLT in register.
	# %rdi: link_map, %rsi: reloc_index
	mov (LOCAL_STORAGE_AREA + 8)(%BASE), %RSI_LP
	mov LOCAL_STORAGE_AREA(%BASE), %RDI_LP
	call _dl_fixup		# Call resolver.
	mov %RAX_LP, %R11_LP	# Save return value
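	# _dl_fixup returns the resolved function address in %rax and
	# normally also patches the GOT entry, so subsequent calls
	# bypass this trampoline entirely.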
	# Get register content back.
# ifdef USE_FXSAVE
	fxrstor STATE_SAVE_OFFSET(%rsp)
# else
	movl $STATE_SAVE_MASK, %eax
	xorl %edx, %edx
	xrstor STATE_SAVE_OFFSET(%rsp)
# endif
	movq REGISTER_SAVE_R9(%rsp), %r9
	movq REGISTER_SAVE_R8(%rsp), %r8
	movq REGISTER_SAVE_RDI(%rsp), %rdi
	movq REGISTER_SAVE_RSI(%rsp), %rsi
	movq REGISTER_SAVE_RDX(%rsp), %rdx
	movq REGISTER_SAVE_RCX(%rsp), %rcx
	movq REGISTER_SAVE_RAX(%rsp), %rax
# if DL_RUNTIME_RESOLVE_REALIGN_STACK
	mov %RBX_LP, %RSP_LP
	cfi_def_cfa_register(%rsp)
	movq (%rsp), %rbx
	cfi_restore(%rbx)
# endif
	# Adjust stack (PLT did 2 pushes).
	add $(LOCAL_STORAGE_AREA + 16), %RSP_LP
	cfi_adjust_cfa_offset(-(LOCAL_STORAGE_AREA + 16))
	jmp *%r11		# Jump to function address.
	cfi_endproc
	.size _dl_runtime_resolve, .-_dl_runtime_resolve
#endif


#if !defined PROF && defined _dl_runtime_profile
# if (LR_VECTOR_OFFSET % VEC_SIZE) != 0
#  error LR_VECTOR_OFFSET must be a multiple of VEC_SIZE
# endif

	.globl _dl_runtime_profile
	.hidden _dl_runtime_profile
	.type _dl_runtime_profile, @function
	.align 16
_dl_runtime_profile:
	cfi_startproc
	cfi_adjust_cfa_offset(16) # Incorporate PLT
	_CET_ENDBR

	/* The La_x86_64_regs data structure pointed to by the
	   fourth parameter must be VEC_SIZE-byte aligned.  This must
	   be explicitly enforced.  We have to set up a dynamically
	   sized stack frame.  %rbx points to the top half, which
	   has a fixed size and preserves the original stack pointer.  */

	sub $32, %RSP_LP	# Allocate the local storage.
	cfi_adjust_cfa_offset(32)
	movq %rbx, (%rsp)
	cfi_rel_offset(%rbx, 0)

	/* On the stack:
		56(%rbx)	parameter #1
		48(%rbx)	return address

		40(%rbx)	reloc index
		32(%rbx)	link_map

		24(%rbx)	La_x86_64_regs pointer
		16(%rbx)	framesize
		 8(%rbx)	rax
		  (%rbx)	rbx
	*/

	movq %rax, 8(%rsp)
	mov %RSP_LP, %RBX_LP
	cfi_def_cfa_register(%rbx)

	/* Actively align the La_x86_64_regs structure.  */
	and $-VEC_SIZE, %RSP_LP
	/* sizeof(La_x86_64_regs).  Need extra space for 8 SSE registers
	   to detect if any of the xmm0-xmm7 registers are changed by an
	   audit module.  */
	sub $(LR_SIZE + XMM_SIZE*8), %RSP_LP
	movq %rsp, 24(%rbx)

	/* Fill the La_x86_64_regs structure.  */
	movq %rdx, LR_RDX_OFFSET(%rsp)
	movq %r8, LR_R8_OFFSET(%rsp)
	movq %r9, LR_R9_OFFSET(%rsp)
	movq %rcx, LR_RCX_OFFSET(%rsp)
	movq %rsi, LR_RSI_OFFSET(%rsp)
	movq %rdi, LR_RDI_OFFSET(%rsp)
	movq %rbp, LR_RBP_OFFSET(%rsp)

	lea 48(%rbx), %RAX_LP
	movq %rax, LR_RSP_OFFSET(%rsp)

	/* We always store the XMM registers even if AVX is available.
	   This is to provide backward binary compatibility for existing
	   audit modules.  */
	VMOVA %xmm0, (LR_XMM_OFFSET + XMM_SIZE*0)(%rsp)
	VMOVA %xmm1, (LR_XMM_OFFSET + XMM_SIZE*1)(%rsp)
	VMOVA %xmm2, (LR_XMM_OFFSET + XMM_SIZE*2)(%rsp)
	VMOVA %xmm3, (LR_XMM_OFFSET + XMM_SIZE*3)(%rsp)
	VMOVA %xmm4, (LR_XMM_OFFSET + XMM_SIZE*4)(%rsp)
	VMOVA %xmm5, (LR_XMM_OFFSET + XMM_SIZE*5)(%rsp)
	VMOVA %xmm6, (LR_XMM_OFFSET + XMM_SIZE*6)(%rsp)
	VMOVA %xmm7, (LR_XMM_OFFSET + XMM_SIZE*7)(%rsp)

# ifdef RESTORE_AVX
	/* This is to support AVX audit modules.  */
	VMOVA %VEC(0), (LR_VECTOR_OFFSET + VECTOR_SIZE*0)(%rsp)
	VMOVA %VEC(1), (LR_VECTOR_OFFSET + VECTOR_SIZE*1)(%rsp)
	VMOVA %VEC(2), (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp)
	VMOVA %VEC(3), (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp)
	VMOVA %VEC(4), (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp)
	VMOVA %VEC(5), (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp)
	VMOVA %VEC(6), (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp)
	VMOVA %VEC(7), (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp)

	/* Save xmm0-xmm7 registers to detect if any of them are
	   changed by an audit module.  */
	vmovdqa %xmm0, (LR_SIZE + XMM_SIZE*0)(%rsp)
	vmovdqa %xmm1, (LR_SIZE + XMM_SIZE*1)(%rsp)
	vmovdqa %xmm2, (LR_SIZE + XMM_SIZE*2)(%rsp)
	vmovdqa %xmm3, (LR_SIZE + XMM_SIZE*3)(%rsp)
	vmovdqa %xmm4, (LR_SIZE + XMM_SIZE*4)(%rsp)
	vmovdqa %xmm5, (LR_SIZE + XMM_SIZE*5)(%rsp)
	vmovdqa %xmm6, (LR_SIZE + XMM_SIZE*6)(%rsp)
	vmovdqa %xmm7, (LR_SIZE + XMM_SIZE*7)(%rsp)
# endif

	mov %RSP_LP, %RCX_LP	# La_x86_64_regs pointer to %rcx.
	mov 48(%rbx), %RDX_LP	# Load return address if needed.
	mov 40(%rbx), %RSI_LP	# Copy args pushed by PLT in register.
	mov 32(%rbx), %RDI_LP	# %rdi: link_map, %rsi: reloc_index
	lea 16(%rbx), %R8_LP	# Address of framesize
	call _dl_profile_fixup	# Call resolver.

	mov %RAX_LP, %R11_LP	# Save return value.

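	# Besides resolving the symbol, _dl_profile_fixup runs any
	# la_pltenter audit hooks and stores the frame size through the
	# pointer passed in %r8; it is left negative when no la_pltexit
	# call will be needed (tested via the jns below).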
	movq 8(%rbx), %rax	# Get back register content.
	movq LR_RDX_OFFSET(%rsp), %rdx
	movq LR_R8_OFFSET(%rsp), %r8
	movq LR_R9_OFFSET(%rsp), %r9

	VMOVA (LR_XMM_OFFSET + XMM_SIZE*0)(%rsp), %xmm0
	VMOVA (LR_XMM_OFFSET + XMM_SIZE*1)(%rsp), %xmm1
	VMOVA (LR_XMM_OFFSET + XMM_SIZE*2)(%rsp), %xmm2
	VMOVA (LR_XMM_OFFSET + XMM_SIZE*3)(%rsp), %xmm3
	VMOVA (LR_XMM_OFFSET + XMM_SIZE*4)(%rsp), %xmm4
	VMOVA (LR_XMM_OFFSET + XMM_SIZE*5)(%rsp), %xmm5
	VMOVA (LR_XMM_OFFSET + XMM_SIZE*6)(%rsp), %xmm6
	VMOVA (LR_XMM_OFFSET + XMM_SIZE*7)(%rsp), %xmm7

# ifdef RESTORE_AVX
	/* Check if any of the xmm0-xmm7 registers were changed by an
	   audit module.  */
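	/* How the check works: vpcmpeqb sets all bytes of %xmm8 where
	   the live register still matches the copy saved above, so
	   vpmovmskb yields 0xffff when it is unchanged; incw then wraps
	   that to zero, making "je 2f" take the path that reloads the
	   full-width vector.  Otherwise the audit module's new xmm
	   value wins and is written back to the vector and xmm slots.  */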
	vpcmpeqb (LR_SIZE)(%rsp), %xmm0, %xmm8
	vpmovmskb %xmm8, %esi
	incw %si
	je 2f
	vmovdqa %xmm0, (LR_VECTOR_OFFSET)(%rsp)
	jmp 1f
2:	VMOVA (LR_VECTOR_OFFSET)(%rsp), %VEC(0)
	vmovdqa %xmm0, (LR_XMM_OFFSET)(%rsp)

1:	vpcmpeqb (LR_SIZE + XMM_SIZE)(%rsp), %xmm1, %xmm8
	vpmovmskb %xmm8, %esi
	incw %si
	je 2f
	vmovdqa %xmm1, (LR_VECTOR_OFFSET + VECTOR_SIZE)(%rsp)
	jmp 1f
2:	VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE)(%rsp), %VEC(1)
	vmovdqa %xmm1, (LR_XMM_OFFSET + XMM_SIZE)(%rsp)

1:	vpcmpeqb (LR_SIZE + XMM_SIZE*2)(%rsp), %xmm2, %xmm8
	vpmovmskb %xmm8, %esi
	incw %si
	je 2f
	vmovdqa %xmm2, (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp)
	jmp 1f
2:	VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp), %VEC(2)
	vmovdqa %xmm2, (LR_XMM_OFFSET + XMM_SIZE*2)(%rsp)

1:	vpcmpeqb (LR_SIZE + XMM_SIZE*3)(%rsp), %xmm3, %xmm8
	vpmovmskb %xmm8, %esi
	incw %si
	je 2f
	vmovdqa %xmm3, (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp)
	jmp 1f
2:	VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp), %VEC(3)
	vmovdqa %xmm3, (LR_XMM_OFFSET + XMM_SIZE*3)(%rsp)

1:	vpcmpeqb (LR_SIZE + XMM_SIZE*4)(%rsp), %xmm4, %xmm8
	vpmovmskb %xmm8, %esi
	incw %si
	je 2f
	vmovdqa %xmm4, (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp)
	jmp 1f
2:	VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp), %VEC(4)
	vmovdqa %xmm4, (LR_XMM_OFFSET + XMM_SIZE*4)(%rsp)

1:	vpcmpeqb (LR_SIZE + XMM_SIZE*5)(%rsp), %xmm5, %xmm8
	vpmovmskb %xmm8, %esi
	incw %si
	je 2f
	vmovdqa %xmm5, (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp)
	jmp 1f
2:	VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp), %VEC(5)
	vmovdqa %xmm5, (LR_XMM_OFFSET + XMM_SIZE*5)(%rsp)

1:	vpcmpeqb (LR_SIZE + XMM_SIZE*6)(%rsp), %xmm6, %xmm8
	vpmovmskb %xmm8, %esi
	incw %si
	je 2f
	vmovdqa %xmm6, (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp)
	jmp 1f
2:	VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp), %VEC(6)
	vmovdqa %xmm6, (LR_XMM_OFFSET + XMM_SIZE*6)(%rsp)

1:	vpcmpeqb (LR_SIZE + XMM_SIZE*7)(%rsp), %xmm7, %xmm8
	vpmovmskb %xmm8, %esi
	incw %si
	je 2f
	vmovdqa %xmm7, (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp)
	jmp 1f
2:	VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp), %VEC(7)
	vmovdqa %xmm7, (LR_XMM_OFFSET + XMM_SIZE*7)(%rsp)

1:
# endif

	mov 16(%rbx), %RCX_LP	# Anything in framesize?
	test %RCX_LP, %RCX_LP
	jns 3f

	/* There is nothing in the frame size, so there will be no call
	   to _dl_audit_pltexit.  */

	/* Get back the register content.  */
	movq LR_RCX_OFFSET(%rsp), %rcx
	movq LR_RSI_OFFSET(%rsp), %rsi
	movq LR_RDI_OFFSET(%rsp), %rdi

	mov %RBX_LP, %RSP_LP
	movq (%rsp), %rbx
	cfi_restore(%rbx)
	cfi_def_cfa_register(%rsp)

	add $48, %RSP_LP	# Adjust the stack to the return value
				# (eats the reloc index and link_map).
	cfi_adjust_cfa_offset(-48)
	jmp *%r11		# Jump to function address.

3:
	cfi_adjust_cfa_offset(48)
	cfi_rel_offset(%rbx, 0)
	cfi_def_cfa_register(%rbx)

	/* At this point we need to prepare a new stack for the function
	   which has to be called.  We copy the original stack to a
	   temporary buffer of the size specified by the 'framesize'
	   returned from _dl_profile_fixup.  */

	lea LR_RSP_OFFSET(%rbx), %RSI_LP	# stack
	add $8, %RCX_LP
	and $-16, %RCX_LP
	sub %RCX_LP, %RSP_LP
	mov %RSP_LP, %RDI_LP
	rep movsb

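	/* Note on the copy above: LR_RSP_OFFSET serves here as the
	   offset of the first stack-passed parameter, 56(%rbx) in the
	   layout shown earlier (assuming the usual La_x86_64_regs
	   layout), and 'framesize' is rounded so the new frame keeps
	   the stack 16-byte aligned.  After rep movsb, %rdi has
	   advanced to the La_x86_64_regs structure itself, so the
	   loads below pick up its saved rcx/rsi/rdi fields.  */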
	movq 24(%rdi), %rcx	# Get back register content.
	movq 32(%rdi), %rsi
	movq 40(%rdi), %rdi

	call *%r11

	mov 24(%rbx), %RSP_LP	# Drop the copied stack content.

	/* Now we have to prepare the La_x86_64_retval structure for
	   _dl_audit_pltexit.  The La_x86_64_regs structure is pointed to
	   by rsp now, so we just need to allocate the
	   sizeof(La_x86_64_retval) space on the stack, since the
	   alignment has already been taken care of.  */
# ifdef RESTORE_AVX
	/* sizeof(La_x86_64_retval).  Need extra space for 2 SSE
	   registers to detect if the xmm0/xmm1 registers are changed
	   by an audit module.  Since rsp is aligned to VEC_SIZE, we
	   need to make sure that the address of La_x86_64_retval +
	   LRV_VECTOR0_OFFSET is aligned to VEC_SIZE.  */
#  define LRV_SPACE (LRV_SIZE + XMM_SIZE*2)
#  define LRV_MISALIGNED ((LRV_SIZE + LRV_VECTOR0_OFFSET) & (VEC_SIZE - 1))
#  if LRV_MISALIGNED == 0
	sub $LRV_SPACE, %RSP_LP
#  else
	sub $(LRV_SPACE + VEC_SIZE - LRV_MISALIGNED), %RSP_LP
#  endif
# else
	sub $LRV_SIZE, %RSP_LP	# sizeof(La_x86_64_retval)
# endif
	mov %RSP_LP, %RCX_LP	# La_x86_64_retval argument to %rcx.

	/* Fill in the La_x86_64_retval structure.  */
	movq %rax, LRV_RAX_OFFSET(%rcx)
	movq %rdx, LRV_RDX_OFFSET(%rcx)

	VMOVA %xmm0, LRV_XMM0_OFFSET(%rcx)
	VMOVA %xmm1, LRV_XMM1_OFFSET(%rcx)

# ifdef RESTORE_AVX
	/* This is to support AVX audit modules.  */
	VMOVA %VEC(0), LRV_VECTOR0_OFFSET(%rcx)
	VMOVA %VEC(1), LRV_VECTOR1_OFFSET(%rcx)

	/* Save xmm0/xmm1 registers to detect if they are changed
	   by an audit module.  */
	vmovdqa %xmm0, (LRV_SIZE + XMM_SIZE*0)(%rcx)
	vmovdqa %xmm1, (LRV_SIZE + XMM_SIZE*1)(%rcx)
# endif

	fstpt LRV_ST0_OFFSET(%rcx)
	fstpt LRV_ST1_OFFSET(%rcx)
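	# fstpt pops %st0 and then %st1 (the x87 registers used for
	# long double return values) into the retval structure; the
	# matching fldt pair below reloads them in reverse order so
	# both end up back in place.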

	movq 24(%rbx), %rdx	# La_x86_64_regs argument to %rdx.
	movq 40(%rbx), %rsi	# Copy args pushed by PLT in register.
	movq 32(%rbx), %rdi	# %rdi: link_map, %rsi: reloc_index
	call _dl_audit_pltexit
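	# _dl_audit_pltexit gives la_pltexit audit hooks a chance to
	# inspect, and possibly modify, the return value recorded in
	# La_x86_64_retval, which is why the return registers are
	# reloaded from the structure below.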

	/* Restore return registers.  */
	movq LRV_RAX_OFFSET(%rsp), %rax
	movq LRV_RDX_OFFSET(%rsp), %rdx

	VMOVA LRV_XMM0_OFFSET(%rsp), %xmm0
	VMOVA LRV_XMM1_OFFSET(%rsp), %xmm1

# ifdef RESTORE_AVX
	/* Check if the xmm0/xmm1 registers were changed by an audit
	   module.  */
	vpcmpeqb (LRV_SIZE)(%rsp), %xmm0, %xmm2
	vpmovmskb %xmm2, %esi
	incw %si
	jne 1f
	VMOVA LRV_VECTOR0_OFFSET(%rsp), %VEC(0)

1:	vpcmpeqb (LRV_SIZE + XMM_SIZE)(%rsp), %xmm1, %xmm2
	vpmovmskb %xmm2, %esi
	incw %si
	jne 1f
	VMOVA LRV_VECTOR1_OFFSET(%rsp), %VEC(1)

1:
# endif

	fldt LRV_ST1_OFFSET(%rsp)
	fldt LRV_ST0_OFFSET(%rsp)

	mov %RBX_LP, %RSP_LP
	movq (%rsp), %rbx
	cfi_restore(%rbx)
	cfi_def_cfa_register(%rsp)

	add $48, %RSP_LP	# Adjust the stack to the return value
				# (eats the reloc index and link_map).
	cfi_adjust_cfa_offset(-48)
	retq

	cfi_endproc
	.size _dl_runtime_profile, .-_dl_runtime_profile
#endif