/* PLT trampolines.  x86-64 version.
   Copyright (C) 2004, 2005, 2007, 2009, 2011 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <config.h>
#include <sysdep.h>
#include <link-defines.h>

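/* On entry to _dl_runtime_resolve the PLT code has pushed two words,
   the link_map pointer and the relocation index (hence the CFA is
   adjusted by 16 below).  They are loaded from the stack and passed
   to _dl_fixup, which returns the resolved function address.  */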
        .text
        .globl _dl_runtime_resolve
        .type _dl_runtime_resolve, @function
        .align 16
        cfi_startproc
_dl_runtime_resolve:
        cfi_adjust_cfa_offset(16)       # Incorporate PLT
        subq $56,%rsp
        cfi_adjust_cfa_offset(56)
        movq %rax,(%rsp)        # Preserve registers otherwise clobbered.
        movq %rcx, 8(%rsp)
        movq %rdx, 16(%rsp)
        movq %rsi, 24(%rsp)
        movq %rdi, 32(%rsp)
        movq %r8, 40(%rsp)
        movq %r9, 48(%rsp)
        movq 64(%rsp), %rsi     # Copy args pushed by PLT in register.
        movq 56(%rsp), %rdi     # %rdi: link_map, %rsi: reloc_index
        call _dl_fixup          # Call resolver.
        movq %rax, %r11         # Save return value
        movq 48(%rsp), %r9      # Get register content back.
        movq 40(%rsp), %r8
        movq 32(%rsp), %rdi
        movq 24(%rsp), %rsi
        movq 16(%rsp), %rdx
        movq 8(%rsp), %rcx
        movq (%rsp), %rax
        addq $72, %rsp          # Adjust stack (PLT did 2 pushes)
        cfi_adjust_cfa_offset(-72)
        jmp *%r11               # Jump to function address.
        cfi_endproc
        .size _dl_runtime_resolve, .-_dl_runtime_resolve


#ifndef PROF
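/* _dl_runtime_profile is the variant of the trampoline used for
   auditing/profiling.  It saves the full register state in a
   La_x86_64_regs structure; the code that performs the actual resolver
   call lives in dl-trampoline.h, which is included below twice, once
   with AVX register handling (RESTORE_AVX) and once without.  */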
        .globl _dl_runtime_profile
        .type _dl_runtime_profile, @function
        .align 16
        cfi_startproc

_dl_runtime_profile:
        cfi_adjust_cfa_offset(16)       # Incorporate PLT
        /* The La_x86_64_regs data structure pointed to by the
           fourth parameter must be 16-byte aligned.  This must
           be explicitly enforced.  We have to set up a dynamically
           sized stack frame.  %rbx points to the top half which
           has a fixed size and preserves the original stack pointer.  */

        subq $32, %rsp          # Allocate the local storage.
        cfi_adjust_cfa_offset(32)
        movq %rbx, (%rsp)
        cfi_rel_offset(%rbx, 0)

        /* On the stack:
                56(%rbx)        parameter #1
                48(%rbx)        return address

                40(%rbx)        reloc index
                32(%rbx)        link_map

                24(%rbx)        La_x86_64_regs pointer
                16(%rbx)        framesize
                 8(%rbx)        rax
                  (%rbx)        rbx
        */

        movq %rax, 8(%rsp)
        movq %rsp, %rbx
        cfi_def_cfa_register(%rbx)

        /* Actively align the La_x86_64_regs structure.  */
        andq $0xfffffffffffffff0, %rsp
# ifdef HAVE_AVX_SUPPORT
        /* sizeof(La_x86_64_regs).  Need extra space for 8 SSE registers
           to detect if any xmm0-xmm7 registers are changed by audit
           module.  */
        subq $(LR_SIZE + XMM_SIZE*8), %rsp
# else
        subq $LR_SIZE, %rsp             # sizeof(La_x86_64_regs)
# endif
        movq %rsp, 24(%rbx)

        /* Fill the La_x86_64_regs structure.  */
        movq %rdx, LR_RDX_OFFSET(%rsp)
        movq %r8, LR_R8_OFFSET(%rsp)
        movq %r9, LR_R9_OFFSET(%rsp)
        movq %rcx, LR_RCX_OFFSET(%rsp)
        movq %rsi, LR_RSI_OFFSET(%rsp)
        movq %rdi, LR_RDI_OFFSET(%rsp)
        movq %rbp, LR_RBP_OFFSET(%rsp)

        /* The stack pointer as the resolved function will see it: it
           points at the return address, with any stack parameters
           following.  */
        leaq 48(%rbx), %rax
        movq %rax, LR_RSP_OFFSET(%rsp)

        /* We always store the XMM registers even if AVX is available.
           This is to provide backward binary compatibility for existing
           audit modules.  */
        movaps %xmm0, (LR_XMM_OFFSET)(%rsp)
        movaps %xmm1, (LR_XMM_OFFSET + XMM_SIZE)(%rsp)
        movaps %xmm2, (LR_XMM_OFFSET + XMM_SIZE*2)(%rsp)
        movaps %xmm3, (LR_XMM_OFFSET + XMM_SIZE*3)(%rsp)
        movaps %xmm4, (LR_XMM_OFFSET + XMM_SIZE*4)(%rsp)
        movaps %xmm5, (LR_XMM_OFFSET + XMM_SIZE*5)(%rsp)
        movaps %xmm6, (LR_XMM_OFFSET + XMM_SIZE*6)(%rsp)
        movaps %xmm7, (LR_XMM_OFFSET + XMM_SIZE*7)(%rsp)

# ifdef HAVE_AVX_SUPPORT
        .data
L(have_avx):
        .zero 4
        .size L(have_avx), 4
        .previous

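        /* L(have_avx) caches the result of the check below: zero means
           not yet determined, a positive value means AVX and the
           required XSAVE state are usable, a negative value means they
           are not.  */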
        cmpl $0, L(have_avx)(%rip)
        jne 1f
        movq %rbx, %r11         # Save rbx
        movl $1, %eax
        cpuid
        movq %r11,%rbx          # Restore rbx
        xorl %eax, %eax
        // AVX and XSAVE supported?
        andl $((1 << 28) | (1 << 27)), %ecx
        cmpl $((1 << 28) | (1 << 27)), %ecx
        jne 2f
        xorl %ecx, %ecx
        // Get XFEATURE_ENABLED_MASK
        xgetbv
        andl $0x6, %eax         // Keep the SSE (bit 1) and AVX (bit 2) state bits.
2:      subl $0x5, %eax         // Positive (1) only if both state bits were set.
        movl %eax, L(have_avx)(%rip)
        cmpl $0, %eax

1:      js L(no_avx)

# define RESTORE_AVX
# define MORE_CODE
# include "dl-trampoline.h"

        .align 16
L(no_avx):
# endif

# undef RESTORE_AVX
# include "dl-trampoline.h"

        cfi_endproc
        .size _dl_runtime_profile, .-_dl_runtime_profile
#endif


#ifdef SHARED
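/* _dl_x86_64_save_sse and _dl_x86_64_restore_sse copy the first eight
   SSE/AVX registers to and from the per-thread save area at
   %fs:RTLD_SAVESPACE_SSE, so the dynamic linker can preserve a caller's
   vector argument registers across code that may clobber them.  */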
        .globl _dl_x86_64_save_sse
        .type _dl_x86_64_save_sse, @function
        .align 16
        cfi_startproc
_dl_x86_64_save_sse:
# ifdef HAVE_AVX_SUPPORT
        cmpl $0, L(have_avx)(%rip)
        jne 1f
        movq %rbx, %r11         # Save rbx
        movl $1, %eax
        cpuid
        movq %r11,%rbx          # Restore rbx
        xorl %eax, %eax
        // AVX and XSAVE supported?
        andl $((1 << 28) | (1 << 27)), %ecx
        cmpl $((1 << 28) | (1 << 27)), %ecx
        jne 2f
        xorl %ecx, %ecx
        // Get XFEATURE_ENABLED_MASK
        xgetbv
        andl $0x6, %eax
        cmpl $0x6, %eax
        // Nonzero if SSE and AVX state saving is enabled.
        sete %al
2:      leal -1(%eax,%eax), %eax        // 2*%eax - 1: 1 if usable, -1 if not.
        movl %eax, L(have_avx)(%rip)
        cmpl $0, %eax

1:      js L(no_avx5)

# define YMM_SIZE 32
        vmovdqa %ymm0, %fs:RTLD_SAVESPACE_SSE+0*YMM_SIZE
        vmovdqa %ymm1, %fs:RTLD_SAVESPACE_SSE+1*YMM_SIZE
        vmovdqa %ymm2, %fs:RTLD_SAVESPACE_SSE+2*YMM_SIZE
        vmovdqa %ymm3, %fs:RTLD_SAVESPACE_SSE+3*YMM_SIZE
        vmovdqa %ymm4, %fs:RTLD_SAVESPACE_SSE+4*YMM_SIZE
        vmovdqa %ymm5, %fs:RTLD_SAVESPACE_SSE+5*YMM_SIZE
        vmovdqa %ymm6, %fs:RTLD_SAVESPACE_SSE+6*YMM_SIZE
        vmovdqa %ymm7, %fs:RTLD_SAVESPACE_SSE+7*YMM_SIZE
        ret
L(no_avx5):
# endif
        movdqa %xmm0, %fs:RTLD_SAVESPACE_SSE+0*XMM_SIZE
        movdqa %xmm1, %fs:RTLD_SAVESPACE_SSE+1*XMM_SIZE
        movdqa %xmm2, %fs:RTLD_SAVESPACE_SSE+2*XMM_SIZE
        movdqa %xmm3, %fs:RTLD_SAVESPACE_SSE+3*XMM_SIZE
        movdqa %xmm4, %fs:RTLD_SAVESPACE_SSE+4*XMM_SIZE
        movdqa %xmm5, %fs:RTLD_SAVESPACE_SSE+5*XMM_SIZE
        movdqa %xmm6, %fs:RTLD_SAVESPACE_SSE+6*XMM_SIZE
        movdqa %xmm7, %fs:RTLD_SAVESPACE_SSE+7*XMM_SIZE
        ret
        cfi_endproc
        .size _dl_x86_64_save_sse, .-_dl_x86_64_save_sse


        .globl _dl_x86_64_restore_sse
        .type _dl_x86_64_restore_sse, @function
        .align 16
        cfi_startproc
_dl_x86_64_restore_sse:
# ifdef HAVE_AVX_SUPPORT
        cmpl $0, L(have_avx)(%rip)
        js L(no_avx6)

        vmovdqa %fs:RTLD_SAVESPACE_SSE+0*YMM_SIZE, %ymm0
        vmovdqa %fs:RTLD_SAVESPACE_SSE+1*YMM_SIZE, %ymm1
        vmovdqa %fs:RTLD_SAVESPACE_SSE+2*YMM_SIZE, %ymm2
        vmovdqa %fs:RTLD_SAVESPACE_SSE+3*YMM_SIZE, %ymm3
        vmovdqa %fs:RTLD_SAVESPACE_SSE+4*YMM_SIZE, %ymm4
        vmovdqa %fs:RTLD_SAVESPACE_SSE+5*YMM_SIZE, %ymm5
        vmovdqa %fs:RTLD_SAVESPACE_SSE+6*YMM_SIZE, %ymm6
        vmovdqa %fs:RTLD_SAVESPACE_SSE+7*YMM_SIZE, %ymm7
        ret
L(no_avx6):
# endif
        movdqa %fs:RTLD_SAVESPACE_SSE+0*XMM_SIZE, %xmm0
        movdqa %fs:RTLD_SAVESPACE_SSE+1*XMM_SIZE, %xmm1
        movdqa %fs:RTLD_SAVESPACE_SSE+2*XMM_SIZE, %xmm2
        movdqa %fs:RTLD_SAVESPACE_SSE+3*XMM_SIZE, %xmm3
        movdqa %fs:RTLD_SAVESPACE_SSE+4*XMM_SIZE, %xmm4
        movdqa %fs:RTLD_SAVESPACE_SSE+5*XMM_SIZE, %xmm5
        movdqa %fs:RTLD_SAVESPACE_SSE+6*XMM_SIZE, %xmm6
        movdqa %fs:RTLD_SAVESPACE_SSE+7*XMM_SIZE, %xmm7
        ret
        cfi_endproc
        .size _dl_x86_64_restore_sse, .-_dl_x86_64_restore_sse
#endif