/* Partial PLT profile trampoline to save and restore x86-64 vector
   registers.
   Copyright (C) 2009, 2011 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
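
/* This header is included by sysdeps/x86_64/dl-trampoline.S (twice on
   AVX-capable builds) to emit the body of _dl_runtime_profile;
   defining RESTORE_AVX selects the variant that also saves and
   restores the %ymm registers for LD_AUDIT modules.  */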
#ifdef RESTORE_AVX
	/* This is to support AVX audit modules.  */
	vmovdqu %ymm0, (LR_VECTOR_OFFSET)(%rsp)
	vmovdqu %ymm1, (LR_VECTOR_OFFSET + VECTOR_SIZE)(%rsp)
	vmovdqu %ymm2, (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp)
	vmovdqu %ymm3, (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp)
	vmovdqu %ymm4, (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp)
	vmovdqu %ymm5, (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp)
	vmovdqu %ymm6, (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp)
	vmovdqu %ymm7, (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp)
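
	/* Under the x86-64 ABI the first eight vector arguments arrive
	   in %ymm0-%ymm7 (as %xmm0-%xmm7 for SSE code), so they are
	   stored here where an audit module can inspect, and possibly
	   modify, them through the lr_vector array of La_x86_64_regs.  */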
	/* Save xmm0-xmm7 registers to detect if any of them are
	   changed by audit module.  */
	vmovdqa %xmm0, (LR_SIZE)(%rsp)
	vmovdqa %xmm1, (LR_SIZE + XMM_SIZE)(%rsp)
	vmovdqa %xmm2, (LR_SIZE + XMM_SIZE*2)(%rsp)
	vmovdqa %xmm3, (LR_SIZE + XMM_SIZE*3)(%rsp)
	vmovdqa %xmm4, (LR_SIZE + XMM_SIZE*4)(%rsp)
	vmovdqa %xmm5, (LR_SIZE + XMM_SIZE*5)(%rsp)
	vmovdqa %xmm6, (LR_SIZE + XMM_SIZE*6)(%rsp)
	vmovdqa %xmm7, (LR_SIZE + XMM_SIZE*7)(%rsp)
#endif
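
	/* An audit module built against the pre-AVX interface only
	   knows the %xmm half of each register, so the dynamic linker
	   cannot blindly reload %ymm0-%ymm7 after the call: the module
	   may have legitimately replaced an %xmm value.  The aligned
	   copies just past the La_x86_64_regs structure (at LR_SIZE)
	   are the reference values for the comparisons made after
	   _dl_profile_fixup returns.  */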
	movq %rsp, %rcx		# La_x86_64_regs pointer to %rcx.
	movq 48(%rbx), %rdx	# Load return address if needed.
	movq 40(%rbx), %rsi	# Copy args pushed by PLT in register.
	movq 32(%rbx), %rdi	# %rdi: link_map, %rsi: reloc_index
	leaq 16(%rbx), %r8	# Address of framesize.
	call _dl_profile_fixup	# Call resolver.
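
	/* For reference, the resolver called above is declared in C as
	   (see elf/dl-runtime.c):

	     DL_FIXUP_VALUE_TYPE
	     _dl_profile_fixup (struct link_map *l, ElfW(Word) reloc_arg,
				ElfW(Addr) retaddr, void *regs,
				long int *framesizep);

	   %rdi, %rsi, %rdx and %rcx are loaded above; %r8 points at the
	   framesize slot at 16(%rbx), which is set to a non-negative
	   value when an audit module requests the _dl_call_pltexit
	   callback.  */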
	movq %rax, %r11		# Save return value.

	movq 8(%rbx), %rax	# Get back register content.
	movq LR_RDX_OFFSET(%rsp), %rdx
	movq LR_R8_OFFSET(%rsp), %r8
	movq LR_R9_OFFSET(%rsp), %r9
	movaps (LR_XMM_OFFSET)(%rsp), %xmm0
	movaps (LR_XMM_OFFSET + XMM_SIZE)(%rsp), %xmm1
	movaps (LR_XMM_OFFSET + XMM_SIZE*2)(%rsp), %xmm2
	movaps (LR_XMM_OFFSET + XMM_SIZE*3)(%rsp), %xmm3
	movaps (LR_XMM_OFFSET + XMM_SIZE*4)(%rsp), %xmm4
	movaps (LR_XMM_OFFSET + XMM_SIZE*5)(%rsp), %xmm5
	movaps (LR_XMM_OFFSET + XMM_SIZE*6)(%rsp), %xmm6
	movaps (LR_XMM_OFFSET + XMM_SIZE*7)(%rsp), %xmm7
#ifdef RESTORE_AVX
	/* Check if any xmm0-xmm7 registers are changed by audit
	   module.  */
	vpcmpeqq (LR_SIZE)(%rsp), %xmm0, %xmm8
	vpmovmskb %xmm8, %esi
	cmpl $0xffff, %esi
	je 2f
	vmovdqa %xmm0, (LR_VECTOR_OFFSET)(%rsp)
	jmp 1f
2:	vmovdqu (LR_VECTOR_OFFSET)(%rsp), %ymm0
	vmovdqa %xmm0, (LR_XMM_OFFSET)(%rsp)

1:	vpcmpeqq (LR_SIZE + XMM_SIZE)(%rsp), %xmm1, %xmm8
	vpmovmskb %xmm8, %esi
	cmpl $0xffff, %esi
	je 2f
	vmovdqa %xmm1, (LR_VECTOR_OFFSET + VECTOR_SIZE)(%rsp)
	jmp 1f
2:	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE)(%rsp), %ymm1
	vmovdqa %xmm1, (LR_XMM_OFFSET + XMM_SIZE)(%rsp)

1:	vpcmpeqq (LR_SIZE + XMM_SIZE*2)(%rsp), %xmm2, %xmm8
	vpmovmskb %xmm8, %esi
	cmpl $0xffff, %esi
	je 2f
	vmovdqa %xmm2, (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp)
	jmp 1f
2:	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp), %ymm2
	vmovdqa %xmm2, (LR_XMM_OFFSET + XMM_SIZE*2)(%rsp)

1:	vpcmpeqq (LR_SIZE + XMM_SIZE*3)(%rsp), %xmm3, %xmm8
	vpmovmskb %xmm8, %esi
	cmpl $0xffff, %esi
	je 2f
	vmovdqa %xmm3, (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp)
	jmp 1f
2:	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp), %ymm3
	vmovdqa %xmm3, (LR_XMM_OFFSET + XMM_SIZE*3)(%rsp)

1:	vpcmpeqq (LR_SIZE + XMM_SIZE*4)(%rsp), %xmm4, %xmm8
	vpmovmskb %xmm8, %esi
	cmpl $0xffff, %esi
	je 2f
	vmovdqa %xmm4, (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp)
	jmp 1f
2:	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp), %ymm4
	vmovdqa %xmm4, (LR_XMM_OFFSET + XMM_SIZE*4)(%rsp)

1:	vpcmpeqq (LR_SIZE + XMM_SIZE*5)(%rsp), %xmm5, %xmm8
	vpmovmskb %xmm8, %esi
	cmpl $0xffff, %esi
	je 2f
	vmovdqa %xmm5, (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp)
	jmp 1f
2:	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp), %ymm5
	vmovdqa %xmm5, (LR_XMM_OFFSET + XMM_SIZE*5)(%rsp)

1:	vpcmpeqq (LR_SIZE + XMM_SIZE*6)(%rsp), %xmm6, %xmm8
	vpmovmskb %xmm8, %esi
	cmpl $0xffff, %esi
	je 2f
	vmovdqa %xmm6, (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp)
	jmp 1f
2:	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp), %ymm6
	vmovdqa %xmm6, (LR_XMM_OFFSET + XMM_SIZE*6)(%rsp)

1:	vpcmpeqq (LR_SIZE + XMM_SIZE*7)(%rsp), %xmm7, %xmm8
	vpmovmskb %xmm8, %esi
	cmpl $0xffff, %esi
	je 2f
	vmovdqa %xmm7, (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp)
	jmp 1f
2:	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp), %ymm7
	vmovdqa %xmm7, (LR_XMM_OFFSET + XMM_SIZE*7)(%rsp)

1:
#endif
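
	/* A note on the eight sequences above: vpcmpeqq sets each
	   quadword of %xmm8 to all ones where the saved copy and the
	   live register agree, and vpmovmskb collects the top bit of
	   every byte, so %esi is 0xffff exactly when the audit module
	   left the register untouched; the full %ymmN value saved on
	   entry is then reloaded.  If the module did change the
	   register, its new %xmmN value is kept live and stored to the
	   lr_vector slot instead, so La_x86_64_regs stays consistent
	   for a later _dl_call_pltexit.  */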
	movq 16(%rbx), %r10	# Anything in framesize?
	testq %r10, %r10
	jns 3f

	/* There's nothing in the frame size, so there
	   will be no call to _dl_call_pltexit.  */

	/* Get back register content.  */
	movq LR_RCX_OFFSET(%rsp), %rcx
	movq LR_RSI_OFFSET(%rsp), %rsi
	movq LR_RDI_OFFSET(%rsp), %rdi

	movq %rbx, %rsp
	movq (%rsp), %rbx
	cfi_restore(%rbx)
	cfi_def_cfa_register(%rsp)

	addq $48, %rsp		# Adjust the stack to the return value
				# (eats the reloc index and link_map)
	cfi_adjust_cfa_offset(-48)
	jmp *%r11		# Jump to function address.
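
	/* This is the common case: the audit module did not request a
	   pltexit call, so the trampoline tail-jumps to the resolved
	   function and its eventual return goes straight back to the
	   original caller.  */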
3:
	cfi_adjust_cfa_offset(48)
	cfi_rel_offset(%rbx, 0)
	cfi_def_cfa_register(%rbx)

	/* At this point we need to prepare a new stack for the function
	   which has to be called.  We copy the original stack to a
	   temporary buffer of the size specified by the 'framesize'
	   returned from _dl_profile_fixup.  */
	leaq LR_RSP_OFFSET(%rbx), %rsi	# stack
	addq $8, %r10
	andq $0xfffffffffffffff0, %r10
	movq %r10, %rcx
	subq %r10, %rsp
	movq %rsp, %rdi
	shrq $3, %rcx
	rep
	movsq

	movq 24(%rdi), %rcx	# Get back register content.
	movq 32(%rdi), %rsi
	movq 40(%rdi), %rdi

	call *%r11

	mov 24(%rbx), %rsp	# Drop the copied stack content
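
	/* Note that after rep movsq has copied the rounded 'framesize'
	   bytes, %rdi has advanced exactly to the old %rsp, i.e. it
	   points at the saved La_x86_64_regs; that is why %rcx, %rsi
	   and %rdi are reloaded through it before the resolved function
	   is called on the duplicated stack frame.  */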
	/* Now we have to prepare the La_x86_64_retval structure for
	   _dl_call_pltexit.  The La_x86_64_regs is pointed to by %rsp
	   now, so we just need to allocate the sizeof(La_x86_64_retval)
	   space on the stack, since the alignment has already been
	   taken care of.  */
#ifdef RESTORE_AVX
	/* sizeof(La_x86_64_retval).  Need extra space for 2 SSE
	   registers to detect if xmm0/xmm1 registers are changed
	   by audit module.  */
	subq $(LRV_SIZE + XMM_SIZE*2), %rsp
#else
	subq $LRV_SIZE, %rsp	# sizeof(La_x86_64_retval)
#endif
	movq %rsp, %rcx		# La_x86_64_retval argument to %rcx.

	/* Fill in the La_x86_64_retval structure.  */
	movq %rax, LRV_RAX_OFFSET(%rcx)
	movq %rdx, LRV_RDX_OFFSET(%rcx)

	movaps %xmm0, LRV_XMM0_OFFSET(%rcx)
	movaps %xmm1, LRV_XMM1_OFFSET(%rcx)
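
	/* For reference, La_x86_64_retval (rtld-audit interface,
	   <link.h>) carries lrv_rax, lrv_rdx, lrv_xmm0, lrv_xmm1,
	   lrv_st0 and lrv_st1, plus lrv_vector0/lrv_vector1 in
	   AVX-aware glibc; the LRV_*_OFFSET constants index into it.  */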
#ifdef RESTORE_AVX
	/* This is to support AVX audit modules.  */
	vmovdqu %ymm0, LRV_VECTOR0_OFFSET(%rcx)
	vmovdqu %ymm1, LRV_VECTOR1_OFFSET(%rcx)

	/* Save xmm0/xmm1 registers to detect if they are changed
	   by audit module.  */
	vmovdqa %xmm0, (LRV_SIZE)(%rcx)
	vmovdqa %xmm1, (LRV_SIZE + XMM_SIZE)(%rcx)
#endif
	fstpt LRV_ST0_OFFSET(%rcx)
	fstpt LRV_ST1_OFFSET(%rcx)

	movq 24(%rbx), %rdx	# La_x86_64_regs argument to %rdx.
	movq 40(%rbx), %rsi	# Copy args pushed by PLT in register.
	movq 32(%rbx), %rdi	# %rdi: link_map, %rsi: reloc_index
	call _dl_call_pltexit
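
	/* For reference (see elf/dl-runtime.c):

	     void _dl_call_pltexit (struct link_map *l, ElfW(Word) reloc_arg,
				    const void *inregs, void *outregs);

	   It invokes the la_x86_64_gnu_pltexit hook of each attached
	   audit module with the register and return-value structures
	   prepared above.  */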
	/* Restore return registers.  */
	movq LRV_RAX_OFFSET(%rsp), %rax
	movq LRV_RDX_OFFSET(%rsp), %rdx

	movaps LRV_XMM0_OFFSET(%rsp), %xmm0
	movaps LRV_XMM1_OFFSET(%rsp), %xmm1
#ifdef RESTORE_AVX
	/* Check if xmm0/xmm1 registers are changed by audit module.  */
	vpcmpeqq (LRV_SIZE)(%rsp), %xmm0, %xmm2
	vpmovmskb %xmm2, %esi
	cmpl $0xffff, %esi
	jne 1f
	vmovdqu LRV_VECTOR0_OFFSET(%rsp), %ymm0

1:	vpcmpeqq (LRV_SIZE + XMM_SIZE)(%rsp), %xmm1, %xmm2
	vpmovmskb %xmm2, %esi
	cmpl $0xffff, %esi
	jne 1f
	vmovdqu LRV_VECTOR1_OFFSET(%rsp), %ymm1

1:
#endif
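
	/* The x87 return values are reloaded in reverse order below:
	   %st(1) is pushed first and %st(0) last, so %st(0) ends up on
	   top of the register stack as the calling convention
	   requires.  */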
	fldt LRV_ST1_OFFSET(%rsp)
	fldt LRV_ST0_OFFSET(%rsp)

	movq %rbx, %rsp
	movq (%rsp), %rbx
	cfi_restore(%rbx)
	cfi_def_cfa_register(%rsp)

	addq $48, %rsp		# Adjust the stack to the return value
				# (eats the reloc index and link_map)
	cfi_adjust_cfa_offset(-48)
	retq
#ifdef MORE_CODE
	cfi_adjust_cfa_offset(48)
	cfi_rel_offset(%rbx, 0)
	cfi_def_cfa_register(%rbx)
# undef MORE_CODE
#endif
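
/* For illustration only (not part of the original file): a minimal
   LD_AUDIT module, sketched in C, that exercises both paths of this
   trampoline.  Setting a non-negative value through 'framesizep' in
   la_x86_64_gnu_pltenter is what makes _dl_profile_fixup record a
   frame size and send the trampoline down the _dl_call_pltexit path
   above; leaving it alone takes the tail-jump fast path.

     #include <link.h>
     #include <stdio.h>

     unsigned int
     la_version (unsigned int version)
     {
       return version;
     }

     Elf64_Addr
     la_x86_64_gnu_pltenter (Elf64_Sym *sym, unsigned int ndx,
			     uintptr_t *refcook, uintptr_t *defcook,
			     La_x86_64_regs *regs, unsigned int *flags,
			     const char *symname, long int *framesizep)
     {
       printf ("enter %s\n", symname);
       *framesizep = 128;	// Request the pltexit callback.
       return sym->st_value;	// Address actually called.
     }

     unsigned int
     la_x86_64_gnu_pltexit (Elf64_Sym *sym, unsigned int ndx,
			    uintptr_t *refcook, uintptr_t *defcook,
			    const La_x86_64_regs *inregs,
			    La_x86_64_retval *outregs, const char *symname)
     {
       printf ("exit %s -> %ld\n", symname, (long) outregs->lrv_rax);
       return 0;
     }

   Build with 'gcc -shared -fPIC -o audit.so audit.c' and run a
   program as 'LD_AUDIT=./audit.so ./prog'.  */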