/* Partial PLT profile trampoline to save and restore x86-64 vector
   registers.
   Copyright (C) 2009, 2011 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#ifdef RESTORE_AVX
	/* This is to support AVX audit modules.  */
	vmovdqu %ymm0, (LR_VECTOR_OFFSET)(%rsp)
	vmovdqu %ymm1, (LR_VECTOR_OFFSET + VECTOR_SIZE)(%rsp)
	vmovdqu %ymm2, (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp)
	vmovdqu %ymm3, (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp)
	vmovdqu %ymm4, (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp)
	vmovdqu %ymm5, (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp)
	vmovdqu %ymm6, (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp)
	vmovdqu %ymm7, (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp)
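	/* The unaligned vmovdqu form is used for the 32-byte ymm saves
	   above, while the aligned vmovdqa form is used for the 16-byte
	   xmm saves below, presumably because the save area on the stack
	   is only guaranteed 16-byte alignment.  */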
	/* Save xmm0-xmm7 registers to detect if any of them are
	   changed by audit module.  */
	vmovdqa %xmm0, (LR_SIZE)(%rsp)
	vmovdqa %xmm1, (LR_SIZE + XMM_SIZE)(%rsp)
	vmovdqa %xmm2, (LR_SIZE + XMM_SIZE*2)(%rsp)
	vmovdqa %xmm3, (LR_SIZE + XMM_SIZE*3)(%rsp)
	vmovdqa %xmm4, (LR_SIZE + XMM_SIZE*4)(%rsp)
	vmovdqa %xmm5, (LR_SIZE + XMM_SIZE*5)(%rsp)
	vmovdqa %xmm6, (LR_SIZE + XMM_SIZE*6)(%rsp)
	vmovdqa %xmm7, (LR_SIZE + XMM_SIZE*7)(%rsp)
#endif
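	/* The copies stored above LR_SIZE sit past the end of the
	   La_x86_64_regs structure, so the audit hooks never see them;
	   they are a pristine snapshot that the code after the
	   _dl_profile_fixup call compares against to decide, per register,
	   whether the audit module's xmm value or the saved full ymm
	   value should be restored.  */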
	movq %rsp, %rcx		# La_x86_64_regs pointer to %rcx.
	movq 48(%rbx), %rdx	# Load return address if needed.
	movq 40(%rbx), %rsi	# Copy args pushed by PLT in register.
	movq 32(%rbx), %rdi	# %rdi: link_map, %rsi: reloc_index
	leaq 16(%rbx), %r8	# Address of the framesize slot.
	call _dl_profile_fixup	# Call resolver.

	movq %rax, %r11		# Save return value.
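	/* For reference, the resolver call above matches this C prototype
	   (a sketch; the real definition lives in elf/dl-runtime.c):

	     DL_FIXUP_VALUE_TYPE
	     _dl_profile_fixup (struct link_map *l, ElfW(Word) reloc_arg,
				ElfW(Addr) retaddr, void *regs,
				long int *framesizep);

	   %rdi, %rsi, %rdx, %rcx and %r8 carry the five arguments per the
	   x86-64 ABI, and the resolved function address comes back in
	   %rax.  */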
	movq 8(%rbx), %rax	# Get back register content.
	movq LR_RDX_OFFSET(%rsp), %rdx
	movq LR_R8_OFFSET(%rsp), %r8
	movq LR_R9_OFFSET(%rsp), %r9

	movaps (LR_XMM_OFFSET)(%rsp), %xmm0
	movaps (LR_XMM_OFFSET + XMM_SIZE)(%rsp), %xmm1
	movaps (LR_XMM_OFFSET + XMM_SIZE*2)(%rsp), %xmm2
	movaps (LR_XMM_OFFSET + XMM_SIZE*3)(%rsp), %xmm3
	movaps (LR_XMM_OFFSET + XMM_SIZE*4)(%rsp), %xmm4
	movaps (LR_XMM_OFFSET + XMM_SIZE*5)(%rsp), %xmm5
	movaps (LR_XMM_OFFSET + XMM_SIZE*6)(%rsp), %xmm6
	movaps (LR_XMM_OFFSET + XMM_SIZE*7)(%rsp), %xmm7
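	/* The argument registers are reloaded from the La_x86_64_regs
	   frame rather than kept live across the call: an la_pltenter
	   audit hook is allowed to modify them there, and the modified
	   values are the ones the real target function must receive.  */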
#ifdef RESTORE_AVX
	/* Check if any xmm0-xmm7 registers are changed by audit
	   module.  */
	vpcmpeqq (LR_SIZE)(%rsp), %xmm0, %xmm8
	vpmovmskb %xmm8, %esi
	cmpl $0xffff, %esi
	je 2f
	vmovdqa %xmm0, (LR_VECTOR_OFFSET)(%rsp)
	jmp 1f
2:	vmovdqu (LR_VECTOR_OFFSET)(%rsp), %ymm0
	vmovdqa %xmm0, (LR_XMM_OFFSET)(%rsp)
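	/* The idiom above: vpcmpeqq sets each 64-bit lane of %xmm8 to
	   all-ones where the live register equals the copy saved before
	   the call, and vpmovmskb gathers the top bit of each of %xmm8's
	   16 bytes into %esi, so %esi == 0xffff exactly when the register
	   is unchanged.  Unchanged: reload the full ymm register saved
	   earlier (the 2: path).  Changed: keep the audit module's xmm
	   value and write it back to the save area.  The same pattern
	   repeats for xmm1-xmm7 below.  */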
1:	vpcmpeqq (LR_SIZE + XMM_SIZE)(%rsp), %xmm1, %xmm8
	vpmovmskb %xmm8, %esi
	cmpl $0xffff, %esi
	je 2f
	vmovdqa %xmm1, (LR_VECTOR_OFFSET + VECTOR_SIZE)(%rsp)
	jmp 1f
2:	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE)(%rsp), %ymm1
	vmovdqa %xmm1, (LR_XMM_OFFSET + XMM_SIZE)(%rsp)

1:	vpcmpeqq (LR_SIZE + XMM_SIZE*2)(%rsp), %xmm2, %xmm8
	vpmovmskb %xmm8, %esi
	cmpl $0xffff, %esi
	je 2f
	vmovdqa %xmm2, (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp)
	jmp 1f
2:	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp), %ymm2
	vmovdqa %xmm2, (LR_XMM_OFFSET + XMM_SIZE*2)(%rsp)

1:	vpcmpeqq (LR_SIZE + XMM_SIZE*3)(%rsp), %xmm3, %xmm8
	vpmovmskb %xmm8, %esi
	cmpl $0xffff, %esi
	je 2f
	vmovdqa %xmm3, (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp)
	jmp 1f
2:	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp), %ymm3
	vmovdqa %xmm3, (LR_XMM_OFFSET + XMM_SIZE*3)(%rsp)

1:	vpcmpeqq (LR_SIZE + XMM_SIZE*4)(%rsp), %xmm4, %xmm8
	vpmovmskb %xmm8, %esi
	cmpl $0xffff, %esi
	je 2f
	vmovdqa %xmm4, (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp)
	jmp 1f
2:	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp), %ymm4
	vmovdqa %xmm4, (LR_XMM_OFFSET + XMM_SIZE*4)(%rsp)

1:	vpcmpeqq (LR_SIZE + XMM_SIZE*5)(%rsp), %xmm5, %xmm8
	vpmovmskb %xmm8, %esi
	cmpl $0xffff, %esi
	je 2f
	vmovdqa %xmm5, (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp)
	jmp 1f
2:	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp), %ymm5
	vmovdqa %xmm5, (LR_XMM_OFFSET + XMM_SIZE*5)(%rsp)

1:	vpcmpeqq (LR_SIZE + XMM_SIZE*6)(%rsp), %xmm6, %xmm8
	vpmovmskb %xmm8, %esi
	cmpl $0xffff, %esi
	je 2f
	vmovdqa %xmm6, (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp)
	jmp 1f
2:	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp), %ymm6
	vmovdqa %xmm6, (LR_XMM_OFFSET + XMM_SIZE*6)(%rsp)

1:	vpcmpeqq (LR_SIZE + XMM_SIZE*7)(%rsp), %xmm7, %xmm8
	vpmovmskb %xmm8, %esi
	cmpl $0xffff, %esi
	je 2f
	vmovdqa %xmm7, (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp)
	jmp 1f
2:	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp), %ymm7
	vmovdqa %xmm7, (LR_XMM_OFFSET + XMM_SIZE*7)(%rsp)

1:
#endif
	movq 16(%rbx), %r10	# Anything in framesize?
	testq %r10, %r10
	jns 3f

	/* There's nothing in the frame size, so there
	   will be no call to the _dl_call_pltexit.  */
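	/* A negative framesize stored by _dl_profile_fixup means the audit
	   modules do not need an la_pltexit call, so the trampoline can
	   restore the argument registers and tail-jump straight to the
	   resolved function.  */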
	/* Get back registers content.  */
	movq LR_RCX_OFFSET(%rsp), %rcx
	movq LR_RSI_OFFSET(%rsp), %rsi
	movq LR_RDI_OFFSET(%rsp), %rdi
	movq %rbx, %rsp
	movq (%rsp), %rbx
	cfi_restore(%rbx)
	cfi_def_cfa_register(%rsp)

	addq $48, %rsp		# Adjust the stack to the return value
				# (eats the reloc index and link_map)
	cfi_adjust_cfa_offset(-48)
	jmp *%r11		# Jump to function address.
3:
	cfi_adjust_cfa_offset(48)
	cfi_rel_offset(%rbx, 0)
	cfi_def_cfa_register(%rbx)
	/* At this point we need to prepare new stack for the function
	   which has to be called.  We copy the original stack to a
	   temporary buffer of the size specified by the 'framesize'
	   returned from _dl_profile_fixup.  */

	leaq LR_RSP_OFFSET(%rbx), %rsi	# stack
	addq $8, %r10
	andq $0xfffffffffffffff0, %r10	# Keep the copy 16-byte aligned.
	movq %r10, %rcx
	subq %r10, %rsp
	movq %rsp, %rdi
	shrq $3, %rcx
	rep
	movsq
	movq 24(%rdi), %rcx	# Get back register content.
	movq 32(%rdi), %rsi
	movq 40(%rdi), %rdi

	call *%r11		# Call the resolved function.

	mov 24(%rbx), %rsp	# Drop the copied stack content
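	/* 24(%rbx) holds the %rsp value saved when the La_x86_64_regs
	   frame was built, so this single mov both discards the copied
	   stack and leaves %rsp pointing at the register save area again,
	   ready to be reused below.  */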
	/* Now we have to prepare the La_x86_64_retval structure for the
	   _dl_call_pltexit.  The La_x86_64_regs is being pointed by rsp now,
	   so we just need to allocate the sizeof(La_x86_64_retval) space on
	   the stack, since the alignment has already been taken care of. */
#ifdef RESTORE_AVX
	/* sizeof(La_x86_64_retval).  Need extra space for 2 SSE
	   registers to detect if xmm0/xmm1 registers are changed
	   by audit module.  */
	subq $(LRV_SIZE + XMM_SIZE*2), %rsp
#else
	subq $LRV_SIZE, %rsp	# sizeof(La_x86_64_retval)
#endif
	movq %rsp, %rcx		# La_x86_64_retval argument to %rcx.
	/* Fill in the La_x86_64_retval structure.  */
	movq %rax, LRV_RAX_OFFSET(%rcx)
	movq %rdx, LRV_RDX_OFFSET(%rcx)

	movaps %xmm0, LRV_XMM0_OFFSET(%rcx)
	movaps %xmm1, LRV_XMM1_OFFSET(%rcx)
#ifdef RESTORE_AVX
	/* This is to support AVX audit modules.  */
	vmovdqu %ymm0, LRV_VECTOR0_OFFSET(%rcx)
	vmovdqu %ymm1, LRV_VECTOR1_OFFSET(%rcx)

	/* Save xmm0/xmm1 registers to detect if they are changed
	   by audit module.  */
	vmovdqa %xmm0, (LRV_SIZE)(%rcx)
	vmovdqa %xmm1, (LRV_SIZE + XMM_SIZE)(%rcx)
#endif
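	/* The LRV_*_OFFSET constants used here index the La_x86_64_retval
	   structure from <bits/link.h>, roughly (a sketch, not the
	   authoritative declaration):

	     typedef struct La_x86_64_retval
	     {
	       uint64_t lrv_rax;
	       uint64_t lrv_rdx;
	       La_x86_64_xmm lrv_xmm0;
	       La_x86_64_xmm lrv_xmm1;
	       long double lrv_st0;
	       long double lrv_st1;
	       La_x86_64_vector lrv_vector0;   -- AVX support
	       La_x86_64_vector lrv_vector1;
	     } La_x86_64_retval;  */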
	fstpt LRV_ST0_OFFSET(%rcx)
	fstpt LRV_ST1_OFFSET(%rcx)
	movq 24(%rbx), %rdx	# La_x86_64_regs argument to %rdx.
	movq 40(%rbx), %rsi	# Copy args pushed by PLT in register.
	movq 32(%rbx), %rdi	# %rdi: link_map, %rsi: reloc_index
	call _dl_call_pltexit
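	/* The call above matches this C prototype (a sketch; the real
	   definition is in elf/dl-runtime.c):

	     void _dl_call_pltexit (struct link_map *l, ElfW(Word) reloc_arg,
				    const void *inregs, void *outregs);

	   It invokes each audit module's la_pltexit hook, which may modify
	   the return values through the outregs structure.  */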
	/* Restore return registers.  */
	movq LRV_RAX_OFFSET(%rsp), %rax
	movq LRV_RDX_OFFSET(%rsp), %rdx

	movaps LRV_XMM0_OFFSET(%rsp), %xmm0
	movaps LRV_XMM1_OFFSET(%rsp), %xmm1
#ifdef RESTORE_AVX
	/* Check if xmm0/xmm1 registers are changed by audit module.  */
	vpcmpeqq (LRV_SIZE)(%rsp), %xmm0, %xmm2
	vpmovmskb %xmm2, %esi
	cmpl $0xffff, %esi
	jne 1f
	vmovdqu LRV_VECTOR0_OFFSET(%rsp), %ymm0

1:	vpcmpeqq (LRV_SIZE + XMM_SIZE)(%rsp), %xmm1, %xmm2
	vpmovmskb %xmm2, %esi
	cmpl $0xffff, %esi
	jne 1f
	vmovdqu LRV_VECTOR1_OFFSET(%rsp), %ymm1

1:
#endif
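	/* Same detection idiom as before, with the branch sense flipped:
	   the full ymm return value is reloaded only when la_pltexit left
	   the xmm register untouched; a modified xmm0/xmm1 is passed back
	   to the caller as-is.  */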
	fldt LRV_ST1_OFFSET(%rsp)
	fldt LRV_ST0_OFFSET(%rsp)
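	/* fldt pushes onto the x87 stack, so st1 is loaded first and st0
	   second, leaving st0 on top as the return-value convention
	   requires.  */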
	movq %rbx, %rsp
	movq (%rsp), %rbx
	cfi_restore(%rbx)
	cfi_def_cfa_register(%rsp)

	addq $48, %rsp		# Adjust the stack to the return value
				# (eats the reloc index and link_map)
	cfi_adjust_cfa_offset(-48)
	retq
	cfi_adjust_cfa_offset(48)
	cfi_rel_offset(%rbx, 0)
	cfi_def_cfa_register(%rbx)