/* Function exp vectorized with AVX2.
   Copyright (C) 2014-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include "svml_d_exp_data.h"

        .text
ENTRY (_ZGVdN4v_exp_avx2)
/*
   ALGORITHM DESCRIPTION:

   Argument representation:
   N = rint(X*2^k/ln2) = 2^k*M+j
   X = N*ln2/2^k + r = M*ln2 + ln2*(j/2^k) + r
   then -ln2/2^(k+1) < r < ln2/2^(k+1)
   Alternatively:
   N = trunc(X*2^k/ln2)
   then 0 < r < ln2/2^k

   Result calculation:
   exp(X) = exp(M*ln2 + ln2*(j/2^k) + r)
   = 2^M * 2^(j/2^k) * exp(r)
   2^M is calculated by bit manipulation
   2^(j/2^k) is stored in table
   exp(r) is approximated by polynomial

   The table lookup is skipped if k = 0.  */
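
/* For reference, a minimal scalar C sketch of the same scheme, assuming
   k = 10 (matching the 42 = 52-10 shift count used below).  The table,
   polynomial coefficients, and helper names here are illustrative
   stand-ins, not the actual __svml_dexp_data layout; the real data also
   splits ln2/2^k into hi/lo parts, uses minimax coefficients, and
   funnels out-of-range lanes to a scalar fallback:

       #include <math.h>
       #include <stdint.h>
       #include <string.h>

       static double T[1024];          // T[j] ~= 2^(j/1024)

       static void init_table (void)   // call once before exp_sketch
       {
         for (int j = 0; j < 1024; j++)
           T[j] = exp2 (j / 1024.0);
       }

       static double exp_sketch (double x)
       {
         const double invln2k = 0x1.71547652b82fep+10;      // 2^10/ln2
         const double shifter = 0x1.8p+52;                  // 1.5 * 2^52
         const double ln2k = 0x1.62e42fefa39efp-1 / 1024.0; // ln2/2^10

         double dm = x * invln2k + shifter; // N = rint(x*2^10/ln2) in
                                            // the low mantissa bits
         double dn = dm - shifter;          // (double) N
         double r = x - dn * ln2k;          // reduced argument

         uint64_t lm;
         memcpy (&lm, &dm, sizeof lm);
         double t = T[lm & 0x3ff];          // 2^(j/2^10), j = N mod 1024

         // cubic exp(r); Taylor stand-ins for the minimax __dPC0..2
         double p = 1.0 + r * (1.0 + r * (0.5 + r * (1.0 / 6.0)));

         double z = t * p;
         uint64_t zb, mbits = (lm & ~0x3ffULL) << 42; // M -> exponent
         memcpy (&zb, &z, sizeof zb);
         zb += mbits;                       // scale by 2^M, integer add
         memcpy (&z, &zb, sizeof z);
         return z;                          // in-range x only
       }
*/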

        pushq   %rbp
        cfi_adjust_cfa_offset (8)
        cfi_rel_offset (%rbp, 0)
        movq    %rsp, %rbp
        cfi_def_cfa_register (%rbp)
        andq    $-64, %rsp
        subq    $448, %rsp
        movq    __svml_dexp_data@GOTPCREL(%rip), %rax
        vmovdqa %ymm0, %ymm2
        vmovupd __dbInvLn2(%rax), %ymm3
        vmovupd __dbShifter(%rax), %ymm1
        vmovupd __lIndexMask(%rax), %ymm4

        /* dM = X*dbInvLn2+dbShifter, dbInvLn2 = 2^k/Ln2 */
        vfmadd213pd %ymm1, %ymm2, %ymm3
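
        /* The dbShifter addend above fixes the binary point so that
           N = rint(X*2^k/Ln2) now sits in the low mantissa bits of dM;
           subtracting dbShifter back out recovers (double) N, and the
           same bits later supply lIndex and lM.  */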

        /* iAbsX = (int)(lX>>32), lX = *(longlong*)&X */
        vextracti128 $1, %ymm2, %xmm5
        vshufps $221, %xmm5, %xmm2, %xmm6
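
        /* vshufps $221 (0xDD) packs the odd-numbered 32-bit elements,
           i.e. the high dword of each of the four doubles, into xmm6.  */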

        /* iAbsX = iAbsX&iAbsMask */
        vandps  __iAbsMask(%rax), %xmm6, %xmm7

        /* dN = dM-dbShifter, dN = rint(X*2^k/Ln2) */
        vsubpd  %ymm1, %ymm3, %ymm6

        /* iRangeMask = (iAbsX>iDomainRange) */
        vpcmpgtd __iDomainRange(%rax), %xmm7, %xmm0
        vmovupd __dbLn2hi(%rax), %ymm1
        vmovupd __dPC0(%rax), %ymm7

        /* Mask = iRangeMask?1:0, set mask for overflow/underflow */
        vmovmskps %xmm0, %ecx
        vmovupd __dPC2(%rax), %ymm0

        /* dR = X - dN*dbLn2hi, dbLn2hi is 52-8-k hi bits of ln2/2^k */
        vmovdqa %ymm2, %ymm5
        vfnmadd231pd %ymm6, %ymm1, %ymm5

        /* dR = dR - dN*dbLn2lo, dbLn2lo is 40..94 bits of lo part of ln2/2^k */
        vfnmadd132pd __dbLn2lo(%rax), %ymm5, %ymm6
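
        /* Two-part (Cody-Waite style) reduction: dbLn2hi is truncated so
           that the product dN*dbLn2hi incurs little or no rounding, and
           the dbLn2lo term restores the discarded low bits of ln2/2^k.  */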

        /* exp(r) = b0+r*(b0+r*(b1+r*b2)) */
        vfmadd213pd __dPC1(%rax), %ymm6, %ymm0
        vfmadd213pd %ymm7, %ymm6, %ymm0
        vfmadd213pd %ymm7, %ymm6, %ymm0
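
        /* Horner evaluation with FMAs: first PC2*r+PC1, then
           (PC2*r+PC1)*r+PC0, then that result times r plus PC0 again,
           yielding b0+r*(b0+r*(b1+r*b2)) with b0=PC0, b1=PC1, b2=PC2.  */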

        /* lIndex = (*(longlong*)&dM)&lIndexMask, lIndex is the lower K bits of lM */
        vandps  %ymm4, %ymm3, %ymm1

        /* table lookup for dT[j] = 2^(j/2^k) */
        vxorpd  %ymm6, %ymm6, %ymm6
        vpcmpeqd %ymm5, %ymm5, %ymm5
        vgatherqpd %ymm5, (%rax,%ymm1,8), %ymm6
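
        /* An AVX2 gather consumes and clears its mask register, so ymm5
           is set to all-ones immediately before each use; zeroing the
           ymm6 destination first avoids a false dependency on its
           previous contents.  */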

        /* lM = (*(longlong*)&dM)&(~lIndexMask) */
        vpandn  %ymm3, %ymm4, %ymm3

        /* 2^(j/2^k) * exp(r) */
        vmulpd  %ymm0, %ymm6, %ymm0

        /* lM = lM<<(52-K), 2^M */
        vpsllq  $42, %ymm3, %ymm4

        /* multiply by 2^M through integer add */
        vpaddq  %ymm4, %ymm0, %ymm0
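
        /* The shift count 42 = 52-K with K = 10 index bits places M in
           the exponent field, so the integer vpaddq adds M to the
           biased exponent of 2^(j/2^k)*exp(r), i.e. multiplies by 2^M.
           Lanes flagged in %ecx are recomputed by the scalar fallback
           below.  */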
        testl   %ecx, %ecx
        jne     .LBL_1_3

.LBL_1_2:
        cfi_remember_state
        movq    %rbp, %rsp
        cfi_def_cfa_register (%rsp)
        popq    %rbp
        cfi_adjust_cfa_offset (-8)
        cfi_restore (%rbp)
        ret

.LBL_1_3:
        cfi_restore_state
        vmovupd %ymm2, 320(%rsp)
        vmovupd %ymm0, 384(%rsp)
        je      .LBL_1_2

        xorb    %dl, %dl
        xorl    %eax, %eax
        vmovups %ymm8, 224(%rsp)
        vmovups %ymm9, 192(%rsp)
        vmovups %ymm10, 160(%rsp)
        vmovups %ymm11, 128(%rsp)
        vmovups %ymm12, 96(%rsp)
        vmovups %ymm13, 64(%rsp)
        vmovups %ymm14, 32(%rsp)
        vmovups %ymm15, (%rsp)
        movq    %rsi, 264(%rsp)
        movq    %rdi, 256(%rsp)
        movq    %r12, 296(%rsp)
        cfi_offset_rel_rsp (12, 296)
        movb    %dl, %r12b
        movq    %r13, 288(%rsp)
        cfi_offset_rel_rsp (13, 288)
        movl    %ecx, %r13d
        movq    %r14, 280(%rsp)
        cfi_offset_rel_rsp (14, 280)
        movl    %eax, %r14d
        movq    %r15, 272(%rsp)
        cfi_offset_rel_rsp (15, 272)
        cfi_remember_state
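
        /* Scalar fallback loop: %r13d holds the 4-bit lane mask from
           vmovmskps, %r14d the lane index, %r12b the iteration count.
           Each pass handles two lanes (even lane at .LBL_1_12, odd
           lane at .LBL_1_10) at a 16-byte stride into the 320(%rsp)
           input and 384(%rsp) result buffers; iterations beyond the
           second find their mask bits clear and do nothing.  */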

.LBL_1_6:
        btl     %r14d, %r13d
        jc      .LBL_1_12

.LBL_1_7:
        lea     1(%r14), %esi
        btl     %esi, %r13d
        jc      .LBL_1_10

.LBL_1_8:
        incb    %r12b
        addl    $2, %r14d
        cmpb    $16, %r12b
        jb      .LBL_1_6

        vmovups 224(%rsp), %ymm8
        vmovups 192(%rsp), %ymm9
        vmovups 160(%rsp), %ymm10
        vmovups 128(%rsp), %ymm11
        vmovups 96(%rsp), %ymm12
        vmovups 64(%rsp), %ymm13
        vmovups 32(%rsp), %ymm14
        vmovups (%rsp), %ymm15
        vmovupd 384(%rsp), %ymm0
        movq    264(%rsp), %rsi
        movq    256(%rsp), %rdi
        movq    296(%rsp), %r12
        cfi_restore (%r12)
        movq    288(%rsp), %r13
        cfi_restore (%r13)
        movq    280(%rsp), %r14
        cfi_restore (%r14)
        movq    272(%rsp), %r15
        cfi_restore (%r15)
        jmp     .LBL_1_2

.LBL_1_10:
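        /* Odd-numbered lane: input taken from 328(%rsp,%r15), result
           stored to 392(%rsp,%r15), with %r15 = 16 * iteration.  */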
        cfi_restore_state
        movzbl  %r12b, %r15d
        shlq    $4, %r15
        vmovsd  328(%rsp,%r15), %xmm0
        vzeroupper
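
        /* vzeroupper avoids AVX-to-SSE transition penalties around the
           scalar call, which may execute SSE code.  */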

        call    JUMPTARGET(__exp_finite)

        vmovsd  %xmm0, 392(%rsp,%r15)
        jmp     .LBL_1_8

.LBL_1_12:
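        /* Even-numbered lane: input taken from 320(%rsp,%r15), result
           stored to 384(%rsp,%r15).  */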
        movzbl  %r12b, %r15d
        shlq    $4, %r15
        vmovsd  320(%rsp,%r15), %xmm0
        vzeroupper

        call    JUMPTARGET(__exp_finite)

        vmovsd  %xmm0, 384(%rsp,%r15)
        jmp     .LBL_1_7

END (_ZGVdN4v_exp_avx2)