/* Function exp vectorized with AVX-512. KNL and SKX versions.
   Copyright (C) 2014-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include "svml_d_exp_data.h"
#include "svml_d_wrapper_impl.h"

	.text
ENTRY (_ZGVeN8v_exp_knl)
#ifndef HAVE_AVX512DQ_ASM_SUPPORT
WRAPPER_IMPL_AVX512 _ZGVdN4v_exp
#else
/*
   ALGORITHM DESCRIPTION:

     Argument representation:
     N = rint(X*2^k/ln2) = 2^k*M+j
     X = N*ln2/2^k + r = M*ln2 + ln2*(j/2^k) + r
     then -ln2/2^(k+1) < r < ln2/2^(k+1)

     Result calculation:
     exp(X) = exp(M*ln2 + ln2*(j/2^k) + r)
            = 2^M * 2^(j/2^k) * exp(r)
     2^M is calculated by bit manipulation
     2^(j/2^k) is stored in table
     exp(r) is approximated by polynomial

     The table lookup is skipped if k = 0.  */
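
/* A minimal scalar C sketch of the scheme above, for illustration only.
   The constant values, the Taylor-like polynomial coefficients and
   K = 10 (read off the "vpsllq $42" = 52-K below) are assumptions, not
   values taken from __svml_dexp_data:

     #include <math.h>

     #define K 10                      // from the vpsllq $42 (= 52-K) below

     double exp_sketch (double x)
     {
       // N = rint (X*2^K/ln2); the real code uses the dbShifter
       // add/subtract trick instead of rint.
       double n = rint (x * 0x1.71547652b82fep+10);   // 2^K/ln2

       // r = X - N*ln2/2^K; the real code splits ln2/2^K into hi and
       // lo parts (dbLn2hi/dbLn2lo) for extra accuracy.
       double r = x - n * 0x1.62e42fefa39efp-11;      // ln2/2^K

       long ln = (long) n;
       long j = ln & ((1L << K) - 1);   // low K bits: table index
       long m = ln >> K;                // remaining bits: exponent M

       // exp(r) ~ b0 + r*(b0 + r*(b1 + r*b2)); Taylor values b0=1,
       // b1=1/2, b2=1/6 assumed here, the real ones live in __dPC0..2.
       double p = 1.0 + r * (1.0 + r * (0.5 + r * (1.0 / 6.0)));

       // dT[j] = 2^(j/2^K); a table lookup in the real code.
       double t = exp2 ((double) j / (1L << K));

       // Multiply by 2^M through an integer add to the exponent field.
       union { double d; long l; } u = { .d = t * p };
       u.l += m << 52;
       return u.d;
     }

   Out-of-range lanes are instead recomputed one by one by the scalar
   fallback at the end of the function.  */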

        pushq     %rbp
        cfi_adjust_cfa_offset (8)
        cfi_rel_offset (%rbp, 0)
        movq      %rsp, %rbp
        cfi_def_cfa_register (%rbp)
        andq      $-64, %rsp
        subq      $1280, %rsp
        movq      __svml_dexp_data@GOTPCREL(%rip), %rax

/* dR = X - dN*dbLn2hi, dbLn2hi is 52-8-k hi bits of ln2/2^k */
        vmovaps   %zmm0, %zmm8

/* iAbsX = (int)(lX>>32), lX = *(longlong*)&X */
        vpsrlq    $32, %zmm0, %zmm1

/* table lookup for dT[j] = 2^(j/2^k) */
        vpxord    %zmm11, %zmm11, %zmm11
        vmovups   __dbInvLn2(%rax), %zmm5
        vmovups   __dbLn2hi(%rax), %zmm7

/* dM = X*dbInvLn2+dbShifter, dbInvLn2 = 2^k/Ln2 */
        vfmadd213pd __dbShifter(%rax), %zmm0, %zmm5
        vmovups   __dPC2(%rax), %zmm12

/* dN = dM-dbShifter, dN = rint(X*2^k/Ln2) */
        vsubpd    __dbShifter(%rax), %zmm5, %zmm9
        vmovups   __lIndexMask(%rax), %zmm4
        vfnmadd231pd %zmm9, %zmm7, %zmm8

/* iAbsX = iAbsX&iAbsMask */
        vpandd    __iAbsMask(%rax), %zmm2, %zmm2{%k2}

/* lIndex = (*(longlong*)&dM)&lIndexMask, lIndex is the lower K bits of lM */
        vpandq    %zmm4, %zmm5, %zmm10
        vgatherqpd (%rax,%zmm10,8), %zmm11{%k3}

/* iRangeMask = (iAbsX>iDomainRange) */
        vpcmpgtd  __iDomainRange(%rax), %zmm2, %k1{%k2}

/* lM = (*(longlong*)&dM)&(~lIndexMask) */
        vpandnq   %zmm5, %zmm4, %zmm6
        vpbroadcastd %ecx, %zmm3{%k1}{z}

/* lM = lM<<(52-K), 2^M */
        vpsllq    $42, %zmm6, %zmm14
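
/* Note: the immediate 42 matches 52-K with K = 10, so lIndexMask keeps
   the low 10 bits of lM as the table index and the shift moves the
   remaining bits of lM into the exponent field of a double.  */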

/* dR = dR - dN*dbLn2lo, dbLn2lo is 40..94 bits of lo part of ln2/2^k */
        vfnmadd132pd __dbLn2lo(%rax), %zmm8, %zmm9

/* Mask = iRangeMask?1:0, set mask for overflow/underflow */
        vptestmd  %zmm3, %zmm3, %k0{%k2}

/* exp(r) = b0+r*(b0+r*(b1+r*b2)) */
        vfmadd213pd __dPC1(%rax), %zmm9, %zmm12
        vfmadd213pd __dPC0(%rax), %zmm9, %zmm12
        vfmadd213pd __dPC0(%rax), %zmm9, %zmm12
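
/* The repeated __dPC0 FMA is intentional: with %zmm12 seeded from
   __dPC2 (b2), the three steps form b1+r*b2, then b0+r*(...), then
   b0+r*(...), i.e. the b0+r*(b0+r*(b1+r*b2)) form above in which b0
   appears twice.  */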

/* 2^(j/2^k) * exp(r) */
        vmulpd    %zmm12, %zmm11, %zmm13

/* multiply by 2^M through integer add */
        vpaddq    %zmm14, %zmm13, %zmm1
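
/* Adding the shifted lM to the bit pattern of the positive product
   adds M to its biased exponent, scaling it by 2^M without another
   floating-point multiply.  Lanes where this could over- or underflow
   were flagged above and are recomputed by the fallback below.  */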

        vmovaps   %zmm1, %zmm0
        movq      %rbp, %rsp
        cfi_def_cfa_register (%rsp)
        popq      %rbp
        cfi_adjust_cfa_offset (-8)
        ret

        vmovups   %zmm0, 1152(%rsp)
        vmovups   %zmm1, 1216(%rsp)
        kmovw     %k4, 1048(%rsp)
        kmovw     %k5, 1040(%rsp)
        kmovw     %k6, 1032(%rsp)
        kmovw     %k7, 1024(%rsp)
        vmovups   %zmm16, 960(%rsp)
        vmovups   %zmm17, 896(%rsp)
        vmovups   %zmm18, 832(%rsp)
        vmovups   %zmm19, 768(%rsp)
        vmovups   %zmm20, 704(%rsp)
        vmovups   %zmm21, 640(%rsp)
        vmovups   %zmm22, 576(%rsp)
        vmovups   %zmm23, 512(%rsp)
        vmovups   %zmm24, 448(%rsp)
        vmovups   %zmm25, 384(%rsp)
        vmovups   %zmm26, 320(%rsp)
        vmovups   %zmm27, 256(%rsp)
        vmovups   %zmm28, 192(%rsp)
        vmovups   %zmm29, 128(%rsp)
        vmovups   %zmm30, 64(%rsp)
        vmovups   %zmm31, (%rsp)
        movq      %rsi, 1064(%rsp)
        movq      %rdi, 1056(%rsp)
        movq      %r12, 1096(%rsp)
        cfi_offset_rel_rsp (12, 1096)
        movq      %r13, 1088(%rsp)
        cfi_offset_rel_rsp (13, 1088)
        movq      %r14, 1080(%rsp)
        cfi_offset_rel_rsp (14, 1080)
        movq      %r15, 1072(%rsp)
        cfi_offset_rel_rsp (15, 1072)
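
/* At this point every piece of state the scalar call below may
   clobber but which must survive it - %k4-%k7, %zmm16-%zmm31 and the
   saved GPRs - has been spilled to the frame; it is restored below
   once every flagged lane has been recomputed.  */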

        kmovw     1048(%rsp), %k4
        movq      1064(%rsp), %rsi
        kmovw     1040(%rsp), %k5
        movq      1056(%rsp), %rdi
        kmovw     1032(%rsp), %k6
        movq      1096(%rsp), %r12
        movq      1088(%rsp), %r13
        kmovw     1024(%rsp), %k7
        vmovups   960(%rsp), %zmm16
        vmovups   896(%rsp), %zmm17
        vmovups   832(%rsp), %zmm18
        vmovups   768(%rsp), %zmm19
        vmovups   704(%rsp), %zmm20
        vmovups   640(%rsp), %zmm21
        vmovups   576(%rsp), %zmm22
        vmovups   512(%rsp), %zmm23
        vmovups   448(%rsp), %zmm24
        vmovups   384(%rsp), %zmm25
        vmovups   320(%rsp), %zmm26
        vmovups   256(%rsp), %zmm27
        vmovups   192(%rsp), %zmm28
        vmovups   128(%rsp), %zmm29
        vmovups   64(%rsp), %zmm30
        vmovups   (%rsp), %zmm31
        movq      1080(%rsp), %r14
        movq      1072(%rsp), %r15
        vmovups   1216(%rsp), %zmm1

        vmovsd    1160(%rsp,%r15), %xmm0
        call      JUMPTARGET(__exp_finite)
        vmovsd    %xmm0, 1224(%rsp,%r15)

        vmovsd    1152(%rsp,%r15), %xmm0
        call      JUMPTARGET(__exp_finite)
        vmovsd    %xmm0, 1216(%rsp,%r15)
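
/* Each flagged lane is recomputed by the scalar __exp_finite: its
   argument is reloaded from the input spill area at 1152(%rsp) and the
   result is stored back over the vector result spilled at 1216(%rsp).
   The two call sequences handle the high (offset +8) and low double of
   each 16-byte lane pair selected by %r15.  */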
#endif
END (_ZGVeN8v_exp_knl)

ENTRY (_ZGVeN8v_exp_skx)
#ifndef HAVE_AVX512DQ_ASM_SUPPORT
WRAPPER_IMPL_AVX512 _ZGVdN4v_exp
#else
/*
   ALGORITHM DESCRIPTION:

     Argument representation:
     N = rint(X*2^k/ln2) = 2^k*M+j
     X = N*ln2/2^k + r = M*ln2 + ln2*(j/2^k) + r
     then -ln2/2^(k+1) < r < ln2/2^(k+1)

     Result calculation:
     exp(X) = exp(M*ln2 + ln2*(j/2^k) + r)
            = 2^M * 2^(j/2^k) * exp(r)
     2^M is calculated by bit manipulation
     2^(j/2^k) is stored in table
     exp(r) is approximated by polynomial

     The table lookup is skipped if k = 0.  */
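
/* The SKX variant implements the same algorithm as the KNL version
   above, but performs the out-of-range test with 256-bit integer
   compare plus vmovmskps instead of a masked compare into a %k
   register, and narrows the table indices with vpmovqd so vgatherdpd
   can be used where the KNL path gathers with qword indices.  */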

        pushq     %rbp
        cfi_adjust_cfa_offset (8)
        cfi_rel_offset (%rbp, 0)
        movq      %rsp, %rbp
        cfi_def_cfa_register (%rbp)
        andq      $-64, %rsp
        subq      $1280, %rsp
        movq      __svml_dexp_data@GOTPCREL(%rip), %rax

/* table lookup for dT[j] = 2^(j/2^k) */
        kxnorw    %k1, %k1, %k1

/* iAbsX = (int)(lX>>32), lX = *(longlong*)&X */
        vpsrlq    $32, %zmm0, %zmm1
        vpmovqd   %zmm1, %ymm2
        vmovups   __dbInvLn2(%rax), %zmm7
        vmovups   __dbShifter(%rax), %zmm5
        vmovups   __lIndexMask(%rax), %zmm6
        vmovups   __dbLn2hi(%rax), %zmm9
        vmovups   __dPC0(%rax), %zmm12

/* dM = X*dbInvLn2+dbShifter, dbInvLn2 = 2^k/Ln2 */
        vfmadd213pd %zmm5, %zmm0, %zmm7

/* dN = dM-dbShifter, dN = rint(X*2^k/Ln2) */
        vsubpd    %zmm5, %zmm7, %zmm11

/* iAbsX = iAbsX&iAbsMask */
        vpand     __iAbsMask(%rax), %ymm2, %ymm3

/* dR = X - dN*dbLn2hi, dbLn2hi is 52-8-k hi bits of ln2/2^k */
        vmovaps   %zmm0, %zmm10
        vfnmadd231pd %zmm11, %zmm9, %zmm10
        vmovups   __dPC2(%rax), %zmm9

/* dR = dR - dN*dbLn2lo, dbLn2lo is 40..94 bits of lo part of ln2/2^k */
        vfnmadd132pd __dbLn2lo(%rax), %zmm10, %zmm11

/* exp(r) = b0+r*(b0+r*(b1+r*b2)) */
        vfmadd213pd __dPC1(%rax), %zmm11, %zmm9
        vfmadd213pd %zmm12, %zmm11, %zmm9
        vfmadd213pd %zmm12, %zmm11, %zmm9

/* iRangeMask = (iAbsX>iDomainRange) */
        vpcmpgtd  __iDomainRange(%rax), %ymm3, %ymm4

/* Mask = iRangeMask?1:0, set mask for overflow/underflow */
        vmovmskps %ymm4, %ecx

/* lIndex = (*(longlong*)&dM)&lIndexMask, lIndex is the lower K bits of lM */
        vpandq    %zmm6, %zmm7, %zmm13
        vpmovqd   %zmm13, %ymm14
        vpxord    %zmm15, %zmm15, %zmm15
        vgatherdpd (%rax,%ymm14,8), %zmm15{%k1}
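
/* The eight qword indices are narrowed to dwords so a single
   dword-indexed gather can fetch all eight table entries; the
   destination is zeroed first and %k1 supplies the gather mask.  */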

/* 2^(j/2^k) * exp(r) */
        vmulpd    %zmm9, %zmm15, %zmm10

/* lM = (*(longlong*)&dM)&(~lIndexMask) */
        vpandnq   %zmm7, %zmm6, %zmm8

/* lM = lM<<(52-K), 2^M */
        vpsllq    $42, %zmm8, %zmm1

/* multiply by 2^M through integer add */
        vpaddq    %zmm1, %zmm10, %zmm1

        vmovaps   %zmm1, %zmm0
        movq      %rbp, %rsp
        cfi_def_cfa_register (%rsp)
        popq      %rbp
        cfi_adjust_cfa_offset (-8)
        ret

        vmovups   %zmm0, 1152(%rsp)
        vmovups   %zmm1, 1216(%rsp)
        kmovw     %k4, 1048(%rsp)
        kmovw     %k5, 1040(%rsp)
        kmovw     %k6, 1032(%rsp)
        kmovw     %k7, 1024(%rsp)
        vmovups   %zmm16, 960(%rsp)
        vmovups   %zmm17, 896(%rsp)
        vmovups   %zmm18, 832(%rsp)
        vmovups   %zmm19, 768(%rsp)
        vmovups   %zmm20, 704(%rsp)
        vmovups   %zmm21, 640(%rsp)
        vmovups   %zmm22, 576(%rsp)
        vmovups   %zmm23, 512(%rsp)
        vmovups   %zmm24, 448(%rsp)
        vmovups   %zmm25, 384(%rsp)
        vmovups   %zmm26, 320(%rsp)
        vmovups   %zmm27, 256(%rsp)
        vmovups   %zmm28, 192(%rsp)
        vmovups   %zmm29, 128(%rsp)
        vmovups   %zmm30, 64(%rsp)
        vmovups   %zmm31, (%rsp)
        movq      %rsi, 1064(%rsp)
        movq      %rdi, 1056(%rsp)
        movq      %r12, 1096(%rsp)
        cfi_offset_rel_rsp (12, 1096)
        movq      %r13, 1088(%rsp)
        cfi_offset_rel_rsp (13, 1088)
        movq      %r14, 1080(%rsp)
        cfi_offset_rel_rsp (14, 1080)
        movq      %r15, 1072(%rsp)
        cfi_offset_rel_rsp (15, 1072)

        kmovw     1048(%rsp), %k4
        kmovw     1040(%rsp), %k5
        kmovw     1032(%rsp), %k6
        kmovw     1024(%rsp), %k7
        vmovups   960(%rsp), %zmm16
        vmovups   896(%rsp), %zmm17
        vmovups   832(%rsp), %zmm18
        vmovups   768(%rsp), %zmm19
        vmovups   704(%rsp), %zmm20
        vmovups   640(%rsp), %zmm21
        vmovups   576(%rsp), %zmm22
        vmovups   512(%rsp), %zmm23
        vmovups   448(%rsp), %zmm24
        vmovups   384(%rsp), %zmm25
        vmovups   320(%rsp), %zmm26
        vmovups   256(%rsp), %zmm27
        vmovups   192(%rsp), %zmm28
        vmovups   128(%rsp), %zmm29
        vmovups   64(%rsp), %zmm30
        vmovups   (%rsp), %zmm31
        vmovups   1216(%rsp), %zmm1
        movq      1064(%rsp), %rsi
        movq      1056(%rsp), %rdi
        movq      1096(%rsp), %r12
        movq      1088(%rsp), %r13
        movq      1080(%rsp), %r14
        movq      1072(%rsp), %r15

        vmovsd    1160(%rsp,%r15), %xmm0
        vzeroupper
        vmovsd    1160(%rsp,%r15), %xmm0
        call      JUMPTARGET(__exp_finite)
        vmovsd    %xmm0, 1224(%rsp,%r15)

        vmovsd    1152(%rsp,%r15), %xmm0
        vzeroupper
        vmovsd    1152(%rsp,%r15), %xmm0
        call      JUMPTARGET(__exp_finite)
        vmovsd    %xmm0, 1216(%rsp,%r15)
#endif
END (_ZGVeN8v_exp_skx)