1 /* Function sincosf vectorized with AVX-512. KNL and SKX versions.
2 Copyright (C) 2014-2016 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <http://www.gnu.org/licenses/>. */
20 #include "svml_s_trig_data.h"
21 #include "svml_s_wrapper_impl.h"
24 /* ALGORITHM DESCRIPTION:
26 1) Range reduction to [-Pi/4; +Pi/4] interval
27 a) Grab sign from source argument and save it.
28 b) Remove sign using AND operation
29 c) Getting octant Y by 2/Pi multiplication
30 d) Add "Right Shifter" value
31 e) Treat obtained value as integer S for destination sign setting.
32 SS = ((S-S&1)&2)<<30; For sin part
33 SC = ((S+S&1)&2)<<30; For cos part
34 f) Change destination sign if source sign is negative
36 g) Subtract "Right Shifter" (0x4B000000) value
37 h) Subtract Y*(PI/2) from X argument, where PI/2 divided to 4 parts:
38 X = X - Y*PI1 - Y*PI2 - Y*PI3 - Y*PI4;
39 2) Polynomial (minimax for sin within [-Pi/4; +Pi/4] interval)
40 a) Calculate X^2 = X * X
41 b) Calculate 2 polynomials for sin and cos:
42 RS = X * ( A0 + X^2 * (A1 + x^2 * (A2 + x^2 * (A3))));
43 RC = B0 + X^2 * (B1 + x^2 * (B2 + x^2 * (B3 + x^2 * (B4))));
44 c) Swap RS & RC if first bit of obtained value after
45 Right Shifting is set to 1. Using And, Andnot & Or operations.
46 3) Destination sign setting
47 a) Set shifted destination sign using XOR operation:
49 R2 = XOR( RC, SC ). */
52 ENTRY (_ZGVeN16vvv_sincosf_knl)
53 #ifndef HAVE_AVX512_ASM_SUPPORT
54 WRAPPER_IMPL_AVX512_fFF _ZGVdN8vvv_sincosf
57 cfi_adjust_cfa_offset (8)
58 cfi_rel_offset (%rbp, 0)
60 cfi_def_cfa_register (%rbp)
63 movq __svml_s_trig_data@GOTPCREL(%rip), %rax
66 vmovups __sAbsMask(%rax), %zmm0
67 vmovups __sInvPI(%rax), %zmm3
69 /* Absolute argument computation */
70 vpandd %zmm0, %zmm2, %zmm1
71 vmovups __sPI1_FMA(%rax), %zmm5
72 vmovups __sSignMask(%rax), %zmm9
73 vpandnd %zmm2, %zmm0, %zmm0
75 /* h) Subtract Y*(PI/2) from X argument, where PI/2 divided to 3 parts:
76 X = X - Y*PI1 - Y*PI2 - Y*PI3 */
80 /* c) Getting octant Y by 2/Pi multiplication
81 d) Add "Right Shifter" value */
82 vfmadd213ps __sRShifter(%rax), %zmm1, %zmm3
83 vmovups __sPI3_FMA(%rax), %zmm7
85 /* g) Subtract "Right Shifter" (0x4B000000) value */
86 vsubps __sRShifter(%rax), %zmm3, %zmm12
88 /* e) Treat obtained value as integer S for destination sign setting */
89 vpslld $31, %zmm3, %zmm13
90 vmovups __sA7_FMA(%rax), %zmm14
91 vfnmadd231ps %zmm12, %zmm5, %zmm6
93 /* 2) Polynomial (minimax for sin within [-Pi/4; +Pi/4] interval)
94 a) Calculate X^2 = X * X
95 b) Calculate 2 polynomials for sin and cos:
96 RS = X * ( A0 + X^2 * (A1 + x^2 * (A2 + x^2 * (A3))));
97 RC = B0 + X^2 * (B1 + x^2 * (B2 + x^2 * (B3 + x^2 * (B4)))) */
98 vmovaps %zmm14, %zmm15
99 vmovups __sA9_FMA(%rax), %zmm3
100 vcmpps $22, __sRangeReductionVal(%rax), %zmm1, %k1
101 vpbroadcastd %edx, %zmm1{%k1}{z}
102 vfnmadd231ps __sPI2_FMA(%rax), %zmm12, %zmm6
103 vptestmd %zmm1, %zmm1, %k0
104 vpandd %zmm6, %zmm9, %zmm11
106 vpxord __sOneHalf(%rax), %zmm11, %zmm4
108 /* Result sign calculations */
109 vpternlogd $150, %zmm13, %zmm9, %zmm11
111 /* Add correction term 0.5 for cos() part */
112 vaddps %zmm4, %zmm12, %zmm10
113 vfnmadd213ps %zmm6, %zmm7, %zmm12
114 vfnmadd231ps %zmm10, %zmm5, %zmm8
115 vpxord %zmm13, %zmm12, %zmm13
116 vmulps %zmm13, %zmm13, %zmm12
117 vfnmadd231ps __sPI2_FMA(%rax), %zmm10, %zmm8
118 vfmadd231ps __sA9_FMA(%rax), %zmm12, %zmm15
119 vfnmadd213ps %zmm8, %zmm7, %zmm10
120 vfmadd213ps __sA5_FMA(%rax), %zmm12, %zmm15
121 vpxord %zmm11, %zmm10, %zmm5
122 vmulps %zmm5, %zmm5, %zmm4
123 vfmadd213ps __sA3(%rax), %zmm12, %zmm15
124 vfmadd213ps %zmm14, %zmm4, %zmm3
125 vmulps %zmm12, %zmm15, %zmm14
126 vfmadd213ps __sA5_FMA(%rax), %zmm4, %zmm3
127 vfmadd213ps %zmm13, %zmm13, %zmm14
128 vfmadd213ps __sA3(%rax), %zmm4, %zmm3
129 vpxord %zmm0, %zmm14, %zmm0
130 vmulps %zmm4, %zmm3, %zmm3
131 vfmadd213ps %zmm5, %zmm5, %zmm3
137 vmovups %zmm0, (%rdi)
138 vmovups %zmm3, (%rsi)
140 cfi_def_cfa_register (%rsp)
142 cfi_adjust_cfa_offset (-8)
148 vmovups %zmm2, 1152(%rsp)
149 vmovups %zmm0, 1216(%rsp)
150 vmovups %zmm3, 1280(%rsp)
154 kmovw %k4, 1048(%rsp)
156 kmovw %k5, 1040(%rsp)
157 kmovw %k6, 1032(%rsp)
158 kmovw %k7, 1024(%rsp)
159 vmovups %zmm16, 960(%rsp)
160 vmovups %zmm17, 896(%rsp)
161 vmovups %zmm18, 832(%rsp)
162 vmovups %zmm19, 768(%rsp)
163 vmovups %zmm20, 704(%rsp)
164 vmovups %zmm21, 640(%rsp)
165 vmovups %zmm22, 576(%rsp)
166 vmovups %zmm23, 512(%rsp)
167 vmovups %zmm24, 448(%rsp)
168 vmovups %zmm25, 384(%rsp)
169 vmovups %zmm26, 320(%rsp)
170 vmovups %zmm27, 256(%rsp)
171 vmovups %zmm28, 192(%rsp)
172 vmovups %zmm29, 128(%rsp)
173 vmovups %zmm30, 64(%rsp)
174 vmovups %zmm31, (%rsp)
175 movq %rsi, 1056(%rsp)
176 movq %r12, 1096(%rsp)
177 cfi_offset_rel_rsp (12, 1096)
179 movq %r13, 1088(%rsp)
180 cfi_offset_rel_rsp (13, 1088)
182 movq %r14, 1080(%rsp)
183 cfi_offset_rel_rsp (14, 1080)
185 movq %r15, 1072(%rsp)
186 cfi_offset_rel_rsp (15, 1072)
187 movq %rbx, 1064(%rsp)
207 kmovw 1048(%rsp), %k4
208 movq 1056(%rsp), %rsi
209 kmovw 1040(%rsp), %k5
210 movq 1096(%rsp), %r12
212 kmovw 1032(%rsp), %k6
213 movq 1088(%rsp), %r13
215 kmovw 1024(%rsp), %k7
216 vmovups 960(%rsp), %zmm16
217 vmovups 896(%rsp), %zmm17
218 vmovups 832(%rsp), %zmm18
219 vmovups 768(%rsp), %zmm19
220 vmovups 704(%rsp), %zmm20
221 vmovups 640(%rsp), %zmm21
222 vmovups 576(%rsp), %zmm22
223 vmovups 512(%rsp), %zmm23
224 vmovups 448(%rsp), %zmm24
225 vmovups 384(%rsp), %zmm25
226 vmovups 320(%rsp), %zmm26
227 vmovups 256(%rsp), %zmm27
228 vmovups 192(%rsp), %zmm28
229 vmovups 128(%rsp), %zmm29
230 vmovups 64(%rsp), %zmm30
231 vmovups (%rsp), %zmm31
232 movq 1080(%rsp), %r14
234 movq 1072(%rsp), %r15
236 movq 1064(%rsp), %rbx
237 vmovups 1216(%rsp), %zmm0
238 vmovups 1280(%rsp), %zmm3
244 vmovss 1156(%rsp,%r15,8), %xmm0
248 vmovss %xmm0, 1220(%rsp,%r15,8)
249 vmovss 1156(%rsp,%r15,8), %xmm0
253 vmovss %xmm0, 1284(%rsp,%r15,8)
258 vmovss 1152(%rsp,%r15,8), %xmm0
262 vmovss %xmm0, 1216(%rsp,%r15,8)
263 vmovss 1152(%rsp,%r15,8), %xmm0
267 vmovss %xmm0, 1280(%rsp,%r15,8)
270 END (_ZGVeN16vvv_sincosf_knl)
272 ENTRY (_ZGVeN16vvv_sincosf_skx)
273 #ifndef HAVE_AVX512_ASM_SUPPORT
274 WRAPPER_IMPL_AVX512_fFF _ZGVdN8vvv_sincosf
277 cfi_adjust_cfa_offset (8)
278 cfi_rel_offset (%rbp, 0)
280 cfi_def_cfa_register (%rbp)
283 movq __svml_s_trig_data@GOTPCREL(%rip), %rax
285 vmovups __sAbsMask(%rax), %zmm3
286 vmovups __sInvPI(%rax), %zmm5
287 vmovups __sRShifter(%rax), %zmm6
288 vmovups __sPI1_FMA(%rax), %zmm9
289 vmovups __sPI2_FMA(%rax), %zmm10
290 vmovups __sSignMask(%rax), %zmm14
291 vmovups __sOneHalf(%rax), %zmm7
292 vmovups __sPI3_FMA(%rax), %zmm12
294 /* Absolute argument computation */
295 vandps %zmm3, %zmm4, %zmm2
297 /* c) Getting octant Y by 2/Pi multiplication
298 d) Add "Right Shifter" value */
299 vfmadd213ps %zmm6, %zmm2, %zmm5
300 vcmpps $18, __sRangeReductionVal(%rax), %zmm2, %k1
302 /* e) Treat obtained value as integer S for destination sign setting */
303 vpslld $31, %zmm5, %zmm0
305 /* g) Subtract "Right Shifter" (0x4B000000) value */
306 vsubps %zmm6, %zmm5, %zmm5
307 vmovups __sA3(%rax), %zmm6
309 /* h) Subtract Y*(PI/2) from X argument, where PI/2 divided to 3 parts:
310 X = X - Y*PI1 - Y*PI2 - Y*PI3 */
311 vmovaps %zmm2, %zmm11
312 vfnmadd231ps %zmm5, %zmm9, %zmm11
313 vfnmadd231ps %zmm5, %zmm10, %zmm11
314 vandps %zmm11, %zmm14, %zmm1
315 vxorps %zmm1, %zmm7, %zmm8
317 /* Result sign calculations */
318 vpternlogd $150, %zmm0, %zmm14, %zmm1
319 vmovups .L_2il0floatpacket.13(%rip), %zmm14
321 /* Add correction term 0.5 for cos() part */
322 vaddps %zmm8, %zmm5, %zmm15
323 vfnmadd213ps %zmm11, %zmm12, %zmm5
324 vandnps %zmm4, %zmm3, %zmm11
325 vmovups __sA7_FMA(%rax), %zmm3
326 vmovaps %zmm2, %zmm13
327 vfnmadd231ps %zmm15, %zmm9, %zmm13
328 vxorps %zmm0, %zmm5, %zmm9
329 vmovups __sA5_FMA(%rax), %zmm0
330 vfnmadd231ps %zmm15, %zmm10, %zmm13
331 vmulps %zmm9, %zmm9, %zmm8
332 vfnmadd213ps %zmm13, %zmm12, %zmm15
333 vmovups __sA9_FMA(%rax), %zmm12
334 vxorps %zmm1, %zmm15, %zmm1
335 vmulps %zmm1, %zmm1, %zmm13
337 /* 2) Polynomial (minimax for sin within [-Pi/4; +Pi/4] interval)
338 a) Calculate X^2 = X * X
339 b) Calculate 2 polynomials for sin and cos:
340 RS = X * ( A0 + X^2 * (A1 + x^2 * (A2 + x^2 * (A3))));
341 RC = B0 + X^2 * (B1 + x^2 * (B2 + x^2 * (B3 + x^2 * (B4)))) */
342 vmovaps %zmm12, %zmm7
343 vfmadd213ps %zmm3, %zmm8, %zmm7
344 vfmadd213ps %zmm3, %zmm13, %zmm12
345 vfmadd213ps %zmm0, %zmm8, %zmm7
346 vfmadd213ps %zmm0, %zmm13, %zmm12
347 vfmadd213ps %zmm6, %zmm8, %zmm7
348 vfmadd213ps %zmm6, %zmm13, %zmm12
349 vmulps %zmm8, %zmm7, %zmm10
350 vmulps %zmm13, %zmm12, %zmm3
351 vfmadd213ps %zmm9, %zmm9, %zmm10
352 vfmadd213ps %zmm1, %zmm1, %zmm3
353 vxorps %zmm11, %zmm10, %zmm0
354 vpandnd %zmm2, %zmm2, %zmm14{%k1}
355 vptestmd %zmm14, %zmm14, %k0
362 vmovups %zmm0, (%rdi)
363 vmovups %zmm3, (%rsi)
365 cfi_def_cfa_register (%rsp)
367 cfi_adjust_cfa_offset (-8)
373 vmovups %zmm4, 1152(%rsp)
374 vmovups %zmm0, 1216(%rsp)
375 vmovups %zmm3, 1280(%rsp)
380 kmovw %k4, 1048(%rsp)
381 kmovw %k5, 1040(%rsp)
382 kmovw %k6, 1032(%rsp)
383 kmovw %k7, 1024(%rsp)
384 vmovups %zmm16, 960(%rsp)
385 vmovups %zmm17, 896(%rsp)
386 vmovups %zmm18, 832(%rsp)
387 vmovups %zmm19, 768(%rsp)
388 vmovups %zmm20, 704(%rsp)
389 vmovups %zmm21, 640(%rsp)
390 vmovups %zmm22, 576(%rsp)
391 vmovups %zmm23, 512(%rsp)
392 vmovups %zmm24, 448(%rsp)
393 vmovups %zmm25, 384(%rsp)
394 vmovups %zmm26, 320(%rsp)
395 vmovups %zmm27, 256(%rsp)
396 vmovups %zmm28, 192(%rsp)
397 vmovups %zmm29, 128(%rsp)
398 vmovups %zmm30, 64(%rsp)
399 vmovups %zmm31, (%rsp)
400 movq %rsi, 1056(%rsp)
401 movq %r12, 1096(%rsp)
402 cfi_offset_rel_rsp (12, 1096)
404 movq %r13, 1088(%rsp)
405 cfi_offset_rel_rsp (13, 1088)
407 movq %r14, 1080(%rsp)
408 cfi_offset_rel_rsp (14, 1080)
410 movq %r15, 1072(%rsp)
411 cfi_offset_rel_rsp (15, 1072)
412 movq %rbx, 1064(%rsp)
431 kmovw 1048(%rsp), %k4
433 kmovw 1040(%rsp), %k5
434 kmovw 1032(%rsp), %k6
435 kmovw 1024(%rsp), %k7
436 vmovups 960(%rsp), %zmm16
437 vmovups 896(%rsp), %zmm17
438 vmovups 832(%rsp), %zmm18
439 vmovups 768(%rsp), %zmm19
440 vmovups 704(%rsp), %zmm20
441 vmovups 640(%rsp), %zmm21
442 vmovups 576(%rsp), %zmm22
443 vmovups 512(%rsp), %zmm23
444 vmovups 448(%rsp), %zmm24
445 vmovups 384(%rsp), %zmm25
446 vmovups 320(%rsp), %zmm26
447 vmovups 256(%rsp), %zmm27
448 vmovups 192(%rsp), %zmm28
449 vmovups 128(%rsp), %zmm29
450 vmovups 64(%rsp), %zmm30
451 vmovups (%rsp), %zmm31
452 vmovups 1216(%rsp), %zmm0
453 vmovups 1280(%rsp), %zmm3
454 movq 1056(%rsp), %rsi
455 movq 1096(%rsp), %r12
457 movq 1088(%rsp), %r13
459 movq 1080(%rsp), %r14
461 movq 1072(%rsp), %r15
463 movq 1064(%rsp), %rbx
469 vmovss 1156(%rsp,%r15,8), %xmm0
471 vmovss 1156(%rsp,%r15,8), %xmm0
475 vmovss %xmm0, 1220(%rsp,%r15,8)
476 vmovss 1156(%rsp,%r15,8), %xmm0
480 vmovss %xmm0, 1284(%rsp,%r15,8)
485 vmovss 1152(%rsp,%r15,8), %xmm0
487 vmovss 1152(%rsp,%r15,8), %xmm0
491 vmovss %xmm0, 1216(%rsp,%r15,8)
492 vmovss 1152(%rsp,%r15,8), %xmm0
496 vmovss %xmm0, 1280(%rsp,%r15,8)
499 END (_ZGVeN16vvv_sincosf_skx)
501 .section .rodata, "a"
502 .L_2il0floatpacket.13:
503 .long 0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff
504 .type .L_2il0floatpacket.13,@object