/* Function sincos vectorized with AVX-512. KNL and SKX versions.
   Copyright (C) 2014-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include "svml_d_trig_data.h"
#include "svml_d_wrapper_impl.h"
/*
   ALGORITHM DESCRIPTION:

     ( low accuracy ( < 4ulp ) or enhanced performance
      ( half of correct mantissa ) implementation )

     Argument representation:
     arg = N*Pi + R

     Result calculation:
     sin(arg) = sin(N*Pi + R) = (-1)^N * sin(R)
     arg + Pi/2 = (N'*Pi + R')
     cos(arg) = sin(arg+Pi/2) = sin(N'*Pi + R') = (-1)^N' * sin(R')
     sin(R), sin(R') are approximated by corresponding polynomial.  */
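
/* For orientation only: a rough scalar C sketch of the same main-path
   scheme (right-shifter reduction, (-1)^N sign handling, Horner
   polynomial).  It is not part of this implementation; poly_sin and
   sincos_sketch are illustrative names, the coefficients below are plain
   Taylor terms standing in for the tuned table __dC1..__dC7, and a single
   fma against M_PI stands in for the three-part split Pi
   (__dPI1_FMA..__dPI3_FMA).  Large arguments (|x| > __dRangeVal) are not
   handled here; the code below sends such lanes to a scalar fallback.

     #include <math.h>

     // Illustrative coefficients of sin(r) ~= r + C1*r^3 + ... + C7*r^15.
     static const double C[7] = {
       -1.0 / 6, 1.0 / 120, -1.0 / 5040, 1.0 / 362880,
       -1.0 / 39916800, 1.0 / 6227020800.0, -1.0 / 1307674368000.0
     };

     // Horner evaluation, mirroring the vfmadd213pd chains below:
     // sin(r) ~= r + r*r2*(C1 + r2*(C2 + ... + r2*C7)).
     static double poly_sin (double r)
     {
       double r2 = r * r;
       double p = C[6];
       for (int i = 5; i >= 0; i--)
         p = p * r2 + C[i];
       return r * r2 * p + r;
     }

     static void sincos_sketch (double x, double *s, double *c)
     {
       const double shifter = 0x1.8p52;        // right-shifter constant
       double ax = fabs (x);                   // X' = |X|
       double y  = fma (ax, 1.0 / M_PI, shifter);
       double n  = y - shifter;                // SinN = round(X'/Pi)
       double r  = fma (-n, M_PI, ax);         // SinR
       double nc = n + copysign (0.5, r);      // CosN = SinN +/- 0.5
       double rc = fma (-nc, M_PI, ax);        // CosR
       long long ni = (long long) n;
       double sr = poly_sin (r), cr = poly_sin (rc);
       if (ni & 1)  sr = -sr;                  // sin(|x|) = (-1)^N*sin(R)
       if (x < 0.0) sr = -sr;                  // sin is odd
       if (((ni & 1) != 0) ^ (r >= 0.0))       // cos sign = (-1)^N'
         cr = -cr;
       *s = sr;
       *c = cr;
     }
 */
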
ENTRY (_ZGVeN8vl8l8_sincos_knl)
        cfi_adjust_cfa_offset (8)
        cfi_rel_offset (%rbp, 0)
        cfi_def_cfa_register (%rbp)
        movq __svml_d_trig_data@GOTPCREL(%rip), %rax
        vmovups __dSignMask(%rax), %zmm12
        vmovups __dInvPI(%rax), %zmm5

/* ARGUMENT RANGE REDUCTION:
   Absolute argument: X' = |X| */
        vpandnq %zmm4, %zmm12, %zmm3
        vmovups __dPI1_FMA(%rax), %zmm7
        vmovups __dPI3_FMA(%rax), %zmm9

/* SinR = X' - SinN*Pi1 */
/* CosR = X' - CosN*Pi1 */
/* SinY = X'*InvPi + RS : right shifter add */
        vfmadd213pd __dRShifter(%rax), %zmm3, %zmm5
        vmovups __dC6(%rax), %zmm13

/* SinN = Y - RS : right shifter sub */
        vsubpd __dRShifter(%rax), %zmm5, %zmm1
        vmovaps %zmm13, %zmm14

/* SinSignRes = Y<<63 : shift LSB to MSB place for result sign */
        vpsllq $63, %zmm5, %zmm2
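
/* Explanatory note on the right-shifter trick used just above: the usual
   shifter constant (__dRShifter is typically 2^52 + 2^51 = 0x1.8p52) puts
   Y = X'/Pi + RS in a binade where the spacing between doubles is exactly
   1, so Y's low mantissa bits hold round(X'/Pi).  Subtracting RS back out
   yields that integer as a double (SinN), and bit 0 of Y is the parity of
   N, which vpsllq $63 moves into the sign-bit position (SinSignRes).
   E.g. X' = 10.0: X'/Pi ~= 3.183, so SinN = 3 and the stored low bit 1
   supplies the (-1)^3 flip for sin(10) = -sin(10 - 3*Pi).  */
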
        vcmppd $22, __dRangeVal(%rax), %zmm3, %k1

/* Update CosRSign and CosSignRes signs */
        vfnmadd231pd %zmm1, %zmm7, %zmm8
/* SinR = SinR - SinN*Pi2 */
        vfnmadd231pd __dPI2_FMA(%rax), %zmm1, %zmm8

/* Sine result sign: SinRSign = SignMask & SinR */
        vpandq %zmm8, %zmm12, %zmm11
/* Apply SinRSign to 0.5: +/-0.5 with the sign of SinR */
        vporq __dOneHalf(%rax), %zmm11, %zmm6
        vpternlogq $150, %zmm2, %zmm11, %zmm5

/* Update sign SinSignRes */
        vpternlogq $120, %zmm4, %zmm12, %zmm2
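
/* Explanatory note on the two vpternlogq immediates: bit
   ((dst<<2)|(src1<<1)|src2) of the immediate is the truth table, so
   $150 = 0x96 is the three-way XOR dst^src1^src2 (used to build the
   cosine sign word), and $120 = 0x78 is dst ^ (src1 & src2), i.e. here
   SinSignRes ^= (SignMask & X), folding the sign of the original argument
   into the sine result sign.  */
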
/* Polynomial approximation */
        vmovups __dC7(%rax), %zmm11

/* CosN = SinN +(-)0.5 */
        vaddpd %zmm6, %zmm1, %zmm0

/* SinR = SinR - SinN*Pi3 */
        vfnmadd213pd %zmm8, %zmm9, %zmm1
        vfnmadd231pd %zmm0, %zmm7, %zmm10
/* SinR2 = SinR^2 */
        vmulpd %zmm1, %zmm1, %zmm15

/* CosR = CosR - CosN*Pi2 */
        vfnmadd231pd __dPI2_FMA(%rax), %zmm0, %zmm10
        vfmadd231pd __dC7(%rax), %zmm15, %zmm14

/* CosR = CosR - CosN*Pi3 */
        vfnmadd213pd %zmm10, %zmm9, %zmm0
        vfmadd213pd __dC5(%rax), %zmm15, %zmm14
/* CosR2 = CosR^2 */
        vmulpd %zmm0, %zmm0, %zmm12
        vfmadd213pd __dC4(%rax), %zmm15, %zmm14
        vfmadd213pd %zmm13, %zmm12, %zmm11

/* SinPoly = C3 + SinR2*(C4 + SinR2*(C5 + SinR2*(C6 + SinR2*C7))) */
        vfmadd213pd __dC3(%rax), %zmm15, %zmm14
        vfmadd213pd __dC5(%rax), %zmm12, %zmm11

/* SinPoly = C2 + SinR2*SinPoly */
        vfmadd213pd __dC2(%rax), %zmm15, %zmm14
        vfmadd213pd __dC4(%rax), %zmm12, %zmm11

/* SinPoly = C1 + SinR2*SinPoly */
        vfmadd213pd __dC1(%rax), %zmm15, %zmm14

/* CosPoly = C3 + CosR2*(C4 + CosR2*(C5 + CosR2*(C6 + CosR2*C7))) */
        vfmadd213pd __dC3(%rax), %zmm12, %zmm11

/* SinPoly = SinR2*SinPoly */
        vmulpd %zmm15, %zmm14, %zmm13

/* CosPoly = C2 + CosR2*CosPoly */
        vfmadd213pd __dC2(%rax), %zmm12, %zmm11
/* SinPoly = SinR + SinR*SinPoly */
        vfmadd213pd %zmm1, %zmm1, %zmm13
        vpbroadcastq %rdx, %zmm1{%k1}{z}

/* CosPoly = C1 + CosR2*CosPoly */
        vfmadd213pd __dC1(%rax), %zmm12, %zmm11
        vptestmq %zmm1, %zmm1, %k0
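
/* Explanatory note: the vcmppd $22 (NLE_UQ) above flagged in %k1 the lanes
   whose |x| exceeds __dRangeVal or is NaN; the masked vpbroadcastq from
   %rdx and the vptestmq turn that lane mask into %k0 so the branch (not
   shown here) can send those lanes to the scalar fallback processed one
   element at a time further down.  */
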
/* CosPoly = CosR2*CosPoly */
        vmulpd %zmm12, %zmm11, %zmm14
/* CosPoly = CosR + CosR*CosPoly */
        vfmadd213pd %zmm0, %zmm0, %zmm14

/* Final reconstruction.
   Update Sin result's sign */
        vpxorq %zmm2, %zmm13, %zmm0

/* Update Cos result's sign */
        vpxorq %zmm5, %zmm14, %zmm2
        vmovups %zmm0, (%rdi)
        vmovups %zmm2, (%rsi)
        cfi_def_cfa_register (%rsp)
        cfi_adjust_cfa_offset (-8)

        vmovups %zmm4, 1152(%rsp)
        vmovups %zmm0, 1216(%rsp)
        vmovups %zmm2, 1280(%rsp)
        kmovw %k4, 1048(%rsp)
        kmovw %k5, 1040(%rsp)
        kmovw %k6, 1032(%rsp)
        kmovw %k7, 1024(%rsp)
        vmovups %zmm16, 960(%rsp)
        vmovups %zmm17, 896(%rsp)
        vmovups %zmm18, 832(%rsp)
        vmovups %zmm19, 768(%rsp)
        vmovups %zmm20, 704(%rsp)
        vmovups %zmm21, 640(%rsp)
        vmovups %zmm22, 576(%rsp)
        vmovups %zmm23, 512(%rsp)
        vmovups %zmm24, 448(%rsp)
        vmovups %zmm25, 384(%rsp)
        vmovups %zmm26, 320(%rsp)
        vmovups %zmm27, 256(%rsp)
        vmovups %zmm28, 192(%rsp)
        vmovups %zmm29, 128(%rsp)
        vmovups %zmm30, 64(%rsp)
        vmovups %zmm31, (%rsp)
        movq %rsi, 1056(%rsp)
        movq %r12, 1096(%rsp)
        cfi_offset_rel_rsp (12, 1096)
        movq %r13, 1088(%rsp)
        cfi_offset_rel_rsp (13, 1088)
        movq %r14, 1080(%rsp)
        cfi_offset_rel_rsp (14, 1080)
        movq %r15, 1072(%rsp)
        cfi_offset_rel_rsp (15, 1072)
        movq %rbx, 1064(%rsp)

        kmovw 1048(%rsp), %k4
        movq 1056(%rsp), %rsi
        kmovw 1040(%rsp), %k5
        movq 1096(%rsp), %r12
        kmovw 1032(%rsp), %k6
        movq 1088(%rsp), %r13
        kmovw 1024(%rsp), %k7
        vmovups 960(%rsp), %zmm16
        vmovups 896(%rsp), %zmm17
        vmovups 832(%rsp), %zmm18
        vmovups 768(%rsp), %zmm19
        vmovups 704(%rsp), %zmm20
        vmovups 640(%rsp), %zmm21
        vmovups 576(%rsp), %zmm22
        vmovups 512(%rsp), %zmm23
        vmovups 448(%rsp), %zmm24
        vmovups 384(%rsp), %zmm25
        vmovups 320(%rsp), %zmm26
        vmovups 256(%rsp), %zmm27
        vmovups 192(%rsp), %zmm28
        vmovups 128(%rsp), %zmm29
        vmovups 64(%rsp), %zmm30
        vmovups (%rsp), %zmm31
        movq 1080(%rsp), %r14
        movq 1072(%rsp), %r15
        movq 1064(%rsp), %rbx
        vmovups 1216(%rsp), %zmm0
        vmovups 1280(%rsp), %zmm2

        vmovsd 1160(%rsp,%r15), %xmm0
        vmovsd %xmm0, 1224(%rsp,%r15)
        vmovsd 1160(%rsp,%r15), %xmm0
        vmovsd %xmm0, 1288(%rsp,%r15)
        vmovsd 1152(%rsp,%r15), %xmm0
        vmovsd %xmm0, 1216(%rsp,%r15)
        vmovsd 1152(%rsp,%r15), %xmm0
        vmovsd %xmm0, 1280(%rsp,%r15)
END (_ZGVeN8vl8l8_sincos_knl)
libmvec_hidden_def(_ZGVeN8vl8l8_sincos_knl)
ENTRY (_ZGVeN8vl8l8_sincos_skx)
        cfi_adjust_cfa_offset (8)
        cfi_rel_offset (%rbp, 0)
        cfi_def_cfa_register (%rbp)
        movq __svml_d_trig_data@GOTPCREL(%rip), %rax
        vmovups __dSignMask(%rax), %zmm4
        vmovups __dInvPI(%rax), %zmm9
        vmovups __dRShifter(%rax), %zmm10
        vmovups __dPI1_FMA(%rax), %zmm13
        vmovups __dPI2_FMA(%rax), %zmm14
        vmovups __dOneHalf(%rax), %zmm11
        vmovups __dPI3_FMA(%rax), %zmm2

/* ARGUMENT RANGE REDUCTION:
   Absolute argument: X' = |X| */
        vandnpd %zmm8, %zmm4, %zmm7

/* SinY = X'*InvPi + RS : right shifter add */
        vfmadd213pd %zmm10, %zmm7, %zmm9
        vcmppd $18, __dRangeVal(%rax), %zmm7, %k1

/* SinSignRes = Y<<63 : shift LSB to MSB place for result sign */
        vpsllq $63, %zmm9, %zmm6

/* SinN = Y - RS : right shifter sub */
        vsubpd %zmm10, %zmm9, %zmm5
        vmovups __dC5(%rax), %zmm9
        vmovups __dC4(%rax), %zmm10

/* SinR = X' - SinN*Pi1 */
        vmovaps %zmm7, %zmm15
        vfnmadd231pd %zmm5, %zmm13, %zmm15
/* SinR = SinR - SinN*Pi2 */
        vfnmadd231pd %zmm5, %zmm14, %zmm15

/* Sine result sign: SinRSign = SignMask & SinR */
        vandpd %zmm15, %zmm4, %zmm1
/* Apply SinRSign to 0.5: +/-0.5 with the sign of SinR */
        vorpd %zmm1, %zmm11, %zmm12
        vmovups __dC3(%rax), %zmm11

/* CosN = SinN +(-)0.5 */
        vaddpd %zmm12, %zmm5, %zmm3

/* SinR = SinR - SinN*Pi3 */
        vfnmadd213pd %zmm15, %zmm2, %zmm5
        vmovups __dC2(%rax), %zmm12
/* SinR2 = SinR^2 */
        vmulpd %zmm5, %zmm5, %zmm15
/* CosR = X' - CosN*Pi1 */
        vfnmadd231pd %zmm3, %zmm13, %zmm0
        vmovups __dC1(%rax), %zmm13
/* CosR = CosR - CosN*Pi2 */
        vfnmadd231pd %zmm3, %zmm14, %zmm0

/* CosR = CosR - CosN*Pi3 */
        vfnmadd213pd %zmm0, %zmm2, %zmm3

/* Polynomial approximation */
        vmovups __dC7(%rax), %zmm0

/* Update CosRSign and CosSignRes signs */
        vpternlogq $150, %zmm6, %zmm1, %zmm2

/* Update sign SinSignRes */
        vpternlogq $120, %zmm8, %zmm4, %zmm6
/* CosR2 = CosR^2 */
        vmulpd %zmm3, %zmm3, %zmm1
        vmovups __dC6(%rax), %zmm4
        vmovaps %zmm0, %zmm14
        vfmadd213pd %zmm4, %zmm1, %zmm0
        vfmadd213pd %zmm4, %zmm15, %zmm14
        vfmadd213pd %zmm9, %zmm1, %zmm0
        vfmadd213pd %zmm9, %zmm15, %zmm14
        vfmadd213pd %zmm10, %zmm1, %zmm0
        vfmadd213pd %zmm10, %zmm15, %zmm14

/* CosPoly = C3 + CosR2*(C4 + CosR2*(C5 + CosR2*(C6 + CosR2*C7))) */
        vfmadd213pd %zmm11, %zmm1, %zmm0

/* SinPoly = C3 + SinR2*(C4 + SinR2*(C5 + SinR2*(C6 + SinR2*C7))) */
        vfmadd213pd %zmm11, %zmm15, %zmm14

/* CosPoly = C2 + CosR2*CosPoly */
        vfmadd213pd %zmm12, %zmm1, %zmm0

/* SinPoly = C2 + SinR2*SinPoly */
        vfmadd213pd %zmm12, %zmm15, %zmm14

/* CosPoly = C1 + CosR2*CosPoly */
        vfmadd213pd %zmm13, %zmm1, %zmm0

/* SinPoly = C1 + SinR2*SinPoly */
        vfmadd213pd %zmm13, %zmm15, %zmm14

/* CosPoly = CosR2*CosPoly */
        vmulpd %zmm1, %zmm0, %zmm1

/* SinPoly = SinR2*SinPoly */
        vmulpd %zmm15, %zmm14, %zmm4
/* CosPoly = CosR + CosR*CosPoly */
        vfmadd213pd %zmm3, %zmm3, %zmm1
/* SinPoly = SinR + SinR*SinPoly */
        vfmadd213pd %zmm5, %zmm5, %zmm4
        vpternlogd $0xff, %zmm3, %zmm3, %zmm3

/* Update Cos result's sign */
        vxorpd %zmm2, %zmm1, %zmm1

/* Final reconstruction.
   Update Sin result's sign */
        vxorpd %zmm6, %zmm4, %zmm0
        vpandnq %zmm7, %zmm7, %zmm3{%k1}
        vcmppd $3, %zmm3, %zmm3, %k0
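
/* Explanatory note: the vpternlogd $0xff, the masked vpandnq and the
   vcmppd $3 above implement special-case detection.  %zmm3 starts as
   all-ones, the lanes selected by %k1 (those with |x| <= __dRangeVal,
   from the earlier vcmppd $18) are overwritten with zero, and the
   unordered self-compare then sets %k0 exactly for the remaining all-ones
   lanes, whose bit pattern is a NaN; those lanes are redone by the scalar
   fallback path below.  */
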
        vmovups %zmm0, (%rdi)
        vmovups %zmm1, (%rsi)
        cfi_def_cfa_register (%rsp)
        cfi_adjust_cfa_offset (-8)

        vmovups %zmm8, 1152(%rsp)
        vmovups %zmm0, 1216(%rsp)
        vmovups %zmm1, 1280(%rsp)
        kmovw %k4, 1048(%rsp)
        kmovw %k5, 1040(%rsp)
        kmovw %k6, 1032(%rsp)
        kmovw %k7, 1024(%rsp)
        vmovups %zmm16, 960(%rsp)
        vmovups %zmm17, 896(%rsp)
        vmovups %zmm18, 832(%rsp)
        vmovups %zmm19, 768(%rsp)
        vmovups %zmm20, 704(%rsp)
        vmovups %zmm21, 640(%rsp)
        vmovups %zmm22, 576(%rsp)
        vmovups %zmm23, 512(%rsp)
        vmovups %zmm24, 448(%rsp)
        vmovups %zmm25, 384(%rsp)
        vmovups %zmm26, 320(%rsp)
        vmovups %zmm27, 256(%rsp)
        vmovups %zmm28, 192(%rsp)
        vmovups %zmm29, 128(%rsp)
        vmovups %zmm30, 64(%rsp)
        vmovups %zmm31, (%rsp)
        movq %rsi, 1056(%rsp)
        movq %r12, 1096(%rsp)
        cfi_offset_rel_rsp (12, 1096)
        movq %r13, 1088(%rsp)
        cfi_offset_rel_rsp (13, 1088)
        movq %r14, 1080(%rsp)
        cfi_offset_rel_rsp (14, 1080)
        movq %r15, 1072(%rsp)
        cfi_offset_rel_rsp (15, 1072)
        movq %rbx, 1064(%rsp)

        kmovw 1048(%rsp), %k4
        kmovw 1040(%rsp), %k5
        kmovw 1032(%rsp), %k6
        kmovw 1024(%rsp), %k7
        vmovups 960(%rsp), %zmm16
        vmovups 896(%rsp), %zmm17
        vmovups 832(%rsp), %zmm18
        vmovups 768(%rsp), %zmm19
        vmovups 704(%rsp), %zmm20
        vmovups 640(%rsp), %zmm21
        vmovups 576(%rsp), %zmm22
        vmovups 512(%rsp), %zmm23
        vmovups 448(%rsp), %zmm24
        vmovups 384(%rsp), %zmm25
        vmovups 320(%rsp), %zmm26
        vmovups 256(%rsp), %zmm27
        vmovups 192(%rsp), %zmm28
        vmovups 128(%rsp), %zmm29
        vmovups 64(%rsp), %zmm30
        vmovups (%rsp), %zmm31
        vmovups 1216(%rsp), %zmm0
        vmovups 1280(%rsp), %zmm1
        movq 1056(%rsp), %rsi
        movq 1096(%rsp), %r12
        movq 1088(%rsp), %r13
        movq 1080(%rsp), %r14
        movq 1072(%rsp), %r15
        movq 1064(%rsp), %rbx

        vmovsd 1160(%rsp,%r15), %xmm0
        vmovsd 1160(%rsp,%r15), %xmm0
        vmovsd %xmm0, 1224(%rsp,%r15)
        vmovsd 1160(%rsp,%r15), %xmm0
        vmovsd %xmm0, 1288(%rsp,%r15)
        vmovsd 1152(%rsp,%r15), %xmm0
        vmovsd 1152(%rsp,%r15), %xmm0
        vmovsd %xmm0, 1216(%rsp,%r15)
        vmovsd 1152(%rsp,%r15), %xmm0
        vmovsd %xmm0, 1280(%rsp,%r15)
END (_ZGVeN8vl8l8_sincos_skx)
libmvec_hidden_def(_ZGVeN8vl8l8_sincos_skx)
/* Wrapper between vvv and vl8l8 vector variants.  */
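
/* In rough C terms (sketch only; the prototypes and parameter names below
   are illustrative, using __m512d/__m512i from <immintrin.h>, not glibc's
   actual declarations): the vl8l8 kernels above behave like

     void kernel_vl8l8 (__m512d x, double sin_out[8], double cos_out[8]);

   with the two result arrays passed in %rdi and %rsi, while the vvv
   variant receives one pointer per lane packed into vector registers,

     void kernel_vvv (__m512d x, __m512i sin_ptrs, __m512i cos_ptrs);

   so the wrapper spills the two pointer vectors, calls the vl8l8 kernel
   on stack buffers, and copies each lane's sin and cos out through the
   corresponding pointers.  When pointers are 32-bit (x32), the eight
   pointers per argument fit in a ymm half, which is why the second half
   of the macro stores %ymm1/%ymm2 and uses 32-bit addressing.  */
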
.macro WRAPPER_AVX512_vvv_vl8l8 callee
        cfi_adjust_cfa_offset (8)
        cfi_rel_offset (%rbp, 0)
        cfi_def_cfa_register (%rbp)
        vmovups %zmm1, 128(%rsp)
        vmovups %zmm2, 192(%rdi)
        call HIDDEN_JUMPTARGET(\callee)
        cfi_def_cfa_register (%rsp)
        cfi_adjust_cfa_offset (-8)

        .cfi_escape 0x10,0x6,0x2,0x76,0
        .cfi_escape 0xf,0x3,0x76,0x78,0x6
        leal -112(%rbp), %esi
        leal -176(%rbp), %edi
        vmovdqa %ymm1, -208(%ebp)
        vmovdqa %ymm2, -240(%ebp)
        call HIDDEN_JUMPTARGET(\callee)
        vmovdqa -208(%ebp), %xmm0
        vmovsd -176(%ebp), %xmm0
        vmovsd -168(%ebp), %xmm0
        movq -200(%ebp), %rax
        vmovsd -160(%ebp), %xmm0
        vmovsd -152(%ebp), %xmm0
        movq -192(%ebp), %rax
        vmovsd -144(%ebp), %xmm0
        vmovsd -136(%ebp), %xmm0
        movq -184(%ebp), %rax
        vmovsd -128(%ebp), %xmm0
        vmovsd -120(%ebp), %xmm0
        vmovdqa -240(%ebp), %xmm0
        vmovsd -112(%ebp), %xmm0
        vmovsd -104(%ebp), %xmm0
        movq -232(%ebp), %rax
        vmovsd -96(%ebp), %xmm0
        vmovsd -88(%ebp), %xmm0
        movq -224(%ebp), %rax
        vmovsd -80(%ebp), %xmm0
        vmovsd -72(%ebp), %xmm0
        movq -216(%ebp), %rax
        vmovsd -64(%ebp), %xmm0
        vmovsd -56(%ebp), %xmm0
.endm

ENTRY (_ZGVeN8vvv_sincos_knl)
        WRAPPER_AVX512_vvv_vl8l8 _ZGVeN8vl8l8_sincos_knl
END (_ZGVeN8vvv_sincos_knl)

ENTRY (_ZGVeN8vvv_sincos_skx)
        WRAPPER_AVX512_vvv_vl8l8 _ZGVeN8vl8l8_sincos_skx
END (_ZGVeN8vvv_sincos_skx)