/* Function cos vectorized with AVX-512, KNL and SKX versions.
   Copyright (C) 2014-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include "svml_d_trig_data.h"
#include "svml_d_wrapper_impl.h"

	.text
ENTRY (_ZGVeN8v_cos_knl)
#ifndef HAVE_AVX512DQ_ASM_SUPPORT
WRAPPER_IMPL_AVX512 _ZGVdN4v_cos
#else
/*
   ALGORITHM DESCRIPTION:

     ( low accuracy ( < 4ulp ) or enhanced performance
       ( half of correct mantissa ) implementation )

   Argument representation:
   arg + Pi/2 = (N*Pi + R)

   Result calculation:
   cos(arg) = sin(arg+Pi/2) = sin(N*Pi + R) = (-1)^N * sin(R)
   sin(R) is approximated by the corresponding polynomial.
 */
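/* A minimal scalar sketch of the fast path below, in C-like pseudocode
   (illustrative only: the constant names mirror the __svml_d_trig_data
   fields used by this kernel, C1..C7 are the sin(R) polynomial
   coefficients, and parity_bit_of () is a hypothetical stand-in for
   reading the low mantissa bit of Y; the large-argument fallback is
   omitted):

     double y   = (x + HalfPI) * InvPI + RShifter; // rounds N into Y
     int    odd = parity_bit_of (y);               // sign of (-1)^N
     double n   = (y - RShifter) - 0.5;            // N - 0.5
     double r   = x - n * PI1 - n * PI2 - n * PI3; // 3-term Pi reduction
     double r2  = r * r;
     double p   = C1 + r2 * (C2 + r2 * (C3 + r2 * (C4
                    + r2 * (C5 + r2 * (C6 + r2 * C7)))));
     double res = r + r * (r2 * p);                // ~= sin(R)
     return odd ? -res : res;                      // (-1)^N * sin(R)
 */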
	pushq	%rbp
	cfi_adjust_cfa_offset (8)
	cfi_rel_offset (%rbp, 0)
	movq	%rsp, %rbp
	cfi_def_cfa_register (%rbp)
	andq	$-64, %rsp
	subq	$1280, %rsp
	movq	__svml_d_trig_data@GOTPCREL(%rip), %rax

/* R = X - N*Pi1 */
	vmovaps	%zmm0, %zmm7

/* Check for large arguments path */
	movq	$-1, %rcx

/*
  ARGUMENT RANGE REDUCTION:
  Add Pi/2 to argument: X' = X+Pi/2
 */
	vaddpd	__dHalfPI(%rax), %zmm0, %zmm5
	vmovups	__dInvPI(%rax), %zmm3

/* Get absolute argument value: X' = |X'| */
	vpandq	__dAbsMask(%rax), %zmm5, %zmm1

/* Y = X'*InvPi + RS : right shifter add */
	vfmadd213pd	__dRShifter(%rax), %zmm3, %zmm5
	vmovups	__dPI1_FMA(%rax), %zmm6

/* N = Y - RS : right shifter sub */
	vsubpd	__dRShifter(%rax), %zmm5, %zmm4

/* SignRes = Y<<63 : shift LSB to MSB place for result sign */
	vpsllq	$63, %zmm5, %zmm12
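/* Note: __dRShifter is assumed to be the usual "right shifter"
   constant (1.5 * 2^52).  Adding it to X'*InvPi forces rounding so
   that the integer N lands in the low mantissa bits of Y; the shift
   above therefore moves N's parity bit into the sign-bit position,
   ready to be XORed into the result during reconstruction.  */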
	vmovups	__dC7(%rax), %zmm8

/* N = N - 0.5 */
	vsubpd	__dOneHalf(%rax), %zmm4, %zmm10
	vcmppd	$22, __dRangeVal(%rax), %zmm1, %k1
	vpbroadcastq	%rcx, %zmm2{%k1}{z}
	vfnmadd231pd	%zmm10, %zmm6, %zmm7
	vptestmq	%zmm2, %zmm2, %k0

/* R = R - N*Pi2 */
	vfnmadd231pd	__dPI2_FMA(%rax), %zmm10, %zmm7
	kmovw	%k0, %ecx
	movzbl	%cl, %ecx
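/* %ecx now carries one bit per lane (8 significant bits after the
   movzbl) flagging arguments that failed the __dRangeVal check; any
   nonzero bit diverts those lanes to the scalar fallback below.  */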

/* R = R - N*Pi3 */
	vfnmadd132pd	__dPI3_FMA(%rax), %zmm7, %zmm10

/*
  POLYNOMIAL APPROXIMATION:
  R2 = R*R
 */
	vmulpd	%zmm10, %zmm10, %zmm9
	vfmadd213pd	__dC6(%rax), %zmm9, %zmm8
	vfmadd213pd	__dC5(%rax), %zmm9, %zmm8
	vfmadd213pd	__dC4(%rax), %zmm9, %zmm8

/* Poly = C3+R2*(C4+R2*(C5+R2*(C6+R2*C7))) */
	vfmadd213pd	__dC3(%rax), %zmm9, %zmm8

/* Poly = R+R*(R2*(C1+R2*(C2+R2*Poly))) */
	vfmadd213pd	__dC2(%rax), %zmm9, %zmm8
	vfmadd213pd	__dC1(%rax), %zmm9, %zmm8
	vmulpd	%zmm9, %zmm8, %zmm11
	vfmadd213pd	%zmm10, %zmm10, %zmm11
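/* zmm11 held R2*Poly; this FMA forms R + R*(R2*Poly), i.e. the odd
   polynomial R + C1*R^3 + ... + C7*R^15 approximating sin(R).  */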

/*
  RECONSTRUCTION:
  Final sign setting: Res = Poly^SignRes
 */
	vpxorq	%zmm12, %zmm11, %zmm1
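/* XORing a double with the 1<<63 mask flips its sign bit, e.g. in C:

     bits ^= 0x8000000000000000ull;   // res = -res

   so lanes where N was odd come out negated, while lanes where N was
   even are XORed with zero and pass through unchanged.  */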
	testl	%ecx, %ecx
	jne	.LBL_1_3

.LBL_1_2:
	cfi_remember_state
	vmovaps	%zmm1, %zmm0
	movq	%rbp, %rsp
	cfi_def_cfa_register (%rsp)
	popq	%rbp
	cfi_adjust_cfa_offset (-8)
	cfi_restore (%rbp)
	ret

.LBL_1_3:
	cfi_restore_state
	vmovups	%zmm0, 1152(%rsp)
	vmovups	%zmm1, 1216(%rsp)
	je	.LBL_1_2

	xorb	%dl, %dl
	kmovw	%k4, 1048(%rsp)
	xorl	%eax, %eax
	kmovw	%k5, 1040(%rsp)
	kmovw	%k6, 1032(%rsp)
	kmovw	%k7, 1024(%rsp)
	vmovups	%zmm16, 960(%rsp)
	vmovups	%zmm17, 896(%rsp)
	vmovups	%zmm18, 832(%rsp)
	vmovups	%zmm19, 768(%rsp)
	vmovups	%zmm20, 704(%rsp)
	vmovups	%zmm21, 640(%rsp)
	vmovups	%zmm22, 576(%rsp)
	vmovups	%zmm23, 512(%rsp)
	vmovups	%zmm24, 448(%rsp)
	vmovups	%zmm25, 384(%rsp)
	vmovups	%zmm26, 320(%rsp)
	vmovups	%zmm27, 256(%rsp)
	vmovups	%zmm28, 192(%rsp)
	vmovups	%zmm29, 128(%rsp)
	vmovups	%zmm30, 64(%rsp)
	vmovups	%zmm31, (%rsp)
	movq	%rsi, 1064(%rsp)
	movq	%rdi, 1056(%rsp)
	movq	%r12, 1096(%rsp)
	cfi_offset_rel_rsp (12, 1096)
	movb	%dl, %r12b
	movq	%r13, 1088(%rsp)
	cfi_offset_rel_rsp (13, 1088)
	movl	%ecx, %r13d
	movq	%r14, 1080(%rsp)
	cfi_offset_rel_rsp (14, 1080)
	movl	%eax, %r14d
	movq	%r15, 1072(%rsp)
	cfi_offset_rel_rsp (15, 1072)
	cfi_remember_state

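/* Special-case path: the loop below walks the lane mask in %r13d two
   lanes per iteration and recomputes the flagged lanes with the scalar
   cos.  A rough C sketch (names are illustrative only):

     // in[]  = input vector saved at 1152(%rsp)
     // out[] = result vector saved at 1216(%rsp)
     for (unsigned p = 0; p < 16; p++)        // %r12b, pair counter
       {
         unsigned i = 2 * p;                  // %r14d, lane index
         if (mask & (1u << i))                // btl %r14d, %r13d
           out[i] = cos (in[i]);              // .LBL_1_12
         if (mask & (1u << (i + 1)))          // btl %esi, %r13d
           out[i + 1] = cos (in[i + 1]);      // .LBL_1_10
       }

   Only the low 8 mask bits can be set for this 8-lane kernel, so the
   later pair iterations fall through.  */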
.LBL_1_6:
	btl	%r14d, %r13d
	jc	.LBL_1_12

.LBL_1_7:
	lea	1(%r14), %esi
	btl	%esi, %r13d
	jc	.LBL_1_10

.LBL_1_8:
	addb	$1, %r12b
	addl	$2, %r14d
	cmpb	$16, %r12b
	jb	.LBL_1_6

	kmovw	1048(%rsp), %k4
	movq	1064(%rsp), %rsi
	kmovw	1040(%rsp), %k5
	movq	1056(%rsp), %rdi
	kmovw	1032(%rsp), %k6
	movq	1096(%rsp), %r12
	cfi_restore (%r12)
	movq	1088(%rsp), %r13
	cfi_restore (%r13)
	kmovw	1024(%rsp), %k7
	vmovups	960(%rsp), %zmm16
	vmovups	896(%rsp), %zmm17
	vmovups	832(%rsp), %zmm18
	vmovups	768(%rsp), %zmm19
	vmovups	704(%rsp), %zmm20
	vmovups	640(%rsp), %zmm21
	vmovups	576(%rsp), %zmm22
	vmovups	512(%rsp), %zmm23
	vmovups	448(%rsp), %zmm24
	vmovups	384(%rsp), %zmm25
	vmovups	320(%rsp), %zmm26
	vmovups	256(%rsp), %zmm27
	vmovups	192(%rsp), %zmm28
	vmovups	128(%rsp), %zmm29
	vmovups	64(%rsp), %zmm30
	vmovups	(%rsp), %zmm31
	movq	1080(%rsp), %r14
	cfi_restore (%r14)
	movq	1072(%rsp), %r15
	cfi_restore (%r15)
	vmovups	1216(%rsp), %zmm1
	jmp	.LBL_1_2

.LBL_1_10:
	cfi_restore_state
	movzbl	%r12b, %r15d
	shlq	$4, %r15
	vmovsd	1160(%rsp,%r15), %xmm0
	call	JUMPTARGET(cos)
	vmovsd	%xmm0, 1224(%rsp,%r15)
	jmp	.LBL_1_8

.LBL_1_12:
	movzbl	%r12b, %r15d
	shlq	$4, %r15
	vmovsd	1152(%rsp,%r15), %xmm0
	call	JUMPTARGET(cos)
	vmovsd	%xmm0, 1216(%rsp,%r15)
	jmp	.LBL_1_7
#endif
END (_ZGVeN8v_cos_knl)

ENTRY (_ZGVeN8v_cos_skx)
#ifndef HAVE_AVX512DQ_ASM_SUPPORT
WRAPPER_IMPL_AVX512 _ZGVdN4v_cos
#else
/*
   ALGORITHM DESCRIPTION:

     ( low accuracy ( < 4ulp ) or enhanced performance
       ( half of correct mantissa ) implementation )

   Argument representation:
   arg + Pi/2 = (N*Pi + R)

   Result calculation:
   cos(arg) = sin(arg+Pi/2) = sin(N*Pi + R) = (-1)^N * sin(R)
   sin(R) is approximated by the corresponding polynomial.
 */
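/* The SKX variant implements the same algorithm as the KNL version
   above; it differs mainly in instruction selection (vandpd/vxorpd in
   place of vpandq/vpxorq) and in how the large-argument lanes are
   collected into a mask (see the vpandnq/vcmppd sequence below).  */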
	pushq	%rbp
	cfi_adjust_cfa_offset (8)
	cfi_rel_offset (%rbp, 0)
	movq	%rsp, %rbp
	cfi_def_cfa_register (%rbp)
	andq	$-64, %rsp
	subq	$1280, %rsp
	movq	__svml_d_trig_data@GOTPCREL(%rip), %rax

/* R = X - N*Pi1 */
	vmovaps	%zmm0, %zmm8

/* Check for large arguments path */
	vpbroadcastq	.L_2il0floatpacket.16(%rip), %zmm2

/*
  ARGUMENT RANGE REDUCTION:
  Add Pi/2 to argument: X' = X+Pi/2
 */
	vaddpd	__dHalfPI(%rax), %zmm0, %zmm6
	vmovups	__dInvPI(%rax), %zmm3
	vmovups	__dRShifter(%rax), %zmm4
	vmovups	__dPI1_FMA(%rax), %zmm7
	vmovups	__dC7(%rax), %zmm9

/* Get absolute argument value: X' = |X'| */
	vandpd	__dAbsMask(%rax), %zmm6, %zmm1

/* Y = X'*InvPi + RS : right shifter add */
	vfmadd213pd	%zmm4, %zmm3, %zmm6
	vcmppd	$18, __dRangeVal(%rax), %zmm1, %k1

/* SignRes = Y<<63 : shift LSB to MSB place for result sign */
	vpsllq	$63, %zmm6, %zmm13

/* N = Y - RS : right shifter sub */
	vsubpd	%zmm4, %zmm6, %zmm5

/* N = N - 0.5 */
	vsubpd	__dOneHalf(%rax), %zmm5, %zmm11
	vfnmadd231pd	%zmm11, %zmm7, %zmm8

/* R = R - N*Pi2 */
	vfnmadd231pd	__dPI2_FMA(%rax), %zmm11, %zmm8

/* R = R - N*Pi3 */
	vfnmadd132pd	__dPI3_FMA(%rax), %zmm8, %zmm11

/*
  POLYNOMIAL APPROXIMATION:
  R2 = R*R
 */
	vmulpd	%zmm11, %zmm11, %zmm10
	vfmadd213pd	__dC6(%rax), %zmm10, %zmm9
	vfmadd213pd	__dC5(%rax), %zmm10, %zmm9
	vfmadd213pd	__dC4(%rax), %zmm10, %zmm9

/* Poly = C3+R2*(C4+R2*(C5+R2*(C6+R2*C7))) */
	vfmadd213pd	__dC3(%rax), %zmm10, %zmm9

/* Poly = R+R*(R2*(C1+R2*(C2+R2*Poly))) */
	vfmadd213pd	__dC2(%rax), %zmm10, %zmm9
	vfmadd213pd	__dC1(%rax), %zmm10, %zmm9
	vmulpd	%zmm10, %zmm9, %zmm12
	vfmadd213pd	%zmm11, %zmm11, %zmm12
	vpandnq	%zmm1, %zmm1, %zmm2{%k1}
	vcmppd	$3, %zmm2, %zmm2, %k0
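/* zmm2 was preloaded with all-ones, which as a double is a NaN bit
   pattern.  The vpandnq zeroes it in the lanes %k1 marked as in range,
   and the unordered self-compare (predicate 3) then sets %k0 exactly
   in the lanes still holding the NaN pattern, i.e. the large arguments
   that need the scalar fallback.  */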

/*
  RECONSTRUCTION:
  Final sign setting: Res = Poly^SignRes
 */
	vxorpd	%zmm13, %zmm12, %zmm1
	kmovw	%k0, %ecx
	testl	%ecx, %ecx
	jne	.LBL_2_3

.LBL_2_2:
	cfi_remember_state
	vmovaps	%zmm1, %zmm0
	movq	%rbp, %rsp
	cfi_def_cfa_register (%rsp)
	popq	%rbp
	cfi_adjust_cfa_offset (-8)
	cfi_restore (%rbp)
	ret

.LBL_2_3:
	cfi_restore_state
	vmovups	%zmm0, 1152(%rsp)
	vmovups	%zmm1, 1216(%rsp)
	je	.LBL_2_2

	xorb	%dl, %dl
	xorl	%eax, %eax
	kmovw	%k4, 1048(%rsp)
	kmovw	%k5, 1040(%rsp)
	kmovw	%k6, 1032(%rsp)
	kmovw	%k7, 1024(%rsp)
	vmovups	%zmm16, 960(%rsp)
	vmovups	%zmm17, 896(%rsp)
	vmovups	%zmm18, 832(%rsp)
	vmovups	%zmm19, 768(%rsp)
	vmovups	%zmm20, 704(%rsp)
	vmovups	%zmm21, 640(%rsp)
	vmovups	%zmm22, 576(%rsp)
	vmovups	%zmm23, 512(%rsp)
	vmovups	%zmm24, 448(%rsp)
	vmovups	%zmm25, 384(%rsp)
	vmovups	%zmm26, 320(%rsp)
	vmovups	%zmm27, 256(%rsp)
	vmovups	%zmm28, 192(%rsp)
	vmovups	%zmm29, 128(%rsp)
	vmovups	%zmm30, 64(%rsp)
	vmovups	%zmm31, (%rsp)
	movq	%rsi, 1064(%rsp)
	movq	%rdi, 1056(%rsp)
	movq	%r12, 1096(%rsp)
	cfi_offset_rel_rsp (12, 1096)
	movb	%dl, %r12b
	movq	%r13, 1088(%rsp)
	cfi_offset_rel_rsp (13, 1088)
	movl	%ecx, %r13d
	movq	%r14, 1080(%rsp)
	cfi_offset_rel_rsp (14, 1080)
	movl	%eax, %r14d
	movq	%r15, 1072(%rsp)
	cfi_offset_rel_rsp (15, 1072)
	cfi_remember_state

.LBL_2_6:
	btl	%r14d, %r13d
	jc	.LBL_2_12

.LBL_2_7:
	lea	1(%r14), %esi
	btl	%esi, %r13d
	jc	.LBL_2_10

.LBL_2_8:
	incb	%r12b
	addl	$2, %r14d
	cmpb	$16, %r12b
	jb	.LBL_2_6

	kmovw	1048(%rsp), %k4
	kmovw	1040(%rsp), %k5
	kmovw	1032(%rsp), %k6
	kmovw	1024(%rsp), %k7
	vmovups	960(%rsp), %zmm16
	vmovups	896(%rsp), %zmm17
	vmovups	832(%rsp), %zmm18
	vmovups	768(%rsp), %zmm19
	vmovups	704(%rsp), %zmm20
	vmovups	640(%rsp), %zmm21
	vmovups	576(%rsp), %zmm22
	vmovups	512(%rsp), %zmm23
	vmovups	448(%rsp), %zmm24
	vmovups	384(%rsp), %zmm25
	vmovups	320(%rsp), %zmm26
	vmovups	256(%rsp), %zmm27
	vmovups	192(%rsp), %zmm28
	vmovups	128(%rsp), %zmm29
	vmovups	64(%rsp), %zmm30
	vmovups	(%rsp), %zmm31
	vmovups	1216(%rsp), %zmm1
	movq	1064(%rsp), %rsi
	movq	1056(%rsp), %rdi
	movq	1096(%rsp), %r12
	cfi_restore (%r12)
	movq	1088(%rsp), %r13
	cfi_restore (%r13)
	movq	1080(%rsp), %r14
	cfi_restore (%r14)
	movq	1072(%rsp), %r15
	cfi_restore (%r15)
	jmp	.LBL_2_2

.LBL_2_10:
	cfi_restore_state
	movzbl	%r12b, %r15d
	shlq	$4, %r15
	vzeroupper
	vmovsd	1160(%rsp,%r15), %xmm0

	call	JUMPTARGET(cos)

	vmovsd	%xmm0, 1224(%rsp,%r15)
	jmp	.LBL_2_8

.LBL_2_12:
	movzbl	%r12b, %r15d
	shlq	$4, %r15
	vzeroupper
	vmovsd	1152(%rsp,%r15), %xmm0

	call	JUMPTARGET(cos)

	vmovsd	%xmm0, 1216(%rsp,%r15)
	jmp	.LBL_2_7
#endif
END (_ZGVeN8v_cos_skx)

	.section .rodata, "a"
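/* All-ones qword broadcast by the SKX path to tag large-argument
   lanes; interpreted as a double this bit pattern is a NaN, which the
   unordered vcmppd above detects.  */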
.L_2il0floatpacket.16:
	.long	0xffffffff,0xffffffff
	.type	.L_2il0floatpacket.16,@object