/* Function log2 vectorized with AVX-512.
   Copyright (C) 2021-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

/*
 * ALGORITHM DESCRIPTION:
 *
 *   Get short reciprocal approximation Rcp ~ 1/mantissa(x)
 *   R = Rcp*x - 1.0
 *   log2(x) = k - log2(Rcp) + poly_approximation(R)
 *      log2(Rcp) is tabulated
 *
 *
 */
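
/* For orientation, a scalar C sketch of the reduction above.  This is an
 * illustrative reconstruction, not part of the build: the real kernel
 * replaces the two log2 calls with the 16-entry Log_tbl lookup and the
 * degree-9 polynomial in R whose coefficients follow below.
 *
 *     #include <math.h>
 *
 *     static double
 *     log2_sketch (double x)
 *     {
 *       int k;
 *       double m = 2.0 * frexp (x, &k);  // mantissa in [1, 2)
 *       k -= 1;                          // so that x = m * 2^k
 *       // Short reciprocal, rounded to a few fraction bits so that
 *       // -log2 (rcp) fits a small table.
 *       double rcp = nearbyint (32.0 / m) / 32.0;
 *       double r = rcp * m - 1.0;        // reduced argument, |r| small
 *       // log2 (x) = k - log2 (rcp) + log2 (1 + r)
 *       return (double) k - log2 (rcp) + log2 (1.0 + r);
 *     }
 */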

/* Offsets for data table __svml_dlog2_data_internal_avx512
 */
#define Log_tbl			0
#define One			128
#define C075			192
#define poly_coeff9		256
#define poly_coeff8		320
#define poly_coeff7		384
#define poly_coeff6		448
#define poly_coeff5		512
#define poly_coeff4		576
#define poly_coeff3		640
#define poly_coeff2		704
#define poly_coeff1		768
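
/* Layout note: each field below is one 64-byte zmm vector, except Log_tbl,
   which is 16 doubles (128 bytes); the offsets above are the running sum
   (One = 128, C075 = 192, then 64-byte steps from poly_coeff9 = 256 up to
   poly_coeff1 = 768), matching the data block at the end of the file.  */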

#include <sysdep.h>

	.text
	.section .text.evex512, "ax", @progbits
ENTRY(_ZGVeN8v_log2_skx)
	pushq	%rbp
	cfi_def_cfa_offset(16)
	movq	%rsp, %rbp
	cfi_def_cfa(6, 16)
	cfi_offset(6, -16)
	andq	$-64, %rsp
	subq	$192, %rsp
	vmovaps	%zmm0, %zmm7
	vgetmantpd $8, {sae}, %zmm7, %zmm6
	vmovups	One+__svml_dlog2_data_internal_avx512(%rip), %zmm2
	vmovups	poly_coeff5+__svml_dlog2_data_internal_avx512(%rip), %zmm12
	vmovups	poly_coeff3+__svml_dlog2_data_internal_avx512(%rip), %zmm13

	/* Start polynomial evaluation */
	vmovups	poly_coeff9+__svml_dlog2_data_internal_avx512(%rip), %zmm10
	vmovups	poly_coeff8+__svml_dlog2_data_internal_avx512(%rip), %zmm0
	vmovups	poly_coeff7+__svml_dlog2_data_internal_avx512(%rip), %zmm11
	vmovups	poly_coeff6+__svml_dlog2_data_internal_avx512(%rip), %zmm14

	/* Prepare exponent correction: DblRcp<0.75? */
	vmovups	C075+__svml_dlog2_data_internal_avx512(%rip), %zmm1

	/* Table lookup */
	vmovups	__svml_dlog2_data_internal_avx512(%rip), %zmm4

	/* GetExp(x) */
	vgetexppd {sae}, %zmm7, %zmm5

	/* DblRcp ~ 1/Mantissa */
	vrcp14pd %zmm6, %zmm8

	/* x<=0? */
	vfpclasspd $94, %zmm7, %k0
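	/* Our reading of the imm8: $94 (0x5e) classifies +/-0, +/-Inf and
	   negative finite lanes, i.e. inputs the fast path cannot handle;
	   NaNs are left to the vector path, through which they propagate.  */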

	/* round DblRcp to 4 fractional bits (RN mode, no Precision exception) */
	vrndscalepd $88, {sae}, %zmm8, %zmm3
	vmovups	poly_coeff4+__svml_dlog2_data_internal_avx512(%rip), %zmm8
	kmovw	%k0, %edx

	/* Reduced argument: R = DblRcp*Mantissa - 1 */
	vfmsub213pd {rn-sae}, %zmm2, %zmm3, %zmm6
	vcmppd	$17, {sae}, %zmm1, %zmm3, %k1
	vfmadd231pd {rn-sae}, %zmm6, %zmm12, %zmm8
	vmovups	poly_coeff2+__svml_dlog2_data_internal_avx512(%rip), %zmm12
	vfmadd231pd {rn-sae}, %zmm6, %zmm10, %zmm0
	vfmadd231pd {rn-sae}, %zmm6, %zmm11, %zmm14
	vmovups	poly_coeff1+__svml_dlog2_data_internal_avx512(%rip), %zmm1

	/* R^2 */
	vmulpd	{rn-sae}, %zmm6, %zmm6, %zmm15
	vfmadd231pd {rn-sae}, %zmm6, %zmm13, %zmm12

	/* Prepare table index */
	vpsrlq	$48, %zmm3, %zmm9
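	/* The shift leaves the top four stored-mantissa bits of the rounded
	   DblRcp in the low bits of each lane; the vpermt2pd below consumes
	   only the low 4 index bits, selecting one of the 16 Log_tbl entries
	   (our reading of the lookup).  */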

	/* add 1 to Expon if DblRcp<0.75 */
	vaddpd	{rn-sae}, %zmm2, %zmm5, %zmm5{%k1}
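	/* Correction rationale (our reading of the table values below): for
	   DblRcp < 0.75 the Log_tbl entries store -log2(DblRcp) - 1 rather
	   than -log2(DblRcp), so the borrowed 1 is restored here by adding
	   One to Expon under mask %k1, set by the vcmppd against C075.  */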
	vmulpd	{rn-sae}, %zmm15, %zmm15, %zmm13
	vfmadd213pd {rn-sae}, %zmm14, %zmm15, %zmm0
	vfmadd213pd {rn-sae}, %zmm12, %zmm15, %zmm8
	vpermt2pd Log_tbl+64+__svml_dlog2_data_internal_avx512(%rip), %zmm9, %zmm4

	/* polynomial */
	vfmadd213pd {rn-sae}, %zmm8, %zmm13, %zmm0
	vfmadd213pd {rn-sae}, %zmm1, %zmm6, %zmm0
	vfmadd213pd {rn-sae}, %zmm4, %zmm0, %zmm6
	vaddpd	{rn-sae}, %zmm6, %zmm5, %zmm0
	testl	%edx, %edx

	/* Go to special inputs processing branch */
	jne	L(SPECIAL_VALUES_BRANCH)
	# LOE rbx r12 r13 r14 r15 edx zmm0 zmm7

	/* Restore registers
	 * and exit the function
	 */

L(EXIT):
	movq	%rbp, %rsp
	popq	%rbp
	cfi_def_cfa(7, 8)
	cfi_restore(6)
	ret
	cfi_def_cfa(6, 16)
	cfi_offset(6, -16)

	/* Branch to process
	 * special inputs
	 */

L(SPECIAL_VALUES_BRANCH):
	vmovups	%zmm7, 64(%rsp)
	vmovups	%zmm0, 128(%rsp)
	# LOE rbx r12 r13 r14 r15 edx zmm0

	xorl	%eax, %eax
	# LOE rbx r12 r13 r14 r15 eax edx

	vzeroupper
	movq	%r12, 16(%rsp)
	/* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -176; DW_OP_plus) */
	.cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x50, 0xff, 0xff, 0xff, 0x22
	movl	%eax, %r12d
	movq	%r13, 8(%rsp)
	/* DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -184; DW_OP_plus) */
	.cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x48, 0xff, 0xff, 0xff, 0x22
	movl	%edx, %r13d
	movq	%r14, (%rsp)
	/* DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -192; DW_OP_plus) */
	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
	# LOE rbx r15 r12d r13d

	/* Range mask
	 * bits check
	 */

L(RANGEMASK_CHECK):
	btl	%r12d, %r13d

	/* Call scalar math function */
	jc	L(SCALAR_MATH_CALL)
	# LOE rbx r15 r12d r13d

	/* Special inputs
	 * processing loop
	 */

L(SPECIAL_VALUES_LOOP):
	incl	%r12d
	cmpl	$8, %r12d

	/* Check bits in range mask */
	jl	L(RANGEMASK_CHECK)
	# LOE rbx r15 r12d r13d

	movq	16(%rsp), %r12
	cfi_restore(12)
	movq	8(%rsp), %r13
	cfi_restore(13)
	movq	(%rsp), %r14
	cfi_restore(14)
	vmovups	128(%rsp), %zmm0

	/* Go to exit */
	jmp	L(EXIT)
	/* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -176; DW_OP_plus) */
	.cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x50, 0xff, 0xff, 0xff, 0x22
	/* DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -184; DW_OP_plus) */
	.cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x48, 0xff, 0xff, 0xff, 0x22
	/* DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -192; DW_OP_plus) */
	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
	# LOE rbx r12 r13 r14 r15 zmm0
	/* Scalar math function call
	 * to process special input
	 */

L(SCALAR_MATH_CALL):
	movl	%r12d, %r14d
	movsd	64(%rsp,%r14,8), %xmm0
	call	log2@PLT
	# LOE rbx r14 r15 r12d r13d xmm0

	movsd	%xmm0, 128(%rsp,%r14,8)

	/* Process special inputs in loop */
	jmp	L(SPECIAL_VALUES_LOOP)
	# LOE rbx r15 r12d r13d
END(_ZGVeN8v_log2_skx)
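
/* A hedged C-level view of the special-value fallback above; illustrative
 * only, and process_special is a hypothetical name.  Inputs were spilled
 * from %zmm7 at 64(%rsp), vector results from %zmm0 at 128(%rsp), and the
 * vfpclasspd mask held in %r13d marks the lanes to redo with scalar log2.
 *
 *     #include <math.h>
 *
 *     static void
 *     process_special (const double in[8], double out[8], unsigned mask)
 *     {
 *       for (int i = 0; i < 8; i++)      // one mask bit per vector lane
 *         if (mask & (1u << i))          // lane flagged as special?
 *           out[i] = log2 (in[i]);       // same scalar call as log2@PLT
 *     }
 */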

	.section .rodata, "a"
	.align	64

#ifdef __svml_dlog2_data_internal_avx512_typedef
typedef unsigned int VUINT32;
typedef struct {
	__declspec(align(64)) VUINT32 Log_tbl[16][2];
	__declspec(align(64)) VUINT32 One[8][2];
	__declspec(align(64)) VUINT32 C075[8][2];
	__declspec(align(64)) VUINT32 poly_coeff9[8][2];
	__declspec(align(64)) VUINT32 poly_coeff8[8][2];
	__declspec(align(64)) VUINT32 poly_coeff7[8][2];
	__declspec(align(64)) VUINT32 poly_coeff6[8][2];
	__declspec(align(64)) VUINT32 poly_coeff5[8][2];
	__declspec(align(64)) VUINT32 poly_coeff4[8][2];
	__declspec(align(64)) VUINT32 poly_coeff3[8][2];
	__declspec(align(64)) VUINT32 poly_coeff2[8][2];
	__declspec(align(64)) VUINT32 poly_coeff1[8][2];
} __svml_dlog2_data_internal_avx512;
#endif
__svml_dlog2_data_internal_avx512:
	/*== Log_tbl ==*/
	.quad	0x0000000000000000
	.quad	0xbfb663f6fac91316
	.quad	0xbfc5c01a39fbd688
	.quad	0xbfcfbc16b902680a
	.quad	0xbfd49a784bcd1b8b
	.quad	0xbfd91bba891f1709
	.quad	0xbfdd6753e032ea0f
	.quad	0xbfe0c10500d63aa6
	.quad	0x3fda8ff971810a5e
	.quad	0x3fd6cb0f6865c8ea
	.quad	0x3fd32bfee370ee68
	.quad	0x3fcf5fd8a9063e35
	.quad	0x3fc8a8980abfbd32
	.quad	0x3fc22dadc2ab3497
	.quad	0x3fb7d60496cfbb4c
	.quad	0x3fa77394c9d958d5
	/*== One ==*/
	.align	64
	.quad	0x3ff0000000000000, 0x3ff0000000000000, 0x3ff0000000000000, 0x3ff0000000000000, 0x3ff0000000000000, 0x3ff0000000000000, 0x3ff0000000000000, 0x3ff0000000000000
	/*== C075 0.75 ==*/
	.align	64
	.quad	0x3fe8000000000000, 0x3fe8000000000000, 0x3fe8000000000000, 0x3fe8000000000000, 0x3fe8000000000000, 0x3fe8000000000000, 0x3fe8000000000000, 0x3fe8000000000000
	/*== poly_coeff9 ==*/
	.align	64
	.quad	0x3fc4904bda0e1d12, 0x3fc4904bda0e1d12, 0x3fc4904bda0e1d12, 0x3fc4904bda0e1d12, 0x3fc4904bda0e1d12, 0x3fc4904bda0e1d12, 0x3fc4904bda0e1d12, 0x3fc4904bda0e1d12
	/*== poly_coeff8 ==*/
	.align	64
	.quad	0xbfc71fb84deb5cce, 0xbfc71fb84deb5cce, 0xbfc71fb84deb5cce, 0xbfc71fb84deb5cce, 0xbfc71fb84deb5cce, 0xbfc71fb84deb5cce, 0xbfc71fb84deb5cce, 0xbfc71fb84deb5cce
	/*== poly_coeff7 ==*/
	.align	64
	.quad	0x3fca617351818613, 0x3fca617351818613, 0x3fca617351818613, 0x3fca617351818613, 0x3fca617351818613, 0x3fca617351818613, 0x3fca617351818613, 0x3fca617351818613
	/*== poly_coeff6 ==*/
	.align	64
	.quad	0xbfcec707e4e3144c, 0xbfcec707e4e3144c, 0xbfcec707e4e3144c, 0xbfcec707e4e3144c, 0xbfcec707e4e3144c, 0xbfcec707e4e3144c, 0xbfcec707e4e3144c, 0xbfcec707e4e3144c
	/*== poly_coeff5 ==*/
	.align	64
	.quad	0x3fd2776c5114d91a, 0x3fd2776c5114d91a, 0x3fd2776c5114d91a, 0x3fd2776c5114d91a, 0x3fd2776c5114d91a, 0x3fd2776c5114d91a, 0x3fd2776c5114d91a, 0x3fd2776c5114d91a
	/*== poly_coeff4 ==*/
	.align	64
	.quad	0xbfd71547653d0f8d, 0xbfd71547653d0f8d, 0xbfd71547653d0f8d, 0xbfd71547653d0f8d, 0xbfd71547653d0f8d, 0xbfd71547653d0f8d, 0xbfd71547653d0f8d, 0xbfd71547653d0f8d
	/*== poly_coeff3 ==*/
	.align	64
	.quad	0x3fdec709dc3a029f, 0x3fdec709dc3a029f, 0x3fdec709dc3a029f, 0x3fdec709dc3a029f, 0x3fdec709dc3a029f, 0x3fdec709dc3a029f, 0x3fdec709dc3a029f, 0x3fdec709dc3a029f
	/*== poly_coeff2 ==*/
	.align	64
	.quad	0xbfe71547652b82d4, 0xbfe71547652b82d4, 0xbfe71547652b82d4, 0xbfe71547652b82d4, 0xbfe71547652b82d4, 0xbfe71547652b82d4, 0xbfe71547652b82d4, 0xbfe71547652b82d4
	/*== poly_coeff1 ==*/
	.align	64
	.quad	0x3ff71547652b82fe, 0x3ff71547652b82fe, 0x3ff71547652b82fe, 0x3ff71547652b82fe, 0x3ff71547652b82fe, 0x3ff71547652b82fe, 0x3ff71547652b82fe, 0x3ff71547652b82fe
	.align	64
	.type	__svml_dlog2_data_internal_avx512, @object
	.size	__svml_dlog2_data_internal_avx512, .-__svml_dlog2_data_internal_avx512