/* sysdeps/x86_64/fpu/multiarch/svml_s_log1pf4_core_sse4.S
   From glibc (thirdparty/glibc.git), commit subject:
   "x86-64: Add vector log1p/log1pf implementation to libmvec".  */
/* Function log1pf vectorized with SSE4.
   Copyright (C) 2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   https://www.gnu.org/licenses/.  */

/*
 * ALGORITHM DESCRIPTION:
 *
 *    1+x = 2^k*(xh + xl) is computed in high-low parts; xh in [1,2)
 *    Get short reciprocal approximation Rcp ~ 1/xh
 *    R = (Rcp*xh - 1.0) + Rcp*xl
 *    log1p(x) = k*log(2.0) - log(Rcp) + poly(R)
 *       log(Rcp) is tabulated
 *
 */
30
31 /* Offsets for data table __svml_slog1p_data_internal
32 */
33 #define SgnMask 0
34 #define sOne 16
35 #define sPoly 32
36 #define iHiDelta 160
37 #define iLoRange 176
38 #define iBrkValue 192
39 #define iOffExpoMask 208
40 #define sLn2 224
41
42 #include <sysdep.h>
43
44 .text
45 .section .text.sse4,"ax",@progbits
46 ENTRY(_ZGVbN4v_log1pf_sse4)
47 subq $72, %rsp
48 cfi_def_cfa_offset(80)
49 movups sOne+__svml_slog1p_data_internal(%rip), %xmm7
50
51 /* compute 1+x as high, low parts */
52 movaps %xmm7, %xmm1
53 movaps %xmm7, %xmm5
54 maxps %xmm0, %xmm1
55 minps %xmm0, %xmm5
56 movaps %xmm1, %xmm4
57
58 /* check argument value ranges */
59 movdqu iHiDelta+__svml_slog1p_data_internal(%rip), %xmm2
60 addps %xmm5, %xmm4
61
62 /* reduction: compute r,n */
63 movdqu iBrkValue+__svml_slog1p_data_internal(%rip), %xmm3
64 paddd %xmm4, %xmm2
65 movdqu iOffExpoMask+__svml_slog1p_data_internal(%rip), %xmm8
66 subps %xmm4, %xmm1
67 psubd %xmm3, %xmm4
68 addps %xmm1, %xmm5
69 pand %xmm4, %xmm8
70 psrad $23, %xmm4
71 cvtdq2ps %xmm4, %xmm10
72 pslld $23, %xmm4
73 movaps %xmm7, %xmm1
74 paddd %xmm3, %xmm8
75 psubd %xmm4, %xmm1
76 mulps %xmm5, %xmm1
77
78 /* polynomial evaluation */
79 subps %xmm7, %xmm8
80
81 /* final reconstruction */
82 mulps sLn2+__svml_slog1p_data_internal(%rip), %xmm10
83 addps %xmm8, %xmm1
84 movups sPoly+112+__svml_slog1p_data_internal(%rip), %xmm9
85 mulps %xmm1, %xmm9
86 movdqu iLoRange+__svml_slog1p_data_internal(%rip), %xmm6
87 pcmpgtd %xmm2, %xmm6
88 addps sPoly+96+__svml_slog1p_data_internal(%rip), %xmm9
89
90 /* combine and get argument value range mask */
91 movmskps %xmm6, %edx
92 movups SgnMask+__svml_slog1p_data_internal(%rip), %xmm11
93 mulps %xmm1, %xmm9
94 andnps %xmm0, %xmm11
95 addps sPoly+80+__svml_slog1p_data_internal(%rip), %xmm9
96 mulps %xmm1, %xmm9
97 addps sPoly+64+__svml_slog1p_data_internal(%rip), %xmm9
98 mulps %xmm1, %xmm9
99 addps sPoly+48+__svml_slog1p_data_internal(%rip), %xmm9
100 mulps %xmm1, %xmm9
101 addps sPoly+32+__svml_slog1p_data_internal(%rip), %xmm9
102 mulps %xmm1, %xmm9
103 addps sPoly+16+__svml_slog1p_data_internal(%rip), %xmm9
104 mulps %xmm1, %xmm9
105 addps sPoly+__svml_slog1p_data_internal(%rip), %xmm9
106 mulps %xmm1, %xmm9
107 mulps %xmm1, %xmm9
108 addps %xmm9, %xmm1
109 addps %xmm10, %xmm1
110 orps %xmm11, %xmm1
111 testl %edx, %edx
112
113 /* Go to special inputs processing branch */
114 jne L(SPECIAL_VALUES_BRANCH)
115 # LOE rbx rbp r12 r13 r14 r15 edx xmm0 xmm1
116
117 /* Restore registers
118 * and exit the function
119 */
120
121 L(EXIT):
122 movaps %xmm1, %xmm0
123 addq $72, %rsp
124 cfi_def_cfa_offset(8)
125 ret
126 cfi_def_cfa_offset(80)
127
128 /* Branch to process
129 * special inputs
130 */
131
132 L(SPECIAL_VALUES_BRANCH):
133 movups %xmm0, 32(%rsp)
134 movups %xmm1, 48(%rsp)
135 # LOE rbx rbp r12 r13 r14 r15 edx
136
137 xorl %eax, %eax
138 movq %r12, 16(%rsp)
139 cfi_offset(12, -64)
140 movl %eax, %r12d
141 movq %r13, 8(%rsp)
142 cfi_offset(13, -72)
143 movl %edx, %r13d
144 movq %r14, (%rsp)
145 cfi_offset(14, -80)
146 # LOE rbx rbp r15 r12d r13d
147
148 /* Range mask
149 * bits check
150 */
151
152 L(RANGEMASK_CHECK):
153 btl %r12d, %r13d
154
155 /* Call scalar math function */
156 jc L(SCALAR_MATH_CALL)
157 # LOE rbx rbp r15 r12d r13d
158
159 /* Special inputs
160 * processing loop
161 */
162
163 L(SPECIAL_VALUES_LOOP):
164 incl %r12d
165 cmpl $4, %r12d
166
167 /* Check bits in range mask */
168 jl L(RANGEMASK_CHECK)
169 # LOE rbx rbp r15 r12d r13d
170
171 movq 16(%rsp), %r12
172 cfi_restore(12)
173 movq 8(%rsp), %r13
174 cfi_restore(13)
175 movq (%rsp), %r14
176 cfi_restore(14)
177 movups 48(%rsp), %xmm1
178
179 /* Go to exit */
180 jmp L(EXIT)
181 cfi_offset(12, -64)
182 cfi_offset(13, -72)
183 cfi_offset(14, -80)
184 # LOE rbx rbp r12 r13 r14 r15 xmm1
185
186 /* Scalar math fucntion call
187 * to process special input
188 */
189
190 L(SCALAR_MATH_CALL):
191 movl %r12d, %r14d
192 movss 32(%rsp,%r14,4), %xmm0
193 call log1pf@PLT
194 # LOE rbx rbp r14 r15 r12d r13d xmm0
195
196 movss %xmm0, 48(%rsp,%r14,4)
197
198 /* Process special inputs in loop */
199 jmp L(SPECIAL_VALUES_LOOP)
200 # LOE rbx rbp r15 r12d r13d
201 END(_ZGVbN4v_log1pf_sse4)
202
203 .section .rodata, "a"
204 .align 16
205
206 #ifdef __svml_slog1p_data_internal_typedef
207 typedef unsigned int VUINT32;
208 typedef struct {
209 __declspec(align(16)) VUINT32 SgnMask[4][1];
210 __declspec(align(16)) VUINT32 sOne[4][1];
211 __declspec(align(16)) VUINT32 sPoly[8][4][1];
212 __declspec(align(16)) VUINT32 iHiDelta[4][1];
213 __declspec(align(16)) VUINT32 iLoRange[4][1];
214 __declspec(align(16)) VUINT32 iBrkValue[4][1];
215 __declspec(align(16)) VUINT32 iOffExpoMask[4][1];
216 __declspec(align(16)) VUINT32 sLn2[4][1];
217 } __svml_slog1p_data_internal;
218 #endif
219 __svml_slog1p_data_internal:
220 /*== SgnMask ==*/
221 .long 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff
222 /*== sOne = SP 1.0 ==*/
223 .align 16
224 .long 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000
225 /*== sPoly[] = SP polynomial ==*/
226 .align 16
227 .long 0xbf000000, 0xbf000000, 0xbf000000, 0xbf000000 /* -5.0000000000000000000000000e-01 P0 */
228 .long 0x3eaaaa94, 0x3eaaaa94, 0x3eaaaa94, 0x3eaaaa94 /* 3.3333265781402587890625000e-01 P1 */
229 .long 0xbe80058e, 0xbe80058e, 0xbe80058e, 0xbe80058e /* -2.5004237890243530273437500e-01 P2 */
230 .long 0x3e4ce190, 0x3e4ce190, 0x3e4ce190, 0x3e4ce190 /* 2.0007920265197753906250000e-01 P3 */
231 .long 0xbe28ad37, 0xbe28ad37, 0xbe28ad37, 0xbe28ad37 /* -1.6472326219081878662109375e-01 P4 */
232 .long 0x3e0fcb12, 0x3e0fcb12, 0x3e0fcb12, 0x3e0fcb12 /* 1.4042308926582336425781250e-01 P5 */
233 .long 0xbe1ad9e3, 0xbe1ad9e3, 0xbe1ad9e3, 0xbe1ad9e3 /* -1.5122179687023162841796875e-01 P6 */
234 .long 0x3e0d84ed, 0x3e0d84ed, 0x3e0d84ed, 0x3e0d84ed /* 1.3820238411426544189453125e-01 P7 */
235 /*== iHiDelta = SP 80000000-7f000000 ==*/
236 .align 16
237 .long 0x01000000, 0x01000000, 0x01000000, 0x01000000
238 /*== iLoRange = SP 00800000+iHiDelta ==*/
239 .align 16
240 .long 0x01800000, 0x01800000, 0x01800000, 0x01800000
241 /*== iBrkValue = SP 2/3 ==*/
242 .align 16
243 .long 0x3f2aaaab, 0x3f2aaaab, 0x3f2aaaab, 0x3f2aaaab
244 /*== iOffExpoMask = SP significand mask ==*/
245 .align 16
246 .long 0x007fffff, 0x007fffff, 0x007fffff, 0x007fffff
247 /*== sLn2 = SP ln(2) ==*/
248 .align 16
249 .long 0x3f317218, 0x3f317218, 0x3f317218, 0x3f317218
250 .align 16
251 .type __svml_slog1p_data_internal,@object
252 .size __svml_slog1p_data_internal,.-__svml_slog1p_data_internal