/* Source: thirdparty/glibc.git — sysdeps/ieee754/dbl-64/s_tan.c
   (gitweb blob 09db096d056a440fcc770d0227537fa7d1469f3d, via git.ipfire.org) */
/*
 * IBM Accurate Mathematical Library
 * written by International Business Machines Corp.
 * Copyright (C) 2001-2013 Free Software Foundation, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
/*********************************************************************/
/* MODULE_NAME: utan.c                                               */
/*                                                                   */
/* FUNCTIONS: utan                                                   */
/*            tanMp                                                  */
/*                                                                   */
/* FILES NEEDED: dla.h endian.h mpa.h mydefs.h utan.h                */
/*               branred.c sincos32.c mptan.c                        */
/*               utan.tbl                                            */
/*                                                                   */
/* An ultimate tan routine. Given an IEEE double machine number x    */
/* it computes the correctly rounded (to nearest) value of tan(x).   */
/* Assumption: Machine arithmetic operations are performed in        */
/* round to nearest mode of IEEE 754 standard.                       */
/*                                                                   */
/*********************************************************************/
35
36 #include <errno.h>
37 #include "endian.h"
38 #include <dla.h>
39 #include "mpa.h"
40 #include "MathLib.h"
41 #include <math.h>
42 #include <math_private.h>
43 #include <fenv.h>
44 #include <stap-probe.h>
45
46 #ifndef SECTION
47 # define SECTION
48 #endif
49
50 static double tanMp (double);
51 void __mptan (double, mp_no *, int);
52
/* Correctly rounded (to nearest) tan of a double x.

   Structure: the argument range is split into bands by magnitude of x
   (constants g1..g5 from utan.tbl).  Each band is tried in one or two
   "stages" of increasing precision; a stage accepts its result only if
   the rounding test succeeds — i.e. the candidate value perturbed down
   and perturbed up by a small bound (u1..u28) rounds to the same double,
   proving the result is correctly rounded.  If every stage fails, the
   multiple-precision fallback tanMp() decides.

   EMULV/MUL2/ADD2/SUB2/DIV2/EADD are the exact double-double primitives
   from dla.h: each value is carried as a high part plus a compensation
   term (e.g. c1/cc1).  The t1..t10 arguments are scratch temporaries.  */
double
SECTION
tan (double x)
{
#include "utan.h"
#include "utan.tbl"

  int ux, i, n;
  double a, da, a2, b, db, c, dc, c1, cc1, c2, cc2, c3, cc3, fi, ffi, gi, pz,
    s, sy, t, t1, t2, t3, t4, t7, t8, t9, t10, w, x2, xn, xx2, y, ya,
    yya, z0, z, zz, z2, zz2;
#ifndef DLA_FMS
  /* Extra scratch slots only needed when the dla.h macros cannot use a
     fused multiply-subtract.  */
  double t5, t6;
#endif
  int p;
  number num, v;
  mp_no mpa, mpt1, mpt2;

  double retval;

  int __branred (double, double *, double *);
  int __mpranred (double, mp_no *, int);

  /* All stages assume round-to-nearest with 53-bit precision; restore the
     caller's rounding state on return.  */
  SET_RESTORE_ROUND_53BIT (FE_TONEAREST);

  /* x=+-INF, x=NaN */
  num.d = x;
  ux = num.i[HIGH_HALF];
  if ((ux & 0x7ff00000) == 0x7ff00000)
    {
      /* errno is set only for infinite x (domain error); for NaN the
         quiet NaN propagates via x - x without touching errno.  */
      if ((ux & 0x7fffffff) == 0x7ff00000)
	__set_errno (EDOM);
      retval = x - x;
      goto ret;
    }

  w = (x < 0.0) ? -x : x;

  /* (I) The case abs(x) <= 1.259e-8 */
  /* tan(x) rounds to x itself in this range.  */
  if (w <= g1.d)
    {
      retval = x;
      goto ret;
    }

  /* (II) The case 1.259e-8 < abs(x) <= 0.0608 */
  if (w <= g2.d)
    {
      /* First stage: plain double polynomial x + x^3*P(x^2).  */
      x2 = x * x;

      t2 = d9.d + x2 * d11.d;
      t2 = d7.d + x2 * t2;
      t2 = d5.d + x2 * t2;
      t2 = d3.d + x2 * t2;
      t2 *= x * x2;

      /* Rounding test: accept if perturbing by +-u1*t2 rounds the same.  */
      if ((y = x + (t2 - u1.d * t2)) == x + (t2 + u1.d * t2))
	{
	  retval = y;
	  goto ret;
	}

      /* Second stage: double-double polynomial of higher degree.  */
      c1 = a25.d + x2 * a27.d;
      c1 = a23.d + x2 * c1;
      c1 = a21.d + x2 * c1;
      c1 = a19.d + x2 * c1;
      c1 = a17.d + x2 * c1;
      c1 = a15.d + x2 * c1;
      c1 *= x2;

      EMULV (x, x, x2, xx2, t1, t2, t3, t4, t5);
      ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (x, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (x, 0.0, c2, cc2, c1, cc1, t1, t2);
      if ((y = c1 + (cc1 - u2.d * c1)) == c1 + (cc1 + u2.d * c1))
	{
	  retval = y;
	  goto ret;
	}
      retval = tanMp (x);
      goto ret;
    }

  /* (III) The case 0.0608 < abs(x) <= 0.787 */
  if (w <= g3.d)
    {
      /* First stage: table lookup (xfg) plus small correction z around the
         table node, combined via the tangent addition formula.  */
      i = ((int) (mfftnhf.d + TWO8 * w));
      z = w - xfg[i][0].d;
      z2 = z * z;
      s = (x < 0.0) ? -1 : 1;
      pz = z + z * z2 * (e0.d + z2 * e1.d);
      fi = xfg[i][1].d;	/* tan at node (high part) */
      gi = xfg[i][2].d;	/* cot at node */
      t2 = pz * (gi + fi) / (gi - pz);
      if ((y = fi + (t2 - fi * u3.d)) == fi + (t2 + fi * u3.d))
	{
	  retval = (s * y);
	  goto ret;
	}
      t3 = (t2 < 0.0) ? -t2 : t2;
      t4 = fi * ua3.d + t3 * ub3.d;
      if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
	{
	  retval = (s * y);
	  goto ret;
	}

      /* Second stage: same formula in double-double arithmetic.  */
      ffi = xfg[i][3].d;	/* low part of tan at node */
      c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
      EMULV (z, z, z2, zz2, t1, t2, t3, t4, t5);
      ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (z, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (z, 0.0, c2, cc2, c1, cc1, t1, t2);

      ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
      MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
      SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);
      DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
	    t10);

      if ((y = c3 + (cc3 - u4.d * c3)) == c3 + (cc3 + u4.d * c3))
	{
	  retval = (s * y);
	  goto ret;
	}
      retval = tanMp (x);
      goto ret;
    }

  /* (---) The case 0.787 < abs(x) <= 25 */
  if (w <= g4.d)
    {
      /* Range reduction by algorithm i: y = x - n*pi/2 with pi/2 split
         into mp1+mp2+mp3; n's parity selects tan vs -cot.  */
      t = (x * hpinv.d + toint.d);
      xn = t - toint.d;
      v.d = t;
      t1 = (x - xn * mp1.d) - xn * mp2.d;
      n = v.i[LOW_HALF] & 0x00000001;
      da = xn * mp3.d;
      a = t1 - da;
      da = (t1 - a) - da;
      if (a < 0.0)
	{
	  ya = -a;
	  yya = -da;
	  sy = -1;
	}
      else
	{
	  ya = a;
	  yya = da;
	  sy = 1;
	}

      /* (IV),(V) The case 0.787 < abs(x) <= 25, abs(y) <= 1e-7 */
      /* Too close to a multiple of pi/2: go straight to multi-precision.  */
      if (ya <= gy1.d)
	{
	  retval = tanMp (x);
	  goto ret;
	}

      /* (VI) The case 0.787 < abs(x) <= 25, 1e-7 < abs(y) <= 0.0608 */
      if (ya <= gy2.d)
	{
	  a2 = a * a;
	  t2 = d9.d + a2 * d11.d;
	  t2 = d7.d + a2 * t2;
	  t2 = d5.d + a2 * t2;
	  t2 = d3.d + a2 * t2;
	  t2 = da + a * a2 * t2;

	  if (n)
	    {
	      /* First stage -cot */
	      EADD (a, t2, b, db);
	      DIV2 (1.0, 0.0, b, db, c, dc, t1, t2, t3, t4, t5, t6, t7, t8,
		    t9, t10);
	      if ((y = c + (dc - u6.d * c)) == c + (dc + u6.d * c))
		{
		  retval = (-y);
		  goto ret;
		}
	    }
	  else
	    {
	      /* First stage tan */
	      if ((y = a + (t2 - u5.d * a)) == a + (t2 + u5.d * a))
		{
		  retval = y;
		  goto ret;
		}
	    }
	  /* Second stage */
	  /* Range reduction by algorithm ii (more accurate split of pi/2
	     using pp3/pp4).  */
	  t = (x * hpinv.d + toint.d);
	  xn = t - toint.d;
	  v.d = t;
	  t1 = (x - xn * mp1.d) - xn * mp2.d;
	  n = v.i[LOW_HALF] & 0x00000001;
	  da = xn * pp3.d;
	  t = t1 - da;
	  da = (t1 - t) - da;
	  t1 = xn * pp4.d;
	  a = t - t1;
	  da = ((t - a) - t1) + da;

	  /* Second stage */
	  EADD (a, da, t1, t2);
	  a = t1;
	  da = t2;
	  MUL2 (a, da, a, da, x2, xx2, t1, t2, t3, t4, t5, t6, t7, t8);

	  c1 = a25.d + x2 * a27.d;
	  c1 = a23.d + x2 * c1;
	  c1 = a21.d + x2 * c1;
	  c1 = a19.d + x2 * c1;
	  c1 = a17.d + x2 * c1;
	  c1 = a15.d + x2 * c1;
	  c1 *= x2;

	  ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
	  MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
	  ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
	  MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
	  ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
	  MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
	  ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
	  MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
	  ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
	  MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
	  ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
	  MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
	  MUL2 (a, da, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
	  ADD2 (a, da, c2, cc2, c1, cc1, t1, t2);

	  if (n)
	    {
	      /* Second stage -cot */
	      DIV2 (1.0, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7,
		    t8, t9, t10);
	      if ((y = c2 + (cc2 - u8.d * c2)) == c2 + (cc2 + u8.d * c2))
		{
		  retval = (-y);
		  goto ret;
		}
	    }
	  else
	    {
	      /* Second stage tan */
	      if ((y = c1 + (cc1 - u7.d * c1)) == c1 + (cc1 + u7.d * c1))
		{
		  retval = y;
		  goto ret;
		}
	    }
	  retval = tanMp (x);
	  goto ret;
	}

      /* (VII) The case 0.787 < abs(x) <= 25, 0.0608 < abs(y) <= 0.787 */

      /* First stage */
      i = ((int) (mfftnhf.d + TWO8 * ya));
      z = (z0 = (ya - xfg[i][0].d)) + yya;
      z2 = z * z;
      pz = z + z * z2 * (e0.d + z2 * e1.d);
      fi = xfg[i][1].d;
      gi = xfg[i][2].d;

      if (n)
	{
	  /* -cot */
	  t2 = pz * (fi + gi) / (fi + pz);
	  if ((y = gi - (t2 - gi * u10.d)) == gi - (t2 + gi * u10.d))
	    {
	      retval = (-sy * y);
	      goto ret;
	    }
	  t3 = (t2 < 0.0) ? -t2 : t2;
	  t4 = gi * ua10.d + t3 * ub10.d;
	  if ((y = gi - (t2 - t4)) == gi - (t2 + t4))
	    {
	      retval = (-sy * y);
	      goto ret;
	    }
	}
      else
	{
	  /* tan */
	  t2 = pz * (gi + fi) / (gi - pz);
	  if ((y = fi + (t2 - fi * u9.d)) == fi + (t2 + fi * u9.d))
	    {
	      retval = (sy * y);
	      goto ret;
	    }
	  t3 = (t2 < 0.0) ? -t2 : t2;
	  t4 = fi * ua9.d + t3 * ub9.d;
	  if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
	    {
	      retval = (sy * y);
	      goto ret;
	    }
	}

      /* Second stage */
      ffi = xfg[i][3].d;
      /* NOTE(review): no trailing semicolon here, unlike the other EADD
         call sites; presumably EADD expands to a braced block so this is
         harmless — confirm against dla.h.  */
      EADD (z0, yya, z, zz)
      MUL2 (z, zz, z, zz, z2, zz2, t1, t2, t3, t4, t5, t6, t7, t8);
      c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
      ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (z, zz, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (z, zz, c2, cc2, c1, cc1, t1, t2);

      ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
      MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
      SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);

      if (n)
	{
	  /* -cot */
	  DIV2 (c1, cc1, c2, cc2, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
		t10);
	  if ((y = c3 + (cc3 - u12.d * c3)) == c3 + (cc3 + u12.d * c3))
	    {
	      retval = (-sy * y);
	      goto ret;
	    }
	}
      else
	{
	  /* tan */
	  DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
		t10);
	  if ((y = c3 + (cc3 - u11.d * c3)) == c3 + (cc3 + u11.d * c3))
	    {
	      retval = (sy * y);
	      goto ret;
	    }
	}

      retval = tanMp (x);
      goto ret;
    }

  /* (---) The case 25 < abs(x) <= 1e8 */
  if (w <= g5.d)
    {
      /* Range reduction by algorithm ii */
      t = (x * hpinv.d + toint.d);
      xn = t - toint.d;
      v.d = t;
      t1 = (x - xn * mp1.d) - xn * mp2.d;
      n = v.i[LOW_HALF] & 0x00000001;
      da = xn * pp3.d;
      t = t1 - da;
      da = (t1 - t) - da;
      t1 = xn * pp4.d;
      a = t - t1;
      da = ((t - a) - t1) + da;
      EADD (a, da, t1, t2);
      a = t1;
      da = t2;
      if (a < 0.0)
	{
	  ya = -a;
	  yya = -da;
	  sy = -1;
	}
      else
	{
	  ya = a;
	  yya = da;
	  sy = 1;
	}

      /* (+++) The case 25 < abs(x) <= 1e8, abs(y) <= 1e-7 */
      if (ya <= gy1.d)
	{
	  retval = tanMp (x);
	  goto ret;
	}

      /* (VIII) The case 25 < abs(x) <= 1e8, 1e-7 < abs(y) <= 0.0608 */
      if (ya <= gy2.d)
	{
	  a2 = a * a;
	  t2 = d9.d + a2 * d11.d;
	  t2 = d7.d + a2 * t2;
	  t2 = d5.d + a2 * t2;
	  t2 = d3.d + a2 * t2;
	  t2 = da + a * a2 * t2;

	  if (n)
	    {
	      /* First stage -cot */
	      EADD (a, t2, b, db);
	      DIV2 (1.0, 0.0, b, db, c, dc, t1, t2, t3, t4, t5, t6, t7, t8,
		    t9, t10);
	      if ((y = c + (dc - u14.d * c)) == c + (dc + u14.d * c))
		{
		  retval = (-y);
		  goto ret;
		}
	    }
	  else
	    {
	      /* First stage tan */
	      if ((y = a + (t2 - u13.d * a)) == a + (t2 + u13.d * a))
		{
		  retval = y;
		  goto ret;
		}
	    }

	  /* Second stage */
	  MUL2 (a, da, a, da, x2, xx2, t1, t2, t3, t4, t5, t6, t7, t8);
	  c1 = a25.d + x2 * a27.d;
	  c1 = a23.d + x2 * c1;
	  c1 = a21.d + x2 * c1;
	  c1 = a19.d + x2 * c1;
	  c1 = a17.d + x2 * c1;
	  c1 = a15.d + x2 * c1;
	  c1 *= x2;

	  ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
	  MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
	  ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
	  MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
	  ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
	  MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
	  ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
	  MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
	  ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
	  MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
	  ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
	  MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
	  MUL2 (a, da, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
	  ADD2 (a, da, c2, cc2, c1, cc1, t1, t2);

	  if (n)
	    {
	      /* Second stage -cot */
	      DIV2 (1.0, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7,
		    t8, t9, t10);
	      if ((y = c2 + (cc2 - u16.d * c2)) == c2 + (cc2 + u16.d * c2))
		{
		  retval = (-y);
		  goto ret;
		}
	    }
	  else
	    {
	      /* Second stage tan */
	      if ((y = c1 + (cc1 - u15.d * c1)) == c1 + (cc1 + u15.d * c1))
		{
		  retval = (y);
		  goto ret;
		}
	    }
	  retval = tanMp (x);
	  goto ret;
	}

      /* (IX) The case 25 < abs(x) <= 1e8, 0.0608 < abs(y) <= 0.787 */
      /* First stage */
      i = ((int) (mfftnhf.d + TWO8 * ya));
      z = (z0 = (ya - xfg[i][0].d)) + yya;
      z2 = z * z;
      pz = z + z * z2 * (e0.d + z2 * e1.d);
      fi = xfg[i][1].d;
      gi = xfg[i][2].d;

      if (n)
	{
	  /* -cot */
	  t2 = pz * (fi + gi) / (fi + pz);
	  if ((y = gi - (t2 - gi * u18.d)) == gi - (t2 + gi * u18.d))
	    {
	      retval = (-sy * y);
	      goto ret;
	    }
	  t3 = (t2 < 0.0) ? -t2 : t2;
	  t4 = gi * ua18.d + t3 * ub18.d;
	  if ((y = gi - (t2 - t4)) == gi - (t2 + t4))
	    {
	      retval = (-sy * y);
	      goto ret;
	    }
	}
      else
	{
	  /* tan */
	  t2 = pz * (gi + fi) / (gi - pz);
	  if ((y = fi + (t2 - fi * u17.d)) == fi + (t2 + fi * u17.d))
	    {
	      retval = (sy * y);
	      goto ret;
	    }
	  t3 = (t2 < 0.0) ? -t2 : t2;
	  t4 = fi * ua17.d + t3 * ub17.d;
	  if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
	    {
	      retval = (sy * y);
	      goto ret;
	    }
	}

      /* Second stage */
      ffi = xfg[i][3].d;
      EADD (z0, yya, z, zz);
      MUL2 (z, zz, z, zz, z2, zz2, t1, t2, t3, t4, t5, t6, t7, t8);
      c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
      ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (z, zz, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (z, zz, c2, cc2, c1, cc1, t1, t2);

      ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
      MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
      SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);

      if (n)
	{
	  /* -cot */
	  DIV2 (c1, cc1, c2, cc2, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
		t10);
	  if ((y = c3 + (cc3 - u20.d * c3)) == c3 + (cc3 + u20.d * c3))
	    {
	      retval = (-sy * y);
	      goto ret;
	    }
	}
      else
	{
	  /* tan */
	  DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
		t10);
	  if ((y = c3 + (cc3 - u19.d * c3)) == c3 + (cc3 + u19.d * c3))
	    {
	      retval = (sy * y);
	      goto ret;
	    }
	}
      retval = tanMp (x);
      goto ret;
    }

  /* (---) The case 1e8 < abs(x) < 2**1024 */
  /* Range reduction by algorithm iii (Payne/Hanek-style __branred).  */
  n = (__branred (x, &a, &da)) & 0x00000001;
  EADD (a, da, t1, t2);
  a = t1;
  da = t2;
  if (a < 0.0)
    {
      ya = -a;
      yya = -da;
      sy = -1;
    }
  else
    {
      ya = a;
      yya = da;
      sy = 1;
    }

  /* (+++) The case 1e8 < abs(x) < 2**1024, abs(y) <= 1e-7 */
  if (ya <= gy1.d)
    {
      retval = tanMp (x);
      goto ret;
    }

  /* (X) The case 1e8 < abs(x) < 2**1024, 1e-7 < abs(y) <= 0.0608 */
  if (ya <= gy2.d)
    {
      a2 = a * a;
      t2 = d9.d + a2 * d11.d;
      t2 = d7.d + a2 * t2;
      t2 = d5.d + a2 * t2;
      t2 = d3.d + a2 * t2;
      t2 = da + a * a2 * t2;
      if (n)
	{
	  /* First stage -cot */
	  EADD (a, t2, b, db);
	  DIV2 (1.0, 0.0, b, db, c, dc, t1, t2, t3, t4, t5, t6, t7, t8, t9,
		t10);
	  if ((y = c + (dc - u22.d * c)) == c + (dc + u22.d * c))
	    {
	      retval = (-y);
	      goto ret;
	    }
	}
      else
	{
	  /* First stage tan */
	  if ((y = a + (t2 - u21.d * a)) == a + (t2 + u21.d * a))
	    {
	      retval = y;
	      goto ret;
	    }
	}

      /* Second stage */
      /* Reduction by algorithm iv: redo the reduction in multi-precision
         and recover a double-double (a, da) reduced argument.  */
      p = 10;
      n = (__mpranred (x, &mpa, p)) & 0x00000001;
      __mp_dbl (&mpa, &a, p);
      __dbl_mp (a, &mpt1, p);
      __sub (&mpa, &mpt1, &mpt2, p);
      __mp_dbl (&mpt2, &da, p);

      MUL2 (a, da, a, da, x2, xx2, t1, t2, t3, t4, t5, t6, t7, t8);

      c1 = a25.d + x2 * a27.d;
      c1 = a23.d + x2 * c1;
      c1 = a21.d + x2 * c1;
      c1 = a19.d + x2 * c1;
      c1 = a17.d + x2 * c1;
      c1 = a15.d + x2 * c1;
      c1 *= x2;

      ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (a, da, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a, da, c2, cc2, c1, cc1, t1, t2);

      if (n)
	{
	  /* Second stage -cot */
	  DIV2 (1.0, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8,
		t9, t10);
	  if ((y = c2 + (cc2 - u24.d * c2)) == c2 + (cc2 + u24.d * c2))
	    {
	      retval = (-y);
	      goto ret;
	    }
	}
      else
	{
	  /* Second stage tan */
	  if ((y = c1 + (cc1 - u23.d * c1)) == c1 + (cc1 + u23.d * c1))
	    {
	      retval = y;
	      goto ret;
	    }
	}
      retval = tanMp (x);
      goto ret;
    }

  /* (XI) The case 1e8 < abs(x) < 2**1024, 0.0608 < abs(y) <= 0.787 */
  /* First stage */
  i = ((int) (mfftnhf.d + TWO8 * ya));
  z = (z0 = (ya - xfg[i][0].d)) + yya;
  z2 = z * z;
  pz = z + z * z2 * (e0.d + z2 * e1.d);
  fi = xfg[i][1].d;
  gi = xfg[i][2].d;

  if (n)
    {
      /* -cot */
      t2 = pz * (fi + gi) / (fi + pz);
      if ((y = gi - (t2 - gi * u26.d)) == gi - (t2 + gi * u26.d))
	{
	  retval = (-sy * y);
	  goto ret;
	}
      t3 = (t2 < 0.0) ? -t2 : t2;
      t4 = gi * ua26.d + t3 * ub26.d;
      if ((y = gi - (t2 - t4)) == gi - (t2 + t4))
	{
	  retval = (-sy * y);
	  goto ret;
	}
    }
  else
    {
      /* tan */
      t2 = pz * (gi + fi) / (gi - pz);
      if ((y = fi + (t2 - fi * u25.d)) == fi + (t2 + fi * u25.d))
	{
	  retval = (sy * y);
	  goto ret;
	}
      t3 = (t2 < 0.0) ? -t2 : t2;
      t4 = fi * ua25.d + t3 * ub25.d;
      if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
	{
	  retval = (sy * y);
	  goto ret;
	}
    }

  /* Second stage */
  ffi = xfg[i][3].d;
  EADD (z0, yya, z, zz);
  MUL2 (z, zz, z, zz, z2, zz2, t1, t2, t3, t4, t5, t6, t7, t8);
  c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
  ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
  MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
  ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
  MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
  MUL2 (z, zz, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
  ADD2 (z, zz, c2, cc2, c1, cc1, t1, t2);

  ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
  MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
  SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);

  if (n)
    {
      /* -cot */
      DIV2 (c1, cc1, c2, cc2, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
	    t10);
      if ((y = c3 + (cc3 - u28.d * c3)) == c3 + (cc3 + u28.d * c3))
	{
	  retval = (-sy * y);
	  goto ret;
	}
    }
  else
    {
      /* tan */
      DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
	    t10);
      if ((y = c3 + (cc3 - u27.d * c3)) == c3 + (cc3 + u27.d * c3))
	{
	  retval = (sy * y);
	  goto ret;
	}
    }
  retval = tanMp (x);
  goto ret;

ret:
  return retval;
}
826
827 /* multiple precision stage */
828 /* Convert x to multi precision number,compute tan(x) by mptan() routine */
829 /* and converts result back to double */
830 static double
831 SECTION
832 tanMp (double x)
833 {
834 int p;
835 double y;
836 mp_no mpy;
837 p = 32;
838 __mptan (x, &mpy, p);
839 __mp_dbl (&mpy, &y, p);
840 LIBC_PROBE (slowtan, 2, &x, &y);
841 return y;
842 }
843
844 #ifdef NO_LONG_DOUBLE
845 weak_alias (tan, tanl)
846 #endif