1 /* Conditional Dead Call Elimination pass for the GNU compiler.
2 Copyright (C) 2008-2019 Free Software Foundation, Inc.
3 Contributed by Xinliang David Li <davidxl@google.com>
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
10 later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "cfghooks.h"
28 #include "tree-pass.h"
29 #include "ssa.h"
30 #include "gimple-pretty-print.h"
31 #include "fold-const.h"
32 #include "stor-layout.h"
33 #include "gimple-iterator.h"
34 #include "tree-cfg.h"
35 #include "tree-into-ssa.h"
36 #include "builtins.h"
37 #include "internal-fn.h"
38 #include "tree-dfa.h"
39 \f
40
41 /* This pass serves two closely-related purposes:
42
43 1. It conditionally executes calls that set errno if (a) the result of
44 the call is unused and (b) a simple range check on the arguments can
45 detect most cases where errno does not need to be set.
46
47 This is the "conditional dead-code elimination" that gave the pass
48 its original name, since the call is dead for most argument values.
49 The calls for which it helps are usually part of the C++ abstraction
50 penalty exposed after inlining.
51
52 2. It looks for calls to built-in functions that set errno and whose
53 result is used. It checks whether there is an associated internal
54 function that doesn't set errno and whether the target supports
55 that internal function. If so, the pass uses the internal function
56 to compute the result of the built-in function but still arranges
57 for errno to be set when necessary. There are two ways of setting
58 errno:
59
60 a. by protecting the original call with the same argument checks as (1)
61
62 b. by protecting the original call with a check that the result
63 of the internal function is not equal to itself (i.e. is NaN).
64
65 (b) requires that NaNs are the only erroneous results. It is not
66      appropriate for functions like log, which sets errno to ERANGE for zero
67 arguments. (b) is also likely to perform worse than (a) because it
68 requires the result to be calculated first. The pass therefore uses
69 (a) when it can and uses (b) as a fallback.
70
71 For (b) the pass can replace the original call with a call to
72 IFN_SET_EDOM, if the target supports direct assignments to errno.
73
74 In both cases, arguments that require errno to be set should occur
75 rarely in practice. Checks of the errno result should also be rare,
76 but the compiler would need powerful interprocedural analysis to
77 prove that errno is not checked. It's much easier to add argument
78 checks or result checks instead.
79
80 An example of (1) is:
81
82 log (x); // Mostly dead call
83 ==>
84 if (__builtin_islessequal (x, 0))
85 log (x);
86
87      With this change, the call to log (x) is effectively eliminated: in
88      the majority of cases x is in range, so log is not called at all.
89      The branch is totally predictable, so the branch cost
90      is low.
91
92 An example of (2) is:
93
94 y = sqrt (x);
95 ==>
96 if (__builtin_isless (x, 0))
97 y = sqrt (x);
98 else
99 y = IFN_SQRT (x);
100 In the vast majority of cases we should then never need to call sqrt.
101
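        As a rough sketch of fallback (b) above (illustrative only, not the
     literal GIMPLE the pass emits): for an EDOM-only function such as
     fmod whose result is used, the transformation is conceptually

        y = fmod (x, z);
        ==>
        y = IFN_FMOD (x, z);
        if (!(y == y))       // y is NaN, so errno must be set
          IFN_SET_EDOM ();   // or fall back to the original fmod call

     so the errno-setting path is only taken for NaN results.
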
102     Note that library functions are not allowed to set errno to zero, even on
103     success.  See IEEE Std 1003.1, section 2.3 Error Numbers, and section 7.5:3 of
104     ISO/IEC 9899 (C99).
105
106     The condition wrapping the builtin call is set conservatively, to avoid
107     overly aggressive (i.e. incorrect) shrink wrapping.  */
108
109
110 /* A structure for representing the input domain of
111     a function argument as integer bounds.  If the lower
112     bound is -inf, has_lb is set to false.  If the
113     upper bound is +inf, has_ub is false.
114     is_lb_inclusive and is_ub_inclusive are flags
115     indicating whether the lb and ub values are inclusive,
116     respectively.  */
117
118 struct inp_domain
119 {
120 int lb;
121 int ub;
122 bool has_lb;
123 bool has_ub;
124 bool is_lb_inclusive;
125 bool is_ub_inclusive;
126 };
127
128 /* A helper function to construct and return an input
129 domain object. LB is the lower bound, HAS_LB is
130 a boolean flag indicating if the lower bound exists,
131 and LB_INCLUSIVE is a boolean flag indicating if the
132 lower bound is inclusive or not. UB, HAS_UB, and
133 UB_INCLUSIVE have the same meaning, but for upper
134 bound of the domain. */
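/* As an illustration (restating a call that appears later in
   get_no_error_domain): the no-error domain of acosh, [1, +inf),
   is built as

     get_domain (1, true, true,
                 1, false, false);

   i.e. an inclusive lower bound of 1 and no upper bound; the upper
   bound value passed is ignored when HAS_UB is false.  */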
135
136 static inp_domain
137 get_domain (int lb, bool has_lb, bool lb_inclusive,
138 int ub, bool has_ub, bool ub_inclusive)
139 {
140 inp_domain domain;
141 domain.lb = lb;
142 domain.has_lb = has_lb;
143 domain.is_lb_inclusive = lb_inclusive;
144 domain.ub = ub;
145 domain.has_ub = has_ub;
146 domain.is_ub_inclusive = ub_inclusive;
147 return domain;
148 }
149
150 /* A helper function to check the target format for the
151 argument type. In this implementation, only IEEE formats
152 are supported. ARG is the call argument to be checked.
153 Returns true if the format is supported. To support other
154 target formats, function get_no_error_domain needs to be
155 enhanced to have range bounds properly computed. Since
156     the check is cheap (only a very small number of candidates
157     need to be checked), the result is not cached for each float type.  */
158
159 static bool
160 check_target_format (tree arg)
161 {
162 tree type;
163 machine_mode mode;
164 const struct real_format *rfmt;
165
166 type = TREE_TYPE (arg);
167 mode = TYPE_MODE (type);
168 rfmt = REAL_MODE_FORMAT (mode);
169 if ((mode == SFmode
170 && (rfmt == &ieee_single_format || rfmt == &mips_single_format
171 || rfmt == &motorola_single_format))
172 || (mode == DFmode
173 && (rfmt == &ieee_double_format || rfmt == &mips_double_format
174 || rfmt == &motorola_double_format))
175 /* For long double, we cannot really check XFmode
176 which is only defined on intel platforms.
177 Candidate pre-selection using builtin function
178 code guarantees that we are checking formats
179 for long double modes: double, quad, and extended. */
180 || (mode != SFmode && mode != DFmode
181 && (rfmt == &ieee_quad_format
182 || rfmt == &mips_quad_format
183 || rfmt == &ieee_extended_motorola_format
184 || rfmt == &ieee_extended_intel_96_format
185 || rfmt == &ieee_extended_intel_128_format
186 || rfmt == &ieee_extended_intel_96_round_53_format)))
187 return true;
188
189 return false;
190 }
191
192 \f
193 /* A helper function to help select calls to pow that are suitable for
194 conditional DCE transformation. It looks for pow calls that can be
195     guarded with simple conditions.  Such calls either have constant base
196 values or base values converted from integers. Returns true if
197 the pow call POW_CALL is a candidate. */
198
199 /* The maximum integer bit size for base argument of a pow call
200 that is suitable for shrink-wrapping transformation. */
201 #define MAX_BASE_INT_BIT_SIZE 32
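/* Illustrative examples of the pre-selection below (see the checks in
   check_pow for the precise conditions): a call such as pow (2.0, y),
   whose constant base lies in the accepted range (1.0, 256.0], or
   pow ((double) n, y) with n a 32-bit-or-narrower integer, is treated
   as a candidate, while pow (x, y) with an arbitrary floating-point
   base is not.  */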
202
203 static bool
204 check_pow (gcall *pow_call)
205 {
206 tree base, expn;
207 enum tree_code bc, ec;
208
209 if (gimple_call_num_args (pow_call) != 2)
210 return false;
211
212 base = gimple_call_arg (pow_call, 0);
213 expn = gimple_call_arg (pow_call, 1);
214
215 if (!check_target_format (expn))
216 return false;
217
218 bc = TREE_CODE (base);
219 ec = TREE_CODE (expn);
220
221 /* Folding candidates are not interesting.
222     We could actually assert that it has already been folded.  */
223 if (ec == REAL_CST && bc == REAL_CST)
224 return false;
225
226 if (bc == REAL_CST)
227 {
228     /* Only handle a fixed range of constants.  */
229 REAL_VALUE_TYPE mv;
230 REAL_VALUE_TYPE bcv = TREE_REAL_CST (base);
231 if (real_equal (&bcv, &dconst1))
232 return false;
233 if (real_less (&bcv, &dconst1))
234 return false;
235 real_from_integer (&mv, TYPE_MODE (TREE_TYPE (base)), 256, UNSIGNED);
236 if (real_less (&mv, &bcv))
237 return false;
238 return true;
239 }
240 else if (bc == SSA_NAME)
241 {
242 tree base_val0, type;
243 gimple *base_def;
244 int bit_sz;
245
246     /* Only handle cases where the base value is converted
247     from an integer value.  */
248 base_def = SSA_NAME_DEF_STMT (base);
249 if (gimple_code (base_def) != GIMPLE_ASSIGN)
250 return false;
251
252 if (gimple_assign_rhs_code (base_def) != FLOAT_EXPR)
253 return false;
254 base_val0 = gimple_assign_rhs1 (base_def);
255
256 type = TREE_TYPE (base_val0);
257 if (TREE_CODE (type) != INTEGER_TYPE)
258 return false;
259 bit_sz = TYPE_PRECISION (type);
260 /* If the type of the base is too wide,
261 the resulting shrink wrapping condition
262 will be too conservative. */
263 if (bit_sz > MAX_BASE_INT_BIT_SIZE)
264 return false;
265
266 return true;
267 }
268 else
269 return false;
270 }
271
272 /* A helper function to help select candidate function calls that are
273     suitable for conditional DCE.  In this implementation, candidate functions
274     must have a single valid input domain, except for pow (see check_pow).
275 Returns true if the function call is a candidate. */
276
277 static bool
278 check_builtin_call (gcall *bcall)
279 {
280 tree arg;
281
282 arg = gimple_call_arg (bcall, 0);
283 return check_target_format (arg);
284 }
285
286 /* Return true if built-in function call CALL calls a math function
287 and if we know how to test the range of its arguments to detect _most_
288 situations in which errno is not set. The test must err on the side
289 of treating non-erroneous values as potentially erroneous. */
290
291 static bool
292 can_test_argument_range (gcall *call)
293 {
294 switch (DECL_FUNCTION_CODE (gimple_call_fndecl (call)))
295 {
296 /* Trig functions. */
297 CASE_FLT_FN (BUILT_IN_ACOS):
298 CASE_FLT_FN (BUILT_IN_ASIN):
299 /* Hyperbolic functions. */
300 CASE_FLT_FN (BUILT_IN_ACOSH):
301 CASE_FLT_FN (BUILT_IN_ATANH):
302 CASE_FLT_FN (BUILT_IN_COSH):
303 CASE_FLT_FN (BUILT_IN_SINH):
304 /* Log functions. */
305 CASE_FLT_FN (BUILT_IN_LOG):
306 CASE_FLT_FN (BUILT_IN_LOG2):
307 CASE_FLT_FN (BUILT_IN_LOG10):
308 CASE_FLT_FN (BUILT_IN_LOG1P):
309 /* Exp functions. */
310 CASE_FLT_FN (BUILT_IN_EXP):
311 CASE_FLT_FN (BUILT_IN_EXP2):
312 CASE_FLT_FN (BUILT_IN_EXP10):
313 CASE_FLT_FN (BUILT_IN_EXPM1):
314 CASE_FLT_FN (BUILT_IN_POW10):
315 /* Sqrt. */
316 CASE_FLT_FN (BUILT_IN_SQRT):
317 CASE_FLT_FN_FLOATN_NX (BUILT_IN_SQRT):
318 return check_builtin_call (call);
319 /* Special one: two argument pow. */
320 case BUILT_IN_POW:
321 return check_pow (call);
322 default:
323 break;
324 }
325
326 return false;
327 }
328
329 /* Return true if CALL can produce a domain error (EDOM) but can never
330 produce a pole, range overflow or range underflow error (all ERANGE).
331 This means that we can tell whether a function would have set errno
332 by testing whether the result is a NaN. */
333
334 static bool
335 edom_only_function (gcall *call)
336 {
337 switch (DECL_FUNCTION_CODE (gimple_call_fndecl (call)))
338 {
339 CASE_FLT_FN (BUILT_IN_ACOS):
340 CASE_FLT_FN (BUILT_IN_ASIN):
341 CASE_FLT_FN (BUILT_IN_ATAN):
342 CASE_FLT_FN (BUILT_IN_COS):
343 CASE_FLT_FN (BUILT_IN_SIGNIFICAND):
344 CASE_FLT_FN (BUILT_IN_SIN):
345 CASE_FLT_FN (BUILT_IN_SQRT):
346 CASE_FLT_FN_FLOATN_NX (BUILT_IN_SQRT):
347 CASE_FLT_FN (BUILT_IN_FMOD):
348 CASE_FLT_FN (BUILT_IN_REMAINDER):
349 return true;
350
351 default:
352 return false;
353 }
354 }
355
356 /* Return true if it is structurally possible to guard CALL. */
357
358 static bool
359 can_guard_call_p (gimple *call)
360 {
361 return (!stmt_ends_bb_p (call)
362 || find_fallthru_edge (gimple_bb (call)->succs));
363 }
364 \f
365 /* For a comparison code return the comparison code we should use if we don't
366 HONOR_NANS. */
367
368 static enum tree_code
369 comparison_code_if_no_nans (tree_code code)
370 {
371 switch (code)
372 {
373 case UNLT_EXPR:
374 return LT_EXPR;
375 case UNGT_EXPR:
376 return GT_EXPR;
377 case UNLE_EXPR:
378 return LE_EXPR;
379 case UNGE_EXPR:
380 return GE_EXPR;
381 case UNEQ_EXPR:
382 return EQ_EXPR;
383 case LTGT_EXPR:
384 return NE_EXPR;
385
386 case LT_EXPR:
387 case GT_EXPR:
388 case LE_EXPR:
389 case GE_EXPR:
390 case EQ_EXPR:
391 case NE_EXPR:
392 return code;
393
394 default:
395 gcc_unreachable ();
396 }
397 }
398
399 /* A helper function to generate gimple statements for one bound
400 comparison, so that the built-in function is called whenever
401     TCODE (ARG, LBUB) is *false*.  TEMP_NAME1/TEMP_NAME2 are names
402     of the temporaries, CONDS is a vector holding the produced GIMPLE
403     statements, and NCONDS points to the variable holding the number of
404     logical comparisons.  On entry CONDS is either empty or ends with a
405     NULL separator entry.  */
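/* A sketch of the statements produced (names and exact form are
   illustrative only): for ARG = x_1, LBUB = 0 and TCODE = UNGT_EXPR,
   the three pushed statements correspond roughly to

     DCE_COND_LB_2 = x_1;
     DCE_COND_LB_TEST_3 = DCE_COND_LB_2 u> 0.0;
     if (DCE_COND_LB_TEST_3) goto <bypass the builtin>;
     else goto <call the builtin>;

   where the temporary names come from the callers in
   gen_conditions_for_domain.  */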
406
407 static void
408 gen_one_condition (tree arg, int lbub,
409 enum tree_code tcode,
410 const char *temp_name1,
411 const char *temp_name2,
412 vec<gimple *> conds,
413 unsigned *nconds)
414 {
415 if (!HONOR_NANS (arg))
416 tcode = comparison_code_if_no_nans (tcode);
417
418 tree lbub_real_cst, lbub_cst, float_type;
419 tree temp, tempn, tempc, tempcn;
420 gassign *stmt1;
421 gassign *stmt2;
422 gcond *stmt3;
423
424 float_type = TREE_TYPE (arg);
425 lbub_cst = build_int_cst (integer_type_node, lbub);
426 lbub_real_cst = build_real_from_int_cst (float_type, lbub_cst);
427
428 temp = create_tmp_var (float_type, temp_name1);
429 stmt1 = gimple_build_assign (temp, arg);
430 tempn = make_ssa_name (temp, stmt1);
431 gimple_assign_set_lhs (stmt1, tempn);
432
433 tempc = create_tmp_var (boolean_type_node, temp_name2);
434 stmt2 = gimple_build_assign (tempc,
435 fold_build2 (tcode,
436 boolean_type_node,
437 tempn, lbub_real_cst));
438 tempcn = make_ssa_name (tempc, stmt2);
439 gimple_assign_set_lhs (stmt2, tempcn);
440
441 stmt3 = gimple_build_cond_from_tree (tempcn, NULL_TREE, NULL_TREE);
442 conds.quick_push (stmt1);
443 conds.quick_push (stmt2);
444 conds.quick_push (stmt3);
445 (*nconds)++;
446 }
447
448 /* A helper function to generate GIMPLE statements for
449     the out-of-input-domain check.  ARG is the call argument
450     to be runtime checked, DOMAIN holds the valid domain
451     for the given function, CONDS is the vector
452     holding the resulting GIMPLE statements.  *NCONDS is
453     the number of logical comparisons.  This function
454     produces no more than two logical comparisons, one
455     for the lower bound check and one for the upper bound check.  */
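/* For example (illustrative only): acos has the no-error domain
   [-1, +1], so this produces two statement groups separated by a
   NULL entry, roughly

     guard 1:  ARG u>= -1.0   (UNGE_EXPR, inclusive lower bound)
     guard 2:  ARG u<= +1.0   (UNLE_EXPR, inclusive upper bound)

   and the builtin is called whenever either guard is false.  */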
456
457 static void
458 gen_conditions_for_domain (tree arg, inp_domain domain,
459 vec<gimple *> conds,
460 unsigned *nconds)
461 {
462 if (domain.has_lb)
463 gen_one_condition (arg, domain.lb,
464 (domain.is_lb_inclusive
465 ? UNGE_EXPR : UNGT_EXPR),
466 "DCE_COND_LB", "DCE_COND_LB_TEST",
467 conds, nconds);
468
469 if (domain.has_ub)
470 {
471 /* Now push a separator. */
472 if (domain.has_lb)
473 conds.quick_push (NULL);
474
475 gen_one_condition (arg, domain.ub,
476 (domain.is_ub_inclusive
477 ? UNLE_EXPR : UNLT_EXPR),
478 "DCE_COND_UB", "DCE_COND_UB_TEST",
479 conds, nconds);
480 }
481 }
482
483
484 /* A helper function to generate condition
485     code for the y argument in the call pow (some_const, y).
486 See candidate selection in check_pow. Since the
487 candidates' base values have a limited range,
488     the guard code generated for y is simple:
489 if (__builtin_isgreater (y, max_y))
490 pow (const, y);
491 Note max_y can be computed separately for each
492 const base, but in this implementation, we
493 choose to compute it using the max base
494 in the allowed range for the purpose of
495 simplicity. BASE is the constant base value,
496 EXPN is the expression for the exponent argument,
497 *CONDS is the vector to hold resulting statements,
498 and *NCONDS is the number of logical conditions. */
499
500 static void
501 gen_conditions_for_pow_cst_base (tree base, tree expn,
502 vec<gimple *> conds,
503 unsigned *nconds)
504 {
505 inp_domain exp_domain;
506 /* Validate the range of the base constant to make
507 sure it is consistent with check_pow. */
508 REAL_VALUE_TYPE mv;
509 REAL_VALUE_TYPE bcv = TREE_REAL_CST (base);
510 gcc_assert (!real_equal (&bcv, &dconst1)
511 && !real_less (&bcv, &dconst1));
512 real_from_integer (&mv, TYPE_MODE (TREE_TYPE (base)), 256, UNSIGNED);
513 gcc_assert (!real_less (&mv, &bcv));
514
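  /* The exponent bound of 127 used below is presumably chosen so that
     even the largest accepted base stays in range: 256**127 == 2**1016,
     which is below the IEEE double overflow threshold of about 2**1024.  */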
515 exp_domain = get_domain (0, false, false,
516 127, true, false);
517
518 gen_conditions_for_domain (expn, exp_domain,
519 conds, nconds);
520 }
521
522 /* Generate error condition code for pow calls with
523     non-constant base values.  The candidates selected
524     have their base argument value converted from
525     an integer value of 1, 2 or 4 bytes (see check_pow), and
526     the max exp value is computed based on the size
527     of the integer type (i.e. the max possible base value).
528     The resulting input domain for the exp argument is thus
529 conservative (smaller than the max value allowed by
530 the runtime value of the base). BASE is the integer
531 base value, EXPN is the expression for the exponent
532 argument, *CONDS is the vector to hold resulting
533 statements, and *NCONDS is the number of logical
534 conditions. */
535
536 static void
537 gen_conditions_for_pow_int_base (tree base, tree expn,
538 vec<gimple *> conds,
539 unsigned *nconds)
540 {
541 gimple *base_def;
542 tree base_val0;
543 tree int_type;
544 tree temp, tempn;
545 tree cst0;
546 gimple *stmt1, *stmt2;
547 int bit_sz, max_exp;
548 inp_domain exp_domain;
549
550 base_def = SSA_NAME_DEF_STMT (base);
551 base_val0 = gimple_assign_rhs1 (base_def);
552 int_type = TREE_TYPE (base_val0);
553 bit_sz = TYPE_PRECISION (int_type);
554 gcc_assert (bit_sz > 0
555 && bit_sz <= MAX_BASE_INT_BIT_SIZE);
556
557 /* Determine the max exp argument value according to
558 the size of the base integer. The max exp value
559 is conservatively estimated assuming IEEE754 double
560 precision format. */
561 if (bit_sz == 8)
562 max_exp = 128;
563 else if (bit_sz == 16)
564 max_exp = 64;
565 else
566 {
567 gcc_assert (bit_sz == MAX_BASE_INT_BIT_SIZE);
568 max_exp = 32;
569 }
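  /* That is, max_exp is chosen as 1024 / bit_sz: the base is at most
     2**bit_sz - 1, so base**max_exp stays just below 2**1024, the
     overflow threshold of IEEE double precision.  */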
570
571 /* For pow ((double)x, y), generate the following conditions:
572 cond 1:
573 temp1 = x;
574 if (__builtin_islessequal (temp1, 0))
575
576 cond 2:
577 temp2 = y;
578 if (__builtin_isgreater (temp2, max_exp_real_cst)) */
579
580 /* Generate condition in reverse order -- first
581 the condition for the exp argument. */
582
583 exp_domain = get_domain (0, false, false,
584 max_exp, true, true);
585
586 gen_conditions_for_domain (expn, exp_domain,
587 conds, nconds);
588
589 /* Now generate condition for the base argument.
590 Note it does not use the helper function
591 gen_conditions_for_domain because the base
592 type is integer. */
593
594 /* Push a separator. */
595 conds.quick_push (NULL);
596
597 temp = create_tmp_var (int_type, "DCE_COND1");
598 cst0 = build_int_cst (int_type, 0);
599 stmt1 = gimple_build_assign (temp, base_val0);
600 tempn = make_ssa_name (temp, stmt1);
601 gimple_assign_set_lhs (stmt1, tempn);
602 stmt2 = gimple_build_cond (GT_EXPR, tempn, cst0, NULL_TREE, NULL_TREE);
603
604 conds.quick_push (stmt1);
605 conds.quick_push (stmt2);
606 (*nconds)++;
607 }
608
609 /* Method to generate conditional statements for guarding conditionally
610 dead calls to pow. One or more statements can be generated for
611 each logical condition. Statement groups of different conditions
612     are separated by a NULL entry and are stored in the vec
613     conds.  The number of logical conditions is stored in *nconds.
614
615     See the C99 standard, 7.12.7.4:2, for a description of pow (x, y).
616     The precise conditions for domain errors are complex.  In this
617     implementation, a simplified (but conservative) valid domain
618     for x and y is used: x must be positive to avoid domain errors, and
619     y must be smaller than an upper bound (depending on x) to avoid
620     range errors.  Runtime code is generated to check x (if not constant)
621     and y against the valid domain.  If either is out of range, control
622     jumps to the call; otherwise the call is bypassed.  POW_CALL is the call statement,
623 *CONDS is a vector holding the resulting condition statements,
624 and *NCONDS is the number of logical conditions. */
625
626 static void
627 gen_conditions_for_pow (gcall *pow_call, vec<gimple *> conds,
628 unsigned *nconds)
629 {
630 tree base, expn;
631 enum tree_code bc;
632
633 gcc_checking_assert (check_pow (pow_call));
634
635 *nconds = 0;
636
637 base = gimple_call_arg (pow_call, 0);
638 expn = gimple_call_arg (pow_call, 1);
639
640 bc = TREE_CODE (base);
641
642 if (bc == REAL_CST)
643 gen_conditions_for_pow_cst_base (base, expn, conds, nconds);
644 else if (bc == SSA_NAME)
645 gen_conditions_for_pow_int_base (base, expn, conds, nconds);
646 else
647 gcc_unreachable ();
648 }
649
650 /* A helper routine to help compute the valid input domain
651     for a builtin function.  See C99 7.12.7 for details.  In this
652     implementation, we only handle a single-region domain.  The
653 resulting region can be conservative (smaller) than the actual
654 one and rounded to integers. Some of the bounds are documented
655 in the standard, while other limit constants are computed
656 assuming IEEE floating point format (for SF and DF modes).
657 Since IEEE only sets minimum requirements for long double format,
658 different long double formats exist under different implementations
659     (e.g., 64 bit double precision (DF), 80 bit double-extended
660     precision (XF), and 128 bit quad precision (QF)).  For simplicity,
661 in this implementation, the computed bounds for long double assume
662 64 bit format (DF), and are therefore conservative. Another
663     assumption is that the single precision float type is always SF mode,
664     and the double type is DF mode.  This function is quite
665 implementation specific, so it may not be suitable to be part of
666 builtins.c. This needs to be revisited later to see if it can
667 be leveraged in x87 assembly expansion. */
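/* An informal derivation of the exp-family constants used below
   (illustrative, not normative): ln(FLT_MAX) is about 88.7 and
   ln(DBL_MAX) about 709.8, hence the cutoffs 88 for expf and 709 for
   exp; log2(FLT_MAX) = 128 and log2(DBL_MAX) = 1024 give the exp2
   bounds; log10(DBL_MAX) is about 308.3, giving 308 for exp10; and
   cosh (x) ~= exp (|x|) / 2 pushes the sinh/cosh cutoffs up by about
   ln(2), hence 89 and 710.  */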
668
669 static inp_domain
670 get_no_error_domain (enum built_in_function fnc)
671 {
672 switch (fnc)
673 {
674 /* Trig functions: return [-1, +1] */
675 CASE_FLT_FN (BUILT_IN_ACOS):
676 CASE_FLT_FN (BUILT_IN_ASIN):
677 return get_domain (-1, true, true,
678 1, true, true);
679 /* Hyperbolic functions. */
680 CASE_FLT_FN (BUILT_IN_ACOSH):
681 /* acosh: [1, +inf) */
682 return get_domain (1, true, true,
683 1, false, false);
684 CASE_FLT_FN (BUILT_IN_ATANH):
685 /* atanh: (-1, +1) */
686 return get_domain (-1, true, false,
687 1, true, false);
688 case BUILT_IN_COSHF:
689 case BUILT_IN_SINHF:
690 /* coshf: (-89, +89) */
691 return get_domain (-89, true, false,
692 89, true, false);
693 case BUILT_IN_COSH:
694 case BUILT_IN_SINH:
695 case BUILT_IN_COSHL:
696 case BUILT_IN_SINHL:
697 /* cosh: (-710, +710) */
698 return get_domain (-710, true, false,
699 710, true, false);
700 /* Log functions: (0, +inf) */
701 CASE_FLT_FN (BUILT_IN_LOG):
702 CASE_FLT_FN (BUILT_IN_LOG2):
703 CASE_FLT_FN (BUILT_IN_LOG10):
704 return get_domain (0, true, false,
705 0, false, false);
706 CASE_FLT_FN (BUILT_IN_LOG1P):
707 return get_domain (-1, true, false,
708 0, false, false);
709 /* Exp functions. */
710 case BUILT_IN_EXPF:
711 case BUILT_IN_EXPM1F:
712 /* expf: (-inf, 88) */
713 return get_domain (-1, false, false,
714 88, true, false);
715 case BUILT_IN_EXP:
716 case BUILT_IN_EXPM1:
717 case BUILT_IN_EXPL:
718 case BUILT_IN_EXPM1L:
719 /* exp: (-inf, 709) */
720 return get_domain (-1, false, false,
721 709, true, false);
722 case BUILT_IN_EXP2F:
723 /* exp2f: (-inf, 128) */
724 return get_domain (-1, false, false,
725 128, true, false);
726 case BUILT_IN_EXP2:
727 case BUILT_IN_EXP2L:
728 /* exp2: (-inf, 1024) */
729 return get_domain (-1, false, false,
730 1024, true, false);
731 case BUILT_IN_EXP10F:
732 case BUILT_IN_POW10F:
733 /* exp10f: (-inf, 38) */
734 return get_domain (-1, false, false,
735 38, true, false);
736 case BUILT_IN_EXP10:
737 case BUILT_IN_POW10:
738 case BUILT_IN_EXP10L:
739 case BUILT_IN_POW10L:
740 /* exp10: (-inf, 308) */
741 return get_domain (-1, false, false,
742 308, true, false);
743 /* sqrt: [0, +inf) */
744 CASE_FLT_FN (BUILT_IN_SQRT):
745 CASE_FLT_FN_FLOATN_NX (BUILT_IN_SQRT):
746 return get_domain (0, true, true,
747 0, false, false);
748 default:
749 gcc_unreachable ();
750 }
751
752 gcc_unreachable ();
753 }
754
755 /* The function to generate shrink wrap conditions for a partially
756 dead builtin call whose return value is not used anywhere,
757     but has to be kept live due to a potential error condition.
758     BI_CALL is the builtin call, CONDS is the vector of statements
759     for condition code, NCONDS is the pointer to the number of
760     logical conditions.  Statements belonging to different logical
761     conditions are separated by a NULL entry in the vector.  */
762
763 static void
764 gen_shrink_wrap_conditions (gcall *bi_call, vec<gimple *> conds,
765 unsigned int *nconds)
766 {
767 gcall *call;
768 tree fn;
769 enum built_in_function fnc;
770
771 gcc_assert (nconds && conds.exists ());
772 gcc_assert (conds.length () == 0);
773 gcc_assert (is_gimple_call (bi_call));
774
775 call = bi_call;
776 fn = gimple_call_fndecl (call);
777 gcc_assert (fn && fndecl_built_in_p (fn));
778 fnc = DECL_FUNCTION_CODE (fn);
779 *nconds = 0;
780
781 if (fnc == BUILT_IN_POW)
782 gen_conditions_for_pow (call, conds, nconds);
783 else
784 {
785 tree arg;
786 inp_domain domain = get_no_error_domain (fnc);
787 *nconds = 0;
788 arg = gimple_call_arg (bi_call, 0);
789 gen_conditions_for_domain (arg, domain, conds, nconds);
790 }
791
792 return;
793 }
794
795 /* Shrink-wrap BI_CALL so that it is only called when one of the NCONDS
796 conditions in CONDS is false. Also move BI_NEWCALL to a new basic block
797     when it is non-null; it is called when all of the CONDS are true.  */
798
799 static void
800 shrink_wrap_one_built_in_call_with_conds (gcall *bi_call, vec <gimple *> conds,
801 unsigned int nconds,
802 gcall *bi_newcall = NULL)
803 {
804 gimple_stmt_iterator bi_call_bsi;
805 basic_block bi_call_bb, bi_newcall_bb, join_tgt_bb, guard_bb;
806 edge join_tgt_in_edge_from_call, join_tgt_in_edge_fall_thru;
807 edge bi_call_in_edge0, guard_bb_in_edge;
808 unsigned tn_cond_stmts;
809 unsigned ci;
810 gimple *cond_expr = NULL;
811 gimple *cond_expr_start;
812
813 /* The cfg we want to create looks like this:
814 [guard n-1] <- guard_bb (old block)
815 | \
816 | [guard n-2] }
817 | / \ }
818 | / ... } new blocks
819 | / [guard 0] }
820 | / / | }
821 [call] | <- bi_call_bb }
822 \ [newcall] <-bi_newcall_bb}
823 \ |
824 [join] <- join_tgt_bb (old iff call must end bb)
825 possible EH edges (only if [join] is old)
826
827 When [join] is new, the immediate dominators for these blocks are:
828
829 1. [guard n-1]: unchanged
830 2. [call]: [guard n-1]
831 3. [newcall]: [guard 0]
832 4. [guard m]: [guard m+1] for 0 <= m <= n-2
833 5. [join]: [guard n-1]
834
835     We punt for the more complex case of [join] being old and
836 simply free the dominance info. We also punt on postdominators,
837 which aren't expected to be available at this point anyway. */
838 bi_call_bb = gimple_bb (bi_call);
839
840 /* Now find the join target bb -- split bi_call_bb if needed. */
841 if (stmt_ends_bb_p (bi_call))
842 {
843 /* We checked that there was a fallthrough edge in
844 can_guard_call_p. */
845 join_tgt_in_edge_from_call = find_fallthru_edge (bi_call_bb->succs);
846 gcc_assert (join_tgt_in_edge_from_call);
847 /* We don't want to handle PHIs. */
848 if (EDGE_COUNT (join_tgt_in_edge_from_call->dest->preds) > 1)
849 join_tgt_bb = split_edge (join_tgt_in_edge_from_call);
850 else
851 {
852 join_tgt_bb = join_tgt_in_edge_from_call->dest;
853 /* We may have degenerate PHIs in the destination. Propagate
854 those out. */
855 for (gphi_iterator i = gsi_start_phis (join_tgt_bb); !gsi_end_p (i);)
856 {
857 gphi *phi = i.phi ();
858 replace_uses_by (gimple_phi_result (phi),
859 gimple_phi_arg_def (phi, 0));
860 remove_phi_node (&i, true);
861 }
862 }
863 }
864 else
865 {
866 join_tgt_in_edge_from_call = split_block (bi_call_bb, bi_call);
867 join_tgt_bb = join_tgt_in_edge_from_call->dest;
868 }
869
870 bi_call_bsi = gsi_for_stmt (bi_call);
871
872 /* Now it is time to insert the first conditional expression
873 into bi_call_bb and split this bb so that bi_call is
874 shrink-wrapped. */
875 tn_cond_stmts = conds.length ();
876 cond_expr = NULL;
877 cond_expr_start = conds[0];
878 for (ci = 0; ci < tn_cond_stmts; ci++)
879 {
880 gimple *c = conds[ci];
881 gcc_assert (c || ci != 0);
882 if (!c)
883 break;
884 gsi_insert_before (&bi_call_bsi, c, GSI_SAME_STMT);
885 cond_expr = c;
886 }
887 ci++;
888 gcc_assert (cond_expr && gimple_code (cond_expr) == GIMPLE_COND);
889
890 typedef std::pair<edge, edge> edge_pair;
891 auto_vec<edge_pair, 8> edges;
892
893 bi_call_in_edge0 = split_block (bi_call_bb, cond_expr);
894 bi_call_in_edge0->flags &= ~EDGE_FALLTHRU;
895 bi_call_in_edge0->flags |= EDGE_FALSE_VALUE;
896 guard_bb = bi_call_bb;
897 bi_call_bb = bi_call_in_edge0->dest;
898 join_tgt_in_edge_fall_thru = make_edge (guard_bb, join_tgt_bb,
899 EDGE_TRUE_VALUE);
900
901 edges.reserve (nconds);
902 edges.quick_push (edge_pair (bi_call_in_edge0, join_tgt_in_edge_fall_thru));
903
904     /* Code generation for the rest of the conditions.  */
905 for (unsigned int i = 1; i < nconds; ++i)
906 {
907 unsigned ci0;
908 edge bi_call_in_edge;
909 gimple_stmt_iterator guard_bsi = gsi_for_stmt (cond_expr_start);
910 ci0 = ci;
911 cond_expr_start = conds[ci0];
912 for (; ci < tn_cond_stmts; ci++)
913 {
914 gimple *c = conds[ci];
915 gcc_assert (c || ci != ci0);
916 if (!c)
917 break;
918 gsi_insert_before (&guard_bsi, c, GSI_SAME_STMT);
919 cond_expr = c;
920 }
921 ci++;
922 gcc_assert (cond_expr && gimple_code (cond_expr) == GIMPLE_COND);
923 guard_bb_in_edge = split_block (guard_bb, cond_expr);
924 guard_bb_in_edge->flags &= ~EDGE_FALLTHRU;
925 guard_bb_in_edge->flags |= EDGE_TRUE_VALUE;
926
927 bi_call_in_edge = make_edge (guard_bb, bi_call_bb, EDGE_FALSE_VALUE);
928 edges.quick_push (edge_pair (bi_call_in_edge, guard_bb_in_edge));
929 }
930
931     /* Move BI_NEWCALL to a new basic block when it is non-null.  */
932 if (bi_newcall)
933 {
934     /* Get bi_newcall_bb by splitting the join_tgt_in_edge_fall_thru edge,
935 and move BI_NEWCALL to bi_newcall_bb. */
936 bi_newcall_bb = split_edge (join_tgt_in_edge_fall_thru);
937 gimple_stmt_iterator to_gsi = gsi_start_bb (bi_newcall_bb);
938 gimple_stmt_iterator from_gsi = gsi_for_stmt (bi_newcall);
939 gsi_move_before (&from_gsi, &to_gsi);
940 join_tgt_in_edge_fall_thru = EDGE_SUCC (bi_newcall_bb, 0);
941 join_tgt_bb = join_tgt_in_edge_fall_thru->dest;
942
943 tree bi_newcall_lhs = gimple_call_lhs (bi_newcall);
944 tree bi_call_lhs = gimple_call_lhs (bi_call);
945 if (!bi_call_lhs)
946 {
947 bi_call_lhs = copy_ssa_name (bi_newcall_lhs);
948 gimple_call_set_lhs (bi_call, bi_call_lhs);
949 SSA_NAME_DEF_STMT (bi_call_lhs) = bi_call;
950 }
951
952 /* Create phi node for lhs of BI_CALL and BI_NEWCALL. */
953 gphi *new_phi = create_phi_node (copy_ssa_name (bi_newcall_lhs),
954 join_tgt_bb);
955 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (PHI_RESULT (new_phi))
956 = SSA_NAME_OCCURS_IN_ABNORMAL_PHI (bi_newcall_lhs);
957 add_phi_arg (new_phi, bi_call_lhs, join_tgt_in_edge_from_call,
958 gimple_location (bi_call));
959 add_phi_arg (new_phi, bi_newcall_lhs, join_tgt_in_edge_fall_thru,
960 gimple_location (bi_newcall));
961
962     /* Replace all uses of the original return value with the result of the phi node.  */
963 use_operand_p use_p;
964 gimple *use_stmt;
965 imm_use_iterator iterator;
966 FOR_EACH_IMM_USE_STMT (use_stmt, iterator, bi_newcall_lhs)
967 if (use_stmt != new_phi)
968 FOR_EACH_IMM_USE_ON_STMT (use_p, iterator)
969 SET_USE (use_p, PHI_RESULT (new_phi));
970 }
971
972 /* Now update the probability and profile information, processing the
973 guards in order of execution.
974
975 There are two approaches we could take here. On the one hand we
976 could assign a probability of X to the call block and distribute
977 that probability among its incoming edges. On the other hand we
978 could assign a probability of X to each individual call edge.
979
980 The choice only affects calls that have more than one condition.
981 In those cases, the second approach would give the call block
982 a greater probability than the first. However, the difference
983 is only small, and our chosen X is a pure guess anyway.
984
985 Here we take the second approach because it's slightly simpler
986 and because it's easy to see that it doesn't lose profile counts. */
987 bi_call_bb->count = profile_count::zero ();
988 while (!edges.is_empty ())
989 {
990 edge_pair e = edges.pop ();
991 edge call_edge = e.first;
992 edge nocall_edge = e.second;
993 basic_block src_bb = call_edge->src;
994 gcc_assert (src_bb == nocall_edge->src);
995
996 call_edge->probability = profile_probability::very_unlikely ();
997 nocall_edge->probability = profile_probability::always ()
998 - call_edge->probability;
999
1000 bi_call_bb->count += call_edge->count ();
1001
1002 if (nocall_edge->dest != join_tgt_bb)
1003 nocall_edge->dest->count = src_bb->count - bi_call_bb->count;
1004 }
1005
1006 if (dom_info_available_p (CDI_DOMINATORS))
1007 {
1008    /* The split_block calls leave [guard 0] as the immediate dominator
1009 of [call] and [call] as the immediate dominator of [join].
1010 Fix them up. */
1011 set_immediate_dominator (CDI_DOMINATORS, bi_call_bb, guard_bb);
1012 set_immediate_dominator (CDI_DOMINATORS, join_tgt_bb, guard_bb);
1013 }
1014
1015 if (dump_file && (dump_flags & TDF_DETAILS))
1016 {
1017 location_t loc;
1018 loc = gimple_location (bi_call);
1019 fprintf (dump_file,
1020 "%s:%d: note: function call is shrink-wrapped"
1021 " into error conditions.\n",
1022 LOCATION_FILE (loc), LOCATION_LINE (loc));
1023 }
1024 }
1025
1026 /* Shrink-wrap BI_CALL so that it is only called when it might set errno
1027 (but is always called if it would set errno). */
1028
1029 static void
1030 shrink_wrap_one_built_in_call (gcall *bi_call)
1031 {
1032 unsigned nconds = 0;
1033 auto_vec<gimple *, 12> conds;
1034 gen_shrink_wrap_conditions (bi_call, conds, &nconds);
1035 gcc_assert (nconds != 0);
1036 shrink_wrap_one_built_in_call_with_conds (bi_call, conds, nconds);
1037 }
1038
1039 /* Return true if built-in function call CALL could be implemented using
1040 a combination of an internal function to compute the result and a
1041 separate call to set errno. */
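/* For example (a sketch, assuming the target provides the corresponding
   optab): a call to __builtin_sqrt whose result is used can be
   replaced by IFN_SQRT, which does not touch errno, with the original
   errno-setting call kept only on the guarded path.  */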
1042
1043 static bool
1044 can_use_internal_fn (gcall *call)
1045 {
1046 /* Only replace calls that set errno. */
1047 if (!gimple_vdef (call))
1048 return false;
1049
1050 /* See whether there is an internal function for this built-in. */
1051 if (replacement_internal_fn (call) == IFN_LAST)
1052 return false;
1053
1054 /* See whether we can catch all cases where errno would be set,
1055 while still avoiding the call in most cases. */
1056 if (!can_test_argument_range (call)
1057 && !edom_only_function (call))
1058 return false;
1059
1060 return true;
1061 }
1062
1063 /* Implement built-in function call CALL using an internal function. */
1064
1065 static void
1066 use_internal_fn (gcall *call)
1067 {
1068 /* We'll be inserting another call with the same arguments after the
1069 lhs has been set, so prevent any possible coalescing failure from
1070 having both values live at once. See PR 71020. */
1071 replace_abnormal_ssa_names (call);
1072
1073 unsigned nconds = 0;
1074 auto_vec<gimple *, 12> conds;
1075 bool is_arg_conds = false;
1076 if (can_test_argument_range (call))
1077 {
1078 gen_shrink_wrap_conditions (call, conds, &nconds);
1079 is_arg_conds = true;
1080 gcc_assert (nconds != 0);
1081 }
1082 else
1083 gcc_assert (edom_only_function (call));
1084
1085 internal_fn ifn = replacement_internal_fn (call);
1086 gcc_assert (ifn != IFN_LAST);
1087
1088 /* Construct the new call, with the same arguments as the original one. */
1089 auto_vec <tree, 16> args;
1090 unsigned int nargs = gimple_call_num_args (call);
1091 for (unsigned int i = 0; i < nargs; ++i)
1092 args.safe_push (gimple_call_arg (call, i));
1093 gcall *new_call = gimple_build_call_internal_vec (ifn, args);
1094 gimple_set_location (new_call, gimple_location (call));
1095 gimple_call_set_nothrow (new_call, gimple_call_nothrow_p (call));
1096
1097 /* Transfer the LHS to the new call. */
1098 tree lhs = gimple_call_lhs (call);
1099 gimple_call_set_lhs (new_call, lhs);
1100 gimple_call_set_lhs (call, NULL_TREE);
1101 SSA_NAME_DEF_STMT (lhs) = new_call;
1102
1103 /* Insert the new call. */
1104 gimple_stmt_iterator gsi = gsi_for_stmt (call);
1105 gsi_insert_before (&gsi, new_call, GSI_SAME_STMT);
1106
1107 if (nconds == 0)
1108 {
1109 /* Skip the call if LHS == LHS. If we reach here, EDOM is the only
1110 valid errno value and it is used iff the result is NaN. */
1111 conds.quick_push (gimple_build_cond (EQ_EXPR, lhs, lhs,
1112 NULL_TREE, NULL_TREE));
1113 nconds++;
1114
1115 /* Try replacing the original call with a direct assignment to
1116 errno, via an internal function. */
1117 if (set_edom_supported_p () && !stmt_ends_bb_p (call))
1118 {
1119 gimple_stmt_iterator gsi = gsi_for_stmt (call);
1120 gcall *new_call = gimple_build_call_internal (IFN_SET_EDOM, 0);
1121 gimple_set_vuse (new_call, gimple_vuse (call));
1122 gimple_set_vdef (new_call, gimple_vdef (call));
1123 SSA_NAME_DEF_STMT (gimple_vdef (new_call)) = new_call;
1124 gimple_set_location (new_call, gimple_location (call));
1125 gsi_replace (&gsi, new_call, false);
1126 call = new_call;
1127 }
1128 }
1129 shrink_wrap_one_built_in_call_with_conds (call, conds, nconds,
1130 is_arg_conds ? new_call : NULL);
1131 }
1132
1133 /* The top-level function for the conditional dead call
1134     shrink-wrapping transformation.  */
1135
1136 static void
1137 shrink_wrap_conditional_dead_built_in_calls (vec<gcall *> calls)
1138 {
1139 unsigned i = 0;
1140
1141 unsigned n = calls.length ();
1142 for (; i < n ; i++)
1143 {
1144 gcall *bi_call = calls[i];
1145 if (gimple_call_lhs (bi_call))
1146 use_internal_fn (bi_call);
1147 else
1148 shrink_wrap_one_built_in_call (bi_call);
1149 }
1150 }
1151
1152 namespace {
1153
1154 const pass_data pass_data_call_cdce =
1155 {
1156 GIMPLE_PASS, /* type */
1157 "cdce", /* name */
1158 OPTGROUP_NONE, /* optinfo_flags */
1159 TV_TREE_CALL_CDCE, /* tv_id */
1160 ( PROP_cfg | PROP_ssa ), /* properties_required */
1161 0, /* properties_provided */
1162 0, /* properties_destroyed */
1163 0, /* todo_flags_start */
1164 0, /* todo_flags_finish */
1165 };
1166
1167 class pass_call_cdce : public gimple_opt_pass
1168 {
1169 public:
1170 pass_call_cdce (gcc::context *ctxt)
1171 : gimple_opt_pass (pass_data_call_cdce, ctxt)
1172 {}
1173
1174 /* opt_pass methods: */
1175 virtual bool gate (function *)
1176 {
1177 /* The limit constants used in the implementation
1178 assume IEEE floating point format. Other formats
1179 can be supported in the future if needed. */
1180 return flag_tree_builtin_call_dce != 0;
1181 }
1182
1183 virtual unsigned int execute (function *);
1184
1185 }; // class pass_call_cdce
1186
1187 unsigned int
1188 pass_call_cdce::execute (function *fun)
1189 {
1190 basic_block bb;
1191 gimple_stmt_iterator i;
1192 auto_vec<gcall *> cond_dead_built_in_calls;
1193 FOR_EACH_BB_FN (bb, fun)
1194 {
1195 /* Skip blocks that are being optimized for size, since our
1196 transformation always increases code size. */
1197 if (optimize_bb_for_size_p (bb))
1198 continue;
1199
1200 /* Collect dead call candidates. */
1201 for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
1202 {
1203 gcall *stmt = dyn_cast <gcall *> (gsi_stmt (i));
1204 if (stmt
1205 && gimple_call_builtin_p (stmt, BUILT_IN_NORMAL)
1206 && (gimple_call_lhs (stmt)
1207 ? can_use_internal_fn (stmt)
1208 : can_test_argument_range (stmt))
1209 && can_guard_call_p (stmt))
1210 {
1211 if (dump_file && (dump_flags & TDF_DETAILS))
1212 {
1213 fprintf (dump_file, "Found conditional dead call: ");
1214 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1215 fprintf (dump_file, "\n");
1216 }
1217 if (!cond_dead_built_in_calls.exists ())
1218 cond_dead_built_in_calls.create (64);
1219 cond_dead_built_in_calls.safe_push (stmt);
1220 }
1221 }
1222 }
1223
1224 if (!cond_dead_built_in_calls.exists ())
1225 return 0;
1226
1227 shrink_wrap_conditional_dead_built_in_calls (cond_dead_built_in_calls);
1228 free_dominance_info (CDI_POST_DOMINATORS);
1229 /* As we introduced new control-flow we need to insert PHI-nodes
1230 for the call-clobbers of the remaining call. */
1231 mark_virtual_operands_for_renaming (fun);
1232 return TODO_update_ssa;
1233 }
1234
1235 } // anon namespace
1236
1237 gimple_opt_pass *
1238 make_pass_call_cdce (gcc::context *ctxt)
1239 {
1240 return new pass_call_cdce (ctxt);
1241 }