/* Global, SSA-based optimizations using mathematical identities.
   Copyright (C) 2005-2020 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Currently, the only mini-pass in this file tries to CSE reciprocal
   operations.  These are common in sequences such as this one:

	modulus = sqrt(x*x + y*y + z*z);
	x = x / modulus;
	y = y / modulus;
	z = z / modulus;

   that can be optimized to

	modulus = sqrt(x*x + y*y + z*z);
	rmodulus = 1.0 / modulus;
	x = x * rmodulus;
	y = y * rmodulus;
	z = z * rmodulus;

   We do this for loop invariant divisors, and with this pass whenever
   we notice that a division has the same divisor multiple times.

   Of course, like in PRE, we don't insert a division if a dominator
   already has one.  However, this cannot be done as an extension of
   PRE for several reasons.

   First of all, with some experiments it was found out that the
   transformation is not always useful if there are only two divisions
   by the same divisor.  This is probably because modern processors
   can pipeline the divisions; on older, in-order processors it should
   still be effective to optimize two divisions by the same number.
   We make this a param, and it shall be called N in the remainder of
   this comment.

   Second, if trapping math is active, we have less freedom on where
   to insert divisions: we can only do so in basic blocks that already
   contain one.  (If divisions don't trap, instead, we can insert
   divisions elsewhere, which will be in blocks that are common dominators
   of those that have the division).

   We really don't want to compute the reciprocal unless a division will
   be found.  To do this, we won't insert the division in a basic block
   that has less than N divisions *post-dominating* it.

   The algorithm constructs a subset of the dominator tree, holding the
   blocks containing the divisions and the common dominators to them,
   and walks it twice.  The first walk is in post-order, and it annotates
   each block with the number of divisions that post-dominate it: this
   gives information on where divisions can be inserted profitably.
   The second walk is in pre-order, and it inserts divisions as explained
   above, and replaces divisions by multiplications.

   In the best case, the cost of the pass is O(n_statements).  In the
   worst-case, the cost is due to creating the dominator tree subset,
   with a cost of O(n_basic_blocks ^ 2); however this can only happen
   for n_statements / n_basic_blocks statements.  So, the amortized cost
   of creating the dominator tree subset is O(n_basic_blocks) and the
   worst-case cost of the pass is O(n_statements * n_basic_blocks).

   More practically, the cost will be small because there are few
   divisions, and they tend to be in the same basic block, so insert_bb
   is called very few times.

   If we did this using domwalk.c, an efficient implementation would have
   to work on all the variables in a single pass, because we could not
   work on just a subset of the dominator tree, as we do now, and the
   cost would also be something like O(n_statements * n_basic_blocks).
   The data structures would be more complex in order to work on all the
   variables in a single pass.  */
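
/* A concrete illustration of the N divisions rule above: with N = 2 and

     y = a / x;
     z = b / x;

   both divisions share one block, which every statement in it trivially
   post-dominates, so 1.0/x is inserted there and both divisions become
   multiplications.  If the two divisions instead sit in the two arms of
   an if/else, neither arm post-dominates the arms' common dominator, so
   no block is post-dominated by N divisions and the reciprocal is not
   inserted.  */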
#include "coretypes.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "optabs-tree.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimplify.h"
#include "gimplify-me.h"
#include "stor-layout.h"
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "builtins.h"
#include "internal-fn.h"
#include "case-cfn-macros.h"
#include "optabs-libfuncs.h"
#include "targhooks.h"
/* This structure represents one basic block that either computes a
   division, or is a common dominator for basic block that compute a
   division.  */
struct occurrence
{
  /* The basic block represented by this structure.  */
  basic_block bb;

  /* If non-NULL, the SSA_NAME holding the definition for a reciprocal
     inserted in BB.  */
  tree recip_def;

  /* If non-NULL, the SSA_NAME holding the definition for a squared
     reciprocal inserted in BB.  */
  tree square_recip_def;

  /* If non-NULL, the GIMPLE_ASSIGN for a reciprocal computation that
     was inserted in BB.  */
  gimple *recip_def_stmt;

  /* Pointer to a list of "struct occurrence"s for blocks dominated
     by BB.  */
  struct occurrence *children;

  /* Pointer to the next "struct occurrence"s in the list of blocks
     sharing a common dominator.  */
  struct occurrence *next;

  /* The number of divisions that are in BB before compute_merit.  The
     number of divisions that are in BB or post-dominate it after
     compute_merit.  */
  int num_divisions;

  /* True if the basic block has a division, false if it is a common
     dominator for basic blocks that do.  If it is false and trapping
     math is active, BB is not a candidate for inserting a reciprocal.  */
  bool bb_has_division;
};

static struct
{
  /* Number of 1.0/X ops inserted.  */
  int rdivs_inserted;

  /* Number of 1.0/FUNC ops inserted.  */
  int rfuncs_inserted;
} reciprocal_stats;

static struct
{
  /* Number of cexpi calls inserted.  */
  int inserted;
} sincos_stats;

static struct
{
  /* Number of widening multiplication ops inserted.  */
  int widen_mults_inserted;

  /* Number of integer multiply-and-accumulate ops inserted.  */
  int maccs_inserted;

  /* Number of fp fused multiply-add ops inserted.  */
  int fmas_inserted;

  /* Number of divmod calls inserted.  */
  int divmod_calls_inserted;
} widen_mul_stats;

/* The instance of "struct occurrence" representing the highest
   interesting block in the dominator tree.  */
static struct occurrence *occ_head;

/* Allocation pool for getting instances of "struct occurrence".  */
static object_allocator<occurrence> *occ_pool;
/* Allocate and return a new struct occurrence for basic block BB, and
   whose children list is headed by CHILDREN.  */
static struct occurrence *
occ_new (basic_block bb, struct occurrence *children)
{
  struct occurrence *occ;

  bb->aux = occ = occ_pool->allocate ();
  memset (occ, 0, sizeof (struct occurrence));

  occ->bb = bb;
  occ->children = children;
  return occ;
}
/* Insert NEW_OCC into our subset of the dominator tree.  P_HEAD points to a
   list of "struct occurrence"s, one per basic block, having IDOM as
   their common dominator.

   We try to insert NEW_OCC as deep as possible in the tree, and we also
   insert any other block that is a common dominator for BB and one
   block already in the tree.  */

static void
insert_bb (struct occurrence *new_occ, basic_block idom,
	   struct occurrence **p_head)
{
  struct occurrence *occ, **p_occ;

  for (p_occ = p_head; (occ = *p_occ) != NULL; )
    {
      basic_block bb = new_occ->bb, occ_bb = occ->bb;
      basic_block dom = nearest_common_dominator (CDI_DOMINATORS,
						  occ_bb, bb);
      if (dom == bb)
	{
	  /* BB dominates OCC_BB.  OCC becomes NEW_OCC's child: remove OCC
	     from its list.  */
	  *p_occ = occ->next;
	  occ->next = new_occ->children;
	  new_occ->children = occ;

	  /* Try the next block (it may as well be dominated by BB).  */
	}

      else if (dom == occ_bb)
	{
	  /* OCC_BB dominates BB.  Tail recurse to look deeper.  */
	  insert_bb (new_occ, dom, &occ->children);
	  return;
	}

      else if (dom != idom)
	{
	  gcc_assert (!dom->aux);

	  /* There is a dominator between IDOM and BB, add it and make
	     two children out of NEW_OCC and OCC.  First, remove OCC from
	     its list.  */
	  *p_occ = occ->next;
	  new_occ->next = occ;
	  occ->next = NULL;

	  /* None of the previous blocks has DOM as a dominator: if we tail
	     recursed, we would reexamine them uselessly. Just switch BB with
	     DOM, and go on looking for blocks dominated by DOM.  */
	  new_occ = occ_new (dom, new_occ);
	}

      else
	{
	  /* Nothing special, go on with the next element.  */
	  p_occ = &occ->next;
	}
    }

  /* No place was found as a child of IDOM.  Make BB a sibling of IDOM.  */
  new_occ->next = *p_head;
  *p_head = new_occ;
}
/* Register that we found a division in BB.
   IMPORTANCE is a measure of how much weighting to give
   that division.  Use IMPORTANCE = 2 to register a single
   division.  If the division is going to be found multiple
   times use 1 (as it is with squares).  */

static inline void
register_division_in (basic_block bb, int importance)
{
  struct occurrence *occ;

  occ = (struct occurrence *) bb->aux;
  if (!occ)
    {
      occ = occ_new (bb, NULL);
      insert_bb (occ, ENTRY_BLOCK_PTR_FOR_FN (cfun), &occ_head);
    }

  occ->bb_has_division = true;
  occ->num_divisions += importance;
}
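
/* For instance, each division by a square (a / (x * x)) reaches this
   function twice during the use scan, with IMPORTANCE 1 each time, so
   the total weight it contributes to its block equals the 2 that a
   plain a / x contributes in a single call.  */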
/* Compute the number of divisions that postdominate each block in OCC and
   its children.  */

static void
compute_merit (struct occurrence *occ)
{
  struct occurrence *occ_child;
  basic_block dom = occ->bb;

  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    {
      basic_block bb;
      if (occ_child->children)
	compute_merit (occ_child);

      if (flag_exceptions)
	/* With exceptions DOM may have extra outgoing EH edges, so a
	   child can never post-dominate DOM itself; test against DOM's
	   single non-complex successor instead.  */
	bb = single_noncomplex_succ (dom);
      else
	bb = dom;

      if (dominated_by_p (CDI_POST_DOMINATORS, bb, occ_child->bb))
	occ->num_divisions += occ_child->num_divisions;
    }
}
/* Return whether USE_STMT is a floating-point division by DEF.  */
static inline bool
is_division_by (gimple *use_stmt, tree def)
{
  return is_gimple_assign (use_stmt)
	 && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
	 && gimple_assign_rhs2 (use_stmt) == def
	 /* Do not recognize x / x as valid division, as we are getting
	    confused later by replacing all immediate uses x in such
	    a stmt.  */
	 && gimple_assign_rhs1 (use_stmt) != def
	 && !stmt_can_throw_internal (cfun, use_stmt);
}
/* Return TRUE if USE_STMT is a multiplication of DEF by A.  */
static inline bool
is_mult_by (gimple *use_stmt, tree def, tree a)
{
  if (gimple_code (use_stmt) == GIMPLE_ASSIGN
      && gimple_assign_rhs_code (use_stmt) == MULT_EXPR)
    {
      tree op0 = gimple_assign_rhs1 (use_stmt);
      tree op1 = gimple_assign_rhs2 (use_stmt);

      return (op0 == def && op1 == a)
	     || (op0 == a && op1 == def);
    }
  return 0;
}
/* Return whether USE_STMT is DEF * DEF.  */
static inline bool
is_square_of (gimple *use_stmt, tree def)
{
  return is_mult_by (use_stmt, def, def);
}
/* Return whether USE_STMT is a floating-point division by
   DEF * DEF.  */
static inline bool
is_division_by_square (gimple *use_stmt, tree def)
{
  if (gimple_code (use_stmt) == GIMPLE_ASSIGN
      && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
      && gimple_assign_rhs1 (use_stmt) != gimple_assign_rhs2 (use_stmt)
      && !stmt_can_throw_internal (cfun, use_stmt))
    {
      tree denominator = gimple_assign_rhs2 (use_stmt);
      if (TREE_CODE (denominator) == SSA_NAME)
	return is_square_of (SSA_NAME_DEF_STMT (denominator), def);
    }
  return 0;
}
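
/* For example, given
     t = x * x;
     r = a / t;
   is_division_by_square on r's statement with DEF == x returns true.  */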
/* Walk the subset of the dominator tree rooted at OCC, setting the
   RECIP_DEF field to a definition of 1.0 / DEF that can be used in
   the given basic block.  The field may be left NULL, of course,
   if it is not possible or profitable to do the optimization.

   DEF_BSI is an iterator pointing at the statement defining DEF.
   If RECIP_DEF is set, a dominator already has a computation that can
   be used.

   If should_insert_square_recip is set, then this also inserts
   the square of the reciprocal immediately after the definition
   of the reciprocal.  */

static void
insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ,
		    tree def, tree recip_def, tree square_recip_def,
		    int should_insert_square_recip, int threshold)
{
  tree type;
  gassign *new_stmt, *new_square_stmt;
  gimple_stmt_iterator gsi;
  struct occurrence *occ_child;

  if (!recip_def
      && (occ->bb_has_division || !flag_trapping_math)
      /* Divide by two as all divisions are counted twice in
	 the loop.  */
      && occ->num_divisions / 2 >= threshold)
    {
      /* Make a variable with the replacement and substitute it.  */
      type = TREE_TYPE (def);
      recip_def = create_tmp_reg (type, "reciptmp");
      new_stmt = gimple_build_assign (recip_def, RDIV_EXPR,
				      build_one_cst (type), def);

      if (should_insert_square_recip)
	{
	  square_recip_def = create_tmp_reg (type, "powmult_reciptmp");
	  new_square_stmt = gimple_build_assign (square_recip_def, MULT_EXPR,
						 recip_def, recip_def);
	}

      if (occ->bb_has_division)
	{
	  /* Case 1: insert before an existing division.  */
	  gsi = gsi_after_labels (occ->bb);
	  while (!gsi_end_p (gsi)
		 && (!is_division_by (gsi_stmt (gsi), def))
		 && (!is_division_by_square (gsi_stmt (gsi), def)))
	    gsi_next (&gsi);

	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	  if (should_insert_square_recip)
	    gsi_insert_before (&gsi, new_square_stmt, GSI_SAME_STMT);
	}
      else if (def_gsi && occ->bb == def_gsi->bb)
	{
	  /* Case 2: insert right after the definition.  Note that this will
	     never happen if the definition statement can throw, because in
	     that case the sole successor of the statement's basic block will
	     dominate all the uses as well.  */
	  gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
	  if (should_insert_square_recip)
	    gsi_insert_after (def_gsi, new_square_stmt, GSI_NEW_STMT);
	}
      else
	{
	  /* Case 3: insert in a basic block not containing defs/uses.  */
	  gsi = gsi_after_labels (occ->bb);
	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	  if (should_insert_square_recip)
	    gsi_insert_before (&gsi, new_square_stmt, GSI_SAME_STMT);
	}

      reciprocal_stats.rdivs_inserted++;

      occ->recip_def_stmt = new_stmt;
    }

  occ->recip_def = recip_def;
  occ->square_recip_def = square_recip_def;
  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    insert_reciprocals (def_gsi, occ_child, def, recip_def,
			square_recip_def, should_insert_square_recip,
			threshold);
}
/* Replace occurrences of expr / (x * x) with expr * ((1 / x) * (1 / x)).
   Take as argument the use for (x * x).  */
static inline void
replace_reciprocal_squares (use_operand_p use_p)
{
  gimple *use_stmt = USE_STMT (use_p);
  basic_block bb = gimple_bb (use_stmt);
  struct occurrence *occ = (struct occurrence *) bb->aux;

  if (optimize_bb_for_speed_p (bb) && occ->square_recip_def
      && occ->recip_def)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
      gimple_assign_set_rhs2 (use_stmt, occ->square_recip_def);
      SET_USE (use_p, occ->square_recip_def);
      fold_stmt_inplace (&gsi);
      update_stmt (use_stmt);
    }
}
/* Replace the division at USE_P with a multiplication by the reciprocal, if
   possible.  */
static inline void
replace_reciprocal (use_operand_p use_p)
{
  gimple *use_stmt = USE_STMT (use_p);
  basic_block bb = gimple_bb (use_stmt);
  struct occurrence *occ = (struct occurrence *) bb->aux;

  if (optimize_bb_for_speed_p (bb)
      && occ->recip_def && use_stmt != occ->recip_def_stmt)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
      SET_USE (use_p, occ->recip_def);
      fold_stmt_inplace (&gsi);
      update_stmt (use_stmt);
    }
}
/* Free OCC and return one more "struct occurrence" to be freed.  */

static struct occurrence *
free_bb (struct occurrence *occ)
{
  struct occurrence *child, *next;

  /* First get the two pointers hanging off OCC.  */
  next = occ->next;
  child = occ->children;
  occ->bb->aux = NULL;
  occ_pool->remove (occ);

  /* Now ensure that we don't recurse unless it is necessary.  */
  if (!child)
    return next;
  else
    {
      while (next)
	next = free_bb (next);

      return child;
    }
}
/* Transform sequences like
   t = sqrt (a)
   x = 1.0 / t;
   r1 = x * x;
   r2 = a * x;
   into:
   t = sqrt (a)
   r1 = 1.0 / a;
   r2 = sqrt (a);
   x = r1 * r2;
   depending on the uses of x, r1, r2.  This removes one multiplication and
   allows the sqrt and division operations to execute in parallel.
   DEF_GSI is the gsi of the initial division by sqrt that defines
   DEF (x in the example above).  */

static void
optimize_recip_sqrt (gimple_stmt_iterator *def_gsi, tree def)
{
  gimple *use_stmt;
  imm_use_iterator use_iter;
  gimple *stmt = gsi_stmt (*def_gsi);
  tree x = def;
  tree orig_sqrt_ssa_name = gimple_assign_rhs2 (stmt);
  tree div_rhs1 = gimple_assign_rhs1 (stmt);

  if (TREE_CODE (orig_sqrt_ssa_name) != SSA_NAME
      || TREE_CODE (div_rhs1) != REAL_CST
      || !real_equal (&TREE_REAL_CST (div_rhs1), &dconst1))
    return;

  gcall *sqrt_stmt
    = dyn_cast <gcall *> (SSA_NAME_DEF_STMT (orig_sqrt_ssa_name));

  if (!sqrt_stmt || !gimple_call_lhs (sqrt_stmt))
    return;

  switch (gimple_call_combined_fn (sqrt_stmt))
    {
    CASE_CFN_SQRT:
    CASE_CFN_SQRT_FN:
      break;

    default:
      return;
    }
  tree a = gimple_call_arg (sqrt_stmt, 0);

  /* We have 'a' and 'x'.  Now analyze the uses of 'x'.  */

  /* Statements that use x in x * x.  */
  auto_vec<gimple *> sqr_stmts;
  /* Statements that use x in a * x.  */
  auto_vec<gimple *> mult_stmts;
  bool has_other_use = false;
  bool mult_on_main_path = false;

  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, x)
    {
      if (is_gimple_debug (use_stmt))
	continue;
      if (is_square_of (use_stmt, x))
	{
	  sqr_stmts.safe_push (use_stmt);
	  if (gimple_bb (use_stmt) == gimple_bb (stmt))
	    mult_on_main_path = true;
	}
      else if (is_mult_by (use_stmt, x, a))
	{
	  mult_stmts.safe_push (use_stmt);
	  if (gimple_bb (use_stmt) == gimple_bb (stmt))
	    mult_on_main_path = true;
	}
      else
	has_other_use = true;
    }

  /* In the x * x and a * x cases we just rewire stmt operands or
     remove multiplications.  In the has_other_use case we introduce
     a multiplication so make sure we don't introduce a multiplication
     on a path where there was none.  */
  if (has_other_use && !mult_on_main_path)
    return;

  if (sqr_stmts.is_empty () && mult_stmts.is_empty ())
    return;

  /* If x = 1.0 / sqrt (a) has uses other than those optimized here we want
     to be able to compose it from the sqr and mult cases.  */
  if (has_other_use && (sqr_stmts.is_empty () || mult_stmts.is_empty ()))
    return;

  if (dump_file)
    {
      fprintf (dump_file, "Optimizing reciprocal sqrt multiplications of\n");
      print_gimple_stmt (dump_file, sqrt_stmt, 0, TDF_NONE);
      print_gimple_stmt (dump_file, stmt, 0, TDF_NONE);
      fprintf (dump_file, "\n");
    }

  bool delete_div = !has_other_use;
  tree sqr_ssa_name = NULL_TREE;
  if (!sqr_stmts.is_empty ())
    {
      /* r1 = x * x.  Transform the original
	 x = 1.0 / t
	 into
	 tmp1 = 1.0 / a
	 r1 = tmp1.  */

      sqr_ssa_name
	= make_temp_ssa_name (TREE_TYPE (a), NULL, "recip_sqrt_sqr");

      if (dump_file)
	{
	  fprintf (dump_file, "Replacing original division\n");
	  print_gimple_stmt (dump_file, stmt, 0, TDF_NONE);
	  fprintf (dump_file, "with new division\n");
	}
      stmt
	= gimple_build_assign (sqr_ssa_name, gimple_assign_rhs_code (stmt),
			       gimple_assign_rhs1 (stmt), a);
      gsi_insert_before (def_gsi, stmt, GSI_SAME_STMT);
      gsi_remove (def_gsi, true);
      *def_gsi = gsi_for_stmt (stmt);
      fold_stmt_inplace (def_gsi);
      update_stmt (stmt);

      if (dump_file)
	print_gimple_stmt (dump_file, stmt, 0, TDF_NONE);

      delete_div = false;
      gimple *sqr_stmt;
      unsigned int i;
      FOR_EACH_VEC_ELT (sqr_stmts, i, sqr_stmt)
	{
	  gimple_stmt_iterator gsi2 = gsi_for_stmt (sqr_stmt);
	  gimple_assign_set_rhs_from_tree (&gsi2, sqr_ssa_name);
	  update_stmt (sqr_stmt);
	}
    }
  if (!mult_stmts.is_empty ())
    {
      /* r2 = a * x.  Transform this into:
	 r2 = t (The original sqrt (a)).  */
      unsigned int i;
      gimple *mult_stmt = NULL;
      FOR_EACH_VEC_ELT (mult_stmts, i, mult_stmt)
	{
	  gimple_stmt_iterator gsi2 = gsi_for_stmt (mult_stmt);

	  if (dump_file)
	    {
	      fprintf (dump_file, "Replacing squaring multiplication\n");
	      print_gimple_stmt (dump_file, mult_stmt, 0, TDF_NONE);
	      fprintf (dump_file, "with assignment\n");
	    }
	  gimple_assign_set_rhs_from_tree (&gsi2, orig_sqrt_ssa_name);
	  fold_stmt_inplace (&gsi2);
	  update_stmt (mult_stmt);
	  if (dump_file)
	    print_gimple_stmt (dump_file, mult_stmt, 0, TDF_NONE);
	}
    }

  if (has_other_use)
    {
      /* Using the two temporaries tmp1, tmp2 from above
	 the original x is now:
	 x = tmp1 * tmp2.  */
      gcc_assert (orig_sqrt_ssa_name);
      gcc_assert (sqr_ssa_name);

      gimple *new_stmt
	= gimple_build_assign (x, MULT_EXPR,
			       orig_sqrt_ssa_name, sqr_ssa_name);
      gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
      update_stmt (stmt);
    }
  else if (delete_div)
    {
      /* Remove the original division.  */
      gimple_stmt_iterator gsi2 = gsi_for_stmt (stmt);
      gsi_remove (&gsi2, true);
      release_defs (stmt);
    }
  else
    release_ssa_name (x);
}
/* Look for floating-point divisions among DEF's uses, and try to
   replace them by multiplications with the reciprocal.  Add
   as many statements computing the reciprocal as needed.

   DEF must be a GIMPLE register of a floating-point type.  */

static void
execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
{
  use_operand_p use_p, square_use_p;
  imm_use_iterator use_iter, square_use_iter;
  tree square_def;
  struct occurrence *occ;
  int count = 0;
  int threshold;
  int square_recip_count = 0;
  int sqrt_recip_count = 0;

  gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && TREE_CODE (def) == SSA_NAME);
  threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));

  /* If DEF is a square (x * x), count the number of divisions by x.
     If there are more divisions by x than by (DEF * DEF), prefer to optimize
     the reciprocal of x instead of DEF.  This improves cases like:
       x = ...;
       t1 = x * x;
       y1 = a / t1;
       y2 = b / x;
     Reciprocal optimization of x results in 1 division rather than 2 or 3.  */
  gimple *def_stmt = SSA_NAME_DEF_STMT (def);
  if (is_gimple_assign (def_stmt)
      && gimple_assign_rhs_code (def_stmt) == MULT_EXPR
      && TREE_CODE (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
      && gimple_assign_rhs1 (def_stmt) == gimple_assign_rhs2 (def_stmt))
    {
      tree op0 = gimple_assign_rhs1 (def_stmt);

      FOR_EACH_IMM_USE_FAST (use_p, use_iter, op0)
	{
	  gimple *use_stmt = USE_STMT (use_p);
	  if (is_division_by (use_stmt, op0))
	    sqrt_recip_count++;
	}
    }

  FOR_EACH_IMM_USE_FAST (use_p, use_iter, def)
    {
      gimple *use_stmt = USE_STMT (use_p);
      if (is_division_by (use_stmt, def))
	{
	  register_division_in (gimple_bb (use_stmt), 2);
	  count++;
	}

      if (is_square_of (use_stmt, def))
	{
	  square_def = gimple_assign_lhs (use_stmt);
	  FOR_EACH_IMM_USE_FAST (square_use_p, square_use_iter, square_def)
	    {
	      gimple *square_use_stmt = USE_STMT (square_use_p);
	      if (is_division_by (square_use_stmt, square_def))
		{
		  /* This is executed twice for each division by a square.  */
		  register_division_in (gimple_bb (square_use_stmt), 1);
		  square_recip_count++;
		}
	    }
	}
    }

  /* Square reciprocals were counted twice above.  */
  square_recip_count /= 2;

  /* If it is more profitable to optimize 1 / x, don't optimize 1 / (x * x).  */
  if (sqrt_recip_count > square_recip_count)
    goto out;

  /* Do the expensive part only if we can hope to optimize something.  */
  if (count + square_recip_count >= threshold && count >= 1)
    {
      gimple *use_stmt;
      for (occ = occ_head; occ; occ = occ->next)
	{
	  compute_merit (occ);
	  insert_reciprocals (def_gsi, occ, def, NULL, NULL,
			      square_recip_count, threshold);
	}

      FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, def)
	{
	  if (is_division_by (use_stmt, def))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
		replace_reciprocal (use_p);
	    }
	  else if (square_recip_count > 0 && is_square_of (use_stmt, def))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
		{
		  /* Find all uses of the square that are divisions and
		   * replace them by multiplications with the inverse.  */
		  imm_use_iterator square_iterator;
		  gimple *powmult_use_stmt = USE_STMT (use_p);
		  tree powmult_def_name = gimple_assign_lhs (powmult_use_stmt);

		  FOR_EACH_IMM_USE_STMT (powmult_use_stmt,
					 square_iterator, powmult_def_name)
		    FOR_EACH_IMM_USE_ON_STMT (square_use_p, square_iterator)
		      {
			gimple *powmult_use_stmt = USE_STMT (square_use_p);
			if (is_division_by (powmult_use_stmt,
					    powmult_def_name))
			  replace_reciprocal_squares (square_use_p);
		      }
		}
	    }
	}
    }

out:
  for (occ = occ_head; occ; )
    occ = free_bb (occ);

  occ_head = NULL;
}
/* Return an internal function that implements the reciprocal of CALL,
   or IFN_LAST if there is no such function that the target supports.  */

internal_fn
internal_fn_reciprocal (gcall *call)
{
  internal_fn ifn;

  switch (gimple_call_combined_fn (call))
    {
    CASE_CFN_SQRT:
    CASE_CFN_SQRT_FN:
      ifn = IFN_RSQRT;
      break;

    default:
      return IFN_LAST;
    }

  tree_pair types = direct_internal_fn_types (ifn, call);
  if (!direct_internal_fn_supported_p (ifn, types, OPTIMIZE_FOR_SPEED))
    return IFN_LAST;

  return ifn;
}
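
/* When IFN_RSQRT is supported, the pass below uses this to turn
   a / sqrt (b) into a * rsqrt (b): the sqrt call is replaced by the
   reciprocal function and each dividing use becomes a multiplication.  */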
/* Go through all the floating-point SSA_NAMEs, and call
   execute_cse_reciprocals_1 on each of them.  */
namespace {

const pass_data pass_data_cse_reciprocals =
{
  GIMPLE_PASS, /* type */
  "recip", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_RECIP, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_cse_reciprocals : public gimple_opt_pass
{
public:
  pass_cse_reciprocals (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cse_reciprocals, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return optimize && flag_reciprocal_math; }
  virtual unsigned int execute (function *);

}; // class pass_cse_reciprocals
unsigned int
pass_cse_reciprocals::execute (function *fun)
{
  basic_block bb;
  tree arg;

  occ_pool = new object_allocator<occurrence> ("dominators for recip");

  memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
  calculate_dominance_info (CDI_DOMINATORS);
  calculate_dominance_info (CDI_POST_DOMINATORS);

  if (flag_checking)
    FOR_EACH_BB_FN (bb, fun)
      gcc_assert (!bb->aux);

  for (arg = DECL_ARGUMENTS (fun->decl); arg; arg = DECL_CHAIN (arg))
    if (FLOAT_TYPE_P (TREE_TYPE (arg))
	&& is_gimple_reg (arg))
      {
	tree name = ssa_default_def (fun, arg);
	if (name)
	  execute_cse_reciprocals_1 (NULL, name);
      }

  FOR_EACH_BB_FN (bb, fun)
    {
      tree def;

      for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gphi *phi = gsi.phi ();
	  def = PHI_RESULT (phi);
	  if (! virtual_operand_p (def)
	      && FLOAT_TYPE_P (TREE_TYPE (def)))
	    execute_cse_reciprocals_1 (NULL, def);
	}

      for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);

	  if (gimple_has_lhs (stmt)
	      && (def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF)) != NULL
	      && FLOAT_TYPE_P (TREE_TYPE (def))
	      && TREE_CODE (def) == SSA_NAME)
	    {
	      execute_cse_reciprocals_1 (&gsi, def);
	      stmt = gsi_stmt (gsi);
	      if (flag_unsafe_math_optimizations
		  && is_gimple_assign (stmt)
		  && gimple_assign_lhs (stmt) == def
		  && !stmt_can_throw_internal (cfun, stmt)
		  && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
		optimize_recip_sqrt (&gsi, def);
	    }
	}

      if (optimize_bb_for_size_p (bb))
	continue;

      /* Scan for a/func(b) and convert it to reciprocal a*rfunc(b).  */
      for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);

	  if (is_gimple_assign (stmt)
	      && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
	    {
	      tree arg1 = gimple_assign_rhs2 (stmt);
	      gimple *stmt1;

	      if (TREE_CODE (arg1) != SSA_NAME)
		continue;

	      stmt1 = SSA_NAME_DEF_STMT (arg1);

	      if (is_gimple_call (stmt1)
		  && gimple_call_lhs (stmt1))
		{
		  bool fail;
		  imm_use_iterator ui;
		  use_operand_p use_p;
		  tree fndecl = NULL_TREE;

		  gcall *call = as_a <gcall *> (stmt1);
		  internal_fn ifn = internal_fn_reciprocal (call);
		  if (ifn == IFN_LAST)
		    {
		      fndecl = gimple_call_fndecl (call);
		      if (!fndecl
			  || !fndecl_built_in_p (fndecl, BUILT_IN_MD))
			continue;
		      fndecl = targetm.builtin_reciprocal (fndecl);
		      if (!fndecl)
			continue;
		    }

		  /* Check that all uses of the SSA name are divisions,
		     otherwise replacing the defining statement will do
		     the wrong thing.  */
		  fail = false;
		  FOR_EACH_IMM_USE_FAST (use_p, ui, arg1)
		    {
		      gimple *stmt2 = USE_STMT (use_p);
		      if (is_gimple_debug (stmt2))
			continue;
		      if (!is_gimple_assign (stmt2)
			  || gimple_assign_rhs_code (stmt2) != RDIV_EXPR
			  || gimple_assign_rhs1 (stmt2) == arg1
			  || gimple_assign_rhs2 (stmt2) != arg1)
			{
			  fail = true;
			  break;
			}
		    }
		  if (fail)
		    continue;

		  gimple_replace_ssa_lhs (call, arg1);
		  if (gimple_call_internal_p (call) != (ifn != IFN_LAST))
		    {
		      auto_vec<tree, 4> args;
		      for (unsigned int i = 0;
			   i < gimple_call_num_args (call); i++)
			args.safe_push (gimple_call_arg (call, i));
		      gcall *stmt2;
		      if (ifn == IFN_LAST)
			stmt2 = gimple_build_call_vec (fndecl, args);
		      else
			stmt2 = gimple_build_call_internal_vec (ifn, args);
		      gimple_call_set_lhs (stmt2, arg1);
		      gimple_move_vops (stmt2, call);
		      gimple_call_set_nothrow (stmt2,
					       gimple_call_nothrow_p (call));
		      gimple_stmt_iterator gsi2 = gsi_for_stmt (call);
		      gsi_replace (&gsi2, stmt2, true);
		    }
		  else
		    {
		      if (ifn == IFN_LAST)
			gimple_call_set_fndecl (call, fndecl);
		      else
			gimple_call_set_internal_fn (call, ifn);
		    }
		  reciprocal_stats.rfuncs_inserted++;

		  FOR_EACH_IMM_USE_STMT (stmt, ui, arg1)
		    {
		      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
		      gimple_assign_set_rhs_code (stmt, MULT_EXPR);
		      fold_stmt_inplace (&gsi);
		      update_stmt (stmt);
		    }
		}
	    }
	}
    }

  statistics_counter_event (fun, "reciprocal divs inserted",
			    reciprocal_stats.rdivs_inserted);
  statistics_counter_event (fun, "reciprocal functions inserted",
			    reciprocal_stats.rfuncs_inserted);

  free_dominance_info (CDI_DOMINATORS);
  free_dominance_info (CDI_POST_DOMINATORS);
  delete occ_pool;
  return 0;
}

} // anon namespace
gimple_opt_pass *
make_pass_cse_reciprocals (gcc::context *ctxt)
{
  return new pass_cse_reciprocals (ctxt);
}
/* Records an occurrence at statement USE_STMT in the vector of trees
   STMTS if it is dominated by *TOP_BB or dominates it or this basic block
   is not yet initialized.  Returns true if the occurrence was pushed on
   the vector.  Adjusts *TOP_BB to be the basic block dominating all
   statements in the vector.  */

static bool
maybe_record_sincos (vec<gimple *> *stmts,
		     basic_block *top_bb, gimple *use_stmt)
{
  basic_block use_bb = gimple_bb (use_stmt);
  if (*top_bb
      && (*top_bb == use_bb
	  || dominated_by_p (CDI_DOMINATORS, use_bb, *top_bb)))
    stmts->safe_push (use_stmt);
  else if (!*top_bb
	   || dominated_by_p (CDI_DOMINATORS, *top_bb, use_bb))
    {
      stmts->safe_push (use_stmt);
      *top_bb = use_bb;
    }
  else
    return false;

  return true;
}
/* Look for sin, cos and cexpi calls with the same argument NAME and
   create a single call to cexpi CSEing the result in this case.
   We first walk over all immediate uses of the argument collecting
   statements that we can CSE in a vector and in a second pass replace
   the statement rhs with a REALPART or IMAGPART expression on the
   result of the cexpi call we insert before the use statement that
   dominates all other candidates.  */

static bool
execute_cse_sincos_1 (tree name)
{
  gimple_stmt_iterator gsi;
  imm_use_iterator use_iter;
  tree fndecl, res, type;
  gimple *def_stmt, *use_stmt, *stmt;
  int seen_cos = 0, seen_sin = 0, seen_cexpi = 0;
  auto_vec<gimple *> stmts;
  basic_block top_bb = NULL;
  int i;
  bool cfg_changed = false;

  type = TREE_TYPE (name);
  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, name)
    {
      if (gimple_code (use_stmt) != GIMPLE_CALL
	  || !gimple_call_lhs (use_stmt))
	continue;

      switch (gimple_call_combined_fn (use_stmt))
	{
	CASE_CFN_COS:
	  seen_cos |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	CASE_CFN_SIN:
	  seen_sin |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	CASE_CFN_CEXPI:
	  seen_cexpi |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	default:;
	}
    }

  if (seen_cos + seen_sin + seen_cexpi <= 1)
    return false;

  /* Simply insert cexpi at the beginning of top_bb but not earlier than
     the name def statement.  */
  fndecl = mathfn_built_in (type, BUILT_IN_CEXPI);
  if (!fndecl)
    return false;
  stmt = gimple_build_call (fndecl, 1, name);
  res = make_temp_ssa_name (TREE_TYPE (TREE_TYPE (fndecl)), stmt, "sincostmp");
  gimple_call_set_lhs (stmt, res);

  def_stmt = SSA_NAME_DEF_STMT (name);
  if (!SSA_NAME_IS_DEFAULT_DEF (name)
      && gimple_code (def_stmt) != GIMPLE_PHI
      && gimple_bb (def_stmt) == top_bb)
    {
      gsi = gsi_for_stmt (def_stmt);
      gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
    }
  else
    {
      gsi = gsi_after_labels (top_bb);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
    }
  sincos_stats.inserted++;

  /* And adjust the recorded old call sites.  */
  for (i = 0; stmts.iterate (i, &use_stmt); ++i)
    {
      tree rhs = NULL;

      switch (gimple_call_combined_fn (use_stmt))
	{
	CASE_CFN_COS:
	  rhs = fold_build1 (REALPART_EXPR, type, res);
	  break;

	CASE_CFN_SIN:
	  rhs = fold_build1 (IMAGPART_EXPR, type, res);
	  break;

	CASE_CFN_CEXPI:
	  rhs = res;
	  break;

	default:;
	  gcc_unreachable ();
	}

      /* Replace call with a copy.  */
      stmt = gimple_build_assign (gimple_call_lhs (use_stmt), rhs);

      gsi = gsi_for_stmt (use_stmt);
      gsi_replace (&gsi, stmt, true);
      if (gimple_purge_dead_eh_edges (gimple_bb (stmt)))
	cfg_changed = true;
    }

  return cfg_changed;
}
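
/* For example, for
     c = cos (a);
     s = sin (a);
   a single call  res = cexpi (a)  is inserted in the dominating block
   and the old calls become
     c = REALPART_EXPR <res>;
     s = IMAGPART_EXPR <res>;  */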
/* To evaluate powi(x,n), the floating point value x raised to the
   constant integer exponent n, we use a hybrid algorithm that
   combines the "window method" with look-up tables.  For an
   introduction to exponentiation algorithms and "addition chains",
   see section 4.6.3, "Evaluation of Powers" of Donald E. Knuth,
   "Seminumerical Algorithms", Vol. 2, "The Art of Computer Programming",
   3rd Edition, 1998, and Daniel M. Gordon, "A Survey of Fast Exponentiation
   Methods", Journal of Algorithms, Vol. 27, pp. 129-146, 1998.  */

/* Provide a default value for POWI_MAX_MULTS, the maximum number of
   multiplications to inline before calling the system library's pow
   function.  powi(x,n) requires at worst 2*bits(n)-2 multiplications,
   so this default never requires calling pow, powf or powl.  */

#ifndef POWI_MAX_MULTS
#define POWI_MAX_MULTS  (2*HOST_BITS_PER_WIDE_INT-2)
#endif
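
/* With a 64-bit HOST_WIDE_INT this bound is 2*64 - 2 = 126: binary
   exponentiation needs at most one squaring plus one extra multiply
   for each exponent bit after the first, i.e. (64 - 1) + (64 - 1).  */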
/* The size of the "optimal power tree" lookup table.  All
   exponents less than this value are simply looked up in the
   powi_table below.  This threshold is also used to size the
   cache of pseudo registers that hold intermediate results.  */
#define POWI_TABLE_SIZE 256

/* The size, in bits, of the window used in the "window method"
   exponentiation algorithm.  This is equivalent to a radix of
   (1<<POWI_WINDOW_SIZE) in the corresponding "m-ary method".  */
#define POWI_WINDOW_SIZE 3
/* The following table is an efficient representation of an
   "optimal power tree".  For each value, i, the corresponding
   value, j, in the table states that an optimal evaluation
   sequence for calculating pow(x,i) can be found by evaluating
   pow(x,j)*pow(x,i-j).  An optimal power tree for the first
   100 integers is given in Knuth's "Seminumerical algorithms".  */
static const unsigned char powi_table[POWI_TABLE_SIZE] =
  {
      0,   1,   1,   2,   2,   3,   3,   4,  /*   0 -   7 */
      4,   6,   5,   6,   6,  10,   7,   9,  /*   8 -  15 */
      8,  16,   9,  16,  10,  12,  11,  13,  /*  16 -  23 */
     12,  17,  13,  18,  14,  24,  15,  26,  /*  24 -  31 */
     16,  17,  17,  19,  18,  33,  19,  26,  /*  32 -  39 */
     20,  25,  21,  40,  22,  27,  23,  44,  /*  40 -  47 */
     24,  32,  25,  34,  26,  29,  27,  44,  /*  48 -  55 */
     28,  31,  29,  34,  30,  60,  31,  36,  /*  56 -  63 */
     32,  64,  33,  34,  34,  46,  35,  37,  /*  64 -  71 */
     36,  65,  37,  50,  38,  48,  39,  69,  /*  72 -  79 */
     40,  49,  41,  43,  42,  51,  43,  58,  /*  80 -  87 */
     44,  64,  45,  47,  46,  59,  47,  76,  /*  88 -  95 */
     48,  65,  49,  66,  50,  67,  51,  66,  /*  96 - 103 */
     52,  70,  53,  74,  54, 104,  55,  74,  /* 104 - 111 */
     56,  64,  57,  69,  58,  78,  59,  68,  /* 112 - 119 */
     60,  61,  61,  80,  62,  75,  63,  68,  /* 120 - 127 */
     64,  65,  65, 128,  66, 129,  67,  90,  /* 128 - 135 */
     68,  73,  69, 131,  70,  94,  71,  88,  /* 136 - 143 */
     72, 128,  73,  98,  74, 132,  75, 121,  /* 144 - 151 */
     76, 102,  77, 124,  78, 132,  79, 106,  /* 152 - 159 */
     80,  97,  81, 160,  82,  99,  83, 134,  /* 160 - 167 */
     84,  86,  85,  95,  86, 160,  87, 100,  /* 168 - 175 */
     88, 113,  89,  98,  90, 107,  91, 122,  /* 176 - 183 */
     92, 111,  93, 102,  94, 126,  95, 150,  /* 184 - 191 */
     96, 128,  97, 130,  98, 133,  99, 195,  /* 192 - 199 */
    100, 128, 101, 123, 102, 164, 103, 138,  /* 200 - 207 */
    104, 145, 105, 146, 106, 109, 107, 149,  /* 208 - 215 */
    108, 200, 109, 146, 110, 170, 111, 157,  /* 216 - 223 */
    112, 128, 113, 130, 114, 182, 115, 132,  /* 224 - 231 */
    116, 200, 117, 132, 118, 158, 119, 206,  /* 232 - 239 */
    120, 240, 121, 162, 122, 147, 123, 152,  /* 240 - 247 */
    124, 166, 125, 214, 126, 138, 127, 153,  /* 248 - 255 */
  };
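
/* Example: powi_table[5] == 3, so x**5 is evaluated as x**3 * x**2;
   recursively, powi_table[3] == 2 and powi_table[2] == 1 give
     t2 = x * x;  t3 = t2 * x;  t5 = t3 * t2;
   i.e. three multiplications in total.  */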
/* Return the number of multiplications required to calculate
   powi(x,n) where n is less than POWI_TABLE_SIZE.  This is a
   subroutine of powi_cost.  CACHE is an array indicating
   which exponents have already been calculated.  */

static int
powi_lookup_cost (unsigned HOST_WIDE_INT n, bool *cache)
{
  /* If we've already calculated this exponent, then this evaluation
     doesn't require any additional multiplications.  */
  if (cache[n])
    return 0;

  cache[n] = true;
  return powi_lookup_cost (n - powi_table[n], cache)
	 + powi_lookup_cost (powi_table[n], cache) + 1;
}
/* Return the number of multiplications required to calculate
   powi(x,n) for an arbitrary x, given the exponent N.  This
   function needs to be kept in sync with powi_as_mults below.  */

static int
powi_cost (HOST_WIDE_INT n)
{
  bool cache[POWI_TABLE_SIZE];
  unsigned HOST_WIDE_INT digit;
  unsigned HOST_WIDE_INT val;
  int result;

  if (n == 0)
    return 0;

  /* Ignore the reciprocal when calculating the cost.  */
  val = (n < 0) ? -n : n;

  /* Initialize the exponent cache.  */
  memset (cache, 0, POWI_TABLE_SIZE * sizeof (bool));
  cache[1] = true;

  result = 0;

  while (val >= POWI_TABLE_SIZE)
    {
      if (val & 1)
	{
	  digit = val & ((1 << POWI_WINDOW_SIZE) - 1);
	  result += powi_lookup_cost (digit, cache)
		    + POWI_WINDOW_SIZE + 1;
	  val >>= POWI_WINDOW_SIZE;
	}
      else
	{
	  val >>= 1;
	  result += 1;
	}
    }

  return result + powi_lookup_cost (val, cache);
}
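
/* Example: for n = 1001 (0b1111101001) the low POWI_WINDOW_SIZE bits
   form digit 1, costing powi_lookup_cost (1) + 3 + 1 = 4 multiplies,
   and val becomes 1001 >> 3 = 125, which is finished from powi_table.  */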
/* Recursive subroutine of powi_as_mults.  This function takes the
   array, CACHE, of already calculated exponents and an exponent N and
   returns a tree that corresponds to CACHE[1]**N, with type TYPE.  */

static tree
powi_as_mults_1 (gimple_stmt_iterator *gsi, location_t loc, tree type,
		 HOST_WIDE_INT n, tree *cache)
{
  tree op0, op1, ssa_target;
  unsigned HOST_WIDE_INT digit;
  gassign *mult_stmt;

  if (n < POWI_TABLE_SIZE && cache[n])
    return cache[n];

  ssa_target = make_temp_ssa_name (type, NULL, "powmult");

  if (n < POWI_TABLE_SIZE)
    {
      cache[n] = ssa_target;
      op0 = powi_as_mults_1 (gsi, loc, type, n - powi_table[n], cache);
      op1 = powi_as_mults_1 (gsi, loc, type, powi_table[n], cache);
    }
  else if (n & 1)
    {
      digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
      op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache);
      op1 = powi_as_mults_1 (gsi, loc, type, digit, cache);
    }
  else
    {
      op0 = powi_as_mults_1 (gsi, loc, type, n >> 1, cache);
      op1 = op0;
    }

  mult_stmt = gimple_build_assign (ssa_target, MULT_EXPR, op0, op1);
  gimple_set_location (mult_stmt, loc);
  gsi_insert_before (gsi, mult_stmt, GSI_SAME_STMT);

  return ssa_target;
}
/* Convert ARG0**N to a tree of multiplications of ARG0 with itself.
   This function needs to be kept in sync with powi_cost above.  */

static tree
powi_as_mults (gimple_stmt_iterator *gsi, location_t loc,
	       tree arg0, HOST_WIDE_INT n)
{
  tree cache[POWI_TABLE_SIZE], result, type = TREE_TYPE (arg0);
  gassign *div_stmt;
  tree target;

  if (n == 0)
    return build_real (type, dconst1);

  memset (cache, 0, sizeof (cache));
  cache[1] = arg0;

  result = powi_as_mults_1 (gsi, loc, type, (n < 0) ? -n : n, cache);
  if (n >= 0)
    return result;

  /* If the original exponent was negative, reciprocate the result.  */
  target = make_temp_ssa_name (type, NULL, "powmult");
  div_stmt = gimple_build_assign (target, RDIV_EXPR,
				  build_real (type, dconst1), result);
  gimple_set_location (div_stmt, loc);
  gsi_insert_before (gsi, div_stmt, GSI_SAME_STMT);

  return target;
}
/* ARG0 and N are the two arguments to a powi builtin in GSI with
   location info LOC.  If the arguments are appropriate, create an
   equivalent sequence of statements prior to GSI using an optimal
   number of multiplications, and return an expession holding the
   result.  */

static tree
gimple_expand_builtin_powi (gimple_stmt_iterator *gsi, location_t loc,
			    tree arg0, HOST_WIDE_INT n)
{
  /* Avoid largest negative number.  */
  if (n != -n
      && ((n >= -1 && n <= 2)
	  || (optimize_function_for_speed_p (cfun)
	      && powi_cost (n) <= POWI_MAX_MULTS)))
    return powi_as_mults (gsi, loc, arg0, n);

  return NULL_TREE;
}
/* Build a gimple call statement that calls FN with argument ARG.
   Set the lhs of the call statement to a fresh SSA name.  Insert the
   statement prior to GSI's current position, and return the fresh
   SSA name.  */

static tree
build_and_insert_call (gimple_stmt_iterator *gsi, location_t loc,
		       tree fn, tree arg)
{
  gcall *call_stmt;
  tree ssa_target;

  call_stmt = gimple_build_call (fn, 1, arg);
  ssa_target = make_temp_ssa_name (TREE_TYPE (arg), NULL, "powroot");
  gimple_set_lhs (call_stmt, ssa_target);
  gimple_set_location (call_stmt, loc);
  gsi_insert_before (gsi, call_stmt, GSI_SAME_STMT);

  return ssa_target;
}
/* Build a gimple binary operation with the given CODE and arguments
   ARG0, ARG1, assigning the result to a new SSA name for variable
   TARGET.  Insert the statement prior to GSI's current position, and
   return the fresh SSA name.  */

static tree
build_and_insert_binop (gimple_stmt_iterator *gsi, location_t loc,
			const char *name, enum tree_code code,
			tree arg0, tree arg1)
{
  tree result = make_temp_ssa_name (TREE_TYPE (arg0), NULL, name);
  gassign *stmt = gimple_build_assign (result, code, arg0, arg1);
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);

  return result;
}
/* Build a gimple reference operation with the given CODE and argument
   ARG, assigning the result to a new SSA name of TYPE with NAME.
   Insert the statement prior to GSI's current position, and return
   the fresh SSA name.  */

static inline tree
build_and_insert_ref (gimple_stmt_iterator *gsi, location_t loc, tree type,
		      const char *name, enum tree_code code, tree arg0)
{
  tree result = make_temp_ssa_name (type, NULL, name);
  gimple *stmt = gimple_build_assign (result, build1 (code, type, arg0));
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);

  return result;
}
/* Build a gimple assignment to cast VAL to TYPE.  Insert the statement
   prior to GSI's current position, and return the fresh SSA name.  */

static tree
build_and_insert_cast (gimple_stmt_iterator *gsi, location_t loc,
		       tree type, tree val)
{
  tree result = make_ssa_name (type);
  gassign *stmt = gimple_build_assign (result, NOP_EXPR, val);
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);

  return result;
}
struct pow_synth_sqrt_info
{
  bool *factors;
  unsigned int deepest;
  unsigned int num_mults;
};
/* Return true iff the real value C can be represented as a
   sum of powers of 0.5 up to N.  That is:
   C == SUM<i from 1..N> (a[i]*(0.5**i)) where a[i] is either 0 or 1.
   Record in INFO the various parameters of the synthesis algorithm such
   as the factors a[i], the maximum 0.5 power and the number of
   multiplications that will be required.  */

static bool
representable_as_half_series_p (REAL_VALUE_TYPE c, unsigned n,
				struct pow_synth_sqrt_info *info)
{
  REAL_VALUE_TYPE factor = dconsthalf;
  REAL_VALUE_TYPE remainder = c;

  info->deepest = 0;
  info->num_mults = 0;
  memset (info->factors, 0, n * sizeof (bool));

  for (unsigned i = 0; i < n; i++)
    {
      REAL_VALUE_TYPE res;

      /* If something inexact happened bail out now.  */
      if (real_arithmetic (&res, MINUS_EXPR, &remainder, &factor))
	return false;

      /* We have hit zero.  The number is representable as a sum
	 of powers of 0.5.  */
      if (real_equal (&res, &dconst0))
	{
	  info->factors[i] = true;
	  info->deepest = i + 1;
	  return true;
	}
      else if (!REAL_VALUE_NEGATIVE (res))
	{
	  remainder = res;
	  info->factors[i] = true;
	  info->num_mults++;
	}
      else
	info->factors[i] = false;

      real_arithmetic (&factor, MULT_EXPR, &factor, &dconsthalf);
    }
  return false;
}
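
/* Example: c = 0.625 = 0.5 + 0.125 gives factors = {1, 0, 1},
   deepest = 3 and num_mults = 1; the value is later synthesized as
   sqrt (x) * sqrt (sqrt (sqrt (x))), a single multiplication.  */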
/* Return the tree corresponding to FN being applied
   to ARG N times at GSI and LOC.
   Look up previous results from CACHE if need be.
   cache[0] should contain just plain ARG i.e. FN applied to ARG 0 times.  */

static tree
get_fn_chain (tree arg, unsigned int n, gimple_stmt_iterator *gsi,
	      tree fn, location_t loc, tree *cache)
{
  tree res = cache[n];
  if (!res)
    {
      tree prev = get_fn_chain (arg, n - 1, gsi, fn, loc, cache);
      res = build_and_insert_call (gsi, loc, fn, prev);
      cache[n] = res;
    }

  return res;
}
/* Print to STREAM the repeated application of function FNAME to ARG
   N times.  So, for FNAME = "foo", ARG = "x", N = 2 it would print:
   foo (foo (x)).  */

static void
print_nested_fn (FILE* stream, const char *fname, const char* arg,
		 unsigned int n)
{
  if (n == 0)
    fprintf (stream, "%s", arg);
  else
    {
      fprintf (stream, "%s (", fname);
      print_nested_fn (stream, fname, arg, n - 1);
      fprintf (stream, ")");
    }
}
/* Print to STREAM the fractional sequence of sqrt chains
   applied to ARG, described by INFO.  Used for the dump file.  */

static void
dump_fractional_sqrt_sequence (FILE *stream, const char *arg,
			       struct pow_synth_sqrt_info *info)
{
  for (unsigned int i = 0; i < info->deepest; i++)
    {
      bool is_set = info->factors[i];
      if (is_set)
	{
	  print_nested_fn (stream, "sqrt", arg, i + 1);
	  if (i != info->deepest - 1)
	    fprintf (stream, " * ");
	}
    }
}
/* Print to STREAM a representation of raising ARG to an integer
   power N.  Used for the dump file.  */

static void
dump_integer_part (FILE *stream, const char* arg, HOST_WIDE_INT n)
{
  if (n > 1)
    fprintf (stream, "powi (%s, " HOST_WIDE_INT_PRINT_DEC ")", arg, n);
  else if (n == 1)
    fprintf (stream, "%s", arg);
}
/* Attempt to synthesize a POW[F] (ARG0, ARG1) call using chains of
   square roots.  Place at GSI and LOC.  Limit the maximum depth
   of the sqrt chains to MAX_DEPTH.  Return the tree holding the
   result of the expanded sequence or NULL_TREE if the expansion failed.

   This routine assumes that ARG1 is a real number with a fractional part
   (the integer exponent case will have been handled earlier in
   gimple_expand_builtin_pow).

   For ARG1 > 0.0:
   * For ARG1 composed of a whole part WHOLE_PART and a fractional part
     FRAC_PART i.e. WHOLE_PART == floor (ARG1) and
		    FRAC_PART == ARG1 - WHOLE_PART:
     Produce POWI (ARG0, WHOLE_PART) * POW (ARG0, FRAC_PART) where
     POW (ARG0, FRAC_PART) is expanded as a product of square root chains
     if it can be expressed as such, that is if FRAC_PART satisfies:
     FRAC_PART == <SUM from i = 1 until MAX_DEPTH> (a[i] * (0.5**i))
     where integer a[i] is either 0 or 1.

     Example:
     POW (x, 3.625) == POWI (x, 3) * POW (x, 0.625)
       --> POWI (x, 3) * SQRT (x) * SQRT (SQRT (SQRT (x)))

   For ARG1 < 0.0 there are two approaches:
   * (A) Expand to 1.0 / POW (ARG0, -ARG1) where POW (ARG0, -ARG1)
	 is calculated as above.

     Example:
     POW (x, -5.625) == 1.0 / POW (x, 5.625)
       --> 1.0 / (POWI (x, 5) * SQRT (x) * SQRT (SQRT (SQRT (x))))

   * (B) : WHOLE_PART := - ceil (abs (ARG1))
	   FRAC_PART  := ARG1 - WHOLE_PART
     and expand to POW (x, FRAC_PART) / POWI (x, WHOLE_PART).
     Example:
     POW (x, -5.875) == POW (x, 0.125) / POWI (X, 6)
       --> SQRT (SQRT (SQRT (x))) / (POWI (x, 6))

   For ARG1 < 0.0 we choose between (A) and (B) depending on
   how many multiplications we'd have to do.
   So, for the example in (B): POW (x, -5.875), if we were to
   follow algorithm (A) we would produce:
   1.0 / POWI (X, 5) * SQRT (X) * SQRT (SQRT (X)) * SQRT (SQRT (SQRT (X)))
   which contains more multiplications than approach (B).

   Hopefully, this approach will eliminate potentially expensive POW library
   calls when unsafe floating point math is enabled and allow the compiler to
   further optimise the multiplies, square roots and divides produced by this
   function.  */

static tree
expand_pow_as_sqrts (gimple_stmt_iterator *gsi, location_t loc,
		     tree arg0, tree arg1, HOST_WIDE_INT max_depth)
{
  tree type = TREE_TYPE (arg0);
  machine_mode mode = TYPE_MODE (type);
  tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
  bool one_over = true;

  if (!sqrtfn)
    return NULL_TREE;

  if (TREE_CODE (arg1) != REAL_CST)
    return NULL_TREE;

  REAL_VALUE_TYPE exp_init = TREE_REAL_CST (arg1);

  gcc_assert (max_depth > 0);
  tree *cache = XALLOCAVEC (tree, max_depth + 1);

  struct pow_synth_sqrt_info synth_info;
  synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
  synth_info.deepest = 0;
  synth_info.num_mults = 0;

  bool neg_exp = REAL_VALUE_NEGATIVE (exp_init);
  REAL_VALUE_TYPE exp = real_value_abs (&exp_init);

  /* The whole and fractional parts of exp.  */
  REAL_VALUE_TYPE whole_part;
  REAL_VALUE_TYPE frac_part;

  real_floor (&whole_part, mode, &exp);
  real_arithmetic (&frac_part, MINUS_EXPR, &exp, &whole_part);

  REAL_VALUE_TYPE ceil_whole = dconst0;
  REAL_VALUE_TYPE ceil_fract = dconst0;

  if (neg_exp)
    {
      real_ceil (&ceil_whole, mode, &exp);
      real_arithmetic (&ceil_fract, MINUS_EXPR, &ceil_whole, &exp);
    }

  if (!representable_as_half_series_p (frac_part, max_depth, &synth_info))
    return NULL_TREE;

  /* Check whether it's more profitable to not use 1.0 / ...  */
  if (neg_exp)
    {
      struct pow_synth_sqrt_info alt_synth_info;
      alt_synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
      alt_synth_info.deepest = 0;
      alt_synth_info.num_mults = 0;

      if (representable_as_half_series_p (ceil_fract, max_depth,
					  &alt_synth_info)
	  && alt_synth_info.deepest <= synth_info.deepest
	  && alt_synth_info.num_mults < synth_info.num_mults)
	{
	  whole_part = ceil_whole;
	  frac_part = ceil_fract;
	  synth_info.deepest = alt_synth_info.deepest;
	  synth_info.num_mults = alt_synth_info.num_mults;
	  memcpy (synth_info.factors, alt_synth_info.factors,
		  (max_depth + 1) * sizeof (bool));
	  one_over = false;
	}
    }

  HOST_WIDE_INT n = real_to_integer (&whole_part);
  REAL_VALUE_TYPE cint;
  real_from_integer (&cint, VOIDmode, n, SIGNED);

  if (!real_identical (&whole_part, &cint))
    return NULL_TREE;

  if (powi_cost (n) + synth_info.num_mults > POWI_MAX_MULTS)
    return NULL_TREE;

  memset (cache, 0, (max_depth + 1) * sizeof (tree));

  tree integer_res = n == 0 ? build_real (type, dconst1) : arg0;

  /* Calculate the integer part of the exponent.  */
  if (n > 1)
    {
      integer_res = gimple_expand_builtin_powi (gsi, loc, arg0, n);
      if (!integer_res)
	return NULL_TREE;
    }

  if (dump_file)
    {
      char string[64];

      real_to_decimal (string, &exp_init, sizeof (string), 0, 1);
      fprintf (dump_file, "synthesizing pow (x, %s) as:\n", string);

      if (neg_exp)
	{
	  if (one_over)
	    {
	      fprintf (dump_file, "1.0 / (");
	      dump_integer_part (dump_file, "x", n);
	      if (n > 0)
		fprintf (dump_file, " * ");
	      dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
	      fprintf (dump_file, ")");
	    }
	  else
	    {
	      dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
	      fprintf (dump_file, " / (");
	      dump_integer_part (dump_file, "x", n);
	      fprintf (dump_file, ")");
	    }
	}
      else
	{
	  dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
	  if (n > 0)
	    fprintf (dump_file, " * ");
	  dump_integer_part (dump_file, "x", n);
	}

      fprintf (dump_file, "\ndeepest sqrt chain: %d\n", synth_info.deepest);
    }

  tree fract_res = NULL_TREE;
  cache[0] = arg0;

  /* Calculate the fractional part of the exponent.  */
  for (unsigned i = 0; i < synth_info.deepest; i++)
    {
      if (synth_info.factors[i])
	{
	  tree sqrt_chain = get_fn_chain (arg0, i + 1, gsi, sqrtfn,
					  loc, cache);

	  if (!fract_res)
	    fract_res = sqrt_chain;
	  else
	    fract_res = build_and_insert_binop (gsi, loc, "powroot",
						MULT_EXPR,
						fract_res, sqrt_chain);
	}
    }

  tree res = NULL_TREE;

  if (neg_exp)
    {
      if (one_over)
	{
	  if (n > 0)
	    res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					  fract_res, integer_res);
	  else
	    res = fract_res;

	  res = build_and_insert_binop (gsi, loc, "powrootrecip", RDIV_EXPR,
					build_real (type, dconst1), res);
	}
      else
	{
	  res = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
					fract_res, integer_res);
	}
    }
  else
    res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
				  fract_res, integer_res);
  return res;
}
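
/* Example: pow (x, 1.25) has WHOLE_PART = 1 and FRAC_PART = 0.25,
   so the result is synthesized as  sqrt (sqrt (x)) * x.  */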
/* ARG0 and ARG1 are the two arguments to a pow builtin call in GSI
   with location info LOC.  If possible, create an equivalent and
   less expensive sequence of statements prior to GSI, and return an
   expession holding the result.  */

static tree
gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc,
			   tree arg0, tree arg1)
{
  REAL_VALUE_TYPE c, cint, dconst1_3, dconst1_4, dconst1_6;
  REAL_VALUE_TYPE c2, dconst3;
  HOST_WIDE_INT n;
  tree type, sqrtfn, cbrtfn, sqrt_arg0, result, cbrt_x, powi_cbrt_x;
  machine_mode mode;
  bool speed_p = optimize_bb_for_speed_p (gsi_bb (*gsi));
  bool hw_sqrt_exists, c_is_int, c2_is_int;

  dconst1_4 = dconst1;
  SET_REAL_EXP (&dconst1_4, REAL_EXP (&dconst1_4) - 2);

  /* If the exponent isn't a constant, there's nothing of interest
     to be done.  */
  if (TREE_CODE (arg1) != REAL_CST)
    return NULL_TREE;

  /* Don't perform the operation if flag_signaling_nans is on
     and the operand is a signaling NaN.  */
  if (HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg1)))
      && ((TREE_CODE (arg0) == REAL_CST
	   && REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg0)))
	  || REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg1))))
    return NULL_TREE;

  /* If the exponent is equivalent to an integer, expand to an optimal
     multiplication sequence when profitable.  */
  c = TREE_REAL_CST (arg1);
  n = real_to_integer (&c);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  c_is_int = real_identical (&c, &cint);

  if (c_is_int
      && ((n >= -1 && n <= 2)
	  || (flag_unsafe_math_optimizations
	      && speed_p
	      && powi_cost (n) <= POWI_MAX_MULTS)))
    return gimple_expand_builtin_powi (gsi, loc, arg0, n);

  /* Attempt various optimizations using sqrt and cbrt.  */
  type = TREE_TYPE (arg0);
  mode = TYPE_MODE (type);
  sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);

  /* Optimize pow(x,0.5) = sqrt(x).  This replacement is always safe
     unless signed zeros must be maintained.  pow(-0,0.5) = +0, while
     sqrt(-0) = -0.  */
  if (sqrtfn
      && real_equal (&c, &dconsthalf)
      && !HONOR_SIGNED_ZEROS (mode))
    return build_and_insert_call (gsi, loc, sqrtfn, arg0);

  hw_sqrt_exists = optab_handler (sqrt_optab, mode) != CODE_FOR_nothing;

  /* Optimize pow(x,1./3.) = cbrt(x).  This requires unsafe math
     optimizations since 1./3. is not exactly representable.  If x
     is negative and finite, the correct value of pow(x,1./3.) is
     a NaN with the "invalid" exception raised, because the value
     of 1./3. actually has an even denominator.  The correct value
     of cbrt(x) is a negative real value.  */
  cbrtfn = mathfn_built_in (type, BUILT_IN_CBRT);
  dconst1_3 = real_value_truncate (mode, dconst_third ());

  if (flag_unsafe_math_optimizations
      && cbrtfn
      && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
      && real_equal (&c, &dconst1_3))
    return build_and_insert_call (gsi, loc, cbrtfn, arg0);

  /* Optimize pow(x,1./6.) = cbrt(sqrt(x)).  Don't do this optimization
     if we don't have a hardware sqrt insn.  */
  dconst1_6 = dconst1_3;
  SET_REAL_EXP (&dconst1_6, REAL_EXP (&dconst1_6) - 1);

  if (flag_unsafe_math_optimizations
      && sqrtfn
      && cbrtfn
      && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
      && speed_p
      && hw_sqrt_exists
      && real_equal (&c, &dconst1_6))
    {
      /* sqrt(x)  */
      sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);

      /* cbrt(sqrt(x))  */
      return build_and_insert_call (gsi, loc, cbrtfn, sqrt_arg0);
    }

  /* Attempt to expand the POW as a product of square root chains.
     Expand the 0.25 case even when optimising for size.  */
  if (flag_unsafe_math_optimizations
      && sqrtfn
      && hw_sqrt_exists
      && (speed_p || real_equal (&c, &dconst1_4))
      && !HONOR_SIGNED_ZEROS (mode))
    {
      unsigned int max_depth = speed_p
				? param_max_pow_sqrt_depth
				: 2;

      tree expand_with_sqrts
	= expand_pow_as_sqrts (gsi, loc, arg0, arg1, max_depth);

      if (expand_with_sqrts)
	return expand_with_sqrts;
    }

  real_arithmetic (&c2, MULT_EXPR, &c, &dconst2);
  n = real_to_integer (&c2);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  c2_is_int = real_identical (&c2, &cint);

  /* Optimize pow(x,c), where 3c = n for some nonzero integer n, into

     powi(x, n/3) * powi(cbrt(x), n%3),                    n > 0;
     1.0 / (powi(x, abs(n)/3) * powi(cbrt(x), abs(n)%3)),  n < 0.

     Do not calculate the first factor when n/3 = 0.  As cbrt(x) is
     different from pow(x, 1./3.) due to rounding and behavior with
     negative x, we need to constrain this transformation to unsafe
     math and positive x or finite math.  */
  real_from_integer (&dconst3, VOIDmode, 3, SIGNED);
  real_arithmetic (&c2, MULT_EXPR, &c, &dconst3);
  real_round (&c2, mode, &c2);
  n = real_to_integer (&c2);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  real_arithmetic (&c2, RDIV_EXPR, &cint, &dconst3);
  real_convert (&c2, mode, &c2);

  if (flag_unsafe_math_optimizations
      && cbrtfn
      && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
      && real_identical (&c2, &c)
      && !c2_is_int
      && optimize_function_for_speed_p (cfun)
      && powi_cost (n / 3) <= POWI_MAX_MULTS)
    {
      tree powi_x_ndiv3 = NULL_TREE;

      /* Attempt to fold powi(arg0, abs(n/3)) into multiplies.  If not
	 possible or profitable, give up.  Skip the degenerate case when
	 abs(n) < 3, where the result is always 1.  */
      if (absu_hwi (n) >= 3)
	{
	  powi_x_ndiv3 = gimple_expand_builtin_powi (gsi, loc, arg0,
						     n / 3);
	  if (!powi_x_ndiv3)
	    return NULL_TREE;
	}

      /* Calculate powi(cbrt(x), n%3).  Don't use gimple_expand_builtin_powi
	 as that creates an unnecessary variable.  Instead, just produce
	 either cbrt(x) or cbrt(x) * cbrt(x).  */
      cbrt_x = build_and_insert_call (gsi, loc, cbrtfn, arg0);

      if (absu_hwi (n) % 3 == 1)
	powi_cbrt_x = cbrt_x;
      else
	powi_cbrt_x = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					      cbrt_x, cbrt_x);

      /* Multiply the two subexpressions, unless powi(x,abs(n)/3) = 1.  */
      if (absu_hwi (n) < 3)
	result = powi_cbrt_x;
      else
	result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					 powi_x_ndiv3, powi_cbrt_x);

      /* If n is negative, reciprocate the result.  */
      if (n < 0)
	result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
					 build_real (type, dconst1), result);

      return result;
    }

  /* No optimizations succeeded.  */
  return NULL_TREE;
}
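
/* Example of the 3c == n case above: pow (x, 4./3.) has n = 4, so the
   result is  powi (x, 1) * powi (cbrt (x), 1),  i.e. x * cbrt (x).  */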
/* ARG is the argument to a cabs builtin call in GSI with location info
   LOC.  Create a sequence of statements prior to GSI that calculates
   sqrt(R*R + I*I), where R and I are the real and imaginary components
   of ARG, respectively.  Return an expression holding the result.  */

static tree
gimple_expand_builtin_cabs (gimple_stmt_iterator *gsi, location_t loc, tree arg)
{
  tree real_part, imag_part, addend1, addend2, sum, result;
  tree type = TREE_TYPE (TREE_TYPE (arg));
  tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
  machine_mode mode = TYPE_MODE (type);

  if (!flag_unsafe_math_optimizations
      || !optimize_bb_for_speed_p (gimple_bb (gsi_stmt (*gsi)))
      || !sqrtfn
      || optab_handler (sqrt_optab, mode) == CODE_FOR_nothing)
    return NULL_TREE;

  real_part = build_and_insert_ref (gsi, loc, type, "cabs",
				    REALPART_EXPR, arg);
  addend1 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
				    real_part, real_part);
  imag_part = build_and_insert_ref (gsi, loc, type, "cabs",
				    IMAGPART_EXPR, arg);
  addend2 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
				    imag_part, imag_part);
  sum = build_and_insert_binop (gsi, loc, "cabs", PLUS_EXPR, addend1, addend2);
  result = build_and_insert_call (gsi, loc, sqrtfn, sum);

  return result;
}
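
/* For instance (an illustrative sketch): with -funsafe-math-optimizations,
   a call m = cabs (z) is lowered here to the equivalent of

       r = __real z;  i = __imag z;
       m = __builtin_sqrt (r * r + i * i);

   avoiding the library call.  */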
/* Go through all calls to sin, cos and cexpi and call execute_cse_sincos_1
   on the SSA_NAME argument of each of them.  Also expand powi(x,n) into
   an optimal number of multiplies, when n is a constant.  */

namespace {

const pass_data pass_data_cse_sincos =
{
  GIMPLE_PASS, /* type */
  "sincos", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_SINCOS, /* tv_id */
  PROP_ssa, /* properties_required */
  PROP_gimple_opt_math, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_cse_sincos : public gimple_opt_pass
{
public:
  pass_cse_sincos (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cse_sincos, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      /* We no longer require either sincos or cexp, since powi expansion
	 piggybacks on this pass.  */
      return optimize;
    }

  virtual unsigned int execute (function *);

}; // class pass_cse_sincos
unsigned int
pass_cse_sincos::execute (function *fun)
{
  basic_block bb;
  bool cfg_changed = false;

  calculate_dominance_info (CDI_DOMINATORS);
  memset (&sincos_stats, 0, sizeof (sincos_stats));

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;
      bool cleanup_eh = false;

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);

	  /* Only the last stmt in a bb could throw, no need to call
	     gimple_purge_dead_eh_edges if we change something in the middle
	     of a basic block.  */
	  cleanup_eh = false;

	  if (is_gimple_call (stmt)
	      && gimple_call_lhs (stmt))
	    {
	      tree arg, arg0, arg1, result;
	      HOST_WIDE_INT n;
	      location_t loc;

	      switch (gimple_call_combined_fn (stmt))
		{
		CASE_CFN_COS:
		CASE_CFN_SIN:
		CASE_CFN_CEXPI:
		  /* Make sure we have either sincos or cexp.  */
		  if (!targetm.libc_has_function (function_c99_math_complex)
		      && !targetm.libc_has_function (function_sincos))
		    break;

		  arg = gimple_call_arg (stmt, 0);
		  if (TREE_CODE (arg) == SSA_NAME)
		    cfg_changed |= execute_cse_sincos_1 (arg);
		  break;

		CASE_CFN_POW:
		  arg0 = gimple_call_arg (stmt, 0);
		  arg1 = gimple_call_arg (stmt, 1);

		  loc = gimple_location (stmt);
		  result = gimple_expand_builtin_pow (&gsi, loc, arg0, arg1);

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gassign *new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		CASE_CFN_POWI:
		  arg0 = gimple_call_arg (stmt, 0);
		  arg1 = gimple_call_arg (stmt, 1);
		  loc = gimple_location (stmt);

		  if (real_minus_onep (arg0))
		    {
		      tree t0, t1, cond, one, minus_one;
		      gassign *stmt;

		      t0 = TREE_TYPE (arg0);
		      t1 = TREE_TYPE (arg1);
		      one = build_real (t0, dconst1);
		      minus_one = build_real (t0, dconstm1);

		      cond = make_temp_ssa_name (t1, NULL, "powi_cond");
		      stmt = gimple_build_assign (cond, BIT_AND_EXPR,
						  arg1, build_int_cst (t1, 1));
		      gimple_set_location (stmt, loc);
		      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

		      result = make_temp_ssa_name (t0, NULL, "powi");
		      stmt = gimple_build_assign (result, COND_EXPR, cond,
						  minus_one, one);
		      gimple_set_location (stmt, loc);
		      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
		    }
		  else
		    {
		      if (!tree_fits_shwi_p (arg1))
			break;

		      n = tree_to_shwi (arg1);
		      result = gimple_expand_builtin_powi (&gsi, loc, arg0, n);
		    }

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gassign *new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		CASE_CFN_CABS:
		  arg0 = gimple_call_arg (stmt, 0);
		  loc = gimple_location (stmt);
		  result = gimple_expand_builtin_cabs (&gsi, loc, arg0);

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gassign *new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		default:;
		}
	    }
	}
      if (cleanup_eh)
	cfg_changed |= gimple_purge_dead_eh_edges (bb);
    }

  statistics_counter_event (fun, "sincos statements inserted",
			    sincos_stats.inserted);

  return cfg_changed ? TODO_cleanup_cfg : 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_cse_sincos (gcc::context *ctxt)
{
  return new pass_cse_sincos (ctxt);
}
/* Return true if stmt is a type conversion operation that can be stripped
   when used in a widening multiply operation.  */

static bool
widening_mult_conversion_strippable_p (tree result_type, gimple *stmt)
{
  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);

  if (TREE_CODE (result_type) == INTEGER_TYPE)
    {
      tree op_type;
      tree inner_op_type;

      if (!CONVERT_EXPR_CODE_P (rhs_code))
	return false;

      op_type = TREE_TYPE (gimple_assign_lhs (stmt));

      /* If the type of OP has the same precision as the result, then
	 we can strip this conversion.  The multiply operation will be
	 selected to create the correct extension as a by-product.  */
      if (TYPE_PRECISION (result_type) == TYPE_PRECISION (op_type))
	return true;

      /* We can also strip a conversion if it preserves the signed-ness of
	 the operation and doesn't narrow the range.  */
      inner_op_type = TREE_TYPE (gimple_assign_rhs1 (stmt));

      /* If the inner-most type is unsigned, then we can strip any
	 intermediate widening operation.  If it's signed, then the
	 intermediate widening operation must also be signed.  */
      if ((TYPE_UNSIGNED (inner_op_type)
	   || TYPE_UNSIGNED (op_type) == TYPE_UNSIGNED (inner_op_type))
	  && TYPE_PRECISION (op_type) > TYPE_PRECISION (inner_op_type))
	return true;

      return false;
    }

  return rhs_code == FIXED_CONVERT_EXPR;
}
/* Return true if RHS is a suitable operand for a widening multiplication,
   assuming a target type of TYPE.
   There are two cases:

     - RHS makes some value at least twice as wide.  Store that value
       in *NEW_RHS_OUT if so, and store its type in *TYPE_OUT.

     - RHS is an integer constant.  Store that value in *NEW_RHS_OUT if so,
       but leave *TYPE_OUT untouched.  */

static bool
is_widening_mult_rhs_p (tree type, tree rhs, tree *type_out,
			tree *new_rhs_out)
{
  gimple *stmt;
  tree type1, rhs1;

  if (TREE_CODE (rhs) == SSA_NAME)
    {
      stmt = SSA_NAME_DEF_STMT (rhs);
      if (is_gimple_assign (stmt))
	{
	  if (! widening_mult_conversion_strippable_p (type, stmt))
	    rhs1 = rhs;
	  else
	    {
	      rhs1 = gimple_assign_rhs1 (stmt);

	      if (TREE_CODE (rhs1) == INTEGER_CST)
		{
		  *new_rhs_out = rhs1;
		  *type_out = NULL;
		  return true;
		}
	    }
	}
      else
	rhs1 = rhs;

      type1 = TREE_TYPE (rhs1);

      if (TREE_CODE (type1) != TREE_CODE (type)
	  || TYPE_PRECISION (type1) * 2 > TYPE_PRECISION (type))
	return false;

      *new_rhs_out = rhs1;
      *type_out = type1;
      return true;
    }

  if (TREE_CODE (rhs) == INTEGER_CST)
    {
      *new_rhs_out = rhs;
      *type_out = NULL;
      return true;
    }

  return false;
}
/* Return true if STMT performs a widening multiplication, assuming the
   output type is TYPE.  If so, store the unwidened types of the operands
   in *TYPE1_OUT and *TYPE2_OUT respectively.  Also fill *RHS1_OUT and
   *RHS2_OUT such that converting those operands to types *TYPE1_OUT
   and *TYPE2_OUT would give the operands of the multiplication.  */

static bool
is_widening_mult_p (gimple *stmt,
		    tree *type1_out, tree *rhs1_out,
		    tree *type2_out, tree *rhs2_out)
{
  tree type = TREE_TYPE (gimple_assign_lhs (stmt));

  if (TREE_CODE (type) == INTEGER_TYPE)
    {
      if (TYPE_OVERFLOW_TRAPS (type))
	return false;
    }
  else if (TREE_CODE (type) != FIXED_POINT_TYPE)
    return false;

  if (!is_widening_mult_rhs_p (type, gimple_assign_rhs1 (stmt), type1_out,
			       rhs1_out))
    return false;

  if (!is_widening_mult_rhs_p (type, gimple_assign_rhs2 (stmt), type2_out,
			       rhs2_out))
    return false;

  if (*type1_out == NULL)
    {
      if (*type2_out == NULL || !int_fits_type_p (*rhs1_out, *type2_out))
	return false;
      *type1_out = *type2_out;
    }

  if (*type2_out == NULL)
    {
      if (!int_fits_type_p (*rhs2_out, *type1_out))
	return false;
      *type2_out = *type1_out;
    }

  /* Ensure that the larger of the two operands comes first.  */
  if (TYPE_PRECISION (*type1_out) < TYPE_PRECISION (*type2_out))
    {
      std::swap (*type1_out, *type2_out);
      std::swap (*rhs1_out, *rhs2_out);
    }

  return true;
}
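
/* As an illustration (not from the original sources): for

       short a, b;  int c;
       c = (int) a * (int) b;

   is_widening_mult_p returns true with *TYPE1_OUT = *TYPE2_OUT = short int
   and the unwidened operands a and b, which convert_mult_to_widen below
   turns into a WIDEN_MULT_EXPR when the target has a 16x16->32 multiply
   instruction.  */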
/* Check to see if the CALL statement is an invocation of copysign
   with 1. being the first argument.  */

static bool
is_copysign_call_with_1 (gimple *call)
{
  gcall *c = dyn_cast <gcall *> (call);
  if (! c)
    return false;

  enum combined_fn code = gimple_call_combined_fn (c);

  if (code == CFN_LAST)
    return false;

  if (builtin_fn_p (code))
    {
      switch (as_builtin_fn (code))
	{
	CASE_FLT_FN (BUILT_IN_COPYSIGN):
	CASE_FLT_FN_FLOATN_NX (BUILT_IN_COPYSIGN):
	  return real_onep (gimple_call_arg (c, 0));
	default:
	  return false;
	}
    }

  if (internal_fn_p (code))
    {
      switch (as_internal_fn (code))
	{
	case IFN_COPYSIGN:
	  return real_onep (gimple_call_arg (c, 0));
	default:
	  return false;
	}
    }

  return false;
}
/* Try to expand the pattern x * copysign (1, y) into xorsign (x, y).
   This is only done when the xorsign optab is defined; if the pattern
   is not a xorsign pattern or if expansion fails, FALSE is returned,
   otherwise TRUE is returned.  */

static bool
convert_expand_mult_copysign (gimple *stmt, gimple_stmt_iterator *gsi)
{
  tree treeop0, treeop1, lhs, type;
  location_t loc = gimple_location (stmt);
  lhs = gimple_assign_lhs (stmt);
  treeop0 = gimple_assign_rhs1 (stmt);
  treeop1 = gimple_assign_rhs2 (stmt);
  type = TREE_TYPE (lhs);
  machine_mode mode = TYPE_MODE (type);

  if (HONOR_SNANS (type))
    return false;

  if (TREE_CODE (treeop0) == SSA_NAME && TREE_CODE (treeop1) == SSA_NAME)
    {
      gimple *call0 = SSA_NAME_DEF_STMT (treeop0);
      if (!has_single_use (treeop0) || !is_copysign_call_with_1 (call0))
	{
	  call0 = SSA_NAME_DEF_STMT (treeop1);
	  if (!has_single_use (treeop1) || !is_copysign_call_with_1 (call0))
	    return false;

	  treeop1 = treeop0;
	}

      if (optab_handler (xorsign_optab, mode) == CODE_FOR_nothing)
	return false;

      gcall *c = as_a <gcall *> (call0);
      treeop0 = gimple_call_arg (c, 1);

      gcall *call_stmt
	= gimple_build_call_internal (IFN_XORSIGN, 2, treeop1, treeop0);
      gimple_set_lhs (call_stmt, lhs);
      gimple_set_location (call_stmt, loc);
      gsi_replace (gsi, call_stmt, true);
      return true;
    }

  return false;
}
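
/* For example (a sketch): on a target defining the xorsign optab,

       z = x * __builtin_copysign (1.0, y);

   becomes z = .XORSIGN (x, y), implementable as bit operations on the
   sign bit, with no floating-point multiply.  */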
/* Process a single gimple statement STMT, which has a MULT_EXPR as
   its rhs, and try to convert it into a WIDEN_MULT_EXPR.  The return
   value is true iff we converted the statement.  */

static bool
convert_mult_to_widen (gimple *stmt, gimple_stmt_iterator *gsi)
{
  tree lhs, rhs1, rhs2, type, type1, type2;
  enum insn_code handler;
  scalar_int_mode to_mode, from_mode, actual_mode;
  optab op;
  int actual_precision;
  location_t loc = gimple_location (stmt);
  bool from_unsigned1, from_unsigned2;

  lhs = gimple_assign_lhs (stmt);
  type = TREE_TYPE (lhs);
  if (TREE_CODE (type) != INTEGER_TYPE)
    return false;

  if (!is_widening_mult_p (stmt, &type1, &rhs1, &type2, &rhs2))
    return false;

  to_mode = SCALAR_INT_TYPE_MODE (type);
  from_mode = SCALAR_INT_TYPE_MODE (type1);
  if (to_mode == from_mode)
    return false;

  from_unsigned1 = TYPE_UNSIGNED (type1);
  from_unsigned2 = TYPE_UNSIGNED (type2);

  if (from_unsigned1 && from_unsigned2)
    op = umul_widen_optab;
  else if (!from_unsigned1 && !from_unsigned2)
    op = smul_widen_optab;
  else
    op = usmul_widen_optab;

  handler = find_widening_optab_handler_and_mode (op, to_mode, from_mode,
						  &actual_mode);

  if (handler == CODE_FOR_nothing)
    {
      if (op != smul_widen_optab)
	{
	  /* We can use a signed multiply with unsigned types as long as
	     there is a wider mode to use, or it is the smaller of the two
	     types that is unsigned.  Note that type1 >= type2, always.  */
	  if ((TYPE_UNSIGNED (type1)
	       && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
	      || (TYPE_UNSIGNED (type2)
		  && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
	    {
	      if (!GET_MODE_WIDER_MODE (from_mode).exists (&from_mode)
		  || GET_MODE_SIZE (to_mode) <= GET_MODE_SIZE (from_mode))
		return false;
	    }

	  op = smul_widen_optab;
	  handler = find_widening_optab_handler_and_mode (op, to_mode,
							  from_mode,
							  &actual_mode);

	  if (handler == CODE_FOR_nothing)
	    return false;

	  from_unsigned1 = from_unsigned2 = false;
	}
      else
	return false;
    }

  /* Ensure that the inputs to the handler are in the correct precision
     for the opcode.  This will be the full mode size.  */
  actual_precision = GET_MODE_PRECISION (actual_mode);
  if (2 * actual_precision > TYPE_PRECISION (type))
    return false;
  if (actual_precision != TYPE_PRECISION (type1)
      || from_unsigned1 != TYPE_UNSIGNED (type1))
    rhs1 = build_and_insert_cast (gsi, loc,
				  build_nonstandard_integer_type
				    (actual_precision, from_unsigned1), rhs1);
  if (actual_precision != TYPE_PRECISION (type2)
      || from_unsigned2 != TYPE_UNSIGNED (type2))
    rhs2 = build_and_insert_cast (gsi, loc,
				  build_nonstandard_integer_type
				    (actual_precision, from_unsigned2), rhs2);

  /* Handle constants.  */
  if (TREE_CODE (rhs1) == INTEGER_CST)
    rhs1 = fold_convert (type1, rhs1);
  if (TREE_CODE (rhs2) == INTEGER_CST)
    rhs2 = fold_convert (type2, rhs2);

  gimple_assign_set_rhs1 (stmt, rhs1);
  gimple_assign_set_rhs2 (stmt, rhs2);
  gimple_assign_set_rhs_code (stmt, WIDEN_MULT_EXPR);
  update_stmt (stmt);
  widen_mul_stats.widen_mults_inserted++;
  return true;
}
/* Process a single gimple statement STMT, which is found at the
   iterator GSI and has either a PLUS_EXPR or a MINUS_EXPR as its
   rhs (given by CODE), and try to convert it into a
   WIDEN_MULT_PLUS_EXPR or a WIDEN_MULT_MINUS_EXPR.  The return value
   is true iff we converted the statement.  */

static bool
convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple *stmt,
			    enum tree_code code)
{
  gimple *rhs1_stmt = NULL, *rhs2_stmt = NULL;
  gimple *conv1_stmt = NULL, *conv2_stmt = NULL, *conv_stmt;
  tree type, type1, type2, optype;
  tree lhs, rhs1, rhs2, mult_rhs1, mult_rhs2, add_rhs;
  enum tree_code rhs1_code = ERROR_MARK, rhs2_code = ERROR_MARK;
  optab this_optab;
  enum tree_code wmult_code;
  enum insn_code handler;
  scalar_mode to_mode, from_mode, actual_mode;
  location_t loc = gimple_location (stmt);
  int actual_precision;
  bool from_unsigned1, from_unsigned2;

  lhs = gimple_assign_lhs (stmt);
  type = TREE_TYPE (lhs);
  if (TREE_CODE (type) != INTEGER_TYPE
      && TREE_CODE (type) != FIXED_POINT_TYPE)
    return false;

  if (code == MINUS_EXPR)
    wmult_code = WIDEN_MULT_MINUS_EXPR;
  else
    wmult_code = WIDEN_MULT_PLUS_EXPR;

  rhs1 = gimple_assign_rhs1 (stmt);
  rhs2 = gimple_assign_rhs2 (stmt);

  if (TREE_CODE (rhs1) == SSA_NAME)
    {
      rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
      if (is_gimple_assign (rhs1_stmt))
	rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
    }

  if (TREE_CODE (rhs2) == SSA_NAME)
    {
      rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
      if (is_gimple_assign (rhs2_stmt))
	rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
    }

  /* Allow for one conversion statement between the multiply
     and addition/subtraction statement.  If there is more than
     one conversion then we assume they would invalidate this
     transformation.  If that's not the case then they should have
     been folded before now.  */
  if (CONVERT_EXPR_CODE_P (rhs1_code))
    {
      conv1_stmt = rhs1_stmt;
      rhs1 = gimple_assign_rhs1 (rhs1_stmt);
      if (TREE_CODE (rhs1) == SSA_NAME)
	{
	  rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
	  if (is_gimple_assign (rhs1_stmt))
	    rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
	}
      else
	return false;
    }
  if (CONVERT_EXPR_CODE_P (rhs2_code))
    {
      conv2_stmt = rhs2_stmt;
      rhs2 = gimple_assign_rhs1 (rhs2_stmt);
      if (TREE_CODE (rhs2) == SSA_NAME)
	{
	  rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
	  if (is_gimple_assign (rhs2_stmt))
	    rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
	}
      else
	return false;
    }

  /* If code is WIDEN_MULT_EXPR then it would seem unnecessary to call
     is_widening_mult_p, but we still need the rhs returns.

     It might also appear that it would be sufficient to use the existing
     operands of the widening multiply, but that would limit the choice of
     multiply-and-accumulate instructions.

     If the widened-multiplication result has more than one use, it is
     probably wiser not to do the conversion.  Also restrict this operation
     to a single basic block to avoid moving the multiply to a different
     block with a higher execution frequency.  */
  if (code == PLUS_EXPR
      && (rhs1_code == MULT_EXPR || rhs1_code == WIDEN_MULT_EXPR))
    {
      if (!has_single_use (rhs1)
	  || gimple_bb (rhs1_stmt) != gimple_bb (stmt)
	  || !is_widening_mult_p (rhs1_stmt, &type1, &mult_rhs1,
				  &type2, &mult_rhs2))
	return false;
      add_rhs = rhs2;
      conv_stmt = conv1_stmt;
    }
  else if (rhs2_code == MULT_EXPR || rhs2_code == WIDEN_MULT_EXPR)
    {
      if (!has_single_use (rhs2)
	  || gimple_bb (rhs2_stmt) != gimple_bb (stmt)
	  || !is_widening_mult_p (rhs2_stmt, &type1, &mult_rhs1,
				  &type2, &mult_rhs2))
	return false;
      add_rhs = rhs1;
      conv_stmt = conv2_stmt;
    }
  else
    return false;
  to_mode = SCALAR_TYPE_MODE (type);
  from_mode = SCALAR_TYPE_MODE (type1);
  if (to_mode == from_mode)
    return false;

  from_unsigned1 = TYPE_UNSIGNED (type1);
  from_unsigned2 = TYPE_UNSIGNED (type2);
  optype = type1;

  /* There's no such thing as a mixed sign madd yet, so use a wider mode.  */
  if (from_unsigned1 != from_unsigned2)
    {
      if (!INTEGRAL_TYPE_P (type))
	return false;
      /* We can use a signed multiply with unsigned types as long as
	 there is a wider mode to use, or it is the smaller of the two
	 types that is unsigned.  Note that type1 >= type2, always.  */
      if ((from_unsigned1
	   && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
	  || (from_unsigned2
	      && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
	{
	  if (!GET_MODE_WIDER_MODE (from_mode).exists (&from_mode)
	      || GET_MODE_SIZE (from_mode) >= GET_MODE_SIZE (to_mode))
	    return false;
	}

      from_unsigned1 = from_unsigned2 = false;
      optype = build_nonstandard_integer_type (GET_MODE_PRECISION (from_mode),
					       false);
    }
  /* If there was a conversion between the multiply and addition
     then we need to make sure it fits a multiply-and-accumulate.
     There should be a single mode change which does not change the
     value.  */
  if (conv_stmt)
    {
      /* We use the original, unmodified data types for this.  */
      tree from_type = TREE_TYPE (gimple_assign_rhs1 (conv_stmt));
      tree to_type = TREE_TYPE (gimple_assign_lhs (conv_stmt));
      int data_size = TYPE_PRECISION (type1) + TYPE_PRECISION (type2);
      bool is_unsigned = TYPE_UNSIGNED (type1) && TYPE_UNSIGNED (type2);

      if (TYPE_PRECISION (from_type) > TYPE_PRECISION (to_type))
	{
	  /* Conversion is a truncate.  */
	  if (TYPE_PRECISION (to_type) < data_size)
	    return false;
	}
      else if (TYPE_PRECISION (from_type) < TYPE_PRECISION (to_type))
	{
	  /* Conversion is an extend.  Check it's the right sort.  */
	  if (TYPE_UNSIGNED (from_type) != is_unsigned
	      && !(is_unsigned && TYPE_PRECISION (from_type) > data_size))
	    return false;
	}
      /* else convert is a no-op for our purposes.  */
    }
  /* Verify that the machine can perform a widening multiply
     accumulate in this mode/signedness combination, otherwise
     this transformation is likely to pessimize code.  */
  this_optab = optab_for_tree_code (wmult_code, optype, optab_default);
  handler = find_widening_optab_handler_and_mode (this_optab, to_mode,
						  from_mode, &actual_mode);

  if (handler == CODE_FOR_nothing)
    return false;

  /* Ensure that the inputs to the handler are in the correct precision
     for the opcode.  This will be the full mode size.  */
  actual_precision = GET_MODE_PRECISION (actual_mode);
  if (actual_precision != TYPE_PRECISION (type1)
      || from_unsigned1 != TYPE_UNSIGNED (type1))
    mult_rhs1 = build_and_insert_cast (gsi, loc,
				       build_nonstandard_integer_type
					 (actual_precision, from_unsigned1),
				       mult_rhs1);
  if (actual_precision != TYPE_PRECISION (type2)
      || from_unsigned2 != TYPE_UNSIGNED (type2))
    mult_rhs2 = build_and_insert_cast (gsi, loc,
				       build_nonstandard_integer_type
					 (actual_precision, from_unsigned2),
				       mult_rhs2);

  if (!useless_type_conversion_p (type, TREE_TYPE (add_rhs)))
    add_rhs = build_and_insert_cast (gsi, loc, type, add_rhs);

  /* Handle constants.  */
  if (TREE_CODE (mult_rhs1) == INTEGER_CST)
    mult_rhs1 = fold_convert (type1, mult_rhs1);
  if (TREE_CODE (mult_rhs2) == INTEGER_CST)
    mult_rhs2 = fold_convert (type2, mult_rhs2);

  gimple_assign_set_rhs_with_ops (gsi, wmult_code, mult_rhs1, mult_rhs2,
				  add_rhs);
  update_stmt (gsi_stmt (*gsi));
  widen_mul_stats.maccs_inserted++;
  return true;
}
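
/* Illustrative example (not from the original sources): for

       short a, b;  int acc;
       acc = acc + (int) a * (int) b;

   the multiply is first turned into a WIDEN_MULT_EXPR, and this function
   then folds it into acc = WIDEN_MULT_PLUS_EXPR <a, b, acc>, which can map
   to a single multiply-accumulate instruction where the target provides
   one.  */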
/* Given a result MUL_RESULT which is a result of a multiplication of OP1 and
   OP2 and which we know is used in statements that can be, together with the
   multiplication, converted to FMAs, perform the transformation.  */

static void
convert_mult_to_fma_1 (tree mul_result, tree op1, tree op2)
{
  tree type = TREE_TYPE (mul_result);
  gimple *use_stmt;
  imm_use_iterator imm_iter;
  gcall *fma_stmt;

  FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, mul_result)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      tree addop, mulop1 = op1, result = mul_result;
      bool negate_p = false;
      gimple_seq seq = NULL;

      if (is_gimple_debug (use_stmt))
	continue;

      if (is_gimple_assign (use_stmt)
	  && gimple_assign_rhs_code (use_stmt) == NEGATE_EXPR)
	{
	  result = gimple_assign_lhs (use_stmt);
	  use_operand_p use_p;
	  gimple *neguse_stmt;
	  single_imm_use (gimple_assign_lhs (use_stmt), &use_p, &neguse_stmt);
	  gsi_remove (&gsi, true);
	  release_defs (use_stmt);

	  use_stmt = neguse_stmt;
	  gsi = gsi_for_stmt (use_stmt);
	  negate_p = true;
	}

      tree cond, else_value, ops[3];
      tree_code code;
      if (!can_interpret_as_conditional_op_p (use_stmt, &cond, &code,
					      ops, &else_value))
	gcc_unreachable ();
      addop = ops[0] == result ? ops[1] : ops[0];

      if (code == MINUS_EXPR)
	{
	  if (ops[0] == result)
	    /* a * b - c -> a * b + (-c)  */
	    addop = gimple_build (&seq, NEGATE_EXPR, type, addop);
	  else
	    /* a - b * c -> (-b) * c + a */
	    negate_p = !negate_p;
	}

      if (negate_p)
	mulop1 = gimple_build (&seq, NEGATE_EXPR, type, mulop1);

      if (seq)
	gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);

      if (cond)
	fma_stmt = gimple_build_call_internal (IFN_COND_FMA, 5, cond, mulop1,
					       op2, addop, else_value);
      else
	fma_stmt = gimple_build_call_internal (IFN_FMA, 3, mulop1, op2, addop);
      gimple_set_lhs (fma_stmt, gimple_get_lhs (use_stmt));
      gimple_call_set_nothrow (fma_stmt, !stmt_can_throw_internal (cfun,
								   use_stmt));
      gsi_replace (&gsi, fma_stmt, true);
      /* Follow all SSA edges so that we generate FMS, FNMA and FNMS
	 regardless of where the negation occurs.  */
      gimple *orig_stmt = gsi_stmt (gsi);
      if (fold_stmt (&gsi, follow_all_ssa_edges))
	{
	  if (maybe_clean_or_replace_eh_stmt (orig_stmt, gsi_stmt (gsi)))
	    gcc_unreachable ();
	  update_stmt (gsi_stmt (gsi));
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Generated FMA ");
	  print_gimple_stmt (dump_file, gsi_stmt (gsi), 0, TDF_NONE);
	  fprintf (dump_file, "\n");
	}

      /* If the FMA result is negated in a single use, fold the negation
	 too.  */
      orig_stmt = gsi_stmt (gsi);
      use_operand_p use_p;
      gimple *neg_stmt;
      if (is_gimple_call (orig_stmt)
	  && gimple_call_internal_p (orig_stmt)
	  && gimple_call_lhs (orig_stmt)
	  && TREE_CODE (gimple_call_lhs (orig_stmt)) == SSA_NAME
	  && single_imm_use (gimple_call_lhs (orig_stmt), &use_p, &neg_stmt)
	  && is_gimple_assign (neg_stmt)
	  && gimple_assign_rhs_code (neg_stmt) == NEGATE_EXPR
	  && !stmt_could_throw_p (cfun, neg_stmt))
	{
	  gsi = gsi_for_stmt (neg_stmt);
	  if (fold_stmt (&gsi, follow_all_ssa_edges))
	    {
	      if (maybe_clean_or_replace_eh_stmt (neg_stmt, gsi_stmt (gsi)))
		gcc_unreachable ();
	      update_stmt (gsi_stmt (gsi));
	      if (dump_file && (dump_flags & TDF_DETAILS))
		{
		  fprintf (dump_file, "Folded FMA negation ");
		  print_gimple_stmt (dump_file, gsi_stmt (gsi), 0, TDF_NONE);
		  fprintf (dump_file, "\n");
		}
	    }
	}

      widen_mul_stats.fmas_inserted++;
    }
}
/* Data necessary to perform the actual transformation from a multiplication
   and an addition to an FMA once the decision that it should be done has
   been taken, and to then delete the multiplication statement from the
   function IL.  */

struct fma_transformation_info
{
  gimple *mul_stmt;
  tree mul_result;
  tree op1;
  tree op2;
};

/* Structure containing the current state of FMA deferring, i.e. whether we
   are deferring, whether to continue deferring, and all data necessary to
   come back and perform all deferred transformations.  */

class fma_deferring_state
{
public:
  /* Class constructor.  Pass true as PERFORM_DEFERRING in order to actually
     do any deferring.  */

  fma_deferring_state (bool perform_deferring)
    : m_candidates (), m_mul_result_set (), m_initial_phi (NULL),
      m_last_result (NULL_TREE), m_deferring_p (perform_deferring) {}

  /* List of FMA candidates for which the transformation has been determined
     possible but which, at this point in the BB analysis, we do not consider
     beneficial.  */
  auto_vec<fma_transformation_info, 8> m_candidates;

  /* Set of results of multiplication that are part of already deferred FMA
     candidates.  */
  hash_set<tree> m_mul_result_set;

  /* The PHI that supposedly feeds back result of a FMA to another over loop
     boundary.  */
  gphi *m_initial_phi;

  /* Result of the last produced FMA candidate or NULL if there has not been
     one.  */
  tree m_last_result;

  /* If true, deferring might still be profitable.  If false, transform all
     candidates and no longer defer.  */
  bool m_deferring_p;
};

/* Transform all deferred FMA candidates and mark STATE as no longer
   deferring.  */

static void
cancel_fma_deferring (fma_deferring_state *state)
{
  if (!state->m_deferring_p)
    return;

  for (unsigned i = 0; i < state->m_candidates.length (); i++)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Generating deferred FMA\n");

      const fma_transformation_info &fti = state->m_candidates[i];
      convert_mult_to_fma_1 (fti.mul_result, fti.op1, fti.op2);

      gimple_stmt_iterator gsi = gsi_for_stmt (fti.mul_stmt);
      gsi_remove (&gsi, true);
      release_defs (fti.mul_stmt);
    }

  state->m_deferring_p = false;
}
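
/* Motivating sketch for the deferring machinery (illustrative): in a
   reduction loop such as

       for (i = 0; i < n; i++)
	 acc = acc + a[i] * b[i];

   every FMA candidate feeds the addend of the next one through the
   accumulator PHI, and on some targets such a chain of dependent FMAs is
   slower than independent multiplies plus adds.  Candidates like these are
   queued in fma_deferring_state and only materialized if the chain turns
   out to be broken.  */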
/* If OP is an SSA name defined by a PHI node, return the PHI statement.
   Otherwise return NULL.  */

static gphi *
result_of_phi (tree op)
{
  if (TREE_CODE (op) != SSA_NAME)
    return NULL;

  return dyn_cast <gphi *> (SSA_NAME_DEF_STMT (op));
}

/* After processing statements of a BB and recording STATE, return true if the
   initial phi is fed by the last FMA candidate result or one such result from
   previously processed BBs marked in LAST_RESULT_SET.  */

static bool
last_fma_candidate_feeds_initial_phi (fma_deferring_state *state,
				      hash_set<tree> *last_result_set)
{
  ssa_op_iter iter;
  use_operand_p use;
  FOR_EACH_PHI_ARG (use, state->m_initial_phi, iter, SSA_OP_USE)
    {
      tree t = USE_FROM_PTR (use);
      if (t == state->m_last_result
	  || last_result_set->contains (t))
	return true;
    }

  return false;
}
/* Combine the multiplication at MUL_STMT with operands MULOP1 and MULOP2
   with uses in additions and subtractions to form fused multiply-add
   operations.  Returns true if successful and MUL_STMT should be removed.
   If MUL_COND is nonnull, the multiplication in MUL_STMT is conditional
   on MUL_COND, otherwise it is unconditional.

   If STATE indicates that we are deferring FMA transformation, that means
   that we do not produce FMAs for basic blocks which look like:

    <bb 6>
    # accumulator_111 = PHI <0.0(5), accumulator_66(6)>
    _65 = _14 * _16;
    accumulator_66 = _65 + accumulator_111;

   or its unrolled version, i.e. with several FMA candidates that feed result
   of one into the addend of another.  Instead, we add them to a list in STATE
   and if we later discover an FMA candidate that is not part of such a chain,
   we go back and perform all deferred past candidates.  */
static bool
convert_mult_to_fma (gimple *mul_stmt, tree op1, tree op2,
		     fma_deferring_state *state, tree mul_cond = NULL_TREE)
{
  tree mul_result = gimple_get_lhs (mul_stmt);
  tree type = TREE_TYPE (mul_result);
  gimple *use_stmt, *neguse_stmt;
  use_operand_p use_p;
  imm_use_iterator imm_iter;

  if (FLOAT_TYPE_P (type)
      && flag_fp_contract_mode == FP_CONTRACT_OFF)
    return false;

  /* We don't want to do bitfield reduction ops.  */
  if (INTEGRAL_TYPE_P (type)
      && (!type_has_mode_precision_p (type) || TYPE_OVERFLOW_TRAPS (type)))
    return false;

  /* If the target doesn't support it, don't generate it.  We assume that
     if fma isn't available then fms, fnma or fnms are not either.  */
  optimization_type opt_type = bb_optimization_type (gimple_bb (mul_stmt));
  if (!direct_internal_fn_supported_p (IFN_FMA, type, opt_type))
    return false;

  /* If the multiplication has zero uses, it is kept around probably because
     of -fnon-call-exceptions.  Don't optimize it away in that case,
     that is DCE's job.  */
  if (has_zero_uses (mul_result))
    return false;

  bool check_defer
    = (state->m_deferring_p
       && (tree_to_shwi (TYPE_SIZE (type))
	   <= param_avoid_fma_max_bits));
  bool defer = check_defer;
  bool seen_negate_p = false;
  /* Make sure that the multiplication statement becomes dead after
     the transformation, thus that all uses are transformed to FMAs.
     This means we assume that an FMA operation has the same cost
     as an addition.  */
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, mul_result)
    {
      tree result = mul_result;
      bool negate_p = false;

      use_stmt = USE_STMT (use_p);

      if (is_gimple_debug (use_stmt))
	continue;

      /* For now restrict this operation to single basic blocks.  In theory
	 we would want to support sinking the multiplication in
	 m = a*b;
	 if ()
	   ma = m + c;
	 else
	   d = m;
	 to form an fma in the then block and sink the multiplication to the
	 else block.  */
      if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
	return false;

      /* A negate on the multiplication leads to FNMA.  */
      if (is_gimple_assign (use_stmt)
	  && gimple_assign_rhs_code (use_stmt) == NEGATE_EXPR)
	{
	  ssa_op_iter iter;
	  use_operand_p usep;

	  /* If (due to earlier missed optimizations) we have two
	     negates of the same value, treat them as equivalent
	     to a single negate with multiple uses.  */
	  if (seen_negate_p)
	    return false;

	  result = gimple_assign_lhs (use_stmt);

	  /* Make sure the negate statement becomes dead with this
	     single transformation.  */
	  if (!single_imm_use (gimple_assign_lhs (use_stmt),
			       &use_p, &neguse_stmt))
	    return false;

	  /* Make sure the multiplication isn't also used on that stmt.  */
	  FOR_EACH_PHI_OR_STMT_USE (usep, neguse_stmt, iter, SSA_OP_USE)
	    if (USE_FROM_PTR (usep) == mul_result)
	      return false;

	  /* Re-validate.  */
	  use_stmt = neguse_stmt;
	  if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
	    return false;

	  negate_p = seen_negate_p = true;
	}

      tree cond, else_value, ops[3];
      tree_code code;
      if (!can_interpret_as_conditional_op_p (use_stmt, &cond, &code, ops,
					      &else_value))
	return false;

      switch (code)
	{
	case MINUS_EXPR:
	  if (ops[1] == result)
	    negate_p = !negate_p;
	  break;
	case PLUS_EXPR:
	  break;
	default:
	  /* FMA can only be formed from PLUS and MINUS.  */
	  return false;
	}

      if (mul_cond && cond != mul_cond)
	return false;

      if (cond)
	{
	  if (cond == result || else_value == result)
	    return false;
	  if (!direct_internal_fn_supported_p (IFN_COND_FMA, type, opt_type))
	    return false;
	}

      /* If the subtrahend (OPS[1]) is computed by a MULT_EXPR that
	 we'll visit later, we might be able to get a more profitable
	 match with fnma.
	 OTOH, if we don't, a negate / fma pair has likely lower latency
	 than a mult / subtract pair.  */
      if (code == MINUS_EXPR
	  && !negate_p
	  && ops[0] == result
	  && !direct_internal_fn_supported_p (IFN_FMS, type, opt_type)
	  && direct_internal_fn_supported_p (IFN_FNMA, type, opt_type)
	  && TREE_CODE (ops[1]) == SSA_NAME
	  && has_single_use (ops[1]))
	{
	  gimple *stmt2 = SSA_NAME_DEF_STMT (ops[1]);
	  if (is_gimple_assign (stmt2)
	      && gimple_assign_rhs_code (stmt2) == MULT_EXPR)
	    return false;
	}

      /* We can't handle a * b + a * b.  */
      if (ops[0] == ops[1])
	return false;
      /* If deferring, make sure we are not looking at an instruction that
	 wouldn't have existed if we were not.  */
      if (state->m_deferring_p
	  && (state->m_mul_result_set.contains (ops[0])
	      || state->m_mul_result_set.contains (ops[1])))
	return false;

      if (check_defer)
	{
	  tree use_lhs = gimple_get_lhs (use_stmt);
	  if (state->m_last_result)
	    {
	      if (ops[1] == state->m_last_result
		  || ops[0] == state->m_last_result)
		defer = true;
	      else
		defer = false;
	    }
	  else
	    {
	      gcc_checking_assert (!state->m_initial_phi);
	      gphi *phi;
	      if (ops[0] == result)
		phi = result_of_phi (ops[1]);
	      else
		{
		  gcc_assert (ops[1] == result);
		  phi = result_of_phi (ops[0]);
		}

	      if (phi)
		{
		  state->m_initial_phi = phi;
		  defer = true;
		}
	      else
		defer = false;
	    }

	  state->m_last_result = use_lhs;
	  check_defer = false;
	}
      else
	defer = false;

      /* While it is possible to validate whether or not the exact form that
	 we've recognized is available in the backend, the assumption is that
	 if the deferring logic above did not trigger, the transformation is
	 never a loss.  For instance, suppose the target only has the plain FMA
	 pattern available.  Consider a*b-c -> fma(a,b,-c): we've exchanged
	 MUL+SUB for FMA+NEG, which is still two operations.  Consider
	 -(a*b)-c -> fma(-a,b,-c): we still have 3 operations, but in the FMA
	 form the two NEGs are independent and could be run in parallel.  */
    }

  if (defer)
    {
      fma_transformation_info fti;
      fti.mul_stmt = mul_stmt;
      fti.mul_result = mul_result;
      fti.op1 = op1;
      fti.op2 = op2;
      state->m_candidates.safe_push (fti);
      state->m_mul_result_set.add (mul_result);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Deferred generating FMA for multiplication ");
	  print_gimple_stmt (dump_file, mul_stmt, 0, TDF_NONE);
	  fprintf (dump_file, "\n");
	}

      return false;
    }

  if (state->m_deferring_p)
    cancel_fma_deferring (state);
  convert_mult_to_fma_1 (mul_result, op1, op2);
  return true;
}
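
/* To illustrate the forms produced (a sketch, using C-level syntax):

       d = a * b + c;   ->  d = .FMA (a, b, c);
       d = a * b - c;   ->  d = .FMA (a, b, -c), folded to .FMS (a, b, c);
       d = c - a * b;   ->  d = .FMA (-a, b, c), folded to .FNMA (a, b, c);

   where the folding into the FMS/FNMA/FNMS variants is done by the
   fold_stmt call in convert_mult_to_fma_1 when the target supports them.  */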
/* Helper function of match_uaddsub_overflow.  Return 1
   if USE_STMT is unsigned overflow check ovf != 0 for
   STMT, -1 if USE_STMT is unsigned overflow check ovf == 0
   for STMT and 0 otherwise.  */

static int
uaddsub_overflow_check_p (gimple *stmt, gimple *use_stmt)
{
  enum tree_code ccode = ERROR_MARK;
  tree crhs1 = NULL_TREE, crhs2 = NULL_TREE;
  if (gimple_code (use_stmt) == GIMPLE_COND)
    {
      ccode = gimple_cond_code (use_stmt);
      crhs1 = gimple_cond_lhs (use_stmt);
      crhs2 = gimple_cond_rhs (use_stmt);
    }
  else if (is_gimple_assign (use_stmt))
    {
      if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
	{
	  ccode = gimple_assign_rhs_code (use_stmt);
	  crhs1 = gimple_assign_rhs1 (use_stmt);
	  crhs2 = gimple_assign_rhs2 (use_stmt);
	}
      else if (gimple_assign_rhs_code (use_stmt) == COND_EXPR)
	{
	  tree cond = gimple_assign_rhs1 (use_stmt);
	  if (COMPARISON_CLASS_P (cond))
	    {
	      ccode = TREE_CODE (cond);
	      crhs1 = TREE_OPERAND (cond, 0);
	      crhs2 = TREE_OPERAND (cond, 1);
	    }
	  else
	    return 0;
	}
      else
	return 0;
    }
  else
    return 0;

  if (TREE_CODE_CLASS (ccode) != tcc_comparison)
    return 0;

  enum tree_code code = gimple_assign_rhs_code (stmt);
  tree lhs = gimple_assign_lhs (stmt);
  tree rhs1 = gimple_assign_rhs1 (stmt);
  tree rhs2 = gimple_assign_rhs2 (stmt);

  switch (ccode)
    {
    case GT_EXPR:
    case LE_EXPR:
      /* r = a - b; r > a or r <= a
	 r = a + b; a > r or a <= r or b > r or b <= r.  */
      if ((code == MINUS_EXPR && crhs1 == lhs && crhs2 == rhs1)
	  || (code == PLUS_EXPR && (crhs1 == rhs1 || crhs1 == rhs2)
	      && crhs2 == lhs))
	return ccode == GT_EXPR ? 1 : -1;
      break;
    case LT_EXPR:
    case GE_EXPR:
      /* r = a - b; a < r or a >= r
	 r = a + b; r < a or r >= a or r < b or r >= b.  */
      if ((code == MINUS_EXPR && crhs1 == rhs1 && crhs2 == lhs)
	  || (code == PLUS_EXPR && crhs1 == lhs
	      && (crhs2 == rhs1 || crhs2 == rhs2)))
	return ccode == LT_EXPR ? 1 : -1;
      break;
    default:
      break;
    }
  return 0;
}
/* Recognize for unsigned x
   x = y - z;
   if (x > y)
   where there are other uses of x and replace it with
   _7 = SUB_OVERFLOW (y, z);
   x = REALPART_EXPR <_7>;
   _8 = IMAGPART_EXPR <_7>;
   if (_8)
   and similarly for addition.  */

static bool
match_uaddsub_overflow (gimple_stmt_iterator *gsi, gimple *stmt,
			enum tree_code code)
{
  tree lhs = gimple_assign_lhs (stmt);
  tree type = TREE_TYPE (lhs);
  use_operand_p use_p;
  imm_use_iterator iter;
  bool use_seen = false;
  bool ovf_use_seen = false;
  gimple *use_stmt;

  gcc_checking_assert (code == PLUS_EXPR || code == MINUS_EXPR);
  if (!INTEGRAL_TYPE_P (type)
      || !TYPE_UNSIGNED (type)
      || has_zero_uses (lhs)
      || has_single_use (lhs)
      || optab_handler (code == PLUS_EXPR ? uaddv4_optab : usubv4_optab,
			TYPE_MODE (type)) == CODE_FOR_nothing)
    return false;

  FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
    {
      use_stmt = USE_STMT (use_p);
      if (is_gimple_debug (use_stmt))
	continue;

      if (uaddsub_overflow_check_p (stmt, use_stmt))
	ovf_use_seen = true;
      else
	use_seen = true;
      if (ovf_use_seen && use_seen)
	break;
    }

  if (!ovf_use_seen || !use_seen)
    return false;

  tree ctype = build_complex_type (type);
  tree rhs1 = gimple_assign_rhs1 (stmt);
  tree rhs2 = gimple_assign_rhs2 (stmt);
  gcall *g = gimple_build_call_internal (code == PLUS_EXPR
					 ? IFN_ADD_OVERFLOW : IFN_SUB_OVERFLOW,
					 2, rhs1, rhs2);
  tree ctmp = make_ssa_name (ctype);
  gimple_call_set_lhs (g, ctmp);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);
  gassign *g2 = gimple_build_assign (lhs, REALPART_EXPR,
				     build1 (REALPART_EXPR, type, ctmp));
  gsi_replace (gsi, g2, true);
  tree ovf = make_ssa_name (type);
  g2 = gimple_build_assign (ovf, IMAGPART_EXPR,
			    build1 (IMAGPART_EXPR, type, ctmp));
  gsi_insert_after (gsi, g2, GSI_NEW_STMT);

  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
    {
      if (is_gimple_debug (use_stmt))
	continue;

      int ovf_use = uaddsub_overflow_check_p (stmt, use_stmt);
      if (ovf_use == 0)
	continue;
      if (gimple_code (use_stmt) == GIMPLE_COND)
	{
	  gcond *cond_stmt = as_a <gcond *> (use_stmt);
	  gimple_cond_set_lhs (cond_stmt, ovf);
	  gimple_cond_set_rhs (cond_stmt, build_int_cst (type, 0));
	  gimple_cond_set_code (cond_stmt, ovf_use == 1 ? NE_EXPR : EQ_EXPR);
	}
      else
	{
	  gcc_checking_assert (is_gimple_assign (use_stmt));
	  if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
	    {
	      gimple_assign_set_rhs1 (use_stmt, ovf);
	      gimple_assign_set_rhs2 (use_stmt, build_int_cst (type, 0));
	      gimple_assign_set_rhs_code (use_stmt,
					  ovf_use == 1 ? NE_EXPR : EQ_EXPR);
	    }
	  else
	    {
	      gcc_checking_assert (gimple_assign_rhs_code (use_stmt)
				   == COND_EXPR);
	      tree cond = build2 (ovf_use == 1 ? NE_EXPR : EQ_EXPR,
				  boolean_type_node, ovf,
				  build_int_cst (type, 0));
	      gimple_assign_set_rhs1 (use_stmt, cond);
	    }
	}
      update_stmt (use_stmt);
    }
  return true;
}
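
/* Source-level illustration (a sketch): for unsigned y, z,

       x = y - z;
       if (x > y)  ...overflow...
       use (x);

   becomes

       _7 = .SUB_OVERFLOW (y, z);
       x = REALPART_EXPR <_7>;
       _8 = IMAGPART_EXPR <_7>;
       if (_8 != 0)  ...overflow...
       use (x);

   the extra non-overflow use of x is what makes the transform
   worthwhile.  */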
/* Return true if target has support for divmod.  */

static bool
target_supports_divmod_p (optab divmod_optab, optab div_optab, machine_mode mode)
{
  /* If target supports hardware divmod insn, use it for divmod.  */
  if (optab_handler (divmod_optab, mode) != CODE_FOR_nothing)
    return true;

  /* Check if libfunc for divmod is available.  */
  rtx libfunc = optab_libfunc (divmod_optab, mode);
  if (libfunc != NULL_RTX)
    {
      /* If optab_handler exists for div_optab, perhaps in a wider mode,
	 we don't want to use the libfunc even if it exists for given mode.  */
      machine_mode div_mode;
      FOR_EACH_MODE_FROM (div_mode, mode)
	if (optab_handler (div_optab, div_mode) != CODE_FOR_nothing)
	  return false;

      return targetm.expand_divmod_libfunc != NULL;
    }

  return false;
}
/* Check if stmt is candidate for divmod transform.  */

static bool
divmod_candidate_p (gassign *stmt)
{
  tree type = TREE_TYPE (gimple_assign_lhs (stmt));
  machine_mode mode = TYPE_MODE (type);
  optab divmod_optab, div_optab;

  if (TYPE_UNSIGNED (type))
    {
      divmod_optab = udivmod_optab;
      div_optab = udiv_optab;
    }
  else
    {
      divmod_optab = sdivmod_optab;
      div_optab = sdiv_optab;
    }

  tree op1 = gimple_assign_rhs1 (stmt);
  tree op2 = gimple_assign_rhs2 (stmt);

  /* Disable the transform if either is a constant, since division-by-constant
     may have specialized expansion.  */
  if (CONSTANT_CLASS_P (op1) || CONSTANT_CLASS_P (op2))
    return false;

  /* Exclude the case where TYPE_OVERFLOW_TRAPS (type) as that should
     expand using the [su]divv optabs.  */
  if (TYPE_OVERFLOW_TRAPS (type))
    return false;

  if (!target_supports_divmod_p (divmod_optab, div_optab, mode))
    return false;

  return true;
}
/* This function looks for:
   t1 = a TRUNC_DIV_EXPR b;
   t2 = a TRUNC_MOD_EXPR b;
   and transforms it to the following sequence:
   complex_tmp = DIVMOD (a, b);
   t1 = REALPART_EXPR (complex_tmp);
   t2 = IMAGPART_EXPR (complex_tmp);
   For conditions enabling the transform see divmod_candidate_p ().

   The pass has three parts:
   1) Find top_stmt which is trunc_div or trunc_mod stmt and dominates all
      other trunc_div_expr and trunc_mod_expr stmts.
   2) Add top_stmt and all trunc_div and trunc_mod stmts dominated by top_stmt
      to stmts vector.
   3) Insert DIVMOD call just before top_stmt and update entries in
      stmts vector to use return value of DIVMOD (REALPART_EXPR for div,
      IMAGPART_EXPR for mod).  */
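
/* For instance (illustrative): given

       q = a / b;   ...   r = a % b;

   with the division dominating the modulo, a single call
   divmod_tmp = .DIVMOD (a, b) is emitted before the division and both
   statements are rewritten to take the real and imaginary parts of
   divmod_tmp, so quotient and remainder are computed only once.  */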
static bool
convert_to_divmod (gassign *stmt)
{
  if (stmt_can_throw_internal (cfun, stmt)
      || !divmod_candidate_p (stmt))
    return false;

  tree op1 = gimple_assign_rhs1 (stmt);
  tree op2 = gimple_assign_rhs2 (stmt);

  imm_use_iterator use_iter;
  gimple *use_stmt;
  auto_vec<gimple *> stmts;

  gimple *top_stmt = stmt;
  basic_block top_bb = gimple_bb (stmt);

  /* Part 1: Try to set top_stmt to "topmost" stmt that dominates
     at-least stmt and possibly other trunc_div/trunc_mod stmts
     having same operands as stmt.  */

  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, op1)
    {
      if (is_gimple_assign (use_stmt)
	  && (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR
	      || gimple_assign_rhs_code (use_stmt) == TRUNC_MOD_EXPR)
	  && operand_equal_p (op1, gimple_assign_rhs1 (use_stmt), 0)
	  && operand_equal_p (op2, gimple_assign_rhs2 (use_stmt), 0))
	{
	  if (stmt_can_throw_internal (cfun, use_stmt))
	    continue;

	  basic_block bb = gimple_bb (use_stmt);

	  if (bb == top_bb)
	    {
	      if (gimple_uid (use_stmt) < gimple_uid (top_stmt))
		top_stmt = use_stmt;
	    }
	  else if (dominated_by_p (CDI_DOMINATORS, top_bb, bb))
	    {
	      top_bb = bb;
	      top_stmt = use_stmt;
	    }
	}
    }

  tree top_op1 = gimple_assign_rhs1 (top_stmt);
  tree top_op2 = gimple_assign_rhs2 (top_stmt);

  stmts.safe_push (top_stmt);
  bool div_seen = (gimple_assign_rhs_code (top_stmt) == TRUNC_DIV_EXPR);

  /* Part 2: Add all trunc_div/trunc_mod statements dominated by top_bb
     to stmts vector.  The 2nd loop will always add stmt to stmts vector, since
     gimple_bb (top_stmt) dominates gimple_bb (stmt), so the
     2nd loop ends up adding at-least single trunc_mod_expr stmt.  */

  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, top_op1)
    {
      if (is_gimple_assign (use_stmt)
	  && (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR
	      || gimple_assign_rhs_code (use_stmt) == TRUNC_MOD_EXPR)
	  && operand_equal_p (top_op1, gimple_assign_rhs1 (use_stmt), 0)
	  && operand_equal_p (top_op2, gimple_assign_rhs2 (use_stmt), 0))
	{
	  if (use_stmt == top_stmt
	      || stmt_can_throw_internal (cfun, use_stmt)
	      || !dominated_by_p (CDI_DOMINATORS, gimple_bb (use_stmt), top_bb))
	    continue;

	  stmts.safe_push (use_stmt);
	  if (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR)
	    div_seen = true;
	}
    }

  if (!div_seen)
    return false;

  /* Part 3: Create libcall to internal fn DIVMOD:
     divmod_tmp = DIVMOD (op1, op2).  */

  gcall *call_stmt = gimple_build_call_internal (IFN_DIVMOD, 2, op1, op2);
  tree res = make_temp_ssa_name (build_complex_type (TREE_TYPE (op1)),
				 call_stmt, "divmod_tmp");
  gimple_call_set_lhs (call_stmt, res);
  /* We rejected throwing statements above.  */
  gimple_call_set_nothrow (call_stmt, true);

  /* Insert the call before top_stmt.  */
  gimple_stmt_iterator top_stmt_gsi = gsi_for_stmt (top_stmt);
  gsi_insert_before (&top_stmt_gsi, call_stmt, GSI_SAME_STMT);

  widen_mul_stats.divmod_calls_inserted++;

  /* Update all statements in stmts vector:
     lhs = op1 TRUNC_DIV_EXPR op2 -> lhs = REALPART_EXPR<divmod_tmp>
     lhs = op1 TRUNC_MOD_EXPR op2 -> lhs = IMAGPART_EXPR<divmod_tmp>.  */

  for (unsigned i = 0; stmts.iterate (i, &use_stmt); ++i)
    {
      tree new_rhs;

      switch (gimple_assign_rhs_code (use_stmt))
	{
	case TRUNC_DIV_EXPR:
	  new_rhs = fold_build1 (REALPART_EXPR, TREE_TYPE (op1), res);
	  break;

	case TRUNC_MOD_EXPR:
	  new_rhs = fold_build1 (IMAGPART_EXPR, TREE_TYPE (op1), res);
	  break;

	default:
	  gcc_unreachable ();
	}

      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      gimple_assign_set_rhs_from_tree (&gsi, new_rhs);
      update_stmt (use_stmt);
    }

  return true;
}
/* Find integer multiplications where the operands are extended from
   smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR
   where appropriate.  */

namespace {

const pass_data pass_data_optimize_widening_mul =
{
  GIMPLE_PASS, /* type */
  "widening_mul", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_WIDEN_MUL, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_optimize_widening_mul : public gimple_opt_pass
{
public:
  pass_optimize_widening_mul (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_optimize_widening_mul, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return flag_expensive_optimizations && optimize;
    }

  virtual unsigned int execute (function *);

}; // class pass_optimize_widening_mul
/* Walker class to perform the transformation in reverse dominance order.  */

class math_opts_dom_walker : public dom_walker
{
public:
  /* Constructor, CFG_CHANGED is a pointer to a boolean flag that will be set
     if walking modifies the CFG.  */

  math_opts_dom_walker (bool *cfg_changed_p)
    : dom_walker (CDI_DOMINATORS), m_last_result_set (),
      m_cfg_changed_p (cfg_changed_p) {}

  /* The actual actions performed in the walk.  */

  virtual void after_dom_children (basic_block);

  /* Set of results of chains of multiply and add statement combinations that
     were not transformed into FMAs because of active deferring.  */
  hash_set<tree> m_last_result_set;

  /* Pointer to a flag of the user that needs to be set if CFG has been
     modified.  */
  bool *m_cfg_changed_p;
};
void
math_opts_dom_walker::after_dom_children (basic_block bb)
{
  gimple_stmt_iterator gsi;

  fma_deferring_state fma_state (param_avoid_fma_max_bits > 0);

  for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
    {
      gimple *stmt = gsi_stmt (gsi);
      enum tree_code code;

      if (is_gimple_assign (stmt))
	{
	  code = gimple_assign_rhs_code (stmt);
	  switch (code)
	    {
	    case MULT_EXPR:
	      if (!convert_mult_to_widen (stmt, &gsi)
		  && !convert_expand_mult_copysign (stmt, &gsi)
		  && convert_mult_to_fma (stmt,
					  gimple_assign_rhs1 (stmt),
					  gimple_assign_rhs2 (stmt),
					  &fma_state))
		{
		  gsi_remove (&gsi, true);
		  release_defs (stmt);
		  continue;
		}
	      break;

	    case PLUS_EXPR:
	    case MINUS_EXPR:
	      if (!convert_plusminus_to_widen (&gsi, stmt, code))
		match_uaddsub_overflow (&gsi, stmt, code);
	      break;

	    case TRUNC_MOD_EXPR:
	      convert_to_divmod (as_a <gassign *> (stmt));
	      break;

	    default:;
	    }
	}
      else if (is_gimple_call (stmt))
	{
	  switch (gimple_call_combined_fn (stmt))
	    {
	    CASE_CFN_POW:
	      if (gimple_call_lhs (stmt)
		  && TREE_CODE (gimple_call_arg (stmt, 1)) == REAL_CST
		  && real_equal (&TREE_REAL_CST (gimple_call_arg (stmt, 1)),
				 &dconst2)
		  && convert_mult_to_fma (stmt,
					  gimple_call_arg (stmt, 0),
					  gimple_call_arg (stmt, 0),
					  &fma_state))
		{
		  unlink_stmt_vdef (stmt);
		  if (gsi_remove (&gsi, true)
		      && gimple_purge_dead_eh_edges (bb))
		    *m_cfg_changed_p = true;
		  release_defs (stmt);
		  continue;
		}
	      break;

	    case CFN_COND_MUL:
	      if (convert_mult_to_fma (stmt,
				       gimple_call_arg (stmt, 1),
				       gimple_call_arg (stmt, 2),
				       &fma_state,
				       gimple_call_arg (stmt, 0)))
		{
		  gsi_remove (&gsi, true);
		  release_defs (stmt);
		  continue;
		}
	      break;

	    case CFN_LAST:
	      cancel_fma_deferring (&fma_state);
	      break;

	    default:
	      break;
	    }
	}
      gsi_next (&gsi);
    }

  if (fma_state.m_deferring_p
      && fma_state.m_initial_phi)
    {
      gcc_checking_assert (fma_state.m_last_result);
      if (!last_fma_candidate_feeds_initial_phi (&fma_state,
						 &m_last_result_set))
	cancel_fma_deferring (&fma_state);
      else
	m_last_result_set.add (fma_state.m_last_result);
    }
}
unsigned int
pass_optimize_widening_mul::execute (function *fun)
{
  bool cfg_changed = false;

  memset (&widen_mul_stats, 0, sizeof (widen_mul_stats));
  calculate_dominance_info (CDI_DOMINATORS);
  renumber_gimple_stmt_uids (cfun);

  math_opts_dom_walker (&cfg_changed).walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));

  statistics_counter_event (fun, "widening multiplications inserted",
			    widen_mul_stats.widen_mults_inserted);
  statistics_counter_event (fun, "widening maccs inserted",
			    widen_mul_stats.maccs_inserted);
  statistics_counter_event (fun, "fused multiply-adds inserted",
			    widen_mul_stats.fmas_inserted);
  statistics_counter_event (fun, "divmod calls inserted",
			    widen_mul_stats.divmod_calls_inserted);

  return cfg_changed ? TODO_cleanup_cfg : 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_optimize_widening_mul (gcc::context *ctxt)
{
  return new pass_optimize_widening_mul (ctxt);
}