/* Global, SSA-based optimizations using mathematical identities.
   Copyright (C) 2005-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Currently, the only mini-pass in this file tries to CSE reciprocal
   operations.  These are common in sequences such as this one:

	modulus = sqrt(x*x + y*y + z*z);
	x = x / modulus;
	y = y / modulus;
	z = z / modulus;

   that can be optimized to

	modulus = sqrt(x*x + y*y + z*z);
	rmodulus = 1.0 / modulus;
	x = x * rmodulus;
	y = y * rmodulus;
	z = z * rmodulus;

   We do this for loop invariant divisors, and with this pass whenever
   we notice that a division has the same divisor multiple times.

   Of course, like in PRE, we don't insert a division if a dominator
   already has one.  However, this cannot be done as an extension of
   PRE for several reasons.

   First of all, experiments found that the transformation is not always
   useful if there are only two divisions by the same divisor.  This is
   probably because modern processors can pipeline the divisions; on
   older, in-order processors it should still be effective to optimize
   two divisions by the same number.  We make this a param, and it shall
   be called N in the remainder of this comment.

   Second, if trapping math is active, we have less freedom on where
   to insert divisions: we can only do so in basic blocks that already
   contain one.  (If divisions don't trap, instead, we can insert
   divisions elsewhere, which will be in blocks that are common dominators
   of those that have the division).

   We really don't want to compute the reciprocal unless a division will
   be found.  To do this, we won't insert the division in a basic block
   that has less than N divisions *post-dominating* it.

   The algorithm constructs a subset of the dominator tree, holding the
   blocks containing the divisions and the common dominators to them,
   and walks it twice.  The first walk is in post-order, and it annotates
   each block with the number of divisions that post-dominate it: this
   gives information on where divisions can be inserted profitably.
   The second walk is in pre-order, and it inserts divisions as explained
   above, and replaces divisions by multiplications.

   In the best case, the cost of the pass is O(n_statements).  In the
   worst case, the cost is due to creating the dominator tree subset,
   with a cost of O(n_basic_blocks ^ 2); however, this can only happen
   for n_statements / n_basic_blocks statements.  So, the amortized cost
   of creating the dominator tree subset is O(n_basic_blocks) and the
   worst-case cost of the pass is O(n_statements * n_basic_blocks).

   More practically, the cost will be small because there are few
   divisions, and they tend to be in the same basic block, so insert_bb
   is called very few times.

   If we did this using domwalk.c, an efficient implementation would have
   to work on all the variables in a single pass, because we could not
   work on just a subset of the dominator tree, as we do now, and the
   cost would also be something like O(n_statements * n_basic_blocks).
   The data structures would be more complex in order to work on all the
   variables in a single pass.  */
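
/* For illustration only (a sketch of the intended effect, not code used
   by the pass): with -freciprocal-math and a target threshold of N = 2,
   a block containing

	a = x / d;
	b = y / d;
	c = z / d;

   is rewritten as

	t = 1.0 / d;
	a = x * t;
	b = y * t;
	c = z * t;

   trading three divisions for one division and three multiplications.  */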

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "optabs-tree.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimplify.h"
#include "gimplify-me.h"
#include "stor-layout.h"
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "builtins.h"
#include "internal-fn.h"
#include "case-cfn-macros.h"
#include "optabs-libfuncs.h"
#include "targhooks.h"

/* This structure represents one basic block that either computes a
   division, or is a common dominator for basic blocks that compute a
   division.  */
struct occurrence
{
  /* The basic block represented by this structure.  */
  basic_block bb;

  /* If non-NULL, the SSA_NAME holding the definition for a reciprocal
     inserted in BB.  */
  tree recip_def;

  /* If non-NULL, the SSA_NAME holding the definition for a squared
     reciprocal inserted in BB.  */
  tree square_recip_def;

  /* If non-NULL, the GIMPLE_ASSIGN for a reciprocal computation that
     was inserted in BB.  */
  gimple *recip_def_stmt;

  /* Pointer to a list of "struct occurrence"s for blocks dominated
     by BB.  */
  struct occurrence *children;

  /* Pointer to the next "struct occurrence"s in the list of blocks
     sharing a common dominator.  */
  struct occurrence *next;

  /* The number of divisions that are in BB before compute_merit.  The
     number of divisions that are in BB or post-dominate it after
     compute_merit.  */
  int num_divisions;

  /* True if the basic block has a division, false if it is a common
     dominator for basic blocks that do.  If it is false and trapping
     math is active, BB is not a candidate for inserting a reciprocal.  */
  bool bb_has_division;
};

static struct
{
  /* Number of 1.0/X ops inserted.  */
  int rdivs_inserted;

  /* Number of 1.0/FUNC ops inserted.  */
  int rfuncs_inserted;
} reciprocal_stats;

static struct
{
  /* Number of cexpi calls inserted.  */
  int inserted;
} sincos_stats;

static struct
{
  /* Number of widening multiplication ops inserted.  */
  int widen_mults_inserted;

  /* Number of integer multiply-and-accumulate ops inserted.  */
  int maccs_inserted;

  /* Number of fp fused multiply-add ops inserted.  */
  int fmas_inserted;

  /* Number of divmod calls inserted.  */
  int divmod_calls_inserted;
} widen_mul_stats;

/* The instance of "struct occurrence" representing the highest
   interesting block in the dominator tree.  */
static struct occurrence *occ_head;

/* Allocation pool for getting instances of "struct occurrence".  */
static object_allocator<occurrence> *occ_pool;

/* Allocate and return a new struct occurrence for basic block BB, and
   whose children list is headed by CHILDREN.  */
static struct occurrence *
occ_new (basic_block bb, struct occurrence *children)
{
  struct occurrence *occ;

  bb->aux = occ = occ_pool->allocate ();
  memset (occ, 0, sizeof (struct occurrence));

  occ->bb = bb;
  occ->children = children;
  return occ;
}

/* Insert NEW_OCC into our subset of the dominator tree.  P_HEAD points to a
   list of "struct occurrence"s, one per basic block, having IDOM as
   their common dominator.

   We try to insert NEW_OCC as deep as possible in the tree, and we also
   insert any other block that is a common dominator for BB and one
   block already in the tree.  */

static void
insert_bb (struct occurrence *new_occ, basic_block idom,
	   struct occurrence **p_head)
{
  struct occurrence *occ, **p_occ;

  for (p_occ = p_head; (occ = *p_occ) != NULL; )
    {
      basic_block bb = new_occ->bb, occ_bb = occ->bb;
      basic_block dom = nearest_common_dominator (CDI_DOMINATORS,
						  occ_bb, bb);
      if (dom == bb)
	{
	  /* BB dominates OCC_BB.  OCC becomes NEW_OCC's child: remove OCC
	     from the list.  */
	  *p_occ = occ->next;
	  occ->next = new_occ->children;
	  new_occ->children = occ;

	  /* Try the next block (it may as well be dominated by BB).  */
	}

      else if (dom == occ_bb)
	{
	  /* OCC_BB dominates BB.  Tail recurse to look deeper.  */
	  insert_bb (new_occ, dom, &occ->children);
	  return;
	}

      else if (dom != idom)
	{
	  gcc_assert (!dom->aux);

	  /* There is a dominator between IDOM and BB, add it and make
	     two children out of NEW_OCC and OCC.  First, remove OCC from
	     the list.  */
	  *p_occ = occ->next;
	  new_occ->next = occ;
	  occ->next = NULL;

	  /* None of the previous blocks has DOM as a dominator: if we tail
	     recursed, we would reexamine them uselessly.  Just switch BB with
	     DOM, and go on looking for blocks dominated by DOM.  */
	  new_occ = occ_new (dom, new_occ);
	}

      else
	{
	  /* Nothing special, go on with the next element.  */
	  p_occ = &occ->next;
	}
    }

  /* No place was found as a child of IDOM.  Make BB a sibling of IDOM.  */
  new_occ->next = *p_head;
  *p_head = new_occ;
}

/* Register that we found a division in BB.
   IMPORTANCE is a measure of how much weighting to give
   that division.  Use IMPORTANCE = 2 to register a single
   division.  If the division is going to be found multiple
   times use 1 (as it is with squares).  */
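
/* For example (illustrative): a division "a / x" is seen once during the
   scan in execute_cse_reciprocals_1 and registered with IMPORTANCE = 2,
   while a division by a square "a / (x * x)" is encountered twice and
   registered with IMPORTANCE = 1 each time, so both end up contributing
   the same total weight of 2 to num_divisions.  */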

static void
register_division_in (basic_block bb, int importance)
{
  struct occurrence *occ;

  occ = (struct occurrence *) bb->aux;
  if (!occ)
    {
      occ = occ_new (bb, NULL);
      insert_bb (occ, ENTRY_BLOCK_PTR_FOR_FN (cfun), &occ_head);
    }

  occ->bb_has_division = true;
  occ->num_divisions += importance;
}

/* Compute the number of divisions that postdominate each block in OCC and
   its children.  */

static void
compute_merit (struct occurrence *occ)
{
  struct occurrence *occ_child;
  basic_block dom = occ->bb;

  for (occ_child = occ->children; occ_child;
       occ_child = occ_child->next)
    {
      basic_block bb;
      if (occ_child->children)
	compute_merit (occ_child);

      if (flag_exceptions)
	/* Don't clobber the EH landing pad!  */
	bb = single_noncomplex_succ (dom);
      else
	bb = dom;

      if (dominated_by_p (CDI_POST_DOMINATORS, bb, occ_child->bb))
	occ->num_divisions += occ_child->num_divisions;
    }
}

/* Return whether USE_STMT is a floating-point division by DEF.  */
static inline bool
is_division_by (gimple *use_stmt, tree def)
{
  return is_gimple_assign (use_stmt)
	 && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
	 && gimple_assign_rhs2 (use_stmt) == def
	 /* Do not recognize x / x as valid division, as we are getting
	    confused later by replacing all immediate uses x in such
	    a stmt.  */
	 && gimple_assign_rhs1 (use_stmt) != def;
}

/* Return whether USE_STMT is DEF * DEF.  */
static inline bool
is_square_of (gimple *use_stmt, tree def)
{
  if (gimple_code (use_stmt) == GIMPLE_ASSIGN
      && gimple_assign_rhs_code (use_stmt) == MULT_EXPR)
    {
      tree op0 = gimple_assign_rhs1 (use_stmt);
      tree op1 = gimple_assign_rhs2 (use_stmt);

      return op0 == op1 && op0 == def;
    }
  return 0;
}

/* Return whether USE_STMT is a floating-point division by
   DEF * DEF.  */
static inline bool
is_division_by_square (gimple *use_stmt, tree def)
{
  if (gimple_code (use_stmt) == GIMPLE_ASSIGN
      && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
      && gimple_assign_rhs1 (use_stmt) != gimple_assign_rhs2 (use_stmt))
    {
      tree denominator = gimple_assign_rhs2 (use_stmt);
      if (TREE_CODE (denominator) == SSA_NAME)
	return is_square_of (SSA_NAME_DEF_STMT (denominator), def);
    }
  return 0;
}

/* Walk the subset of the dominator tree rooted at OCC, setting the
   RECIP_DEF field to a definition of 1.0 / DEF that can be used in
   the given basic block.  The field may be left NULL, of course,
   if it is not possible or profitable to do the optimization.

   DEF_GSI is an iterator pointing at the statement defining DEF.
   If RECIP_DEF is set, a dominator already has a computation that can
   be used.

   If SHOULD_INSERT_SQUARE_RECIP is set, then this also inserts
   the square of the reciprocal immediately after the definition
   of the reciprocal.  */

static void
insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ,
		    tree def, tree recip_def, tree square_recip_def,
		    int should_insert_square_recip, int threshold)
{
  tree type;
  gassign *new_stmt, *new_square_stmt;
  gimple_stmt_iterator gsi;
  struct occurrence *occ_child;

  if (!recip_def
      && (occ->bb_has_division || !flag_trapping_math)
      /* Divide by two as all divisions are counted twice in
	 the list.  */
      && occ->num_divisions / 2 >= threshold)
    {
      /* Make a variable with the replacement and substitute it.  */
      type = TREE_TYPE (def);
      recip_def = create_tmp_reg (type, "reciptmp");
      new_stmt = gimple_build_assign (recip_def, RDIV_EXPR,
				      build_one_cst (type), def);

      if (should_insert_square_recip)
	{
	  square_recip_def = create_tmp_reg (type, "powmult_reciptmp");
	  new_square_stmt = gimple_build_assign (square_recip_def, MULT_EXPR,
						 recip_def, recip_def);
	}

      if (occ->bb_has_division)
	{
	  /* Case 1: insert before an existing division.  */
	  gsi = gsi_after_labels (occ->bb);
	  while (!gsi_end_p (gsi)
		 && (!is_division_by (gsi_stmt (gsi), def))
		 && (!is_division_by_square (gsi_stmt (gsi), def)))
	    gsi_next (&gsi);

	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	}
      else if (def_gsi && occ->bb == def_gsi->bb)
	{
	  /* Case 2: insert right after the definition.  Note that this will
	     never happen if the definition statement can throw, because in
	     that case the sole successor of the statement's basic block will
	     dominate all the uses as well.  */
	  gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
	}
      else
	{
	  /* Case 3: insert in a basic block not containing defs/uses.  */
	  gsi = gsi_after_labels (occ->bb);
	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	}

      /* Regardless of which case the reciprocal was inserted in,
	 we insert the square immediately after the reciprocal.  */
      if (should_insert_square_recip)
	gsi_insert_before (&gsi, new_square_stmt, GSI_SAME_STMT);

      reciprocal_stats.rdivs_inserted++;

      occ->recip_def_stmt = new_stmt;
    }

  occ->recip_def = recip_def;
  occ->square_recip_def = square_recip_def;
  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    insert_reciprocals (def_gsi, occ_child, def, recip_def,
			square_recip_def, should_insert_square_recip,
			threshold);
}

/* Replace occurrences of expr / (x * x) with expr * ((1 / x) * (1 / x)).
   Take as argument the use for (x * x).  */
static inline void
replace_reciprocal_squares (use_operand_p use_p)
{
  gimple *use_stmt = USE_STMT (use_p);
  basic_block bb = gimple_bb (use_stmt);
  struct occurrence *occ = (struct occurrence *) bb->aux;

  if (optimize_bb_for_speed_p (bb) && occ->square_recip_def
      && occ->recip_def)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
      gimple_assign_set_rhs2 (use_stmt, occ->square_recip_def);
      SET_USE (use_p, occ->square_recip_def);
      fold_stmt_inplace (&gsi);
      update_stmt (use_stmt);
    }
}

/* Replace the division at USE_P with a multiplication by the reciprocal, if
   possible.  */
static inline void
replace_reciprocal (use_operand_p use_p)
{
  gimple *use_stmt = USE_STMT (use_p);
  basic_block bb = gimple_bb (use_stmt);
  struct occurrence *occ = (struct occurrence *) bb->aux;

  if (optimize_bb_for_speed_p (bb)
      && occ->recip_def && use_stmt != occ->recip_def_stmt)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
      SET_USE (use_p, occ->recip_def);
      fold_stmt_inplace (&gsi);
      update_stmt (use_stmt);
    }
}

/* Free OCC and return one more "struct occurrence" to be freed.  */

static struct occurrence *
free_bb (struct occurrence *occ)
{
  struct occurrence *child, *next;

  /* First get the two pointers hanging off OCC.  */
  next = occ->next;
  child = occ->children;
  occ->bb->aux = NULL;
  occ_pool->remove (occ);

  /* Now ensure that we don't recurse unless it is necessary.  */
  if (!child)
    return next;
  else
    {
      while (next)
	next = free_bb (next);

      return child;
    }
}

/* Look for floating-point divisions among DEF's uses, and try to
   replace them by multiplications with the reciprocal.  Add
   as many statements computing the reciprocal as needed.

   DEF must be a GIMPLE register of a floating-point type.  */

static void
execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
{
  use_operand_p use_p, square_use_p;
  imm_use_iterator use_iter, square_use_iter;
  tree square_def;
  struct occurrence *occ;
  int count = 0;
  int threshold;
  int square_recip_count = 0;
  int sqrt_recip_count = 0;

  gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && is_gimple_reg (def)
	      && TREE_CODE (def) == SSA_NAME);

  threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));

  /* If DEF is a square (x * x), count the number of divisions by x.
     If there are more divisions by x than by (DEF * DEF), prefer to optimize
     the reciprocal of x instead of DEF: reciprocal optimization of x then
     results in 1 division rather than 2 or 3.  */
  gimple *def_stmt = SSA_NAME_DEF_STMT (def);

  if (is_gimple_assign (def_stmt)
      && gimple_assign_rhs_code (def_stmt) == MULT_EXPR
      && TREE_CODE (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
      && gimple_assign_rhs1 (def_stmt) == gimple_assign_rhs2 (def_stmt))
    {
      tree op0 = gimple_assign_rhs1 (def_stmt);

      FOR_EACH_IMM_USE_FAST (use_p, use_iter, op0)
	{
	  gimple *use_stmt = USE_STMT (use_p);
	  if (is_division_by (use_stmt, op0))
	    sqrt_recip_count++;
	}
    }

  FOR_EACH_IMM_USE_FAST (use_p, use_iter, def)
    {
      gimple *use_stmt = USE_STMT (use_p);
      if (is_division_by (use_stmt, def))
	{
	  register_division_in (gimple_bb (use_stmt), 2);
	  count++;
	}

      if (is_square_of (use_stmt, def))
	{
	  square_def = gimple_assign_lhs (use_stmt);
	  FOR_EACH_IMM_USE_FAST (square_use_p, square_use_iter, square_def)
	    {
	      gimple *square_use_stmt = USE_STMT (square_use_p);
	      if (is_division_by (square_use_stmt, square_def))
		{
		  /* This is executed twice for each division by a square.  */
		  register_division_in (gimple_bb (square_use_stmt), 1);
		  square_recip_count++;
		}
	    }
	}
    }

  /* Square reciprocals were counted twice above.  */
  square_recip_count /= 2;

  /* If it is more profitable to optimize 1 / x, don't optimize 1 / (x * x).  */
  if (sqrt_recip_count > square_recip_count)
    return;

  /* Do the expensive part only if we can hope to optimize something.  */
  if (count + square_recip_count >= threshold
      && count >= 1)
    {
      gimple *use_stmt;
      for (occ = occ_head; occ; occ = occ->next)
	{
	  compute_merit (occ);
	  insert_reciprocals (def_gsi, occ, def, NULL, NULL,
			      square_recip_count, threshold);
	}

      FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, def)
	{
	  if (is_division_by (use_stmt, def))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
		replace_reciprocal (use_p);
	    }
	  else if (square_recip_count > 0 && is_square_of (use_stmt, def))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
		{
		  /* Find all uses of the square that are divisions and
		     replace them by multiplications with the inverse.  */
		  imm_use_iterator square_iterator;
		  gimple *powmult_use_stmt = USE_STMT (use_p);
		  tree powmult_def_name = gimple_assign_lhs (powmult_use_stmt);

		  FOR_EACH_IMM_USE_STMT (powmult_use_stmt,
					 square_iterator, powmult_def_name)
		    FOR_EACH_IMM_USE_ON_STMT (square_use_p, square_iterator)
		      {
			gimple *powmult_use_stmt = USE_STMT (square_use_p);
			if (is_division_by (powmult_use_stmt, powmult_def_name))
			  replace_reciprocal_squares (square_use_p);
		      }
		}
	    }
	}
    }

  for (occ = occ_head; occ; )
    occ = free_bb (occ);

  occ_head = NULL;
}

/* Return an internal function that implements the reciprocal of CALL,
   or IFN_LAST if there is no such function that the target supports.  */

static internal_fn
internal_fn_reciprocal (gcall *call)
{
  internal_fn ifn;

  switch (gimple_call_combined_fn (call))
    {
    CASE_CFN_SQRT:
      ifn = IFN_RSQRT;
      break;

    default:
      return IFN_LAST;
    }

  tree_pair types = direct_internal_fn_types (ifn, call);
  if (!direct_internal_fn_supported_p (ifn, types, OPTIMIZE_FOR_SPEED))
    return IFN_LAST;

  return ifn;
}

/* Go through all the floating-point SSA_NAMEs, and call
   execute_cse_reciprocals_1 on each of them.  */

namespace {

const pass_data pass_data_cse_reciprocals =
{
  GIMPLE_PASS, /* type */
  "recip", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_RECIP, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_cse_reciprocals : public gimple_opt_pass
{
public:
  pass_cse_reciprocals (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cse_reciprocals, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return optimize && flag_reciprocal_math; }
  virtual unsigned int execute (function *);

}; // class pass_cse_reciprocals

unsigned int
pass_cse_reciprocals::execute (function *fun)
{
  basic_block bb;
  tree arg;

  occ_pool = new object_allocator<occurrence> ("dominators for recip");

  memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
  calculate_dominance_info (CDI_DOMINATORS);
  calculate_dominance_info (CDI_POST_DOMINATORS);

  if (flag_checking)
    FOR_EACH_BB_FN (bb, fun)
      gcc_assert (!bb->aux);

  for (arg = DECL_ARGUMENTS (fun->decl); arg; arg = DECL_CHAIN (arg))
    if (FLOAT_TYPE_P (TREE_TYPE (arg))
	&& is_gimple_reg (arg))
      {
	tree name = ssa_default_def (fun, arg);
	if (name)
	  execute_cse_reciprocals_1 (NULL, name);
      }

  FOR_EACH_BB_FN (bb, fun)
    {
      tree def;

      for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gphi *phi = gsi.phi ();
	  def = PHI_RESULT (phi);
	  if (! virtual_operand_p (def)
	      && FLOAT_TYPE_P (TREE_TYPE (def)))
	    execute_cse_reciprocals_1 (NULL, def);
	}

      for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);

	  if (gimple_has_lhs (stmt)
	      && (def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF)) != NULL
	      && FLOAT_TYPE_P (TREE_TYPE (def))
	      && TREE_CODE (def) == SSA_NAME)
	    execute_cse_reciprocals_1 (&gsi, def);
	}

      if (optimize_bb_for_size_p (bb))
	continue;

      /* Scan for a/func(b) and convert it to reciprocal a*rfunc(b).  */
      for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);

	  if (is_gimple_assign (stmt)
	      && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
	    {
	      tree arg1 = gimple_assign_rhs2 (stmt);
	      gimple *stmt1;

	      if (TREE_CODE (arg1) != SSA_NAME)
		continue;

	      stmt1 = SSA_NAME_DEF_STMT (arg1);

	      if (is_gimple_call (stmt1)
		  && gimple_call_lhs (stmt1))
		{
		  bool fail;
		  imm_use_iterator ui;
		  use_operand_p use_p;
		  tree fndecl = NULL_TREE;

		  gcall *call = as_a <gcall *> (stmt1);
		  internal_fn ifn = internal_fn_reciprocal (call);
		  if (ifn == IFN_LAST)
		    {
		      fndecl = gimple_call_fndecl (call);
		      if (!fndecl
			  || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_MD)
			continue;
		      fndecl = targetm.builtin_reciprocal (fndecl);
		      if (!fndecl)
			continue;
		    }

		  /* Check that all uses of the SSA name are divisions,
		     otherwise replacing the defining statement will do
		     the wrong thing.  */
		  fail = false;
		  FOR_EACH_IMM_USE_FAST (use_p, ui, arg1)
		    {
		      gimple *stmt2 = USE_STMT (use_p);
		      if (is_gimple_debug (stmt2))
			continue;
		      if (!is_gimple_assign (stmt2)
			  || gimple_assign_rhs_code (stmt2) != RDIV_EXPR
			  || gimple_assign_rhs1 (stmt2) == arg1
			  || gimple_assign_rhs2 (stmt2) != arg1)
			{
			  fail = true;
			  break;
			}
		    }
		  if (fail)
		    continue;

		  gimple_replace_ssa_lhs (call, arg1);
		  if (gimple_call_internal_p (call) != (ifn != IFN_LAST))
		    {
		      auto_vec<tree, 4> args;
		      for (unsigned int i = 0;
			   i < gimple_call_num_args (call); i++)
			args.safe_push (gimple_call_arg (call, i));
		      gcall *stmt2;
		      if (ifn == IFN_LAST)
			stmt2 = gimple_build_call_vec (fndecl, args);
		      else
			stmt2 = gimple_build_call_internal_vec (ifn, args);
		      gimple_call_set_lhs (stmt2, arg1);
		      if (gimple_vdef (call))
			{
			  gimple_set_vdef (stmt2, gimple_vdef (call));
			  SSA_NAME_DEF_STMT (gimple_vdef (stmt2)) = stmt2;
			}
		      gimple_call_set_nothrow (stmt2,
					       gimple_call_nothrow_p (call));
		      gimple_set_vuse (stmt2, gimple_vuse (call));
		      gimple_stmt_iterator gsi2 = gsi_for_stmt (call);
		      gsi_replace (&gsi2, stmt2, true);
		    }
		  else
		    {
		      if (ifn == IFN_LAST)
			gimple_call_set_fndecl (call, fndecl);
		      else
			gimple_call_set_internal_fn (call, ifn);
		    }
		  reciprocal_stats.rfuncs_inserted++;

		  FOR_EACH_IMM_USE_STMT (stmt, ui, arg1)
		    {
		      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
		      gimple_assign_set_rhs_code (stmt, MULT_EXPR);
		      fold_stmt_inplace (&gsi);
		      update_stmt (stmt);
		    }
		}
	    }
	}
    }

  statistics_counter_event (fun, "reciprocal divs inserted",
			    reciprocal_stats.rdivs_inserted);
  statistics_counter_event (fun, "reciprocal functions inserted",
			    reciprocal_stats.rfuncs_inserted);

  free_dominance_info (CDI_DOMINATORS);
  free_dominance_info (CDI_POST_DOMINATORS);
  delete occ_pool;
  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_cse_reciprocals (gcc::context *ctxt)
{
  return new pass_cse_reciprocals (ctxt);
}

/* Records an occurrence at statement USE_STMT in the vector of trees
   STMTS if it is dominated by *TOP_BB or dominates it or this basic block
   is not yet initialized.  Returns true if the occurrence was pushed on
   the vector.  Adjusts *TOP_BB to be the basic block dominating all
   statements in the vector.  */

static bool
maybe_record_sincos (vec<gimple *> *stmts,
		     basic_block *top_bb, gimple *use_stmt)
{
  basic_block use_bb = gimple_bb (use_stmt);
  if (*top_bb
      && (*top_bb == use_bb
	  || dominated_by_p (CDI_DOMINATORS, use_bb, *top_bb)))
    stmts->safe_push (use_stmt);
  else if (!*top_bb
	   || dominated_by_p (CDI_DOMINATORS, *top_bb, use_bb))
    {
      stmts->safe_push (use_stmt);
      *top_bb = use_bb;
    }
  else
    return false;

  return true;
}

/* Look for sin, cos and cexpi calls with the same argument NAME and
   create a single call to cexpi CSEing the result in this case.
   We first walk over all immediate uses of the argument collecting
   statements that we can CSE in a vector and in a second pass replace
   the statement rhs with a REALPART or IMAGPART expression on the
   result of the cexpi call we insert before the use statement that
   dominates all other candidates.  */
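
/* For illustration only (a sketch of the intended effect): given

       s = sin (x);
       c = cos (x);

   this transformation emits one dominating cexpi call and rewrites
   both uses:

       sincostmp = cexpi (x);
       s = IMAGPART_EXPR <sincostmp>;
       c = REALPART_EXPR <sincostmp>;  */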

static bool
execute_cse_sincos_1 (tree name)
{
  gimple_stmt_iterator gsi;
  imm_use_iterator use_iter;
  tree fndecl, res, type;
  gimple *def_stmt, *use_stmt, *stmt;
  int seen_cos = 0, seen_sin = 0, seen_cexpi = 0;
  auto_vec<gimple *> stmts;
  basic_block top_bb = NULL;
  int i;
  bool cfg_changed = false;

  type = TREE_TYPE (name);
  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, name)
    {
      if (gimple_code (use_stmt) != GIMPLE_CALL
	  || !gimple_call_lhs (use_stmt))
	continue;

      switch (gimple_call_combined_fn (use_stmt))
	{
	CASE_CFN_COS:
	  seen_cos |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	CASE_CFN_SIN:
	  seen_sin |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	CASE_CFN_CEXPI:
	  seen_cexpi |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	default:;
	}
    }

  if (seen_cos + seen_sin + seen_cexpi <= 1)
    return false;

  /* Simply insert cexpi at the beginning of top_bb but not earlier than
     the name def statement.  */
  fndecl = mathfn_built_in (type, BUILT_IN_CEXPI);
  if (!fndecl)
    return false;
  stmt = gimple_build_call (fndecl, 1, name);
  res = make_temp_ssa_name (TREE_TYPE (TREE_TYPE (fndecl)), stmt, "sincostmp");
  gimple_call_set_lhs (stmt, res);

  def_stmt = SSA_NAME_DEF_STMT (name);
  if (!SSA_NAME_IS_DEFAULT_DEF (name)
      && gimple_code (def_stmt) != GIMPLE_PHI
      && gimple_bb (def_stmt) == top_bb)
    {
      gsi = gsi_for_stmt (def_stmt);
      gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
    }
  else
    {
      gsi = gsi_after_labels (top_bb);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
    }
  sincos_stats.inserted++;

  /* And adjust the recorded old call sites.  */
  for (i = 0; stmts.iterate (i, &use_stmt); ++i)
    {
      tree rhs = NULL;

      switch (gimple_call_combined_fn (use_stmt))
	{
	CASE_CFN_COS:
	  rhs = fold_build1 (REALPART_EXPR, type, res);
	  break;

	CASE_CFN_SIN:
	  rhs = fold_build1 (IMAGPART_EXPR, type, res);
	  break;

	CASE_CFN_CEXPI:
	  rhs = res;
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Replace call with a copy.  */
      stmt = gimple_build_assign (gimple_call_lhs (use_stmt), rhs);

      gsi = gsi_for_stmt (use_stmt);
      gsi_replace (&gsi, stmt, true);
      if (gimple_purge_dead_eh_edges (gimple_bb (stmt)))
	cfg_changed = true;
    }

  return cfg_changed;
}

/* To evaluate powi(x,n), the floating point value x raised to the
   constant integer exponent n, we use a hybrid algorithm that
   combines the "window method" with look-up tables.  For an
   introduction to exponentiation algorithms and "addition chains",
   see section 4.6.3, "Evaluation of Powers" of Donald E. Knuth,
   "Seminumerical Algorithms", Vol. 2, "The Art of Computer Programming",
   3rd Edition, 1998, and Daniel M. Gordon, "A Survey of Fast Exponentiation
   Methods", Journal of Algorithms, Vol. 27, pp. 129-146, 1998.  */

/* Provide a default value for POWI_MAX_MULTS, the maximum number of
   multiplications to inline before calling the system library's pow
   function.  powi(x,n) requires at worst 2*bits(n)-2 multiplications,
   so this default never requires calling pow, powf or powl.  */

#ifndef POWI_MAX_MULTS
#define POWI_MAX_MULTS  (2*HOST_BITS_PER_WIDE_INT-2)
#endif

/* The size of the "optimal power tree" lookup table.  All
   exponents less than this value are simply looked up in the
   powi_table below.  This threshold is also used to size the
   cache of pseudo registers that hold intermediate results.  */
#define POWI_TABLE_SIZE 256

/* The size, in bits of the window, used in the "window method"
   exponentiation algorithm.  This is equivalent to a radix of
   (1<<POWI_WINDOW_SIZE) in the corresponding "m-ary method".  */
#define POWI_WINDOW_SIZE 3

/* The following table is an efficient representation of an
   "optimal power tree".  For each value, i, the corresponding
   value, j, in the table states that an optimal evaluation
   sequence for calculating pow(x,i) can be found by evaluating
   pow(x,j)*pow(x,i-j).  An optimal power tree for the first
   100 integers is given in Knuth's "Seminumerical algorithms".  */
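
/* For example, the entry below for i = 5 is 3: an optimal way to
   compute x**5 is x**3 * x**2.  powi_as_mults_1 recurses on both
   factors and reuses shared subterms through its CACHE array.  */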

static const unsigned char powi_table[POWI_TABLE_SIZE] =
  {
      0,   1,   1,   2,   2,   3,   3,   4,  /*   0 -   7 */
      4,   6,   5,   6,   6,  10,   7,   9,  /*   8 -  15 */
      8,  16,   9,  16,  10,  12,  11,  13,  /*  16 -  23 */
     12,  17,  13,  18,  14,  24,  15,  26,  /*  24 -  31 */
     16,  17,  17,  19,  18,  33,  19,  26,  /*  32 -  39 */
     20,  25,  21,  40,  22,  27,  23,  44,  /*  40 -  47 */
     24,  32,  25,  34,  26,  29,  27,  44,  /*  48 -  55 */
     28,  31,  29,  34,  30,  60,  31,  36,  /*  56 -  63 */
     32,  64,  33,  34,  34,  46,  35,  37,  /*  64 -  71 */
     36,  65,  37,  50,  38,  48,  39,  69,  /*  72 -  79 */
     40,  49,  41,  43,  42,  51,  43,  58,  /*  80 -  87 */
     44,  64,  45,  47,  46,  59,  47,  76,  /*  88 -  95 */
     48,  65,  49,  66,  50,  67,  51,  66,  /*  96 - 103 */
     52,  70,  53,  74,  54, 104,  55,  74,  /* 104 - 111 */
     56,  64,  57,  69,  58,  78,  59,  68,  /* 112 - 119 */
     60,  61,  61,  80,  62,  75,  63,  68,  /* 120 - 127 */
     64,  65,  65, 128,  66, 129,  67,  90,  /* 128 - 135 */
     68,  73,  69, 131,  70,  94,  71,  88,  /* 136 - 143 */
     72, 128,  73,  98,  74, 132,  75, 121,  /* 144 - 151 */
     76, 102,  77, 124,  78, 132,  79, 106,  /* 152 - 159 */
     80,  97,  81, 160,  82,  99,  83, 134,  /* 160 - 167 */
     84,  86,  85,  95,  86, 160,  87, 100,  /* 168 - 175 */
     88, 113,  89,  98,  90, 107,  91, 122,  /* 176 - 183 */
     92, 111,  93, 102,  94, 126,  95, 150,  /* 184 - 191 */
     96, 128,  97, 130,  98, 133,  99, 195,  /* 192 - 199 */
    100, 128, 101, 123, 102, 164, 103, 138,  /* 200 - 207 */
    104, 145, 105, 146, 106, 109, 107, 149,  /* 208 - 215 */
    108, 200, 109, 146, 110, 170, 111, 157,  /* 216 - 223 */
    112, 128, 113, 130, 114, 182, 115, 132,  /* 224 - 231 */
    116, 200, 117, 132, 118, 158, 119, 206,  /* 232 - 239 */
    120, 240, 121, 162, 122, 147, 123, 152,  /* 240 - 247 */
    124, 166, 125, 214, 126, 138, 127, 153,  /* 248 - 255 */
  };

/* Return the number of multiplications required to calculate
   powi(x,n) where n is less than POWI_TABLE_SIZE.  This is a
   subroutine of powi_cost.  CACHE is an array indicating
   which exponents have already been calculated.  */

static int
powi_lookup_cost (unsigned HOST_WIDE_INT n, bool *cache)
{
  /* If we've already calculated this exponent, then this evaluation
     doesn't require any additional multiplications.  */
  if (cache[n])
    return 0;

  cache[n] = true;
  return powi_lookup_cost (n - powi_table[n], cache)
	 + powi_lookup_cost (powi_table[n], cache) + 1;
}

/* Return the number of multiplications required to calculate
   powi(x,n) for an arbitrary x, given the exponent N.  This
   function needs to be kept in sync with powi_as_mults below.  */
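
/* For example (illustrative): powi_cost (5) == 3, matching the multiply
   sequence x2 = x*x; x3 = x*x2; x5 = x2*x3 that powi_as_mults emits;
   the CACHE array ensures the shared x2 term is only counted once.  */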

static int
powi_cost (HOST_WIDE_INT n)
{
  bool cache[POWI_TABLE_SIZE];
  unsigned HOST_WIDE_INT digit;
  unsigned HOST_WIDE_INT val;
  int result;

  if (n == 0)
    return 0;

  /* Ignore the reciprocal when calculating the cost.  */
  val = (n < 0) ? -n : n;

  /* Initialize the exponent cache.  */
  memset (cache, 0, POWI_TABLE_SIZE * sizeof (bool));
  cache[1] = true;

  result = 0;

  while (val >= POWI_TABLE_SIZE)
    {
      if (val & 1)
	{
	  digit = val & ((1 << POWI_WINDOW_SIZE) - 1);
	  result += powi_lookup_cost (digit, cache)
		    + POWI_WINDOW_SIZE + 1;
	  val >>= POWI_WINDOW_SIZE;
	}
      else
	{
	  val >>= 1;
	  result += 1;
	}
    }

  return result + powi_lookup_cost (val, cache);
}

/* Recursive subroutine of powi_as_mults.  This function takes the
   array, CACHE, of already calculated exponents and an exponent N and
   returns a tree that corresponds to CACHE[1]**N, with type TYPE.  */

static tree
powi_as_mults_1 (gimple_stmt_iterator *gsi, location_t loc, tree type,
		 HOST_WIDE_INT n, tree *cache)
{
  tree op0, op1, ssa_target;
  unsigned HOST_WIDE_INT digit;
  gassign *mult_stmt;

  if (n < POWI_TABLE_SIZE && cache[n])
    return cache[n];

  ssa_target = make_temp_ssa_name (type, NULL, "powmult");

  if (n < POWI_TABLE_SIZE)
    {
      cache[n] = ssa_target;
      op0 = powi_as_mults_1 (gsi, loc, type, n - powi_table[n], cache);
      op1 = powi_as_mults_1 (gsi, loc, type, powi_table[n], cache);
    }
  else if (n & 1)
    {
      digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
      op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache);
      op1 = powi_as_mults_1 (gsi, loc, type, digit, cache);
    }
  else
    {
      op0 = powi_as_mults_1 (gsi, loc, type, n >> 1, cache);
      op1 = op0;
    }

  mult_stmt = gimple_build_assign (ssa_target, MULT_EXPR, op0, op1);
  gimple_set_location (mult_stmt, loc);
  gsi_insert_before (gsi, mult_stmt, GSI_SAME_STMT);

  return ssa_target;
}

/* Convert ARG0**N to a tree of multiplications of ARG0 with itself.
   This function needs to be kept in sync with powi_cost above.  */

static tree
powi_as_mults (gimple_stmt_iterator *gsi, location_t loc,
	       tree arg0, HOST_WIDE_INT n)
{
  tree cache[POWI_TABLE_SIZE], result, type = TREE_TYPE (arg0);
  gassign *div_stmt;
  tree target;

  if (n == 0)
    return build_real (type, dconst1);

  memset (cache, 0, sizeof (cache));
  cache[1] = arg0;

  result = powi_as_mults_1 (gsi, loc, type, (n < 0) ? -n : n, cache);
  if (n >= 0)
    return result;

  /* If the original exponent was negative, reciprocate the result.  */
  target = make_temp_ssa_name (type, NULL, "powmult");
  div_stmt = gimple_build_assign (target, RDIV_EXPR,
				  build_real (type, dconst1), result);
  gimple_set_location (div_stmt, loc);
  gsi_insert_before (gsi, div_stmt, GSI_SAME_STMT);

  return target;
}

/* ARG0 and N are the two arguments to a powi builtin in GSI with
   location info LOC.  If the arguments are appropriate, create an
   equivalent sequence of statements prior to GSI using an optimal
   number of multiplications, and return an expression holding the
   result.  */

static tree
gimple_expand_builtin_powi (gimple_stmt_iterator *gsi, location_t loc,
			    tree arg0, HOST_WIDE_INT n)
{
  /* Avoid largest negative number.  */
  if (n != -n
      && ((n >= -1 && n <= 2)
	  || (optimize_function_for_speed_p (cfun)
	      && powi_cost (n) <= POWI_MAX_MULTS)))
    return powi_as_mults (gsi, loc, arg0, n);

  return NULL_TREE;
}

/* Build a gimple call statement that calls FN with argument ARG.
   Set the lhs of the call statement to a fresh SSA name.  Insert the
   statement prior to GSI's current position, and return the fresh
   SSA name.  */

static tree
build_and_insert_call (gimple_stmt_iterator *gsi, location_t loc,
		       tree fn, tree arg)
{
  gcall *call_stmt;
  tree ssa_target;

  call_stmt = gimple_build_call (fn, 1, arg);
  ssa_target = make_temp_ssa_name (TREE_TYPE (arg), NULL, "powroot");
  gimple_set_lhs (call_stmt, ssa_target);
  gimple_set_location (call_stmt, loc);
  gsi_insert_before (gsi, call_stmt, GSI_SAME_STMT);

  return ssa_target;
}

/* Build a gimple binary operation with the given CODE and arguments
   ARG0, ARG1, assigning the result to a new SSA name for variable
   TARGET.  Insert the statement prior to GSI's current position, and
   return the fresh SSA name.  */

static tree
build_and_insert_binop (gimple_stmt_iterator *gsi, location_t loc,
			const char *name, enum tree_code code,
			tree arg0, tree arg1)
{
  tree result = make_temp_ssa_name (TREE_TYPE (arg0), NULL, name);
  gassign *stmt = gimple_build_assign (result, code, arg0, arg1);
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);

  return result;
}

/* Build a gimple reference operation with the given CODE and argument
   ARG, assigning the result to a new SSA name of TYPE with NAME.
   Insert the statement prior to GSI's current position, and return
   the fresh SSA name.  */

static inline tree
build_and_insert_ref (gimple_stmt_iterator *gsi, location_t loc, tree type,
		      const char *name, enum tree_code code, tree arg0)
{
  tree result = make_temp_ssa_name (type, NULL, name);
  gimple *stmt = gimple_build_assign (result, build1 (code, type, arg0));
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);

  return result;
}

/* Build a gimple assignment to cast VAL to TYPE.  Insert the statement
   prior to GSI's current position, and return the fresh SSA name.  */

static tree
build_and_insert_cast (gimple_stmt_iterator *gsi, location_t loc,
		       tree type, tree val)
{
  tree result = make_ssa_name (type);
  gassign *stmt = gimple_build_assign (result, NOP_EXPR, val);
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);

  return result;
}

struct pow_synth_sqrt_info
{
  bool *factors;
  unsigned int deepest;
  unsigned int num_mults;
};

/* Return true iff the real value C can be represented as a
   sum of powers of 0.5 up to N.  That is:
   C == SUM<i from 1..N> (a[i]*(0.5**i)) where a[i] is either 0 or 1.
   Record in INFO the various parameters of the synthesis algorithm such
   as the factors a[i], the maximum 0.5 power and the number of
   multiplications that will be required.  */
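
/* Worked example (illustrative): for C == 0.625 and N >= 3,
   0.625 == 0.5**1 + 0.5**3, so this returns true with
   factors == {1, 0, 1}, deepest == 3 and num_mults == 1 (one multiply
   joins the two sqrt chains), corresponding to
   pow (x, 0.625) == sqrt (x) * sqrt (sqrt (sqrt (x))).  */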

static bool
representable_as_half_series_p (REAL_VALUE_TYPE c, unsigned n,
				struct pow_synth_sqrt_info *info)
{
  REAL_VALUE_TYPE factor = dconsthalf;
  REAL_VALUE_TYPE remainder = c;

  info->deepest = 0;
  info->num_mults = 0;
  memset (info->factors, 0, n * sizeof (bool));

  for (unsigned i = 0; i < n; i++)
    {
      REAL_VALUE_TYPE res;

      /* If something inexact happened bail out now.  */
      if (real_arithmetic (&res, MINUS_EXPR, &remainder, &factor))
	return false;

      /* We have hit zero.  The number is representable as a sum
	 of powers of 0.5.  */
      if (real_equal (&res, &dconst0))
	{
	  info->factors[i] = true;
	  info->deepest = i + 1;
	  return true;
	}
      else if (!REAL_VALUE_NEGATIVE (res))
	{
	  remainder = res;
	  info->factors[i] = true;
	  info->num_mults++;
	}
      else
	info->factors[i] = false;

      real_arithmetic (&factor, MULT_EXPR, &factor, &dconsthalf);
    }
  return false;
}

/* Return the tree corresponding to FN being applied
   to ARG N times at GSI and LOC.
   Look up previous results from CACHE if need be.
   cache[0] should contain just plain ARG i.e. FN applied to ARG 0 times.  */

static tree
get_fn_chain (tree arg, unsigned int n, gimple_stmt_iterator *gsi,
	      tree fn, location_t loc, tree *cache)
{
  tree res = cache[n];
  if (!res)
    {
      tree prev = get_fn_chain (arg, n - 1, gsi, fn, loc, cache);
      res = build_and_insert_call (gsi, loc, fn, prev);
      cache[n] = res;
    }

  return res;
}

/* Print to STREAM the repeated application of function FNAME to ARG
   N times.  So, for FNAME = "foo", ARG = "x", N = 2 it would print:
   foo (foo (x)).  */

static void
print_nested_fn (FILE* stream, const char *fname, const char* arg,
		 unsigned int n)
{
  if (n == 0)
    fprintf (stream, "%s", arg);
  else
    {
      fprintf (stream, "%s (", fname);
      print_nested_fn (stream, fname, arg, n - 1);
      fprintf (stream, ")");
    }
}

/* Print to STREAM the fractional sequence of sqrt chains
   applied to ARG, described by INFO.  Used for the dump file.  */

static void
dump_fractional_sqrt_sequence (FILE *stream, const char *arg,
			       struct pow_synth_sqrt_info *info)
{
  for (unsigned int i = 0; i < info->deepest; i++)
    {
      bool is_set = info->factors[i];
      if (is_set)
	{
	  print_nested_fn (stream, "sqrt", arg, i + 1);
	  if (i != info->deepest - 1)
	    fprintf (stream, " * ");
	}
    }
}

/* Print to STREAM a representation of raising ARG to an integer
   power N.  Used for the dump file.  */

static void
dump_integer_part (FILE *stream, const char* arg, HOST_WIDE_INT n)
{
  if (n > 1)
    fprintf (stream, "powi (%s, " HOST_WIDE_INT_PRINT_DEC ")", arg, n);
  else if (n == 1)
    fprintf (stream, "%s", arg);
}

/* Attempt to synthesize a POW[F] (ARG0, ARG1) call using chains of
   square roots.  Place at GSI and LOC.  Limit the maximum depth
   of the sqrt chains to MAX_DEPTH.  Return the tree holding the
   result of the expanded sequence or NULL_TREE if the expansion failed.

   This routine assumes that ARG1 is a real number with a fractional part
   (the integer exponent case will have been handled earlier in
   gimple_expand_builtin_pow).

   For ARG1 > 0.0:
   * For ARG1 composed of a whole part WHOLE_PART and a fractional part
     FRAC_PART i.e. WHOLE_PART == floor (ARG1) and
		    FRAC_PART == ARG1 - WHOLE_PART:
     Produce POWI (ARG0, WHOLE_PART) * POW (ARG0, FRAC_PART) where
     POW (ARG0, FRAC_PART) is expanded as a product of square root chains
     if it can be expressed as such, that is if FRAC_PART satisfies:
     FRAC_PART == <SUM from i = 1 until MAX_DEPTH> (a[i] * (0.5**i))
     where integer a[i] is either 0 or 1.

     Example:
     POW (x, 3.625) == POWI (x, 3) * POW (x, 0.625)
       --> POWI (x, 3) * SQRT (x) * SQRT (SQRT (SQRT (x)))

   For ARG1 < 0.0 there are two approaches:
   * (A) Expand to 1.0 / POW (ARG0, -ARG1) where POW (ARG0, -ARG1)
	 is calculated as above.

     Example:
     POW (x, -5.625) == 1.0 / POW (x, 5.625)
       --> 1.0 / (POWI (x, 5) * SQRT (x) * SQRT (SQRT (SQRT (x))))

   * (B) : WHOLE_PART := - ceil (abs (ARG1))
	   FRAC_PART  := ARG1 - WHOLE_PART
	   and expand to POW (x, FRAC_PART) / POWI (x, WHOLE_PART).
     Example:
     POW (x, -5.875) == POW (x, 0.125) / POWI (X, 6)
       --> SQRT (SQRT (SQRT (x))) / (POWI (x, 6))

   For ARG1 < 0.0 we choose between (A) and (B) depending on
   how many multiplications we'd have to do.
   So, for the example in (B): POW (x, -5.875), if we were to
   follow algorithm (A) we would produce:
   1.0 / POWI (X, 5) * SQRT (X) * SQRT (SQRT (X)) * SQRT (SQRT (SQRT (X)))
   which contains more multiplications than approach (B).

   Hopefully, this approach will eliminate potentially expensive POW library
   calls when unsafe floating point math is enabled and allow the compiler to
   further optimise the multiplies, square roots and divides produced by this
   function.  */

static tree
expand_pow_as_sqrts (gimple_stmt_iterator *gsi, location_t loc,
		     tree arg0, tree arg1, HOST_WIDE_INT max_depth)
{
  tree type = TREE_TYPE (arg0);
  machine_mode mode = TYPE_MODE (type);
  tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
  bool one_over = true;

  if (!sqrtfn)
    return NULL_TREE;

  if (TREE_CODE (arg1) != REAL_CST)
    return NULL_TREE;

  REAL_VALUE_TYPE exp_init = TREE_REAL_CST (arg1);

  gcc_assert (max_depth > 0);
  tree *cache = XALLOCAVEC (tree, max_depth + 1);

  struct pow_synth_sqrt_info synth_info;
  synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
  synth_info.deepest = 0;
  synth_info.num_mults = 0;

  bool neg_exp = REAL_VALUE_NEGATIVE (exp_init);
  REAL_VALUE_TYPE exp = real_value_abs (&exp_init);

  /* The whole and fractional parts of exp.  */
  REAL_VALUE_TYPE whole_part;
  REAL_VALUE_TYPE frac_part;

  real_floor (&whole_part, mode, &exp);
  real_arithmetic (&frac_part, MINUS_EXPR, &exp, &whole_part);

  REAL_VALUE_TYPE ceil_whole = dconst0;
  REAL_VALUE_TYPE ceil_fract = dconst0;

  if (neg_exp)
    {
      real_ceil (&ceil_whole, mode, &exp);
      real_arithmetic (&ceil_fract, MINUS_EXPR, &ceil_whole, &exp);
    }

  if (!representable_as_half_series_p (frac_part, max_depth, &synth_info))
    return NULL_TREE;

  /* Check whether it's more profitable to not use 1.0 / ...  */
  if (neg_exp)
    {
      struct pow_synth_sqrt_info alt_synth_info;
      alt_synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
      alt_synth_info.deepest = 0;
      alt_synth_info.num_mults = 0;

      if (representable_as_half_series_p (ceil_fract, max_depth,
					  &alt_synth_info)
	  && alt_synth_info.deepest <= synth_info.deepest
	  && alt_synth_info.num_mults < synth_info.num_mults)
	{
	  whole_part = ceil_whole;
	  frac_part = ceil_fract;
	  synth_info.deepest = alt_synth_info.deepest;
	  synth_info.num_mults = alt_synth_info.num_mults;
	  memcpy (synth_info.factors, alt_synth_info.factors,
		  (max_depth + 1) * sizeof (bool));
	  one_over = false;
	}
    }

  HOST_WIDE_INT n = real_to_integer (&whole_part);
  REAL_VALUE_TYPE cint;
  real_from_integer (&cint, VOIDmode, n, SIGNED);

  if (!real_identical (&whole_part, &cint))
    return NULL_TREE;

  if (powi_cost (n) + synth_info.num_mults > POWI_MAX_MULTS)
    return NULL_TREE;

  memset (cache, 0, (max_depth + 1) * sizeof (tree));

  tree integer_res = n == 0 ? build_real (type, dconst1) : arg0;

  /* Calculate the integer part of the exponent.  */
  if (n > 1)
    {
      integer_res = gimple_expand_builtin_powi (gsi, loc, arg0, n);
      if (!integer_res)
	return NULL_TREE;
    }

  if (dump_file)
    {
      char string[64];

      real_to_decimal (string, &exp_init, sizeof (string), 0, 1);
      fprintf (dump_file, "synthesizing pow (x, %s) as:\n", string);

      if (neg_exp)
	{
	  if (one_over)
	    {
	      fprintf (dump_file, "1.0 / (");
	      dump_integer_part (dump_file, "x", n);
	      if (n > 0)
		fprintf (dump_file, " * ");
	      dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
	      fprintf (dump_file, ")");
	    }
	  else
	    {
	      dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
	      fprintf (dump_file, " / (");
	      dump_integer_part (dump_file, "x", n);
	      fprintf (dump_file, ")");
	    }
	}
      else
	{
	  dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
	  if (n > 0)
	    fprintf (dump_file, " * ");
	  dump_integer_part (dump_file, "x", n);
	}

      fprintf (dump_file, "\ndeepest sqrt chain: %d\n", synth_info.deepest);
    }

  tree fract_res = NULL_TREE;
  cache[0] = arg0;

  /* Calculate the fractional part of the exponent.  */
  for (unsigned i = 0; i < synth_info.deepest; i++)
    {
      if (synth_info.factors[i])
	{
	  tree sqrt_chain = get_fn_chain (arg0, i + 1, gsi, sqrtfn, loc, cache);

	  if (!fract_res)
	    fract_res = sqrt_chain;
	  else
	    fract_res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
						fract_res, sqrt_chain);
	}
    }

  tree res = NULL_TREE;

  if (neg_exp)
    {
      if (one_over)
	{
	  if (n > 0)
	    res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					  fract_res, integer_res);
	  else
	    res = fract_res;

	  res = build_and_insert_binop (gsi, loc, "powrootrecip", RDIV_EXPR,
					build_real (type, dconst1), res);
	}
      else
	res = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
				      fract_res, integer_res);
    }
  else
    res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
				  fract_res, integer_res);
  return res;
}

/* ARG0 and ARG1 are the two arguments to a pow builtin call in GSI
   with location info LOC.  If possible, create an equivalent and
   less expensive sequence of statements prior to GSI, and return an
   expression holding the result.  */

static tree
gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc,
			   tree arg0, tree arg1)
{
  REAL_VALUE_TYPE c, cint, dconst1_3, dconst1_4, dconst1_6;
  REAL_VALUE_TYPE c2, dconst3;
  HOST_WIDE_INT n;
  tree type, sqrtfn, cbrtfn, sqrt_arg0, result, cbrt_x, powi_cbrt_x;
  machine_mode mode;
  bool speed_p = optimize_bb_for_speed_p (gsi_bb (*gsi));
  bool hw_sqrt_exists, c_is_int, c2_is_int;

  dconst1_4 = dconst1;
  SET_REAL_EXP (&dconst1_4, REAL_EXP (&dconst1_4) - 2);

  /* If the exponent isn't a constant, there's nothing of interest
     to be done.  */
  if (TREE_CODE (arg1) != REAL_CST)
    return NULL_TREE;

  /* Don't perform the operation if flag_signaling_nans is on
     and the operand is a signaling NaN.  */
  if (HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg1)))
      && ((TREE_CODE (arg0) == REAL_CST
	   && REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg0)))
	  || REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg1))))
    return NULL_TREE;

  /* If the exponent is equivalent to an integer, expand to an optimal
     multiplication sequence when profitable.  */
  c = TREE_REAL_CST (arg1);
  n = real_to_integer (&c);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  c_is_int = real_identical (&c, &cint);

  if (c_is_int
      && ((n >= -1 && n <= 2)
	  || (flag_unsafe_math_optimizations
	      && speed_p
	      && powi_cost (n) <= POWI_MAX_MULTS)))
    return gimple_expand_builtin_powi (gsi, loc, arg0, n);

  /* Attempt various optimizations using sqrt and cbrt.  */
  type = TREE_TYPE (arg0);
  mode = TYPE_MODE (type);
  sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);

  /* Optimize pow(x,0.5) = sqrt(x).  This replacement is always safe
     unless signed zeros must be maintained.  pow(-0,0.5) = +0, while
     sqrt(-0) = -0.  */
  if (sqrtfn
      && real_equal (&c, &dconsthalf)
      && !HONOR_SIGNED_ZEROS (mode))
    return build_and_insert_call (gsi, loc, sqrtfn, arg0);

  hw_sqrt_exists = optab_handler (sqrt_optab, mode) != CODE_FOR_nothing;

  /* Optimize pow(x,1./3.) = cbrt(x).  This requires unsafe math
     optimizations since 1./3. is not exactly representable.  If x
     is negative and finite, the correct value of pow(x,1./3.) is
     a NaN with the "invalid" exception raised, because the value
     of 1./3. actually has an even denominator.  The correct value
     of cbrt(x) is a negative real value.  */
  cbrtfn = mathfn_built_in (type, BUILT_IN_CBRT);
  dconst1_3 = real_value_truncate (mode, dconst_third ());

  if (flag_unsafe_math_optimizations
      && cbrtfn
      && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
      && real_equal (&c, &dconst1_3))
    return build_and_insert_call (gsi, loc, cbrtfn, arg0);

  /* Optimize pow(x,1./6.) = cbrt(sqrt(x)).  Don't do this optimization
     if we don't have a hardware sqrt insn.  */
  dconst1_6 = dconst1_3;
  SET_REAL_EXP (&dconst1_6, REAL_EXP (&dconst1_6) - 1);

  if (flag_unsafe_math_optimizations
      && sqrtfn
      && cbrtfn
      && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
      && speed_p
      && hw_sqrt_exists
      && real_equal (&c, &dconst1_6))
    {
      /* sqrt(x)  */
      sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);

      /* cbrt(sqrt(x))  */
      return build_and_insert_call (gsi, loc, cbrtfn, sqrt_arg0);
    }

  /* Attempt to expand the POW as a product of square root chains.
     Expand the 0.25 case even when optimising for size.  */
  if (flag_unsafe_math_optimizations
      && sqrtfn
      && hw_sqrt_exists
      && (speed_p || real_equal (&c, &dconst1_4))
      && !HONOR_SIGNED_ZEROS (mode))
    {
      unsigned int max_depth = speed_p
				? PARAM_VALUE (PARAM_MAX_POW_SQRT_DEPTH)
				: 2;

      tree expand_with_sqrts
	= expand_pow_as_sqrts (gsi, loc, arg0, arg1, max_depth);

      if (expand_with_sqrts)
	return expand_with_sqrts;
    }

  real_arithmetic (&c2, MULT_EXPR, &c, &dconst2);
  n = real_to_integer (&c2);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  c2_is_int = real_identical (&c2, &cint);

  /* Optimize pow(x,c), where 3c = n for some nonzero integer n, into

     powi(x, n/3) * powi(cbrt(x), n%3),                    n > 0;
     1.0 / (powi(x, abs(n)/3) * powi(cbrt(x), abs(n)%3)),  n < 0.

     Do not calculate the first factor when n/3 = 0.  As cbrt(x) is
     different from pow(x, 1./3.) due to rounding and behavior with
     negative x, we need to constrain this transformation to unsafe
     math and positive x or finite math.  */
  real_from_integer (&dconst3, VOIDmode, 3, SIGNED);
  real_arithmetic (&c2, MULT_EXPR, &c, &dconst3);
  real_round (&c2, mode, &c2);
  n = real_to_integer (&c2);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  real_arithmetic (&c2, RDIV_EXPR, &cint, &dconst3);
  real_convert (&c2, mode, &c2);

  if (flag_unsafe_math_optimizations
      && cbrtfn
      && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
      && real_identical (&c2, &c)
      && !c2_is_int
      && optimize_function_for_speed_p (cfun)
      && powi_cost (n / 3) <= POWI_MAX_MULTS)
    {
      tree powi_x_ndiv3 = NULL_TREE;

      /* Attempt to fold powi(arg0, abs(n/3)) into multiplies.  If not
	 possible or profitable, give up.  Skip the degenerate case when
	 abs(n) < 3, where the result is always 1.  */
      if (absu_hwi (n) >= 3)
	{
	  powi_x_ndiv3 = gimple_expand_builtin_powi (gsi, loc, arg0,
						     n / 3);
	  if (!powi_x_ndiv3)
	    return NULL_TREE;
	}

      /* Calculate powi(cbrt(x), n%3).  Don't use gimple_expand_builtin_powi
	 as that creates an unnecessary variable.  Instead, just produce
	 either cbrt(x) or cbrt(x) * cbrt(x).  */
      cbrt_x = build_and_insert_call (gsi, loc, cbrtfn, arg0);

      if (absu_hwi (n) % 3 == 1)
	powi_cbrt_x = cbrt_x;
      else
	powi_cbrt_x = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					      cbrt_x, cbrt_x);

      /* Multiply the two subexpressions, unless powi(x,abs(n)/3) = 1.  */
      if (absu_hwi (n) < 3)
	result = powi_cbrt_x;
      else
	result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					 powi_x_ndiv3, powi_cbrt_x);

      /* If n is negative, reciprocate the result.  */
      if (n < 0)
	result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
					 build_real (type, dconst1), result);

      return result;
    }

  /* No optimizations succeeded.  */
  return NULL_TREE;
}

/* ARG is the argument to a cabs builtin call in GSI with location info
   LOC.  Create a sequence of statements prior to GSI that calculates
   sqrt(R*R + I*I), where R and I are the real and imaginary components
   of ARG, respectively.  Return an expression holding the result.  */

static tree
gimple_expand_builtin_cabs (gimple_stmt_iterator *gsi, location_t loc, tree arg)
{
  tree real_part, imag_part, addend1, addend2, sum, result;
  tree type = TREE_TYPE (TREE_TYPE (arg));
  tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
  machine_mode mode = TYPE_MODE (type);

  if (!flag_unsafe_math_optimizations
      || !optimize_bb_for_speed_p (gimple_bb (gsi_stmt (*gsi)))
      || !sqrtfn
      || optab_handler (sqrt_optab, mode) == CODE_FOR_nothing)
    return NULL_TREE;

  real_part = build_and_insert_ref (gsi, loc, type, "cabs",
				    REALPART_EXPR, arg);
  addend1 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
				    real_part, real_part);
  imag_part = build_and_insert_ref (gsi, loc, type, "cabs",
				    IMAGPART_EXPR, arg);
  addend2 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
				    imag_part, imag_part);
  sum = build_and_insert_binop (gsi, loc, "cabs", PLUS_EXPR, addend1, addend2);
  result = build_and_insert_call (gsi, loc, sqrtfn, sum);

  return result;
}

/* Go through all calls to sin, cos and cexpi and call execute_cse_sincos_1
   on the SSA_NAME argument of each of them.  Also expand powi(x,n) into
   an optimal number of multiplies, when n is a constant.  */

namespace {

const pass_data pass_data_cse_sincos =
{
  GIMPLE_PASS, /* type */
  "sincos", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_SINCOS, /* tv_id */
  PROP_ssa, /* properties_required */
  PROP_gimple_opt_math, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_cse_sincos : public gimple_opt_pass
{
public:
  pass_cse_sincos (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cse_sincos, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      /* We no longer require either sincos or cexp, since powi expansion
	 piggybacks on this pass.  */
      return optimize;
    }

  virtual unsigned int execute (function *);

}; // class pass_cse_sincos

unsigned int
pass_cse_sincos::execute (function *fun)
{
  basic_block bb;
  bool cfg_changed = false;

  calculate_dominance_info (CDI_DOMINATORS);
  memset (&sincos_stats, 0, sizeof (sincos_stats));

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;
      bool cleanup_eh = false;

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);

	  /* Only the last stmt in a bb could throw, no need to call
	     gimple_purge_dead_eh_edges if we change something in the middle
	     of a basic block.  */
	  cleanup_eh = false;

	  if (is_gimple_call (stmt)
	      && gimple_call_lhs (stmt))
	    {
	      tree arg, arg0, arg1, result;
	      HOST_WIDE_INT n;
	      location_t loc;

	      switch (gimple_call_combined_fn (stmt))
		{
		CASE_CFN_COS:
		CASE_CFN_SIN:
		CASE_CFN_CEXPI:
		  /* Make sure we have either sincos or cexp.  */
		  if (!targetm.libc_has_function (function_c99_math_complex)
		      && !targetm.libc_has_function (function_sincos))
		    break;

		  arg = gimple_call_arg (stmt, 0);
		  if (TREE_CODE (arg) == SSA_NAME)
		    cfg_changed |= execute_cse_sincos_1 (arg);
		  break;

		CASE_CFN_POW:
		  arg0 = gimple_call_arg (stmt, 0);
		  arg1 = gimple_call_arg (stmt, 1);

		  loc = gimple_location (stmt);
		  result = gimple_expand_builtin_pow (&gsi, loc, arg0, arg1);

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gassign *new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		CASE_CFN_POWI:
		  arg0 = gimple_call_arg (stmt, 0);
		  arg1 = gimple_call_arg (stmt, 1);
		  loc = gimple_location (stmt);

		  if (real_minus_onep (arg0))
		    {
		      tree t0, t1, cond, one, minus_one;
		      gassign *stmt;

		      t0 = TREE_TYPE (arg0);
		      t1 = TREE_TYPE (arg1);
		      one = build_real (t0, dconst1);
		      minus_one = build_real (t0, dconstm1);

		      cond = make_temp_ssa_name (t1, NULL, "powi_cond");
		      stmt = gimple_build_assign (cond, BIT_AND_EXPR,
						  arg1, build_int_cst (t1, 1));
		      gimple_set_location (stmt, loc);
		      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

		      result = make_temp_ssa_name (t0, NULL, "powi");
		      stmt = gimple_build_assign (result, COND_EXPR, cond,
						  minus_one, one);
		      gimple_set_location (stmt, loc);
		      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
		    }
		  else
		    {
		      if (!tree_fits_shwi_p (arg1))
			break;

		      n = tree_to_shwi (arg1);
		      result = gimple_expand_builtin_powi (&gsi, loc, arg0, n);
		    }

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gassign *new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		CASE_CFN_CABS:
		  arg0 = gimple_call_arg (stmt, 0);
		  loc = gimple_location (stmt);
		  result = gimple_expand_builtin_cabs (&gsi, loc, arg0);

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gassign *new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		default:;
		}
	    }
	}
      if (cleanup_eh)
	cfg_changed |= gimple_purge_dead_eh_edges (bb);
    }

  statistics_counter_event (fun, "sincos statements inserted",
			    sincos_stats.inserted);

  return cfg_changed ? TODO_cleanup_cfg : 0;
}

} // anon namespace
} // anon namespace

gimple_opt_pass *
make_pass_cse_sincos (gcc::context *ctxt)
{
  return new pass_cse_sincos (ctxt);
}
/* Return true if stmt is a type conversion operation that can be stripped
   when used in a widening multiply operation.  */
static bool
widening_mult_conversion_strippable_p (tree result_type, gimple *stmt)
{
  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);

  if (TREE_CODE (result_type) == INTEGER_TYPE)
    {
      tree op_type;
      tree inner_op_type;

      if (!CONVERT_EXPR_CODE_P (rhs_code))
        return false;

      op_type = TREE_TYPE (gimple_assign_lhs (stmt));

      /* If the type of OP has the same precision as the result, then
         we can strip this conversion.  The multiply operation will be
         selected to create the correct extension as a by-product.  */
      if (TYPE_PRECISION (result_type) == TYPE_PRECISION (op_type))
        return true;

      /* We can also strip a conversion if it preserves the signed-ness of
         the operation and doesn't narrow the range.  */
      inner_op_type = TREE_TYPE (gimple_assign_rhs1 (stmt));

      /* If the inner-most type is unsigned, then we can strip any
         intermediate widening operation.  If it's signed, then the
         intermediate widening operation must also be signed.  */
      if ((TYPE_UNSIGNED (inner_op_type)
           || TYPE_UNSIGNED (op_type) == TYPE_UNSIGNED (inner_op_type))
          && TYPE_PRECISION (op_type) > TYPE_PRECISION (inner_op_type))
        return true;

      return false;
    }

  return rhs_code == FIXED_CONVERT_EXPR;
}
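/* An illustrative sketch, not from the original sources: with 16-bit
   unsigned short and 64-bit long,

       unsigned short us;
       unsigned int t = us;
       long r = (long) t * x;

   the conversion defining t can be stripped when looking for a widening
   multiply: the inner type is unsigned and strictly narrower than
   unsigned int, so the multiply may extend directly from unsigned short.  */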
/* Return true if RHS is a suitable operand for a widening multiplication,
   assuming a target type of TYPE.
   There are two cases:

     - RHS makes some value at least twice as wide.  Store that value
       in *NEW_RHS_OUT if so, and store its type in *TYPE_OUT.

     - RHS is an integer constant.  Store that value in *NEW_RHS_OUT if so,
       but leave *TYPE_OUT untouched.  */

static bool
is_widening_mult_rhs_p (tree type, tree rhs, tree *type_out,
                        tree *new_rhs_out)
{
  gimple *stmt;
  tree type1, rhs1;

  if (TREE_CODE (rhs) == SSA_NAME)
    {
      stmt = SSA_NAME_DEF_STMT (rhs);
      if (is_gimple_assign (stmt))
        {
          if (! widening_mult_conversion_strippable_p (type, stmt))
            rhs1 = rhs;
          else
            {
              rhs1 = gimple_assign_rhs1 (stmt);

              if (TREE_CODE (rhs1) == INTEGER_CST)
                {
                  *new_rhs_out = rhs1;
                  *type_out = NULL;
                  return true;
                }
            }
        }
      else
        rhs1 = rhs;

      type1 = TREE_TYPE (rhs1);

      if (TREE_CODE (type1) != TREE_CODE (type)
          || TYPE_PRECISION (type1) * 2 > TYPE_PRECISION (type))
        return false;

      *new_rhs_out = rhs1;
      *type_out = type1;
      return true;
    }

  if (TREE_CODE (rhs) == INTEGER_CST)
    {
      *new_rhs_out = rhs;
      *type_out = NULL;
      return true;
    }

  return false;
}
/* Return true if STMT performs a widening multiplication, assuming the
   output type is TYPE.  If so, store the unwidened types of the operands
   in *TYPE1_OUT and *TYPE2_OUT respectively.  Also fill *RHS1_OUT and
   *RHS2_OUT such that converting those operands to types *TYPE1_OUT
   and *TYPE2_OUT would give the operands of the multiplication.  */

static bool
is_widening_mult_p (gimple *stmt,
                    tree *type1_out, tree *rhs1_out,
                    tree *type2_out, tree *rhs2_out)
{
  tree type = TREE_TYPE (gimple_assign_lhs (stmt));

  if (TREE_CODE (type) != INTEGER_TYPE
      && TREE_CODE (type) != FIXED_POINT_TYPE)
    return false;

  if (!is_widening_mult_rhs_p (type, gimple_assign_rhs1 (stmt), type1_out,
                               rhs1_out))
    return false;

  if (!is_widening_mult_rhs_p (type, gimple_assign_rhs2 (stmt), type2_out,
                               rhs2_out))
    return false;

  if (*type1_out == NULL)
    {
      if (*type2_out == NULL || !int_fits_type_p (*rhs1_out, *type2_out))
        return false;
      *type1_out = *type2_out;
    }

  if (*type2_out == NULL)
    {
      if (!int_fits_type_p (*rhs2_out, *type1_out))
        return false;
      *type2_out = *type1_out;
    }

  /* Ensure that the larger of the two operands comes first.  */
  if (TYPE_PRECISION (*type1_out) < TYPE_PRECISION (*type2_out))
    {
      std::swap (*type1_out, *type2_out);
      std::swap (*rhs1_out, *rhs2_out);
    }

  return true;
}
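/* An illustrative sketch, not from the original sources, of the pattern
   this recognizes:

       int a, b;
       long c = (long) a * (long) b;

   Both operands strip down to 32-bit values whose precision is at most
   half that of the 64-bit result, so this is a widening multiply with
   *type1_out == *type2_out == int.  */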
/* Check to see if the CALL statement is an invocation of copysign
   with 1.0 as its first argument.  */
static bool
is_copysign_call_with_1 (gimple *call)
{
  gcall *c = dyn_cast <gcall *> (call);
  if (! c)
    return false;

  enum combined_fn code = gimple_call_combined_fn (c);

  if (code == CFN_LAST)
    return false;

  if (builtin_fn_p (code))
    {
      switch (as_builtin_fn (code))
        {
        CASE_FLT_FN (BUILT_IN_COPYSIGN):
        CASE_FLT_FN_FLOATN_NX (BUILT_IN_COPYSIGN):
          return real_onep (gimple_call_arg (c, 0));
        default:
          return false;
        }
    }

  if (internal_fn_p (code))
    {
      switch (as_internal_fn (code))
        {
        case IFN_COPYSIGN:
          return real_onep (gimple_call_arg (c, 0));
        default:
          return false;
        }
    }

  return false;
}
/* Try to expand the pattern x * copysign (1, y) into xorsign (x, y).
   This only happens when the xorsign optab is defined; if the pattern
   is not a xorsign pattern or if expansion fails, FALSE is returned,
   otherwise TRUE is returned.  */
static bool
convert_expand_mult_copysign (gimple *stmt, gimple_stmt_iterator *gsi)
{
  tree treeop0, treeop1, lhs, type;
  location_t loc = gimple_location (stmt);
  lhs = gimple_assign_lhs (stmt);
  treeop0 = gimple_assign_rhs1 (stmt);
  treeop1 = gimple_assign_rhs2 (stmt);
  type = TREE_TYPE (lhs);
  machine_mode mode = TYPE_MODE (type);

  if (HONOR_SNANS (type))
    return false;

  if (TREE_CODE (treeop0) == SSA_NAME && TREE_CODE (treeop1) == SSA_NAME)
    {
      gimple *call0 = SSA_NAME_DEF_STMT (treeop0);
      if (!has_single_use (treeop0) || !is_copysign_call_with_1 (call0))
        {
          call0 = SSA_NAME_DEF_STMT (treeop1);
          if (!has_single_use (treeop1) || !is_copysign_call_with_1 (call0))
            return false;

          treeop1 = treeop0;
        }

      if (optab_handler (xorsign_optab, mode) == CODE_FOR_nothing)
        return false;

      gcall *c = as_a <gcall *> (call0);
      treeop0 = gimple_call_arg (c, 1);

      gcall *call_stmt
        = gimple_build_call_internal (IFN_XORSIGN, 2, treeop1, treeop0);
      gimple_set_lhs (call_stmt, lhs);
      gimple_set_location (call_stmt, loc);
      gsi_replace (gsi, call_stmt, true);
      return true;
    }

  return false;
}
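/* An illustrative sketch, not from the original sources: on a target
   defining the xorsign optab,

       z = x * __builtin_copysign (1.0, y);

   is replaced by an internal-function call, roughly

       z = XORSIGN (x, y);

   which transfers the sign bit of y onto x instead of multiplying.  */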
/* Process a single gimple statement STMT, which has a MULT_EXPR as
   its rhs, and try to convert it into a WIDEN_MULT_EXPR.  The return
   value is true iff we converted the statement.  */

static bool
convert_mult_to_widen (gimple *stmt, gimple_stmt_iterator *gsi)
{
  tree lhs, rhs1, rhs2, type, type1, type2;
  enum insn_code handler;
  scalar_int_mode to_mode, from_mode, actual_mode;
  optab op;
  int actual_precision;
  location_t loc = gimple_location (stmt);
  bool from_unsigned1, from_unsigned2;

  lhs = gimple_assign_lhs (stmt);
  type = TREE_TYPE (lhs);
  if (TREE_CODE (type) != INTEGER_TYPE)
    return false;

  if (!is_widening_mult_p (stmt, &type1, &rhs1, &type2, &rhs2))
    return false;

  to_mode = SCALAR_INT_TYPE_MODE (type);
  from_mode = SCALAR_INT_TYPE_MODE (type1);
  if (to_mode == from_mode)
    return false;

  from_unsigned1 = TYPE_UNSIGNED (type1);
  from_unsigned2 = TYPE_UNSIGNED (type2);

  if (from_unsigned1 && from_unsigned2)
    op = umul_widen_optab;
  else if (!from_unsigned1 && !from_unsigned2)
    op = smul_widen_optab;
  else
    op = usmul_widen_optab;

  handler = find_widening_optab_handler_and_mode (op, to_mode, from_mode,
                                                  &actual_mode);

  if (handler == CODE_FOR_nothing)
    {
      if (op != smul_widen_optab)
        {
          /* We can use a signed multiply with unsigned types as long as
             there is a wider mode to use, or it is the smaller of the two
             types that is unsigned.  Note that type1 >= type2, always.  */
          if ((TYPE_UNSIGNED (type1)
               && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
              || (TYPE_UNSIGNED (type2)
                  && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
            {
              if (!GET_MODE_WIDER_MODE (from_mode).exists (&from_mode)
                  || GET_MODE_SIZE (to_mode) <= GET_MODE_SIZE (from_mode))
                return false;
            }

          op = smul_widen_optab;
          handler = find_widening_optab_handler_and_mode (op, to_mode,
                                                          from_mode,
                                                          &actual_mode);

          if (handler == CODE_FOR_nothing)
            return false;

          from_unsigned1 = from_unsigned2 = false;
        }
      else
        return false;
    }

  /* Ensure that the inputs to the handler are in the correct precision
     for the opcode.  This will be the full mode size.  */
  actual_precision = GET_MODE_PRECISION (actual_mode);
  if (2 * actual_precision > TYPE_PRECISION (type))
    return false;
  if (actual_precision != TYPE_PRECISION (type1)
      || from_unsigned1 != TYPE_UNSIGNED (type1))
    rhs1 = build_and_insert_cast (gsi, loc,
                                  build_nonstandard_integer_type
                                    (actual_precision, from_unsigned1), rhs1);
  if (actual_precision != TYPE_PRECISION (type2)
      || from_unsigned2 != TYPE_UNSIGNED (type2))
    rhs2 = build_and_insert_cast (gsi, loc,
                                  build_nonstandard_integer_type
                                    (actual_precision, from_unsigned2), rhs2);

  /* Handle constants.  */
  if (TREE_CODE (rhs1) == INTEGER_CST)
    rhs1 = fold_convert (type1, rhs1);
  if (TREE_CODE (rhs2) == INTEGER_CST)
    rhs2 = fold_convert (type2, rhs2);

  gimple_assign_set_rhs1 (stmt, rhs1);
  gimple_assign_set_rhs2 (stmt, rhs2);
  gimple_assign_set_rhs_code (stmt, WIDEN_MULT_EXPR);
  update_stmt (stmt);
  widen_mul_stats.widen_mults_inserted++;
  return true;
}
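/* An illustrative sketch, not from the original sources: assuming 16-bit
   short, 32-bit int, and an available umul_widen pattern,

       unsigned short a, b;
       unsigned int c = a * b;

   whose GIMPLE multiplies two zero-extended ints, is rewritten to

       c = a w* b;   (WIDEN_MULT_EXPR)

   so that expand can emit a single widening multiply instruction.  */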
/* Process a single gimple statement STMT, which is found at the
   iterator GSI and has either a PLUS_EXPR or a MINUS_EXPR as its
   rhs (given by CODE), and try to convert it into a
   WIDEN_MULT_PLUS_EXPR or a WIDEN_MULT_MINUS_EXPR.  The return value
   is true iff we converted the statement.  */

static bool
convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple *stmt,
                            enum tree_code code)
{
  gimple *rhs1_stmt = NULL, *rhs2_stmt = NULL;
  gimple *conv1_stmt = NULL, *conv2_stmt = NULL, *conv_stmt;
  tree type, type1, type2, optype;
  tree lhs, rhs1, rhs2, mult_rhs1, mult_rhs2, add_rhs;
  enum tree_code rhs1_code = ERROR_MARK, rhs2_code = ERROR_MARK;
  optab this_optab;
  enum tree_code wmult_code;
  enum insn_code handler;
  scalar_mode to_mode, from_mode, actual_mode;
  location_t loc = gimple_location (stmt);
  int actual_precision;
  bool from_unsigned1, from_unsigned2;

  lhs = gimple_assign_lhs (stmt);
  type = TREE_TYPE (lhs);
  if (TREE_CODE (type) != INTEGER_TYPE
      && TREE_CODE (type) != FIXED_POINT_TYPE)
    return false;

  if (code == MINUS_EXPR)
    wmult_code = WIDEN_MULT_MINUS_EXPR;
  else
    wmult_code = WIDEN_MULT_PLUS_EXPR;

  rhs1 = gimple_assign_rhs1 (stmt);
  rhs2 = gimple_assign_rhs2 (stmt);

  if (TREE_CODE (rhs1) == SSA_NAME)
    {
      rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
      if (is_gimple_assign (rhs1_stmt))
        rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
    }

  if (TREE_CODE (rhs2) == SSA_NAME)
    {
      rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
      if (is_gimple_assign (rhs2_stmt))
        rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
    }

  /* Allow for one conversion statement between the multiply
     and addition/subtraction statement.  If there are more than
     one conversions then we assume they would invalidate this
     transformation.  If that's not the case then they should have
     been folded before now.  */
  if (CONVERT_EXPR_CODE_P (rhs1_code))
    {
      conv1_stmt = rhs1_stmt;
      rhs1 = gimple_assign_rhs1 (rhs1_stmt);
      if (TREE_CODE (rhs1) == SSA_NAME)
        {
          rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
          if (is_gimple_assign (rhs1_stmt))
            rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
        }
      else
        return false;
    }
  if (CONVERT_EXPR_CODE_P (rhs2_code))
    {
      conv2_stmt = rhs2_stmt;
      rhs2 = gimple_assign_rhs1 (rhs2_stmt);
      if (TREE_CODE (rhs2) == SSA_NAME)
        {
          rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
          if (is_gimple_assign (rhs2_stmt))
            rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
        }
      else
        return false;
    }

  /* If code is WIDEN_MULT_EXPR then it would seem unnecessary to call
     is_widening_mult_p, but we still need the rhs operands it returns.

     It might also appear that it would be sufficient to use the existing
     operands of the widening multiply, but that would limit the choice of
     multiply-and-accumulate instructions.

     If the widened-multiplication result has more than one use, it is
     probably wiser not to do the conversion.  */
  if (code == PLUS_EXPR
      && (rhs1_code == MULT_EXPR || rhs1_code == WIDEN_MULT_EXPR))
    {
      if (!has_single_use (rhs1)
          || !is_widening_mult_p (rhs1_stmt, &type1, &mult_rhs1,
                                  &type2, &mult_rhs2))
        return false;
      add_rhs = rhs2;
      conv_stmt = conv1_stmt;
    }
  else if (rhs2_code == MULT_EXPR || rhs2_code == WIDEN_MULT_EXPR)
    {
      if (!has_single_use (rhs2)
          || !is_widening_mult_p (rhs2_stmt, &type1, &mult_rhs1,
                                  &type2, &mult_rhs2))
        return false;
      add_rhs = rhs1;
      conv_stmt = conv2_stmt;
    }
  else
    return false;

  to_mode = SCALAR_TYPE_MODE (type);
  from_mode = SCALAR_TYPE_MODE (type1);
  if (to_mode == from_mode)
    return false;

  from_unsigned1 = TYPE_UNSIGNED (type1);
  from_unsigned2 = TYPE_UNSIGNED (type2);
  optype = type1;

  /* There's no such thing as a mixed sign madd yet, so use a wider mode.  */
  if (from_unsigned1 != from_unsigned2)
    {
      if (!INTEGRAL_TYPE_P (type))
        return false;
      /* We can use a signed multiply with unsigned types as long as
         there is a wider mode to use, or it is the smaller of the two
         types that is unsigned.  Note that type1 >= type2, always.  */
      if ((from_unsigned1
           && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
          || (from_unsigned2
              && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
        {
          if (!GET_MODE_WIDER_MODE (from_mode).exists (&from_mode)
              || GET_MODE_SIZE (from_mode) >= GET_MODE_SIZE (to_mode))
            return false;
        }

      from_unsigned1 = from_unsigned2 = false;
      optype = build_nonstandard_integer_type (GET_MODE_PRECISION (from_mode),
                                               false);
    }

  /* If there was a conversion between the multiply and addition
     then we need to make sure it fits a multiply-and-accumulate.
     There should be a single mode change which does not change the
     value.  */
  if (conv_stmt)
    {
      /* We use the original, unmodified data types for this.  */
      tree from_type = TREE_TYPE (gimple_assign_rhs1 (conv_stmt));
      tree to_type = TREE_TYPE (gimple_assign_lhs (conv_stmt));
      int data_size = TYPE_PRECISION (type1) + TYPE_PRECISION (type2);
      bool is_unsigned = TYPE_UNSIGNED (type1) && TYPE_UNSIGNED (type2);

      if (TYPE_PRECISION (from_type) > TYPE_PRECISION (to_type))
        {
          /* Conversion is a truncate.  */
          if (TYPE_PRECISION (to_type) < data_size)
            return false;
        }
      else if (TYPE_PRECISION (from_type) < TYPE_PRECISION (to_type))
        {
          /* Conversion is an extend.  Check it's the right sort.  */
          if (TYPE_UNSIGNED (from_type) != is_unsigned
              && !(is_unsigned && TYPE_PRECISION (from_type) > data_size))
            return false;
        }
      /* else convert is a no-op for our purposes.  */
    }

  /* Verify that the machine can perform a widening multiply
     accumulate in this mode/signedness combination, otherwise
     this transformation is likely to pessimize code.  */
  this_optab = optab_for_tree_code (wmult_code, optype, optab_default);
  handler = find_widening_optab_handler_and_mode (this_optab, to_mode,
                                                  from_mode, &actual_mode);

  if (handler == CODE_FOR_nothing)
    return false;

  /* Ensure that the inputs to the handler are in the correct precision
     for the opcode.  This will be the full mode size.  */
  actual_precision = GET_MODE_PRECISION (actual_mode);
  if (actual_precision != TYPE_PRECISION (type1)
      || from_unsigned1 != TYPE_UNSIGNED (type1))
    mult_rhs1 = build_and_insert_cast (gsi, loc,
                                       build_nonstandard_integer_type
                                         (actual_precision, from_unsigned1),
                                       mult_rhs1);
  if (actual_precision != TYPE_PRECISION (type2)
      || from_unsigned2 != TYPE_UNSIGNED (type2))
    mult_rhs2 = build_and_insert_cast (gsi, loc,
                                       build_nonstandard_integer_type
                                         (actual_precision, from_unsigned2),
                                       mult_rhs2);

  if (!useless_type_conversion_p (type, TREE_TYPE (add_rhs)))
    add_rhs = build_and_insert_cast (gsi, loc, type, add_rhs);

  /* Handle constants.  */
  if (TREE_CODE (mult_rhs1) == INTEGER_CST)
    mult_rhs1 = fold_convert (type1, mult_rhs1);
  if (TREE_CODE (mult_rhs2) == INTEGER_CST)
    mult_rhs2 = fold_convert (type2, mult_rhs2);

  gimple_assign_set_rhs_with_ops (gsi, wmult_code, mult_rhs1, mult_rhs2,
                                  add_rhs);
  update_stmt (gsi_stmt (*gsi));
  widen_mul_stats.maccs_inserted++;
  return true;
}
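/* An illustrative sketch, not from the original sources: on a target
   providing a widening multiply-accumulate pattern,

       int a, b;
       long acc = c + (long) a * (long) b;

   is rewritten so that expand sees a single

       acc = WIDEN_MULT_PLUS_EXPR <a, b, c>;  */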
/* Combine the multiplication at MUL_STMT with operands MULOP1 and MULOP2
   with uses in additions and subtractions to form fused multiply-add
   operations.  Returns true if successful and MUL_STMT should be removed.  */

static bool
convert_mult_to_fma (gimple *mul_stmt, tree op1, tree op2)
{
  tree mul_result = gimple_get_lhs (mul_stmt);
  tree type = TREE_TYPE (mul_result);
  gimple *use_stmt, *neguse_stmt;
  gassign *fma_stmt;
  use_operand_p use_p;
  imm_use_iterator imm_iter;

  if (FLOAT_TYPE_P (type)
      && flag_fp_contract_mode == FP_CONTRACT_OFF)
    return false;

  /* We don't want to do bitfield reduction ops.  */
  if (INTEGRAL_TYPE_P (type)
      && !type_has_mode_precision_p (type))
    return false;

  /* If the target doesn't support it, don't generate it.  We assume that
     if fma isn't available then fms, fnma or fnms are not either.  */
  if (optab_handler (fma_optab, TYPE_MODE (type)) == CODE_FOR_nothing)
    return false;

  /* If the multiplication has zero uses, it is kept around probably because
     of -fnon-call-exceptions.  Don't optimize it away in that case,
     that is DCE's job.  */
  if (has_zero_uses (mul_result))
    return false;

  /* Make sure that the multiplication statement becomes dead after
     the transformation, thus that all uses are transformed to FMAs.
     This means we assume that an FMA operation has the same cost
     as an addition.  */
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, mul_result)
    {
      enum tree_code use_code;
      tree result = mul_result;
      bool negate_p = false;

      use_stmt = USE_STMT (use_p);

      if (is_gimple_debug (use_stmt))
        continue;

      /* For now restrict this operation to single basic blocks.  In theory
         we would want to support sinking the multiplication in
         m = a*b;
         if ()
           ma = m + c;
         else
           d = m;
         to form a fma in the then block and sink the multiplication to the
         else block.  */
      if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
        return false;

      if (!is_gimple_assign (use_stmt))
        return false;

      use_code = gimple_assign_rhs_code (use_stmt);

      /* A negate on the multiplication leads to FNMA.  */
      if (use_code == NEGATE_EXPR)
        {
          ssa_op_iter iter;
          use_operand_p usep;

          result = gimple_assign_lhs (use_stmt);

          /* Make sure the negate statement becomes dead with this
             single transformation.  */
          if (!single_imm_use (gimple_assign_lhs (use_stmt),
                               &use_p, &neguse_stmt))
            return false;

          /* Make sure the multiplication isn't also used on that stmt.  */
          FOR_EACH_PHI_OR_STMT_USE (usep, neguse_stmt, iter, SSA_OP_USE)
            if (USE_FROM_PTR (usep) == mul_result)
              return false;

          /* Re-validate.  */
          use_stmt = neguse_stmt;
          if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
            return false;
          if (!is_gimple_assign (use_stmt))
            return false;

          use_code = gimple_assign_rhs_code (use_stmt);
          negate_p = true;
        }

      switch (use_code)
        {
        case MINUS_EXPR:
          if (gimple_assign_rhs2 (use_stmt) == result)
            negate_p = !negate_p;
          break;
        case PLUS_EXPR:
          break;
        default:
          /* FMA can only be formed from PLUS and MINUS.  */
          return false;
        }

      /* If the subtrahend (gimple_assign_rhs2 (use_stmt)) is computed
         by a MULT_EXPR that we'll visit later, we might be able to
         get a more profitable match with fnma.
         OTOH, if we don't, a negate / fma pair has likely lower latency
         than a mult / subtract pair.  */
      if (use_code == MINUS_EXPR && !negate_p
          && gimple_assign_rhs1 (use_stmt) == result
          && optab_handler (fms_optab, TYPE_MODE (type)) == CODE_FOR_nothing
          && optab_handler (fnma_optab, TYPE_MODE (type)) != CODE_FOR_nothing)
        {
          tree rhs2 = gimple_assign_rhs2 (use_stmt);

          if (TREE_CODE (rhs2) == SSA_NAME)
            {
              gimple *stmt2 = SSA_NAME_DEF_STMT (rhs2);
              if (has_single_use (rhs2)
                  && is_gimple_assign (stmt2)
                  && gimple_assign_rhs_code (stmt2) == MULT_EXPR)
                return false;
            }
        }

      /* We can't handle a * b + a * b.  */
      if (gimple_assign_rhs1 (use_stmt) == gimple_assign_rhs2 (use_stmt))
        return false;

      /* While it is possible to validate whether or not the exact form
         that we've recognized is available in the backend, the assumption
         is that the transformation is never a loss.  For instance, suppose
         the target only has the plain FMA pattern available.  Consider
         a*b-c -> fma(a,b,-c): we've exchanged MUL+SUB for FMA+NEG, which
         is still two operations.  Consider -(a*b)-c -> fma(-a,b,-c): we
         still have 3 operations, but in the FMA form the two NEGs are
         independent and could be run in parallel.  */
    }

  FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, mul_result)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      enum tree_code use_code;
      tree addop, mulop1 = op1, result = mul_result;
      bool negate_p = false;

      if (is_gimple_debug (use_stmt))
        continue;

      use_code = gimple_assign_rhs_code (use_stmt);
      if (use_code == NEGATE_EXPR)
        {
          result = gimple_assign_lhs (use_stmt);
          single_imm_use (gimple_assign_lhs (use_stmt), &use_p, &neguse_stmt);
          gsi_remove (&gsi, true);
          release_defs (use_stmt);

          use_stmt = neguse_stmt;
          gsi = gsi_for_stmt (use_stmt);
          use_code = gimple_assign_rhs_code (use_stmt);
          negate_p = true;
        }

      if (gimple_assign_rhs1 (use_stmt) == result)
        {
          addop = gimple_assign_rhs2 (use_stmt);
          /* a * b - c -> a * b + (-c)  */
          if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
            addop = force_gimple_operand_gsi (&gsi,
                                              build1 (NEGATE_EXPR,
                                                      type, addop),
                                              true, NULL_TREE, true,
                                              GSI_SAME_STMT);
        }
      else
        {
          addop = gimple_assign_rhs1 (use_stmt);
          /* a - b * c -> (-b) * c + a */
          if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
            negate_p = !negate_p;
        }

      if (negate_p)
        mulop1 = force_gimple_operand_gsi (&gsi,
                                           build1 (NEGATE_EXPR,
                                                   type, mulop1),
                                           true, NULL_TREE, true,
                                           GSI_SAME_STMT);

      fma_stmt = gimple_build_assign (gimple_assign_lhs (use_stmt),
                                      FMA_EXPR, mulop1, op2, addop);
      gsi_replace (&gsi, fma_stmt, true);
      widen_mul_stats.fmas_inserted++;
    }

  return true;
}
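/* An illustrative sketch, not from the original sources: assuming
   -ffp-contract=fast and an fma optab, the multiplication in

       tmp = a * b;
       x = tmp + c;
       y = d - tmp;

   dies because both uses become FMAs:

       x = FMA_EXPR <a, b, c>;
       _n = -a;
       y = FMA_EXPR <_n, b, d>;  */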
/* Helper function of match_uaddsub_overflow.  Return 1
   if USE_STMT is unsigned overflow check ovf != 0 for
   STMT, -1 if USE_STMT is unsigned overflow check ovf == 0
   for STMT and 0 otherwise.  */

static int
uaddsub_overflow_check_p (gimple *stmt, gimple *use_stmt)
{
  enum tree_code ccode = ERROR_MARK;
  tree crhs1 = NULL_TREE, crhs2 = NULL_TREE;
  if (gimple_code (use_stmt) == GIMPLE_COND)
    {
      ccode = gimple_cond_code (use_stmt);
      crhs1 = gimple_cond_lhs (use_stmt);
      crhs2 = gimple_cond_rhs (use_stmt);
    }
  else if (is_gimple_assign (use_stmt))
    {
      if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
        {
          ccode = gimple_assign_rhs_code (use_stmt);
          crhs1 = gimple_assign_rhs1 (use_stmt);
          crhs2 = gimple_assign_rhs2 (use_stmt);
        }
      else if (gimple_assign_rhs_code (use_stmt) == COND_EXPR)
        {
          tree cond = gimple_assign_rhs1 (use_stmt);
          if (COMPARISON_CLASS_P (cond))
            {
              ccode = TREE_CODE (cond);
              crhs1 = TREE_OPERAND (cond, 0);
              crhs2 = TREE_OPERAND (cond, 1);
            }
          else
            return 0;
        }
      else
        return 0;
    }
  else
    return 0;

  if (TREE_CODE_CLASS (ccode) != tcc_comparison)
    return 0;

  enum tree_code code = gimple_assign_rhs_code (stmt);
  tree lhs = gimple_assign_lhs (stmt);
  tree rhs1 = gimple_assign_rhs1 (stmt);
  tree rhs2 = gimple_assign_rhs2 (stmt);

  switch (ccode)
    {
    case GT_EXPR:
    case LE_EXPR:
      /* r = a - b; r > a or r <= a
         r = a + b; a > r or a <= r or b > r or b <= r.  */
      if ((code == MINUS_EXPR && crhs1 == lhs && crhs2 == rhs1)
          || (code == PLUS_EXPR && (crhs1 == rhs1 || crhs1 == rhs2)
              && crhs2 == lhs))
        return ccode == GT_EXPR ? 1 : -1;
      break;
    case LT_EXPR:
    case GE_EXPR:
      /* r = a - b; a < r or a >= r
         r = a + b; r < a or r >= a or r < b or r >= b.  */
      if ((code == MINUS_EXPR && crhs1 == rhs1 && crhs2 == lhs)
          || (code == PLUS_EXPR && crhs1 == lhs
              && (crhs2 == rhs1 || crhs2 == rhs2)))
        return ccode == LT_EXPR ? 1 : -1;
      break;
    default:
      break;
    }
  return 0;
}
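/* An illustrative sketch, not from the original sources: for

       r = a + b;        (unsigned)
       if (r < a) ...

   the GIMPLE_COND "r < a" is an overflow check for the addition and this
   helper returns 1; the inverted check "r >= a" would return -1.  */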
/* Recognize for unsigned x
   x = y - z;
   if (x > y)
   where there are other uses of x and replace it with
   _7 = SUB_OVERFLOW (y, z);
   x = REALPART_EXPR <_7>;
   _8 = IMAGPART_EXPR <_7>;
   if (_8)
   and similarly for addition.  */

static bool
match_uaddsub_overflow (gimple_stmt_iterator *gsi, gimple *stmt,
                        enum tree_code code)
{
  tree lhs = gimple_assign_lhs (stmt);
  tree type = TREE_TYPE (lhs);
  use_operand_p use_p;
  imm_use_iterator iter;
  bool use_seen = false;
  bool ovf_use_seen = false;
  gimple *use_stmt;

  gcc_checking_assert (code == PLUS_EXPR || code == MINUS_EXPR);
  if (!INTEGRAL_TYPE_P (type)
      || !TYPE_UNSIGNED (type)
      || has_zero_uses (lhs)
      || has_single_use (lhs)
      || optab_handler (code == PLUS_EXPR ? uaddv4_optab : usubv4_optab,
                        TYPE_MODE (type)) == CODE_FOR_nothing)
    return false;

  FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
    {
      use_stmt = USE_STMT (use_p);
      if (is_gimple_debug (use_stmt))
        continue;

      if (uaddsub_overflow_check_p (stmt, use_stmt))
        ovf_use_seen = true;
      else
        use_seen = true;
      if (ovf_use_seen && use_seen)
        break;
    }

  if (!ovf_use_seen || !use_seen)
    return false;

  tree ctype = build_complex_type (type);
  tree rhs1 = gimple_assign_rhs1 (stmt);
  tree rhs2 = gimple_assign_rhs2 (stmt);
  gcall *g = gimple_build_call_internal (code == PLUS_EXPR
                                         ? IFN_ADD_OVERFLOW
                                         : IFN_SUB_OVERFLOW,
                                         2, rhs1, rhs2);
  tree ctmp = make_ssa_name (ctype);
  gimple_call_set_lhs (g, ctmp);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);
  gassign *g2 = gimple_build_assign (lhs, REALPART_EXPR,
                                     build1 (REALPART_EXPR, type, ctmp));
  gsi_replace (gsi, g2, true);
  tree ovf = make_ssa_name (type);
  g2 = gimple_build_assign (ovf, IMAGPART_EXPR,
                            build1 (IMAGPART_EXPR, type, ctmp));
  gsi_insert_after (gsi, g2, GSI_NEW_STMT);

  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
    {
      if (is_gimple_debug (use_stmt))
        continue;

      int ovf_use = uaddsub_overflow_check_p (stmt, use_stmt);
      if (ovf_use == 0)
        continue;
      if (gimple_code (use_stmt) == GIMPLE_COND)
        {
          gcond *cond_stmt = as_a <gcond *> (use_stmt);
          gimple_cond_set_lhs (cond_stmt, ovf);
          gimple_cond_set_rhs (cond_stmt, build_int_cst (type, 0));
          gimple_cond_set_code (cond_stmt, ovf_use == 1 ? NE_EXPR : EQ_EXPR);
        }
      else
        {
          gcc_checking_assert (is_gimple_assign (use_stmt));
          if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
            {
              gimple_assign_set_rhs1 (use_stmt, ovf);
              gimple_assign_set_rhs2 (use_stmt, build_int_cst (type, 0));
              gimple_assign_set_rhs_code (use_stmt,
                                          ovf_use == 1 ? NE_EXPR : EQ_EXPR);
            }
          else
            {
              gcc_checking_assert (gimple_assign_rhs_code (use_stmt)
                                   == COND_EXPR);
              tree cond = build2 (ovf_use == 1 ? NE_EXPR : EQ_EXPR,
                                  boolean_type_node, ovf,
                                  build_int_cst (type, 0));
              gimple_assign_set_rhs1 (use_stmt, cond);
            }
        }
      update_stmt (use_stmt);
    }
  return true;
}
/* Return true if target has support for divmod.  */

static bool
target_supports_divmod_p (optab divmod_optab, optab div_optab,
                          machine_mode mode)
{
  /* If target supports hardware divmod insn, use it for divmod.  */
  if (optab_handler (divmod_optab, mode) != CODE_FOR_nothing)
    return true;

  /* Check if libfunc for divmod is available.  */
  rtx libfunc = optab_libfunc (divmod_optab, mode);
  if (libfunc != NULL_RTX)
    {
      /* If optab_handler exists for div_optab, perhaps in a wider mode,
         we don't want to use the libfunc even if it exists for given mode.  */
      machine_mode div_mode;
      FOR_EACH_MODE_FROM (div_mode, mode)
        if (optab_handler (div_optab, div_mode) != CODE_FOR_nothing)
          return false;

      return targetm.expand_divmod_libfunc != NULL;
    }

  return false;
}
/* Check if STMT is a candidate for the divmod transform.  */

static bool
divmod_candidate_p (gassign *stmt)
{
  tree type = TREE_TYPE (gimple_assign_lhs (stmt));
  machine_mode mode = TYPE_MODE (type);
  optab divmod_optab, div_optab;

  if (TYPE_UNSIGNED (type))
    {
      divmod_optab = udivmod_optab;
      div_optab = udiv_optab;
    }
  else
    {
      divmod_optab = sdivmod_optab;
      div_optab = sdiv_optab;
    }

  tree op1 = gimple_assign_rhs1 (stmt);
  tree op2 = gimple_assign_rhs2 (stmt);

  /* Disable the transform if either is a constant, since division-by-constant
     may have specialized expansion.  */
  if (CONSTANT_CLASS_P (op1) || CONSTANT_CLASS_P (op2))
    return false;

  /* Exclude the case where TYPE_OVERFLOW_TRAPS (type) as that should
     expand using the [su]divv optabs.  */
  if (TYPE_OVERFLOW_TRAPS (type))
    return false;

  if (!target_supports_divmod_p (divmod_optab, div_optab, mode))
    return false;

  return true;
}
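/* An illustrative sketch, not from the original sources: a typical
   candidate is the variable-by-variable pair

       unsigned q = a / b;
       unsigned r = a % b;

   whereas a constant divisor is rejected above, since expand usually
   has a cheaper multiply-based sequence for division by a constant.  */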
/* This function looks for:
   t1 = a TRUNC_DIV_EXPR b;
   t2 = a TRUNC_MOD_EXPR b;
   and transforms it to the following sequence:
   complex_tmp = DIVMOD (a, b);
   t1 = REALPART_EXPR (complex_tmp);
   t2 = IMAGPART_EXPR (complex_tmp);
   For conditions enabling the transform see divmod_candidate_p().

   The pass has three parts:
   1) Find top_stmt which is trunc_div or trunc_mod stmt and dominates all
      other trunc_div_expr and trunc_mod_expr stmts.
   2) Add top_stmt and all trunc_div and trunc_mod stmts dominated by top_stmt
      to stmts vector.
   3) Insert DIVMOD call just before top_stmt and update entries in
      stmts vector to use return value of DIVMOD (REALPART_EXPR for div,
      IMAGPART_EXPR for mod).  */

static bool
convert_to_divmod (gassign *stmt)
{
  if (stmt_can_throw_internal (stmt)
      || !divmod_candidate_p (stmt))
    return false;

  tree op1 = gimple_assign_rhs1 (stmt);
  tree op2 = gimple_assign_rhs2 (stmt);

  imm_use_iterator use_iter;
  gimple *use_stmt;
  auto_vec<gimple *> stmts;

  gimple *top_stmt = stmt;
  basic_block top_bb = gimple_bb (stmt);

  /* Part 1: Try to set top_stmt to "topmost" stmt that dominates
     at least stmt and possibly other trunc_div/trunc_mod stmts
     having same operands as stmt.  */

  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, op1)
    {
      if (is_gimple_assign (use_stmt)
          && (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR
              || gimple_assign_rhs_code (use_stmt) == TRUNC_MOD_EXPR)
          && operand_equal_p (op1, gimple_assign_rhs1 (use_stmt), 0)
          && operand_equal_p (op2, gimple_assign_rhs2 (use_stmt), 0))
        {
          if (stmt_can_throw_internal (use_stmt))
            continue;

          basic_block bb = gimple_bb (use_stmt);

          if (bb == top_bb)
            {
              if (gimple_uid (use_stmt) < gimple_uid (top_stmt))
                top_stmt = use_stmt;
            }
          else if (dominated_by_p (CDI_DOMINATORS, top_bb, bb))
            {
              top_bb = bb;
              top_stmt = use_stmt;
            }
        }
    }

  tree top_op1 = gimple_assign_rhs1 (top_stmt);
  tree top_op2 = gimple_assign_rhs2 (top_stmt);

  stmts.safe_push (top_stmt);
  bool div_seen = (gimple_assign_rhs_code (top_stmt) == TRUNC_DIV_EXPR);

  /* Part 2: Add all trunc_div/trunc_mod statements dominated by top_bb
     to stmts vector.  The 2nd loop will always add stmt to stmts vector, since
     gimple_bb (top_stmt) dominates gimple_bb (stmt), so the
     2nd loop ends up adding at least a single trunc_mod_expr stmt.  */

  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, top_op1)
    {
      if (is_gimple_assign (use_stmt)
          && (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR
              || gimple_assign_rhs_code (use_stmt) == TRUNC_MOD_EXPR)
          && operand_equal_p (top_op1, gimple_assign_rhs1 (use_stmt), 0)
          && operand_equal_p (top_op2, gimple_assign_rhs2 (use_stmt), 0))
        {
          if (use_stmt == top_stmt
              || stmt_can_throw_internal (use_stmt)
              || !dominated_by_p (CDI_DOMINATORS, gimple_bb (use_stmt),
                                  top_bb))
            continue;

          stmts.safe_push (use_stmt);
          if (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR)
            div_seen = true;
        }
    }

  if (!div_seen)
    return false;

  /* Part 3: Create libcall to internal fn DIVMOD:
     divmod_tmp = DIVMOD (op1, op2).  */

  gcall *call_stmt = gimple_build_call_internal (IFN_DIVMOD, 2, op1, op2);
  tree res = make_temp_ssa_name (build_complex_type (TREE_TYPE (op1)),
                                 call_stmt, "divmod_tmp");
  gimple_call_set_lhs (call_stmt, res);
  /* We rejected throwing statements above.  */
  gimple_call_set_nothrow (call_stmt, true);

  /* Insert the call before top_stmt.  */
  gimple_stmt_iterator top_stmt_gsi = gsi_for_stmt (top_stmt);
  gsi_insert_before (&top_stmt_gsi, call_stmt, GSI_SAME_STMT);

  widen_mul_stats.divmod_calls_inserted++;

  /* Update all statements in stmts vector:
     lhs = op1 TRUNC_DIV_EXPR op2 -> lhs = REALPART_EXPR<divmod_tmp>
     lhs = op1 TRUNC_MOD_EXPR op2 -> lhs = IMAGPART_EXPR<divmod_tmp>.  */

  for (unsigned i = 0; stmts.iterate (i, &use_stmt); ++i)
    {
      tree new_rhs;

      switch (gimple_assign_rhs_code (use_stmt))
        {
        case TRUNC_DIV_EXPR:
          new_rhs = fold_build1 (REALPART_EXPR, TREE_TYPE (op1), res);
          break;

        case TRUNC_MOD_EXPR:
          new_rhs = fold_build1 (IMAGPART_EXPR, TREE_TYPE (op1), res);
          break;

        default:
          gcc_unreachable ();
        }

      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      gimple_assign_set_rhs_from_tree (&gsi, new_rhs);
      update_stmt (use_stmt);
    }

  return true;
}
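/* An illustrative sketch, not from the original sources: for the pair

       q = a / b;
       r = a % b;

   the dominating statement is replaced and the other rewritten to

       divmod_tmp = DIVMOD (a, b);
       q = REALPART_EXPR <divmod_tmp>;
       r = IMAGPART_EXPR <divmod_tmp>;

   so a single divmod instruction or libcall is emitted at expansion.  */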
/* Find integer multiplications where the operands are extended from
   smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR
   where appropriate.  */

namespace {

const pass_data pass_data_optimize_widening_mul =
{
  GIMPLE_PASS, /* type */
  "widening_mul", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_WIDEN_MUL, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_optimize_widening_mul : public gimple_opt_pass
{
public:
  pass_optimize_widening_mul (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_optimize_widening_mul, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return flag_expensive_optimizations && optimize;
    }

  virtual unsigned int execute (function *);

}; // class pass_optimize_widening_mul
unsigned int
pass_optimize_widening_mul::execute (function *fun)
{
  basic_block bb;
  bool cfg_changed = false;

  memset (&widen_mul_stats, 0, sizeof (widen_mul_stats));
  calculate_dominance_info (CDI_DOMINATORS);
  renumber_gimple_stmt_uids ();

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
        {
          gimple *stmt = gsi_stmt (gsi);
          enum tree_code code;

          if (is_gimple_assign (stmt))
            {
              code = gimple_assign_rhs_code (stmt);
              switch (code)
                {
                case MULT_EXPR:
                  if (!convert_mult_to_widen (stmt, &gsi)
                      && !convert_expand_mult_copysign (stmt, &gsi)
                      && convert_mult_to_fma (stmt,
                                              gimple_assign_rhs1 (stmt),
                                              gimple_assign_rhs2 (stmt)))
                    {
                      gsi_remove (&gsi, true);
                      release_defs (stmt);
                      continue;
                    }
                  break;

                case PLUS_EXPR:
                case MINUS_EXPR:
                  if (!convert_plusminus_to_widen (&gsi, stmt, code))
                    match_uaddsub_overflow (&gsi, stmt, code);
                  break;

                case TRUNC_MOD_EXPR:
                  convert_to_divmod (as_a <gassign *> (stmt));
                  break;

                default:;
                }
            }
          else if (is_gimple_call (stmt)
                   && gimple_call_lhs (stmt))
            {
              tree fndecl = gimple_call_fndecl (stmt);
              if (fndecl
                  && gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
                {
                  switch (DECL_FUNCTION_CODE (fndecl))
                    {
                    case BUILT_IN_POWF:
                    case BUILT_IN_POW:
                    case BUILT_IN_POWL:
                      if (TREE_CODE (gimple_call_arg (stmt, 1)) == REAL_CST
                          && real_equal
                               (&TREE_REAL_CST (gimple_call_arg (stmt, 1)),
                                &dconst2)
                          && convert_mult_to_fma (stmt,
                                                  gimple_call_arg (stmt, 0),
                                                  gimple_call_arg (stmt, 0)))
                        {
                          unlink_stmt_vdef (stmt);
                          if (gsi_remove (&gsi, true)
                              && gimple_purge_dead_eh_edges (bb))
                            cfg_changed = true;
                          release_defs (stmt);
                          continue;
                        }
                      break;

                    default:;
                    }
                }
            }
          gsi_next (&gsi);
        }
    }

  statistics_counter_event (fun, "widening multiplications inserted",
                            widen_mul_stats.widen_mults_inserted);
  statistics_counter_event (fun, "widening maccs inserted",
                            widen_mul_stats.maccs_inserted);
  statistics_counter_event (fun, "fused multiply-adds inserted",
                            widen_mul_stats.fmas_inserted);
  statistics_counter_event (fun, "divmod calls inserted",
                            widen_mul_stats.divmod_calls_inserted);

  return cfg_changed ? TODO_cleanup_cfg : 0;
}
} // anon namespace

gimple_opt_pass *
make_pass_optimize_widening_mul (gcc::context *ctxt)
{
  return new pass_optimize_widening_mul (ctxt);
}