/* Global, SSA-based optimizations using mathematical identities.
   Copyright (C) 2005-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Currently, the only mini-pass in this file tries to CSE reciprocal
   operations.  These are common in sequences such as this one:

	modulus = sqrt(x*x + y*y + z*z);
	x = x / modulus;
	y = y / modulus;
	z = z / modulus;

   that can be optimized to

	modulus = sqrt(x*x + y*y + z*z);
	rmodulus = 1.0 / modulus;
	x = x * rmodulus;
	y = y * rmodulus;
	z = z * rmodulus;

   We do this for loop invariant divisors, and with this pass whenever
   we notice that a division has the same divisor multiple times.

   Of course, like in PRE, we don't insert a division if a dominator
   already has one.  However, this cannot be done as an extension of
   PRE for several reasons.
   First of all, with some experiments it was found out that the
   transformation is not always useful if there are only two divisions
   by the same divisor.  This is probably because modern processors
   can pipeline the divisions; on older, in-order processors it should
   still be effective to optimize two divisions by the same number.
   We make this a param, and it shall be called N in the remainder of
   this comment.
   Second, if trapping math is active, we have less freedom on where
   to insert divisions: we can only do so in basic blocks that already
   contain one.  (If divisions don't trap, instead, we can insert
   divisions elsewhere, which will be in blocks that are common dominators
   of those that have the division).
   We really don't want to compute the reciprocal unless a division will
   be found.  To do this, we won't insert the division in a basic block
   that has less than N divisions *post-dominating* it.
   The algorithm constructs a subset of the dominator tree, holding the
   blocks containing the divisions and the common dominators to them,
   and walks it twice.  The first walk is in post-order, and it annotates
   each block with the number of divisions that post-dominate it: this
   gives information on where divisions can be inserted profitably.
   The second walk is in pre-order, and it inserts divisions as explained
   above, and replaces divisions by multiplications.
   In the best case, the cost of the pass is O(n_statements).  In the
   worst case, the cost is due to creating the dominator tree subset,
   with a cost of O(n_basic_blocks ^ 2); however this can only happen
   for n_statements / n_basic_blocks statements.  So, the amortized cost
   of creating the dominator tree subset is O(n_basic_blocks) and the
   worst-case cost of the pass is O(n_statements * n_basic_blocks).

   More practically, the cost will be small because there are few
   divisions, and they tend to be in the same basic block, so insert_bb
   is called very few times.
   If we did this using domwalk.c, an efficient implementation would have
   to work on all the variables in a single pass, because we could not
   work on just a subset of the dominator tree, as we do now, and the
   cost would also be something like O(n_statements * n_basic_blocks).
   The data structures would be more complex in order to work on all the
   variables in a single pass.  */
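/* A worked example (illustrative only, not taken from the sources): let
   the parameter N be 3 and let blocks B2 -> B3 -> B4 form a straight
   line, each containing one division by the same SSA name d defined
   above B2:

	B2:  x2 = a / d;
	B3:  x3 = b / d;
	B4:  x4 = c / d;

   The subset of the dominator tree built by the pass is the chain
   B2 -> B3 -> B4.  The post-order walk annotates B4 with 1, B3 with 2
   and B2 with 3 divisions post-dominating them (post-dominance holds
   because the blocks form a straight line).  B2 reaches the threshold
   and already contains a division, so the pre-order walk inserts
   recip = 1.0 / d just before it and rewrites all three divisions into
   multiplications by recip.  */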
#include "coretypes.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "optabs-tree.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimplify.h"
#include "gimplify-me.h"
#include "stor-layout.h"
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "builtins.h"
#include "internal-fn.h"
#include "case-cfn-macros.h"
/* This structure represents one basic block that either computes a
   division, or is a common dominator for basic blocks that compute a
   division.  */
struct occurrence
{
  /* The basic block represented by this structure.  */
  basic_block bb;

  /* If non-NULL, the SSA_NAME holding the definition for a reciprocal
     inserted in BB.  */
  tree recip_def;

  /* If non-NULL, the GIMPLE_ASSIGN for a reciprocal computation that
     was inserted in BB.  */
  gimple *recip_def_stmt;

  /* Pointer to a list of "struct occurrence"s for blocks dominated
     by BB.  */
  struct occurrence *children;

  /* Pointer to the next "struct occurrence"s in the list of blocks
     sharing a common dominator.  */
  struct occurrence *next;

  /* The number of divisions that are in BB before compute_merit.  The
     number of divisions that are in BB or post-dominate it after
     compute_merit.  */
  int num_divisions;

  /* True if the basic block has a division, false if it is a common
     dominator for basic blocks that do.  If it is false and trapping
     math is active, BB is not a candidate for inserting a reciprocal.  */
  bool bb_has_division;
};
static struct
{
  /* Number of 1.0/X ops inserted.  */
  int rdivs_inserted;

  /* Number of 1.0/FUNC ops inserted.  */
  int rfuncs_inserted;
} reciprocal_stats;

static struct
{
  /* Number of cexpi calls inserted.  */
  int inserted;
} sincos_stats;

static struct
{
  /* Number of hand-written 16-bit nop / bswaps found.  */
  int found_16bit;

  /* Number of hand-written 32-bit nop / bswaps found.  */
  int found_32bit;

  /* Number of hand-written 64-bit nop / bswaps found.  */
  int found_64bit;
} nop_stats, bswap_stats;
static struct
{
  /* Number of widening multiplication ops inserted.  */
  int widen_mults_inserted;

  /* Number of integer multiply-and-accumulate ops inserted.  */
  int maccs_inserted;

  /* Number of fp fused multiply-add ops inserted.  */
  int fmas_inserted;
} widen_mul_stats;
/* The instance of "struct occurrence" representing the highest
   interesting block in the dominator tree.  */
static struct occurrence *occ_head;

/* Allocation pool for getting instances of "struct occurrence".  */
static object_allocator<occurrence> *occ_pool;
/* Allocate and return a new struct occurrence for basic block BB, and
   whose children list is headed by CHILDREN.  */
static struct occurrence *
occ_new (basic_block bb, struct occurrence *children)
{
  struct occurrence *occ;

  bb->aux = occ = occ_pool->allocate ();
  memset (occ, 0, sizeof (struct occurrence));

  occ->bb = bb;
  occ->children = children;
  return occ;
}
/* Insert NEW_OCC into our subset of the dominator tree.  P_HEAD points to a
   list of "struct occurrence"s, one per basic block, having IDOM as
   their common dominator.

   We try to insert NEW_OCC as deep as possible in the tree, and we also
   insert any other block that is a common dominator for BB and one
   block already in the tree.  */

static void
insert_bb (struct occurrence *new_occ, basic_block idom,
	   struct occurrence **p_head)
{
  struct occurrence *occ, **p_occ;

  for (p_occ = p_head; (occ = *p_occ) != NULL; )
    {
      basic_block bb = new_occ->bb, occ_bb = occ->bb;
      basic_block dom = nearest_common_dominator (CDI_DOMINATORS, occ_bb, bb);
      if (dom == bb)
	{
	  /* BB dominates OCC_BB.  OCC becomes NEW_OCC's child: remove OCC
	     from the list.  */
	  *p_occ = occ->next;
	  occ->next = new_occ->children;
	  new_occ->children = occ;

	  /* Try the next block (it may as well be dominated by BB).  */
	}

      else if (dom == occ_bb)
	{
	  /* OCC_BB dominates BB.  Tail recurse to look deeper.  */
	  insert_bb (new_occ, dom, &occ->children);
	  return;
	}

      else if (dom != idom)
	{
	  gcc_assert (!dom->aux);

	  /* There is a dominator between IDOM and BB, add it and make
	     two children out of NEW_OCC and OCC.  First, remove OCC from
	     the list.  */
	  *p_occ = occ->next;
	  new_occ->next = occ;
	  occ->next = NULL;

	  /* None of the previous blocks has DOM as a dominator: if we tail
	     recursed, we would reexamine them uselessly.  Just switch BB with
	     DOM, and go on looking for blocks dominated by DOM.  */
	  new_occ = occ_new (dom, new_occ);
	}

      else
	{
	  /* Nothing special, go on with the next element.  */
	  p_occ = &occ->next;
	}
    }

  /* No place was found as a child of IDOM.  Make BB a sibling of IDOM.  */
  new_occ->next = *p_head;
  *p_head = new_occ;
}
/* Register that we found a division in BB.  */

static inline void
register_division_in (basic_block bb)
{
  struct occurrence *occ;

  occ = (struct occurrence *) bb->aux;
  if (!occ)
    {
      occ = occ_new (bb, NULL);
      insert_bb (occ, ENTRY_BLOCK_PTR_FOR_FN (cfun), &occ_head);
    }

  occ->bb_has_division = true;
  occ->num_divisions++;
}
/* Compute the number of divisions that postdominate each block in OCC and
   its children.  */

static void
compute_merit (struct occurrence *occ)
{
  struct occurrence *occ_child;
  basic_block dom = occ->bb;

  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    {
      basic_block bb;
      if (occ_child->children)
	compute_merit (occ_child);

      if (flag_exceptions)
	bb = single_noncomplex_succ (dom);
      else
	bb = dom;

      if (dominated_by_p (CDI_POST_DOMINATORS, bb, occ_child->bb))
	occ->num_divisions += occ_child->num_divisions;
    }
}
/* Return whether USE_STMT is a floating-point division by DEF.  */

static inline bool
is_division_by (gimple *use_stmt, tree def)
{
  return is_gimple_assign (use_stmt)
	 && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
	 && gimple_assign_rhs2 (use_stmt) == def
	 /* Do not recognize x / x as valid division, as we are getting
	    confused later by replacing all immediate uses x in such
	    a stmt.  */
	 && gimple_assign_rhs1 (use_stmt) != def;
}
/* Walk the subset of the dominator tree rooted at OCC, setting the
   RECIP_DEF field to a definition of 1.0 / DEF that can be used in
   the given basic block.  The field may be left NULL, of course,
   if it is not possible or profitable to do the optimization.

   DEF_BSI is an iterator pointing at the statement defining DEF.
   If RECIP_DEF is set, a dominator already has a computation that can
   be used.  */

static void
insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ,
		    tree def, tree recip_def, int threshold)
{
  tree type;
  gassign *new_stmt;
  gimple_stmt_iterator gsi;
  struct occurrence *occ_child;

  if (!recip_def
      && (occ->bb_has_division || !flag_trapping_math)
      && occ->num_divisions >= threshold)
    {
      /* Make a variable with the replacement and substitute it.  */
      type = TREE_TYPE (def);
      recip_def = create_tmp_reg (type, "reciptmp");
      new_stmt = gimple_build_assign (recip_def, RDIV_EXPR,
				      build_one_cst (type), def);

      if (occ->bb_has_division)
	{
	  /* Case 1: insert before an existing division.  */
	  gsi = gsi_after_labels (occ->bb);
	  while (!gsi_end_p (gsi) && !is_division_by (gsi_stmt (gsi), def))
	    gsi_next (&gsi);

	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	}
      else if (def_gsi && occ->bb == def_gsi->bb)
	{
	  /* Case 2: insert right after the definition.  Note that this will
	     never happen if the definition statement can throw, because in
	     that case the sole successor of the statement's basic block will
	     dominate all the uses as well.  */
	  gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
	}
      else
	{
	  /* Case 3: insert in a basic block not containing defs/uses.  */
	  gsi = gsi_after_labels (occ->bb);
	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	}

      reciprocal_stats.rdivs_inserted++;

      occ->recip_def_stmt = new_stmt;
    }

  occ->recip_def = recip_def;
  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    insert_reciprocals (def_gsi, occ_child, def, recip_def, threshold);
}
/* Replace the division at USE_P with a multiplication by the reciprocal, if
   possible.  */

static inline void
replace_reciprocal (use_operand_p use_p)
{
  gimple *use_stmt = USE_STMT (use_p);
  basic_block bb = gimple_bb (use_stmt);
  struct occurrence *occ = (struct occurrence *) bb->aux;

  if (optimize_bb_for_speed_p (bb)
      && occ->recip_def && use_stmt != occ->recip_def_stmt)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
      SET_USE (use_p, occ->recip_def);
      fold_stmt_inplace (&gsi);
      update_stmt (use_stmt);
    }
}
/* Free OCC and return one more "struct occurrence" to be freed.  */

static struct occurrence *
free_bb (struct occurrence *occ)
{
  struct occurrence *child, *next;

  /* First get the two pointers hanging off OCC.  */
  next = occ->next;
  child = occ->children;
  occ->bb->aux = NULL;
  occ_pool->remove (occ);

  /* Now ensure that we don't recurse unless it is necessary.  */
  if (!child)
    return next;
  else
    {
      while (next)
	next = free_bb (next);

      return child;
    }
}
/* Look for floating-point divisions among DEF's uses, and try to
   replace them by multiplications with the reciprocal.  Add
   as many statements computing the reciprocal as needed.

   DEF must be a GIMPLE register of a floating-point type.  */

static void
execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
{
  use_operand_p use_p;
  imm_use_iterator use_iter;
  struct occurrence *occ;
  int count = 0, threshold;

  gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && is_gimple_reg (def));

  FOR_EACH_IMM_USE_FAST (use_p, use_iter, def)
    {
      gimple *use_stmt = USE_STMT (use_p);
      if (is_division_by (use_stmt, def))
	{
	  register_division_in (gimple_bb (use_stmt));
	  count++;
	}
    }

  /* Do the expensive part only if we can hope to optimize something.  */
  threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));
  if (count >= threshold)
    {
      gimple *use_stmt;
      for (occ = occ_head; occ; occ = occ->next)
	{
	  compute_merit (occ);
	  insert_reciprocals (def_gsi, occ, def, NULL, threshold);
	}

      FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, def)
	{
	  if (is_division_by (use_stmt, def))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
		replace_reciprocal (use_p);
	    }
	}
    }

  for (occ = occ_head; occ; )
    occ = free_bb (occ);

  occ_head = NULL;
}
/* Return an internal function that implements the reciprocal of CALL,
   or IFN_LAST if there is no such function that the target supports.  */

static internal_fn
internal_fn_reciprocal (gcall *call)
{
  internal_fn ifn;

  switch (gimple_call_combined_fn (call))
    {
    CASE_CFN_SQRT:
      ifn = IFN_RSQRT;
      break;

    default:
      return IFN_LAST;
    }

  tree_pair types = direct_internal_fn_types (ifn, call);
  if (!direct_internal_fn_supported_p (ifn, types, OPTIMIZE_FOR_SPEED))
    return IFN_LAST;

  return ifn;
}
/* Go through all the floating-point SSA_NAMEs, and call
   execute_cse_reciprocals_1 on each of them.  */

namespace {

const pass_data pass_data_cse_reciprocals =
{
  GIMPLE_PASS, /* type */
  "recip", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_cse_reciprocals : public gimple_opt_pass
{
public:
  pass_cse_reciprocals (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cse_reciprocals, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return optimize && flag_reciprocal_math; }
  virtual unsigned int execute (function *);

}; // class pass_cse_reciprocals
unsigned int
pass_cse_reciprocals::execute (function *fun)
{
  basic_block bb;
  tree arg;

  occ_pool = new object_allocator<occurrence> ("dominators for recip");

  memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
  calculate_dominance_info (CDI_DOMINATORS);
  calculate_dominance_info (CDI_POST_DOMINATORS);

  if (flag_checking)
    FOR_EACH_BB_FN (bb, fun)
      gcc_assert (!bb->aux);

  for (arg = DECL_ARGUMENTS (fun->decl); arg; arg = DECL_CHAIN (arg))
    if (FLOAT_TYPE_P (TREE_TYPE (arg))
	&& is_gimple_reg (arg))
      {
	tree name = ssa_default_def (fun, arg);
	if (name)
	  execute_cse_reciprocals_1 (NULL, name);
      }

  FOR_EACH_BB_FN (bb, fun)
    {
      tree def;

      for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gphi *phi = gsi.phi ();
	  def = PHI_RESULT (phi);
	  if (! virtual_operand_p (def)
	      && FLOAT_TYPE_P (TREE_TYPE (def)))
	    execute_cse_reciprocals_1 (NULL, def);
	}

      for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);

	  if (gimple_has_lhs (stmt)
	      && (def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF)) != NULL
	      && FLOAT_TYPE_P (TREE_TYPE (def))
	      && TREE_CODE (def) == SSA_NAME)
	    execute_cse_reciprocals_1 (&gsi, def);
	}

      if (optimize_bb_for_size_p (bb))
	continue;

      /* Scan for a/func(b) and convert it to reciprocal a*rfunc(b).  */
      for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);

	  if (is_gimple_assign (stmt)
	      && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
	    {
	      tree arg1 = gimple_assign_rhs2 (stmt);
	      gimple *stmt1;

	      if (TREE_CODE (arg1) != SSA_NAME)
		continue;

	      stmt1 = SSA_NAME_DEF_STMT (arg1);

	      if (is_gimple_call (stmt1)
		  && gimple_call_lhs (stmt1))
		{
		  bool fail;
		  imm_use_iterator ui;
		  use_operand_p use_p;
		  tree fndecl = NULL_TREE;

		  gcall *call = as_a <gcall *> (stmt1);
		  internal_fn ifn = internal_fn_reciprocal (call);
		  if (ifn == IFN_LAST)
		    {
		      fndecl = gimple_call_fndecl (call);
		      if (!fndecl
			  || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_MD)
			continue;
		      fndecl = targetm.builtin_reciprocal (fndecl);
		      if (!fndecl)
			continue;
		    }

		  /* Check that all uses of the SSA name are divisions,
		     otherwise replacing the defining statement will do
		     the wrong thing.  */
		  fail = false;
		  FOR_EACH_IMM_USE_FAST (use_p, ui, arg1)
		    {
		      gimple *stmt2 = USE_STMT (use_p);
		      if (is_gimple_debug (stmt2))
			continue;
		      if (!is_gimple_assign (stmt2)
			  || gimple_assign_rhs_code (stmt2) != RDIV_EXPR
			  || gimple_assign_rhs1 (stmt2) == arg1
			  || gimple_assign_rhs2 (stmt2) != arg1)
			{
			  fail = true;
			  break;
			}
		    }
		  if (fail)
		    continue;

		  gimple_replace_ssa_lhs (call, arg1);
		  if (gimple_call_internal_p (call) != (ifn != IFN_LAST))
		    {
		      auto_vec<tree, 4> args;
		      for (unsigned int i = 0;
			   i < gimple_call_num_args (call); i++)
			args.safe_push (gimple_call_arg (call, i));
		      gcall *stmt2;
		      if (ifn == IFN_LAST)
			stmt2 = gimple_build_call_vec (fndecl, args);
		      else
			stmt2 = gimple_build_call_internal_vec (ifn, args);
		      gimple_call_set_lhs (stmt2, arg1);
		      if (gimple_vdef (call))
			{
			  gimple_set_vdef (stmt2, gimple_vdef (call));
			  SSA_NAME_DEF_STMT (gimple_vdef (stmt2)) = stmt2;
			}
		      gimple_set_vuse (stmt2, gimple_vuse (call));
		      gimple_stmt_iterator gsi2 = gsi_for_stmt (call);
		      gsi_replace (&gsi2, stmt2, true);
		    }
		  else
		    {
		      if (ifn == IFN_LAST)
			gimple_call_set_fndecl (call, fndecl);
		      else
			gimple_call_set_internal_fn (call, ifn);
		    }
		  reciprocal_stats.rfuncs_inserted++;

		  FOR_EACH_IMM_USE_STMT (stmt, ui, arg1)
		    {
		      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
		      gimple_assign_set_rhs_code (stmt, MULT_EXPR);
		      fold_stmt_inplace (&gsi);
		      update_stmt (stmt);
		    }
		}
	    }
	}
    }

  statistics_counter_event (fun, "reciprocal divs inserted",
			    reciprocal_stats.rdivs_inserted);
  statistics_counter_event (fun, "reciprocal functions inserted",
			    reciprocal_stats.rfuncs_inserted);

  free_dominance_info (CDI_DOMINATORS);
  free_dominance_info (CDI_POST_DOMINATORS);
  delete occ_pool;
  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_cse_reciprocals (gcc::context *ctxt)
{
  return new pass_cse_reciprocals (ctxt);
}
/* Records an occurrence at statement USE_STMT in the vector of trees
   STMTS if it is dominated by *TOP_BB or dominates it or this basic block
   is not yet initialized.  Returns true if the occurrence was pushed on
   the vector.  Adjusts *TOP_BB to be the basic block dominating all
   statements in the vector.  */

static bool
maybe_record_sincos (vec<gimple *> *stmts,
		     basic_block *top_bb, gimple *use_stmt)
{
  basic_block use_bb = gimple_bb (use_stmt);
  if (*top_bb
      && (*top_bb == use_bb
	  || dominated_by_p (CDI_DOMINATORS, use_bb, *top_bb)))
    stmts->safe_push (use_stmt);
  else if (!*top_bb
	   || dominated_by_p (CDI_DOMINATORS, *top_bb, use_bb))
    {
      stmts->safe_push (use_stmt);
      *top_bb = use_bb;
    }
  else
    return false;

  return true;
}
/* Look for sin, cos and cexpi calls with the same argument NAME and
   create a single call to cexpi CSEing the result in this case.
   We first walk over all immediate uses of the argument collecting
   statements that we can CSE in a vector and in a second pass replace
   the statement rhs with a REALPART or IMAGPART expression on the
   result of the cexpi call we insert before the use statement that
   dominates all other candidates.  */

static bool
execute_cse_sincos_1 (tree name)
{
  gimple_stmt_iterator gsi;
  imm_use_iterator use_iter;
  tree fndecl, res, type;
  gimple *def_stmt, *use_stmt, *stmt;
  int seen_cos = 0, seen_sin = 0, seen_cexpi = 0;
  auto_vec<gimple *> stmts;
  basic_block top_bb = NULL;
  int i;
  bool cfg_changed = false;

  type = TREE_TYPE (name);
  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, name)
    {
      if (gimple_code (use_stmt) != GIMPLE_CALL
	  || !gimple_call_lhs (use_stmt))
	continue;

      switch (gimple_call_combined_fn (use_stmt))
	{
	CASE_CFN_COS:
	  seen_cos |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	CASE_CFN_SIN:
	  seen_sin |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	CASE_CFN_CEXPI:
	  seen_cexpi |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	default:;
	}
    }

  if (seen_cos + seen_sin + seen_cexpi <= 1)
    return false;

  /* Simply insert cexpi at the beginning of top_bb but not earlier than
     the name def statement.  */
  fndecl = mathfn_built_in (type, BUILT_IN_CEXPI);
  if (!fndecl)
    return false;
  stmt = gimple_build_call (fndecl, 1, name);
  res = make_temp_ssa_name (TREE_TYPE (TREE_TYPE (fndecl)), stmt, "sincostmp");
  gimple_call_set_lhs (stmt, res);

  def_stmt = SSA_NAME_DEF_STMT (name);
  if (!SSA_NAME_IS_DEFAULT_DEF (name)
      && gimple_code (def_stmt) != GIMPLE_PHI
      && gimple_bb (def_stmt) == top_bb)
    {
      gsi = gsi_for_stmt (def_stmt);
      gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
    }
  else
    {
      gsi = gsi_after_labels (top_bb);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
    }
  sincos_stats.inserted++;

  /* And adjust the recorded old call sites.  */
  for (i = 0; stmts.iterate (i, &use_stmt); ++i)
    {
      tree rhs = NULL_TREE;

      switch (gimple_call_combined_fn (use_stmt))
	{
	CASE_CFN_COS:
	  rhs = fold_build1 (REALPART_EXPR, type, res);
	  break;

	CASE_CFN_SIN:
	  rhs = fold_build1 (IMAGPART_EXPR, type, res);
	  break;

	CASE_CFN_CEXPI:
	  rhs = res;
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Replace call with a copy.  */
      stmt = gimple_build_assign (gimple_call_lhs (use_stmt), rhs);

      gsi = gsi_for_stmt (use_stmt);
      gsi_replace (&gsi, stmt, true);
      if (gimple_purge_dead_eh_edges (gimple_bb (stmt)))
	cfg_changed = true;
    }

  return cfg_changed;
}
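/* As an illustration (not taken from the sources), for a use of NAME = x
   such as

	s = sin (x);
	c = cos (x);

   the function above inserts a single call and rewrites both uses:

	sincostmp = cexpi (x);
	s = __imag sincostmp;
	c = __real sincostmp;

   since cexpi (x) computes cos (x) + i*sin (x).  */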
/* To evaluate powi(x,n), the floating point value x raised to the
   constant integer exponent n, we use a hybrid algorithm that
   combines the "window method" with look-up tables.  For an
   introduction to exponentiation algorithms and "addition chains",
   see section 4.6.3, "Evaluation of Powers" of Donald E. Knuth,
   "Seminumerical Algorithms", Vol. 2, "The Art of Computer Programming",
   3rd Edition, 1998, and Daniel M. Gordon, "A Survey of Fast Exponentiation
   Methods", Journal of Algorithms, Vol. 27, pp. 129-146, 1998.  */

/* Provide a default value for POWI_MAX_MULTS, the maximum number of
   multiplications to inline before calling the system library's pow
   function.  powi(x,n) requires at worst 2*bits(n)-2 multiplications,
   so this default never requires calling pow, powf or powl.  */
#ifndef POWI_MAX_MULTS
#define POWI_MAX_MULTS  (2*HOST_BITS_PER_WIDE_INT-2)
#endif
/* The size of the "optimal power tree" lookup table.  All
   exponents less than this value are simply looked up in the
   powi_table below.  This threshold is also used to size the
   cache of pseudo registers that hold intermediate results.  */
#define POWI_TABLE_SIZE 256

/* The size, in bits of the window, used in the "window method"
   exponentiation algorithm.  This is equivalent to a radix of
   (1<<POWI_WINDOW_SIZE) in the corresponding "m-ary method".  */
#define POWI_WINDOW_SIZE 3
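/* An illustration of the window method (implied by the code below rather
   than spelled out in it): with POWI_WINDOW_SIZE == 3, whenever the
   exponent is odd, x**n is rewritten as (x**(n >> 3))**8 * x**(n & 7),
   which costs three squarings plus one multiplication plus the
   table-driven cost of the small exponent n & 7; even exponents cost a
   single squaring per halving.  */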
/* The following table is an efficient representation of an
   "optimal power tree".  For each value, i, the corresponding
   value, j, in the table states that an optimal evaluation
   sequence for calculating pow(x,i) can be found by evaluating
   pow(x,j)*pow(x,i-j).  An optimal power tree for the first
   100 integers is given in Knuth's "Seminumerical algorithms".  */

static const unsigned char powi_table[POWI_TABLE_SIZE] =
  {
      0,   1,   1,   2,   2,   3,   3,   4,  /*   0 -   7 */
      4,   6,   5,   6,   6,  10,   7,   9,  /*   8 -  15 */
      8,  16,   9,  16,  10,  12,  11,  13,  /*  16 -  23 */
     12,  17,  13,  18,  14,  24,  15,  26,  /*  24 -  31 */
     16,  17,  17,  19,  18,  33,  19,  26,  /*  32 -  39 */
     20,  25,  21,  40,  22,  27,  23,  44,  /*  40 -  47 */
     24,  32,  25,  34,  26,  29,  27,  44,  /*  48 -  55 */
     28,  31,  29,  34,  30,  60,  31,  36,  /*  56 -  63 */
     32,  64,  33,  34,  34,  46,  35,  37,  /*  64 -  71 */
     36,  65,  37,  50,  38,  48,  39,  69,  /*  72 -  79 */
     40,  49,  41,  43,  42,  51,  43,  58,  /*  80 -  87 */
     44,  64,  45,  47,  46,  59,  47,  76,  /*  88 -  95 */
     48,  65,  49,  66,  50,  67,  51,  66,  /*  96 - 103 */
     52,  70,  53,  74,  54, 104,  55,  74,  /* 104 - 111 */
     56,  64,  57,  69,  58,  78,  59,  68,  /* 112 - 119 */
     60,  61,  61,  80,  62,  75,  63,  68,  /* 120 - 127 */
     64,  65,  65, 128,  66, 129,  67,  90,  /* 128 - 135 */
     68,  73,  69, 131,  70,  94,  71,  88,  /* 136 - 143 */
     72, 128,  73,  98,  74, 132,  75, 121,  /* 144 - 151 */
     76, 102,  77, 124,  78, 132,  79, 106,  /* 152 - 159 */
     80,  97,  81, 160,  82,  99,  83, 134,  /* 160 - 167 */
     84,  86,  85,  95,  86, 160,  87, 100,  /* 168 - 175 */
     88, 113,  89,  98,  90, 107,  91, 122,  /* 176 - 183 */
     92, 111,  93, 102,  94, 126,  95, 150,  /* 184 - 191 */
     96, 128,  97, 130,  98, 133,  99, 195,  /* 192 - 199 */
    100, 128, 101, 123, 102, 164, 103, 138,  /* 200 - 207 */
    104, 145, 105, 146, 106, 109, 107, 149,  /* 208 - 215 */
    108, 200, 109, 146, 110, 170, 111, 157,  /* 216 - 223 */
    112, 128, 113, 130, 114, 182, 115, 132,  /* 224 - 231 */
    116, 200, 117, 132, 118, 158, 119, 206,  /* 232 - 239 */
    120, 240, 121, 162, 122, 147, 123, 152,  /* 240 - 247 */
    124, 166, 125, 214, 126, 138, 127, 153,  /* 248 - 255 */
  };
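/* For example, powi_table[7] == 4 above means that an optimal way to
   compute x**7 is x**4 * x**3.  Expanding the two factors the same way
   gives the four-multiplication chain

	t1 = x * x;		(x**2)
	t2 = t1 * x;		(x**3)
	t3 = t1 * t1;		(x**4)
	t4 = t3 * t2;		(x**7)

   (an illustration derived from the table, not code in this file).  */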
/* Return the number of multiplications required to calculate
   powi(x,n) where n is less than POWI_TABLE_SIZE.  This is a
   subroutine of powi_cost.  CACHE is an array indicating
   which exponents have already been calculated.  */

static int
powi_lookup_cost (unsigned HOST_WIDE_INT n, bool *cache)
{
  /* If we've already calculated this exponent, then this evaluation
     doesn't require any additional multiplications.  */
  if (cache[n])
    return 0;

  cache[n] = true;
  return powi_lookup_cost (n - powi_table[n], cache)
	 + powi_lookup_cost (powi_table[n], cache) + 1;
}
/* Return the number of multiplications required to calculate
   powi(x,n) for an arbitrary x, given the exponent N.  This
   function needs to be kept in sync with powi_as_mults below.  */

static int
powi_cost (HOST_WIDE_INT n)
{
  bool cache[POWI_TABLE_SIZE];
  unsigned HOST_WIDE_INT digit;
  unsigned HOST_WIDE_INT val;
  int result;

  if (n == 0)
    return 0;

  /* Ignore the reciprocal when calculating the cost.  */
  val = (n < 0) ? -n : n;

  /* Initialize the exponent cache.  */
  memset (cache, 0, POWI_TABLE_SIZE * sizeof (bool));
  cache[1] = true;

  result = 0;

  while (val >= POWI_TABLE_SIZE)
    {
      if (val & 1)
	{
	  digit = val & ((1 << POWI_WINDOW_SIZE) - 1);
	  result += powi_lookup_cost (digit, cache)
		    + POWI_WINDOW_SIZE + 1;
	  val >>= POWI_WINDOW_SIZE;
	}
      else
	{
	  val >>= 1;
	  result += 1;
	}
    }

  return result + powi_lookup_cost (val, cache);
}
/* Recursive subroutine of powi_as_mults.  This function takes the
   array, CACHE, of already calculated exponents and an exponent N and
   returns a tree that corresponds to CACHE[1]**N, with type TYPE.  */

static tree
powi_as_mults_1 (gimple_stmt_iterator *gsi, location_t loc, tree type,
		 HOST_WIDE_INT n, tree *cache)
{
  tree op0, op1, ssa_target;
  unsigned HOST_WIDE_INT digit;
  gassign *mult_stmt;

  if (n < POWI_TABLE_SIZE && cache[n])
    return cache[n];

  ssa_target = make_temp_ssa_name (type, NULL, "powmult");

  if (n < POWI_TABLE_SIZE)
    {
      cache[n] = ssa_target;
      op0 = powi_as_mults_1 (gsi, loc, type, n - powi_table[n], cache);
      op1 = powi_as_mults_1 (gsi, loc, type, powi_table[n], cache);
    }
  else if (n & 1)
    {
      digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
      op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache);
      op1 = powi_as_mults_1 (gsi, loc, type, digit, cache);
    }
  else
    {
      op0 = powi_as_mults_1 (gsi, loc, type, n >> 1, cache);
      op1 = op0;
    }

  mult_stmt = gimple_build_assign (ssa_target, MULT_EXPR, op0, op1);
  gimple_set_location (mult_stmt, loc);
  gsi_insert_before (gsi, mult_stmt, GSI_SAME_STMT);

  return ssa_target;
}
/* Convert ARG0**N to a tree of multiplications of ARG0 with itself.
   This function needs to be kept in sync with powi_cost above.  */

static tree
powi_as_mults (gimple_stmt_iterator *gsi, location_t loc,
	       tree arg0, HOST_WIDE_INT n)
{
  tree cache[POWI_TABLE_SIZE], result, type = TREE_TYPE (arg0);
  gassign *div_stmt;
  tree target;

  if (n == 0)
    return build_real (type, dconst1);

  memset (cache, 0, sizeof (cache));
  cache[1] = arg0;

  result = powi_as_mults_1 (gsi, loc, type, (n < 0) ? -n : n, cache);
  if (n >= 0)
    return result;

  /* If the original exponent was negative, reciprocate the result.  */
  target = make_temp_ssa_name (type, NULL, "powmult");
  div_stmt = gimple_build_assign (target, RDIV_EXPR,
				  build_real (type, dconst1), result);
  gimple_set_location (div_stmt, loc);
  gsi_insert_before (gsi, div_stmt, GSI_SAME_STMT);

  return target;
}
/* ARG0 and N are the two arguments to a powi builtin in GSI with
   location info LOC.  If the arguments are appropriate, create an
   equivalent sequence of statements prior to GSI using an optimal
   number of multiplications, and return an expression holding the
   result.  */

static tree
gimple_expand_builtin_powi (gimple_stmt_iterator *gsi, location_t loc,
			    tree arg0, HOST_WIDE_INT n)
{
  /* Avoid largest negative number.  */
  if (n != -n
      && ((n >= -1 && n <= 2)
	  || (optimize_function_for_speed_p (cfun)
	      && powi_cost (n) <= POWI_MAX_MULTS)))
    return powi_as_mults (gsi, loc, arg0, n);

  return NULL_TREE;
}
/* Build a gimple call statement that calls FN with argument ARG.
   Set the lhs of the call statement to a fresh SSA name.  Insert the
   statement prior to GSI's current position, and return the fresh
   SSA name.  */

static tree
build_and_insert_call (gimple_stmt_iterator *gsi, location_t loc,
		       tree fn, tree arg)
{
  gcall *call_stmt;
  tree ssa_target;

  call_stmt = gimple_build_call (fn, 1, arg);
  ssa_target = make_temp_ssa_name (TREE_TYPE (arg), NULL, "powroot");
  gimple_set_lhs (call_stmt, ssa_target);
  gimple_set_location (call_stmt, loc);
  gsi_insert_before (gsi, call_stmt, GSI_SAME_STMT);

  return ssa_target;
}
/* Build a gimple binary operation with the given CODE and arguments
   ARG0, ARG1, assigning the result to a new SSA name for variable
   TARGET.  Insert the statement prior to GSI's current position, and
   return the fresh SSA name.  */

static tree
build_and_insert_binop (gimple_stmt_iterator *gsi, location_t loc,
			const char *name, enum tree_code code,
			tree arg0, tree arg1)
{
  tree result = make_temp_ssa_name (TREE_TYPE (arg0), NULL, name);
  gassign *stmt = gimple_build_assign (result, code, arg0, arg1);
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);

  return result;
}
/* Build a gimple reference operation with the given CODE and argument
   ARG, assigning the result to a new SSA name of TYPE with NAME.
   Insert the statement prior to GSI's current position, and return
   the fresh SSA name.  */

static inline tree
build_and_insert_ref (gimple_stmt_iterator *gsi, location_t loc, tree type,
		      const char *name, enum tree_code code, tree arg0)
{
  tree result = make_temp_ssa_name (type, NULL, name);
  gimple *stmt = gimple_build_assign (result, build1 (code, type, arg0));
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);

  return result;
}
/* Build a gimple assignment to cast VAL to TYPE.  Insert the statement
   prior to GSI's current position, and return the fresh SSA name.  */

static tree
build_and_insert_cast (gimple_stmt_iterator *gsi, location_t loc,
		       tree type, tree val)
{
  tree result = make_ssa_name (type);
  gassign *stmt = gimple_build_assign (result, NOP_EXPR, val);
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);

  return result;
}
struct pow_synth_sqrt_info
{
  /* Factors a[i] of the sum of powers of 0.5; see below.  */
  bool *factors;

  /* The deepest sqrt chain needed.  */
  unsigned int deepest;

  /* Number of multiplications needed to combine the chains.  */
  unsigned int num_mults;
};
/* Return true iff the real value C can be represented as a
   sum of powers of 0.5 up to N.  That is:
   C == SUM<i from 1..N> (a[i]*(0.5**i)) where a[i] is either 0 or 1.
   Record in INFO the various parameters of the synthesis algorithm such
   as the factors a[i], the maximum 0.5 power and the number of
   multiplications that will be required.  */

static bool
representable_as_half_series_p (REAL_VALUE_TYPE c, unsigned n,
				struct pow_synth_sqrt_info *info)
{
  REAL_VALUE_TYPE factor = dconsthalf;
  REAL_VALUE_TYPE remainder = c;

  info->deepest = 0;
  info->num_mults = 0;
  memset (info->factors, 0, n * sizeof (bool));

  for (unsigned i = 0; i < n; i++)
    {
      REAL_VALUE_TYPE res;

      /* If something inexact happened bail out now.  */
      if (real_arithmetic (&res, MINUS_EXPR, &remainder, &factor))
	return false;

      /* We have hit zero.  The number is representable as a sum
	 of powers of 0.5.  */
      if (real_equal (&res, &dconst0))
	{
	  info->factors[i] = true;
	  info->deepest = i + 1;
	  return true;
	}
      else if (!REAL_VALUE_NEGATIVE (res))
	{
	  remainder = res;
	  info->factors[i] = true;
	  info->num_mults++;
	}
      else
	info->factors[i] = false;

      real_arithmetic (&factor, MULT_EXPR, &factor, &dconsthalf);
    }
  return false;
}
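/* A worked example (illustrative, not part of the sources): for C == 0.625
   and N >= 3 the loop above finds 0.625 == 0.5 + 0.125, i.e. factors
   {1, 0, 1}, deepest == 3 and num_mults == 1, so pow (x, 0.625) can be
   synthesized as sqrt (x) * sqrt (sqrt (sqrt (x))).  */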
/* Return the tree corresponding to FN being applied
   to ARG N times at GSI and LOC.
   Look up previous results from CACHE if need be.
   cache[0] should contain just plain ARG i.e. FN applied to ARG 0 times.  */

static tree
get_fn_chain (tree arg, unsigned int n, gimple_stmt_iterator *gsi,
	      tree fn, location_t loc, tree *cache)
{
  tree res = cache[n];
  if (!res)
    {
      tree prev = get_fn_chain (arg, n - 1, gsi, fn, loc, cache);
      res = build_and_insert_call (gsi, loc, fn, prev);
      cache[n] = res;
    }

  return res;
}
/* Print to STREAM the repeated application of function FNAME to ARG
   N times.  So, for FNAME = "foo", ARG = "x", N = 2 it would print:
   foo (foo (x)).  */

static void
print_nested_fn (FILE* stream, const char *fname, const char* arg,
		 unsigned int n)
{
  if (n == 0)
    fprintf (stream, "%s", arg);
  else
    {
      fprintf (stream, "%s (", fname);
      print_nested_fn (stream, fname, arg, n - 1);
      fprintf (stream, ")");
    }
}
/* Print to STREAM the fractional sequence of sqrt chains
   applied to ARG, described by INFO.  Used for the dump file.  */

static void
dump_fractional_sqrt_sequence (FILE *stream, const char *arg,
			       struct pow_synth_sqrt_info *info)
{
  for (unsigned int i = 0; i < info->deepest; i++)
    {
      bool is_set = info->factors[i];
      if (is_set)
	{
	  print_nested_fn (stream, "sqrt", arg, i + 1);
	  if (i != info->deepest - 1)
	    fprintf (stream, " * ");
	}
    }
}
/* Print to STREAM a representation of raising ARG to an integer
   power N.  Used for the dump file.  */

static void
dump_integer_part (FILE *stream, const char* arg, HOST_WIDE_INT n)
{
  if (n > 1)
    fprintf (stream, "powi (%s, " HOST_WIDE_INT_PRINT_DEC ")", arg, n);
  else if (n == 1)
    fprintf (stream, "%s", arg);
}
/* Attempt to synthesize a POW[F] (ARG0, ARG1) call using chains of
   square roots.  Place at GSI and LOC.  Limit the maximum depth
   of the sqrt chains to MAX_DEPTH.  Return the tree holding the
   result of the expanded sequence or NULL_TREE if the expansion failed.

   This routine assumes that ARG1 is a real number with a fractional part
   (the integer exponent case will have been handled earlier in
   gimple_expand_builtin_pow).

   For ARG1 > 0.0:
   * For ARG1 composed of a whole part WHOLE_PART and a fractional part
     FRAC_PART i.e. WHOLE_PART == floor (ARG1) and
     FRAC_PART == ARG1 - WHOLE_PART:
     Produce POWI (ARG0, WHOLE_PART) * POW (ARG0, FRAC_PART) where
     POW (ARG0, FRAC_PART) is expanded as a product of square root chains
     if it can be expressed as such, that is if FRAC_PART satisfies:
     FRAC_PART == <SUM from i = 1 until MAX_DEPTH> (a[i] * (0.5**i))
     where integer a[i] is either 0 or 1.

     Example:
     POW (x, 3.625) == POWI (x, 3) * POW (x, 0.625)
       --> POWI (x, 3) * SQRT (x) * SQRT (SQRT (SQRT (x)))

   For ARG1 < 0.0 there are two approaches:
   * (A) Expand to 1.0 / POW (ARG0, -ARG1) where POW (ARG0, -ARG1)
	 is calculated as above.

     Example:
     POW (x, -5.625) == 1.0 / POW (x, 5.625)
       --> 1.0 / (POWI (x, 5) * SQRT (x) * SQRT (SQRT (SQRT (x))))

   * (B) : WHOLE_PART := - ceil (abs (ARG1))
	   FRAC_PART  := ARG1 - WHOLE_PART
	   and expand to POW (x, FRAC_PART) / POWI (x, WHOLE_PART).

     Example:
     POW (x, -5.875) == POW (x, 0.125) / POWI (X, 6)
       --> SQRT (SQRT (SQRT (x))) / (POWI (x, 6))

   For ARG1 < 0.0 we choose between (A) and (B) depending on
   how many multiplications we'd have to do.
   So, for the example in (B): POW (x, -5.875), if we were to
   follow algorithm (A) we would produce:
   1.0 / POWI (X, 5) * SQRT (X) * SQRT (SQRT (X)) * SQRT (SQRT (SQRT (X)))
   which contains more multiplications than approach (B).

   Hopefully, this approach will eliminate potentially expensive POW library
   calls when unsafe floating point math is enabled and allow the compiler to
   further optimise the multiplies, square roots and divides produced by this
   function.  */
static tree
expand_pow_as_sqrts (gimple_stmt_iterator *gsi, location_t loc,
		     tree arg0, tree arg1, HOST_WIDE_INT max_depth)
{
  tree type = TREE_TYPE (arg0);
  machine_mode mode = TYPE_MODE (type);
  tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
  bool one_over = true;

  if (!sqrtfn)
    return NULL_TREE;

  if (TREE_CODE (arg1) != REAL_CST)
    return NULL_TREE;

  REAL_VALUE_TYPE exp_init = TREE_REAL_CST (arg1);

  gcc_assert (max_depth > 0);
  tree *cache = XALLOCAVEC (tree, max_depth + 1);

  struct pow_synth_sqrt_info synth_info;
  synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
  synth_info.deepest = 0;
  synth_info.num_mults = 0;

  bool neg_exp = REAL_VALUE_NEGATIVE (exp_init);
  REAL_VALUE_TYPE exp = real_value_abs (&exp_init);

  /* The whole and fractional parts of exp.  */
  REAL_VALUE_TYPE whole_part;
  REAL_VALUE_TYPE frac_part;

  real_floor (&whole_part, mode, &exp);
  real_arithmetic (&frac_part, MINUS_EXPR, &exp, &whole_part);

  REAL_VALUE_TYPE ceil_whole = dconst0;
  REAL_VALUE_TYPE ceil_fract = dconst0;

  if (neg_exp)
    {
      real_ceil (&ceil_whole, mode, &exp);
      real_arithmetic (&ceil_fract, MINUS_EXPR, &ceil_whole, &exp);
    }

  if (!representable_as_half_series_p (frac_part, max_depth, &synth_info))
    return NULL_TREE;

  /* Check whether it's more profitable to not use 1.0 / ...  */
  if (neg_exp)
    {
      struct pow_synth_sqrt_info alt_synth_info;
      alt_synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
      alt_synth_info.deepest = 0;
      alt_synth_info.num_mults = 0;

      if (representable_as_half_series_p (ceil_fract, max_depth,
					  &alt_synth_info)
	  && alt_synth_info.deepest <= synth_info.deepest
	  && alt_synth_info.num_mults < synth_info.num_mults)
	{
	  whole_part = ceil_whole;
	  frac_part = ceil_fract;
	  synth_info.deepest = alt_synth_info.deepest;
	  synth_info.num_mults = alt_synth_info.num_mults;
	  memcpy (synth_info.factors, alt_synth_info.factors,
		  (max_depth + 1) * sizeof (bool));
	  one_over = false;
	}
    }

  HOST_WIDE_INT n = real_to_integer (&whole_part);
  REAL_VALUE_TYPE cint;
  real_from_integer (&cint, VOIDmode, n, SIGNED);

  if (!real_identical (&whole_part, &cint))
    return NULL_TREE;

  if (powi_cost (n) + synth_info.num_mults > POWI_MAX_MULTS)
    return NULL_TREE;

  memset (cache, 0, (max_depth + 1) * sizeof (tree));

  tree integer_res = n == 0 ? build_real (type, dconst1) : arg0;

  /* Calculate the integer part of the exponent.  */
  if (n > 1)
    {
      integer_res = gimple_expand_builtin_powi (gsi, loc, arg0, n);
      if (!integer_res)
	return NULL_TREE;
    }

  if (dump_file)
    {
      char string[64];

      real_to_decimal (string, &exp_init, sizeof (string), 0, 1);
      fprintf (dump_file, "synthesizing pow (x, %s) as:\n", string);

      if (neg_exp)
	{
	  if (one_over)
	    {
	      fprintf (dump_file, "1.0 / (");
	      dump_integer_part (dump_file, "x", n);
	      if (n > 0)
		fprintf (dump_file, " * ");
	      dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
	      fprintf (dump_file, ")");
	    }
	  else
	    {
	      dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
	      fprintf (dump_file, " / (");
	      dump_integer_part (dump_file, "x", n);
	      fprintf (dump_file, ")");
	    }
	}
      else
	{
	  dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
	  if (n > 0)
	    fprintf (dump_file, " * ");
	  dump_integer_part (dump_file, "x", n);
	}

      fprintf (dump_file, "\ndeepest sqrt chain: %d\n", synth_info.deepest);
    }

  tree fract_res = NULL_TREE;
  cache[0] = arg0;

  /* Calculate the fractional part of the exponent.  */
  for (unsigned i = 0; i < synth_info.deepest; i++)
    {
      if (synth_info.factors[i])
	{
	  tree sqrt_chain = get_fn_chain (arg0, i + 1, gsi, sqrtfn, loc, cache);

	  if (!fract_res)
	    fract_res = sqrt_chain;
	  else
	    fract_res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
						fract_res, sqrt_chain);
	}
    }

  tree res = NULL_TREE;

  if (neg_exp)
    {
      if (one_over)
	{
	  if (n > 0)
	    res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					  fract_res, integer_res);
	  else
	    res = fract_res;

	  res = build_and_insert_binop (gsi, loc, "powrootrecip", RDIV_EXPR,
					build_real (type, dconst1), res);
	}
      else
	res = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
				      fract_res, integer_res);
    }
  else
    res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
				  fract_res, integer_res);
  return res;
}
/* ARG0 and ARG1 are the two arguments to a pow builtin call in GSI
   with location info LOC.  If possible, create an equivalent and
   less expensive sequence of statements prior to GSI, and return an
   expression holding the result.  */
static tree
gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc,
			   tree arg0, tree arg1)
{
  REAL_VALUE_TYPE c, cint, dconst1_3, dconst1_4, dconst1_6;
  REAL_VALUE_TYPE c2, dconst3;
  HOST_WIDE_INT n;
  tree type, sqrtfn, cbrtfn, sqrt_arg0, result, cbrt_x, powi_cbrt_x;
  machine_mode mode;
  bool speed_p = optimize_bb_for_speed_p (gsi_bb (*gsi));
  bool hw_sqrt_exists, c_is_int, c2_is_int;

  dconst1_4 = dconst1;
  SET_REAL_EXP (&dconst1_4, REAL_EXP (&dconst1_4) - 2);

  /* If the exponent isn't a constant, there's nothing of interest
     to be done.  */
  if (TREE_CODE (arg1) != REAL_CST)
    return NULL_TREE;

  /* Don't perform the operation if flag_signaling_nans is on
     and the operand is a signaling NaN.  */
  if (HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg1)))
      && (REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg0))
	  || REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg1))))
    return NULL_TREE;

  /* If the exponent is equivalent to an integer, expand to an optimal
     multiplication sequence when profitable.  */
  c = TREE_REAL_CST (arg1);
  n = real_to_integer (&c);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  c_is_int = real_identical (&c, &cint);

  if (c_is_int
      && ((n >= -1 && n <= 2)
	  || (flag_unsafe_math_optimizations
	      && speed_p
	      && powi_cost (n) <= POWI_MAX_MULTS)))
    return gimple_expand_builtin_powi (gsi, loc, arg0, n);

  /* Attempt various optimizations using sqrt and cbrt.  */
  type = TREE_TYPE (arg0);
  mode = TYPE_MODE (type);
  sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);

  /* Optimize pow(x,0.5) = sqrt(x).  This replacement is always safe
     unless signed zeros must be maintained.  pow(-0,0.5) = +0, while
     sqrt(-0) = -0.  */
  if (sqrtfn
      && real_equal (&c, &dconsthalf)
      && !HONOR_SIGNED_ZEROS (mode))
    return build_and_insert_call (gsi, loc, sqrtfn, arg0);

  hw_sqrt_exists = optab_handler (sqrt_optab, mode) != CODE_FOR_nothing;

  /* Optimize pow(x,1./3.) = cbrt(x).  This requires unsafe math
     optimizations since 1./3. is not exactly representable.  If x
     is negative and finite, the correct value of pow(x,1./3.) is
     a NaN with the "invalid" exception raised, because the value
     of 1./3. actually has an even denominator.  The correct value
     of cbrt(x) is a negative real value.  */
  cbrtfn = mathfn_built_in (type, BUILT_IN_CBRT);
  dconst1_3 = real_value_truncate (mode, dconst_third ());

  if (flag_unsafe_math_optimizations
      && cbrtfn
      && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
      && real_equal (&c, &dconst1_3))
    return build_and_insert_call (gsi, loc, cbrtfn, arg0);

  /* Optimize pow(x,1./6.) = cbrt(sqrt(x)).  Don't do this optimization
     if we don't have a hardware sqrt insn.  */
  dconst1_6 = dconst1_3;
  SET_REAL_EXP (&dconst1_6, REAL_EXP (&dconst1_6) - 1);

  if (flag_unsafe_math_optimizations
      && sqrtfn
      && cbrtfn
      && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
      && speed_p
      && hw_sqrt_exists
      && real_equal (&c, &dconst1_6))
    {
      /* sqrt(x)  */
      sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);

      /* cbrt(sqrt(x))  */
      return build_and_insert_call (gsi, loc, cbrtfn, sqrt_arg0);
    }

  /* Attempt to expand the POW as a product of square root chains.
     Expand the 0.25 case even when optimising for size.  */
  if (flag_unsafe_math_optimizations
      && sqrtfn
      && hw_sqrt_exists
      && (speed_p || real_equal (&c, &dconst1_4))
      && !HONOR_SIGNED_ZEROS (mode))
    {
      unsigned int max_depth = speed_p
			       ? PARAM_VALUE (PARAM_MAX_POW_SQRT_DEPTH)
			       : 2;

      tree expand_with_sqrts
	= expand_pow_as_sqrts (gsi, loc, arg0, arg1, max_depth);

      if (expand_with_sqrts)
	return expand_with_sqrts;
    }

  real_arithmetic (&c2, MULT_EXPR, &c, &dconst2);
  n = real_to_integer (&c2);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  c2_is_int = real_identical (&c2, &cint);

  /* Optimize pow(x,c), where 3c = n for some nonzero integer n, into

     powi(x, n/3) * powi(cbrt(x), n%3),                    n > 0;
     1.0 / (powi(x, abs(n)/3) * powi(cbrt(x), abs(n)%3)),  n < 0.

     Do not calculate the first factor when n/3 = 0.  As cbrt(x) is
     different from pow(x, 1./3.) due to rounding and behavior with
     negative x, we need to constrain this transformation to unsafe
     math and positive x or finite math.  */
  real_from_integer (&dconst3, VOIDmode, 3, SIGNED);
  real_arithmetic (&c2, MULT_EXPR, &c, &dconst3);
  real_round (&c2, mode, &c2);
  n = real_to_integer (&c2);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  real_arithmetic (&c2, RDIV_EXPR, &cint, &dconst3);
  real_convert (&c2, mode, &c2);

  if (flag_unsafe_math_optimizations
      && cbrtfn
      && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
      && real_identical (&c2, &c)
      && !c2_is_int
      && optimize_function_for_speed_p (cfun)
      && powi_cost (n / 3) <= POWI_MAX_MULTS)
    {
      tree powi_x_ndiv3 = NULL_TREE;

      /* Attempt to fold powi(arg0, abs(n/3)) into multiplies.  If not
	 possible or profitable, give up.  Skip the degenerate case when
	 abs(n) < 3, where the result is always 1.  */
      if (absu_hwi (n) >= 3)
	{
	  powi_x_ndiv3 = gimple_expand_builtin_powi (gsi, loc, arg0,
						     abs_hwi (n / 3));
	  if (!powi_x_ndiv3)
	    return NULL_TREE;
	}

      /* Calculate powi(cbrt(x), n%3).  Don't use gimple_expand_builtin_powi
	 as that creates an unnecessary variable.  Instead, just produce
	 either cbrt(x) or cbrt(x) * cbrt(x).  */
      cbrt_x = build_and_insert_call (gsi, loc, cbrtfn, arg0);

      if (absu_hwi (n) % 3 == 1)
	powi_cbrt_x = cbrt_x;
      else
	powi_cbrt_x = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					      cbrt_x, cbrt_x);

      /* Multiply the two subexpressions, unless powi(x,abs(n)/3) = 1.  */
      if (absu_hwi (n) < 3)
	result = powi_cbrt_x;
      else
	result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					 powi_x_ndiv3, powi_cbrt_x);

      /* If n is negative, reciprocate the result.  */
      if (n < 0)
	result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
					 build_real (type, dconst1), result);

      return result;
    }

  /* No optimizations succeeded.  */
  return NULL_TREE;
}
/* ARG is the argument to a cabs builtin call in GSI with location info
   LOC.  Create a sequence of statements prior to GSI that calculates
   sqrt(R*R + I*I), where R and I are the real and imaginary components
   of ARG, respectively.  Return an expression holding the result.  */

static tree
gimple_expand_builtin_cabs (gimple_stmt_iterator *gsi, location_t loc, tree arg)
{
  tree real_part, imag_part, addend1, addend2, sum, result;
  tree type = TREE_TYPE (TREE_TYPE (arg));
  tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
  machine_mode mode = TYPE_MODE (type);

  if (!flag_unsafe_math_optimizations
      || !optimize_bb_for_speed_p (gimple_bb (gsi_stmt (*gsi)))
      || !sqrtfn
      || optab_handler (sqrt_optab, mode) == CODE_FOR_nothing)
    return NULL_TREE;

  real_part = build_and_insert_ref (gsi, loc, type, "cabs",
				    REALPART_EXPR, arg);
  addend1 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
				    real_part, real_part);
  imag_part = build_and_insert_ref (gsi, loc, type, "cabs",
				    IMAGPART_EXPR, arg);
  addend2 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
				    imag_part, imag_part);
  sum = build_and_insert_binop (gsi, loc, "cabs", PLUS_EXPR, addend1, addend2);
  result = build_and_insert_call (gsi, loc, sqrtfn, sum);

  return result;
}
/* Go through all calls to sin, cos and cexpi and call execute_cse_sincos_1
   on the SSA_NAME argument of each of them.  Also expand powi(x,n) into
   an optimal number of multiplies, when n is a constant.  */

namespace {

const pass_data pass_data_cse_sincos =
{
  GIMPLE_PASS, /* type */
  "sincos", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  PROP_gimple_opt_math, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_cse_sincos : public gimple_opt_pass
{
public:
  pass_cse_sincos (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cse_sincos, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      /* We no longer require either sincos or cexp, since powi expansion
	 piggybacks on this pass.  */
      return optimize;
    }

  virtual unsigned int execute (function *);

}; // class pass_cse_sincos
unsigned int
pass_cse_sincos::execute (function *fun)
{
  basic_block bb;
  bool cfg_changed = false;

  calculate_dominance_info (CDI_DOMINATORS);
  memset (&sincos_stats, 0, sizeof (sincos_stats));

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;
      bool cleanup_eh = false;

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);

	  /* Only the last stmt in a bb could throw, no need to call
	     gimple_purge_dead_eh_edges if we change something in the middle
	     of a basic block.  */
	  cleanup_eh = false;

	  if (is_gimple_call (stmt)
	      && gimple_call_lhs (stmt))
	    {
	      tree arg, arg0, arg1, result;
	      HOST_WIDE_INT n;
	      location_t loc;

	      switch (gimple_call_combined_fn (stmt))
		{
		CASE_CFN_COS:
		CASE_CFN_SIN:
		CASE_CFN_CEXPI:
		  /* Make sure we have either sincos or cexp.  */
		  if (!targetm.libc_has_function (function_c99_math_complex)
		      && !targetm.libc_has_function (function_sincos))
		    break;

		  arg = gimple_call_arg (stmt, 0);
		  if (TREE_CODE (arg) == SSA_NAME)
		    cfg_changed |= execute_cse_sincos_1 (arg);
		  break;

		CASE_CFN_POW:
		  arg0 = gimple_call_arg (stmt, 0);
		  arg1 = gimple_call_arg (stmt, 1);

		  loc = gimple_location (stmt);
		  result = gimple_expand_builtin_pow (&gsi, loc, arg0, arg1);

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gassign *new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		CASE_CFN_POWI:
		  arg0 = gimple_call_arg (stmt, 0);
		  arg1 = gimple_call_arg (stmt, 1);
		  loc = gimple_location (stmt);

		  if (real_minus_onep (arg0))
		    {
		      tree t0, t1, cond, one, minus_one;
		      gassign *stmt;

		      t0 = TREE_TYPE (arg0);
		      t1 = TREE_TYPE (arg1);
		      one = build_real (t0, dconst1);
		      minus_one = build_real (t0, dconstm1);

		      cond = make_temp_ssa_name (t1, NULL, "powi_cond");
		      stmt = gimple_build_assign (cond, BIT_AND_EXPR,
						  arg1, build_int_cst (t1, 1));
		      gimple_set_location (stmt, loc);
		      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

		      result = make_temp_ssa_name (t0, NULL, "powi");
		      stmt = gimple_build_assign (result, COND_EXPR, cond,
						  minus_one, one);
		      gimple_set_location (stmt, loc);
		      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
		    }
		  else
		    {
		      if (!tree_fits_shwi_p (arg1))
			break;

		      n = tree_to_shwi (arg1);
		      result = gimple_expand_builtin_powi (&gsi, loc, arg0, n);
		    }

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gassign *new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		CASE_CFN_CABS:
		  arg0 = gimple_call_arg (stmt, 0);
		  loc = gimple_location (stmt);
		  result = gimple_expand_builtin_cabs (&gsi, loc, arg0);

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gassign *new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		default:;
		}
	    }
	}
      if (cleanup_eh)
	cfg_changed |= gimple_purge_dead_eh_edges (bb);
    }

  statistics_counter_event (fun, "sincos statements inserted",
			    sincos_stats.inserted);

  return cfg_changed ? TODO_cleanup_cfg : 0;
}
} // anon namespace

gimple_opt_pass *
make_pass_cse_sincos (gcc::context *ctxt)
{
  return new pass_cse_sincos (ctxt);
}
/* A symbolic number is used to detect byte permutation and selection
   patterns.  Therefore the field N contains an artificial number
   consisting of octet sized markers:

     0	     - target byte has the value 0
     FF	     - target byte has an unknown value (eg. due to sign extension)
     1..size - marker value is the target byte index minus one.

   To detect permutations on memory sources (arrays and structures), a symbolic
   number is also associated with a base address (the array or structure the
   load is made from), an offset from the base address and a range which gives
   the difference between the highest and lowest accessed memory location to
   make such a symbolic number.  The range is thus different from size which
   reflects the size of the type of the current expression.  Note that for
   non-memory sources, range holds the same value as size.

   For instance, for an array char a[], (short) a[0] | (short) a[3] would have
   a size of 2 but a range of 4 while (short) a[0] | ((short) a[0] << 1) would
   still have a size of 2 but this time a range of 1.  */
struct symbolic_number {
  uint64_t n;
  tree type;
  tree base_addr;
  tree offset;
  HOST_WIDE_INT bytepos;
  tree alias_set;
  tree vuse;
  unsigned HOST_WIDE_INT range;
};
#define BITS_PER_MARKER 8
#define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
#define MARKER_BYTE_UNKNOWN MARKER_MASK
#define HEAD_MARKER(n, size) \
  ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))

/* The number which the find_bswap_or_nop_1 result should match in
   order to have a nop.  The number is masked according to the size of
   the symbolic number before using it.  */
#define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
  (uint64_t)0x08070605 << 32 | 0x04030201)

/* The number which the find_bswap_or_nop_1 result should match in
   order to have a byte swap.  The number is masked according to the
   size of the symbolic number before using it.  */
#define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
  (uint64_t)0x01020304 << 32 | 0x05060708)
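/* For example, for a 4-byte value both constants are masked down to
   32 bits, so a computation whose symbolic number ends up as 0x04030201
   leaves the bytes in memory order (a nop), while 0x01020304 reverses
   them (a byte swap).  */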
/* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
   number N.  Return false if the requested operation is not permitted
   on a symbolic number.  */

static inline bool
do_shift_rotate (enum tree_code code,
		 struct symbolic_number *n,
		 int count)
{
  int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
  unsigned head_marker;

  if (count % BITS_PER_UNIT != 0)
    return false;
  count = (count / BITS_PER_UNIT) * BITS_PER_MARKER;

  /* Zero out the extra bits of N in order to avoid them being shifted
     into the significant bits.  */
  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;

  switch (code)
    {
    case LSHIFT_EXPR:
      n->n <<= count;
      break;
    case RSHIFT_EXPR:
      head_marker = HEAD_MARKER (n->n, size);
      n->n >>= count;
      /* Arithmetic shift of signed type: result is dependent on the
	 value.  */
      if (!TYPE_UNSIGNED (n->type) && head_marker)
	for (i = 0; i < count / BITS_PER_MARKER; i++)
	  n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
		  << ((size - 1 - i) * BITS_PER_MARKER);
      break;
    case LROTATE_EXPR:
      n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count));
      break;
    case RROTATE_EXPR:
      n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count));
      break;
    default:
      return false;
    }
  /* Zero unused bits for size.  */
  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
  return true;
}
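/* For example (illustrative): right-shifting a 4-byte symbolic number
   0x04030201 by 8 bits yields 0x00040302 for an unsigned type, while for
   a signed type the head marker forces the topmost byte to the unknown
   marker, giving 0xff040302, since an arithmetic shift replicates a sign
   bit the pass cannot track.  */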
/* Perform sanity checking for the symbolic number N and the gimple
   statement STMT.  */

static inline bool
verify_symbolic_number_p (struct symbolic_number *n, gimple *stmt)
{
  tree lhs_type;

  lhs_type = gimple_expr_type (stmt);

  if (TREE_CODE (lhs_type) != INTEGER_TYPE)
    return false;

  if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
    return false;

  return true;
}
/* Initialize the symbolic number N for the bswap pass from the base element
   SRC manipulated by the bitwise OR expression.  */

static bool
init_symbolic_number (struct symbolic_number *n, tree src)
{
  int size;

  n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE;

  /* Set up the symbolic number N by setting each byte to a value between 1 and
     the byte size of rhs1.  The highest order byte is set to n->size and the
     lowest order byte to 1.  */
  n->type = TREE_TYPE (src);
  size = TYPE_PRECISION (n->type);
  if (size % BITS_PER_UNIT != 0)
    return false;
  size /= BITS_PER_UNIT;
  if (size > 64 / BITS_PER_MARKER)
    return false;
  n->range = size;
  n->n = CMPNOP;

  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;

  return true;
}
/* Check if STMT might be a byte swap or a nop from a memory source and return
   the answer.  If so, REF is that memory source and the base of the memory
   area accessed and the offset of the access from that base are recorded
   in N.  */

static bool
find_bswap_or_nop_load (gimple *stmt, tree ref, struct symbolic_number *n)
{
  /* Leaf node is an array or component ref.  Memorize its base and
     offset from base to compare to other such leaf node.  */
  HOST_WIDE_INT bitsize, bitpos;
  machine_mode mode;
  int unsignedp, reversep, volatilep;
  tree offset, base_addr;

  /* Not prepared to handle PDP endian.  */
  if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
    return false;

  if (!gimple_assign_load_p (stmt) || gimple_has_volatile_ops (stmt))
    return false;

  base_addr = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
                                   &unsignedp, &reversep, &volatilep, false);

  if (TREE_CODE (base_addr) == MEM_REF)
    {
      offset_int bit_offset = 0;
      tree off = TREE_OPERAND (base_addr, 1);

      if (!integer_zerop (off))
        {
          offset_int boff, coff = mem_ref_offset (base_addr);
          boff = wi::lshift (coff, LOG2_BITS_PER_UNIT);
          bit_offset += boff;
        }

      base_addr = TREE_OPERAND (base_addr, 0);

      /* Avoid returning a negative bitpos as this may wreak havoc later.  */
      if (wi::neg_p (bit_offset))
        {
          offset_int mask = wi::mask <offset_int> (LOG2_BITS_PER_UNIT, false);
          offset_int tem = bit_offset.and_not (mask);
          /* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf.
             Subtract it from BIT_OFFSET and add it (scaled) to OFFSET.  */
          bit_offset -= tem;
          tem = wi::arshift (tem, LOG2_BITS_PER_UNIT);
          if (offset)
            offset = size_binop (PLUS_EXPR, offset,
                                 wide_int_to_tree (sizetype, tem));
          else
            offset = wide_int_to_tree (sizetype, tem);
        }

      bitpos += bit_offset.to_shwi ();
    }

  if (bitpos % BITS_PER_UNIT)
    return false;
  if (bitsize % BITS_PER_UNIT)
    return false;
  if (reversep)
    return false;

  if (!init_symbolic_number (n, ref))
    return false;
  n->base_addr = base_addr;
  n->offset = offset;
  n->bytepos = bitpos / BITS_PER_UNIT;
  n->alias_set = reference_alias_ptr_type (ref);
  n->vuse = gimple_vuse (stmt);
  return true;
}
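
/* As an illustration, for a load from A[2] with A an array of char,
   BASE_ADDR records A, n->bytepos is 2 and, the element being one byte
   wide, the symbolic number is initialized to the single marker 0x01.  */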
/* Compute the symbolic number N representing the result of a bitwise OR on
   the 2 symbolic numbers N1 and N2 whose source statements are respectively
   SOURCE_STMT1 and SOURCE_STMT2.  */

static gimple *
perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
                        gimple *source_stmt2, struct symbolic_number *n2,
                        struct symbolic_number *n)
{
  int i, size;
  uint64_t mask;
  gimple *source_stmt;
  struct symbolic_number *n_start;

  /* Sources are different, cancel bswap if they are not memory location with
     the same base (array, structure, ...).  */
  if (gimple_assign_rhs1 (source_stmt1) != gimple_assign_rhs1 (source_stmt2))
    {
      uint64_t inc;
      HOST_WIDE_INT start_sub, end_sub, end1, end2, end;
      struct symbolic_number *toinc_n_ptr, *n_end;

      if (!n1->base_addr || !n2->base_addr
          || !operand_equal_p (n1->base_addr, n2->base_addr, 0))
        return NULL;

      if (!n1->offset != !n2->offset
          || (n1->offset && !operand_equal_p (n1->offset, n2->offset, 0)))
        return NULL;

      if (n1->bytepos < n2->bytepos)
        {
          n_start = n1;
          start_sub = n2->bytepos - n1->bytepos;
          source_stmt = source_stmt1;
        }
      else
        {
          n_start = n2;
          start_sub = n1->bytepos - n2->bytepos;
          source_stmt = source_stmt2;
        }

      /* Find the highest address at which a load is performed and
         compute related info.  */
      end1 = n1->bytepos + (n1->range - 1);
      end2 = n2->bytepos + (n2->range - 1);
      if (end1 < end2)
        {
          end = end2;
          end_sub = end2 - end1;
        }
      else
        {
          end = end1;
          end_sub = end1 - end2;
        }
      n_end = (end2 > end1) ? n2 : n1;

      /* Find symbolic number whose lsb is the most significant.  */
      if (BYTES_BIG_ENDIAN)
        toinc_n_ptr = (n_end == n1) ? n2 : n1;
      else
        toinc_n_ptr = (n_start == n1) ? n2 : n1;

      n->range = end - n_start->bytepos + 1;

      /* Check that the range of memory covered can be represented by
         a symbolic number.  */
      if (n->range > 64 / BITS_PER_MARKER)
        return NULL;

      /* Reinterpret byte marks in symbolic number holding the value of
         bigger weight according to target endianness.  */
      inc = BYTES_BIG_ENDIAN ? end_sub : start_sub;
      size = TYPE_PRECISION (n1->type) / BITS_PER_UNIT;
      for (i = 0; i < size; i++, inc <<= BITS_PER_MARKER)
        {
          uint64_t marker
            = (toinc_n_ptr->n >> (i * BITS_PER_MARKER)) & MARKER_MASK;
          if (marker && marker != MARKER_BYTE_UNKNOWN)
            toinc_n_ptr->n += inc;
        }
    }
  else
    {
      n->range = n1->range;
      n_start = n1;
      source_stmt = source_stmt1;
    }

  if (!n1->alias_set
      || alias_ptr_types_compatible_p (n1->alias_set, n2->alias_set))
    n->alias_set = n1->alias_set;
  else
    n->alias_set = ptr_type_node;
  n->vuse = n_start->vuse;
  n->base_addr = n_start->base_addr;
  n->offset = n_start->offset;
  n->bytepos = n_start->bytepos;
  n->type = n_start->type;
  size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;

  for (i = 0, mask = MARKER_MASK; i < size; i++, mask <<= BITS_PER_MARKER)
    {
      uint64_t masked1, masked2;

      masked1 = n1->n & mask;
      masked2 = n2->n & mask;
      if (masked1 && masked2 && masked1 != masked2)
        return NULL;
    }
  n->n = n1->n | n2->n;

  return source_stmt;
}
/* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
   the operation given by the rhs of STMT on the result.  If the operation
   could successfully be executed the function returns a gimple stmt whose
   rhs's first tree is the expression of the source operand and NULL
   otherwise.  */

static gimple *
find_bswap_or_nop_1 (gimple *stmt, struct symbolic_number *n, int limit)
{
  enum tree_code code;
  tree rhs1, rhs2 = NULL;
  gimple *rhs1_stmt, *rhs2_stmt, *source_stmt1;
  enum gimple_rhs_class rhs_class;

  if (!limit || !is_gimple_assign (stmt))
    return NULL;

  rhs1 = gimple_assign_rhs1 (stmt);

  if (find_bswap_or_nop_load (stmt, rhs1, n))
    return stmt;

  if (TREE_CODE (rhs1) != SSA_NAME)
    return NULL;

  code = gimple_assign_rhs_code (stmt);
  rhs_class = gimple_assign_rhs_class (stmt);
  rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);

  if (rhs_class == GIMPLE_BINARY_RHS)
    rhs2 = gimple_assign_rhs2 (stmt);

  /* Handle unary rhs and binary rhs with integer constants as second
     operand.  */

  if (rhs_class == GIMPLE_UNARY_RHS
      || (rhs_class == GIMPLE_BINARY_RHS
          && TREE_CODE (rhs2) == INTEGER_CST))
    {
      if (code != BIT_AND_EXPR
          && code != LSHIFT_EXPR
          && code != RSHIFT_EXPR
          && code != LROTATE_EXPR
          && code != RROTATE_EXPR
          && !CONVERT_EXPR_CODE_P (code))
        return NULL;

      source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, n, limit - 1);

      /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
         we have to initialize the symbolic number.  */
      if (!source_stmt1)
        {
          if (gimple_assign_load_p (stmt)
              || !init_symbolic_number (n, rhs1))
            return NULL;
          source_stmt1 = stmt;
        }

      switch (code)
        {
        case BIT_AND_EXPR:
          {
            int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
            uint64_t val = int_cst_value (rhs2), mask = 0;
            uint64_t tmp = (1 << BITS_PER_UNIT) - 1;

            /* Only constants masking full bytes are allowed.  */
            for (i = 0; i < size; i++, tmp <<= BITS_PER_UNIT)
              if ((val & tmp) != 0 && (val & tmp) != tmp)
                return NULL;
              else if (val & tmp)
                mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);

            n->n &= mask;
          }
          break;
        case LSHIFT_EXPR:
        case RSHIFT_EXPR:
        case LROTATE_EXPR:
        case RROTATE_EXPR:
          if (!do_shift_rotate (code, n, (int) TREE_INT_CST_LOW (rhs2)))
            return NULL;
          break;
        CASE_CONVERT:
          {
            int i, type_size, old_type_size;
            tree type;

            type = gimple_expr_type (stmt);
            type_size = TYPE_PRECISION (type);
            if (type_size % BITS_PER_UNIT != 0)
              return NULL;
            type_size /= BITS_PER_UNIT;
            if (type_size > 64 / BITS_PER_MARKER)
              return NULL;

            /* Sign extension: result is dependent on the value.  */
            old_type_size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
            if (!TYPE_UNSIGNED (n->type) && type_size > old_type_size
                && HEAD_MARKER (n->n, old_type_size))
              for (i = 0; i < type_size - old_type_size; i++)
                n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
                        << ((type_size - 1 - i) * BITS_PER_MARKER);

            if (type_size < 64 / BITS_PER_MARKER)
              {
                /* If STMT casts to a smaller type mask out the bits not
                   belonging to the target type.  */
                n->n &= ((uint64_t) 1 << (type_size * BITS_PER_MARKER)) - 1;
              }
            n->type = type;
            if (!n->base_addr)
              n->range = type_size;
          }
          break;
        default:
          return NULL;
        };
      return verify_symbolic_number_p (n, stmt) ? source_stmt1 : NULL;
    }

  /* Handle binary rhs.  */

  if (rhs_class == GIMPLE_BINARY_RHS)
    {
      struct symbolic_number n1, n2;
      gimple *source_stmt, *source_stmt2;

      if (code != BIT_IOR_EXPR)
        return NULL;

      if (TREE_CODE (rhs2) != SSA_NAME)
        return NULL;

      rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);

      source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, &n1, limit - 1);
      if (!source_stmt1)
        return NULL;

      source_stmt2 = find_bswap_or_nop_1 (rhs2_stmt, &n2, limit - 1);
      if (!source_stmt2)
        return NULL;

      if (TYPE_PRECISION (n1.type) != TYPE_PRECISION (n2.type))
        return NULL;

      if (!n1.vuse != !n2.vuse
          || (n1.vuse && !operand_equal_p (n1.vuse, n2.vuse, 0)))
        return NULL;

      source_stmt
        = perform_symbolic_merge (source_stmt1, &n1, source_stmt2, &n2, n);
      if (!source_stmt)
        return NULL;

      if (!verify_symbolic_number_p (n, stmt))
        return NULL;

      return source_stmt;
    }
  return NULL;
}
/* Check if STMT completes a bswap implementation or a read in a given
   endianness consisting of ORs, SHIFTs and ANDs and sets *BSWAP
   accordingly.  It also sets N to represent the kind of operations
   performed: size of the resulting expression and whether it works on
   a memory source, and if so alias-set and vuse.  At last, the
   function returns a stmt whose rhs's first tree is the source
   expression.  */

static gimple *
find_bswap_or_nop (gimple *stmt, struct symbolic_number *n, bool *bswap)
{
  /* The number which the find_bswap_or_nop_1 result should match in order
     to have a full byte swap.  The number is shifted to the right
     according to the size of the symbolic number before using it.  */
  uint64_t cmpxchg = CMPXCHG;
  uint64_t cmpnop = CMPNOP;

  gimple *source_stmt;
  int limit;

  /* The last parameter determines the depth search limit.  It usually
     correlates directly to the number n of bytes to be touched.  We
     increase that number by log2(n) + 1 here in order to also
     cover signed -> unsigned conversions of the src operand as can be seen
     in libgcc, and for initial shift/and operation of the src operand.  */
  limit = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt)));
  limit += 1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit);
  source_stmt = find_bswap_or_nop_1 (stmt, n, limit);

  if (!source_stmt)
    return NULL;

  /* Find real size of result (highest non-zero byte).  */
  if (n->base_addr)
    {
      int rsize;
      uint64_t tmpn;

      for (tmpn = n->n, rsize = 0; tmpn; tmpn >>= BITS_PER_MARKER, rsize++);
      n->range = rsize;
    }

  /* Zero out the extra bits of N and CMP*.  */
  if (n->range < (int) sizeof (int64_t))
    {
      uint64_t mask;

      mask = ((uint64_t) 1 << (n->range * BITS_PER_MARKER)) - 1;
      cmpxchg >>= (64 / BITS_PER_MARKER - n->range) * BITS_PER_MARKER;
      cmpnop &= mask;
    }

  /* A complete byte swap should make the symbolic number to start with
     the largest digit in the highest order byte.  Unchanged symbolic
     number indicates a read with same endianness as target architecture.  */
  if (n->n == cmpnop)
    *bswap = false;
  else if (n->n == cmpxchg)
    *bswap = true;
  else
    return NULL;

  /* Useless bit manipulation performed by code.  */
  if (!n->base_addr && n->n == cmpnop)
    return NULL;

  n->range *= BITS_PER_UNIT;
  return source_stmt;
}
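
/* For example, for a 32-bit unsigned X the manual byte swap

     (x >> 24) | ((x >> 8) & 0xff00) | ((x << 8) & 0xff0000) | (x << 24)

   leaves the symbolic number at 0x01020304 == CMPXCHG, so *BSWAP is set
   to true.  A same-endian read from memory instead leaves it at
   0x04030201 == CMPNOP and *BSWAP is set to false; for a non-memory
   source that nop pattern is rejected as useless bit manipulation.  */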
const pass_data pass_data_optimize_bswap =
{
  GIMPLE_PASS, /* type */
  "bswap", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_optimize_bswap : public gimple_opt_pass
{
public:
  pass_optimize_bswap (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_optimize_bswap, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return flag_expensive_optimizations && optimize;
    }

  virtual unsigned int execute (function *);

}; // class pass_optimize_bswap
/* Perform the bswap optimization: replace the expression computed in the rhs
   of CUR_STMT by an equivalent bswap, load or load + bswap expression.
   Which of these alternatives replaces the rhs is given by N->base_addr (non
   null if a load is needed) and BSWAP.  The type, VUSE and alias set of the
   load to perform are also given in N while the builtin bswap invocation is
   given in FNDECL.  Finally, if a load is involved, SRC_STMT refers to one of
   the load statements involved to construct the rhs in CUR_STMT and N->range
   gives the size of the rhs expression for maintaining some statistics.

   Note that if the replacement involves a load, CUR_STMT is moved just after
   SRC_STMT to do the load with the same VUSE which can lead to CUR_STMT
   changing basic block.  */

static bool
bswap_replace (gimple *cur_stmt, gimple *src_stmt, tree fndecl,
               tree bswap_type, tree load_type, struct symbolic_number *n,
               bool bswap)
{
  gimple_stmt_iterator gsi;
  tree src, tmp, tgt;
  gimple *bswap_stmt;

  gsi = gsi_for_stmt (cur_stmt);
  src = gimple_assign_rhs1 (src_stmt);
  tgt = gimple_assign_lhs (cur_stmt);

  /* Need to load the value from memory first.  */
  if (n->base_addr)
    {
      gimple_stmt_iterator gsi_ins = gsi_for_stmt (src_stmt);
      tree addr_expr, addr_tmp, val_expr, val_tmp;
      tree load_offset_ptr, aligned_load_type;
      gimple *addr_stmt, *load_stmt;
      unsigned align;
      HOST_WIDE_INT load_offset = 0;

      align = get_object_alignment (src);
      /* If the new access is smaller than the original one, we need
         to perform big endian adjustment.  */
      if (BYTES_BIG_ENDIAN)
        {
          HOST_WIDE_INT bitsize, bitpos;
          machine_mode mode;
          int unsignedp, reversep, volatilep;
          tree offset;

          get_inner_reference (src, &bitsize, &bitpos, &offset, &mode,
                               &unsignedp, &reversep, &volatilep, false);
          if (n->range < (unsigned HOST_WIDE_INT) bitsize)
            {
              load_offset = (bitsize - n->range) / BITS_PER_UNIT;
              unsigned HOST_WIDE_INT l
                = (load_offset * BITS_PER_UNIT) & (align - 1);
              if (l)
                align = l & -l;
            }
        }

      if (bswap
          && align < GET_MODE_ALIGNMENT (TYPE_MODE (load_type))
          && SLOW_UNALIGNED_ACCESS (TYPE_MODE (load_type), align))
        return false;

      /* Move cur_stmt just before one of the load of the original
         to ensure it has the same VUSE.  See PR61517 for what could
         go wrong.  */
      gsi_move_before (&gsi, &gsi_ins);
      gsi = gsi_for_stmt (cur_stmt);

      /* Compute address to load from and cast according to the size
         of the load.  */
      addr_expr = build_fold_addr_expr (unshare_expr (src));
      if (is_gimple_mem_ref_addr (addr_expr))
        addr_tmp = addr_expr;
      else
        {
          addr_tmp = make_temp_ssa_name (TREE_TYPE (addr_expr), NULL,
                                         "load_src");
          addr_stmt = gimple_build_assign (addr_tmp, addr_expr);
          gsi_insert_before (&gsi, addr_stmt, GSI_SAME_STMT);
        }

      /* Perform the load.  */
      aligned_load_type = load_type;
      if (align < TYPE_ALIGN (load_type))
        aligned_load_type = build_aligned_type (load_type, align);
      load_offset_ptr = build_int_cst (n->alias_set, load_offset);
      val_expr = fold_build2 (MEM_REF, aligned_load_type, addr_tmp,
                              load_offset_ptr);

      if (!bswap)
        {
          if (n->range == 16)
            nop_stats.found_16bit++;
          else if (n->range == 32)
            nop_stats.found_32bit++;
          else
            {
              gcc_assert (n->range == 64);
              nop_stats.found_64bit++;
            }

          /* Convert the result of load if necessary.  */
          if (!useless_type_conversion_p (TREE_TYPE (tgt), load_type))
            {
              val_tmp = make_temp_ssa_name (aligned_load_type, NULL,
                                            "load_dst");
              load_stmt = gimple_build_assign (val_tmp, val_expr);
              gimple_set_vuse (load_stmt, n->vuse);
              gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
              gimple_assign_set_rhs_with_ops (&gsi, NOP_EXPR, val_tmp);
            }
          else
            {
              gimple_assign_set_rhs_with_ops (&gsi, MEM_REF, val_expr);
              gimple_set_vuse (cur_stmt, n->vuse);
            }
          update_stmt (cur_stmt);

          if (dump_file)
            {
              fprintf (dump_file,
                       "%d bit load in target endianness found at: ",
                       (int) n->range);
              print_gimple_stmt (dump_file, cur_stmt, 0, 0);
            }
          return true;
        }
      else
        {
          val_tmp = make_temp_ssa_name (aligned_load_type, NULL, "load_dst");
          load_stmt = gimple_build_assign (val_tmp, val_expr);
          gimple_set_vuse (load_stmt, n->vuse);
          gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
          src = val_tmp;
        }
    }

  if (n->range == 16)
    bswap_stats.found_16bit++;
  else if (n->range == 32)
    bswap_stats.found_32bit++;
  else
    {
      gcc_assert (n->range == 64);
      bswap_stats.found_64bit++;
    }

  tmp = src;

  /* Convert the src expression if necessary.  */
  if (!useless_type_conversion_p (TREE_TYPE (tmp), bswap_type))
    {
      gimple *convert_stmt;

      tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
      convert_stmt = gimple_build_assign (tmp, NOP_EXPR, src);
      gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
    }

  /* Canonical form for 16 bit bswap is a rotate expression.  Only 16bit values
     are considered as rotation of 2N bit values by N bits is generally not
     equivalent to a bswap.  Consider for instance 0x01020304 r>> 16 which
     gives 0x03040102 while a bswap for that value is 0x04030201.  */
  if (bswap && n->range == 16)
    {
      tree count = build_int_cst (NULL, BITS_PER_UNIT);
      src = fold_build2 (LROTATE_EXPR, bswap_type, tmp, count);
      bswap_stmt = gimple_build_assign (NULL, src);
    }
  else
    bswap_stmt = gimple_build_call (fndecl, 1, tmp);

  tmp = tgt;

  /* Convert the result if necessary.  */
  if (!useless_type_conversion_p (TREE_TYPE (tgt), bswap_type))
    {
      gimple *convert_stmt;

      tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
      convert_stmt = gimple_build_assign (tgt, NOP_EXPR, tmp);
      gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
    }

  gimple_set_lhs (bswap_stmt, tmp);

  if (dump_file)
    {
      fprintf (dump_file, "%d bit bswap implementation found at: ",
               (int) n->range);
      print_gimple_stmt (dump_file, cur_stmt, 0, 0);
    }

  gsi_insert_after (&gsi, bswap_stmt, GSI_SAME_STMT);
  gsi_remove (&gsi, true);
  return true;
}
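
/* As an illustration (the SSA names are made up), for a 32-bit bswap
   pattern fed by a memory source the replacement built above looks like:

     load_dst_1 = MEM[(uint32_t *) &src];
     x_2 = __builtin_bswap32 (load_dst_1);

   while for the 16-bit case the canonical form is a rotate:

     x_3 = bswapsrc_4 r<< 8;  */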
/* Find manual byte swap implementations as well as load in a given
   endianness.  Byte swaps are turned into a bswap builtin invocation
   while endian loads are converted to bswap builtin invocation or
   simple load according to the target endianness.  */

unsigned int
pass_optimize_bswap::execute (function *fun)
{
  basic_block bb;
  bool bswap32_p, bswap64_p;
  bool changed = false;
  tree bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;

  if (BITS_PER_UNIT != 8)
    return 0;

  bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
               && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
  bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
               && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
                   || (bswap32_p && word_mode == SImode)));

  /* Determine the argument type of the builtins.  The code later on
     assumes that the return and argument type are the same.  */
  if (bswap32_p)
    {
      tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
      bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
    }

  if (bswap64_p)
    {
      tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
      bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
    }

  memset (&nop_stats, 0, sizeof (nop_stats));
  memset (&bswap_stats, 0, sizeof (bswap_stats));

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;

      /* We do a reverse scan for bswap patterns to make sure we get the
         widest match.  As bswap pattern matching doesn't handle previously
         inserted smaller bswap replacements as sub-patterns, the wider
         variant wouldn't be detected.  */
      for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
        {
          gimple *src_stmt, *cur_stmt = gsi_stmt (gsi);
          tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
          enum tree_code code;
          struct symbolic_number n;
          bool bswap;

          /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt
             might be moved to a different basic block by bswap_replace and gsi
             must not point to it if that's the case.  Moving the gsi_prev
             there makes sure that gsi points to the statement previous to
             cur_stmt while still making sure that all statements are
             considered in this basic block.  */
          gsi_prev (&gsi);

          if (!is_gimple_assign (cur_stmt))
            continue;

          code = gimple_assign_rhs_code (cur_stmt);
          switch (code)
            {
            case LROTATE_EXPR:
            case RROTATE_EXPR:
              if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt))
                  || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt))
                     % BITS_PER_UNIT)
                continue;
              /* Fall through.  */
            case BIT_IOR_EXPR:
              break;
            default:
              continue;
            }

          src_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap);

          if (!src_stmt)
            continue;

          switch (n.range)
            {
            case 16:
              /* Already in canonical form, nothing to do.  */
              if (code == LROTATE_EXPR || code == RROTATE_EXPR)
                continue;
              load_type = bswap_type = uint16_type_node;
              break;
            case 32:
              load_type = uint32_type_node;
              if (bswap32_p)
                {
                  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
                  bswap_type = bswap32_type;
                }
              break;
            case 64:
              load_type = uint64_type_node;
              if (bswap64_p)
                {
                  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
                  bswap_type = bswap64_type;
                }
              break;
            default:
              continue;
            }

          if (bswap && !fndecl && n.range != 16)
            continue;

          if (bswap_replace (cur_stmt, src_stmt, fndecl, bswap_type, load_type,
                             &n, bswap))
            changed = true;
        }
    }

  statistics_counter_event (fun, "16-bit nop implementations found",
                            nop_stats.found_16bit);
  statistics_counter_event (fun, "32-bit nop implementations found",
                            nop_stats.found_32bit);
  statistics_counter_event (fun, "64-bit nop implementations found",
                            nop_stats.found_64bit);
  statistics_counter_event (fun, "16-bit bswap implementations found",
                            bswap_stats.found_16bit);
  statistics_counter_event (fun, "32-bit bswap implementations found",
                            bswap_stats.found_32bit);
  statistics_counter_event (fun, "64-bit bswap implementations found",
                            bswap_stats.found_64bit);

  return (changed ? TODO_update_ssa : 0);
}
gimple_opt_pass *
make_pass_optimize_bswap (gcc::context *ctxt)
{
  return new pass_optimize_bswap (ctxt);
}
/* Return true if STMT is a type conversion operation that can be stripped
   when used in a widening multiply operation.  */

static bool
widening_mult_conversion_strippable_p (tree result_type, gimple *stmt)
{
  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);

  if (TREE_CODE (result_type) == INTEGER_TYPE)
    {
      tree op_type;
      tree inner_op_type;

      if (!CONVERT_EXPR_CODE_P (rhs_code))
        return false;

      op_type = TREE_TYPE (gimple_assign_lhs (stmt));

      /* If the type of OP has the same precision as the result, then
         we can strip this conversion.  The multiply operation will be
         selected to create the correct extension as a by-product.  */
      if (TYPE_PRECISION (result_type) == TYPE_PRECISION (op_type))
        return true;

      /* We can also strip a conversion if it preserves the signed-ness of
         the operation and doesn't narrow the range.  */
      inner_op_type = TREE_TYPE (gimple_assign_rhs1 (stmt));

      /* If the inner-most type is unsigned, then we can strip any
         intermediate widening operation.  If it's signed, then the
         intermediate widening operation must also be signed.  */
      if ((TYPE_UNSIGNED (inner_op_type)
           || TYPE_UNSIGNED (op_type) == TYPE_UNSIGNED (inner_op_type))
          && TYPE_PRECISION (op_type) > TYPE_PRECISION (inner_op_type))
        return true;

      return false;
    }

  return rhs_code == FIXED_CONVERT_EXPR;
}
/* Return true if RHS is a suitable operand for a widening multiplication,
   assuming a target type of TYPE.
   There are two cases:

     - RHS makes some value at least twice as wide.  Store that value
       in *NEW_RHS_OUT if so, and store its type in *TYPE_OUT.

     - RHS is an integer constant.  Store that value in *NEW_RHS_OUT if so,
       but leave *TYPE_OUT untouched.  */

static bool
is_widening_mult_rhs_p (tree type, tree rhs, tree *type_out,
                        tree *new_rhs_out)
{
  gimple *stmt;
  tree type1, rhs1;

  if (TREE_CODE (rhs) == SSA_NAME)
    {
      stmt = SSA_NAME_DEF_STMT (rhs);
      if (is_gimple_assign (stmt))
        {
          if (! widening_mult_conversion_strippable_p (type, stmt))
            rhs1 = rhs;
          else
            {
              rhs1 = gimple_assign_rhs1 (stmt);

              if (TREE_CODE (rhs1) == INTEGER_CST)
                {
                  *new_rhs_out = rhs1;
                  *type_out = NULL;
                  return true;
                }
            }
        }
      else
        rhs1 = rhs;

      type1 = TREE_TYPE (rhs1);

      if (TREE_CODE (type1) != TREE_CODE (type)
          || TYPE_PRECISION (type1) * 2 > TYPE_PRECISION (type))
        return false;

      *new_rhs_out = rhs1;
      *type_out = type1;
      return true;
    }

  if (TREE_CODE (rhs) == INTEGER_CST)
    {
      *new_rhs_out = rhs;
      *type_out = NULL;
      return true;
    }

  return false;
}
/* Return true if STMT performs a widening multiplication, assuming the
   output type is TYPE.  If so, store the unwidened types of the operands
   in *TYPE1_OUT and *TYPE2_OUT respectively.  Also fill *RHS1_OUT and
   *RHS2_OUT such that converting those operands to types *TYPE1_OUT
   and *TYPE2_OUT would give the operands of the multiplication.  */

static bool
is_widening_mult_p (gimple *stmt,
                    tree *type1_out, tree *rhs1_out,
                    tree *type2_out, tree *rhs2_out)
{
  tree type = TREE_TYPE (gimple_assign_lhs (stmt));

  if (TREE_CODE (type) != INTEGER_TYPE
      && TREE_CODE (type) != FIXED_POINT_TYPE)
    return false;

  if (!is_widening_mult_rhs_p (type, gimple_assign_rhs1 (stmt), type1_out,
                               rhs1_out))
    return false;

  if (!is_widening_mult_rhs_p (type, gimple_assign_rhs2 (stmt), type2_out,
                               rhs2_out))
    return false;

  if (*type1_out == NULL)
    {
      if (*type2_out == NULL || !int_fits_type_p (*rhs1_out, *type2_out))
        return false;
      *type1_out = *type2_out;
    }

  if (*type2_out == NULL)
    {
      if (!int_fits_type_p (*rhs2_out, *type1_out))
        return false;
      *type2_out = *type1_out;
    }

  /* Ensure that the larger of the two operands comes first.  */
  if (TYPE_PRECISION (*type1_out) < TYPE_PRECISION (*type2_out))
    {
      std::swap (*type1_out, *type2_out);
      std::swap (*rhs1_out, *rhs2_out);
    }

  return true;
}
/* Process a single gimple statement STMT, which has a MULT_EXPR as
   its rhs, and try to convert it into a WIDEN_MULT_EXPR.  The return
   value is true iff we converted the statement.  */

static bool
convert_mult_to_widen (gimple *stmt, gimple_stmt_iterator *gsi)
{
  tree lhs, rhs1, rhs2, type, type1, type2;
  enum insn_code handler;
  machine_mode to_mode, from_mode, actual_mode;
  optab op;
  int actual_precision;
  location_t loc = gimple_location (stmt);
  bool from_unsigned1, from_unsigned2;

  lhs = gimple_assign_lhs (stmt);
  type = TREE_TYPE (lhs);
  if (TREE_CODE (type) != INTEGER_TYPE)
    return false;

  if (!is_widening_mult_p (stmt, &type1, &rhs1, &type2, &rhs2))
    return false;

  to_mode = TYPE_MODE (type);
  from_mode = TYPE_MODE (type1);
  from_unsigned1 = TYPE_UNSIGNED (type1);
  from_unsigned2 = TYPE_UNSIGNED (type2);

  if (from_unsigned1 && from_unsigned2)
    op = umul_widen_optab;
  else if (!from_unsigned1 && !from_unsigned2)
    op = smul_widen_optab;
  else
    op = usmul_widen_optab;

  handler = find_widening_optab_handler_and_mode (op, to_mode, from_mode,
                                                  0, &actual_mode);

  if (handler == CODE_FOR_nothing)
    {
      if (op != smul_widen_optab)
        {
          /* We can use a signed multiply with unsigned types as long as
             there is a wider mode to use, or it is the smaller of the two
             types that is unsigned.  Note that type1 >= type2, always.  */
          if ((TYPE_UNSIGNED (type1)
               && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
              || (TYPE_UNSIGNED (type2)
                  && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
            {
              from_mode = GET_MODE_WIDER_MODE (from_mode);
              if (GET_MODE_SIZE (to_mode) <= GET_MODE_SIZE (from_mode))
                return false;
            }

          op = smul_widen_optab;
          handler = find_widening_optab_handler_and_mode (op, to_mode,
                                                          from_mode, 0,
                                                          &actual_mode);

          if (handler == CODE_FOR_nothing)
            return false;

          from_unsigned1 = from_unsigned2 = false;
        }
      else
        return false;
    }

  /* Ensure that the inputs to the handler are in the correct precision
     for the opcode.  This will be the full mode size.  */
  actual_precision = GET_MODE_PRECISION (actual_mode);
  if (2 * actual_precision > TYPE_PRECISION (type))
    return false;
  if (actual_precision != TYPE_PRECISION (type1)
      || from_unsigned1 != TYPE_UNSIGNED (type1))
    rhs1 = build_and_insert_cast (gsi, loc,
                                  build_nonstandard_integer_type
                                  (actual_precision, from_unsigned1), rhs1);
  if (actual_precision != TYPE_PRECISION (type2)
      || from_unsigned2 != TYPE_UNSIGNED (type2))
    rhs2 = build_and_insert_cast (gsi, loc,
                                  build_nonstandard_integer_type
                                  (actual_precision, from_unsigned2), rhs2);

  /* Handle constants.  */
  if (TREE_CODE (rhs1) == INTEGER_CST)
    rhs1 = fold_convert (type1, rhs1);
  if (TREE_CODE (rhs2) == INTEGER_CST)
    rhs2 = fold_convert (type2, rhs2);

  gimple_assign_set_rhs1 (stmt, rhs1);
  gimple_assign_set_rhs2 (stmt, rhs2);
  gimple_assign_set_rhs_code (stmt, WIDEN_MULT_EXPR);
  update_stmt (stmt);
  widen_mul_stats.widen_mults_inserted++;
  return true;
}
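
/* For example, assuming the target provides a 32 -> 64 bit widening
   multiply pattern (e.g. mulsidi3), the statement computing

     int64_t c = (int64_t) a * (int64_t) b;

   with A and B of type int32_t has its casts stripped and is rewritten
   here as a single WIDEN_MULT_EXPR on the narrow operands.  */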
/* Process a single gimple statement STMT, which is found at the
   iterator GSI and has either a PLUS_EXPR or a MINUS_EXPR as its
   rhs (given by CODE), and try to convert it into a
   WIDEN_MULT_PLUS_EXPR or a WIDEN_MULT_MINUS_EXPR.  The return value
   is true iff we converted the statement.  */

static bool
convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple *stmt,
                            enum tree_code code)
{
  gimple *rhs1_stmt = NULL, *rhs2_stmt = NULL;
  gimple *conv1_stmt = NULL, *conv2_stmt = NULL, *conv_stmt;
  tree type, type1, type2, optype;
  tree lhs, rhs1, rhs2, mult_rhs1, mult_rhs2, add_rhs;
  enum tree_code rhs1_code = ERROR_MARK, rhs2_code = ERROR_MARK;
  optab this_optab;
  enum tree_code wmult_code;
  enum insn_code handler;
  machine_mode to_mode, from_mode, actual_mode;
  location_t loc = gimple_location (stmt);
  int actual_precision;
  bool from_unsigned1, from_unsigned2;

  lhs = gimple_assign_lhs (stmt);
  type = TREE_TYPE (lhs);
  if (TREE_CODE (type) != INTEGER_TYPE
      && TREE_CODE (type) != FIXED_POINT_TYPE)
    return false;

  if (code == MINUS_EXPR)
    wmult_code = WIDEN_MULT_MINUS_EXPR;
  else
    wmult_code = WIDEN_MULT_PLUS_EXPR;

  rhs1 = gimple_assign_rhs1 (stmt);
  rhs2 = gimple_assign_rhs2 (stmt);

  if (TREE_CODE (rhs1) == SSA_NAME)
    {
      rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
      if (is_gimple_assign (rhs1_stmt))
        rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
    }

  if (TREE_CODE (rhs2) == SSA_NAME)
    {
      rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
      if (is_gimple_assign (rhs2_stmt))
        rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
    }

  /* Allow for one conversion statement between the multiply
     and addition/subtraction statement.  If there are more than
     one conversions then we assume they would invalidate this
     transformation.  If that's not the case then they should have
     been folded before now.  */
  if (CONVERT_EXPR_CODE_P (rhs1_code))
    {
      conv1_stmt = rhs1_stmt;
      rhs1 = gimple_assign_rhs1 (rhs1_stmt);
      if (TREE_CODE (rhs1) == SSA_NAME)
        {
          rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
          if (is_gimple_assign (rhs1_stmt))
            rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
        }
      else
        return false;
    }
  if (CONVERT_EXPR_CODE_P (rhs2_code))
    {
      conv2_stmt = rhs2_stmt;
      rhs2 = gimple_assign_rhs1 (rhs2_stmt);
      if (TREE_CODE (rhs2) == SSA_NAME)
        {
          rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
          if (is_gimple_assign (rhs2_stmt))
            rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
        }
      else
        return false;
    }

  /* If code is WIDEN_MULT_EXPR then it would seem unnecessary to call
     is_widening_mult_p, but we still need the rhs returns.

     It might also appear that it would be sufficient to use the existing
     operands of the widening multiply, but that would limit the choice of
     multiply-and-accumulate instructions.

     If the widened-multiplication result has more than one uses, it is
     probably wiser not to do the conversion.  */
  if (code == PLUS_EXPR
      && (rhs1_code == MULT_EXPR || rhs1_code == WIDEN_MULT_EXPR))
    {
      if (!has_single_use (rhs1)
          || !is_widening_mult_p (rhs1_stmt, &type1, &mult_rhs1,
                                  &type2, &mult_rhs2))
        return false;
      add_rhs = rhs2;
      conv_stmt = conv1_stmt;
    }
  else if (rhs2_code == MULT_EXPR || rhs2_code == WIDEN_MULT_EXPR)
    {
      if (!has_single_use (rhs2)
          || !is_widening_mult_p (rhs2_stmt, &type1, &mult_rhs1,
                                  &type2, &mult_rhs2))
        return false;
      add_rhs = rhs1;
      conv_stmt = conv2_stmt;
    }
  else
    return false;

  to_mode = TYPE_MODE (type);
  from_mode = TYPE_MODE (type1);
  from_unsigned1 = TYPE_UNSIGNED (type1);
  from_unsigned2 = TYPE_UNSIGNED (type2);
  optype = type1;

  /* There's no such thing as a mixed sign madd yet, so use a wider mode.  */
  if (from_unsigned1 != from_unsigned2)
    {
      if (!INTEGRAL_TYPE_P (type))
        return false;
      /* We can use a signed multiply with unsigned types as long as
         there is a wider mode to use, or it is the smaller of the two
         types that is unsigned.  Note that type1 >= type2, always.  */
      if ((from_unsigned1
           && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
          || (from_unsigned2
              && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
        {
          from_mode = GET_MODE_WIDER_MODE (from_mode);
          if (GET_MODE_SIZE (from_mode) >= GET_MODE_SIZE (to_mode))
            return false;
        }

      from_unsigned1 = from_unsigned2 = false;
      optype = build_nonstandard_integer_type (GET_MODE_PRECISION (from_mode),
                                               false);
    }

  /* If there was a conversion between the multiply and addition
     then we need to make sure it fits a multiply-and-accumulate.
     There should be a single mode change which does not change the
     value.  */
  if (conv_stmt)
    {
      /* We use the original, unmodified data types for this.  */
      tree from_type = TREE_TYPE (gimple_assign_rhs1 (conv_stmt));
      tree to_type = TREE_TYPE (gimple_assign_lhs (conv_stmt));
      int data_size = TYPE_PRECISION (type1) + TYPE_PRECISION (type2);
      bool is_unsigned = TYPE_UNSIGNED (type1) && TYPE_UNSIGNED (type2);

      if (TYPE_PRECISION (from_type) > TYPE_PRECISION (to_type))
        {
          /* Conversion is a truncate.  */
          if (TYPE_PRECISION (to_type) < data_size)
            return false;
        }
      else if (TYPE_PRECISION (from_type) < TYPE_PRECISION (to_type))
        {
          /* Conversion is an extend.  Check it's the right sort.  */
          if (TYPE_UNSIGNED (from_type) != is_unsigned
              && !(is_unsigned && TYPE_PRECISION (from_type) > data_size))
            return false;
        }
      /* else convert is a no-op for our purposes.  */
    }

  /* Verify that the machine can perform a widening multiply
     accumulate in this mode/signedness combination, otherwise
     this transformation is likely to pessimize code.  */
  this_optab = optab_for_tree_code (wmult_code, optype, optab_default);
  handler = find_widening_optab_handler_and_mode (this_optab, to_mode,
                                                  from_mode, 0, &actual_mode);

  if (handler == CODE_FOR_nothing)
    return false;

  /* Ensure that the inputs to the handler are in the correct precision
     for the opcode.  This will be the full mode size.  */
  actual_precision = GET_MODE_PRECISION (actual_mode);
  if (actual_precision != TYPE_PRECISION (type1)
      || from_unsigned1 != TYPE_UNSIGNED (type1))
    mult_rhs1 = build_and_insert_cast (gsi, loc,
                                       build_nonstandard_integer_type
                                       (actual_precision, from_unsigned1),
                                       mult_rhs1);
  if (actual_precision != TYPE_PRECISION (type2)
      || from_unsigned2 != TYPE_UNSIGNED (type2))
    mult_rhs2 = build_and_insert_cast (gsi, loc,
                                       build_nonstandard_integer_type
                                       (actual_precision, from_unsigned2),
                                       mult_rhs2);

  if (!useless_type_conversion_p (type, TREE_TYPE (add_rhs)))
    add_rhs = build_and_insert_cast (gsi, loc, type, add_rhs);

  /* Handle constants.  */
  if (TREE_CODE (mult_rhs1) == INTEGER_CST)
    mult_rhs1 = fold_convert (type1, mult_rhs1);
  if (TREE_CODE (mult_rhs2) == INTEGER_CST)
    mult_rhs2 = fold_convert (type2, mult_rhs2);

  gimple_assign_set_rhs_with_ops (gsi, wmult_code, mult_rhs1, mult_rhs2,
                                  add_rhs);
  update_stmt (gsi_stmt (*gsi));
  widen_mul_stats.maccs_inserted++;
  return true;
}
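
/* For example, assuming the target provides a multiply-and-accumulate
   pattern (e.g. maddhisi4), the statement pair computing

     int32_t acc2 = acc + (int32_t) a * (int32_t) b;

   with A and B of type int16_t is rewritten here as a single
   WIDEN_MULT_PLUS_EXPR combining the multiply and the addition.  */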
/* Combine the multiplication at MUL_STMT with operands MULOP1 and MULOP2
   with uses in additions and subtractions to form fused multiply-add
   operations.  Returns true if successful and MUL_STMT should be removed.  */

static bool
convert_mult_to_fma (gimple *mul_stmt, tree op1, tree op2)
{
  tree mul_result = gimple_get_lhs (mul_stmt);
  tree type = TREE_TYPE (mul_result);
  gimple *use_stmt, *neguse_stmt;
  gassign *fma_stmt;
  use_operand_p use_p;
  imm_use_iterator imm_iter;

  if (FLOAT_TYPE_P (type)
      && flag_fp_contract_mode == FP_CONTRACT_OFF)
    return false;

  /* We don't want to do bitfield reduction ops.  */
  if (INTEGRAL_TYPE_P (type)
      && (TYPE_PRECISION (type)
          != GET_MODE_PRECISION (TYPE_MODE (type))))
    return false;

  /* If the target doesn't support it, don't generate it.  We assume that
     if fma isn't available then fms, fnma or fnms are not either.  */
  if (optab_handler (fma_optab, TYPE_MODE (type)) == CODE_FOR_nothing)
    return false;

  /* If the multiplication has zero uses, it is kept around probably because
     of -fnon-call-exceptions.  Don't optimize it away in that case,
     that is DCE's job.  */
  if (has_zero_uses (mul_result))
    return false;

  /* Make sure that the multiplication statement becomes dead after
     the transformation, thus that all uses are transformed to FMAs.
     This means we assume that an FMA operation has the same cost
     as an addition.  */
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, mul_result)
    {
      enum tree_code use_code;
      tree result = mul_result;
      bool negate_p = false;

      use_stmt = USE_STMT (use_p);

      if (is_gimple_debug (use_stmt))
        continue;

      /* For now restrict these operations to single basic blocks.  In theory
         we would want to support sinking the multiplication in

           m = a*b;
           if ()
             ma = m + c;
           else
             d = m;

         to form a fma in the then block and sink the multiplication to the
         else block.  */
      if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
        return false;

      if (!is_gimple_assign (use_stmt))
        return false;

      use_code = gimple_assign_rhs_code (use_stmt);

      /* A negate on the multiplication leads to FNMA.  */
      if (use_code == NEGATE_EXPR)
        {
          ssa_op_iter iter;
          use_operand_p usep;

          result = gimple_assign_lhs (use_stmt);

          /* Make sure the negate statement becomes dead with this
             single transformation.  */
          if (!single_imm_use (gimple_assign_lhs (use_stmt),
                               &use_p, &neguse_stmt))
            return false;

          /* Make sure the multiplication isn't also used on that stmt.  */
          FOR_EACH_PHI_OR_STMT_USE (usep, neguse_stmt, iter, SSA_OP_USE)
            if (USE_FROM_PTR (usep) == mul_result)
              return false;

          /* Re-validate.  */
          use_stmt = neguse_stmt;
          if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
            return false;
          if (!is_gimple_assign (use_stmt))
            return false;

          use_code = gimple_assign_rhs_code (use_stmt);
          negate_p = true;
        }

      switch (use_code)
        {
        case MINUS_EXPR:
          if (gimple_assign_rhs2 (use_stmt) == result)
            negate_p = !negate_p;
          break;
        case PLUS_EXPR:
          break;
        default:
          /* FMA can only be formed from PLUS and MINUS.  */
          return false;
        }

      /* If the subtrahend (gimple_assign_rhs2 (use_stmt)) is computed
         by a MULT_EXPR that we'll visit later, we might be able to
         get a more profitable match with fnma.
         OTOH, if we don't, a negate / fma pair has likely lower latency
         than a mult / subtract pair.  */
      if (use_code == MINUS_EXPR && !negate_p
          && gimple_assign_rhs1 (use_stmt) == result
          && optab_handler (fms_optab, TYPE_MODE (type)) == CODE_FOR_nothing
          && optab_handler (fnma_optab, TYPE_MODE (type)) != CODE_FOR_nothing)
        {
          tree rhs2 = gimple_assign_rhs2 (use_stmt);

          if (TREE_CODE (rhs2) == SSA_NAME)
            {
              gimple *stmt2 = SSA_NAME_DEF_STMT (rhs2);
              if (has_single_use (rhs2)
                  && is_gimple_assign (stmt2)
                  && gimple_assign_rhs_code (stmt2) == MULT_EXPR)
                return false;
            }
        }

      /* We can't handle a * b + a * b.  */
      if (gimple_assign_rhs1 (use_stmt) == gimple_assign_rhs2 (use_stmt))
        return false;

      /* While it is possible to validate whether or not the exact form
         that we've recognized is available in the backend, the assumption
         is that the transformation is never a loss.  For instance, suppose
         the target only has the plain FMA pattern available.  Consider
         a*b-c -> fma(a,b,-c): we've exchanged MUL+SUB for FMA+NEG, which
         is still two operations.  Consider -(a*b)-c -> fma(-a,b,-c): we
         still have 3 operations, but in the FMA form the two NEGs are
         independent and could be run in parallel.  */
    }

  FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, mul_result)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      enum tree_code use_code;
      tree addop, mulop1 = op1, result = mul_result;
      bool negate_p = false;

      if (is_gimple_debug (use_stmt))
        continue;

      use_code = gimple_assign_rhs_code (use_stmt);
      if (use_code == NEGATE_EXPR)
        {
          result = gimple_assign_lhs (use_stmt);
          single_imm_use (gimple_assign_lhs (use_stmt), &use_p, &neguse_stmt);
          gsi_remove (&gsi, true);
          release_defs (use_stmt);

          use_stmt = neguse_stmt;
          gsi = gsi_for_stmt (use_stmt);
          use_code = gimple_assign_rhs_code (use_stmt);
          negate_p = true;
        }

      if (gimple_assign_rhs1 (use_stmt) == result)
        {
          addop = gimple_assign_rhs2 (use_stmt);
          /* a * b - c -> a * b + (-c)  */
          if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
            addop = force_gimple_operand_gsi (&gsi,
                                              build1 (NEGATE_EXPR,
                                                      type, addop),
                                              true, NULL_TREE, true,
                                              GSI_SAME_STMT);
        }
      else
        {
          addop = gimple_assign_rhs1 (use_stmt);
          /* a - b * c -> (-b) * c + a */
          if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
            negate_p = !negate_p;
        }

      if (negate_p)
        mulop1 = force_gimple_operand_gsi (&gsi,
                                           build1 (NEGATE_EXPR,
                                                   type, mulop1),
                                           true, NULL_TREE, true,
                                           GSI_SAME_STMT);

      fma_stmt = gimple_build_assign (gimple_assign_lhs (use_stmt),
                                      FMA_EXPR, mulop1, op2, addop);
      gsi_replace (&gsi, fma_stmt, true);
      widen_mul_stats.fmas_inserted++;
    }

  return true;
}
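
/* For example, with an fma pattern available, d = a * b + c becomes
   d = FMA_EXPR <a, b, c>.  A negate on the multiplication result, as in
   d = -(a * b) + c, is folded into the first operand, giving
   d = FMA_EXPR <-a, b, c>, which the expander can then match to an fnma
   instruction.  */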
/* Helper function of match_uaddsub_overflow.  Return 1
   if USE_STMT is unsigned overflow check ovf != 0 for
   STMT, -1 if USE_STMT is unsigned overflow check ovf == 0
   and 0 otherwise.  */

static int
uaddsub_overflow_check_p (gimple *stmt, gimple *use_stmt)
{
  enum tree_code ccode = ERROR_MARK;
  tree crhs1 = NULL_TREE, crhs2 = NULL_TREE;
  if (gimple_code (use_stmt) == GIMPLE_COND)
    {
      ccode = gimple_cond_code (use_stmt);
      crhs1 = gimple_cond_lhs (use_stmt);
      crhs2 = gimple_cond_rhs (use_stmt);
    }
  else if (is_gimple_assign (use_stmt))
    {
      if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
        {
          ccode = gimple_assign_rhs_code (use_stmt);
          crhs1 = gimple_assign_rhs1 (use_stmt);
          crhs2 = gimple_assign_rhs2 (use_stmt);
        }
      else if (gimple_assign_rhs_code (use_stmt) == COND_EXPR)
        {
          tree cond = gimple_assign_rhs1 (use_stmt);
          if (COMPARISON_CLASS_P (cond))
            {
              ccode = TREE_CODE (cond);
              crhs1 = TREE_OPERAND (cond, 0);
              crhs2 = TREE_OPERAND (cond, 1);
            }
          else
            return 0;
        }
      else
        return 0;
    }
  else
    return 0;

  if (TREE_CODE_CLASS (ccode) != tcc_comparison)
    return 0;

  enum tree_code code = gimple_assign_rhs_code (stmt);
  tree lhs = gimple_assign_lhs (stmt);
  tree rhs1 = gimple_assign_rhs1 (stmt);
  tree rhs2 = gimple_assign_rhs2 (stmt);

  switch (ccode)
    {
    case GT_EXPR:
    case LE_EXPR:
      /* r = a - b; r > a or r <= a
         r = a + b; a > r or a <= r or b > r or b <= r.  */
      if ((code == MINUS_EXPR && crhs1 == lhs && crhs2 == rhs1)
          || (code == PLUS_EXPR && (crhs1 == rhs1 || crhs1 == rhs2)
              && crhs2 == lhs))
        return ccode == GT_EXPR ? 1 : -1;
      break;
    case LT_EXPR:
    case GE_EXPR:
      /* r = a - b; a < r or a >= r
         r = a + b; r < a or r >= a or r < b or r >= b.  */
      if ((code == MINUS_EXPR && crhs1 == rhs1 && crhs2 == lhs)
          || (code == PLUS_EXPR && crhs1 == lhs
              && (crhs2 == rhs1 || crhs2 == rhs2)))
        return ccode == LT_EXPR ? 1 : -1;
      break;
    default:
      break;
    }
  return 0;
}
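
/* For instance, given r = a + b, the use if (r < a) is an overflow check
   for which this function returns 1, while if (r >= a) is a no-overflow
   check for which it returns -1.  */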
/* Recognize for unsigned x

   x = y - z;
   if (x > y)

   where there are other uses of x and replace it with

   _7 = SUB_OVERFLOW (y, z);
   x = REALPART_EXPR <_7>;
   _8 = IMAGPART_EXPR <_7>;
   if (_8)

   and similarly for addition.  */
static bool
match_uaddsub_overflow (gimple_stmt_iterator *gsi, gimple *stmt,
                        enum tree_code code)
{
  tree lhs = gimple_assign_lhs (stmt);
  tree type = TREE_TYPE (lhs);
  use_operand_p use_p;
  imm_use_iterator iter;
  bool use_seen = false;
  bool ovf_use_seen = false;
  gimple *use_stmt;

  gcc_checking_assert (code == PLUS_EXPR || code == MINUS_EXPR);
  if (!INTEGRAL_TYPE_P (type)
      || !TYPE_UNSIGNED (type)
      || has_zero_uses (lhs)
      || has_single_use (lhs)
      || optab_handler (code == PLUS_EXPR ? uaddv4_optab : usubv4_optab,
                        TYPE_MODE (type)) == CODE_FOR_nothing)
    return false;

  FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
    {
      use_stmt = USE_STMT (use_p);
      if (is_gimple_debug (use_stmt))
        continue;

      if (uaddsub_overflow_check_p (stmt, use_stmt))
        ovf_use_seen = true;
      else
        use_seen = true;
      if (ovf_use_seen && use_seen)
        break;
    }

  if (!ovf_use_seen || !use_seen)
    return false;

  tree ctype = build_complex_type (type);
  tree rhs1 = gimple_assign_rhs1 (stmt);
  tree rhs2 = gimple_assign_rhs2 (stmt);
  gcall *g = gimple_build_call_internal (code == PLUS_EXPR
                                         ? IFN_ADD_OVERFLOW
                                         : IFN_SUB_OVERFLOW,
                                         2, rhs1, rhs2);
  tree ctmp = make_ssa_name (ctype);
  gimple_call_set_lhs (g, ctmp);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);
  gassign *g2 = gimple_build_assign (lhs, REALPART_EXPR,
                                     build1 (REALPART_EXPR, type, ctmp));
  gsi_replace (gsi, g2, true);
  tree ovf = make_ssa_name (type);
  g2 = gimple_build_assign (ovf, IMAGPART_EXPR,
                            build1 (IMAGPART_EXPR, type, ctmp));
  gsi_insert_after (gsi, g2, GSI_NEW_STMT);

  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
    {
      if (is_gimple_debug (use_stmt))
        continue;

      int ovf_use = uaddsub_overflow_check_p (stmt, use_stmt);
      if (ovf_use == 0)
        continue;
      if (gimple_code (use_stmt) == GIMPLE_COND)
        {
          gcond *cond_stmt = as_a <gcond *> (use_stmt);
          gimple_cond_set_lhs (cond_stmt, ovf);
          gimple_cond_set_rhs (cond_stmt, build_int_cst (type, 0));
          gimple_cond_set_code (cond_stmt, ovf_use == 1 ? NE_EXPR : EQ_EXPR);
        }
      else
        {
          gcc_checking_assert (is_gimple_assign (use_stmt));
          if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
            {
              gimple_assign_set_rhs1 (use_stmt, ovf);
              gimple_assign_set_rhs2 (use_stmt, build_int_cst (type, 0));
              gimple_assign_set_rhs_code (use_stmt,
                                          ovf_use == 1 ? NE_EXPR : EQ_EXPR);
            }
          else
            {
              gcc_checking_assert (gimple_assign_rhs_code (use_stmt)
                                   == COND_EXPR);
              tree cond = build2 (ovf_use == 1 ? NE_EXPR : EQ_EXPR,
                                  boolean_type_node, ovf,
                                  build_int_cst (type, 0));
              gimple_assign_set_rhs1 (use_stmt, cond);
            }
        }
      update_stmt (use_stmt);
    }
  return true;
}
/* Find integer multiplications where the operands are extended from
   smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR
   where appropriate.  */

const pass_data pass_data_optimize_widening_mul =
{
  GIMPLE_PASS, /* type */
  "widening_mul", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_optimize_widening_mul : public gimple_opt_pass
{
public:
  pass_optimize_widening_mul (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_optimize_widening_mul, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return flag_expensive_optimizations && optimize;
    }

  virtual unsigned int execute (function *);

}; // class pass_optimize_widening_mul
unsigned int
pass_optimize_widening_mul::execute (function *fun)
{
  basic_block bb;
  bool cfg_changed = false;

  memset (&widen_mul_stats, 0, sizeof (widen_mul_stats));

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
        {
          gimple *stmt = gsi_stmt (gsi);
          enum tree_code code;

          if (is_gimple_assign (stmt))
            {
              code = gimple_assign_rhs_code (stmt);
              switch (code)
                {
                case MULT_EXPR:
                  if (!convert_mult_to_widen (stmt, &gsi)
                      && convert_mult_to_fma (stmt,
                                              gimple_assign_rhs1 (stmt),
                                              gimple_assign_rhs2 (stmt)))
                    {
                      gsi_remove (&gsi, true);
                      release_defs (stmt);
                      continue;
                    }
                  break;

                case PLUS_EXPR:
                case MINUS_EXPR:
                  if (!convert_plusminus_to_widen (&gsi, stmt, code))
                    match_uaddsub_overflow (&gsi, stmt, code);
                  break;

                default:;
                }
            }
          else if (is_gimple_call (stmt)
                   && gimple_call_lhs (stmt))
            {
              tree fndecl = gimple_call_fndecl (stmt);
              if (fndecl
                  && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
                {
                  switch (DECL_FUNCTION_CODE (fndecl))
                    {
                    case BUILT_IN_POWF:
                    case BUILT_IN_POW:
                    case BUILT_IN_POWL:
                      if (TREE_CODE (gimple_call_arg (stmt, 1)) == REAL_CST
                          && real_equal
                               (&TREE_REAL_CST (gimple_call_arg (stmt, 1)),
                                &dconst2)
                          && convert_mult_to_fma (stmt,
                                                  gimple_call_arg (stmt, 0),
                                                  gimple_call_arg (stmt, 0)))
                        {
                          unlink_stmt_vdef (stmt);
                          if (gsi_remove (&gsi, true)
                              && gimple_purge_dead_eh_edges (bb))
                            cfg_changed = true;
                          release_defs (stmt);
                          continue;
                        }
                      break;

                    default:;
                    }
                }
            }
          gsi_next (&gsi);
        }
    }

  statistics_counter_event (fun, "widening multiplications inserted",
                            widen_mul_stats.widen_mults_inserted);
  statistics_counter_event (fun, "widening maccs inserted",
                            widen_mul_stats.maccs_inserted);
  statistics_counter_event (fun, "fused multiply-adds inserted",
                            widen_mul_stats.fmas_inserted);

  return cfg_changed ? TODO_cleanup_cfg : 0;
}
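
/* Note the pow cases above: a call to pow (x, 2.0) with a literal exponent
   of 2 is treated as x * x, so for instance

     double d = __builtin_pow (x, 2.0) + c;

   becomes d = FMA_EXPR <x, x, c> when an fma pattern is available and FP
   contraction is not disabled.  */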
gimple_opt_pass *
make_pass_optimize_widening_mul (gcc::context *ctxt)
{
  return new pass_optimize_widening_mul (ctxt);
}