1 /* Global, SSA-based optimizations using mathematical identities.
2 Copyright (C) 2005-2014 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 /* The first mini-pass in this file tries to CSE reciprocal
21 operations. These are common in sequences such as this one:
22
23 modulus = sqrt(x*x + y*y + z*z);
24 x = x / modulus;
25 y = y / modulus;
26 z = z / modulus;
27
28 that can be optimized to
29
30 modulus = sqrt(x*x + y*y + z*z);
31 rmodulus = 1.0 / modulus;
32 x = x * rmodulus;
33 y = y * rmodulus;
34 z = z * rmodulus;
35
36 We do this for loop invariant divisors, and with this pass whenever
37 we notice that a division has the same divisor multiple times.
38
39 Of course, like in PRE, we don't insert a division if a dominator
40 already has one. However, this cannot be done as an extension of
41 PRE for several reasons.
42
43 First of all, experiments have shown that the transformation is not
44 always useful if there are only two divisions by the same divisor.
45 This is probably because modern processors
46 can pipeline the divisions; on older, in-order processors it should
47 still be effective to optimize two divisions by the same number.
48 We make this a param, and it shall be called N in the remainder of
49 this comment.
50
51 Second, if trapping math is active, we have less freedom on where
52 to insert divisions: we can only do so in basic blocks that already
53 contain one.  (If divisions don't trap, we can instead insert
54 divisions elsewhere, in blocks that are common dominators
55 of those that have the division.)
56
57 We really don't want to compute the reciprocal unless a division will
58 be found. To do this, we won't insert the division in a basic block
59 that has less than N divisions *post-dominating* it.
60
61 The algorithm constructs a subset of the dominator tree, holding the
62 blocks containing the divisions and their common dominators,
63 and walks it twice.  The first walk is in post-order, and it annotates
64 each block with the number of divisions that post-dominate it: this
65 gives information on where divisions can be inserted profitably.
66 The second walk is in pre-order, and it inserts divisions as explained
67 above, and replaces divisions by multiplications.
68
69 In the best case, the cost of the pass is O(n_statements). In the
70 worst-case, the cost is due to creating the dominator tree subset,
71 with a cost of O(n_basic_blocks ^ 2); however this can only happen
72 for n_statements / n_basic_blocks statements. So, the amortized cost
73 of creating the dominator tree subset is O(n_basic_blocks) and the
74 worst-case cost of the pass is O(n_statements * n_basic_blocks).
75
76 More practically, the cost will be small because there are few
77 divisions, and they tend to be in the same basic block, so insert_bb
78 is called very few times.
79
80 If we did this using domwalk.c, an efficient implementation would have
81 to work on all the variables in a single pass, because we could not
82 work on just a subset of the dominator tree, as we do now, and the
83 cost would also be something like O(n_statements * n_basic_blocks).
84 The data structures would be more complex in order to work on all the
85 variables in a single pass. */
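/* A brief illustration of the parameter N (a sketch, not part of the
   implementation): assuming the three divisions in the example above all
   end up in the same basic block and N is 3, the count of divisions by
   `modulus' reaches N, so 1.0 / modulus is inserted and the divisions
   become multiplications; with only two divisions by `modulus' the code
   would be left untouched.  The actual value of N comes from the
   targetm.min_divisions_for_recip_mul hook used below, so it is
   target- and mode-specific.  */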
86
87 #include "config.h"
88 #include "system.h"
89 #include "coretypes.h"
90 #include "tm.h"
91 #include "flags.h"
92 #include "tree.h"
93 #include "basic-block.h"
94 #include "tree-ssa-alias.h"
95 #include "internal-fn.h"
96 #include "gimple-fold.h"
97 #include "gimple-expr.h"
98 #include "is-a.h"
99 #include "gimple.h"
100 #include "gimple-iterator.h"
101 #include "gimplify-me.h"
102 #include "stor-layout.h"
103 #include "gimple-ssa.h"
104 #include "tree-cfg.h"
105 #include "tree-phinodes.h"
106 #include "ssa-iterators.h"
107 #include "stringpool.h"
108 #include "tree-ssanames.h"
109 #include "expr.h"
110 #include "tree-dfa.h"
111 #include "tree-ssa.h"
112 #include "tree-pass.h"
113 #include "alloc-pool.h"
114 #include "target.h"
115 #include "gimple-pretty-print.h"
116
117 /* FIXME: RTL headers have to be included here for optabs. */
118 #include "rtl.h" /* Because optabs.h wants enum rtx_code. */
119 #include "expr.h" /* Because optabs.h wants sepops. */
120 #include "optabs.h"
121
122 /* This structure represents one basic block that either computes a
123 division, or is a common dominator for basic blocks that compute a
124 division. */
125 struct occurrence {
126 /* The basic block represented by this structure. */
127 basic_block bb;
128
129 /* If non-NULL, the SSA_NAME holding the definition for a reciprocal
130 inserted in BB. */
131 tree recip_def;
132
133 /* If non-NULL, the GIMPLE_ASSIGN for a reciprocal computation that
134 was inserted in BB. */
135 gimple recip_def_stmt;
136
137 /* Pointer to a list of "struct occurrence"s for blocks dominated
138 by BB. */
139 struct occurrence *children;
140
141 /* Pointer to the next "struct occurrence" in the list of blocks
142 sharing a common dominator. */
143 struct occurrence *next;
144
145 /* The number of divisions that are in BB before compute_merit. The
146 number of divisions that are in BB or post-dominate it after
147 compute_merit. */
148 int num_divisions;
149
150 /* True if the basic block has a division, false if it is a common
151 dominator for basic blocks that do. If it is false and trapping
152 math is active, BB is not a candidate for inserting a reciprocal. */
153 bool bb_has_division;
154 };
155
156 static struct
157 {
158 /* Number of 1.0/X ops inserted. */
159 int rdivs_inserted;
160
161 /* Number of 1.0/FUNC ops inserted. */
162 int rfuncs_inserted;
163 } reciprocal_stats;
164
165 static struct
166 {
167 /* Number of cexpi calls inserted. */
168 int inserted;
169 } sincos_stats;
170
171 static struct
172 {
173 /* Number of hand-written 16-bit bswaps found. */
174 int found_16bit;
175
176 /* Number of hand-written 32-bit bswaps found. */
177 int found_32bit;
178
179 /* Number of hand-written 64-bit bswaps found. */
180 int found_64bit;
181 } bswap_stats;
182
183 static struct
184 {
185 /* Number of widening multiplication ops inserted. */
186 int widen_mults_inserted;
187
188 /* Number of integer multiply-and-accumulate ops inserted. */
189 int maccs_inserted;
190
191 /* Number of fp fused multiply-add ops inserted. */
192 int fmas_inserted;
193 } widen_mul_stats;
194
195 /* The instance of "struct occurrence" representing the highest
196 interesting block in the dominator tree. */
197 static struct occurrence *occ_head;
198
199 /* Allocation pool for getting instances of "struct occurrence". */
200 static alloc_pool occ_pool;
201
202
203
204 /* Allocate and return a new struct occurrence for basic block BB,
205 whose children list is headed by CHILDREN. */
206 static struct occurrence *
207 occ_new (basic_block bb, struct occurrence *children)
208 {
209 struct occurrence *occ;
210
211 bb->aux = occ = (struct occurrence *) pool_alloc (occ_pool);
212 memset (occ, 0, sizeof (struct occurrence));
213
214 occ->bb = bb;
215 occ->children = children;
216 return occ;
217 }
218
219
220 /* Insert NEW_OCC into our subset of the dominator tree. P_HEAD points to a
221 list of "struct occurrence"s, one per basic block, having IDOM as
222 their common dominator.
223
224 We try to insert NEW_OCC as deep as possible in the tree, and we also
225 insert any other block that is a common dominator for BB and one
226 block already in the tree. */
227
228 static void
229 insert_bb (struct occurrence *new_occ, basic_block idom,
230 struct occurrence **p_head)
231 {
232 struct occurrence *occ, **p_occ;
233
234 for (p_occ = p_head; (occ = *p_occ) != NULL; )
235 {
236 basic_block bb = new_occ->bb, occ_bb = occ->bb;
237 basic_block dom = nearest_common_dominator (CDI_DOMINATORS, occ_bb, bb);
238 if (dom == bb)
239 {
240 /* BB dominates OCC_BB. OCC becomes NEW_OCC's child: remove OCC
241 from its list. */
242 *p_occ = occ->next;
243 occ->next = new_occ->children;
244 new_occ->children = occ;
245
246 /* Try the next block (it may also be dominated by BB). */
247 }
248
249 else if (dom == occ_bb)
250 {
251 /* OCC_BB dominates BB. Tail recurse to look deeper. */
252 insert_bb (new_occ, dom, &occ->children);
253 return;
254 }
255
256 else if (dom != idom)
257 {
258 gcc_assert (!dom->aux);
259
260 /* There is a dominator between IDOM and BB, add it and make
261 two children out of NEW_OCC and OCC. First, remove OCC from
262 its list. */
263 *p_occ = occ->next;
264 new_occ->next = occ;
265 occ->next = NULL;
266
267 /* None of the previous blocks has DOM as a dominator: if we tail
268 recursed, we would reexamine them uselessly. Just switch BB with
269 DOM, and go on looking for blocks dominated by DOM. */
270 new_occ = occ_new (dom, new_occ);
271 }
272
273 else
274 {
275 /* Nothing special, go on with the next element. */
276 p_occ = &occ->next;
277 }
278 }
279
280 /* No place was found as a child of IDOM. Make BB a sibling of IDOM. */
281 new_occ->next = *p_head;
282 *p_head = new_occ;
283 }
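/* An illustrative trace on a hypothetical CFG (not taken from real code):
   suppose divisions by the same SSA name occur in bb5 and bb7, whose
   nearest common dominator is bb2.  Registering bb5 first leaves occ_head
   pointing at a single node for bb5.  Registering bb7 then takes the
   "dom != idom" branch above: a fresh node for bb2 is created with bb7
   and bb5 as its children, and bb2 replaces them at the top level:

       occ_head -> bb2
                    +-- bb7
                    +-- bb5

   so the common dominator becomes available as a potential insertion
   point for the reciprocal.  */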
284
285 /* Register that we found a division in BB. */
286
287 static inline void
288 register_division_in (basic_block bb)
289 {
290 struct occurrence *occ;
291
292 occ = (struct occurrence *) bb->aux;
293 if (!occ)
294 {
295 occ = occ_new (bb, NULL);
296 insert_bb (occ, ENTRY_BLOCK_PTR_FOR_FN (cfun), &occ_head);
297 }
298
299 occ->bb_has_division = true;
300 occ->num_divisions++;
301 }
302
303
304 /* Compute the number of divisions that postdominate each block in OCC and
305 its children. */
306
307 static void
308 compute_merit (struct occurrence *occ)
309 {
310 struct occurrence *occ_child;
311 basic_block dom = occ->bb;
312
313 for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
314 {
315 basic_block bb;
316 if (occ_child->children)
317 compute_merit (occ_child);
318
319 if (flag_exceptions)
320 bb = single_noncomplex_succ (dom);
321 else
322 bb = dom;
323
324 if (dominated_by_p (CDI_POST_DOMINATORS, bb, occ_child->bb))
325 occ->num_divisions += occ_child->num_divisions;
326 }
327 }
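/* Continuing the hypothetical trace given after insert_bb: bb2 starts
   with num_divisions == 0 because it contains no division itself.  If,
   say, bb7 post-dominates bb2 while bb5 does not (and ignoring the
   flag_exceptions adjustment), compute_merit raises bb2's count to 1.
   With trapping math enabled bb2 still cannot receive a reciprocal,
   since its bb_has_division is false; the reciprocals can then only be
   inserted next to the existing divisions in bb5 and bb7, and only if
   their own counts reach the threshold.  */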
328
329
330 /* Return whether USE_STMT is a floating-point division by DEF. */
331 static inline bool
332 is_division_by (gimple use_stmt, tree def)
333 {
334 return is_gimple_assign (use_stmt)
335 && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
336 && gimple_assign_rhs2 (use_stmt) == def
337 /* Do not recognize x / x as a valid division, as we would get
338 confused later by replacing all immediate uses of x in such
339 a stmt. */
340 && gimple_assign_rhs1 (use_stmt) != def;
341 }
342
343 /* Walk the subset of the dominator tree rooted at OCC, setting the
344 RECIP_DEF field to a definition of 1.0 / DEF that can be used in
345 the given basic block. The field may be left NULL, of course,
346 if it is not possible or profitable to do the optimization.
347
348 DEF_GSI is an iterator pointing at the statement defining DEF.
349 If RECIP_DEF is set, a dominator already has a computation that can
350 be used. */
351
352 static void
353 insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ,
354 tree def, tree recip_def, int threshold)
355 {
356 tree type;
357 gimple new_stmt;
358 gimple_stmt_iterator gsi;
359 struct occurrence *occ_child;
360
361 if (!recip_def
362 && (occ->bb_has_division || !flag_trapping_math)
363 && occ->num_divisions >= threshold)
364 {
365 /* Make a variable with the replacement and substitute it. */
366 type = TREE_TYPE (def);
367 recip_def = create_tmp_reg (type, "reciptmp");
368 new_stmt = gimple_build_assign_with_ops (RDIV_EXPR, recip_def,
369 build_one_cst (type), def);
370
371 if (occ->bb_has_division)
372 {
373 /* Case 1: insert before an existing division. */
374 gsi = gsi_after_labels (occ->bb);
375 while (!gsi_end_p (gsi) && !is_division_by (gsi_stmt (gsi), def))
376 gsi_next (&gsi);
377
378 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
379 }
380 else if (def_gsi && occ->bb == def_gsi->bb)
381 {
382 /* Case 2: insert right after the definition. Note that this will
383 never happen if the definition statement can throw, because in
384 that case the sole successor of the statement's basic block will
385 dominate all the uses as well. */
386 gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
387 }
388 else
389 {
390 /* Case 3: insert in a basic block not containing defs/uses. */
391 gsi = gsi_after_labels (occ->bb);
392 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
393 }
394
395 reciprocal_stats.rdivs_inserted++;
396
397 occ->recip_def_stmt = new_stmt;
398 }
399
400 occ->recip_def = recip_def;
401 for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
402 insert_reciprocals (def_gsi, occ_child, def, recip_def, threshold);
403 }
404
405
406 /* Replace the division at USE_P with a multiplication by the reciprocal, if
407 possible. */
408
409 static inline void
410 replace_reciprocal (use_operand_p use_p)
411 {
412 gimple use_stmt = USE_STMT (use_p);
413 basic_block bb = gimple_bb (use_stmt);
414 struct occurrence *occ = (struct occurrence *) bb->aux;
415
416 if (optimize_bb_for_speed_p (bb)
417 && occ->recip_def && use_stmt != occ->recip_def_stmt)
418 {
419 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
420 gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
421 SET_USE (use_p, occ->recip_def);
422 fold_stmt_inplace (&gsi);
423 update_stmt (use_stmt);
424 }
425 }
426
427
428 /* Free OCC and return one more "struct occurrence" to be freed. */
429
430 static struct occurrence *
431 free_bb (struct occurrence *occ)
432 {
433 struct occurrence *child, *next;
434
435 /* First get the two pointers hanging off OCC. */
436 next = occ->next;
437 child = occ->children;
438 occ->bb->aux = NULL;
439 pool_free (occ_pool, occ);
440
441 /* Now ensure that we don't recurse unless it is necessary. */
442 if (!child)
443 return next;
444 else
445 {
446 while (next)
447 next = free_bb (next);
448
449 return child;
450 }
451 }
452
453
454 /* Look for floating-point divisions among DEF's uses, and try to
455 replace them by multiplications with the reciprocal. Add
456 as many statements computing the reciprocal as needed.
457
458 DEF must be a GIMPLE register of a floating-point type. */
459
460 static void
461 execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
462 {
463 use_operand_p use_p;
464 imm_use_iterator use_iter;
465 struct occurrence *occ;
466 int count = 0, threshold;
467
468 gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && is_gimple_reg (def));
469
470 FOR_EACH_IMM_USE_FAST (use_p, use_iter, def)
471 {
472 gimple use_stmt = USE_STMT (use_p);
473 if (is_division_by (use_stmt, def))
474 {
475 register_division_in (gimple_bb (use_stmt));
476 count++;
477 }
478 }
479
480 /* Do the expensive part only if we can hope to optimize something. */
481 threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));
482 if (count >= threshold)
483 {
484 gimple use_stmt;
485 for (occ = occ_head; occ; occ = occ->next)
486 {
487 compute_merit (occ);
488 insert_reciprocals (def_gsi, occ, def, NULL, threshold);
489 }
490
491 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, def)
492 {
493 if (is_division_by (use_stmt, def))
494 {
495 FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
496 replace_reciprocal (use_p);
497 }
498 }
499 }
500
501 for (occ = occ_head; occ; )
502 occ = free_bb (occ);
503
504 occ_head = NULL;
505 }
506
507 /* Go through all the floating-point SSA_NAMEs, and call
508 execute_cse_reciprocals_1 on each of them. */
509 namespace {
510
511 const pass_data pass_data_cse_reciprocals =
512 {
513 GIMPLE_PASS, /* type */
514 "recip", /* name */
515 OPTGROUP_NONE, /* optinfo_flags */
516 true, /* has_execute */
517 TV_NONE, /* tv_id */
518 PROP_ssa, /* properties_required */
519 0, /* properties_provided */
520 0, /* properties_destroyed */
521 0, /* todo_flags_start */
522 TODO_update_ssa, /* todo_flags_finish */
523 };
524
525 class pass_cse_reciprocals : public gimple_opt_pass
526 {
527 public:
528 pass_cse_reciprocals (gcc::context *ctxt)
529 : gimple_opt_pass (pass_data_cse_reciprocals, ctxt)
530 {}
531
532 /* opt_pass methods: */
533 virtual bool gate (function *) { return optimize && flag_reciprocal_math; }
534 virtual unsigned int execute (function *);
535
536 }; // class pass_cse_reciprocals
537
538 unsigned int
539 pass_cse_reciprocals::execute (function *fun)
540 {
541 basic_block bb;
542 tree arg;
543
544 occ_pool = create_alloc_pool ("dominators for recip",
545 sizeof (struct occurrence),
546 n_basic_blocks_for_fn (fun) / 3 + 1);
547
548 memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
549 calculate_dominance_info (CDI_DOMINATORS);
550 calculate_dominance_info (CDI_POST_DOMINATORS);
551
552 #ifdef ENABLE_CHECKING
553 FOR_EACH_BB_FN (bb, fun)
554 gcc_assert (!bb->aux);
555 #endif
556
557 for (arg = DECL_ARGUMENTS (fun->decl); arg; arg = DECL_CHAIN (arg))
558 if (FLOAT_TYPE_P (TREE_TYPE (arg))
559 && is_gimple_reg (arg))
560 {
561 tree name = ssa_default_def (fun, arg);
562 if (name)
563 execute_cse_reciprocals_1 (NULL, name);
564 }
565
566 FOR_EACH_BB_FN (bb, fun)
567 {
568 gimple_stmt_iterator gsi;
569 gimple phi;
570 tree def;
571
572 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
573 {
574 phi = gsi_stmt (gsi);
575 def = PHI_RESULT (phi);
576 if (! virtual_operand_p (def)
577 && FLOAT_TYPE_P (TREE_TYPE (def)))
578 execute_cse_reciprocals_1 (NULL, def);
579 }
580
581 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
582 {
583 gimple stmt = gsi_stmt (gsi);
584
585 if (gimple_has_lhs (stmt)
586 && (def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF)) != NULL
587 && FLOAT_TYPE_P (TREE_TYPE (def))
588 && TREE_CODE (def) == SSA_NAME)
589 execute_cse_reciprocals_1 (&gsi, def);
590 }
591
592 if (optimize_bb_for_size_p (bb))
593 continue;
594
595 /* Scan for a/func(b) and convert it to reciprocal a*rfunc(b). */
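/* A hedged, target-dependent example: if targetm.builtin_reciprocal
   returns a reciprocal-square-root builtin for sqrtf, then

       t_1 = sqrtf (b_2);
       a_3 = x_4 / t_1;

   is rewritten so that t_1 is defined by the reciprocal builtin
   instead, and the division becomes a_3 = x_4 * t_1 -- but only when
   every use of t_1 is such a division, as checked below.  */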
596 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
597 {
598 gimple stmt = gsi_stmt (gsi);
599 tree fndecl;
600
601 if (is_gimple_assign (stmt)
602 && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
603 {
604 tree arg1 = gimple_assign_rhs2 (stmt);
605 gimple stmt1;
606
607 if (TREE_CODE (arg1) != SSA_NAME)
608 continue;
609
610 stmt1 = SSA_NAME_DEF_STMT (arg1);
611
612 if (is_gimple_call (stmt1)
613 && gimple_call_lhs (stmt1)
614 && (fndecl = gimple_call_fndecl (stmt1))
615 && (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
616 || DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD))
617 {
618 enum built_in_function code;
619 bool md_code, fail;
620 imm_use_iterator ui;
621 use_operand_p use_p;
622
623 code = DECL_FUNCTION_CODE (fndecl);
624 md_code = DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD;
625
626 fndecl = targetm.builtin_reciprocal (code, md_code, false);
627 if (!fndecl)
628 continue;
629
630 /* Check that all uses of the SSA name are divisions,
631 otherwise replacing the defining statement will do
632 the wrong thing. */
633 fail = false;
634 FOR_EACH_IMM_USE_FAST (use_p, ui, arg1)
635 {
636 gimple stmt2 = USE_STMT (use_p);
637 if (is_gimple_debug (stmt2))
638 continue;
639 if (!is_gimple_assign (stmt2)
640 || gimple_assign_rhs_code (stmt2) != RDIV_EXPR
641 || gimple_assign_rhs1 (stmt2) == arg1
642 || gimple_assign_rhs2 (stmt2) != arg1)
643 {
644 fail = true;
645 break;
646 }
647 }
648 if (fail)
649 continue;
650
651 gimple_replace_ssa_lhs (stmt1, arg1);
652 gimple_call_set_fndecl (stmt1, fndecl);
653 update_stmt (stmt1);
654 reciprocal_stats.rfuncs_inserted++;
655
656 FOR_EACH_IMM_USE_STMT (stmt, ui, arg1)
657 {
658 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
659 gimple_assign_set_rhs_code (stmt, MULT_EXPR);
660 fold_stmt_inplace (&gsi);
661 update_stmt (stmt);
662 }
663 }
664 }
665 }
666 }
667
668 statistics_counter_event (fun, "reciprocal divs inserted",
669 reciprocal_stats.rdivs_inserted);
670 statistics_counter_event (fun, "reciprocal functions inserted",
671 reciprocal_stats.rfuncs_inserted);
672
673 free_dominance_info (CDI_DOMINATORS);
674 free_dominance_info (CDI_POST_DOMINATORS);
675 free_alloc_pool (occ_pool);
676 return 0;
677 }
678
679 } // anon namespace
680
681 gimple_opt_pass *
682 make_pass_cse_reciprocals (gcc::context *ctxt)
683 {
684 return new pass_cse_reciprocals (ctxt);
685 }
686
687 /* Records an occurrence at statement USE_STMT in the vector of trees
688 STMTS if it is dominated by *TOP_BB, dominates it, or if *TOP_BB is
689 not yet initialized.  Returns true if the occurrence was pushed on
690 the vector. Adjusts *TOP_BB to be the basic block dominating all
691 statements in the vector. */
692
693 static bool
694 maybe_record_sincos (vec<gimple> *stmts,
695 basic_block *top_bb, gimple use_stmt)
696 {
697 basic_block use_bb = gimple_bb (use_stmt);
698 if (*top_bb
699 && (*top_bb == use_bb
700 || dominated_by_p (CDI_DOMINATORS, use_bb, *top_bb)))
701 stmts->safe_push (use_stmt);
702 else if (!*top_bb
703 || dominated_by_p (CDI_DOMINATORS, *top_bb, use_bb))
704 {
705 stmts->safe_push (use_stmt);
706 *top_bb = use_bb;
707 }
708 else
709 return false;
710
711 return true;
712 }
713
714 /* Look for sin, cos and cexpi calls with the same argument NAME and
715 create a single call to cexpi CSEing the result in this case.
716 We first walk over all immediate uses of the argument collecting
717 statements that we can CSE in a vector and in a second pass replace
718 the statement rhs with a REALPART or IMAGPART expression on the
719 result of the cexpi call we insert before the use statement that
720 dominates all other candidates. */
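/* A small illustrative example (the SSA names are made up):

       s_2 = sinf (a_1);
       c_3 = cosf (a_1);

   becomes, after this transformation,

       sincostmp_4 = __builtin_cexpif (a_1);
       s_2 = IMAGPART_EXPR <sincostmp_4>;
       c_3 = REALPART_EXPR <sincostmp_4>;

   since cexpi (x) computes cos (x) + i*sin (x); a single cexpi call
   (or sincos libcall, depending on the target libc) now feeds both
   uses.  */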
721
722 static bool
723 execute_cse_sincos_1 (tree name)
724 {
725 gimple_stmt_iterator gsi;
726 imm_use_iterator use_iter;
727 tree fndecl, res, type;
728 gimple def_stmt, use_stmt, stmt;
729 int seen_cos = 0, seen_sin = 0, seen_cexpi = 0;
730 vec<gimple> stmts = vNULL;
731 basic_block top_bb = NULL;
732 int i;
733 bool cfg_changed = false;
734
735 type = TREE_TYPE (name);
736 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, name)
737 {
738 if (gimple_code (use_stmt) != GIMPLE_CALL
739 || !gimple_call_lhs (use_stmt)
740 || !(fndecl = gimple_call_fndecl (use_stmt))
741 || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
742 continue;
743
744 switch (DECL_FUNCTION_CODE (fndecl))
745 {
746 CASE_FLT_FN (BUILT_IN_COS):
747 seen_cos |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
748 break;
749
750 CASE_FLT_FN (BUILT_IN_SIN):
751 seen_sin |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
752 break;
753
754 CASE_FLT_FN (BUILT_IN_CEXPI):
755 seen_cexpi |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
756 break;
757
758 default:;
759 }
760 }
761
762 if (seen_cos + seen_sin + seen_cexpi <= 1)
763 {
764 stmts.release ();
765 return false;
766 }
767
768 /* Simply insert cexpi at the beginning of top_bb but not earlier than
769 the name def statement. */
770 fndecl = mathfn_built_in (type, BUILT_IN_CEXPI);
771 if (!fndecl)
772 return false;
773 stmt = gimple_build_call (fndecl, 1, name);
774 res = make_temp_ssa_name (TREE_TYPE (TREE_TYPE (fndecl)), stmt, "sincostmp");
775 gimple_call_set_lhs (stmt, res);
776
777 def_stmt = SSA_NAME_DEF_STMT (name);
778 if (!SSA_NAME_IS_DEFAULT_DEF (name)
779 && gimple_code (def_stmt) != GIMPLE_PHI
780 && gimple_bb (def_stmt) == top_bb)
781 {
782 gsi = gsi_for_stmt (def_stmt);
783 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
784 }
785 else
786 {
787 gsi = gsi_after_labels (top_bb);
788 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
789 }
790 sincos_stats.inserted++;
791
792 /* And adjust the recorded old call sites. */
793 for (i = 0; stmts.iterate (i, &use_stmt); ++i)
794 {
795 tree rhs = NULL;
796 fndecl = gimple_call_fndecl (use_stmt);
797
798 switch (DECL_FUNCTION_CODE (fndecl))
799 {
800 CASE_FLT_FN (BUILT_IN_COS):
801 rhs = fold_build1 (REALPART_EXPR, type, res);
802 break;
803
804 CASE_FLT_FN (BUILT_IN_SIN):
805 rhs = fold_build1 (IMAGPART_EXPR, type, res);
806 break;
807
808 CASE_FLT_FN (BUILT_IN_CEXPI):
809 rhs = res;
810 break;
811
812 default:;
813 gcc_unreachable ();
814 }
815
816 /* Replace call with a copy. */
817 stmt = gimple_build_assign (gimple_call_lhs (use_stmt), rhs);
818
819 gsi = gsi_for_stmt (use_stmt);
820 gsi_replace (&gsi, stmt, true);
821 if (gimple_purge_dead_eh_edges (gimple_bb (stmt)))
822 cfg_changed = true;
823 }
824
825 stmts.release ();
826
827 return cfg_changed;
828 }
829
830 /* To evaluate powi(x,n), the floating point value x raised to the
831 constant integer exponent n, we use a hybrid algorithm that
832 combines the "window method" with look-up tables. For an
833 introduction to exponentiation algorithms and "addition chains",
834 see section 4.6.3, "Evaluation of Powers" of Donald E. Knuth,
835 "Seminumerical Algorithms", Vol. 2, "The Art of Computer Programming",
836 3rd Edition, 1998, and Daniel M. Gordon, "A Survey of Fast Exponentiation
837 Methods", Journal of Algorithms, Vol. 27, pp. 129-146, 1998. */
838
839 /* Provide a default value for POWI_MAX_MULTS, the maximum number of
840 multiplications to inline before calling the system library's pow
841 function. powi(x,n) requires at worst 2*bits(n)-2 multiplications,
842 so this default never requires calling pow, powf or powl. */
843
844 #ifndef POWI_MAX_MULTS
845 #define POWI_MAX_MULTS (2*HOST_BITS_PER_WIDE_INT-2)
846 #endif
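/* A short note on where the 2*bits(n)-2 figure comes from: plain
   square-and-multiply evaluation of an exponent with b significant bits
   needs b-1 squarings plus at most b-1 extra multiplications by x, i.e.
   at most 2*b-2 multiplications, so an exponent that fits in a
   HOST_WIDE_INT never needs more than the default above.  */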
847
848 /* The size of the "optimal power tree" lookup table. All
849 exponents less than this value are simply looked up in the
850 powi_table below. This threshold is also used to size the
851 cache of pseudo registers that hold intermediate results. */
852 #define POWI_TABLE_SIZE 256
853
854 /* The size, in bits, of the window used in the "window method"
855 exponentiation algorithm. This is equivalent to a radix of
856 (1<<POWI_WINDOW_SIZE) in the corresponding "m-ary method". */
857 #define POWI_WINDOW_SIZE 3
858
859 /* The following table is an efficient representation of an
860 "optimal power tree". For each value, i, the corresponding
861 value, j, in the table states that an optimal evaluation
862 sequence for calculating pow(x,i) can be found by evaluating
863 pow(x,j)*pow(x,i-j). An optimal power tree for the first
864 100 integers is given in Knuth's "Seminumerical algorithms". */
865
866 static const unsigned char powi_table[POWI_TABLE_SIZE] =
867 {
868 0, 1, 1, 2, 2, 3, 3, 4, /* 0 - 7 */
869 4, 6, 5, 6, 6, 10, 7, 9, /* 8 - 15 */
870 8, 16, 9, 16, 10, 12, 11, 13, /* 16 - 23 */
871 12, 17, 13, 18, 14, 24, 15, 26, /* 24 - 31 */
872 16, 17, 17, 19, 18, 33, 19, 26, /* 32 - 39 */
873 20, 25, 21, 40, 22, 27, 23, 44, /* 40 - 47 */
874 24, 32, 25, 34, 26, 29, 27, 44, /* 48 - 55 */
875 28, 31, 29, 34, 30, 60, 31, 36, /* 56 - 63 */
876 32, 64, 33, 34, 34, 46, 35, 37, /* 64 - 71 */
877 36, 65, 37, 50, 38, 48, 39, 69, /* 72 - 79 */
878 40, 49, 41, 43, 42, 51, 43, 58, /* 80 - 87 */
879 44, 64, 45, 47, 46, 59, 47, 76, /* 88 - 95 */
880 48, 65, 49, 66, 50, 67, 51, 66, /* 96 - 103 */
881 52, 70, 53, 74, 54, 104, 55, 74, /* 104 - 111 */
882 56, 64, 57, 69, 58, 78, 59, 68, /* 112 - 119 */
883 60, 61, 61, 80, 62, 75, 63, 68, /* 120 - 127 */
884 64, 65, 65, 128, 66, 129, 67, 90, /* 128 - 135 */
885 68, 73, 69, 131, 70, 94, 71, 88, /* 136 - 143 */
886 72, 128, 73, 98, 74, 132, 75, 121, /* 144 - 151 */
887 76, 102, 77, 124, 78, 132, 79, 106, /* 152 - 159 */
888 80, 97, 81, 160, 82, 99, 83, 134, /* 160 - 167 */
889 84, 86, 85, 95, 86, 160, 87, 100, /* 168 - 175 */
890 88, 113, 89, 98, 90, 107, 91, 122, /* 176 - 183 */
891 92, 111, 93, 102, 94, 126, 95, 150, /* 184 - 191 */
892 96, 128, 97, 130, 98, 133, 99, 195, /* 192 - 199 */
893 100, 128, 101, 123, 102, 164, 103, 138, /* 200 - 207 */
894 104, 145, 105, 146, 106, 109, 107, 149, /* 208 - 215 */
895 108, 200, 109, 146, 110, 170, 111, 157, /* 216 - 223 */
896 112, 128, 113, 130, 114, 182, 115, 132, /* 224 - 231 */
897 116, 200, 117, 132, 118, 158, 119, 206, /* 232 - 239 */
898 120, 240, 121, 162, 122, 147, 123, 152, /* 240 - 247 */
899 124, 166, 125, 214, 126, 138, 127, 153, /* 248 - 255 */
900 };
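/* A worked example of how the table is used (illustrative only):
   for powi (x, 13), powi_table[13] == 10, so x**13 is evaluated as
   x**10 * x**3; recursing, x**10 = x**5 * x**5, x**5 = x**3 * x**2,
   x**3 = x**2 * x and x**2 = x * x.  With the intermediate results
   cached this is 5 multiplications in total (for x**2, x**3, x**5,
   x**10 and x**13), against 12 for naive repeated multiplication.  */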
901
902
903 /* Return the number of multiplications required to calculate
904 powi(x,n) where n is less than POWI_TABLE_SIZE. This is a
905 subroutine of powi_cost. CACHE is an array indicating
906 which exponents have already been calculated. */
907
908 static int
909 powi_lookup_cost (unsigned HOST_WIDE_INT n, bool *cache)
910 {
911 /* If we've already calculated this exponent, then this evaluation
912 doesn't require any additional multiplications. */
913 if (cache[n])
914 return 0;
915
916 cache[n] = true;
917 return powi_lookup_cost (n - powi_table[n], cache)
918 + powi_lookup_cost (powi_table[n], cache) + 1;
919 }
920
921 /* Return the number of multiplications required to calculate
922 powi(x,n) for an arbitrary x, given the exponent N. This
923 function needs to be kept in sync with powi_as_mults below. */
924
925 static int
926 powi_cost (HOST_WIDE_INT n)
927 {
928 bool cache[POWI_TABLE_SIZE];
929 unsigned HOST_WIDE_INT digit;
930 unsigned HOST_WIDE_INT val;
931 int result;
932
933 if (n == 0)
934 return 0;
935
936 /* Ignore the reciprocal when calculating the cost. */
937 val = (n < 0) ? -n : n;
938
939 /* Initialize the exponent cache. */
940 memset (cache, 0, POWI_TABLE_SIZE * sizeof (bool));
941 cache[1] = true;
942
943 result = 0;
944
945 while (val >= POWI_TABLE_SIZE)
946 {
947 if (val & 1)
948 {
949 digit = val & ((1 << POWI_WINDOW_SIZE) - 1);
950 result += powi_lookup_cost (digit, cache)
951 + POWI_WINDOW_SIZE + 1;
952 val >>= POWI_WINDOW_SIZE;
953 }
954 else
955 {
956 val >>= 1;
957 result++;
958 }
959 }
960
961 return result + powi_lookup_cost (val, cache);
962 }
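/* A small illustrative trace of the window loop above: for
   powi_cost (512), the values 512 and 256 are even, so the loop shifts
   twice and charges two multiplications (two squarings), leaving
   val == 128; powi_lookup_cost (128, cache) then charges 7 more for the
   chain x**2, x**4, ..., x**128, giving a total of 9 -- exactly the nine
   squarings needed to compute x**512.  */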
963
964 /* Recursive subroutine of powi_as_mults. This function takes the
965 array, CACHE, of already calculated exponents and an exponent N and
966 returns a tree that corresponds to CACHE[1]**N, with type TYPE. */
967
968 static tree
969 powi_as_mults_1 (gimple_stmt_iterator *gsi, location_t loc, tree type,
970 HOST_WIDE_INT n, tree *cache)
971 {
972 tree op0, op1, ssa_target;
973 unsigned HOST_WIDE_INT digit;
974 gimple mult_stmt;
975
976 if (n < POWI_TABLE_SIZE && cache[n])
977 return cache[n];
978
979 ssa_target = make_temp_ssa_name (type, NULL, "powmult");
980
981 if (n < POWI_TABLE_SIZE)
982 {
983 cache[n] = ssa_target;
984 op0 = powi_as_mults_1 (gsi, loc, type, n - powi_table[n], cache);
985 op1 = powi_as_mults_1 (gsi, loc, type, powi_table[n], cache);
986 }
987 else if (n & 1)
988 {
989 digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
990 op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache);
991 op1 = powi_as_mults_1 (gsi, loc, type, digit, cache);
992 }
993 else
994 {
995 op0 = powi_as_mults_1 (gsi, loc, type, n >> 1, cache);
996 op1 = op0;
997 }
998
999 mult_stmt = gimple_build_assign_with_ops (MULT_EXPR, ssa_target, op0, op1);
1000 gimple_set_location (mult_stmt, loc);
1001 gsi_insert_before (gsi, mult_stmt, GSI_SAME_STMT);
1002
1003 return ssa_target;
1004 }
1005
1006 /* Convert ARG0**N to a tree of multiplications of ARG0 with itself.
1007 This function needs to be kept in sync with powi_cost above. */
1008
1009 static tree
1010 powi_as_mults (gimple_stmt_iterator *gsi, location_t loc,
1011 tree arg0, HOST_WIDE_INT n)
1012 {
1013 tree cache[POWI_TABLE_SIZE], result, type = TREE_TYPE (arg0);
1014 gimple div_stmt;
1015 tree target;
1016
1017 if (n == 0)
1018 return build_real (type, dconst1);
1019
1020 memset (cache, 0, sizeof (cache));
1021 cache[1] = arg0;
1022
1023 result = powi_as_mults_1 (gsi, loc, type, (n < 0) ? -n : n, cache);
1024 if (n >= 0)
1025 return result;
1026
1027 /* If the original exponent was negative, reciprocate the result. */
1028 target = make_temp_ssa_name (type, NULL, "powmult");
1029 div_stmt = gimple_build_assign_with_ops (RDIV_EXPR, target,
1030 build_real (type, dconst1),
1031 result);
1032 gimple_set_location (div_stmt, loc);
1033 gsi_insert_before (gsi, div_stmt, GSI_SAME_STMT);
1034
1035 return target;
1036 }
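/* For a negative exponent the reciprocal is taken once at the end;
   e.g. powi_as_mults for n == -3 emits (SSA names illustrative only)

       powmult_1 = x * x;
       powmult_2 = x * powmult_1;
       powmult_3 = 1.0 / powmult_2;

   and returns powmult_3.  */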
1037
1038 /* ARG0 and N are the two arguments to a powi builtin in GSI with
1039 location info LOC. If the arguments are appropriate, create an
1040 equivalent sequence of statements prior to GSI using an optimal
1041 number of multiplications, and return an expression holding the
1042 result. */
1043
1044 static tree
1045 gimple_expand_builtin_powi (gimple_stmt_iterator *gsi, location_t loc,
1046 tree arg0, HOST_WIDE_INT n)
1047 {
1048 /* Avoid largest negative number. */
1049 if (n != -n
1050 && ((n >= -1 && n <= 2)
1051 || (optimize_function_for_speed_p (cfun)
1052 && powi_cost (n) <= POWI_MAX_MULTS)))
1053 return powi_as_mults (gsi, loc, arg0, n);
1054
1055 return NULL_TREE;
1056 }
1057
1058 /* Build a gimple call statement that calls FN with argument ARG.
1059 Set the lhs of the call statement to a fresh SSA name. Insert the
1060 statement prior to GSI's current position, and return the fresh
1061 SSA name. */
1062
1063 static tree
1064 build_and_insert_call (gimple_stmt_iterator *gsi, location_t loc,
1065 tree fn, tree arg)
1066 {
1067 gimple call_stmt;
1068 tree ssa_target;
1069
1070 call_stmt = gimple_build_call (fn, 1, arg);
1071 ssa_target = make_temp_ssa_name (TREE_TYPE (arg), NULL, "powroot");
1072 gimple_set_lhs (call_stmt, ssa_target);
1073 gimple_set_location (call_stmt, loc);
1074 gsi_insert_before (gsi, call_stmt, GSI_SAME_STMT);
1075
1076 return ssa_target;
1077 }
1078
1079 /* Build a gimple binary operation with the given CODE and arguments
1080 ARG0, ARG1, assigning the result to a new SSA name for variable
1081 TARGET. Insert the statement prior to GSI's current position, and
1082 return the fresh SSA name. */
1083
1084 static tree
1085 build_and_insert_binop (gimple_stmt_iterator *gsi, location_t loc,
1086 const char *name, enum tree_code code,
1087 tree arg0, tree arg1)
1088 {
1089 tree result = make_temp_ssa_name (TREE_TYPE (arg0), NULL, name);
1090 gimple stmt = gimple_build_assign_with_ops (code, result, arg0, arg1);
1091 gimple_set_location (stmt, loc);
1092 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
1093 return result;
1094 }
1095
1096 /* Build a gimple reference operation with the given CODE and argument
1097 ARG, assigning the result to a new SSA name of TYPE with NAME.
1098 Insert the statement prior to GSI's current position, and return
1099 the fresh SSA name. */
1100
1101 static inline tree
1102 build_and_insert_ref (gimple_stmt_iterator *gsi, location_t loc, tree type,
1103 const char *name, enum tree_code code, tree arg0)
1104 {
1105 tree result = make_temp_ssa_name (type, NULL, name);
1106 gimple stmt = gimple_build_assign (result, build1 (code, type, arg0));
1107 gimple_set_location (stmt, loc);
1108 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
1109 return result;
1110 }
1111
1112 /* Build a gimple assignment to cast VAL to TYPE. Insert the statement
1113 prior to GSI's current position, and return the fresh SSA name. */
1114
1115 static tree
1116 build_and_insert_cast (gimple_stmt_iterator *gsi, location_t loc,
1117 tree type, tree val)
1118 {
1119 tree result = make_ssa_name (type, NULL);
1120 gimple stmt = gimple_build_assign_with_ops (NOP_EXPR, result, val, NULL_TREE);
1121 gimple_set_location (stmt, loc);
1122 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
1123 return result;
1124 }
1125
1126 /* ARG0 and ARG1 are the two arguments to a pow builtin call in GSI
1127 with location info LOC. If possible, create an equivalent and
1128 less expensive sequence of statements prior to GSI, and return an
1129 expression holding the result. */
1130
1131 static tree
1132 gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc,
1133 tree arg0, tree arg1)
1134 {
1135 REAL_VALUE_TYPE c, cint, dconst1_4, dconst3_4, dconst1_3, dconst1_6;
1136 REAL_VALUE_TYPE c2, dconst3;
1137 HOST_WIDE_INT n;
1138 tree type, sqrtfn, cbrtfn, sqrt_arg0, sqrt_sqrt, result, cbrt_x, powi_cbrt_x;
1139 enum machine_mode mode;
1140 bool hw_sqrt_exists, c_is_int, c2_is_int;
1141
1142 /* If the exponent isn't a constant, there's nothing of interest
1143 to be done. */
1144 if (TREE_CODE (arg1) != REAL_CST)
1145 return NULL_TREE;
1146
1147 /* If the exponent is equivalent to an integer, expand to an optimal
1148 multiplication sequence when profitable. */
1149 c = TREE_REAL_CST (arg1);
1150 n = real_to_integer (&c);
1151 real_from_integer (&cint, VOIDmode, n, SIGNED);
1152 c_is_int = real_identical (&c, &cint);
1153
1154 if (c_is_int
1155 && ((n >= -1 && n <= 2)
1156 || (flag_unsafe_math_optimizations
1157 && optimize_bb_for_speed_p (gsi_bb (*gsi))
1158 && powi_cost (n) <= POWI_MAX_MULTS)))
1159 return gimple_expand_builtin_powi (gsi, loc, arg0, n);
1160
1161 /* Attempt various optimizations using sqrt and cbrt. */
1162 type = TREE_TYPE (arg0);
1163 mode = TYPE_MODE (type);
1164 sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
1165
1166 /* Optimize pow(x,0.5) = sqrt(x). This replacement is always safe
1167 unless signed zeros must be maintained. pow(-0,0.5) = +0, while
1168 sqrt(-0) = -0. */
1169 if (sqrtfn
1170 && REAL_VALUES_EQUAL (c, dconsthalf)
1171 && !HONOR_SIGNED_ZEROS (mode))
1172 return build_and_insert_call (gsi, loc, sqrtfn, arg0);
1173
1174 /* Optimize pow(x,0.25) = sqrt(sqrt(x)). Assume on most machines that
1175 a builtin sqrt instruction is smaller than a call to pow with 0.25,
1176 so do this optimization even if -Os. Don't do this optimization
1177 if we don't have a hardware sqrt insn. */
1178 dconst1_4 = dconst1;
1179 SET_REAL_EXP (&dconst1_4, REAL_EXP (&dconst1_4) - 2);
1180 hw_sqrt_exists = optab_handler (sqrt_optab, mode) != CODE_FOR_nothing;
1181
1182 if (flag_unsafe_math_optimizations
1183 && sqrtfn
1184 && REAL_VALUES_EQUAL (c, dconst1_4)
1185 && hw_sqrt_exists)
1186 {
1187 /* sqrt(x) */
1188 sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);
1189
1190 /* sqrt(sqrt(x)) */
1191 return build_and_insert_call (gsi, loc, sqrtfn, sqrt_arg0);
1192 }
1193
1194 /* Optimize pow(x,0.75) = sqrt(x) * sqrt(sqrt(x)) unless we are
1195 optimizing for space. Don't do this optimization if we don't have
1196 a hardware sqrt insn. */
1197 real_from_integer (&dconst3_4, VOIDmode, 3, SIGNED);
1198 SET_REAL_EXP (&dconst3_4, REAL_EXP (&dconst3_4) - 2);
1199
1200 if (flag_unsafe_math_optimizations
1201 && sqrtfn
1202 && optimize_function_for_speed_p (cfun)
1203 && REAL_VALUES_EQUAL (c, dconst3_4)
1204 && hw_sqrt_exists)
1205 {
1206 /* sqrt(x) */
1207 sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);
1208
1209 /* sqrt(sqrt(x)) */
1210 sqrt_sqrt = build_and_insert_call (gsi, loc, sqrtfn, sqrt_arg0);
1211
1212 /* sqrt(x) * sqrt(sqrt(x)) */
1213 return build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1214 sqrt_arg0, sqrt_sqrt);
1215 }
1216
1217 /* Optimize pow(x,1./3.) = cbrt(x). This requires unsafe math
1218 optimizations since 1./3. is not exactly representable. If x
1219 is negative and finite, the correct value of pow(x,1./3.) is
1220 a NaN with the "invalid" exception raised, because the value
1221 of 1./3. actually has an even denominator. The correct value
1222 of cbrt(x) is a negative real value. */
1223 cbrtfn = mathfn_built_in (type, BUILT_IN_CBRT);
1224 dconst1_3 = real_value_truncate (mode, dconst_third ());
1225
1226 if (flag_unsafe_math_optimizations
1227 && cbrtfn
1228 && (gimple_val_nonnegative_real_p (arg0) || !HONOR_NANS (mode))
1229 && REAL_VALUES_EQUAL (c, dconst1_3))
1230 return build_and_insert_call (gsi, loc, cbrtfn, arg0);
1231
1232 /* Optimize pow(x,1./6.) = cbrt(sqrt(x)). Don't do this optimization
1233 if we don't have a hardware sqrt insn. */
1234 dconst1_6 = dconst1_3;
1235 SET_REAL_EXP (&dconst1_6, REAL_EXP (&dconst1_6) - 1);
1236
1237 if (flag_unsafe_math_optimizations
1238 && sqrtfn
1239 && cbrtfn
1240 && (gimple_val_nonnegative_real_p (arg0) || !HONOR_NANS (mode))
1241 && optimize_function_for_speed_p (cfun)
1242 && hw_sqrt_exists
1243 && REAL_VALUES_EQUAL (c, dconst1_6))
1244 {
1245 /* sqrt(x) */
1246 sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);
1247
1248 /* cbrt(sqrt(x)) */
1249 return build_and_insert_call (gsi, loc, cbrtfn, sqrt_arg0);
1250 }
1251
1252 /* Optimize pow(x,c), where n = 2c for some nonzero integer n
1253 and c not an integer, into
1254
1255 sqrt(x) * powi(x, n/2), n > 0;
1256 1.0 / (sqrt(x) * powi(x, abs(n/2))), n < 0.
1257
1258 Do not calculate the powi factor when n/2 = 0. */
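/* For instance (illustrative), pow (x, 2.5) gives c2 == 5.0 and hence
   n == 5, so the expansion built below is sqrt(x) * powi(x, 2), i.e.
   sqrt(x) * (x * x), which equals x**2.5; as the condition below
   states, this is only done under -funsafe-math-optimizations.  */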
1259 real_arithmetic (&c2, MULT_EXPR, &c, &dconst2);
1260 n = real_to_integer (&c2);
1261 real_from_integer (&cint, VOIDmode, n, SIGNED);
1262 c2_is_int = real_identical (&c2, &cint);
1263
1264 if (flag_unsafe_math_optimizations
1265 && sqrtfn
1266 && c2_is_int
1267 && !c_is_int
1268 && optimize_function_for_speed_p (cfun))
1269 {
1270 tree powi_x_ndiv2 = NULL_TREE;
1271
1272 /* Attempt to fold powi(arg0, abs(n/2)) into multiplies. If not
1273 possible or profitable, give up. Skip the degenerate case when
1274 n is 1 or -1, where the result is always 1. */
1275 if (absu_hwi (n) != 1)
1276 {
1277 powi_x_ndiv2 = gimple_expand_builtin_powi (gsi, loc, arg0,
1278 abs_hwi (n / 2));
1279 if (!powi_x_ndiv2)
1280 return NULL_TREE;
1281 }
1282
1283 /* Calculate sqrt(x). When n is not 1 or -1, multiply it by the
1284 result of the optimal multiply sequence just calculated. */
1285 sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);
1286
1287 if (absu_hwi (n) == 1)
1288 result = sqrt_arg0;
1289 else
1290 result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1291 sqrt_arg0, powi_x_ndiv2);
1292
1293 /* If n is negative, reciprocate the result. */
1294 if (n < 0)
1295 result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
1296 build_real (type, dconst1), result);
1297 return result;
1298 }
1299
1300 /* Optimize pow(x,c), where 3c = n for some nonzero integer n, into
1301
1302 powi(x, n/3) * powi(cbrt(x), n%3), n > 0;
1303 1.0 / (powi(x, abs(n)/3) * powi(cbrt(x), abs(n)%3)), n < 0.
1304
1305 Do not calculate the first factor when n/3 = 0. As cbrt(x) is
1306 different from pow(x, 1./3.) due to rounding and behavior with
1307 negative x, we need to constrain this transformation to unsafe
1308 math and positive x or finite math. */
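/* For instance (illustrative), pow (x, 4./3.) gives n == 4 here, so
   the expansion built below is powi(x, 4/3) * powi(cbrt(x), 4%3), i.e.
   x * cbrt(x), which equals x**(4/3); as the condition below states,
   this is only done under unsafe math and when x is known nonnegative
   or NaNs need not be honored.  */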
1309 real_from_integer (&dconst3, VOIDmode, 3, SIGNED);
1310 real_arithmetic (&c2, MULT_EXPR, &c, &dconst3);
1311 real_round (&c2, mode, &c2);
1312 n = real_to_integer (&c2);
1313 real_from_integer (&cint, VOIDmode, n, SIGNED);
1314 real_arithmetic (&c2, RDIV_EXPR, &cint, &dconst3);
1315 real_convert (&c2, mode, &c2);
1316
1317 if (flag_unsafe_math_optimizations
1318 && cbrtfn
1319 && (gimple_val_nonnegative_real_p (arg0) || !HONOR_NANS (mode))
1320 && real_identical (&c2, &c)
1321 && !c2_is_int
1322 && optimize_function_for_speed_p (cfun)
1323 && powi_cost (n / 3) <= POWI_MAX_MULTS)
1324 {
1325 tree powi_x_ndiv3 = NULL_TREE;
1326
1327 /* Attempt to fold powi(arg0, abs(n/3)) into multiplies. If not
1328 possible or profitable, give up. Skip the degenerate case when
1329 abs(n) < 3, where the result is always 1. */
1330 if (absu_hwi (n) >= 3)
1331 {
1332 powi_x_ndiv3 = gimple_expand_builtin_powi (gsi, loc, arg0,
1333 abs_hwi (n / 3));
1334 if (!powi_x_ndiv3)
1335 return NULL_TREE;
1336 }
1337
1338 /* Calculate powi(cbrt(x), n%3). Don't use gimple_expand_builtin_powi
1339 as that creates an unnecessary variable. Instead, just produce
1340 either cbrt(x) or cbrt(x) * cbrt(x). */
1341 cbrt_x = build_and_insert_call (gsi, loc, cbrtfn, arg0);
1342
1343 if (absu_hwi (n) % 3 == 1)
1344 powi_cbrt_x = cbrt_x;
1345 else
1346 powi_cbrt_x = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1347 cbrt_x, cbrt_x);
1348
1349 /* Multiply the two subexpressions, unless powi(x,abs(n)/3) = 1. */
1350 if (absu_hwi (n) < 3)
1351 result = powi_cbrt_x;
1352 else
1353 result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1354 powi_x_ndiv3, powi_cbrt_x);
1355
1356 /* If n is negative, reciprocate the result. */
1357 if (n < 0)
1358 result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
1359 build_real (type, dconst1), result);
1360
1361 return result;
1362 }
1363
1364 /* No optimizations succeeded. */
1365 return NULL_TREE;
1366 }
1367
1368 /* ARG is the argument to a cabs builtin call in GSI with location info
1369 LOC. Create a sequence of statements prior to GSI that calculates
1370 sqrt(R*R + I*I), where R and I are the real and imaginary components
1371 of ARG, respectively. Return an expression holding the result. */
1372
1373 static tree
1374 gimple_expand_builtin_cabs (gimple_stmt_iterator *gsi, location_t loc, tree arg)
1375 {
1376 tree real_part, imag_part, addend1, addend2, sum, result;
1377 tree type = TREE_TYPE (TREE_TYPE (arg));
1378 tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
1379 enum machine_mode mode = TYPE_MODE (type);
1380
1381 if (!flag_unsafe_math_optimizations
1382 || !optimize_bb_for_speed_p (gimple_bb (gsi_stmt (*gsi)))
1383 || !sqrtfn
1384 || optab_handler (sqrt_optab, mode) == CODE_FOR_nothing)
1385 return NULL_TREE;
1386
1387 real_part = build_and_insert_ref (gsi, loc, type, "cabs",
1388 REALPART_EXPR, arg);
1389 addend1 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
1390 real_part, real_part);
1391 imag_part = build_and_insert_ref (gsi, loc, type, "cabs",
1392 IMAGPART_EXPR, arg);
1393 addend2 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
1394 imag_part, imag_part);
1395 sum = build_and_insert_binop (gsi, loc, "cabs", PLUS_EXPR, addend1, addend2);
1396 result = build_and_insert_call (gsi, loc, sqrtfn, sum);
1397
1398 return result;
1399 }
1400
1401 /* Go through all calls to sin, cos and cexpi and call execute_cse_sincos_1
1402 on the SSA_NAME argument of each of them. Also expand powi(x,n) into
1403 an optimal number of multiplies, when n is a constant. */
1404
1405 namespace {
1406
1407 const pass_data pass_data_cse_sincos =
1408 {
1409 GIMPLE_PASS, /* type */
1410 "sincos", /* name */
1411 OPTGROUP_NONE, /* optinfo_flags */
1412 true, /* has_execute */
1413 TV_NONE, /* tv_id */
1414 PROP_ssa, /* properties_required */
1415 0, /* properties_provided */
1416 0, /* properties_destroyed */
1417 0, /* todo_flags_start */
1418 TODO_update_ssa, /* todo_flags_finish */
1419 };
1420
1421 class pass_cse_sincos : public gimple_opt_pass
1422 {
1423 public:
1424 pass_cse_sincos (gcc::context *ctxt)
1425 : gimple_opt_pass (pass_data_cse_sincos, ctxt)
1426 {}
1427
1428 /* opt_pass methods: */
1429 virtual bool gate (function *)
1430 {
1431 /* We no longer require either sincos or cexp, since powi expansion
1432 piggybacks on this pass. */
1433 return optimize;
1434 }
1435
1436 virtual unsigned int execute (function *);
1437
1438 }; // class pass_cse_sincos
1439
1440 unsigned int
1441 pass_cse_sincos::execute (function *fun)
1442 {
1443 basic_block bb;
1444 bool cfg_changed = false;
1445
1446 calculate_dominance_info (CDI_DOMINATORS);
1447 memset (&sincos_stats, 0, sizeof (sincos_stats));
1448
1449 FOR_EACH_BB_FN (bb, fun)
1450 {
1451 gimple_stmt_iterator gsi;
1452 bool cleanup_eh = false;
1453
1454 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1455 {
1456 gimple stmt = gsi_stmt (gsi);
1457 tree fndecl;
1458
1459 /* Only the last stmt in a bb could throw, no need to call
1460 gimple_purge_dead_eh_edges if we change something in the middle
1461 of a basic block. */
1462 cleanup_eh = false;
1463
1464 if (is_gimple_call (stmt)
1465 && gimple_call_lhs (stmt)
1466 && (fndecl = gimple_call_fndecl (stmt))
1467 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
1468 {
1469 tree arg, arg0, arg1, result;
1470 HOST_WIDE_INT n;
1471 location_t loc;
1472
1473 switch (DECL_FUNCTION_CODE (fndecl))
1474 {
1475 CASE_FLT_FN (BUILT_IN_COS):
1476 CASE_FLT_FN (BUILT_IN_SIN):
1477 CASE_FLT_FN (BUILT_IN_CEXPI):
1478 /* Make sure we have either sincos or cexp. */
1479 if (!targetm.libc_has_function (function_c99_math_complex)
1480 && !targetm.libc_has_function (function_sincos))
1481 break;
1482
1483 arg = gimple_call_arg (stmt, 0);
1484 if (TREE_CODE (arg) == SSA_NAME)
1485 cfg_changed |= execute_cse_sincos_1 (arg);
1486 break;
1487
1488 CASE_FLT_FN (BUILT_IN_POW):
1489 arg0 = gimple_call_arg (stmt, 0);
1490 arg1 = gimple_call_arg (stmt, 1);
1491
1492 loc = gimple_location (stmt);
1493 result = gimple_expand_builtin_pow (&gsi, loc, arg0, arg1);
1494
1495 if (result)
1496 {
1497 tree lhs = gimple_get_lhs (stmt);
1498 gimple new_stmt = gimple_build_assign (lhs, result);
1499 gimple_set_location (new_stmt, loc);
1500 unlink_stmt_vdef (stmt);
1501 gsi_replace (&gsi, new_stmt, true);
1502 cleanup_eh = true;
1503 if (gimple_vdef (stmt))
1504 release_ssa_name (gimple_vdef (stmt));
1505 }
1506 break;
1507
1508 CASE_FLT_FN (BUILT_IN_POWI):
1509 arg0 = gimple_call_arg (stmt, 0);
1510 arg1 = gimple_call_arg (stmt, 1);
1511 loc = gimple_location (stmt);
1512
1513 if (real_minus_onep (arg0))
1514 {
1515 tree t0, t1, cond, one, minus_one;
1516 gimple stmt;
1517
1518 t0 = TREE_TYPE (arg0);
1519 t1 = TREE_TYPE (arg1);
1520 one = build_real (t0, dconst1);
1521 minus_one = build_real (t0, dconstm1);
1522
1523 cond = make_temp_ssa_name (t1, NULL, "powi_cond");
1524 stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, cond,
1525 arg1,
1526 build_int_cst (t1,
1527 1));
1528 gimple_set_location (stmt, loc);
1529 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
1530
1531 result = make_temp_ssa_name (t0, NULL, "powi");
1532 stmt = gimple_build_assign_with_ops (COND_EXPR, result,
1533 cond,
1534 minus_one, one);
1535 gimple_set_location (stmt, loc);
1536 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
1537 }
1538 else
1539 {
1540 if (!tree_fits_shwi_p (arg1))
1541 break;
1542
1543 n = tree_to_shwi (arg1);
1544 result = gimple_expand_builtin_powi (&gsi, loc, arg0, n);
1545 }
1546
1547 if (result)
1548 {
1549 tree lhs = gimple_get_lhs (stmt);
1550 gimple new_stmt = gimple_build_assign (lhs, result);
1551 gimple_set_location (new_stmt, loc);
1552 unlink_stmt_vdef (stmt);
1553 gsi_replace (&gsi, new_stmt, true);
1554 cleanup_eh = true;
1555 if (gimple_vdef (stmt))
1556 release_ssa_name (gimple_vdef (stmt));
1557 }
1558 break;
1559
1560 CASE_FLT_FN (BUILT_IN_CABS):
1561 arg0 = gimple_call_arg (stmt, 0);
1562 loc = gimple_location (stmt);
1563 result = gimple_expand_builtin_cabs (&gsi, loc, arg0);
1564
1565 if (result)
1566 {
1567 tree lhs = gimple_get_lhs (stmt);
1568 gimple new_stmt = gimple_build_assign (lhs, result);
1569 gimple_set_location (new_stmt, loc);
1570 unlink_stmt_vdef (stmt);
1571 gsi_replace (&gsi, new_stmt, true);
1572 cleanup_eh = true;
1573 if (gimple_vdef (stmt))
1574 release_ssa_name (gimple_vdef (stmt));
1575 }
1576 break;
1577
1578 default:;
1579 }
1580 }
1581 }
1582 if (cleanup_eh)
1583 cfg_changed |= gimple_purge_dead_eh_edges (bb);
1584 }
1585
1586 statistics_counter_event (fun, "sincos statements inserted",
1587 sincos_stats.inserted);
1588
1589 free_dominance_info (CDI_DOMINATORS);
1590 return cfg_changed ? TODO_cleanup_cfg : 0;
1591 }
1592
1593 } // anon namespace
1594
1595 gimple_opt_pass *
1596 make_pass_cse_sincos (gcc::context *ctxt)
1597 {
1598 return new pass_cse_sincos (ctxt);
1599 }
1600
1601 /* A symbolic number is used to detect byte permutation and selection
1602 patterns. Therefore the field N contains an artificial number
1603 consisting of byte size markers:
1604
1605 0 - byte has the value 0
1606 1..size - byte contains the content of the source byte
1607 with index equal to that value minus one */
1608
1609 struct symbolic_number {
1610 unsigned HOST_WIDEST_INT n;
1611 int size;
1612 };
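/* An illustrative walk-through for a 32-bit unsigned value x
   (hypothetical source code, marker values shown in hex): the symbolic
   number starts out as 0x04030201 -- the least significant byte carries
   marker 1 (the least significant source byte), the next one marker 2,
   and so on up to marker 4.  Running the classic open-coded swap

       (x >> 24) | ((x >> 8) & 0xff00) | ((x << 8) & 0xff0000) | (x << 24)

   through find_bswap_1 yields 0x00000004, 0x00000300, 0x00020000 and
   0x01000000 for the four operands; their bitwise OR is 0x01020304,
   the fully reversed marker pattern that find_bswap below compares
   against, so the expression can be replaced by a __builtin_bswap32
   call.  */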
1613
1614 /* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
1615 number N. Return false if the requested operation is not permitted
1616 on a symbolic number. */
1617
1618 static inline bool
1619 do_shift_rotate (enum tree_code code,
1620 struct symbolic_number *n,
1621 int count)
1622 {
1623 if (count % 8 != 0)
1624 return false;
1625
1626 /* Zero out the extra bits of N in order to avoid them being shifted
1627 into the significant bits. */
1628 if (n->size < (int)sizeof (HOST_WIDEST_INT))
1629 n->n &= ((unsigned HOST_WIDEST_INT)1 << (n->size * BITS_PER_UNIT)) - 1;
1630
1631 switch (code)
1632 {
1633 case LSHIFT_EXPR:
1634 n->n <<= count;
1635 break;
1636 case RSHIFT_EXPR:
1637 n->n >>= count;
1638 break;
1639 case LROTATE_EXPR:
1640 n->n = (n->n << count) | (n->n >> ((n->size * BITS_PER_UNIT) - count));
1641 break;
1642 case RROTATE_EXPR:
1643 n->n = (n->n >> count) | (n->n << ((n->size * BITS_PER_UNIT) - count));
1644 break;
1645 default:
1646 return false;
1647 }
1648 /* Zero unused bits for size. */
1649 if (n->size < (int)sizeof (HOST_WIDEST_INT))
1650 n->n &= ((unsigned HOST_WIDEST_INT)1 << (n->size * BITS_PER_UNIT)) - 1;
1651 return true;
1652 }
1653
1654 /* Perform sanity checking for the symbolic number N and the gimple
1655 statement STMT. */
1656
1657 static inline bool
1658 verify_symbolic_number_p (struct symbolic_number *n, gimple stmt)
1659 {
1660 tree lhs_type;
1661
1662 lhs_type = gimple_expr_type (stmt);
1663
1664 if (TREE_CODE (lhs_type) != INTEGER_TYPE)
1665 return false;
1666
1667 if (TYPE_PRECISION (lhs_type) != n->size * BITS_PER_UNIT)
1668 return false;
1669
1670 return true;
1671 }
1672
1673 /* find_bswap_1 invokes itself recursively with N and tries to perform
1674 the operation given by the rhs of STMT on the result.  If the
1675 operation could be executed successfully, the function returns the
1676 tree expression of the source operand, and NULL otherwise. */
1677
1678 static tree
1679 find_bswap_1 (gimple stmt, struct symbolic_number *n, int limit)
1680 {
1681 enum tree_code code;
1682 tree rhs1, rhs2 = NULL;
1683 gimple rhs1_stmt, rhs2_stmt;
1684 tree source_expr1;
1685 enum gimple_rhs_class rhs_class;
1686
1687 if (!limit || !is_gimple_assign (stmt))
1688 return NULL_TREE;
1689
1690 rhs1 = gimple_assign_rhs1 (stmt);
1691
1692 if (TREE_CODE (rhs1) != SSA_NAME)
1693 return NULL_TREE;
1694
1695 code = gimple_assign_rhs_code (stmt);
1696 rhs_class = gimple_assign_rhs_class (stmt);
1697 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
1698
1699 if (rhs_class == GIMPLE_BINARY_RHS)
1700 rhs2 = gimple_assign_rhs2 (stmt);
1701
1702 /* Handle unary rhs and binary rhs with integer constants as second
1703 operand. */
1704
1705 if (rhs_class == GIMPLE_UNARY_RHS
1706 || (rhs_class == GIMPLE_BINARY_RHS
1707 && TREE_CODE (rhs2) == INTEGER_CST))
1708 {
1709 if (code != BIT_AND_EXPR
1710 && code != LSHIFT_EXPR
1711 && code != RSHIFT_EXPR
1712 && code != LROTATE_EXPR
1713 && code != RROTATE_EXPR
1714 && code != NOP_EXPR
1715 && code != CONVERT_EXPR)
1716 return NULL_TREE;
1717
1718 source_expr1 = find_bswap_1 (rhs1_stmt, n, limit - 1);
1719
1720 /* If find_bswap_1 returned NULL, STMT is a leaf node and we have
1721 to initialize the symbolic number. */
1722 if (!source_expr1)
1723 {
1724 /* Set up the symbolic number N by setting each byte to a
1725 value between 1 and the byte size of rhs1. The highest
1726 order byte is set to n->size and the lowest order
1727 byte to 1. */
1728 n->size = TYPE_PRECISION (TREE_TYPE (rhs1));
1729 if (n->size % BITS_PER_UNIT != 0)
1730 return NULL_TREE;
1731 n->size /= BITS_PER_UNIT;
1732 n->n = (sizeof (HOST_WIDEST_INT) < 8 ? 0 :
1733 (unsigned HOST_WIDEST_INT)0x08070605 << 32 | 0x04030201);
1734
1735 if (n->size < (int)sizeof (HOST_WIDEST_INT))
1736 n->n &= ((unsigned HOST_WIDEST_INT)1 <<
1737 (n->size * BITS_PER_UNIT)) - 1;
1738
1739 source_expr1 = rhs1;
1740 }
1741
1742 switch (code)
1743 {
1744 case BIT_AND_EXPR:
1745 {
1746 int i;
1747 unsigned HOST_WIDEST_INT val = widest_int_cst_value (rhs2);
1748 unsigned HOST_WIDEST_INT tmp = val;
1749
1750 /* Only constants masking full bytes are allowed. */
1751 for (i = 0; i < n->size; i++, tmp >>= BITS_PER_UNIT)
1752 if ((tmp & 0xff) != 0 && (tmp & 0xff) != 0xff)
1753 return NULL_TREE;
1754
1755 n->n &= val;
1756 }
1757 break;
1758 case LSHIFT_EXPR:
1759 case RSHIFT_EXPR:
1760 case LROTATE_EXPR:
1761 case RROTATE_EXPR:
1762 if (!do_shift_rotate (code, n, (int)TREE_INT_CST_LOW (rhs2)))
1763 return NULL_TREE;
1764 break;
1765 CASE_CONVERT:
1766 {
1767 int type_size;
1768
1769 type_size = TYPE_PRECISION (gimple_expr_type (stmt));
1770 if (type_size % BITS_PER_UNIT != 0)
1771 return NULL_TREE;
1772
1773 if (type_size / BITS_PER_UNIT < (int)(sizeof (HOST_WIDEST_INT)))
1774 {
1775 /* If STMT casts to a smaller type, mask out the bits not
1776 belonging to the target type. */
1777 n->n &= ((unsigned HOST_WIDEST_INT)1 << type_size) - 1;
1778 }
1779 n->size = type_size / BITS_PER_UNIT;
1780 }
1781 break;
1782 default:
1783 return NULL_TREE;
1784 };
1785 return verify_symbolic_number_p (n, stmt) ? source_expr1 : NULL;
1786 }
1787
1788 /* Handle binary rhs. */
1789
1790 if (rhs_class == GIMPLE_BINARY_RHS)
1791 {
1792 int i;
1793 struct symbolic_number n1, n2;
1794 unsigned HOST_WIDEST_INT mask;
1795 tree source_expr2;
1796
1797 if (code != BIT_IOR_EXPR)
1798 return NULL_TREE;
1799
1800 if (TREE_CODE (rhs2) != SSA_NAME)
1801 return NULL_TREE;
1802
1803 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
1804
1805 switch (code)
1806 {
1807 case BIT_IOR_EXPR:
1808 source_expr1 = find_bswap_1 (rhs1_stmt, &n1, limit - 1);
1809
1810 if (!source_expr1)
1811 return NULL_TREE;
1812
1813 source_expr2 = find_bswap_1 (rhs2_stmt, &n2, limit - 1);
1814
1815 if (source_expr1 != source_expr2
1816 || n1.size != n2.size)
1817 return NULL_TREE;
1818
1819 n->size = n1.size;
1820 for (i = 0, mask = 0xff; i < n->size; i++, mask <<= BITS_PER_UNIT)
1821 {
1822 unsigned HOST_WIDEST_INT masked1, masked2;
1823
1824 masked1 = n1.n & mask;
1825 masked2 = n2.n & mask;
1826 if (masked1 && masked2 && masked1 != masked2)
1827 return NULL_TREE;
1828 }
1829 n->n = n1.n | n2.n;
1830
1831 if (!verify_symbolic_number_p (n, stmt))
1832 return NULL_TREE;
1833
1834 break;
1835 default:
1836 return NULL_TREE;
1837 }
1838 return source_expr1;
1839 }
1840 return NULL_TREE;
1841 }
1842
1843 /* Check if STMT completes a bswap implementation consisting of ORs,
1844 SHIFTs and ANDs. Return the source tree expression on which the
1845 byte swap is performed, or NULL if no bswap was found. */
1846
1847 static tree
1848 find_bswap (gimple stmt)
1849 {
1850 /* The number which the find_bswap result should match in order to
1851 have a full byte swap. The number is shifted to the right according
1852 to the size of the symbolic number before it is used. */
1853 unsigned HOST_WIDEST_INT cmp =
1854 sizeof (HOST_WIDEST_INT) < 8 ? 0 :
1855 (unsigned HOST_WIDEST_INT)0x01020304 << 32 | 0x05060708;
1856
1857 struct symbolic_number n;
1858 tree source_expr;
1859 int limit;
1860
1861 /* The last parameter determines the search depth limit. It usually
1862 correlates directly to the number of bytes to be touched. We
1863 increase that number by one plus its base-2 logarithm here in order to
1864 also cover signed -> unsigned conversions of the src operand as can be
1865 seen in libgcc, and for the initial shift/and operation of the src operand. */
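/* For example (illustrative arithmetic only): for a 4-byte destination
   TYPE_SIZE_UNIT is 4 and ceil_log2 (4) is 2, so the limit computed
   below is 4 + 1 + 2 = 7 statements. */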
1866 limit = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt)));
1867 limit += 1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit);
1868 source_expr = find_bswap_1 (stmt, &n, limit);
1869
1870 if (!source_expr)
1871 return NULL_TREE;
1872
1873 /* Zero out the extra bits of N and CMP. */
1874 if (n.size < (int)sizeof (HOST_WIDEST_INT))
1875 {
1876 unsigned HOST_WIDEST_INT mask =
1877 ((unsigned HOST_WIDEST_INT)1 << (n.size * BITS_PER_UNIT)) - 1;
1878
1879 n.n &= mask;
1880 cmp >>= (sizeof (HOST_WIDEST_INT) - n.size) * BITS_PER_UNIT;
1881 }
1882
1883 /* A complete byte swap should make the symbolic number start
1884 with the largest digit in the highest order byte. */
1885 if (cmp != n.n)
1886 return NULL_TREE;
1887
1888 return source_expr;
1889 }
1890
1891 /* Find manual byte swap implementations and turn them into a bswap
1892 builtin invocation. */
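/* A sketch of the kind of source this pass targets (illustrative only;
   the uint32_t type and the function name are assumptions, not taken
   from this file):

     uint32_t
     my_bswap32 (uint32_t x)
     {
       return ((x & 0x000000ffU) << 24)
            | ((x & 0x0000ff00U) << 8)
            | ((x & 0x00ff0000U) >> 8)
            | ((x & 0xff000000U) >> 24);
     }

   When the target provides a bswap instruction for the mode, the whole
   OR tree above is replaced by a single call to __builtin_bswap32. */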
1893
1894 namespace {
1895
1896 const pass_data pass_data_optimize_bswap =
1897 {
1898 GIMPLE_PASS, /* type */
1899 "bswap", /* name */
1900 OPTGROUP_NONE, /* optinfo_flags */
1901 true, /* has_execute */
1902 TV_NONE, /* tv_id */
1903 PROP_ssa, /* properties_required */
1904 0, /* properties_provided */
1905 0, /* properties_destroyed */
1906 0, /* todo_flags_start */
1907 0, /* todo_flags_finish */
1908 };
1909
1910 class pass_optimize_bswap : public gimple_opt_pass
1911 {
1912 public:
1913 pass_optimize_bswap (gcc::context *ctxt)
1914 : gimple_opt_pass (pass_data_optimize_bswap, ctxt)
1915 {}
1916
1917 /* opt_pass methods: */
1918 virtual bool gate (function *)
1919 {
1920 return flag_expensive_optimizations && optimize;
1921 }
1922
1923 virtual unsigned int execute (function *);
1924
1925 }; // class pass_optimize_bswap
1926
1927 unsigned int
1928 pass_optimize_bswap::execute (function *fun)
1929 {
1930 basic_block bb;
1931 bool bswap16_p, bswap32_p, bswap64_p;
1932 bool changed = false;
1933 tree bswap16_type = NULL_TREE, bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;
1934
1935 if (BITS_PER_UNIT != 8)
1936 return 0;
1937
1938 if (sizeof (HOST_WIDEST_INT) < 8)
1939 return 0;
1940
1941 bswap16_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP16)
1942 && optab_handler (bswap_optab, HImode) != CODE_FOR_nothing);
1943 bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
1944 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
1945 bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
1946 && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
1947 || (bswap32_p && word_mode == SImode)));
1948
1949 if (!bswap16_p && !bswap32_p && !bswap64_p)
1950 return 0;
1951
1952 /* Determine the argument type of the builtins. The code later on
1953 assumes that the return and argument type are the same. */
1954 if (bswap16_p)
1955 {
1956 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP16);
1957 bswap16_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1958 }
1959
1960 if (bswap32_p)
1961 {
1962 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
1963 bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1964 }
1965
1966 if (bswap64_p)
1967 {
1968 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
1969 bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1970 }
1971
1972 memset (&bswap_stats, 0, sizeof (bswap_stats));
1973
1974 FOR_EACH_BB_FN (bb, fun)
1975 {
1976 gimple_stmt_iterator gsi;
1977
1978 /* We do a reverse scan for bswap patterns to make sure we get the
1979 widest match. As bswap pattern matching doesn't handle
1980 previously inserted smaller bswap replacements as sub-
1981 patterns, the wider variant would otherwise not be detected. */
1982 for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
1983 {
1984 gimple stmt = gsi_stmt (gsi);
1985 tree bswap_src, bswap_type;
1986 tree bswap_tmp;
1987 tree fndecl = NULL_TREE;
1988 int type_size;
1989 gimple call;
1990
1991 if (!is_gimple_assign (stmt)
1992 || gimple_assign_rhs_code (stmt) != BIT_IOR_EXPR)
1993 continue;
1994
1995 type_size = TYPE_PRECISION (gimple_expr_type (stmt));
1996
1997 switch (type_size)
1998 {
1999 case 16:
2000 if (bswap16_p)
2001 {
2002 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP16);
2003 bswap_type = bswap16_type;
2004 }
2005 break;
2006 case 32:
2007 if (bswap32_p)
2008 {
2009 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
2010 bswap_type = bswap32_type;
2011 }
2012 break;
2013 case 64:
2014 if (bswap64_p)
2015 {
2016 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
2017 bswap_type = bswap64_type;
2018 }
2019 break;
2020 default:
2021 continue;
2022 }
2023
2024 if (!fndecl)
2025 continue;
2026
2027 bswap_src = find_bswap (stmt);
2028
2029 if (!bswap_src)
2030 continue;
2031
2032 changed = true;
2033 if (type_size == 16)
2034 bswap_stats.found_16bit++;
2035 else if (type_size == 32)
2036 bswap_stats.found_32bit++;
2037 else
2038 bswap_stats.found_64bit++;
2039
2040 bswap_tmp = bswap_src;
2041
2042 /* Convert the src expression if necessary. */
2043 if (!useless_type_conversion_p (TREE_TYPE (bswap_tmp), bswap_type))
2044 {
2045 gimple convert_stmt;
2046 bswap_tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
2047 convert_stmt = gimple_build_assign_with_ops
2048 (NOP_EXPR, bswap_tmp, bswap_src, NULL);
2049 gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
2050 }
2051
2052 call = gimple_build_call (fndecl, 1, bswap_tmp);
2053
2054 bswap_tmp = gimple_assign_lhs (stmt);
2055
2056 /* Convert the result if necessary. */
2057 if (!useless_type_conversion_p (TREE_TYPE (bswap_tmp), bswap_type))
2058 {
2059 gimple convert_stmt;
2060 bswap_tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
2061 convert_stmt = gimple_build_assign_with_ops
2062 (NOP_EXPR, gimple_assign_lhs (stmt), bswap_tmp, NULL);
2063 gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
2064 }
2065
2066 gimple_call_set_lhs (call, bswap_tmp);
2067
2068 if (dump_file)
2069 {
2070 fprintf (dump_file, "%d bit bswap implementation found at: ",
2071 (int)type_size);
2072 print_gimple_stmt (dump_file, stmt, 0, 0);
2073 }
2074
2075 gsi_insert_after (&gsi, call, GSI_SAME_STMT);
2076 gsi_remove (&gsi, true);
2077 }
2078 }
2079
2080 statistics_counter_event (fun, "16-bit bswap implementations found",
2081 bswap_stats.found_16bit);
2082 statistics_counter_event (fun, "32-bit bswap implementations found",
2083 bswap_stats.found_32bit);
2084 statistics_counter_event (fun, "64-bit bswap implementations found",
2085 bswap_stats.found_64bit);
2086
2087 return (changed ? TODO_update_ssa : 0);
2088 }
2089
2090 } // anon namespace
2091
2092 gimple_opt_pass *
2093 make_pass_optimize_bswap (gcc::context *ctxt)
2094 {
2095 return new pass_optimize_bswap (ctxt);
2096 }
2097
2098 /* Return true if STMT is a type conversion operation that can be stripped
2099 when used in a widening multiply operation. */
2100 static bool
2101 widening_mult_conversion_strippable_p (tree result_type, gimple stmt)
2102 {
2103 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
2104
2105 if (TREE_CODE (result_type) == INTEGER_TYPE)
2106 {
2107 tree op_type;
2108 tree inner_op_type;
2109
2110 if (!CONVERT_EXPR_CODE_P (rhs_code))
2111 return false;
2112
2113 op_type = TREE_TYPE (gimple_assign_lhs (stmt));
2114
2115 /* If the type of OP has the same precision as the result, then
2116 we can strip this conversion. The multiply operation will be
2117 selected to create the correct extension as a by-product. */
2118 if (TYPE_PRECISION (result_type) == TYPE_PRECISION (op_type))
2119 return true;
2120
2121 /* We can also strip a conversion if it preserves the signedness of
2122 the operation and doesn't narrow the range. */
2123 inner_op_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
2124
2125 /* If the inner-most type is unsigned, then we can strip any
2126 intermediate widening operation. If it's signed, then the
2127 intermediate widening operation must also be signed. */
2128 if ((TYPE_UNSIGNED (inner_op_type)
2129 || TYPE_UNSIGNED (op_type) == TYPE_UNSIGNED (inner_op_type))
2130 && TYPE_PRECISION (op_type) > TYPE_PRECISION (inner_op_type))
2131 return true;
2132
2133 return false;
2134 }
2135
2136 return rhs_code == FIXED_CONVERT_EXPR;
2137 }
2138
2139 /* Return true if RHS is a suitable operand for a widening multiplication,
2140 assuming a target type of TYPE.
2141 There are two cases:
2142
2143 - RHS makes some value at least twice as wide. Store that value
2144 in *NEW_RHS_OUT if so, and store its type in *TYPE_OUT.
2145
2146 - RHS is an integer constant. Store that value in *NEW_RHS_OUT if so,
2147 but set *TYPE_OUT to NULL. */
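/* For instance (an illustrative sketch): with a 64-bit TYPE, an RHS
   defined by a cast from a 32-bit value stores that 32-bit value in
   *NEW_RHS_OUT and its 32-bit type in *TYPE_OUT; an RHS that is the
   integer constant 7 stores 7 in *NEW_RHS_OUT and sets *TYPE_OUT to
   NULL, leaving the caller to pick a type for it. */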
2148
2149 static bool
2150 is_widening_mult_rhs_p (tree type, tree rhs, tree *type_out,
2151 tree *new_rhs_out)
2152 {
2153 gimple stmt;
2154 tree type1, rhs1;
2155
2156 if (TREE_CODE (rhs) == SSA_NAME)
2157 {
2158 stmt = SSA_NAME_DEF_STMT (rhs);
2159 if (is_gimple_assign (stmt))
2160 {
2161 if (! widening_mult_conversion_strippable_p (type, stmt))
2162 rhs1 = rhs;
2163 else
2164 {
2165 rhs1 = gimple_assign_rhs1 (stmt);
2166
2167 if (TREE_CODE (rhs1) == INTEGER_CST)
2168 {
2169 *new_rhs_out = rhs1;
2170 *type_out = NULL;
2171 return true;
2172 }
2173 }
2174 }
2175 else
2176 rhs1 = rhs;
2177
2178 type1 = TREE_TYPE (rhs1);
2179
2180 if (TREE_CODE (type1) != TREE_CODE (type)
2181 || TYPE_PRECISION (type1) * 2 > TYPE_PRECISION (type))
2182 return false;
2183
2184 *new_rhs_out = rhs1;
2185 *type_out = type1;
2186 return true;
2187 }
2188
2189 if (TREE_CODE (rhs) == INTEGER_CST)
2190 {
2191 *new_rhs_out = rhs;
2192 *type_out = NULL;
2193 return true;
2194 }
2195
2196 return false;
2197 }
2198
2199 /* Return true if STMT performs a widening multiplication, assuming the
2200 output type is TYPE. If so, store the unwidened types of the operands
2201 in *TYPE1_OUT and *TYPE2_OUT respectively. Also fill *RHS1_OUT and
2202 *RHS2_OUT such that converting those operands to types *TYPE1_OUT
2203 and *TYPE2_OUT would give the operands of the multiplication. */
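/* For instance (an illustrative sketch, assuming 16-bit short and
   32-bit int):

     short a, b;
     int c = (int) a * (int) b;

   is_widening_mult_p succeeds with *TYPE1_OUT == *TYPE2_OUT == short and
   *RHS1_OUT == a, *RHS2_OUT == b, which lets convert_mult_to_widen below
   rewrite the MULT_EXPR as a WIDEN_MULT_EXPR. */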
2204
2205 static bool
2206 is_widening_mult_p (gimple stmt,
2207 tree *type1_out, tree *rhs1_out,
2208 tree *type2_out, tree *rhs2_out)
2209 {
2210 tree type = TREE_TYPE (gimple_assign_lhs (stmt));
2211
2212 if (TREE_CODE (type) != INTEGER_TYPE
2213 && TREE_CODE (type) != FIXED_POINT_TYPE)
2214 return false;
2215
2216 if (!is_widening_mult_rhs_p (type, gimple_assign_rhs1 (stmt), type1_out,
2217 rhs1_out))
2218 return false;
2219
2220 if (!is_widening_mult_rhs_p (type, gimple_assign_rhs2 (stmt), type2_out,
2221 rhs2_out))
2222 return false;
2223
2224 if (*type1_out == NULL)
2225 {
2226 if (*type2_out == NULL || !int_fits_type_p (*rhs1_out, *type2_out))
2227 return false;
2228 *type1_out = *type2_out;
2229 }
2230
2231 if (*type2_out == NULL)
2232 {
2233 if (!int_fits_type_p (*rhs2_out, *type1_out))
2234 return false;
2235 *type2_out = *type1_out;
2236 }
2237
2238 /* Ensure that the larger of the two operands comes first. */
2239 if (TYPE_PRECISION (*type1_out) < TYPE_PRECISION (*type2_out))
2240 {
2241 tree tmp;
2242 tmp = *type1_out;
2243 *type1_out = *type2_out;
2244 *type2_out = tmp;
2245 tmp = *rhs1_out;
2246 *rhs1_out = *rhs2_out;
2247 *rhs2_out = tmp;
2248 }
2249
2250 return true;
2251 }
2252
2253 /* Process a single gimple statement STMT, which has a MULT_EXPR as
2254 its rhs, and try to convert it into a WIDEN_MULT_EXPR. The return
2255 value is true iff we converted the statement. */
2256
2257 static bool
2258 convert_mult_to_widen (gimple stmt, gimple_stmt_iterator *gsi)
2259 {
2260 tree lhs, rhs1, rhs2, type, type1, type2;
2261 enum insn_code handler;
2262 enum machine_mode to_mode, from_mode, actual_mode;
2263 optab op;
2264 int actual_precision;
2265 location_t loc = gimple_location (stmt);
2266 bool from_unsigned1, from_unsigned2;
2267
2268 lhs = gimple_assign_lhs (stmt);
2269 type = TREE_TYPE (lhs);
2270 if (TREE_CODE (type) != INTEGER_TYPE)
2271 return false;
2272
2273 if (!is_widening_mult_p (stmt, &type1, &rhs1, &type2, &rhs2))
2274 return false;
2275
2276 to_mode = TYPE_MODE (type);
2277 from_mode = TYPE_MODE (type1);
2278 from_unsigned1 = TYPE_UNSIGNED (type1);
2279 from_unsigned2 = TYPE_UNSIGNED (type2);
2280
2281 if (from_unsigned1 && from_unsigned2)
2282 op = umul_widen_optab;
2283 else if (!from_unsigned1 && !from_unsigned2)
2284 op = smul_widen_optab;
2285 else
2286 op = usmul_widen_optab;
2287
2288 handler = find_widening_optab_handler_and_mode (op, to_mode, from_mode,
2289 0, &actual_mode);
2290
2291 if (handler == CODE_FOR_nothing)
2292 {
2293 if (op != smul_widen_optab)
2294 {
2295 /* We can use a signed multiply with unsigned types as long as
2296 there is a wider mode to use, or it is the smaller of the two
2297 types that is unsigned. Note that type1 >= type2, always. */
2298 if ((TYPE_UNSIGNED (type1)
2299 && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
2300 || (TYPE_UNSIGNED (type2)
2301 && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
2302 {
2303 from_mode = GET_MODE_WIDER_MODE (from_mode);
2304 if (GET_MODE_SIZE (to_mode) <= GET_MODE_SIZE (from_mode))
2305 return false;
2306 }
2307
2308 op = smul_widen_optab;
2309 handler = find_widening_optab_handler_and_mode (op, to_mode,
2310 from_mode, 0,
2311 &actual_mode);
2312
2313 if (handler == CODE_FOR_nothing)
2314 return false;
2315
2316 from_unsigned1 = from_unsigned2 = false;
2317 }
2318 else
2319 return false;
2320 }
2321
2322 /* Ensure that the inputs to the handler are in the correct precision
2323 for the opcode. This will be the full mode size. */
2324 actual_precision = GET_MODE_PRECISION (actual_mode);
2325 if (2 * actual_precision > TYPE_PRECISION (type))
2326 return false;
2327 if (actual_precision != TYPE_PRECISION (type1)
2328 || from_unsigned1 != TYPE_UNSIGNED (type1))
2329 rhs1 = build_and_insert_cast (gsi, loc,
2330 build_nonstandard_integer_type
2331 (actual_precision, from_unsigned1), rhs1);
2332 if (actual_precision != TYPE_PRECISION (type2)
2333 || from_unsigned2 != TYPE_UNSIGNED (type2))
2334 rhs2 = build_and_insert_cast (gsi, loc,
2335 build_nonstandard_integer_type
2336 (actual_precision, from_unsigned2), rhs2);
2337
2338 /* Handle constants. */
2339 if (TREE_CODE (rhs1) == INTEGER_CST)
2340 rhs1 = fold_convert (type1, rhs1);
2341 if (TREE_CODE (rhs2) == INTEGER_CST)
2342 rhs2 = fold_convert (type2, rhs2);
2343
2344 gimple_assign_set_rhs1 (stmt, rhs1);
2345 gimple_assign_set_rhs2 (stmt, rhs2);
2346 gimple_assign_set_rhs_code (stmt, WIDEN_MULT_EXPR);
2347 update_stmt (stmt);
2348 widen_mul_stats.widen_mults_inserted++;
2349 return true;
2350 }
2351
2352 /* Process a single gimple statement STMT, which is found at the
2353 iterator GSI and has either a PLUS_EXPR or a MINUS_EXPR as its
2354 rhs (given by CODE), and try to convert it into a
2355 WIDEN_MULT_PLUS_EXPR or a WIDEN_MULT_MINUS_EXPR. The return value
2356 is true iff we converted the statement. */
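/* A sketch of the shape being looked for (illustrative only, assuming
   32-bit int and 64-bit long long):

     int a, b;
     long long acc;
     acc = acc + (long long) a * (long long) b;

   On targets that provide a widening multiply-accumulate pattern, the
   widening multiply and the addition are fused into a single
   WIDEN_MULT_PLUS_EXPR. */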
2357
2358 static bool
2359 convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple stmt,
2360 enum tree_code code)
2361 {
2362 gimple rhs1_stmt = NULL, rhs2_stmt = NULL;
2363 gimple conv1_stmt = NULL, conv2_stmt = NULL, conv_stmt;
2364 tree type, type1, type2, optype;
2365 tree lhs, rhs1, rhs2, mult_rhs1, mult_rhs2, add_rhs;
2366 enum tree_code rhs1_code = ERROR_MARK, rhs2_code = ERROR_MARK;
2367 optab this_optab;
2368 enum tree_code wmult_code;
2369 enum insn_code handler;
2370 enum machine_mode to_mode, from_mode, actual_mode;
2371 location_t loc = gimple_location (stmt);
2372 int actual_precision;
2373 bool from_unsigned1, from_unsigned2;
2374
2375 lhs = gimple_assign_lhs (stmt);
2376 type = TREE_TYPE (lhs);
2377 if (TREE_CODE (type) != INTEGER_TYPE
2378 && TREE_CODE (type) != FIXED_POINT_TYPE)
2379 return false;
2380
2381 if (code == MINUS_EXPR)
2382 wmult_code = WIDEN_MULT_MINUS_EXPR;
2383 else
2384 wmult_code = WIDEN_MULT_PLUS_EXPR;
2385
2386 rhs1 = gimple_assign_rhs1 (stmt);
2387 rhs2 = gimple_assign_rhs2 (stmt);
2388
2389 if (TREE_CODE (rhs1) == SSA_NAME)
2390 {
2391 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
2392 if (is_gimple_assign (rhs1_stmt))
2393 rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
2394 }
2395
2396 if (TREE_CODE (rhs2) == SSA_NAME)
2397 {
2398 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
2399 if (is_gimple_assign (rhs2_stmt))
2400 rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
2401 }
2402
2403 /* Allow for one conversion statement between the multiply
2404 and the addition/subtraction statement. If there are multiple
2405 conversions then we assume they would invalidate this
2406 transformation. If that's not the case then they should have
2407 been folded before now. */
2408 if (CONVERT_EXPR_CODE_P (rhs1_code))
2409 {
2410 conv1_stmt = rhs1_stmt;
2411 rhs1 = gimple_assign_rhs1 (rhs1_stmt);
2412 if (TREE_CODE (rhs1) == SSA_NAME)
2413 {
2414 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
2415 if (is_gimple_assign (rhs1_stmt))
2416 rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
2417 }
2418 else
2419 return false;
2420 }
2421 if (CONVERT_EXPR_CODE_P (rhs2_code))
2422 {
2423 conv2_stmt = rhs2_stmt;
2424 rhs2 = gimple_assign_rhs1 (rhs2_stmt);
2425 if (TREE_CODE (rhs2) == SSA_NAME)
2426 {
2427 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
2428 if (is_gimple_assign (rhs2_stmt))
2429 rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
2430 }
2431 else
2432 return false;
2433 }
2434
2435 /* If code is WIDEN_MULT_EXPR then it would seem unnecessary to call
2436 is_widening_mult_p, but we still need the rhs values it returns.
2437
2438 It might also appear that it would be sufficient to use the existing
2439 operands of the widening multiply, but that would limit the choice of
2440 multiply-and-accumulate instructions.
2441
2442 If the widened-multiplication result has more than one use, it is
2443 probably wiser not to do the conversion. */
2444 if (code == PLUS_EXPR
2445 && (rhs1_code == MULT_EXPR || rhs1_code == WIDEN_MULT_EXPR))
2446 {
2447 if (!has_single_use (rhs1)
2448 || !is_widening_mult_p (rhs1_stmt, &type1, &mult_rhs1,
2449 &type2, &mult_rhs2))
2450 return false;
2451 add_rhs = rhs2;
2452 conv_stmt = conv1_stmt;
2453 }
2454 else if (rhs2_code == MULT_EXPR || rhs2_code == WIDEN_MULT_EXPR)
2455 {
2456 if (!has_single_use (rhs2)
2457 || !is_widening_mult_p (rhs2_stmt, &type1, &mult_rhs1,
2458 &type2, &mult_rhs2))
2459 return false;
2460 add_rhs = rhs1;
2461 conv_stmt = conv2_stmt;
2462 }
2463 else
2464 return false;
2465
2466 to_mode = TYPE_MODE (type);
2467 from_mode = TYPE_MODE (type1);
2468 from_unsigned1 = TYPE_UNSIGNED (type1);
2469 from_unsigned2 = TYPE_UNSIGNED (type2);
2470 optype = type1;
2471
2472 /* There's no such thing as a mixed sign madd yet, so use a wider mode. */
2473 if (from_unsigned1 != from_unsigned2)
2474 {
2475 if (!INTEGRAL_TYPE_P (type))
2476 return false;
2477 /* We can use a signed multiply with unsigned types as long as
2478 there is a wider mode to use, or it is the smaller of the two
2479 types that is unsigned. Note that type1 >= type2, always. */
2480 if ((from_unsigned1
2481 && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
2482 || (from_unsigned2
2483 && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
2484 {
2485 from_mode = GET_MODE_WIDER_MODE (from_mode);
2486 if (GET_MODE_SIZE (from_mode) >= GET_MODE_SIZE (to_mode))
2487 return false;
2488 }
2489
2490 from_unsigned1 = from_unsigned2 = false;
2491 optype = build_nonstandard_integer_type (GET_MODE_PRECISION (from_mode),
2492 false);
2493 }
2494
2495 /* If there was a conversion between the multiply and addition
2496 then we need to make sure it fits a multiply-and-accumulate.
2497 It should be a single mode change which does not change the
2498 value. */
2499 if (conv_stmt)
2500 {
2501 /* We use the original, unmodified data types for this. */
2502 tree from_type = TREE_TYPE (gimple_assign_rhs1 (conv_stmt));
2503 tree to_type = TREE_TYPE (gimple_assign_lhs (conv_stmt));
2504 int data_size = TYPE_PRECISION (type1) + TYPE_PRECISION (type2);
2505 bool is_unsigned = TYPE_UNSIGNED (type1) && TYPE_UNSIGNED (type2);
2506
2507 if (TYPE_PRECISION (from_type) > TYPE_PRECISION (to_type))
2508 {
2509 /* Conversion is a truncate. */
2510 if (TYPE_PRECISION (to_type) < data_size)
2511 return false;
2512 }
2513 else if (TYPE_PRECISION (from_type) < TYPE_PRECISION (to_type))
2514 {
2515 /* Conversion is an extend. Check it's the right sort. */
2516 if (TYPE_UNSIGNED (from_type) != is_unsigned
2517 && !(is_unsigned && TYPE_PRECISION (from_type) > data_size))
2518 return false;
2519 }
2520 /* else convert is a no-op for our purposes. */
2521 }
2522
2523 /* Verify that the machine can perform a widening multiply
2524 accumulate in this mode/signedness combination, otherwise
2525 this transformation is likely to pessimize code. */
2526 this_optab = optab_for_tree_code (wmult_code, optype, optab_default);
2527 handler = find_widening_optab_handler_and_mode (this_optab, to_mode,
2528 from_mode, 0, &actual_mode);
2529
2530 if (handler == CODE_FOR_nothing)
2531 return false;
2532
2533 /* Ensure that the inputs to the handler are in the correct precision
2534 for the opcode. This will be the full mode size. */
2535 actual_precision = GET_MODE_PRECISION (actual_mode);
2536 if (actual_precision != TYPE_PRECISION (type1)
2537 || from_unsigned1 != TYPE_UNSIGNED (type1))
2538 mult_rhs1 = build_and_insert_cast (gsi, loc,
2539 build_nonstandard_integer_type
2540 (actual_precision, from_unsigned1),
2541 mult_rhs1);
2542 if (actual_precision != TYPE_PRECISION (type2)
2543 || from_unsigned2 != TYPE_UNSIGNED (type2))
2544 mult_rhs2 = build_and_insert_cast (gsi, loc,
2545 build_nonstandard_integer_type
2546 (actual_precision, from_unsigned2),
2547 mult_rhs2);
2548
2549 if (!useless_type_conversion_p (type, TREE_TYPE (add_rhs)))
2550 add_rhs = build_and_insert_cast (gsi, loc, type, add_rhs);
2551
2552 /* Handle constants. */
2553 if (TREE_CODE (mult_rhs1) == INTEGER_CST)
2554 mult_rhs1 = fold_convert (type1, mult_rhs1);
2555 if (TREE_CODE (mult_rhs2) == INTEGER_CST)
2556 mult_rhs2 = fold_convert (type2, mult_rhs2);
2557
2558 gimple_assign_set_rhs_with_ops_1 (gsi, wmult_code, mult_rhs1, mult_rhs2,
2559 add_rhs);
2560 update_stmt (gsi_stmt (*gsi));
2561 widen_mul_stats.maccs_inserted++;
2562 return true;
2563 }
2564
2565 /* Combine the multiplication at MUL_STMT with operands MULOP1 and MULOP2
2566 with uses in additions and subtractions to form fused multiply-add
2567 operations. Returns true if successful and MUL_STMT should be removed. */
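/* For example (illustrative only, with FP contraction enabled and an
   fma pattern available for the mode):

     double tmp = a * b;
     double res = tmp + c;

   The single use of the multiplication is rewritten as
   res = FMA_EXPR <a, b, c>, and the now dead multiplication is removed
   by the caller. */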
2568
2569 static bool
2570 convert_mult_to_fma (gimple mul_stmt, tree op1, tree op2)
2571 {
2572 tree mul_result = gimple_get_lhs (mul_stmt);
2573 tree type = TREE_TYPE (mul_result);
2574 gimple use_stmt, neguse_stmt, fma_stmt;
2575 use_operand_p use_p;
2576 imm_use_iterator imm_iter;
2577
2578 if (FLOAT_TYPE_P (type)
2579 && flag_fp_contract_mode == FP_CONTRACT_OFF)
2580 return false;
2581
2582 /* We don't want to do bitfield reduction ops. */
2583 if (INTEGRAL_TYPE_P (type)
2584 && (TYPE_PRECISION (type)
2585 != GET_MODE_PRECISION (TYPE_MODE (type))))
2586 return false;
2587
2588 /* If the target doesn't support it, don't generate it. We assume that
2589 if fma isn't available then fms, fnma or fnms are not either. */
2590 if (optab_handler (fma_optab, TYPE_MODE (type)) == CODE_FOR_nothing)
2591 return false;
2592
2593 /* If the multiplication has zero uses, it is kept around probably because
2594 of -fnon-call-exceptions. Don't optimize it away in that case;
2595 that is DCE's job. */
2596 if (has_zero_uses (mul_result))
2597 return false;
2598
2599 /* Make sure that the multiplication statement becomes dead after
2600 the transformation, i.e. that all uses are transformed to FMAs.
2601 This means we assume that an FMA operation has the same cost
2602 as an addition. */
2603 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, mul_result)
2604 {
2605 enum tree_code use_code;
2606 tree result = mul_result;
2607 bool negate_p = false;
2608
2609 use_stmt = USE_STMT (use_p);
2610
2611 if (is_gimple_debug (use_stmt))
2612 continue;
2613
2614 /* For now restrict this operation to single basic blocks. In theory
2615 we would want to support sinking the multiplication in
2616 m = a*b;
2617 if ()
2618 ma = m + c;
2619 else
2620 d = m;
2621 to form an fma in the then block and sink the multiplication to the
2622 else block. */
2623 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
2624 return false;
2625
2626 if (!is_gimple_assign (use_stmt))
2627 return false;
2628
2629 use_code = gimple_assign_rhs_code (use_stmt);
2630
2631 /* A negate on the multiplication leads to FNMA. */
2632 if (use_code == NEGATE_EXPR)
2633 {
2634 ssa_op_iter iter;
2635 use_operand_p usep;
2636
2637 result = gimple_assign_lhs (use_stmt);
2638
2639 /* Make sure the negate statement becomes dead with this
2640 single transformation. */
2641 if (!single_imm_use (gimple_assign_lhs (use_stmt),
2642 &use_p, &neguse_stmt))
2643 return false;
2644
2645 /* Make sure the multiplication isn't also used on that stmt. */
2646 FOR_EACH_PHI_OR_STMT_USE (usep, neguse_stmt, iter, SSA_OP_USE)
2647 if (USE_FROM_PTR (usep) == mul_result)
2648 return false;
2649
2650 /* Re-validate. */
2651 use_stmt = neguse_stmt;
2652 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
2653 return false;
2654 if (!is_gimple_assign (use_stmt))
2655 return false;
2656
2657 use_code = gimple_assign_rhs_code (use_stmt);
2658 negate_p = true;
2659 }
2660
2661 switch (use_code)
2662 {
2663 case MINUS_EXPR:
2664 if (gimple_assign_rhs2 (use_stmt) == result)
2665 negate_p = !negate_p;
2666 break;
2667 case PLUS_EXPR:
2668 break;
2669 default:
2670 /* FMA can only be formed from PLUS and MINUS. */
2671 return false;
2672 }
2673
2674 /* If the subtrahend (gimple_assign_rhs2 (use_stmt)) is computed
2675 by a MULT_EXPR that we'll visit later, we might be able to
2676 get a more profitable match with fnma.
2677 OTOH, if we don't, a negate / fma pair likely has lower latency
2678 than a mult / subtract pair. */
2679 if (use_code == MINUS_EXPR && !negate_p
2680 && gimple_assign_rhs1 (use_stmt) == result
2681 && optab_handler (fms_optab, TYPE_MODE (type)) == CODE_FOR_nothing
2682 && optab_handler (fnma_optab, TYPE_MODE (type)) != CODE_FOR_nothing)
2683 {
2684 tree rhs2 = gimple_assign_rhs2 (use_stmt);
2685
2686 if (TREE_CODE (rhs2) == SSA_NAME)
2687 {
2688 gimple stmt2 = SSA_NAME_DEF_STMT (rhs2);
2689 if (has_single_use (rhs2)
2690 && is_gimple_assign (stmt2)
2691 && gimple_assign_rhs_code (stmt2) == MULT_EXPR)
2692 return false;
2693 }
2694 }
2695
2696 /* We can't handle a * b + a * b. */
2697 if (gimple_assign_rhs1 (use_stmt) == gimple_assign_rhs2 (use_stmt))
2698 return false;
2699
2700 /* While it is possible to validate whether or not the exact form
2701 that we've recognized is available in the backend, the assumption
2702 is that the transformation is never a loss. For instance, suppose
2703 the target only has the plain FMA pattern available. Consider
2704 a*b-c -> fma(a,b,-c): we've exchanged MUL+SUB for FMA+NEG, which
2705 is still two operations. Consider -(a*b)-c -> fma(-a,b,-c): we
2706 still have 3 operations, but in the FMA form the two NEGs are
2707 independent and could be run in parallel. */
2708 }
2709
2710 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, mul_result)
2711 {
2712 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
2713 enum tree_code use_code;
2714 tree addop, mulop1 = op1, result = mul_result;
2715 bool negate_p = false;
2716
2717 if (is_gimple_debug (use_stmt))
2718 continue;
2719
2720 use_code = gimple_assign_rhs_code (use_stmt);
2721 if (use_code == NEGATE_EXPR)
2722 {
2723 result = gimple_assign_lhs (use_stmt);
2724 single_imm_use (gimple_assign_lhs (use_stmt), &use_p, &neguse_stmt);
2725 gsi_remove (&gsi, true);
2726 release_defs (use_stmt);
2727
2728 use_stmt = neguse_stmt;
2729 gsi = gsi_for_stmt (use_stmt);
2730 use_code = gimple_assign_rhs_code (use_stmt);
2731 negate_p = true;
2732 }
2733
2734 if (gimple_assign_rhs1 (use_stmt) == result)
2735 {
2736 addop = gimple_assign_rhs2 (use_stmt);
2737 /* a * b - c -> a * b + (-c) */
2738 if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
2739 addop = force_gimple_operand_gsi (&gsi,
2740 build1 (NEGATE_EXPR,
2741 type, addop),
2742 true, NULL_TREE, true,
2743 GSI_SAME_STMT);
2744 }
2745 else
2746 {
2747 addop = gimple_assign_rhs1 (use_stmt);
2748 /* a - b * c -> (-b) * c + a */
2749 if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
2750 negate_p = !negate_p;
2751 }
2752
2753 if (negate_p)
2754 mulop1 = force_gimple_operand_gsi (&gsi,
2755 build1 (NEGATE_EXPR,
2756 type, mulop1),
2757 true, NULL_TREE, true,
2758 GSI_SAME_STMT);
2759
2760 fma_stmt = gimple_build_assign_with_ops (FMA_EXPR,
2761 gimple_assign_lhs (use_stmt),
2762 mulop1, op2,
2763 addop);
2764 gsi_replace (&gsi, fma_stmt, true);
2765 widen_mul_stats.fmas_inserted++;
2766 }
2767
2768 return true;
2769 }
2770
2771 /* Find integer multiplications where the operands are extended from
2772 smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR
2773 where appropriate. */
2774
2775 namespace {
2776
2777 const pass_data pass_data_optimize_widening_mul =
2778 {
2779 GIMPLE_PASS, /* type */
2780 "widening_mul", /* name */
2781 OPTGROUP_NONE, /* optinfo_flags */
2782 true, /* has_execute */
2783 TV_NONE, /* tv_id */
2784 PROP_ssa, /* properties_required */
2785 0, /* properties_provided */
2786 0, /* properties_destroyed */
2787 0, /* todo_flags_start */
2788 TODO_update_ssa, /* todo_flags_finish */
2789 };
2790
2791 class pass_optimize_widening_mul : public gimple_opt_pass
2792 {
2793 public:
2794 pass_optimize_widening_mul (gcc::context *ctxt)
2795 : gimple_opt_pass (pass_data_optimize_widening_mul, ctxt)
2796 {}
2797
2798 /* opt_pass methods: */
2799 virtual bool gate (function *)
2800 {
2801 return flag_expensive_optimizations && optimize;
2802 }
2803
2804 virtual unsigned int execute (function *);
2805
2806 }; // class pass_optimize_widening_mul
2807
2808 unsigned int
2809 pass_optimize_widening_mul::execute (function *fun)
2810 {
2811 basic_block bb;
2812 bool cfg_changed = false;
2813
2814 memset (&widen_mul_stats, 0, sizeof (widen_mul_stats));
2815
2816 FOR_EACH_BB_FN (bb, fun)
2817 {
2818 gimple_stmt_iterator gsi;
2819
2820 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
2821 {
2822 gimple stmt = gsi_stmt (gsi);
2823 enum tree_code code;
2824
2825 if (is_gimple_assign (stmt))
2826 {
2827 code = gimple_assign_rhs_code (stmt);
2828 switch (code)
2829 {
2830 case MULT_EXPR:
2831 if (!convert_mult_to_widen (stmt, &gsi)
2832 && convert_mult_to_fma (stmt,
2833 gimple_assign_rhs1 (stmt),
2834 gimple_assign_rhs2 (stmt)))
2835 {
2836 gsi_remove (&gsi, true);
2837 release_defs (stmt);
2838 continue;
2839 }
2840 break;
2841
2842 case PLUS_EXPR:
2843 case MINUS_EXPR:
2844 convert_plusminus_to_widen (&gsi, stmt, code);
2845 break;
2846
2847 default:;
2848 }
2849 }
2850 else if (is_gimple_call (stmt)
2851 && gimple_call_lhs (stmt))
2852 {
2853 tree fndecl = gimple_call_fndecl (stmt);
2854 if (fndecl
2855 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
2856 {
2857 switch (DECL_FUNCTION_CODE (fndecl))
2858 {
2859 case BUILT_IN_POWF:
2860 case BUILT_IN_POW:
2861 case BUILT_IN_POWL:
2862 if (TREE_CODE (gimple_call_arg (stmt, 1)) == REAL_CST
2863 && REAL_VALUES_EQUAL
2864 (TREE_REAL_CST (gimple_call_arg (stmt, 1)),
2865 dconst2)
2866 && convert_mult_to_fma (stmt,
2867 gimple_call_arg (stmt, 0),
2868 gimple_call_arg (stmt, 0)))
2869 {
2870 unlink_stmt_vdef (stmt);
2871 if (gsi_remove (&gsi, true)
2872 && gimple_purge_dead_eh_edges (bb))
2873 cfg_changed = true;
2874 release_defs (stmt);
2875 continue;
2876 }
2877 break;
2878
2879 default:;
2880 }
2881 }
2882 }
2883 gsi_next (&gsi);
2884 }
2885 }
2886
2887 statistics_counter_event (fun, "widening multiplications inserted",
2888 widen_mul_stats.widen_mults_inserted);
2889 statistics_counter_event (fun, "widening maccs inserted",
2890 widen_mul_stats.maccs_inserted);
2891 statistics_counter_event (fun, "fused multiply-adds inserted",
2892 widen_mul_stats.fmas_inserted);
2893
2894 return cfg_changed ? TODO_cleanup_cfg : 0;
2895 }
2896
2897 } // anon namespace
2898
2899 gimple_opt_pass *
2900 make_pass_optimize_widening_mul (gcc::context *ctxt)
2901 {
2902 return new pass_optimize_widening_mul (ctxt);
2903 }