1 /* Global, SSA-based optimizations using mathematical identities.
2 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
3 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
10 later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 /* The first mini-pass in this file tries to CSE reciprocal
22 operations. These are common in sequences such as this one:
23
24 modulus = sqrt(x*x + y*y + z*z);
25 x = x / modulus;
26 y = y / modulus;
27 z = z / modulus;
28
29 that can be optimized to
30
31 modulus = sqrt(x*x + y*y + z*z);
32 rmodulus = 1.0 / modulus;
33 x = x * rmodulus;
34 y = y * rmodulus;
35 z = z * rmodulus;
36
37 We do this for loop invariant divisors, and with this pass whenever
38 we notice that a division has the same divisor multiple times.
39
40 Of course, like in PRE, we don't insert a division if a dominator
41 already has one. However, this cannot be done as an extension of
42 PRE for several reasons.
43
44 First of all, with some experiments it was found that the
45 transformation is not always useful if there are only two divisions
46 by the same divisor. This is probably because modern processors
47 can pipeline the divisions; on older, in-order processors it should
48 still be effective to optimize two divisions by the same number.
49 We make this a param, and it shall be called N in the remainder of
50 this comment.
51
52 Second, if trapping math is active, we have less freedom on where
53 to insert divisions: we can only do so in basic blocks that already
54 contain one. (If divisions don't trap, we can instead insert
55 divisions elsewhere, which will be in blocks that are common dominators
56 of those that have the division).
57
58 We really don't want to compute the reciprocal unless a division will
59 be found. To do this, we won't insert the division in a basic block
60 that has less than N divisions *post-dominating* it.
61
62 The algorithm constructs a subset of the dominator tree, holding the
63 blocks containing the divisions and their common dominators,
64 and walks it twice. The first walk is in post-order, and it annotates
65 each block with the number of divisions that post-dominate it: this
66 gives information on where divisions can be inserted profitably.
67 The second walk is in pre-order, and it inserts divisions as explained
68 above, and replaces divisions by multiplications.
69
70 In the best case, the cost of the pass is O(n_statements). In the
71 worst-case, the cost is due to creating the dominator tree subset,
72 with a cost of O(n_basic_blocks ^ 2); however this can only happen
73 for n_statements / n_basic_blocks statements. So, the amortized cost
74 of creating the dominator tree subset is O(n_basic_blocks) and the
75 worst-case cost of the pass is O(n_statements * n_basic_blocks).
76
77 More practically, the cost will be small because there are few
78 divisions, and they tend to be in the same basic block, so insert_bb
79 is called very few times.
80
81 If we did this using domwalk.c, an efficient implementation would have
82 to work on all the variables in a single pass, because we could not
83 work on just a subset of the dominator tree, as we do now, and the
84 cost would also be something like O(n_statements * n_basic_blocks).
85 The data structures would be more complex in order to work on all the
86 variables in a single pass. */
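
/* As an illustrative sketch of the post-dominance rule (not part of
   the pass; assume a threshold of N = 2, see
   targetm.min_divisions_for_recip_mul below):

     if (p)
       a = x / d;
     else
       b = y / d;

   Neither branch post-dominates the block containing the test, so no
   block has two divisions post-dominating it and no reciprocal is
   inserted.  By contrast, in the straight-line modulus example above
   all three divisions share one block, so 1.0 / modulus is inserted
   there and the divisions become multiplications.  */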
87
88 #include "config.h"
89 #include "system.h"
90 #include "coretypes.h"
91 #include "tm.h"
92 #include "flags.h"
93 #include "tree.h"
94 #include "tree-flow.h"
95 #include "timevar.h"
96 #include "tree-pass.h"
97 #include "alloc-pool.h"
98 #include "basic-block.h"
99 #include "target.h"
100 #include "gimple-pretty-print.h"
101
102 /* FIXME: RTL headers have to be included here for optabs. */
103 #include "rtl.h" /* Because optabs.h wants enum rtx_code. */
104 #include "expr.h" /* Because optabs.h wants sepops. */
105 #include "optabs.h"
106
107 /* This structure represents one basic block that either computes a
108 division, or is a common dominator for basic blocks that compute a
109 division. */
110 struct occurrence {
111 /* The basic block represented by this structure. */
112 basic_block bb;
113
114 /* If non-NULL, the SSA_NAME holding the definition for a reciprocal
115 inserted in BB. */
116 tree recip_def;
117
118 /* If non-NULL, the GIMPLE_ASSIGN for a reciprocal computation that
119 was inserted in BB. */
120 gimple recip_def_stmt;
121
122 /* Pointer to a list of "struct occurrence"s for blocks dominated
123 by BB. */
124 struct occurrence *children;
125
126 /* Pointer to the next "struct occurrence" in the list of blocks
127 sharing a common dominator. */
128 struct occurrence *next;
129
130 /* The number of divisions that are in BB before compute_merit. The
131 number of divisions that are in BB or post-dominate it after
132 compute_merit. */
133 int num_divisions;
134
135 /* True if the basic block has a division, false if it is a common
136 dominator for basic blocks that do. If it is false and trapping
137 math is active, BB is not a candidate for inserting a reciprocal. */
138 bool bb_has_division;
139 };
140
141 static struct
142 {
143 /* Number of 1.0/X ops inserted. */
144 int rdivs_inserted;
145
146 /* Number of 1.0/FUNC ops inserted. */
147 int rfuncs_inserted;
148 } reciprocal_stats;
149
150 static struct
151 {
152 /* Number of cexpi calls inserted. */
153 int inserted;
154 } sincos_stats;
155
156 static struct
157 {
158 /* Number of hand-written 32-bit bswaps found. */
159 int found_32bit;
160
161 /* Number of hand-written 64-bit bswaps found. */
162 int found_64bit;
163 } bswap_stats;
164
165 static struct
166 {
167 /* Number of widening multiplication ops inserted. */
168 int widen_mults_inserted;
169
170 /* Number of integer multiply-and-accumulate ops inserted. */
171 int maccs_inserted;
172
173 /* Number of fp fused multiply-add ops inserted. */
174 int fmas_inserted;
175 } widen_mul_stats;
176
177 /* The instance of "struct occurrence" representing the highest
178 interesting block in the dominator tree. */
179 static struct occurrence *occ_head;
180
181 /* Allocation pool for getting instances of "struct occurrence". */
182 static alloc_pool occ_pool;
183
184
185
186 /* Allocate and return a new struct occurrence for basic block BB,
187 whose children list is headed by CHILDREN. */
188 static struct occurrence *
189 occ_new (basic_block bb, struct occurrence *children)
190 {
191 struct occurrence *occ;
192
193 bb->aux = occ = (struct occurrence *) pool_alloc (occ_pool);
194 memset (occ, 0, sizeof (struct occurrence));
195
196 occ->bb = bb;
197 occ->children = children;
198 return occ;
199 }
200
201
202 /* Insert NEW_OCC into our subset of the dominator tree. P_HEAD points to a
203 list of "struct occurrence"s, one per basic block, having IDOM as
204 their common dominator.
205
206 We try to insert NEW_OCC as deep as possible in the tree, and we also
207 insert any other block that is a common dominator for BB and one
208 block already in the tree. */
209
210 static void
211 insert_bb (struct occurrence *new_occ, basic_block idom,
212 struct occurrence **p_head)
213 {
214 struct occurrence *occ, **p_occ;
215
216 for (p_occ = p_head; (occ = *p_occ) != NULL; )
217 {
218 basic_block bb = new_occ->bb, occ_bb = occ->bb;
219 basic_block dom = nearest_common_dominator (CDI_DOMINATORS, occ_bb, bb);
220 if (dom == bb)
221 {
222 /* BB dominates OCC_BB. OCC becomes NEW_OCC's child: remove OCC
223 from its list. */
224 *p_occ = occ->next;
225 occ->next = new_occ->children;
226 new_occ->children = occ;
227
228 /* Try the next block (it may as well be dominated by BB). */
229 }
230
231 else if (dom == occ_bb)
232 {
233 /* OCC_BB dominates BB. Tail recurse to look deeper. */
234 insert_bb (new_occ, dom, &occ->children);
235 return;
236 }
237
238 else if (dom != idom)
239 {
240 gcc_assert (!dom->aux);
241
242 /* There is a dominator between IDOM and BB, add it and make
243 two children out of NEW_OCC and OCC. First, remove OCC from
244 its list. */
245 *p_occ = occ->next;
246 new_occ->next = occ;
247 occ->next = NULL;
248
249 /* None of the previous blocks has DOM as a dominator: if we tail
250 recursed, we would reexamine them uselessly. Just switch BB with
251 DOM, and go on looking for blocks dominated by DOM. */
252 new_occ = occ_new (dom, new_occ);
253 }
254
255 else
256 {
257 /* Nothing special, go on with the next element. */
258 p_occ = &occ->next;
259 }
260 }
261
262 /* No place was found as a child of IDOM. Make BB a sibling of IDOM. */
263 new_occ->next = *p_head;
264 *p_head = new_occ;
265 }
266
267 /* Register that we found a division in BB. */
268
269 static inline void
270 register_division_in (basic_block bb)
271 {
272 struct occurrence *occ;
273
274 occ = (struct occurrence *) bb->aux;
275 if (!occ)
276 {
277 occ = occ_new (bb, NULL);
278 insert_bb (occ, ENTRY_BLOCK_PTR, &occ_head);
279 }
280
281 occ->bb_has_division = true;
282 occ->num_divisions++;
283 }
284
285
286 /* Compute the number of divisions that postdominate each block in OCC and
287 its children. */
288
289 static void
290 compute_merit (struct occurrence *occ)
291 {
292 struct occurrence *occ_child;
293 basic_block dom = occ->bb;
294
295 for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
296 {
297 basic_block bb;
298 if (occ_child->children)
299 compute_merit (occ_child);
300
301 if (flag_exceptions)
302 bb = single_noncomplex_succ (dom);
303 else
304 bb = dom;
305
306 if (dominated_by_p (CDI_POST_DOMINATORS, bb, occ_child->bb))
307 occ->num_divisions += occ_child->num_divisions;
308 }
309 }
310
311
312 /* Return whether USE_STMT is a floating-point division by DEF. */
313 static inline bool
314 is_division_by (gimple use_stmt, tree def)
315 {
316 return is_gimple_assign (use_stmt)
317 && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
318 && gimple_assign_rhs2 (use_stmt) == def
319 /* Do not recognize x / x as valid division, as we are getting
320 confused later by replacing all immediate uses x in such
321 a stmt. */
322 && gimple_assign_rhs1 (use_stmt) != def;
323 }
324
325 /* Walk the subset of the dominator tree rooted at OCC, setting the
326 RECIP_DEF field to a definition of 1.0 / DEF that can be used in
327 the given basic block. The field may be left NULL, of course,
328 if it is not possible or profitable to do the optimization.
329
330 DEF_GSI is an iterator pointing at the statement defining DEF.
331 If RECIP_DEF is set, a dominator already has a computation that can
332 be used. */
333
334 static void
335 insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ,
336 tree def, tree recip_def, int threshold)
337 {
338 tree type;
339 gimple new_stmt;
340 gimple_stmt_iterator gsi;
341 struct occurrence *occ_child;
342
343 if (!recip_def
344 && (occ->bb_has_division || !flag_trapping_math)
345 && occ->num_divisions >= threshold)
346 {
347 /* Make a variable with the replacement and substitute it. */
348 type = TREE_TYPE (def);
349 recip_def = make_rename_temp (type, "reciptmp");
350 new_stmt = gimple_build_assign_with_ops (RDIV_EXPR, recip_def,
351 build_one_cst (type), def);
352
353 if (occ->bb_has_division)
354 {
355 /* Case 1: insert before an existing division. */
356 gsi = gsi_after_labels (occ->bb);
357 while (!gsi_end_p (gsi) && !is_division_by (gsi_stmt (gsi), def))
358 gsi_next (&gsi);
359
360 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
361 }
362 else if (def_gsi && occ->bb == def_gsi->bb)
363 {
364 /* Case 2: insert right after the definition. Note that this will
365 never happen if the definition statement can throw, because in
366 that case the sole successor of the statement's basic block will
367 dominate all the uses as well. */
368 gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
369 }
370 else
371 {
372 /* Case 3: insert in a basic block not containing defs/uses. */
373 gsi = gsi_after_labels (occ->bb);
374 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
375 }
376
377 reciprocal_stats.rdivs_inserted++;
378
379 occ->recip_def_stmt = new_stmt;
380 }
381
382 occ->recip_def = recip_def;
383 for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
384 insert_reciprocals (def_gsi, occ_child, def, recip_def, threshold);
385 }
386
387
388 /* Replace the division at USE_P with a multiplication by the reciprocal, if
389 possible. */
390
391 static inline void
392 replace_reciprocal (use_operand_p use_p)
393 {
394 gimple use_stmt = USE_STMT (use_p);
395 basic_block bb = gimple_bb (use_stmt);
396 struct occurrence *occ = (struct occurrence *) bb->aux;
397
398 if (optimize_bb_for_speed_p (bb)
399 && occ->recip_def && use_stmt != occ->recip_def_stmt)
400 {
401 gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
402 SET_USE (use_p, occ->recip_def);
403 fold_stmt_inplace (use_stmt);
404 update_stmt (use_stmt);
405 }
406 }
407
408
409 /* Free OCC and return the next "struct occurrence" to be freed. */
410
411 static struct occurrence *
412 free_bb (struct occurrence *occ)
413 {
414 struct occurrence *child, *next;
415
416 /* First get the two pointers hanging off OCC. */
417 next = occ->next;
418 child = occ->children;
419 occ->bb->aux = NULL;
420 pool_free (occ_pool, occ);
421
422 /* Now ensure that we don't recurse unless it is necessary. */
423 if (!child)
424 return next;
425 else
426 {
427 while (next)
428 next = free_bb (next);
429
430 return child;
431 }
432 }
433
434
435 /* Look for floating-point divisions among DEF's uses, and try to
436 replace them by multiplications with the reciprocal. Add
437 as many statements computing the reciprocal as needed.
438
439 DEF must be a GIMPLE register of a floating-point type. */
440
441 static void
442 execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
443 {
444 use_operand_p use_p;
445 imm_use_iterator use_iter;
446 struct occurrence *occ;
447 int count = 0, threshold;
448
449 gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && is_gimple_reg (def));
450
451 FOR_EACH_IMM_USE_FAST (use_p, use_iter, def)
452 {
453 gimple use_stmt = USE_STMT (use_p);
454 if (is_division_by (use_stmt, def))
455 {
456 register_division_in (gimple_bb (use_stmt));
457 count++;
458 }
459 }
460
461 /* Do the expensive part only if we can hope to optimize something. */
462 threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));
463 if (count >= threshold)
464 {
465 gimple use_stmt;
466 for (occ = occ_head; occ; occ = occ->next)
467 {
468 compute_merit (occ);
469 insert_reciprocals (def_gsi, occ, def, NULL, threshold);
470 }
471
472 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, def)
473 {
474 if (is_division_by (use_stmt, def))
475 {
476 FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
477 replace_reciprocal (use_p);
478 }
479 }
480 }
481
482 for (occ = occ_head; occ; )
483 occ = free_bb (occ);
484
485 occ_head = NULL;
486 }
487
488 static bool
489 gate_cse_reciprocals (void)
490 {
491 return optimize && flag_reciprocal_math;
492 }
493
494 /* Go through all the floating-point SSA_NAMEs, and call
495 execute_cse_reciprocals_1 on each of them. */
496 static unsigned int
497 execute_cse_reciprocals (void)
498 {
499 basic_block bb;
500 tree arg;
501
502 occ_pool = create_alloc_pool ("dominators for recip",
503 sizeof (struct occurrence),
504 n_basic_blocks / 3 + 1);
505
506 memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
507 calculate_dominance_info (CDI_DOMINATORS);
508 calculate_dominance_info (CDI_POST_DOMINATORS);
509
510 #ifdef ENABLE_CHECKING
511 FOR_EACH_BB (bb)
512 gcc_assert (!bb->aux);
513 #endif
514
515 for (arg = DECL_ARGUMENTS (cfun->decl); arg; arg = DECL_CHAIN (arg))
516 if (gimple_default_def (cfun, arg)
517 && FLOAT_TYPE_P (TREE_TYPE (arg))
518 && is_gimple_reg (arg))
519 execute_cse_reciprocals_1 (NULL, gimple_default_def (cfun, arg));
520
521 FOR_EACH_BB (bb)
522 {
523 gimple_stmt_iterator gsi;
524 gimple phi;
525 tree def;
526
527 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
528 {
529 phi = gsi_stmt (gsi);
530 def = PHI_RESULT (phi);
531 if (FLOAT_TYPE_P (TREE_TYPE (def))
532 && is_gimple_reg (def))
533 execute_cse_reciprocals_1 (NULL, def);
534 }
535
536 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
537 {
538 gimple stmt = gsi_stmt (gsi);
539
540 if (gimple_has_lhs (stmt)
541 && (def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF)) != NULL
542 && FLOAT_TYPE_P (TREE_TYPE (def))
543 && TREE_CODE (def) == SSA_NAME)
544 execute_cse_reciprocals_1 (&gsi, def);
545 }
546
547 if (optimize_bb_for_size_p (bb))
548 continue;
549
550 /* Scan for a/func(b) and convert it to reciprocal a*rfunc(b). */
551 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
552 {
553 gimple stmt = gsi_stmt (gsi);
554 tree fndecl;
555
556 if (is_gimple_assign (stmt)
557 && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
558 {
559 tree arg1 = gimple_assign_rhs2 (stmt);
560 gimple stmt1;
561
562 if (TREE_CODE (arg1) != SSA_NAME)
563 continue;
564
565 stmt1 = SSA_NAME_DEF_STMT (arg1);
566
567 if (is_gimple_call (stmt1)
568 && gimple_call_lhs (stmt1)
569 && (fndecl = gimple_call_fndecl (stmt1))
570 && (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
571 || DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD))
572 {
573 enum built_in_function code;
574 bool md_code, fail;
575 imm_use_iterator ui;
576 use_operand_p use_p;
577
578 code = DECL_FUNCTION_CODE (fndecl);
579 md_code = DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD;
580
581 fndecl = targetm.builtin_reciprocal (code, md_code, false);
582 if (!fndecl)
583 continue;
584
585 /* Check that all uses of the SSA name are divisions,
586 otherwise replacing the defining statement will do
587 the wrong thing. */
588 fail = false;
589 FOR_EACH_IMM_USE_FAST (use_p, ui, arg1)
590 {
591 gimple stmt2 = USE_STMT (use_p);
592 if (is_gimple_debug (stmt2))
593 continue;
594 if (!is_gimple_assign (stmt2)
595 || gimple_assign_rhs_code (stmt2) != RDIV_EXPR
596 || gimple_assign_rhs1 (stmt2) == arg1
597 || gimple_assign_rhs2 (stmt2) != arg1)
598 {
599 fail = true;
600 break;
601 }
602 }
603 if (fail)
604 continue;
605
606 gimple_replace_lhs (stmt1, arg1);
607 gimple_call_set_fndecl (stmt1, fndecl);
608 update_stmt (stmt1);
609 reciprocal_stats.rfuncs_inserted++;
610
611 FOR_EACH_IMM_USE_STMT (stmt, ui, arg1)
612 {
613 gimple_assign_set_rhs_code (stmt, MULT_EXPR);
614 fold_stmt_inplace (stmt);
615 update_stmt (stmt);
616 }
617 }
618 }
619 }
620 }
621
622 statistics_counter_event (cfun, "reciprocal divs inserted",
623 reciprocal_stats.rdivs_inserted);
624 statistics_counter_event (cfun, "reciprocal functions inserted",
625 reciprocal_stats.rfuncs_inserted);
626
627 free_dominance_info (CDI_DOMINATORS);
628 free_dominance_info (CDI_POST_DOMINATORS);
629 free_alloc_pool (occ_pool);
630 return 0;
631 }
632
633 struct gimple_opt_pass pass_cse_reciprocals =
634 {
635 {
636 GIMPLE_PASS,
637 "recip", /* name */
638 gate_cse_reciprocals, /* gate */
639 execute_cse_reciprocals, /* execute */
640 NULL, /* sub */
641 NULL, /* next */
642 0, /* static_pass_number */
643 TV_NONE, /* tv_id */
644 PROP_ssa, /* properties_required */
645 0, /* properties_provided */
646 0, /* properties_destroyed */
647 0, /* todo_flags_start */
648 TODO_dump_func | TODO_update_ssa | TODO_verify_ssa
649 | TODO_verify_stmts /* todo_flags_finish */
650 }
651 };
652
653 /* Records an occurrence at statement USE_STMT in the vector of trees
654 STMTS if it is dominated by *TOP_BB, dominates it, or if *TOP_BB
655 is not yet initialized. Returns true if the occurrence was pushed on
656 the vector. Adjusts *TOP_BB to be the basic block dominating all
657 statements in the vector. */
658
659 static bool
660 maybe_record_sincos (VEC(gimple, heap) **stmts,
661 basic_block *top_bb, gimple use_stmt)
662 {
663 basic_block use_bb = gimple_bb (use_stmt);
664 if (*top_bb
665 && (*top_bb == use_bb
666 || dominated_by_p (CDI_DOMINATORS, use_bb, *top_bb)))
667 VEC_safe_push (gimple, heap, *stmts, use_stmt);
668 else if (!*top_bb
669 || dominated_by_p (CDI_DOMINATORS, *top_bb, use_bb))
670 {
671 VEC_safe_push (gimple, heap, *stmts, use_stmt);
672 *top_bb = use_bb;
673 }
674 else
675 return false;
676
677 return true;
678 }
679
680 /* Look for sin, cos and cexpi calls with the same argument NAME and
681 create a single call to cexpi CSEing the result in this case.
682 We first walk over all immediate uses of the argument collecting
683 statements that we can CSE in a vector and in a second pass replace
684 the statement rhs with a REALPART or IMAGPART expression on the
685 result of the cexpi call we insert before the use statement that
686 dominates all other candidates. */
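
/* For example, a source-level sketch of the transformation (the
   __real__/__imag__ accessors stand for the REALPART_EXPR and
   IMAGPART_EXPR trees actually built):

     s = sinf (x);              t = __builtin_cexpif (x);
     c = cosf (x);      -->     s = __imag__ t;
                                c = __real__ t;

   with the cexpif call inserted in the basic block that dominates
   both of the original calls.  */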
687
688 static bool
689 execute_cse_sincos_1 (tree name)
690 {
691 gimple_stmt_iterator gsi;
692 imm_use_iterator use_iter;
693 tree fndecl, res, type;
694 gimple def_stmt, use_stmt, stmt;
695 int seen_cos = 0, seen_sin = 0, seen_cexpi = 0;
696 VEC(gimple, heap) *stmts = NULL;
697 basic_block top_bb = NULL;
698 int i;
699 bool cfg_changed = false;
700
701 type = TREE_TYPE (name);
702 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, name)
703 {
704 if (gimple_code (use_stmt) != GIMPLE_CALL
705 || !gimple_call_lhs (use_stmt)
706 || !(fndecl = gimple_call_fndecl (use_stmt))
707 || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
708 continue;
709
710 switch (DECL_FUNCTION_CODE (fndecl))
711 {
712 CASE_FLT_FN (BUILT_IN_COS):
713 seen_cos |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
714 break;
715
716 CASE_FLT_FN (BUILT_IN_SIN):
717 seen_sin |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
718 break;
719
720 CASE_FLT_FN (BUILT_IN_CEXPI):
721 seen_cexpi |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
722 break;
723
724 default:;
725 }
726 }
727
728 if (seen_cos + seen_sin + seen_cexpi <= 1)
729 {
730 VEC_free(gimple, heap, stmts);
731 return false;
732 }
733
734 /* Simply insert cexpi at the beginning of top_bb but not earlier than
735 the name def statement. */
736 fndecl = mathfn_built_in (type, BUILT_IN_CEXPI);
737 if (!fndecl)
738 return false;
739 res = create_tmp_reg (TREE_TYPE (TREE_TYPE (fndecl)), "sincostmp");
740 stmt = gimple_build_call (fndecl, 1, name);
741 res = make_ssa_name (res, stmt);
742 gimple_call_set_lhs (stmt, res);
743
744 def_stmt = SSA_NAME_DEF_STMT (name);
745 if (!SSA_NAME_IS_DEFAULT_DEF (name)
746 && gimple_code (def_stmt) != GIMPLE_PHI
747 && gimple_bb (def_stmt) == top_bb)
748 {
749 gsi = gsi_for_stmt (def_stmt);
750 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
751 }
752 else
753 {
754 gsi = gsi_after_labels (top_bb);
755 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
756 }
757 update_stmt (stmt);
758 sincos_stats.inserted++;
759
760 /* And adjust the recorded old call sites. */
761 for (i = 0; VEC_iterate(gimple, stmts, i, use_stmt); ++i)
762 {
763 tree rhs = NULL;
764 fndecl = gimple_call_fndecl (use_stmt);
765
766 switch (DECL_FUNCTION_CODE (fndecl))
767 {
768 CASE_FLT_FN (BUILT_IN_COS):
769 rhs = fold_build1 (REALPART_EXPR, type, res);
770 break;
771
772 CASE_FLT_FN (BUILT_IN_SIN):
773 rhs = fold_build1 (IMAGPART_EXPR, type, res);
774 break;
775
776 CASE_FLT_FN (BUILT_IN_CEXPI):
777 rhs = res;
778 break;
779
780 default:;
781 gcc_unreachable ();
782 }
783
784 /* Replace call with a copy. */
785 stmt = gimple_build_assign (gimple_call_lhs (use_stmt), rhs);
786
787 gsi = gsi_for_stmt (use_stmt);
788 gsi_replace (&gsi, stmt, true);
789 if (gimple_purge_dead_eh_edges (gimple_bb (stmt)))
790 cfg_changed = true;
791 }
792
793 VEC_free(gimple, heap, stmts);
794
795 return cfg_changed;
796 }
797
798 /* To evaluate powi(x,n), the floating point value x raised to the
799 constant integer exponent n, we use a hybrid algorithm that
800 combines the "window method" with look-up tables. For an
801 introduction to exponentiation algorithms and "addition chains",
802 see section 4.6.3, "Evaluation of Powers" of Donald E. Knuth,
803 "Seminumerical Algorithms", Vol. 2, "The Art of Computer Programming",
804 3rd Edition, 1998, and Daniel M. Gordon, "A Survey of Fast Exponentiation
805 Methods", Journal of Algorithms, Vol. 27, pp. 129-146, 1998. */
806
807 /* Provide a default value for POWI_MAX_MULTS, the maximum number of
808 multiplications to inline before calling the system library's pow
809 function. powi(x,n) requires at worst 2*bits(n)-2 multiplications,
810 so this default never requires calling pow, powf or powl. */
811
812 #ifndef POWI_MAX_MULTS
813 #define POWI_MAX_MULTS (2*HOST_BITS_PER_WIDE_INT-2)
814 #endif
815
816 /* The size of the "optimal power tree" lookup table. All
817 exponents less than this value are simply looked up in the
818 powi_table below. This threshold is also used to size the
819 cache of pseudo registers that hold intermediate results. */
820 #define POWI_TABLE_SIZE 256
821
822 /* The size, in bits, of the window used in the "window method"
823 exponentiation algorithm. This is equivalent to a radix of
824 (1<<POWI_WINDOW_SIZE) in the corresponding "m-ary method". */
825 #define POWI_WINDOW_SIZE 3
826
827 /* The following table is an efficient representation of an
828 "optimal power tree". For each value, i, the corresponding
829 value, j, in the table states that an optimal evaluation
830 sequence for calculating pow(x,i) can be found by evaluating
831 pow(x,j)*pow(x,i-j). An optimal power tree for the first
832 100 integers is given in Knuth's "Seminumerical algorithms". */
833
834 static const unsigned char powi_table[POWI_TABLE_SIZE] =
835 {
836 0, 1, 1, 2, 2, 3, 3, 4, /* 0 - 7 */
837 4, 6, 5, 6, 6, 10, 7, 9, /* 8 - 15 */
838 8, 16, 9, 16, 10, 12, 11, 13, /* 16 - 23 */
839 12, 17, 13, 18, 14, 24, 15, 26, /* 24 - 31 */
840 16, 17, 17, 19, 18, 33, 19, 26, /* 32 - 39 */
841 20, 25, 21, 40, 22, 27, 23, 44, /* 40 - 47 */
842 24, 32, 25, 34, 26, 29, 27, 44, /* 48 - 55 */
843 28, 31, 29, 34, 30, 60, 31, 36, /* 56 - 63 */
844 32, 64, 33, 34, 34, 46, 35, 37, /* 64 - 71 */
845 36, 65, 37, 50, 38, 48, 39, 69, /* 72 - 79 */
846 40, 49, 41, 43, 42, 51, 43, 58, /* 80 - 87 */
847 44, 64, 45, 47, 46, 59, 47, 76, /* 88 - 95 */
848 48, 65, 49, 66, 50, 67, 51, 66, /* 96 - 103 */
849 52, 70, 53, 74, 54, 104, 55, 74, /* 104 - 111 */
850 56, 64, 57, 69, 58, 78, 59, 68, /* 112 - 119 */
851 60, 61, 61, 80, 62, 75, 63, 68, /* 120 - 127 */
852 64, 65, 65, 128, 66, 129, 67, 90, /* 128 - 135 */
853 68, 73, 69, 131, 70, 94, 71, 88, /* 136 - 143 */
854 72, 128, 73, 98, 74, 132, 75, 121, /* 144 - 151 */
855 76, 102, 77, 124, 78, 132, 79, 106, /* 152 - 159 */
856 80, 97, 81, 160, 82, 99, 83, 134, /* 160 - 167 */
857 84, 86, 85, 95, 86, 160, 87, 100, /* 168 - 175 */
858 88, 113, 89, 98, 90, 107, 91, 122, /* 176 - 183 */
859 92, 111, 93, 102, 94, 126, 95, 150, /* 184 - 191 */
860 96, 128, 97, 130, 98, 133, 99, 195, /* 192 - 199 */
861 100, 128, 101, 123, 102, 164, 103, 138, /* 200 - 207 */
862 104, 145, 105, 146, 106, 109, 107, 149, /* 208 - 215 */
863 108, 200, 109, 146, 110, 170, 111, 157, /* 216 - 223 */
864 112, 128, 113, 130, 114, 182, 115, 132, /* 224 - 231 */
865 116, 200, 117, 132, 118, 158, 119, 206, /* 232 - 239 */
866 120, 240, 121, 162, 122, 147, 123, 152, /* 240 - 247 */
867 124, 166, 125, 214, 126, 138, 127, 153, /* 248 - 255 */
868 };
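
/* Reading the table above, for example: powi_table[13] == 10, so
   x**13 is evaluated as x**10 * x**3; recursively, x**10 = x**5 * x**5,
   x**5 = x**3 * x**2, x**3 = x**2 * x, and x**2 = x * x.  That is the
   addition chain 1, 2, 3, 5, 10, 13: five multiplications in all.  */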
869
870
871 /* Return the number of multiplications required to calculate
872 powi(x,n) where n is less than POWI_TABLE_SIZE. This is a
873 subroutine of powi_cost. CACHE is an array indicating
874 which exponents have already been calculated. */
875
876 static int
877 powi_lookup_cost (unsigned HOST_WIDE_INT n, bool *cache)
878 {
879 /* If we've already calculated this exponent, then this evaluation
880 doesn't require any additional multiplications. */
881 if (cache[n])
882 return 0;
883
884 cache[n] = true;
885 return powi_lookup_cost (n - powi_table[n], cache)
886 + powi_lookup_cost (powi_table[n], cache) + 1;
887 }
888
889 /* Return the number of multiplications required to calculate
890 powi(x,n) for an arbitrary x, given the exponent N. This
891 function needs to be kept in sync with powi_as_mults below. */
892
893 static int
894 powi_cost (HOST_WIDE_INT n)
895 {
896 bool cache[POWI_TABLE_SIZE];
897 unsigned HOST_WIDE_INT digit;
898 unsigned HOST_WIDE_INT val;
899 int result;
900
901 if (n == 0)
902 return 0;
903
904 /* Ignore the reciprocal when calculating the cost. */
905 val = (n < 0) ? -n : n;
906
907 /* Initialize the exponent cache. */
908 memset (cache, 0, POWI_TABLE_SIZE * sizeof (bool));
909 cache[1] = true;
910
911 result = 0;
912
913 while (val >= POWI_TABLE_SIZE)
914 {
915 if (val & 1)
916 {
917 digit = val & ((1 << POWI_WINDOW_SIZE) - 1);
918 result += powi_lookup_cost (digit, cache)
919 + POWI_WINDOW_SIZE + 1;
920 val >>= POWI_WINDOW_SIZE;
921 }
922 else
923 {
924 val >>= 1;
925 result++;
926 }
927 }
928
929 return result + powi_lookup_cost (val, cache);
930 }
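
/* As a sanity check of the two paths above: powi_cost (13) never
   enters the while loop (13 < POWI_TABLE_SIZE) and returns 5, the
   length of the addition chain shown after powi_table; for an even
   exponent such as 1 << 20, each loop iteration strips one zero bit
   and charges a single squaring.  */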
931
932 /* Recursive subroutine of powi_as_mults. This function takes the
933 array, CACHE, of already calculated exponents and an exponent N and
934 returns a tree that corresponds to CACHE[1]**N, with type TYPE. */
935
936 static tree
937 powi_as_mults_1 (gimple_stmt_iterator *gsi, location_t loc, tree type,
938 HOST_WIDE_INT n, tree *cache, tree target)
939 {
940 tree op0, op1, ssa_target;
941 unsigned HOST_WIDE_INT digit;
942 gimple mult_stmt;
943
944 if (n < POWI_TABLE_SIZE && cache[n])
945 return cache[n];
946
947 ssa_target = make_ssa_name (target, NULL);
948
949 if (n < POWI_TABLE_SIZE)
950 {
951 cache[n] = ssa_target;
952 op0 = powi_as_mults_1 (gsi, loc, type, n - powi_table[n], cache, target);
953 op1 = powi_as_mults_1 (gsi, loc, type, powi_table[n], cache, target);
954 }
955 else if (n & 1)
956 {
957 digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
958 op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache, target);
959 op1 = powi_as_mults_1 (gsi, loc, type, digit, cache, target);
960 }
961 else
962 {
963 op0 = powi_as_mults_1 (gsi, loc, type, n >> 1, cache, target);
964 op1 = op0;
965 }
966
967 mult_stmt = gimple_build_assign_with_ops (MULT_EXPR, ssa_target, op0, op1);
968 gimple_set_location (mult_stmt, loc);
969 gsi_insert_before (gsi, mult_stmt, GSI_SAME_STMT);
970
971 return ssa_target;
972 }
973
974 /* Convert ARG0**N to a tree of multiplications of ARG0 with itself.
975 This function needs to be kept in sync with powi_cost above. */
976
977 static tree
978 powi_as_mults (gimple_stmt_iterator *gsi, location_t loc,
979 tree arg0, HOST_WIDE_INT n)
980 {
981 tree cache[POWI_TABLE_SIZE], result, type = TREE_TYPE (arg0), target;
982 gimple div_stmt;
983
984 if (n == 0)
985 return build_real (type, dconst1);
986
987 memset (cache, 0, sizeof (cache));
988 cache[1] = arg0;
989
990 target = create_tmp_var (type, "powmult");
991 add_referenced_var (target);
992
993 result = powi_as_mults_1 (gsi, loc, type, (n < 0) ? -n : n, cache, target);
994
995 if (n >= 0)
996 return result;
997
998 /* If the original exponent was negative, reciprocate the result. */
999 target = make_ssa_name (target, NULL);
1000 div_stmt = gimple_build_assign_with_ops (RDIV_EXPR, target,
1001 build_real (type, dconst1),
1002 result);
1003 gimple_set_location (div_stmt, loc);
1004 gsi_insert_before (gsi, div_stmt, GSI_SAME_STMT);
1005
1006 return target;
1007 }
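
/* For instance, for n = 5 the recursion above emits (with
   illustrative SSA names):

     powmult_1 = x * x;                   x**2, cached
     powmult_2 = x * powmult_1;           x**3
     powmult_3 = powmult_1 * powmult_2;   x**5

   i.e. three multiplications, in agreement with powi_cost (5).  */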
1008
1009 /* ARG0 and N are the two arguments to a powi builtin in GSI with
1010 location info LOC. If the arguments are appropriate, create an
1011 equivalent sequence of statements prior to GSI using an optimal
1012 number of multiplications, and return an expression holding the
1013 result. */
1014
1015 static tree
1016 gimple_expand_builtin_powi (gimple_stmt_iterator *gsi, location_t loc,
1017 tree arg0, HOST_WIDE_INT n)
1018 {
1019 /* Avoid largest negative number. */
1020 if (n != -n
1021 && ((n >= -1 && n <= 2)
1022 || (optimize_function_for_speed_p (cfun)
1023 && powi_cost (n) <= POWI_MAX_MULTS)))
1024 return powi_as_mults (gsi, loc, arg0, n);
1025
1026 return NULL_TREE;
1027 }
1028
1029 /* Build a gimple call statement that calls FN with argument ARG.
1030 Set the lhs of the call statement to a fresh SSA name for
1031 variable VAR. If VAR is NULL, first allocate it. Insert the
1032 statement prior to GSI's current position, and return the fresh
1033 SSA name. */
1034
1035 static tree
1036 build_and_insert_call (gimple_stmt_iterator *gsi, location_t loc,
1037 tree *var, tree fn, tree arg)
1038 {
1039 gimple call_stmt;
1040 tree ssa_target;
1041
1042 if (!*var)
1043 {
1044 *var = create_tmp_var (TREE_TYPE (arg), "powroot");
1045 add_referenced_var (*var);
1046 }
1047
1048 call_stmt = gimple_build_call (fn, 1, arg);
1049 ssa_target = make_ssa_name (*var, NULL);
1050 gimple_set_lhs (call_stmt, ssa_target);
1051 gimple_set_location (call_stmt, loc);
1052 gsi_insert_before (gsi, call_stmt, GSI_SAME_STMT);
1053
1054 return ssa_target;
1055 }
1056
1057 /* Build a gimple binary operation with the given CODE and arguments
1058 ARG0, ARG1, assigning the result to a new SSA name for variable
1059 TARGET. Insert the statement prior to GSI's current position, and
1060 return the fresh SSA name. */
1061
1062 static tree
1063 build_and_insert_binop (gimple_stmt_iterator *gsi, location_t loc,
1064 tree target, enum tree_code code, tree arg0, tree arg1)
1065 {
1066 tree result = make_ssa_name (target, NULL);
1067 gimple stmt = gimple_build_assign_with_ops (code, result, arg0, arg1);
1068 gimple_set_location (stmt, loc);
1069 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
1070 return result;
1071 }
1072
1073 /* ARG0 and ARG1 are the two arguments to a pow builtin call in GSI
1074 with location info LOC. If possible, create an equivalent and
1075 less expensive sequence of statements prior to GSI, and return an
1076 expression holding the result. */
1077
1078 static tree
1079 gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc,
1080 tree arg0, tree arg1)
1081 {
1082 REAL_VALUE_TYPE c, cint, dconst1_4, dconst3_4, dconst1_3, dconst1_6;
1083 REAL_VALUE_TYPE c2, dconst3;
1084 HOST_WIDE_INT n;
1085 tree type, sqrtfn, cbrtfn, sqrt_arg0, sqrt_sqrt, result, cbrt_x, powi_cbrt_x;
1086 tree target = NULL_TREE;
1087 enum machine_mode mode;
1088 bool hw_sqrt_exists;
1089
1090 /* If the exponent isn't a constant, there's nothing of interest
1091 to be done. */
1092 if (TREE_CODE (arg1) != REAL_CST)
1093 return NULL_TREE;
1094
1095 /* If the exponent is equivalent to an integer, expand to an optimal
1096 multiplication sequence when profitable. */
1097 c = TREE_REAL_CST (arg1);
1098 n = real_to_integer (&c);
1099 real_from_integer (&cint, VOIDmode, n, n < 0 ? -1 : 0, 0);
1100
1101 if (real_identical (&c, &cint)
1102 && ((n >= -1 && n <= 2)
1103 || (flag_unsafe_math_optimizations
1104 && optimize_insn_for_speed_p ()
1105 && powi_cost (n) <= POWI_MAX_MULTS)))
1106 return gimple_expand_builtin_powi (gsi, loc, arg0, n);
1107
1108 /* Attempt various optimizations using sqrt and cbrt. */
1109 type = TREE_TYPE (arg0);
1110 mode = TYPE_MODE (type);
1111 sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
1112
1113 /* Optimize pow(x,0.5) = sqrt(x). This replacement is always safe
1114 unless signed zeros must be maintained. pow(-0,0.5) = +0, while
1115 sqrt(-0) = -0. */
1116 if (sqrtfn
1117 && REAL_VALUES_EQUAL (c, dconsthalf)
1118 && !HONOR_SIGNED_ZEROS (mode))
1119 return build_and_insert_call (gsi, loc, &target, sqrtfn, arg0);
1120
1121 /* Optimize pow(x,0.25) = sqrt(sqrt(x)). Assume on most machines that
1122 a builtin sqrt instruction is smaller than a call to pow with 0.25,
1123 so do this optimization even if -Os. Don't do this optimization
1124 if we don't have a hardware sqrt insn. */
1125 dconst1_4 = dconst1;
1126 SET_REAL_EXP (&dconst1_4, REAL_EXP (&dconst1_4) - 2);
1127 hw_sqrt_exists = optab_handler (sqrt_optab, mode) != CODE_FOR_nothing;
1128
1129 if (flag_unsafe_math_optimizations
1130 && sqrtfn
1131 && REAL_VALUES_EQUAL (c, dconst1_4)
1132 && hw_sqrt_exists)
1133 {
1134 /* sqrt(x) */
1135 sqrt_arg0 = build_and_insert_call (gsi, loc, &target, sqrtfn, arg0);
1136
1137 /* sqrt(sqrt(x)) */
1138 return build_and_insert_call (gsi, loc, &target, sqrtfn, sqrt_arg0);
1139 }
1140
1141 /* Optimize pow(x,0.75) = sqrt(x) * sqrt(sqrt(x)) unless we are
1142 optimizing for space. Don't do this optimization if we don't have
1143 a hardware sqrt insn. */
1144 real_from_integer (&dconst3_4, VOIDmode, 3, 0, 0);
1145 SET_REAL_EXP (&dconst3_4, REAL_EXP (&dconst3_4) - 2);
1146
1147 if (flag_unsafe_math_optimizations
1148 && sqrtfn
1149 && optimize_function_for_speed_p (cfun)
1150 && REAL_VALUES_EQUAL (c, dconst3_4)
1151 && hw_sqrt_exists)
1152 {
1153 /* sqrt(x) */
1154 sqrt_arg0 = build_and_insert_call (gsi, loc, &target, sqrtfn, arg0);
1155
1156 /* sqrt(sqrt(x)) */
1157 sqrt_sqrt = build_and_insert_call (gsi, loc, &target, sqrtfn, sqrt_arg0);
1158
1159 /* sqrt(x) * sqrt(sqrt(x)) */
1160 return build_and_insert_binop (gsi, loc, target, MULT_EXPR,
1161 sqrt_arg0, sqrt_sqrt);
1162 }
1163
1164 /* Optimize pow(x,1./3.) = cbrt(x). This requires unsafe math
1165 optimizations since 1./3. is not exactly representable. If x
1166 is negative and finite, the correct value of pow(x,1./3.) is
1167 a NaN with the "invalid" exception raised, because the value
1168 of 1./3. actually has an even denominator. The correct value
1169 of cbrt(x) is a negative real value. */
1170 cbrtfn = mathfn_built_in (type, BUILT_IN_CBRT);
1171 dconst1_3 = real_value_truncate (mode, dconst_third ());
1172
1173 if (flag_unsafe_math_optimizations
1174 && cbrtfn
1175 /* FIXME: The following line was originally
1176 && (tree_expr_nonnegative_p (arg0) || !HONOR_NANS (mode)),
1177 but since arg0 is a gimple value, the first predicate
1178 will always return false. It needs to be replaced with a
1179 call to a similar gimple_val_nonnegative_p function to be
1180 added in gimple-fold.c. */
1181 && !HONOR_NANS (mode)
1182 && REAL_VALUES_EQUAL (c, dconst1_3))
1183 return build_and_insert_call (gsi, loc, &target, cbrtfn, arg0);
1184
1185 /* Optimize pow(x,1./6.) = cbrt(sqrt(x)). Don't do this optimization
1186 if we don't have a hardware sqrt insn. */
1187 dconst1_6 = dconst1_3;
1188 SET_REAL_EXP (&dconst1_6, REAL_EXP (&dconst1_6) - 1);
1189
1190 if (flag_unsafe_math_optimizations
1191 && sqrtfn
1192 && cbrtfn
1193 /* FIXME: The following line was originally
1194 && (tree_expr_nonnegative_p (arg0) || !HONOR_NANS (mode)),
1195 but since arg0 is a gimple value, the first predicate
1196 will always return false. It needs to be replaced with a
1197 call to a similar gimple_val_nonnegative_p function to be
1198 added in gimple-fold.c. */
1199 && !HONOR_NANS (mode)
1200 && optimize_function_for_speed_p (cfun)
1201 && hw_sqrt_exists
1202 && REAL_VALUES_EQUAL (c, dconst1_6))
1203 {
1204 /* sqrt(x) */
1205 sqrt_arg0 = build_and_insert_call (gsi, loc, &target, sqrtfn, arg0);
1206
1207 /* cbrt(sqrt(x)) */
1208 return build_and_insert_call (gsi, loc, &target, cbrtfn, sqrt_arg0);
1209 }
1210
1211 /* Optimize pow(x,c), where n = 2c for some nonzero integer n, into
1212
1213 sqrt(x) * powi(x, n/2), n > 0;
1214 1.0 / (sqrt(x) * powi(x, abs(n/2))), n < 0.
1215
1216 Do not calculate the powi factor when n/2 = 0. */
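  /* For example, pow (x, 3.5) has n = 7 and becomes
     sqrt (x) * powi (x, 3), while pow (x, -0.5) has n = -1 and
     becomes 1.0 / sqrt (x).  */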
1217 real_arithmetic (&c2, MULT_EXPR, &c, &dconst2);
1218 n = real_to_integer (&c2);
1219 real_from_integer (&cint, VOIDmode, n, n < 0 ? -1 : 0, 0);
1220
1221 if (flag_unsafe_math_optimizations
1222 && sqrtfn
1223 && real_identical (&c2, &cint))
1224 {
1225 tree powi_x_ndiv2 = NULL_TREE;
1226
1227 /* Attempt to fold powi(arg0, abs(n/2)) into multiplies. If not
1228 possible or profitable, give up. Skip the degenerate case when
1229 n is 1 or -1, where the result is always 1. */
1230 if (abs (n) != 1)
1231 {
1232 powi_x_ndiv2 = gimple_expand_builtin_powi (gsi, loc, arg0, abs (n / 2));
1233 if (!powi_x_ndiv2)
1234 return NULL_TREE;
1235 }
1236
1237 /* Calculate sqrt(x). When n is not 1 or -1, multiply it by the
1238 result of the optimal multiply sequence just calculated. */
1239 sqrt_arg0 = build_and_insert_call (gsi, loc, &target, sqrtfn, arg0);
1240
1241 if (abs (n) == 1)
1242 result = sqrt_arg0;
1243 else
1244 result = build_and_insert_binop (gsi, loc, target, MULT_EXPR,
1245 sqrt_arg0, powi_x_ndiv2);
1246
1247 /* If n is negative, reciprocate the result. */
1248 if (n < 0)
1249 result = build_and_insert_binop (gsi, loc, target, RDIV_EXPR,
1250 build_real (type, dconst1), result);
1251 return result;
1252 }
1253
1254 /* Optimize pow(x,c), where 3c = n for some nonzero integer n, into
1255
1256 powi(x, n/3) * powi(cbrt(x), n%3), n > 0;
1257 1.0 / (powi(x, abs(n)/3) * powi(cbrt(x), abs(n)%3)), n < 0.
1258
1259 Do not calculate the first factor when n/3 = 0. As cbrt(x) is
1260 different from pow(x, 1./3.) due to rounding and behavior with
1261 negative x, we need to constrain this transformation to unsafe
1262 math and positive x or finite math. */
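  /* For example, pow (x, 5./3.) has n = 5 above and becomes
     powi (x, 5/3) * powi (cbrt (x), 5%3), i.e.
     x * cbrt (x) * cbrt (x).  */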
1263 real_from_integer (&dconst3, VOIDmode, 3, 0, 0);
1264 real_arithmetic (&c2, MULT_EXPR, &c, &dconst3);
1265 real_round (&c2, mode, &c2);
1266 n = real_to_integer (&c2);
1267 real_from_integer (&cint, VOIDmode, n, n < 0 ? -1 : 0, 0);
1268 real_arithmetic (&c2, RDIV_EXPR, &cint, &dconst3);
1269 real_convert (&c2, mode, &c2);
1270
1271 if (flag_unsafe_math_optimizations
1272 && cbrtfn
1273 /* FIXME: The following line was originally
1274 && (tree_expr_nonnegative_p (arg0) || !HONOR_NANS (mode)),
1275 but since arg0 is a gimple value, the first predicate
1276 will always return false. It needs to be replaced with a
1277 call to a similar gimple_val_nonnegative_p function to be
1278 added in gimple-fold.c. */
1279 && !HONOR_NANS (mode)
1280 && real_identical (&c2, &c)
1281 && optimize_function_for_speed_p (cfun)
1282 && powi_cost (n / 3) <= POWI_MAX_MULTS)
1283 {
1284 tree powi_x_ndiv3 = NULL_TREE;
1285
1286 /* Attempt to fold powi(arg0, abs(n/3)) into multiplies. If not
1287 possible or profitable, give up. Skip the degenerate case when
1288 abs(n) < 3, where the result is always 1. */
1289 if (abs (n) >= 3)
1290 {
1291 powi_x_ndiv3 = gimple_expand_builtin_powi (gsi, loc, arg0,
1292 abs (n / 3));
1293 if (!powi_x_ndiv3)
1294 return NULL_TREE;
1295 }
1296
1297 /* Calculate powi(cbrt(x), n%3). Don't use gimple_expand_builtin_powi
1298 as that creates an unnecessary variable. Instead, just produce
1299 either cbrt(x) or cbrt(x) * cbrt(x). */
1300 cbrt_x = build_and_insert_call (gsi, loc, &target, cbrtfn, arg0);
1301
1302 if (abs (n) % 3 == 1)
1303 powi_cbrt_x = cbrt_x;
1304 else
1305 powi_cbrt_x = build_and_insert_binop (gsi, loc, target, MULT_EXPR,
1306 cbrt_x, cbrt_x);
1307
1308 /* Multiply the two subexpressions, unless powi(x,abs(n)/3) = 1. */
1309 if (abs (n) < 3)
1310 result = powi_cbrt_x;
1311 else
1312 result = build_and_insert_binop (gsi, loc, target, MULT_EXPR,
1313 powi_x_ndiv3, powi_cbrt_x);
1314
1315 /* If n is negative, reciprocate the result. */
1316 if (n < 0)
1317 result = build_and_insert_binop (gsi, loc, target, RDIV_EXPR,
1318 build_real (type, dconst1), result);
1319
1320 return result;
1321 }
1322
1323 /* No optimizations succeeded. */
1324 return NULL_TREE;
1325 }
1326
1327 /* Go through all calls to sin, cos and cexpi and call execute_cse_sincos_1
1328 on the SSA_NAME argument of each of them. Also expand powi(x,n) into
1329 an optimal number of multiplies, when n is a constant. */
1330
1331 static unsigned int
1332 execute_cse_sincos (void)
1333 {
1334 basic_block bb;
1335 bool cfg_changed = false;
1336
1337 calculate_dominance_info (CDI_DOMINATORS);
1338 memset (&sincos_stats, 0, sizeof (sincos_stats));
1339
1340 FOR_EACH_BB (bb)
1341 {
1342 gimple_stmt_iterator gsi;
1343
1344 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1345 {
1346 gimple stmt = gsi_stmt (gsi);
1347 tree fndecl;
1348
1349 if (is_gimple_call (stmt)
1350 && gimple_call_lhs (stmt)
1351 && (fndecl = gimple_call_fndecl (stmt))
1352 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
1353 {
1354 tree arg, arg0, arg1, result;
1355 HOST_WIDE_INT n;
1356 location_t loc;
1357
1358 switch (DECL_FUNCTION_CODE (fndecl))
1359 {
1360 CASE_FLT_FN (BUILT_IN_COS):
1361 CASE_FLT_FN (BUILT_IN_SIN):
1362 CASE_FLT_FN (BUILT_IN_CEXPI):
1363 /* Make sure we have either sincos or cexp. */
1364 if (!TARGET_HAS_SINCOS && !TARGET_C99_FUNCTIONS)
1365 break;
1366
1367 arg = gimple_call_arg (stmt, 0);
1368 if (TREE_CODE (arg) == SSA_NAME)
1369 cfg_changed |= execute_cse_sincos_1 (arg);
1370 break;
1371
1372 CASE_FLT_FN (BUILT_IN_POW):
1373 arg0 = gimple_call_arg (stmt, 0);
1374 arg1 = gimple_call_arg (stmt, 1);
1375
1376 loc = gimple_location (stmt);
1377 result = gimple_expand_builtin_pow (&gsi, loc, arg0, arg1);
1378
1379 if (result)
1380 {
1381 tree lhs = gimple_get_lhs (stmt);
1382 gimple new_stmt = gimple_build_assign (lhs, result);
1383 gimple_set_location (new_stmt, loc);
1384 unlink_stmt_vdef (stmt);
1385 gsi_replace (&gsi, new_stmt, true);
1386 }
1387 break;
1388
1389 CASE_FLT_FN (BUILT_IN_POWI):
1390 arg0 = gimple_call_arg (stmt, 0);
1391 arg1 = gimple_call_arg (stmt, 1);
1392 if (!host_integerp (arg1, 0))
1393 break;
1394
1395 n = TREE_INT_CST_LOW (arg1);
1396 loc = gimple_location (stmt);
1397 result = gimple_expand_builtin_powi (&gsi, loc, arg0, n);
1398
1399 if (result)
1400 {
1401 tree lhs = gimple_get_lhs (stmt);
1402 gimple new_stmt = gimple_build_assign (lhs, result);
1403 gimple_set_location (new_stmt, loc);
1404 unlink_stmt_vdef (stmt);
1405 gsi_replace (&gsi, new_stmt, true);
1406 }
1407 break;
1408
1409 default:;
1410 }
1411 }
1412 }
1413 }
1414
1415 statistics_counter_event (cfun, "sincos statements inserted",
1416 sincos_stats.inserted);
1417
1418 free_dominance_info (CDI_DOMINATORS);
1419 return cfg_changed ? TODO_cleanup_cfg : 0;
1420 }
1421
1422 static bool
1423 gate_cse_sincos (void)
1424 {
1425 /* We no longer require either sincos or cexp, since powi expansion
1426 piggybacks on this pass. */
1427 return optimize;
1428 }
1429
1430 struct gimple_opt_pass pass_cse_sincos =
1431 {
1432 {
1433 GIMPLE_PASS,
1434 "sincos", /* name */
1435 gate_cse_sincos, /* gate */
1436 execute_cse_sincos, /* execute */
1437 NULL, /* sub */
1438 NULL, /* next */
1439 0, /* static_pass_number */
1440 TV_NONE, /* tv_id */
1441 PROP_ssa, /* properties_required */
1442 0, /* properties_provided */
1443 0, /* properties_destroyed */
1444 0, /* todo_flags_start */
1445 TODO_dump_func | TODO_update_ssa | TODO_verify_ssa
1446 | TODO_verify_stmts /* todo_flags_finish */
1447 }
1448 };
1449
1450 /* A symbolic number is used to detect byte permutation and selection
1451 patterns. Therefore the field N contains an artificial number
1452 consisting of byte size markers:
1453
1454 0 - byte has the value 0
1455 1..size - byte contains the content of the byte
1456 number indexed with that value minus one */
1457
1458 struct symbolic_number {
1459 unsigned HOST_WIDEST_INT n;
1460 int size;
1461 };
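
/* For example, a plain 4-byte operand starts out as the symbolic
   number 0x04030201 (see find_bswap_1 below): byte 0 carries marker 1,
   byte 3 carries marker 4.  After a full byte swap the number becomes
   0x01020304, which is exactly what find_bswap compares against.  */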
1462
1463 /* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
1464 number N. Return false if the requested operation is not permitted
1465 on a symbolic number. */
1466
1467 static inline bool
1468 do_shift_rotate (enum tree_code code,
1469 struct symbolic_number *n,
1470 int count)
1471 {
1472 if (count % 8 != 0)
1473 return false;
1474
1475 /* Zero out the extra bits of N in order to avoid them being shifted
1476 into the significant bits. */
1477 if (n->size < (int)sizeof (HOST_WIDEST_INT))
1478 n->n &= ((unsigned HOST_WIDEST_INT)1 << (n->size * BITS_PER_UNIT)) - 1;
1479
1480 switch (code)
1481 {
1482 case LSHIFT_EXPR:
1483 n->n <<= count;
1484 break;
1485 case RSHIFT_EXPR:
1486 n->n >>= count;
1487 break;
1488 case LROTATE_EXPR:
1489 n->n = (n->n << count) | (n->n >> ((n->size * BITS_PER_UNIT) - count));
1490 break;
1491 case RROTATE_EXPR:
1492 n->n = (n->n >> count) | (n->n << ((n->size * BITS_PER_UNIT) - count));
1493 break;
1494 default:
1495 return false;
1496 }
1497 return true;
1498 }
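
/* E.g. for the 4-byte symbolic number 0x04030201, LSHIFT_EXPR by 8
   gives 0x03020100 in the low four bytes: each marker moves up one
   byte and byte 0 becomes the "known zero" marker (the bits shifted
   above the fourth byte are masked off before their next use).  */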
1499
1500 /* Perform sanity checking for the symbolic number N and the gimple
1501 statement STMT. */
1502
1503 static inline bool
1504 verify_symbolic_number_p (struct symbolic_number *n, gimple stmt)
1505 {
1506 tree lhs_type;
1507
1508 lhs_type = gimple_expr_type (stmt);
1509
1510 if (TREE_CODE (lhs_type) != INTEGER_TYPE)
1511 return false;
1512
1513 if (TYPE_PRECISION (lhs_type) != n->size * BITS_PER_UNIT)
1514 return false;
1515
1516 return true;
1517 }
1518
1519 /* find_bswap_1 invokes itself recursively with N and tries to perform
1520 the operation given by the rhs of STMT on the result. If the
1521 operation could successfully be executed, the function returns the
1522 tree expression of the source operand and NULL otherwise. */
1523
1524 static tree
1525 find_bswap_1 (gimple stmt, struct symbolic_number *n, int limit)
1526 {
1527 enum tree_code code;
1528 tree rhs1, rhs2 = NULL;
1529 gimple rhs1_stmt, rhs2_stmt;
1530 tree source_expr1;
1531 enum gimple_rhs_class rhs_class;
1532
1533 if (!limit || !is_gimple_assign (stmt))
1534 return NULL_TREE;
1535
1536 rhs1 = gimple_assign_rhs1 (stmt);
1537
1538 if (TREE_CODE (rhs1) != SSA_NAME)
1539 return NULL_TREE;
1540
1541 code = gimple_assign_rhs_code (stmt);
1542 rhs_class = gimple_assign_rhs_class (stmt);
1543 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
1544
1545 if (rhs_class == GIMPLE_BINARY_RHS)
1546 rhs2 = gimple_assign_rhs2 (stmt);
1547
1548 /* Handle unary rhs and binary rhs with integer constants as second
1549 operand. */
1550
1551 if (rhs_class == GIMPLE_UNARY_RHS
1552 || (rhs_class == GIMPLE_BINARY_RHS
1553 && TREE_CODE (rhs2) == INTEGER_CST))
1554 {
1555 if (code != BIT_AND_EXPR
1556 && code != LSHIFT_EXPR
1557 && code != RSHIFT_EXPR
1558 && code != LROTATE_EXPR
1559 && code != RROTATE_EXPR
1560 && code != NOP_EXPR
1561 && code != CONVERT_EXPR)
1562 return NULL_TREE;
1563
1564 source_expr1 = find_bswap_1 (rhs1_stmt, n, limit - 1);
1565
1566 /* If find_bswap_1 returned NULL, STMT is a leaf node and we have
1567 to initialize the symbolic number. */
1568 if (!source_expr1)
1569 {
1570 /* Set up the symbolic number N by setting each byte to a
1571 value between 1 and the byte size of rhs1. The highest
1572 order byte is set to n->size and the lowest order
1573 byte to 1. */
1574 n->size = TYPE_PRECISION (TREE_TYPE (rhs1));
1575 if (n->size % BITS_PER_UNIT != 0)
1576 return NULL_TREE;
1577 n->size /= BITS_PER_UNIT;
1578 n->n = (sizeof (HOST_WIDEST_INT) < 8 ? 0 :
1579 (unsigned HOST_WIDEST_INT)0x08070605 << 32 | 0x04030201);
1580
1581 if (n->size < (int)sizeof (HOST_WIDEST_INT))
1582 n->n &= ((unsigned HOST_WIDEST_INT)1 <<
1583 (n->size * BITS_PER_UNIT)) - 1;
1584
1585 source_expr1 = rhs1;
1586 }
1587
1588 switch (code)
1589 {
1590 case BIT_AND_EXPR:
1591 {
1592 int i;
1593 unsigned HOST_WIDEST_INT val = widest_int_cst_value (rhs2);
1594 unsigned HOST_WIDEST_INT tmp = val;
1595
1596 /* Only constants masking full bytes are allowed. */
1597 for (i = 0; i < n->size; i++, tmp >>= BITS_PER_UNIT)
1598 if ((tmp & 0xff) != 0 && (tmp & 0xff) != 0xff)
1599 return NULL_TREE;
1600
1601 n->n &= val;
1602 }
1603 break;
1604 case LSHIFT_EXPR:
1605 case RSHIFT_EXPR:
1606 case LROTATE_EXPR:
1607 case RROTATE_EXPR:
1608 if (!do_shift_rotate (code, n, (int)TREE_INT_CST_LOW (rhs2)))
1609 return NULL_TREE;
1610 break;
1611 CASE_CONVERT:
1612 {
1613 int type_size;
1614
1615 type_size = TYPE_PRECISION (gimple_expr_type (stmt));
1616 if (type_size % BITS_PER_UNIT != 0)
1617 return NULL_TREE;
1618
1619 if (type_size / BITS_PER_UNIT < (int)(sizeof (HOST_WIDEST_INT)))
1620 {
1621 /* If STMT casts to a smaller type mask out the bits not
1622 belonging to the target type. */
1623 n->n &= ((unsigned HOST_WIDEST_INT)1 << type_size) - 1;
1624 }
1625 n->size = type_size / BITS_PER_UNIT;
1626 }
1627 break;
1628 default:
1629 return NULL_TREE;
1630 };
1631 return verify_symbolic_number_p (n, stmt) ? source_expr1 : NULL;
1632 }
1633
1634 /* Handle binary rhs. */
1635
1636 if (rhs_class == GIMPLE_BINARY_RHS)
1637 {
1638 struct symbolic_number n1, n2;
1639 tree source_expr2;
1640
1641 if (code != BIT_IOR_EXPR)
1642 return NULL_TREE;
1643
1644 if (TREE_CODE (rhs2) != SSA_NAME)
1645 return NULL_TREE;
1646
1647 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
1648
1649 switch (code)
1650 {
1651 case BIT_IOR_EXPR:
1652 source_expr1 = find_bswap_1 (rhs1_stmt, &n1, limit - 1);
1653
1654 if (!source_expr1)
1655 return NULL_TREE;
1656
1657 source_expr2 = find_bswap_1 (rhs2_stmt, &n2, limit - 1);
1658
1659 if (source_expr1 != source_expr2
1660 || n1.size != n2.size)
1661 return NULL_TREE;
1662
1663 n->size = n1.size;
1664 n->n = n1.n | n2.n;
1665
1666 if (!verify_symbolic_number_p (n, stmt))
1667 return NULL_TREE;
1668
1669 break;
1670 default:
1671 return NULL_TREE;
1672 }
1673 return source_expr1;
1674 }
1675 return NULL_TREE;
1676 }
1677
1678 /* Check if STMT completes a bswap implementation consisting of ORs,
1679 SHIFTs and ANDs. Return the source tree expression on which the
1680 byte swap is performed and NULL if no bswap was found. */
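
/* A sketch of the kind of hand-written code this recognizes
   (hypothetical source, not part of this file):

     uint32_t
     swap32 (uint32_t x)
     {
       return (x << 24)
              | ((x & 0x0000ff00) << 8)
              | ((x & 0x00ff0000) >> 8)
              | (x >> 24);
     }

   Walking the ORs, shifts and masks leaves the symbolic number
   0x01020304, a full byte swap of the 4-byte source.  */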
1681
1682 static tree
1683 find_bswap (gimple stmt)
1684 {
1685 /* The number which the find_bswap result should match in order to
1686 have a full byte swap. The number is shifted to the right according
1687 to the size of the symbolic number before using it. */
1688 unsigned HOST_WIDEST_INT cmp =
1689 sizeof (HOST_WIDEST_INT) < 8 ? 0 :
1690 (unsigned HOST_WIDEST_INT)0x01020304 << 32 | 0x05060708;
1691
1692 struct symbolic_number n;
1693 tree source_expr;
1694
1695 /* The last parameter determines the search depth limit. It usually
1696 correlates directly to the number of bytes to be touched. We
1697 increase that number by one here in order to also cover signed ->
1698 unsigned conversions of the src operand as can be seen in
1699 libgcc. */
1700 source_expr = find_bswap_1 (stmt, &n,
1701 TREE_INT_CST_LOW (
1702 TYPE_SIZE_UNIT (gimple_expr_type (stmt))) + 1);
1703
1704 if (!source_expr)
1705 return NULL_TREE;
1706
1707 /* Zero out the extra bits of N and CMP. */
1708 if (n.size < (int)sizeof (HOST_WIDEST_INT))
1709 {
1710 unsigned HOST_WIDEST_INT mask =
1711 ((unsigned HOST_WIDEST_INT)1 << (n.size * BITS_PER_UNIT)) - 1;
1712
1713 n.n &= mask;
1714 cmp >>= (sizeof (HOST_WIDEST_INT) - n.size) * BITS_PER_UNIT;
1715 }
1716
1717 /* A complete byte swap should make the symbolic number start
1718 with the largest digit in the highest order byte. */
1719 if (cmp != n.n)
1720 return NULL_TREE;
1721
1722 return source_expr;
1723 }
1724
1725 /* Find manual byte swap implementations and turn them into a bswap
1726 builtin invocation. */
1727
1728 static unsigned int
1729 execute_optimize_bswap (void)
1730 {
1731 basic_block bb;
1732 bool bswap32_p, bswap64_p;
1733 bool changed = false;
1734 tree bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;
1735
1736 if (BITS_PER_UNIT != 8)
1737 return 0;
1738
1739 if (sizeof (HOST_WIDEST_INT) < 8)
1740 return 0;
1741
1742 bswap32_p = (built_in_decls[BUILT_IN_BSWAP32]
1743 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
1744 bswap64_p = (built_in_decls[BUILT_IN_BSWAP64]
1745 && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
1746 || (bswap32_p && word_mode == SImode)));
1747
1748 if (!bswap32_p && !bswap64_p)
1749 return 0;
1750
1751 /* Determine the argument type of the builtins. The code later on
1752 assumes that the return and argument type are the same. */
1753 if (bswap32_p)
1754 {
1755 tree fndecl = built_in_decls[BUILT_IN_BSWAP32];
1756 bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1757 }
1758
1759 if (bswap64_p)
1760 {
1761 tree fndecl = built_in_decls[BUILT_IN_BSWAP64];
1762 bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1763 }
1764
1765 memset (&bswap_stats, 0, sizeof (bswap_stats));
1766
1767 FOR_EACH_BB (bb)
1768 {
1769 gimple_stmt_iterator gsi;
1770
1771 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1772 {
1773 gimple stmt = gsi_stmt (gsi);
1774 tree bswap_src, bswap_type;
1775 tree bswap_tmp;
1776 tree fndecl = NULL_TREE;
1777 int type_size;
1778 gimple call;
1779
1780 if (!is_gimple_assign (stmt)
1781 || gimple_assign_rhs_code (stmt) != BIT_IOR_EXPR)
1782 continue;
1783
1784 type_size = TYPE_PRECISION (gimple_expr_type (stmt));
1785
1786 switch (type_size)
1787 {
1788 case 32:
1789 if (bswap32_p)
1790 {
1791 fndecl = built_in_decls[BUILT_IN_BSWAP32];
1792 bswap_type = bswap32_type;
1793 }
1794 break;
1795 case 64:
1796 if (bswap64_p)
1797 {
1798 fndecl = built_in_decls[BUILT_IN_BSWAP64];
1799 bswap_type = bswap64_type;
1800 }
1801 break;
1802 default:
1803 continue;
1804 }
1805
1806 if (!fndecl)
1807 continue;
1808
1809 bswap_src = find_bswap (stmt);
1810
1811 if (!bswap_src)
1812 continue;
1813
1814 changed = true;
1815 if (type_size == 32)
1816 bswap_stats.found_32bit++;
1817 else
1818 bswap_stats.found_64bit++;
1819
1820 bswap_tmp = bswap_src;
1821
1822 /* Convert the src expression if necessary. */
1823 if (!useless_type_conversion_p (TREE_TYPE (bswap_tmp), bswap_type))
1824 {
1825 gimple convert_stmt;
1826
1827 bswap_tmp = create_tmp_var (bswap_type, "bswapsrc");
1828 add_referenced_var (bswap_tmp);
1829 bswap_tmp = make_ssa_name (bswap_tmp, NULL);
1830
1831 convert_stmt = gimple_build_assign_with_ops (
1832 CONVERT_EXPR, bswap_tmp, bswap_src, NULL);
1833 gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
1834 }
1835
1836 call = gimple_build_call (fndecl, 1, bswap_tmp);
1837
1838 bswap_tmp = gimple_assign_lhs (stmt);
1839
1840 /* Convert the result if necessary. */
1841 if (!useless_type_conversion_p (TREE_TYPE (bswap_tmp), bswap_type))
1842 {
1843 gimple convert_stmt;
1844
1845 bswap_tmp = create_tmp_var (bswap_type, "bswapdst");
1846 add_referenced_var (bswap_tmp);
1847 bswap_tmp = make_ssa_name (bswap_tmp, NULL);
1848 convert_stmt = gimple_build_assign_with_ops (
1849 CONVERT_EXPR, gimple_assign_lhs (stmt), bswap_tmp, NULL);
1850 gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
1851 }
1852
1853 gimple_call_set_lhs (call, bswap_tmp);
1854
1855 if (dump_file)
1856 {
1857 fprintf (dump_file, "%d bit bswap implementation found at: ",
1858 (int)type_size);
1859 print_gimple_stmt (dump_file, stmt, 0, 0);
1860 }
1861
1862 gsi_insert_after (&gsi, call, GSI_SAME_STMT);
1863 gsi_remove (&gsi, true);
1864 }
1865 }
1866
1867 statistics_counter_event (cfun, "32-bit bswap implementations found",
1868 bswap_stats.found_32bit);
1869 statistics_counter_event (cfun, "64-bit bswap implementations found",
1870 bswap_stats.found_64bit);
1871
1872 return (changed ? TODO_dump_func | TODO_update_ssa | TODO_verify_ssa
1873 | TODO_verify_stmts : 0);
1874 }
1875
1876 static bool
1877 gate_optimize_bswap (void)
1878 {
1879 return flag_expensive_optimizations && optimize;
1880 }
1881
1882 struct gimple_opt_pass pass_optimize_bswap =
1883 {
1884 {
1885 GIMPLE_PASS,
1886 "bswap", /* name */
1887 gate_optimize_bswap, /* gate */
1888 execute_optimize_bswap, /* execute */
1889 NULL, /* sub */
1890 NULL, /* next */
1891 0, /* static_pass_number */
1892 TV_NONE, /* tv_id */
1893 PROP_ssa, /* properties_required */
1894 0, /* properties_provided */
1895 0, /* properties_destroyed */
1896 0, /* todo_flags_start */
1897 0 /* todo_flags_finish */
1898 }
1899 };
1900
1901 /* Return true if RHS is a suitable operand for a widening multiplication.
1902 There are two cases:
1903
1904 - RHS widens some value to twice its width. If so, store that value
1905 in *NEW_RHS_OUT and its type in *TYPE_OUT.
1906
1907 - RHS is an integer constant. If so, store that constant in
1908 *NEW_RHS_OUT and set *TYPE_OUT to NULL. */
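
/* Illustrative example (hypothetical GIMPLE, not from this file): if RHS
   is an SSA name defined by

     rhs = (int) short_var;

   where int is twice as wide as short, *NEW_RHS_OUT becomes SHORT_VAR
   and *TYPE_OUT becomes short int; for RHS == 123, *NEW_RHS_OUT is the
   constant itself and *TYPE_OUT is NULL.  */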
1909
1910 static bool
1911 is_widening_mult_rhs_p (tree rhs, tree *type_out, tree *new_rhs_out)
1912 {
1913 gimple stmt;
1914 tree type, type1, rhs1;
1915 enum tree_code rhs_code;
1916
1917 if (TREE_CODE (rhs) == SSA_NAME)
1918 {
1919 type = TREE_TYPE (rhs);
1920 stmt = SSA_NAME_DEF_STMT (rhs);
1921 if (!is_gimple_assign (stmt))
1922 return false;
1923
1924 rhs_code = gimple_assign_rhs_code (stmt);
1925 if (TREE_CODE (type) == INTEGER_TYPE
1926 ? !CONVERT_EXPR_CODE_P (rhs_code)
1927 : rhs_code != FIXED_CONVERT_EXPR)
1928 return false;
1929
1930 rhs1 = gimple_assign_rhs1 (stmt);
1931 type1 = TREE_TYPE (rhs1);
1932 if (TREE_CODE (type1) != TREE_CODE (type)
1933 || TYPE_PRECISION (type1) * 2 != TYPE_PRECISION (type))
1934 return false;
1935
1936 *new_rhs_out = rhs1;
1937 *type_out = type1;
1938 return true;
1939 }
1940
1941 if (TREE_CODE (rhs) == INTEGER_CST)
1942 {
1943 *new_rhs_out = rhs;
1944 *type_out = NULL;
1945 return true;
1946 }
1947
1948 return false;
1949 }
1950
1951 /* Return true if STMT performs a widening multiplication. If so,
1952 store the unwidened types of the operands in *TYPE1_OUT and *TYPE2_OUT
1953 respectively. Also fill *RHS1_OUT and *RHS2_OUT such that converting
1954 those operands to types *TYPE1_OUT and *TYPE2_OUT would give the
1955 operands of the multiplication. */
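
/* Illustrative example (hypothetical GIMPLE): for

     _1 = (int) s1;
     _2 = (int) s2;
     prod_3 = _1 * _2;

   with s1 and s2 of type short, this returns true with *TYPE1_OUT ==
   *TYPE2_OUT == short int, *RHS1_OUT == s1 and *RHS2_OUT == s2.  A
   constant operand such as 10 is accepted as long as it fits in the
   type recovered from the other operand.  */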
1956
1957 static bool
1958 is_widening_mult_p (gimple stmt,
1959 tree *type1_out, tree *rhs1_out,
1960 tree *type2_out, tree *rhs2_out)
1961 {
1962 tree type;
1963
1964 type = TREE_TYPE (gimple_assign_lhs (stmt));
1965 if (TREE_CODE (type) != INTEGER_TYPE
1966 && TREE_CODE (type) != FIXED_POINT_TYPE)
1967 return false;
1968
1969 if (!is_widening_mult_rhs_p (gimple_assign_rhs1 (stmt), type1_out, rhs1_out))
1970 return false;
1971
1972 if (!is_widening_mult_rhs_p (gimple_assign_rhs2 (stmt), type2_out, rhs2_out))
1973 return false;
1974
1975 if (*type1_out == NULL)
1976 {
1977 if (*type2_out == NULL || !int_fits_type_p (*rhs1_out, *type2_out))
1978 return false;
1979 *type1_out = *type2_out;
1980 }
1981
1982 if (*type2_out == NULL)
1983 {
1984 if (!int_fits_type_p (*rhs2_out, *type1_out))
1985 return false;
1986 *type2_out = *type1_out;
1987 }
1988
1989 return true;
1990 }
1991
1992 /* Process a single gimple statement STMT, which has a MULT_EXPR as
1993 its rhs, and try to convert it into a WIDEN_MULT_EXPR. The return
1994 value is true iff we converted the statement. */
1995
1996 static bool
1997 convert_mult_to_widen (gimple stmt)
1998 {
1999 tree lhs, rhs1, rhs2, type, type1, type2;
2000 enum insn_code handler;
2001
2002 lhs = gimple_assign_lhs (stmt);
2003 type = TREE_TYPE (lhs);
2004 if (TREE_CODE (type) != INTEGER_TYPE)
2005 return false;
2006
2007 if (!is_widening_mult_p (stmt, &type1, &rhs1, &type2, &rhs2))
2008 return false;
2009
2010 if (TYPE_UNSIGNED (type1) && TYPE_UNSIGNED (type2))
2011 handler = optab_handler (umul_widen_optab, TYPE_MODE (type));
2012 else if (!TYPE_UNSIGNED (type1) && !TYPE_UNSIGNED (type2))
2013 handler = optab_handler (smul_widen_optab, TYPE_MODE (type));
2014 else
2015 handler = optab_handler (usmul_widen_optab, TYPE_MODE (type));
2016
2017 if (handler == CODE_FOR_nothing)
2018 return false;
2019
2020 gimple_assign_set_rhs1 (stmt, fold_convert (type1, rhs1));
2021 gimple_assign_set_rhs2 (stmt, fold_convert (type2, rhs2));
2022 gimple_assign_set_rhs_code (stmt, WIDEN_MULT_EXPR);
2023 update_stmt (stmt);
2024 widen_mul_stats.widen_mults_inserted++;
2025 return true;
2026 }
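
/* Illustrative before/after (hypothetical GIMPLE): assuming the target
   provides smul_widen_optab for SImode,

     _1 = (int) s1;
     _2 = (int) s2;
     c_3 = _1 * _2;

   has its multiplication rewritten in place to

     c_3 = s1 w* s2;

   where "w*" is how GIMPLE dumps print WIDEN_MULT_EXPR.  */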
2027
2028 /* Process a single gimple statement STMT, which is found at the
2029 iterator GSI and has either a PLUS_EXPR or a MINUS_EXPR as its
2030 rhs (given by CODE), and try to convert it into a
2031 WIDEN_MULT_PLUS_EXPR or a WIDEN_MULT_MINUS_EXPR. The return value
2032 is true iff we converted the statement. */
2033
2034 static bool
2035 convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple stmt,
2036 enum tree_code code)
2037 {
2038 gimple rhs1_stmt = NULL, rhs2_stmt = NULL;
2039 tree type, type1, type2;
2040 tree lhs, rhs1, rhs2, mult_rhs1, mult_rhs2, add_rhs;
2041 enum tree_code rhs1_code = ERROR_MARK, rhs2_code = ERROR_MARK;
2042 optab this_optab;
2043 enum tree_code wmult_code;
2044
2045 lhs = gimple_assign_lhs (stmt);
2046 type = TREE_TYPE (lhs);
2047 if (TREE_CODE (type) != INTEGER_TYPE
2048 && TREE_CODE (type) != FIXED_POINT_TYPE)
2049 return false;
2050
2051 if (code == MINUS_EXPR)
2052 wmult_code = WIDEN_MULT_MINUS_EXPR;
2053 else
2054 wmult_code = WIDEN_MULT_PLUS_EXPR;
2055
2056 rhs1 = gimple_assign_rhs1 (stmt);
2057 rhs2 = gimple_assign_rhs2 (stmt);
2058
2059 if (TREE_CODE (rhs1) == SSA_NAME)
2060 {
2061 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
2062 if (is_gimple_assign (rhs1_stmt))
2063 rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
2064 }
2065 else
2066 return false;
2067
2068 if (TREE_CODE (rhs2) == SSA_NAME)
2069 {
2070 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
2071 if (is_gimple_assign (rhs2_stmt))
2072 rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
2073 }
2074 else
2075 return false;
2076
2077 if (code == PLUS_EXPR && rhs1_code == MULT_EXPR)
2078 {
2079 if (!is_widening_mult_p (rhs1_stmt, &type1, &mult_rhs1,
2080 &type2, &mult_rhs2))
2081 return false;
2082 add_rhs = rhs2;
2083 }
2084 else if (rhs2_code == MULT_EXPR)
2085 {
2086 if (!is_widening_mult_p (rhs2_stmt, &type1, &mult_rhs1,
2087 &type2, &mult_rhs2))
2088 return false;
2089 add_rhs = rhs1;
2090 }
2091 else if (code == PLUS_EXPR && rhs1_code == WIDEN_MULT_EXPR)
2092 {
2093 mult_rhs1 = gimple_assign_rhs1 (rhs1_stmt);
2094 mult_rhs2 = gimple_assign_rhs2 (rhs1_stmt);
2095 type1 = TREE_TYPE (mult_rhs1);
2096 type2 = TREE_TYPE (mult_rhs2);
2097 add_rhs = rhs2;
2098 }
2099 else if (rhs2_code == WIDEN_MULT_EXPR)
2100 {
2101 mult_rhs1 = gimple_assign_rhs1 (rhs2_stmt);
2102 mult_rhs2 = gimple_assign_rhs2 (rhs2_stmt);
2103 type1 = TREE_TYPE (mult_rhs1);
2104 type2 = TREE_TYPE (mult_rhs2);
2105 add_rhs = rhs1;
2106 }
2107 else
2108 return false;
2109
2110 if (TYPE_UNSIGNED (type1) != TYPE_UNSIGNED (type2))
2111 return false;
2112
2113 /* Verify that the machine can perform a widening multiply
2114 accumulate in this mode/signedness combination, otherwise
2115 this transformation is likely to pessimize code. */
2116 this_optab = optab_for_tree_code (wmult_code, type1, optab_default);
2117 if (optab_handler (this_optab, TYPE_MODE (type)) == CODE_FOR_nothing)
2118 return false;
2119
2120 /* ??? May need some type verification here? */
2121
2122 gimple_assign_set_rhs_with_ops_1 (gsi, wmult_code,
2123 fold_convert (type1, mult_rhs1),
2124 fold_convert (type2, mult_rhs2),
2125 add_rhs);
2126 update_stmt (gsi_stmt (*gsi));
2127 widen_mul_stats.maccs_inserted++;
2128 return true;
2129 }
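
/* Illustrative before/after (hypothetical GIMPLE): given an earlier
   widening multiplication, an addition such as

     _3 = s1 w* s2;
     acc_4 = _3 + acc_1;

   is rewritten into

     acc_4 = WIDEN_MULT_PLUS_EXPR <s1, s2, acc_1>;

   provided the target implements the corresponding optab, e.g. as a
   multiply-accumulate instruction.  */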
2130
2131 /* Combine the multiplication at MUL_STMT with operands MULOP1 and MULOP2
2132 with uses in additions and subtractions to form fused multiply-add
2133 operations. Returns true if successful and MUL_STMT should be removed. */
2134
2135 static bool
2136 convert_mult_to_fma (gimple mul_stmt, tree op1, tree op2)
2137 {
2138 tree mul_result = gimple_get_lhs (mul_stmt);
2139 tree type = TREE_TYPE (mul_result);
2140 gimple use_stmt, neguse_stmt, fma_stmt;
2141 use_operand_p use_p;
2142 imm_use_iterator imm_iter;
2143
2144 if (FLOAT_TYPE_P (type)
2145 && flag_fp_contract_mode == FP_CONTRACT_OFF)
2146 return false;
2147
2148 /* We don't want to do bitfield reduction ops. */
2149 if (INTEGRAL_TYPE_P (type)
2150 && (TYPE_PRECISION (type)
2151 != GET_MODE_PRECISION (TYPE_MODE (type))))
2152 return false;
2153
2154 /* If the target doesn't support it, don't generate it. We assume that
2155 if fma isn't available then fms, fnma or fnms are not either. */
2156 if (optab_handler (fma_optab, TYPE_MODE (type)) == CODE_FOR_nothing)
2157 return false;
2158
2159 /* Make sure that the multiplication statement becomes dead after
2160 the transformation, i.e. that all uses are transformed to FMAs.
2161 This means we assume that an FMA operation has the same cost
2162 as an addition. */
2163 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, mul_result)
2164 {
2165 enum tree_code use_code;
2166 tree result = mul_result;
2167 bool negate_p = false;
2168
2169 use_stmt = USE_STMT (use_p);
2170
2171 if (is_gimple_debug (use_stmt))
2172 continue;
2173
2174 /* For now, restrict this operation to single basic blocks. In theory
2175 we would want to support sinking the multiplication in
2176 m = a*b;
2177 if ()
2178 ma = m + c;
2179 else
2180 d = m;
2181 to form an FMA in the then block and sink the multiplication to the
2182 else block. */
2183 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
2184 return false;
2185
2186 if (!is_gimple_assign (use_stmt))
2187 return false;
2188
2189 use_code = gimple_assign_rhs_code (use_stmt);
2190
2191 /* A negate on the multiplication leads to FNMA. */
2192 if (use_code == NEGATE_EXPR)
2193 {
2194 ssa_op_iter iter;
2195 tree use;
2196
2197 result = gimple_assign_lhs (use_stmt);
2198
2199 /* Make sure the negate statement becomes dead with this
2200 single transformation. */
2201 if (!single_imm_use (gimple_assign_lhs (use_stmt),
2202 &use_p, &neguse_stmt))
2203 return false;
2204
2205 /* Make sure the multiplication isn't also used in that stmt. */
2206 FOR_EACH_SSA_TREE_OPERAND (use, neguse_stmt, iter, SSA_OP_USE)
2207 if (use == mul_result)
2208 return false;
2209
2210 /* Re-validate. */
2211 use_stmt = neguse_stmt;
2212 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
2213 return false;
2214 if (!is_gimple_assign (use_stmt))
2215 return false;
2216
2217 use_code = gimple_assign_rhs_code (use_stmt);
2218 negate_p = true;
2219 }
2220
2221 switch (use_code)
2222 {
2223 case MINUS_EXPR:
2224 if (gimple_assign_rhs2 (use_stmt) == result)
2225 negate_p = !negate_p;
2226 break;
2227 case PLUS_EXPR:
2228 break;
2229 default:
2230 /* FMA can only be formed from PLUS and MINUS. */
2231 return false;
2232 }
2233
2234 /* We can't handle a * b + a * b. */
2235 if (gimple_assign_rhs1 (use_stmt) == gimple_assign_rhs2 (use_stmt))
2236 return false;
2237
2238 /* While it is possible to validate whether or not the exact form
2239 that we've recognized is available in the backend, the assumption
2240 is that the transformation is never a loss. For instance, suppose
2241 the target only has the plain FMA pattern available. Consider
2242 a*b-c -> fma(a,b,-c): we've exchanged MUL+SUB for FMA+NEG, which
2243 is still two operations. Consider -(a*b)-c -> fma(-a,b,-c): we
2244 still have 3 operations, but in the FMA form the two NEGs are
2245 independent and could be run in parallel. */
2246 }
2247
2248 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, mul_result)
2249 {
2250 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
2251 enum tree_code use_code;
2252 tree addop, mulop1 = op1, result = mul_result;
2253 bool negate_p = false;
2254
2255 if (is_gimple_debug (use_stmt))
2256 continue;
2257
2258 use_code = gimple_assign_rhs_code (use_stmt);
2259 if (use_code == NEGATE_EXPR)
2260 {
2261 result = gimple_assign_lhs (use_stmt);
2262 single_imm_use (gimple_assign_lhs (use_stmt), &use_p, &neguse_stmt);
2263 gsi_remove (&gsi, true);
2264 release_defs (use_stmt);
2265
2266 use_stmt = neguse_stmt;
2267 gsi = gsi_for_stmt (use_stmt);
2268 use_code = gimple_assign_rhs_code (use_stmt);
2269 negate_p = true;
2270 }
2271
2272 if (gimple_assign_rhs1 (use_stmt) == result)
2273 {
2274 addop = gimple_assign_rhs2 (use_stmt);
2275 /* a * b - c -> a * b + (-c) */
2276 if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
2277 addop = force_gimple_operand_gsi (&gsi,
2278 build1 (NEGATE_EXPR,
2279 type, addop),
2280 true, NULL_TREE, true,
2281 GSI_SAME_STMT);
2282 }
2283 else
2284 {
2285 addop = gimple_assign_rhs1 (use_stmt);
2286 /* a - b * c -> (-b) * c + a */
2287 if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
2288 negate_p = !negate_p;
2289 }
2290
2291 if (negate_p)
2292 mulop1 = force_gimple_operand_gsi (&gsi,
2293 build1 (NEGATE_EXPR,
2294 type, mulop1),
2295 true, NULL_TREE, true,
2296 GSI_SAME_STMT);
2297
2298 fma_stmt = gimple_build_assign_with_ops3 (FMA_EXPR,
2299 gimple_assign_lhs (use_stmt),
2300 mulop1, op2,
2301 addop);
2302 gsi_replace (&gsi, fma_stmt, true);
2303 widen_mul_stats.fmas_inserted++;
2304 }
2305
2306 return true;
2307 }
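
/* Illustrative before/after (hypothetical GIMPLE): when the target has
   an fma pattern for the mode,

     _1 = a * b;
     x_2 = _1 + c;

   becomes

     x_2 = FMA <a, b, c>;

   with the multiplication removed afterwards by the caller.  Negates
   are folded in by negating the addend or MULOP1, e.g.
   a*b - c -> FMA <a, b, -c>.  */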
2308
2309 /* Find integer multiplications where the operands are extended from
2310 smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR
2311 where appropriate. */
2312
2313 static unsigned int
2314 execute_optimize_widening_mul (void)
2315 {
2316 basic_block bb;
2317 bool cfg_changed = false;
2318
2319 memset (&widen_mul_stats, 0, sizeof (widen_mul_stats));
2320
2321 FOR_EACH_BB (bb)
2322 {
2323 gimple_stmt_iterator gsi;
2324
2325 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
2326 {
2327 gimple stmt = gsi_stmt (gsi);
2328 enum tree_code code;
2329
2330 if (is_gimple_assign (stmt))
2331 {
2332 code = gimple_assign_rhs_code (stmt);
2333 switch (code)
2334 {
2335 case MULT_EXPR:
2336 if (!convert_mult_to_widen (stmt)
2337 && convert_mult_to_fma (stmt,
2338 gimple_assign_rhs1 (stmt),
2339 gimple_assign_rhs2 (stmt)))
2340 {
2341 gsi_remove (&gsi, true);
2342 release_defs (stmt);
2343 continue;
2344 }
2345 break;
2346
2347 case PLUS_EXPR:
2348 case MINUS_EXPR:
2349 convert_plusminus_to_widen (&gsi, stmt, code);
2350 break;
2351
2352 default:;
2353 }
2354 }
2355 else if (is_gimple_call (stmt)
2356 && gimple_call_lhs (stmt))
2357 {
2358 tree fndecl = gimple_call_fndecl (stmt);
2359 if (fndecl
2360 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
2361 {
2362 switch (DECL_FUNCTION_CODE (fndecl))
2363 {
2364 case BUILT_IN_POWF:
2365 case BUILT_IN_POW:
2366 case BUILT_IN_POWL:
2367 if (TREE_CODE (gimple_call_arg (stmt, 1)) == REAL_CST
2368 && REAL_VALUES_EQUAL
2369 (TREE_REAL_CST (gimple_call_arg (stmt, 1)),
2370 dconst2)
2371 && convert_mult_to_fma (stmt,
2372 gimple_call_arg (stmt, 0),
2373 gimple_call_arg (stmt, 0)))
2374 {
2375 unlink_stmt_vdef (stmt);
2376 gsi_remove (&gsi, true);
2377 release_defs (stmt);
2378 if (gimple_purge_dead_eh_edges (bb))
2379 cfg_changed = true;
2380 continue;
2381 }
2382 break;
2383
2384 default:;
2385 }
2386 }
2387 }
2388 gsi_next (&gsi);
2389 }
2390 }
2391
2392 statistics_counter_event (cfun, "widening multiplications inserted",
2393 widen_mul_stats.widen_mults_inserted);
2394 statistics_counter_event (cfun, "widening maccs inserted",
2395 widen_mul_stats.maccs_inserted);
2396 statistics_counter_event (cfun, "fused multiply-adds inserted",
2397 widen_mul_stats.fmas_inserted);
2398
2399 return cfg_changed ? TODO_cleanup_cfg : 0;
2400 }
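
/* Illustrative example (hypothetical source): the builtin handling
   above treats pow (x, 2.0) as x * x for fma formation, so

     r = pow (x, 2.0) + y;

   can be contracted to r = FMA <x, x, y> when the target has an fma
   pattern and -ffp-contract permits it.  */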
2401
2402 static bool
2403 gate_optimize_widening_mul (void)
2404 {
2405 return flag_expensive_optimizations && optimize;
2406 }
2407
2408 struct gimple_opt_pass pass_optimize_widening_mul =
2409 {
2410 {
2411 GIMPLE_PASS,
2412 "widening_mul", /* name */
2413 gate_optimize_widening_mul, /* gate */
2414 execute_optimize_widening_mul, /* execute */
2415 NULL, /* sub */
2416 NULL, /* next */
2417 0, /* static_pass_number */
2418 TV_NONE, /* tv_id */
2419 PROP_ssa, /* properties_required */
2420 0, /* properties_provided */
2421 0, /* properties_destroyed */
2422 0, /* todo_flags_start */
2423 TODO_verify_ssa
2424 | TODO_verify_stmts
2425 | TODO_dump_func
2426 | TODO_update_ssa /* todo_flags_finish */
2427 }
2428 };