1 /* Global, SSA-based optimizations using mathematical identities.
2 Copyright (C) 2005-2013 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 /* The first mini-pass in this file tries to CSE reciprocal
21 operations. These are common in sequences such as this one:
22
23 modulus = sqrt(x*x + y*y + z*z);
24 x = x / modulus;
25 y = y / modulus;
26 z = z / modulus;
27
28 that can be optimized to
29
30 modulus = sqrt(x*x + y*y + z*z);
31 rmodulus = 1.0 / modulus;
32 x = x * rmodulus;
33 y = y * rmodulus;
34 z = z * rmodulus;
35
36 We do this for loop invariant divisors, and with this pass whenever
37 we notice that a division has the same divisor multiple times.
38
39 Of course, like in PRE, we don't insert a division if a dominator
40 already has one. However, this cannot be done as an extension of
41 PRE for several reasons.
42
43      First of all, experiments showed that the
44 transformation is not always useful if there are only two divisions
45      by the same divisor. This is probably because modern processors
46 can pipeline the divisions; on older, in-order processors it should
47 still be effective to optimize two divisions by the same number.
48 We make this a param, and it shall be called N in the remainder of
49 this comment.
50
51 Second, if trapping math is active, we have less freedom on where
52 to insert divisions: we can only do so in basic blocks that already
53      contain one. (If divisions don't trap, we can instead insert
54 divisions elsewhere, which will be in blocks that are common dominators
55 of those that have the division).
56
57 We really don't want to compute the reciprocal unless a division will
58 be found. To do this, we won't insert the division in a basic block
59      that has fewer than N divisions *post-dominating* it.
60
61 The algorithm constructs a subset of the dominator tree, holding the
62      blocks containing the divisions and their common dominators,
63      and walks it twice. The first walk is in post-order, and it annotates
64 each block with the number of divisions that post-dominate it: this
65 gives information on where divisions can be inserted profitably.
66 The second walk is in pre-order, and it inserts divisions as explained
67 above, and replaces divisions by multiplications.
68
69 In the best case, the cost of the pass is O(n_statements). In the
70 worst-case, the cost is due to creating the dominator tree subset,
71 with a cost of O(n_basic_blocks ^ 2); however this can only happen
72 for n_statements / n_basic_blocks statements. So, the amortized cost
73 of creating the dominator tree subset is O(n_basic_blocks) and the
74 worst-case cost of the pass is O(n_statements * n_basic_blocks).
75
76 More practically, the cost will be small because there are few
77 divisions, and they tend to be in the same basic block, so insert_bb
78 is called very few times.
79
80 If we did this using domwalk.c, an efficient implementation would have
81 to work on all the variables in a single pass, because we could not
82 work on just a subset of the dominator tree, as we do now, and the
83 cost would also be something like O(n_statements * n_basic_blocks).
84 The data structures would be more complex in order to work on all the
85 variables in a single pass. */
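/* For illustration only (not part of the pass; assume a hypothetical
   threshold of N = 2 and non-trapping math), consider two divisions in
   consecutive blocks:

       bb1:  t1 = x / d;
       bb2:  t2 = y / d;

   where bb2 post-dominates bb1.  After compute_merit, bb1 is credited
   with both divisions, so insert_reciprocals emits reciptmp = 1.0 / d
   in bb1 right before the first division, and both divisions are then
   rewritten as t1 = x * reciptmp and t2 = y * reciptmp.  */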
86
87 #include "config.h"
88 #include "system.h"
89 #include "coretypes.h"
90 #include "tm.h"
91 #include "flags.h"
92 #include "tree.h"
93 #include "tree-flow.h"
94 #include "tree-pass.h"
95 #include "alloc-pool.h"
96 #include "basic-block.h"
97 #include "target.h"
98 #include "gimple-pretty-print.h"
99
100 /* FIXME: RTL headers have to be included here for optabs. */
101 #include "rtl.h" /* Because optabs.h wants enum rtx_code. */
102 #include "expr.h" /* Because optabs.h wants sepops. */
103 #include "optabs.h"
104
105 /* This structure represents one basic block that either computes a
106    division, or is a common dominator for basic blocks that compute a
107 division. */
108 struct occurrence {
109 /* The basic block represented by this structure. */
110 basic_block bb;
111
112 /* If non-NULL, the SSA_NAME holding the definition for a reciprocal
113 inserted in BB. */
114 tree recip_def;
115
116 /* If non-NULL, the GIMPLE_ASSIGN for a reciprocal computation that
117 was inserted in BB. */
118 gimple recip_def_stmt;
119
120 /* Pointer to a list of "struct occurrence"s for blocks dominated
121 by BB. */
122 struct occurrence *children;
123
124   /* Pointer to the next "struct occurrence" in the list of blocks
125 sharing a common dominator. */
126 struct occurrence *next;
127
128 /* The number of divisions that are in BB before compute_merit. The
129 number of divisions that are in BB or post-dominate it after
130 compute_merit. */
131 int num_divisions;
132
133 /* True if the basic block has a division, false if it is a common
134 dominator for basic blocks that do. If it is false and trapping
135 math is active, BB is not a candidate for inserting a reciprocal. */
136 bool bb_has_division;
137 };
138
139 static struct
140 {
141 /* Number of 1.0/X ops inserted. */
142 int rdivs_inserted;
143
144 /* Number of 1.0/FUNC ops inserted. */
145 int rfuncs_inserted;
146 } reciprocal_stats;
147
148 static struct
149 {
150 /* Number of cexpi calls inserted. */
151 int inserted;
152 } sincos_stats;
153
154 static struct
155 {
156 /* Number of hand-written 16-bit bswaps found. */
157 int found_16bit;
158
159 /* Number of hand-written 32-bit bswaps found. */
160 int found_32bit;
161
162 /* Number of hand-written 64-bit bswaps found. */
163 int found_64bit;
164 } bswap_stats;
165
166 static struct
167 {
168 /* Number of widening multiplication ops inserted. */
169 int widen_mults_inserted;
170
171 /* Number of integer multiply-and-accumulate ops inserted. */
172 int maccs_inserted;
173
174 /* Number of fp fused multiply-add ops inserted. */
175 int fmas_inserted;
176 } widen_mul_stats;
177
178 /* The instance of "struct occurrence" representing the highest
179 interesting block in the dominator tree. */
180 static struct occurrence *occ_head;
181
182 /* Allocation pool for getting instances of "struct occurrence". */
183 static alloc_pool occ_pool;
184
185
186
187 /* Allocate and return a new struct occurrence for basic block BB,
188 whose children list is headed by CHILDREN. */
189 static struct occurrence *
190 occ_new (basic_block bb, struct occurrence *children)
191 {
192 struct occurrence *occ;
193
194 bb->aux = occ = (struct occurrence *) pool_alloc (occ_pool);
195 memset (occ, 0, sizeof (struct occurrence));
196
197 occ->bb = bb;
198 occ->children = children;
199 return occ;
200 }
201
202
203 /* Insert NEW_OCC into our subset of the dominator tree. P_HEAD points to a
204 list of "struct occurrence"s, one per basic block, having IDOM as
205 their common dominator.
206
207 We try to insert NEW_OCC as deep as possible in the tree, and we also
208 insert any other block that is a common dominator for BB and one
209 block already in the tree. */
210
211 static void
212 insert_bb (struct occurrence *new_occ, basic_block idom,
213 struct occurrence **p_head)
214 {
215 struct occurrence *occ, **p_occ;
216
217 for (p_occ = p_head; (occ = *p_occ) != NULL; )
218 {
219 basic_block bb = new_occ->bb, occ_bb = occ->bb;
220 basic_block dom = nearest_common_dominator (CDI_DOMINATORS, occ_bb, bb);
221 if (dom == bb)
222 {
223 /* BB dominates OCC_BB. OCC becomes NEW_OCC's child: remove OCC
224 from its list. */
225 *p_occ = occ->next;
226 occ->next = new_occ->children;
227 new_occ->children = occ;
228
229          /* Try the next block (it may also be dominated by BB). */
230 }
231
232 else if (dom == occ_bb)
233 {
234 /* OCC_BB dominates BB. Tail recurse to look deeper. */
235 insert_bb (new_occ, dom, &occ->children);
236 return;
237 }
238
239 else if (dom != idom)
240 {
241 gcc_assert (!dom->aux);
242
243 /* There is a dominator between IDOM and BB, add it and make
244 two children out of NEW_OCC and OCC. First, remove OCC from
245 its list. */
246 *p_occ = occ->next;
247 new_occ->next = occ;
248 occ->next = NULL;
249
250 /* None of the previous blocks has DOM as a dominator: if we tail
251 recursed, we would reexamine them uselessly. Just switch BB with
252 DOM, and go on looking for blocks dominated by DOM. */
253 new_occ = occ_new (dom, new_occ);
254 }
255
256 else
257 {
258 /* Nothing special, go on with the next element. */
259 p_occ = &occ->next;
260 }
261 }
262
263 /* No place was found as a child of IDOM. Make BB a sibling of IDOM. */
264 new_occ->next = *p_head;
265 *p_head = new_occ;
266 }
267
268 /* Register that we found a division in BB. */
269
270 static inline void
271 register_division_in (basic_block bb)
272 {
273 struct occurrence *occ;
274
275 occ = (struct occurrence *) bb->aux;
276 if (!occ)
277 {
278 occ = occ_new (bb, NULL);
279 insert_bb (occ, ENTRY_BLOCK_PTR, &occ_head);
280 }
281
282 occ->bb_has_division = true;
283 occ->num_divisions++;
284 }
285
286
287 /* Compute the number of divisions that postdominate each block in OCC and
288 its children. */
289
290 static void
291 compute_merit (struct occurrence *occ)
292 {
293 struct occurrence *occ_child;
294 basic_block dom = occ->bb;
295
296 for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
297 {
298 basic_block bb;
299 if (occ_child->children)
300 compute_merit (occ_child);
301
302 if (flag_exceptions)
303 bb = single_noncomplex_succ (dom);
304 else
305 bb = dom;
306
307 if (dominated_by_p (CDI_POST_DOMINATORS, bb, occ_child->bb))
308 occ->num_divisions += occ_child->num_divisions;
309 }
310 }
311
312
313 /* Return whether USE_STMT is a floating-point division by DEF. */
314 static inline bool
315 is_division_by (gimple use_stmt, tree def)
316 {
317 return is_gimple_assign (use_stmt)
318 && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
319 && gimple_assign_rhs2 (use_stmt) == def
320 /* Do not recognize x / x as valid division, as we are getting
321 confused later by replacing all immediate uses x in such
322 a stmt. */
323 && gimple_assign_rhs1 (use_stmt) != def;
324 }
325
326 /* Walk the subset of the dominator tree rooted at OCC, setting the
327 RECIP_DEF field to a definition of 1.0 / DEF that can be used in
328 the given basic block. The field may be left NULL, of course,
329 if it is not possible or profitable to do the optimization.
330
331    DEF_GSI is an iterator pointing at the statement defining DEF.
332 If RECIP_DEF is set, a dominator already has a computation that can
333 be used. */
334
335 static void
336 insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ,
337 tree def, tree recip_def, int threshold)
338 {
339 tree type;
340 gimple new_stmt;
341 gimple_stmt_iterator gsi;
342 struct occurrence *occ_child;
343
344 if (!recip_def
345 && (occ->bb_has_division || !flag_trapping_math)
346 && occ->num_divisions >= threshold)
347 {
348 /* Make a variable with the replacement and substitute it. */
349 type = TREE_TYPE (def);
350 recip_def = create_tmp_reg (type, "reciptmp");
351 new_stmt = gimple_build_assign_with_ops (RDIV_EXPR, recip_def,
352 build_one_cst (type), def);
353
354 if (occ->bb_has_division)
355 {
356 /* Case 1: insert before an existing division. */
357 gsi = gsi_after_labels (occ->bb);
358 while (!gsi_end_p (gsi) && !is_division_by (gsi_stmt (gsi), def))
359 gsi_next (&gsi);
360
361 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
362 }
363 else if (def_gsi && occ->bb == def_gsi->bb)
364 {
365 /* Case 2: insert right after the definition. Note that this will
366 never happen if the definition statement can throw, because in
367 that case the sole successor of the statement's basic block will
368 dominate all the uses as well. */
369 gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
370 }
371 else
372 {
373 /* Case 3: insert in a basic block not containing defs/uses. */
374 gsi = gsi_after_labels (occ->bb);
375 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
376 }
377
378 reciprocal_stats.rdivs_inserted++;
379
380 occ->recip_def_stmt = new_stmt;
381 }
382
383 occ->recip_def = recip_def;
384 for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
385 insert_reciprocals (def_gsi, occ_child, def, recip_def, threshold);
386 }
387
388
389 /* Replace the division at USE_P with a multiplication by the reciprocal, if
390 possible. */
391
392 static inline void
393 replace_reciprocal (use_operand_p use_p)
394 {
395 gimple use_stmt = USE_STMT (use_p);
396 basic_block bb = gimple_bb (use_stmt);
397 struct occurrence *occ = (struct occurrence *) bb->aux;
398
399 if (optimize_bb_for_speed_p (bb)
400 && occ->recip_def && use_stmt != occ->recip_def_stmt)
401 {
402 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
403 gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
404 SET_USE (use_p, occ->recip_def);
405 fold_stmt_inplace (&gsi);
406 update_stmt (use_stmt);
407 }
408 }
409
410
411 /* Free OCC and return one more "struct occurrence" to be freed. */
412
413 static struct occurrence *
414 free_bb (struct occurrence *occ)
415 {
416 struct occurrence *child, *next;
417
418 /* First get the two pointers hanging off OCC. */
419 next = occ->next;
420 child = occ->children;
421 occ->bb->aux = NULL;
422 pool_free (occ_pool, occ);
423
424 /* Now ensure that we don't recurse unless it is necessary. */
425 if (!child)
426 return next;
427 else
428 {
429 while (next)
430 next = free_bb (next);
431
432 return child;
433 }
434 }
435
436
437 /* Look for floating-point divisions among DEF's uses, and try to
438 replace them by multiplications with the reciprocal. Add
439 as many statements computing the reciprocal as needed.
440
441 DEF must be a GIMPLE register of a floating-point type. */
442
443 static void
444 execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
445 {
446 use_operand_p use_p;
447 imm_use_iterator use_iter;
448 struct occurrence *occ;
449 int count = 0, threshold;
450
451 gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && is_gimple_reg (def));
452
453 FOR_EACH_IMM_USE_FAST (use_p, use_iter, def)
454 {
455 gimple use_stmt = USE_STMT (use_p);
456 if (is_division_by (use_stmt, def))
457 {
458 register_division_in (gimple_bb (use_stmt));
459 count++;
460 }
461 }
462
463 /* Do the expensive part only if we can hope to optimize something. */
464 threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));
465 if (count >= threshold)
466 {
467 gimple use_stmt;
468 for (occ = occ_head; occ; occ = occ->next)
469 {
470 compute_merit (occ);
471 insert_reciprocals (def_gsi, occ, def, NULL, threshold);
472 }
473
474 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, def)
475 {
476 if (is_division_by (use_stmt, def))
477 {
478 FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
479 replace_reciprocal (use_p);
480 }
481 }
482 }
483
484 for (occ = occ_head; occ; )
485 occ = free_bb (occ);
486
487 occ_head = NULL;
488 }
489
490 static bool
491 gate_cse_reciprocals (void)
492 {
493 return optimize && flag_reciprocal_math;
494 }
495
496 /* Go through all the floating-point SSA_NAMEs, and call
497 execute_cse_reciprocals_1 on each of them. */
498 static unsigned int
499 execute_cse_reciprocals (void)
500 {
501 basic_block bb;
502 tree arg;
503
504 occ_pool = create_alloc_pool ("dominators for recip",
505 sizeof (struct occurrence),
506 n_basic_blocks / 3 + 1);
507
508 memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
509 calculate_dominance_info (CDI_DOMINATORS);
510 calculate_dominance_info (CDI_POST_DOMINATORS);
511
512 #ifdef ENABLE_CHECKING
513 FOR_EACH_BB (bb)
514 gcc_assert (!bb->aux);
515 #endif
516
517 for (arg = DECL_ARGUMENTS (cfun->decl); arg; arg = DECL_CHAIN (arg))
518 if (FLOAT_TYPE_P (TREE_TYPE (arg))
519 && is_gimple_reg (arg))
520 {
521 tree name = ssa_default_def (cfun, arg);
522 if (name)
523 execute_cse_reciprocals_1 (NULL, name);
524 }
525
526 FOR_EACH_BB (bb)
527 {
528 gimple_stmt_iterator gsi;
529 gimple phi;
530 tree def;
531
532 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
533 {
534 phi = gsi_stmt (gsi);
535 def = PHI_RESULT (phi);
536 if (! virtual_operand_p (def)
537 && FLOAT_TYPE_P (TREE_TYPE (def)))
538 execute_cse_reciprocals_1 (NULL, def);
539 }
540
541 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
542 {
543 gimple stmt = gsi_stmt (gsi);
544
545 if (gimple_has_lhs (stmt)
546 && (def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF)) != NULL
547 && FLOAT_TYPE_P (TREE_TYPE (def))
548 && TREE_CODE (def) == SSA_NAME)
549 execute_cse_reciprocals_1 (&gsi, def);
550 }
551
552 if (optimize_bb_for_size_p (bb))
553 continue;
554
555 /* Scan for a/func(b) and convert it to reciprocal a*rfunc(b). */
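      /* For example (illustrative only, and target-dependent: it applies
         only when targetm.builtin_reciprocal returns a reciprocal variant
         of FUNC, e.g. a hypothetical rsqrtf for sqrtf):

             t = sqrtf (b);                 t = rsqrtf (b);
             x = a / t;         becomes     x = a * t;

         provided every non-debug use of t is a division by t.  */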
556 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
557 {
558 gimple stmt = gsi_stmt (gsi);
559 tree fndecl;
560
561 if (is_gimple_assign (stmt)
562 && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
563 {
564 tree arg1 = gimple_assign_rhs2 (stmt);
565 gimple stmt1;
566
567 if (TREE_CODE (arg1) != SSA_NAME)
568 continue;
569
570 stmt1 = SSA_NAME_DEF_STMT (arg1);
571
572 if (is_gimple_call (stmt1)
573 && gimple_call_lhs (stmt1)
574 && (fndecl = gimple_call_fndecl (stmt1))
575 && (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
576 || DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD))
577 {
578 enum built_in_function code;
579 bool md_code, fail;
580 imm_use_iterator ui;
581 use_operand_p use_p;
582
583 code = DECL_FUNCTION_CODE (fndecl);
584 md_code = DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD;
585
586 fndecl = targetm.builtin_reciprocal (code, md_code, false);
587 if (!fndecl)
588 continue;
589
590 /* Check that all uses of the SSA name are divisions,
591 otherwise replacing the defining statement will do
592 the wrong thing. */
593 fail = false;
594 FOR_EACH_IMM_USE_FAST (use_p, ui, arg1)
595 {
596 gimple stmt2 = USE_STMT (use_p);
597 if (is_gimple_debug (stmt2))
598 continue;
599 if (!is_gimple_assign (stmt2)
600 || gimple_assign_rhs_code (stmt2) != RDIV_EXPR
601 || gimple_assign_rhs1 (stmt2) == arg1
602 || gimple_assign_rhs2 (stmt2) != arg1)
603 {
604 fail = true;
605 break;
606 }
607 }
608 if (fail)
609 continue;
610
611 gimple_replace_lhs (stmt1, arg1);
612 gimple_call_set_fndecl (stmt1, fndecl);
613 update_stmt (stmt1);
614 reciprocal_stats.rfuncs_inserted++;
615
616 FOR_EACH_IMM_USE_STMT (stmt, ui, arg1)
617 {
618 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
619 gimple_assign_set_rhs_code (stmt, MULT_EXPR);
620 fold_stmt_inplace (&gsi);
621 update_stmt (stmt);
622 }
623 }
624 }
625 }
626 }
627
628 statistics_counter_event (cfun, "reciprocal divs inserted",
629 reciprocal_stats.rdivs_inserted);
630 statistics_counter_event (cfun, "reciprocal functions inserted",
631 reciprocal_stats.rfuncs_inserted);
632
633 free_dominance_info (CDI_DOMINATORS);
634 free_dominance_info (CDI_POST_DOMINATORS);
635 free_alloc_pool (occ_pool);
636 return 0;
637 }
638
639 namespace {
640
641 const pass_data pass_data_cse_reciprocals =
642 {
643 GIMPLE_PASS, /* type */
644 "recip", /* name */
645 OPTGROUP_NONE, /* optinfo_flags */
646 true, /* has_gate */
647 true, /* has_execute */
648 TV_NONE, /* tv_id */
649 PROP_ssa, /* properties_required */
650 0, /* properties_provided */
651 0, /* properties_destroyed */
652 0, /* todo_flags_start */
653 ( TODO_update_ssa | TODO_verify_ssa
654 | TODO_verify_stmts ), /* todo_flags_finish */
655 };
656
657 class pass_cse_reciprocals : public gimple_opt_pass
658 {
659 public:
660 pass_cse_reciprocals(gcc::context *ctxt)
661 : gimple_opt_pass(pass_data_cse_reciprocals, ctxt)
662 {}
663
664 /* opt_pass methods: */
665 bool gate () { return gate_cse_reciprocals (); }
666 unsigned int execute () { return execute_cse_reciprocals (); }
667
668 }; // class pass_cse_reciprocals
669
670 } // anon namespace
671
672 gimple_opt_pass *
673 make_pass_cse_reciprocals (gcc::context *ctxt)
674 {
675 return new pass_cse_reciprocals (ctxt);
676 }
677
678 /* Records an occurrence at statement USE_STMT in the vector STMTS if
679    it is dominated by *TOP_BB, dominates it, or if *TOP_BB is not yet
680    initialized. Returns true if the occurrence was pushed on
681 the vector. Adjusts *TOP_BB to be the basic block dominating all
682 statements in the vector. */
683
684 static bool
685 maybe_record_sincos (vec<gimple> *stmts,
686 basic_block *top_bb, gimple use_stmt)
687 {
688 basic_block use_bb = gimple_bb (use_stmt);
689 if (*top_bb
690 && (*top_bb == use_bb
691 || dominated_by_p (CDI_DOMINATORS, use_bb, *top_bb)))
692 stmts->safe_push (use_stmt);
693 else if (!*top_bb
694 || dominated_by_p (CDI_DOMINATORS, *top_bb, use_bb))
695 {
696 stmts->safe_push (use_stmt);
697 *top_bb = use_bb;
698 }
699 else
700 return false;
701
702 return true;
703 }
704
705 /* Look for sin, cos and cexpi calls with the same argument NAME and
706 create a single call to cexpi CSEing the result in this case.
707 We first walk over all immediate uses of the argument collecting
708 statements that we can CSE in a vector and in a second pass replace
709 the statement rhs with a REALPART or IMAGPART expression on the
710 result of the cexpi call we insert before the use statement that
711 dominates all other candidates. */
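/* For instance (illustrative only; actual temporaries and placement
   depend on dominance), for a float SSA name a,

       s = sinf (a);
       c = cosf (a);

   becomes roughly

       sincostmp = __builtin_cexpif (a);
       c = __real sincostmp;
       s = __imag sincostmp;  */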
712
713 static bool
714 execute_cse_sincos_1 (tree name)
715 {
716 gimple_stmt_iterator gsi;
717 imm_use_iterator use_iter;
718 tree fndecl, res, type;
719 gimple def_stmt, use_stmt, stmt;
720 int seen_cos = 0, seen_sin = 0, seen_cexpi = 0;
721 vec<gimple> stmts = vNULL;
722 basic_block top_bb = NULL;
723 int i;
724 bool cfg_changed = false;
725
726 type = TREE_TYPE (name);
727 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, name)
728 {
729 if (gimple_code (use_stmt) != GIMPLE_CALL
730 || !gimple_call_lhs (use_stmt)
731 || !(fndecl = gimple_call_fndecl (use_stmt))
732 || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
733 continue;
734
735 switch (DECL_FUNCTION_CODE (fndecl))
736 {
737 CASE_FLT_FN (BUILT_IN_COS):
738 seen_cos |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
739 break;
740
741 CASE_FLT_FN (BUILT_IN_SIN):
742 seen_sin |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
743 break;
744
745 CASE_FLT_FN (BUILT_IN_CEXPI):
746 seen_cexpi |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
747 break;
748
749 default:;
750 }
751 }
752
753 if (seen_cos + seen_sin + seen_cexpi <= 1)
754 {
755 stmts.release ();
756 return false;
757 }
758
759 /* Simply insert cexpi at the beginning of top_bb but not earlier than
760 the name def statement. */
761 fndecl = mathfn_built_in (type, BUILT_IN_CEXPI);
762 if (!fndecl)
763 return false;
764 stmt = gimple_build_call (fndecl, 1, name);
765 res = make_temp_ssa_name (TREE_TYPE (TREE_TYPE (fndecl)), stmt, "sincostmp");
766 gimple_call_set_lhs (stmt, res);
767
768 def_stmt = SSA_NAME_DEF_STMT (name);
769 if (!SSA_NAME_IS_DEFAULT_DEF (name)
770 && gimple_code (def_stmt) != GIMPLE_PHI
771 && gimple_bb (def_stmt) == top_bb)
772 {
773 gsi = gsi_for_stmt (def_stmt);
774 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
775 }
776 else
777 {
778 gsi = gsi_after_labels (top_bb);
779 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
780 }
781 sincos_stats.inserted++;
782
783 /* And adjust the recorded old call sites. */
784 for (i = 0; stmts.iterate (i, &use_stmt); ++i)
785 {
786 tree rhs = NULL;
787 fndecl = gimple_call_fndecl (use_stmt);
788
789 switch (DECL_FUNCTION_CODE (fndecl))
790 {
791 CASE_FLT_FN (BUILT_IN_COS):
792 rhs = fold_build1 (REALPART_EXPR, type, res);
793 break;
794
795 CASE_FLT_FN (BUILT_IN_SIN):
796 rhs = fold_build1 (IMAGPART_EXPR, type, res);
797 break;
798
799 CASE_FLT_FN (BUILT_IN_CEXPI):
800 rhs = res;
801 break;
802
803 default:;
804 gcc_unreachable ();
805 }
806
807 /* Replace call with a copy. */
808 stmt = gimple_build_assign (gimple_call_lhs (use_stmt), rhs);
809
810 gsi = gsi_for_stmt (use_stmt);
811 gsi_replace (&gsi, stmt, true);
812 if (gimple_purge_dead_eh_edges (gimple_bb (stmt)))
813 cfg_changed = true;
814 }
815
816 stmts.release ();
817
818 return cfg_changed;
819 }
820
821 /* To evaluate powi(x,n), the floating point value x raised to the
822 constant integer exponent n, we use a hybrid algorithm that
823 combines the "window method" with look-up tables. For an
824 introduction to exponentiation algorithms and "addition chains",
825 see section 4.6.3, "Evaluation of Powers" of Donald E. Knuth,
826 "Seminumerical Algorithms", Vol. 2, "The Art of Computer Programming",
827 3rd Edition, 1998, and Daniel M. Gordon, "A Survey of Fast Exponentiation
828 Methods", Journal of Algorithms, Vol. 27, pp. 129-146, 1998. */
829
830 /* Provide a default value for POWI_MAX_MULTS, the maximum number of
831 multiplications to inline before calling the system library's pow
832 function. powi(x,n) requires at worst 2*bits(n)-2 multiplications,
833 so this default never requires calling pow, powf or powl. */
834
835 #ifndef POWI_MAX_MULTS
836 #define POWI_MAX_MULTS (2*HOST_BITS_PER_WIDE_INT-2)
837 #endif
838
839 /* The size of the "optimal power tree" lookup table. All
840 exponents less than this value are simply looked up in the
841 powi_table below. This threshold is also used to size the
842 cache of pseudo registers that hold intermediate results. */
843 #define POWI_TABLE_SIZE 256
844
845 /* The size, in bits, of the window used in the "window method"
846 exponentiation algorithm. This is equivalent to a radix of
847 (1<<POWI_WINDOW_SIZE) in the corresponding "m-ary method". */
848 #define POWI_WINDOW_SIZE 3
849
850 /* The following table is an efficient representation of an
851 "optimal power tree". For each value, i, the corresponding
852    value, j, in the table states that an optimal evaluation
853 sequence for calculating pow(x,i) can be found by evaluating
854 pow(x,j)*pow(x,i-j). An optimal power tree for the first
855 100 integers is given in Knuth's "Seminumerical algorithms". */
856
857 static const unsigned char powi_table[POWI_TABLE_SIZE] =
858 {
859 0, 1, 1, 2, 2, 3, 3, 4, /* 0 - 7 */
860 4, 6, 5, 6, 6, 10, 7, 9, /* 8 - 15 */
861 8, 16, 9, 16, 10, 12, 11, 13, /* 16 - 23 */
862 12, 17, 13, 18, 14, 24, 15, 26, /* 24 - 31 */
863 16, 17, 17, 19, 18, 33, 19, 26, /* 32 - 39 */
864 20, 25, 21, 40, 22, 27, 23, 44, /* 40 - 47 */
865 24, 32, 25, 34, 26, 29, 27, 44, /* 48 - 55 */
866 28, 31, 29, 34, 30, 60, 31, 36, /* 56 - 63 */
867 32, 64, 33, 34, 34, 46, 35, 37, /* 64 - 71 */
868 36, 65, 37, 50, 38, 48, 39, 69, /* 72 - 79 */
869 40, 49, 41, 43, 42, 51, 43, 58, /* 80 - 87 */
870 44, 64, 45, 47, 46, 59, 47, 76, /* 88 - 95 */
871 48, 65, 49, 66, 50, 67, 51, 66, /* 96 - 103 */
872 52, 70, 53, 74, 54, 104, 55, 74, /* 104 - 111 */
873 56, 64, 57, 69, 58, 78, 59, 68, /* 112 - 119 */
874 60, 61, 61, 80, 62, 75, 63, 68, /* 120 - 127 */
875 64, 65, 65, 128, 66, 129, 67, 90, /* 128 - 135 */
876 68, 73, 69, 131, 70, 94, 71, 88, /* 136 - 143 */
877 72, 128, 73, 98, 74, 132, 75, 121, /* 144 - 151 */
878 76, 102, 77, 124, 78, 132, 79, 106, /* 152 - 159 */
879 80, 97, 81, 160, 82, 99, 83, 134, /* 160 - 167 */
880 84, 86, 85, 95, 86, 160, 87, 100, /* 168 - 175 */
881 88, 113, 89, 98, 90, 107, 91, 122, /* 176 - 183 */
882 92, 111, 93, 102, 94, 126, 95, 150, /* 184 - 191 */
883 96, 128, 97, 130, 98, 133, 99, 195, /* 192 - 199 */
884 100, 128, 101, 123, 102, 164, 103, 138, /* 200 - 207 */
885 104, 145, 105, 146, 106, 109, 107, 149, /* 208 - 215 */
886 108, 200, 109, 146, 110, 170, 111, 157, /* 216 - 223 */
887 112, 128, 113, 130, 114, 182, 115, 132, /* 224 - 231 */
888 116, 200, 117, 132, 118, 158, 119, 206, /* 232 - 239 */
889 120, 240, 121, 162, 122, 147, 123, 152, /* 240 - 247 */
890 124, 166, 125, 214, 126, 138, 127, 153, /* 248 - 255 */
891 };
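/* Worked example (for illustration only): for powi (x, 23), powi_table[23]
   is 13, so x**23 = x**13 * x**10.  Expanding recursively via the table
   (23 = 13+10, 13 = 10+3, 10 = 5+5, 5 = 3+2, 3 = 2+1, 2 = 1+1) gives the
   chain x**2, x**3, x**5, x**10, x**13, x**23: six multiplications,
   versus seven for plain binary (square-and-multiply) exponentiation.  */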
892
893
894 /* Return the number of multiplications required to calculate
895 powi(x,n) where n is less than POWI_TABLE_SIZE. This is a
896 subroutine of powi_cost. CACHE is an array indicating
897 which exponents have already been calculated. */
898
899 static int
900 powi_lookup_cost (unsigned HOST_WIDE_INT n, bool *cache)
901 {
902 /* If we've already calculated this exponent, then this evaluation
903 doesn't require any additional multiplications. */
904 if (cache[n])
905 return 0;
906
907 cache[n] = true;
908 return powi_lookup_cost (n - powi_table[n], cache)
909 + powi_lookup_cost (powi_table[n], cache) + 1;
910 }
911
912 /* Return the number of multiplications required to calculate
913 powi(x,n) for an arbitrary x, given the exponent N. This
914 function needs to be kept in sync with powi_as_mults below. */
915
916 static int
917 powi_cost (HOST_WIDE_INT n)
918 {
919 bool cache[POWI_TABLE_SIZE];
920 unsigned HOST_WIDE_INT digit;
921 unsigned HOST_WIDE_INT val;
922 int result;
923
924 if (n == 0)
925 return 0;
926
927 /* Ignore the reciprocal when calculating the cost. */
928 val = (n < 0) ? -n : n;
929
930 /* Initialize the exponent cache. */
931 memset (cache, 0, POWI_TABLE_SIZE * sizeof (bool));
932 cache[1] = true;
933
934 result = 0;
935
936 while (val >= POWI_TABLE_SIZE)
937 {
938 if (val & 1)
939 {
940 digit = val & ((1 << POWI_WINDOW_SIZE) - 1);
941 result += powi_lookup_cost (digit, cache)
942 + POWI_WINDOW_SIZE + 1;
943 val >>= POWI_WINDOW_SIZE;
944 }
945 else
946 {
947 val >>= 1;
948 result++;
949 }
950 }
951
952 return result + powi_lookup_cost (val, cache);
953 }
954
955 /* Recursive subroutine of powi_as_mults. This function takes the
956 array, CACHE, of already calculated exponents and an exponent N and
957 returns a tree that corresponds to CACHE[1]**N, with type TYPE. */
958
959 static tree
960 powi_as_mults_1 (gimple_stmt_iterator *gsi, location_t loc, tree type,
961 HOST_WIDE_INT n, tree *cache)
962 {
963 tree op0, op1, ssa_target;
964 unsigned HOST_WIDE_INT digit;
965 gimple mult_stmt;
966
967 if (n < POWI_TABLE_SIZE && cache[n])
968 return cache[n];
969
970 ssa_target = make_temp_ssa_name (type, NULL, "powmult");
971
972 if (n < POWI_TABLE_SIZE)
973 {
974 cache[n] = ssa_target;
975 op0 = powi_as_mults_1 (gsi, loc, type, n - powi_table[n], cache);
976 op1 = powi_as_mults_1 (gsi, loc, type, powi_table[n], cache);
977 }
978 else if (n & 1)
979 {
980 digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
981 op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache);
982 op1 = powi_as_mults_1 (gsi, loc, type, digit, cache);
983 }
984 else
985 {
986 op0 = powi_as_mults_1 (gsi, loc, type, n >> 1, cache);
987 op1 = op0;
988 }
989
990 mult_stmt = gimple_build_assign_with_ops (MULT_EXPR, ssa_target, op0, op1);
991 gimple_set_location (mult_stmt, loc);
992 gsi_insert_before (gsi, mult_stmt, GSI_SAME_STMT);
993
994 return ssa_target;
995 }
996
997 /* Convert ARG0**N to a tree of multiplications of ARG0 with itself.
998 This function needs to be kept in sync with powi_cost above. */
999
1000 static tree
1001 powi_as_mults (gimple_stmt_iterator *gsi, location_t loc,
1002 tree arg0, HOST_WIDE_INT n)
1003 {
1004 tree cache[POWI_TABLE_SIZE], result, type = TREE_TYPE (arg0);
1005 gimple div_stmt;
1006 tree target;
1007
1008 if (n == 0)
1009 return build_real (type, dconst1);
1010
1011 memset (cache, 0, sizeof (cache));
1012 cache[1] = arg0;
1013
1014 result = powi_as_mults_1 (gsi, loc, type, (n < 0) ? -n : n, cache);
1015 if (n >= 0)
1016 return result;
1017
1018 /* If the original exponent was negative, reciprocate the result. */
1019 target = make_temp_ssa_name (type, NULL, "powmult");
1020 div_stmt = gimple_build_assign_with_ops (RDIV_EXPR, target,
1021 build_real (type, dconst1),
1022 result);
1023 gimple_set_location (div_stmt, loc);
1024 gsi_insert_before (gsi, div_stmt, GSI_SAME_STMT);
1025
1026 return target;
1027 }
1028
1029 /* ARG0 and N are the two arguments to a powi builtin in GSI with
1030 location info LOC. If the arguments are appropriate, create an
1031 equivalent sequence of statements prior to GSI using an optimal
1032    number of multiplications, and return an expression holding the
1033 result. */
1034
1035 static tree
1036 gimple_expand_builtin_powi (gimple_stmt_iterator *gsi, location_t loc,
1037 tree arg0, HOST_WIDE_INT n)
1038 {
1039 /* Avoid largest negative number. */
1040 if (n != -n
1041 && ((n >= -1 && n <= 2)
1042 || (optimize_function_for_speed_p (cfun)
1043 && powi_cost (n) <= POWI_MAX_MULTS)))
1044 return powi_as_mults (gsi, loc, arg0, n);
1045
1046 return NULL_TREE;
1047 }
1048
1049 /* Build a gimple call statement that calls FN with argument ARG.
1050 Set the lhs of the call statement to a fresh SSA name. Insert the
1051 statement prior to GSI's current position, and return the fresh
1052 SSA name. */
1053
1054 static tree
1055 build_and_insert_call (gimple_stmt_iterator *gsi, location_t loc,
1056 tree fn, tree arg)
1057 {
1058 gimple call_stmt;
1059 tree ssa_target;
1060
1061 call_stmt = gimple_build_call (fn, 1, arg);
1062 ssa_target = make_temp_ssa_name (TREE_TYPE (arg), NULL, "powroot");
1063 gimple_set_lhs (call_stmt, ssa_target);
1064 gimple_set_location (call_stmt, loc);
1065 gsi_insert_before (gsi, call_stmt, GSI_SAME_STMT);
1066
1067 return ssa_target;
1068 }
1069
1070 /* Build a gimple binary operation with the given CODE and arguments
1071    ARG0, ARG1, assigning the result to a new SSA name whose base
1072    identifier is NAME. Insert the statement prior to GSI's current
1073    position, and return the fresh SSA name. */
1074
1075 static tree
1076 build_and_insert_binop (gimple_stmt_iterator *gsi, location_t loc,
1077 const char *name, enum tree_code code,
1078 tree arg0, tree arg1)
1079 {
1080 tree result = make_temp_ssa_name (TREE_TYPE (arg0), NULL, name);
1081 gimple stmt = gimple_build_assign_with_ops (code, result, arg0, arg1);
1082 gimple_set_location (stmt, loc);
1083 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
1084 return result;
1085 }
1086
1087 /* Build a gimple reference operation with the given CODE and argument
1088 ARG, assigning the result to a new SSA name of TYPE with NAME.
1089 Insert the statement prior to GSI's current position, and return
1090 the fresh SSA name. */
1091
1092 static inline tree
1093 build_and_insert_ref (gimple_stmt_iterator *gsi, location_t loc, tree type,
1094 const char *name, enum tree_code code, tree arg0)
1095 {
1096 tree result = make_temp_ssa_name (type, NULL, name);
1097 gimple stmt = gimple_build_assign (result, build1 (code, type, arg0));
1098 gimple_set_location (stmt, loc);
1099 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
1100 return result;
1101 }
1102
1103 /* Build a gimple assignment to cast VAL to TYPE. Insert the statement
1104 prior to GSI's current position, and return the fresh SSA name. */
1105
1106 static tree
1107 build_and_insert_cast (gimple_stmt_iterator *gsi, location_t loc,
1108 tree type, tree val)
1109 {
1110 tree result = make_ssa_name (type, NULL);
1111 gimple stmt = gimple_build_assign_with_ops (NOP_EXPR, result, val, NULL_TREE);
1112 gimple_set_location (stmt, loc);
1113 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
1114 return result;
1115 }
1116
1117 /* ARG0 and ARG1 are the two arguments to a pow builtin call in GSI
1118 with location info LOC. If possible, create an equivalent and
1119 less expensive sequence of statements prior to GSI, and return an
1120    expression holding the result. */
1121
1122 static tree
1123 gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc,
1124 tree arg0, tree arg1)
1125 {
1126 REAL_VALUE_TYPE c, cint, dconst1_4, dconst3_4, dconst1_3, dconst1_6;
1127 REAL_VALUE_TYPE c2, dconst3;
1128 HOST_WIDE_INT n;
1129 tree type, sqrtfn, cbrtfn, sqrt_arg0, sqrt_sqrt, result, cbrt_x, powi_cbrt_x;
1130 enum machine_mode mode;
1131 bool hw_sqrt_exists, c_is_int, c2_is_int;
1132
1133 /* If the exponent isn't a constant, there's nothing of interest
1134 to be done. */
1135 if (TREE_CODE (arg1) != REAL_CST)
1136 return NULL_TREE;
1137
1138 /* If the exponent is equivalent to an integer, expand to an optimal
1139 multiplication sequence when profitable. */
1140 c = TREE_REAL_CST (arg1);
1141 n = real_to_integer (&c);
1142 real_from_integer (&cint, VOIDmode, n, n < 0 ? -1 : 0, 0);
1143 c_is_int = real_identical (&c, &cint);
1144
1145 if (c_is_int
1146 && ((n >= -1 && n <= 2)
1147 || (flag_unsafe_math_optimizations
1148 && optimize_insn_for_speed_p ()
1149 && powi_cost (n) <= POWI_MAX_MULTS)))
1150 return gimple_expand_builtin_powi (gsi, loc, arg0, n);
1151
1152 /* Attempt various optimizations using sqrt and cbrt. */
1153 type = TREE_TYPE (arg0);
1154 mode = TYPE_MODE (type);
1155 sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
1156
1157 /* Optimize pow(x,0.5) = sqrt(x). This replacement is always safe
1158 unless signed zeros must be maintained. pow(-0,0.5) = +0, while
1159 sqrt(-0) = -0. */
1160 if (sqrtfn
1161 && REAL_VALUES_EQUAL (c, dconsthalf)
1162 && !HONOR_SIGNED_ZEROS (mode))
1163 return build_and_insert_call (gsi, loc, sqrtfn, arg0);
1164
1165 /* Optimize pow(x,0.25) = sqrt(sqrt(x)). Assume on most machines that
1166 a builtin sqrt instruction is smaller than a call to pow with 0.25,
1167 so do this optimization even if -Os. Don't do this optimization
1168 if we don't have a hardware sqrt insn. */
1169 dconst1_4 = dconst1;
1170 SET_REAL_EXP (&dconst1_4, REAL_EXP (&dconst1_4) - 2);
1171 hw_sqrt_exists = optab_handler (sqrt_optab, mode) != CODE_FOR_nothing;
1172
1173 if (flag_unsafe_math_optimizations
1174 && sqrtfn
1175 && REAL_VALUES_EQUAL (c, dconst1_4)
1176 && hw_sqrt_exists)
1177 {
1178 /* sqrt(x) */
1179 sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);
1180
1181 /* sqrt(sqrt(x)) */
1182 return build_and_insert_call (gsi, loc, sqrtfn, sqrt_arg0);
1183 }
1184
1185 /* Optimize pow(x,0.75) = sqrt(x) * sqrt(sqrt(x)) unless we are
1186 optimizing for space. Don't do this optimization if we don't have
1187 a hardware sqrt insn. */
1188 real_from_integer (&dconst3_4, VOIDmode, 3, 0, 0);
1189 SET_REAL_EXP (&dconst3_4, REAL_EXP (&dconst3_4) - 2);
1190
1191 if (flag_unsafe_math_optimizations
1192 && sqrtfn
1193 && optimize_function_for_speed_p (cfun)
1194 && REAL_VALUES_EQUAL (c, dconst3_4)
1195 && hw_sqrt_exists)
1196 {
1197 /* sqrt(x) */
1198 sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);
1199
1200 /* sqrt(sqrt(x)) */
1201 sqrt_sqrt = build_and_insert_call (gsi, loc, sqrtfn, sqrt_arg0);
1202
1203 /* sqrt(x) * sqrt(sqrt(x)) */
1204 return build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1205 sqrt_arg0, sqrt_sqrt);
1206 }
1207
1208 /* Optimize pow(x,1./3.) = cbrt(x). This requires unsafe math
1209 optimizations since 1./3. is not exactly representable. If x
1210 is negative and finite, the correct value of pow(x,1./3.) is
1211 a NaN with the "invalid" exception raised, because the value
1212 of 1./3. actually has an even denominator. The correct value
1213 of cbrt(x) is a negative real value. */
1214 cbrtfn = mathfn_built_in (type, BUILT_IN_CBRT);
1215 dconst1_3 = real_value_truncate (mode, dconst_third ());
1216
1217 if (flag_unsafe_math_optimizations
1218 && cbrtfn
1219 && (gimple_val_nonnegative_real_p (arg0) || !HONOR_NANS (mode))
1220 && REAL_VALUES_EQUAL (c, dconst1_3))
1221 return build_and_insert_call (gsi, loc, cbrtfn, arg0);
1222
1223 /* Optimize pow(x,1./6.) = cbrt(sqrt(x)). Don't do this optimization
1224 if we don't have a hardware sqrt insn. */
1225 dconst1_6 = dconst1_3;
1226 SET_REAL_EXP (&dconst1_6, REAL_EXP (&dconst1_6) - 1);
1227
1228 if (flag_unsafe_math_optimizations
1229 && sqrtfn
1230 && cbrtfn
1231 && (gimple_val_nonnegative_real_p (arg0) || !HONOR_NANS (mode))
1232 && optimize_function_for_speed_p (cfun)
1233 && hw_sqrt_exists
1234 && REAL_VALUES_EQUAL (c, dconst1_6))
1235 {
1236 /* sqrt(x) */
1237 sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);
1238
1239 /* cbrt(sqrt(x)) */
1240 return build_and_insert_call (gsi, loc, cbrtfn, sqrt_arg0);
1241 }
1242
1243 /* Optimize pow(x,c), where n = 2c for some nonzero integer n
1244 and c not an integer, into
1245
1246 sqrt(x) * powi(x, n/2), n > 0;
1247 1.0 / (sqrt(x) * powi(x, abs(n/2))), n < 0.
1248
1249 Do not calculate the powi factor when n/2 = 0. */
1250 real_arithmetic (&c2, MULT_EXPR, &c, &dconst2);
1251 n = real_to_integer (&c2);
1252 real_from_integer (&cint, VOIDmode, n, n < 0 ? -1 : 0, 0);
1253 c2_is_int = real_identical (&c2, &cint);
1254
1255 if (flag_unsafe_math_optimizations
1256 && sqrtfn
1257 && c2_is_int
1258 && !c_is_int
1259 && optimize_function_for_speed_p (cfun))
1260 {
1261 tree powi_x_ndiv2 = NULL_TREE;
1262
1263 /* Attempt to fold powi(arg0, abs(n/2)) into multiplies. If not
1264 possible or profitable, give up. Skip the degenerate case when
1265 n is 1 or -1, where the result is always 1. */
1266 if (absu_hwi (n) != 1)
1267 {
1268 powi_x_ndiv2 = gimple_expand_builtin_powi (gsi, loc, arg0,
1269 abs_hwi (n / 2));
1270 if (!powi_x_ndiv2)
1271 return NULL_TREE;
1272 }
1273
1274 /* Calculate sqrt(x). When n is not 1 or -1, multiply it by the
1275 result of the optimal multiply sequence just calculated. */
1276 sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);
1277
1278 if (absu_hwi (n) == 1)
1279 result = sqrt_arg0;
1280 else
1281 result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1282 sqrt_arg0, powi_x_ndiv2);
1283
1284 /* If n is negative, reciprocate the result. */
1285 if (n < 0)
1286 result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
1287 build_real (type, dconst1), result);
1288 return result;
1289 }
1290
1291 /* Optimize pow(x,c), where 3c = n for some nonzero integer n, into
1292
1293 powi(x, n/3) * powi(cbrt(x), n%3), n > 0;
1294 1.0 / (powi(x, abs(n)/3) * powi(cbrt(x), abs(n)%3)), n < 0.
1295
1296 Do not calculate the first factor when n/3 = 0. As cbrt(x) is
1297 different from pow(x, 1./3.) due to rounding and behavior with
1298 negative x, we need to constrain this transformation to unsafe
1299 math and positive x or finite math. */
1300 real_from_integer (&dconst3, VOIDmode, 3, 0, 0);
1301 real_arithmetic (&c2, MULT_EXPR, &c, &dconst3);
1302 real_round (&c2, mode, &c2);
1303 n = real_to_integer (&c2);
1304 real_from_integer (&cint, VOIDmode, n, n < 0 ? -1 : 0, 0);
1305 real_arithmetic (&c2, RDIV_EXPR, &cint, &dconst3);
1306 real_convert (&c2, mode, &c2);
1307
1308 if (flag_unsafe_math_optimizations
1309 && cbrtfn
1310 && (gimple_val_nonnegative_real_p (arg0) || !HONOR_NANS (mode))
1311 && real_identical (&c2, &c)
1312 && !c2_is_int
1313 && optimize_function_for_speed_p (cfun)
1314 && powi_cost (n / 3) <= POWI_MAX_MULTS)
1315 {
1316 tree powi_x_ndiv3 = NULL_TREE;
1317
1318 /* Attempt to fold powi(arg0, abs(n/3)) into multiplies. If not
1319 possible or profitable, give up. Skip the degenerate case when
1320 abs(n) < 3, where the result is always 1. */
1321 if (absu_hwi (n) >= 3)
1322 {
1323 powi_x_ndiv3 = gimple_expand_builtin_powi (gsi, loc, arg0,
1324 abs_hwi (n / 3));
1325 if (!powi_x_ndiv3)
1326 return NULL_TREE;
1327 }
1328
1329 /* Calculate powi(cbrt(x), n%3). Don't use gimple_expand_builtin_powi
1330 as that creates an unnecessary variable. Instead, just produce
1331 either cbrt(x) or cbrt(x) * cbrt(x). */
1332 cbrt_x = build_and_insert_call (gsi, loc, cbrtfn, arg0);
1333
1334 if (absu_hwi (n) % 3 == 1)
1335 powi_cbrt_x = cbrt_x;
1336 else
1337 powi_cbrt_x = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1338 cbrt_x, cbrt_x);
1339
1340 /* Multiply the two subexpressions, unless powi(x,abs(n)/3) = 1. */
1341 if (absu_hwi (n) < 3)
1342 result = powi_cbrt_x;
1343 else
1344 result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1345 powi_x_ndiv3, powi_cbrt_x);
1346
1347 /* If n is negative, reciprocate the result. */
1348 if (n < 0)
1349 result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
1350 build_real (type, dconst1), result);
1351
1352 return result;
1353 }
1354
1355 /* No optimizations succeeded. */
1356 return NULL_TREE;
1357 }
1358
1359 /* ARG is the argument to a cabs builtin call in GSI with location info
1360 LOC. Create a sequence of statements prior to GSI that calculates
1361 sqrt(R*R + I*I), where R and I are the real and imaginary components
1362 of ARG, respectively. Return an expression holding the result. */
1363
1364 static tree
1365 gimple_expand_builtin_cabs (gimple_stmt_iterator *gsi, location_t loc, tree arg)
1366 {
1367 tree real_part, imag_part, addend1, addend2, sum, result;
1368 tree type = TREE_TYPE (TREE_TYPE (arg));
1369 tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
1370 enum machine_mode mode = TYPE_MODE (type);
1371
1372 if (!flag_unsafe_math_optimizations
1373 || !optimize_bb_for_speed_p (gimple_bb (gsi_stmt (*gsi)))
1374 || !sqrtfn
1375 || optab_handler (sqrt_optab, mode) == CODE_FOR_nothing)
1376 return NULL_TREE;
1377
1378 real_part = build_and_insert_ref (gsi, loc, type, "cabs",
1379 REALPART_EXPR, arg);
1380 addend1 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
1381 real_part, real_part);
1382 imag_part = build_and_insert_ref (gsi, loc, type, "cabs",
1383 IMAGPART_EXPR, arg);
1384 addend2 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
1385 imag_part, imag_part);
1386 sum = build_and_insert_binop (gsi, loc, "cabs", PLUS_EXPR, addend1, addend2);
1387 result = build_and_insert_call (gsi, loc, sqrtfn, sum);
1388
1389 return result;
1390 }
1391
1392 /* Go through all calls to sin, cos and cexpi and call execute_cse_sincos_1
1393 on the SSA_NAME argument of each of them. Also expand powi(x,n) into
1394 an optimal number of multiplies, when n is a constant. */
1395
1396 static unsigned int
1397 execute_cse_sincos (void)
1398 {
1399 basic_block bb;
1400 bool cfg_changed = false;
1401
1402 calculate_dominance_info (CDI_DOMINATORS);
1403 memset (&sincos_stats, 0, sizeof (sincos_stats));
1404
1405 FOR_EACH_BB (bb)
1406 {
1407 gimple_stmt_iterator gsi;
1408 bool cleanup_eh = false;
1409
1410 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1411 {
1412 gimple stmt = gsi_stmt (gsi);
1413 tree fndecl;
1414
1415          /* Only the last stmt in a bb could throw; no need to call
1416 gimple_purge_dead_eh_edges if we change something in the middle
1417 of a basic block. */
1418 cleanup_eh = false;
1419
1420 if (is_gimple_call (stmt)
1421 && gimple_call_lhs (stmt)
1422 && (fndecl = gimple_call_fndecl (stmt))
1423 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
1424 {
1425 tree arg, arg0, arg1, result;
1426 HOST_WIDE_INT n;
1427 location_t loc;
1428
1429 switch (DECL_FUNCTION_CODE (fndecl))
1430 {
1431 CASE_FLT_FN (BUILT_IN_COS):
1432 CASE_FLT_FN (BUILT_IN_SIN):
1433 CASE_FLT_FN (BUILT_IN_CEXPI):
1434 /* Make sure we have either sincos or cexp. */
1435 if (!TARGET_HAS_SINCOS && !TARGET_C99_FUNCTIONS)
1436 break;
1437
1438 arg = gimple_call_arg (stmt, 0);
1439 if (TREE_CODE (arg) == SSA_NAME)
1440 cfg_changed |= execute_cse_sincos_1 (arg);
1441 break;
1442
1443 CASE_FLT_FN (BUILT_IN_POW):
1444 arg0 = gimple_call_arg (stmt, 0);
1445 arg1 = gimple_call_arg (stmt, 1);
1446
1447 loc = gimple_location (stmt);
1448 result = gimple_expand_builtin_pow (&gsi, loc, arg0, arg1);
1449
1450 if (result)
1451 {
1452 tree lhs = gimple_get_lhs (stmt);
1453 gimple new_stmt = gimple_build_assign (lhs, result);
1454 gimple_set_location (new_stmt, loc);
1455 unlink_stmt_vdef (stmt);
1456 gsi_replace (&gsi, new_stmt, true);
1457 cleanup_eh = true;
1458 if (gimple_vdef (stmt))
1459 release_ssa_name (gimple_vdef (stmt));
1460 }
1461 break;
1462
1463 CASE_FLT_FN (BUILT_IN_POWI):
1464 arg0 = gimple_call_arg (stmt, 0);
1465 arg1 = gimple_call_arg (stmt, 1);
1466 loc = gimple_location (stmt);
1467
1468 if (real_minus_onep (arg0))
1469 {
1470 tree t0, t1, cond, one, minus_one;
1471 gimple stmt;
1472
1473 t0 = TREE_TYPE (arg0);
1474 t1 = TREE_TYPE (arg1);
1475 one = build_real (t0, dconst1);
1476 minus_one = build_real (t0, dconstm1);
1477
1478 cond = make_temp_ssa_name (t1, NULL, "powi_cond");
1479 stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, cond,
1480 arg1,
1481 build_int_cst (t1,
1482 1));
1483 gimple_set_location (stmt, loc);
1484 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
1485
1486 result = make_temp_ssa_name (t0, NULL, "powi");
1487 stmt = gimple_build_assign_with_ops (COND_EXPR, result,
1488 cond,
1489 minus_one, one);
1490 gimple_set_location (stmt, loc);
1491 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
1492 }
1493 else
1494 {
1495 if (!host_integerp (arg1, 0))
1496 break;
1497
1498 n = TREE_INT_CST_LOW (arg1);
1499 result = gimple_expand_builtin_powi (&gsi, loc, arg0, n);
1500 }
1501
1502 if (result)
1503 {
1504 tree lhs = gimple_get_lhs (stmt);
1505 gimple new_stmt = gimple_build_assign (lhs, result);
1506 gimple_set_location (new_stmt, loc);
1507 unlink_stmt_vdef (stmt);
1508 gsi_replace (&gsi, new_stmt, true);
1509 cleanup_eh = true;
1510 if (gimple_vdef (stmt))
1511 release_ssa_name (gimple_vdef (stmt));
1512 }
1513 break;
1514
1515 CASE_FLT_FN (BUILT_IN_CABS):
1516 arg0 = gimple_call_arg (stmt, 0);
1517 loc = gimple_location (stmt);
1518 result = gimple_expand_builtin_cabs (&gsi, loc, arg0);
1519
1520 if (result)
1521 {
1522 tree lhs = gimple_get_lhs (stmt);
1523 gimple new_stmt = gimple_build_assign (lhs, result);
1524 gimple_set_location (new_stmt, loc);
1525 unlink_stmt_vdef (stmt);
1526 gsi_replace (&gsi, new_stmt, true);
1527 cleanup_eh = true;
1528 if (gimple_vdef (stmt))
1529 release_ssa_name (gimple_vdef (stmt));
1530 }
1531 break;
1532
1533 default:;
1534 }
1535 }
1536 }
1537 if (cleanup_eh)
1538 cfg_changed |= gimple_purge_dead_eh_edges (bb);
1539 }
1540
1541 statistics_counter_event (cfun, "sincos statements inserted",
1542 sincos_stats.inserted);
1543
1544 free_dominance_info (CDI_DOMINATORS);
1545 return cfg_changed ? TODO_cleanup_cfg : 0;
1546 }
1547
1548 static bool
1549 gate_cse_sincos (void)
1550 {
1551 /* We no longer require either sincos or cexp, since powi expansion
1552 piggybacks on this pass. */
1553 return optimize;
1554 }
1555
1556 namespace {
1557
1558 const pass_data pass_data_cse_sincos =
1559 {
1560 GIMPLE_PASS, /* type */
1561 "sincos", /* name */
1562 OPTGROUP_NONE, /* optinfo_flags */
1563 true, /* has_gate */
1564 true, /* has_execute */
1565 TV_NONE, /* tv_id */
1566 PROP_ssa, /* properties_required */
1567 0, /* properties_provided */
1568 0, /* properties_destroyed */
1569 0, /* todo_flags_start */
1570 ( TODO_update_ssa | TODO_verify_ssa
1571 | TODO_verify_stmts ), /* todo_flags_finish */
1572 };
1573
1574 class pass_cse_sincos : public gimple_opt_pass
1575 {
1576 public:
1577 pass_cse_sincos(gcc::context *ctxt)
1578 : gimple_opt_pass(pass_data_cse_sincos, ctxt)
1579 {}
1580
1581 /* opt_pass methods: */
1582 bool gate () { return gate_cse_sincos (); }
1583 unsigned int execute () { return execute_cse_sincos (); }
1584
1585 }; // class pass_cse_sincos
1586
1587 } // anon namespace
1588
1589 gimple_opt_pass *
1590 make_pass_cse_sincos (gcc::context *ctxt)
1591 {
1592 return new pass_cse_sincos (ctxt);
1593 }
1594
1595 /* A symbolic number is used to detect byte permutation and selection
1596 patterns. Therefore the field N contains an artificial number
1597 consisting of byte size markers:
1598
1599 0 - byte has the value 0
1600 1..size - byte contains the content of the byte
1601 number indexed with that value minus one */
1602
1603 struct symbolic_number {
1604 unsigned HOST_WIDEST_INT n;
1605 int size;
1606 };
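/* For illustration only: for a 32-bit operand, find_bswap_1 seeds the
   symbolic number with 0x04030201 (source byte 1 in the least significant
   position).  For a typical open-coded swap of an unsigned 32-bit x,

       ((x >> 24) & 0x000000ff) | ((x >> 8) & 0x0000ff00)
     | ((x << 8) & 0x00ff0000) | ((x << 24) & 0xff000000)

   the four operands evaluate symbolically to 0x00000004, 0x00000300,
   0x00020000 and 0x01000000; ORing them yields 0x01020304, the value
   find_bswap expects for a complete 4-byte swap.  */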
1607
1608 /* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
1609 number N. Return false if the requested operation is not permitted
1610 on a symbolic number. */
1611
1612 static inline bool
1613 do_shift_rotate (enum tree_code code,
1614 struct symbolic_number *n,
1615 int count)
1616 {
1617 if (count % 8 != 0)
1618 return false;
1619
1620 /* Zero out the extra bits of N in order to avoid them being shifted
1621 into the significant bits. */
1622 if (n->size < (int)sizeof (HOST_WIDEST_INT))
1623 n->n &= ((unsigned HOST_WIDEST_INT)1 << (n->size * BITS_PER_UNIT)) - 1;
1624
1625 switch (code)
1626 {
1627 case LSHIFT_EXPR:
1628 n->n <<= count;
1629 break;
1630 case RSHIFT_EXPR:
1631 n->n >>= count;
1632 break;
1633 case LROTATE_EXPR:
1634 n->n = (n->n << count) | (n->n >> ((n->size * BITS_PER_UNIT) - count));
1635 break;
1636 case RROTATE_EXPR:
1637 n->n = (n->n >> count) | (n->n << ((n->size * BITS_PER_UNIT) - count));
1638 break;
1639 default:
1640 return false;
1641 }
1642 /* Zero unused bits for size. */
1643 if (n->size < (int)sizeof (HOST_WIDEST_INT))
1644 n->n &= ((unsigned HOST_WIDEST_INT)1 << (n->size * BITS_PER_UNIT)) - 1;
1645 return true;
1646 }
1647
1648 /* Perform sanity checking for the symbolic number N and the gimple
1649 statement STMT. */
1650
1651 static inline bool
1652 verify_symbolic_number_p (struct symbolic_number *n, gimple stmt)
1653 {
1654 tree lhs_type;
1655
1656 lhs_type = gimple_expr_type (stmt);
1657
1658 if (TREE_CODE (lhs_type) != INTEGER_TYPE)
1659 return false;
1660
1661 if (TYPE_PRECISION (lhs_type) != n->size * BITS_PER_UNIT)
1662 return false;
1663
1664 return true;
1665 }
1666
1667 /* find_bswap_1 invokes itself recursively with N and tries to perform
1668 the operation given by the rhs of STMT on the result. If the
1669    operation can be performed, the function returns the tree
1670    expression of the source operand, and NULL otherwise. */
1671
1672 static tree
1673 find_bswap_1 (gimple stmt, struct symbolic_number *n, int limit)
1674 {
1675 enum tree_code code;
1676 tree rhs1, rhs2 = NULL;
1677 gimple rhs1_stmt, rhs2_stmt;
1678 tree source_expr1;
1679 enum gimple_rhs_class rhs_class;
1680
1681 if (!limit || !is_gimple_assign (stmt))
1682 return NULL_TREE;
1683
1684 rhs1 = gimple_assign_rhs1 (stmt);
1685
1686 if (TREE_CODE (rhs1) != SSA_NAME)
1687 return NULL_TREE;
1688
1689 code = gimple_assign_rhs_code (stmt);
1690 rhs_class = gimple_assign_rhs_class (stmt);
1691 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
1692
1693 if (rhs_class == GIMPLE_BINARY_RHS)
1694 rhs2 = gimple_assign_rhs2 (stmt);
1695
1696 /* Handle unary rhs and binary rhs with integer constants as second
1697 operand. */
1698
1699 if (rhs_class == GIMPLE_UNARY_RHS
1700 || (rhs_class == GIMPLE_BINARY_RHS
1701 && TREE_CODE (rhs2) == INTEGER_CST))
1702 {
1703 if (code != BIT_AND_EXPR
1704 && code != LSHIFT_EXPR
1705 && code != RSHIFT_EXPR
1706 && code != LROTATE_EXPR
1707 && code != RROTATE_EXPR
1708 && code != NOP_EXPR
1709 && code != CONVERT_EXPR)
1710 return NULL_TREE;
1711
1712 source_expr1 = find_bswap_1 (rhs1_stmt, n, limit - 1);
1713
1714       /* If find_bswap_1 returned NULL, STMT is a leaf node and we have
1715 to initialize the symbolic number. */
1716 if (!source_expr1)
1717 {
1718 /* Set up the symbolic number N by setting each byte to a
1719 value between 1 and the byte size of rhs1. The highest
1720 order byte is set to n->size and the lowest order
1721 byte to 1. */
1722 n->size = TYPE_PRECISION (TREE_TYPE (rhs1));
1723 if (n->size % BITS_PER_UNIT != 0)
1724 return NULL_TREE;
1725 n->size /= BITS_PER_UNIT;
1726 n->n = (sizeof (HOST_WIDEST_INT) < 8 ? 0 :
1727 (unsigned HOST_WIDEST_INT)0x08070605 << 32 | 0x04030201);
1728
1729 if (n->size < (int)sizeof (HOST_WIDEST_INT))
1730 n->n &= ((unsigned HOST_WIDEST_INT)1 <<
1731 (n->size * BITS_PER_UNIT)) - 1;
1732
1733 source_expr1 = rhs1;
1734 }
1735
1736 switch (code)
1737 {
1738 case BIT_AND_EXPR:
1739 {
1740 int i;
1741 unsigned HOST_WIDEST_INT val = widest_int_cst_value (rhs2);
1742 unsigned HOST_WIDEST_INT tmp = val;
1743
1744 /* Only constants masking full bytes are allowed. */
1745 for (i = 0; i < n->size; i++, tmp >>= BITS_PER_UNIT)
1746 if ((tmp & 0xff) != 0 && (tmp & 0xff) != 0xff)
1747 return NULL_TREE;
1748
1749 n->n &= val;
1750 }
1751 break;
1752 case LSHIFT_EXPR:
1753 case RSHIFT_EXPR:
1754 case LROTATE_EXPR:
1755 case RROTATE_EXPR:
1756 if (!do_shift_rotate (code, n, (int)TREE_INT_CST_LOW (rhs2)))
1757 return NULL_TREE;
1758 break;
1759 CASE_CONVERT:
1760 {
1761 int type_size;
1762
1763 type_size = TYPE_PRECISION (gimple_expr_type (stmt));
1764 if (type_size % BITS_PER_UNIT != 0)
1765 return NULL_TREE;
1766
1767 if (type_size / BITS_PER_UNIT < (int)(sizeof (HOST_WIDEST_INT)))
1768 {
1769 /* If STMT casts to a smaller type mask out the bits not
1770 belonging to the target type. */
1771 n->n &= ((unsigned HOST_WIDEST_INT)1 << type_size) - 1;
1772 }
1773 n->size = type_size / BITS_PER_UNIT;
1774 }
1775 break;
1776 default:
1777 return NULL_TREE;
1778 };
1779 return verify_symbolic_number_p (n, stmt) ? source_expr1 : NULL;
1780 }
1781
1782 /* Handle binary rhs. */
1783
1784 if (rhs_class == GIMPLE_BINARY_RHS)
1785 {
1786 struct symbolic_number n1, n2;
1787 tree source_expr2;
1788
1789 if (code != BIT_IOR_EXPR)
1790 return NULL_TREE;
1791
1792 if (TREE_CODE (rhs2) != SSA_NAME)
1793 return NULL_TREE;
1794
1795 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
1796
1797 switch (code)
1798 {
1799 case BIT_IOR_EXPR:
1800 source_expr1 = find_bswap_1 (rhs1_stmt, &n1, limit - 1);
1801
1802 if (!source_expr1)
1803 return NULL_TREE;
1804
1805 source_expr2 = find_bswap_1 (rhs2_stmt, &n2, limit - 1);
1806
1807 if (source_expr1 != source_expr2
1808 || n1.size != n2.size)
1809 return NULL_TREE;
1810
1811 n->size = n1.size;
1812 n->n = n1.n | n2.n;
1813
1814 if (!verify_symbolic_number_p (n, stmt))
1815 return NULL_TREE;
1816
1817 break;
1818 default:
1819 return NULL_TREE;
1820 }
1821 return source_expr1;
1822 }
1823 return NULL_TREE;
1824 }
1825
1826 /* Check if STMT completes a bswap implementation consisting of ORs,
1827 SHIFTs and ANDs. Return the source tree expression on which the
1828 byte swap is performed, or NULL_TREE if no bswap was found. */
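/* An illustrative example (one pattern among many that reduce to the
   same symbolic number): for an unsigned 32-bit x, the expression

     ((x & 0x000000ff) << 24)
   | ((x & 0x0000ff00) << 8)
   | ((x & 0x00ff0000) >> 8)
   | ((x & 0xff000000) >> 24)

   evaluates symbolically to 0x01020304, which matches CMP below, so
   find_bswap returns x as the source expression.  */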
1829
1830 static tree
1831 find_bswap (gimple stmt)
1832 {
1833 /* The number which the find_bswap result should match in order to
1834 have a full byte swap. The number is shifted to the right according
1835 to the size of the symbolic number before it is used. */
1836 unsigned HOST_WIDEST_INT cmp =
1837 sizeof (HOST_WIDEST_INT) < 8 ? 0 :
1838 (unsigned HOST_WIDEST_INT)0x01020304 << 32 | 0x05060708;
1839
1840 struct symbolic_number n;
1841 tree source_expr;
1842 int limit;
1843
1844 /* The last parameter determines the search depth limit. It usually
1845 correlates directly to the number n of bytes to be touched. We
1846 increase that number by log2(n) + 1 here in order to also
1847 cover signed -> unsigned conversions of the src operand as can be seen
1848 in libgcc, and for the initial shift/and operation of the src operand. */
1849 limit = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt)));
1850 limit += 1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit);
1851 source_expr = find_bswap_1 (stmt, &n, limit);
1852
1853 if (!source_expr)
1854 return NULL_TREE;
1855
1856 /* Zero out the extra bits of N and CMP. */
1857 if (n.size < (int)sizeof (HOST_WIDEST_INT))
1858 {
1859 unsigned HOST_WIDEST_INT mask =
1860 ((unsigned HOST_WIDEST_INT)1 << (n.size * BITS_PER_UNIT)) - 1;
1861
1862 n.n &= mask;
1863 cmp >>= (sizeof (HOST_WIDEST_INT) - n.size) * BITS_PER_UNIT;
1864 }
1865
1866 /* A complete byte swap should make the symbolic number start
1867 with the largest digit in the highest order byte. */
1868 if (cmp != n.n)
1869 return NULL_TREE;
1870
1871 return source_expr;
1872 }
1873
1874 /* Find manual byte swap implementations and turn them into a bswap
1875 builtin invocation. */
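/* A minimal sketch of the rewrite (assuming __builtin_bswap32 and the
   corresponding optab are available on the target):

     y = (x >> 24) | ((x >> 8) & 0xff00)
         | ((x << 8) & 0xff0000) | (x << 24);

   becomes

     y = __builtin_bswap32 (x);

   with conversion statements inserted around the call whenever the
   type of x or y differs from the builtin's argument type.  */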
1876
1877 static unsigned int
1878 execute_optimize_bswap (void)
1879 {
1880 basic_block bb;
1881 bool bswap16_p, bswap32_p, bswap64_p;
1882 bool changed = false;
1883 tree bswap16_type = NULL_TREE, bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;
1884
1885 if (BITS_PER_UNIT != 8)
1886 return 0;
1887
1888 if (sizeof (HOST_WIDEST_INT) < 8)
1889 return 0;
1890
1891 bswap16_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP16)
1892 && optab_handler (bswap_optab, HImode) != CODE_FOR_nothing);
1893 bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
1894 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
1895 bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
1896 && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
1897 || (bswap32_p && word_mode == SImode)));
1898
1899 if (!bswap16_p && !bswap32_p && !bswap64_p)
1900 return 0;
1901
1902 /* Determine the argument type of the builtins. The code later on
1903 assumes that the return and argument type are the same. */
1904 if (bswap16_p)
1905 {
1906 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP16);
1907 bswap16_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1908 }
1909
1910 if (bswap32_p)
1911 {
1912 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
1913 bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1914 }
1915
1916 if (bswap64_p)
1917 {
1918 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
1919 bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1920 }
1921
1922 memset (&bswap_stats, 0, sizeof (bswap_stats));
1923
1924 FOR_EACH_BB (bb)
1925 {
1926 gimple_stmt_iterator gsi;
1927
1928 /* We do a reverse scan for bswap patterns to make sure we get the
1929 widest match. As bswap pattern matching doesn't handle
1930 previously inserted smaller bswap replacements as sub-
1931 patterns, the wider variant wouldn't be detected. */
1932 for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
1933 {
1934 gimple stmt = gsi_stmt (gsi);
1935 tree bswap_src, bswap_type;
1936 tree bswap_tmp;
1937 tree fndecl = NULL_TREE;
1938 int type_size;
1939 gimple call;
1940
1941 if (!is_gimple_assign (stmt)
1942 || gimple_assign_rhs_code (stmt) != BIT_IOR_EXPR)
1943 continue;
1944
1945 type_size = TYPE_PRECISION (gimple_expr_type (stmt));
1946
1947 switch (type_size)
1948 {
1949 case 16:
1950 if (bswap16_p)
1951 {
1952 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP16);
1953 bswap_type = bswap16_type;
1954 }
1955 break;
1956 case 32:
1957 if (bswap32_p)
1958 {
1959 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
1960 bswap_type = bswap32_type;
1961 }
1962 break;
1963 case 64:
1964 if (bswap64_p)
1965 {
1966 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
1967 bswap_type = bswap64_type;
1968 }
1969 break;
1970 default:
1971 continue;
1972 }
1973
1974 if (!fndecl)
1975 continue;
1976
1977 bswap_src = find_bswap (stmt);
1978
1979 if (!bswap_src)
1980 continue;
1981
1982 changed = true;
1983 if (type_size == 16)
1984 bswap_stats.found_16bit++;
1985 else if (type_size == 32)
1986 bswap_stats.found_32bit++;
1987 else
1988 bswap_stats.found_64bit++;
1989
1990 bswap_tmp = bswap_src;
1991
1992 /* Convert the src expression if necessary. */
1993 if (!useless_type_conversion_p (TREE_TYPE (bswap_tmp), bswap_type))
1994 {
1995 gimple convert_stmt;
1996 bswap_tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
1997 convert_stmt = gimple_build_assign_with_ops
1998 (NOP_EXPR, bswap_tmp, bswap_src, NULL);
1999 gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
2000 }
2001
2002 call = gimple_build_call (fndecl, 1, bswap_tmp);
2003
2004 bswap_tmp = gimple_assign_lhs (stmt);
2005
2006 /* Convert the result if necessary. */
2007 if (!useless_type_conversion_p (TREE_TYPE (bswap_tmp), bswap_type))
2008 {
2009 gimple convert_stmt;
2010 bswap_tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
2011 convert_stmt = gimple_build_assign_with_ops
2012 (NOP_EXPR, gimple_assign_lhs (stmt), bswap_tmp, NULL);
2013 gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
2014 }
2015
2016 gimple_call_set_lhs (call, bswap_tmp);
2017
2018 if (dump_file)
2019 {
2020 fprintf (dump_file, "%d bit bswap implementation found at: ",
2021 (int)type_size);
2022 print_gimple_stmt (dump_file, stmt, 0, 0);
2023 }
2024
2025 gsi_insert_after (&gsi, call, GSI_SAME_STMT);
2026 gsi_remove (&gsi, true);
2027 }
2028 }
2029
2030 statistics_counter_event (cfun, "16-bit bswap implementations found",
2031 bswap_stats.found_16bit);
2032 statistics_counter_event (cfun, "32-bit bswap implementations found",
2033 bswap_stats.found_32bit);
2034 statistics_counter_event (cfun, "64-bit bswap implementations found",
2035 bswap_stats.found_64bit);
2036
2037 return (changed ? TODO_update_ssa | TODO_verify_ssa
2038 | TODO_verify_stmts : 0);
2039 }
2040
2041 static bool
2042 gate_optimize_bswap (void)
2043 {
2044 return flag_expensive_optimizations && optimize;
2045 }
2046
2047 namespace {
2048
2049 const pass_data pass_data_optimize_bswap =
2050 {
2051 GIMPLE_PASS, /* type */
2052 "bswap", /* name */
2053 OPTGROUP_NONE, /* optinfo_flags */
2054 true, /* has_gate */
2055 true, /* has_execute */
2056 TV_NONE, /* tv_id */
2057 PROP_ssa, /* properties_required */
2058 0, /* properties_provided */
2059 0, /* properties_destroyed */
2060 0, /* todo_flags_start */
2061 0, /* todo_flags_finish */
2062 };
2063
2064 class pass_optimize_bswap : public gimple_opt_pass
2065 {
2066 public:
2067 pass_optimize_bswap(gcc::context *ctxt)
2068 : gimple_opt_pass(pass_data_optimize_bswap, ctxt)
2069 {}
2070
2071 /* opt_pass methods: */
2072 bool gate () { return gate_optimize_bswap (); }
2073 unsigned int execute () { return execute_optimize_bswap (); }
2074
2075 }; // class pass_optimize_bswap
2076
2077 } // anon namespace
2078
2079 gimple_opt_pass *
2080 make_pass_optimize_bswap (gcc::context *ctxt)
2081 {
2082 return new pass_optimize_bswap (ctxt);
2083 }
2084
2085 /* Return true if stmt is a type conversion operation that can be stripped
2086 when used in a widening multiply operation. */
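/* Illustrative cases (a sketch assuming 16-bit short, 32-bit int and a
   64-bit long long RESULT_TYPE): the conversion  t_1 = (long long) i_2;
   with i_2 of type int is strippable because its type has the same
   precision as the result; and  t_3 = (int) us_4;  with us_4 of type
   unsigned short is strippable because the innermost type is unsigned
   and strictly narrower than the intermediate type.  */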
2087 static bool
2088 widening_mult_conversion_strippable_p (tree result_type, gimple stmt)
2089 {
2090 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
2091
2092 if (TREE_CODE (result_type) == INTEGER_TYPE)
2093 {
2094 tree op_type;
2095 tree inner_op_type;
2096
2097 if (!CONVERT_EXPR_CODE_P (rhs_code))
2098 return false;
2099
2100 op_type = TREE_TYPE (gimple_assign_lhs (stmt));
2101
2102 /* If the type of OP has the same precision as the result, then
2103 we can strip this conversion. The multiply operation will be
2104 selected to create the correct extension as a by-product. */
2105 if (TYPE_PRECISION (result_type) == TYPE_PRECISION (op_type))
2106 return true;
2107
2108 /* We can also strip a conversion if it preserves the signedness of
2109 the operation and doesn't narrow the range. */
2110 inner_op_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
2111
2112 /* If the inner-most type is unsigned, then we can strip any
2113 intermediate widening operation. If it's signed, then the
2114 intermediate widening operation must also be signed. */
2115 if ((TYPE_UNSIGNED (inner_op_type)
2116 || TYPE_UNSIGNED (op_type) == TYPE_UNSIGNED (inner_op_type))
2117 && TYPE_PRECISION (op_type) > TYPE_PRECISION (inner_op_type))
2118 return true;
2119
2120 return false;
2121 }
2122
2123 return rhs_code == FIXED_CONVERT_EXPR;
2124 }
2125
2126 /* Return true if RHS is a suitable operand for a widening multiplication,
2127 assuming a target type of TYPE.
2128 There are two cases:
2129
2130 - RHS makes some value at least twice as wide. Store that value
2131 in *NEW_RHS_OUT if so, and store its type in *TYPE_OUT.
2132
2133 - RHS is an integer constant. Store that value in *NEW_RHS_OUT if so,
2134 and store NULL in *TYPE_OUT. */
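/* For instance (a sketch assuming 32-bit int and a 64-bit long long
   TYPE): if RHS is t_1 defined by  t_1 = (long long) i_2;  with i_2 of
   type int, then *NEW_RHS_OUT = i_2 and *TYPE_OUT = int.  If RHS is the
   integer constant 42, then *NEW_RHS_OUT = 42 and *TYPE_OUT = NULL.  */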
2135
2136 static bool
2137 is_widening_mult_rhs_p (tree type, tree rhs, tree *type_out,
2138 tree *new_rhs_out)
2139 {
2140 gimple stmt;
2141 tree type1, rhs1;
2142
2143 if (TREE_CODE (rhs) == SSA_NAME)
2144 {
2145 stmt = SSA_NAME_DEF_STMT (rhs);
2146 if (is_gimple_assign (stmt))
2147 {
2148 if (! widening_mult_conversion_strippable_p (type, stmt))
2149 rhs1 = rhs;
2150 else
2151 {
2152 rhs1 = gimple_assign_rhs1 (stmt);
2153
2154 if (TREE_CODE (rhs1) == INTEGER_CST)
2155 {
2156 *new_rhs_out = rhs1;
2157 *type_out = NULL;
2158 return true;
2159 }
2160 }
2161 }
2162 else
2163 rhs1 = rhs;
2164
2165 type1 = TREE_TYPE (rhs1);
2166
2167 if (TREE_CODE (type1) != TREE_CODE (type)
2168 || TYPE_PRECISION (type1) * 2 > TYPE_PRECISION (type))
2169 return false;
2170
2171 *new_rhs_out = rhs1;
2172 *type_out = type1;
2173 return true;
2174 }
2175
2176 if (TREE_CODE (rhs) == INTEGER_CST)
2177 {
2178 *new_rhs_out = rhs;
2179 *type_out = NULL;
2180 return true;
2181 }
2182
2183 return false;
2184 }
2185
2186 /* Return true if STMT performs a widening multiplication, assuming the
2187 output type is TYPE. If so, store the unwidened types of the operands
2188 in *TYPE1_OUT and *TYPE2_OUT respectively. Also fill *RHS1_OUT and
2189 *RHS2_OUT such that converting those operands to types *TYPE1_OUT
2190 and *TYPE2_OUT would give the operands of the multiplication. */
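/* A short illustration (assuming 32-bit int operands and a 64-bit
   long long result): for

     c_5 = (long long) a_1 * (long long) b_2;

   written in GIMPLE as a MULT_EXPR whose operands are defined by the
   two conversions, this returns true with *TYPE1_OUT = *TYPE2_OUT = int
   and *RHS1_OUT = a_1, *RHS2_OUT = b_2.  */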
2191
2192 static bool
2193 is_widening_mult_p (gimple stmt,
2194 tree *type1_out, tree *rhs1_out,
2195 tree *type2_out, tree *rhs2_out)
2196 {
2197 tree type = TREE_TYPE (gimple_assign_lhs (stmt));
2198
2199 if (TREE_CODE (type) != INTEGER_TYPE
2200 && TREE_CODE (type) != FIXED_POINT_TYPE)
2201 return false;
2202
2203 if (!is_widening_mult_rhs_p (type, gimple_assign_rhs1 (stmt), type1_out,
2204 rhs1_out))
2205 return false;
2206
2207 if (!is_widening_mult_rhs_p (type, gimple_assign_rhs2 (stmt), type2_out,
2208 rhs2_out))
2209 return false;
2210
2211 if (*type1_out == NULL)
2212 {
2213 if (*type2_out == NULL || !int_fits_type_p (*rhs1_out, *type2_out))
2214 return false;
2215 *type1_out = *type2_out;
2216 }
2217
2218 if (*type2_out == NULL)
2219 {
2220 if (!int_fits_type_p (*rhs2_out, *type1_out))
2221 return false;
2222 *type2_out = *type1_out;
2223 }
2224
2225 /* Ensure that the larger of the two operands comes first. */
2226 if (TYPE_PRECISION (*type1_out) < TYPE_PRECISION (*type2_out))
2227 {
2228 tree tmp;
2229 tmp = *type1_out;
2230 *type1_out = *type2_out;
2231 *type2_out = tmp;
2232 tmp = *rhs1_out;
2233 *rhs1_out = *rhs2_out;
2234 *rhs2_out = tmp;
2235 }
2236
2237 return true;
2238 }
2239
2240 /* Process a single gimple statement STMT, which has a MULT_EXPR as
2241 its rhs, and try to convert it into a WIDEN_MULT_EXPR. The return
2242 value is true iff we converted the statement. */
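/* A minimal sketch of the rewrite (assuming the target provides a
   widening multiply optab for this mode pair):

     _1 = (long long int) a_2;
     _3 = (long long int) b_4;
     c_5 = _1 * _3;

   has its MULT_EXPR statement rewritten in place to

     c_5 = a_2 w* b_4;

   i.e. a WIDEN_MULT_EXPR operating directly on the narrow operands;
   the now-dead conversions are left for DCE.  */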
2243
2244 static bool
2245 convert_mult_to_widen (gimple stmt, gimple_stmt_iterator *gsi)
2246 {
2247 tree lhs, rhs1, rhs2, type, type1, type2;
2248 enum insn_code handler;
2249 enum machine_mode to_mode, from_mode, actual_mode;
2250 optab op;
2251 int actual_precision;
2252 location_t loc = gimple_location (stmt);
2253 bool from_unsigned1, from_unsigned2;
2254
2255 lhs = gimple_assign_lhs (stmt);
2256 type = TREE_TYPE (lhs);
2257 if (TREE_CODE (type) != INTEGER_TYPE)
2258 return false;
2259
2260 if (!is_widening_mult_p (stmt, &type1, &rhs1, &type2, &rhs2))
2261 return false;
2262
2263 to_mode = TYPE_MODE (type);
2264 from_mode = TYPE_MODE (type1);
2265 from_unsigned1 = TYPE_UNSIGNED (type1);
2266 from_unsigned2 = TYPE_UNSIGNED (type2);
2267
2268 if (from_unsigned1 && from_unsigned2)
2269 op = umul_widen_optab;
2270 else if (!from_unsigned1 && !from_unsigned2)
2271 op = smul_widen_optab;
2272 else
2273 op = usmul_widen_optab;
2274
2275 handler = find_widening_optab_handler_and_mode (op, to_mode, from_mode,
2276 0, &actual_mode);
2277
2278 if (handler == CODE_FOR_nothing)
2279 {
2280 if (op != smul_widen_optab)
2281 {
2282 /* We can use a signed multiply with unsigned types as long as
2283 there is a wider mode to use, or it is the smaller of the two
2284 types that is unsigned. Note that type1 >= type2, always. */
2285 if ((TYPE_UNSIGNED (type1)
2286 && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
2287 || (TYPE_UNSIGNED (type2)
2288 && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
2289 {
2290 from_mode = GET_MODE_WIDER_MODE (from_mode);
2291 if (GET_MODE_SIZE (to_mode) <= GET_MODE_SIZE (from_mode))
2292 return false;
2293 }
2294
2295 op = smul_widen_optab;
2296 handler = find_widening_optab_handler_and_mode (op, to_mode,
2297 from_mode, 0,
2298 &actual_mode);
2299
2300 if (handler == CODE_FOR_nothing)
2301 return false;
2302
2303 from_unsigned1 = from_unsigned2 = false;
2304 }
2305 else
2306 return false;
2307 }
2308
2309 /* Ensure that the inputs to the handler are in the correct precision
2310 for the opcode. This will be the full mode size. */
2311 actual_precision = GET_MODE_PRECISION (actual_mode);
2312 if (2 * actual_precision > TYPE_PRECISION (type))
2313 return false;
2314 if (actual_precision != TYPE_PRECISION (type1)
2315 || from_unsigned1 != TYPE_UNSIGNED (type1))
2316 rhs1 = build_and_insert_cast (gsi, loc,
2317 build_nonstandard_integer_type
2318 (actual_precision, from_unsigned1), rhs1);
2319 if (actual_precision != TYPE_PRECISION (type2)
2320 || from_unsigned2 != TYPE_UNSIGNED (type2))
2321 rhs2 = build_and_insert_cast (gsi, loc,
2322 build_nonstandard_integer_type
2323 (actual_precision, from_unsigned2), rhs2);
2324
2325 /* Handle constants. */
2326 if (TREE_CODE (rhs1) == INTEGER_CST)
2327 rhs1 = fold_convert (type1, rhs1);
2328 if (TREE_CODE (rhs2) == INTEGER_CST)
2329 rhs2 = fold_convert (type2, rhs2);
2330
2331 gimple_assign_set_rhs1 (stmt, rhs1);
2332 gimple_assign_set_rhs2 (stmt, rhs2);
2333 gimple_assign_set_rhs_code (stmt, WIDEN_MULT_EXPR);
2334 update_stmt (stmt);
2335 widen_mul_stats.widen_mults_inserted++;
2336 return true;
2337 }
2338
2339 /* Process a single gimple statement STMT, which is found at the
2340 iterator GSI and has either a PLUS_EXPR or a MINUS_EXPR as its
2341 rhs (given by CODE), and try to convert it into a
2342 WIDEN_MULT_PLUS_EXPR or a WIDEN_MULT_MINUS_EXPR. The return value
2343 is true iff we converted the statement. */
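/* A minimal sketch of the rewrite (assuming the target provides a
   widening multiply-accumulate pattern for this mode):

     _1 = (long long int) a_2;
     _3 = (long long int) b_4;
     _5 = _1 * _3;
     acc_6 = _5 + acc_7;

   turns the addition into

     acc_6 = WIDEN_MULT_PLUS_EXPR <a_2, b_4, acc_7>;

   and a MINUS_EXPR is handled analogously with WIDEN_MULT_MINUS_EXPR.  */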
2344
2345 static bool
2346 convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple stmt,
2347 enum tree_code code)
2348 {
2349 gimple rhs1_stmt = NULL, rhs2_stmt = NULL;
2350 gimple conv1_stmt = NULL, conv2_stmt = NULL, conv_stmt;
2351 tree type, type1, type2, optype;
2352 tree lhs, rhs1, rhs2, mult_rhs1, mult_rhs2, add_rhs;
2353 enum tree_code rhs1_code = ERROR_MARK, rhs2_code = ERROR_MARK;
2354 optab this_optab;
2355 enum tree_code wmult_code;
2356 enum insn_code handler;
2357 enum machine_mode to_mode, from_mode, actual_mode;
2358 location_t loc = gimple_location (stmt);
2359 int actual_precision;
2360 bool from_unsigned1, from_unsigned2;
2361
2362 lhs = gimple_assign_lhs (stmt);
2363 type = TREE_TYPE (lhs);
2364 if (TREE_CODE (type) != INTEGER_TYPE
2365 && TREE_CODE (type) != FIXED_POINT_TYPE)
2366 return false;
2367
2368 if (code == MINUS_EXPR)
2369 wmult_code = WIDEN_MULT_MINUS_EXPR;
2370 else
2371 wmult_code = WIDEN_MULT_PLUS_EXPR;
2372
2373 rhs1 = gimple_assign_rhs1 (stmt);
2374 rhs2 = gimple_assign_rhs2 (stmt);
2375
2376 if (TREE_CODE (rhs1) == SSA_NAME)
2377 {
2378 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
2379 if (is_gimple_assign (rhs1_stmt))
2380 rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
2381 }
2382
2383 if (TREE_CODE (rhs2) == SSA_NAME)
2384 {
2385 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
2386 if (is_gimple_assign (rhs2_stmt))
2387 rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
2388 }
2389
2390 /* Allow for one conversion statement between the multiply
2391 and addition/subtraction statement. If there is more than
2392 one conversion then we assume they would invalidate this
2393 transformation. If that's not the case then they should have
2394 been folded before now. */
2395 if (CONVERT_EXPR_CODE_P (rhs1_code))
2396 {
2397 conv1_stmt = rhs1_stmt;
2398 rhs1 = gimple_assign_rhs1 (rhs1_stmt);
2399 if (TREE_CODE (rhs1) == SSA_NAME)
2400 {
2401 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
2402 if (is_gimple_assign (rhs1_stmt))
2403 rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
2404 }
2405 else
2406 return false;
2407 }
2408 if (CONVERT_EXPR_CODE_P (rhs2_code))
2409 {
2410 conv2_stmt = rhs2_stmt;
2411 rhs2 = gimple_assign_rhs1 (rhs2_stmt);
2412 if (TREE_CODE (rhs2) == SSA_NAME)
2413 {
2414 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
2415 if (is_gimple_assign (rhs2_stmt))
2416 rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
2417 }
2418 else
2419 return false;
2420 }
2421
2422 /* If the rhs code is WIDEN_MULT_EXPR then it would seem unnecessary to
2423 call is_widening_mult_p, but we still need the rhs values it returns.
2424
2425 It might also appear that it would be sufficient to use the existing
2426 operands of the widening multiply, but that would limit the choice of
2427 multiply-and-accumulate instructions. */
2428 if (code == PLUS_EXPR
2429 && (rhs1_code == MULT_EXPR || rhs1_code == WIDEN_MULT_EXPR))
2430 {
2431 if (!is_widening_mult_p (rhs1_stmt, &type1, &mult_rhs1,
2432 &type2, &mult_rhs2))
2433 return false;
2434 add_rhs = rhs2;
2435 conv_stmt = conv1_stmt;
2436 }
2437 else if (rhs2_code == MULT_EXPR || rhs2_code == WIDEN_MULT_EXPR)
2438 {
2439 if (!is_widening_mult_p (rhs2_stmt, &type1, &mult_rhs1,
2440 &type2, &mult_rhs2))
2441 return false;
2442 add_rhs = rhs1;
2443 conv_stmt = conv2_stmt;
2444 }
2445 else
2446 return false;
2447
2448 to_mode = TYPE_MODE (type);
2449 from_mode = TYPE_MODE (type1);
2450 from_unsigned1 = TYPE_UNSIGNED (type1);
2451 from_unsigned2 = TYPE_UNSIGNED (type2);
2452 optype = type1;
2453
2454 /* There's no such thing as a mixed sign madd yet, so use a wider mode. */
2455 if (from_unsigned1 != from_unsigned2)
2456 {
2457 if (!INTEGRAL_TYPE_P (type))
2458 return false;
2459 /* We can use a signed multiply with unsigned types as long as
2460 there is a wider mode to use, or it is the smaller of the two
2461 types that is unsigned. Note that type1 >= type2, always. */
2462 if ((from_unsigned1
2463 && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
2464 || (from_unsigned2
2465 && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
2466 {
2467 from_mode = GET_MODE_WIDER_MODE (from_mode);
2468 if (GET_MODE_SIZE (from_mode) >= GET_MODE_SIZE (to_mode))
2469 return false;
2470 }
2471
2472 from_unsigned1 = from_unsigned2 = false;
2473 optype = build_nonstandard_integer_type (GET_MODE_PRECISION (from_mode),
2474 false);
2475 }
2476
2477 /* If there was a conversion between the multiply and addition
2478 then we need to make sure it fits a multiply-and-accumulate.
2479 There should be a single mode change which does not change the
2480 value. */
2481 if (conv_stmt)
2482 {
2483 /* We use the original, unmodified data types for this. */
2484 tree from_type = TREE_TYPE (gimple_assign_rhs1 (conv_stmt));
2485 tree to_type = TREE_TYPE (gimple_assign_lhs (conv_stmt));
2486 int data_size = TYPE_PRECISION (type1) + TYPE_PRECISION (type2);
2487 bool is_unsigned = TYPE_UNSIGNED (type1) && TYPE_UNSIGNED (type2);
2488
2489 if (TYPE_PRECISION (from_type) > TYPE_PRECISION (to_type))
2490 {
2491 /* Conversion is a truncate. */
2492 if (TYPE_PRECISION (to_type) < data_size)
2493 return false;
2494 }
2495 else if (TYPE_PRECISION (from_type) < TYPE_PRECISION (to_type))
2496 {
2497 /* Conversion is an extend. Check it's the right sort. */
2498 if (TYPE_UNSIGNED (from_type) != is_unsigned
2499 && !(is_unsigned && TYPE_PRECISION (from_type) > data_size))
2500 return false;
2501 }
2502 /* else convert is a no-op for our purposes. */
2503 }
2504
2505 /* Verify that the machine can perform a widening multiply
2506 accumulate in this mode/signedness combination, otherwise
2507 this transformation is likely to pessimize code. */
2508 this_optab = optab_for_tree_code (wmult_code, optype, optab_default);
2509 handler = find_widening_optab_handler_and_mode (this_optab, to_mode,
2510 from_mode, 0, &actual_mode);
2511
2512 if (handler == CODE_FOR_nothing)
2513 return false;
2514
2515 /* Ensure that the inputs to the handler are in the correct precision
2516 for the opcode. This will be the full mode size. */
2517 actual_precision = GET_MODE_PRECISION (actual_mode);
2518 if (actual_precision != TYPE_PRECISION (type1)
2519 || from_unsigned1 != TYPE_UNSIGNED (type1))
2520 mult_rhs1 = build_and_insert_cast (gsi, loc,
2521 build_nonstandard_integer_type
2522 (actual_precision, from_unsigned1),
2523 mult_rhs1);
2524 if (actual_precision != TYPE_PRECISION (type2)
2525 || from_unsigned2 != TYPE_UNSIGNED (type2))
2526 mult_rhs2 = build_and_insert_cast (gsi, loc,
2527 build_nonstandard_integer_type
2528 (actual_precision, from_unsigned2),
2529 mult_rhs2);
2530
2531 if (!useless_type_conversion_p (type, TREE_TYPE (add_rhs)))
2532 add_rhs = build_and_insert_cast (gsi, loc, type, add_rhs);
2533
2534 /* Handle constants. */
2535 if (TREE_CODE (mult_rhs1) == INTEGER_CST)
2536 mult_rhs1 = fold_convert (type1, mult_rhs1);
2537 if (TREE_CODE (mult_rhs2) == INTEGER_CST)
2538 mult_rhs2 = fold_convert (type2, mult_rhs2);
2539
2540 gimple_assign_set_rhs_with_ops_1 (gsi, wmult_code, mult_rhs1, mult_rhs2,
2541 add_rhs);
2542 update_stmt (gsi_stmt (*gsi));
2543 widen_mul_stats.maccs_inserted++;
2544 return true;
2545 }
2546
2547 /* Combine the multiplication at MUL_STMT with operands OP1 and OP2
2548 with uses in additions and subtractions to form fused multiply-add
2549 operations. Returns true if successful and MUL_STMT should be removed. */
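/* A minimal sketch of the intended rewrite (assuming the fma optab is
   available for the type's mode and -ffp-contract allows contraction):

     t_1 = a_2 * b_3;
     d_4 = t_1 + c_5;

   replaces the addition with an FMA_EXPR combining a_2, b_3 and c_5 and
   leaves the multiplication dead.  A NEGATE_EXPR of the product or a
   MINUS_EXPR is handled by negating the corresponding operand.  */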
2550
2551 static bool
2552 convert_mult_to_fma (gimple mul_stmt, tree op1, tree op2)
2553 {
2554 tree mul_result = gimple_get_lhs (mul_stmt);
2555 tree type = TREE_TYPE (mul_result);
2556 gimple use_stmt, neguse_stmt, fma_stmt;
2557 use_operand_p use_p;
2558 imm_use_iterator imm_iter;
2559
2560 if (FLOAT_TYPE_P (type)
2561 && flag_fp_contract_mode == FP_CONTRACT_OFF)
2562 return false;
2563
2564 /* We don't want to do bitfield reduction ops. */
2565 if (INTEGRAL_TYPE_P (type)
2566 && (TYPE_PRECISION (type)
2567 != GET_MODE_PRECISION (TYPE_MODE (type))))
2568 return false;
2569
2570 /* If the target doesn't support it, don't generate it. We assume that
2571 if fma isn't available then fms, fnma or fnms are not either. */
2572 if (optab_handler (fma_optab, TYPE_MODE (type)) == CODE_FOR_nothing)
2573 return false;
2574
2575 /* If the multiplication has zero uses, it is kept around probably because
2576 of -fnon-call-exceptions. Don't optimize it away in that case,
2577 it is DCE's job. */
2578 if (has_zero_uses (mul_result))
2579 return false;
2580
2581 /* Make sure that the multiplication statement becomes dead after
2582 the transformation, i.e. that all uses are transformed to FMAs.
2583 This means we assume that an FMA operation has the same cost
2584 as an addition. */
2585 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, mul_result)
2586 {
2587 enum tree_code use_code;
2588 tree result = mul_result;
2589 bool negate_p = false;
2590
2591 use_stmt = USE_STMT (use_p);
2592
2593 if (is_gimple_debug (use_stmt))
2594 continue;
2595
2596 /* For now restrict this operation to single basic blocks. In theory
2597 we would want to support sinking the multiplication in
2598 m = a*b;
2599 if ()
2600 ma = m + c;
2601 else
2602 d = m;
2603 to form an FMA in the then block and sink the multiplication to the
2604 else block. */
2605 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
2606 return false;
2607
2608 if (!is_gimple_assign (use_stmt))
2609 return false;
2610
2611 use_code = gimple_assign_rhs_code (use_stmt);
2612
2613 /* A negate on the multiplication leads to FNMA. */
2614 if (use_code == NEGATE_EXPR)
2615 {
2616 ssa_op_iter iter;
2617 use_operand_p usep;
2618
2619 result = gimple_assign_lhs (use_stmt);
2620
2621 /* Make sure the negate statement becomes dead with this
2622 single transformation. */
2623 if (!single_imm_use (gimple_assign_lhs (use_stmt),
2624 &use_p, &neguse_stmt))
2625 return false;
2626
2627 /* Make sure the multiplication isn't also used on that stmt. */
2628 FOR_EACH_PHI_OR_STMT_USE (usep, neguse_stmt, iter, SSA_OP_USE)
2629 if (USE_FROM_PTR (usep) == mul_result)
2630 return false;
2631
2632 /* Re-validate. */
2633 use_stmt = neguse_stmt;
2634 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
2635 return false;
2636 if (!is_gimple_assign (use_stmt))
2637 return false;
2638
2639 use_code = gimple_assign_rhs_code (use_stmt);
2640 negate_p = true;
2641 }
2642
2643 switch (use_code)
2644 {
2645 case MINUS_EXPR:
2646 if (gimple_assign_rhs2 (use_stmt) == result)
2647 negate_p = !negate_p;
2648 break;
2649 case PLUS_EXPR:
2650 break;
2651 default:
2652 /* FMA can only be formed from PLUS and MINUS. */
2653 return false;
2654 }
2655
2656 /* If the subtrahend (gimple_assign_rhs2 (use_stmt)) is computed
2657 by a MULT_EXPR that we'll visit later, we might be able to
2658 get a more profitable match with fnma.
2659 OTOH, if we don't, a negate / fma pair likely has lower latency
2660 than a mult / subtract pair. */
2661 if (use_code == MINUS_EXPR && !negate_p
2662 && gimple_assign_rhs1 (use_stmt) == result
2663 && optab_handler (fms_optab, TYPE_MODE (type)) == CODE_FOR_nothing
2664 && optab_handler (fnma_optab, TYPE_MODE (type)) != CODE_FOR_nothing)
2665 {
2666 tree rhs2 = gimple_assign_rhs2 (use_stmt);
2667
2668 if (TREE_CODE (rhs2) == SSA_NAME)
2669 {
2670 gimple stmt2 = SSA_NAME_DEF_STMT (rhs2);
2671 if (has_single_use (rhs2)
2672 && is_gimple_assign (stmt2)
2673 && gimple_assign_rhs_code (stmt2) == MULT_EXPR)
2674 return false;
2675 }
2676 }
2677
2678 /* We can't handle a * b + a * b. */
2679 if (gimple_assign_rhs1 (use_stmt) == gimple_assign_rhs2 (use_stmt))
2680 return false;
2681
2682 /* While it is possible to validate whether or not the exact form
2683 that we've recognized is available in the backend, the assumption
2684 is that the transformation is never a loss. For instance, suppose
2685 the target only has the plain FMA pattern available. Consider
2686 a*b-c -> fma(a,b,-c): we've exchanged MUL+SUB for FMA+NEG, which
2687 is still two operations. Consider -(a*b)-c -> fma(-a,b,-c): we
2688 still have 3 operations, but in the FMA form the two NEGs are
2689 independent and could be run in parallel. */
2690 }
2691
2692 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, mul_result)
2693 {
2694 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
2695 enum tree_code use_code;
2696 tree addop, mulop1 = op1, result = mul_result;
2697 bool negate_p = false;
2698
2699 if (is_gimple_debug (use_stmt))
2700 continue;
2701
2702 use_code = gimple_assign_rhs_code (use_stmt);
2703 if (use_code == NEGATE_EXPR)
2704 {
2705 result = gimple_assign_lhs (use_stmt);
2706 single_imm_use (gimple_assign_lhs (use_stmt), &use_p, &neguse_stmt);
2707 gsi_remove (&gsi, true);
2708 release_defs (use_stmt);
2709
2710 use_stmt = neguse_stmt;
2711 gsi = gsi_for_stmt (use_stmt);
2712 use_code = gimple_assign_rhs_code (use_stmt);
2713 negate_p = true;
2714 }
2715
2716 if (gimple_assign_rhs1 (use_stmt) == result)
2717 {
2718 addop = gimple_assign_rhs2 (use_stmt);
2719 /* a * b - c -> a * b + (-c) */
2720 if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
2721 addop = force_gimple_operand_gsi (&gsi,
2722 build1 (NEGATE_EXPR,
2723 type, addop),
2724 true, NULL_TREE, true,
2725 GSI_SAME_STMT);
2726 }
2727 else
2728 {
2729 addop = gimple_assign_rhs1 (use_stmt);
2730 /* a - b * c -> (-b) * c + a */
2731 if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
2732 negate_p = !negate_p;
2733 }
2734
2735 if (negate_p)
2736 mulop1 = force_gimple_operand_gsi (&gsi,
2737 build1 (NEGATE_EXPR,
2738 type, mulop1),
2739 true, NULL_TREE, true,
2740 GSI_SAME_STMT);
2741
2742 fma_stmt = gimple_build_assign_with_ops (FMA_EXPR,
2743 gimple_assign_lhs (use_stmt),
2744 mulop1, op2,
2745 addop);
2746 gsi_replace (&gsi, fma_stmt, true);
2747 widen_mul_stats.fmas_inserted++;
2748 }
2749
2750 return true;
2751 }
2752
2753 /* Find integer multiplications where the operands are extended from
2754 smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR
2755 where appropriate. */
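/* In addition to plain multiplications, the walk below treats
   pow (x, 2.0) (and the powf/powl variants) as x * x, so that e.g.

     t_1 = pow (x_2, 2.0);
     r_3 = t_1 + c_4;

   can become a fused multiply-add of x_2, x_2 and c_4 via
   convert_mult_to_fma, after which the call is removed.  */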
2756
2757 static unsigned int
2758 execute_optimize_widening_mul (void)
2759 {
2760 basic_block bb;
2761 bool cfg_changed = false;
2762
2763 memset (&widen_mul_stats, 0, sizeof (widen_mul_stats));
2764
2765 FOR_EACH_BB (bb)
2766 {
2767 gimple_stmt_iterator gsi;
2768
2769 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
2770 {
2771 gimple stmt = gsi_stmt (gsi);
2772 enum tree_code code;
2773
2774 if (is_gimple_assign (stmt))
2775 {
2776 code = gimple_assign_rhs_code (stmt);
2777 switch (code)
2778 {
2779 case MULT_EXPR:
2780 if (!convert_mult_to_widen (stmt, &gsi)
2781 && convert_mult_to_fma (stmt,
2782 gimple_assign_rhs1 (stmt),
2783 gimple_assign_rhs2 (stmt)))
2784 {
2785 gsi_remove (&gsi, true);
2786 release_defs (stmt);
2787 continue;
2788 }
2789 break;
2790
2791 case PLUS_EXPR:
2792 case MINUS_EXPR:
2793 convert_plusminus_to_widen (&gsi, stmt, code);
2794 break;
2795
2796 default:;
2797 }
2798 }
2799 else if (is_gimple_call (stmt)
2800 && gimple_call_lhs (stmt))
2801 {
2802 tree fndecl = gimple_call_fndecl (stmt);
2803 if (fndecl
2804 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
2805 {
2806 switch (DECL_FUNCTION_CODE (fndecl))
2807 {
2808 case BUILT_IN_POWF:
2809 case BUILT_IN_POW:
2810 case BUILT_IN_POWL:
2811 if (TREE_CODE (gimple_call_arg (stmt, 1)) == REAL_CST
2812 && REAL_VALUES_EQUAL
2813 (TREE_REAL_CST (gimple_call_arg (stmt, 1)),
2814 dconst2)
2815 && convert_mult_to_fma (stmt,
2816 gimple_call_arg (stmt, 0),
2817 gimple_call_arg (stmt, 0)))
2818 {
2819 unlink_stmt_vdef (stmt);
2820 if (gsi_remove (&gsi, true)
2821 && gimple_purge_dead_eh_edges (bb))
2822 cfg_changed = true;
2823 release_defs (stmt);
2824 continue;
2825 }
2826 break;
2827
2828 default:;
2829 }
2830 }
2831 }
2832 gsi_next (&gsi);
2833 }
2834 }
2835
2836 statistics_counter_event (cfun, "widening multiplications inserted",
2837 widen_mul_stats.widen_mults_inserted);
2838 statistics_counter_event (cfun, "widening maccs inserted",
2839 widen_mul_stats.maccs_inserted);
2840 statistics_counter_event (cfun, "fused multiply-adds inserted",
2841 widen_mul_stats.fmas_inserted);
2842
2843 return cfg_changed ? TODO_cleanup_cfg : 0;
2844 }
2845
2846 static bool
2847 gate_optimize_widening_mul (void)
2848 {
2849 return flag_expensive_optimizations && optimize;
2850 }
2851
2852 namespace {
2853
2854 const pass_data pass_data_optimize_widening_mul =
2855 {
2856 GIMPLE_PASS, /* type */
2857 "widening_mul", /* name */
2858 OPTGROUP_NONE, /* optinfo_flags */
2859 true, /* has_gate */
2860 true, /* has_execute */
2861 TV_NONE, /* tv_id */
2862 PROP_ssa, /* properties_required */
2863 0, /* properties_provided */
2864 0, /* properties_destroyed */
2865 0, /* todo_flags_start */
2866 ( TODO_verify_ssa | TODO_verify_stmts
2867 | TODO_update_ssa ), /* todo_flags_finish */
2868 };
2869
2870 class pass_optimize_widening_mul : public gimple_opt_pass
2871 {
2872 public:
2873 pass_optimize_widening_mul(gcc::context *ctxt)
2874 : gimple_opt_pass(pass_data_optimize_widening_mul, ctxt)
2875 {}
2876
2877 /* opt_pass methods: */
2878 bool gate () { return gate_optimize_widening_mul (); }
2879 unsigned int execute () { return execute_optimize_widening_mul (); }
2880
2881 }; // class pass_optimize_widening_mul
2882
2883 } // anon namespace
2884
2885 gimple_opt_pass *
2886 make_pass_optimize_widening_mul (gcc::context *ctxt)
2887 {
2888 return new pass_optimize_widening_mul (ctxt);
2889 }